blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e99b3383d46e9047400b74985c4db6e967fc27f
|
a90254b94137497077573b358e47dcfa35ceaa05
|
/man/rlr_type.Rd
|
90830c25a973e1c1dc0ca7482f4a29537d6ba2c9
|
[] |
no_license
|
cran/regressoR
|
07277cd527fcda07dbd3d06983b4a867a1409e4f
|
2e687cea7d2019b61ae2f513ca350cab596a7d3f
|
refs/heads/master
| 2023-07-05T22:32:40.063059
| 2023-06-29T15:40:02
| 2023-06-29T15:40:02
| 209,471,676
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 465
|
rd
|
rlr_type.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_penalized_Regression_utils.R
\name{rlr_type}
\alias{rlr_type}
\title{rlr_type}
\usage{
rlr_type(alpha_rlr = 0)
}
\arguments{
\item{alpha_rlr}{the penalty type: alpha=1 corresponds to the lasso penalty, and alpha=0 to the ridge penalty.}
}
\description{
returns the name of the penalty according to the alpha.
}
\examples{
rlr_type(1)
rlr_type(0)
}
\seealso{
\code{\link[glmnet]{glmnet}}
}
|
ad52e513b3b4cdfccfbdc75c0d879fbe1f851abc
|
d0c3bfcf62364de23de01b55ac97792bfe9116cd
|
/USG_Collaboration_Plots_160705.r
|
caa0154d8462c1166d8ed1992d60d42b20c9d983
|
[] |
no_license
|
rungec/USG-Infrastructure-sharing
|
d3c4d6167d52af4b8f903e42139941aff4022a6f
|
6b344151c29fe464ac85b586978df7f2cb6d8d23
|
refs/heads/master
| 2021-01-12T11:52:10.422522
| 2017-05-30T23:48:44
| 2017-05-30T23:48:44
| 69,593,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,957
|
r
|
USG_Collaboration_Plots_160705.r
|
#Redo of figures following reviewer comments #Round 2
#NOTE I looked at changing how homogenous impacts are calculated but didn't end up using this in the analysis
#
# Input paths (network drive). Scenario naming used throughout this script:
#   scen1 = shared ("high") scenario, scen3 = independent ("low") scenario,
#   scen5 = partial/staged sharing, scen7 = low-impact shared.
scen1Dir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/final costs/1_high_finalcosts_161221.csv"
minegroupingsDir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/Mine_groupings.csv"
scen3Dir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/final costs/3_low_finalcosts_150420.csv"
scen1byportDir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/1_high_diffuse_biodiversity_byport.csv"
scen5Dir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/final costs/5_partial_finalcosts.csv"
scen7Dir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/final costs/7_lowimpact_shared_finalcosts.csv"
#inpRast <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Data/Species/thresholdmaps/183sp/summed_presence/Summed_presence_183Sp.tif"
#spInpDir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Data/Species/Species spreadsheets/Species_list_taxa_impacts.csv"
# sppDir is a (prefix, suffix) pair used to build per-scenario species-impact file paths
sppDir <- c("Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/", "_diffuse_biodiversity_totals_dropAgrostisLimitanea.csv")
plotOutDir <- "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/figures/dataplots/183Sp_diffuse_rail4m_FINALSCENARIOS/"
#library(vioplot)
library(ggplot2)
library(gridExtra)
# Load the per-mine scenario tables
scen1data <- read.csv(scen1Dir, stringsAsFactors=FALSE)
scen3data <- read.csv(scen3Dir, stringsAsFactors=FALSE)[,1:22]#split off empty last column
dataPorts <- read.csv(scen1byportDir, stringsAsFactors=FALSE)
groupings <- read.csv(minegroupingsDir, stringsAsFactors=FALSE)
#Calculate homogenous impacts
avSpLoss <- sum(scen3data$SumSpLoss183SpDiffuse)/sum(scen3data$Area)#average species loss per area in scenario 3
#Alternate:
# r <- raster(inpRast)
# spAreadata <- read.csv(spInpDir, stringsAsFactors=FALSE)
# avSpLoss <- sum(spAreadata$Total_Area/(ncell(r)*0.25))/183 #Species area divided by area of study region = proportion of species habitat lost if all region developed #average species loss per km^2
#gave up here - this got a bit complicated because of the diffuse impacts - impact value in a cell depends on what happends to neighbours
# Stack the two scenario tables into one frame with a Scenario id column.
# NOTE(review): each=28 assumes both tables have exactly 28 rows (one per mine);
# the sapply(nrow) check below prints the counts -- confirm they are 28.
dataAll <- data.frame(rep(c(1,3), each=28), rbind(scen1data,scen3data))
names(dataAll)[1]<-"Scenario"
# Derived cost columns, converted to millions of dollars
dataAll$RailCostHighM <- dataAll$RailCostHigh/1000000
dataAll$RailCostExtraHighM <- dataAll$RailCostExtraHigh/1000000
dataAll$RoadCostM <- dataAll$RoadCost/1000000
dataAll$TotalCostRoad <- rowSums(dataAll[,c("AgriLoss", "TransCost", "RoadCost")])/1000000
dataAll$TotalCostRail <- rowSums(dataAll[,c("AgriLoss", "TransCost", "RailCostExtraHigh")])/1000000
# Homogeneous-impact counterfactual: impact proportional to footprint area
dataAll$SumSpLoss183SpDiffHomog <- dataAll$Area*avSpLoss
altNames <- c("3PortShared", "3PortUnshare")
sapply(list(scen1data, scen3data), nrow)
mycols <- rainbow(4)
#top 5 high impact mines = Kingsgate, Eyre Iron, Mungana, Minotaur, Illuka
#big 5 high value mines = Bhp Billiton, Arrium, Illuka, Iron Road, OzMinerals
###################################
###################################
#SET UP VARIABLES
#biodiversity benefit from collaboration
# Benefit per mine = independent (scen 3) impact minus shared (scen 1) impact;
# /100 presumably rescales the summed per-species losses -- TODO confirm units
bioBen3port <- (dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"] - dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffuse"])/100
bioBen3portHomog <- (dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffHomog"] - dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffHomog"])/100
bioBen3portdirect <- (dataAll[dataAll$Scenario==3, "SumSpLoss183Sp"] - dataAll[dataAll$Scenario==1, "SumSpLoss183Sp"])/100
#biodiversity benefit as % of total
# the *100*100 undoes the /100 above and converts the ratio to a percentage
bioBen3portPer <- bioBen3port/dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"]*100*100
bioBen3portHomogPer <- bioBen3portHomog/dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffHomog"]*100*100
#financial benefit from collaboration
finBen3port <- (dataAll[dataAll$Scenario==3, "TotalCostRail"] - dataAll[dataAll$Scenario==1, "TotalCostRail"])/1000
#financial benefit as % of total cost
# the *100*1000 undoes the /1000 above and converts the ratio to a percentage
finBen3portPer <- finBen3port/dataAll[dataAll$Scenario==3, "TotalCostRail"]*100*1000
#number of mines collaborating
numMine3port <- dataAll[dataAll$Scenario==1, "NumCollab"]
numMine3portF <- factor(numMine3port, levels=as.character(5:16))
#normalised biodiversity impact
# impact (and financial benefit) per km of infrastructure length
bioNorm3port3 <- dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"]/dataAll[dataAll$Scenario==3, "Len"]
bioNorm3port1<- dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffuse"]/dataAll[dataAll$Scenario==1, "Len"]
finNorm3port <- finBen3port/dataAll[dataAll$Scenario==3, "Len"]
# One row per mine (rows 1:28 of dataAll are scenario 1); groupings columns give
# the mine-port link groupings used for plotting/aggregation
DFben <- data.frame(dataAll$Mine[1:28], dataAll$Len[1:28],bioBen3port,bioBen3portHomog, bioBen3portdirect, bioBen3portPer, bioBen3portHomogPer, finBen3port, finBen3portPer, numMine3port, numMine3portF, bioNorm3port3, bioNorm3port1, finNorm3port, groupings$Group_1, groupings$Group_2, groupings$Group_3)
names(DFben)[1:2] <- c("Mine", "Len")
write.csv(DFben, paste0(sppDir[1], "Data_for_plots_161221.csv"), row.names=FALSE)
#######################
#statistical tests whether means are different between scenarios - by mine
#statistical test whether means are different between independent and shared scenarios
# Paired t-tests across mines: independent (scen 3) vs shared (scen 1)
tbio13 <- t.test(dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"], dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffuse"], paired=TRUE)
tbio13
tfin13 <- t.test(dataAll[dataAll$Scenario==3, "TotalCostRail"], dataAll[dataAll$Scenario==1, "TotalCostRail"], paired=TRUE)
tfin13
#test whether means are different between independent and low impact scenarios
# Low-impact scenario built by zeroing the impacts/costs of the 5 high-impact mines
lowimpact <- dataAll[dataAll$Scenario==3, c("Mine", "SumSpLoss183SpDiffuse", "TotalCostRail")]
lowimpact[lowimpact$Mine %in% c("Eyre Iron","Iluka", "Kingsgate","Minotaur", "Mungana"), c("SumSpLoss183SpDiffuse","TotalCostRail")] <-0
tbio16 <- t.test(dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"], lowimpact$SumSpLoss183SpDiffuse, paired=TRUE)
tbio16
tfin16 <- t.test(dataAll[dataAll$Scenario==3, "TotalCostRail"], lowimpact$TotalCostRail, paired=TRUE)
tfin16
#test whether means are different between independent and staged sharing scenarios
scen5data <- read.csv(scen5Dir, header=TRUE)
# NOTE(review): the scenario-5 column name uses lowercase 'sp'
# ("SumSpLoss183spDiffuse"), unlike dataAll's "SumSpLoss183SpDiffuse" --
# confirm this matches the CSV header, otherwise this silently yields NULL
tbio15 <- t.test(dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"], scen5data$SumSpLoss183spDiffuse, paired=TRUE)
tbio15
tfin15 <- t.test(dataAll[dataAll$Scenario==3, "TotalCostRail"], (scen5data$RailCostExtraHigh+scen5data$AgriLoss+scen5data$TransCost)/1000000, paired=TRUE)
tfin15
#######################
#statistical tests whether means are different between scenarios - by species
#Calculate impacts by species
# Per-species impact tables for scenarios 1, 3, 5, 6, 7
spData1 <- read.csv(paste0(sppDir[1], "1_high", sppDir[2]), header=TRUE)
spData3 <- read.csv(paste0(sppDir[1], "3_low", sppDir[2]), header=TRUE)
spData5 <- read.csv(paste0(sppDir[1], "5_partial", sppDir[2]), header=TRUE)
spData6 <- read.csv(paste0(sppDir[1], "6_lowimpact", sppDir[2]), header=TRUE)
spData7 <- read.csv(paste0(sppDir[1], "7_lowimpact_shared", sppDir[2]), header=TRUE)
#Drop Agrostis.limitanea
# grep finds all columns for that species (one per impact class)
notcols <- grep("Agrostis.limitanea", names(spData1))
spData1 <- spData1[, -notcols]
notcols <- grep("Agrostis.limitanea", names(spData3))
spData3 <- spData3[, -notcols]
notcols <- grep("Agrostis.limitanea", names(spData5))
spData5 <- spData5[, -notcols]
notcols <- grep("Agrostis.limitanea", names(spData6))
spData6 <- spData6[, -notcols]
notcols <- grep("Agrostis.limitanea", names(spData7))
spData7 <- spData7[, -notcols]
# Reshape the 728 impact columns into a species x impact-class matrix
# (728 = 182 species x 4 classes; byrow=TRUE gives one row per species).
# NOTE(review): the start column differs per file (6, 14, 7) -- presumably
# different numbers of leading metadata columns; confirm against the CSVs.
impactBySpeciesS1 <- matrix(colSums(spData1[,6:733]), ncol=4, byrow=TRUE)
impactBySpeciesS3 <- matrix(colSums(spData3[,6:733]), ncol=4, byrow=TRUE)
impactBySpeciesS5 <- matrix(colSums(spData5[,14:741]), ncol=4, byrow=TRUE)
impactBySpeciesS6 <- matrix(colSums(spData6[,6:733]), ncol=4, byrow=TRUE)
impactBySpeciesS7 <- matrix(colSums(spData7[,7:734]), ncol=4, byrow=TRUE)
#total number of species impacted
length(which(rowSums(impactBySpeciesS1)!=0))
length(which(rowSums(impactBySpeciesS3)!=0))
length(which(rowSums(impactBySpeciesS5)!=0))
length(which(rowSums(impactBySpeciesS6)!=0))
length(which(rowSums(impactBySpeciesS7)!=0))
#Mean habitat loss per species (% of total habitat)
mean(rowSums(impactBySpeciesS1))
mean(rowSums(impactBySpeciesS3))
mean(rowSums(impactBySpeciesS5))
mean(rowSums(impactBySpeciesS6))
mean(rowSums(impactBySpeciesS7))
#For all 183 species
#t test whether means are different independent vs shared
tsp13 <- t.test(rowSums(impactBySpeciesS3), rowSums(impactBySpeciesS1), paired=TRUE)
tsp13
#t test whether means are different independent vs lowimpact
tsp16 <- t.test(rowSums(impactBySpeciesS3), rowSums(impactBySpeciesS6), paired=TRUE)
tsp16
#t test whether means are different independent vs staged shared
tsp15 <- t.test(rowSums(impactBySpeciesS3), rowSums(impactBySpeciesS5), paired=TRUE)
tsp15
#t test whether means are different independent vs staged shared
# NOTE(review): comment above appears copy-pasted -- S7 is the low-impact
# SHARED scenario, not staged sharing
tsp17 <- t.test(rowSums(impactBySpeciesS3), rowSums(impactBySpeciesS7), paired=TRUE)
tsp17
#mean habitat loss for species impacted
# mean/sd restricted to species with non-zero impact
mean(rowSums(impactBySpeciesS1)[which(rowSums(impactBySpeciesS1)!=0)])
sd(rowSums(impactBySpeciesS1)[which(rowSums(impactBySpeciesS1)!=0)])
mean(rowSums(impactBySpeciesS3)[which(rowSums(impactBySpeciesS3)!=0)])
sd(rowSums(impactBySpeciesS3)[which(rowSums(impactBySpeciesS3)!=0)])
#######################
#FINAL PLOTS
#######################
# #Pannell plot
# p1 <- ggplot(DFben, aes(x=finBen3portPer, y=bioBen3port))+
# geom_blank() +
# theme_bw(17) + #remove grey #remove grids
# theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
# #scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) #make start at zero
# coord_cartesian(xlim=c(0,100), ylim=c(0,0.60001))+ #set x and y limits
# #theme(axis.ticks = element_blank(), axis.text = element_blank())+#hide tixk marks and numbers
# labs(x="Private net benefit", y="Public net benefit")+
# theme(axis.title.x = element_text(vjust=-0.5),axis.title.y = element_text(vjust=1))+ #move xylabels away from graph
# geom_rect(xmin=0, ymin=0.1, xmax=10, ymax=0.6, fill="lightskyblue")+
# geom_rect(xmin=10, ymin=0.1, xmax=25, ymax=0.6, fill="lightskyblue1")+
# geom_rect(xmin=0, ymin=0, xmax=100, ymax=0.1, fill="grey70")+
# geom_polygon(aes(x=c(25, 100, 100, 25), y=c(0.1, 0.1, 0.3,0.1)), fill="grey90") +
# geom_line(aes(x=c(24.99,25), y=c(0.6, 0.1)), linetype="dashed", size=1)+
# geom_line(aes(x=c(25,100), y=c(0.1, 0.3)), linetype="dashed", size=1)+
# geom_line(aes(x=c(10,10), y=c(0.6, 0.1)), linetype="solid", size=1)+
# geom_line(aes(x=c(10,100), y=c(0.1, 0.1)), linetype="solid", size=1)+
# geom_segment(aes(x=10,xend=25,y=0.45,yend=0.45),arrow=arrow(length=unit(0.2,"cm")),show_guide=F, size=1)+ #add arrow
# geom_segment(aes(x=80,xend=80,y=0.1,yend=0.24),arrow=arrow(length=unit(0.2,"cm")),show_guide=F, size=1)+ #add arrow
# annotate("text", label="POSITIVE INCENTIVES", x=5, y=0.3, angle=90, size=6)+
# annotate("text", label="NO ACTION", x=50, y=0.05, size=6)+
# annotate("text", label="EXTENSION", x=70, y=0.5, size=6)+
# annotate("text", label="Transaction \n cost (private)", x=37, y=0.45, angle=0, size=5)+
# annotate("text", label="Transaction \n cost (public)", x=90, y=0.18, angle=0, size=5)
# outPath <- paste0(plotOutDir, "USG_collab_Pannell_fig.png")
# ggsave(filename=outPath)
#######################
#Public vs private incentive plot
#######################
# Scatter of averted capital cost (private benefit, % of total) against averted
# biodiversity impact (public benefit) per mine, shaped/coloured by mine-port
# grouping (Group_3).
#p2 <- ggplot(DFben, aes(x=finBen3portPer, y=bioBen3port, color=groupings.Group_3, shape=groupings.Group_3)) +
p2 <- ggplot(DFben, aes(x=finBen3portPer, y=bioBen3port, shape=groupings.Group_3, color=groupings.Group_3)) +
geom_blank() +
#theme_bw(17) + #remove grey #remove grids
theme_classic(17) + #remove grey #remove grids
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+
#scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0)) #make start at zero
coord_cartesian(xlim=c(15,100), ylim=c(0,0.60001))+ #set x and y limits
labs(x="Averted capital cost (% of total)", y="Averted biodiversity impact")+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1))+ #move xylabels away from graph
#geom_rect(xmin=0, ymin=0.1, xmax=25, ymax=0.6, fill="lightskyblue1")+
#geom_rect(xmin=0, ymin=0, xmax=100, ymax=0.1, fill="grey70")+
#annotate("text", label="POSITIVE INCENTIVES", x=20, y=0.3, angle=90, size=6)+
#annotate("text", label="NO ACTION", x=30, y=0.05, size=6)+
#annotate("text", label="EXTENSION", x=70, y=0.5, size=6)+
#geom_abline(slope=-0.003464, intercept=0.354103, colour="grey70", linetype='dashed')+
#geom_smooth(method="lm", se=FALSE, colour="grey70", linetype='dashed')+
geom_point(size=4, show.legend = FALSE)+
#geom_point(size=4, colour='black', show.legend = FALSE)+
scale_shape_manual(values=c(17,17,15,16,15))+
scale_color_manual(values=c("darkgoldenrod1", "cyan4","darkblue", "cyan2", "darkgray"))
# outPath <- paste0(plotOutDir, "USG_collab_Pannell_withData_fig.png")
outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig.png")
#outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig_withline.png")
# Pass the plot explicitly: ggsave() otherwise defaults to last_plot(), which is
# only set when a plot is printed -- p2 is assigned but never printed here, so in
# a non-interactive run the wrong (or no) plot would be saved.
ggsave(filename=outPath, plot=p2)
outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig.pdf")
#outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig_withline.pdf")
ggsave(filename=outPath, plot=p2)
# BUG FIX: removed a stray dev.off() that followed the ggsave() calls --
# ggsave() opens and closes its own graphics device, so no device is left open
# and dev.off() errors ("cannot shut down device 1 (the null device)").
#[stats for trendline]
# Linear fits of public (biodiversity) benefit on private (financial %) benefit
fit <- lm(bioBen3port~finBen3portPer)
summary(fit) #not significant
# Refit excluding observation 9 -- presumably an outlier mine; confirm which one
fit2 <- lm(bioBen3port[-9]~finBen3portPer[-9])
summary(fit2)
# Standard 2x2 lm diagnostic panels for each fit, written to pdf
outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig_withline_fit.pdf")
pdf(file=outPath)
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(fit)
dev.off()
outPath <- paste0(plotOutDir, "USG_collab_PublicvsPrivate_fig_withline_fit2.pdf")
pdf(file=outPath)
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(fit2)
dev.off()
# library(nlme)
# fit2 <- lme(bioBen3port~finBen3portPer, random=~1|DFben$groupings.Group_3) #glmm with line as random factor
# summary(fit2) #couldnt get this to work
# #######################
# #2-part pannell plot
# #######################
# outPath <- paste0(plotOutDir, "USG_collab_Pannell_2part_fig.png")
# png(filename=outPath, width=960, height=480)
# grid.arrange(p1+annotate("text", label="a", size=7, x=95, y=0.57, fontface="bold"), p2+annotate("text", label="b", size=7, x=95, y=0.57, fontface="bold"), ncol=2)
# dev.off()
#######################
#Summary & plot of scenario impacts by Port grouping
#######################
#with points added for the 5 shared mine-port links
# Low-impact variant: independent-scenario data with the 5 high-impact mines zeroed
scen6data <- dataAll[dataAll$Scenario==3,]
scen6data[scen6data$Mine %in% c("Eyre Iron", "Iluka", "Minotaur", "Mungana", "Kingsgate"),"SumSpLoss183SpDiffuse"] <- 0
scen5 <- read.csv(scen5Dir, header=TRUE)
scen5data <- scen5[order(scen5$Mine),]
scen7 <- read.csv(scen7Dir, header=TRUE, stringsAsFactors=FALSE)
# Pad scenario 7 with all-zero rows for the 5 mines absent from that file.
# NOTE(review): assumes scen7 has 13 columns with Mine first -- confirm against
# the CSV; names(blankrows) <- names(scen7) will error if the count differs.
blankrows <- data.frame(c("Eyre Iron", "Iluka", "Minotaur", "Mungana", "Kingsgate"), 0,0,0,0,0,0,0,0,0,0,0,0)
names(blankrows) <- names(scen7)
scen7 <- rbind(scen7,blankrows)
scen7data <- scen7[order(scen7$Mine),]
# Aggregate each scenario's impacts by mine-port link group (Group_3);
# /100 applies the same rescaling used for bioBen3port above.
# NOTE(review): scen5data/scen7data use the lowercase-'sp' column name
# ("SumSpLoss183spDiffuse") -- confirm it matches those CSV headers.
DF5portlinks <- data.frame(aggregate(DFben$Len, by=list(DFben$groupings.Group_3), FUN=sum),
HeteroSharedImpact=aggregate(dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffuse"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x,
HomogImpact=aggregate(dataAll[dataAll$Scenario==1, "SumSpLoss183SpDiffHomog"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x,
HeteroUnsharedImpact=aggregate(dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x,
LowImpact = aggregate(scen6data[, "SumSpLoss183SpDiffuse"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x,
LowImpactShared = aggregate(scen7data[, "SumSpLoss183spDiffuse"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x,
RestrictedAccess = aggregate(scen5data[, "SumSpLoss183spDiffuse"]/100, by=list(DFben$groupings.Group_3), FUN=sum)$x
)
names(DF5portlinks)[1:2] <- c("Group_3", "Len")
write.csv(DF5portlinks, "Y:/Data/GPEM_Postdoc/1_USG_Collaboration/Analysis/tables/Totals_diffuse_biodiversity_by5mineportlinks.csv", row.names=FALSE)
#######################
#Plot of benefit of collaborating by 5 mine-port links
#######################
#totalimpact <- sum(DF5portlinks$HeteroUnsharedImpact)
# Long-format frame: one row per (group, scenario) for the grouped bar chart;
# each=5 assumes 5 mine-port link groups
plotDf <- with(DF5portlinks, data.frame(Group3=rep(Group_3, 5), PercContrib=c(HeteroSharedImpact, HeteroUnsharedImpact, LowImpact, LowImpactShared, RestrictedAccess), Scen=rep(c("Shared", "Independent", "LowImpact", "LowImpactShared", "RestrictedAccess"), each=5)))
# Factor ordering controls the bar order within each group
plotDf$ScenOrdered <- factor(plotDf$Scen, levels=c("Independent", "RestrictedAccess", "LowImpact", "LowImpactShared", "Shared"))
#Plot for supp info
p <- ggplot(plotDf, aes(x=Group3, y=PercContrib, fill=ScenOrdered))+
geom_bar(stat='identity', position='dodge')+
scale_fill_grey(name="Scenario",
breaks=c("Independent", "RestrictedAccess", "LowImpact", "LowImpactShared", "Shared"),
labels=c("Independent", "Restricted access", "Low impact independent", "Low Impact shared", "Shared"), start=0.8, end=0.2)+
theme_classic(17) + #get rid of grey bkg and gridlines
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
#coord_cartesian(xlim=c(0,780), ylim=c(0,0.75))+ #set x and y limits
labs(x="Mine-port link region", y="Biodiversity impact")+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+#move xylabels away from graph
theme(legend.position="right", legend.text=element_text(size=14))+#use 'none' to get rid of legend
theme(legend.title=element_blank())#get rid of legend title
# NOTE(review): ggsave() with no plot= saves last_plot(), which is set by
# printing -- p is assigned but not printed; confirm this works as intended
# outside interactive use
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_suppinfo.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_suppinfo.pdf")
ggsave(filename=outPath)
#Plot for manuscript - Submission 3
# Same grouped bar chart restricted to 3 scenarios, legend on top
plotDfSub <- plotDf[plotDf$Scen %in% c("Independent", "RestrictedAccess", "Shared"),]
p <- ggplot(plotDfSub, aes(x=Group3, y=PercContrib, fill=ScenOrdered))+
geom_bar(stat='identity', position='dodge')+
scale_fill_grey(name="Scenario",
breaks=c("Independent", "RestrictedAccess", "Shared"),
labels=c("Independent", "Restricted access", "Shared"), start=0.8, end=0.2)+
theme_classic(17) + #get rid of grey bkg and gridlines
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
#coord_cartesian(xlim=c(0,780), ylim=c(0,0.75))+ #set x and y limits
labs(x="Mine-port link region", y="Biodiversity impact")+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+#move xylabels away from graph
theme(legend.position="top", legend.text=element_text(size=14))+#use 'none' to get rid of legend
theme(legend.title=element_blank())#get rid of legend title
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_printproof.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_printproof.pdf")
ggsave(filename=outPath)
#Plot for manuscript - Submission 2
# Earlier 4-scenario variant, legend at bottom
plotDfSub <- plotDf[plotDf$Scen %in% c("Independent", "RestrictedAccess", "LowImpact", "Shared"),]
p <- ggplot(plotDfSub, aes(x=Group3, y=PercContrib, fill=ScenOrdered))+
geom_bar(stat='identity', position='dodge')+
scale_fill_grey(name="Scenario",
breaks=c("Independent", "RestrictedAccess", "LowImpact", "LowImpactShared", "Shared"),
labels=c("Independent", "Restricted access", "Low impact", "LowImpactShared", "Shared"), start=0.8, end=0.2)+
theme_classic(17) + #get rid of grey bkg and gridlines
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
#coord_cartesian(xlim=c(0,780), ylim=c(0,0.75))+ #set x and y limits
labs(x="Mine-port link region", y="Biodiversity impact")+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+#move xylabels away from graph
theme(legend.position="bottom", legend.text=element_text(size=14))+#use 'none' to get rid of legend
theme(legend.title=element_blank())#get rid of legend title
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_sub2.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_5mineportlinks_fig_sub2.pdf")
ggsave(filename=outPath)
################
#Model & plot of relationship between biodiversity & length
################
# Long-format frame: per-mine scenario-3 impacts, once under the homogeneous
# counterfactual and once with the real ("Hetero") species data; each=28 matches
# the 28 mines per scenario
DFHetero <- data.frame(rep(dataAll[dataAll$Scenario==3, "Len"], 2), append(dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffHomog"]/100,dataAll[dataAll$Scenario==3, "SumSpLoss183SpDiffuse"]/100), rep(c("Homog", "Hetero"), each=28))
names(DFHetero)<- c("Len", "Impact", "HetorHom")
#add points for the shared scenario with 3 ports (lines)
# DFHetero <- rbind(DFHetero, data.frame(
# Len = unlist(rep(dataPorts[dataPorts$X=="Length_km",c("Bonython", "Hardy", "Myponie")], 2)),
# Impact = unlist(c( dataPorts[dataPorts$X=="SumSpTotal", c("Bonython", "Hardy", "Myponie")]/183, dataPorts[dataPorts$X=="Area_km2", c("Bonython", "Hardy", "Myponie")]*avSpLoss/100)),
# HetorHom = rep(c("Hetero", "Homog"), each=3)
# ))
#subHetero <- subset(DFHetero, DFHetero$HetorHom=="Hetero")[c(1:28, 30, 31),] #remove outlier at 1880km
# Append the aggregated shared-scenario points (one per mine-port link group) so
# they appear alongside the per-mine independent points; each=5 matches the 5
# groups in DF5portlinks.
# BUG FIX: the original referenced DF5portlinks$HomogSharedImpact, a column that
# does not exist (the scenario-1 homogeneous aggregate is named HomogImpact).
# c(x, NULL) silently dropped it, leaving Impact at length 5, and data.frame()
# then recycled the Hetero values into the rows labelled "Homog".
DFHetero <- rbind(DFHetero, data.frame(
Len = rep(DF5portlinks$Len, 2),
Impact = c(DF5portlinks$HeteroSharedImpact, DF5portlinks$HomogImpact),
HetorHom = rep(c("Hetero", "Homog"), each=5)
))
# Hetero rows (28 mines + 5 shared group points); row 30 is dropped -- per the
# inline comment, the outlier at 1589km
subHetero <- subset(DFHetero, DFHetero$HetorHom=="Hetero")[c(1:29, 31:33),] #remove outlier at 1589km
# Zero-intercept regressions of impact on infrastructure length
mod1 <- lm(subHetero$Impact~0+subHetero$Len) #set intercept to zero
summary(mod1)
mod1b <- lm(DFHetero$Impact~0+DFHetero$Len) #all datapoints
summary(mod1b)
# Diagnostic panels for both fits
outPath <- paste0(plotOutDir, "USG_collab_biodiv_vs_length_modelfit_excluding1589km.pdf")
pdf(file=outPath)
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(mod1)
dev.off()
outPath <- paste0(plotOutDir, "USG_collab_biodiv_vs_length_modelfit_alldata.pdf")
pdf(file=outPath)
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(mod1b)
dev.off()
# Fitted values with confidence band; times=c(28, 4) labels the 28 per-mine rows
# "Independent" and the remaining shared-group rows "Shared" (32 rows total
# after the outlier drop)
modpred <- cbind(subHetero,predict(mod1,interval="confidence"), Scenario=rep(c("Independent", "Shared"), times=c(28, 4)))
p <- ggplot(modpred, aes(Len, Impact, shape=Scenario, colour=HetorHom)) +
geom_ribbon(aes(ymin=lwr,ymax=upr),fill = "gray80", colour="gray80")+
geom_abline(slope=coef(mod1), intercept=0, colour="black") +
geom_point(color="black", size=4) +#add points
#scale_color_manual(name="Species\nDistribution", breaks=c("Hetero", "Homog"),labels=c("Heterogeneous", "Homogeneous"), values=c("black", "lightblue"))+ #change point colours and legend titles
#scale_colour_manual(name="Distribution of biodiversity", breaks=c("Homog", "Hetero"), labels=c("Homogeneous", "Real data"), values=c("black", "grey70"))+ #change point colours and legend titles
#scale_shape_manual(name="Distribution of biodiversity", breaks=c("Homog", "Hetero"), labels=c("Homogeneous", "Real data"), values=c(19, 17))+ #change point shapes and make it a single legend
theme_classic(17) + #get rid of grey bkg and gridlines
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
coord_cartesian(xlim=c(0,780), ylim=c(0,0.75))+ #set x and y limits
labs(x="Infrastructure length (km)", y="Biodiversity impact")+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+
theme(legend.position="none")#gets rid of legend
#theme(legend.title=element_blank())#get rid of legend title
#theme(legend.position=c(.25, .85), legend.title = element_text(size=14, face="bold"), legend.key = element_blank())
#theme(legend.background = element_rect(colour="grey70", size=.5, linetype="solid"))+#add box
outPath <- paste0(plotOutDir, "USG_collab_Hetero_Homog_vs_Length_bothscenarios_fig_5lines_v2.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Hetero_Homog_vs_Length_bothscenarios_fig_5lines_v2.pdf")
ggsave(filename=outPath)
#######################
#Plot of collaboration benefit per number of partners
#######################
# meanlines <- data.frame(aggregate(data.frame(DFben$bioBen3portPer,DFben$finBen3portPer), by=list(DFben$numMine3portF), FUN=mean))
# names(meanlines)=c("numMine3portF", "Meanbio", "Meanfin")
# p1 <- ggplot(DFben, aes(x=numMine3portF, y=bioBen3portPer)) +
# geom_point(size=3.5)+
# geom_errorbar(data=meanlines, aes(y=Meanbio, ymin=Meanbio, ymax=Meanbio), width=0.8, size=1)+
# theme_bw(17) + #the number here changes the size of all the text in the graph
# theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
# #coord_cartesian(ylim=c(30,100))+
# labs(x="Number of companies collaborating", y="Averted biodiversity impact (% of total)")+
# theme(axis.title.x = element_text(vjust=-0.5),axis.title.y = element_text(vjust=0.6))+ #move xylabels away from graph
# annotate("text", x = 0.65, y = 99, label = "a", face="bold", size=8)
# p2 <- ggplot(DFben, aes(x=numMine3portF, y=finBen3portPer)) +
# geom_point(size=3.5)+
# geom_errorbar(data=meanlines, aes(y=Meanfin, ymin=Meanfin, ymax=Meanfin), width=0.8, size=1)+
# theme_bw(17) + #the number here changes the size of all the text in the graph
# theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
# #coord_cartesian(ylim=c(20,90))+
# labs(x="Number of companies collaborating", y="Averted capital cost (% of total)")+
# theme(axis.title.x = element_text(vjust=-0.5),axis.title.y = element_text(vjust=1))+ #move xylabels away from graph
# annotate("text", x = 0.65, y = 99, label = "b", face="bold", size=8)
# outPath <- paste0(plotOutDir, "USG_collab_NumCollab_fig.png")
# png(filename=outPath, width=960, height=480)
# grid.arrange(p1, p2, ncol=2)
# dev.off()
#######################
#Plot comparing shared with unshared
#######################
#set up data
# One row per mine: biodiversity impact and total rail cost under the shared
# (scen 1) and independent (scen 3) scenarios
DFShareUn <- data.frame(scen1data$SumSpLoss183SpDiffuse/100, scen3data$SumSpLoss183SpDiffuse/100, dataAll[dataAll$Scenario==1, "TotalCostRail"], dataAll[dataAll$Scenario==3, "TotalCostRail"])
names(DFShareUn) <- c("ShareSpLoss","UnshareSpLoss", "Sharecost", "Unsharecost")
# Segments connect each mine's shared point to its independent point.
# NOTE(review): referencing DFShareUn$... inside aes() bypasses the data
# argument -- it works for this single-layer plot but is fragile (breaks with
# faceting/stat layers); bare column names would be the conventional form.
pB <- ggplot(DFShareUn)+
geom_segment(aes(x=DFShareUn$"Sharecost", y=DFShareUn$"ShareSpLoss", xend=DFShareUn$"Unsharecost", yend=DFShareUn$"UnshareSpLoss"), size=0.7, colour="grey30")+
geom_point(aes(x=DFShareUn$"Sharecost", y=DFShareUn$"ShareSpLoss", colour="ShareSpLoss"), size=3)+
geom_point(aes(x=DFShareUn$"Unsharecost", y=DFShareUn$"UnshareSpLoss", colour="UnshareSpLoss"), size=3)+
# geom_point(aes(x=DFShareUn$"Sharecost", y=DFShareUn$"ShareSpLoss"), colour='black', size=3)+
# geom_point(aes(x=DFShareUn$"Unsharecost", y=DFShareUn$"UnshareSpLoss"), colour="grey70", size=3)+
labs(x="Capital cost (million $)", y="Biodiversity impact")+
theme_classic(17) + #get rid of grey bkg and gridlines
#theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+ #move xylabels away from graph
#edit legend
# NOTE(review): no fill aesthetic is mapped, so this scale_fill_discrete call
# appears to have no effect -- confirm it can be dropped
scale_fill_discrete(breaks=c("UnshareSpLoss", "ShareSpLoss"))+
#scale_color_discrete(name="Scenario", labels=c("Independent", "Shared"),breaks=c("UnshareSpLoss", "ShareSpLoss"))+
scale_color_manual(name="Scenario", labels=c("Independent", "Shared"),breaks=c("UnshareSpLoss", "ShareSpLoss"), values=c('black', 'grey70'))+
theme(legend.position=c(.2, .85), legend.title = element_text(size=14, face="bold"))#legend position
#theme(legend.background = element_rect(colour="grey70", size=.5, linetype="solid"))#add box
#theme(legend.key = element_blank())#remove boxes from around legend items
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_fig.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_fig.pdf")
ggsave(filename=outPath)
#######################
#Plot comparing shared with unshared - agricultural data
#######################
#set up data
# Same comparison using agricultural loss (converted to $M) instead of biodiversity
DFShareUnAg <- data.frame(dataAll[dataAll$Scenario==1, "AgriLoss"]/1000000, dataAll[dataAll$Scenario==3, "AgriLoss"]/1000000, dataAll[dataAll$Scenario==1, "TotalCostRail"], dataAll[dataAll$Scenario==3, "TotalCostRail"])
names(DFShareUnAg) <- c("ShareAgLoss","UnshareAgLoss", "Sharecost", "Unsharecost")
pA <- ggplot(DFShareUnAg)+
geom_segment(aes(x=DFShareUnAg$"Sharecost", y=DFShareUnAg$"ShareAgLoss", xend=DFShareUnAg$"Unsharecost", yend=DFShareUnAg$"UnshareAgLoss"), size=0.7, colour="grey30")+
geom_point(aes(x=DFShareUnAg$"Sharecost", y=DFShareUnAg$"ShareAgLoss", colour="ShareAgLoss"), size=3)+
geom_point(aes(x=DFShareUnAg$"Unsharecost", y=DFShareUnAg$"UnshareAgLoss", colour="UnshareAgLoss"), size=3)+
labs(x="Capital cost (million $)", y="Agricultural impact (million $)")+
theme_classic(17) + #get rid of grey bkg and gridlines
#theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
theme(axis.title.x = element_text(vjust=-0.6),axis.title.y = element_text(vjust=1), axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"))+ #move xylabels away from graph
#edit legend
scale_fill_discrete(breaks=c("UnshareAgLoss", "ShareAgLoss"))+
scale_color_discrete(name="Scenario", labels=c("Independent", "Shared"),breaks=c("UnshareAgLoss", "ShareAgLoss"))+
#theme(legend.position=c(.2, .85))#legend position
#theme(legend.background = element_rect(colour="grey70", size=.5, linetype="solid"))#add box
#theme(legend.key = element_blank())#remove boxes from around legend items
theme(legend.position="none")
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_AgriLoss_fig.png")
ggsave(filename=outPath)
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_AgriLoss_fig.pdf")
ggsave(filename=outPath)
#######################
#Plot violin plot of biodiversity impact of each scenario
#######################
## Per-species habitat-loss totals for the two scenarios
## (file 3_low_* = Independent, 1_high_* = Shared).
bioL <- list("3_low_diffuse_biodiversity_total_byspecies_areas.csv",
             "1_high_diffuse_biodiversity_total_byspecies_areas.csv")
bioLDf <- lapply(bioL, function(fname) {
  spp <- read.csv(paste0(sppDir[1], fname))
  # Collapse the 4 impact-distance classes (0m, 125m, 500m, 750m) into a
  # single total per species.
  class_totals <- colSums(spp[, 6:ncol(spp)])
  rowSums(matrix(class_totals, ncol = 4, byrow = TRUE))
})
# 183 species per scenario; *100 per the original note ("convert km to
# 1000*ha") -- presumably km^2 -> ha; confirm units against the input files.
bioDF <- data.frame(Scenario = rep(c("Independent", "Shared"), each = 183),
                    BioAreaLost = unlist(bioLDf) * 100)
## Boxplot (log10 y-axis) of per-species habitat area lost under each scenario.
pV <- ggplot(data = bioDF, aes(factor(Scenario), BioAreaLost)) +
  geom_boxplot(colour = "black", fill = "grey70") +
  # FIX: removed the stray trailing comma after colour="black" -- it created
  # an empty argument that only worked because it happened to land on a
  # defaulted formal of geom_jitter().
  geom_jitter(height = 0, width = 0.5, colour = "black") +
  theme_classic(17) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  scale_y_log10(labels = scales::comma) +
  labs(x = "", y = "Area habitat loss (ha)") +
  theme(axis.title.x = element_text(vjust = -0.6),
        axis.title.y = element_text(vjust = 1))  # move axis labels off the panel
# FIX: save pV explicitly rather than relying on ggsave()'s
# last-displayed-plot default (pV is never printed above).
outPath <- paste0(plotOutDir, "USG_collab_Shared_vs_Unshared_HabitatAreaLoss.png")
ggsave(filename = outPath, plot = pV)
#loss as % of total habitat
## Habitat loss expressed as a fraction of each species' total habitat
## (the *_totals.csv files), summarised with and without zero-loss species.
bioL2 <- list("3_low_diffuse_biodiversity_totals.csv",
              "1_high_diffuse_biodiversity_totals.csv")
bioLDf2 <- lapply(bioL2, function(fname) {
  spp <- read.csv(paste0(sppDir[1], fname))
  # Collapse the 4 impact-distance classes into one total per species.
  class_totals <- colSums(spp[, 6:ncol(spp)])
  rowSums(matrix(class_totals, ncol = 4, byrow = TRUE))
})
bioDF2 <- data.frame(Scenario = rep(c("Independent", "Shared"), each = 183),
                     BioAreaLost = unlist(bioLDf2))
summary(bioDF2[bioDF2$Scenario == "Independent", "BioAreaLost"])
summary(bioDF2[bioDF2$Scenario == "Shared", "BioAreaLost"])
# Repeat the summaries with unaffected (zero-loss) species excluded.
bioDF3 <- subset(bioDF2, BioAreaLost != 0)
summary(bioDF3[bioDF3$Scenario == "Independent", "BioAreaLost"])
summary(bioDF3[bioDF3$Scenario == "Shared", "BioAreaLost"])
|
986cc6925d21eb853d589678d7c2928bf3b66df7
|
1f547ef48e642eff3847aa3fc6c137a90e5540e1
|
/KNN/Zoo_sol.R
|
9f4ed2168cafec80aaecba78789c80e24d74dd08
|
[] |
no_license
|
prateek-gitcode/Data-Science-practice-problems
|
2f5a8af5f15f72b2d1625e883a779f9ea4c902d4
|
7d7709eb383e7e93a27ca311dc051d736dd531da
|
refs/heads/master
| 2023-04-19T04:49:22.852158
| 2021-05-10T10:08:14
| 2021-05-10T10:08:14
| 365,997,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 807
|
r
|
Zoo_sol.R
|
# KNN classification of the Zoo data set.
#Importing the Dataset
library(readr)
zoo_data <- read.csv(file.choose())   # interactive file picker
zoo_data <- zoo_data[-1]              # drop the first (animal name) column
#EDA
summary(zoo_data)
#Splitting the data into training set and test set (75/25, stratified on type)
library(caTools)
split <- sample.split(zoo_data$type, SplitRatio = 0.75)
training_set <- subset(zoo_data, split == TRUE)
test_set <- subset(zoo_data, split == FALSE)
#Implementing KNN Algorithm: sweep odd k and record train/test accuracy.
# Column 17 is assumed to be the class label ("type") -- as in the original.
library(class)
ks <- seq(3, 200, 2)
train_acc <- numeric(length(ks))  # preallocate instead of growing with c()
test_acc <- numeric(length(ks))
for (j in seq_along(ks)) {
  # BUG FIX: the original predicted on the *test* set but compared those
  # predictions against the *training* labels (different lengths -> silent
  # recycling, meaningless accuracies) and never filled test_acc. Predict
  # on each set and score it against its own labels.
  pred_train <- knn(train = training_set[-17], test = training_set[-17],
                    cl = training_set[, 17], k = ks[j])
  pred_test <- knn(train = training_set[-17], test = test_set[-17],
                   cl = training_set[, 17], k = ks[j])
  train_acc[j] <- mean(pred_train == training_set[, 17])
  test_acc[j] <- mean(pred_test == test_set[, 17])
}
#This shows accuracy when k=3
y_pred <- knn(train = training_set[-17], test = test_set[-17], cl = training_set[, 17], k = 3)
library(gmodels)
CrossTable(test_set[, 17], y_pred)
mean(y_pred == test_set[, 17]) * 100   # test accuracy (%)
|
af078ada0e38fe5a76e54a80ce32b068091fd53e
|
a43a5f942ebf81cbc2bda2b8b5413efdedb03ed8
|
/tests/testthat/test-espn_ratings_fpi.R
|
f48f6b53d5e17c7ab45fcda4fa306dfb718f5806
|
[
"MIT"
] |
permissive
|
Engy-22/cfbfastR
|
7d6775943c8124c532c36728dec5cc7aee9ad4f5
|
92ebfdd0fb4a70bcb9f3cc2d11f3a61d863d9743
|
refs/heads/master
| 2023-08-30T11:08:09.332930
| 2021-10-26T17:34:33
| 2021-10-26T17:34:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 537
|
r
|
test-espn_ratings_fpi.R
|
# context("ESPN FPI Ratings")
#
# cols <- c(
# "year", "team_id", "name", "abbr",
# "fpi", "fpi_rk", "trend", "proj_w", "proj_l", "win_out",
# "win_6", "win_div", "playoff", "nc_game", "nc_win",
# "win_conf", "w", "l", "t"
# )
#
# test_that("ESPN FPI Ratings", {
# skip_on_cran()
# x <- espn_ratings_fpi(2019)
#
# y <- espn_ratings_fpi(2018)
#
# expect_equal(colnames(x), cols)
# expect_equal(colnames(y), cols)
# expect_s3_class(x, "data.frame")
# expect_s3_class(y, "data.frame")
# })
|
8b7da2e72d838a9221d0e20bd5ca7027a9329991
|
a53ea6b185a697cd57e54b5e8285ae2aa3fd5cb1
|
/R_scripts_to_makePLOTS/CoveragePLOTs.R
|
cb90d45ab7f80ff28e7d5c1454965d555e259258
|
[] |
no_license
|
kovimallik/Cas9Enrichment
|
6f21003654542cd99f1cacde1f808aa403689c71
|
cb2be67b165d0d98c1f8f84979c21ccb489dfe47
|
refs/heads/master
| 2022-04-17T06:28:01.137279
| 2020-02-10T19:03:27
| 2020-02-10T19:03:27
| 414,004,831
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
CoveragePLOTs.R
|
#!/usr/bin/env Rscript
# FIX: shebang was "#!/usr/env Rscript"; env lives in /usr/bin, so the
# script could not be executed directly.
# Plot read coverage over a single BED-defined locus from a BAM alignment.
# Inputs: a BAM file and a BED file with one region (chr, start, stop).
# Output: a PDF named <nombre>.targeSIDE_PDF.pdf in the working directory.
#install.packages("tidyverse")
#install.packages("cowplot")
library(GenomicRanges)
library(GenomicAlignments)
library(tidyverse)
library(cowplot)
getwd()
list.files()
##These are the regions:
in_data <- "input_bam_file.bam"
target.bed <- "chr6_bed.bed"
#how file name is saved:
nombre <- 'how_plot_file_wil_be_saved'
##########################
reads <- readGAlignments(in_data, use.names = TRUE)  # TRUE, not shorthand T
target <- read_tsv(target.bed, col_names = c("chr", "start", "stop"))
target.gr <- GRanges(target)
reads.gr <- GRanges(reads)
#subsetting reads to data overlapping the target
on.targ <- overlapsAny(reads.gr, target.gr)
#keep only the reads that meet the on-target criterion
ontarg.reads <- reads[on.targ]
#get per-base coverage on the target chromosome, restricted to the locus
cov.targ <- as.vector(coverage(ontarg.reads)[[target$chr]])
cov.targ <- cov.targ[target$start:target$stop]
cov.plt <- tibble(cov = cov.targ, pos = seq(start(target.gr), end(target.gr)))
g.cov <- ggplot() + theme_bw() +
  geom_line(data = cov.plt, mapping = aes(x = pos, y = cov),
            color = 'indianred1', size = 1.5) +
  xlim(target$start, target$stop)
save_plot(paste0(nombre, ".targeSIDE_PDF.pdf"), g.cov, base_aspect_ratio = 1)
|
cd10dcd1f35622131f475b039f31017209409ee4
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/2353/TBSSurvival/dist.error.r
|
17e9d5f1957e357edd07cfddf29842a1137e790d
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,681
|
r
|
dist.error.r
|
# TBSSurvival package for R (http://www.R-project.org)
# Copyright (C) 2012-2013 Adriano Polpo, Cassio de Campos, Debajyoti Sinha
# Jianchang Lin and Stuart Lipsitz.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
## builder of the four functions for well-known distributions
## d(x,xi), p(x,xi), q(x,xi), r(x,xi) for the chosen distribution and
## the last element in the list is the distribution name.
## Build the density/distribution/quantile/generation functions for the
## chosen error distribution of the TBS model.
##
## Returns a list with elements d, p, q, r -- each a function of (x, xi),
## where xi is the distribution's scale/shape parameter -- plus the
## distribution name. For dist = "all", returns a list of five such lists
## (note: "all" intentionally mirrors the original and omits "logis2").
dist.error <- function(dist = "norm") {
  known <- c("norm", "doubexp", "cauchy", "t", "logistic", "logis2", "all")
  if (!(dist %in% known))
    stop("TBS: Distribution not available at dist.error")
  # Bundle the four distribution functions and the name into one list.
  as_dist <- function(d, p, q, r, name) {
    list(d = d, p = p, q = q, r = r, name = name)
  }
  switch(dist,
         ## Normal: xi is the variance.
         norm = as_dist(
           d = function(x, xi) dnorm(x, mean = 0, sd = sqrt(xi)),
           p = function(x, xi) pnorm(x, mean = 0, sd = sqrt(xi)),
           q = function(x, xi) qnorm(x, mean = 0, sd = sqrt(xi)),
           r = function(x, xi) rnorm(x, mean = 0, sd = sqrt(xi)),
           name = "norm"),
         ## t-student: package-internal helpers, xi is the df.
         t = as_dist(
           d = function(x, xi) .dt2(x, df = xi),
           p = function(x, xi) .pt2(x, df = xi),
           q = function(x, xi) .qt2(x, df = xi),
           r = function(x, xi) .rt2(x, df = xi),
           name = "t"),
         ## Cauchy: xi is the scale.
         cauchy = as_dist(
           d = function(x, xi) dcauchy(x, location = 0, scale = xi),
           p = function(x, xi) pcauchy(x, location = 0, scale = xi),
           q = function(x, xi) qcauchy(x, location = 0, scale = xi),
           r = function(x, xi) rcauchy(x, location = 0, scale = xi),
           name = "cauchy"),
         ## Laplace/double exponential via the exponential-power family
         ## (dnormp etc., presumably from the normalp package -- confirm).
         doubexp = as_dist(
           d = function(x, xi) dnormp(x, sigmap = xi, mu = 0, p = 1),
           p = function(x, xi) pnormp(x, sigmap = xi, mu = 0, p = 1),
           q = function(x, xi) qnormp(x, sigmap = xi, mu = 0, p = 1),
           r = function(x, xi) rnormp(x, sigmap = xi, mu = 0, p = 1),
           name = "doubexp"),
         ## Alternative logistic parameterisation (package-internal helpers).
         logis2 = as_dist(
           d = function(x, xi) .dlogis2(x, s = xi),
           p = function(x, xi) .plogis2(x, s = xi),
           q = function(x, xi) .qlogis2(x, s = xi),
           r = function(x, xi) .rlogis2(x, s = xi),
           name = "logis2"),
         ## Logistic: xi is the scale.
         logistic = as_dist(
           d = function(x, xi) dlogis(x, location = 0, scale = xi),
           p = function(x, xi) plogis(x, location = 0, scale = xi),
           q = function(x, xi) qlogis(x, location = 0, scale = xi),
           r = function(x, xi) rlogis(x, location = 0, scale = xi),
           name = "logistic"),
         all = list(dist.error("norm"), dist.error("t"), dist.error("cauchy"),
                    dist.error("doubexp"), dist.error("logistic"))
  )
}
|
fab04f40aa698b96a18f9711232d6447e3c64830
|
87760ba06690cf90166a879a88a09cd2e64f3417
|
/tests/testthat/test-algo-prophet_reg.R
|
1354ae256ad71c28e002e0698637ded082798b4f
|
[
"MIT"
] |
permissive
|
topepo/modeltime
|
1189e5fe6c86ee3a70aec0f100387a495f8add5f
|
bff0b3784d1d8596aa80943b221eb621481534e1
|
refs/heads/master
| 2022-12-27T07:11:58.979836
| 2020-10-08T16:07:27
| 2020-10-08T16:07:27
| 289,933,114
| 1
| 0
|
NOASSERTION
| 2020-08-24T13:17:10
| 2020-08-24T13:17:10
| null |
UTF-8
|
R
| false
| false
| 9,777
|
r
|
test-algo-prophet_reg.R
|
# ---- STANDARD ARIMA ----
# (header inherited from the template this file was copied from; the tests
# below exercise prophet_reg with the "prophet" engine, not ARIMA)
context("TEST prophet_reg: prophet")

# SETUP ----

# Data
m750 <- m4_monthly %>% filter(id == "M750")

# Split Data 80/20
splits <- initial_time_split(m750, prop = 0.8)

# Model Spec: every tunable prophet argument is pinned to a non-default
# value so the tests can verify each one is forwarded to the engine object.
model_spec <- prophet_reg(
    growth = 'linear',
    changepoint_num = 10,
    changepoint_range = 0.75,
    seasonality_yearly = TRUE,
    seasonality_weekly = FALSE,
    seasonality_daily = FALSE,
    season = 'multiplicative',
    prior_scale_changepoints = 20,
    prior_scale_seasonality = 20,
    prior_scale_holidays = 20
) %>%
    set_engine("prophet")

# PARSNIP ----

# * NO XREGS ----

test_that("prophet_reg: prophet, (NO XREGS), Test Model Fit Object", {

    # ** MODEL FIT ----

    # Model Fit (date-only formula, so no external regressors are created)
    model_fit <- model_spec %>%
        fit(log(value) ~ date, data = training(splits))

    # Structure
    testthat::expect_s3_class(model_fit$fit, "prophet_fit_impl")

    testthat::expect_s3_class(model_fit$fit$data, "tbl_df")

    testthat::expect_equal(names(model_fit$fit$data)[1], "date")

    # No xregs -> no xreg preprocessing recipe should be stored
    testthat::expect_true(is.null(model_fit$fit$extras$xreg_recipe))

    # $fit PROPHET: each spec argument above must appear on the engine object
    testthat::expect_s3_class(model_fit$fit$models$model_1, "prophet")

    testthat::expect_identical(model_fit$fit$models$model_1$growth, "linear")

    testthat::expect_identical(model_fit$fit$models$model_1$n.changepoints, 10)

    testthat::expect_identical(model_fit$fit$models$model_1$changepoint.range, 0.75)

    testthat::expect_identical(model_fit$fit$models$model_1$yearly.seasonality, TRUE)

    testthat::expect_identical(model_fit$fit$models$model_1$weekly.seasonality, FALSE)

    testthat::expect_identical(model_fit$fit$models$model_1$daily.seasonality, FALSE)

    testthat::expect_identical(model_fit$fit$models$model_1$seasonality.mode, 'multiplicative')

    testthat::expect_identical(model_fit$fit$models$model_1$seasonality.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$changepoint.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$holidays.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$uncertainty.samples, 0)

    # $preproc
    testthat::expect_equal(model_fit$preproc$y_var, "value")

    # ** PREDICTIONS ----

    # Predictions
    predictions_tbl <- model_fit %>%
        modeltime_calibrate(testing(splits)) %>%
        modeltime_forecast(new_data = testing(splits))

    # Structure
    testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))

    testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)

    # Out-of-Sample Accuracy Tests
    # (model was fit on log(value), so back-transform with exp() first)
    resid <- testing(splits)$value - exp(predictions_tbl$.value)

    # - Max Error less than 1500
    testthat::expect_lte(max(abs(resid)), 1500)

    # - MAE less than 700
    testthat::expect_lte(mean(abs(resid)), 700)

})

# * XREGS ----

test_that("prophet_reg: prophet, (XREGS), Test Model Fit Object", {

    # ** MODEL FIT ----

    # Model Fit: numeric date and month dummies act as external regressors
    model_fit <- model_spec %>%
        fit(log(value) ~ date + as.numeric(date) + factor(month(date, label = TRUE), ordered = F),
            data = training(splits))

    # Structure
    testthat::expect_s3_class(model_fit$fit, "prophet_fit_impl")

    testthat::expect_s3_class(model_fit$fit$data, "tbl_df")

    testthat::expect_equal(names(model_fit$fit$data)[1], "date")

    # With xregs a preprocessing recipe must be stored
    testthat::expect_true(!is.null(model_fit$fit$extras$xreg_recipe))

    # $fit PROPHET: spec arguments must still be forwarded unchanged
    testthat::expect_s3_class(model_fit$fit$models$model_1, "prophet")

    testthat::expect_identical(model_fit$fit$models$model_1$growth, "linear")

    testthat::expect_identical(model_fit$fit$models$model_1$n.changepoints, 10)

    testthat::expect_identical(model_fit$fit$models$model_1$changepoint.range, 0.75)

    testthat::expect_identical(model_fit$fit$models$model_1$yearly.seasonality, TRUE)

    testthat::expect_identical(model_fit$fit$models$model_1$weekly.seasonality, FALSE)

    testthat::expect_identical(model_fit$fit$models$model_1$daily.seasonality, FALSE)

    testthat::expect_identical(model_fit$fit$models$model_1$seasonality.mode, 'multiplicative')

    testthat::expect_identical(model_fit$fit$models$model_1$seasonality.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$changepoint.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$holidays.prior.scale, 20)

    testthat::expect_identical(model_fit$fit$models$model_1$uncertainty.samples, 0)

    # $preproc
    testthat::expect_equal(model_fit$preproc$y_var, "value")

    # ** PREDICTIONS ----

    # Predictions
    predictions_tbl <- model_fit %>%
        modeltime_calibrate(testing(splits)) %>%
        modeltime_forecast(new_data = testing(splits))

    # Structure
    testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))

    testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)

    # Out-of-Sample Accuracy Tests (back-transform the log target)
    resid <- testing(splits)$value - exp(predictions_tbl$.value)

    # - Max Error less than 1500
    testthat::expect_lte(max(abs(resid)), 1500)

    # - MAE less than 700
    testthat::expect_lte(mean(abs(resid)), 700)

})

# ---- WORKFLOWS ----

# Recipe spec: same preprocessing as the XREGS formula, expressed as a recipe
recipe_spec <- recipe(value ~ date, data = training(splits)) %>%
    step_log(value, skip = FALSE) %>%
    step_date(date, features = "month") %>%
    step_mutate(date_num = as.numeric(date))

# Workflow
wflw <- workflow() %>%
    add_recipe(recipe_spec) %>%
    add_model(model_spec)

# TESTS
test_that("prophet_reg: prophet (workflow), Test Model Fit Object", {

    # Fitted Workflow
    wflw_fit <- wflw %>%
        fit(training(splits))

    # Structure (model fit is nested three $fit levels deep in a workflow)
    testthat::expect_s3_class(wflw_fit$fit$fit$fit, "prophet_fit_impl")

    testthat::expect_s3_class(wflw_fit$fit$fit$fit$data, "tbl_df")

    testthat::expect_equal(names(wflw_fit$fit$fit$fit$data)[1], "date")

    testthat::expect_true(!is.null(wflw_fit$fit$fit$fit$extras$xreg_recipe))

    # $fit prophet: spec arguments forwarded through the workflow unchanged
    testthat::expect_s3_class(wflw_fit$fit$fit$fit$models$model_1, "prophet")

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$growth, "linear")

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$n.changepoints, 10)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$changepoint.range, 0.75)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$yearly.seasonality, TRUE)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$weekly.seasonality, FALSE)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$daily.seasonality, FALSE)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$seasonality.mode, 'multiplicative')

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$seasonality.prior.scale, 20)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$changepoint.prior.scale, 20)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$holidays.prior.scale, 20)

    testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_1$uncertainty.samples, 0)

    # $preproc
    mld <- wflw_fit %>% workflows::pull_workflow_mold()
    testthat::expect_equal(names(mld$outcomes), "value")

    # ** PREDICTIONS ----

    # Forecast (recipe logged the target, so exp() back-transforms .value)
    predictions_tbl <- wflw_fit %>%
        modeltime_calibrate(testing(splits)) %>%
        modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>%
        mutate_at(vars(.value), exp)

    full_data <- bind_rows(training(splits), testing(splits))

    # Structure
    testthat::expect_identical(nrow(full_data), nrow(predictions_tbl))

    testthat::expect_identical(full_data$date, predictions_tbl$.index)

    # Out-of-Sample Accuracy Tests
    predictions_tbl <- predictions_tbl %>% filter(.key == "prediction")
    resid <- testing(splits)$value - predictions_tbl$.value

    # - Max Error less than 1500
    testthat::expect_lte(max(abs(resid)), 1500)

    # - MAE less than 700
    testthat::expect_lte(mean(abs(resid)), 700)

})

# LOGISTIC GROWTH ----

# * MODELS ----

test_that("prophet_reg: prophet, Logistic Growth", {

    # ** MODEL FIT ----

    # Model Fit: logistic growth requires a capacity (logistic_cap)
    model_fit <- prophet_reg(
        growth = "logistic",
        logistic_cap = 11000
    ) %>%
        set_engine(engine = "prophet") %>%
        fit(value ~ date, m750)

    # Structure
    testthat::expect_s3_class(model_fit$fit, "prophet_fit_impl")

    testthat::expect_s3_class(model_fit$fit$data, "tbl_df")

    testthat::expect_equal(names(model_fit$fit$data)[1], "date")

    testthat::expect_false(is.null(model_fit$fit$extras$logistic_params$logistic_cap))

    # $fit PROPHET
    testthat::expect_s3_class(model_fit$fit$models$model_1, "prophet")

    testthat::expect_identical(model_fit$fit$models$model_1$growth, "logistic")

    testthat::expect_identical(model_fit$fit$extras$logistic_params$growth, "logistic")

    testthat::expect_identical(model_fit$fit$extras$logistic_params$logistic_cap, 11000)

    # No floor was supplied, so none should be stored
    testthat::expect_true(is.null(model_fit$fit$extras$logistic_params$logistic_floor))

    # $preproc
    testthat::expect_equal(model_fit$preproc$y_var, "value")

    # ** PREDICTIONS ----

    # A 10-year forecast must stay below (roughly) the 11000 capacity
    forecast_prophet_logisitic <- modeltime_table(
        model_fit
    ) %>%
        modeltime_forecast(
            h = 12 * 10,
            actual_data = m750
        ) %>%
        filter(.model_desc != "ACTUAL")

    expect_lt(
        forecast_prophet_logisitic$.value %>% max(),
        11500
    )

    # ERROR IF CAP/FLOOR NOT SPECIFIED
    expect_error({
        prophet_reg(
            growth = "logistic"
        ) %>%
            set_engine(engine = "prophet") %>%
            fit(value ~ date, m750)
    })

})
|
04118df50e5aa3a626c17d41ceab8953554c8372
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610383197-test.R
|
c873a4d4b93833354a8117aa7cc8afd0c1d8026f
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 482
|
r
|
1610383197-test.R
|
# Auto-generated libFuzzer/valgrind reproduction case for grattan::IncomeTax.
# Replays a saved argument list (empty rates vector, NaN threshold, an income
# vector full of NaN/denormals/-Inf) and prints the structure of the result.
# The argument values are byte-exact fuzzer output -- do not "clean them up".
testlist <- list(rates = numeric(0), thresholds = NaN, x = c(NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 2.78105030004262e-309, 1.28683596660321e-167, 0, 0, 0, 0, 0, 0, 0, 3.92660099062145e-310, NaN, 1.25986739689518e-321, 0, 0, 0, 0, 0, 0, 0, NaN, 1.06150300700353e-313, NaN, NaN, NaN, -3.23465380139002e+244, -8.73989987746104e+245, 9.34531023086085e-307, 1.42991673003811e-308, -Inf))
result <- do.call(grattan::IncomeTax,testlist)
# str() so the harness log captures the shape of the returned value
str(result)
|
8d24764392d5b18ede3fac63d8bcf7bbed927650
|
25f62747b4306d1436ebb394ac443729aae24181
|
/man/updateData.Rd
|
43ce32a5a7ad8e3fa224ccdb4b840ba0e37d4309
|
[] |
no_license
|
takewiki/DTedit
|
79b987a135c82533d5207d234b148e5be08195e3
|
4551a951347cd131799974053d8f1c6c42d49cf5
|
refs/heads/master
| 2021-07-07T20:48:31.151739
| 2020-10-02T06:57:09
| 2020-10-02T06:57:09
| 193,644,846
| 1
| 0
| null | 2019-06-25T06:06:22
| 2019-06-25T06:06:22
| null |
UTF-8
|
R
| false
| true
| 360
|
rd
|
updateData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{updateData}
\alias{updateData}
\title{更新数据}
\usage{
updateData(proxy, data, ...)
}
\arguments{
\item{proxy}{代理对象,支持更新}
\item{data}{数据}
\item{...}{其他选项}
}
\value{
返回值
}
\description{
更新数据
}
\examples{
updateData()
}
|
a31bd92142b942caa6a57ca688d22ccb1825b9b6
|
0f24c8bfc4257f25e6397d627d95103b8fba028c
|
/lib/gsea_helpers.R
|
71a18aa5266c00ed80b33972c43b314af111c1b9
|
[] |
no_license
|
BrownellLab/UISO_code
|
10c9c068d99dd1e982810ba19fb21d42d9b30d73
|
f81c34ea81c585e628cca1e23ad08cf9e0ea8454
|
refs/heads/master
| 2021-01-16T00:03:57.743454
| 2014-12-10T20:15:02
| 2014-12-10T20:15:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
gsea_helpers.R
|
# From https://gist.github.com/kdaily/7806586
# Write an ExpressionSet's expression matrix as a GCT file (for GSEA or IGV).
# Header: a "#1.2" version line, then "<probes>\t<samples>" -- each padded
# with empty tab-separated fields to the width of the data table -- followed
# by the NAME/Description/expression table itself.
eset2gct <- function(eset, filename) {
  expr_mat <- exprs(eset)
  n_samples <- ncol(expr_mat)
  n_probes <- nrow(expr_mat)
  pad_header <- rep("", n_samples + 1)
  pad_dims <- rep("", n_samples)
  header_lines <- c(paste(c("#1.2", pad_header), collapse = "\t"),
                    paste(c(n_probes, n_samples, pad_dims), collapse = "\t"))
  cat(header_lines, file = filename, sep = "\n")
  gct_body <- data.frame(NAME = rownames(expr_mat), Description = "NA", expr_mat)
  write.table(gct_body, file = filename, sep = "\t", append = TRUE,
              quote = FALSE, row.names = FALSE)
}
# Write sample class labels in GSEA CLS format.
# Each row of df is one sample; the classcol column is coerced to a factor.
# Line 1: "<n_samples> <n_classes> 1"; line 2: "# <level names>";
# line 3: one space-separated class label per sample, in row order.
write_class <- function(df, classcol, filename) {
  class_fac <- factor(df[, classcol])
  lvls <- levels(class_fac)
  line_counts <- paste(c(nrow(df), length(lvls), 1), collapse = " ")
  line_levels <- paste(c("#", lvls), collapse = " ")
  line_labels <- paste(class_fac, collapse = " ")
  cat(line_counts, line_levels, line_labels, file = filename, sep = "\n")
}
# Convert an ExpressionSet's featureData into CHIP format for GSEA.
# idcol/symbolcol/titlecol name the featureData columns to export; the
# output headers must be exactly "Probe Set ID", "Gene Symbol", "Gene Title"
# as required by the CHIP specification.
eset2chip <- function(eset, idcol = "ID", symbolcol = "gene.symbol",
                      titlecol = "Gene.Title", filename) {
  chip <- fData(eset)[, c(idcol, symbolcol, titlecol)]
  colnames(chip) <- c("Probe Set ID", "Gene Symbol", "Gene Title")
  write.table(chip, file = filename, sep = "\t", quote = FALSE, row.names = FALSE)
}
# Convert a limma differential-expression fit object to a GSEA ranked list
# (.rnk): gene ID plus the chosen ranking statistic for one coefficient.
# stat defaults to the moderated t-statistic column of topTable().
fit2rnk <- function(fit, stat = 't', coef, filename) {
  # n = 1e10 keeps every gene; sort.by = "none" preserves input order.
  ranked <- topTable(fit, coef = coef, n = 1e10, sort.by = "none")
  write.table(ranked[, c("ID", stat)], file = filename, sep = "\t",
              quote = FALSE, row.names = FALSE)
}
# Build the path of a GSEA report file for one run and enrichment direction
# ("pos"/"neg"): "<template>.<run>/gsea_report_for_na_<direction>_<run>.xls".
gsea_result_filename <- function(gsea_template, gsea_run, gsea_direction) {
  # paste0() is the idiomatic replacement for paste(..., sep = "").
  paste0(gsea_template, ".", gsea_run, "/",
         "gsea_report_for_na_", gsea_direction, "_", gsea_run, ".xls")
}
|
b38d9931008d39a4258cfed6df1adb2fb14e1526
|
fddeb9bdb530fbaa7ad37b23f667c31bf2ba5f1d
|
/man/protcomp.Rd
|
404ed06e96d11e80ee8f1525bcabf89982a35055
|
[] |
no_license
|
cran/canprot
|
405873c9be0becd7ac8edbbc29207eae31a3bd13
|
bf69c4c70a56f5d36bc17d7034b9a482b2841eba
|
refs/heads/master
| 2022-02-05T21:19:46.105674
| 2022-01-17T07:22:51
| 2022-01-17T07:22:51
| 94,257,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
rd
|
protcomp.Rd
|
\encoding{UTF-8}
\name{protcomp}
\alias{protcomp}
\title{Amino Acid Compositions}
\description{
Get amino acid compositions of proteins.
}
\usage{
protcomp(uniprot = NULL, aa = NULL, aa_file = NULL)
}
\arguments{
\item{uniprot}{character, UniProt IDs of proteins}
\item{aa}{data frame, amino acid compositions}
\item{aa_file}{character, file name}
}
\details{
This function retrieves the amino acid compositions of one or more proteins specified by \code{uniprot}.
This function depends on the amino acid compositions of human proteins, which are stored in the \code{\link{human}} environment when the package is attached.
If \code{aa_file} is specified, additional amino acid compositions to be considered are read from this file, which should be in the same format as e.g. \code{\link{human_extra}.csv} (see also \code{\link[CHNOSZ]{thermo}$protein}).
Alternatively, the amino acid compositions can be given in \code{aa}, bypassing the search step.
}
\value{
The function returns a list with elements \code{uniprot} (UniProt IDs as given in the arguments) and \code{aa} (amino acid compositions of the proteins).
}
\seealso{
\code{\link{cleanup}}
}
\examples{
protcomp("P24298")
}
\concept{Amino acid composition}
|
44a22a5a978daac46aab9992d4694d942f452edd
|
bac3ad65c587f5d96816789e68eefcc2c53a761d
|
/man/measure_access.Rd
|
55072739511cfb2e0e75bfbb1413e9acf45cd14b
|
[] |
no_license
|
franc703/minnccaccess
|
a4eef2cb96f60abc39cfa9751c317b47b83238f5
|
19d4edf95c6c2f5e042540ae90f320a0998e09c4
|
refs/heads/main
| 2023-05-31T05:27:35.413098
| 2021-07-10T10:13:12
| 2021-07-10T10:13:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 903
|
rd
|
measure_access.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measure_access.R
\name{measure_access}
\alias{measure_access}
\title{Measure child care access}
\usage{
measure_access(
method = "hexagon",
geography = "census-block-group",
geo_year = 2010,
acs_year = 2019,
hex_side = 1,
radius = 10,
decay = 4,
as_of = "2021-04-01"
)
}
\arguments{
\item{method}{"hexagon" or "census"}
\item{geography}{Geography}
\item{geo_year}{Geography year. 2010 or 2020}
\item{acs_year}{5-year ACS end year}
\item{hex_side}{Length of side of hexagons}
\item{radius}{Consider providers/families that are in \code{radius} miles or closer.}
\item{decay}{Decay parameter (used to weight access by distance) in calculating the access measure}
\item{as_of}{Access as of date}
}
\value{
An sf object
}
\description{
Measure child care access data using hexagons or census geographies
}
|
d44f8be73f6241819a88e9447f44b122da7ee15b
|
c18980f3afc6b564344de9a52b9cf0d3972fa618
|
/R/ggplot2_small_map_data.R
|
4b07fd20e75ca24be838d240fe54e2f444d7d6a0
|
[] |
no_license
|
jhnwllr/gbifapi
|
949b42e28749e034b67dc393694f88c12e9a05d6
|
010347732fcde62cf645078bdfbbecfaeb13fa5b
|
refs/heads/master
| 2022-04-26T23:23:19.966690
| 2022-04-26T12:01:32
| 2022-04-26T12:01:32
| 153,618,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
ggplot2_small_map_data.R
|
# Reduce ggplot2::map_data("world") to a simplified polygon table suitable
# for lightweight plotting (e.g. SVG output).
#
# dTolerance: simplification tolerance passed to sf::st_simplify(); larger
#   values give coarser geometry and smaller output.
# Returns a fortified data.frame of simplified country polygons.
ggplot2_small_map_data <- function(dTolerance = 1.1) {
  # FIX: return the pipeline result directly. The original assigned it to a
  # local with `=`, so the function's value was the assignment -- returned
  # invisibly -- and the intermediate name served no purpose.
  ggplot2::map_data("world") %>%
    sf::st_as_sf(coords = c("long", "lat")) %>%
    group_by(group) %>%
    summarize(do_union = FALSE) %>%
    sf::st_cast("POLYGON") %>%
    ungroup() %>%
    mutate(geometry = sf::st_simplify(geometry, dTolerance = dTolerance)) %>%
    # Drop polygons that were simplified away entirely.
    mutate(is_empty = sf::st_is_empty(geometry)) %>%
    filter(!is_empty) %>%
    select(-is_empty) %>%
    glimpse() %>%  # side effect kept from the original: prints a preview
    sf::as_Spatial() %>%
    ggplot2::fortify()
}
|
da191fe1709588ab80770201520fad08dbfc4379
|
c19dfad6d86f3dd6b8d5bacbdd0ab7c055962fa5
|
/tests/testthat/test-lmertest.R
|
aad1620cccf272b73be828519cd41e270d082110
|
[] |
no_license
|
bbolker/broom.mixed
|
e0ff763ade4df0ebecd6fd7332866cfe0e20e46f
|
3ba939b400fbec61e0aa1860a57a09c046df2d20
|
refs/heads/main
| 2023-08-17T16:42:40.148968
| 2023-08-04T15:21:49
| 2023-08-04T15:21:49
| 113,072,861
| 221
| 33
| null | 2023-05-19T14:16:36
| 2017-12-04T17:24:08
|
R
|
UTF-8
|
R
| false
| false
| 1,662
|
r
|
test-lmertest.R
|
# Tests for broom.mixed tidiers on lmerTest models. All tests are skipped
# when lmerTest is not installed (the outer require() guard).
stopifnot(require("testthat"), require("broom.mixed"))

## test lmerTest
if (require(lmerTest, quietly = TRUE)) {
  test_that("testing lmerTest p-values", {
    lmm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
    td <- tidy(lmm1, "fixed")
    # Satterthwaite df for this fit are known to be ~17 for both terms
    expect_equal(td$df, c(17, 17), tolerance=1e-3)
    check_tidy(td, 2, 7, c(
      "effect", "term", "estimate",
      "std.error", "df", "statistic", "p.value"
    ))
    td_ran <- tidy(lmm1, "ran_pars")
    check_tidy(td_ran, 4, 4, c("effect", "group", "term", "estimate"))
    expect_false(all(is.na(td_ran$estimate)))
    # Kenward-Roger df need the optional pbkrtest package
    if (requireNamespace("pbkrtest")) {
      td_kr <- tidy(lmm1, "fixed", ddf.method="Kenward-Roger")
      expect_equal(td_kr$df, c(17,17), tol=1e-4)
    }
    # ddf.method="lme4" drops the df/p.value columns (plain lme4 behaviour)
    td_nodf <- tidy(lmm1, "fixed", ddf.method="lme4")
    check_tidy(td_nodf, 2, 5, c("effect", "term", "std.error", "statistic"))
  })
  test_that("Wald t intervals", {
    set.seed(101)
    ## unbalance to make K-R slightly different from Satterthwaite
    ss <- sleepstudy[sample(seq(nrow(sleepstudy)),size=round(0.9*nrow(sleepstudy))),]
    m1 <- lmer(Reaction~Days+(1|Subject),REML=TRUE,ss)
    # Return the (conf.low, conf.high) pair for the intercept row under the
    # requested denominator-df method.
    tmpf <- function(ddfm="Satterthwaite") {
      tt <- tidy(m1,conf.int=TRUE,conf.method="Wald",ddf.method=ddfm,effect="fixed")
      unname(unlist(tt[,c("conf.low","conf.high")][1,]))
    }
    # Pinned regression values (depend on set.seed(101) above)
    expect_equal(tmpf("Satterthwaite"), c(231.320558648089, 271.015491535434), tolerance=1e-6)
    expect_equal(tmpf("lme4"), c(232.327411469392, 270.008638714131), tolerance=1e-6)
    expect_equal(tmpf("Kenward-Roger"), c(231.331769141079, 271.004281042444), tolerance=1e-6)
  })
} ## if require(lmerTest)
|
9af4876f623653b4ace170434277edca824e1a4b
|
8229099d8000ee0e905971f4c5464d78bc8e2634
|
/man/all_na.Rd
|
2aed97c87ed5607443566b828118fa17d041819f
|
[] |
no_license
|
alecplotkin/alpacage
|
1ea46f7e83e42cbc40dc0d286ef38c10a537d64b
|
84a1b16f70bea14e771c284e4c88a705d202ac6a
|
refs/heads/main
| 2023-01-23T15:52:51.453014
| 2020-11-22T05:21:04
| 2020-11-22T05:21:04
| 314,960,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
all_na.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_na.R
\name{all_na}
\alias{all_na}
\title{A function to return TRUE for columns of a data frame
which are all NA.}
\usage{
all_na(df)
}
\arguments{
\item{df}{A data frame.}
}
\value{
A logical vector of length ncol(df), that is TRUE only for columns
where all rows are NA.
}
\description{
This can be useful for removing missing columns, which may occur while cleaning
a data set
}
\author{
Alec Plotkin, \email{alp.plotkin@gmail.com}
}
|
315cea782518f801741997e89b60d64b15857b32
|
a83dafea71d80e3c8a2f06e93007b03de7355254
|
/functions.R
|
61d15887c9f70f1eb11f3a1beabebb8a26049537
|
[] |
no_license
|
gtsitsiridis/lung_aging_atlas
|
3717387f7eb7bb7ea16298cc5fa8e9b5d04b24d6
|
d6b7723fe723abe46ce358cf1f95adbba10f9162
|
refs/heads/master
| 2020-03-20T02:24:48.727852
| 2018-06-20T10:30:05
| 2018-06-20T10:30:05
| 137,112,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,969
|
r
|
functions.R
|
# Return the marker-gene table for one cell type, with the redundant
# "cluster" column dropped and the gene name repeated as the first column.
# Relies on the data.table `markers_table` defined elsewhere in this app.
getMarkersTable <- function(cell_type = "Alveolar_macrophage") {
  # FIX: with = FALSE (not the reassignable shorthand F) so the negative
  # column-index vector is treated as column positions.
  dt <-
    markers_table[cluster == cell_type,
                  -c(which(colnames(markers_table) == "cluster")),
                  with = FALSE]
  dt <- cbind(gene = dt$gene, dt[, 1:5])
  dt
}
# Volcano plot (log2 fold change vs -log10 p-value) of one cell type's
# differential-expression results, with one selected gene highlighted in red
# and labelled via ggrepel.
#
# de_table: wide data.table with gene names in "rn" and per-cell-type columns
#   "<cell_type>__p_val" and "<cell_type>__avg_logFC". The default copies
#   the global gene_de_table; NOTE(review): if a caller passes its own
#   data.table, the := below mutates the extracted subset only, not it.
plot_volcano <- function(de_table = copy(gene_de_table),
                         cell_type = "Alveolar_macrophage",
                         gene_name = "Frem1") {
  # extract this cell type's p-value / fold-change columns
  pval_keyword <- "p_val"
  fc_keyword <- "avg_logFC"
  pval_column.name <- paste(cell_type, pval_keyword, sep = "__")
  fc_column.name <- paste(cell_type, fc_keyword, sep = "__")
  # FIX: with = FALSE instead of the reassignable shorthand F
  de.dt <- de_table[, c("rn", pval_column.name, fc_column.name), with = FALSE]
  colnames(de.dt) <- c("Gene", "pvalue", "log2FoldChange")
  # remove genes with missing statistics for this cell type
  de.dt <- de.dt[complete.cases(de.dt)]
  # flag the selected gene for highlighting
  de.dt <- de.dt[, colour := "none"]
  de.dt[Gene == gene_name, colour := "selected"]
  p <-
    ggplot(data = de.dt,
           aes(
             x = log2FoldChange,
             y = -log10(pvalue),
             col = colour,
             label = Gene
           )) +
    geom_point() +
    labs(x = "Fold change (log2)", y = "-log10 p-value") +
    scale_color_manual(values = c(selected = "red", none = "black")) +
    guides(col = FALSE) +  # FIX: FALSE, not shorthand F
    ggtitle(cell_type)
  # re-draw the selected gene on top, larger, with a repelled text label
  p +
    geom_point(
      data = de.dt[de.dt$colour == "selected", ],
      aes(x = log2FoldChange, y = -log10(pvalue)),
      colour = "red",
      size = 2
    ) +
    geom_text_repel(data = de.dt[de.dt$colour == "selected", ],
                    aes(label = Gene),
                    colour = "red",
                    size = 5)
}
# Boxplot of bulk MS protein intensity, old (24m) vs young (3m).
#
# Looks the protein up in the global `protein_bulk` matrix (rows = proteins,
# columns = 4 old then 4 young samples, per the hard-coded grouping below);
# returns the shared "not enough data" placeholder when absent.
#
# @param protein Protein name matched against rownames(protein_bulk).
# @return A ggplot object (or the emptyPlot() placeholder).
genBoxplot_protein <- function(protein = "Bpifa1") {
  if (length(intersect(protein, rownames(protein_bulk))) == 0)
    return(emptyPlot())
  expression <- log(protein_bulk[protein, ])
  # Column order assumed: first 4 samples are 24m, next 4 are 3m.
  dt <-
    data.frame(expression, grouping = c(rep("24m", 4), rep("3m", 4)))
  ggplot(dt, aes(
    factor(grouping, levels = c("3m", "24m")),
    expression,
    col = grouping,
    fill = grouping
  )) +
    geom_boxplot() + geom_jitter(colour = "black") +
    scale_color_manual(values = c(`3m` = "blue", `24m` = "red")) +
    scale_fill_manual(values = c(`3m` = "white", `24m` = "white")) + theme(axis.title.x = element_blank()) +
    # FALSE, not the reassignable shorthand F
    ylab("MS intensity") + ggtitle(protein) + guides(col = FALSE, fill = FALSE)
}
# Dot plot
# Dot plot of one gene across all cell types (Seurat-style):
# dot size = fraction of cells expressing, colour = scaled average expression.
# Reads expression from data/AgingData.h5 and cell annotations from the
# global `cell_info`; returns emptyPlot() when no cluster has enough
# expressing cells.
dotPlot <- function (gene_name = "Scgb1a1") {
  # Defaults (kept from the Seurat DotPlot code this was adapted from; only
  # "radius" scaling is actually used below)
  cols.use = c("lightgrey", "blue")
  plot.legend = FALSE
  do.return = FALSE
  x.lab.rot = FALSE
  scale.func <- switch(EXPR = "radius",
                       'size' = scale_size,
                       'radius' = scale_radius,
                       stop("'scale.by' must be either 'size' or 'radius'"))
  # Clamp values into [min, max].
  MinMax <- function (data, min, max) {
    data2 <- data
    data2[data2 > max] <- max
    data2[data2 < min] <- min
    return(data2)
  }
  # Fraction of values strictly above `threshold`.
  PercentAbove <- function (x, threshold) {
    return(length(x = x[x > threshold]) / length(x = x))
  }
  # Load gene expression (one value per cell, same order as cell_info)
  expression <-
    h5read("data/AgingData.h5", name = as.character(gene_name))
  data.to.plot <- data.table(expression)
  colnames(x = data.to.plot) <- "expression"
  data.to.plot$id <- cell_info$celltype
  # filtering step: require at least one cluster with >= 5 expressing cells
  # (the code checks 5, not the 10 the original comment claimed)
  if(data.to.plot[,sum(expression>0),by=id][,sum(V1 >= 5) == 0])
    return(emptyPlot())
  # z-score the expression across all cells before per-cluster summaries
  data.to.plot[, expression := (expression - mean(expression)) / sd(expression)]
  setnames(data.to.plot, "expression", gene_name)
  data.to.plot <-
    data.to.plot %>% gather(key = genes.plot, value = expression,-c(id))
  # Per cluster: mean expm1(expression) and percent of expressing cells.
  data.to.plot <- data.to.plot %>% group_by(id, genes.plot) %>%
    dplyr::summarize(avg.exp = mean(expm1(x = expression)),
                     pct.exp = PercentAbove(x = expression, threshold = 0))
  # Scale the per-cluster averages and clamp to [-2.5, 2.5] for the colour scale.
  data.to.plot <-
    data.to.plot %>% ungroup() %>% group_by(genes.plot) %>%
    mutate(avg.exp.scale = scale(x = avg.exp)) %>% mutate(avg.exp.scale = MinMax(
      data = avg.exp.scale,
      max = 2.5,
      min = -2.5
    ))
  # NOTE(review): pct.exp is a proportion in [0, 1], so this looks like a
  # no-op carried over from the Seurat original -- confirm before removing.
  data.to.plot$pct.exp[data.to.plot$pct.exp < 0] <- NA
  data.to.plot <- as.data.frame(data.to.plot)
  colnames(data.to.plot) <-
    c("Cell_type", "Gene", "AvgExpr", "PctExpressed", "AvgRelExpr")
  # Clusters excluded from the display.
  bad <-
    c("red_blood_cells",
      "Gamma-Delta_T_cells",
      "low_quality_cells")
  data.to.plot <-
    data.to.plot[-match(bad, as.character(data.to.plot$Cell_type)),]
  data.to.plot <-
    data.to.plot[-which(is.na(data.to.plot$Cell_type)),]
  # Fixed display order of cell types (bottom-to-top after rev()).
  celltype_order <-
    rev(
      c(
        "Alveolar_macrophage",
        "Mki67+_proliferating_cells",
        "Natural_Killer_cells",
        "Plasma_cells",
        "B_cells",
        "Cd4+_T_cells",
        "CD8+_T_cells",
        "Interstitial_macrophages",
        "non-classical_monocyte_(Ly6c2-)",
        "classical_monocyte_(Ly6c2+)",
        "Cd103+/Cd11b-_dendritic_cells",
        "CD209+/Cd11b+_dendritic_cells",
        "Ccl17+/Cd103-/Cd11b-_dendritic_cells",
        "Megakaryocytes",
        "Neutrophils",
        "Eosinophils",
        "Fn1+_macrophage",
        "lymphatic_endothelial_cells",
        "Vcam1+_endothelial_cells",
        "vascular_endothelial_cells",
        "Capillary_endothelial_cells",
        "Mesothelial_cells",
        "Smooth_muscle_cells",
        "Interstitial_Fibroblast",
        "Lipofibroblast",
        "Type1_pneumocytes",
        "Type_2_pneumocytes",
        "Ciliated_cells",
        "Club_cells",
        "Goblet_cells"
      )
    )
  data.to.plot$Cell_type <-
    factor(data.to.plot$Cell_type, levels = celltype_order)
  p <-
    ggplot(data = data.to.plot, mapping = aes(x = Gene, y = Cell_type)) +
    geom_point(mapping = aes(size = PctExpressed, color = AvgRelExpr)) +
    scale.func(range = c(0, 10), limits = c(NA, NA)) +
    theme(
      axis.text.y = element_text(size = 13),
      plot.margin = unit(c(1, 1, 1, 1), "cm"),
      legend.text = element_text(size = 8),
      legend.title = element_text(size = 8),
      legend.position = c(0.75, 0.5),
      axis.title.x = element_blank(),
      axis.title.y = element_blank()
    )
  p
}
# Violin/jitter plot of a gene's single-cell UMI counts in one cell type,
# contrasting the 3-month and 24-month groups.
#
# Reads the gene's per-cell expression from data/AgingData.h5 and joins it to
# the global `cell_info` annotation (columns `grouping`, `celltype`).
#
# @param gene_name Gene to read from the HDF5 store.
# @param cell_type Cell type to subset to (matched against cell_info$celltype).
# @return A ggplot object.
genBoxplot <-
  function(gene_name = "Scd1",
           cell_type = "Type_2_pneumocytes") {
    expression <-
      h5read("data/AgingData.h5", name = as.character(gene_name))
    dt <-
      cbind(expression = expression, cell_info[, .(grouping, celltype)])
    dt <- dt[celltype == cell_type]
    ggplot(dt, aes(
      factor(grouping, levels = c("3m", "24m")),
      expression,
      col = grouping,
      fill = grouping
    )) + geom_violin() + geom_jitter(colour = "black") +
      scale_color_manual(values = c(`3m` = "blue", `24m` = "red")) +
      scale_fill_manual(values = c(`3m` = "blue", `24m` = "red")) +
      # FALSE, not the reassignable shorthand F
      xlab("") + ylab("UMI counts [log2]") + ggtitle(gene_name) + guides(color = FALSE, fill = FALSE)
  }
# tSNE plot
# genTSNEplot <- function(gene = "Ear2"){
# expr <-
# h5read("data/AgingData.h5", name = as.character(gene))
#
# if(sum(expr) == 0) return(emptyPlot())
#
# expr <- (expr - mean(expr)) / sd(expr)
# expr.min <- quantile(expr, 0.01)
# expr.max <- quantile(expr, 0.99)
# if(expr.min == expr.max) expr <- h5read("data/AgingData.h5", name = as.character(gene))
# #expr[which(expr > expr.max)] <- expr.max
# #expr[which(expr < expr.min)] <- expr.min
# farben <- color.scale(expr, extremes = c("grey", "darkblue"), alpha = 0.5)
# plot(
# tsne_coord,
# col = farben,
# pch = 19,
# main = gene,
# cex = 0.6)
# }
# tSNE plot
# t-SNE embedding coloured by a gene's expression.
#
# Reads one gene's per-cell expression from the HDF5 store named by the
# global `expression.file` and overlays it on the precomputed global
# `tsne_coord` embedding. If the gene is entirely unexpressed the colour
# scale collapses to grey so the panel still renders.
#
# @param gene_name Gene to read from the HDF5 store.
# @return A ggplot object.
genTSNEplot <- function(gene_name = 'Frem1') {
  gene <-
    h5read(expression.file, name = gene_name)
  H5close()  # release the HDF5 handle opened by h5read
  dt <- cbind(tsne_coord, expression = gene)
  high <- "darkblue"
  if (all(gene == 0)) {
    high = "grey"
  }
  # NOTE(review): aes(col = gene) resolves `gene` from this function's
  # environment (the dt column is named `expression`); it works via ggplot2's
  # environment fallback but is fragile -- confirm before refactoring.
  ggplot(dt) + geom_point(aes(tSNE_1, tSNE_2, col = gene), alpha = .5) +
    # FALSE, not the reassignable shorthand F
    guides(col = FALSE) +
    ggtitle(gene_name) + scale_color_continuous(low = "grey", high = high)
}
# Solubility plot
# Protein solubility profile across the four sequential-extraction fractions
# (FR1-FR3, ECM), contrasting young vs old animals, with the age:fraction
# ANOVA interaction p-value in the title. Returns emptyPlot() when the
# protein is absent from the global `protein_fractions` matrix.
genLinePlot <- function(protein = "Frem1") {
  if (length(intersect(protein, rownames(protein_fractions))) == 0)
    return(emptyPlot())
  # Sample layout assumed by the hard-coded design below: 16 young samples
  # followed by 16 old samples; within each age, 4 replicates per fraction in
  # the order FR1, FR2, FR3, ECM -- TODO confirm against protein_fractions.
  age <- c(rep("young", 16), rep("old", 16))
  fractions <-
    c(
      rep("FR1", 4),
      rep("FR2", 4),
      rep("FR3", 4),
      rep("ECM", 4),
      rep("FR1", 4),
      rep("FR2", 4),
      rep("FR3", 4),
      rep("ECM", 4)
    )
  fractions <- factor(fractions, c("FR1", "FR2", "FR3", "ECM"))
  expr_tmp <- protein_fractions[protein,]
  expr_tmp <- log2(expr_tmp)
  # Centre each age group on its own mean so the two profiles are comparable.
  means <-
    c(rep(mean(expr_tmp[1:16], na.rm = T), 16), rep(mean(expr_tmp[17:32], na.rm = T), 16))
  expr_tmp <- expr_tmp - means
  data <- data.frame(expression = expr_tmp, age, fractions)
  # Two-way ANOVA; element [3] of Pr(>F) is the age:fractions interaction.
  res <- summary(aov(expression ~ age * fractions, data = data))
  pval <- signif(res[[1]]$`Pr(>F)`[3], 2)
  title <- paste(protein, '(ANOVA interaction P:', pval, ")")
  # Mean +/- standard error per age x fraction cell (plyr::ddply).
  agg <-
    ddply(data, .(age, fractions), function(x)
      c(
        mean = mean(x$expression, na.rm = T),
        se = sd(x$expression, na.rm = T) / sqrt(length(x$expression))
      ))
  agg$lower <- agg$mean + agg$se
  agg$upper <- agg$mean - agg$se
  ggplot(agg, aes(y = mean, x = fractions, colour = age)) +
    geom_errorbar(aes(ymin = lower, ymax = upper), width = .3) +
    geom_point() + ggtitle(title) +
    stat_summary(fun.y = mean,
                 geom = "smooth",
                 aes(group = age),
                 lwd = 1) +
    scale_color_manual(values = c(old = "red", young = "blue")) +
    ylab("Normalized MS-Intensity") + xlab("") +
    geom_hline(yintercept = 0, lty = 2) +
    theme(plot.margin = unit(c(0.5, 0.5, 0.5, 2), "cm"))
}
# Some meta functions
# Compose the three RNA views for one gene/cell type -- dot plot, volcano
# plot, and expression box plot -- side by side in a single row.
RNA_panel <- function(gene, celltype) {
  p_box <- genBoxplot(gene_name = gene, cell_type = celltype)
  p_dot <- dotPlot(gene_name = gene)
  p_volcano <- plot_volcano(de_table = de_table,
                            cell_type = celltype,
                            gene_name = gene)
  # One row, three equal-width columns: dot | volcano | box.
  grid.arrange(p_dot, p_volcano, p_box, layout_matrix = rbind(c(1, 2, 3)))
}
# Side-by-side panel for one gene: mRNA dot plot (one third of the width)
# next to the protein solubility profile (two thirds).
Solubility_panel <- function(gene) {
  p_dot <- dotPlot(gene_name = gene)
  p_solubility <- genLinePlot(gene)
  grid.arrange(p_dot, p_solubility, layout_matrix = rbind(c(1, 2, 2)))
}
# Placeholder panel shown when a view has no (or not enough) data.
#
# Draws a blank plot containing only the text "not enough data", with all
# axes and gridlines suppressed, and tags the result with an extra S3 class
# "empty_plot" so callers can recognise placeholder output.
#
# @return A ggplot object that additionally inherits from "empty_plot".
emptyPlot <- function() {
  df <- data.frame(x = 5, y = 5, text = "not enough data")
  p <- ggplot(df, aes(x, y, label = text)) +
    geom_point(col = "white") + xlim(0, 10) + ylim(0, 10) + geom_text() + theme_bw() +
    theme(
      axis.title.x = element_blank(),
      axis.text.x = element_blank(),
      axis.ticks.x = element_blank(),
      axis.title.y = element_blank(),
      axis.text.y = element_blank(),
      axis.ticks.y = element_blank(),
      panel.grid.minor = element_blank(),
      panel.grid.major = element_blank()
    ) +
    theme(plot.margin = unit(c(2, 2, 2, 2), "cm"))
  # Append the marker class. The original `class(p)[4] <- "empty_plot"`
  # assigned past the end of the class vector, leaving an NA entry in it.
  class(p) <- c(class(p), "empty_plot")
  p
}
# Return the enrichment rows for one cell type, optionally filtered by
# enrichment type, without the leading "Cell type" column.
#
# @param cell_type Value matched against the `Cell type` column of the
#   global `enrichment_table`.
# @param enrichment_type "All" keeps every enrichment category, anything
#   else is matched against the `Type` column.
# @return A data.table without its first column.
getEnrichmentTable <- function(cell_type = "Type_2_pneumocytes", enrichment_type = "All") {
  # data.table subsetting with `[i]` already yields a new object, so the
  # original's defensive copy() (whose result was immediately discarded)
  # is unnecessary.
  dt <- enrichment_table[`Cell type` == cell_type]
  if (enrichment_type != "All") {
    dt <- dt[Type == enrichment_type]
  }
  # Drop the first ("Cell type") column; FALSE instead of shorthand F.
  dt[, -1, with = FALSE]
}
# Horizontal bar plot of the top enrichment scores for one cell type:
# up to 10 most positive (green) and 10 most negative (red) terms.
#
# @param cell_type Value matched against the `Cell type` column of the
#   global `enrichment_table`.
# @param enrichment_type "All" keeps every category, otherwise filters on
#   the `Type` column.
# @return A ggplot object, or the emptyPlot() placeholder when no rows match.
enrichmentBarPlot <- function(cell_type = "Type_2_pneumocytes", enrichment_type = "All") {
  # Subsetting returns a new data.table, so no defensive copy() is needed
  # (the original copied enrichment_table and immediately discarded the copy).
  dt <- enrichment_table[`Cell type` == cell_type]
  if (enrichment_type != "All") {
    dt <- dt[Type == enrichment_type]
  }
  if (nrow(dt) == 0) {
    return(emptyPlot())
  }
  dt <- dt[, .(Name, `Score`)]
  # Top 10 positive and top 10 negative scores; seq_len() avoids the 1:0
  # trap that produced a spurious NA row when one side was empty.
  dt1 <- dt[Score > 0][order(`Score`, decreasing = TRUE)][seq_len(min(10, .N))]
  dt2 <- dt[Score < 0][order(`Score`, decreasing = FALSE)][seq_len(min(10, .N))]
  dt <- rbind(dt2, dt1)
  dt <- dt[order(Score, decreasing = FALSE)]
  # Wrap long term names (~30 chars) onto multiple lines for readability.
  dt[, Name := gsub("(.{30,}?)\\s", "\\1\n", Name)]
  dt[, Name := factor(Name, levels = dt$Name)]
  dt[, up := Score > 0]
  title <- paste(cell_type, ifelse(enrichment_type == "All", "", paste0("-", enrichment_type)), sep = "")
  ggplot(dt) + geom_bar(aes(x = Name, y = Score, fill = up), position = position_dodge(width = .1), stat = "identity") +
    theme(axis.title.y = element_blank(), axis.text.y = element_text(size = 13)) +
    coord_flip() + labs(y = "Score") + scale_fill_manual(values = c(`TRUE` = "green", `FALSE` = "red")) +
    guides(fill = FALSE) + ggtitle(title) + scale_y_continuous(limits = c(-1, 1))
}
|
d8727b1d683133dae8a0788bf0bb3117cbf52e3a
|
ef01bab1215f822fe415021c73c2b915fdd787ba
|
/02_survival_models/helper_functions.R
|
8c57933d0133145856cca018c94557fe5da18b7c
|
[] |
no_license
|
nvkov/MA_Code
|
b076512473cf463e617ed7b24d6553a7ee733155
|
8c996d3fdbbdd1b1b84a46f84584e3b749f89ec3
|
refs/heads/master
| 2021-01-17T02:48:40.082306
| 2016-09-25T18:53:53
| 2016-09-25T18:53:53
| 58,817,375
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 790
|
r
|
helper_functions.R
|
#helper functions
##' @S3method predictSurvProb rpart
# Predict survival probabilities for an rpart survival tree via prodlim.
#
# The fitted tree partitions observations into terminal nodes; node
# membership is recoded as a factor and a product-limit (Kaplan-Meier) model
# is fitted per node on the training data, which then supplies survival
# probabilities for `newdata`.
#
# NOTE(review): the original body contained stray leading "-" characters
# (diff/paste residue) that made the file unparseable; they are removed here.
#
# @param object A fitted `rpart` survival tree.
# @param newdata Data frame of observations to predict for.
# @param times Time points at which to evaluate the survival probability.
# @param train.data Training data used to refit the per-node survival curves.
# @param ... Passed through to predict() on the rpart object.
# @return A matrix of survival probabilities (rows = newdata, cols = times).
predictSurvProb.rpart <- function(object, newdata, times, train.data, ...){
  # require(rpart); require(rms)  # historical notes kept from the original
  learndat <- train.data
  nclass <- length(unique(object$where))  # number of terminal nodes (not used below)
  learndat$rpartFactor <- factor(predict(object, newdata = train.data, ...))
  newdata$rpartFactor <- factor(predict(object, newdata = newdata))
  # Rebuild "<response> ~ rpartFactor" from the tree's original call.
  rpart.form <- reformulate("rpartFactor", eval(object$call$formula)[[2]])
  fit.rpart <- prodlim(rpart.form, data = learndat)
  p <- predictSurvProb(fit.rpart, newdata = newdata, times = times)
  p
}
|
ec242a3a578eb46bd337c06fe04f76eef137f79c
|
b4182374ee423938631aef113d503ef65bc45080
|
/cachematrix.R
|
fc6ac524e354cf78dfcffa1a6318aaa9791ee532
|
[] |
no_license
|
DavideDelVecchio/ProgrammingAssignment2
|
e97db5df0a83b0d3a23eee5c71c15d0017e01666
|
be70d99f65081adf88822c2dd07d3503381339b7
|
refs/heads/master
| 2020-12-14T09:43:21.452810
| 2014-12-20T11:07:25
| 2014-12-20T11:07:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
r
|
cachematrix.R
|
## The makeCacheMatrix constructs a suitable object that can store the value
## of both the direct and cached inverse.It should be used in combination with cacheSolve
## that, when called, before perforimg the computation of the inverse checks for the cached variable
## if not found computes the inverse and caches it in the object returned by makeCacheMatrix
## usage example
## > set.seed(44)
## > m <- matrix(sample.int(100,size=9,replace=TRUE), nrow=3)
## > d <- makeCacheMatrix(m)
## > cacheSolve(d)
## subsequent invocation returns cached data
## > inv <- cacheSolve(d)
## returning cached data
## makeCacheMatrix stores in variable i the inverse and provides accessor methods for both
## the data (ie the matrix one wants to calculate the inverse) and the inverse itself
# Build a matrix wrapper that can memoise its inverse.
#
# Returns a list of four accessor closures over the matrix `x` and its
# cached inverse. set() replaces the matrix and invalidates the cache; the
# cache itself is read/written via getinverse()/setinverse() (used by
# cacheSolve()).
#
# @param x The matrix to wrap (defaults to an empty 1x1 NA matrix).
# @return list(set, get, setinverse, getinverse)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix => any stored inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve searches for a cached value of the inverse matrix one wants to calculate
## by accessing the object provided by the makeCacheMatrix function and invoking the getinverse() function
## implemented in the first part of the exercise.
# if founded (the object returned by the method is not null) returns it and exit the function
# otherwise accesses again the object
## gets the data (the matrix one wants to calcutlate the inverse)
## invokes the solve functions provided by R base system to compute the inverse,
## stores the inverse for future invocations on the same variable passed and finally returns inverse
# Return the inverse of the matrix held in a makeCacheMatrix() object,
# computing it at most once: a previously cached inverse is reused (with a
# message), otherwise the inverse is solved, stored back into the cache,
# and returned.
#
# @param x An object created by makeCacheMatrix().
# @param ... Passed through to solve().
# @return The inverse matrix of x$get().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("returning cached data")
  }
  cached
}
|
f605ce829cd9918e4bae67d8f650f29477d39b0d
|
712561e4275220a6fe93c6b0e204df41ba169a85
|
/plot3.R
|
c2bdfb209f0395c0377c3652c82725a7a657d5a3
|
[] |
no_license
|
urvog/ExData_Plotting1
|
3b8ae73e8783203ab45942e202e6df90d27dec31
|
422ea4617ba3f65c56c2612948d56bd18e12eb69
|
refs/heads/master
| 2021-01-12T13:59:21.384589
| 2016-03-27T01:08:09
| 2016-03-27T01:08:09
| 54,789,335
| 0
| 0
| null | 2016-03-26T16:55:22
| 2016-03-26T16:55:22
| null |
UTF-8
|
R
| false
| false
| 990
|
r
|
plot3.R
|
## Plot 3 -- energy sub-metering over 1-2 Feb 2007 (Coursera EDA course).
## The file household_power_consumption.txt must be in the same directory.
## Missing values in the raw file are encoded as "?".
data_raw<-read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## Convert the Date column with as.Date() and keep only the two target days.
data_raw$Date<-as.Date(data_raw$Date, format="%d/%m/%Y")
data<-subset(data_raw, Date=="2007-02-01"|Date=="2007-02-02")
## Combine Date + Time into a single POSIXlt timestamp for the x axis.
data$Time<-strptime(paste(as.character(data$Date), as.character(data$Time), sep = ' '), format = "%Y-%m-%d %H:%M:%S")
## Plot the three sub-metering series and save to plot3.png (480x480 px).
png("plot3.png", width = 480, height = 480)
plot(data$Time, data$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_1, col = "black")
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", col = c("black","red","blue"), lty = 1, legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) # draw the legend
dev.off()
|
5decf24fb76d373921eb19fb61b4832aa5452b79
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tadaatoolbox/examples/tadaa_pairwise_tukey.Rd.R
|
e8b7c52c58dc06f86b1ec4e709ce39080b4062b9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
tadaa_pairwise_tukey.Rd.R
|
# Auto-extracted example script for tadaatoolbox::tadaa_pairwise_tukey.
library(tadaatoolbox)
### Name: tadaa_pairwise_tukey
### Title: Tukey HSD pairwise comparisons
### Aliases: tadaa_pairwise_tukey

### ** Examples

# Pairwise Tukey HSD comparisons on the bundled `ngo` data set; the second
# call requests console-formatted output.
tadaa_pairwise_tukey(data = ngo, deutsch, jahrgang, geschl)
tadaa_pairwise_tukey(data = ngo, deutsch, jahrgang, print = "console")
|
d945d4be769f7af0e975b11ab4bed306728bc6ef
|
2404b054351c57922bd54fa6230ee9046f1be961
|
/Factor Analysis/bibliometrix/bibliometrix_cocit_matrix.R
|
bb43790a2f51ae270171972c1c96bee53c0c7275
|
[] |
no_license
|
andrelmfsantos/R_Scripts
|
2840aca19a8038d2a5f4896d1e1a0880c21a8085
|
cf5d95c1dcd2a0ee7954320133e5ab8eab90cc08
|
refs/heads/master
| 2021-03-22T08:00:08.483741
| 2020-03-07T13:20:12
| 2020-03-07T13:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,507
|
r
|
bibliometrix_cocit_matrix.R
|
# Build a co-citation matrix from a Scopus BibTeX export with bibliometrix.
library(bibliometrix)
# If importing directly from a Scopus BibTeX export:
D <- readFiles("~/Google Drive/Academic/Orientações/Jefferson da Costa/RSL/BibTeX_Scopus/158_stakeholder.bib")
M <- convert2df(D, format = "bibtex", dbsource = "scopus")
write.csv2(M, "Bruto_Bibliometrix.csv")
# Import the (manually) cleaned dataset back into R (62 character columns).
M <- read.csv2("Bruto_Bibliometrix_limpo.csv",
               colClasses = rep('character', 62))
# Citation field cleanup: normalise "DOI;" so references split correctly on ";".
M$CR<- stringr::str_replace_all(as.character(M$CR),"DOI;","DOI ")
# Co-citation network of cited references; drop empty-name columns/rows.
cocit <- biblioNetwork(M, analysis = "co-citation",
                       network = "references", sep = ";", shortlabel = T)
cocit <- cocit[nchar(colnames(cocit)) != 0,
               nchar(colnames(cocit)) != 0]
dim(cocit)
# Top-cited references (diagonal of the co-citation matrix = citation counts).
CR <- citations(M, field = "article", sep = ";")
topcited <-rownames(cbind(CR$Cited[1:1300])) # choose the top-cited set used to cut the co-citation matrix
cited <- Matrix::diag(cocit)
cited <- as.data.frame(cbind(rownames(cocit), cited))[order(-cited),]
# Export top-cited to CSV
write.csv2(cited, "top-cited.csv")
# Zero the diagonal so self co-citation does not dominate.
Matrix::diag(cocit) <- 0
# Subset matrix: keep rows/columns with at least one co-citation count
# >= threshold (number of references in common).
threshold <- 14
cocit_subset <- cocit[Matrix::rowSums(cocit >= threshold) >= 1,
                      Matrix::colSums(cocit >= threshold) >= 1]
cocit_matrix <- as.data.frame(as.matrix(cocit_subset))
assertthat::are_equal(names(cocit_matrix), rownames(cocit_matrix))
# Export CSV
write.csv2(cocit_matrix, "cocit_matrix.csv")
|
19b7b64e4b49122b6894938c2d805b90656c69db
|
d6716ad11768a252ca4d9b88172937f24b904329
|
/R/jumpoints.R
|
8ac9893d4f423ee2b63eedf958c841e8e95f0f63
|
[] |
no_license
|
cran/cumSeg
|
5651f714af39aa5cb090a92e42c35ab368476d2a
|
67cf77a9f02f991f197a2e6084d72aa678af7e70
|
refs/heads/master
| 2023-04-08T08:40:01.063844
| 2020-07-17T08:10:02
| 2020-07-17T08:10:02
| 17,695,344
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,580
|
r
|
jumpoints.R
|
# Estimate jump-points (piecewise-constant mean shifts) in a sequence.
#
# Works on the cumulative sums of y (so mean shifts become slope changes),
# runs the iterative segmented algorithm, and optionally selects the
# "significant" breakpoints via lars plus an information criterion.
#
# y: the response; x the explanatory (if missing index integers are assumed)
# psi: the starting values for the breakpoints. If NULL k quantiles are used.
# k: the number of breakpoints to be estimated. This argument is ignored when psi is specified
# output: "1" returns all detected breakpoints. fit.control() can be used to
#   modify it.max; increasing it can reduce the number of putative (candidate)
#   psi and improve the performance of criterion-based psi selection.
# output: "2" applies the criterion in `selection` to pick the "significant" jumpoints
# output: "3" re-runs the segmented algorithm starting from the selected
#   points; spurious psi can typically be eliminated further this way.
jumpoints <-function(y, x, k=min(30,round(length(y)/10)), output="2",
    psi=NULL, round=TRUE, control = fit.control(), selection=sel.control(), ...) {
#------------
#--- seg.lm.fit0
# internal helper functions
seg.lm.fit0<-NULL
seg.lm.fit0<- function(y, Z, PSI, control, round=FALSE, ...){
# Simplified version of seg.lm.fit() that avoids unnecessary computations.
# y: the response (here: cumulative sums of the original data)
# Z: matrix of the segmented variable (x replicated once per candidate psi)
# control: list controlling the estimation process (see fit.control())
# round: round the change-point solutions to integers?
#----------------
    it.max <- old.it.max <- control$it.max
    toll <- control$toll
    visual <- control$visual
    last <- control$last
    stop.if.error<-control$stop.if.error
    h <- min(abs(control$h), 1)
    if (h < 1)
        it.max <- it.max + round(it.max/2)
    it <- 1
    epsilon<-10
    k <- ncol(PSI)
    psi.values <- NULL
    H <- 1
    psi<-PSI[1,]
# NB: Z holds replicates of the same variable, so taking Z[,1] is sufficient.
#if (intercept) XREG <- cbind(1,Z[,1],Xlinear) else XREG <- cbind(Z[,1],Xlinear)
# if AR:
#    n<-length(y)
#    XREG<-cbind(c(y[1],y[1:(n-1)]),Z[,1])
#
    XREG<-cbind(Z[,1])
# obj below only exists so the first deviance can be printed
    #obj<-lm.fit(y=y, x=XREG)
    obj<-list(residuals=rep(10,3))
    while (abs(epsilon) > toll) {
        U <- pmax((Z - PSI), 0)
        V <- ifelse((Z > PSI), -1, 0)
        X<-cbind(XREG, U, V)
        dev.old <- sum(obj$residuals^2)
        rownames(X) <- NULL
        if (ncol(V) == 1) {
            #colnames(X)[ (ncol(XREG) + 1):ncol(X)] <- c("U", "V")
            colnames(X)[ (ncol(XREG) ):ncol(X)] <- c("firstSlope","U", "V")
            } else {
            colnames(X)<-rev(c(paste("V", ncol(V):1, sep = ""),
                paste("U",ncol(U):1, sep = ""),rep("firstSlope",ncol(X)-ncol(U)-ncol(V))))
        }
        obj <- lm.fit(x = X, y = y) #drop(solve(crossprod(X),crossprod(X,y)))
        dev.new <- sum(obj$residuals^2)
        if (visual) {
            if (it == 1) cat(0, " ", formatC(dev.old, 3, format = "f"),"", "(No breakpoint(s))", "\n")
            spp <- if (it < 10) "" else NULL
            cat(it, spp, "", formatC(dev.new, 3, format = "f"), "---",ncol(V),"breakpoints","\n")
        }
        epsilon <- (dev.new - dev.old)/dev.old
        obj$epsilon <- epsilon
        it <- it + 1
        obj$it <- it
        class(obj) <- c("segmented", class(obj))
        if (k == 1) {
            beta.c <- coef(obj)["U"]
            gamma.c <- coef(obj)["V"]
        } else {
        # if contrasts were present, which coefficients would be the beta.c?
            beta.c <- coef(obj)[paste("U", 1:ncol(U), sep = "")]
            gamma.c <- coef(obj)[paste("V", 1:ncol(V), sep = "")]
        }
        if (it > it.max) break
        psi.values[[length(psi.values) + 1]] <- psi.old <- psi
        if (it >= old.it.max && h < 1) H <- h
        psi <- round(psi.old + H * gamma.c/beta.c,0)
        PSI <- matrix(rep(psi, rep(nrow(Z), ncol(Z))), ncol = ncol(Z))
        #check if psi is admissible..
        a <- apply((Z <= PSI), 2, all)
        b <- apply((Z >= PSI), 2, all)
        if(stop.if.error) {
          if(sum(a + b) != 0 || is.na(sum(a + b))) stop("(Some) estimated psi out of its range")
          } else {
            id.psi.ok<-!is.na((a+b)<=0)&(a+b)<=0
            Z <- Z[,id.psi.ok,drop=FALSE]
            psi <- psi[id.psi.ok]
            PSI <- PSI[,id.psi.ok,drop=FALSE]
            #id.psi.ok<-!a|b # indices of valid psi
            # Z <- Z[,!is.na(id.psi.ok),drop=FALSE]
            # psi <- psi[!is.na(id.psi.ok)]
            # PSI <- PSI[,!is.na(id.psi.ok),drop=FALSE]
            }
        if(ncol(PSI)<=0) {
            warning("No breakpoint estimated", call. = FALSE)
            obj<-lm.fit(x = XREG, y = y)
            obj$fitted.values<-rep(obj$coef,length(y))
            obj$est.means<-obj$coef
            return(obj)
            }
        } # end while
    if(round) {
      psi<-round(psi,0)
      PSI <- matrix(rep(psi, rep(nrow(Z), ncol(Z))), ncol = ncol(Z))
      V <- ifelse((Z > PSI), -1, 0)
      # V is needed for the fitted values... could its creation be avoided?
      }
    #obj$psi <- if(round) round(sort(psi),2) else sort(psi)
    obj$psi <- sort(psi)
    obj$beta.c <- beta.c[order(psi)]
    obj$gamma.c <- gamma.c[order(psi)]
    obj$epsilon <- epsilon
    obj$V<- V[,order(psi)]
    # one final check: drop psi whose coefficient did not estimate
    obj$psi<-obj$psi[!is.na(obj$beta.c)]
    obj$V<-as.matrix(as.matrix(obj$V)[,!is.na(obj$beta.c)])
    obj$beta.c<-obj$beta.c[!is.na(obj$beta.c)]
    return(obj)
    }
#
#-- pen.MDL
pen.MDL<-function(id,n){
# Returns a vector (of length length(id)) with the MDL penalty 2*sum(log n_j)
# for every partition (active set). length(id) is the (max) number of
# breakpoints and n is the vector of partition sizes.
      do.m<-function(id,n.col){
        blockdiag <- function(...) {
          args <- list(...)
          nc <- sapply(args,ncol)
          cumnc <- cumsum(nc)
          ## nr <- sapply(args,nrow)
          ## NR <- sum(nr)
          NC <- sum(nc)
          rowfun <- function(m,zbefore,zafter) {
            cbind(matrix(0,ncol=zbefore,nrow=nrow(m)),m,
                  matrix(0,ncol=zafter,nrow=nrow(m)))
          }
          ret <- rowfun(args[[1]],0,NC-ncol(args[[1]]))
          for (i in 2:length(args)) {
            ret <- rbind(ret,rowfun(args[[i]],cumnc[i-1],NC-cumnc[i]))
          }
          ret
        } #end blockdiag
        id<-sort(id) #sort(unlist(id))
        if(length(id)==1) {
            m<-t(rep(1,id))} else {
            m<-do.call(blockdiag,lapply(c(id[1],diff(id)),function(xx)t(rep(1,xx))))
            }
        m<-blockdiag(m,t(rep(1,n.col-ncol(m))))
        m
        } #end do.m
    # actual computation starts here
    if(length(n)!=(length(id)+1)) stop("Errore in 'id' o 'n'")
    A<-matrix(rev(id),length(id),length(id),byrow=FALSE)
    A[col(A)>row(A)]<-NA
    r<-rev(apply(A,2,function(x)(x[!is.na(x)])))
    lista.m<-lapply(r,do.m,n.col=length(n))
    #sapply(lista.m,function(xx)drop(xx%*%n))
    ris<-sapply(lista.m,function(xx)2*sum(log(drop(xx%*%n))))
    ris<-c(2*sum(log(n)),ris)
    ris
    }
#-----------
#------------
    n <- length(y)
# if(any(is.na(y))) {
#   id<-complete.cases(y,x)
#   x<-x[id]
#   y<-y[id]
#   }
    if(missing(x)) {
      x<-1:n
      Y<-cumsum(y)
      miss.x<-TRUE
      } else {
      miss.x<-FALSE
      if(length(x)!=n) stop("Lengths of x and y differ")
      y<-y[order(x)]
      x<-sort(x)
      diffx <- c(x[1],diff(x))
#     DD <- matrix(diffx, ncol=n, nrow=n, byrow=TRUE)
#     DD[col(DD)>row(DD)]<-0 # transformation matrix mapping y to Y
#     Y <- drop(DD%*%y)
      Y<-cumsum(y*diffx)
      }
    rangeX<-range(x)
    if(is.null(psi)) psi<-quantile(x, prob= seq(0,1,l=k+2)[-c(1,k+2)], names=FALSE)
    k<-length(psi)
    Z <- matrix(rep(x, k), nrow = n)
    PSI <- matrix(rep(psi, rep(n, k)), ncol = k)
    it.max <- old.it.max <- control$it.max
    if (it.max == 0) U <- pmax((Z - PSI), 0)
    Xlin<- NULL
    obj<-seg.lm.fit0(y=Y, Z=Z, PSI=PSI, control=control, round=round, ...)
    obj$y<-y
    if(!is.null(obj$psi)){
      obj$fitted.values<-drop(abs(obj$V)%*%obj$beta.c)
      if("firstSlope"%in%names(coef(obj))) obj$fitted.values<- obj$fitted.values + obj$coef["firstSlope"]
      obj$id.group<- -rowSums(obj$V)
      }
    obj$est.means<-cumsum(c(obj$coef["firstSlope"], obj$beta.c))
    obj$n.psi<-length(obj$psi)
##---------------------------------------
##--- first output type: return every candidate breakpoint as-is
    if(output=="1") {
        class(obj)<-"aCGHsegmented"
        obj$rangeX<-rangeX
        return(obj)
        }
# from here on: also select the "significant" psi...
    psi0<-obj$psi
    est.means0<-obj$est.means
    display1<-selection$display
    edf.psi<-selection$edf.psi
    type<-selection$type
    ##nome<-deparse(substitute(type))
    #plot.it<-selection$plot.it
    Cn<-eval(parse(text=selection$Cn))
    S<-selection$S
    tipoAlg<-selection$alg
    #require(lars)
    if(is.null(obj$psi)) stop("No estimated breakpoint in obj")
#-------------------
#pesi2<-1/abs(diff(c(obj$psi,n))*obj$beta.c)
#pesi3<-1/sqrt(diff(c(obj$psi,n))^2+obj$beta.c^2)
#s1<-scale(diff(c(obj$psi,n)))
#b1<-scale(obj$beta.c)
#pesi3<-1/sqrt(b1^2+(s1/2)^2)
#olars<-lars(t(pesi3*t(abs(obj$V))), y=y, type="lasso", normalize=FALSE, intercept=TRUE, trace=display1)
#
## if bic/mdl, do not consider the penalization:
#if(type!="rss") tipoAlg<-"stepwise"
    olars<-lars(abs(obj$V), y=y, type=tipoAlg, normalize=FALSE, intercept=TRUE, trace=display1)
    #type="stepwise" ,"lasso"
    id.var.entry<-(1:ncol(obj$V))[order(olars$entry)]
    edf<- if(edf.psi) (olars$df-1)*2+1 else olars$df
    RSS<-olars$RSS
####### obtain the unpenalized RSS (kept for reference)
# if(RSS.unp){
#   RSS<-vector(length=length(RSS))
#   RSS[1]<-olars$RSS[1]
#   for(i in 1:(length(RSS)-1)){
#       X.ok<- cbind(1,obj$V[,id.var.entry[1:i]])
#       RSS[i+1]<-sum(lm.fit(y=y,x=X.ok)$residuals^2)
#       }
#   }
#######################################################
    max.rss<-function(RSS){
      var.mod<-(RSS/n)
      ll<- -(log(2*pi*var.mod)+1)*n/2
      new.r<-((ll[length(ll)]-ll[-1])/(ll[length(ll)]-ll[2]))*(length(ll)-1)+1
      diff2<-diff(new.r,diff=2)>S
      if(!any(diff2)) return(0)
      maxll=max(which(diff2))+1
      return(maxll)
    }
    min.r<-switch(type,
        bic = which.min(log(RSS/n)+log(n)*edf*Cn/n),
        mdl = which.min(n*log(RSS/(n-edf))+Cn*pen.MDL(id.var.entry,as.numeric(table(-rowSums(obj$V))))),
        rss = max.rss(RSS)
        )
    crit<-switch(type,
        bic = (log(RSS/n)+log(n)*edf*Cn/n),
        mdl = (n*log(RSS/(n-edf))+Cn*pen.MDL(id.var.entry,as.numeric(table(-rowSums(obj$V))))),
        rss = (RSS)
        )
#       rss = {
#        diff.rss<-diff(RSS,diff=2)>.75
#        if(any(diff.rss)) max(which(diff.rss)) else 0 #olars$RSS
#        }
#new.r<-((RSS[length(RSS)]-RSS[-1])/(RSS[length(RSS)]-rss[2]))*(length(RSS)-1)+1
#max(which(c(10,diff(new.r,diff=2))>.75))
    id<-sort(c(0,id.var.entry)[1:min.r])
    if(length(id)<=1) {
      o<-list(y=y,est.means=mean(y),id.var.entry=id.var.entry,n.psi=0,criterion=crit)
      o$rangeX<-rangeX
      if(!miss.x) o$x<-x
      class(o)<-"aCGHsegmented"
      return(o)
      }
    id<-id[-1] # drop the intercept
    psi1<- obj$psi[id]
    if(output=="2"){
        V.ok<- cbind(1,abs(obj$V[,id]))
        if(tipoAlg!="stepwise"){
          hat.b<-drop(solve(crossprod(V.ok),crossprod(V.ok,y))) #better than hat.b<-lm.fit(x=V.ok,y=y)$coef
          } else {
          hat.b<-olars$beta[min.r,id]
          hat.b<-c(olars$mu-sum(colMeans(as.matrix(V.ok[,-1]))*hat.b),hat.b)
          }
        fittedvalues<-drop(V.ok%*%hat.b)
        #gruppi<-rowSums(V.ok[,-1])
        #fittedvalues<-rep(cumsum(hat.b),tapply(gruppi,gruppi,length))
        ris<-list(id.var.entry=id.var.entry,psi=psi1,n.psi=length(id), psi.order.entry=psi0[id.var.entry],
            psi0=psi0,est.means0=est.means0,est.means=cumsum(hat.b),criterion=crit,
            fitted.values=fittedvalues)
        ris$y<-y
        ris$rangeX <- rangeX
        if(!miss.x) ris$x<-x
        class(ris)<-"aCGHsegmented"
        return(ris)
        }
# output "3": refit the segmented model starting from the selected psi only
    k<-length(psi1)
    Z <- matrix(rep(x, k), nrow = n)
    PSI <- matrix(rep(psi1, rep(n, k)), ncol = k)
    obj<-seg.lm.fit0(y=Y, Z=Z, PSI=PSI, control=fit.control(toll = 1e-04, it.max = 10, stop.if.error=FALSE),
        round=round, ...)
#    obj$y<-y
    if(!is.null(obj$est.means)){
        obj$n.psi<-0
        obj$psi0<-psi0
        obj$psi1<-psi1
        obj$criterion<-crit
        return(obj)
        }
    fitted.v<-drop(abs(obj$V)%*%obj$beta.c)
    if("firstSlope"%in%names(coef(obj))) fitted.v<- fitted.v + obj$coef["firstSlope"]
    obj$id.group<- -rowSums(obj$V)
    est.means<-cumsum(c(obj$coef["firstSlope"], obj$beta.c)) #unique(fitted.v)
    ris<-list(id.var.entry=id.var.entry,psi=obj$psi,n.psi=length(obj$psi), psi.order.entry=psi0[id.var.entry],
        psi0=psi0,psi1=psi1,est.means0=est.means0, est.means=est.means,criterion=crit) #type=r)
    ris$fitted.values<-fitted.v
    ris$y<-y
    ris$rangeX<-rangeX
    if(!miss.x) ris$x<-x
    class(ris)<-"aCGHsegmented"
    return(ris)
    }
|
6c1673b514b7234d1be90a6c2eeb83d23dbd7d73
|
bc13a38c71f9d95bf5bc1bbaadbcd4b0d4d412c6
|
/00_packageLoad.R
|
c6bd3c76b8176fb10047d6f607b7f67ecddf2bea
|
[] |
no_license
|
nturaga/RNAseq_workflow
|
6660296ed0557944f21b54ea59ce1bc9e6903b0c
|
0f0a4b2f35d44b1ad81a8d9601937d0165f62441
|
refs/heads/master
| 2021-03-24T13:52:25.089808
| 2014-03-21T17:06:49
| 2014-03-21T17:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
00_packageLoad.R
|
# Nitesh Turaga
# TCGA- Expression-Gene UNC Agilent analysis
#
# Installs (via Bioconductor) and attaches the packages this workflow needs.
# NOTE(review): setwd() to a machine-specific path is fragile; kept for
# compatibility with the rest of the pipeline, but consider passing the path
# as an argument instead.

# Set path
my.path <- "~/TestRun/TCGA-Expression-Gene/"
setwd(my.path)

# Install packages required by the downstream analysis steps.
packageList = c("EDASeq","edgeR","DESeq","BitSeq","Rsubread","easyRNASeq","goseq","DSS")
library(BiocInstaller)
biocLite(packageList)

# Attach packages. library() (unlike require()) fails loudly when a package
# is missing, instead of returning FALSE and letting the script continue.
library("DESeq")
library("Biostrings")
library("Biobase")
library("parallel") # Provides a parallel substitute for lapply
library("Rmpi")
|
7f9aa56de6bd7d17188e83193ca01a2338737871
|
c878d39a4dd0a0d30015f7b520336fd8949ac13d
|
/tests/testutils.R
|
0967838d747b253e42790b5d29a249c3692710f5
|
[] |
no_license
|
patdab90/etric
|
45df5ac863ecdb8222cd7041fd25500231ea2f66
|
5a32a008c707f7a0596c30b254f3c69b3c0ca2ea
|
refs/heads/master
| 2021-01-10T20:43:03.921425
| 2015-02-09T10:06:52
| 2015-02-09T10:06:52
| 22,640,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 825
|
r
|
testutils.R
|
library(stringr) # for tests cases
# Render linear constraints as human-readable strings.
#
# Each row j of `lhs` becomes a string such as "  x - y + 2 z <= 5":
# zero coefficients are skipped, coefficients of magnitude 1 are omitted,
# and positive terms after the first are prefixed with "+".
#
# @param lhs Numeric coefficient matrix; column names are the variable names.
# @param dir Matrix (one row per constraint) of direction strings, e.g. "<=".
# @param rhs Matrix (one row per constraint) of right-hand-side values.
# @return A character matrix with one column and nrow(lhs) rows; row names
#   are taken from `lhs`.
constraintsToString <- function(lhs, dir, rhs){
  res <- matrix("", nrow=nrow(lhs), ncol=1, dimnames=list(rownames(lhs)))
  # seq_len() (not 1:n) so zero-row/zero-column inputs give empty loops
  # instead of the 1:0 == c(1, 0) trap.
  for(j in seq_len(nrow(lhs))){
    for(i in seq_len(ncol(lhs))){
      if(lhs[j,i] != 0){
        if(lhs[j,i] > 0){
          # "+" separates terms, but is dropped before the very first term.
          sign <- if (res[j,] == "") "" else "+"
          if(lhs[j,i] == 1){
            res[j,] <- paste(res[j,], sign, colnames(lhs)[i])
          }else{
            res[j,] <- paste(res[j,], sign, lhs[j,i], colnames(lhs)[i])
          }
        }else{
          if(lhs[j,i] == -1){
            res[j,] <- paste(res[j,], "-", colnames(lhs)[i])
          }else{
            # Negative non-unit coefficients carry their own "-" sign.
            res[j,] <- paste(res[j,], lhs[j,i], colnames(lhs)[i])
          }
        }
      }
    }
    res[j,] <- paste(res[j,], dir[j,], rhs[j,])
  }
  return(res)
}
|
fcbc0e0ee39bbdaf57471e4cba3949501a9a1893
|
41bd4616c0ed105a58e82ac69def9c74767948f9
|
/configure_Newbler.R
|
b6a345d3ca6ee8412befec0399d13164d80bac1d
|
[
"Artistic-2.0"
] |
permissive
|
liangdp1984/GRC_Scripts
|
0385409d00283a1d0c47f0c0c3b9e340b4ce0aa1
|
b5619db7475c7d94dbd871cc556bac656bb086dd
|
refs/heads/master
| 2023-07-04T15:12:01.374105
| 2015-08-31T16:57:10
| 2015-08-31T16:57:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,996
|
r
|
configure_Newbler.R
|
#!/usr/bin/env Rscript
# CLI front end: defines and parses the command-line options used to
# configure Newbler assemblies from a sample sheet.
suppressPackageStartupMessages(library("optparse"))

# specify our desired options in a list
# by default OptionParser will add an help option equivalent to
# make_option(c("-h", "--help"), action="store_true", default=FALSE,
# help="Show this help message and exit")
option_list <- list(
  make_option(c("-f", "--file"), type="character", default="samples.txt",
              help="The filename of the sample file [default %default]",
              dest="samplesFile"),
  make_option(c("-c", "--column"), type="character", default="SAMPLE_ID",
              help="Column name from the sample sheet to use as read folder names [default %default]",
              dest="samplesColumn"),
  make_option(c("-r", "--readFolder"), type="character", default="02-Cleaned",
              help="Directory where the sequence data is stored [default %default]",
              dest="readFolder"),
  make_option(c("-n", "--newblerFolder"), type="character", default="03-NewblerAssemblies",
              help="Directory where to store the newbler results [default %default]",
              dest="newblerFolder"),
  make_option(c("-p", "--processors"), type="integer", default=0,
              help="number of processors to use [defaults to number available]",
              dest="procs"),
  make_option(c("-q", "--newbler_processors"), type="integer", default=10,
              help="number of processors to use in the newbler call [defaults %default]",
              dest="nprocs"),
  make_option(c("-v", "--vector-file"), type="character", default=NULL,
              help="file name with vector sequences in fasta format to provide Newbler [default %default]",
              dest="vector")
)
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
opt <- parse_args(OptionParser(option_list=option_list))

# Interactive-debugging stand-ins for `opt` (kept commented out):
#opt <- list(samplesFile="samples.txt",readsFolder="02-Cleaned",newblerFolder="05-Assemblies")
#opt <- list(samplesFile="samples.txt", samplesColumn="SAMPLE_ID", readFolder="04-Screened-extra_human",newblerFolder="05-NewblerAssemblies", procs=0, nprocs=10)

suppressPackageStartupMessages(library("tools"))
suppressPackageStartupMessages(library("parallel"))
######################################################################
## loadSampleFile
## reads in the sample sheet, check for expected format (columns SAMPLE_ID and SEQUENCE_ID)
## then check to make sure sequence reads are available using the specified column
## Parameters
## file: sample sheet filename
## reads_folder: path to folder containing reads
## column: sample sheet column to use that specified folders
"loadSamplesFile" <- function(file, reads_folder,column){
## debug
file = opt$samplesFile; reads_folder = opt$readFolder; column = opt$samplesColumn
##
if ( !file.exists(file) ) {
write(paste("Sample file",file,"does not exist\n"), stderr())
stop()
}
### column SEQUENCE_ID should be the folder name inside of Raw_Folder
### column SAMPLE_ID should be the sample name
### rows can be commented out with #
targets <- read.table(file,sep="",header=TRUE,as.is=TRUE)
if( !all(c("SAMPLE_ID","SEQUENCE_ID") %in% colnames(targets)) ){
write(paste("Expecting the two columns SAMPLE_ID and SEQUENCE_ID in samples file (tab-delimited)\n"), stderr())
stop()
}
if (any(is.na(match(targets[,column],dir(path=reads_folder))))){
write(paste(column,"do not match the read data folder structure\n\n"), stderr())
write(paste(column,"FOUND\n",sep="\t"),stderr())
write(paste(apply(data.frame(targets[,column],targets[,column] %in% dir(path=reads_folder)),1,paste,collapse="\t"),collapse="\n"),stderr())
stop()
}
targets$isDir <- sapply(targets[,column],function(x) file.info(file.path(reads_folder,x))$isdir)
targets$type <- NA
for (i in seq.int(to=nrow(targets))){
if (targets[i,"isDir"]){
ext <- unique(file_ext(dir(file.path(reads_folder,targets[i,column]),pattern="fastq|sff")))
if (length(ext) == 0){
write(paste("Cannot locate fastq or sff file in folder",targets[i,column],"\n"), stderr())
stop()
}
targets$type[i] <- paste(ext,sep="/")
}
else {
ext <- file_ext(grep("fastq|sff",dir(file.path(reads_folder,targets[i,column])),value=TRUE))
if (length(ext) == 0){
write(paste(targets[i,column],"is not a fastq or sff file\n"), stderr())
stop()
}
targets$type[i] <- paste(ext,sep="/")
}
}
write(paste("samples sheet contains", nrow(targets), "samples to process",sep=" "),stdout())
return(targets)
}
######################################################################
## prepareCore
## Set up the numer of processors to use
##
## Parameters
## opt_procs: processors given on the option line
## samples: number of samples
## targets: number of targets
"prepareCore" <- function(opt_procs){
# if opt_procs set to 0 then expand to samples by targets
if( opt_procs == 0 ) opt_procs <- detectCores()
write(paste("Using",opt_procs,"processors",sep=" "),stdout())
return(opt_procs)
}
# Load/validate the sample sheet, resolve the worker count, and make sure
# the output folder exists before any assembly job starts.
samples <- loadSamplesFile(opt$samplesFile,opt$readFolder,opt$samplesColumn)
procs <- prepareCore(opt$procs)
## create output folder
dir.create(opt$newblerFolder,showWarnings=FALSE,recursive=TRUE)
######################
newblerList <- function(samples, reads_folder, column){
  ## Build one job description per sample: the fastq paths split into
  ## single-end (SE), forward (PE1) and reverse (PE2) reads, plus the
  ## sample folder name, keyed by that folder name.
  job_list <- list()
  for (row in seq_len(nrow(samples))) {
    folder <- samples[row, column]
    fq <- dir(path = file.path(reads_folder, folder), pattern = "fastq$", full.names = TRUE)
    job <- list(
      SE  = grep("_merged|_SE", fq, value = TRUE),
      PE1 = grep("_PE1|_R1", fq, value = TRUE),
      PE2 = grep("_PE2|_R2", fq, value = TRUE)
    )
    job$sampleFolder <- folder
    job_list[[folder]] <- job
  }
  write(paste("Setting up", length(job_list), "jobs", sep = " "), stdout())
  return(job_list)
}
newbler <- newblerList(samples,opt$readFolder,opt$samplesColumn)
## run newbler: one runAssembly system call per sample, parallelized so that
## (workers) x (-cpu per job) stays within the requested processor budget.
newbler_out <- mclapply(newbler, function(index){
  dir.create(file.path(opt$newblerFolder,index$sampleFolder))
  ## BUG FIX: the original used ifelse(opt$vector == NULL, ...). Comparing
  ## with == NULL yields logical(0), ifelse(logical(0), ...) yields a
  ## zero-length vector, and paste() with a zero-length argument returns
  ## character(0) — so system() received no command at all. Use is.null().
  vt_arg <- if (is.null(opt$vector)) "" else paste("-vt",opt$vector,sep=" ")
  try({
    system(paste("runAssembly",
                 "-force -noace -m -sio",
                 "-cpu", opt$nprocs,
                 "-o",file.path(opt$newblerFolder,index$sampleFolder),
                 vt_arg,
                 paste(index$PE1,collapse=" "),
                 paste(index$PE2,collapse=" "),
                 paste(index$SE,collapse=" "),sep=" "));
  })
},mc.cores=floor(procs/opt$nprocs))
# Parse a Newbler metrics file (e.g. 454NewblerMetrics.txt) into a nested
# named list. The file format uses `key = value;` pairs, `{ ... }` sections
# named by the preceding line, and /* ... */ comment blocks.
parse_newblerFiles <- function(file){
lines <- readLines(file)
# remove extra whitespace
lines <- gsub("\\s+$", "", lines)
lines <- lines[!(lines == "")]
# remove comments
# fixed=T makes grep match the literal two-character strings "/*" and "*/".
scomment <- grep("/*",lines,fixed=T)
ecomment <- grep("*/",lines,fixed=T)
if (length(scomment) != length(ecomment)){
write(paste("Comment tags aren't matches, file:",file,"\n"), stderr())
stop()
}
# Drop every line from each "/*" through its paired "*/" (pairs by position).
lines <- lines[-unlist(apply(cbind(scomment,ecomment),1,function(x) seq.int(x[1],x[2])))]
# Recursive worker: peels off one level of { } nesting per call.
procLines <- function(iloop){
res <- {}
# Lines consisting solely of "{" / "}" delimit sections at this nesting level.
slist <- which(iloop== "{")
elist <- which(iloop == "}")
if (length(slist) > 0 & length(slist) == length(elist)){
# Group the interior lines of each section, label each group with the
# line immediately before its "{" (the section name), then recurse with
# one level of leading tab stripped.
nlist <- split(iloop[unlist(apply(cbind(slist,elist),1,function(x) seq.int(x[1]+1,x[2]-1)))],
rep(seq.int(1,length(slist)),times=apply(cbind(slist,elist),1,function(x) (x[2]-x[1]-1))))
names(nlist) = iloop[slist-1]
res <- lapply(nlist,function(x) procLines(sub("^\t","",x)))
# Remove the consumed sections (including their name lines) before
# handling the remaining flat key = value pairs.
iloop = iloop[-unlist(apply(cbind(slist,elist),1,function(x) seq.int(x[1]-1,x[2])))]
}
if (length(iloop) > 0){
# Flat "key = value" lines at this level: strip indentation, semicolons,
# quotes and " MB" units, then split on the first " = ".
iloop <- gsub("^\\s+", "", iloop)
iloop <- gsub(";|\"| MB", "", iloop)
ll <- strsplit(iloop,split=" += +")
nlist <- lapply(ll,"[[",2L)
names(nlist) <- sapply(ll,"[[",1L)
res <- c(nlist,res)
}
return(res)
}
# outer list
return(procLines(lines))
}
# Summarize each successful assembly into one named numeric vector of
# read/contig/coverage statistics, then write the combined table.
# newbler_out holds try(system(...)) results; a 0 exit status means success,
# so !0 selects the samples whose runAssembly call succeeded.
newblertb <- sapply(newbler, function(newb){
if (!newbler_out[[newb$sampleFolder]]){
require("Hmisc")
# Contig rows of 454ContigGraph.txt: column 2 = name, 3 = length, 4 = coverage.
cfile <- t(data.frame(strsplit(grep("contig",readLines(file.path(opt$newblerFolder,newb$sampleFolder,"454ContigGraph.txt")),value=TRUE),"\t"),stringsAsFactors=F))
cfile <- data.frame("Contig"=cfile[,2],"Length"=as.numeric(cfile[,3]),"Cov"=as.numeric(cfile[,4]))
# Length-weighted mean and sd of per-contig coverage (Hmisc wtd.* helpers).
cov <- c(wtd.mean(cfile$Cov,cfile$Length),sqrt(wtd.var(cfile$Cov,cfile$Length)))
pfile <- parse_newblerFiles(file.path(opt$newblerFolder,newb$sampleFolder,"454NewblerMetrics.txt"))
# run data
# Metrics values look like "count, percent%"; strip "%" and split on commas.
areads <- as.numeric(c(pfile$runMetrics$totalNumberOfReads, unlist(strsplit(gsub("%","",pfile$consensusResults$readStatus$numAlignedReads),split=" *, *"))))
abases <- as.numeric(c(pfile$runMetrics$totalNumberOfBases, unlist(strsplit(gsub("%","",pfile$consensusResults$readStatus$numAlignedBases),split=" *, *"))))
rstatus <- c(pfile$consensusResults$readStatus$numberAssembled,
pfile$consensusResults$readStatus$numberPartial,
pfile$consensusResults$readStatus$numberSingleton,
pfile$consensusResults$readStatus$numberRepeat,
pfile$consensusResults$readStatus$numberOutlier,
pfile$consensusResults$readStatus$numberTooShort)
# Keep only the leading count from each "count, percent" value.
rstatus <- as.numeric(sapply(strsplit(rstatus,split=" |, "),"[[",1L))
# NOTE(review): rstatus[0:1] is just rstatus[1] (numberAssembled only) —
# "percent assembled" may have been intended as sum(rstatus[1:2]) to also
# include partially assembled reads; confirm against the original analysis.
passembled <- (sum(rstatus[0:1])/areads[1])*100
largecontigs <- as.numeric(c(pfile$consensusResults$largeContigMetrics$numberOfContigs,
pfile$consensusResults$largeContigMetrics$numberOfBases,
pfile$consensusResults$largeContigMetrics$avgContigSize,
pfile$consensusResults$largeContigMetrics$N50ContigSize,
pfile$consensusResults$largeContigMetrics$largestContigSize,
unlist(strsplit(sub("%","",pfile$consensusResults$largeContigMetrics$Q40PlusBases),split=" *, *"))))
allcontigs <- as.numeric(c(pfile$consensusResults$allContigMetrics$numberOfContigs,pfile$consensusResults$allContigMetrics$numberOfBases))
ndata <- c(areads[1:3],abases[1:3],rstatus,passembled,largecontigs,allcontigs,cov)
# NOTE(review): "numberTooShart" below is a typo for "numberTooShort", but it
# is an output column name — downstream consumers may depend on it, so it is
# left unchanged here.
names(ndata) <- c("totalNumberOfReads","numAlignedReads","numAlignedReadsPercent",
"totalNumberOfBases","numAlignedBases","numAlignedReadsBases",
"numberAssembled","numberPartial","numberSingleton","numberRepeat","numberOutlier","numberTooShart","assembledPercent",
"numLargeContigsAssembled","numLargeBasesAssembled","avgLargeContigSize","N50LargeContigSize","largestContigSize","numQ40PlusBases","Q40PlusBasesPercent",
"numAllContigsAssembled","numAllBasesAssembled","meanWeightedCov","sdWeightedCov")
return(round(ndata,3))
}
})
# One row per sample after transposing; write the tab-delimited summary.
newblertb <- t(newblertb)
write.table(newblertb,file.path(opt$newblerFolder,"SummaryNewblerAssemblies.txt"),sep="\t",row.names=TRUE,col.names=TRUE,quote=FALSE)
|
1414a71d4dec3ca5de3a21287fabeceaf8549e93
|
f7546999748d00b74db8551ed65e02cc564f7f4f
|
/man/getSegPurity.Rd
|
a1e4154779a956ad3a463dca7f5fd75aea550fe6
|
[] |
no_license
|
cran/CHAT
|
b5887ac9eb97d1deace91cd638477e5d7bf56319
|
1819354a80335e6d92384002b90899b70c60719f
|
refs/heads/master
| 2021-01-19T08:15:18.756721
| 2014-02-10T00:00:00
| 2014-02-10T00:00:00
| 19,303,604
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,357
|
rd
|
getSegPurity.Rd
|
\name{getSegPurity}
\alias{getSegPurity}
\title{
Segment-specific AGP inference by sample
}
\description{
This function implements the sAGP inference algorithm, by placing each of the data point onto a BAF-LRR plot. AGP inference must be done in prior.
}
\usage{
getSegPurity(seg.dat, oo, AGP = 1, type = 1, para, rm.thr = 50, ref.dd = NULL)
}
\arguments{
\item{seg.dat}{
numeric matrix, segmentation data returned from getSegChr() or getSeg()
}
\item{oo}{
list, origin information returned from getOrigin() or getDiploidOrigin()
}
\item{AGP}{
numeric, AGP value for the sample being evaluated
}
\item{type}{
integer, ploidy type of tumors. 1, diploid; 2, tetraploid; 3, hexaploid.
}
\item{para}{
list, parameters returned from getPara.sAGP()
}
\item{rm.thr}{
integer, segments with number of BAF markers below this threshold will be removed.
}
\item{ref.dd}{
numeric matrix, in the same format as sam.dd. If given, no copy number estimation will be performed, and only sAGP will be inferred.
}
}
\details{
For a data point A on the BAF-LRR plot, the algorithm searches through all the canonical lines and finds the nearest ones to A. Each canonical line corresponds to the contraction path from a canonical point with (nb, nt) towards the origin, where nb is the number of minor alleles and nt the number of total alleles. It then uses an empirical objective function F=nt-2*type+K*|sAGP-AGP| to determine which line to choose. sAGP is the estimated aneuploid fraction of the specific segment for a given canonical line and K the constant parameter set in para. The purpose of this function is to find the most parsimonious combination of nb, nt and sAGP, with parsimony meaning close to the genome-wide average.
}
\value{
a numeric matrix with following columns: chromosome, start position, end position, LRR value, number of LRR markers, BAF value, number of BAF markers, sAGP, number of minor alleles, number of total alleles.
}
\author{
Bo Li
}
\examples{
\donttest{
data(A0SD.BAF)
data(A0SD.LRR)
seg.dat=c()
for(CHR in c(8,9,10)){
baf=A0SD.BAF[A0SD.BAF[,2]==CHR,]
lrr=A0SD.LRR[A0SD.LRR[,2]==CHR,]
x=getSegChr(baf,lrr)
seg.dat=rbind(seg.dat,x)
}
dd.dat=seg.dat[,2:8]
rownames(dd.dat)=seg.dat[,1]
mode(dd.dat)='numeric'
para.s=getPara.sAGP()
para=getPara()
oo=getOrigin(dd.dat,para=para)
sAGP.dat=getSegPurity(dd.dat,oo,AGP=0.5,para=para.s)
}
}
|
ab8170e58b95bc7530b74a2f76304e231c8473cb
|
d1fafdc9f199bac28aa5265ba5927d7b77277e11
|
/data_analysis/exotic grass simulation.R
|
f53ea2593d756bf2d0b7ebaa83f81874fcac9527
|
[] |
no_license
|
laurenmh/sToration-vernal-pools
|
d979b33becb1c86ecbde1cdc4c020bc09a347c98
|
344606cc6c0e90fd0d21514e16cac287197b00f4
|
refs/heads/master
| 2022-12-04T16:57:51.637103
| 2022-11-27T22:24:01
| 2022-11-27T22:24:01
| 213,767,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,327
|
r
|
exotic grass simulation.R
|
#####################################################
#Would adaptive management improve LACO populations?#
#####################################################
#Goal: Simulate exotic grasses (EG) removal to promote LACO persistence
#Step 1. Simulate EG removal
#Step 2. Average the growth rates of LACO over time for all simulation scenarios
#Step 3. Plot modeled abundance and GRWR
# Load packages
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(rstan)
library(StanHeaders)
library(HDInterval)
library(tidyr)
library(RColorBrewer)
# Remember to set your data pathway first!
# Data
source("data_compiling/compile_composition.R")
# Run "data_wrangling/prep data before modeling.R"
#Extract parameters for constructed pools
# Run "analysis/complex_belowground_v5.R"
Post <- rstan::extract(BH_fit)
alpha_LACO_mean <- as.data.frame(colMeans(Post$alpha_LACO)) %>% mutate(Year = c(2001:2017)) %>% pivot_wider(names_from = Year, values_from = "colMeans(Post$alpha_LACO)") %>% as.matrix()
alpha_EG_mean <- as.data.frame(colMeans(Post$alpha_EG)) %>% mutate(Year = c(2001:2017)) %>% pivot_wider(names_from = Year, values_from = "colMeans(Post$alpha_EG)")%>% as.matrix()
alpha_ERVA_mean <- as.data.frame(colMeans(Post$alpha_ERVA)) %>% mutate(Year = c(2001:2017)) %>% pivot_wider(names_from = Year, values_from = "colMeans(Post$alpha_ERVA)")%>% as.matrix()
alpha_NF_mean <- as.data.frame(colMeans(Post$alpha_NF)) %>% mutate(Year = c(2001:2017)) %>% pivot_wider(names_from = Year, values_from = "colMeans(Post$alpha_NF)")%>% as.matrix()
lambda_mean <- as.data.frame(colMeans(Post$lambda)) %>% mutate(Year = c(2001:2017)) %>% pivot_wider(names_from = Year, values_from = "colMeans(Post$lambda)")%>% as.matrix()
s_mean <- as.data.frame(mean(Post$survival_LACO))%>% as.matrix()
# Function for standard error
se <- function(x){
  # Standard error of the mean: sample sd divided by sqrt(sample size).
  n <- length(x)
  sd(x) / sqrt(n)
}
#-----------------------
#Step 1. Simulate EG removal
# Simulation model:
# Global matrices read by bh.sim() below: rows = pools, cols = years.
sim_n_pools <- 142 #number of pools
sim_n_years <- 18 #years of data
sim_obs_LACO <- matrix(nrow = sim_n_pools, ncol = sim_n_years) #empty matrix of LACO seed counts
sim_mu <- matrix(nrow = sim_n_pools, ncol = sim_n_years) #empty matrix of mean LACO seed counts
bh.formula <- function(sim_obs_LACO, EG, ERVA, NF, aii, a1, a2, a3, lambda, s, g){
  # Modified Beverton-Holt growth for LACO stem counts: density-dependent
  # reproduction (discounted by intra- and inter-specific competition) plus
  # carry-over of ungerminated surviving seeds from the seed bank.
  competition <- 1 + sim_obs_LACO * aii + EG * a1 + ERVA * a2 + NF * a3
  growth <- sim_obs_LACO * lambda / competition
  seedbank <- s * (1 - g) * sim_obs_LACO / g
  growth + seedbank
}
# Simulate observed LACO stem counts per pool per year using the modified
# Beverton-Holt model, with Poisson observation noise and a binomial start.
# Reads the global matrices sim_mu and sim_obs_LACO for dimensions and as
# working storage (local copies under R's copy-on-assign).
# NOTE(review): the n_pools argument is never used inside the function —
# dimensions come from the global sim_mu instead; confirm this is intended.
# NOTE(review): once EG > 100 triggers g = glow, g stays lowered for all
# later years of that pool (the else branch reassigns the current g) —
# confirm germination was meant not to recover.
bh.sim <- function(n_pools, seedtrt, EG, ERVA, NF, aii, a1, a2, a3, lambda, s, g, glow){
for(i in 1:nrow(sim_mu)){
# Year 1: seeding treatment of 100 seeds; observed stems ~ Binomial(100, g).
for(j in 1:1){
sim_mu[i,j] <- 100
sim_obs_LACO[i,j] <- rbinom(1,100,g)
}
# Years 2-3: seed-addition years — the treatment seeds (times germination)
# are added to the expected count before drawing the Poisson observation.
for(j in 2:3){
if (EG[i,j-1]> 100){
g = glow
}
else{g = g}
sim_mu[i,j] <- bh.formula(sim_obs_LACO = sim_obs_LACO[i,j-1],
EG = EG[i,j-1], ERVA = ERVA[i,j-1], NF = NF[i,j-1],
aii = aii[j-1], a1 = a1[j-1], a2 = a2[j-1], a3 = a3[j-1],
lambda = lambda[j-1], s = s, g = g)
sim_obs_LACO[i,j] <- rpois(1, lambda = (sim_mu[i,j] + seedtrt[i,j] * g))
}
# Years 4+: no more seed additions. If last year's count was zero, project
# forward from two years back through an extra Beverton-Holt step instead.
for(j in 4:ncol(sim_mu)){
if (EG[i,j-1]> 100){
g = glow
}
else{g = g}
if (sim_obs_LACO[i,j-1] > 0){
sim_mu[i,j] <- bh.formula(sim_obs_LACO = sim_obs_LACO[i,j-1],
EG = EG[i,j-1], ERVA = ERVA[i,j-1], NF = NF[i,j-1],
aii = aii[j-1], a1 = a1[j-1], a2 = a2[j-1], a3 = a3[j-1],
lambda = lambda[j-1], s = s, g = g)
}
else {
sim_mu[i,j] <- bh.formula(sim_obs_LACO = sim_obs_LACO[i,j-2]*lambda[j-2]/(1+sim_obs_LACO[i,j-2]*aii[j-2]+EG[i,j-2]*a1[j-2]+ERVA[i,j-2]*a2[j-2]+NF[i,j-2]*a3[j-2])+s*(1-g)*sim_obs_LACO[i,j-2]/g,
EG = EG[i,j-1], ERVA = ERVA[i,j-1], NF = NF[i,j-1],
aii = aii[j-1], a1 = a1[j-1], a2 = a2[j-1], a3 = a3[j-1],
lambda = lambda[j-1], s = s, g = g)
}
sim_obs_LACO[i,j] <- rpois(1, lambda = sim_mu[i,j])
}
}
return(sim_obs_LACO)
}
#Simulate LACO abundance without exotic grass removal
# Baseline run: observed exotic grass (EG) cover, posterior-mean parameters.
predicted_LACO <- bh.sim(n_pools = n_pools,
seedtrt = as.matrix(seedtrt[,4:6]),
EG = as.matrix(sumEGcover),
ERVA = as.matrix(ERVAdens),
NF = as.matrix(sumNFcover),
aii = alpha_LACO_mean,
a1 = alpha_EG_mean,
a2 = alpha_ERVA_mean,
a3 = alpha_NF_mean,
lambda = lambda_mean,
s = s_mean,
g = 0.7,
glow = 0.2)
# Build EG-cover scenarios by scaling the yearly cover columns in place:
# x% removal means multiplying cover by (1 - x/100).
#Remove 25% of EG every year from 2001-2017
mult.25 <- function(x)(x*0.75)
reduced25EGcover <- sumEGcover %>%
mutate_at(c("2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016", "2017"), mult.25)
#Remove 50% of EG every year from 2001-2017
mult.5 <- function(x)(x*0.5)
reduced50EGcover <- sumEGcover %>%
mutate_at(c("2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016", "2017"), mult.5)
#Remove 75% of EG every year from 2001-2017
mult.75 <- function(x)(x*0.25)
reduced75EGcover <- sumEGcover %>%
mutate_at(c("2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016", "2017"), mult.75)
#Remove 100% of EG every year from 2001-2017
mult.100 <- function(x)(x*0)
reduced100EGcover <- sumEGcover %>%
mutate_at(c("2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016", "2017"), mult.100)
#Simulate LACO abundance with exotic grass removal
# Same simulation as the baseline, repeated once per removal scenario —
# only the EG argument changes.
reduced50EG_LACO <- bh.sim(n_pools = n_pools,
seedtrt = as.matrix(seedtrt[,4:6]),
EG = as.matrix(reduced50EGcover),
ERVA = as.matrix(ERVAdens),
NF = as.matrix(sumNFcover),
aii = alpha_LACO_mean,
a1 = alpha_EG_mean,
a2 = alpha_ERVA_mean,
a3 = alpha_NF_mean,
lambda = lambda_mean,
s = s_mean,
g = 0.7,
glow = 0.2)
reduced75EG_LACO <- bh.sim(n_pools = n_pools,
seedtrt = as.matrix(seedtrt[,4:6]),
EG = as.matrix(reduced75EGcover),
ERVA = as.matrix(ERVAdens),
NF = as.matrix(sumNFcover),
aii = alpha_LACO_mean,
a1 = alpha_EG_mean,
a2 = alpha_ERVA_mean,
a3 = alpha_NF_mean,
lambda = lambda_mean,
s = s_mean,
g = 0.7,
glow = 0.2)
reduced25EG_LACO <- bh.sim(n_pools = n_pools,
seedtrt = as.matrix(seedtrt[,4:6]),
EG = as.matrix(reduced25EGcover),
ERVA = as.matrix(ERVAdens),
NF = as.matrix(sumNFcover),
aii = alpha_LACO_mean,
a1 = alpha_EG_mean,
a2 = alpha_ERVA_mean,
a3 = alpha_NF_mean,
lambda = lambda_mean,
s = s_mean,
g = 0.7,
glow = 0.2)
reduced100EG_LACO <- bh.sim(n_pools = n_pools,
seedtrt = as.matrix(seedtrt[,4:6]),
EG = as.matrix(reduced100EGcover),
ERVA = as.matrix(ERVAdens),
NF = as.matrix(sumNFcover),
aii = alpha_LACO_mean,
a1 = alpha_EG_mean,
a2 = alpha_ERVA_mean,
a3 = alpha_NF_mean,
lambda = lambda_mean,
s = s_mean,
g = 0.7,
glow = 0.2)
#Combine simulated LACO
# Reshape each scenario's pool x year matrix into long form (Pool, time,
# value), join them, and summarize mean/se/sd of LACO per year and scenario.
years <- c("2000", "2001", "2002", "2003", "2004", "2005", "2006",
"2007", "2008", "2009", "2010", "2011", "2012", "2013",
"2014", "2015", "2016", "2017")
colnames(predicted_LACO) <- years
colnames(reduced50EG_LACO) <- years
colnames(reduced75EG_LACO) <- years
colnames(reduced25EG_LACO) <- years
colnames(reduced100EG_LACO) <- years
predicted_LACO <- as.data.frame(predicted_LACO) %>%
mutate(Pool = row_number()) %>%
gather(`2000`,`2001`,`2002`,`2003`,`2004`,`2005`,`2006`, `2007`, `2008`, `2009`, `2010`,
`2011`,`2012`,`2013`,`2014`,`2015`,`2016`,`2017`, key = time, value = predicted_LACO)
reduced50EG_LACO <- as.data.frame(reduced50EG_LACO) %>%
mutate(Pool = row_number()) %>%
gather(`2000`,`2001`,`2002`,`2003`,`2004`,`2005`,`2006`, `2007`, `2008`, `2009`, `2010`,
`2011`,`2012`,`2013`,`2014`,`2015`,`2016`,`2017`, key = time, value = reduced50EG_LACO)
reduced75EG_LACO <- as.data.frame(reduced75EG_LACO) %>%
mutate(Pool = row_number()) %>%
gather(`2000`,`2001`,`2002`,`2003`,`2004`,`2005`,`2006`, `2007`, `2008`, `2009`, `2010`,
`2011`,`2012`,`2013`,`2014`,`2015`,`2016`,`2017`, key = time, value = reduced75EG_LACO)
reduced25EG_LACO <- as.data.frame(reduced25EG_LACO) %>%
mutate(Pool = row_number()) %>%
gather(`2000`,`2001`,`2002`,`2003`,`2004`,`2005`,`2006`, `2007`, `2008`, `2009`, `2010`,
`2011`,`2012`,`2013`,`2014`,`2015`,`2016`,`2017`, key = time, value = reduced25EG_LACO)
# NOTE(review): the 100% scenario is reshaped above but not joined here —
# it is also excluded from the figures below; confirm that is deliberate.
grass_sim_LACO <-left_join(left_join(predicted_LACO, reduced50EG_LACO, by = c("Pool", "time")),
reduced75EG_LACO, by = c("Pool", "time")) %>%
gather(`predicted_LACO`, `reduced50EG_LACO`, `reduced75EG_LACO` , key = type, value = LACO) %>%
mutate(log_LACO = log(LACO)) %>%
mutate_if(is.numeric, ~replace(., is.infinite(.), 0))
summary_grass_sim_LACO <- grass_sim_LACO %>%
group_by(time, type) %>%
summarise(mean_log_LACO = mean(log_LACO),
se_log_LACO = se(log_LACO),
mean_LACO = mean(LACO),
se_LACO = se(LACO),
sd_LACO = sd(LACO))
#Step 2. Average the growth rates of LACO over time for all simulation scenarios
#Set up a simpler population model for calculating GRWR
# Deterministic per-capita growth of a single invading LACO individual
# (LACO is a scalar, typically 1) against fixed resident abundances.
# Rows of the global sim_LACO matrix index posterior iterations; columns
# index years. No observation noise is added here.
# NOTE(review): as in bh.sim(), once EG > 100 sets g = glow, g stays lowered
# for all subsequent years — confirm intended.
bh.sim.control <- function(LACO, EG, ERVA, NF, aii, a1, a2, a3, lambda, s, g, glow){
for(i in 1:nrow(sim_LACO)){
for(j in 1:1){
sim_LACO[i,j] <- LACO*lambda[i,j]/(1+LACO*aii[i,j]+EG[j]*a1[i,j]+ERVA[j]*a2[i,j]+NF[j]*a3[i,j])+s[i]*(1-g)*LACO/g #this is the modified Beverton-Holt model we'll use for LACO stem counts
}
for(j in 2:ncol(sim_LACO)){
if (EG[j-1]> 100){
g = glow
}
else{g = g}
sim_LACO[i,j] <- LACO*lambda[i,j]/(1+LACO*aii[i,j]+EG[j]*a1[i,j]+ERVA[j]*a2[i,j]+NF[j]*a3[i,j])+s[i]*(1-g)*LACO/g
}
}
return(sim_LACO)
}
# 2000 posterior iterations x 17 years; read by bh.sim.control() above.
sim_LACO <- matrix(nrow = 2000, ncol = 17)
#Use the control plots in reference pools (no LACO present) to calculate the stable equilibrium frequency of non-LACO species in the model each year.
#Non-LACO species in our model:
#ERVA
#Exotic grass group - BRHO, HOMA, LOMU
#Native forb group - PLST, DOCO
const_com_control <- const_com %>% #use constructed pools data
filter(Treatment.1999 == "Control") %>% #filter control plots only
drop_na() %>% #remove any rows with na
filter(LACO <= 0) %>% #remove communities with LACO present
mutate(sumEG = BRHO + HOMA + LOMU,
sumNF = PLST + DOCO) %>% #create a new column for sum of EG and sum of NF
group_by(Year)%>% #summarize by year
summarize(avg_ERVA = round(mean(ERVA), digits = 0),
avg_sumEG = round(mean(sumEG), digits = 0),
avg_sumNF = round(mean(sumNF), digits = 0)) %>%#take the average freq.
filter(Year != "2017") %>% #filter out 2017
pivot_longer(-Year)%>%
pivot_wider(names_from = Year, values_from = value)
# After pivoting: row 1 = avg_ERVA, row 2 = avg_sumEG, row 3 = avg_sumNF,
# with one column per year — this row order is relied on below.
# Extract parameters from constructed pool model this is the same as in GRWR_invader.R
# Full posterior draws (2000 iterations x 17 years), not just the means.
alpha_LACO <- as.matrix(Post$alpha_LACO)
alpha_EG <- as.matrix(Post$alpha_EG)
alpha_ERVA <- as.matrix(Post$alpha_ERVA)
alpha_NF <- as.matrix(Post$alpha_NF)
lambda <- as.matrix(Post$lambda)
s <- as.matrix(Post$survival_LACO)
# Low-density growth rate (GRWR) of a single LACO invader under each EG
# removal level: run bh.sim.control per scenario, log-transform, and keep
# 2001-2015 (the last two years are truncated).
# 0% EG removal
LACO_const <- bh.sim.control( LACO = 1,
EG = as.numeric(const_com_control[2,2:18]),
ERVA = as.numeric(const_com_control[1,2:18]),
NF = as.numeric(const_com_control[3,2:18]),
aii = alpha_LACO,
a1 = alpha_EG,
a2 = alpha_ERVA,
a3 = alpha_NF,
lambda = lambda,
s = s,
g = 0.7,
glow = 0.2)
GRWR_LACO_const_all <- log(LACO_const) #2001-2017 #log transform
GRWR_LACO_const <- GRWR_LACO_const_all[,1:15] #2001-2015 truncate the last two points
#Remove 25% of EG in all years
sim_LACO <- matrix(nrow = 2000, ncol = 17)
mult.25 <- function(x)(x*0.75)
reduced25EGcover_all <- const_com_control[2,2:18] %>%
mutate_at(c("2000", "2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016"), mult.25)
LACO_25EG_all <- bh.sim.control( LACO = 1,
EG = as.numeric(reduced25EGcover_all),
ERVA = as.numeric(const_com_control[1,2:18]),
NF = as.numeric(const_com_control[3,2:18]),
aii = alpha_LACO,
a1 = alpha_EG,
a2 = alpha_ERVA,
a3 = alpha_NF,
lambda = lambda,
s = s,
g = 0.7,
glow = 0.2)
GRWR_LACO_25EG_all <- log(LACO_25EG_all)#2001-2017 #log transform
GRWR_LACO_25EG <- GRWR_LACO_25EG_all[,1:15]#2001-2015 truncate the last two points
#Remove 50% of EG in all years
mult.5 <- function(x)(x*0.5)
reduced50EGcover_all <- const_com_control[2,2:18] %>%
mutate_at(c("2000", "2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016"), mult.5)
LACO_50EG_all <- bh.sim.control( LACO = 1,
EG = as.numeric(reduced50EGcover_all),
ERVA = as.numeric(const_com_control[1,2:18]),
NF = as.numeric(const_com_control[3,2:18]),
aii = alpha_LACO,
a1 = alpha_EG,
a2 = alpha_ERVA,
a3 = alpha_NF,
lambda = lambda,
s = s,
g = 0.7,
glow = 0.2)
GRWR_LACO_50EG_all <- log(LACO_50EG_all)#2001-2017 #log transform
GRWR_LACO_50EG <- GRWR_LACO_50EG_all[,1:15]#2001-2015 truncate the last two points
#Remove 75% of EG in all years
mult.75 <- function(x)(x*0.25)
reduced75EGcover_all <- const_com_control[2,2:18] %>%
mutate_at(c("2000", "2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016"), mult.75)
LACO_75EG_all <- bh.sim.control( LACO = 1,
EG = as.numeric(reduced75EGcover_all),
ERVA = as.numeric(const_com_control[1,2:18]),
NF = as.numeric(const_com_control[3,2:18]),
aii = alpha_LACO,
a1 = alpha_EG,
a2 = alpha_ERVA,
a3 = alpha_NF,
lambda = lambda,
s = s,
g = 0.7,
glow = 0.2)
GRWR_LACO_75EG_all <- log(LACO_75EG_all)#2001-2017 #log transform
GRWR_LACO_75EG <- GRWR_LACO_75EG_all[,1:15]#2001-2015 truncate the last two points
#Remove 100% of EG in all years
mult.100 <- function(x)(x*0)
reduced100EGcover_all <- const_com_control[2,2:18] %>%
mutate_at(c("2000", "2001", "2002", "2003", "2004", "2005","2006", "2007", "2008", "2009", "2010", "2011",
"2012", "2013", "2014", "2015", "2016"), mult.100)
LACO_100EG_all <- bh.sim.control( LACO = 1,
EG = as.numeric(reduced100EGcover_all),
ERVA = as.numeric(const_com_control[1,2:18]),
NF = as.numeric(const_com_control[3,2:18]),
aii = alpha_LACO,
a1 = alpha_EG,
a2 = alpha_ERVA,
a3 = alpha_NF,
lambda = lambda,
s = s,
g = 0.7,
glow = 0.2)
GRWR_LACO_100EG_all <- log(LACO_100EG_all)#2001-2017 #log transform
GRWR_LACO_100EG <- GRWR_LACO_100EG_all[,1:15]#2001-2015 truncate the last two points
#Average the growth rates of LACO over time for all simulation scenarios.
# For each scenario: average GRWR across years within each posterior
# iteration, then summarize mean and 95% highest-density interval (HDInterval
# hdi()) across the 2000 iterations.
GRWR_LACO_const_mean <- as.data.frame(GRWR_LACO_const) %>%
magrittr::set_colnames(c(2001:2015)) %>%
rownames_to_column(., var = "iteration") %>% #2000 iterations from Bayesian modeling
pivot_longer(!iteration, names_to = "Year", values_to = "GRWR") %>%
group_by(iteration) %>%
summarize(mean_GRWR = mean(GRWR)) #mean of GRWR across years
GRWR_LACO_const_summary <- GRWR_LACO_const_mean %>%
summarise(Mean = mean(mean_GRWR),
CI = hdi(mean_GRWR, credMass = 0.95))%>% #take the 95% CI across iterations
mutate(name = c("lowCI", "upCI")) %>% #top values is the low CI and bottom value is the high CI
pivot_wider(names_from = name, values_from = CI) #Average GRWR LACO for 0% EG removed
GRWR_LACO_25EG_mean <- as.data.frame(GRWR_LACO_25EG) %>%
magrittr::set_colnames(c(2001:2015)) %>%
rownames_to_column(., var = "iteration") %>% #2000 iterations from Bayesian modeling
pivot_longer(!iteration, names_to = "Year", values_to = "GRWR") %>%
group_by(iteration) %>%
summarize(mean_GRWR = mean(GRWR)) #mean of GRWR across years
GRWR_LACO_25EG_summary <- GRWR_LACO_25EG_mean %>%
summarise(Mean = mean(mean_GRWR),
CI = hdi(mean_GRWR, credMass = 0.95))%>% #take the 95% CI across iterations
mutate(name = c("lowCI", "upCI")) %>% #top values is the low CI and bottom value is the high CI
pivot_wider(names_from = name, values_from = CI) #Average GRWR LACO for 25% EG removed
GRWR_LACO_50EG_mean <- as.data.frame(GRWR_LACO_50EG) %>%
magrittr::set_colnames(c(2001:2015)) %>%
rownames_to_column(., var = "iteration") %>% #2000 iterations from Bayesian modeling
pivot_longer(!iteration, names_to = "Year", values_to = "GRWR") %>%
group_by(iteration) %>%
summarize(mean_GRWR = mean(GRWR)) #mean of GRWR across years
GRWR_LACO_50EG_summary <- GRWR_LACO_50EG_mean %>%
summarise(Mean = mean(mean_GRWR),
CI = hdi(mean_GRWR, credMass = 0.95))%>% #take the 95% CI across iterations
mutate(name = c("lowCI", "upCI")) %>% #top values is the low CI and bottom value is the high CI
pivot_wider(names_from = name, values_from = CI) #Average GRWR LACO for 50% EG removed
GRWR_LACO_75EG_mean <- as.data.frame(GRWR_LACO_75EG) %>%
magrittr::set_colnames(c(2001:2015)) %>%
rownames_to_column(., var = "iteration") %>% #2000 iterations from Bayesian modeling
pivot_longer(!iteration, names_to = "Year", values_to = "GRWR") %>%
group_by(iteration) %>%
summarize(mean_GRWR = mean(GRWR)) #mean of GRWR across years
GRWR_LACO_75EG_summary <- GRWR_LACO_75EG_mean %>%
summarise(Mean = mean(mean_GRWR),
CI = hdi(mean_GRWR, credMass = 0.95))%>% #take the 95% CI across iterations
mutate(name = c("lowCI", "upCI")) %>% #top values is the low CI and bottom value is the high CI
pivot_wider(names_from = name, values_from = CI) #Average GRWR LACO for 75% EG removed
GRWR_LACO_100EG_mean <- as.data.frame(GRWR_LACO_100EG) %>%
magrittr::set_colnames(c(2001:2015)) %>%
rownames_to_column(., var = "iteration") %>% #2000 iterations from Bayesian modeling
pivot_longer(!iteration, names_to = "Year", values_to = "GRWR") %>%
group_by(iteration) %>%
summarize(mean_GRWR = mean(GRWR)) #mean of GRWR across years
GRWR_LACO_100EG_summary <- GRWR_LACO_100EG_mean %>%
summarise(Mean = mean(mean_GRWR),
CI = hdi(mean_GRWR, credMass = 0.95))%>% #take the 95% CI across iterations
mutate(name = c("lowCI", "upCI")) %>% #top values is the low CI and bottom value is the high CI
pivot_wider(names_from = name, values_from = CI) #Average GRWR LACO for 100% EG removed
# Only the 0/50/75% scenarios are plotted below; 25% and 100% are computed
# but not included in the figure table.
GRWR_simulated_all <- as.data.frame(rbind(GRWR_LACO_const_summary, GRWR_LACO_50EG_summary, GRWR_LACO_75EG_summary)) %>%
mutate(treatment = c("0%", "50%", "75%"))
GRWR_simulated_all$treatment <- ordered(GRWR_simulated_all$treatment, levels = c("0%", "50%", "75%" ))
#Step 3. Plot modeled abundance and GRWR
#FIGURE4
sim_timeseries <- ggplot(summary_grass_sim_LACO%>%filter(time %in% c(2000:2015)), aes(x = as.numeric(time), y = mean_LACO)) +
geom_point() +
geom_line(size = 1.4, aes(linetype = type)) +
geom_errorbar(aes(ymin = mean_LACO-se_LACO, ymax = mean_LACO+se_LACO), width = 0.4, alpha = 0.9, size = 1) +
theme(text = element_text(size=18),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text = element_text(size = 18),
axis.line = element_line(colour = "black"),
legend.position = c(0.4, 0.2)) +
scale_y_log10()+
labs(y = bquote(Predicted~italic(L.~conj.)~Density~(stems/m^2))) +
scale_x_continuous(name = "Year", limits = c(1999.5,2015.5))+
scale_linetype_manual(values=c("solid", "twodash", "dotted"),
name = "Reduction in Exotic Grasses",
labels = c("0%", "50%", "75%"))
sim_GRWR <- ggplot(GRWR_simulated_all , aes(x = treatment, y = Mean))+
geom_bar(stat = "identity", col = "grey27")+
theme(text = element_text(size=18),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text = element_text(size = 18),
axis.line = element_line(colour = "black"),
legend.position = "none")+
geom_errorbar(aes(ymin = lowCI, ymax = upCI), width = 0.4, alpha = 0.9, size = 1) +
labs(y = "Average Low Density Growth Rate", x = "Reduction in Exotic Grasses")+
geom_hline(yintercept = 0)
ggarrange(sim_timeseries, sim_GRWR, ncol = 2, nrow = 1,
labels = c("(a)", "(b)"), widths = c(0.6, 0.4),
#common.legend = TRUE, legend = "bottom",
font.label = list(size = 20))
################################################
#Which year had the greatest effect of removal?#
################################################
# #Calculate the effect size of trt = (mean_LACO_EGreduced-mean_LACO_no_removal)/sd_LACO_no_removal
# removal_eff <- summary_grass_sim_LACO %>%
# select(time, type, mean_LACO, sd_LACO) %>%
# pivot_wider(names_from = type, values_from = c(mean_LACO, sd_LACO)) %>%
# select(-sd_LACO_reduced25EG_LACO, -sd_LACO_reduced50EG_LACO, -sd_LACO_reduced75EG_LACO) %>%
# mutate(eff_50 = (mean_LACO_reduced50EG_LACO-mean_LACO_predicted_LACO)/sd_LACO_predicted_LACO,
# eff_75 = (mean_LACO_reduced75EG_LACO-mean_LACO_predicted_LACO)/sd_LACO_predicted_LACO) %>%
# select(-mean_LACO_predicted_LACO, -mean_LACO_reduced25EG_LACO, -mean_LACO_reduced50EG_LACO, -mean_LACO_reduced75EG_LACO,
# -sd_LACO_predicted_LACO)
#write.csv(removal_eff, "C:\\Users\\Lina\\Desktop\\Repositories\\sToration-vernal-pools\\data_analysis\\Table2.csv", row.names = FALSE )
###########################################
#Show the relationship between LACO and EG#
###########################################
#Supplemental Figure 2
# Scatter of LACO density (log10 y-axis) against total exotic grass cover.
# NOTE(review): assumes const_dummy_join (built earlier in this script) has
# columns sum_EG and LACOdens.
ggplot(const_dummy_join, aes(x = sum_EG, y= LACOdens))+
geom_point()+
scale_y_log10(limits=c(0.1,6000))+
theme(text = element_text(size=16),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))+
# Linear-model smoother (geom_smooth with method = "lm").
geom_smooth(method = "lm")+
labs(x = "Sum of exotic grass cover (%)", y = "LACO density (log)")
|
5e21d5de7b31993985f06060ce57c35b262e28f0
|
a06ad0b5797e82bde9ae94c15216aaddc654f214
|
/R/calculate_n_and_p.R
|
39f029594bbdc8e70fa1e174e93c9fd4274fc948
|
[
"Artistic-2.0"
] |
permissive
|
khemlalnirmalkar/HMP16SData
|
5fb436a4aa6638ed1ec07e0f267f7a68bea9e8d9
|
11795265b43cb5eea1c1de57fd6a9936fe54577a
|
refs/heads/master
| 2023-09-02T18:12:38.092116
| 2021-10-26T16:29:57
| 2021-10-26T16:29:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
calculate_n_and_p.R
|
#' Combine per-value counts and proportions into a single table
#'
#' Runs \code{calculate_n()} and \code{calculate_p()} on the same input and
#' joins their results on the shared "x" column.
#'
#' @keywords internal
#'
#' @importFrom dplyr full_join
calculate_n_and_p <- function(x) {
  counts <- calculate_n(x)
  proportions <- calculate_p(x)
  full_join(counts, proportions, by = "x")
}
|
355b9aed506732a8e3b2b35153fcdc8fa08ee80e
|
0b6983d442055421658983158179a563b745256b
|
/DataWranglingEx1.R
|
e29022eaf85ee94c0d5c6ba6f881f10be41805a5
|
[] |
no_license
|
roblwallace/DataWranglingEx1
|
702240ed017de10bb08b4f49335b774a44ac0dc7
|
b1624da1ca9afcbc39466d7a3340035a60cdb510
|
refs/heads/master
| 2020-09-12T07:05:20.031017
| 2016-09-07T23:08:25
| 2016-09-07T23:08:25
| 67,648,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,380
|
r
|
DataWranglingEx1.R
|
# Data Wrangling Exercise 1: Basic Data Manipulation
# Rob Wallace, roblwallace@gmail.com
# 5-SEPT-2016
#
# [X] 0. Load the data in RStudio
# [X] 1. Clean Up Brand Names
# [X] 2. Separate product code and number
# [X] 3. Add product categories
# [X] 4. Add full address for geocoding
# [X] 5. Create dummy variables for company and product category
# [X] 6. Submit the project on Github

library(dplyr)

# Step 0. Load the data into a data frame and take a first look.
df_purchases <- read.csv("refine_original.csv")
glimpse(df_purchases)

# Step 1. Clean up brand names: upper-case everything, then collapse the
# misspellings. Each brand's misspellings share a distinctive first letter.
df_purchases <- mutate(df_purchases, company = toupper(company))
# Bug fix: entries starting with "F" ("fillips" etc.) are misspellings of
# PHILIPS; the original mapped them to "PHILLIPS", silently creating a
# fifth company.
df_purchases$company <- gsub("^P.*$", "PHILIPS", df_purchases$company)
df_purchases$company <- gsub("^A.*$", "AKZO", df_purchases$company)
df_purchases$company <- gsub("^U.*$", "UNILEVER", df_purchases$company)
df_purchases$company <- gsub("^F.*$", "PHILIPS", df_purchases$company)

# Step 2. Split "Product.code...number" into product_code / product_number.
# Bug fix: the original used length() (number of rows) as the substr() end
# index; nchar() (per-string character count) is what is needed.
df_purchases <- mutate(df_purchases,
                       product_code = substr(Product.code...number, 1, 1),
                       product_number = substr(Product.code...number, 3,
                                               nchar(Product.code...number)))

# Step 3. Map product codes to categories with a named lookup vector instead
# of a row-by-row loop: p = Smartphone, v = TV, x = Laptop, q = Tablet.
category_map <- c(p = "Smartphone", v = "TV", x = "Laptop", q = "Tablet")
df_purchases <- mutate(df_purchases,
                       product_category = unname(category_map[product_code]))

# Step 4. Concatenate address, city, country into one geocodable string.
df_purchases <- mutate(df_purchases,
                       full_address = paste(address, city, country, sep = ","))

# Step 5. Dummy (binary) variables for company and product category.
# Bug fix: the original initialised all eight columns to FALSE and never
# filled them in; they now hold the actual indicator values.
df_purchases <- mutate(df_purchases,
                       company_philips    = company == "PHILIPS",
                       company_akzo       = company == "AKZO",
                       company_van_houten = company == "VAN HOUTEN",
                       company_unilever   = company == "UNILEVER",
                       product_smartphone = product_category == "Smartphone",
                       product_tv         = product_category == "TV",
                       product_laptop     = product_category == "Laptop",
                       product_tablet     = product_category == "Tablet")

# Write the cleaned data to the working directory.
write.csv(df_purchases, file = "refine_clean.csv")
|
63be56ca98b669559815f728ce591e3b479d4eb4
|
f2d6a9cae53ab792d7cc5aac1967e40b2cd623c2
|
/R/term.R
|
66d99898b7aac8125b506a0676c6997bb6323a1f
|
[] |
no_license
|
stevencarlislewalker/setup
|
071c04dc37917e472d5791388819c7e131ca3bdd
|
9d00d77586f7e5f477ad3186fa94f3057f205c59
|
refs/heads/master
| 2021-01-21T11:08:28.265796
| 2015-02-17T13:41:37
| 2015-02-17T13:41:37
| 28,967,449
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 321
|
r
|
term.R
|
##' Open a bash terminal in the current working directory
##'
##' @export
term <- function() {
  system("bash")
}
##' Relative directory name
##'
##' Returns the final path component of \code{dir} (the directory's own
##' name relative to its parent).
##'
##' @param dir directory, character string; defaults to the current
##'   working directory when missing.
##' @export
reldir <- function(dir) {
  if (missing(dir)) dir <- getwd()
  # Bug fix: the original split getwd() unconditionally, so a supplied
  # `dir` argument was silently ignored.
  parts <- strsplit(dir, "/")[[1]]
  parts[length(parts)]
}
|
b6a6ba121ae935d15a8bc17e47497d4e0529cfce
|
b4640a579976061201bf896505d65f4bab5569b2
|
/tokeniser_demo.R
|
8f3fab89b70ca7fe261aa04c5b5825670e033145
|
[] |
no_license
|
strategist922/IDA-MOOC-Data-Exploratory-Cleaning
|
871f7afbdb88a148eda7c2f745adc54fb72a4725
|
ffd944795cc535f5fb3960ddf4c44d9392e8e07c
|
refs/heads/master
| 2021-05-30T18:49:04.796051
| 2015-12-20T11:59:40
| 2015-12-20T11:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
r
|
tokeniser_demo.R
|
# Demo: compare two bigram tokenisers (tm/NLP vs RWeka) on two sentences that
# differ only in "smart" vs plain punctuation (curly quotes, ellipsis,
# apostrophe).
library(tm)
sample = c('There is “something” going on right now, like right… now. He’s there.',
'There is "something" going on right now, like right... now. He\'s there.')
corpus = VCorpus(VectorSource(sample), readerControl = list(reader = readPlain))
# NLP's Bigram Tokeniser
# Builds word bigrams and joins each word pair with a single space.
# NOTE(review): ngrams()/words() presumably come from the NLP package pulled
# in with tm -- confirm it is attached in a fresh session.
NLPBigramTokenizer <- function(x) {
unlist(lapply(ngrams(words(x), 2), paste, collapse = " "), use.names = FALSE)
}
dtm_nlp = DocumentTermMatrix(corpus, control = list(tokenize = NLPBigramTokenizer))
inspect(dtm_nlp)
# Total count of each bigram across both documents.
dtm_nlp_mat = as.data.frame(colSums(as.matrix(dtm_nlp)))
# RWeka's Bigram Tokeniser
library(RWeka)
WekaBigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_weka = DocumentTermMatrix(corpus, control = list(tokenize = WekaBigramTokenizer))
inspect(dtm_weka)
# Total count of each bigram across both documents, for comparison with
# dtm_nlp_mat above.
dtm_weka_mat = as.data.frame(colSums(as.matrix(dtm_weka)))
|
d09717bab1ca00292ef06ca3de5fcbea51adf21f
|
726a92a53407406654d5498be6170ad86b02502d
|
/Scripts/Variograms.R
|
b10cc03187301c353654b1b2b215f36f7a4ccd5f
|
[] |
no_license
|
dansmi-hub/SomersetLevels
|
416e7a18b61f31174a318e388e230bbaa248b636
|
6df06d0ec4f90d0b67031c5ffb448d41765633e5
|
refs/heads/master
| 2023-03-05T18:31:26.897957
| 2021-02-11T12:38:14
| 2021-02-11T12:38:14
| 269,067,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,019
|
r
|
Variograms.R
|
# Semivariogram
# Spatial-structure diagnostics for four mosquito species: bubble maps,
# variogram clouds, isotropic variograms, variogram surfaces (maps), and
# correlograms, all written to a single PDF at the end.
library(dplyr)
library(geoR)
library(gstat)
library(moments)
library(raster)
library(cowplot)
# NOTE(review): assumes the RDS holds a table with coordinate columns x/y and
# the four species counts in columns 7-10 -- confirm against the data-prep
# script.
data <- readRDS("Data/hmscdata.rds")
data <- as.data.frame(data)
# Drop incomplete rows up front so every plot uses the same observations.
data <- na.omit(data)
# Columns 7, 8, 9, 10 are the mosquitoes we care about
cols = 7:10
# Assign coords using sp
spdata <- data
coordinates(spdata) <- ~ x + y
# One bubble plot per species column.
bubble.list = list()
for (i in seq_along(cols)) {
bubble.list[[i]] = bubble(spdata,
zcol = cols[i],
xlab = "Eastings (m)",
ylab = "Northings (m)")
}
bubble.plots <-
plot_grid(bubble.list[[1]], bubble.list[[2]], bubble.list[[3]], bubble.list[[4]])
# Experimental Variogram
## Shows all pairs of locations in a dataset and their semi-variance
vcloud.pip <- variogram(cx_pipiens ~ 1, data = spdata, cloud = T)
vcloud.ann <- variogram(cs_annulata ~ 1, data = spdata, cloud = T)
vcloud.mac <- variogram(an_maculipennis ~ 1, data = spdata, cloud = T)
vcloud.cla <- variogram(an_claviger ~ 1, data = spdata, cloud = T)
vcloud.variograms <- plot_grid(
plot(vcloud.pip, main = "Variogram Cloud - Cx. pipiens", xlab = "Seperation Distance (m)"),
plot(vcloud.ann, main = "Variogram Cloud - Cs. annulata", xlab = "Seperation Distance (m)"),
plot(vcloud.mac, main = "Variogram Cloud - An. maculipennis", xlab = "Seperation Distance (m)"),
plot(vcloud.cla, main = "Variogram Cloud - An. claviger", xlab = "Seperation Distance (m)")
)
# Isotropic Variogram
## When spatial dependence is the same in all directions
v.cx <- variogram(cx_pipiens ~ 1, data = spdata, cloud = F)
v.cs <- variogram(cs_annulata ~ 1, data = spdata, cloud = F)
v.ma <- variogram(an_maculipennis ~ 1, data = spdata, cloud = F)
v.cl <- variogram(an_claviger ~ 1, data = spdata, cloud = F)
iso.pip <-
plot(v.cx, main = "Isotropic Variogram - Cx. pipiens", xlab = "Seperation Distance (m)")
iso.ann <-
plot(v.cs, main = "Isotropic Variogram - Cs. annulata", xlab = "Seperation Distance (m)")
iso.mac <-
plot(v.ma, main = "Isotropic Variogram - An. maculipennis", xlab = "Seperation Distance (m)")
iso.cla <-
plot(v.cl, main = "Isotropic Variogram - An. claviger", xlab = "Seperation Distance (m)")
iso.variograms <- plot_grid(iso.pip, iso.ann, iso.cla, iso.mac)
# Ansiotropic Variogram
## Visualisation of the variogram surface
## (variogram map or directional variogram)
# 10 km cutoff split into 20 distance bins for each surface.
vmap.pip <-
variogram(
cx_pipiens ~ 1,
data = spdata,
map = TRUE,
cutoff = 10000,
width = 10000 / 20
)
vmap.ann <-
variogram(
cs_annulata ~ 1,
data = spdata,
map = TRUE,
cutoff = 10000,
width = 10000 / 20
)
vmap.mac <-
variogram(
an_maculipennis ~ 1,
data = spdata,
map = TRUE,
cutoff = 10000,
width = 10000 / 20
)
vmap.cla <-
variogram(
an_claviger ~ 1,
data = spdata,
map = TRUE,
cutoff = 10000,
width = 10000 / 20
)
vmaps.variograms <- plot_grid(
plot(vmap.pip, col.regions = bpy.colors(64), main = "Cx. pipiens"),
plot(vmap.ann, col.regions = bpy.colors(64), main = "Cs. annulata"),
plot(vmap.mac, col.regions = bpy.colors(64), main = "An. maculipennis"),
plot(vmap.cla, col.regions = bpy.colors(64), main = "An. claviger")
)
## NCF correlelograms
# Correlograms with 1 km distance increments, using the raw x/y columns.
library(ncf)
clog.pip <- correlog(data$x, data$y, data$cx_pipiens, increment = 1000)
clog.ann <- correlog(data$x, data$y, data$cs_annulata, increment = 1000)
clog.mac <- correlog(data$x, data$y, data$an_maculipennis, increment = 1000)
clog.cla <- correlog(data$x, data$y, data$an_claviger, increment = 1000)
# NOTE(review): plot() on correlog objects draws for its side effect; the PDF
# section below re-plots the correlograms individually, so verify this
# plot_grid() call actually produces the intended combined panel.
correlelograms <- plot_grid(
plot(clog.pip, main = "Cx. pipiens"),
plot(clog.ann, main = "Cs. annulata"),
plot(clog.mac, main = "An. maculipennis"),
plot(clog.cla, main = "An. claviger")
)
# PDF
# Write every figure to a single multi-page PDF.
pdf(file = "Panels/Variograms.pdf")
bubble.plots
vcloud.variograms
iso.variograms
vmaps.variograms
plot(clog.pip, main = "Cx. pipiens")
plot(clog.ann, main = "Cs. annulata")
plot(clog.mac, main = "An. maculipennis")
plot(clog.cla, main = "An. claviger")
dev.off()
|
14e259a4ae194768a5d1c87345c5b545b72a88bd
|
cb1edbd312fe5583702e8567e1aa6e32e103d300
|
/man/markChanges.Rd
|
91673b859e2f9c282d74792b5c2d2980beaa111a
|
[] |
no_license
|
cran/phytools
|
e8cb2ddac5592a9c27a0036df4599649a393717a
|
910fa95b3f5f1619c85ac420bd07286a3fe8cfcf
|
refs/heads/master
| 2023-07-22T15:18:46.363446
| 2023-07-14T20:00:02
| 2023-07-14T21:30:43
| 17,698,535
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,722
|
rd
|
markChanges.Rd
|
\name{markChanges}
\alias{markChanges}
\title{Add marked changes to a plotted tree with mapped discrete character}
\usage{
markChanges(tree, colors=NULL, cex=1, lwd=2, plot=TRUE)
}
\arguments{
\item{tree}{an object of class \code{"simmap"}.}
\item{colors}{a named vector of colors used to plot the stochastically mapped character on the tree.}
\item{cex}{expansion factor for line height.}
\item{lwd}{line width.}
\item{plot}{logical value indicating whether the changes should be plotted or not.}
}
\description{
Adds the reconstructed changes to a plotted tree with a stochastically mapped discrete character.
}
\value{
This function returns (invisibly) a matrix containing the x & y coordinates of the marked changes on the plotted tree.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{plotSimmap}}
}
\examples{
## load tree and data from Revell & Collar (2009)
data(sunfish.tree)
data(sunfish.data)
## extract discrete character (feeding mode)
fmode<-setNames(sunfish.data$feeding.mode,
rownames(sunfish.data))
## fit model
er_model<-fitMk(sunfish.tree,fmode,model="ER",
pi="fitzjohn")
## generate single stochastic map
sunfish_smap<-simmap(er_model,nsim=1)
## plot stochastic map & mark changes
cols<-setNames(c("blue","red"),levels(fmode))
plot(sunfish_smap,cols,ftype="i")
markChanges(sunfish_smap,colors=cols,lwd=6)
par(mar=c(5.1,4.1,4.1,2.1))
}
\keyword{ancestral states}
\keyword{bayesian}
\keyword{phylogenetics}
\keyword{plotting}
\keyword{discrete character}
|
f85c895c077c4814ffd0eaba871bd7024d764ed7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pbdNCDF4/examples/nc_open.Rd.R
|
9bff0bb47aae76a56bff92f43974829ea4372cdc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,269
|
r
|
nc_open.Rd.R
|
library(pbdNCDF4)
### Name: nc_open
### Title: Open a netCDF File
### Aliases: nc_open
### Keywords: utilities
### ** Examples
## Not run:
##D # Define an integer dimension
##D dimState <- ncdim_def( "StateNo", "count", 1:50 )
##D
##D # Make an integer variable. Note that an integer variable can have
##D # a double precision dimension, or vice versa; there is no fixed
##D # relationship between the precision of the dimension and that of the
##D # associated variable. We just make an integer variable here for
##D # illustration purposes.
##D varPop <- ncvar_def("Pop", "count", dimState, -1,
##D longname="Population", prec="integer")
##D
##D # Create a netCDF file with this variable
##D ncnew <- nc_create( "states_population.nc", varPop )
##D
##D # Write some values to this variable on disk.
##D popAlabama <- 4447100
##D ncvar_put( ncnew, varPop, popAlabama, start=1, count=1 )
##D
##D # Add source info metadata to file
##D ncatt_put( ncnew, 0, "source", "Census 2000 from census bureau web site")
##D
##D nc_close(ncnew)
##D
##D # Now open the file and read its data
##D ncold <- nc_open("states_population.nc")
##D data <- ncvar_get(ncold)
##D print("here is the data in the file:")
##D print(data)
##D nc_close( ncold )
## End(Not run)
|
6799d42a1be12a10b5b1463f38c20ff244aad126
|
d75b7bc015b47d94254bcc9334ba15972d3ec9a1
|
/1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici36b.R
|
e0fbb394720ea248079e8c248c0b0b0858f303b6
|
[] |
no_license
|
laurajuliamelis/BachelorDegree_Statistics
|
a0dcfec518ef70d4510936685672933c54dcee80
|
2294e3f417833a4f3cdc60141b549b50098d2cb1
|
refs/heads/master
| 2022-04-22T23:55:29.102206
| 2020-04-22T14:14:23
| 2020-04-22T14:14:23
| 257,890,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 134
|
r
|
exercici36b.R
|
# Read one number n from standard input, then print every even integer i
# with 2 <= i < n, one per line.
n <- scan(n = 1, what = numeric(), quiet = TRUE)
i <- 2
repeat {
  if (i >= n) break
  if (i %% 2 == 0) {
    cat(i, "\n")
  }
  i <- i + 1
}
|
60b4e4fe77e79cb424adf8ce4013079ca410958a
|
c0f710dd706e7fea94b09aee9f35caf1103cf4be
|
/LinearReression.R
|
d03002d09515e59162d44e97703160ca8721b9ee
|
[] |
no_license
|
bhawneshdipu/Rshiny
|
d47d2921027d6ac70cba1411b54c82162a145a9b
|
ec7331e69c131962d77776547c3da0247724e1ba
|
refs/heads/master
| 2020-03-07T04:41:04.865737
| 2018-04-07T19:07:46
| 2018-04-07T19:07:46
| 127,273,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,585
|
r
|
LinearReression.R
|
## Lib.R ##
library(DT)
library(readr)
library(plotly)
library(stringr)
library(DataLoader)
library(forecast)
library(tseries)
library(zoo)
library(xts)
library(fts)
library(MASS)
library(caret)
library(e1071)
library(dplyr)
library(h2o) # Awesome ML Library
library(timetk) # Toolkit for working with time series in R
library(tidyquant)
library(anomalyDetection)
library(TSMining)
library(randomForest)
#install.packages("devtools")
#devtools::install_github("twitter/AnomalyDetection")
library(AnomalyDetection)
packageVersion('plotly')
#devtools::install_github("tidyverse/ggplot2")
#devtools::install_github('hadley/ggplot2')
library(ggplot2)
## Linear Regression ##
#====================================timetk + linear regression: MAPE = 4.3% (timetk demo)==================================
# Forecast a univariate "SUM" series with a linear model fitted on calendar
# features from timetk::tk_augment_timeseries_signature().
# NOTE(review): `pac` is created elsewhere; columns 1 and 31 are assumed to be
# the date index and the SUM series -- confirm upstream.
SUM_DATA <- pac[,c(1,31)]
# Print a summary of the series' time index.
SUM_DATA %>%
tk_index() %>%
tk_get_timeseries_summary() %>%
glimpse()
# Add calendar/signature features as candidate model inputs.
beer_sales_tbl_aug <- SUM_DATA %>%
tk_augment_timeseries_signature()
beer_sales_tbl_aug
beer_sales_tbl_aug<-na.omit(beer_sales_tbl_aug)
# Identify factor columns and label those with a single level.
(l <- sapply(beer_sales_tbl_aug, function(x) is.factor(x)))
m <- beer_sales_tbl_aug[, l]
ifelse(n <- sapply(m, function(x) length(levels(x))) == 1, "DROP", "NODROP")
# Fit the model on all signature features except the index, the diff column,
# and the month label.
fit_lm <- lm(SUM~ ., data = select(beer_sales_tbl_aug, -c(Index, diff,month.lbl)))
summary(fit_lm)
#na.omit(fit_lm)
beer_sales_idx <- SUM_DATA %>%
tk_index()
tail(beer_sales_idx)
Sys.setenv(TZ = "America/Toronto")
# Make future index
# Extend the observed index 10 periods into the future.
future_idx <- beer_sales_idx %>%
tk_make_future_timeseries(n_future =10)
future_idx
# Build the same signature features for the future dates.
new_data_tbl <- future_idx %>%
tk_get_timeseries_signature()
new_data_tbl
# Make predictions
pred <- predict(fit_lm, newdata = select(new_data_tbl, -c(index, diff)))
predictions_tbl <- tibble(
Index = future_idx,
value = pred
)
predictions_tbl
# Hold out the last 10% of observations as "actuals" for error metrics.
split <- round(nrow(SUM_DATA) * .90)
datat_to <- SUM_DATA[1:split,]
actuals_tbl <- SUM_DATA[(split + 1):nrow(SUM_DATA),]
#colnames(actuals_tbl)[2] <- "value"
# Plot training data, predictions, and held-out actuals on one chart.
p<-ggplot(SUM_DATA,aes(x=Index,y=SUM))+
# Training data
geom_line(color = palette_light()[[1]]) +
geom_point(color = palette_light()[[1]])+
# Predictions
geom_line(aes(y = value), color = palette_light()[[4]], data = predictions_tbl) +
geom_point(aes(y = value), color = palette_light()[[4]], data = predictions_tbl)+
# Actuals
geom_line(aes(y = SUM),color = palette_light()[[3]], data = actuals_tbl) +
geom_point(aes(y = SUM),color = palette_light()[[3]], data = actuals_tbl)+
#theme_tq() +
labs(title = "Time series sum data")
ggplotly(p)
# Join predictions to actuals and compute per-row errors.
error_tbl <- left_join(actuals_tbl, predictions_tbl) %>%
rename(actual = SUM, pred = value) %>%
mutate(
error = actual - pred,
error_pct = error / actual
)
error_tbl
# Calculating test error metrics
test_residuals <- error_tbl$error
test_error_pct <- error_tbl$error_pct * 100 # Percentage error
me <- mean(test_residuals, na.rm=TRUE)
rmse <- mean(test_residuals^2, na.rm=TRUE)^0.5
mae <- mean(abs(test_residuals), na.rm=TRUE)
mape <- mean(abs(test_error_pct), na.rm=TRUE)
mpe <- mean(test_error_pct, na.rm=TRUE)
tibble(me, rmse, mae, mape, mpe) %>% glimpse()
# Coerce to xts
beer_sales_xts <- tk_xts(SUM_DATA)
# Show the first six rows of the xts object
beer_sales_xts %>%
head()
tk_tbl(beer_sales_xts, rename_index = "date")
# Coerce to ts
beer_sales_ts <- tk_ts(SUM_DATA)
# Show the calendar-printout
beer_sales_ts
tk_tbl(beer_sales_ts, rename_index = "date")
has_timetk_idx(beer_sales_ts)
# If timetk_idx is present, can get original dates back
tk_tbl(beer_sales_ts, timetk_idx = TRUE, rename_index = "date")
|
f3e8afa0c78394ae9ec657129b9abb294c8587dd
|
965b3a75a2597de3cc534c83b1b61847f2c4f71d
|
/VEST/script.R
|
e41cb5b1e21b92ffc741f2311a81cb5d7c6f84d4
|
[] |
no_license
|
muchuVishal/analysis
|
71615a2a02c36b546ea40a3f2728731e57ad367e
|
2b68c2ece2110584a0181920d67a8088016f8ad2
|
refs/heads/master
| 2021-01-19T03:57:09.147270
| 2016-06-28T08:01:26
| 2016-06-28T08:01:26
| 50,667,911
| 0
| 0
| null | 2016-06-28T08:01:27
| 2016-01-29T14:37:02
|
R
|
UTF-8
|
R
| false
| false
| 2,751
|
r
|
script.R
|
library("RMySQL")
library("reshape2")
source("CodonMap.R")
#***************** General-purpose functions ********************
#****************************************************************
# Connect to database
# NOTE(review): credentials and host are hard-coded in source -- move them to
# environment variables or a config file before sharing this script.
mydb <- dbConnect(dbDriver("MySQL"), user = "rousniakl", password = "rousn!@k1", dbname = "vest_snvbox", host = "172.16.16.5")
# List the table names in a database.
#
# Bug fix: the `database` argument was previously ignored in favour of the
# global connection `mydb`; the function now queries the connection it is
# given (callers passing `mydb` behave exactly as before).
GetTableList <- function(database) {
  rs <- dbSendQuery(database, "show tables;")
  table_names <- as.character(fetch(rs, n = -1)[, 1])
  dbClearResult(rs)
  table_names
}
# Fetch an entire table as a data frame.
#
# Bug fix: queries the `database` argument instead of the global `mydb`.
# NOTE(review): `tableName` is pasted straight into the SQL text -- safe only
# for trusted, internal table names.
GetTable <- function(database, tableName) {
  rs <- dbSendQuery(database, paste0("
select *
from ", tableName, ";"))
  result <- fetch(rs, n = -1)
  dbClearResult(rs)
  result
}
# Fetch all CodonTable rows for one transcript UID.
#
# Bug fix: queries the `database` argument instead of the global `mydb`.
# NOTE(review): `uid` is interpolated into the SQL text -- use only trusted
# numeric identifiers, or switch to a parameterized query.
GetCodonPos <- function(database, uid) {
  rs <- dbSendQuery(database, paste0("
select *
from CodonTable where UID=", uid, ";"))
  codon_rows <- fetch(rs, n = -1)
  dbClearResult(rs)
  codon_rows
}
# Fetch all distinct transcript UIDs on one chromosome.
#
# Bug fix: queries the `database` argument instead of the global `mydb`.
# NOTE(review): `chromosome` is interpolated into the SQL text -- use only
# trusted values, or switch to a parameterized query.
GetUniqueUID <- function(database, chromosome) {
  rs <- dbSendQuery(database, paste0("
select distinct UID
from CodonTable
where chrom=", chromosome, ";"))
  uid_table <- fetch(rs, n = -1)
  dbClearResult(rs)
  uid_table
}
# Split each codon string in `df$bases` into its three single-base columns
# (base1, base2, base3), keep the full codon in a new `codon` column, and
# drop the original `bases` column.
CodonBaseSplit <- function(df) {
  codon_strings <- df$bases
  df$base1 <- substr(codon_strings, 1, 1)
  df$base2 <- substr(codon_strings, 2, 2)
  df$base3 <- substr(codon_strings, 3, 3)
  df$codon <- codon_strings
  df$bases <- NULL
  df
}
# Melt a codon table into one row per genomic position, pairing each
# position with its reference base.
#
# Input `df` is expected to have columns UID, chrom, Pos, codon, the three
# per-codon genomic positions pos1..pos3, and the three bases base1..base3
# (the shape produced by CodonBaseSplit() on the raw codon table).
GetGenomicVariants <- function(df) {
  # Long format for the three genomic positions of each codon.
  PosCodonTable = df[,-which(names(df) %in% c("base1","base2","base3"))]
  colnames(PosCodonTable)[which(names(PosCodonTable)=="pos1")] = "codpos1"
  colnames(PosCodonTable)[which(names(PosCodonTable)=="pos2")] = "codpos2"
  colnames(PosCodonTable)[which(names(PosCodonTable)=="pos3")] = "codpos3"
  PosCodonTable = melt(PosCodonTable, id.vars = c("UID","chrom","Pos","codon"), value.name = "genpos")
  # Long format for the three bases of each codon.
  BaseCodonTable = df[,-which(names(df) %in% c("pos1","pos2","pos3"))]
  colnames(BaseCodonTable)[which(names(BaseCodonTable)=="base1")] = "codpos1"
  colnames(BaseCodonTable)[which(names(BaseCodonTable)=="base2")] = "codpos2"
  colnames(BaseCodonTable)[which(names(BaseCodonTable)=="base3")] = "codpos3"
  BaseCodonTable = melt(BaseCodonTable, id.vars = c("UID","chrom","Pos","codon"), value.name = "base")
  df=merge(PosCodonTable,BaseCodonTable,by = c("UID","chrom","Pos","variable","codon"))
  # Dead code removed: the original renamed "value.x"/"value.y" here, but
  # melt() already named the value columns "genpos" and "base", so those
  # columns never exist and the renames were no-ops.
  # NOTE(review): the author may have intended to rename "base" to "ref";
  # check downstream consumers before making that change.
  df
}
#***************** Script ***************************************
#****************************************************************
# NOTE(review): CodonTable is not defined at top level in this file -- it is
# only a local inside GetCodonPos() -- so this block assumes a CodonTable
# data frame already exists in the workspace.
df <- CodonTable[, c(4, 5, 6)]
dfTest <- melt(df)
# Bug fix: the original wrote `decreasing=True`; R's logical constant is
# TRUE, and `True` is an undefined symbol that errored at run time.
dfTest <- dfTest[order(dfTest$value, decreasing = TRUE), ]
|
08a7b34b844e9e5973ee39cfd34d531c72442173
|
5c8787bcb1bc2a75295db85d57a87d4a3ac33ad1
|
/man/show_handers.Rd
|
9d7a82cf2fed7f16d219db3ce71cf3787bd714ac
|
[
"MIT"
] |
permissive
|
zcm2403/ngstk
|
3f00d9f8fdf87fd714d5375b73a1ea28de4d9770
|
07bdc274c6eef90726d4f0255d364271b984fe4e
|
refs/heads/master
| 2021-08-19T07:27:34.839632
| 2017-11-24T15:39:26
| 2017-11-25T06:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,015
|
rd
|
show_handers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hander.R
\name{show_handers}
\alias{show_handers}
\title{Function to show all available handler functions}
\usage{
show_handers(hander_lib = "default_handers", show_all_funs = TRUE,
show_code = NULL, show_description = FALSE,
hander_confg_file = system.file("extdata", "config/hander.toml", package =
"ngstk"))
}
\arguments{
\item{hander_lib}{hander lib name}
\item{show_all_funs}{Default is TRUE and to show all functions in the hander_lib}
\item{show_code}{Default is NULL, select hander you want to see its source code}
\item{show_description}{Default is FALSE}
\item{hander_confg_file}{ngstk hander configuration file path, default is
system.file('extdata', 'config/hander.toml', package = 'ngstk')}
}
\description{
Function to show all available handler functions
}
\examples{
show_handers(show_description = TRUE)
show_handers(show_description = FALSE, show_all_funs = FALSE,
show_code = 'handler_na_replace')
}
|
67184175ad4916774361bfca027ecb6491dcc2eb
|
8531cb0526ca547b2ecd1f0a86683c4d3328577b
|
/Code/Process_large_raster_f.R
|
3fe1ad5d4b0f6ad04192e9e954948a17d5a33b28
|
[] |
no_license
|
shaohuizhang/Global-to-local-GLOBIOM
|
8b1c5042d58a5dfc03e4515d3bafefa033977ec7
|
85687084068bdf05081cbb868f063de3d65289a0
|
refs/heads/master
| 2020-03-21T07:40:36.927153
| 2018-03-07T15:04:11
| 2018-03-07T15:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
Process_large_raster_f.R
|
# Function to remove values from very large raster where simple raster operations give memory problems
# https://cran.r-project.org/web/packages/raster/vignettes/functions.pdf
# For progress bar:
# https://www.r-bloggers.com/all-in-on-r%e2%81%b4-progress-bars-on-first-post/?utm_source=feedburner&utm_medium=email&utm_campaign=Feed%3A+RBloggers+%28R+bloggers%29
# Working with large rasters
# Progress bar does not work yet
# Replace every cell equal to `a` with NA, processing the raster block by
# block and streaming the result to `filename`, so rasters too large for
# memory can be handled. Returns the written raster.
remove_value_r_f <- function(x, a, filename) {
  result <- raster(x)
  chunks <- blockSize(result)
  result <- writeStart(result, filename, overwrite = TRUE)
  for (b in seq_len(chunks$n)) {
    vals <- getValues(x, row = chunks$row[b], nrows = chunks$nrows[b])
    vals[vals == a] <- NA
    result <- writeValues(result, vals, chunks$row[b])
  }
  writeStop(result)
}
# Keep only cells whose value is in `a` (everything else becomes NA),
# processing the raster block by block and streaming the result to
# `filename`. Returns the written raster.
keep_value_r_f <- function(x, a, filename) {
  result <- raster(x)
  chunks <- blockSize(result)
  result <- writeStart(result, filename, overwrite = TRUE)
  for (b in seq_len(chunks$n)) {
    vals <- getValues(x, row = chunks$row[b], nrows = chunks$nrows[b])
    vals[!(vals %in% a)] <- NA
    result <- writeValues(result, vals, chunks$row[b])
  }
  writeStop(result)
}
|
f624f07713dc8a7d90a6c02313d450bfbe8f3f84
|
676961fa8be3aee524385850133b2d442f6914e5
|
/stateCluster.R
|
14e020906b9a7a5dc0a53e1c6687b39abc68ed38
|
[] |
no_license
|
richshaw/stateCluster
|
462d50363d5c16f785cbb03d9499a515897ce978
|
2232fd9296be2ad34cf64666361f2d22fc689789
|
refs/heads/master
| 2021-01-19T19:37:09.441917
| 2015-03-03T02:22:47
| 2015-03-03T02:22:47
| 31,573,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,322
|
r
|
stateCluster.R
|
# K-means clustering of US states on scaled census variables, after reducing
# dimensionality with PCA.
# Load data
data <- read.csv(file="censusStateClean.csv", header=T, sep=",", row.names=1)
# Scale data because using different units
data2 <- data.frame(scale(data))
# data3 <- data[,-c(1)]
# data2 <- data.frame(scale(data3))
# Run PCA
# Both princomp() and prcomp() are run; `pc` ends up holding the prcomp()
# result, which the rest of the script uses.
pc <- princomp(data2)
loadings(pc)
pc <- prcomp(data2)
# Choose number of principal compnets that account for > 85% of variance or underneath elbow
plot(pc, type='l')
summary(pc)
# User first 5 components
comp <- data.frame(pc$x[,1:5])
#K-means cluster
# 4 clusters, 25 random starts for a stable solution.
k <- kmeans(comp, 4, nstart=25, iter.max=1000)
# Plot clusters across components
library(RColorBrewer)
library(scales)
palette(alpha(brewer.pal(9,'Set1'), 0.5))
# `k$clust` relies on R's partial matching of the list element `k$cluster`.
plot(comp, col=k$clust, pch=16)
# Cluster sizes
sort(table(k$clust))
clust <- names(sort(table(k$clust)))
# First cluster
row.names(data[k$clust==clust[1],])
# Second Cluster
row.names(data[k$clust==clust[2],])
# Third Cluster
row.names(data[k$clust==clust[3],])
# Fourth Cluster
row.names(data[k$clust==clust[4],])
# > # First cluster
# > row.names(data[k$clust==clust[1],])
# [1] "California" "Florida" "New York" "Texas"
# > # Second Cluster
# > row.names(data[k$clust==clust[2],])
# [1] "Connecticut" "Delaware" "Illinois" "Maryland" "Massachusetts" "New Jersey"
# [7] "Rhode Island" "Virginia"
# > # Third Cluster
# > row.names(data[k$clust==clust[3],])
# [1] "Colorado" "Idaho" "Iowa" "Kansas" "Maine" "Minnesota"
# [7] "Montana" "Nebraska" "Nevada" "New Hampshire" "North Dakota" "Oregon"
# [13] "South Dakota" "Utah" "Vermont" "Wisconsin" "Wyoming"
# > # Fourth Cluster
# > row.names(data[k$clust==clust[4],])
# [1] "Alabama" "AriNAona" "Arkansas" "Georgia" "Indiana" "Kentucky"
# [7] "Louisiana" "Michigan" "Mississippi" "Missouri" "New Mexico" "North Carolina"
# [13] "Ohio" "Oklahoma" "Pennsylvania" "South Carolina" "Tennessee" "West Virginia"
# NOTE(review): clusplot() comes from the `cluster` package, which is never
# loaded in this script -- add library(cluster) or this line errors in a
# fresh session.
clusplot(comp, k$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
# NOTE(review): the y-axis label says "Accommodation" but the variable
# plotted is the bachelor's-degree percentage -- likely a copy-paste slip.
boxplot(data$Bachelor.s.degree.or.higher..percent.of.persons.age.25.years...2009.2013 ~ k$cluster,
xlab='Cluster', ylab='Accommodation',
main='Batchelor Degree or Higher by Cluster')
|
d112190869fef770650a4e527e7a36369158e960
|
127ebda60a4dc1bb143083fe7a73e009c74472e2
|
/R/sample_data.R
|
96ee08fb403a261f4571d2f5d187fa8ebcf25502
|
[] |
no_license
|
telvis07/kaggle-melbourne-university-seizure-prediction
|
1b29d703ec0820f1dc5e4f9bcaf38c81c294b3d9
|
53ef57415b9e8491cf04e0f771f2dacbbd234dd6
|
refs/heads/master
| 2021-03-27T14:46:33.146597
| 2017-02-07T14:09:54
| 2017-02-07T14:09:54
| 73,083,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
sample_data.R
|
library(dplyr)
sample_data <- function(trainset, n_neg_samples=0, n_pos_samples=0) {
# make sure we don't subsample rows with all NULLs
trainset <- trainset[rowSums(is.na(trainset)) == 0,]
# separate pos and negative class
neg_samples <- trainset[trainset$target == "interictal",]
pos_samples <- trainset[trainset$target == "preictal",]
print(sprintf("before downsample: neg dim: %s", dim(neg_samples)[1] ))
print(sprintf("before downsample: pos dim: %s", dim(pos_samples)[1] ))
# downsample negative class (interictal)
if ((n_neg_samples != 0) && (n_neg_samples < dim(neg_samples)[1])) {
sample_inds <- sample(dim(neg_samples)[1], n_neg_samples, replace=FALSE)
neg_samples <- neg_samples[sample_inds,]
}
# downsample positive class (preictal)
if((n_pos_samples != 0) && (n_pos_samples < dim(pos_samples)[1])){
sample_inds <- sample(dim(pos_samples)[1], n_pos_samples, replace=FALSE)
pos_samples <- pos_samples[sample_inds,]
}
print(sprintf("after downsample: neg dim: %s", dim(neg_samples)[1] ))
print(sprintf("after downsample: pos dim: %s", dim(pos_samples)[1] ))
trainset <- rbind(neg_samples, pos_samples)
print(sprintf("downsampled trainset: %s rows, %s cols", dim(trainset)[1],
dim(trainset)[2]))
print(table(trainset$target))
trainset
}
|
8c6d157b55f9089adda714d5e0969f1769f6b908
|
81f518e29b4cac7cd61ea8e2c895d4f7edfd209b
|
/R/cor_smooth.R
|
8cf00a6eb591172fadb4873c1c993bf9c9bb2162
|
[] |
no_license
|
cran/correlation
|
5e2d691df07edb5aa69aba35f3780e19ffefcf57
|
db4fd0ce345a0dcee08de2a9c1810f79b5575b63
|
refs/heads/master
| 2023-04-13T04:06:37.689083
| 2023-04-06T08:23:26
| 2023-04-06T08:23:26
| 247,916,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,732
|
r
|
cor_smooth.R
|
#' Smooth a non-positive definite correlation matrix to make it positive definite
#'
#' Make correlations positive definite using `psych::cor.smooth`. If smoothing
#' is done, inferential statistics (*p*-values, confidence intervals, etc.) are
#' removed, as they are no longer valid.
#'
#' @param x A correlation matrix.
#' @param method Smoothing method. Can be `psych` (will use
#' `psych::cor.smooth()`), `hj` (Jorjani et al., 2003) or `lrs` (Schaeffer,
#' 2014). For the two last, will use `mbend::bend()` (check its documentation
#' for details).
#' @param verbose Set to `FALSE` to silence the function.
#' @param tol The minimum eigenvalue to be considered as acceptable.
#' @param ... Other arguments to be passed to or from other functions.
#'
#' @examplesIf requireNamespace("psych", quietly = TRUE)
#' set.seed(123)
#' data <- as.matrix(mtcars)
#' # Make missing data so pairwise correlation matrix is non-positive definite
#' data[sample(seq_len(352), size = 60)] <- NA
#' data <- as.data.frame(data)
#' x <- correlation(data)
#' is.positive_definite(x)
#'
#' smoothed <- cor_smooth(x)
#' @export
cor_smooth <- function(x, method = "psych", verbose = TRUE, ...) {
UseMethod("cor_smooth")
}
#' @export
cor_smooth.easycorrelation <- function(x,
method = "psych",
verbose = TRUE,
tol = 10^-12,
...) {
m <- cor_smooth(as.matrix(x), method = method, verbose = verbose, tol = tol, ...)
if (isTRUE(attributes(m)$smoothed)) {
estim <- names(x)[names(x) %in% c("r", "rho", "tau", "D")][1]
for (param1 in row.names(m)) {
for (param2 in colnames(m)) {
if (nrow(x[x$Parameter1 == param1 & x$Parameter2 == param2, ]) == 0) next
# Print changes
if (verbose) {
val1 <- x[x$Parameter1 == param1 & x$Parameter2 == param2, estim]
val2 <- m[param1, param2]
if (round(val1 - val2, digits = 2) == 0) {
insight::print_color(paste0(
param1,
" - ",
param2,
": no change (",
insight::format_value(val1),
")\n"
), "green")
} else {
insight::print_color(paste0(
param1,
" - ",
param2,
": ",
insight::format_value(val1),
" -> ",
insight::format_value(val2),
"\n"
), "red")
}
cat("\n")
}
x[x$Parameter1 == param1 & x$Parameter2 == param2, estim] <- m[param1, param2]
}
}
atts <- attributes(x)
x <- x[, c("Parameter1", "Parameter2", "r", "Method", "n_Obs")]
atts$names <- names(x)
atts$smoothed <- TRUE
attributes(x) <- atts
x
} else {
x
}
}
#' @export
cor_smooth.matrix <- function(x,
method = "psych",
verbose = TRUE,
tol = 10^-12,
...) {
method <- match.arg(method, choices = c("psych", "hj", "lrs"))
# Already positive definite
if (is.positive_definite(x, tol = tol, ...)) {
if (verbose) message("Matrix is positive definite, smoothing was not needed.")
return(x)
}
if (method == "psych") {
insight::check_if_installed("psych")
x <- suppressWarnings(psych::cor.smooth(x, eig.tol = tol, ...))
} else {
out <- try(suppressMessages(mbend::bend(x, method = method, ...)), silent = TRUE)
if (inherits(out, "try-error")) {
return(x)
}
x <- out$bent
}
attr(x, "smoothed") <- TRUE
x
}
# Utils -------------------------------------------------------------------
#' @rdname cor_smooth
#' @export
is.positive_definite <- function(x, tol = 10^-12, ...) {
UseMethod("is.positive_definite")
}
#' @rdname cor_smooth
#' @export
is_positive_definite <- is.positive_definite
#' @export
is.positive_definite.matrix <- function(x, tol = 10^-12, ...) {
eigens <- try(eigen(x), silent = TRUE)
# validation checks
if (inherits(eigens, "try-error")) {
stop(insight::format_message(
"There is something seriously wrong with the correlation matrix, as some of the eigen values are NA."
), call. = FALSE)
}
# Find out
if (min(eigens$values) >= tol) {
out <- TRUE
} else {
out <- FALSE
}
out
}
#' @export
is.positive_definite.easycorrelation <- function(x, ...) {
is.positive_definite(as.matrix(x, ...), ...)
}
|
c8ce517f6195bd8df11fb2e9b71a4ddc76163a2e
|
6ce79966b1b89de1a6d6eb29cea945188c18652c
|
/R/algorithms__MHPropWithKStepNewton.R
|
7f74fca38ff45d2f4e1728e702452e34b211f0b9
|
[] |
no_license
|
feng-li/movingknots
|
d3041a0998f0873459814a09e413c714fff700c6
|
5f921070e4cd160a831c5191255f88dd7d4c850c
|
refs/heads/master
| 2021-06-10T00:18:57.172246
| 2021-03-22T05:56:44
| 2021-03-22T05:56:44
| 145,708,629
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,204
|
r
|
algorithms__MHPropWithKStepNewton.R
|
#' Metropolis-Hastings algorithm with K-step Newton method for the spline model.
#'
#' Builds a multivariate-t proposal centred at the point reached after
#' `nNewtonStep` Newton steps from the current draw, draws a candidate, and
#' accepts or rejects it with the usual MH ratio. Details are available in
#' the paper.
#' @param param.cur Current value of the parameter subset being updated
#'   (already on the transformed scale).
#' @param gradhess.fun.name Name of the gradient/Hessian function passed to
#'   `KStepNewtonMove`.
#' @param logpost.fun.name Name of the log-posterior function, called via
#'   `call()` below.
#' @param nNewtonStep Number of Newton steps (K) used to locate the proposal
#'   centre.
#' @param Params List of all model parameters; the subset identified by
#'   `callParam` is overwritten with the candidate/current draw.
#' @param hessMethod Method used to obtain the Hessian (passed through).
#' @param Y Response data (passed through to posterior/gradient functions).
#' @param x0 Covariate data (passed through).
#' @param callParam List with `id` and `subset` identifying which entry of
#'   `Params` is being updated.
#' @param splineArgs Spline configuration (passed through).
#' @param priorArgs Prior configuration (passed through).
#' @param prop.df Degrees of freedom of the multivariate-t proposal.
#' @param Params_Transform Parameter transformation settings (passed through).
#' @return A list with `param.out` (the accepted or retained draw) and
#'   `accept.prob` (the acceptance probability; 0 when rejected).
#' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
#' @export
MHPropWithKStepNewton <- function(param.cur, gradhess.fun.name, logpost.fun.name,
                                  nNewtonStep, Params, hessMethod, Y, x0, callParam,
                                  splineArgs, priorArgs, prop.df, Params_Transform)
{
    ## All the propose are made at the level of transformed stage. No need further
    ## transformation

    ## Step 1: K Newton steps from the current draw give the proposal centre
    ## and the observed (inverse) Hessian there.
    KStepNewton1 <- KStepNewtonMove(param.cur = param.cur,
                                    gradhess.fun.name = gradhess.fun.name,
                                    KStep = nNewtonStep,
                                    Params = Params,
                                    hessMethod = hessMethod,
                                    Y = Y,
                                    x0 = x0,
                                    callParam = callParam,
                                    splineArgs = splineArgs,
                                    priorArgs = priorArgs,
                                    Params_Transform = Params_Transform)

    param.cur.prop <- KStepNewton1$param.cur
    HessObs.cur.prop <- KStepNewton1$hessObs.cur
    invHessObs.cur.prop <- KStepNewton1$invHessObs.cur

    ## The Cholesky factorisation of -invHess checks that the negated inverse
    ## Hessian is a usable (positive-definite) proposal covariance; if the
    ## Newton move failed or the matrix is not PD, the move is rejected.
    if(any(is.na(HessObs.cur.prop)) ||
       is(try(chol(-invHessObs.cur.prop), silent=T), "try-error")) # Something is wrong, reject it.
    {
        logjump.cur2prop <- NaN
        param.prop <- NaN
    }
    else
    {
        ## Step 2: propose a draw from a multivariate t centred at the Newton
        ## endpoint with scale -invHess and prop.df degrees of freedom.
        param.prop <- (param.cur.prop +
                       t(rmvt(sigma = -invHessObs.cur.prop,
                              n = 1, df = prop.df,
                              method = "chol")))
        ## Jump density from current to proposed draw. The t density is
        ## symmetric in the sign of its argument, so evaluating at
        ## (centre - draw) is equivalent to (draw - centre).
        logjump.cur2prop = dmvt(x = matrix(param.cur.prop - param.prop, 1, ),
                                sigma = -invHessObs.cur.prop,
                                df = prop.df, log = TRUE, checkSymmetry = FALSE)
    }

    if (any(is.na(param.prop))) # Previous proposal unsuccessful
    { HessObs.prop.prop <- NaN }
    else # all are right
    {
        ## Step 3: K Newton steps from the *proposed* draw, needed to evaluate
        ## the reverse jump density (proposal is not symmetric).
        KStepNewton2 <- KStepNewtonMove(param.cur = param.prop,
                                        gradhess.fun.name = gradhess.fun.name,
                                        KStep =nNewtonStep,
                                        Params = Params,
                                        hessMethod = hessMethod,
                                        Y = Y, x0 = x0,
                                        callParam = callParam,
                                        splineArgs = splineArgs,
                                        priorArgs = priorArgs,
                                        Params_Transform = Params_Transform)
        param.prop.prop <- KStepNewton2$param.cur
        HessObs.prop.prop <- KStepNewton2$hessObs.cur
        invHessObs.prop.prop <- KStepNewton2$invHessObs.cur
    }

    if(any(is.na(HessObs.prop.prop))) # Something is wrong at KStepNewton2, reject it.
    {
        logpost.cur <- NaN
        logpost.prop <- NaN
        logjump.prop2cur <- NaN
    }
    else # all are right
    {
        ## Reverse jump density: from the proposed draw back to the current one.
        logjump.prop2cur = dmvt(x = matrix(param.prop.prop - param.cur, 1, ),
                                sigma = -invHessObs.prop.prop,
                                df = prop.df, log = TRUE, checkSymmetry = FALSE)

        ## Evaluate the log posterior at both the proposed and current draws,
        ## substituting each into the relevant slot of Params.
        Params.prop <- Params
        Params.prop[[callParam$id]][callParam$subset] <- param.prop

        Params.cur <- Params
        Params.cur[[callParam$id]][callParam$subset] <- param.cur

        caller.prop <- call(logpost.fun.name,
                            Y = Y,
                            x0 = x0,
                            Params = Params.prop,
                            callParam = callParam ,
                            priorArgs = priorArgs,
                            splineArgs = splineArgs,
                            Params_Transform = Params_Transform)
        caller.cur <- call(logpost.fun.name,
                           Y = Y,
                           x0 = x0,
                           Params = Params.cur,
                           callParam = callParam,
                           priorArgs = priorArgs,
                           splineArgs = splineArgs,
                           Params_Transform = Params_Transform)

        logpost.prop <- eval(caller.prop) # the logpost density for the proposed draw.
        logpost.cur <- eval(caller.cur) # the logpost density for the current draw.
    }

    ## compute the MH ratio (any NaN above propagates here and forces rejection).
    log.r <- logpost.prop - logpost.cur + logjump.prop2cur - logjump.cur2prop
    r <- exp(log.r)
    accept.prob <- min(1, as.numeric(r)) # the acceptance probability.

    ## make decision to update or keep current draw.
    if(!is.na(accept.prob) && runif(1) < accept.prob)
    {
        param.out <- param.prop # keep update
    }
    else
    {
        param.out <- param.cur # keep current
        accept.prob <- 0 # set acceptance probs to zero.
    }

    out <- list(param.out = param.out, accept.prob = accept.prob)
    return(out)
}
|
f641072d2154f617cca3863534b0ed3d3f7ffa88
|
ad6932226ea17dbf9d0c09ba3fd465b639f27bda
|
/vignettes/read-gse-matrix-file.r
|
cc9bd2b3d2fe22eb9120016713f1fb6b4c485293
|
[] |
no_license
|
perishky/meffonym
|
d00e0ccf417c1bababe2cb5cafb4bc91e1ee4f9a
|
ddf7f11e0831bdba486b5e49b80b508890affb39
|
refs/heads/master
| 2022-12-27T13:36:48.988965
| 2022-12-15T17:09:35
| 2022-12-15T17:09:35
| 200,882,117
| 5
| 0
| null | 2022-12-01T18:50:55
| 2019-08-06T15:52:04
|
R
|
UTF-8
|
R
| false
| false
| 1,144
|
r
|
read-gse-matrix-file.r
|
# Read a GEO "series matrix" file (GSE*_series_matrix.txt).
#
# The expected layout is: "!Series_*" header lines, "!Sample_*" annotation
# lines, a "!series_matrix_table_begin" marker, the data table, and a
# closing "!series_matrix_table_end" marker -- TODO confirm against the
# GEO format specification.
#
# Returns a list with:
#   dnam    - matrix of the table values (row names from column 1,
#             column names from the samples' geo_accession)
#   samples - data.frame of per-sample annotations, one row per sample,
#             with the "!Sample_" prefix stripped from the column names
#
# NOTE(review): the read.table()/readLines() calls below all consume the
# same open connection sequentially, so their order is essential.
read.gse.matrix.file <- function(filename) {
  # First pass: count header/annotation lines so the sequential reads
  # below know how many rows to take from the connection.
  dat <- readLines(filename)
  nseries <- sum(grepl("^!Series_", dat))
  nsamples <- sum(grepl("^!Sample_", dat))
  # Number of data rows after the begin marker, minus two (presumably the
  # table header line and the end marker -- TODO confirm).
  ndata <- length(dat) - match("!series_matrix_table_begin", dat) - 2

  # Second pass: stream the sections off one connection, in file order.
  con <- file(filename, "r")
  header <- read.table(con, sep="\t", header=F, nrows=nseries, stringsAsFactors=F)
  samples <- read.table(con, sep="\t", header=F, nrows=nsamples, stringsAsFactors=F)
  # Transpose so samples are rows; row 1 holds the annotation names.
  samples <- t(samples)
  colnames(samples) <- samples[1,]
  colnames(samples) <- sub("!Sample_", "", colnames(samples))
  samples <- data.frame(samples[-1,], stringsAsFactors=F)
  # The full-file line vector is no longer needed; free it before reading
  # the (potentially large) data table.
  rm(dat)
  gc()
  # Skip the "!series_matrix_table_begin" marker line.
  readLines(con,1)
  dnam <- read.table(con, sep="\t", header=TRUE, quote="\"", dec=".", fill=TRUE,
                     na.strings = c("NA", "null", "NULL", "Null"), comment.char = "")
  close(con)
  # Empty table: drop whatever rows were picked up (presumably the end
  # marker -- TODO confirm).
  if (ndata == 0)
    dnam <- dnam[-(1:nrow(dnam)),]
  # Column 1 holds the probe/feature identifiers; move it to row names.
  if (nrow(dnam) > 0)
    rownames(dnam) <- dnam[,1]
  dnam <- as.matrix(dnam[,-1])
  rownames(samples) <- colnames(dnam)
  colnames(dnam) <- samples$geo_accession
  list(dnam=dnam, samples=samples)
}
|
cc4b8c44f306d75ceb02925e7128896b388e2ae2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmtest/examples/resettest.Rd.R
|
978e585ca4b9ca56b8289c0f0f4e4a5e1cf13e13
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
resettest.Rd.R
|
library(lmtest)

### Name: resettest
### Title: RESET Test
### Aliases: resettest reset
### Keywords: htest

### ** Examples

# One regressor and two responses: y1 carries a genuine quadratic term,
# y2 is purely linear in x (plus noise).
x <- 1:30
y1 <- 1 + x + x^2 + rnorm(30)
y2 <- 1 + x + rnorm(30)

# RESET should detect the omitted quadratic term for y1 but not for y2.
resettest(y1 ~ x, power = 2, type = "regressor")
resettest(y2 ~ x, power = 2, type = "regressor")
|
5b66b81f19cf805b918f1a403690d73ce442c416
|
e696c7eb91d2e2bcc299c01958dc29113d9c25d0
|
/tests/testthat/test_h5create.R
|
f34ca346cf5389404fc4dfe29f99f9e24f7d40a3
|
[] |
no_license
|
grimbough/archive-rhdf5
|
06b8ad682f88352f56a357c2a01e316c51fbae28
|
8acc2f9744382ade564fb6bc7b5ded1a7b2aafc7
|
refs/heads/master
| 2021-06-25T12:54:21.208183
| 2017-08-24T14:44:45
| 2017-08-24T14:44:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,102
|
r
|
test_h5create.R
|
library(rhdf5)

############################################################
context("h5createFile")
############################################################

## output file name -- a fresh temp path so the tests never touch user data;
## this variable is shared by the h5createGroup tests below (order matters)
h5File <- tempfile(pattern = "ex_save", fileext = ".h5")
if(file.exists(h5File))
  file.remove(h5File)

test_that("Default arguments", {
  # Creating a new file succeeds and leaves the file on disk.
  expect_true( h5createFile(file = h5File) )
  expect_true( file.exists(h5File) )
})

test_that("Don't overwrite existing file", {
  # A second create on the same path is refused with a message, not an error.
  expect_message(h5createFile(file = h5File),
                 regexp = "already exists.\n$")
})
############################################################
context("h5createGroup")
############################################################

# Reuses the h5File created by the h5createFile section above; re-create
# it only if missing (the tests here depend on that shared file state).
if(!file.exists(h5File))
  h5createFile(file = h5File)

test_that("Create simple group", {
  expect_true( h5createGroup(file = h5File, group = "foo") )
})

test_that("Create group heirachy", {
  # Nested group under the "foo" group created in the previous test.
  expect_true( h5createGroup(file = h5File, group = "foo/baa") )
})

test_that("Fail if toplevel group missing", {
  ## this is really an error, but doesn't get thrown as one
  expect_error( h5createGroup(file = h5File, group = "baa/foo") )
  H5close()
})
############################################################
context("h5createDataset")
############################################################

# Fresh file for the dataset tests, independent of the sections above.
h5File <- tempfile(pattern = "ex_createDS", fileext = ".h5")
if(file.exists(h5File))
  file.remove(h5File)

## create empty file
h5createFile(file = h5File)

test_that("Create single dataset", {
  # Default storage mode; a 2x1 dataset reads back as a numeric matrix.
  expect_true( h5createDataset(file = h5File, dataset = "A", dims = c(2,1)) )
  expect_true( "A" %in% names(h5dump(file = h5File)) )
  A <- h5read(file = h5File, name = "A")
  expect_is(A, "matrix")
  expect_true(nrow(A) == 2)
  expect_true(ncol(A) == 1)
  expect_is(A[1,1], "numeric")
})

test_that("Create more datasets with different data types", {
  # Character datasets require an explicit fixed string size.
  expect_true( h5createDataset(file = h5File, dataset = "int", dims = c(4,5), storage.mode = "integer") )
  expect_true( h5createDataset(file = h5File, dataset = "bool", dims = c(4,5), storage.mode = "logical") )
  expect_true( h5createDataset(file = h5File, dataset = "char", dims = c(4,5), storage.mode = "character", size = 255) )
  # Each dataset reads back with the dims and R type it was declared with.
  contents <- h5dump(file = h5File)
  expect_true( all(c("int", "bool", "char") %in% names(contents)) )
  expect_is(contents$int, "matrix")
  expect_true(nrow(contents$int) == 4)
  expect_true(ncol(contents$int) == 5)
  expect_is(contents$int[1,1], "integer")
  expect_is(contents$bool[1,1], "logical")
  expect_is(contents$char[1,1], "character")
})

test_that("Invalid storage mode arguments", {
  expect_error( h5createDataset(file = h5File, dataset = "foo", dims = c(1,1), storage.mode = "foo") )
  expect_error( h5createDataset(file = h5File, dataset = "foo", dims = c(1,1), storage.mode = 10) )
  expect_error( h5createDataset(file = h5File, dataset = "foo", dims = c(1,1), storage.mode = 1L) )
  expect_error( h5createDataset(file = h5File, dataset = "foo", dims = c(1,1), storage.mode = FALSE) )
  ## character without size argument
  expect_error( h5createDataset(file = h5File, dataset = "foo", dims = c(1,1), storage.mode = "character", size = NULL) )
})

test_that("Datasets with different compression levels", {
  # Write identical data at gzip level 0 and level 9; the level-9 file
  # should be no larger than the uncompressed one.
  dataMatrix <- matrix(runif(n = 1e5), nrow = 10000, ncol = 10)

  h5File_0 <- tempfile(pattern = "level0_", fileext = ".h5")
  if(file.exists(h5File_0))
    file.remove(h5File_0)
  h5createFile(h5File_0)
  expect_true( h5createDataset(file = h5File_0, dataset = "A", dims = dim(dataMatrix), level = 0) )
  h5write( dataMatrix, file = h5File_0, name = "A")

  h5File_9 <- tempfile(pattern = "level9_", fileext = ".h5")
  if(file.exists(h5File_9))
    file.remove(h5File_9)
  h5createFile(h5File_9)
  expect_true( h5createDataset(file = h5File_9, dataset = "A", dims = dim(dataMatrix), level = 9) )
  h5write( dataMatrix, file = h5File_9, name = "A")

  ## expect compressed file to be at least a small as uncompressed
  expect_lte( file.size(h5File_9), file.size(h5File_0) )
})
############################################################
context("h5createAttribute")
############################################################

# Fresh file containing a single dataset ("foo") to attach attributes to.
h5File <- tempfile(pattern = "ex_createAttr", fileext = ".h5")
if(file.exists(h5File))
  file.remove(h5File)

## create a new file with a single dataset
h5createFile(h5File)
h5write(1:1, h5File, "foo")

test_that("Add attribute using file name", {
  expect_true( h5createAttribute(obj = "foo", file = h5File, attr = "foo_attr", dims = c(1,1)) )
  expect_match( names(h5readAttributes(file = h5File, name = "foo")), "foo_attr" )
})

test_that("Fail is attribute already exists", {
  # Creating the same attribute again (added in the previous test) returns
  # FALSE with a message, rather than throwing an error.
  expect_false( h5createAttribute(obj = "foo", file = h5File, attr = "foo_attr", dims = c(1,1)) )
  expect_message( h5createAttribute(obj = "foo", file = h5File, attr = "foo_attr", dims = c(1,1)),
                  "Can not create attribute")
})

test_that("Fail if dims or maxdims not numeric", {
  expect_error( h5createAttribute(obj = "foo", file = h5File, attr = "foo_attr2", dims = "A" ) )
  expect_error( h5createAttribute(obj = "foo", file = h5File, attr = "foo_attr2", dims = c(1,1), maxdims = "A" ) )
})

test_that("Invalid storage mode arguments", {
  expect_error( h5createAttribute(file = h5File, obj = "foo", dims = c(1,1), attr = "bad_attr", storage.mode = "foo") )
  expect_error( h5createAttribute(file = h5File, obj = "foo", dims = c(1,1), attr = "bad_attr", storage.mode = 10) )
  expect_error( h5createAttribute(file = h5File, obj = "foo", dims = c(1,1), attr = "bad_attr", storage.mode = 1L) )
  expect_error( h5createAttribute(file = h5File, obj = "foo", dims = c(1,1), attr = "bad_attr", storage.mode = FALSE) )
  ## character without size argument
  expect_error( h5createAttribute(file = h5File, obj = "foo", dims = c(1,1), attr = "bad_attr", storage.mode = "character", size = NULL) )
})

test_that("No open HDF5 objects are left", {
  # Leak check: every handle opened by the tests above must be closed.
  expect_equal( length(h5validObjects()), 0 )
})
|
b6be02e0b2e5baebbe3ee4cd939c96859d330412
|
d3344fe3ff4009239d415facd12e8cddef7aa089
|
/seer_script.R
|
5b8468b8ec073f063fb50d4f04b0e49f9788daa2
|
[] |
no_license
|
datasciencebirds/seer1
|
3710d4e274162c7624d8b93b9e01748e82db04ba
|
25b148e06edf32f9a56ad7e7fa6a0c1febd386ec
|
refs/heads/master
| 2020-05-01T05:58:23.695265
| 2019-03-23T17:20:31
| 2019-03-23T17:20:31
| 177,317,549
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,890
|
r
|
seer_script.R
|
# SEER respiratory-cancer data preparation.
# NOTE(review): setwd() with an absolute path makes the script machine-
# specific; kept for backward compatibility, but prefer running from the
# project root.
setwd(dir = "C:/Drive/Workstation/data-science/seer-workspace/paper1/")
#install.packages("caret")
#install.packages("ellipse")
#install.packages("mlbench")
#library("caret")
#library(mlbench)

filename <- "data/RESPIR_PROCESSED_DATA_min224.csv"
dataset <- read.csv(filename, header = FALSE)

# Assign the full SEER column names. The original vector contained a line
# break inside "First_malignant_primary_indicator" (yielding a column name
# with an embedded newline); the name is rejoined here.
colnames(dataset) <- c(
  "Patient_ID_number","Registry_ID","Marital_Status_at_DX","Race_Ethnicity",
  "NHIA_Derived_Hispanic_Origin","Sex","Age_at_diagnosis","Year_of_Birth",
  "Sequence_Number_Central","Month_of_diagnosis","Year_of_diagnosis","Primary_Site",
  "Laterality","Histology_92-00_ICD-O-2","Behavior_92-00_ICD-O-2",
  "Histologic_Type_ICD-O-3","Behavior_Code_ICD-O-3","Grade","Diagnostic_Confirmation",
  "Type_of_Reporting_Source","EOD_Tumor_Size","EOD_Extension","EOD_Extension_Prost_Path",
  "EOD_Lymph_Node_Involv","Regional_Nodes_Positive","Regional_Nodes_Examined",
  "EOD_Old_13_Digit","EOD_Old_2_Digit","EOD_Old_4_Digit","Coding_System_for_EOD",
  "Tumor_Marker_1","Tumor_Marker_2","Tumor_Marker_3","CS_Tumor_Size","CS_Extension",
  "CS_Lymph_Nodes","CS_Mets_at_Dx","CS_Site-Specific_Factor_1","CS_Site-Specific_Factor_2",
  "CS_Site-Specific_Factor_3","CS_Site-Specific_Factor_4","CS_Site-Specific_Factor_5",
  "CS_Site-Specific_Factor_6","CS_Site-Specific_Factor_25","Derived_AJCC_T",
  "Derived_AJCC_N","Derived_AJCC_M","Derived_AJCC_Stage_Group","Derived_SS1977",
  "Derived_SS2000","Derived_AJCC_Flag","CS_Version_Input_Original","CS_Version_Derived",
  "CS_Version_Input_Current","RX_Summ_Surg_Prim_Site","RX_Summ_Scope_Reg_LN_Sur",
  "RX_Summ_Surg_Oth_Reg_Dis","RX_Summ_Reg_LN_Examined","Reason_for_no_surgery",
  "RX_Summ_Surgery_Type","RX_Summ_Scope_Reg_98-02","RX_Summ_Surg_Oth_98-02",
  "SEER_Record_Number","SEER_Type_of_Follow-up","Age_Recode_<1_Year_olds",
  "Site_Recode_ICD-O-3_WHO_2008","Recode_ICD-O-2_to_9","Recode_ICD-O-2_to_10",
  "ICCC_site_recode_ICD-O-3_WHO_2008","ICCC_site_rec_extended_ICD-O-3_WHO_2008",
  "Behavior_Recode_for_Analysis","Histology_Recode_Broad_Groupings",
  "Histology_Recode_Brain_Groupings","CS_Schema_v0204+_N_A_cs0204schema",
  "Race_recode_White_Black_Other","Race_recode_W_B_AI_API",
  "Origin_recode_NHIA_Hispanic_NonHisp","SEER_historic_stage_A",
  "AJCC_stage_3rd_edition_1988-2003","SEER_modified_AJCC_Stage_3rd_ed_1988-2003",
  "SEER_Summary_Stage_1977_1995-2000","SEER_Summary_Stage_2000_2001-2003",
  "First_malignant_primary_indicator","State-county_recode",
  "Cause_of_Death_to_SEER_site_recode","COD_to_site_rec_KM","Vital_Status_recode",
  "IHS_Link","Summary_stage_2000_1998+","AYA_site_recode_WHO_2008",
  "Lymphoma_subtype_recode_WHO_2008","SEER_Cause-Specific_Death_Classification",
  "SEER_Other_Cause_of_Death_Classification","CS_Tumor_Size_Ext_Eval",
  "CS_Lymph_Nodes_Eval","CS_Mets_Eval","Primary_by_international_rules",
  "ER_Status_Recode_Breast_Cancer_1990+","PR_Status_Recode_Breast_Cancer_1990+",
  "CS_Schema_-AJCC_6th_ed_previously_called_v1","CS_Site-Specific_Factor_8",
  "CS_Site-Specific_Factor_10","CS_Site-Specific_Factor_11","CS_Site-Specific_Factor_13",
  "CS_Site-Specific_Factor_15","CS_Site-Specific_Factor_16","Lymph_vascular_invasion",
  "Survival_months","Survival_months_flag","Insurance_recode_2007+","Derived_AJCC-7_T",
  "Derived_AJCC-7_N","Derived_AJCC-7_M","Derived_AJCC-7_Stage_Grp",
  "Breast_Adjusted_AJCC_6th_T_1988+","Breast_Adjusted_AJCC_6th_N_1988+",
  "Breast_Adjusted_AJCC_6th_M_1988+","Breast_Adjusted_AJCC_6th_Stage_1988+",
  "CS_Site-Specific_Factor_7","CS_Site-Specific_Factor_9","CS_Site-Specific_Factor_12",
  "Derived_HER2_Recode_2010+","Breast_Subtype_2010+","Lymphomas:_Ann_Arbor_Staging_1983+",
  "CS_Mets_at_Dx-Bone","CS_Mets_at_Dx-Brain","CS_Mets_at_Dx-Liver","CS_Mets_at_Dx-Lung",
  "T_value_-_based_on_AJCC_3rd_1988-2003","N_value_-_based_on_AJCC_3rd_1988-2003",
  "M_value_-_based_on_AJCC_3rd_1988-2003",
  "Total_Number_of_In_Situ_malignant_Tumors_for_Patient",
  "Total_Number_of_Benign_Borderline_Tumors_for_Patient")

# Select the modelling features by column position (presumably the
# demographic/clinical subset used downstream -- TODO confirm indices).
featureMappedDataset <- dataset[c(3:4,6:7,13,18:19,59,65,87,108:109,132)]
|
94faa25671ce7d0b32cce593f95ca041a30c1a4d
|
011f465997737cb91ac4ac32ee4b4611e8b6d0f3
|
/qc_integration.R
|
6e4e1f64285f041fbc1824894b6605fdf1f2cae9
|
[] |
no_license
|
puweilin/scRNAseq_PTC
|
f8ffb65bf7e4b8e20dec655c94184da85eaa8bb1
|
d6b1229386e53b8e4cc059c49b747c85e83c2a57
|
refs/heads/master
| 2023-07-30T23:26:48.464808
| 2021-09-18T04:55:49
| 2021-09-18T04:55:49
| 407,757,645
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,926
|
r
|
qc_integration.R
|
library(plyr)
library(dplyr)
library(sctransform)
library(Seurat)
library(ggplot2)
library(ggsci)
library(readr)
library(readxl)
library(DoubletFinder)

# Shared ggplot theme for all figures: classic look with explicit black
# axis lines, bold enlarged axis text, a title-less legend placed in the
# upper-left corner, and a centred plot title.
q <- theme_classic() +
  theme(
    panel.border = element_blank(),
    axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
    axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"),
    legend.position = c(0.15, 0.95),
    legend.title = element_blank(),
    legend.text = element_text(colour = "black", size = 16, face = "bold"),
    axis.text.x = element_text(size = 25, face = "bold"),
    axis.title.x = element_text(size = 25, face = "bold"),
    axis.text.y = element_text(size = 20, face = "bold"),
    axis.title.y = element_text(size = 25, face = "bold"),
    plot.title = element_text(hjust = 0.5, vjust = 0)
  )
#T1
# Per-sample QC pipeline, factored out of five near-identical copies
# (T1, P1, T2, P2, LN2l). Steps:
#   1. build the Seurat object and filter cells on gene count (500-5000)
#      and mitochondrial fraction (< 10%),
#   2. normalise / scale / select variable genes / PCA / graph clustering,
#   3. detect doublets with DoubletFinder (7.5% assumed doublet rate,
#      pK chosen by the best BC metric) and keep only singlets,
#   4. re-run the normalisation-to-clustering steps on the singlets.
#
# counts:      raw count matrix for the sample
# sample_name: label used as the Seurat project / orig.ident
# Returns the processed, singlet-only Seurat object.
run_sample_qc <- function(counts, sample_name) {
  obj <- CreateSeuratObject(counts = counts, min.cells = 3, min.features = 200,
                            project = sample_name)
  obj[["percent.mt"]] <- PercentageFeatureSet(object = obj, pattern = "^MT-")
  # print() is required here: unlike at top level, ggplot objects are not
  # auto-printed inside a function, and the original script relied on the
  # QC violin plot being displayed.
  print(VlnPlot(obj, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3))
  obj <- subset(obj, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10)
  obj <- cluster_cells(obj)

  # DoubletFinder: sweep pN/pK, pick the pK maximising the BC metric.
  sweep.data <- paramSweep_v3(obj, PCs = 1:50)
  sweep.stats <- summarizeSweep(sweep.data, GT = FALSE)
  bcmvn <- find.pK(sweep.stats)
  homotypic.prop <- modelHomotypic(obj@meta.data$RNA_snn_res.0.6)
  nExp_poi <- round(0.075 * length(obj$orig.ident))       # assumed 7.5% doublet rate
  nExp_poi.adj <- round(nExp_poi * (1 - homotypic.prop))  # adjust for homotypic doublets
  obj <- doubletFinder_v3(obj, PCs = 1:50, pN = 0.25,
                          pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
                          nExp = nExp_poi.adj, reuse.pANN = FALSE)
  # Column 8 of meta.data is the DF.classifications_* column produced by
  # doubletFinder_v3 -- assumes the standard meta.data layout after the
  # steps above (TODO confirm; selecting the column by name would be safer).
  doubletsID <- obj@meta.data[, 8]
  obj <- SubsetData(obj, cells = colnames(obj)[which(doubletsID == "Singlet")])

  # Re-process on singlets only.
  cluster_cells(obj)
}

# Normalise, scale, select 10k variable genes, run PCA on them, and
# cluster on the SNN graph (50 PCs, resolution 0.6). Note: ScaleData is
# run before FindVariableFeatures, matching the original script's order.
cluster_cells <- function(obj) {
  obj <- NormalizeData(obj)
  obj <- ScaleData(obj)
  obj <- FindVariableFeatures(obj, selection.method = "vst", nfeatures = 10000)
  obj <- RunPCA(obj, features = obj@assays$RNA@var.features)
  obj <- FindNeighbors(obj, reduction = "pca", dims = 1:50)
  FindClusters(obj, resolution = 0.6)
}

# Process each sample with the shared pipeline.
T1   <- run_sample_qc(T1.data, "T1")
P1   <- run_sample_qc(P1.data, "P1")
T2   <- run_sample_qc(T2.data, "T2")
P2   <- run_sample_qc(P2.data, "P2")
LN2l <- run_sample_qc(LN2l.data, "LN2l")
# --- T3, P3, LN3l, LN3r: per-sample QC -> cluster -> DoubletFinder ->
# keep singlets -> recluster. Identical workflow for each sample:
#   1. CreateSeuratObject (min 3 cells/gene, min 200 genes/cell)
#   2. QC filter: 500 < nFeature_RNA < 5000, percent.mt < 10
#   3. Normalize/Scale/HVG(vst, 10000)/PCA/SNN cluster (res 0.6)
#   4. DoubletFinder: pK sweep, 7.5% expected doublets adjusted for
#      homotypic proportion; keep "Singlet" cells only
#   5. Re-normalize and re-cluster the singlets
#T3
T3 = CreateSeuratObject(counts = T3.data, min.cells = 3, min.features = 200, project = "T3" )
T3[["percent.mt"]] = PercentageFeatureSet(object = T3, pattern = "^MT-")
VlnPlot(T3, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
T3 = subset(T3, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
T3 = NormalizeData(T3)
T3 = ScaleData(T3)
T3 = FindVariableFeatures(T3, selection.method = "vst", nfeatures = 10000)
T3 = RunPCA(T3, features = T3@assays$RNA@var.features)
T3 = FindNeighbors(T3, reduction = "pca", dims = 1:50)
T3 = FindClusters(T3, resolution = 0.6)
sweep.data = paramSweep_v3(T3, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(T3@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(T3$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
T3=doubletFinder_v3(T3, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = T3@meta.data[,8]
T3 = SubsetData(T3, cells = colnames(T3)[which(doubletsID == "Singlet")])
T3 = NormalizeData(T3)
T3 = ScaleData(T3)
T3 = FindVariableFeatures(T3, selection.method = "vst", nfeatures = 10000)
T3 = RunPCA(T3, features = T3@assays$RNA@var.features)
T3 = FindNeighbors(T3, reduction = "pca", dims = 1:50)
T3 = FindClusters(T3, resolution = 0.6)
#P3
P3 = CreateSeuratObject(counts = P3.data, min.cells = 3, min.features = 200, project = "P3" )
P3[["percent.mt"]] = PercentageFeatureSet(object = P3, pattern = "^MT-")
VlnPlot(P3, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
P3 = subset(P3, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
P3 = NormalizeData(P3)
P3 = ScaleData(P3)
P3 = FindVariableFeatures(P3, selection.method = "vst", nfeatures = 10000)
P3 = RunPCA(P3, features = P3@assays$RNA@var.features)
P3 = FindNeighbors(P3, reduction = "pca", dims = 1:50)
P3 = FindClusters(P3, resolution = 0.6)
sweep.data = paramSweep_v3(P3, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(P3@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(P3$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
P3=doubletFinder_v3(P3, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = P3@meta.data[,8]
P3 = SubsetData(P3, cells = colnames(P3)[which(doubletsID == "Singlet")])
P3 = NormalizeData(P3)
P3 = ScaleData(P3)
P3 = FindVariableFeatures(P3, selection.method = "vst", nfeatures = 10000)
P3 = RunPCA(P3, features = P3@assays$RNA@var.features)
P3 = FindNeighbors(P3, reduction = "pca", dims = 1:50)
P3 = FindClusters(P3, resolution = 0.6)
#LN3l
LN3l = CreateSeuratObject(counts = LN3l.data, min.cells = 3, min.features = 200, project = "LN3l" )
LN3l[["percent.mt"]] = PercentageFeatureSet(object = LN3l, pattern = "^MT-")
VlnPlot(LN3l, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN3l = subset(LN3l, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN3l = NormalizeData(LN3l)
LN3l = ScaleData(LN3l)
LN3l = FindVariableFeatures(LN3l, selection.method = "vst", nfeatures = 10000)
LN3l = RunPCA(LN3l, features = LN3l@assays$RNA@var.features)
LN3l = FindNeighbors(LN3l, reduction = "pca", dims = 1:50)
LN3l = FindClusters(LN3l, resolution = 0.6)
sweep.data = paramSweep_v3(LN3l, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN3l@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN3l$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN3l=doubletFinder_v3(LN3l, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = LN3l@meta.data[,8]
LN3l = SubsetData(LN3l, cells = colnames(LN3l)[which(doubletsID == "Singlet")])
LN3l = NormalizeData(LN3l)
LN3l = ScaleData(LN3l)
LN3l = FindVariableFeatures(LN3l, selection.method = "vst", nfeatures = 10000)
LN3l = RunPCA(LN3l, features = LN3l@assays$RNA@var.features)
LN3l = FindNeighbors(LN3l, reduction = "pca", dims = 1:50)
LN3l = FindClusters(LN3l, resolution = 0.6)
#LN3r
LN3r = CreateSeuratObject(counts = LN3r.data, min.cells = 3, min.features = 200, project = "LN3r" )
LN3r[["percent.mt"]] = PercentageFeatureSet(object = LN3r, pattern = "^MT-")
VlnPlot(LN3r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN3r = subset(LN3r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN3r = NormalizeData(LN3r)
LN3r = ScaleData(LN3r)
LN3r = FindVariableFeatures(LN3r, selection.method = "vst", nfeatures = 10000)
LN3r = RunPCA(LN3r, features = LN3r@assays$RNA@var.features)
LN3r = FindNeighbors(LN3r, reduction = "pca", dims = 1:50)
LN3r = FindClusters(LN3r, resolution = 0.6)
sweep.data = paramSweep_v3(LN3r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN3r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN3r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN3r=doubletFinder_v3(LN3r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = LN3r@meta.data[,8]
LN3r = SubsetData(LN3r, cells = colnames(LN3r)[which(doubletsID == "Singlet")])
LN3r = NormalizeData(LN3r)
LN3r = ScaleData(LN3r)
LN3r = FindVariableFeatures(LN3r, selection.method = "vst", nfeatures = 10000)
LN3r = RunPCA(LN3r, features = LN3r@assays$RNA@var.features)
LN3r = FindNeighbors(LN3r, reduction = "pca", dims = 1:50)
LN3r = FindClusters(LN3r, resolution = 0.6)
# --- SC4, T5, P5, LN5r: per-sample QC -> cluster -> DoubletFinder ->
# keep singlets -> recluster (same workflow as the other samples in this
# script: QC filter 500<nFeature<5000 & percent.mt<10; vst HVGs (10000);
# PCA dims 1:50; clustering res 0.6; 7.5% expected doublet rate adjusted
# for homotypic proportion).
#SC4
SC4 = CreateSeuratObject(counts = SC4.data, min.cells = 3, min.features = 200, project = "SC4" )
SC4[["percent.mt"]] = PercentageFeatureSet(object = SC4, pattern = "^MT-")
VlnPlot(SC4, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
SC4 = subset(SC4, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
SC4 = NormalizeData(SC4)
SC4 = ScaleData(SC4)
SC4 = FindVariableFeatures(SC4, selection.method = "vst", nfeatures = 10000)
SC4 = RunPCA(SC4, features = SC4@assays$RNA@var.features)
SC4 = FindNeighbors(SC4, reduction = "pca", dims = 1:50)
SC4 = FindClusters(SC4, resolution = 0.6)
sweep.data = paramSweep_v3(SC4, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(SC4@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(SC4$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
SC4=doubletFinder_v3(SC4, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = SC4@meta.data[,8]
SC4 = SubsetData(SC4, cells = colnames(SC4)[which(doubletsID == "Singlet")])
SC4 = NormalizeData(SC4)
SC4 = ScaleData(SC4)
SC4 = FindVariableFeatures(SC4, selection.method = "vst", nfeatures = 10000)
SC4 = RunPCA(SC4, features = SC4@assays$RNA@var.features)
SC4 = FindNeighbors(SC4, reduction = "pca", dims = 1:50)
SC4 = FindClusters(SC4, resolution = 0.6)
#T5
T5 = CreateSeuratObject(counts = T5.data, min.cells = 3, min.features = 200, project = "T5" )
T5[["percent.mt"]] = PercentageFeatureSet(object = T5, pattern = "^MT-")
VlnPlot(T5, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
T5 = subset(T5, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
T5 = NormalizeData(T5)
T5 = ScaleData(T5)
T5 = FindVariableFeatures(T5, selection.method = "vst", nfeatures = 10000)
T5 = RunPCA(T5, features = T5@assays$RNA@var.features)
T5 = FindNeighbors(T5, reduction = "pca", dims = 1:50)
T5 = FindClusters(T5, resolution = 0.6)
sweep.data = paramSweep_v3(T5, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(T5@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(T5$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
T5=doubletFinder_v3(T5, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = T5@meta.data[,8]
T5 = SubsetData(T5, cells = colnames(T5)[which(doubletsID == "Singlet")])
T5 = NormalizeData(T5)
T5 = ScaleData(T5)
T5 = FindVariableFeatures(T5, selection.method = "vst", nfeatures = 10000)
T5 = RunPCA(T5, features = T5@assays$RNA@var.features)
T5 = FindNeighbors(T5, reduction = "pca", dims = 1:50)
T5 = FindClusters(T5, resolution = 0.6)
#P5
P5 = CreateSeuratObject(counts = P5.data, min.cells = 3, min.features = 200, project = "P5" )
P5[["percent.mt"]] = PercentageFeatureSet(object = P5, pattern = "^MT-")
VlnPlot(P5, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
P5 = subset(P5, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
P5 = NormalizeData(P5)
P5 = ScaleData(P5)
P5 = FindVariableFeatures(P5, selection.method = "vst", nfeatures = 10000)
P5 = RunPCA(P5, features = P5@assays$RNA@var.features)
P5 = FindNeighbors(P5, reduction = "pca", dims = 1:50)
P5 = FindClusters(P5, resolution = 0.6)
sweep.data = paramSweep_v3(P5, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(P5@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(P5$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
P5=doubletFinder_v3(P5, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = P5@meta.data[,8]
P5 = SubsetData(P5, cells = colnames(P5)[which(doubletsID == "Singlet")])
P5 = NormalizeData(P5)
P5 = ScaleData(P5)
P5 = FindVariableFeatures(P5, selection.method = "vst", nfeatures = 10000)
P5 = RunPCA(P5, features = P5@assays$RNA@var.features)
P5 = FindNeighbors(P5, reduction = "pca", dims = 1:50)
P5 = FindClusters(P5, resolution = 0.6)
#LN5r
LN5r = CreateSeuratObject(counts = LN5r.data, min.cells = 3, min.features = 200, project = "LN5r" )
LN5r[["percent.mt"]] = PercentageFeatureSet(object = LN5r, pattern = "^MT-")
VlnPlot(LN5r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN5r = subset(LN5r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN5r = NormalizeData(LN5r)
LN5r = ScaleData(LN5r)
LN5r = FindVariableFeatures(LN5r, selection.method = "vst", nfeatures = 10000)
LN5r = RunPCA(LN5r, features = LN5r@assays$RNA@var.features)
LN5r = FindNeighbors(LN5r, reduction = "pca", dims = 1:50)
LN5r = FindClusters(LN5r, resolution = 0.6)
sweep.data = paramSweep_v3(LN5r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN5r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN5r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN5r=doubletFinder_v3(LN5r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = LN5r@meta.data[,8]
LN5r = SubsetData(LN5r, cells = colnames(LN5r)[which(doubletsID == "Singlet")])
LN5r = NormalizeData(LN5r)
LN5r = ScaleData(LN5r)
LN5r = FindVariableFeatures(LN5r, selection.method = "vst", nfeatures = 10000)
LN5r = RunPCA(LN5r, features = LN5r@assays$RNA@var.features)
LN5r = FindNeighbors(LN5r, reduction = "pca", dims = 1:50)
LN5r = FindClusters(LN5r, resolution = 0.6)
# --- LN6r: QC -> cluster -> DoubletFinder -> keep singlets -> recluster ---
# Same workflow as the other samples in this script.
#LN6r
LN6r = CreateSeuratObject(counts = LN6r.data, min.cells = 3, min.features = 200, project = "LN6r" )
LN6r[["percent.mt"]] = PercentageFeatureSet(object = LN6r, pattern = "^MT-")
# BUG FIX: was `VLNPlot(...)` -- no such function exists; Seurat's violin
# plot is `VlnPlot`, as used for every other sample. The typo would abort
# the script at this line.
VlnPlot(LN6r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
# QC thresholds: 500 < genes/cell < 5000, mitochondrial fraction < 10%.
LN6r = subset(LN6r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN6r = NormalizeData(LN6r)
LN6r = ScaleData(LN6r)
LN6r = FindVariableFeatures(LN6r, selection.method = "vst", nfeatures = 10000)
LN6r = RunPCA(LN6r, features = LN6r@assays$RNA@var.features)
LN6r = FindNeighbors(LN6r, reduction = "pca", dims = 1:50)
LN6r = FindClusters(LN6r, resolution = 0.6)
# DoubletFinder: pK sweep, 7.5% expected doublet rate adjusted for the
# homotypic doublet proportion; then keep predicted singlets only.
sweep.data = paramSweep_v3(LN6r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN6r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN6r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN6r=doubletFinder_v3(LN6r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = LN6r@meta.data[,8]
LN6r = SubsetData(LN6r, cells = colnames(LN6r)[which(doubletsID == "Singlet")])
# Re-run normalisation / HVG selection / PCA / clustering on singlets only.
LN6r = NormalizeData(LN6r)
LN6r = ScaleData(LN6r)
LN6r = FindVariableFeatures(LN6r, selection.method = "vst", nfeatures = 10000)
LN6r = RunPCA(LN6r, features = LN6r@assays$RNA@var.features)
LN6r = FindNeighbors(LN6r, reduction = "pca", dims = 1:50)
LN6r = FindClusters(LN6r, resolution = 0.6)
# --- LN7r, T8, P8, T9, P9: per-sample QC -> cluster -> DoubletFinder ->
# keep singlets -> recluster (same workflow as the other samples: QC filter
# 500<nFeature<5000 & percent.mt<10; vst HVGs (10000); PCA dims 1:50;
# clustering res 0.6; 7.5% expected doublet rate adjusted for homotypic
# proportion).
#LN7r
LN7r = CreateSeuratObject(counts = LN7r.data, min.cells = 3, min.features = 200, project = "LN7r" )
LN7r[["percent.mt"]] = PercentageFeatureSet(object = LN7r, pattern = "^MT-")
VlnPlot(LN7r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN7r = subset(LN7r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN7r = NormalizeData(LN7r)
LN7r = ScaleData(LN7r)
LN7r = FindVariableFeatures(LN7r, selection.method = "vst", nfeatures = 10000)
LN7r = RunPCA(LN7r, features = LN7r@assays$RNA@var.features)
LN7r = FindNeighbors(LN7r, reduction = "pca", dims = 1:50)
LN7r = FindClusters(LN7r, resolution = 0.6)
sweep.data = paramSweep_v3(LN7r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN7r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN7r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN7r=doubletFinder_v3(LN7r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = LN7r@meta.data[,8]
LN7r = SubsetData(LN7r, cells = colnames(LN7r)[which(doubletsID == "Singlet")])
LN7r = NormalizeData(LN7r)
LN7r = ScaleData(LN7r)
LN7r = FindVariableFeatures(LN7r, selection.method = "vst", nfeatures = 10000)
LN7r = RunPCA(LN7r, features = LN7r@assays$RNA@var.features)
LN7r = FindNeighbors(LN7r, reduction = "pca", dims = 1:50)
LN7r = FindClusters(LN7r, resolution = 0.6)
#T8
T8 = CreateSeuratObject(counts = T8.data, min.cells = 3, min.features = 200, project = "T8" )
T8[["percent.mt"]] = PercentageFeatureSet(object = T8, pattern = "^MT-")
VlnPlot(T8, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
T8 = subset(T8, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
T8 = NormalizeData(T8)
T8 = ScaleData(T8)
T8 = FindVariableFeatures(T8, selection.method = "vst", nfeatures = 10000)
T8 = RunPCA(T8, features = T8@assays$RNA@var.features)
T8 = FindNeighbors(T8, reduction = "pca", dims = 1:50)
T8 = FindClusters(T8, resolution = 0.6)
sweep.data = paramSweep_v3(T8, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(T8@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(T8$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
T8=doubletFinder_v3(T8, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = T8@meta.data[,8]
T8 = SubsetData(T8, cells = colnames(T8)[which(doubletsID == "Singlet")])
T8 = NormalizeData(T8)
T8 = ScaleData(T8)
T8 = FindVariableFeatures(T8, selection.method = "vst", nfeatures = 10000)
T8 = RunPCA(T8, features = T8@assays$RNA@var.features)
T8 = FindNeighbors(T8, reduction = "pca", dims = 1:50)
T8 = FindClusters(T8, resolution = 0.6)
#P8
P8 = CreateSeuratObject(counts = P8.data, min.cells = 3, min.features = 200, project = "P8" )
P8[["percent.mt"]] = PercentageFeatureSet(object = P8, pattern = "^MT-")
VlnPlot(P8, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
P8 = subset(P8, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
P8 = NormalizeData(P8)
P8 = ScaleData(P8)
P8 = FindVariableFeatures(P8, selection.method = "vst", nfeatures = 10000)
P8 = RunPCA(P8, features = P8@assays$RNA@var.features)
P8 = FindNeighbors(P8, reduction = "pca", dims = 1:50)
P8 = FindClusters(P8, resolution = 0.6)
sweep.data = paramSweep_v3(P8, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(P8@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(P8$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
P8=doubletFinder_v3(P8, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = P8@meta.data[,8]
P8 = SubsetData(P8, cells = colnames(P8)[which(doubletsID == "Singlet")])
P8 = NormalizeData(P8)
P8 = ScaleData(P8)
P8 = FindVariableFeatures(P8, selection.method = "vst", nfeatures = 10000)
P8 = RunPCA(P8, features = P8@assays$RNA@var.features)
P8 = FindNeighbors(P8, reduction = "pca", dims = 1:50)
P8 = FindClusters(P8, resolution = 0.6)
#T9
T9 = CreateSeuratObject(counts = T9.data, min.cells = 3, min.features = 200, project = "T9" )
T9[["percent.mt"]] = PercentageFeatureSet(object = T9, pattern = "^MT-")
VlnPlot(T9, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
T9 = subset(T9, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
T9 = NormalizeData(T9)
T9 = ScaleData(T9)
T9 = FindVariableFeatures(T9, selection.method = "vst", nfeatures = 10000)
T9 = RunPCA(T9, features = T9@assays$RNA@var.features)
T9 = FindNeighbors(T9, reduction = "pca", dims = 1:50)
T9 = FindClusters(T9, resolution = 0.6)
sweep.data = paramSweep_v3(T9, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(T9@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(T9$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
T9=doubletFinder_v3(T9, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = T9@meta.data[,8]
T9 = SubsetData(T9, cells = colnames(T9)[which(doubletsID == "Singlet")])
T9 = NormalizeData(T9)
T9 = ScaleData(T9)
T9 = FindVariableFeatures(T9, selection.method = "vst", nfeatures = 10000)
T9 = RunPCA(T9, features = T9@assays$RNA@var.features)
T9 = FindNeighbors(T9, reduction = "pca", dims = 1:50)
T9 = FindClusters(T9, resolution = 0.6)
#P9
P9 = CreateSeuratObject(counts = P9.data, min.cells = 3, min.features = 200, project = "P9" )
P9[["percent.mt"]] = PercentageFeatureSet(object = P9, pattern = "^MT-")
VlnPlot(P9, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
P9 = subset(P9, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
P9 = NormalizeData(P9)
P9 = ScaleData(P9)
P9 = FindVariableFeatures(P9, selection.method = "vst", nfeatures = 10000)
P9 = RunPCA(P9, features = P9@assays$RNA@var.features)
P9 = FindNeighbors(P9, reduction = "pca", dims = 1:50)
P9 = FindClusters(P9, resolution = 0.6)
sweep.data = paramSweep_v3(P9, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(P9@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(P9$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
P9=doubletFinder_v3(P9, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = P9@meta.data[,8]
P9 = SubsetData(P9, cells = colnames(P9)[which(doubletsID == "Singlet")])
P9 = NormalizeData(P9)
P9 = ScaleData(P9)
P9 = FindVariableFeatures(P9, selection.method = "vst", nfeatures = 10000)
P9 = RunPCA(P9, features = P9@assays$RNA@var.features)
P9 = FindNeighbors(P9, reduction = "pca", dims = 1:50)
P9 = FindClusters(P9, resolution = 0.6)
# --- T10: QC -> cluster -> IGKC filter -> DoubletFinder -> recluster ---
# This sample deviates from the shared workflow in three ways; all three
# look deliberate but should be confirmed:
#   * nfeatures = 2000 here (all other samples use 10000) -- TODO confirm
#   * an extra IGKC-based cell filter before DoubletFinder (see below)
#   * expected doublet rate 0.2 (others use 0.075) -- TODO confirm
#T10
T10 = CreateSeuratObject(counts = T10.data, min.cells = 3, min.features = 200, project = "T10" )
T10[["percent.mt"]] = PercentageFeatureSet(object = T10, pattern = "^MT-")
VlnPlot(T10, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
T10 = subset(T10, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
T10 = NormalizeData(T10)
T10 = ScaleData(T10)
# NOTE(review): 2000 HVGs here vs 10000 for every other sample.
T10 = FindVariableFeatures(T10, selection.method = "vst", nfeatures = 2000)
T10 = RunPCA(T10, features = T10@assays$RNA@var.features)
T10 = FindNeighbors(T10, reduction = "pca", dims = 1:50)
T10 = FindClusters(T10, resolution = 0.6)
# NOTE(review): drops cells with IGKC expression above 1 -- presumably to
# remove B/plasma-cell contamination specific to this sample; verify intent.
T10 = SubsetData(T10, subset.name = "IGKC", high.threshold = 1)
sweep.data = paramSweep_v3(T10, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(T10@meta.data$RNA_snn_res.0.6)
# NOTE(review): 20% expected doublet rate vs 7.5% for other samples.
nExp_poi=round(0.2*length(T10$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
T10=doubletFinder_v3(T10, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = T10@meta.data[,8]
T10 = SubsetData(T10, cells = colnames(T10)[which(doubletsID == "Singlet")])
T10 = NormalizeData(T10)
T10 = ScaleData(T10)
T10 = FindVariableFeatures(T10, selection.method = "vst", nfeatures = 10000)
T10 = RunPCA(T10, features = T10@assays$RNA@var.features)
T10 = FindNeighbors(T10, reduction = "pca", dims = 1:50)
T10 = FindClusters(T10, resolution = 0.6)
# --- LN10r, SC11, LN11r: per-sample QC -> cluster -> DoubletFinder ->
# keep singlets -> recluster (same workflow as the other samples: QC filter
# 500<nFeature<5000 & percent.mt<10; vst HVGs (10000); PCA dims 1:50;
# clustering res 0.6; 7.5% expected doublet rate adjusted for homotypic
# proportion).
#LN10r
LN10r = CreateSeuratObject(counts = LN10r.data, min.cells = 3, min.features = 200, project = "LN10r" )
LN10r[["percent.mt"]] = PercentageFeatureSet(object = LN10r, pattern = "^MT-")
VlnPlot(LN10r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN10r = subset(LN10r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN10r = NormalizeData(LN10r)
LN10r = ScaleData(LN10r)
LN10r = FindVariableFeatures(LN10r, selection.method = "vst", nfeatures = 10000)
LN10r = RunPCA(LN10r, features = LN10r@assays$RNA@var.features)
LN10r = FindNeighbors(LN10r, reduction = "pca", dims = 1:50)
LN10r = FindClusters(LN10r, resolution = 0.6)
sweep.data = paramSweep_v3(LN10r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN10r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN10r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN10r=doubletFinder_v3(LN10r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
# NOTE(review): positional meta.data column 8 assumed to hold DF calls.
doubletsID = LN10r@meta.data[,8]
LN10r = SubsetData(LN10r, cells = colnames(LN10r)[which(doubletsID == "Singlet")])
LN10r = NormalizeData(LN10r)
LN10r = ScaleData(LN10r)
LN10r = FindVariableFeatures(LN10r, selection.method = "vst", nfeatures = 10000)
LN10r = RunPCA(LN10r, features = LN10r@assays$RNA@var.features)
LN10r = FindNeighbors(LN10r, reduction = "pca", dims = 1:50)
LN10r = FindClusters(LN10r, resolution = 0.6)
#SC11
SC11 = CreateSeuratObject(counts = SC11.data, min.cells = 3, min.features = 200, project = "SC11" )
SC11[["percent.mt"]] = PercentageFeatureSet(object = SC11, pattern = "^MT-")
VlnPlot(SC11, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
SC11 = subset(SC11, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
SC11 = NormalizeData(SC11)
SC11 = ScaleData(SC11)
SC11 = FindVariableFeatures(SC11, selection.method = "vst", nfeatures = 10000)
SC11 = RunPCA(SC11, features = SC11@assays$RNA@var.features)
SC11 = FindNeighbors(SC11, reduction = "pca", dims = 1:50)
SC11 = FindClusters(SC11, resolution = 0.6)
sweep.data = paramSweep_v3(SC11, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(SC11@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(SC11$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
SC11=doubletFinder_v3(SC11, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = SC11@meta.data[,8]
SC11 = SubsetData(SC11, cells = colnames(SC11)[which(doubletsID == "Singlet")])
SC11 = NormalizeData(SC11)
SC11 = ScaleData(SC11)
SC11 = FindVariableFeatures(SC11, selection.method = "vst", nfeatures = 10000)
SC11 = RunPCA(SC11, features = SC11@assays$RNA@var.features)
SC11 = FindNeighbors(SC11, reduction = "pca", dims = 1:50)
SC11 = FindClusters(SC11, resolution = 0.6)
#LN11r
LN11r = CreateSeuratObject(counts = LN11r.data, min.cells = 3, min.features = 200, project = "LN11r" )
LN11r[["percent.mt"]] = PercentageFeatureSet(object = LN11r, pattern = "^MT-")
VlnPlot(LN11r, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
LN11r = subset(LN11r, subset = nFeature_RNA > 500 & nFeature_RNA < 5000 & percent.mt < 10 )
LN11r = NormalizeData(LN11r)
LN11r = ScaleData(LN11r)
LN11r = FindVariableFeatures(LN11r, selection.method = "vst", nfeatures = 10000)
LN11r = RunPCA(LN11r, features = LN11r@assays$RNA@var.features)
LN11r = FindNeighbors(LN11r, reduction = "pca", dims = 1:50)
LN11r = FindClusters(LN11r, resolution = 0.6)
sweep.data = paramSweep_v3(LN11r, PCs = 1:50)
sweep.stats = summarizeSweep(sweep.data, GT = FALSE)
bcmvn= find.pK(sweep.stats)
homotypic.prop=modelHomotypic(LN11r@meta.data$RNA_snn_res.0.6)
nExp_poi=round(0.075*length(LN11r$orig.ident))
nExp_poi.adj=round(nExp_poi*(1-homotypic.prop))
LN11r=doubletFinder_v3(LN11r, PCs = 1:50, pN = 0.25, pK = as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)])),
nExp = nExp_poi.adj, reuse.pANN = FALSE)
doubletsID = LN11r@meta.data[,8]
LN11r = SubsetData(LN11r, cells = colnames(LN11r)[which(doubletsID == "Singlet")])
LN11r = NormalizeData(LN11r)
LN11r = ScaleData(LN11r)
LN11r = FindVariableFeatures(LN11r, selection.method = "vst", nfeatures = 10000)
LN11r = RunPCA(LN11r, features = LN11r@assays$RNA@var.features)
LN11r = FindNeighbors(LN11r, reduction = "pca", dims = 1:50)
LN11r = FindClusters(LN11r, resolution = 0.6)
# --- Merge all per-sample objects into one dataset and cluster jointly ---
# Prefix each cell barcode with its sample id so barcodes stay unique
# after merging.
T1 = RenameCells(T1, add.cell.id = "T1")
P1 = RenameCells(P1, add.cell.id = "P1")
T2 = RenameCells(T2, add.cell.id = "T2")
P2 = RenameCells(P2, add.cell.id = "P2")
LN2l = RenameCells(LN2l, add.cell.id = "LN2l")
T3 = RenameCells(T3, add.cell.id = "T3")
P3 = RenameCells(P3, add.cell.id = "P3")
LN3l = RenameCells(LN3l, add.cell.id = "LN3l")
LN3r = RenameCells(LN3r, add.cell.id = "LN3r")
SC4 = RenameCells(SC4, add.cell.id = "SC4")
T5 = RenameCells(T5, add.cell.id = "T5")
P5 = RenameCells(P5, add.cell.id = "P5")
LN5r = RenameCells(LN5r, add.cell.id = "LN5r")
LN6r = RenameCells(LN6r, add.cell.id = "LN6r")
LN7r = RenameCells(LN7r, add.cell.id = "LN7r")
T8 = RenameCells(T8, add.cell.id = "T8")
P8 = RenameCells(P8, add.cell.id = "P8")
T9 = RenameCells(T9, add.cell.id = "T9")
P9 = RenameCells(P9, add.cell.id = "P9")
T10 = RenameCells(T10, add.cell.id = "T10")
LN10r = RenameCells(LN10r, add.cell.id = "LN10r")
SC11 = RenameCells(SC11, add.cell.id = "SC11")
LN11r = RenameCells(LN11r, add.cell.id = "LN11r")
# Merge the 23 singlet-filtered samples into one Seurat object.
THCA = merge(T1, c(P1, T2, P2, LN2l, T3, P3, LN3l, LN3r, SC4,
T5, P5, LN5r, LN6r, LN7r, T8, P8, T9,
P9, T10, LN10r, SC11, LN11r), project = "THCA")
# Joint workflow on the merged object: note 5000 HVGs and dims 1:20 here,
# in contrast to the per-sample settings (10000 HVGs, dims 1:50).
THCA = NormalizeData(THCA)
THCA = FindVariableFeatures(THCA, selection.method = "vst", nfeatures = 5000)
THCA = ScaleData(THCA)
THCA = RunPCA(THCA, npcs = 50)
THCA = RunUMAP(THCA, reduction = "pca", dims = 1:20)
THCA = FindNeighbors(THCA, reduction = "pca", dims = 1:20)
THCA = FindClusters(THCA, resolution = 1)
#UMAP visualization
# Labelled UMAP; the cols vector concatenates several ggsci palettes
# (pal_npg/pal_aaas/pal_jama/pal_lancet) to get enough cluster colours.
DimPlot(THCA, reduction = "umap", pt.size = 0.6, label =TRUE, repel = TRUE, label.size = 5,
cols = c(pal_npg()(10), pal_aaas()(7), pal_jama()(7), pal_lancet()(7))) + NoLegend()
|
5d49a0c97f2977546e82c180bf86d485d3083e5d
|
49d6ea16082a529fb78af5fb63cca6b75820ca3f
|
/Avocado_Visualizations/Avocado_Visualizations.R
|
ddb2b504ffd668091b162c508019b94e8ff14c5e
|
[] |
no_license
|
Monica-Kulkarni/Avocado-pricing-forecast
|
a44d3260f722fc62481bd694dc9fdab1a8cc4ace
|
8c886a466836848dd0b1d31f83efc474eafc1e6d
|
refs/heads/master
| 2020-06-18T22:35:37.030777
| 2019-07-14T00:28:02
| 2019-07-14T00:28:02
| 196,477,398
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,313
|
r
|
Avocado_Visualizations.R
|
# --- Data load and dependencies ---------------------------------------------
# NOTE(review): this path looks like it names a file ("avocado.csv") rather
# than a directory -- confirm the intended working directory.
setwd("C:/Users/Monica Kulkarni/Downloads/avocado.csv")
df <- read.csv("avocado.csv")
original_df <- df  # untouched copy used for the seasonal analysis below
# NOTE(review): with R >= 4.0 read.csv no longer auto-factors strings, so
# this may return NULL -- verify 'type' is a factor if its levels matter.
levels(df$type)
# Load all plotting/analysis dependencies quietly.
# FIX: tidyverse and ggpubr were previously loaded twice; duplicates removed.
# First-occurrence order is preserved so function masking is unchanged.
pkgs <- c(
  "tidyverse", "skimr", "GGally", "viridis", "caret", "e1071", "rpart",
  "xgboost", "corrplot", "corrgram", "ggplot2", "ggthemes", "psych",
  "scales", "treemap", "repr", "cowplot", "magrittr", "ggpubr",
  "RColorBrewer", "plotrix", "ggrepel", "gridExtra", "lubridate",
  "tibbletime", "reshape2", "tidyr", "grid", "smooth", "forecast", "fpp2"
)
for (pkg in pkgs) {
  suppressPackageStartupMessages(library(pkg, character.only = TRUE))
}
ggplot(df, aes(x=AveragePrice, fill=type)) + geom_density() + facet_wrap(~type) + theme_gray() +
theme(plot.title=element_text(hjust=0.5), legend.position="bottom") + labs(title="Avocado Price by Type") + scale_fill_brewer(palette="Dark2")
vol_type <- df %>% group_by(type) %>% summarise(avg.vol=mean(Total.Volume)) %>% mutate(pct=prop.table(avg.vol) * 100)
vol_type
# Change the date column from factor to date
df$Date <- as.Date(df$Date, "%Y-%m-%d")
df[ order(df$Date , decreasing = ),]
class(df$Date)
# Derive calendar fields (year, month, year-month, abbreviated month) used by
# all of the seasonality plots below.
seasonal_df <- original_df
parsed_date <- as.Date(original_df$Date)
seasonal_df$month_year <- format(parsed_date, "%Y-%m")
seasonal_df$month <- format(parsed_date, "%m")
seasonal_df$year <- format(parsed_date, "%Y")
# month.abb indexed by month number yields the abbreviated name; making it a
# factor with levels Jan..Dec keeps calendar order on plot axes.
seasonal_df$monthabb <- month.abb[as.numeric(seasonal_df$month)]
seasonal_df$monthabb <- factor(seasonal_df$monthabb, levels = month.abb)
# Distribution of prices by year, one density panel per year.
ggplot(seasonal_df, aes(x = AveragePrice, fill = as.factor(year))) +
  geom_density(alpha = .5) +
  theme_economist() +
  facet_wrap(~ year) + theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#F9E79F")) +
  guides(fill = FALSE) + labs(title="Distribution of Prices by year", x = 'Average Price', y = 'Density') +
  scale_fill_manual(values=c("#2E64FE", "#40FF00", "#FE642E", "#FE2E2E"))
# Detecting seasonality patterns: one helper builds the monthly average-price
# chart so the conventional and organic versions stay identical in style.
monthly_price_trend <- function(avo_type, chart_title) {
  seasonal_df %>%
    select(monthabb, AveragePrice, type) %>%
    filter(type == avo_type) %>%
    group_by(monthabb) %>%
    summarize(avg = mean(AveragePrice)) %>%
    ggplot(aes(x = monthabb, y = avg)) +
    geom_point(color = "#F35D5D", aes(size = avg)) +
    geom_line(group = 2, color = "#000000") +
    theme_economist() +
    theme(legend.position = "none", plot.title = element_text(hjust = 0.5),
          plot.background = element_rect(fill = "#9aeafe")) +
    labs(title = chart_title, x = "Month", y = "Average Price")
}
conv_patterns <- monthly_price_trend("conventional", "Conventional Avocados")
org_patterns <- monthly_price_trend("organic", "Organic Avocados")
plot_grid(conv_patterns, org_patterns, nrow=2)
# Is the seasonality pattern maintained in each individual year?
options(repr.plot.width=8, repr.plot.height=6)
# FIX: `year == c("2015","2016","2017")` recycled the comparison vector and
# silently dropped ~2/3 of the rows; %in% keeps every row from those years.
conv_pat_yearly <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "conventional", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>% summarize(avg=mean(AveragePrice)) %>%
  ggplot(aes(x=monthabb, y=avg)) + geom_point(color="#5D6D7E") + geom_line(group=1, color="#1b1efe") + facet_wrap(~as.factor(year)) +
  theme_cowplot() + theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#fed19a"), axis.text.x = element_text(angle = 90)) +
  labs(title="Seasonal Fluctuations \n Conventional Avocados", x="Month", y="Average Price")  # FIX: "Convenctional" typo
org_pat_yearly <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "organic", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>% summarize(avg=mean(AveragePrice)) %>%
  ggplot(aes(x=monthabb, y=avg)) + geom_point(color="#5D6D7E") + geom_line(group=1, color="#E74C3C") + facet_wrap(~as.factor(year)) +
  theme_cowplot() + theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#fed19a"), axis.text.x = element_text(angle = 90)) +
  labs(title="Seasonal Fluctuations \n Organic Avocados", x="Month", y="Average Price")
plot_grid(conv_pat_yearly, org_pat_yearly, nrow=2)
# Measuring standard deviation per month through each year by type of avocado.
# FIX: `year == c(...)` recycled the comparison vector (keeping only every
# third row); %in% retains all rows from the three years.
std_conv <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "conventional", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>% summarize(std=sd(AveragePrice)) %>%
  ggplot(aes(x=monthabb, y=std)) +
  geom_point(aes(size=std), col="#5A96C6") +
  geom_segment(aes(x=monthabb,
                   xend=monthabb,
                   y=min(std),
                   yend=max(std)),
               linetype="dashed",
               size=0.1) +
  coord_flip() +
  facet_wrap(~year) +
  theme_tufte() +
  theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#ffd6f1"), legend.position="none") +
  labs(title="Conventional Avocados \n Price Volatility",x="Months", y="Standard Deviation")
std_org <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "organic", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>% summarize(std=sd(AveragePrice)) %>%
  ggplot(aes(x=monthabb, y=std)) +
  geom_point(aes(size=std), col="#5AC67C") +
  geom_segment(aes(x=monthabb,
                   xend=monthabb,
                   y=min(std),
                   yend=max(std)),
               linetype="dashed",
               size=0.1) +
  coord_flip() +
  facet_wrap(~year) +
  theme_tufte() +
  theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#ffd6f1"), legend.position="none") +
  labs(title="Organic Avocados \n Price Volatility",x="Months", y="Standard Deviation")
plot_grid(std_conv, std_org, nrow=2)
# Let's have a closer look how the price changes per month.
# filter by type and year
options(repr.plot.width=10, repr.plot.height=8)
# Standard error of the mean: sd / sqrt(n), algebraically identical to the
# sqrt(var(x) / length(x)) form.
se <- function(x) sd(x) / sqrt(length(x))
# Polar (circular) bar charts of price by month, faceted per year.
# FIX: `year == c(...)` recycled and silently dropped rows; use %in%.
conv <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "conventional", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>%
  ggplot(aes(x=monthabb, y=AveragePrice, fill=monthabb), color="white") + geom_bar(width=1, stat='identity') +
  geom_errorbar(aes(ymin = AveragePrice - se(AveragePrice),
                    ymax = AveragePrice + se(AveragePrice),
                    color = monthabb),
                width = .2) + scale_y_continuous(breaks = 0:nlevels(seasonal_df$monthabb)) +
  facet_wrap(~year) + theme_minimal() +
  theme(axis.ticks = element_blank(),
        axis.text.y=element_blank(),
        axis.title = element_blank(),
        axis.line = element_blank(),
        plot.background=element_rect(fill="#FFF1E0"),
        legend.position="none", plot.title=element_text(hjust=0.5)) +
  coord_polar() + labs(title="Seasonal cycle \n Conventional Avocados") +
  scale_fill_manual(values=c('#57FCE0', '#57A6FC', '#3C546E', '#4AFA76', '#95CFA4', '#C0E436', '#F2A42D', '#F25F2D', '#F2442D',
                             '#AB4949', '#4950AB', '#4974AB'))
org <- seasonal_df %>% select(year, monthabb, AveragePrice, type) %>% filter(type == "organic", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>%
  ggplot(aes(x=monthabb, y=AveragePrice, fill=monthabb), color="white") + geom_bar(width=1, stat='identity') +
  geom_errorbar(aes(ymin = AveragePrice - se(AveragePrice),
                    ymax = AveragePrice + se(AveragePrice),
                    color = monthabb),
                width = .2) + scale_y_continuous(breaks = 0:nlevels(seasonal_df$monthabb)) +
  facet_wrap(~year) + theme_minimal() +
  theme(axis.ticks = element_blank(),
        axis.text.y=element_blank(),
        axis.title = element_blank(),
        axis.line = element_blank(),
        plot.background=element_rect(fill="#FFF1E0"),
        legend.position="none", plot.title=element_text(hjust=0.5)) +
  # FIX: this panel was mislabeled "Conventional Avocados" (copy-paste error)
  coord_polar() + labs(title="Seasonal cycle \n Organic Avocados") +
  scale_fill_manual(values=c('#57FCE0', '#57A6FC', '#3C546E', '#4AFA76', '#95CFA4', '#C0E436', '#F2A42D', '#F25F2D', '#F2442D',
                             '#AB4949', '#4950AB', '#4974AB'))
grid.arrange(conv, org, nrow = 2)
########################################################################################
# Year-over-year price changes for CONVENTIONAL avocados.
options(repr.plot.width=10, repr.plot.height=7)
# FIX: `year == c(...)` recycled the comparison vector; %in% keeps all rows.
r_avg <- seasonal_df %>% group_by(year, monthabb) %>% select(type, year, monthabb, AveragePrice) %>%
  filter(type == "conventional", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>%
  summarize(avg=mean(AveragePrice))
# NOTE(review): spread_() is deprecated in modern tidyr (pivot_wider is the
# replacement); kept as-is to avoid depending on the installed tidyr version.
structured_data <- spread_(r_avg, key="year", value="avg")
colnames(structured_data) <- c("Months", "First_year", "Second_year", "Third_year")
# Relative price change between consecutive years
structured_data$first_pct <- NA
structured_data$second_pct <- NA
structured_data$first_pct <- (structured_data$Second_year - structured_data$First_year)/structured_data$First_year
structured_data$second_pct <- (structured_data$Third_year - structured_data$Second_year)/structured_data$Second_year
structured_data<- structured_data %>%
  mutate(first_cond=ifelse(first_pct > 0, "Positive", "Negative"),
         second_cond=ifelse(second_pct > 0, "Positive", "Negative"))
# Dumbbell chart: 2015 vs 2016 monthly prices
firstp_change <- ggplot(structured_data) +
  geom_segment( aes(x=Months, xend=Months, y=First_year, yend=Second_year), color="#6E6A6A") +
  geom_point( aes(x=Months, y=First_year), color="#F74B4B", size=3 ) +
  geom_point( aes(x=Months, y=Second_year),color="#36ACD7", size=3 ) +
  coord_flip()+
  theme_economist() +
  theme(
    legend.position = "top",
    plot.title=element_text(hjust=0.5),
    plot.background=element_rect(fill="#F4F6F7")
  ) +
  labs(title="Conventional Avocado Price changes \n (2015 - 2016)", x="Months", y="Price",
       caption="Red: Year of 2015, Blue: Year of 2016")
# Dumbbell chart: 2016 vs 2017 monthly prices
secondp_change <- ggplot(structured_data) +
  geom_segment( aes(x=Months, xend=Months, y=Second_year, yend=Third_year), color="#6E6A6A") +
  geom_point( aes(x=Months, y=Second_year), color="#36ACD7", size=3 ) +
  geom_point( aes(x=Months, y=Third_year), color="#58FA58", size=3 ) +
  coord_flip()+
  theme_economist() +
  theme(
    legend.position = "top",
    plot.title=element_text(hjust=0.5),
    plot.background=element_rect(fill="#F4F6F7")
  ) +
  labs(title="Conventional Avocado Price changes \n (2016 - 2017)", x="Months", y="Price",
       caption="Blue: Year of 2016, Green: Year of 2017" )
# Bar charts of the signed percent difference per month
first_pct_dif <- structured_data %>% select(Months, first_pct, first_cond) %>%
  ggplot(aes(fill=first_cond)) + geom_bar(stat='identity', aes(x=Months, y=round(first_pct,2) * 100), color="black") +
  theme_economist() + theme(axis.text.x=element_text(angle=90), plot.background=element_rect(fill="#F4F6F7"), legend.position="bottom") +
  labs(x="Month", y="% Difference") +
  guides(fill=guide_legend(title="Diff Status")) + scale_fill_manual(values=c("#FB4D42", "#ADE175"))
second_pct_dif <- structured_data %>% select(Months, second_pct, second_cond) %>%
  ggplot(aes(fill=second_cond)) + geom_bar(stat='identity', aes(x=Months, y=round(second_pct,2) * 100), color="black") +
  theme_economist() +
  theme(axis.text.x=element_text(angle=90), plot.background=element_rect(fill="#F4F6F7"), legend.position="bottom") + labs(x="Month", y="% Difference") +
  guides(fill=guide_legend(title="Diff Status")) + scale_fill_manual(values=c("#FB4D42", "#ADE175"))
plot_grid(firstp_change, secondp_change, first_pct_dif, second_pct_dif, nrow=2, ncol=2)
##########################################################################################
# Organic avocados: year-over-year price changes (parallel to the
# conventional block above).
# FIX: `year == c(...)` recycled the comparison vector; %in% keeps all rows.
r_avg_org <- seasonal_df %>% group_by(year, monthabb) %>% select(type, year, monthabb, AveragePrice) %>%
  filter(type == "organic", year %in% c("2015", "2016", "2017")) %>%
  group_by(year, monthabb) %>%
  summarize(avg=mean(AveragePrice))
structured_data_org <- spread_(r_avg_org, key="year", value="avg")
colnames(structured_data_org) <- c("Months", "First_year", "Second_year", "Third_year")
structured_data_org$first_pct <- NA
structured_data_org$second_pct <- NA
# FIX: the denominators previously referenced the CONVENTIONAL table
# (structured_data$...) -- a copy-paste bug; use the organic table throughout.
structured_data_org$first_pct <- (structured_data_org$Second_year - structured_data_org$First_year)/structured_data_org$First_year
structured_data_org$second_pct <- (structured_data_org$Third_year - structured_data_org$Second_year)/structured_data_org$Second_year
structured_data_org<- structured_data_org %>%
  mutate(first_cond=ifelse(first_pct > 0, "Positive", "Negative"),
         second_cond=ifelse(second_pct > 0, "Positive", "Negative"))
# Dumbbell chart: 2015 vs 2016 monthly prices
firstp_change_org <- ggplot(structured_data_org) +
  geom_segment( aes(x=Months, xend=Months, y=First_year, yend=Second_year), color="#6E6A6A") +
  geom_point( aes(x=Months, y=First_year), color="#F74B4B", size=3 ) +
  geom_point( aes(x=Months, y=Second_year),color="#36ACD7", size=3 ) +
  coord_flip()+
  theme_economist() +
  theme(
    legend.position = "top",
    plot.title=element_text(hjust=0.5),
    plot.background=element_rect(fill="#e5d6ff")
  ) +
  labs(title="Organic Avocado Price changes \n (2015 - 2016)", x="Months", y="Price",
       caption="Red: Year of 2015, Blue: Year of 2016")
# Dumbbell chart: 2016 vs 2017 monthly prices
secondp_change_org <- ggplot(structured_data_org) +
  geom_segment( aes(x=Months, xend=Months, y=Second_year, yend=Third_year), color="#6E6A6A") +
  geom_point( aes(x=Months, y=Second_year), color="#36ACD7", size=3 ) +
  geom_point( aes(x=Months, y=Third_year), color="#58FA58", size=3 ) +
  coord_flip()+
  theme_economist() +
  theme(
    legend.position = "top",
    plot.title=element_text(hjust=0.5),
    plot.background=element_rect(fill="#e5d6ff")
  ) +
  labs(title="Organic Avocado Price changes \n (2016 - 2017)", x="Months", y="Price",
       caption="Blue: Year of 2016, Green: Year of 2017" )
# Bar charts of the signed percent difference per month
first_pct_dif_org <- structured_data_org %>% select(Months, first_pct, first_cond) %>%
  ggplot(aes(fill=first_cond)) + geom_bar(stat='identity', aes(x=Months, y=round(first_pct,2) * 100), color="black") +
  theme_economist() + theme(axis.text.x=element_text(angle=90), plot.background=element_rect(fill="#e5d6ff"), legend.position="bottom") +
  labs(x="Month", y="% Difference") +
  guides(fill=guide_legend(title="Diff Status")) + scale_fill_manual(values=c("#FB4D42", "#ADE175"))
second_pct_dif_org <- structured_data_org %>% select(Months, second_pct, second_cond) %>%
  ggplot(aes(fill=second_cond)) + geom_bar(stat='identity', aes(x=Months, y=round(second_pct,2) * 100), color="black") +
  theme_economist() +
  theme(axis.text.x=element_text(angle=90), plot.background=element_rect(fill="#e5d6ff"), legend.position="bottom") + labs(x="Month", y="% Difference") +
  guides(fill=guide_legend(title="Diff Status")) + scale_fill_manual(values=c("#FB4D42", "#ADE175"))
plot_grid(firstp_change_org, secondp_change_org, first_pct_dif_org, second_pct_dif_org, nrow=2, ncol=2)
########################################################################################
options(repr.plot.width=8, repr.plot.height=6)
# Map month numbers to meteorological seasons, then plot average price per
# season and year.
seasonal_df$season <- ifelse(seasonal_df$month %in% c("03", "04","05"), "Spring",
                             ifelse(seasonal_df$month %in% c("06","07" ,"08"), "Summer",
                                    ifelse(seasonal_df$month %in% c("09","10","11"), "Fall", "Winter")))
# FIX: `year == c(...)` recycled the comparison vector and dropped rows; %in%
# keeps all rows from the three years.
seasonality.plot.conventional <- seasonal_df %>% select(season, year, AveragePrice, type) %>%
  filter(type == "conventional", year %in% c("2015", "2016", "2017")) %>%
  group_by(season, year) %>%
  summarize(avg=mean(AveragePrice)) %>% ggplot(aes(x=season, y=avg, color=season)) + geom_point(size=3) +
  geom_segment(aes(x=season,
                   xend=season,
                   y=0,
                   yend=avg)) +
  coord_flip() + facet_wrap(~as.factor(year)) + theme_minimal() +
  theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#F4F6F7")) +
  scale_color_manual(values=c("#a06a31", "#9bd16b", "#d1706b", "#3bbf9e")) +
  labs(title="Conventional Avocados by Season", x="Season", y="Average Price") +
  geom_text(aes(x=season, y=0.01, label= paste0("$ ", round(avg,2))),
            hjust=-0.5, vjust=-0.5, size=4,
            colour="black", fontface="italic",
            angle=360)
seasonality.plot.organic <- seasonal_df %>% select(season, year, AveragePrice, type) %>%
  filter(type == "organic", year %in% c("2015", "2016", "2017")) %>%
  group_by(season, year) %>%
  summarize(avg=mean(AveragePrice)) %>% ggplot(aes(x=season, y=avg, color=season)) + geom_point(size=3) +
  geom_segment(aes(x=season,
                   xend=season,
                   y=0,
                   yend=avg)) +
  coord_flip() + facet_wrap(~as.factor(year)) + theme_minimal() +
  theme(plot.title=element_text(hjust=0.5), plot.background=element_rect(fill="#F4F6F7")) +
  scale_color_manual(values=c("#a06a31", "#9bd16b", "#d1706b", "#3bbf9e")) +
  labs(title="Organic Avocados by Season", x="Season", y="Average Price") +
  geom_text(aes(x=season, y=0.01, label= paste0("$ ", round(avg,2))),
            hjust=-0.5, vjust=-0.5, size=4,
            colour="black", fontface="italic",
            angle=360)
plot_grid(seasonality.plot.conventional, seasonality.plot.organic, nrow=2)
##########################################################################################
|
15ee8a16f222133eab6a9b09308aa9a6e711c495
|
1bb525ea09cc4dee92174a1c20719b8ced46aafd
|
/man/helloworld.Rd
|
4695623e3571924a9738db05b29091bdc9d4c0af
|
[] |
no_license
|
rflodin/testpkg
|
d1fcc217425c342862912042cb27765f4aec8a2a
|
67984f5ba6e9eb865510d013782270e88bf42281
|
refs/heads/master
| 2022-11-29T23:06:52.807031
| 2020-07-28T23:20:12
| 2020-07-28T23:20:12
| 283,348,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
rd
|
helloworld.Rd
|
\name{helloworld}
\alias{helloworld}
\title{
A helloworld Function
}
\description{
The basic helloworld.
}
\usage{
helloworld(x)
}
\arguments{
\item{x}{
a character string.
}
}
\value{
a character string, "hello world" plus the value of \code{x}.
}
\examples{
helloworld("Seattle!")
}
|
dfa6c121bdb5e12db92a7d2a4f66b3815df73537
|
7b2cacf99fe488c001d09b6a51eac439bdfa5272
|
/analysis/sliding_windows/regions_summary.plot.R
|
923c5892e4686440eca310987bbb92f170cc6337
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
talkowski-lab/rCNV2
|
d4fc066478db96322b7aa062f4ece268098b9de3
|
7e97d4c1562372a6edd7f67cdf36d4167da216f8
|
refs/heads/master
| 2023-04-11T08:48:40.884027
| 2023-01-25T15:59:13
| 2023-01-25T15:59:13
| 178,399,375
| 14
| 4
|
MIT
| 2022-03-16T16:42:46
| 2019-03-29T12:13:54
|
R
|
UTF-8
|
R
| false
| false
| 8,673
|
r
|
regions_summary.plot.R
|
#!/usr/bin/env Rscript
######################
# rCNV Project #
######################
# Copyright (c) 2019 Ryan L. Collins and the Talkowski Laboratory
# Distributed under terms of the MIT License (see LICENSE)
# Contact: Ryan L. Collins <rlcollins@g.harvard.edu>
# Plot summary schematics for final segments from sliding window analysis
# Set global parameters
options(scipen=100000, stringsAsFactors=F)
# Fixed colors per CNV class: deletions red, duplications blue
cnv.colors <- c("DEL"="#D43925",
                "DUP"="#2376B2")
# Minor log10 tick positions covering 100kb-10Mb (1x..9x within each decade)
logscale.size <- log10(as.vector(sapply(5:7, function(e){(1:9)*10^e})))
# Major labeled ticks at 10^5, 10^6, 10^7 bp
logscale.size.labels.at <- 5:7
logscale.size.labels <- c("100kb", "1Mb", "10Mb")
#################
### FUNCTIONS ###
#################
# Load a BED-style table of loci and normalize the column names used by the
# plotting helpers (chr, size, ngenes, nphenos).
load.loci <- function(path){
  dat <- read.table(path, header = TRUE, sep = "\t", comment.char = "")
  cols <- colnames(dat)
  cols[1] <- "chr"
  cols[cols == "cred_intervals_size"] <- "size"
  cols[cols == "n_genes"] <- "ngenes"
  cols[cols == "n_hpos"] <- "nphenos"
  colnames(dat) <- cols
  dat
}
# Plot swarms of region sizes
# DEL, DUP: loci tables from load.loci() (uses the "size" column).
# title.cex: scaling factor for axis titles and the panel title.
# Draws a beeswarm of log10(region size) per CNV class with a mean bar.
plot.size <- function(DEL, DUP, title.cex=1){
  require(beeswarm)
  par(mar=c(2, 4, 1.5, 0.5))
  # Sizes are plotted on a log10 axis spanning 100kb (10^5) to 10Mb (10^7)
  beeswarm(list("DEL"=log10(DEL$size),
                "DUP"=log10(DUP$size)),
           col=cnv.colors, pch=19, ylim=c(5, 7),
           xaxt="n", xlab="", yaxt="n", ylab="",
           corral="wrap", corralWidth=0.4)
  # Mean of log10(size) per class, drawn as a short horizontal bar
  size.means <- sapply(list(DEL, DUP), function(x){mean(log10(x$size))})
  segments(x0=(1:2)-0.2, x1=(1:2)+0.2, y0=size.means, y1=size.means, lend="round", lwd=2)
  # Minor (unlabeled) ticks, then major labeled ticks at 100kb/1Mb/10Mb
  axis(2, at=logscale.size, labels=NA, tck=-0.02, col="gray50")
  axis(2, at=logscale.size.labels.at, labels=NA, tck=-0.04)
  axis(2, at=logscale.size.labels.at, labels=logscale.size.labels,
       las=2, line=-0.4, tick=F)
  mtext(2, text="Credible Region Size", line=2.25, cex=title.cex)
  # Class labels on the x axis, colored to match the points
  axis(1, at=1, tick=F, line=-0.8, font=2, col.axis=cnv.colors[1], labels="DEL")
  axis(1, at=2, tick=F, line=-0.8, font=2, col.axis=cnv.colors[2], labels="DUP")
  mtext(3, line=0.2, font=2, text="Credible Region Size", cex=title.cex)
  box()
}
# Plot swarms of count of genes
# DEL, DUP: loci tables from load.loci() (uses the "ngenes" column).
# title.cex: scaling factor for axis titles and the panel title.
# Draws a beeswarm of gene counts per region, per CNV class, with a mean bar.
plot.genes <- function(DEL, DUP, title.cex=1){
  require(beeswarm)
  par(mar=c(2, 4, 1.5, 0.5))
  beeswarm(list("DEL"=DEL$ngenes,
                "DUP"=DUP$ngenes),
           col=cnv.colors, pch=19,
           xaxt="n", xlab="", yaxt="n", ylab="",
           corral="wrap", corralWidth=0.4)
  # Mean gene count per class, drawn as a short horizontal bar
  gene.means <- c(mean(DEL$ngenes), mean(DUP$ngenes))
  segments(x0=(1:2)-0.2, x1=(1:2)+0.2, y0=gene.means, y1=gene.means, lend="round", lwd=2)
  axis(2, labels=NA, tck=-0.04)
  axis(2, las=2, line=-0.4, tick=F)
  mtext(2, text="Genes per Region", line=2.25, cex=title.cex)
  # Class labels on the x axis, colored to match the points
  axis(1, at=1, tick=F, line=-0.8, font=2, col.axis=cnv.colors[1], labels="DEL")
  axis(1, at=2, tick=F, line=-0.8, font=2, col.axis=cnv.colors[2], labels="DUP")
  mtext(3, line=0.2, font=2, text="Genes per Region", cex=title.cex)
  box()
}
# Plot swarms of count of HPOs
# DEL, DUP: loci tables from load.loci(); the HPO-count column is "nphenos"
# (renamed from "n_hpos" on load). title.cex scales axis/panel titles.
# Draws a beeswarm of associated-HPO counts per region with a mean bar.
plot.hpos <- function(DEL, DUP, title.cex=1){
  require(beeswarm)
  par(mar=c(2, 4, 1.5, 0.5))
  # FIX: the original used DEL$npheno / DUP$npheno, which only resolved via
  # data.frame partial name matching against "nphenos"; spell the name out.
  beeswarm(list("DEL"=DEL$nphenos,
                "DUP"=DUP$nphenos),
           col=cnv.colors, pch=19,
           xaxt="n", xlab="", yaxt="n", ylab="",
           corral="wrap", corralWidth=0.4)
  # Mean HPO count per class, drawn as a short horizontal bar
  # (local renamed from the misleading "gene.means")
  hpo.means <- c(mean(DEL$nphenos), mean(DUP$nphenos))
  segments(x0=(1:2)-0.2, x1=(1:2)+0.2, y0=hpo.means, y1=hpo.means, lend="round", lwd=2)
  axis(2, labels=NA, tck=-0.04)
  axis(2, las=2, line=-0.4, tick=F)
  mtext(2, text="Associated HPOs", line=2.25, cex=title.cex)
  # Class labels on the x axis, colored to match the points
  axis(1, at=1, tick=F, line=-0.8, font=2, col.axis=cnv.colors[1], labels="DEL")
  axis(1, at=2, tick=F, line=-0.8, font=2, col.axis=cnv.colors[2], labels="DUP")
  mtext(3, line=0.2, font=2, text="HPOs per Region", cex=title.cex)
  box()
}
# Plot scatter of size vs genes
# DEL, DUP: loci tables from load.loci(); pooled into one scatter with a
# least-squares trend line. title.cex scales axis/panel titles.
plot.sizeVsGenes <- function(DEL, DUP, title.cex=1){
  par(mar=c(3, 4, 1.5, 0.5))
  sizes <- c(DEL$size, DUP$size)
  genes <- c(DEL$ngenes, DUP$ngenes)
  plot(x=sizes, y=genes, pch=19, col=cnv.colors,
       xaxt="n", xlab="", yaxt="n", ylab="",
       panel.first=c(abline(lm(genes ~ sizes), lty=2, col="gray50")),
       ylim=c(0, max(genes)))
  axis(1, labels=NA, tck=-0.04)
  axis(1, at=axTicks(1), labels=axTicks(1)/1000000, tick=F, line=-0.5)
  mtext(1, line=1.5, text="Region Size (Mb)", cex=title.cex)
  axis(2, labels=NA, tck=-0.04)
  axis(2, las=2, line=-0.4, tick=F)
  mtext(2, text="Genes per Region", line=2.25, cex=title.cex)
  # FIX: the label reads R^2 but the original printed cor() (i.e. r, not r^2);
  # square the correlation so the printed value matches the label.
  text(x=par("usr")[2], y=0.025*par("usr")[4], pos=2,
       labels=bquote(italic(R)^2 == .(round(cor(sizes, genes)^2, 3))))
  mtext(3, line=0.2, font=2, text="Size vs. Genes", cex=title.cex)
}
# Plot scatter of size vs HPOs
# DEL, DUP: loci tables from load.loci(); pooled into one scatter with a
# least-squares trend line. title.cex scales axis/panel titles.
plot.sizeVsHpos <- function(DEL, DUP, title.cex=1){
  par(mar=c(3, 4, 1.5, 0.5))
  sizes <- c(DEL$size, DUP$size)
  # FIX: $npheno relied on partial matching against the "nphenos" column
  hpos <- c(DEL$nphenos, DUP$nphenos)
  plot(x=sizes, y=hpos, pch=19, col=cnv.colors,
       xaxt="n", xlab="", yaxt="n", ylab="",
       panel.first=c(abline(lm(hpos ~ sizes), lty=2, col="gray50")),
       ylim=c(0, max(hpos)))
  axis(1, labels=NA, tck=-0.04)
  axis(1, at=axTicks(1), labels=axTicks(1)/1000000, tick=F, line=-0.5)
  mtext(1, line=1.5, text="Region Size (Mb)", cex=title.cex)
  axis(2, labels=NA, tck=-0.04)
  axis(2, las=2, line=-0.4, tick=F)
  mtext(2, text="Associated HPOs", line=2.25, cex=title.cex)
  # FIX: label reads R^2 but the original printed cor() (r); square it.
  text(x=par("usr")[2], y=0.025*par("usr")[4], pos=2,
       labels=bquote(italic(R)^2 == .(round(cor(sizes, hpos)^2, 3))))
  mtext(3, line=0.2, font=2, text="Size vs. HPOs", cex=title.cex)
}
# Plot scatter of genes vs HPOs
# DEL, DUP: loci tables from load.loci(); pooled into one scatter with a
# least-squares trend line. title.cex scales axis/panel titles.
plot.genesVsHpos <- function(DEL, DUP, title.cex=1){
  par(mar=c(3, 4, 1.5, 0.5))
  genes <- c(DEL$ngenes, DUP$ngenes)
  # FIX: $npheno relied on partial matching against the "nphenos" column
  hpos <- c(DEL$nphenos, DUP$nphenos)
  plot(x=genes, y=hpos, pch=19, col=cnv.colors,
       xaxt="n", xlab="", yaxt="n", ylab="",
       panel.first=c(abline(lm(hpos ~ genes), lty=2, col="gray50")),
       ylim=c(0, max(hpos)))
  axis(1, labels=NA, tck=-0.04)
  axis(1, tick=F, line=-0.5)
  mtext(1, line=1.5, text="Genes per Region", cex=title.cex)
  axis(2, labels=NA, tck=-0.04)
  axis(2, las=2, line=-0.4, tick=F)
  mtext(2, text="Associated HPOs", line=2.25, cex=title.cex)
  # FIX: label reads R^2 but the original printed cor() (r); square it.
  text(x=par("usr")[2], y=0.025*par("usr")[4], pos=2,
       labels=bquote(italic(R)^2 == .(round(cor(genes, hpos)^2, 3))))
  mtext(3, line=0.2, font=2, text="Genes vs. HPOs", cex=title.cex)
}
#####################
### RSCRIPT BLOCK ###
#####################
# Command-line driver: parses DEL/DUP loci paths, then renders each summary
# panel as a standalone JPEG plus one combined six-panel figure.
require(optparse, quietly=T)
# List of command-line options
option_list <- list(
  make_option(c("-o", "--out-prefix"), type="character", default="./final_regions.",
              help="prefix for writing out all results. [default %default]", metavar="path")
)
# Get command-line arguments & options
args <- parse_args(OptionParser(usage="%prog DEL_regions DUP_regions",
                                option_list=option_list),
                   positional_arguments=TRUE)
opts <- args$options
# Checks for appropriate positional arguments
if(length(args$args) != 2){
  stop("Must supply DEL and DUP regions as positional arguments.\n")
}
# Writes args & opts to vars
DEL.in <- args$args[1]
DUP.in <- args$args[2]
out.prefix <- opts$`out-prefix`
# # DEV PARAMETERS:
# DEL.in <- "~/scratch/rCNV.DEL.final_segments.loci.bed.gz"
# DUP.in <- "~/scratch/rCNV.DUP.final_segments.loci.bed.gz"
# out.prefix <- "~/scratch/sig_regions.test."
# Load data
DEL <- load.loci(DEL.in)
DUP <- load.loci(DUP.in)
# All panels are rendered at 350 dpi; 3*350 px = 3 inches per side
# Plot size
jpeg(paste(out.prefix, "region_sizes.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.size(DEL, DUP)
dev.off()
# Plot count of genes
jpeg(paste(out.prefix, "gene_count.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.genes(DEL, DUP)
dev.off()
# Plot count of HPOs
jpeg(paste(out.prefix, "hpo_count.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.hpos(DEL, DUP)
dev.off()
# Plot size vs genes
jpeg(paste(out.prefix, "size_vs_genes.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.sizeVsGenes(DEL, DUP)
dev.off()
# Plot size vs HPOs
jpeg(paste(out.prefix, "size_vs_hpos.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.sizeVsHpos(DEL, DUP)
dev.off()
# Plot genes vs HPOs
jpeg(paste(out.prefix, "genes_vs_hpos.jpg", sep=""),
     height=3*350, width=3*350, res=350)
plot.genesVsHpos(DEL, DUP)
dev.off()
# Plot combined six-panel figure (2 rows x 3 columns, smaller titles)
jpeg(paste(out.prefix, "multipanel_summary.jpg", sep=""),
     height=4*350, width=6*350, res=350)
layout(matrix(1:6, nrow=2, byrow=T))
plot.size(DEL, DUP, title.cex=0.75)
plot.genes(DEL, DUP, title.cex=0.75)
plot.hpos(DEL, DUP, title.cex=0.75)
plot.sizeVsGenes(DEL, DUP, title.cex=0.75)
plot.sizeVsHpos(DEL, DUP, title.cex=0.75)
plot.genesVsHpos(DEL, DUP, title.cex=0.75)
dev.off()
|
52b1616f40ef77a6e6195b95158618d82ddf67fd
|
9f77863a8d6916ea52ff17f49e7c4901ea84f547
|
/Shiny_Application/ui.R
|
7a73e4a0b4edd05f0d2e660438078bc224205184
|
[] |
no_license
|
meowjiang/repoDB
|
210ef68a3f93e80f39dfb2175965c8d56ee960d7
|
cd4edec67e57df1d2a5f552e30a6960a38d79432
|
refs/heads/master
| 2020-05-07T22:26:09.058728
| 2017-07-28T17:22:56
| 2017-07-28T17:22:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,278
|
r
|
ui.R
|
########
# Load #
########
## Libraries
library(shiny)
library(DT)
#################
# UI Definition #
#################
shinyUI(fluidPage(
## Header
headerPanel('',
tags$head(
tags$img(src="logo.png", height="80px", width='275px',
style = "padding-left: 25px; padding-top: 15px")
)
),
tags$br(),
## Define Navigation
navlistPanel(
## Overview Panel
tabPanel(
"Introduction",
p("repoDB contains a standard set of drug repositioning successes and failures that can be
used to fairly and reproducibly benchmark computational repositioning methods. repoDB data
was extracted from ",
a('DrugCentral', href='http://drugcentral.org/'),
"and ",
a('ClinicalTrials.gov.', href='http://clinicaltrials.gov')
),
p("The repoDB website has several functionalities, which can be accessed from the navigation bar:",
tags$ul(
tags$li("Drug-centric searching"),
tags$li("Disease-centric searching"),
tags$li("Full repoDB download")
)
),
p("You can explore the types and characteristics of data in repoDB in the plot below."),
plotOutput('summary_plot')
),
## Drug Search Panel
tabPanel(
"Drug Search",
p('repoDB contains information about 1,571 currently approved drugs (as annotated in DrugBank).
To search repoDB for a specific drug, select a drug and the current statuses you\'d like to display.
Drugs are listed with their DrugBank IDs, for easier integration into your existing pipelines.
Search results can be downloaded as a tab-separated values file using the download button below the table
of drug indications.'
),
uiOutput('drugdrop'),
checkboxGroupInput('drugcheck',
'Select the status categories you\'d like to display',
choices = c('Approved','Terminated','Withdrawn','Suspended'),
selected = c('Approved','Terminated','Withdrawn','Suspended'),
inline=T
),
checkboxGroupInput('phasecheckdrug',
'Select the phases you\'d like to display',
choices = c('Phase 0', 'Phase 1', 'Phase 2', 'Phase 3'),
selected = c('Phase 0', 'Phase 1', 'Phase 2', 'Phase 3'),
inline = T
),
tags$hr(),
dataTableOutput('drugtable'),
downloadButton(
outputId = 'drugdownload',
label = 'Download the current search results'
)
),
tabPanel(
"Disease Search",
p(
'repoDB contains information about 2,051 diseases, all mapped to UMLS terms for easier
integration into your existing pipelines. To search for a specific disease,
select a disease and the current statuses you\'d like to display.
Search results can be downloaded as a tab-separated values file using the download button below the table
of drug indications.'
),
uiOutput('inddrop'),
checkboxGroupInput('indcheck',
'Select the status categories you\'d like to display',
choices = c('Approved','Terminated','Withdrawn','Suspended'),
selected = c('Approved','Terminated','Withdrawn','Suspended'),
inline=T
),
checkboxGroupInput('phasecheckind',
'Select the phases you\'d like to display',
choices = c('Phase 0', 'Phase 1', 'Phase 2', 'Phase 3'),
selected = c('Phase 0', 'Phase 1', 'Phase 2', 'Phase 3'),
inline = T
),
tags$hr(),
dataTableOutput('indtable'),
downloadButton(
outputId = 'inddownload',
label = 'Download the current search results'
)
),
tabPanel(
"Download",
p(
"The full repoDB database is available for download using the button below.
Please note that the data is presented as-is, and not all entries have been
validated before publication."
),
downloadButton(
outputId = 'downloadFull',
label = 'Download the full repoDB Dataset'
)
),
tabPanel(
"Citing repoDB",
p(
"To acknowledge use of the repoDB resource, please cite the following paper:"
),
tags$code(
"Brown AS and Patel CJ. repoDB: A New Standard for Drug Repositioning Validation.",
em("Scientific Data."),
"170029 (2017)."
),
tags$br(),
tags$br(),
p(
"repoDB was built using the October 25, 2016 build of ",
a("DrugCentral,", href='http://drugcentral.org/download'),
"the March 27, 2016 build of the ",
a("AACT database,", href='https://www.ctti-clinicaltrials.org/aact-database'),
"and the 2016AB Release of the ",
a("Unified Medical Language System.", href='https://www.nlm.nih.gov/research/umls/'),
"Metformin and recycling symbol used under CC0 license from wikimedia commons. Database symbol by Designmodo,
used under a CC3.0 licnesne."
),
p (
strong("By using the repoDB database, users agree to cite our work, as well as AACT,
DrugCentral, and UMLS for their role in data curation. This data is available under a ",
a('Creative Commons Attribution 4.0 International License.',href='https://creativecommons.org/licenses/by/4.0/')
)
)
),
tabPanel(
"Version History",
p("As repoDB is improved and augmented with new data, we will track any substantial changes made to repoDB here:"),
tags$ul(
tags$li(strong('v1.0 (March 14, 2017)'), ' - Initial release'),
tags$li(strong('v1.1 (June 26, 2017)'), ' - Fixed a bug in the ClinicalTrials.gov parser that created multiple DrugBank Identifiers for a
single drug (many thanks to Cristina Leal for spotting the error).'),
tags$li(strong('v1.2 (July 28, 2017)'), ' -', code('Version History'), ' tab was added to address discrepancies introduced in totals for Terminated
Withdrawn, and Suspended drug-disease pairs versus published values due to bugfix in v1.1 (many thanks to Beste Turanli for
spotting the discrepancy).')
)
)
),
## Footer
tags$hr(),
p(
strong('repoDB is intended for educational and scientific research purposes only.'),
'This work is licensed under a ',
a('Creative Commons Attribution 4.0 International License.',href="http://creativecommons.org/licenses/by/4.0/"),
'repoDB was developed by AS Brown and CJ Patel. See the "Citing repoDB" tab for citation information',
'For more projects, visit the ', a('Patel Group Homepage.', href='http://www.chiragjpgroup.org/')
)
))
|
0fadbccc32772ece7d41a97fbf875fcfe42c7733
|
125d5252f02b02e5a3fa302c782c64b0199473c0
|
/Z_Backup.R
|
3dcbee082f7cc759883a33291f1e63fd1da1768a
|
[] |
no_license
|
NicolasGoeller/ADS_Project_NicoNoah
|
e904b765e328434f6e43b33cf98a0bcd20c84b97
|
5b92e688a6dcbd05181c3674d77b30a070d2308a
|
refs/heads/master
| 2020-04-06T11:33:30.515972
| 2019-01-31T17:57:16
| 2019-01-31T17:57:16
| 157,422,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,366
|
r
|
Z_Backup.R
|
### Stuff we have coded, but we couldn't implement in current workstream
## 4. Construct index for social capial
## Draw on initial EVS_2008 to obtain proxies for social capital
# Our own social capital index with questionable intercorrelatedness:
# Build social-capital proxy variables from EVS 2008 survey items.
# Recoding conventions applied item by item:
#   * EVS missing-value codes (-5 .. -1) are set to NA
#   * reverse-keyed items are flipped (e.g. (x - 4) * -1) so that higher
#     values consistently mean "more" of the construct
# within() appends the new columns to EVS_2008; the raw items (v2, v3, ...)
# are left untouched.
EVS_2008 %<>% within({
  # importance of family (v2: 1 = very important ... 4 = not at all important)
  imp_fam <- v2
  imp_fam[v2 %in% c(-5, -4, -3, -2, -1)] <- NA
  imp_fam <- (imp_fam-4)*-1  # reverse-code: 0 = not at all ... 3 = very important
  imp_fam <- as.numeric(imp_fam)
  # importance of friends and acquaintances (v3: 1 = very important ... 4 = not at all)
  imp_frds <- v3
  imp_frds[v3 %in% c(-5, -4, -3, -2, -1)] <- NA
  imp_frds <- (imp_frds-4)*-1  # reverse-code so higher = more important
  imp_frds <- as.numeric(imp_frds)
  # generalized trust (v62, dummy: 1 = most people can be trusted; 2 = can't be too careful)
  trust <- v62
  trust[v62 %in% c(-5, -4, -3, -2, -1)] <- NA
  # fairness (v63: 1 = people would try to take advantage ... 10 = people are fair)
  fair <- v63
  fair[v63 %in% c(-5, -4, -3, -2, -1)] <- NA
  fair <- as.numeric(fair)
  # helpfulness (v64: 1 = people look out for themselves ... 10 = people are helpful)
  helpful <- v64
  helpful[v64 %in% c(-5, -4, -3, -2, -1)] <- NA
  helpful <- as.numeric(helpful)
  # importance of meeting nice people (v97: 1 = very important ... 4 = not at all)
  met_pep <- v97
  met_pep[v97 %in% c(-5, -4, -3, -2, -1)] <- NA
  met_pep <- (met_pep-4)*-1  # reverse-code so higher = more important
  met_pep <- as.numeric(met_pep)
  # concern for family (v284: 1 = very much ... 5 = not at all)
  conc_fam <- v284
  conc_fam[v284 %in% c(-5, -4, -3, -2, -1)] <- NA
  conc_fam <- (conc_fam-5)*-1  # reverse-code so higher = more concerned
  conc_fam <- as.numeric(conc_fam)
  # concern for people in the neighbourhood (v285: 1 = very much ... 5 = not at all)
  conc_neigh <- v285
  conc_neigh[v285 %in% c(-5, -4, -3, -2, -1)] <- NA
  conc_neigh <- (conc_neigh-5)*-1
  conc_neigh <- as.numeric(conc_neigh)
  # concern for people in the region (v286: 1 = very much ... 5 = not at all)
  conc_region <- v286
  conc_region[v286 %in% c(-5, -4, -3, -2, -1)] <- NA
  conc_region <- (conc_region-5)*-1
  conc_region <- as.numeric(conc_region)
})
# Construct the social-capital item set from the recoded EVS variables
# (fair and helpful are deliberately commented out) and assess its
# reliability. `select` is presumably dplyr::select -- confirm load order.
soc_cap_data <- select(EVS_2008, imp_fam,
                       imp_frds,
                       #fair,
                       #helpful,
                       met_pep,
                       conc_fam,
                       conc_neigh,
                       conc_region)
# listwise deletion: keep complete cases only
soc_cap_data %<>% na.omit()
cor_mat <- cor(soc_cap_data) %>% round(2) # inter-item correlation matrix
# Cronbach's alpha (presumably psych::alpha); check.keys = T auto-reverses
# items that correlate negatively with the total score
soc_cap_data %>% as.matrix() %>% alpha(check.keys = T)
###Shiny
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
#generate bins based on input$bins from ui.R
x <- faithful[, 2]
bins <- seq(min(x), max(x), length.out = input$bins + 1)
#draw the histogram with the specified number of bins
hist(x, breaks = bins, col = 'darkgray', border = 'white')
})
})
# UI half of the demo Shiny app: a sidebar slider choosing the number of
# histogram bins (read server-side as input$bins) and the main panel showing
# the plot rendered by the server's output$distPlot.
shinyUI(fluidPage(
  # Application title
  titlePanel("Old Faithful Geyser Data"),
  # Sidebar with a slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      sliderInput("bins",
                  "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("distPlot")
    )
  )
))
|
c929c7c2dce1457d11961bf417a1e81db9314986
|
01d80c2cd1edae9956e67ae585b822d6014d4d96
|
/lab2/parzenWindow.R
|
0ce600c16ec12fff92fe7d356fda092e6d644875
|
[] |
no_license
|
alexlapiy/ML0
|
4e61c1e1e42f6f9f767bf1c7202f0013d87f15b6
|
e7235abb5a7e0bfc13e3faace121fa418c69d607
|
refs/heads/master
| 2020-07-19T03:59:40.105683
| 2019-12-23T05:45:34
| 2019-12-23T05:45:34
| 206,370,343
| 0
| 0
| null | 2019-10-02T13:53:41
| 2019-09-04T17:02:57
| null |
UTF-8
|
R
| false
| false
| 3,914
|
r
|
parzenWindow.R
|
# Euclidean distance between two numeric vectors of equal length.
euclidDist <- function(u, v) {
  squared_diffs <- (u - v)^2
  sqrt(sum(squared_diffs))
}
# Order the rows of a labelled sample by Euclidean distance to a query point.
#
# @param xl data frame whose first n = ncol(xl) - 1 columns are features and
#           whose last column is the class label.
# @param u  numeric query point of length n.
# @return   `xl` sorted by ascending distance to `u`, with an additional
#           `euclidDist` column holding the (sorted) distances.
#
# Improvements over the original: seq_len() instead of the unsafe 1:l
# (which yields c(1, 0) when l == 0), distances collected with vapply()
# instead of filling an index/distance matrix row by row, and comments
# translated to English.
sortObjectsByDist <- function(xl, u) {
  l <- dim(xl)[1]
  n <- dim(xl)[2] - 1
  # distance from every object's feature vector to the query point
  dists <- vapply(seq_len(l), function(i) euclidDist(xl[i, 1:n], u), numeric(1))
  ord <- order(dists)
  orderedXl <- xl[ord, ]
  # append the sorted distances as an extra column
  cbind(orderedXl, euclidDist = dists[ord])
}
# Rectangular (uniform) kernel: 1/2 inside [-1, 1], 0 outside (r = dist / h).
rect_kernel <- function(dist, h) {
  r <- dist / h
  if (abs(r) > 1) {
    return(0)
  }
  1 / 2
}
# Epanechnikov kernel: (3/4)(1 - r^2) inside [-1, 1], 0 outside (r = dist / h).
epanech_kernel <- function(dist, h) {
  r <- dist / h
  if (abs(r) > 1) {
    return(0)
  }
  3 / 4 * (1 - r^2)
}
# Quartic (biweight) kernel: (15/16)(1 - r^2)^2 inside [-1, 1], 0 outside
# (r = dist / h).
quartic_kernel <- function(dist, h) {
  r <- dist / h
  if (abs(r) > 1) {
    return(0)
  }
  15 / 16 * (1 - r^2)^2
}
# Triangular kernel: 1 - |r| inside [-1, 1], 0 outside (r = dist / h).
triang_kernel <- function(dist, h) {
  r <- dist / h
  if (abs(r) > 1) {
    return(0)
  }
  1 - abs(r)
}
# Gaussian kernel (non-compact support): K(r) = exp(-r^2 / 2) / sqrt(2*pi),
# with r = dist / h.
#
# Bug fix: the original multiplied by (2*pi)^(1/2) instead of (2*pi)^(-1/2),
# i.e. it used the reciprocal of the Gaussian normalising constant. This did
# not change which class wins in parzenWindow() (every score was scaled by
# the same factor), but the kernel values themselves were off by 2*pi.
gauss_kernel <- function(dist, h) {
  (2 * pi)^(-1 / 2) * exp((-1 / 2) * (dist / h)^2)
}
# Parzen-window classifier for a single query point.
#
# @param xl         data frame: two feature columns followed by the class
#                   label (a factor) in the last column.
# @param u          numeric query point (length = number of features).
# @param h          window width passed to the kernel.
# @param kernelFunc kernel function(dist, h) returning a non-negative weight.
# @return the name of the class with the largest kernel-weighted vote, or
#         "noClass" when no object falls inside the window.
#
# Fixes vs. the original: removed the debug View()/print() calls (print ran
# on every loop iteration), replaced seq(1:l)/1:l with seq_len(l), and
# replaced the magic column indices 3 and 4 with n and n + 1 (same values
# for this data layout, but tied to the computed class-column position).
parzenWindow <- function(xl, u, h, kernelFunc) {
  l <- dim(xl)[1]
  orderedXl <- sortObjectsByDist(xl, u)
  n <- dim(orderedXl)[2] - 1          # class column; column n + 1 is euclidDist
  classes <- orderedXl[seq_len(l), n]
  # zero-filled table over all class levels (table of an empty slice)
  counts <- table(orderedXl[0, n])
  for (i in seq_len(l)) {
    counts[classes[i]] <- counts[classes[i]] + kernelFunc(orderedXl[i, n + 1], h)
  }
  if (sum(counts) > 0) {
    class <- names(which.max(counts))
  } else {
    class <- "noClass"                # no object inside the window
  }
  return(class)
}
# Leave-one-out (LOO) error of the Parzen-window classifier over a grid of
# window widths.
#
# @param xl         data frame: two feature columns and the class in column 3.
# @param seqH       numeric vector of candidate window widths h.
# @param kernelFunc kernel function(dist, h) used by parzenWindow().
# @return numeric vector of the same length as seqH: the misclassification
#         rate obtained for each h.
#
# Fixes vs. the original: removed the debug print(j) (ran once per h) and
# the interactive View(hLooArray) call, and replaced 1:l with seq_len(l).
loo <- function(xl, seqH, kernelFunc) {
  l <- dim(xl)[1]
  hLooArray <- array(0, length(seqH))
  j <- 1
  for (h in seqH) {
    cnt <- 0
    for (i in seq_len(l)) {
      u <- c(xl[i, 1], xl[i, 2])
      x <- xl[-i, 1:3]                        # training sample without object i
      class <- parzenWindow(x, u, h, kernelFunc)
      if (xl[i, 3] != class) {
        cnt <- cnt + 1                        # object i was misclassified
      }
    }
    hLooArray[j] <- cnt / l
    j <- j + 1
  }
  return(hLooArray)
}
# Scatter-plot the iris sample coloured by species and mark the query point
# `u` with a triangle filled in the colour of its predicted class.
parzenPlot <- function(xl, u, h, kernelFunc) {
  palette <- c("setosa" = "red", "versicolor" = "green3", "virginica" = "blue")
  speciesColors <- palette[xl$Species]
  plot(xl[1:2], pch = 20, bg = speciesColors, col = speciesColors)
  predicted <- parzenWindow(xl, u, h, kernelFunc)
  points(u[1], u[2], pch = 25, bg = palette[predicted], asp = 1)
}
# Plot LOO(h) against h, then print and highlight (red point) the window
# width with the lowest LOO error.
looPlot <- function(seqH, looData) {
  plot(seqH, looData, xlab = "h", ylab = "LOO(h)", type = "l")
  looDataFrame <- data.frame(seqH, looData)
  bestRow <- which.min(looDataFrame$looData)
  minH <- looDataFrame[bestRow, ]
  print(minH)
  points(minH, pch = 21, bg = "red")
}
# Classification map of the Parzen-window classifier: plot the training
# sample coloured by species, then classify every node of a regular grid
# over the feature plane and draw it in the predicted class's colour
# ("noClass" nodes stay white).
# NOTE(review): the grid bounds (1..7 x 0.1..2.5) are hard-coded for the
# iris petal length/width features.
classificationMap <- function(xl, h, kernelFunc) {
  colors <- c("setosa" = "red", "versicolor" = "green", "virginica" = "blue",
              "noClass" = "white")
  plot(xl[1:2], pch = 21, col = colors[xl$Species], bg = colors[xl$Species])
  # classify each grid node; this calls parzenWindow once per node, so the
  # full map is slow
  for (i in seq(1.0, 7.0, 0.1)) {
    for (j in seq(0.1, 2.5, 0.1)) {
      u <- c(i, j)
      class <- parzenWindow(xl, u, h, kernelFunc)
      points(i, j, pch = 21, col = colors[class])
    }
  }
}
# Demo: classify one query point on the iris petal features.
# (uncomment to use a random 30-object subsample instead of the full set)
#xl <-iris[sample(1:150, 30, replace=FALSE), 3:5]
xl <- iris[, 3:5]          # petal length, petal width, species
u <- c(5, 2)               # query point to classify
h <- 0.6                   # window width
seqH <- seq(0.1, 2, 0.1)   # candidate window widths for the LOO search
parzenPlot(xl, u, h, rect_kernel)
# LOO error curves for each kernel (uncomment to run; slow)
#looRectKernel <- loo(xl, seqH, rect_kernel)
#looEpanechKernel <- loo(xl, seqH, epanech_kernel)
#looQuarticKernel <- loo(xl, seqH, quartic_kernel)
#looTriangKernel <- loo(xl, seqH, triang_kernel)
#looGaussKernel <- loo(xl, seqH, gauss_kernel)
# LOO plot
#looPlot(seqH, looGaussKernel)
# Classification map
#classificationMap(xl, 0.6, rect_kernel)
|
6c8d7fe3842c2a8543ed9820ece709244f54258e
|
0e76443b6de1312c8d3988d2538263db0cd7385b
|
/分析及画图/ggplot_中文.R
|
25d48f5de9de395b44003190cbdd33540fbf0bb5
|
[] |
no_license
|
mrzhangqjankun/R-code-for-myself
|
0c34c9ed90016c18f149948f84503643f0f893b7
|
56f387b2e3b56f8ee4e8d83fcb1afda3d79088de
|
refs/heads/master
| 2022-12-30T08:56:58.880007
| 2020-10-23T03:20:17
| 2020-10-23T03:20:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,312
|
r
|
ggplot_中文.R
|
## 2019.3.14
# Displaying Chinese characters in ggplot2 / base R graphics output.
# Source (Chinese-language tutorial): https://mp.weixin.qq.com/s/ITKP6zlkbXGKeCWnNMP3Bw
# NOTE(review): install.packages() in a script reinstalls on every run;
# consider guarding with requireNamespace().
install.packages('Cairo')
library("Cairo")
?Cairo
# Example: save a ggplot through the Cairo PDF device with a CJK-capable font
ggsave("geo_Fus_wilt.pdf", p1, width = 12, height =8 , device = cairo_pdf, family = "Song")
# Font families usable as `family` (family string, Chinese name):
# PMingLiU (新细明体), MingLiU (细明体), DFKai-SB (标楷体), SimHei (黑体),
# SimSun (宋体), NSimSun (新宋体), FangSong (仿宋), KaiTi (楷体),
# FangSong_GB2312 (仿宋_GB2312), KaiTi_GB2312 (楷体_GB2312),
# Microsoft JhengHei (微软正黑体), Microsoft YaHei (微软雅黑),
# LiSu (隶书), YouYuan (幼圆), STXihei (华文细黑), STKaiti (华文楷体),
# STSong (华文宋体), STZhongsong (华文中宋), STFangsong (华文仿宋),
# FZShuTi (方正舒体), FZYaoti (方正姚体), STCaiyun (华文彩云),
# STHupo (华文琥珀), STLiti (华文隶书), STXingkai (华文行楷),
# STXinwei (华文新魏)
# Reference: https://blog.csdn.net/hongweigg/article/details/47907555
# With the base pdf() device only one font choice works for Chinese output.
# Example:
pdf("chinese.pdf",family="GB1")
plot(m2,xlab="高度",ylab="体重",main="统计")
dev.off()
# Here the family argument can only be set to "GB1" (unclear whether other
# values are accepted); the default typeface is SimSun (宋体).
getwd()
# With the Cairo package, several fonts can be chosen for Chinese output:
CairoPDF("chinese.pdf",family="SimSun")
plot(1:10,1:10,type="n");
text(2,10,"宋体",family="SimSun");
text(2,8,"黑体",family="SimHei");
text(2,6,"楷体",family="KaiTi_GB2312");
text(2,4,"隶书",family="LiSu");
text(2,2,"幼圆",family="YouYuan");
text(6,10,"Arial",family="Arial");
text(6,8,"Times New Roman",family="Times New Roman");
text(6,6,"Courier New",family="Courier New");
text(6,4,"Consolas",family="Consolas");
text(6,2,"Symbol",family="Symbol");
dev.off();
# The family parameter takes the font name, e.g. SimSun for 宋体 or
# SimHei for 黑体.
|
3f949ce3f4665828027cca674ab826fd7ed5a106
|
c7d5fa4a80cf89aeb6e17159a0953ad90ad1f4dc
|
/man/batchconvert.Rd
|
3723f9bb16f0923f27ca2ed08d41a7a85ff8917a
|
[] |
no_license
|
cran/PopGenKit
|
4ac75f13f1f189d006d2fe95550eecb8854d1011
|
57588283dc44a661993babce5570e2bec9a3945b
|
refs/heads/master
| 2021-01-18T08:07:28.105429
| 2011-07-21T00:00:00
| 2011-07-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,308
|
rd
|
batchconvert.Rd
|
\name{batchconvert}
\alias{batchconvert}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Convert all Genepop files in dir to Arlequin format
%% ~~function to do ... ~~
}
\description{This function converts all Genepop files (extension .gen) in the working directory to Arlequin files (extension .arp). It also creates a batch Arlequin file (extension .arb) to allow batch analysis in Arlequin.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
batchconvert(ndigit = 3)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ndigit}{The number of digits per allele in the input file. Can be 2 or 3 (default 3).
%% ~~Describe \code{ndigit} here~~
}
}
\details{Relies on the function \code{\link{convert}} to perform the batch conversion.
Input files must end with the extension '.gen'. Make sure to follow standard Genepop format for input files. The IDs of all individuals should be immediately followed by a comma. Population labels should be Pop, POP, or pop. There should not be invisible characters (tabs or spaces) at the end of lines, especially lines on which population labels are found. These lines should have only three characters (Pop). See example file 'glyptemys3.gen' if needed.
%% ~~ If necessary, more details than the description above ~~
}
\value{All arp files and the single arb file are saved in the working directory
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{Excoffier, L. and H.E.L. Lischer (2010). Arlequin suite ver 3.5: A new series of programs to perform
population genetics analyses under Linux and Windows. Mol. Ecol. Res. 10: 564-567.
Rousset, F. (2008). Genepop'007: a complete reimplementation of the Genepop software for Windows and Linux. Mol. Ecol. Res. 8: 103-106.
%% ~put references to the literature/web site here ~
}
\author{Sebastien Rioux Paquette
%% ~~who you are~~
}
\seealso{\code{\link{convert}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
bbb62850494fa69276643fe220d9449000090eb4
|
ff85b91ac7e2ef583c56e7f3dae6ca56897a9cd7
|
/man/inputGFF3OutputGenbank.Rd
|
0417eee1e60bc70f23bf60c10e15f7c1b6cb38c6
|
[
"MIT"
] |
permissive
|
Tasu/EpDB2UG
|
a8d5a3ea82c5410a85576f03c05c40050ef28dbc
|
ed4b83026440262f222e8c9493999d6e4d073da8
|
refs/heads/master
| 2021-01-20T20:53:16.212945
| 2016-08-20T19:06:28
| 2016-08-20T19:06:28
| 63,458,667
| 0
| 0
| null | 2016-08-20T19:06:28
| 2016-07-16T00:53:11
|
R
|
UTF-8
|
R
| false
| true
| 410
|
rd
|
inputGFF3OutputGenbank.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{inputGFF3OutputGenbank}
\alias{inputGFF3OutputGenbank}
\title{inputGFF3OutputGenbank}
\usage{
inputGFF3OutputGenbank(toxoDBGFF)
}
\arguments{
\item{toxoDBGFF}{input file path}
}
\description{
genbank file will be made.
output genbank file is ugene-compatible, may not be compatible for other software.
}
\examples{
}
|
9416d832fef8c81feebbbe6c264f21e36ff557b0
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledrivev2.auto/man/permissions.update.Rd
|
b1b7a5b48b96b5e84f9e428d05f753020a7e44d8
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,512
|
rd
|
permissions.update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{permissions.update}
\alias{permissions.update}
\title{Updates a permission.}
\usage{
permissions.update(Permission, fileId, permissionId, removeExpiration = NULL,
supportsTeamDrives = NULL, transferOwnership = NULL)
}
\arguments{
\item{Permission}{The \link{Permission} object to pass to this method}
\item{fileId}{The ID for the file or Team Drive}
\item{permissionId}{The ID for the permission}
\item{removeExpiration}{Whether to remove the expiration date}
\item{supportsTeamDrives}{Whether the requesting application supports Team Drives}
\item{transferOwnership}{Whether changing a role to 'owner' downgrades the current owners to writers}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
\item https://www.googleapis.com/auth/drive.file
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive, https://www.googleapis.com/auth/drive.file)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
Other Permission functions: \code{\link{Permission.teamDrivePermissionDetails}},
\code{\link{Permission}},
\code{\link{permissions.insert}},
\code{\link{permissions.patch}}
}
|
2c2a47b340f50f4b30366efb184069f9fa5d3e1e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/extremefit/examples/cox.adapt.Rd.R
|
f325afa7d443669542cc4de69e3eb817c2cc607a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
cox.adapt.Rd.R
|
library(extremefit)
### Name: cox.adapt
### Title: Compute the extreme quantile procedure for Cox model
### Aliases: cox.adapt
### ** Examples
library(survival)
data(bladder)
# observation time per record (bladder2 from the survival package)
X <- bladder2$stop-bladder2$start
# covariate matrix: columns 2-4 and 8 of bladder2
Z <- as.matrix(bladder2[, c(2:4, 8)])
# event indicator (presumably 1 = event observed, 0 = censored)
delta <- bladder2$event
# sort by increasing time, keeping covariates and censoring aligned
ord <- order(X)
X <- X[ord]
Z <- Z[ord,]
delta <- delta[ord]
# Cox proportional hazards fit, then the adaptive extreme-quantile procedure
cph<-coxph(Surv(X, delta) ~ Z)
ca <- cox.adapt(X, cph, delta, Z)
|
a6461dfbe752902e07a007b35b80563fe2076153
|
76283a39fcc37ae4ad222c2753f3509022fe36f2
|
/scripts/selection_analysis.R
|
af1f6789be325413f8ae7be42861493aad749646
|
[] |
no_license
|
ljljolinq1010/Adaptive-evolution-in-late-development-and-adult
|
015035644f981bad623ec0aa2ffce3b37d5859fc
|
9ce53bb72082f5dfdaa78bca9f935eab8f82f8a4
|
refs/heads/master
| 2021-05-12T14:24:03.356937
| 2018-01-10T14:05:54
| 2018-01-10T14:05:54
| 116,954,623
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,052
|
r
|
selection_analysis.R
|
library("ggplot2")
library("reshape2")
library("RColorBrewer")
######* module analysis *#####
# Test whether positive selection (Selectome branch-site likelihood-ratio
# statistics) is enriched in, or stronger in, particular developmental modules.
#
# @param module    data frame of module gene lists: one column per
#                  developmental module, cells holding Ensembl gene IDs.
# @param selectome data frame with at least Ensembl.Gene.ID, lrt (delta lnL)
#                  and qvalue columns.
# @param orgName   "D.melanogaster", "D.rerio" or "M.musculus"; selects the
#                  module layout, plot colours/limits and branch label.
# Side effects: draws two barplots (proportion of genes under weak/strong
# evidence of positive selection per module, annotated with chi-square
# goodness-of-fit p-values) and a ggplot jitter plot of fourth-root lrt.
#
# Fixes vs. the original: seq_along() instead of 1:length(), TRUE/FALSE
# instead of T/F, and <- instead of = for local assignments. Logic unchanged.
moduletest <- function(module, selectome, orgName) {
  ## split the selectome table by developmental module for this organism
  if (orgName=="D.melanogaster") {
    earlyM<-selectome[selectome$Ensembl.Gene.ID%in%module$Early.embryo,]
    middleM<-selectome[selectome$Ensembl.Gene.ID%in%module$Middle.embryo,]
    lateM<-selectome[selectome$Ensembl.Gene.ID%in%module$Late.embryo,]
    larvaM<-selectome[selectome$Ensembl.Gene.ID%in%module$Larva,]
    pupAdultM<-selectome[selectome$Ensembl.Gene.ID%in%module$Pupae.Adult,]
    moduleList<-list(earlyM,middleM,lateM,larvaM,pupAdultM)
    moduleDF<-rbind(earlyM,middleM,lateM,larvaM,pupAdultM)
    moduleNames<-names(module)
    legendName<-"Melanogaster group branch"
  }
  if (orgName=="D.rerio") {
    clevBlasM<-selectome[selectome$Ensembl.Gene.ID%in%module$Cleav.Blastula,]
    gastrulaM<-selectome[selectome$Ensembl.Gene.ID%in%module$Gastrula,]
    segmentationM<-selectome[selectome$Ensembl.Gene.ID%in%module$Segmentation,]
    pharyngulaM<-selectome[selectome$Ensembl.Gene.ID%in%module$Pharyngula,]
    larvaM<-selectome[selectome$Ensembl.Gene.ID%in%module$Larva,]
    juvenileM<-selectome[selectome$Ensembl.Gene.ID%in%module$Juvenile,]
    adultM<-selectome[selectome$Ensembl.Gene.ID%in%module$Adult,]
    moduleList<-list(clevBlasM,gastrulaM,segmentationM,pharyngulaM,larvaM,juvenileM,adultM)
    moduleDF<-rbind(clevBlasM,gastrulaM,segmentationM,pharyngulaM,larvaM,juvenileM,adultM)
    moduleNames<-names(module)
    legendName<-"Clupeocephala branch"
  }
  if (orgName=="M.musculus") {
    earlyM<-selectome[selectome$Ensembl.Gene.ID%in%module$Early.embryo,]
    middleM<-selectome[selectome$Ensembl.Gene.ID%in%module$Middle.embryo,]
    lateM<-selectome[selectome$Ensembl.Gene.ID%in%module$Late.embryo,]
    moduleList<-list(earlyM,middleM,lateM)
    moduleDF<-rbind(earlyM,middleM,lateM)
    moduleNames<-names(module)
    legendName<-"Murinae branch"
  }
  ## fourth-root lrt per module:
  ## "weak" evidence = lrt > 0; "strong" = lrt > 0 and qvalue < 0.2
  lrt_weak <- lapply(moduleList, function(x) sqrt(sqrt(x$lrt[x$lrt>0])))
  lrt_strong <- lapply(moduleList, function(x) sqrt(sqrt(x$lrt[x$lrt>0&x$qvalue<0.2])))
  #####* test proportion of positive selection *#####
  allGeneNum<-unlist(lapply(moduleList, nrow))
  lrt_weak_GeneNum<-unlist(lapply(lrt_weak, length))
  lrt_strong_GeneNum<-unlist(lapply(lrt_strong, length))
  lrt_weak_prop<-lrt_weak_GeneNum/allGeneNum
  lrt_strong_prop<-lrt_strong_GeneNum/allGeneNum
  ## chi-square goodness-of-fit: are positively selected genes spread over
  ## modules in proportion to module size?
  expected<-allGeneNum/sum(allGeneNum)
  observed_weak<-lrt_weak_GeneNum
  observed_strong<-lrt_strong_GeneNum
  chq_weak<-chisq.test(x = observed_weak,p = expected)
  chq_strong<-chisq.test(x = observed_strong,p = expected)
  ## organism-specific plot limits and colours
  ## NOTE(review): ylim_boxplot and boxColor are set but never used below.
  if (orgName=="D.melanogaster") {
    ylim_lrt_weak <- c(0,1)
    ylim_lrt_strong <- c(0,0.1)
    text_y<-c(-0.003,0.003)
    ylim_boxplot<-c(-0.2,3)
    boxColor<-c("grey","grey","grey","grey","red")
  }
  if (orgName=="D.rerio") {
    ylim_lrt_weak<-c(0,1)
    ylim_lrt_strong<-c(0,0.4)
    text_y<-c(-0.015,0.015)
    ylim_boxplot<-c(-0.2,3.5)
    boxColor<-c("grey","grey","grey","blue","grey","red","red")
  }
  if (orgName=="M.musculus") {
    ylim_lrt_weak <- c(0,1)
    ylim_lrt_strong <- c(0,0.02)
    text_y<-c(-0.0006,0.0006)
    ylim_boxplot<-c(-0.2,2.5)
    boxColor<-c("grey","blue","red")
  }
  ## barplot: proportion of genes with lrt > 0 per module
  prop<-barplot(lrt_weak_prop,ylim=ylim_lrt_weak,col=3,ylab=expression(paste("Proportion of genes with", " ", paste(Delta,"lnL"), " > 0")), main=legendName)
  legend("topleft",legend=paste0("p=",signif((chq_weak$p.value),2)), bty = 'n',cex = 1)
  text(x =prop, y = -0.04, srt = 45,cex.lab=1, adj = 1, labels = moduleNames, xpd = TRUE)
  text(x=prop, y=0.04, labels=paste0("n=", allGeneNum))
  ## barplot: proportion of genes with lrt > 0 and qvalue < 0.2 per module
  prop<-barplot(lrt_strong_prop,ylim=ylim_lrt_strong,col=3,ylab=expression(paste("Proportion of genes with", " ", paste(Delta,"lnL"), " > 0")), main=legendName)
  legend("topleft",legend=paste0("p=",signif((chq_strong$p.value),2)), bty = 'n',cex = 1)
  text(x =prop, y = text_y[1], srt = 45,cex.lab=1, adj = 1, labels = moduleNames, xpd = TRUE)
  text(x=prop, y=text_y[2], labels=paste0("n=", allGeneNum))
  #####* test strength of positive selection *#####
  ## permutation test: compare each module's mean fourth-root lrt with the
  ## distribution of means of 10,000 same-sized random draws from the pool
  meanDistri<-c()
  p_temp<-c()
  pValue<-c()
  for (i in seq_along(lrt_weak)) {
    randomModule<-replicate(10000,sample(unlist(lrt_weak),length(lrt_weak[[i]]),replace=FALSE))
    meanDistri<-apply(randomModule,2,mean)
    meanLrt<-mean(meanDistri)
    sdLrt<-sd(meanDistri)
    # one-sided normal-approximation p-value in the direction of deviation
    if (meanLrt > mean(lrt_weak[[i]]) ) {
      p_temp<-pnorm(mean(lrt_weak[[i]]),meanLrt,sdLrt,lower.tail = TRUE)
    } else {
      p_temp<-pnorm(mean(lrt_weak[[i]]),meanLrt,sdLrt,lower.tail = FALSE)
    }
    pValue<-cbind(pValue,p_temp)
  }
  # NOTE(review): pValue is accumulated but never used or returned.
  ## jitter plot of fourth-root lrt per module (genes with lrt > 0 only);
  ## crossbars mark module means, green dashed line the overall mean
  moduleDF<-subset(moduleDF,lrt>0)
  moduleDF$lrt<-sqrt(sqrt(moduleDF$lrt))
  moduleDF$moduleNames<-rep(moduleNames,unlist(lapply(lrt_weak, length)))
  moduleDF$moduleNames<-as.character(moduleDF$moduleNames)
  moduleDF$moduleNames<-factor(moduleDF$moduleNames,levels=unique(moduleDF$moduleNames))
  if (orgName=="D.melanogaster") {
    color<-c("blue","black","black","black","red")
    y1 <- -0.5
    y2 <- 3
  }
  if (orgName=="M.musculus") {
    color<-c("black","blue","red")
    y1 <- -0.25
    y2 <- 2.5
  }
  if (orgName=="D.rerio") {
    color<-c("black","black","black","blue","black","red","black")
    y1 <- -0.5
    y2 <- 3.5
  }
  # NOTE(review): fun.y/fun.ymin/fun.ymax are deprecated in ggplot2 >= 3.3
  # (renamed fun/fun.min/fun.max) but still work; kept for compatibility.
  print(
    ggplot(data=moduleDF,aes(x=moduleNames,y=lrt,color=moduleNames))+geom_jitter(position=position_jitter(0.1),pch=16,cex=2,alpha = 0.5) +
      ylim(y1,y2)+stat_summary(fun.y = mean,fun.ymin = mean, fun.ymax = mean, geom = "crossbar", width = 0.5,color="black")+
      scale_color_manual(values=color) +
      labs(title=legendName,x="", y = expression(paste("Fourth root"," ",(paste(Delta,"lnL")))),size=20)+
      theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
            panel.background = element_blank(),axis.line = element_line(colour = "black"),
            axis.text.y = element_text(size=18,color="black"),
            axis.title.y = element_text(color="black", size=18, face="bold"),
            legend.position="none",axis.text.x=element_text(angle = 45, hjust = 1,size=18), plot.title = element_text(hjust = 0.5,size=20,face="bold"))+
      geom_hline(yintercept = mean(moduleDF$lrt),colour="green",linetype=2,size=1)
  )
}
# Run the module-level positive-selection tests for each organism.
# `h=T` relied on partial argument matching plus the reassignable `T`;
# spelled out as header = TRUE.
## fly: Melanogaster group branch
selectome <- read.table("data/selectome/fly_melanogaster_group_selectome.txt", sep = "\t", header = TRUE)
module <- read.table("results/modules/fly_module_genes.txt", sep = "\t", header = TRUE)
moduletest(module, selectome, "D.melanogaster")
## zebrafish: Clupeocephala branch
selectome <- read.table("data/selectome/zf_clupeocephala_selectome.txt", sep = "\t", header = TRUE)
module <- read.table("results/modules/zf_module_genes.txt", sep = "\t", header = TRUE)
moduletest(module, selectome, "D.rerio")
## mouse: Murinae branch
selectome <- read.table("data/selectome/mouse_murine_selectome.txt", sep = "\t", header = TRUE)
module <- read.table("results/modules/mouse_module_genes.txt", sep = "\t", header = TRUE)
moduletest(module, selectome, "M.musculus")
####* transcriptome index analysis *####
library(RColorBrewer)
myPalette <- brewer.pal(9, "Set1")
# Compute and plot a "transcriptome index" of positive selection: at each
# developmental time point, the expression-weighted mean of the fourth-root
# likelihood-ratio statistic (delta lnL) across genes, with a 10,000-resample
# gene bootstrap to compare developmental periods.
#
# @param transcriptome data frame: Ensembl.Gene.ID plus one expression column
#                      per time point (columns addressed via `timePoint`).
# @param selectome     data frame with Ensembl.Gene.ID and lrt columns.
# @param orgName       "D.melanogaster", "M.musculus" or "D.rerio"; selects
#                      time points, labels, colours and plot limits.
# Side effects: a boxplot of bootstrapped period means and a polynomial-
# regression plot of the index over (log) developmental time.
#
# Fixes vs. the original:
#  * the header read `transIndex(transcriptome,selectome,orgName) {` -- the
#    `<- function` was missing, a syntax error;
#  * the local result no longer shadows the function name (renamed `index`);
#  * the bootstrap matrix is preallocated instead of grown with rbind();
#  * regression coefficients no longer bound to a, b, c, ... (which shadowed
#    base::c); "Transcriptoem"/"Middle.devlopment" label typos corrected;
#  * T/F spelled out, seq_along() instead of 1:length().
transIndex <- function(transcriptome, selectome, orgName) {
  ## organism-specific constants: time-point columns, axis labels/positions,
  ## meta-stage colours, module -> time-point grouping, plot limits
  if (orgName=="D.melanogaster") {
    timePoint<-c(2:28)
    devTime1<-log2(c(seq(2,24,by=2),44,70,96,108,120,132,144,156,168,192,216,240,264,360,960 ))
    devTime2<-c("2h","4h","6h","8h","10h","12h","14h","","18h","","22h","","2d","3d","4d","","5d",
                "","6d","","7d","8d","9d","","11d","15d","40d")
    devTimeColor<-c(rep(myPalette[2],3),rep(myPalette[1],2),rep(myPalette[3],19),rep(myPalette[4],3))
    modules <- list(Early.development = 1:3, Middle.development = 4:5, Late.development = 6:24, Adult = 25:27)
    legendName<-"Melanogaster group branch"
    ylimBoxplot<-c( 0.64, 0.76)
    ytext <- 0.631
    ylimRegression<-c(0.67,0.73)
  }
  if (orgName=="M.musculus") {
    timePoint<-c(2:18)
    devTime1<-c(0.5,1.5,2,3.5,7.5,8.5,9,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5)
    devTime2<-c("0.5d","1.5d","2d","3.5d","7.5d","8.5d","9d","9.5d","10.5d","11.5d","12.5d","13.5d","14.5d","15.5d","16.5d","17.5d","18.5d")
    devTimeColor<-c(myPalette[5],rep(myPalette[2],4),rep(myPalette[1],6),rep(myPalette[3],6))
    modules <- list(Maternal.stage = 1, Early.development = 2:5, Middle.development = 6:11, Late.development = 12:17)
    legendName<-"Murinae branch"
    ylimBoxplot<-c(0.25,0.33)
    ylimRegression<-c(0.273,0.295)
    ytext<-0.2435
  }
  if (orgName=="D.rerio") {
    timePoint<-c(3:61) ## remove unfertilized egg
    devTime1<-log2(c(0.25,0.75,1.25,1.75,2.25,2.75,3.33,4,4.66,5.33,6,7,8,9,10,10.33,11,11.66,
                     12,13,14,15,16,17,18,19,20,21,22,23,25,27,30,34,38,42,48,60,72,96,144,192,240,
                     336,432,720,960,1080,1320,1560,1920,2160,2520,2880,5040,6480,10080,12960,15120))
    devTime2<-c("0.25h","0.75h","1.25h","1.75h","2.25h","","3.5h","","","5.5h","","","8h","","","","","11h",
                "","","","","","","18h","","","","","","25h","","","","38h","","2d","","3d","4d","6d","","10d",
                "","18d","30d","40d","","55d","","80d","","3.5m","","7m","9m","1y2m","","1y9m")
    devTimeColor<-c(rep(myPalette[5],4),rep(myPalette[2],11),rep(myPalette[1],21),rep(myPalette[3],16),rep(myPalette[4],7))
    modules <- list(Maternal.stage = 1:4, Early.development = 5:15, Middle.development = 16:36, Late.development = 37:52, Adult = 53:59)
    legendName<-"Clupeocephala branch"
    ylimBoxplot<-c(1.03,1.16)
    ylimRegression<-c(1.075,1.10)
    ytext <- 1.02
  }
  ## transcriptome index: expression-weighted mean of fourth-root lrt
  transSelec<- merge(transcriptome, selectome, by="Ensembl.Gene.ID")
  transSelec$lrt <- ifelse(transSelec$lrt<0,0, transSelec$lrt)   # negative lrt -> 0
  transSelec$lrt <- sqrt(sqrt(transSelec$lrt))                   # fourth root
  index <- apply(transSelec[timePoint], 2, function(x) sum(x*(transSelec[,"lrt"]))/sum(x))
  ## bootstrap: resample genes with replacement, 10,000 times
  cat("\nbootstrap analysis...")
  bootGeneID<-replicate(10000,sample(transSelec$Ensembl.Gene.ID,replace=TRUE))
  # preallocated; the original grew this matrix with rbind() inside the loop
  transIndexBoot <- matrix(NA_real_, nrow = 10000, ncol = length(timePoint))
  for (i in 1:10000) {
    tempID<-data.frame(bootGeneID[,i])
    names(tempID)<-"Ensembl.Gene.ID"
    tempTransSelec<-merge(tempID,transSelec,by="Ensembl.Gene.ID")
    transIndexBoot[i, ] <- apply(tempTransSelec[timePoint],2, function(x) sum(x*(tempTransSelec[,"lrt"]))/sum(x))
  }
  ## mean index per developmental period for each bootstrap replicate
  meanIndex<-c()
  meanIndex1<-c()
  for (i in 1:10000) {
    meanIndex1 <- lapply( modules,function(x) mean(transIndexBoot[i,][x]) )
    meanIndex <- rbind(meanIndex,meanIndex1)
  }
  ## pairwise Wilcoxon tests between period means (FDR-corrected)
  pt<-c()
  pt$meanIndex<-unlist(meanIndex)
  pt<-data.frame(pt)
  pt$group <- rep(seq_along(modules), each = 10000)
  pairwiseWT<-pairwise.wilcox.test( pt$meanIndex,pt$group,p.adjust.method = "fdr")
  ## reshape the triangular p-value matrix into a full symmetric table
  ## NOTE(review): pValue is built for inspection only; not used below.
  pValue<-c()
  if (orgName=="D.rerio") {
    pValue$one<-c(1,pairwiseWT$p.value[,1])
    pValue$second<-c(pairwiseWT$p.value[,1][1],1,pairwiseWT$p.value[,2][c(2:4)])
    pValue$third<-c(pairwiseWT$p.value[,1][2],pairwiseWT$p.value[,2][2],1,pairwiseWT$p.value[,3][c(3:4)])
    pValue$forth<-c(pairwiseWT$p.value[,1][3],pairwiseWT$p.value[,2][3],pairwiseWT$p.value[,3][3],1,pairwiseWT$p.value[,4][4])
    pValue$fifth<-c(pairwiseWT$p.value[4,],1)
    pValue<-data.frame(pValue)
    colnames(pValue)<-names(modules)
    rownames(pValue)<-names(modules)
  } else {
    pValue$one<-c(1,pairwiseWT$p.value[,1])
    pValue$second<-c(pairwiseWT$p.value[,1][1],1,pairwiseWT$p.value[,2][c(2:3)])
    pValue$third<-c(pairwiseWT$p.value[,1][2],pairwiseWT$p.value[,2][2],1,pairwiseWT$p.value[,3][3])
    pValue$forth<-c(pairwiseWT$p.value[3,],1)
    pValue<-data.frame(pValue)
    colnames(pValue)<-names(modules)
    rownames(pValue)<-names(modules)
  }
  ## boxplot of bootstrapped period means (ncol generalized from the
  ## hard-coded 5-vs-4 branch; unlist() fills column-major, one period each)
  boxplotData <- matrix(unlist(meanIndex), ncol = length(modules), nrow = 10000)
  boxplot(boxplotData,las=2,ylim=ylimBoxplot,pch=16,outcex=0.5,boxwex=0.7, xaxt = "n",main=legendName,cex.lab=1.2,cex.main=1.2,
          col=c(unique(devTimeColor)),ylab =expression(paste("Transcriptome index of fourth root"," ",(paste(Delta,"lnL")))))
  text(x = seq_along(names(modules)), y = ytext, srt = 45,cex.lab=1.2,adj = 1, labels = names(modules), xpd = TRUE)
  ## polynomial regression of the index on developmental time:
  ## degree 4 for zebrafish (many time points), degree 2 otherwise
  if (orgName=="D.rerio") {
    fit <- lm(unlist(index) ~ poly(devTime1, 4, raw=TRUE))
  } else {
    fit <- lm(unlist(index) ~ poly(devTime1, 2, raw=TRUE))
  }
  fitSummary <- summary(fit)
  cf <- fitSummary$coef[, 1]                  # intercept + polynomial coefficients
  r2 <- signif(fitSummary$adj.r.squared, 2)
  f <- fitSummary$fstatistic
  # fitted polynomial as a function of time; works for either degree
  polyModel <- function(x) {
    out <- cf[[1]]
    for (k in 2:length(cf)) {
      out <- out + cf[[k]] * x^(k - 1)
    }
    out
  }
  curve(polyModel, min(devTime1), max(devTime1), col="black",xlab="Time",
        ylab=expression(paste("Transcriptome index of fourth root"," ",(paste(Delta,"lnL")))),
        ylim=ylimRegression, main=legendName,xaxt="n",lwd=6,lty=1,cex.lab=1.2,cex.axis=1.2,cex.main=1.2)
  points(devTime1, unlist(index), pch=16, lwd=6)
  # development stages as axis labels; each colour marks one meta stage
  for (j in seq_along(timePoint)) {
    axis(side=1, at=devTime1[j], col.axis=devTimeColor[j], labels=devTime2[j], las=2,cex.axis=1.2)
  }
  ## annotate with adjusted R^2 and the overall-F p-value
  myP<-signif(pf(f[1],f[2],f[3],lower.tail=FALSE), 2)
  rp <- vector('expression',2)
  rp[1] <- substitute(expression(R^2 == MYVALUE),
                      list(MYVALUE = format(r2, digits = 3)))[2]
  rp[2] <- substitute(expression(p == MYOTHERVALUE),
                      list(MYOTHERVALUE = format(myP, digits = 2)))[2]
  legend("topleft",legend=rp, bty = 'n',cex = 1.2,col=c("black","white"),lty=c(1,1),lwd=c(2,2))
}
# Run the transcriptome-index analysis for each organism.
# Bug fix: the original called moduletest() here, passing `transcriptome`
# where moduletest expects a module gene table; this section belongs to
# transIndex() (defined above and otherwise never called), whose signature
# is (transcriptome, selectome, orgName). Also spelled out header = TRUE
# instead of the partially matched h=T.
## fly: Melanogaster group branch
selectome <- read.table("data/selectome/fly_melanogaster_group_selectome.txt", sep = "\t", header = TRUE)
transcriptome <- read.table("data/expression/fly_RNAseq_Log2.txt", sep = "\t", header = TRUE)
transIndex(transcriptome, selectome, "D.melanogaster")
## zebrafish: Clupeocephala branch
selectome <- read.table("data/selectome/zf_clupeocephala_selectome.txt", sep = "\t", header = TRUE)
transcriptome <- read.table("data/expression/zf_Microarray_Log2.txt", sep = "\t", header = TRUE)
transIndex(transcriptome, selectome, "D.rerio")
## mouse: Murinae branch
selectome <- read.table("data/selectome/mouse_murine_selectome.txt", sep = "\t", header = TRUE)
transcriptome <- read.table("data/expression/mouse_RNAseq_Log2.txt", sep = "\t", header = TRUE)
transIndex(transcriptome, selectome, "M.musculus")
#####* pathway analysis *#####
# Plot, over developmental time, the ratio of median expression of genes in
# positively selected pathways vs genes in the remaining pathways, with a
# polynomial regression fit (degree 5 for mouse, degree 4 otherwise).
#
# Args:
#   orgName:            one of "D.melanogaster", "M.musculus", "D.rerio";
#                       selects the time axis, labels and colours.
#   pathwaySel:         pathway selection scores; must have setQ and setID.orig.
#   pathwayID_geneID:   mapping setID -> geneID (entrez).
#   ensemblID_entrezID: mapping geneID (entrez) -> Ensembl.Gene.ID.
#   transcriptome:      expression matrix keyed by Ensembl.Gene.ID, one column
#                       per developmental time point.
# Side effect: draws a base-graphics plot on the current device.
pathwaySelAna<-function(orgName,pathwaySel,pathwayID_geneID,ensemblID_entrezID,transcriptome) {
  ## retrieve positive selected pathways (FDR q < 0.2 on the set score)
  positiveSelID<-subset(pathwaySel,setQ<0.2)$setID.orig
  ## retrieve genes of positive selected pathways
  positiveGeneID<-lapply(positiveSelID, function(x) subset(pathwayID_geneID,setID==x)$geneID)
  positiveGeneID<-data.frame(unique(unlist(positiveGeneID)))
  names(positiveGeneID)<-"geneID"
  positiveGeneID<-merge(positiveGeneID,ensemblID_entrezID,by="geneID")
  ## retrieve genes of non-positive selected pathways
  # NOTE(review): this takes the union over all positive sets of "genes in any
  # OTHER pathway", which still includes genes of positively selected pathways
  # whenever there is more than one positive set -- confirm this is intended
  # (the two gene lists are not disjoint).
  nonPositiveGeneID<-lapply(positiveSelID, function(x) subset(pathwayID_geneID,setID!=x)$geneID)
  nonPositiveGeneID<-data.frame(unique(unlist(nonPositiveGeneID)))
  names(nonPositiveGeneID)<-"geneID"
  nonPositiveGeneID<-merge(nonPositiveGeneID,ensemblID_entrezID,by="geneID")
  nonPosGeneID<-nonPositiveGeneID
  ## retrieve gene expression and summarise per time point (columns 3..end are
  ## the expression columns after the two ID columns from the merge)
  posExpression<-merge(positiveGeneID,transcriptome, by="Ensembl.Gene.ID")
  medianPosExp<-apply(posExpression[3:length(posExpression)],2, function(x) median(x))
  nonPosExpression<-merge(nonPosGeneID,transcriptome, by="Ensembl.Gene.ID")
  medianNonPosExp<-apply(nonPosExpression[3:length(nonPosExpression)],2, function(x) median(x))
  # ratio > 1 means positively selected genes are more highly expressed
  ratio<-medianPosExp/medianNonPosExp
  # Per-species time axis (log2 hours for fly/zf, days for mouse), axis labels,
  # per-metastage label colours, plot title and y-limits. The number of entries
  # must match the number of expression columns in `transcriptome`.
  if (orgName=="D.melanogaster") {
    devTime1<-log2(c(seq(2,24,by=2),44,70,96,108,120,132,144,156,168,192,216,240,264,360,960 ))
    devTime2<-c("2h","4h","6h","8h","10h","12h","14h","","18h","","22h","","2d","3d","4d","","5d",
                "","6d","","7d","8d","9d","","11d","15d","40d")
    devTimeColor<-c(rep(myPalette[2],3),rep(myPalette[1],2),rep(myPalette[3],19),rep(myPalette[4],3))
    legendName<-"Melanogaster group branch"
    ylimRegression<-c(0.8,1.2)
  }
  if (orgName=="M.musculus") {
    devTime1<-c(1.5,2,3.5,7.5,8.5,9,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5)
    devTime2<-c("1.5d","2d","3.5d","7.5d","8.5d","9d","9.5d","10.5d","11.5d","12.5d","13.5d","14.5d","15.5d","16.5d","17.5d","18.5d")
    devTimeColor<-c(rep(myPalette[2],4),rep(myPalette[1],6),rep(myPalette[3],6))
    legendName<-"Murinae branch"
    ylimRegression<-c(0.1,0.9)
  }
  if (orgName=="D.rerio") {
    devTime1<-log2(c(0.25,0.75,1.25,1.75,2.25,2.75,3.33,4,4.66,5.33,6,7,8,9,10,10.33,11,11.66,
                     12,13,14,15,16,17,18,19,20,21,22,23,25,27,30,34,38,42,48,60,72,96,144,192,240,
                     336,432,720,960,1080,1320,1560,1920,2160,2520,2880,5040,6480,10080,12960,15120))
    devTime2<-c("0.25h","0.75h","1.25h","1.75h","2.25h","","3.5h","","","5.5h","","","8h","","","","","11h",
                "","","","","","","18h","","","","","","25h","","","","38h","","2d","","3d","4d","6d","","10d",
                "","18d","30d","40d","","55d","","80d","","3.5m","","7m","9m","1y2m","","1y9m")
    devTimeColor<-c(rep(myPalette[5],4),rep(myPalette[2],11),rep(myPalette[1],21),rep(myPalette[3],16),rep(myPalette[4],7))
    legendName<-"Clupeocephala branch"
    ylimRegression<-c(0.65,1.1)
  }
  ## regression plot (polynomial model start with degree 3, progressively increase degree until the improvement is not significant (anova test > 0.05))
  # The coefficients a..h are plain numeric values; the eval() wrappers in
  # polyModel are redundant but harmless.
  if (orgName=="M.musculus") {
    lmd5 <- lm(unlist(ratio) ~ poly(devTime1, 5, raw=TRUE))
    a<-summary(lmd5)$coef[,1][[1]]
    b<-summary(lmd5)$coef[,1][[2]]
    c<-summary(lmd5)$coef[,1][[3]]
    d<-summary(lmd5)$coef[,1][[4]]
    e<-summary(lmd5)$coef[,1][[5]]
    h<-summary(lmd5)$coef[,1][[6]]
    r2<-signif(summary(lmd5)$adj.r.squared, 2)
    f<-summary(lmd5)$fstatistic
    polyModel <- function(x) { eval(a) + eval(b)*x + eval(c)*x^2+ eval(d)*x^3+ eval(e)*x^4+ eval(h)*x^5}
  } else {
    lmd4 <- lm(unlist(ratio) ~ poly(devTime1, 4, raw=TRUE))
    a<-summary(lmd4)$coef[,1][[1]]
    b<-summary(lmd4)$coef[,1][[2]]
    c<-summary(lmd4)$coef[,1][[3]]
    d<-summary(lmd4)$coef[,1][[4]]
    e<-summary(lmd4)$coef[,1][[5]]
    r2<-signif(summary(lmd4)$adj.r.squared, 2)
    f<-summary(lmd4)$fstatistic
    polyModel <- function(x) { eval(a) + eval(b)*x + eval(c)*x^2+ eval(d)*x^3+ eval(e)*x^4}
  }
  # Draw fitted curve, observed points, and coloured stage labels on the x axis
  curve(polyModel, min(devTime1), max(devTime1), col="cyan3",xlab="Time",
        ylab="Ratio of median expression",
        ylim=ylimRegression, main=legendName,xaxt="n",lwd=6,lty=1,cex.lab=1.2,cex.axis=1.2,cex.main=1.2)
  points(devTime1, unlist(ratio), pch=16, lwd=6,col="cyan3")
  for (j in 1:length(devTime1)) {
    axis(side=1, at=devTime1[j], col.axis=devTimeColor[j], labels=devTime2[j], las=2,cex.axis=1.2) # Add development stages as labels, each color represents one meta development stage
  }
  # Overall model p-value from the F statistic; legend shows adj. R^2 and p
  myP<-signif(pf(f[1],f[2],f[3],lower.tail=F), 2)
  rp = vector('expression',2)
  rp[1] = substitute(expression(R^2 == MYVALUE),
                     list(MYVALUE = format(r2,dig=3)))[2]
  rp[2] = substitute(expression(p == MYOTHERVALUE),
                     list(MYOTHERVALUE = format(myP, digits = 2)))[2]
  legend("topleft",legend=rp, bty = 'n',cex = 1.2,col=c("black","white"),lty=c(1,1),lwd=c(2,2))
}
## Run the pathway-selection expression analysis per species. Each block loads
## the pathway set scores, the pathway->gene mapping, the ensembl<->entrez
## one-to-one mapping and the expression matrix, then calls pathwaySelAna().
## zf
pathwaySel<-read.table("data/pathway_selection/zf_clupeocephala_setscores_postpruning.txt",sep="\t",h=T,quote = "")
pathwayID_geneID<-read.table("data/pathway_selection/zf_pathwayID_geneID.txt",sep="\t",h=F,quote = "")
names(pathwayID_geneID)<-c("setID","setName","geneID")
ensemblID_entrezID<-read.table("data/pathway_selection/zf_ensembl_entrez_ID_one2one.txt",sep="\t",h=F)
names(ensemblID_entrezID)<-c("Ensembl.Gene.ID","geneID")
transcriptome<-read.table("data/expression/zf_Microarray_Log2.txt",sep="\t",h=T)
# Drop the first (egg, 0 min) column -- presumably to match the zf time axis
# defined inside pathwaySelAna; TODO confirm.
transcriptome$egg.0min<-NULL
pathwaySelAna("D.rerio",pathwaySel,pathwayID_geneID,ensemblID_entrezID,transcriptome)
## mouse
pathwaySel<-read.table("data/pathway_selection/mouse_murinae_setscores_postpruning.txt",sep="\t",h=T,quote = "")
# NOTE(review): unlike the zf file, this path has no ".txt" extension --
# verify the file name on disk.
pathwayID_geneID<-read.table("data/pathway_selection/mouse_pathwayID_geneID",sep="\t",h=F,quote = "")
names(pathwayID_geneID)<-c("setID","setName","geneID")
ensemblID_entrezID<-read.table("data/pathway_selection/mouse_ensembl_entrez_ID_one2one.txt",sep="\t",h=F)
names(ensemblID_entrezID)<-c("Ensembl.Gene.ID","geneID")
transcriptome<-read.table("data/expression/mouse_RNAseq_Log2.txt",sep="\t",h=T)
transcriptome$Mean.2cell<-NULL ## because the median expression of this time point is 0, we need to remove it
pathwaySelAna("M.musculus",pathwaySel,pathwayID_geneID,ensemblID_entrezID,transcriptome)
## fly
pathwaySel<-read.table("data/pathway_selection/fly_setscores_postpruning.txt",sep="\t",h=T,quote = "")
pathwayID_geneID<-read.table("data/pathway_selection/fly_pathwayID_geneID",sep="\t",h=F,quote = "")
names(pathwayID_geneID)<-c("setID","setName","geneID")
ensemblID_entrezID<-read.table("data/pathway_selection/fly_ensembl_entrez_ID_one2one.txt",sep="\t",h=F)
names(ensemblID_entrezID)<-c("Ensembl.Gene.ID","geneID")
transcriptome<-read.table("data/expression/fly_RNAseq_Log2.txt",sep="\t",h=T)
pathwaySelAna("D.melanogaster",pathwaySel,pathwayID_geneID,ensemblID_entrezID,transcriptome)
|
61ab1f246b710ba79074a8887568a00575ea40bc
|
78a663673b1fc7cba571b2de0f44e0de4959abce
|
/freesurfer/flag_outliers.R
|
c5b3dd5c7ef0713d05cc8bff8aa563561b134c2f
|
[] |
no_license
|
PennBBL/conte
|
5da93edc2c65ee4951ba934bb2a8e3ea6e155ff7
|
cb1838ea7406b274ee7355094d6a6eda790c0c0c
|
refs/heads/master
| 2021-01-20T17:39:46.855146
| 2017-06-14T20:08:27
| 2017-06-14T20:08:27
| 90,881,167
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,139
|
r
|
flag_outliers.R
|
# this is a subscript of QA.sh that should run at the end of the script.
# The other scripts called by QA.sh create some csvs with thickness, volume,
# surface area, and curvature. This script flags all of these based 2sd outliers.
# The measures that are flagged are based on comments here:
# http://saturn/wiki/index.php/QA
### ARGS ###
############
# First trailing command-line argument: the FreeSurfer SUBJECTS_DIR to scan.
subjects.dir<-commandArgs(TRUE)[1]
# Number of standard deviations beyond which a value is flagged as an outlier.
sdthresh<-2
### DIRS ###
############
#stats.dir<-file.path(subjects.dir, 'stats')
stats.dir<-file.path(subjects.dir)
aparc.dir<-file.path(stats.dir, 'aparc.stats')
aseg.dir<-file.path(stats.dir, 'aseg.stats')
area.dir<-file.path(stats.dir, 'aparc.stats/area')
curvature.dir<-file.path(stats.dir, 'aparc.stats/curvature')
### MEAN FILES ###
##################
# CSVs produced by the earlier steps of QA.sh (see header comment).
mean.file<-file.path(aparc.dir, 'bilateral.meanthickness.totalarea.csv')
cnr.file<-file.path(stats.dir, 'cnr/cnr_buckner.csv')
snr.file<-file.path(stats.dir, 'cnr/snr.txt')
aseg.volume.file<-file.path(aseg.dir, 'aseg.stats.volume.csv')
lh.thickness.file<-file.path(aparc.dir, 'lh.aparc.stats.thickness.csv')
rh.thickness.file<-file.path(aparc.dir, 'rh.aparc.stats.thickness.csv')
### READ MEAN DATA ###
######################
# Collapse left/right hemisphere summaries into bilateral mean thickness and
# total surface area, then drop the per-hemisphere columns.
mean.data<-read.csv(mean.file, strip.white=TRUE)
mean.data$meanthickness<-rowMeans(mean.data[, c('rh.meanthickness', 'lh.meanthickness')])
mean.data$totalarea<-rowSums(mean.data[, c('rh.totalarea', 'lh.totalarea')])
mean.data<-mean.data[,!(grepl('lh', names(mean.data)) | grepl('rh', names(mean.data)))]
# CNR file is headerless; V2 is assumed to be the scanid and V3 the CNR value
# -- TODO confirm against cnr_buckner.csv's actual column order.
cnr.data<-read.csv(cnr.file, strip.white=TRUE, header=FALSE)
full<-mean.data
full$cnr<- cnr.data$V3[match(full$scanid,cnr.data$V2)]
full$cnr<- as.numeric(as.character(full$cnr))
# the snr evaluation is not robust
# if it seems to have something wrong with it
# this will ignore it.
snr.data<-try(read.table(snr.file, strip.white=TRUE, header=FALSE, col.names=c('subject', 'snr')))
if(is.data.frame(snr.data)){
  # subject column looks like "bblid/scanid"; split it into two id columns
  snr.data[,c('bblid', 'scanid')]<-apply(do.call(rbind, strsplit(as.character(snr.data$subject), split="/")), 2, as.character)
  snr.data<-snr.data[,-1]
  full$snr<- snr.data$snr[match(full$scanid,snr.data$scanid)]
}
# Subcortical/cortical volumes; Measure.volume also encodes "bblid/scanid"
aseg.volume.data<-read.table(aseg.volume.file, strip.white=TRUE, header=TRUE)
aseg.volume.data[,c('bblid', 'scanid')]<-apply(do.call(rbind, strsplit(as.character(aseg.volume.data$Measure.volume), split="/")), 2, as.character)
aseg.volume.data<-aseg.volume.data[,c("bblid", "scanid", "SubCortGrayVol", "CortexVol", "CorticalWhiteMatterVol")]
full$SubCortGrayVol<- aseg.volume.data$SubCortGrayVol[match(full$scanid,aseg.volume.data$scanid)]
full$CortexVol<- aseg.volume.data$CortexVol[match(full$scanid,aseg.volume.data$scanid)]
full$CorticalWhiteMatterVol<- aseg.volume.data$CorticalWhiteMatterVol[match(full$scanid,aseg.volume.data$scanid)]
### READ IN THICKNESS DATA ###
##############################
# Per-ROI thickness tables for each hemisphere; the first column encodes
# "bblid/scanid" and is split then dropped before merging the hemispheres.
thickness.data<-read.table(lh.thickness.file, header=TRUE, strip.white=TRUE)
rh.thickness.data<-read.table(rh.thickness.file, header=TRUE, strip.white=TRUE)
thickness.data[,c('bblid', 'scanid')]<-apply(do.call(rbind, strsplit(as.character(thickness.data$lh.aparc.thickness), split="/")), 2, as.character)
rh.thickness.data[,c('bblid', 'scanid')]<-apply(do.call(rbind, strsplit(as.character(rh.thickness.data$rh.aparc.thickness), split="/")), 2, as.character)
rh.thickness.data<-rh.thickness.data[,-1]
thickness.data<-thickness.data[,-1]
thickness.data<-merge(thickness.data, rh.thickness.data, all=TRUE,by=c("scanid","bblid"))
rm('rh.thickness.data')
### CREATE DATA TO CALCULATE SD FROM ###
#########################################
# Build one 0/1 flag column per cortical ROI: a subject is flagged when its
# thickness in that ROI is more than `sdthresh` SDs from the cohort mean.
lh.names<-grep('lh', names(thickness.data), value=TRUE)
rh.names<-sub('lh', 'rh', lh.names)

# Flag values of column `colname` of thickness.data that lie outside
# mean +/- sdthresh * SD. Returns a character vector of "0"/"1" to match the
# encoding the rest of this script expects (converted to numeric below).
flag_sd_outliers <- function(colname) {
  vals <- thickness.data[, colname]
  spread <- sdthresh * sd(vals)
  flags <- rep("0", length(vals))
  flags[vals > mean(vals) + spread] <- "1"
  flags[vals < mean(vals) - spread] <- "1"
  flags
}

# The original duplicated the flagging loop for each hemisphere; both now use
# the same helper, and the flag tables are built directly instead of from an
# NA matrix.
tmp_lh <- data.frame(scanid = thickness.data$scanid)
for (i in lh.names) {
  tmp_lh[[i]] <- flag_sd_outliers(i)
}
tmp_rh <- data.frame(scanid = thickness.data$scanid)
for (i in rh.names) {
  tmp_rh[[i]] <- flag_sd_outliers(i)
}

# Combine hemispheres and coerce flags to numeric for the row sums.
tmp<- cbind(tmp_lh,tmp_rh[2:ncol(tmp_rh)])
tmp2<-data.frame(sapply(tmp[2:ncol(tmp)], function(x) as.numeric(as.character(x))))
tmp2<- cbind(tmp[1],tmp2)
###get number of thickness ROIs (sum of 1's just calculated for each subject)
# count number of outlying regions for each subject
thickness.data$noutliers.thickness.rois<-rowSums(tmp2[2:ncol(tmp2)])
####number of outliers in laterality for each subject
# For every ROI, compute the laterality index (lh - rh) / (lh + rh) and flag
# subjects whose index is more than `sdthresh` SDs from the cohort mean.
tmp_laterality <- data.frame(scanid = thickness.data$scanid)
for (i in lh.names) {
  # NOTE(review): this assumes right-hemisphere columns are named with an
  # underscore separator ("rh_<roi>") -- confirm against names(thickness.data),
  # since rh.names above were derived with sub('lh', 'rh', ...) instead.
  r_name <- paste("rh", substring(i, 4, 10000), sep = "_")
  # Hoist the laterality index: the original recomputed this expression six
  # times per ROI inside the threshold formulas.
  lat <- (thickness.data[, i] - thickness.data[, r_name]) /
    (thickness.data[, i] + thickness.data[, r_name])
  spread <- sdthresh * sd(lat)
  # Iterating over lh.names directly also avoids the 1:length() trap the
  # original seq(1, length(lh.names)) shared (c(1, 0) on empty input).
  flags <- rep(0, length(lat))
  flags[which(lat > mean(lat) + spread)] <- 1
  flags[which(lat < mean(lat) - spread)] <- 1
  tmp_laterality[[i]] <- flags
}
thickness.data$noutliers.lat.thickness.rois<-rowSums(tmp_laterality[2:ncol(tmp_laterality)])
###DO THE SAME FOR MEAN FLAGS
# Flag subjects whose global summary measures (mean thickness, total area,
# volumes, CNR, SNR) fall outside mean +/- sdthresh * SD across the cohort.
# NOTE(review): if the snr file failed to load above, `full` has no "snr"
# column and this loop will error; NA values in cnr/snr (unmatched scanids)
# would also break sd()/mean() here -- verify inputs are complete.
thickness.data.mean<-full
mean_names<- c('meanthickness', 'totalarea', "SubCortGrayVol", "CortexVol", "CorticalWhiteMatterVol", "cnr","snr")
tmp_mean<-data.frame(matrix(NA, nrow=(nrow(full)),ncol=length(mean_names)+1))
tmp_mean[1]<- full$scanid
colnames(tmp_mean)[1]<-"scanid"
colnames(tmp_mean)[2:ncol(tmp_mean)]<- mean_names
#then calculate the 2SD cut off for each mean and calculate if each subject in the full dataset is a SD outlier based on the threshold you set (sdthresh)
for (i in mean_names){
  sd_thresh<-(sdthresh*(sd(thickness.data.mean[,i])))
  sd_above_value<- mean(thickness.data.mean[,i])+sd_thresh
  sd_below_value<- mean(thickness.data.mean[,i])-sd_thresh
  # "0" = within bounds, "1" = outlier (character flags, like the ROI flags)
  tmp_mean[i]<- "0"
  tmp_mean[i][full[i]>sd_above_value]<- "1"
  tmp_mean[i][full[i]<sd_below_value]<- "1"
}
# Rename the flag columns to "<measure>_outlier" for the merge step below.
colnames(tmp_mean)[2:ncol(tmp_mean)]<- c(paste(mean_names, 'outlier', sep="_"))
### MERGE RESULTS OF ROI FLAGS WITH MEAN DATA ###
#################################################
# Keep only the per-subject outlier counts from the thickness table, then
# attach every flag column to `full` keyed by scanid.
thickness.data<-thickness.data[,c('bblid', 'scanid', 'noutliers.thickness.rois', 'noutliers.lat.thickness.rois')]
full$noutliers.thickness.rois<- thickness.data$noutliers.thickness.rois[match(full$scanid,thickness.data$scanid)]
full$noutliers.lat.thickness.rois<- thickness.data$noutliers.lat.thickness.rois[match(full$scanid,thickness.data$scanid)]
full$meanthickness_outlier<- tmp_mean$meanthickness_outlier[match(full$scanid,tmp_mean$scanid)]
full$totalarea_outlier<- tmp_mean$totalarea_outlier[match(full$scanid,tmp_mean$scanid)]
full$SubCortGrayVol_outlier<- tmp_mean$SubCortGrayVol_outlier[match(full$scanid,tmp_mean$scanid)]
full$CortexVol_outlier<- tmp_mean$CortexVol_outlier[match(full$scanid,tmp_mean$scanid)]
full$CorticalWhiteMatterVol_outlier<- tmp_mean$CorticalWhiteMatterVol_outlier[match(full$scanid,tmp_mean$scanid)]
full$cnr_outlier<- tmp_mean$cnr_outlier[match(full$scanid,tmp_mean$scanid)]
full$snr_outlier<- tmp_mean$snr_outlier[match(full$scanid,tmp_mean$scanid)]
### FLAG ON MEAN, CNR, SNR, AND NUMBER OF ROI FLAGS ###
#######################################################
# NOTE(review): `flags` is computed here but never used below -- dead code?
flags<-names(full)[which(!names(full) %in% c('bblid', 'scanid'))]
### WRITE DATA OUT ###
######################
# Finally flag subjects whose outlier COUNTS are themselves > sdthresh SDs
# above the cohort mean (z-scored via scale()), and write everything to a
# single CSV named by the number of subjects.
noutliers.flags<-grep('noutlier', names(full), value=T)
full[,paste(noutliers.flags, 'outlier', sep="_")]<-as.numeric(scale(full[,noutliers.flags])>sdthresh)
write.csv(full, file.path(stats.dir, paste('all.flags.n' , nrow(full),'.csv', sep='')), quote=FALSE, row.names=FALSE)
cat('wrote file to', file.path(stats.dir, paste('all.flags.n' , nrow(full),'.csv', sep='')), '\n')
|
a015b12d061a0f32f389f612161f3c8c8b8dec33
|
6d73e84867b990ee17e885e1975e8eb905025837
|
/Descarga_de_datos.R
|
c3e6c17d3f3afac794a1b9edbb693cf6576a37c6
|
[] |
no_license
|
Juliansrami99/dashboard
|
577ada18b1d2a87a7b305406da00dbf73588c494
|
8876f42b7a7e99e6c6bf62680a6add67e0c6866c
|
refs/heads/master
| 2020-09-15T12:20:57.241524
| 2019-11-22T16:27:46
| 2019-11-22T16:27:46
| 223,443,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,347
|
r
|
Descarga_de_datos.R
|
library(timeSeries)
library(PerformanceAnalytics)
library(FRAPO)
library(fPortfolio)
library(quantmod)
library(dplyr)
# Convert a quantmod OHLCV xts object into a two-column data frame
# (DATES, PRECIO) holding the 4th (close) column, deduplicated by date,
# with the dates as row names.
convertir <- function(tabla) {
  fechas <- as.data.frame(index(tabla))
  # Drop Open/High/Low/Volume (columns 1-3 and 5-6), keeping Close.
  precios <- as.data.frame(as.data.frame(tabla)[, -c(1, 2, 3, 5, 6)])
  combinado <- as.data.frame(cbind(fechas, precios))
  colnames(combinado) <- c("DATES", "PRECIO")
  combinado$DATES <- as.Date(combinado$DATES)
  # Keep only the first row observed for each date.
  sin_duplicados <- combinado %>% distinct(DATES, .keep_all = TRUE)
  rownames(sin_duplicados) <- sin_duplicados$DATES
  sin_duplicados
}
# Convert a factor whose levels are numeric strings into a numeric vector
# (indexing the parsed levels avoids re-parsing every element).
as.numeric.factor <- function(x) {
  as.numeric(levels(x))[x]
}
### Drawdowns of the selected data
# Compute the discrete-return drawdown series for a (DATES, PRECIO) data frame
# produced by convertir(). Returns a data frame (DATES, DRAWDOWNS); the first
# date is dropped because computing returns loses one observation.
# NOTE(review): PerformanceAnalytics:::Drawdowns is an internal (:::) API and
# the "TS.1" column name is assumed -- both may break across package versions.
drawdowns_j<-function(tabla){
  dias=tabla$DATES
  # Build a timeSeries from the price column, indexed by the date row names.
  base_serie<-timeSeries(tabla$PRECIO, charvec = rownames(tabla))
  base_retornos<-na.omit(returnseries(base_serie,method = "discrete",
                                      percentage = FALSE, trim = FALSE))
  base_draw=PerformanceAnalytics:::Drawdowns(base_retornos)
  # Align dates with the return series (one fewer observation than prices).
  dias=dias[-1]
  tabla=cbind(dias,base_draw$TS.1)
  tabla=as.data.frame(tabla)
  colnames(tabla)=c("DATES","DRAWDOWNS")
  tabla$DATES=as.Date(tabla$DATES)
  return(tabla)
}
# Download daily price history for each ticker in `vec` from Yahoo Finance,
# convert each series to a (DATES, PRECIO) data frame via convertir(), and
# return the drawdown series (drawdowns_j()) for every ticker.
#
# Args:
#   vec: character vector of ticker symbols.
# Returns:
#   A list of data frames (DATES, DRAWDOWNS), one per successfully downloaded
#   ticker; tickers that fail to download are dropped with a warning.
todos_datos <- function(vec) {
  total <- vector("list", length(vec))
  for (j in seq_along(vec)) {
    # BUG FIX: the original discarded the tryCatch() result, so on a failed
    # download the previous iteration's `a` (or an undefined object on the
    # first iteration) was silently reused. Capture the result and record
    # NULL for failed symbols instead.
    a <- tryCatch(
      getSymbols(vec[j], src = "yahoo", from = "2000-12-12",
                 auto.assign = FALSE, env = NULL),
      error = function(e) {
        warning("Download failed for ", vec[j], ": ", conditionMessage(e),
                call. = FALSE)
        NULL
      }
    )
    if (!is.null(a)) {
      total[[j]] <- na.omit(a)
    }
  }
  # Drop failed downloads, then convert prices and compute drawdowns.
  total <- Filter(Negate(is.null), total)
  precios <- lapply(total, convertir)
  drawdowns <- lapply(precios, function(p) drawdowns_j(na.omit(p)))
  return(drawdowns)
}
# Universe of large-cap tickers whose drawdown series are downloaded below.
# NOTE(review): "OCOM" looks like a typo for "QCOM", and "CVX" appears twice
# (rows 2 and 4); several symbols (UTX, RTN, CELG, AGN, ...) have since been
# delisted/renamed and will fail to download -- verify the list.
vec<-c("MSFT","AAPL","AMZN","FB","XOM","JNJ","V","GOOG",
       "PG","CVX","VZ","T","PFE","MA","UNH","DIS",
       "CSCO","HD","CVX","KO","MRK","PEP","INTC","MCD","CMCSA",
       "BA","WMT","BABA","NFLX","ABT","MDT","ORCL","ADBE","ACN",
       "IBM","PM","PYPL","HON","UNP","COST","NEE","SBUX",
       "LIN","TMO","LLY","CRM","AVGO","TXN","ABBV","UTX",
       "DHR","NKE","MO","MMM","NVDA","OCOM","AMT","GE",
       "GILD","ADP","TJX","SYK","BKNG","DUK","ENB","WM",
       "BDX","LOW","SO","ANTM","CAT","MDLZ","D","UPS","CB",
       "CNI","NEM","CVS","COP","CL","CHTR","BSX","CELG",
       "CI","FISV","ED","ISRG","CSX","PGR","KMB","DD","RTN",
       "PSA","SU","AGN")
# Download everything and compute the drawdown list (network-bound; slow).
lista_descarga=todos_datos(vec)
|
4e9489af1632e2bada92918bc8aeb031d36f3a69
|
27b6a72a64a83ad6e3f48f9b8836be7aae3fd163
|
/rcp.4c.rangerSubs.R
|
5d00f824e802f6752150d0ee1373b97c235ef97c
|
[] |
no_license
|
richardparrow/GSwag
|
4815991f90c66f923c9d6b90963f514d03c2c086
|
a3eda1671fce583824cf065a8cef603b1da84049
|
refs/heads/master
| 2020-03-28T21:46:29.933019
| 2018-09-26T16:12:28
| 2018-09-26T16:12:28
| 149,181,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,968
|
r
|
rcp.4c.rangerSubs.R
|
# libraries
library(ranger)
library(dplyr)
# loading
# The .RData files are assumed to define `rangerXTrain`/`rangerXTest` (and,
# presumably, `testIds` used below) -- TODO confirm against their contents.
load("modelMatrixTrain_ranger.RData")
load("modelMatrixTest_ranger.RData")
# Random Forest with ranger!
# Two forests differing only in mtry; importance is recorded for later
# inspection and 6 threads are used for fitting.
# model 1: num.trees = 1000; mtry = 100 > sqrt(p) --- 6h (num.threads = 6)
print(system.time(
  rfOne <- ranger(dependent.variable.name = "transactionRevenue",
                  data = rangerXTrain,
                  num.trees = 1000,
                  mtry = 100, # defaults to sqrt(p)
                  importance = "impurity",
                  num.threads = 6)
))
# model 2: num.trees = 1000; mtry = 200 > sqrt(p) --- ??? (num.threads = 6)
print(system.time(
  rfTwo <- ranger(dependent.variable.name = "transactionRevenue",
                  data = rangerXTrain,
                  num.trees = 1000,
                  mtry = 200, # > sqrt(p)
                  importance = "impurity",
                  num.threads = 6)
))
gc()
# Quick in-sample fit check: overlay the prediction densities of both forests
# (red = mtry 100, blue = mtry 200) on the observed revenue density (black),
# written to a PDF.
pdf(file="rcp.4c.fitComp.pdf")
rfFitOne = predict(rfOne, rangerXTrain)
rfFitOne = rfFitOne$predictions
rfFitTwo = predict(rfTwo, rangerXTrain)
rfFitTwo = rfFitTwo$predictions
plot(density(rangerXTrain[, "transactionRevenue"], from = 0, bw = 1))
lines(density(rfFitOne, from = 0, bw = 1), col = "red")
lines(density(rfFitTwo, from = 0, bw = 1), col = "blue")
dev.off()
gc()
# Submitting!
# Build one Kaggle submission per forest: aggregate the per-visit log
# predictions to per-visitor revenue (log1p of the summed back-transformed
# predictions) and write each to CSV.
# RF One
rfPredsOne = predict(rfOne, rangerXTest)
rfPredsOne = rfPredsOne$predictions
predSubOne = cbind(testIds, rfPredsOne) %>% as.tibble() %>%
  group_by(fullVisitorId) %>%
  summarise(PredictedLogRevenue = log1p(sum(expm1(rfPredsOne))))
# BUG FIX: the original wrote the undefined object `predSub`; write the
# submission that was actually computed.
readr::write_csv(predSubOne, path = paste0(getwd(), "/sub_rf_mtry100.csv"))
# RF Two
rfPredsTwo = predict(rfTwo, rangerXTest)
rfPredsTwo = rfPredsTwo$predictions
predSubTwo = cbind(testIds, rfPredsTwo) %>% as.tibble() %>%
  group_by(fullVisitorId) %>%
  summarise(PredictedLogRevenue = log1p(sum(expm1(rfPredsTwo))))
# BUG FIX: likewise `predSub` -> `predSubTwo`.
readr::write_csv(predSubTwo, path = paste0(getwd(), "/sub_rf_mtry200.csv"))
|
014741be90556917c5de40265ec47af08eb6062f
|
2ef6132cab8f2ece5e522c9f26c54cac3410673f
|
/server.R
|
d2a9a8f7e2c75c668c8b2320be46bb5d8a7b7a77
|
[] |
no_license
|
Nilesh1978/Developing-Data-Products
|
4c0705824eba340c1791d3d76577c9b610b0c417
|
fea955d3207da71ecca9a6db9b551e53eb4f5036
|
refs/heads/master
| 2020-12-25T05:06:48.977438
| 2015-07-26T19:04:19
| 2015-07-26T19:04:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 686
|
r
|
server.R
|
# Developing Data Products Project
library(shiny)
# Shiny server: renders a body-shape image scaled by the user's inputs,
# computes BMI reactively, and builds a greeting that refreshes on goButton.
server <- function(input, output, session) {
  # Male silhouettes are scaled by 10/7; female ones use the raw inputs.
  output$gimg <- renderUI({
    if (input$gender == "male") {
      img(src = "male.png",
          height = (as.integer(input$height) * 10 / 7),
          width = (as.integer(input$weight) * 10 / 7))
    } else {
      img(src = "female.png",
          height = as.integer(input$height),
          width = as.integer(input$weight))
    }
  })

  # BMI = weight / (height in metres)^2, rounded to a whole number.
  bmi_value <- reactive({
    round(as.numeric(input$weight / (input$height / 100)^2), 0)
  })
  output$bmi <- renderText({
    bmi_value()
  })

  # Greeting text; reading input$name and input$goButton here keeps the same
  # reactive dependencies as the original (the isolate() wraps only plain
  # local values, so it has no effect on reactivity).
  output$text3 <- renderText({
    nombre <- paste(input$name)
    input$goButton
    HTML(isolate(paste("Hi", nombre, "Your BMI is")))
  })
}
|
3285c5ccfd3662200e024fd3fe5fba14531516f0
|
32835c0a1fc6b04bdead155d22a3c3ec3f43a126
|
/datasets/R/plotsim_aggupset.R
|
61d114ab50eeb6e90961eade9f14da32f936c031
|
[
"MIT",
"CC-BY-NC-ND-4.0"
] |
permissive
|
pkimes/benchmark-fdr
|
a95784d38d273f05c593fa8037af478dd0082073
|
18c1e412afbbbbfa742a3b4c124c9e1611d5a4e4
|
refs/heads/master
| 2021-01-19T12:51:31.142996
| 2019-04-16T23:59:14
| 2019-04-16T23:59:14
| 100,814,197
| 15
| 7
|
MIT
| 2018-09-13T22:19:09
| 2017-08-19T18:37:27
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,522
|
r
|
plotsim_aggupset.R
|
#' Aggregated UpSet Plot
#'
#' Generate an UpSet plot showing the (rounded) median overlap between methods
#' across a collection of SummarizedBenchmark objects, e.g. corresponding
#' to simulation replicates.
#'
#' @param res list of SummarizedBenchmark objects to be combined in the plot.
#' @param alpha significance threshold to use for distinguishing significant
#' and non-significant tests.
#' @param supplementary logical whether plot is for supplementary materials.
#' (default = FALSE)
#' @param return_list logical whether frequency list should be returned instead
#' of upset plot. The returned list can be used to generate the upset plot
#' using `upset(fromExpression(freq_list)`. This can be useful if the user
#' wants to experiment with upset plot styles. (default = FALSE)
#' @param nintersects scalar value representing number of sets to look at.
#' Default is 40 (same as default in UpSetR package).
#'
#' @return
#' an upset plot if `return_list = FALSE` (default), else a list of frequencies that
#' can be used to generate the same upset plot.
#'
#' @details
#' Note: this can get incredibly slow if the number of methods being compared is
#' large since the possible number of overlaps grows exponentially. Anecdotally,
#' in simulations comparing 9 methods (+ truth) with 20,000 tests takes approximately
#' 15 to 20 seconds for 20 replications, and 40 to 45 seconds with 100 replications.
#'
#' @import dplyr magrittr
#' @author Patrick Kimes
aggupset <- function(res, alpha, supplementary = FALSE, return_list = FALSE,
                     nintersects = 40) {
    ## find significant hits at alpha cutoff for all replicates
    hits_tabs <- lapply(res, sb2hits, a = alpha, s = supplementary)
    ## replace NAs with 0s (not called significant); remember which methods
    ## failed completely (all-NA column) per replicate so their intersections
    ## can be masked out below
    fails <- lapply(hits_tabs, sapply, function(x) { all(is.na(x)) })
    hits_tabs <- lapply(hits_tabs, function(x) { x[is.na(x)] <- 0; x })
    ## count up frequencies in each intersection
    n_cols <- unique(sapply(hits_tabs, ncol))
    if (length(n_cols) > 1) {
        stop("not all SummarizedBenchmarks have the same set of methods")
    }
    freq_tabs <- lapply(hits_tabs, hits2freq, nm = n_cols)
    ## convert anything that failed completely to NAs (so failed methods don't
    ## drag down the across-replicate mean as spurious zeros)
    freq_tabs <- mapply(function(x, y) {
        if (!any(x)) { return(y) }
        failid <- make.names(names(x))[x]
        failid <- match(failid, names(y))
        y$freq[rowSums(y[, failid]) > 0] <- NA
        y
    }, x = fails, y = freq_tabs, SIMPLIFY = FALSE)
    ## merge all freqs into single table - first rename 'freq' columns to 'freq.i' (i = 1..100)
    ## NOTE(review): 1:length(freq_tabs) misbehaves on an empty `res`
    ## (iterates c(1, 0)) -- consider seq_along().
    method_names <- setdiff(names(freq_tabs[[1]]), "freq")
    freq_tab <- mapply(function(itab, idx) { dplyr::rename(itab, !!(paste0("freq.", idx)) := freq) },
                       itab = freq_tabs, idx = 1:length(freq_tabs),
                       SIMPLIFY = FALSE) %>%
        purrr::reduce(dplyr::left_join, by = method_names)
    ## summarize across 100 replications of each setting (rounded mean count
    ## per intersection; NaN arises when every replicate was NA -> set to 0)
    freq_tab <- freq_tab %>%
        gather(repl, cnt, starts_with("freq")) %>%
        group_by_at(method_names) %>%
        summarize(freq_mean = round(mean(cnt, na.rm = TRUE))) %>%
        ungroup() %>%
        mutate(freq_mean = ifelse(is.nan(freq_mean), 0, freq_mean))
    ## convert binary design matrix to UpSetR format (method names separated by "&")
    freq_tab <- freq_tab %>%
        unite("design", method_names, sep = "&", remove = FALSE) %>%
        gather(method, val, -design, -freq_mean) %>%
        mutate(val = ifelse(val, method, "")) %>%
        spread(method, val) %>%
        select(-design) %>%
        unite("setname", method_names, sep = "&") %>%
        mutate(setname = setname %>% gsub("&{2,}", "&", .) %>%
                   gsub("^&", "", .) %>%
                   gsub("&$", "", .))
    ## convert to vector to pass to UpSetR package
    freq_list <- freq_tab$freq_mean
    names(freq_list) <- freq_tab$setname
    ## return frequency list if requested
    if (return_list) {
        return(freq_list)
    }
    ## draw upset plot if frequency list not returned; rows of the set matrix
    ## whose method name contains "truth" are tinted blue via set.metadata
    upset(fromExpression(freq_list),
          nsets = n_cols,
          nintersects = nintersects,
          mb.ratio = c(0.55, 0.45),
          order.by = "freq",
          decreasing = TRUE,
          set.metadata = list(data = data.frame(sets = method_names,
                                                isTruth = grepl("truth", method_names)),
                              plots = list(
                                  list(type = "matrix_rows",
                                       column = "isTruth",
                                       colors = c("TRUE" = "blue", "FALSE" = "gray"),
                                       alpha = 0.2))))
}
#' Helper to Parse Significant Hits for Specified Alpha
#'
#' Determines which tests are significant for each method based
#' on a specified alpha threshold and returns as a binary data.frame
#' for easier downstream parsing.
#'
#' @param x SummarizedBenchmark w/ qvalue assay.
#' @param a alpha cutoff.
#' @param s logical whether for supplementary materials or not.
#'
#' @return
#' data.frame of 0/1s; rows are test, columns are methods.
#'
#' @import dplyr magrittr
#' @author Patrick Kimes
# Parse significant hits for a single SummarizedBenchmark at threshold `a`,
# returning a 0/1 data.frame (rows = tests, columns = methods plus `truth`).
# When `s` (supplementary) is FALSE, the extra Boca-Leek degrees of freedom
# are dropped and bl-df03 is kept under the name "bl".
sb2hits <- function(x, a, s) {
    ## make quick table of significant tests w/ groundTruth
    ht <- as_tibble(cbind((assay(x, "qvalue") < a) + 0,
                          truth = rowData(x)$qvalue))
    ## keep only IHW matching "alpha" parameter
    ihw_keep <- paste0("ihw-a", sprintf("%02i", 100 * a ))
    if (ihw_keep %in% names(ht)) {
        ## note - using mutate instead of rename so next 'select' call to drop
        ## extra "ihw-*" columns doesn't throw an error if correct alpha was only alpha
        ## MODERNIZED: dplyr::mutate_() is deprecated (and defunct in current
        ## dplyr); the .data pronoun selects the column by its string name.
        ht <- dplyr::mutate(ht, ihw = .data[[ihw_keep]])
    }
    ht <- dplyr::select(ht, -dplyr::contains("ihw-"))
    ## if not plotting for supplementary materials, remove BL w/ multiple DoF
    if (!s) {
        suppressWarnings({
            ht <- ht %>%
                dplyr::select(-one_of("bl-df02", "bl-df04", "bl-df05")) %>%
                dplyr::rename(bl = `bl-df03`)
        })
    }
    ## unadjusted p-values are never plotted
    suppressWarnings({
        ht <- dplyr::select(ht, -one_of("unadjusted"))
    })
    as.data.frame(ht)
}
#' Helper to Count Intersection Frequencies Across Methods
#'
#' Counts overlaps/intersections between methods based on the
#' binary data.frame generated using the `sb2hits()` function.
#' This function is just a wrapper to the `Counter()` function
#' in the `UpSetR` package.
#'
#' @param x data.frame returned by sb2hits
#' @param nm integer number of methods in comparison
#'
#' @return
#' tibble with one (binary) column per method, and a `freq` column, with
#' each row corresponding to a single overlap of methods - methods
#' contained in the overlap are set to 1, those not in the overlap
#' are set to 0 - with the `freq` column containing the number of test
#' statistics in the overlap.
#'
#' @import dplyr magrittr
#' @importFrom UpSetR Counter
#' @author Patrick Kimes
hits2freq <- function(x, nm) {
    ## NOTE(review): UpSetR:::Counter is an internal (:::) function with no
    ## stability guarantee -- this call (positional args: data, num.sets,
    ## start.col, set.names) may break across UpSetR versions.
    UpSetR:::Counter(x, nm, 1, names(x), nintersections = 2^nm,
                     mbar_color = "gray23",
                     order_mat = "degree", aggregate = "degree",
                     cut = NULL, empty_intersects = TRUE,
                     decrease = TRUE) %>%
        as_tibble() %>%
        ## drop Counter's internal plotting columns, keep design + freq
        select(-x, -color)
}
#' Number of Methods w/ Rejections
#'
#' Helper function to return the number of methods with rejections at
#' a particular alpha level (this helps us determine whether or not to plot the
#' aggregated upset plot - if there aren't at least 2 methods it will throw an
#' error, which is a problem for the null simulations).
#'
#' @param res standardized metric data.table generated using
#' standardize_results.
#' @param alpha alpha cutoff
#' @param filterSet which methods to exclude from consideration
#'
#' @author Keegan Korthauer
numberMethodsReject <- function(res, alphacutoff, filterSet) {
  # Restrict to the rejection metric at the requested alpha, excluding the
  # filtered methods, then keep methods whose mean rejection count is > 0.
  rejecting <- res %>%
    filter(is.na(param.alpha) | param.alpha == alphacutoff,
           !(blabel %in% filterSet),
           alpha == alphacutoff,
           performanceMetric == "rejections") %>%
    group_by(blabel) %>%
    summarize(mean_value = round(mean(value))) %>%
    filter(mean_value > 0)
  # One row per rejecting method.
  nrow(rejecting)
}
|
4e26042e0802d6d5fa95ab01fd03ab656050132e
|
802681f4028c1645678c9de8f24e5cb29e78e2a5
|
/R/EpiMutations.R
|
07775f0d50141e1cf2b4ef8975089ec666ad2c60
|
[
"MIT"
] |
permissive
|
das2000sidd/EpiMutations
|
936efe5670a5daf55aead548acdc8f1c0375d151
|
2fa076cf0cf9511dab0ec8ef02377f44d3d15ac1
|
refs/heads/master
| 2023-02-17T08:29:46.238154
| 2020-11-26T14:52:31
| 2020-11-26T14:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,019
|
r
|
EpiMutations.R
|
#' @export
# Detect epimutations in a single case ("diseases") sample by comparing it
# against the package-level control panel (grs.control.panel for
# GenomicRatioSets, es.control.panel for ExpressionSets), segmenting candidate
# regions with bumphunter, and scoring each region with the chosen
# outlier-detection method.
#
# Args:
#   diseases:      GenomicRatioSet or ExpressionSet with exactly ONE sample.
#   num.cpgs:      minimum number of probes (bumphunter column L) per bump.
#   pValue.cutoff: significance cutoff for the "manova"/"mlm" methods.
#   cutoff:        bumphunter cutoff for candidate-region segmentation.
#   outlier.score: score threshold for the "iso.forest" method.
#   nsamp:         subsampling scheme forwarded to the MCD estimator.
#   method:        one of "manova", "mlm", "iso.forest", "Mahdist.MCD".
# Returns:
#   A tibble of candidate epimutation regions (empty when nothing is found).
EpiMutations<-function(diseases, num.cpgs = 10, pValue.cutoff = 0.01,
                       cutoff =0.1, outlier.score = 0.5,
                       nsamp = "deterministic",method = "manova")
{
  #Correct parameter verification
  if(is.null(diseases))
  {
    stop("'Diseases' parameter must be introduced")
  }
  #The case dataset must carry exactly one sample
  num.sample.diseases<-dim(diseases)[2]
  if (num.sample.diseases !=1)
  {
    stop("'diseases' parameter sample number must be 1")
  }
  if (length(method)!=1)
  {
    stop(" Only one 'method' can be chosen at a time")
  }
  #dataset type (GenomicRatioSet or ExpressionSet)
  type <- charmatch(class(diseases), c("GenomicRatioSet", "ExpressionSet"))
  if(is.na(type))
  {
    stop("The data type must be 'GenomicRatioSet' or 'ExpressionSet'")
  }
  #Select one of the available methods
  method.selected<-charmatch(method,c("manova","mlm","iso.forest","Mahdist.MCD"))
  if(is.na(method.selected))
  {
    stop("The selected method must be 'manova', 'mlm','iso.forest','Mahdist.MCD'")
  }
  #Combine control panel with the disease sample
  #GenomicRatioSets
  if(type == 1)
  {
    set <- minfi::combineArrays(grs.control.panel, diseases,
                                outType = c("IlluminaHumanMethylation450k",
                                            "IlluminaHumanMethylationEPIC",
                                            "IlluminaHumanMethylation27k"),
                                verbose = TRUE)
  }
  #ExpressionSet
  else if (type == 2)
  {
    set <- a4Base::combineTwoExpressionSet(es.control.panel,
                                           diseases)
    exprs.mat<-Biobase::exprs(set)
    fdata<-Biobase::fData(set)
  }
  #Obtain Phenotypic data
  pdata <- Biobase::pData(set)
  sample<-colnames(diseases)
  #create a variable 0,0,0,0...0,0,1 (TRUE only for the case sample)
  pdata$samp <- pdata$sampleID == sample
  #Create the model matrix
  model <- stats::model.matrix(~ samp, pdata)
  #Bumphunter function from bumphunter package
  # GENERALIZATION FIX: honour the `cutoff` argument instead of the hard-coded
  # 0.1 that shadowed it (default unchanged, so existing callers are
  # unaffected).
  #GenomicRatioSet
  if(type == 1)
  {
    bumps <- bumphunter::bumphunter(set, model, cutoff = cutoff)$table
  }
  #ExpressionSet
  else if (type == 2)
  {
    bumps <- bumphunter::bumphunter(object = exprs.mat,
                                    design = model,
                                    pos = fdata$RANGE_START,
                                    chr = fdata$CHR,
                                    cutoff = cutoff)$table
  }
  # BUG FIX: initialise the result so the function returns an empty tibble
  # instead of failing with "object 'outliers.epi.mutations' not found" when
  # bumphunter reports no usable bump.
  outliers.epi.mutations <- data.frame()
  #Outlier identification using multiple methods
  if(!is.na(bumps[1,1]))
  {
    bumps$sample <- sample
    #select bumps spanning at least the requested number of CpGs
    bumps <- subset(bumps, L >= num.cpgs)
    #Outlier identification using multiple statistical approach
    # BUG FIX: seq_len() instead of 1:nrow() so an empty table skips the loop
    # (1:0 would iterate over c(1, 0) and index out of bounds).
    for( i in seq_len(nrow(bumps)))
    {
      #Find beta value matrix for each bump
      beta.values<-BumpBetaMatrix(bumps[i,],set)
      #manova
      if(method == "manova")
      {
        bumps$manova[i]<-EpiMANOVA(beta.values,model)
      }
      #mlm
      if(method == "mlm")
      {
        bumps$mlm[i]<-epiMLM(beta.values,model)
      }
      if(method == "iso.forest")
      {
        bumps$iso[i]<-epiIsolationForest(beta.values, sample)
      }
      if(method == "Mahdist.MCD")
      {
        bumps$MahMCD[i]<-epiMahdist.MCD(beta.values, nsamp, sample)
      }
    }
    #Subset bumps using the method-specific score/p-value (guarded so an
    #empty filtered table does not error on the missing score column)
    if(nrow(bumps) > 0)
    {
      if(method == "manova")
      {
        outliers.epi.mutations <- subset(bumps, manova < pValue.cutoff)
      }
      if(method == "mlm")
      {
        outliers.epi.mutations <- subset(bumps, mlm < pValue.cutoff)
      }
      if(method == "iso.forest")
      {
        outliers.epi.mutations <- subset(bumps, iso > outlier.score)
      }
      if(method == "Mahdist.MCD")
      {
        outliers.epi.mutations <- subset(bumps, MahMCD == TRUE)
      }
    }
  }
  outliers.epi.mutations<-tibble::as_tibble(outliers.epi.mutations)
  return(outliers.epi.mutations)
}
|
50332d95a91486da7f3507a0de8e0547d63f4a20
|
bd1912018a7de9cb7509f54224c4dc206417127d
|
/R/saturation.R
|
096f27c6df347226a6f08bae80ba128787efaa69
|
[] |
no_license
|
cran/secrdesign
|
2eba2706579c539d47c5f29ea1d8fffc0562b278
|
5076a1d0f4aa141312135aef2c6f0d36a681851c
|
refs/heads/master
| 2023-03-16T21:18:52.407592
| 2023-03-10T18:30:02
| 2023-03-10T18:30:02
| 19,372,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,438
|
r
|
saturation.R
|
###############################################################################
## package 'secrdesign'
## saturation.R
## 2017-11-02
###############################################################################
## Expected detector 'saturation' (probability that a detector catches at
## least one animal) for a given trap layout, habitat mask, detection
## parameters and density D.  Optionally plots saturation over the traps.
## Returns a list: bydetector (length-K vector of probabilities) and mean.
## NOTE(review): 'valid' below admits detectfn 19 ('HCG'), but no branch
## assigns 'h' for 19, so that choice would fail -- confirm upstream.
saturation <- function (traps, mask, detectpar, detectfn =
        c('HHN', 'HHR', 'HEX', 'HAN', 'HCG', 'HN', 'HR', 'EX'),
    D, plt = FALSE, add = FALSE, ...) {
    if (!(detector(traps)[1] %in% c('multi','proximity', 'capped', 'count')))
        stop ("only for 'multi','proximity', 'capped' or 'count' detectors")
    if (is.character(detectfn))
        detectfn <- match.arg(detectfn)
    detectfn <- secr:::valid.detectfn(detectfn, valid = c(0,1,2,14:19))
    ## dfcast() transforms detectfn 0 to 14, 1 to 15, 2 to 16
    dfc <- dfcast (detectfn, detectpar)
    detectfn <- dfc$detectfn
    detectpar <- dfc$detectpar
    ## area of one mask cell (used to scale the density integral)
    cellarea <- attr(mask, 'area')
    ## Euclidean detector-to-mask-point distances: K x M
    dk <- edist(traps, mask)
    ## Per-detectfn hazard shape as a function of distance (K x M)
    if (detectfn == 14)
        h <- exp(-dk^2/2/detectpar$sigma^2)
    else if (detectfn == 15)
        h <- 1 - exp(-(dk/detectpar$sigma)^-detectpar$z)
    else if (detectfn == 16)
        h <- exp(-dk^2/2/detectpar$sigma^2)
    else if (detectfn == 17)
        h = exp(- (dk-detectpar$w)^2 / 2 / detectpar$sigma^2)
    else if (detectfn == 18)
        h = pgamma(dk, shape = detectpar$z, scale = detectpar$sigma/detectpar$z,
            lower.tail = FALSE, log.p = FALSE)
    ## scale the hazard by the baseline detection rate
    h <- detectpar$lambda0 * h
    if (detector(traps)[1] == "multi") {
        ## 'multi' detectors: an animal can be caught at most once, so
        ## convert competing hazards to per-detector capture hazards.
        Hi <- apply(h, 2, sum)          ## M: total hazard of detection | x
        hmult <- (1 - exp(-Hi)) / Hi
        pkx <- sweep(h, MARGIN = 2, STATS = hmult, FUN = "*")  ## K x M Pr caught @ k | x
        hkx <- -log(1-pkx)
    }
    else {
        hkx <- h
    }
    ## weight by density: D may be a per-mask-cell vector or a scalar
    if (length(D) > 1)
        hkx <- sweep(hkx, MARGIN = 2, STATS = D, FUN = "*")    ## K x M
    else
        hkx <- hkx * D
    ## integrate hazard over the mask for each detector (length K)
    Hk <- apply(hkx, 1, sum) * cellarea
    ## probability each detector records at least one animal
    p <- 1-exp(-Hk)
    out <- list(bydetector = p, mean = mean(p))   # lambda0bias = mean(p)/mean(Hk)-1
    if (plt) {
        ## colour the traps by saturation and overlay the trap locations
        covariates(traps) <- data.frame(saturation = 1-exp(-Hk))
        plot(as.mask(traps), covariate = 'saturation', dots = FALSE, add = add, ...)
        plot(traps, add = TRUE)
        invisible(out)
    }
    else {
        out
    }
}
|
81a81a6207215209652748e44821760744269489
|
48f7530f80150a8b9c71434cfdde178d57ad0590
|
/Ch3_LinearRegression/NonLinear.R
|
6df6bfbeca6e9f5d80d05dfd9a9110b54b2383d4
|
[] |
no_license
|
ssh352/Statistical_Learning
|
e3f9650bfacfd0c477a6d1cf74b809ce7307a293
|
5d8c4674ae39728a64ab2374d7b9ea5d94b6bd1c
|
refs/heads/master
| 2021-09-15T11:26:15.676956
| 2018-05-31T11:58:07
| 2018-05-31T11:58:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 724
|
r
|
NonLinear.R
|
# Non-linear extensions of linear regression on the Boston housing data
# (polynomial terms and a log transform).
library(MASS) # provides the Boston data set
attach(Boston)

# Quadratic fit: I() is needed so ^ is treated as arithmetic, not formula syntax.
fit_quadratic <- lm(medv ~ lstat + I(lstat^2))
summary(fit_quadratic)
# A near-zero p-value on the quadratic term suggests it improves the model.

# Compare the quadratic fit against the plain linear fit.
fit_linear <- lm(medv ~ lstat)
anova(fit_linear, fit_quadratic)
# F-statistic of ~135 with a tiny p-value: the quadratic model is far superior.

par(mfrow = c(2, 2))
plot(fit_quadratic)
# The diagnostic residuals display no discernible pattern.

# poly() includes every order up to the fifth -- shorter than repeated I() terms.
fit_quintic <- lm(medv ~ poly(lstat, 5))
summary(fit_quintic)
# Every term up to the fifth order improves the model.

# Log transform of a predictor.
summary(lm(medv ~ log(rm), data = Boston))
|
fbd25eea6d3980f57f892174a9d8e40f7eccbd41
|
b56b5285c96be24eca2705a44c923ad73dbbb96d
|
/create_randomdata_R.R
|
4f3b52bb6e7d8660caa38f76d0f80b2f66a8900b
|
[] |
no_license
|
Juliecho0101/WaterWaste_Project2019
|
febf0343c3c0b80d8f10e6b36b28f23d33fb7918
|
cfad781992e7c1fb619297e2ad7d0a16db1e68a9
|
refs/heads/master
| 2020-08-01T00:29:45.871127
| 2019-11-11T06:03:48
| 2019-11-11T06:03:48
| 210,800,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
create_randomdata_R.R
|
# Generate one week of simulated per-minute sensor readings (one truncated
# normal draw per column) and export the result to an Excel workbook.
#
# BUGFIX: the original script called rtruncnorm() and write.xlsx() without
# loading the packages that provide them, so it could not run top to bottom.
# NOTE(review): write.xlsx() is assumed to come from 'openxlsx' -- confirm
# against the project set-up.
library(truncnorm)   # rtruncnorm()
library(openxlsx)    # write.xlsx()
library(dplyr)       # mutate() (was loaded mid-script before)

# Start date of the sampling window
data1 <- as.Date("2019-08-01")
#z1 <- seq.POSIXt(as.POSIXct(data1), as.POSIXct(data1+7), by = "1 min")
# Seven days of 1-minute timestamps, both ends inclusive -> 10081 rows
df1 <- data.frame(GET_DATE =seq.POSIXt(as.POSIXct(data1), as.POSIXct(data1+7), by = "1 min"),
                  VC_VAL = rtruncnorm(n=10081, a=100, b=300, mean=220, sd=20),
                  AC_VAL = rtruncnorm(n=10081, a=0, b=1, mean=0.6, sd=1),
                  TPOW = rtruncnorm(n=10081, a=50, b=100, mean=80, sd=10),
                  PAVR = rtruncnorm(n=10081, a=5, b=12, mean=8, sd=1),
                  EC_VAL_I = rtruncnorm(n=10081, a=0.01, b=50, mean=20, sd=10),
                  EC_VAL_O = rtruncnorm(n=10081, a=0.01, b=50, mean=20, sd=10),
                  EC_T_I = rtruncnorm(n=10081, a=0.01, b=50, mean=15, sd=10),
                  EC_T_O = rtruncnorm(n=10081, a=0.01, b=50, mean=15, sd=10),
                  PD2_VALUE = rtruncnorm(n=10081, a=-5, b=5, mean=0, sd=1))
# Inspect the generated data
summary(df1)
str(df1)
head(df1)
# Add a DATE column: GET_DATE formatted as YYYYmmddHHMMSS
df1 <- df1 %>% mutate(DATE = format(GET_DATE,"%Y%m%d%H%M%S"))
write.xlsx(df1,"DATA_ECPH.xlsx")
|
f77dc590b26454be7391a9a6a9a810a9e20ce3c9
|
c6007faac6dca68f2e1a450af02a267b01d9349f
|
/man/add_headers.Rd
|
a567fcc28092a94e5046d6bff30282a5399380b2
|
[] |
no_license
|
vbonhomme/Momosaics
|
aeffe332dc4c6510b1c10fc0caf07a03f01e72a3
|
595d62186092fce1d5daabc017f81aabb50dd25f
|
refs/heads/master
| 2021-05-05T13:50:36.253260
| 2018-01-23T12:25:21
| 2018-01-23T12:25:21
| 118,444,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 723
|
rd
|
add_headers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_layers.R
\name{add_headers}
\alias{add_headers}
\title{Add headers to groups on a mosaic plot}
\usage{
add_headers(df, column = "f", cex, ...)
}
\arguments{
\item{df}{\code{mosaic_df}}
\item{column}{(\code{f} by default) name or position of a column in \code{df} to feed \code{\link[=lines]{lines()}}.
If a non-character column is provided, its \code{names} are used.
\item{cex}{\code{numeric} to feed \code{\link[=text]{text()}}, if missing tries
to obtain something adequate}
\item{...}{additional arguments to feed \code{\link[=text]{text()}}}
}
\value{
the \code{mosaic_df}, invisibly
}
\description{
Add headers to groups on a mosaic plot
}
|
b7d99d28b74a9c4d153a61967c94ba61096dc207
|
9cb318be063dec190b8045464beb40ab2dbef9cf
|
/ui.R
|
b6bb10611a48ef2789fc73a36000bbb96401f8d8
|
[] |
no_license
|
apeco2020/ShinyApplicationAndReproduciblePitch
|
ecd7b1c847c8e47842c0e15c5751acae5e0d8801
|
19c7a3dcebcb96aa4c75d57b46803b5ce9e4b934
|
refs/heads/master
| 2021-04-09T20:35:54.593614
| 2018-09-17T17:35:25
| 2018-09-17T17:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
ui.R
|
# ui.R
# Richard A. Lent
# Monday, September 17, 2018 at 11:05 AM
library(shiny)
# UI definition for the "Draw a Graph with the Iris Data" app.
# Sidebar: variable selector, graph-type selector and two action buttons;
# main panel: the rendered plot.
shinyUI(fluidPage(
    titlePanel("Draw a Graph with the Iris Data"),
    sidebarLayout(
        sidebarPanel(
            # BUGFIX: the selectInput() calls were wrapped in isolate().
            # isolate() is a server-side reactive primitive; around static
            # UI definitions it is a no-op at best, so it was removed.
            selectInput("variable", "Select variable",
                        choices = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")),
            selectInput("graph", "Select graph type", choices = c("Histogram", "Box Plot", "Scatter Plot")),
            HTML("<center>"),
            actionButton("go", "Draw graph", style="color: black;
            background-color: cyan; border-color: black; margin-top: 0.5em;"),
            HTML("<br>"),
            actionButton("help", "Documentation for this app", style="color: black;
            background-color: cyan; border-color: black; margin-top: 0.5em;"),
            HTML("</center>")
        ),
        mainPanel(
            plotOutput("thePlot")
        )
    ) # sidebarLayout
) # fluidPage
) # shinyUI
|
946846b11e7b782966f3901a2d8c41efa59c03e5
|
385a0d901a0cfe86912da704d7d907e7e8f38b82
|
/zV_Past/Past0/z_Backup_HM/003HedgeMaster/Final0_OutputPriceMatrix.R
|
5365749edad9668f4a8e295302baff40d3f3d87c
|
[] |
no_license
|
junyitt/HM2016
|
2bfe1244d7c8bd54d7289ba30616058e409e3cfc
|
750f3784de35f05da5b584b7f1704903ab294646
|
refs/heads/master
| 2020-04-11T06:28:34.904849
| 2016-11-12T20:05:38
| 2016-11-12T20:05:38
| 68,070,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,017
|
r
|
Final0_OutputPriceMatrix.R
|
# Builds the full instrument price matrix (underlyings, bonds, forwards,
# options) for the HedgeMaster game and writes it to ALLPRICE_01.csv.
# NOTE(review): mu, sigma, simprice, subprice, intfind_f_1, divfind_f_1,
# s0find_f_1, fwdprice_f_1, bondcal_f_1, bscall_f_1, bsput_f_1, vola2 and
# volafind_f_1 are assumed to be supplied by the two source()d files below
# -- confirm.
library(dplyr)
###INPUT
source("~/003HedgeMaster/0f_pricing.R")
source("~/003HedgeMaster/0meta_pricing.R")
#########################
##Price of underlying - 8
#########################
#########
# assetn <- names(mu)
# One fixed RNG seed and starting price per underlying (USD, EUR, GOL, CRU, PAL)
seed <- c(5,6,8,91,11)
S0 <- c(3.3,4,1000,240,2000)
assetlist <- list()
# for(i in 1:length(seedn)){
#   assetlist[[i]] <- simprice(mu = mu[,assetn[i]], sigma = sigma[,assetn[i]], S0 = S0n[i], seed = seedn[i])
# }
#
# Simulate a price path for each of the 5 underlyings
assetlist <- lapply(X = 1:5, function(q){
  simprice(mu = mu[,q], sigma = sigma[,q], S0 = S0[q], seed = seed[q])
})
subpricelist <- lapply(X = assetlist, FUN = subprice)
underdf <- do.call(cbind, subpricelist)
# Commodities to 2 dp, currencies to 4 dp
underdf[,c(3:5)] <- round(underdf[,c(3:5)],2)
underdf[,1:2] <- round(underdf[,1:2],4)
colnames(underdf) <- c("USD", "EUR", "GOL", "CRU", "PAL")
underdf <- t(underdf)
colnames(underdf) <- 0:10
write.csv(underdf, "~/003HedgeMaster/UNDERLYINGPRICE00.csv")
##################
##BOND PRICE - 4
##################
#read all the bond code - META procedure
BondCode_v <- as.character(read.table("~/003HedgeMaster/meta3_AllBondCode.txt", header = F)[,1])
#substr out the necessary information
# Bond codes encode start year at chars 3-4 and end year at chars 5-6
startdate_v <- as.numeric(sapply(X = BondCode_v, FUN = function(ss){
  substr(x = ss, start = 3, stop = 4)
}))
enddate_v <- as.numeric(sapply(X = BondCode_v, FUN = function(ss){
  substr(x = ss, start = 5, stop = 6)
}))
#create vector for interest
loopindex <- 1:length(startdate_v)
underlying_v <- rep("MYR", length(startdate_v))
int_v <- sapply(X = loopindex, FUN = function(y){
  intfind_f_1(start_1 = startdate_v[y],end_1 = enddate_v[y], under_1 = underlying_v[y])
})
# Convert continuously compounded rates to effective rates
int_v <- exp(int_v)-1
#finally create vector for bond price
bondprice_v <- sapply(X = loopindex, FUN = function(y){
  bondcal_f_1(i = int_v[y], n = enddate_v[y]-startdate_v[y], Fr = 5, C = 100)
})
#round off
bondprice_v <- round(bondprice_v,2)
#cbind
bondprice_final_df <- cbind(code = BondCode_v, price = bondprice_v)
#output
#write.csv(bondprice_final_df, "~/003HedgeMaster/BONDPRICE_02.csv", row.names = F)
########################################
##FORWARD THEORETICAL PRICE - TOO MANY! 8 underlying* each year has n-k prices (k is the current period) - about 8*50 = 400
########################################
#read all the forward code - META procedure
# NOTE(review): unlike BondCode_v above, FwdCode_v is NOT reduced with
# as.character(...[,1]) and stays a data.frame, so the sapply() calls below
# iterate over its columns and rely on substr() being vectorised -- confirm
# this is intentional.
FwdCode_v <- read.table("~/003HedgeMaster/meta3_AllForwardCode.txt", header = F)
#substr out the necessary information
startdate_v <- as.numeric(sapply(X = FwdCode_v, FUN = function(ss){
  substr(x = ss, start = 3, stop = 4)
}))
enddate_v <- as.numeric(sapply(X = FwdCode_v, FUN = function(ss){
  substr(x = ss, start = 5, stop = 6)
}))
underlying_v <- sapply(X = FwdCode_v, FUN = function(ss){
  substr(x = ss, start = 7, stop = 9)
})
loopindex <- 1:length(underlying_v)
#create the relevant vector for dividend and interest
div_v <- sapply(X = loopindex, FUN = function(y){
  divfind_f_1(start_1 = startdate_v[y],end_1 = enddate_v[y], under_1 = underlying_v[y])
})
int_v <- sapply(X = loopindex, FUN = function(y){
  intfind_f_1(start_1 = startdate_v[y],end_1 = enddate_v[y], under_1 = underlying_v[y])
})
s0_v <- sapply(X = loopindex, FUN = function(y){
  s0find_f_1(start_1 = startdate_v[y], under_1 = underlying_v[y])
})
#finally, create the relevant vector for forward price
fwdprice_v <- sapply(X = loopindex, FUN = function(y){
  fwdprice_f_1(s0_1 = s0_v[y], int_1 = int_v[y], div_1 = div_v[y], startdate_v[y], enddate_v[y])
})
#ROUND UP
# Currencies to 4 dp, commodities to 2 dp
fwdprice_v[underlying_v %in% c("EUR", "USD")] <- round(fwdprice_v[underlying_v %in% c("EUR", "USD")],4)
fwdprice_v[underlying_v %in% c("GOL","CRU","PAL")] <- round(fwdprice_v[underlying_v %in% c("GOL","CRU","PAL")],2)
#cbind
fwdprice_final_df <- cbind(code = FwdCode_v, price = fwdprice_v)
colnames(fwdprice_final_df) <- c("code", "price")
fwdprice_final_df <- as.matrix(fwdprice_final_df)
#output
#write.csv(fwdprice_final_df, file = "~/003HedgeMaster/FWDPRICE_02.csv", row.names = F)
##############################
##NEW OPTIONS PRICING CODE- 8*30 + 8*30 = 480
##############################
#read all the option code - META procedure
OptionCode_v <- as.character(read.table("~/003HedgeMaster/meta3_CallOptionCode.txt", header = F)[,1])
PutOptionCode_v <- as.character(read.table("~/003HedgeMaster/meta3_PutOptionCode.txt", header = F)[,1])
#substr out the necessary information
# Option codes: start year chars 3-4, end year chars 5-6, underlying 7-9,
# strike from char 10 to end
startdate_v <- as.numeric(sapply(X = OptionCode_v, FUN = function(ss){
  substr(x = ss, start = 3, stop = 4)
}))
enddate_v <- as.numeric(sapply(X = OptionCode_v, FUN = function(ss){
  substr(x = ss, start = 5, stop = 6)
}))
underlying_v <- sapply(X = OptionCode_v, FUN = function(ss){
  substr(x = ss, start = 7, stop = 9)
})
strike_v <- as.numeric(sapply(X = OptionCode_v, FUN = function(ss){
  substr(x = ss, start = 10, stop = nchar(ss))
}))
loopindex <- 1:length(underlying_v)
#create the relevant vector for dividend and interest and s0 and >>>"VOLATILITY"
div_v <- sapply(X = loopindex, FUN = function(y){
  divfind_f_1(start_1 = startdate_v[y],end_1 = enddate_v[y], under_1 = underlying_v[y])
})
int_v <- sapply(X = loopindex, FUN = function(y){
  intfind_f_1(start_1 = startdate_v[y],end_1 = enddate_v[y], under_1 = underlying_v[y])
})
s0_v <- sapply(X = loopindex, FUN = function(y){
  s0find_f_1(start_1 = startdate_v[y], under_1 = underlying_v[y])
})
#**volatility
volalist <- lapply(X = assetlist, FUN = vola2)
#create the relevant vector for volatility
vola_v <- sapply(X = loopindex, FUN = function(y){
  volafind_f_1(start_1 = startdate_v[y], under_1 = underlying_v[y])
})
#finally, create a vector for the call premium and put premium
# NOTE(review): put premiums are computed from the CALL codes' parameters;
# this assumes PutOptionCode_v parallels OptionCode_v one-to-one -- confirm.
callprem_v <- sapply(X = loopindex, FUN = function(y){
  bscall_f_1(S = s0_v[y], K = strike_v[y], T = 1, r = int_v[y], d = div_v[y], v = vola_v[y])
})
putprem_v <- sapply(X = loopindex, FUN = function(y){
  bsput_f_1(S = s0_v[y], K = strike_v[y], T = 1, r = int_v[y], d = div_v[y], v = vola_v[y])
})
#ROUND UP
callprem_v[underlying_v %in% c("EUR", "USD")] <- round(callprem_v[underlying_v %in% c("EUR", "USD")],4)
callprem_v[underlying_v %in% c("GOL","CRU","PAL")] <- round(callprem_v[underlying_v %in% c("GOL","CRU","PAL")],2)
#ROUND UP
putprem_v[underlying_v %in% c("EUR", "USD")] <- round(putprem_v[underlying_v %in% c("EUR", "USD")],4)
putprem_v[underlying_v %in% c("GOL","CRU","PAL")] <- round(putprem_v[underlying_v %in% c("GOL","CRU","PAL")],2)
#cbind
optionprice_final_df <- cbind(code = c(OptionCode_v, PutOptionCode_v), price = c(callprem_v, putprem_v))
#output
#write.csv(optionprice_final_df, file = "~/003HedgeMaster/OPTIONPRICE_02.csv", row.names = F)
# Stack every instrument class into the single price matrix
allprice_final_df <- rbind(bondprice_final_df,fwdprice_final_df, optionprice_final_df)
#output
write.csv(allprice_final_df, "~/003HedgeMaster/ALLPRICE_01.csv", row.names = F)
|
0b695786321a62cd61ad4565b6a0144f183deee4
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Tentrup/ltl2aig-comp/load_full_3_comp5_REAL.unsat/load_full_3_comp5_REAL.unsat.R
|
f7047152118404a1bacadc4f15adf8dfaa2068d8
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
load_full_3_comp5_REAL.unsat.R
|
64fa8ad4bfe0f62126ed8d3c09496748 load_full_3_comp5_REAL.unsat.qdimacs 30111 101864
|
12ef403687e0956f816988697c9e994038c8415e
|
e2a5cdf2dcbd788ac7c091897b5a027a809c302a
|
/R/snowColors.R
|
c3b404f82652969e4326ebc8c937edfe6c3e0ac9
|
[] |
no_license
|
lindbrook/cholera
|
3d20a0b76f9f347d7df3eae158bc8a357639d607
|
71daf0de6bb3fbf7b5383ddd187d67e4916cdc51
|
refs/heads/master
| 2023-09-01T01:44:16.249497
| 2023-09-01T00:32:33
| 2023-09-01T00:32:33
| 67,840,885
| 138
| 13
| null | 2023-09-14T21:36:08
| 2016-09-10T00:19:31
|
R
|
UTF-8
|
R
| false
| false
| 765
|
r
|
snowColors.R
|
#' Colour palette for pump neighborhoods
#'
#' Assembles a named vector of colours, one per pump, combining two
#' 'RColorBrewer' palettes with a few fixed colours.
#' @param vestry Logical. \code{TRUE} colours the 14 pumps from the Vestry
#'   Report; \code{FALSE} colours the original 13.
#' @return A named character vector of colours ("p1", "p2", ...).
#' @note Built with 'RColorBrewer' package.
#' @export
snowColors <- function(vestry = FALSE) {
  paired <- RColorBrewer::brewer.pal(10, "Paired")
  dark <- RColorBrewer::brewer.pal(8, "Dark2")
  # Fixed colours interleaved with the two Brewer palettes
  palette <- c("dodgerblue", "gray", dark[1:4], paired[2], dark[5:8], "red",
    paired[1])
  if (vestry) {
    # One extra pump (and colour) in the Vestry configuration
    palette <- c(palette, "darkorange")
    n.pumps <- nrow(cholera::pumps.vestry)
  } else {
    n.pumps <- nrow(cholera::pumps)
  }
  names(palette) <- paste0("p", seq_len(n.pumps))
  palette
}
|
1f0f7196f9f4b2431d7f70169a7c4efe15f11d4d
|
76434d63930c563cb9bab7d263df2c80da04cb6f
|
/R/mixnorm.R
|
5b3c7948fc01941489704041f7c6e2b5f54bf35c
|
[] |
no_license
|
cran/bda
|
45de77f9d513cbeea00fc34120308f1d37dd2fd0
|
b7cc310ed8ce18c2327f99647f024727e28e59dd
|
refs/heads/master
| 2023-06-22T14:56:20.682683
| 2023-06-18T21:40:09
| 2023-06-18T21:40:09
| 17,694,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,381
|
r
|
mixnorm.R
|
## Density of a normal mixture at a single point x: the weighted sum of the
## component densities.  p, mean and sd are parallel component vectors.
.dmnorm <- function(x,p,mean,sd){
    ## dnorm() is vectorised over the component parameters, so the old
    ## accumulation loop collapses to one weighted sum.
    sum(p * dnorm(x, mean, sd))
}
## Density of a normal mixture, evaluated at each point of x.
## Defaults reduce to the standard normal density.  Negative weights are
## clamped to a tiny positive value and the weights are renormalised.
dmnorm <- function(x,p,mean,sd){
    if(missing(p)) p <- 1
    if(missing(mean)) mean <- 0
    if(missing(sd)) sd <- 1
    k <- length(p)
    if(length(mean) != k | length(sd) != k)
        stop("Parameters have different lengths")
    p[p<0] <- 0.00001
    p <- p/sum(p)
    if(any(sd<=0)) stop("Invalid standard deviation(sd)")
    ## The former .dmnorm() helper is inlined: for each evaluation point
    ## the density is the weighted sum of component densities.
    sapply(x, function(xi) sum(p * dnorm(xi, mean, sd)))
}
## CDF of a normal mixture at a single point x: the weighted sum of the
## component CDFs.  p, mean and sd are parallel component vectors.
.pmnorm <- function(x,p,mean,sd){
    ## pnorm() is vectorised over the component parameters, so the old
    ## accumulation loop collapses to one weighted sum.
    sum(p * pnorm(x, mean, sd))
}
## CDF of a normal mixture, evaluated at each quantile in q.
## Defaults reduce to the standard normal CDF.  Negative weights are
## clamped to a tiny positive value and the weights are renormalised.
pmnorm <- function(q,p,mean,sd){
    if(missing(p)) p <- 1
    if(missing(mean)) mean <- 0
    if(missing(sd)) sd <- 1
    k <- length(p)
    if(length(mean) != k | length(sd) != k)
        stop("Parameters have different lengths")
    p[p<0] <- 0.00001
    p <- p/sum(p)
    if(any(sd<=0)) stop("Invalid standard deviation(sd)")
    ## The former .pmnorm() helper is inlined: for each quantile the CDF is
    ## the weighted sum of the component CDFs.
    sapply(q, function(qi) sum(p * pnorm(qi, mean, sd)))
}
## Pick one element of x, choosing index i with probability p[i].
## Consumes exactly one uniform draw.
.rmnorm <- function(x,p){
    u <- runif(1)
    ## first component whose cumulative weight reaches u
    x[which(u - cumsum(p) <= 0)[1]]
}
## Draw n random deviates from a normal mixture.
## For each draw, one candidate is generated per component and the mixture
## weights select which candidate is kept.  Defaults reduce to rnorm().
rmnorm <- function(n,p,mean,sd){
    if(missing(p)) p <- 1
    if(missing(mean)) mean <- 0
    if(missing(sd)) sd <- 1
    k <- length(p)
    if(length(mean) != k | length(sd) != k)
        stop("Parameters have different lengths")
    p[p<0] <- 0.00001
    p <- p/sum(p)
    if(any(sd<=0)) stop("Invalid standard deviation(sd)")
    n <- ceiling(n)
    stopifnot(n>0)
    ## one column of candidate draws per component; the column-by-column
    ## fill keeps the same rnorm() call order as the original cbind loop
    draws <- matrix(0, nrow = n, ncol = k)
    for(i in seq_len(k)){
        draws[, i] <- rnorm(n, mean[i], sd[i])
    }
    cum.w <- cumsum(p)
    ## inlined .rmnorm(): per row, keep the candidate of the component whose
    ## cumulative weight first reaches a fresh uniform draw
    picked <- apply(draws, 1, function(row) row[which(runif(1) - cum.w <= 0)[1]])
    as.numeric(picked)
}
## Quantile function of a normal mixture, computed by tabulating the mixture
## CDF on a grid and inverting it with linear interpolation.
##
## prob : probabilities in [0, 1] (NA entries propagate to the result)
## p    : mixing weights (must lie in [0, 1]; renormalised to sum to 1)
## mean, sd : component means / standard deviations (parallel vectors)
##
## BUGFIX: the interpolation grid used to be centred on the pooled mean with
## half-width 4*sqrt(sum(p^2*sd^2)); for well-separated components that grid
## did not cover the mixture's support, so quantiles came back NA or wrong.
## The grid now spans every component's mean +/- 4 sd.
qmnorm <- function(prob,p,mean,sd){
    if(missing(p)) p <- 1
    if(missing(mean)) mean <- 0
    if(missing(sd)) sd <- 1
    sele <- !is.na(prob)
    if(any(prob[sele]>1|prob[sele]<0))
        stop("Invalid 'prob' value(s)")
    ndim <- length(p)
    if(length(mean) != ndim | length(sd) != ndim)
        stop("Parameters have different lengths")
    if(any(p>1|p<0)) stop("Wrong mixing coefficients")
    p <- p/sum(p)
    if(any(sd<=0)) stop("Invalid standard deviation(sd)")
    ## grid covering the support of every component
    x <- seq(min(mean - 4*sd), max(mean + 4*sd), length = 1001L)
    ## mixture CDF on the grid (.pmnorm inlined); nondecreasing by construction
    Fx <- sapply(x, function(q) sum(p * pnorm(q, mean, sd)))
    ## invert the CDF by linear interpolation; ties = "ordered" because Fx is
    ## already sorted (avoids the collapsing-ties warning near the tails)
    approx(Fx, x, prob, ties = "ordered")$y
}
|
930416ef0488fb45943d2f2e83e345b5ffdc7283
|
ed7e9bd07bf58346c5b4dd4325275c3b9607f948
|
/Shiny_Project/server.R
|
3f1c240315f3374b965786a81a79254e6021286e
|
[] |
no_license
|
kristinteves/GooglePlay2018
|
44ca22e9ee06c8bda1c81ddcf40d94f1276af169
|
c2b930300e660327d0d5d22466ceb75371949b7f
|
refs/heads/master
| 2022-12-26T19:43:11.063708
| 2020-10-08T06:16:51
| 2020-10-08T06:16:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,909
|
r
|
server.R
|
library(DT)
library(shiny)
library(googleVis)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(ggcorrplot)
# Server logic for a Google Play Store (2018) apps dashboard.
# NOTE(review): 'df', 'top_cat', 'bottom_cat', 'top_ins', 'bot_ins',
# 'top_cat_df' and 'top_ins_df' are not defined in this file -- presumably
# created in global.R; confirm.  infoBox()/renderInfoBox() come from
# 'shinydashboard', which is not loaded here either -- confirm it is
# attached elsewhere.
shinyServer(function(input,output){
    # Background Tab
    # Info box: total number of apps in the data set
    output$totalapps <- renderInfoBox({
        totalapps = length(df$App)
        totalappstate = infoBox("Total Apps", totalapps, icon = icon("google-play"), color = "green")
    })
    # Info box: number of distinct app categories
    output$uniquecat <- renderInfoBox({
        unique_cat = length(unique(df$Category))
        unique_cat_state = infoBox("Number of Categories", unique_cat, icon = icon("layer-group"),
                                   color = "green")
    })
    # Info box: number of distinct genres
    output$uniquegenre <- renderInfoBox({
        unique_genre = length(unique(df$Genres))
        unique_genre_state = infoBox("Number of Genres", unique_genre,
                                     icon = icon("layer-group"), color = "green")
    })
    # Static header image for the background tab
    output$picture <- renderUI({
        img2 = "https://www.derryjournal.com/images-i.jpimedia.uk/imagefetch/https://jpgreatcontent.co.uk/wp-content/uploads/2020/06/Google-play-store.jpg?width=640"
        tags$img(src=img2, width = 600, height = 500)
    })
    # Overview and Rankings
    # Correlation
    # Lower-triangle correlation matrix of the numeric app metrics
    output$correlation <-renderPlot({
        corr_info = cor(data.matrix(df[,c("Reviews", "Installs", "Price", "Rating", "Size")]), use = "complete.obs")
        ggcorrplot(corr_info, hc.order = TRUE, type = "lower", lab = TRUE) +
            ggtitle("Correlation Matrix") +
            theme(plot.title = element_text(size = 20, face = "bold"))
    })
    # Histogram of app ratings (NA ratings excluded)
    output$ratinghist <- renderPlot({
        ggplot(subset(df, !is.na(Rating)), aes(x = Rating)) + geom_histogram(binwidth = 0.1, fill = "#23AE46")+
            ggtitle("App Rating Distribution") +
            theme(plot.title = element_text(size = 20, face = "bold"),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"))
    })
    # Tabset
    # App counts for the categories listed in the global 'top_cat'
    output$topcat <-renderPlot({
        df = df %>%
            select(., Category) %>%
            filter(., Category %in% top_cat)
        df %>%
            ggplot(aes(x = fct_infreq(Category))) + geom_bar(fill="#23AE46") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black")) +
            xlab(label = "Categories") +
            ylab(label = "Number of Apps")
    })
    # App counts for the categories listed in the global 'bottom_cat'
    output$bottomcat <-renderPlot({
        df = df %>%
            select(., Category) %>%
            filter(., Category %in% bottom_cat)
        df %>%
            ggplot(aes(x = fct_infreq(Category))) + geom_bar(fill="#23AE46") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"),
                  plot.title = element_text(hjust = 0.5, size = 20)) +
            xlab(label = "Categories") +
            ylab(label = "Number of Apps")
    })
    # Total installs (millions) for the categories in the global 'top_ins'
    output$topinstalls <-renderPlot({
        df = df %>%
            group_by(., Category) %>%
            mutate(., Installs = Installs/1000000) %>%
            filter(., Category %in% top_ins) %>%
            summarise(., Installs = sum(Installs))
        df %>%
            ggplot(aes(x = reorder(Category, -Installs), y = Installs)) + geom_bar(fill="#23AE46", stat= "identity") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"),
                  plot.title = element_text(hjust = 0.5, size = 20)) +
            xlab(label = "Categories") +
            ylab(label = "Number of Installs in Millions")
    })
    # Total installs (millions) for the categories in the global 'bot_ins'
    output$bottominstalls <-renderPlot({
        df = df %>%
            group_by(., Category) %>%
            mutate(., Installs = Installs/1000000) %>%
            filter(., Category %in% bot_ins) %>%
            summarise(., Installs = sum(Installs))
        df %>%
            ggplot(aes(x = reorder(Category, -Installs), y = Installs)) + geom_bar(fill="#23AE46", stat= "identity") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"),
                  plot.title = element_text(hjust = 0.5, size = 20)) +
            xlab(label = "Categories") +
            ylab(label = "Number of Installs in Millions")
    })
    # Categories appearing in both the top-count and top-install tables
    output$topboth <- renderTable({
        inner_join(top_cat_df,top_ins_df,by="Category")
    })
    # Category Tab
    # Tabset
    # Free vs paid app counts for the user-selected categories
    output$categoryinfo <- renderPlot({
        df = df %>%
            select(., Category, Type) %>%
            filter(., Category %in% input$category)
        df %>% ggplot(aes(x = fct_infreq(Category))) +
            geom_bar(aes(fill = Type)) +
            xlab(label = "Categories") +
            ylab(label = "Number of Apps") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"))
    })
    # Rating distribution per selected category (red point = mean)
    output$categoryrating <- renderPlot({
        df = df %>%
            select(., Category, Rating) %>%
            filter(., Category %in% input$category)
        df %>% ggplot(aes(x = fct_infreq(Category), y = Rating)) +
            geom_boxplot() +
            xlab(label = "Categories") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black")) +
            stat_summary(fun = "mean", color = "red", geom = "point", size = 3, na.rm = TRUE)
    })
    # Content-rating breakdown per selected category
    output$categorycontent <- renderPlot({
        df = df %>%
            select(., Category, Content.Rating) %>%
            filter(., Category %in% input$category)
        df %>% ggplot(aes(x = fct_infreq(Category))) +
            geom_bar(aes(fill = Content.Rating), position = "dodge") +
            xlab(label = "Categories") +
            ylab(label = "Number of Apps") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"))
    })
    # Total installs (millions) per selected category
    output$categoryinstalls <- renderPlot({
        df = df %>%
            select(., Category, Installs) %>%
            mutate(., Installs = Installs/1000000) %>%
            filter(., Category %in% input$category)
        df %>% ggplot(aes(x = fct_infreq(Category), y = Installs)) +
            geom_bar(stat="identity", fill="#23AE46") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black")) +
            xlab(label = "Categories") +
            ylab(label = "Installations in Millions")
    })
    # Number of distinct genres per selected category
    output$genres <- renderPlot({
        df = df %>% group_by(., Category, Genres) %>%
            filter(., Category %in% input$category) %>%
            summarise(., Genres = length(unique(Genres)))
        df %>% ggplot(aes(x = fct_infreq(Category), y = Genres)) +
            geom_bar(stat="identity", fill="#23AE46") +
            theme(axis.text.x = element_text(angle = 45, hjust = 1),
                  panel.border = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black")) +
            xlab(label = "Categories") +
            ylab(label = "Number of Genres")
    })
    # Most-installed app per selected category
    output$topinstall <- renderTable({
        df %>% group_by(., Category) %>%
            arrange(desc(Installs)) %>%
            filter(., Category %in% input$category, Installs == max(Installs))
    })
    # Top-rated app per selected category (by precomputed Weighted_Rating)
    output$toprated <- renderTable({
        df %>% group_by(., Category) %>%
            arrange(desc(Weighted_Rating)) %>%
            filter(., Category %in% input$category, Weighted_Rating == max(Weighted_Rating[is.finite(Weighted_Rating)]))
    })
    # Info boxes
    # Mean rating over the selected categories
    output$averate <- renderInfoBox({
        filter_rate = df[df$Category %in% input$category,]
        ave_rate = round(mean(filter_rate$Rating, na.rm = TRUE),2)
        ave_rate_state = infoBox("Average Rating", ave_rate, icon = icon("calculator"), color = "green")
    })
    # Total installs (billions) over the selected categories
    output$totcatinstalls <- renderInfoBox({
        filter_install = df[df$Category %in% input$category,]
        totalcat_installs = round(sum(filter_install$Installs)/1000000000, 2)
        totalcat_installs_state = infoBox("Installs in Billions", totalcat_installs,
                                          icon = icon("download"), color = "green")
    })
    # Median price among PAID apps in the selected categories
    output$medprice <- renderInfoBox({
        filter_rate = df[df$Category %in% input$category,]
        paid = filter_rate[filter_rate$Price > 0,]
        med_price = median(paid$Price, na.rm = TRUE)
        med_price_state = infoBox("Median Paid Price", med_price, icon = icon("calculator"), color = "green")
    })
    # Data Tab
    # Searchable/filterable raw data table
    output$table <- DT::renderDT(
        df, filter = list(position = "top")
    )
    # end of shinyServer
})
|
27ff80a7faff06ad615975caf9f30f827ef122a4
|
b313ba13c1156ccb088c4de6327a794117adc4cc
|
/December2019/IBSsimvsobs
|
7846f9efb9ac389bf6a8ed06da505098aac406c0
|
[] |
no_license
|
kbkubow/DaphniaPulex20162017Sequencing
|
50c921f3c3e8f077d49ccb3417daa76fb4dde698
|
c662b0900cc87a64ec43e765246c750be0830f77
|
refs/heads/master
| 2021-08-20T10:09:59.483110
| 2021-06-10T20:04:31
| 2021-06-10T20:04:31
| 182,109,481
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,739
|
IBSsimvsobs
|
#!/usr/bin/env Rscript
### libraries
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(ggplot2)
library(foreach)
library(lattice)
library(tidyr)
library(SeqArray)
library(cowplot)
### Load Observed IBS file
inputobsfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/", pattern="IBSindobs_")
#obs <- fread("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/IBSindobs_R.csv")
totalibsobs <- foreach(i=1:length(inputobsfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/", inputobsfiles[i], sep="")
obs <- fread(f)
obs
}
### Load simulated IBS files
inputfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimF100/", pattern="IBSindsim_")
totalibssim <- foreach(i=1:length(inputfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimF100/", inputfiles[i], sep="")
sim <- fread(f)
sim
}
totalibssim$totalgc <- totalibssim$dos0 + totalibssim$dos2
totalibssim$total <- totalibssim$dos0 + totalibssim$dos2 + totalibssim$dos1
totalibsobs$totalobsgc <- totalibsobs$obsaltgc + totalibsobs$obsrefgc
totalibssim.ag <- totalibssim[,list(meansimaltgc = mean(dos0, na.rm=TRUE), meansimhet=mean(dos1, na.rm=TRUE),
meansimrefgc=mean(dos2, na.rm=TRUE), meansimtotgc=mean(totalgc, na.rm=TRUE), meantotalsites=mean(total, na.rm=TRUE)), list(clone, SC, afs)]
setkey(totalibssim.ag, clone, SC)
setkey(totalibsobs, clone, SC)
mIBSsimvsobs <- merge(totalibssim.ag, totalibsobs)
mIBSsimvsobs$excesstot <- mIBSsimvsobs$totalobsgc-mIBSsimvsobs$meansimtotgc
mIBSsimvsobs$proprealtot <- (mIBSsimvsobs$totalobsgc-mIBSsimvsobs$meansimtotgc)/mIBSsimvsobs$totalobsgc
# totestgc <- (mean(mIBSsimvsobs$propinctot))*(mean(mIBSsimvsobs$meansimtotgc))
#What is influence of read depth?
sc <- fread("CloneInfoFilePulexandObtusa_withmedrd_20200207")
setkey(sc, clone, SC)
setkey(mIBSsimvsobs, clone, SC)
mIBSsimvsobsclone <- merge(mIBSsimvsobs, sc)
ggplot(data=mIBSsimvsobsclone, aes(x=medrd, y=proprealtot)) + geom_point()
ggplot(data=mIBSsimvsobsclone, aes(x=medrd, y=excesstot)) + geom_point()
ggplot(data=mIBSsimvsobsclone, aes(x=as.factor(SC), y=excesstot)) + geom_point()
totestgcmean <- (mean(mIBSsimvsobs$propinctot))*(mean(mIBSsimvsobs$meansimtotgc))
totestgcmed <- (median(mIBSsimvsobs$propinctot))*(median(mIBSsimvsobs$meansimtotgc))
temp <- unlist(strsplit(mIBSsimvsobsclone$clone, split="_"))
mat <- matrix(temp, ncol=4, byrow=TRUE)
matdat <- as.data.table(mat)
mIBSsimvsobsclone$population <- matdat$V3
mIBSsimvsobsclone$year <- matdat$V2
ggplot(data=mIBSsimvsobsclone, aes(x=as.factor(year), y=excesstot)) + geom_point()
ggplot(data=mIBSsimvsobsclone[year=="2017"], aes(x=medrd, y=excesstot)) + geom_point()
### Load in 2N mutations counts
inputobsmutfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobsmut/", pattern="IBSindobsmut_2N_")
totalibsobsmut <- foreach(i=1:length(inputobsmutfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobsmut/", inputobsmutfiles[i], sep="")
obs <- fread(f)
obs
}
ggplot(data=totalibsobsmut, aes(x=as.factor(SC), y=obshet)) + geom_point()
ggplot(data=totalibsobsmut[SC!="H"], aes(x=as.factor(SC), y=obshet)) + geom_point()
sc <- fread("CloneInfoFilePulexandObtusa_withmedrd_20200207")
setkey(sc, clone, SC)
setkey(totalibsobsmut, clone, SC)
totalibsobsmutclone <- merge(totalibsobsmut, sc)
ggplot(data=totalibsobsmutclone[SC!="H" & medrd > 5], aes(x=as.factor(SC), y=obshet)) + geom_point()
ggplot(data=totalibsobsmutclone[SC!="H" & medrd > 9], aes(x=as.factor(SC), y=obshet)) + geom_point()
ggplot(data=totalibsobsmutclone[SC!="H" & medrd > 14], aes(x=as.factor(SC), y=obshet)) + geom_point()
temp <- unlist(strsplit(totalibsobsmutclone$clone, split="_"))
mat <- matrix(temp, ncol=4, byrow=TRUE)
matdat <- as.data.table(mat)
totalibsobsmutclone$population <- matdat$V3
totalibsobsmutclone$year <- matdat$V2
mIBSsimvsobsclonesub <- data.table(clone=mIBSsimvsobsclone$clone, excesstot=mIBSsimvsobsclone$excesstot,
meantotalsites=mIBSsimvsobsclone$meantotalsites)
setkey(totalibsobsmutclone, clone)
setkey(mIBSsimvsobsclonesub, clone)
mm <- merge(mIBSsimvsobsclonesub, totalibsobsmutclone)
mm$totmutsites <- mm$obsalt+mm$obshet+mm$obsref
mm$totexcesspersite <- mm$excesstot/mm$meantotalsites
mm$mutpersite <- mm$obshet/mm$totmutsites
mm$mutpermoresite <- mm$obshet/(155605-mm$meantotalsites)
ggplot(data=mm, aes(x=excesstot, y=obshet)) + geom_point()
ggplot(data=mm[medrd>5], aes(x=totexcesspersite, y=mutpersite, color=SC)) + geom_point()
ggplot(data=mm[medrd>5], aes(x=totexcesspersite, y=obshet, color=SC)) + geom_point()
ggplot(data=mm[medrd>5], aes(x=totexcesspersite, y=mutpermoresite, color=SC)) + geom_point()
ggplot(data=mm[medrd>5 & SC!="H"], aes(x=totexcesspersite, y=mutpermoresite, color=SC)) + geom_point()
ggplot(data=totalibsobsmutclone[SC=="B" & medrd > 0 & clone!="April_2017_DCat_5"], aes(x=as.factor(year), y=obshet)) + geom_point()
ggplot(data=totalibsobsmutclone[SC=="B" & medrd > 5 & clone!="April_2017_DCat_5"], aes(x=as.factor(year), y=obshet)) + geom_point()
ggplot(data=totalibsobsmutclone[SC=="B" & medrd > 9 & clone!="April_2017_DCat_5"], aes(x=as.factor(year), y=obshet)) + geom_point()
ggplot(data=totalibsobsmutclone, aes(x=medrd, y=obshet, color=SC)) + geom_point()
ggplot(data=totalibsobsmutclone[SC!="H"], aes(x=medrd, y=obshet, color=SC)) + geom_point()
DBunk_108 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_108"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_108"], color="red")
DBunk_149 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_149"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_149"], color="red")
DBunk_151 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_151"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_151"], color="red")
DBunk_253 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_253"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_253"], color="red")
DBunk_314 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_314"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_314"], color="red")
DBunk_6 <- ggplot(data=totalibssim[clone=="April_2017_DBunk_6"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_DBunk_6"], color="red")
DBunk_106 <- ggplot(data=totalibssim[clone=="Spring_2017_DBunk_106"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2017_DBunk_106"], color="red")
DBunk_277 <- ggplot(data=totalibssim[clone=="Spring_2017_DBunk_277"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2017_DBunk_277"], color="red")
DBunk_298 <- ggplot(data=totalibssim[clone=="Spring_2017_DBunk_298"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2017_DBunk_298"], color="red")
plot_grid(DBunk_108, DBunk_149, DBunk_151, DBunk_253, DBunk_314, DBunk_6, DBunk_106, DBunk_277, DBunk_298)
setkey(obs, clone)
setkey(totalibssim, clone)
m <- merge(totalibssim, obs)
totalibssim.ag <- totalibssim[,list(meanrefgc = mean(refgc, na.rm=TRUE),
meanaltgc = mean(altgc, na.rm=TRUE), meantotalgc = mean(totalgc, na.rm=TRUE)), list(clone) ]
setkey(totalibssim.ag, clone)
setkey(obs, clone)
simobs <- merge(totalibssim.ag, obs)
simobs$obstotalgc <- simobs$obsaltgc+simobs$obsrefgc
simobs$excesstotalgc <- simobs$obstotalgc-simobs$meantotalgc
simobs$excessrefgc <- simobs$obsrefgc-simobs$meanrefgc
simobs$excessaltgc <- simobs$obsaltgc-simobs$meanaltgc
### Load Observed IBS file
obs <- fread("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/IBSindobs_K.csv")
### Load simulated IBS files
inputfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", pattern="IBSindsim_K_")
totalibssim <- foreach(i=1:length(inputfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", inputfiles[i], sep="")
sim <- fread(f)
sim
}
totalibssim$totalgc <- totalibssim$refgc + totalibssim$altgc
obs$totalobsgc <- obs$obsaltgc + obs$obsrefgc
DBunk_Male5 <- ggplot(data=totalibssim[clone=="April29_2018_DBunk_Male5"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April29_2018_DBunk_Male5"], color="red")
D8_46 <- ggplot(data=totalibssim[clone=="March20_2018_D8_46"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_D8_46"], color="red")
DBunk_1 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_1"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_1"], color="red")
DBunk_12 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_12"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_12"], color="red")
DBunk_13 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_13"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_13"], color="red")
DBunk_15 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_15"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_15"], color="red")
DBunk_17 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_17"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_17"], color="red")
DBunk_20 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_20"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_20"], color="red")
DBunk_28 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_28"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_28"], color="red")
DBunk_30 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_30"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_30"], color="red")
DBunk_9 <- ggplot(data=totalibssim[clone=="March20_2018_DBunk_9"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="March20_2018_DBunk_9"], color="red")
plot_grid(DBunk_Male5, D8_46, DBunk_1, DBunk_12, DBunk_13, DBunk_15, DBunk_17, DBunk_20, DBunk_28, DBunk_30, DBunk_9)
#!/usr/bin/env Rscript
### libraries
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(ggplot2)
library(foreach)
library(lattice)
library(tidyr)
library(SeqArray)
library(cowplot)
### Load Observed IBS file
obs <- fread("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/IBSindobs_C.csv")
### Load simulated IBS files
inputfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", pattern="IBSindsim_C_April")
totalibssim <- foreach(i=1:length(inputfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", inputfiles[i], sep="")
sim <- fread(f)
sim
}
totalibssim$totalgc <- totalibssim$refgc + totalibssim$altgc
obs$totalobsgc <- obs$obsaltgc + obs$obsrefgc
D8_125 <- ggplot(data=totalibssim[clone=="April_2017_D8_125"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_125"], color="red")
D8_143 <- ggplot(data=totalibssim[clone=="April_2017_D8_143"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_143"], color="red")
D8_17 <- ggplot(data=totalibssim[clone=="April_2017_D8_17"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_17"], color="red")
D8_210 <- ggplot(data=totalibssim[clone=="April_2017_D8_210"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_210"], color="red")
D8_32 <- ggplot(data=totalibssim[clone=="April_2017_D8_32"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_32"], color="red")
D8_47 <- ggplot(data=totalibssim[clone=="April_2017_D8_47"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_47"], color="red")
D8_58 <- ggplot(data=totalibssim[clone=="April_2017_D8_58"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_58"], color="red")
D8_91 <- ggplot(data=totalibssim[clone=="April_2017_D8_91"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_91"], color="red")
D8_130 <- ggplot(data=totalibssim[clone=="April_2017_D8_130"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="April_2017_D8_130"], color="red")
plot_grid(D8_125, D8_143, D8_17, D8_210, D8_32, D8_47, D8_58, D8_91, D8_130)
#!/usr/bin/env Rscript
### libraries
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(ggplot2)
library(foreach)
library(lattice)
library(tidyr)
library(SeqArray)
library(cowplot)
### Load Observed IBS file
obs <- fread("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/IBSindobs_D.csv")
### Load simulated IBS files
inputfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", pattern="IBSindsim_D")
totalibssim <- foreach(i=1:length(inputfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", inputfiles[i], sep="")
sim <- fread(f)
sim
}
totalibssim$totalgc <- totalibssim$refgc + totalibssim$altgc
obs$totalobsgc <- obs$obsaltgc + obs$obsrefgc
D10_46 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_46"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_46"], color="red")
D10_49 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_49"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_49"], color="red")
D10_50<- ggplot(data=totalibssim[clone=="Fall_2016_D10_50"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_50"], color="red")
D10_57 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_57"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_57"], color="red")
D10_62 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_62"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_62"], color="red")
D10_63 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_63"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_63"], color="red")
D10_67 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_67"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_67"], color="red")
D10_70 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_70"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_70"], color="red")
D10_74 <- ggplot(data=totalibssim[clone=="Fall_2016_D10_74"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Fall_2016_D10_74"], color="red")
plot_grid(D10_46, D10_49, D10_50, D10_57, D10_62, D10_63, D10_67, D10_70, D10_74)
D10.1 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.1"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.1"], color="red")
D10.3 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.3"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.3"], color="red")
D10.4 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.4"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.4"], color="red")
D10.5 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.5"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.5"], color="red")
D10.6 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.6"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.6"], color="red")
plot_grid(D10.1, D10.3, D10.4, D10.5, D10.6)
#!/usr/bin/env Rscript
### libraries
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(ggplot2)
library(foreach)
library(lattice)
library(tidyr)
library(SeqArray)
library(cowplot)
### Load Observed IBS file
obs <- fread("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSobs/IBSindobs_H.csv")
### Load simulated IBS files
inputfiles <- list.files(path="/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", pattern="IBSindsim_H")
totalibssim <- foreach(i=1:length(inputfiles), .combine="rbind")%do%{
f=paste("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexandObtusaC/IBSsimB/", inputfiles[i], sep="")
sim <- fread(f)
sim
}
totalibssim$totalgc <- totalibssim$refgc + totalibssim$altgc
obs$totalobsgc <- obs$obsaltgc + obs$obsrefgc
D8.1 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.1"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.1"], color="red")
D8.10 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.10"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.10"], color="red")
D8.12<- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.12"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.12"], color="red")
D8.14 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.14"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.14"], color="red")
D8.18 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.18"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.18"], color="red")
D8.21 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.21"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.21"], color="red")
D8.28 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.28"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.28"], color="red")
D8.29 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.29"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.29"], color="red")
D8.3 <- ggplot(data=totalibssim[clone=="Spring_2016_D8_8.3"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D8_8.3"], color="red")
plot_grid(D8.1, D8.10, D8.12, D8.14, D8.18, D8.21, D8.28, D8.29, D8.3)
D10.1 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.1"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.1"], color="red")
D10.3 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.3"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.3"], color="red")
D10.4 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.4"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.4"], color="red")
D10.5 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.5"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.5"], color="red")
D10.6 <- ggplot(data=totalibssim[clone=="Spring_2016_D10_10.6"], aes(x=totalgc)) + geom_histogram() +
geom_vline(xintercept = obs$totalobsgc[obs$clone=="Spring_2016_D10_10.6"], color="red")
plot_grid(D10.1, D10.3, D10.4, D10.5, D10.6)
|
|
de39bc84971e85bfa59efb89d806885679e7d64c
|
a6e57b6e4c6011af4bcfec2c6233e184fde36493
|
/TD3/Ex3-b.R
|
981f431953003c4d4de83660857e43da95274930
|
[] |
no_license
|
Philtesting/Exercice-Language-R
|
db40ca41ece01bda6bda49f942ad3f40aa8b726e
|
98ef1ede8f36577c966dbe251d0d5847e9993c10
|
refs/heads/master
| 2020-12-15T16:36:35.801112
| 2020-01-20T19:30:22
| 2020-01-20T19:30:22
| 235,181,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
Ex3-b.R
|
rm(list = ls(all = TRUE))
p = 20
N =c()
L =c()
for (n in 1:p){
i = floor(runif(1, min= 1, max = n))
M <- matrix(data = (i + i - 1)^(-1), nrow = i, ncol=i)^n
N = c(N, det(M))
L = c(L, log(det(M)))
}
{
plot(1:p, N, "l")
plot(1:p, L, "l")
}
|
9ae6a4346807f648b1fb43968e541c4e70991986
|
374a98e903856d2c5bf7f04a7a23361f5d79949b
|
/R/RcppExports.R
|
5a5092e3deaccc795b1feb28b064abb558885601
|
[] |
no_license
|
hheiling/glmmPen
|
c60a1217c3ddf0e31494cf59635714b9c45f36f2
|
d24a0992ccf3df156113f3756703652e95f77ce1
|
refs/heads/master
| 2023-07-21T05:08:25.893420
| 2023-07-18T16:44:42
| 2023-07-18T16:44:42
| 187,713,641
| 3
| 2
| null | 2023-09-08T22:00:19
| 2019-05-20T21:08:49
|
R
|
UTF-8
|
R
| false
| false
| 3,479
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
grp_CD_XZ_FA_step <- function(y, X, Z, group, pBigMat, J_f, dims, beta, offset, step_size, sig_g, family, link, init, phi, X_group, K, penalty, params, trace) {
.Call('_glmmPen_grp_CD_XZ_FA_step', PACKAGE = 'glmmPen', y, X, Z, group, pBigMat, J_f, dims, beta, offset, step_size, sig_g, family, link, init, phi, X_group, K, penalty, params, trace)
}
grp_CD_XZ_step <- function(y, X, Z, group, pBigMat, J_q, dims, beta, offset, step_size, sig_g, family, link, init, phi, XZ_group, K, penalty, params, trace) {
.Call('_glmmPen_grp_CD_XZ_step', PACKAGE = 'glmmPen', y, X, Z, group, pBigMat, J_q, dims, beta, offset, step_size, sig_g, family, link, init, phi, XZ_group, K, penalty, params, trace)
}
maxprod <- function(X_, y_, v_, m_, n, p) {
.Call('_glmmPen_maxprod', PACKAGE = 'glmmPen', X_, y_, v_, m_, n, p)
}
soft_thresh <- function(zeta, lambda) {
.Call('_glmmPen_soft_thresh', PACKAGE = 'glmmPen', zeta, lambda)
}
MCP_soln <- function(zeta, nu, lambda, gamma, alpha) {
.Call('_glmmPen_MCP_soln', PACKAGE = 'glmmPen', zeta, nu, lambda, gamma, alpha)
}
SCAD_soln <- function(zeta, nu, lambda, gamma, alpha) {
.Call('_glmmPen_SCAD_soln', PACKAGE = 'glmmPen', zeta, nu, lambda, gamma, alpha)
}
pglm_fit <- function(y, X, dims, beta, offset, family, link, penalty, params, penalty_factor, trace) {
.Call('_glmmPen_pglm_fit', PACKAGE = 'glmmPen', y, X, dims, beta, offset, family, link, penalty, params, penalty_factor, trace)
}
sample_mc_inner_gibbs <- function(f, z, y, t, NMC, u0, family, link, phi, sig_g) {
.Call('_glmmPen_sample_mc_inner_gibbs', PACKAGE = 'glmmPen', f, z, y, t, NMC, u0, family, link, phi, sig_g)
}
sample_mc_gibbs_adapt_rw <- function(f, z, y, NMC, u0, proposal_SD, batch, batch_length, offset, nMC_burnin, family, link, phi, sig_g) {
.Call('_glmmPen_sample_mc_gibbs_adapt_rw', PACKAGE = 'glmmPen', f, z, y, NMC, u0, proposal_SD, batch, batch_length, offset, nMC_burnin, family, link, phi, sig_g)
}
Qfun_FA <- function(y, X, Z, pBigMat, group, J_f, beta, offset, dims, family, link, sig_g, phi) {
.Call('_glmmPen_Qfun_FA', PACKAGE = 'glmmPen', y, X, Z, pBigMat, group, J_f, beta, offset, dims, family, link, sig_g, phi)
}
sig_gaus_FA <- function(y, X, Z, pBigMat, group, J_q, beta, offset, dims, link) {
.Call('_glmmPen_sig_gaus_FA', PACKAGE = 'glmmPen', y, X, Z, pBigMat, group, J_q, beta, offset, dims, link)
}
invlink <- function(link, eta) {
.Call('_glmmPen_invlink', PACKAGE = 'glmmPen', link, eta)
}
Qfun <- function(y, X, Z, pBigMat, group, J_q, beta, offset, dims, family, link, sig_g, phi) {
.Call('_glmmPen_Qfun', PACKAGE = 'glmmPen', y, X, Z, pBigMat, group, J_q, beta, offset, dims, family, link, sig_g, phi)
}
sig_gaus <- function(y, X, Z, pBigMat, group, J_q, beta, offset, dims, link) {
.Call('_glmmPen_sig_gaus', PACKAGE = 'glmmPen', y, X, Z, pBigMat, group, J_q, beta, offset, dims, link)
}
phi_ml <- function(y, eta, link, limit, eps, phi) {
.Call('_glmmPen_phi_ml', PACKAGE = 'glmmPen', y, eta, link, limit, eps, phi)
}
phi_ml_init <- function(y, eta, link, limit, eps) {
.Call('_glmmPen_phi_ml_init', PACKAGE = 'glmmPen', y, eta, link, limit, eps)
}
Qfun_quad_beta <- function(Q0, step_size, diff0, eta, eta0, beta, beta0) {
.Call('_glmmPen_Qfun_quad_beta', PACKAGE = 'glmmPen', Q0, step_size, diff0, eta, eta0, beta, beta0)
}
|
6bf87c8c4ba4f287f22ac2cc03b7885ad4704199
|
a730692eff417c0c25b716d72000dc7e486b8c91
|
/HRAnalytics.R
|
11d7d80dc9fbde553358f91c1b9436e0afb456b7
|
[] |
no_license
|
sayonti/HR-Analytics
|
21e0cd26223867cc6a4ece8b3d93611a9202e226
|
2f1adb85f10980ca2e5cfa4066009da011ede743
|
refs/heads/master
| 2021-01-23T01:02:06.618474
| 2017-03-22T20:52:47
| 2017-03-22T20:52:47
| 85,867,435
| 0
| 0
| null | 2017-03-22T19:23:21
| 2017-03-22T19:23:21
| null |
UTF-8
|
R
| false
| false
| 3,635
|
r
|
HRAnalytics.R
|
#install.packages('gsheet')
library(gsheet)
origData <- as.data.frame(gsheet2tbl('https://docs.google.com/spreadsheets/d/19-Zv4KiYXw20Dmtj97BfcE6Cri4paA2lnALa6H3w7pc/edit#gid=205206323'))
employeeID <- c(1:dim(origData)[1])
HRData<- as.data.frame(cbind(employeeID, origData))
colnames(HRData)[1] <- c('employee_ID')
names(HRData)[names(HRData) == "sales"] <- "department"
# LD: change salary from character to ordinal
HRData$salary <- ordered(HRData$salary, levels=c("low", "medium", "high"))
# AA: changed the remaining variables to their correct form & checked if no column is missed.
HRData$department <- as.factor(HRData$department)
HRData$Work_accident <- as.factor(HRData$Work_accident)
HRData$left <- as.factor(HRData$left)
HRData$promotion_last_5years <- as.factor(HRData$promotion_last_5years)
# GN: Renamed the 'sales' column since it's the department name
colnames(HRData)[10] <- "department"
factVars <- colnames(HRData)[sapply(HRData, is.factor)]
numVars <- colnames(HRData)[sapply(HRData, is.numeric)]
stopifnot(length(numVars) + length(factVars) == length(colnames(HRData)))
# GN: Created a new directory called 'hr_graphs' to store graphs
library(ggplot2)
mainDirectory <- getwd()
graphDirectory <- paste(getwd(),"/hr_graphs",sep = "")
if (!dir.exists(graphDirectory)) {
dir.create(graphDirectory)
}
setwd(graphDirectory)
# Centers plot titles
theme_update(plot.title = element_text(hjust = 0.5))
# Barplot of Promotion v. Left
pdf("left_promoted_barplot.pdf")
ggplot(HRData, aes(x = left, y = (..count..), fill = promotion_last_5years)) +
geom_bar(position = "dodge",width = .5) +
#geom_text(stat = 'count', aes(label = (..count..)),position = position_dodge(0.5)) +
scale_fill_discrete("Promoted In Last 5 Years", labels = c("Not Promoted","Promoted")) +
scale_x_discrete("Left Company", labels = c("Stayed", "Left")) +
ylab("No. of Employee") + ggtitle("Employee Left Company v. Employee Promoted in Last 5 Years")
dev.off()
# Scatterplot of Satisfaction v. Left
pdf("left_satisfaction_scatter.pdf")
ggplot(HRData, aes(x = left, y = satisfaction_level)) +
scale_x_discrete("Left Company", labels = c("Stayed", "Left")) +
scale_y_continuous("Satisfaction Level") +
ggtitle("Employee Satisfcation v. Employee Left Company") +
geom_jitter(alpha = 0.25, color = "darkblue")
dev.off()
# Barplot of Salary Level v. Left
pdf("left_salary_bar.pdf")
ggplot(HRData,aes(x = salary, y = (..count..), fill = left)) +
geom_bar(position = "dodge",width = .5) +
scale_fill_discrete("Left Company", labels = c("Stayed","Left")) +
scale_x_discrete("Salary Level", labels = c("Low", "Medium", "High")) +
scale_y_continuous("No. of Employees") +
ggtitle("Employee Salary Level v. Employee Left Company")
dev.off()
# Barplot of Department v. Left
pdf("left_department_bar.pdf")
ggplot(HRData,aes(x = department, y = (..count..), fill = left)) +
geom_bar(position = "dodge",width = .5) +
scale_fill_discrete("Left Company", labels = c("Stayed","Left")) +
scale_x_discrete("Department Name") +
scale_y_continuous("No. of Employees") +
ggtitle("Department v. Employee Left Company") + coord_flip()
dev.off()
setwd(mainDirectory)
# Boxplot for numeric variables
numericColumns <- HRData[,sapply(HRData,is.numeric)]
numCols <- numericColumns[,-1]
for(name in colnames(numCols)){
pdf(paste(name, '_boxplot.pdf'))
boxplot(numCols[[name]], main= name, horizontal = TRUE)
dev.off()
}
#correlation matrix
library(corrplot)
CorMat <- cor(numCols)
corrplot(CorMat, method = "pie")
|
79a0dc872cd5553ed4c3333e312eee4c7a5ab32a
|
29ecbdc56a470141afdf02077628a5b3cd6a12f7
|
/CustomFunctions/PlotPeaks.R
|
7a442938dfa265c32cdc8e9b51d3124fc8f969fa
|
[
"MIT"
] |
permissive
|
gretchunkim/NEAT
|
d8fd333abbeb10974abdc30fb663e8848928b967
|
10864f1924fc25bb2e847c69e5a08f1e2aae6dbe
|
refs/heads/master
| 2021-05-29T17:09:02.517936
| 2015-10-18T03:51:59
| 2015-10-18T03:51:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,067
|
r
|
PlotPeaks.R
|
#------------------------------------------------------------
# Libraries
source("~/PepsRscripts/RScripts/PepsFunctions/RectPlotter.R")
#------------------------------------------------------------
# PlotPeaks takes a matrix of peaks of form chr - start - end - val
# Plots peaks. If no value is provided, height of peak = trachheight
PlotPeaks <- function(peaks=get(peak_chr3R), ypos=ycenter[length(ycenter)], trackheight=trackheight, xpostitle=get(peak_chr3R)[1,2], relativeheight="FALSE", title="Peaks", peakcolor="Black"){
if(length(peaks[1,])!=4){
peaks <- cbind(peaks, trackheight)
}
if(relativeheight=="TRUE"){
peaks[,4] <- (peaks[,4]/max(peaks[,4]))*trackheight
}
if(relativeheight=="FALSE"){
peaks[,4] <- trackheight
}
for(k in 1:length(peaks[,1])){
#RectPlotter(matrixval=peaks[k,2:3], ycoord=ypos, trackheight=trackheight, mycolors=peakcolor)
RectPlotter(matrixval=peaks[k,2:3], ycoord=ypos, trackheight=trackheight, mycolors=peakcolor, relrectheight=peaks[k,4])
}
text(xpostitle, ypos, title, cex=0.5, pos=4)
}
|
382eb959aa62899d151c88aa4b75d4e862190635
|
d75b7bc015b47d94254bcc9334ba15972d3ec9a1
|
/1. FIRST YEAR/Introduction to Computing/Exercices_Laura/exercici11.R
|
b568437676c1eb53a80413c1d2610f7499969487
|
[] |
no_license
|
laurajuliamelis/BachelorDegree_Statistics
|
a0dcfec518ef70d4510936685672933c54dcee80
|
2294e3f417833a4f3cdc60141b549b50098d2cb1
|
refs/heads/master
| 2022-04-22T23:55:29.102206
| 2020-04-22T14:14:23
| 2020-04-22T14:14:23
| 257,890,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
exercici11.R
|
cat("Escriu tres nombres enters:","\n")
x <- scan(n=1, quiet=TRUE)
y <- scan(n=1, quiet=TRUE)
z <- scan(n=1, quiet=TRUE)
cat(x>y && y>z || x<y && y<z, "\n")
|
6d56fc2123b8a4ecd5bf904a07ef6d932246fe30
|
fd2a324a9505ed29e6136a06216edce999fa97a1
|
/man/plot.NMixPredCDFMarg.Rd
|
f0e8a448608dcac1d8982268d8a61f49538e4ecf
|
[] |
no_license
|
cran/mixAK
|
995c88ac9b1f70ab2dac51b4fc1347b9b1356eed
|
adc4c2229d8ad3573e560fd598158e53e5d1da76
|
refs/heads/master
| 2022-09-27T10:45:02.953514
| 2022-09-19T13:46:13
| 2022-09-19T13:46:13
| 17,697,529
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,073
|
rd
|
plot.NMixPredCDFMarg.Rd
|
\name{plot.NMixPredCDFMarg}
\alias{plot.NMixPredCDFMarg}
\title{
Plot computed marginal predictive cumulative distribution functions
}
\description{
This is a basic plotting tool to visualize computed marginal
cumulative distribution functions, see \code{\link{NMixPredCDFMarg}}.
}
\usage{
\method{plot}{NMixPredCDFMarg}(x, K=0, auto.layout=TRUE,
type="l", col="darkblue", lty=1, lwd=1, main, xlab, ylab, \dots)
}
\arguments{
\item{x}{an object of class \code{NMixPredCDFMarg}.}
\item{K}{if equal to \code{0} then the overall predictive CDF's
are plotted taken from the \code{dens} part of the object \code{x}.
If higher than \code{0} then the predictive CDF conditioned by
the value of \code{K} is plotted (taken from the \code{densK} part
of the object \code{x}).
}
\item{auto.layout}{if \code{TRUE} then the function determines itself
how to divide the plotting region to draw densities for all margins.
}
\item{type}{type of the plot.}
\item{col}{color used to draw the lines.}
\item{lty}{type of the line.}
\item{lwd}{line width.}
\item{main}{main title of the plot. Either character which is
replicated or a vector of characters of the length equal to the number of margins.}
\item{xlab}{label for the x-axis. Either character which is
replicated or a vector of characters of the length equal to the number of margins.}
\item{ylab}{label for the y-axis. Either character which is
replicated or a vector of characters of the length equal to the number of margins.}
\item{\dots}{additional arguments passed to the \code{plot} function.}
}
\value{
\code{invisible(x)}
}
\references{
Komárek, A. (2009).
A new R package for Bayesian estimation of multivariate normal mixtures allowing for selection
of the number of components and interval-censored data.
\emph{Computational Statistics and Data Analysis}, \bold{53}(12), 3932--3947.
}
\seealso{
\code{\link{NMixPredCDFMarg}}, \code{\link{NMixMCMC}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{dplot}
|
ac344bbbe6e8931f23666f1aa8fb8d8fcbb60038
|
bae192fc279f36e7f05df7afd7b395beb32fe357
|
/graph/CY151620_131224h_NumDEGs.R
|
4caf91667675efbea9e16621c6edfdb00e52ff25
|
[] |
no_license
|
YKeito/CY_eachtime
|
fdcb07b2a0f0ba84af310bba4b1e3cba24bfa661
|
7480e1f34f062884ff7e06d4f67d035517c6c33d
|
refs/heads/master
| 2022-12-21T23:34:38.744444
| 2020-09-28T05:19:22
| 2020-09-28T05:19:22
| 299,196,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,998
|
r
|
CY151620_131224h_NumDEGs.R
|
#### FDR005 ####
# Count differentially expressed genes (DEGs, q < 0.05) for each sample
# (CY15 / CY16 / CY20) and time point (1h / 3h / 12h / 24h), split into
# up- and down-regulated, then draw stacked bar charts faceted by sample.
# NOTE(review): assumes `allRNASeq` (fold-change columns plus matching
# *_q_value columns) is already loaded in the session -- confirm upstream.

# up-regulated: positive fold change among genes passing q < 0.05
CY15_1h_FDR005_up <- sum(allRNASeq$CY15_1h[allRNASeq$CY15_1h_q_value < 0.05] > 0)
CY15_3h_FDR005_up <- sum(allRNASeq$CY15_3h[allRNASeq$CY15_3h_q_value < 0.05] > 0)
CY15_12h_FDR005_up <- sum(allRNASeq$CY15_12h[allRNASeq$CY15_12h_q_value < 0.05] > 0)
CY15_24h_FDR005_up <- sum(allRNASeq$CY15_24h[allRNASeq$CY15_24h_q_value < 0.05] > 0)
CY16_1h_FDR005_up <- sum(allRNASeq$CY16_1h[allRNASeq$CY16_1h_q_value < 0.05] > 0)
CY16_3h_FDR005_up <- sum(allRNASeq$CY16_3h[allRNASeq$CY16_3h_q_value < 0.05] > 0)
CY16_12h_FDR005_up <- sum(allRNASeq$CY16_12h[allRNASeq$CY16_12h_q_value < 0.05] > 0)
CY16_24h_FDR005_up <- sum(allRNASeq$CY16_24h[allRNASeq$CY16_24h_q_value < 0.05] > 0)
CY20_1h_FDR005_up <- sum(allRNASeq$CY20_1h[allRNASeq$CY20_1h_q_value < 0.05] > 0)
CY20_3h_FDR005_up <- sum(allRNASeq$CY20_3h[allRNASeq$CY20_3h_q_value < 0.05] > 0)
CY20_12h_FDR005_up <- sum(allRNASeq$CY20_12h[allRNASeq$CY20_12h_q_value < 0.05] > 0)
CY20_24h_FDR005_up <- sum(allRNASeq$CY20_24h[allRNASeq$CY20_24h_q_value < 0.05] > 0)

# down-regulated: negative fold change among genes passing q < 0.05
CY15_1h_FDR005_down <- sum(allRNASeq$CY15_1h[allRNASeq$CY15_1h_q_value < 0.05] < 0)
CY15_3h_FDR005_down <- sum(allRNASeq$CY15_3h[allRNASeq$CY15_3h_q_value < 0.05] < 0)
CY15_12h_FDR005_down <- sum(allRNASeq$CY15_12h[allRNASeq$CY15_12h_q_value < 0.05] < 0)
CY15_24h_FDR005_down <- sum(allRNASeq$CY15_24h[allRNASeq$CY15_24h_q_value < 0.05] < 0)
CY16_1h_FDR005_down <- sum(allRNASeq$CY16_1h[allRNASeq$CY16_1h_q_value < 0.05] < 0)
CY16_3h_FDR005_down <- sum(allRNASeq$CY16_3h[allRNASeq$CY16_3h_q_value < 0.05] < 0)
CY16_12h_FDR005_down <- sum(allRNASeq$CY16_12h[allRNASeq$CY16_12h_q_value < 0.05] < 0)
CY16_24h_FDR005_down <- sum(allRNASeq$CY16_24h[allRNASeq$CY16_24h_q_value < 0.05] < 0)
CY20_1h_FDR005_down <- sum(allRNASeq$CY20_1h[allRNASeq$CY20_1h_q_value < 0.05] < 0)
CY20_3h_FDR005_down <- sum(allRNASeq$CY20_3h[allRNASeq$CY20_3h_q_value < 0.05] < 0)
CY20_12h_FDR005_down <- sum(allRNASeq$CY20_12h[allRNASeq$CY20_12h_q_value < 0.05] < 0)
# BUG FIX: original read `allRNASeq$CY20_24hh` (typo, double "h"), which
# returns NULL and silently produced a count of 0 for this contrast.
CY20_24h_FDR005_down <- sum(allRNASeq$CY20_24h[allRNASeq$CY20_24h_q_value < 0.05] < 0)

# CYall FDR005: long-format table of counts for plotting.
# The "01"/"02"/"03"/"04" prefixes exist only to force the desired
# ordering of the level names; they are relabelled below.
df <- data.frame(expression_change = rep(c("01up", "02down"), times = 12),
                 Numgenes = c(CY15_1h_FDR005_up, CY15_1h_FDR005_down,
                              CY15_3h_FDR005_up, CY15_3h_FDR005_down,
                              CY15_12h_FDR005_up, CY15_12h_FDR005_down,
                              CY15_24h_FDR005_up, CY15_24h_FDR005_down,
                              CY16_1h_FDR005_up, CY16_1h_FDR005_down,
                              CY16_3h_FDR005_up, CY16_3h_FDR005_down,
                              CY16_12h_FDR005_up, CY16_12h_FDR005_down,
                              CY16_24h_FDR005_up, CY16_24h_FDR005_down,
                              CY20_1h_FDR005_up, CY20_1h_FDR005_down,
                              CY20_3h_FDR005_up, CY20_3h_FDR005_down,
                              CY20_12h_FDR005_up, CY20_12h_FDR005_down,
                              CY20_24h_FDR005_up, CY20_24h_FDR005_down
                 ),
                 Category = rep(rep(c("011h", "023h", "0312h", "0424h"), each = 2), times = 3),
                 CY = rep(c("CY15", "CY16", "CY20"), each = 8)
)

library(ggplot2)
library(reshape2)
#install.packages("RColorBrewer", dependencies = TRUE)
#library(RColorBrewer)
#display.brewer.all()

# FIX: relabel via factor(levels =, labels =) instead of `levels<-`.
# Under R >= 4.0 data.frame() no longer creates factors, so the original
# `levels(df$Category) <- ...` silently did nothing; this form works on
# both factor and character columns and makes the intended order explicit.
df$Category <- factor(df$Category,
                      levels = c("011h", "023h", "0312h", "0424h"),
                      labels = c("1h", "3h", "12h", "24h"))
df$expression_change <- factor(df$expression_change,
                               levels = c("01up", "02down"),
                               labels = c("up", "down"))

g <- ggplot(
  df,
  aes(
    x = Category,             # grouped by time point
    y = Numgenes,
    fill = expression_change  # stack up vs. down counts
  )
)
g <- g + geom_bar(stat = "identity")
g <- g + theme_bw()
# BUG FIX: the mapped aesthetic is `fill`, so a fill scale is required;
# the original scale_colour_manual(values = "Set1") targeted the unmapped
# colour aesthetic and had no visible effect.
g <- g + scale_fill_brewer(palette = "Set1")
g <- g + ylab("Number of DEGs")
g <- g + xlab("time-course")
g <- g + theme(axis.text = element_text(size = 20), axis.title = element_text(size = 20, face = "bold"))
# NOTE(review): theme_bw(base_size = 20) replaces the theme() tweaks on the
# previous line (kept to preserve the original output).
g <- g + theme_bw(base_size = 20)
CYall <- g + facet_grid(CY ~ .)
CYall <- CYall + theme(legend.position = 'none')
plot(CYall)
|
ba92c8bda664c248cb82e9da10a543e8d68a331a
|
3657383536fd9efb61367977254be08dae26a623
|
/Models/Ranger/model-mlr-ranger-learn-curve.R
|
206e338c2cb79125bfb47e5f987a1ad7dc3f35f4
|
[] |
no_license
|
kevinkr/kaggle-house-prices
|
54b176be43f383b7cd35a2c6bdfa3948f7980459
|
da985ba1331735042b8dd0dcce2ec0f430d2ac8c
|
refs/heads/master
| 2021-01-12T08:29:28.389256
| 2017-02-10T15:26:47
| 2017-02-10T15:26:47
| 76,594,754
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,705
|
r
|
model-mlr-ranger-learn-curve.R
|
# Kaggle House Prices 12-21-16
# MLR Ranger
library(mlbench)
library(ranger)
library(Metrics)
library(mlr)
# Register a "regr.ranger" learner constructor for mlr.
# Declares the tunable hyperparameter space of ranger::ranger() for
# regression tasks, package-level defaults, and learner properties.
# Returns the RLearnerRegr object built by makeRLearnerRegr().
makeRLearner.regr.ranger = function() {
  makeRLearnerRegr(
    cl = "regr.ranger",
    package = "ranger",
    par.set = makeParamSet(
      makeIntegerLearnerParam(id = "num.trees", lower = 1L, default = 500L),
      # FIXME: Add default value when data dependent defaults are implemented: mtry=floor(sqrt(#independent vars))
      makeIntegerLearnerParam(id = "mtry", lower = 1L),
      makeIntegerLearnerParam(id = "min.node.size", lower = 1L, default = 5L),
      makeLogicalLearnerParam(id = "replace", default = TRUE),
      makeNumericLearnerParam(id = "sample.fraction", lower = 0L, upper = 1L),
      makeNumericVectorLearnerParam(id = "split.select.weights", lower = 0, upper = 1),
      makeUntypedLearnerParam(id = "always.split.variables"),
      makeLogicalLearnerParam(id = "respect.unordered.factors", default = FALSE),
      # tunable = FALSE entries are configuration knobs, not search targets
      makeDiscreteLearnerParam(id = "importance", values = c("none", "impurity", "permutation"), default = "none", tunable = FALSE),
      makeLogicalLearnerParam(id = "write.forest", default = TRUE, tunable = FALSE),
      makeLogicalLearnerParam(id = "scale.permutation.importance", default = FALSE, requires = quote(importance == "permutation"), tunable = FALSE),
      makeIntegerLearnerParam(id = "num.threads", lower = 1L, when = "both", tunable = FALSE),
      makeLogicalLearnerParam(id = "save.memory", default = FALSE, tunable = FALSE),
      makeLogicalLearnerParam(id = "verbose", default = TRUE, when = "both", tunable = FALSE),
      makeIntegerLearnerParam(id = "seed", when = "both", tunable = FALSE),
      makeDiscreteLearnerParam(id = "splitrule", values = c("variance", "maxstat"), default = "variance"),
      # alpha/minprop only apply to the "maxstat" split rule
      makeNumericLearnerParam(id = "alpha", lower = 0L, upper = 1L, default = 0.5, requires = quote(splitrule == "maxstat")),
      makeNumericLearnerParam(id = "minprop", lower = 0L, upper = 1L, default = 0.1, requires = quote(splitrule == "maxstat")),
      makeLogicalLearnerParam(id = "keep.inbag", default = FALSE, tunable = FALSE)
    ),
    # defaults applied when the learner is constructed (see `note` below)
    par.vals = list(num.threads = 1L, verbose = FALSE, respect.unordered.factors = TRUE),
    properties = c("numerics", "factors", "ordered", "featimp"),
    name = "Random Forests",
    short.name = "ranger",
    note = "By default, internal parallelization is switched off (`num.threads = 1`), `verbose` output is disabled, `respect.unordered.factors` is set to `TRUE`. All settings are changeable."
  )
}
#' Train method for the regr.ranger learner: fits a ranger forest on the
#' selected subset of the task data, predicting the task's target column.
#' Extra arguments in `...` are forwarded to ranger::ranger().
#' @export
trainLearner.regr.ranger = function(.learner, .task, .subset, .weights, ...) {
  target_name <- getTaskTargetNames(.task)
  task_data <- getTaskData(.task, .subset)
  ranger::ranger(
    formula = NULL,
    dependent.variable = target_name,
    data = task_data,
    ...
  )
}
#' Predict method for the regr.ranger learner: runs the fitted ranger
#' model on new data and returns the numeric prediction vector.
#' @export
predictLearner.regr.ranger = function(.learner, .model, .newdata, ...) {
  fitted_forest <- .model$learner.model
  predict(object = fitted_forest, data = .newdata, ...)$predictions
}
#' Feature importance for the regr.ranger learner.
#' Delegates to the classification variant, forwarding all arguments.
#' NOTE(review): getFeatureImportanceLearner.classif.ranger is not defined
#' in this file -- confirm it is available (e.g. from mlr) at run time.
#' @export
getFeatureImportanceLearner.regr.ranger = function(.learner, .model, ...) {
  getFeatureImportanceLearner.classif.ranger(.learner, .model, ...)
}
# ---- Task construction ----
# create mlr train and test task
# NOTE(review): assumes `train`, `X_train`, `X_test`, `validTrain` and
# `resultsTable` already exist in the session (built by other scripts).
train$SalePrice <- log(train$SalePrice + 200)  # log target; +200 offset -- TODO confirm rationale
y <- train$SalePrice
X_train <- cbind(X_train, SalePrice = y)
X_test <- cbind(X_test, SalePrice = -99)  # dummy target so a task can be built
trainTask = makeRegrTask(data = as.data.frame(X_train), target = "SalePrice")
testTask = makeRegrTask(data = as.data.frame(X_test), target = "SalePrice")

# ---- Measures ----
m1 = rmse
m2 = setAggregation(rmse, train.rmse) # unload Metrics package if error

# ---- Learner with starting hyperparameters ----
# specify mlr learner with some nice hyperpars
set.seed(123)
lrn = makeLearner("regr.ranger")
lrn = setHyperPars(lrn,
                   num.trees = 200,
                   min.node.size = 5,
                   respect.unordered.factors = TRUE,
                   verbose = TRUE,
                   mtry = 5,
                   importance = "impurity"
)

# 1) make parameter set
ps = makeParamSet(
  # for RF, start with # of trees
  # then max tree depth
  # and minimum sample leaf
  makeIntegerParam("num.trees", lower = 100, upper = 400),
  makeIntegerParam("min.node.size", lower = 1, upper = 10),
  #makeDiscreteParam("num.trees", values = c(200, 250, 500, 750, 1000)),
  makeIntegerParam("mtry", lower = 18, upper = 30)
)
# 2) Cross-Validation to measure improvements
# NOTE(review): comment in the original said 3-fold, but iters = 10L (10-fold).
rdesc = makeResampleDesc("CV", iters = 10L, predict = "both")
# 3) Random search control
# NOTE(review): `ctrl` is created but never used -- the tuneParams() call
# below builds a grid control inline instead.
ctrl = makeTuneControlRandom(maxit = 10)
# 4) now use the learner on the training Task with the CV to optimize the parameter set
#parallelStartMulticore(5)
#res = tuneParams(lrn, task = trainTask, resampling = rdesc,
#                 par.set = ps, control = ctrl)
res = tuneParams(lrn,
                 task = trainTask,
                 resampling = rdesc,
                 par.set = ps,
                 control = makeTuneControlGrid(resolution = 10L),
                 measures = list(m1, m2)
)

# ---- Persist tuning results ----
# NOTE(review): absolute Windows path -- not portable.
resultsTableExport <- cbind(resultsTable, Model = "ranger", lowestRmse = res$y[c(1)])
currentDateTime <- strftime(Sys.time(), "%Y %m %d %H %M %S")
csvFileName <- paste("C:/Users/kruegkj/Documents/GitHub/kaggle-house-prices/",
                     currentDateTime, ".csv", sep = "")
write.csv(resultsTableExport, file = csvFileName)
rm(resultsTableExport)

# ---- Train with best hyperparameters ----
# Train on entire dataset (using best hyperparameters)
lrn = setHyperPars(lrn, par.vals = res$x)
mod = train(lrn, trainTask)
# NOTE(review): `predict` the variable shadows stats::predict; later calls
# still resolve to the function, but renaming the variable would be clearer.
predict = predict(mod, trainTask)
predict
# NOTE(review): SalePrice was already log-transformed above, so log() is
# applied twice here -- verify the intended metric.
mean(rmse(log(X_train$SalePrice), log(as.data.frame(predict))))
# predict on new data
predict = predict(mod, newdata = validTrain)
predict
rmse(log(validTrain$SalePrice), log(as.data.frame(predict)))

#######################
# ---- Hyperparameter effect plots ----
opt.grid = as.data.frame(res$opt.path)
g = ggplot(opt.grid, aes(x = num.trees, y = mtry, fill = rmse.test.rmse))
g + geom_tile()

res_data = generateHyperParsEffectData(res)
ggplot(data = res_data$data, aes(x = mtry, y = rmse.train.rmse)) +
  geom_line(aes(color = "rmse.train.rmse")) +
  geom_line(aes(y = rmse.test.rmse, color = "rmse.test.rmse")) +
  facet_wrap(~num.trees)

# ---- Learning curves: vary mtry ----
# Let's explore various training set sizes for each
lrn_best = setHyperPars(makeLearner('regr.ranger', id = "opt_regr.ranger"), par.vals = res$x)
lrn_max1 = setHyperPars(makeLearner('regr.ranger', id = "mtry = 22"), par.vals = list(mtry = 22))
lrn_max5 = setHyperPars(makeLearner('regr.ranger', id = "mtry = 26"), par.vals = list(mtry = 26))
lrn_max10 = setHyperPars(makeLearner('regr.ranger', id = "mtry = 30"), par.vals = list(mtry = 30))
r = generateLearningCurveData(list(lrn_best, lrn_max1, lrn_max5, lrn_max10, 'regr.ranger'),
                              task = trainTask,
                              percs = seq(0.1, 1, by = 0.1),
                              measures = list(m1, m2),
                              show.info = TRUE,
                              resampling = rdesc
)
plotLearningCurve(r, facet = "learner", pretty.names = FALSE)
plotLearningCurve(r, pretty.names = FALSE)

# ---- Learning curves: vary num.trees ----
# Let's explore various training set sizes for each
# NOTE(review): the learner ids ("num.trees = 10"/"200"/"500") do not match
# the actual values set (150/200/250) -- labels only, but misleading.
lrn_best = setHyperPars(makeLearner('regr.ranger', id = "opt_regr.ranger"), par.vals = res$x)
lrn_max1 = setHyperPars(makeLearner('regr.ranger', id = "num.trees = 10"), par.vals = list(num.trees = 150))
lrn_max5 = setHyperPars(makeLearner('regr.ranger', id = "num.trees = 200"), par.vals = list(num.trees = 200))
lrn_max10 = setHyperPars(makeLearner('regr.ranger', id = "num.trees = 500"), par.vals = list(num.trees = 250))
r = generateLearningCurveData(list(lrn_best, lrn_max1, lrn_max5, lrn_max10, 'regr.ranger'),
                              task = trainTask,
                              percs = seq(0.1, 1, by = 0.1),
                              measures = list(m1, m2),
                              show.info = TRUE,
                              resampling = rdesc
)
plotLearningCurve(r, facet = "learner", pretty.names = FALSE)
plotLearningCurve(r, pretty.names = FALSE)

# best parameters
res$x
# test result
res$y
# resampling
r = resample("regr.ranger", trainTask, rdesc,
             measures = m1, par.vals = res$x, show.info = FALSE)
r$aggr
r$measures.train
r$measures.test

##############################################
# ---- Final model and submission ----
# train on full train set
fullTrainTask = makeRegrTask(data = as.data.frame(train), target = "SalePrice")
final_mod = train(lrn, fullTrainTask)
# NOTE(review): predicts with `mod` (trained on trainTask above), not with
# `final_mod` trained on the full set -- probably unintended.
pred = exp(getPredictionResponse(predict(mod, testTask))) - 200
summary(pred)
# construct data frame for solution
submission = read.csv("Data/sample_submission.csv", colClasses = c("integer", "numeric"))
submission$SalePrice = pred
write.csv(submission, file = 'Submissions/ranger-mlr-v3-1-23-17.csv', row.names = FALSE)
|
7688138ee0c5836839ce8093a33fe634b99ba7c8
|
5cfe0376f8e6d47b8c47525c4a23b39563ccee29
|
/Week_01.Introduction.To.Statistics/Rsession_week_01.plotting.R
|
87aae64ba28d10fb2badb4fef94bf12233ba8c86
|
[] |
no_license
|
sophiemaichau/DataScience
|
206421f08eb03c41d1822ba14e57498fae3a68de
|
f83c0bbf636b9a57b8000648b2c88395e10a28c6
|
refs/heads/master
| 2021-05-07T14:46:50.715951
| 2018-01-22T10:16:29
| 2018-01-22T10:16:29
| 109,861,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,225
|
r
|
Rsession_week_01.plotting.R
|
#### Introduction ####
#
# Datascience in Bioinformatics (an extended version of an earlier course called LEARNING FROM GENOME DATA I
#
# The questions and exercises are identified as # Q:

#### This is a section header ####
# Section headers make it a lot easier to navigate your script
# Please check the drop down menu at the bottom of this window.

#### R for data science exercises: Data visualization ####
# First you should work through two online tutorials that will introduce you to ggplot2
# We advice you to make a new file (R script) and use that for doing the tutorial.
# When done - save the R script with an appropriate filename and in an appropriate location
#
#
# The tutorial is from the free book "R for data science" written by the R Overlord Hadley Wickham
# URL: http://r4ds.had.co.nz/index.html
#
#
# Read Welcome
# Read 1. Introduction
# Read 2. Introduction
# Go through 3. Data visualization (this takes some time but will introduce you to ggplot2)
#
# If you need help have a look at the different cheatsheets
# And if you are stuck - use the online forum!
#

#### Use you new knowledge to Work on real data ####
# NOTE(review): install.packages() inside a script re-installs on every
# run; usually run once interactively instead.
install.packages("tidyverse")
library(tidyverse)
# By using the knowledge from above we will now work on a dataset
# Read the data
microbes = read_tsv(file="taxontable.tsv") # read data
# Notice the warning - let's tell it that it may read more rows to guess the correct type of each row
microbes = read_tsv(file="taxontable.tsv", guess_max=5000) # read data
microbes # shows the data
dim(microbes) # 6773 x 11
names(microbes)
summary(microbes)

#### Variation of genome size and number of genes ####
# Hint for keeping and saving a plot: build a small random data set
# (100 points, 3 groups), store the ggplot object, then save it with ggsave().
plotdata = data.frame(x=rnorm(100), y=rnorm(100), type=sample(x = 1:3, size = 100, replace = T))
plot1 = ggplot(data = plotdata) + geom_point(mapping = aes(x = x, y = y, color=type))
plot(plot1)
ggsave(filename = "test.png", plot = plot1)
plotdata
plot1
# Q: Make a scatterplot of Genome.Size(x) vs. Gene.Count(y)
plotx = ggplot(data = microbes) +
  geom_point(mapping = aes(x = Genome.Size, y = Gene.Count))

# Q: Make a scatterplot of Genome.Size(x) vs. Gene.Count(y) on log-log scale
# Hint: ?scale_x_log10() (read examples)
?scale_x_log10()
# add log scales to the stored plot object
plotx + scale_x_log10() + scale_y_log10()

# Q: Make a log-log scatterplot so you compare the different Domains (different colors)
ggplot(data = microbes) +
  geom_point(mapping = aes(x = Genome.Size, y = Gene.Count, color = Domain)) +
  scale_x_log10() +
  scale_y_log10()

# Q: Make a facetted log-log scatterplot so you compare the different Domains
# HINT: ..facet_wrap()
ggplot(data = microbes) +
  geom_point(mapping = aes(x = Genome.Size, y = Gene.Count, color = Domain)) +
  scale_x_log10() +
  scale_y_log10() +
  facet_wrap(~Domain, nrow = 4)

# Q: What can you say about the relationship between genome size and gene count.
# You will present this and discuss it at the exercises
#### The relationship between genome size and gene density ####
# We define a new variable called "Gene.Density" in the dataset (number of genes per Mb).
# BUG FIX: the original computed Gene.Count / 10^6, which is just a rescaled
# gene count and does not involve genome size at all -- contradicting the
# stated intent "number of genes per Mb". Genes per Mb must be normalised
# by the genome length.
# NOTE(review): assumes Genome.Size is in base pairs -- confirm the units
# in taxontable.tsv (if it is already in Mb, use Gene.Count / Genome.Size).
microbes = mutate(microbes, Gene.Density = Gene.Count / (Genome.Size / 10^6))

# Q: How is gene density distributed ?
# Hint: +geom_histogram()
ggplot(data = microbes, mapping = aes(x = Gene.Density)) +
  geom_histogram()

# Q: Make the histogram but with subplots for each Domain (facets)
ggplot(data = microbes, mapping = aes(x = Gene.Density)) +
  geom_histogram() +
  facet_wrap(~Domain)

# Q: Plot the gene density as a function of genome size on a log-log scale
ggplot(data = microbes, mapping = aes(x = Genome.Size, y = Gene.Density)) +
  geom_point() +
  scale_x_log10() +
  scale_y_log10()

# Q: Produce the same figure, but each domain in a different color
ggplot(data = microbes, mapping = aes(x = Genome.Size, y = Gene.Density, color = Domain)) +
  geom_point() +
  scale_x_log10() +
  scale_y_log10()

# Q: Produce the same figure, but each domain in a different subplot (facets)
ggplot(data = microbes, mapping = aes(x = Genome.Size, y = Gene.Density, color = Domain)) +
  geom_point() +
  scale_x_log10() +
  scale_y_log10() +
  facet_wrap(~Domain)

# Q: Present your results at the exercises.
# You are now done: https://goo.gl/3XLFxY
915b44457c2392e80f701b36c125c48c426d54b3
|
915625d842373876ed2f4579eea3ca7d67d6de6d
|
/cachematrix.R
|
0585cf14647001ec29894a09ca26c3c8182a9685
|
[] |
no_license
|
malinos/ProgrammingAssignment2
|
da0814d72e528425695a4a0a91322001b04c6a77
|
607639ce2bf6a41109883bcaf1dd77c0dfab65ff
|
refs/heads/master
| 2021-01-16T18:03:36.684782
| 2015-07-23T16:07:08
| 2015-07-23T16:07:08
| 39,566,459
| 0
| 0
| null | 2015-07-23T12:36:45
| 2015-07-23T12:36:45
| null |
UTF-8
|
R
| false
| false
| 1,406
|
r
|
cachematrix.R
|
## Matrix inversion is usually a costly computation
## and there may be some benefit to caching the inverse of a matrix
## rather than compute it repeatedly.
## This is the ProgrammingAssignment2 with the pair of function:
## 1.makeCacheMatrix: creates a special "matrix" object that can cache its inverse.
## 2.cacheSolve: computes the inverse of the special "matrix" returned
## by makeCacheMatrix
## Creates a special "matrix" object that can cache its inverse.
## Creates a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions that close over `x` (the
## stored matrix) and `m` (its cached inverse, NULL until cached):
##   set(y)       -- store a new matrix and invalidate the cached inverse
##   get()        -- return the stored matrix
##   setsolve(s)  -- cache the supplied inverse
##   getsolve()   -- return the cached inverse (NULL if not cached yet)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  # `<<-` updates the bindings in this function's environment (not the
  # locals of `set`), which is what makes the cache persist between calls.
  set <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x  # returns x
  # BUG FIX: the original body was `m <<- mean`, which cached the base
  # `mean` function instead of the inverse passed by the caller.
  setsolve <- function(solve) m <<- solve
  getsolve <- function() m
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix. If the inverse is already cached it is returned
## directly (after a message); otherwise it is computed with solve(),
## stored in the cache object, and returned. Arguments in `...` are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # cache miss: compute the inverse, store it, and return it
    inverse <- solve(x$get(), ...)
    x$setsolve(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
5f04cb1d77ccbd08dcf6c352e94734841d53431c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wheatmap/examples/WColumnBind.Rd.R
|
86eb48ed42a6f59731b14897c3f8b1b9dcd2f57f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
WColumnBind.Rd.R
|
# Example usage of wheatmap::WColumnBind: draws two heatmaps side by side
# and anchors a horizontal colour bar above the column-bound pair.
library(wheatmap)
### Name: WColumnBind
### Title: column bind non-overlapping objects
### Aliases: WColumnBind

### ** Examples

WHeatmap(matrix(rnorm(2000),nrow=40),name='a') +
  WHeatmap(matrix(rnorm(30), nrow=3), RightOf(),name='b') +
  WColorBarH(1:10, TopOf(WColumnBind('a','b')))
|
e7bcaa2079831e4bb86180a482f7549ea52e5a3b
|
988b1f6a93ff7ee36c1017b4a1ba42e1f273aba2
|
/shiny_app/mriqception_app/app.R
|
6827f3c4a89ee42946cc168dda5a9b0e0eb2fd32
|
[] |
no_license
|
crewalsh/mriqception
|
15f239d5c5483e61b695d5c90bb0356dc0aa1491
|
cc34d0376f81d50a313de301d0dd4013a45e1a24
|
refs/heads/master
| 2022-06-14T18:26:31.950752
| 2022-05-19T00:40:11
| 2022-05-19T00:40:11
| 200,738,461
| 0
| 1
| null | 2019-08-05T22:40:35
| 2019-08-05T22:40:35
| null |
UTF-8
|
R
| false
| false
| 15,090
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(reshape2)
library(plotly)
library(jsonlite)
# Infix negation of %in%: TRUE for each element of x absent from table.
`%notin%` <- function(x, table) !(x %in% table)
#source("~/Documents/Code/mriqception/shiny_app/mriqception_app/utils.R")
source("utils.R", local=TRUE)
# Define UI for application that draws a histogram
# UI definition: sidebar for data upload + API filter configuration,
# main panel for the IQM selector, split violin plot, and descriptions.
# NOTE(review): slider_input_fxn() and measure_slider_inputs come from
# utils.R (sourced above, not shown here) -- confirm their contract.
ui <- fluidPage(

  # Application title
  titlePanel("MRIQCeption"),

  sidebarLayout(
    sidebarPanel(
      # local MRIQC group-level output (.tsv or .csv)
      fileInput("local_file", h5("Please upload the output from MRIQC of your local data"),
                multiple = FALSE,
                accept = c(".tsv",
                           "text/tsv",
                           "text/tab-separated-values,text/plain",
                           ".csv",
                           "text/csv",
                           "text/comma-separated-values,text/plain")),
      # optional BIDS sidecar used server-side to pre-set the filter widgets
      fileInput("json_info", h5("Optionally, upload a .json file of BIDS info for your study to automatically set filter parameters close to those in your own study"),
                multiple = FALSE,
                accept = ".json"),
      radioButtons("modality",
                   h5("Please choose the modality you are interested in"),
                   choices = list("BOLD"='bold',
                                  "Structural (T1W)" = 'T1w',
                                  "T2w" = "T2w")),
      sliderInput("API_limit",
                  label = h5("To reduce time to load data from the API, optionally select a maximum number of pages to load"),
                  max = 10000,
                  min = 0,
                  value = 10000),
      checkboxInput("remove_outliers", "Remove outliers from API", value=FALSE),
      # filter checkboxes are rendered server-side (they depend on modality)
      uiOutput("choose_filters"),

      # just BOLD filters
      # conditionalPanel(
      #   condition = "input.filters.includes('snr')",
      #   slider_input_fxn("snr")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('tsnr')",
      #   slider_input_fxn("tsnr")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('dvars_nstd')",
      #   slider_input_fxn("dvar")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('fd_mean')",
      #   slider_input_fxn("fd")
      # ),
      # # just T1w filters
      # conditionalPanel(
      #   condition = "input.filters.includes('snr_total')",
      #   slider_input_fxn("snr_total")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('snr_gm')",
      #   slider_input_fxn("snr_gm")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('snr_wm')",
      #   slider_input_fxn("snr_wm")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('snr_csf')",
      #   slider_input_fxn("snr_csf")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('cnr')",
      #   slider_input_fxn("cnr")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('efc')",
      #   slider_input_fxn("efc")
      # ),
      # # all
      # conditionalPanel(
      #   condition = "input.filters.includes('fwhm_avg')",
      #   slider_input_fxn("fwhm")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('gsr_x')",
      #   slider_input_fxn("gsr_x")
      # ),
      # conditionalPanel(
      #   condition = "input.filters.includes('gsr_y')",
      #   slider_input_fxn("gsr_y")
      # ),

      # acquisition-parameter filters, shown only when selected above
      conditionalPanel(
        condition = "input.filters.includes('bids_meta.EchoTime')",
        slider_input_fxn("TE")
      ),
      conditionalPanel(
        condition = "input.filters.includes('bids_meta.RepetitionTime')",
        slider_input_fxn("TR")
      ),
      conditionalPanel(
        condition = "input.filters.includes('bids_meta.MagneticFieldStrength')",
        radioButtons("mag_strength",
                     h5("Please choose a magnet strength"),
                     choices = list(
                       "1.5T"= "1.5",
                       "3T"="3",
                       "7T" = "7"
                     ),
                     selected = measure_slider_inputs[["mag_strength"]])
      ),
      conditionalPanel(
        condition = "input.filters.includes('bids_meta.Manufacturer')",
        radioButtons("manufacturer",
                     h5("Please choose a scanner manufacturer"),
                     choices = list(
                       "Siemens"= "Siemens",
                       "GE"="GE"
                     ),
                     selected = measure_slider_inputs[["scanner_manufacturer"]])
      ),
      actionButton("get_data", "Generate API data")
    ),

    mainPanel(
      textOutput("color_descriptions"),
      uiOutput("select_IQM_render"),
      conditionalPanel(
        condition = "input.get_data",
        plotlyOutput("plot")
      ),
      # NOTE(review): "plotted" is evaluated as JavaScript in the browser,
      # but no input/output named "plotted" is ever sent to the client, so
      # this condition never becomes TRUE -- verify intent.
      conditionalPanel(condition = "plotted == TRUE",
                       textOutput("IQM_description"))
    )
  )
)
# Server logic: loads local MRIQC results, (optionally) pulls comparison
# data from the MRIQC web API (currently replaced by a bundled demo CSV),
# and renders split violin plots of local vs API IQM distributions.
server <- function(input, output,session) {

  # reactive store; `plotted` is meant to flag that a plot has been drawn
  values <- reactiveValues(plotted=FALSE)

  # Draw split violins: local data on the negative side (IQM-class colour),
  # API data on the positive side (grey), one pair per IQM variable.
  do_plot <- function(df){
    # NOTE(review): this assigns a *local* variable, not values$plotted,
    # so the reactiveValues flag is never updated -- likely intended
    # `values$plotted <- TRUE`.
    plotted=TRUE
    df %>%
      plot_ly(type = 'violin') %>%
      add_trace(
        x = ~variable[df$group=="local_set"],
        y = ~value[df$group=="local_set"],
        legendgroup = 'Local',
        scalegroup = 'Local',
        name = 'Local',
        side = 'negative',
        box = list(
          visible = T
        ),
        meanline = list(
          visible = T
        ),
        line = list(
          color = get_color()
        ),
        color = I(get_color()),
        points = 'all',
        pointpos = -0.5,
        jitter = 0.1,
        scalemode = 'count',
        # NOTE(review): `meanline` and `line` appear twice in this call
        # (duplicated arguments passed through ...); redundant but kept.
        meanline = list(
          visible = T
        ),
        line = list(
          color = get_color()
        ),
        marker = list(
          line = list(
            width = 2,
            color = get_color()
          ),
          symbol = 'line-ns'
        )
      ) %>%
      add_trace(
        x = ~variable[df$group=="all_data"],
        y = ~value[df$group=="all_data"],
        legendgroup = 'API',
        scalegroup = 'API',
        name = 'API',
        side = 'positive',
        box = list(
          visible = T
        ),
        meanline = list(
          visible = T
        ),
        line = list(
          color = 'rgb(58,54,54)'
        ),
        color = I('dark gray')
      ) %>%
      layout(
        xaxis = list(
          title = ""
        ),
        yaxis = list(
          title = "",
          zeroline = F
        )
      )
  }

  # Colour for the currently selected IQM, looked up in IQM_descriptions
  # (loaded from utils.R -- not visible in this file).
  get_color <- reactive(
    color <- IQM_descriptions$color[which(IQM_descriptions$iqm_name == input$select_IQM)]
  )

  # Render the modality-dependent filter checkboxes (choice lists come
  # from utils.R: bold_choices / T1w_choices / T2w_choices).
  output$choose_filters <- renderUI({
    if (input$modality == "bold"){
      choices_list <- bold_choices
    }else if (input$modality == "T1w"){
      choices_list <- T1w_choices
    }else if (input$modality == "T2w"){
      choices_list <- T2w_choices
    }
    checkboxGroupInput("filters",
                       h5("Please choose the filters you want to use for the API data:"),
                       choices = choices_list
    )
  })

  # IQM selector: only variables present in BOTH local and API data,
  # sorted alphabetically, with image-dimension columns excluded.
  output$select_IQM_render <- renderUI({
    req(values$df)
    choices_API <- unique(values$df %>% filter(group == "all_data") %>% select("variable"))
    choices_local <- unique(values$df %>% filter(group == "local_set") %>% select("variable"))
    choices_overlap <- intersect(choices_API$variable, choices_local$variable)
    choices_overlap <- choices_overlap[order(choices_overlap)]
    choices_overlap <-choices_overlap[choices_overlap %notin% c("size_x", "size_y", "size_z", "size_t", "spacing_x", "spacing_y", "spacing_z", "spacing_tr")]
    selectInput("select_IQM", h6("Please select IQM"),
                choices=choices_overlap
    )
  })

  # "Generate API data" click: require a local file, then ask the user to
  # confirm before the (slow) API pull begins.
  get_API_data <- eventReactive(input$get_data,{
    # load in API data
    if (is.null(input$local_file)){
      showModal(modalDialog("Please upload a local file",
                            title = "Upload local file",
                            footer = tagList(
                              actionButton("new_upload", "Continue")
                            )))
    }
    req(input$local_file)
    if (!is.null(current_selection$filters)){
      modal_text <- paste0("Are you sure you wish to pull from the API? You are currently filtering ", input$modality, " data based on ",paste(current_selection$filters, sep = "", collapse = ", ")," and ", as.character(input$API_limit), " pages. This action will take some time, so please ensure your filters are correct.")
    }else{
      modal_text <- paste0("Are you sure you wish to pull from the API? You are currently pulling ", input$modality," data from ", as.character(input$API_limit), " pages. This action will take some time, so please ensure your filters are correct.")
    }
    showModal(modalDialog(modal_text,
                          title="Download from API",
                          footer = tagList(
                            actionButton("cancel_API","Cancel"),
                            actionButton("confirm_API","Yes, please download from API", class = "btn btn-danger")
                          )))
  })

  # Confirmed download: the real API pull is commented out below and a
  # bundled demo CSV is used instead; the local upload is then parsed and
  # both sets are combined (long format) into values$df.
  observeEvent(input$confirm_API, {
    removeModal()
    #
    # modality <- input$modality
    # url_root <- 'https://mriqc.nimh.nih.gov/api/v1/'
    # filters <- create_filter_text(isolate(input), isolate(current_selection$filters))
    # url <- paste0(url_root,modality,"?max_results=50&page=1",filters,sep="")
    # tmpFile <- tempfile()
    # download.file(url, destfile = tmpFile, method = "curl")
    # temp <- jsonlite::read_json(tmpFile)
    #
    # last_page_href <- temp[["_links"]][["last"]][["href"]]
    # last_page_id <- strsplit(strsplit(last_page_href,split="page=")[[1]][2],split="&")[[1]][1]
    # expanded_data <- reorganize_bids_data(temp[["_items"]])
    #
    # if (input$API_limit > as.numeric(last_page_id)){
    #   n <- as.numeric(last_page_id)
    # }else{
    #   n <- input$API_limit
    # }

    API_demo <- read.csv("bold_demo.csv")
    API_data <- melt(API_demo, id.vars = c("X_id"))
    #
    API_data$group <- "all_data"
    API_data$variable <- as.character(API_data$variable)

    #for testing
    n <- 3

    #
    # withProgress(message = 'Loading data', detail = paste("Loading page 1 of",n), value = 0, {
    #   for (page in seq.int(2,n)){
    #     if (page %% 10 == 0){
    #       incProgress(10/n, detail = paste("Loading page", page,"of",n))
    #     }
    #
    #     url <- paste0(url_root,modality,"/?max_results=50&page=",as.character(page),filters,sep="")
    #     tmpFile <- tempfile()
    #     download.file(url, destfile = tmpFile, method = "curl")
    #     temp <- jsonlite::read_json(tmpFile)
    #     temp_expanded <- reorganize_bids_data(temp[["_items"]])
    #     expanded_data <- merge(expanded_data, temp_expanded, all=TRUE)
    #   }
    # })

    #API_data <- melt(expanded_data, id.vars = c("subject_id"))
    # NOTE(review): the two assignments below repeat the ones above
    # (leftover from the commented-out API path); harmless but redundant.
    API_data$group <- "all_data"
    API_data$variable <- as.character(API_data$variable)

    # parse the uploaded local file (.tsv or .csv) into long format
    inFile <- input$local_file
    ext <- tools::file_ext(input$local_file$name)
    if (ext == "tsv"){
      local_data <- read.table(inFile$datapath, header=TRUE)
    }else{
      local_data <- read.csv(inFile$datapath, header=TRUE)
    }
    colnames(local_data)[1] <- "subject_id"
    local_data <- melt(local_data, id.vars = c("subject_id"))
    local_data$group <- "local_set"

    full_data <- rbind(local_data,API_data)
    full_data$value <- as.numeric(full_data$value)
    values$df <- full_data
  })

  # mirror of the current filter checkbox selection (used in the modal
  # text and extended by the BIDS-json handler below)
  current_selection <- reactiveValues()

  observeEvent(input$filters,{
    current_selection$filters <- input$filters
  })

  # When a BIDS .json sidecar is uploaded, pre-set the matching filter
  # widgets and tick the corresponding filter checkboxes.
  observeEvent(input$json_info, {
    req(input$json_info)
    filepath <- input$json_info$datapath
    json_file <- read_json(filepath, simplifyVector = TRUE)
    if ("EchoTime" %in% names(json_file)){
      updateSliderInput(session, "TE", value = c(json_file[["EchoTime"]], json_file[["EchoTime"]]))
      updateCheckboxGroupInput(session, "filters", selected = c(current_selection$filters, "bids_meta.EchoTime"))
      current_selection$filters <- c(current_selection$filters, "bids_meta.EchoTime")
    }
    if ("RepetitionTime" %in% names(json_file)){
      updateSliderInput(session, "TR", value = c(json_file[["RepetitionTime"]], json_file[["RepetitionTime"]]))
      updateCheckboxGroupInput(session, "filters", selected = c(current_selection$filters, "bids_meta.RepetitionTime"))
      current_selection$filters <- c(current_selection$filters, "bids_meta.RepetitionTime")
    }
    if ("MagneticFieldStrength" %in% names(json_file)){
      updateRadioButtons(session, "mag_strength", selected = json_file[["MagneticFieldStrength"]])
      updateCheckboxGroupInput(session, "filters", selected = c(current_selection$filters, "bids_meta.MagneticFieldStrength"))
      current_selection$filters <- c(current_selection$filters, "bids_meta.MagneticFieldStrength")
    }
    if ("Manufacturer" %in% names(json_file)){
      # case-insensitive match against known manufacturers
      if (grepl("GE", json_file[["Manufacturer"]], ignore.case = TRUE)){
        updateRadioButtons(session, "manufacturer", selected = "GE")
      }else if(grepl("Siemens", json_file[["Manufacturer"]], ignore.case = TRUE)){
        updateRadioButtons(session, "manufacturer", selected = "Siemens")
      }
      updateCheckboxGroupInput(session, "filters", selected = c(current_selection$filters, "bids_meta.Manufacturer"))
      current_selection$filters <- c(current_selection$filters, "bids_meta.Manufacturer")
    }
  })

  # dismiss handlers for the two modal dialogs
  observeEvent(input$new_upload,
               removeModal()
  )

  observeEvent(input$cancel_API,
               removeModal()
  )

  # Optionally strip API outliers (remove_outliers_fxn from utils.R)
  # before plotting.
  remove_outliers_reactive <- reactive({
    if (input$remove_outliers){
      values$plot_data <- remove_outliers_fxn(values$filtered_data)
    }else{
      values$plot_data <- values$filtered_data
    }
  })

  # Main plot: trigger the data pipeline, filter to the selected IQM,
  # apply outlier removal, and draw the violins.
  output$plot <-renderPlotly({
    get_API_data()
    if (is.null(input$select_IQM)){
      values$filtered_data <- values$df
    }else{
      values$filtered_data <- values$df %>% filter(variable == input$select_IQM)
    }
    remove_outliers_reactive()
    req(values$plot_data)
    do_plot(values$plot_data)
  })

  # textual definition of the selected IQM (from IQM_descriptions)
  output$IQM_description <- renderText(
    paste(IQM_descriptions$iqm_definition[which(IQM_descriptions$iqm_name == input$select_IQM)])
  )

  output$color_descriptions <- renderText("Colors reflect class of IQM. In each plot, the API data is shown in dark grey.
    Spatial IQMs are plotted in gold, temporal IQMs in orange, noise IQMs in red, motion IQMs in green, artifact IQMs in
    light blue, descriptive IQMs in dark blue, and other IQMs in purple.")
}
# Run the application (binds the UI and server defined above)
shinyApp(ui = ui, server = server)
|
4ce5479f112c53ba1c3290afc623ee0a6c4cd59e
|
85130399796efd779e40efa389aaffc885b0dcaf
|
/Rbasics.R
|
5fbbe12fc96d7c64bd984dde2ffcfd547506ed51
|
[] |
no_license
|
victorfeagins/WhatisR
|
6d3aba4552afb75885a23c6af8cc4f9706b67b38
|
acdf1a238b14c06ee20f2b020f3d54d11bfc7207
|
refs/heads/master
| 2023-07-24T17:21:59.606323
| 2021-09-03T22:20:57
| 2021-09-03T22:20:57
| 402,834,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,722
|
r
|
Rbasics.R
|
# R Basics ----
# R is all about objects
# Let's learn the essential data types
## Numeric ----
# Arithmetic operators
50 + 10 #Add
50 - 10 #Subtract
50 / 10 #Divide
50 * 10 #Multiply
2 ** 3 #Exponent (note: `^` is the conventional R operator; `**` is a synonym)
### Numeric Functions ----
# A function maps inputs to an output: f(x) = y
round(50.70) #Round to the nearest whole number
sqrt(4) #Take the square root
log(2) #Take the natural log
#You can also create your own functions
# Cube a number; since `^` is vectorized this also cubes each element of a
# numeric vector.
cube <- function(x) x^3
cube(2)
## Character ----
# Strings can use double or single quotes; pick double when the text has a '
"Hello"
"Dog"
'Cat'
'Neck Tie'
"Don't do that"
### Character Functions ----
nchar("Knit") #Count characters
toupper("Knit") #Uppercase
tolower("Knit") #Lowercase
paste("I", "like", "knitting") #Join strings with spaces
## Logic ----
# TRUE or FALSE
TRUE #Logic
FALSE
TRUE + TRUE #TRUE = 1
FALSE + FALSE #FALSE = 0
### Basic Logic Operators ----
10 > 1 #Is 10 greater than 1?
10 < 1 #Is 10 less than 1?
10 == 1 #Is 10 equal to 1?
10 != 1 #Is 10 not equal to 1?
"A" == "A"
"A" == "a" #Comparison is case-sensitive
!TRUE #Not TRUE
### Combining Logic Operators ----
TRUE | FALSE # TRUE or FALSE
10 > 1 | 10 < 1 #At least one TRUE makes the whole expression TRUE
TRUE & FALSE # TRUE and FALSE
10 > 1 & 10 < 1 #At least one FALSE makes the whole expression FALSE
## Vectors
# A combination of individual objects
# Functions apply to each element
c(10.6, 50.4, 30.8)
round(c(10.6, 50.4, 30.8))
c("cat", "dog", "turtle")
toupper(c("cat", "dog", "turtle"))
## Assignment of variables
NumVec <- c(10.6, 50.4, 30.8)
round(NumVec)
pets <- c("cats", "dogs", "turtles", "horses")
toupper(pets)
# NOTE(review): `names` shadows the base function names() in this session
names <- c("Jimmy", "Carl", "Eric", "Sarah")
paste(names, "likes", pets)
### Indexing Vectors
length(names) #length() counts the number of elements in a vector
names[1] #Grabs the 1st element of the vector
pets[-1] #Drops the 1st element of the vector
names[3:length(names)] #Grabs the 3rd element through the end
|
9063d9f8d7fd6066e4317d064781e0497bd90508
|
23db4d2af33d272ea56eac407399c90c4b2b682d
|
/script_02.R
|
954c8075a9402edb6ab75978d08b760ebf346f1a
|
[] |
no_license
|
bdemeshev/r_socio_hse
|
5ce6a1263f949b0d225a3972d0bff890cb4a8496
|
17f16afb6df6a5f32cfec79437ec017071a48073
|
refs/heads/master
| 2021-01-22T20:21:42.919836
| 2017-09-20T13:10:49
| 2017-09-20T13:10:49
| 85,315,335
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,107
|
r
|
script_02.R
|
library(tidyverse) # data wrangling, plots
library(forcats) # for working with factor variables
library(lubridate) # for working with dates/times
library(stringr) # for working with character variables
library(reshape2) # long <-> wide reshaping
library(readr) # file reading
# install = install a package (done once)
# install.packages("the_package")
# Tools - Install packages
# --------------------------------
# attach = load = make available (done on every R session start)
# library(the_package)
#
# list a package's functions:
help(package = "readr")
# documentation for one function
help(read_csv)
# or, for short:
?read_csv
adv <- read_csv("~/Downloads/Advertising.csv")
# File -> Import Dataset -> From CSV ->
glimpse(adv)
# two simple plots
# the most perfect, empty plot!
ggplot(data = adv)
# a slightly less perfect one :)
# http://docs.ggplot2.org
ggplot(data = adv) +
geom_point(aes(x = Radio,
y = Newspaper,
size = Sales),
alpha = 0.3) +
xlab("Расходы на радио (тыс. у.е.)") +
ylab("Газеты :)")
# save a base plot object and extend it later
base <- ggplot(data = adv) +
geom_point(aes(x = Radio,
y = Newspaper,
size = Sales))
base
base + xlab("Привет! :)") + ggtitle("Нано-шедевр")
# criterion of a good plot :)
ggplot(data = adv) +
geom_point(aes(x = Radio, alpha = TV,
y = Newspaper,
size = Sales))
base + theme_bw()
# https://cran.r-project.org/web/packages/ggthemes/vignettes/ggthemes.html
# GOOGLE: ggplot2 + themes
library(ggthemes)
base + theme_stata()
base + theme_excel()
hist_base <- ggplot(data = adv) +
geom_histogram(aes(x = Sales))
hist_base
# the same thing, the quick way
qplot(data = adv,
geom = "histogram",
x = Sales)
qplot(data = adv,
geom = "point",
x = Sales,
y = TV,
xlab = "Подпись по горизонтали")
??qplot
?qplot
# GOOGLE: ggplot2 -> Images tab
# "I want the same one, just with golden buttons"
# variable transformations
# mutate {dplyr}
adv2 <- mutate(adv,
sales2 = Sales^2,
ln_sales = log(Sales),
sales_scaled = scale(Sales),
sales_scaled_2 =
(Sales - mean(Sales)) / sd(Sales))
glimpse(adv2)
write_csv(adv2, path = "adv_2.csv")
# find out the current working directory:
getwd()
# set the working directory:
setwd("~/Downloads/")
# Session - Set working directory - Choose
# select observations by several criteria:
adv3 <- filter(adv,
Sales > mean(Sales),
TV < quantile(TV, 0.9) )
# a quick look at the table:
glimpse(adv3)
# a special attribute: rownames
# extract the row names (if, by the old tradition, they exist)
rownames(adv)
# order the adv3 table by decreasing Sales
# add a variable `nomer` equal to the row number
adv4 <- arrange(adv3, -Sales) %>%
mutate(nomer = row_number())
# list a package's functions:
help(package = "dplyr")
library(psych) # in particular, provides the describe() function
adv %>% select(Sales) %>% describe() %>%
select(min, max)
describe(adv) # descriptive statistics for all variables
describe(adv$Sales) # descriptive statistics for one variable
min(adv$TV) # minimum of the TV variable from the adv table
mean(adv$TV) # mean
|
f8930b741cd8992e0821e4d201a7970a2438f999
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/6468_0/rinput.R
|
cbaaf164297baac62ab0316813a008bf42c1dd9c
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree stored in Newick format.
library(ape)

# Read the tree, strip its root, and write the unrooted version back out.
phylo_tree <- read.tree("6468_0.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file = "6468_0_unrooted.txt")
|
a19a8cb3fb770f9f57996ec3936023cd9a350767
|
498e7df01e78657277b23d81d7b07ab431def4fb
|
/share_sim_hosp_sderr.R
|
5da368f388cee7e8a893c2832cd5cfcc4b6583eb
|
[] |
no_license
|
kralljr/share_medicare
|
de5be725529fd00b42ab8aaf6edd31b91731a16e
|
17aac20ee28e70e5cc93e71d4b11ce5e3f5ec2a5
|
refs/heads/master
| 2021-01-17T07:40:19.613158
| 2016-07-15T15:53:38
| 2016-07-15T15:53:38
| 18,215,156
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,272
|
r
|
share_sim_hosp_sderr.R
|
### Simulate hospitalization effects under source-apportionment measurement error.
###
### Batch usage: Rscript share_sim_hosp_sderr.R <sim> <err>
###   sim -- 1 or 2: selects the scenario (RNG seed and replicate count)
###   err -- 1, 2, or 3: selects the measurement-error SD from `sderrs`
rm(list = ls())  # intentional: runs as a standalone batch job

# parse command-line arguments
args <- commandArgs(TRUE)
sim <- as.numeric(args[[1]])
err <- as.numeric(args[[2]])
print(c(sim, err))

# load project packages
library(share)
library(handles)
library(sharesim)

# load packaged data objects (keeps, cms, sds, vec, names)
data(keeps, cms, sds, vec, names)
# new combination of sources
load("~/SHARE/sharesim/data_share_sim_revisedcombo.RData")

# simulation settings
nmons <- 25                          # number of monitors
ndays <- 500                         # number of days
etas <- c(3, 1, 0.75, 0.5, 1, 1, 2)  # true effect sizes
seeds <- c(9763, 398)                # one RNG seed per scenario
reps1 <- c(25, 5)                    # replicates per scenario
sderrs <- c(0.001, 0.1, 0.5)         # candidate measurement-error SDs
sderr1 <- sderrs[err]

reps <- reps1[sim]

# run the simulation; preallocate the result list instead of growing it
n_iter <- 100
outmult <- vector("list", n_iter)
set.seed(seeds[sim])
for (i in seq_len(n_iter)) {
  print(i)
  outmult[[i]] <- outerSIMhosp(names, nmons, reps, ndays, vec, keeps,
    cms, sds, etas, sderr = sderr1)
}

# summarize: regression coefficients, percent increases, median IQRs
regcoef <- lapply(outmult, function(x) x[[1]])
percinc1 <- lapply(outmult, function(x) x[[2]])
iqrs <- apply(sapply(outmult, function(x) x[[3]]), 1, median)
outrest <- gethospsim(regcoef, iqrs)[[2]]
mse <- msefun(percinc1, etas, rownames(outmult[[1]][[1]][[1]]))
out <- outmult[[1]][[2]]

# save output, tagged by scenario letter (A/B) and error index
sims <- c("A", "B")
save(out, outmult, mse, outrest, iqrs, percinc1, regcoef,
  file = paste0(sims[sim], "simhosp_multres_sderr", err, ".RData"))
|
2bc2df21f9a370efa2c5755240431bae49d42157
|
9bcbf545552e7a8ead8478e6b3550c6a7a2f17cb
|
/man/read_data.Rd
|
40f45274fbce251724a3977ed8371dc176ee474b
|
[] |
no_license
|
bgulbis/BGTools
|
5f77e9f277e986907f38cdeefbb4c42b5614420e
|
e31d098e7c21459f04c291bd712ae802a8d6945e
|
refs/heads/master
| 2021-01-21T04:43:20.281983
| 2016-06-29T12:16:08
| 2016-06-29T12:16:08
| 47,362,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,037
|
rd
|
read_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{read_data}
\alias{read_data}
\title{Read and join data from multiple csv files}
\usage{
read_data(data.dir, file.name, base = FALSE, check.distinct = TRUE)
}
\arguments{
\item{data.dir}{A character string with the name of the directory containing
the data files}
\item{file.name}{A character string with name of data file or pattern to
match}
\item{base}{An optional logical, if \code{TRUE} then the base function
\code{\link{read.csv}} will be used}
\item{check.distinct}{An optional logical, calls
\code{\link[dplyr]{distinct}} on the imported data if \code{TRUE}}
}
\value{
A data frame
}
\description{
\code{read_data} takes a directory and file name and reads in all matching
csv files and binds them together into a data frame
}
\details{
This function takes a directory and file name and reads in all matching csv
files and binds them together into a data frame.
}
\seealso{
\code{\link{read.csv}} \code{\link[readr]{read_csv}}
}
|
eb7a21f31d2bf5ee3547c712dd5a9d5988c3a70d
|
60de7eab71406c75aaef944f53d0098c80148a43
|
/sandbox/Ineq_Visuals.R
|
bfcf9574a5cd2dac3b455f3012df9b4614ba2684
|
[] |
no_license
|
ds-civic-data/sd-pdx-sea
|
5f9bb3bc020e6146f291a8af7a176558a6ef7063
|
8dcb176ae14b322ede777bb244dad7052b747695
|
refs/heads/master
| 2020-03-06T21:00:13.381244
| 2018-05-10T23:58:01
| 2018-05-10T23:58:01
| 127,067,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
Ineq_Visuals.R
|
######################################################################################
## Title: Counting Migration by State (PUMS) and other exploration
## NOTE(review): despite the title above, the visible code plots Gini
## coefficients produced by Measuring_inequality.R -- confirm intended title.
## Author: Josephine Baker
## Created: 04/22/2018
##########################################
library(tidyverse)
library(readr)
library(ineq)
library(ggplot2)
# Measuring_inequality.R is expected to define gini_stats0 (with columns
# gini_nam and gini_p used below) -- verify in that script.
source("Measuring_inequality.R")
#gini_stats1<- gini_stats0%>%
#Not done yet
# Bar chart of Gini coefficients, one bar per gini_nam, labeled vertically.
ggplot(gini_stats0, aes(x = gini_nam, y = gini_p, fill = gini_nam))+
geom_bar(stat = "identity")+
#geom_point(alpha = .5, size = 5)+
geom_text(aes(label = gini_nam), size = 3, angle = 90,
color = "white")+
theme(legend.position="none")+
labs(y = "Gini Coefficient")
|
4e08967a83633dd296325acaaa602bb8749a8b66
|
953e84446de8d060683c87669f2c62350939ed5f
|
/code/16S/deprecated/after_dada2_make_otu_table.R
|
6ff7aa2ea164b21c8b52461260e56b8d77a9cab8
|
[] |
no_license
|
tkarasov/pathodopsis
|
29e23821c33ac727421158d9e40b277a09b8b8ca
|
c5d486ac02b1f0b2ff525439d3bf2d3843231e4d
|
refs/heads/master
| 2023-08-25T02:31:53.053463
| 2023-08-15T21:37:10
| 2023-08-15T21:37:10
| 172,746,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,080
|
r
|
after_dada2_make_otu_table.R
|
#library(phyloseq)
library(dada2)
library(dplyr)
library(tidyverse)
library(fossil)
#library(msa)
#library(DECIPHER)
library(genefilter)
library(phangorn)
library("RColorBrewer")
library(gplots)
library(sjstats)
library(nlme)
#path="/Users/tkarasov/work_main"
# choose whether working on the home computer or at work
path="/ebio"
# shared helper functions for the amplicon-sequencing analysis
source(paste(path, "/abt6_projects9/pathodopsis_microbiomes/scripts/16S/amp_seq_functions.R", sep=""))
#https://f1000research.com/articles/5-1492/v1 This is the dada2 workflow reference
# Load the chimera-free ASV count table and taxonomy produced by dada2.
output_direc="/ebio/abt6_projects9/pathodopsis_microbiomes/data/processed_reads/16S/16S_all"
seqtab.nochim = readRDS(paste(output_direc,"seqtab_final.rds", sep="/"))
taxa=readRDS(paste(output_direc,"tax_final.rds", sep="/"))
#metadata=read.table(paste(path,"/abt6_projects9/pathodopsis_microbiomes/pathodopsis_git/data/v1_22_5_merged.txt", sep=""), header=T, sep=",")
#koppen_geiger=read.table("/ebio/abt6_projects9/pathodopsis_microbiomes/pathodopsis_git/data/Pathodopsis_site_metadata_20190808_CZ_course2.txt", header=T, sep="\t")
metadata=read.table("/ebio/abt6_projects9/pathodopsis_microbiomes/pathodopsis_git/data/Pathodopsis_site_metadata_20190808_CZ_course2.txt", header=T, sep="\t")
#metadata=merge(metadata, koppen_geiger, by=c("Site.ID", "Latitude", "Longitude"))
# Match metadata rows to the samples present in the ASV table.
samples.out <- rownames(seqtab.nochim)
metadata_keep=metadata[metadata$Plant_ID%in%samples.out,]
meta_unique = metadata_keep %>% distinct()
metadata_organized=merge(data.frame(samples.out), meta_unique, by.x="samples.out", by.y="Plant_ID", all.x=TRUE)
# sample ID truncated at the first "D" -- computed but unused in the code below
subject <- sapply(strsplit(samples.out, "D"), `[`, 1)
# Per-sample table that becomes the phyloseq sample_data.
samdf <- data.frame(Subject=metadata_organized$samples.out, Latitude=metadata_organized$Latitude, Longitude=metadata_organized$Longitude, Altitude=metadata_organized$Altitude, hpa=metadata_organized$HpA_plant, TourID=metadata_organized$Tour_ID, Clim=metadata_organized$ClimateZ)
rownames(samdf) <- samdf$Subject
sample_names(seqtab.nochim)=samples.out
# Assemble the phyloseq object from counts, sample data, and taxonomy.
ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa))
# remove samples with fewer than 1000 reads
GP = prune_samples(sample_sums(ps)>=1000, ps)
# now for taxa names: replace raw sequences with short ASV labels
dna <- Biostrings::DNAStringSet(taxa_names(GP))
names(dna) <- taxa_names(GP)
GP <- merge_phyloseq(GP, dna)
taxa_names(GP) <- paste0("ASV", seq(ntaxa(GP)))
# you can still access the sequences via refseq(GP)
# remove mitochondria (taxonomy column 5 -- presumably a family-level rank;
# confirm against the tax_final.rds column order)
mito = colnames(otu_table(GP))[which(tax_table(GP)[,5] != "Mitochondria")]
GP = prune_taxa(mito, GP)
# keep taxa with more than 50 reads in at least 1 sample (kOverA(1, 50));
# NOTE(review): the original comment said "samples", but filter_taxa filters taxa
flist <- filterfun(kOverA(1, 50))
GP50 = filter_taxa(GP, flist, TRUE )
qGPr = transform_sample_counts(GP, function(otu) otu/sum(otu)) # relative abundances (unfiltered)
# basic plot: relative abundances of the 20 most abundant filtered taxa
top20 <- names(sort(taxa_sums(GP50), decreasing=TRUE))[1:20]
ps.top20 <- transform_sample_counts(GP50, function(OTU) OTU/sum(OTU))
ps.top20 <- prune_taxa(top20, ps.top20)
plot_bar(ps.top20, x="Subject", fill="Family") + facet_wrap(~Clim, scales="free_x")
# Ordination (NMDS on Bray-Curtis distances) and alpha-diversity plots.
GP50.prop <- transform_sample_counts(GP50, function(otu) otu/sum(otu))
ord.nmds.bray <- ordinate(GP50.prop, method="NMDS", distance="bray")
plot_ordination(GP50.prop, ord.nmds.bray, color="TourID", title="Bray NMDS")
plot_richness(GP50.prop, x="Subject", measures=c("Shannon", "Simpson"), color="Clim")
# Extract plain matrices and taxonomic aggregations
extracted_GP50 = as(otu_table(GP50), "matrix")
# merge counts at genus and family level
GP_genus=tax_glom(GP50, "Genus")
GP_fam=tax_glom(GP50, "Family")
## family-level count matrix
extracted_GP50_family = as(otu_table(GP_fam), "matrix")
#make phylogenetic tree. DECIPHER Alignseqs scales linearly rather than exponentially. Cannot be run on my computer.
#seqs <- getSequences(seqtab.nochim)
#names(seqs) <- seqs # This propagates to the tip labels of the tree
#mult <- AlignSeqs(DNAStringSet(seqs), anchor=NA, verbose=TRUE, processors=16)
#align seqs didn't maintain names
#now write decipher alignment to file
#https://github.com/benjjneb/dada2/issues/204
#writeXStringSet(mult, file= paste(output_direc,"/abt6_projects9/pathodopsis_microbiomes/data/processed_reads/16S/16S_12_2018/demult_python/16S_otus_aligned.fasta", sep=""))
#fasttree
#system("export OMP_NUM_THREADS=16")
#system("/usr/bin/fasttreeMP -fastest -noml -gtr -nt /ebio/abt6_projects9/pathodopsis_microbiomes/data/processed_reads/16S/16S_all/16S_otus_aligned.fasta > /ebio/abt6_projects9/pathodopsis_microbiomes/data/processed_reads/16S/16S_all/16S_otus_aligned.FastTree.tre")
# Alternative for trees...as of right now it seems like we need to use alignseqs to align the OTUs but then put throught raxml
#alignment.rax.gtr <- raxml(DNAbin(mult), m="GTRGAMMAIX", # model f="a", # best tree and bootstrap p=1234, # random number seed x=2345, # random seed for rapid bootstrapping N=100, # number of bootstrap replicates file="alignment", # name of output files exec="raxmlHPC", # name of executablethreads=16S)
#plants not in the metadata "PA0824" "PA0825" "PA1336" "PA1339" "PA1749" "PA1753" "PA1756" "PA1757" "PA1759" "PA1761" "PC0029"
#st <- seqtab.nochim[rowSums(seqtab.nochim) >= 500,]
#seqtab.nochim <- st
|
07c777bbd57903f2ae2bfa0948b1192637e9026c
|
b21bdfcee70b2e4bce5dc7cf3d43c0940763fc5c
|
/inst/www/templates/script.R
|
567d2f0b8e5c9645cf82803e0ee87d2771ede6e5
|
[
"MIT"
] |
permissive
|
dreamRs/addinit
|
f0a7858c4f805337093b315b32bca89bbdc10279
|
c8d4bd1986b79a4c51eb1939b7af78c2dbb1425c
|
refs/heads/master
| 2021-12-14T22:10:15.411235
| 2021-12-11T19:05:59
| 2021-12-11T19:05:59
| 96,358,956
| 57
| 6
| null | 2018-03-15T12:08:06
| 2017-07-05T20:33:28
|
R
|
UTF-8
|
R
| false
| false
| 239
|
r
|
script.R
|
# ------------------------------------------------------------------------
#
# Title : {{title}}
# By : {{author}}
# Date : {{date}}
#
# ------------------------------------------------------------------------
{{packages}}
|
e402e41806dddf1ab3efc0d1b2893c0652b8ae9b
|
c846f88fbdd6d56f50949ee86bdd63a1de5e3dc2
|
/04_Abril/5 Poblacion_CM_Supermercado_Cuidalela/Conexion_access.R
|
c4b5602e36c85606164ec7092c1257891d388240
|
[] |
no_license
|
edgaruio/A-Requerimiento-2020
|
4bf0752436d1a06bea6a74ba97f654e9047b696a
|
c48dcdf9c557a45a12530047e54fc268c44ee5ff
|
refs/heads/master
| 2022-11-27T07:35:28.647661
| 2020-08-03T13:40:45
| 2020-08-03T13:40:45
| 276,465,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
Conexion_access.R
|
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# Conversion tables data ----
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# Open an ODBC connection to the shared Access conversion database.
channel <- odbcDriverConnect(
"Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=//bogak08beimrodc/bi/Tabla_conversion/Tabla_conversion.accdb"
)
sqlTables(channel)  # list the available tables (interactive sanity check)
# Pull the locality lookup table; as.is=T keeps columns as character.
Tb_localidad<- sqlQuery(
channel ,
paste ("select * from Tb_localidad"),
as.is=T
) %>%
data.frame()
# Pull the DANE political-division lookup table.
Tb_Division_Politica_Dane<- sqlQuery(
channel ,
paste ("select * from Tb_Division_Politica_Dane"),
as.is=T
) %>%
data.frame()
# Close every open ODBC connection.
odbcCloseAll()
|
49ab8a320ed1e79bd4f6426d01282f65f25c6314
|
b058c9a53f23c2dcbd801b89b82b435237096941
|
/Descriptive Statistics.R
|
0c2246f15eeeadc371b989dd76216cb994c4e71d
|
[] |
no_license
|
adiganesh93/Customer-Retention-System
|
8c53f9dde9e482de677e42d46a39a96832a307d1
|
ba233839e07ffa0e4845c6d9ae38bb85510d9c06
|
refs/heads/master
| 2020-05-20T17:12:33.615914
| 2019-05-09T00:43:20
| 2019-05-09T00:43:20
| 185,682,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,367
|
r
|
Descriptive Statistics.R
|
# packages
library(ggplot2)
library(MASS)
# read the survey data (hard-coded local path)
df <- read.csv("C:/Users/Aditya/Desktop/IST-687/Satisfaction Survey.csv")
# clean missing values: cancelled flights legitimately have no delay or
# flight time, so set those NAs to 0 instead of losing the rows
df$Departure.Delay.in.Minutes[which(is.na(df$Departure.Delay.in.Minutes) & (df$Flight.cancelled == 'Yes'))] <- 0
df$Arrival.Delay.in.Minutes[which(is.na(df$Arrival.Delay.in.Minutes) & (df$Flight.cancelled == 'Yes'))] <- 0
df$Flight.time.in.minutes[which(is.na(df$Flight.time.in.minutes) & (df$Flight.cancelled == 'Yes'))] <- 0
# change the data type of 'Satisfaction' to numeric
df$Satisfaction <- as.numeric(as.character(df$Satisfaction))
# omit remaining rows with missing values
ndf <- na.omit(df)
# get the names of the airlines
airline.name <- c(levels(ndf$Airline.Name))
# Insert a new column describing the degree of satisfaction
ndf$degree <- NA
# NOTE(review): Satisfaction was already converted to numeric above, so this
# repeat conversion is redundant (but harmless).
ndf$Satisfaction <- as.numeric(as.character(ndf$Satisfaction))
ndf$degree[which(ndf$Satisfaction >=4)]<- "High"
ndf$degree[which(ndf$Satisfaction ==3)] <- "Average"
ndf$degree[which(ndf$Satisfaction ==3.5)] <- "Average"
ndf$degree[which(ndf$Satisfaction <3)] <- "Low"
# NOTE(review): scores strictly between 3 and 4 other than 3.5 (if any exist)
# keep degree == NA and are silently dropped by na.omit below -- confirm.
ndf <- na.omit(ndf)
# Build a table counting each degree of satisfaction, grouped by airline name
Freq <- as.data.frame(table(ndf$Airline.Name,ndf$degree))
colnames(Freq) <- c("Name","Satis.Degree","Count")
Freq
# Stacked bar chart for all airlines showing the satisfaction distribution
Satis_Bar <- ggplot(Freq,aes(x=Name,y=Count,fill=Satis.Degree)) +
geom_bar(stat="identity",color="black") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Distribution of Satisfaction by Airline names")+
labs(x = "Airline Names", y = "Count")
Satis_Bar
# Proportion table of satisfaction by airline (margin 1: rows sum to 1)
P1 <- prop.table(table(ndf$Airline.Name,ndf$degree),1)
P1
# Reshape into a data frame with one column per degree; column names below
# match the alphabetical order of the degree levels
Prop <- data.frame(matrix(data = P1, nrow= length(airline.name), ncol = 3, byrow = F))
Prop
colnames(Prop) <- c('Average','High','Low')
row.names(Prop) <- c(airline.name)
Prop
# "perfect" group: airlines ranked by share of high satisfaction
HighBar <- ggplot(Prop,aes(x=reorder(airline.name,High),y=High)) + geom_col(color="black",fill=" dark green") +
theme(axis.text.x = element_text(angle=90,hjust = 1))+
ggtitle("Distribution of High Satisfaction by Airline names")+
labs(x = "Airline Names", y = "The porpotion of high satisfaction")+
coord_cartesian(ylim = c(0.4,0.6))
HighBar
# The West Airlines has the highest proportion of high satisfaction,
# making it the most-satisfying company.
# "so-so" group: airlines ranked by share of average satisfaction
AverageBar <- ggplot(Prop,aes(x=reorder(airline.name,Average),y=Average)) + geom_col(color="black",fill="dark red") +
theme(axis.text.x = element_text(angle=90,hjust = 1))+
ggtitle("Distribution of Average Satisfaction by Airline names")+
labs(x = "Airline Names", y = "The porpotion of average satisfaction")+
coord_cartesian(ylim = c(0.25,0.3))
AverageBar
# "unhappy" group: airlines ranked by share of low satisfaction
LowBar <- ggplot(Prop,aes(x=reorder(airline.name,Low),y=Low)) + geom_col(color="black",fill="dark blue") +
theme(axis.text.x = element_text(angle=90,hjust = 1))+
ggtitle("Distribution of Low Satisfaction by Airline names")+
labs(x = "Airline Names", y = "The porpotion of low satisfaction")+
coord_cartesian(ylim = c(0.1,0.26))
LowBar
# GoingNorth Airlines Inc. has the highest proportion of low satisfaction,
# making it the least-satisfying company.
|
13d1a7fdecde90d5b8003043e0e30e316783fe09
|
95ddb283bc126d83c683cecbf9b874521e08da98
|
/man/s3faInit.Rd
|
414a7f8f14e995ee314f6b00550f691bb14f07ba
|
[] |
no_license
|
aciobanusebi/s2fa
|
05770bcc5c23d8524beaa5cf71d343d0ab452b26
|
e3f0dd9770e0d3843948c6d039504531d575d752
|
refs/heads/master
| 2021-09-07T21:38:38.465399
| 2021-08-04T15:25:44
| 2021-08-04T15:25:44
| 192,394,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,450
|
rd
|
s3faInit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3faInit.R
\name{s3faInit}
\alias{s3faInit}
\title{Generate initial parameters for EM/S3FA}
\usage{
s3faInit(X_t_supervised, Z_t_supervised, X_t_unsupervised, type = "fa",
checkArgs = TRUE)
}
\arguments{
\item{X_t_supervised}{train input data (which has output) as design matrix (must be matrix, not data.frame), i.e. row = instance, column = feature/attribute}
\item{Z_t_supervised}{train output data (for X_t_supervised) as design matrix (must be matrix, not data.frame), i.e. row = instance, column = feature/attribute}
\item{X_t_unsupervised}{train input data (which has no output) as design matrix (must be matrix, not data.frame), i.e. row = instance, column = feature/attribute}
\item{type}{"unconstrained", "fa", or "ppca"; refers to psi}
\item{checkArgs}{whether to check the arguments are valid; it takes more time to execute}
}
\value{
initial parameters for EM/S3FA, i.e. a list containing nDimX, nDimZ, type, mu_z_t, Sigma_z_t, mu_t, lambda_t, psi_t. "_t" comes from "transpose"
}
\description{
Generate initial parameters for EM/S3FA
}
\examples{
params <- s3faInit(X_t_supervised = house[1:10,2:3,drop=FALSE],
Z_t_supervised = house[1:10,1,drop=FALSE],
X_t_unsupervised = house[11:20,2:3,drop=FALSE],
type = "fa",
checkArgs = TRUE)
params
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.