blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0f9eeaa8ba629e3e7c269a998d07f6a0f22e6c6 | c2977bf13e06740383d03325e9ca29a3b838b0e0 | /.cocult3_rarefy_simulation_scriptsnapshot.R | d965cd813360fb6fb4fdb00d1bf562d853201815 | [] | no_license | kijong-yi/cocult2 | e3f44e0372da8aa0a962e26e2662920405d516a0 | 361fde72f3c5c2ac89d6636c3fc83b2e331d34a8 | refs/heads/master | 2020-08-07T19:15:47.666210 | 2019-10-15T05:54:25 | 2019-10-15T05:54:25 | 213,558,547 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,229 | r | .cocult3_rarefy_simulation_scriptsnapshot.R |
# prep ------------------------------------------------------------------------------------------------------------
# Load TCR clonotype count data and register a parallel backend for the
# iNEXT/foreach computations below.
library(tidyverse)
library(iNEXT)
library(vegan)
library(doMC)
library(Hmisc)
# 24 worker processes for %dopar% loops.
registerDoMC(24)
# counts: clonotype x sample matrix of read counts; the "count" column of the
# TSV holds the clonotype identifiers and becomes the rownames.
counts <- read_tsv("tcr_counts.txt") %>%
as.data.frame() %>% column_to_rownames("count") %>% as.matrix()
# Repair a typo in the first sample's column header.
colnames(counts)[1] <- "35.base" # typo
# rarefaction analysis --------------------------------------------------------------------------------------------
# Exploratory block (disabled with if(F)): rarefaction/extrapolation curves for
# all samples at diversity orders q = 0 (richness), 1 (Shannon), 2 (Simpson).
if(F){
source("~/src/plot.iNEXT.R")
out <- iNEXT(counts,
q=c(0,1,2),
datatype="abundance",
size=NULL,
endpoint=NULL,
knots=100,
se=TRUE,
conf=0.95,
nboot=50)
# Base-graphics panels: type 1 = size-based, 2 = sample completeness,
# 3 = coverage-based curves, at orders 1 and 2.
par(mfrow = c(2,2), mar = c(5,4,1,1))
plot.iNEXT(out, type = 1, se = F, show.legend = F, show.main = F, Order =1, ylab = "Shannon diversity")
plot.iNEXT(out, type = 3, se = F, show.legend = F, show.main = F, Order =1, ylab = "Shannon diversity")
plot.iNEXT(out, type = 2, se = F, show.legend = F, show.main = F, Order =1, ylab = "Shannon diversity")
par(mfrow = c(2,2), mar = c(5,4,1,1))
plot.iNEXT(out, type = 1, se = F, show.legend = F, show.main = F, Order =2, ylab = "Simpson diversity")
plot.iNEXT(out, type = 3, se = F, show.legend = F, show.main = F, Order =2, ylab = "Simpson diversity")
plot.iNEXT(out, type = 2, se = F, show.legend = F, show.main = F, Order =2, ylab = "Simpson diversity")
# ggplot versions; point shapes split the samples into a group of 29 and a
# group of 5 (matching the 29 vs 5 column split of `counts` used below).
ggiNEXT(out,facet.var = "order") + facet_wrap(~order, scale = "free") + scale_shape_manual(values=c(rep(19,29),rep(17,5)))
ggiNEXT(out, type=2) + scale_shape_manual(values=c(rep(19,29),rep(17,5)))
ggiNEXT(out, type=3) + scale_shape_manual(values=c(rep(19,29),rep(17,5)))
# write_rds(out, "iNEXT.rds")
out
}
# simulation --------------------------------------------------------------
# in silico population generation -----------------------------------------
# Build a synthetic "universe" of clones from the 5 base samples: clones whose
# 500x-inflated size exceeds the largest observed clone are added as dominant
# clones, and the whole base profile is replicated 100x. The result is cached
# in simulation/population.Rds and re-read on later runs (else branch).
if(F){
tmp <- counts[,1:5] %>% rowSums
tmp <- tmp[tmp>0]
population <- c((tmp*500)[tmp*500>max(tmp)],rep(tmp,100))
system("mkdir -p simulation")
write_rds(population,"simulation/population.Rds")
# Sanity checks on the clone-size distribution of the synthetic population
# versus the pooled base samples.
max(tmp)
max(tmp/sum(tmp))
sum(tmp==1)
(population/sum(population)) %>% sort(decreasing = T) %>% head(5)
(tmp/sum(tmp)) %>% sort(decreasing = T) %>% head(5)
length(population)
colSums(counts)[1:5]; mean(colSums(counts)[1:5]); sd(colSums(counts)[1:5])
colSums(counts)[6:34];
mean(colSums(counts)[6:34]); sd(colSums(counts)[6:34])
# need distance between samples - based population size adjustment
} else {
population <- read_rds("simulation/population.Rds")
}
# subsample bases ---------------------------------------------------------
# Draw four synthetic "base" samples from the population at depths ~N(10k, 4k)
# and compare their rarefaction curves with the five real base samples.
if(F){
base1 <- rrarefy(population, rnorm(n = 1,mean = 10000, sd=4000))[,]
base2 <- rrarefy(population, rnorm(n = 1,mean = 10000, sd=4000))[,]
base3 <- rrarefy(population, rnorm(n = 1,mean = 10000, sd=4000))[,]
base4 <- rrarefy(population, rnorm(n = 1,mean = 10000, sd=4000))[,]
# Drop clones absent from all four draws; base_all pools the four draws.
mat1 <- cbind(base1,base2,base3,base4, base_all = base1+base2+base3+base4)
mat1 <- mat1[rowSums(mat1)>0,]
rarecurve(t(mat1), step = 10, sample = min(colSums(mat1)))
rarecurve(t(counts[,1:5]), step = 1, sample = min(colSums(counts[,1:5])))
}
# proliferate/decay simulation --------------------------------------------
if (F){
# Simulate `iter` cycles of clonal expansion plus culture decay: the seeded
# clone (row 1) grows by amp.r per cycle, then the whole sample is randomly
# down-sampled to rare.r of its size. Both the pre- (a) and post-rarefaction
# (r) state of every cycle are appended as extra columns of S.
recursive_amp_rare <- function(init = t(rrarefy(population,3*10^5)),
amp.r = 2.3,
rare.r = 0.7,
iter = 10,
seed.n = 100) {
# Column 1: untouched init; column 2: init with the first clone forced to seed.n.
S = cbind(init,init)
S[1,2] = seed.n
for(i in 1:iter){
a=S[,ncol(S), drop = F]
a[1,1] = a[1,1]+round(a[1,1]*amp.r)
r=t(rrarefy(t(a), round(sum(a)*rare.r)))
S = cbind(S,a,r)
}
if(ncol(S)>26){
# NOTE(review): (1:ncol(S)/2) parses as (1:ncol(S))/2, producing fractional
# labels "D0.5","D1","D1.5",... — confirm this labelling is intended.
colnames(S) <- paste0("D",(1:ncol(S)/2))
} else {
colnames(S) <- letters[1:ncol(S)]
}
S
}
set.seed(42)
# Starting universe: one random draw of 10^5 cells, zero clones removed.
S.init = t(rrarefy(population,1*10^5))
S.init = S.init[S.init>0, ,drop = F]
# Run one proliferate/decay scenario, preview its size/richness trajectory,
# ask the user for confirmation, then compute rarefaction curves (iNEXT) and
# plot them next to the curves of the real data (iNEXT.rds).
#   a      per-cycle amplification rate of the seeded clone
#   r      per-cycle retention (decay) rate of the whole culture
#   s      initial size (cells) of the seeded clone
#   i      initial population matrix; iter.n: number of cycles
#   dry    if TRUE, only draw the preview and return 1 without running iNEXT
simul <- function(a = 2.3, r = 0.7, s = 10, i = S.init, iter.n = 10,dry = F) {
cat("simulate rare ...")
S <- recursive_amp_rare(i, a, r, iter.n, seed.n = s)
# Preview panels: per-column sample size, species richness, and richness vs
# size at two zoom levels; grey guide lines mark reference values.
par(mfcol = c(2,2), mar = c(0,0,1,1),oma=c(4,4,4,1))
x = colSums(S)
y = colSums(S>0)
x %>% barplot()
abline(h=100000)
y %>% t %>% barplot
abline(h=8000)
plot(x,y,xlim=c(0,1.5*10^5), ylim=c(0,34000),)
lines(x,y,lty=2,col="grey")
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
plot(x,y,xlim=c(0,1.5*10^5), ylim=c(0,10000))
lines(x,y,lty=2,col="grey")
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
if(dry){return(1)}
# Interactive gate before the expensive iNEXT computation.
ans <- readline("Go? [y/n]: ")
if(ans != "y") {stop("user stop")}
registerDoMC(24)
cat(" done\nrun iNEXT ...")
tmp <- iNEXT(S,
q=0,
se = F,
nboot = 1,
size = c(0, 5000, 10000, 15000, 20000, 40000, 50000, 75000, 100000, 125000, 150000),
datatype="abundance", doMC = T)
cat(" done\n plotting ...")
# Left column: real data curves; right column: this simulation, matched axes.
real <- read_rds("iNEXT.rds")
par(mfcol = c(2,2), mar = c(0,0,1,1),oma=c(4,4,4,1))
plot.iNEXT(real, se=F, show.main=F, show.legend=F, xlim=c(0,1.5*10^5), ylim=c(0,34000), xaxt='n', order=0, las=1)
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
plot.iNEXT(real, xlim=c(0,1.5*10^5), ylim=c(0,10000), se=F, order=0, show.main=F, las=1, show.legend = F)
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
plot.iNEXT(tmp, se=F, show.main=F, show.legend=F, xlim=c(0,1.5*10^5), ylim=c(0,34000), xaxt='n', yaxt='n', order=0)
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
plot.iNEXT(tmp, xlim=c(0,1.5*10^5), ylim=c(0,10000), se=F, order=0, show.main=F, yaxt='n')
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
mtext(paste0("universe = 3*10^5, seed = ",s,", seed.amp.rate = ",a,",\n universe.decay.rate = ",r,", cycle = ",iter.n), outer=T, cex=1.5, font=2)
cat(" done\n")
return(tmp)
}
# Parameter scan: each call tries one (amp, decay, seed) combination.
a2.3r0.7 <- simul(2.3,0.7)
a2.4r0.7 <- simul(2.4,0.7)
a1.7r0.7s100 <- simul(1.7,0.7,100)
a1.1r0.7s1000 <- simul(1.1,0.7,1000)
a2.5r0.7 <- simul(2.5,0.7)
a1.001r0.7s10000 <- simul(1.001,0.7,10000)
a1.002r0.7s2000 <- simul(1.002,0.7,2000)
# Larger starting universe (3*10^5 cells) for the later scenarios.
S.init2 = t(rrarefy(population,3*10^5))
S.init2 = S.init2[S.init2>0, ,drop = F]
simul(1.15,0.7,1800, S.init2, 9, T)
simul(1.15,0.7,1800, S.init2, 9, F)
simul(0.363,0.85,5000, S.init2, 19, T)
# Detailed trajectory plots for the chosen scenario (a=0.363, r=0.85, s=5000).
S <- recursive_amp_rare(S.init2, 0.363, 0.85, 19, seed.n = 5000)
par(mfcol = c(2,2), mar = c(0,4,1,1),oma=c(5,0,4,1))
x = colSums(S)
y = colSums(S>0)
x %>% barplot(xaxt='n', ylab = "sample size")
# Overlay the analytically expected size of the expanded clone per cycle
# (growth factor 1.363 = 1 + 0.363, decay factor 0.85).
barplot(5000 * 1.363^c(rep(0:19,each=2)) * 0.85^c(0,0,0,rep(1:18,each=2),19),add = T, col = "grey40", yaxt='n')
legend("topright",legend=c("expanded"), fill = c("grey40"), bty ='n')
abline(h=100000)
y %>% t %>% barplot(col="grey50",ylab="species richness")
abline(h=8000)
plot(x,y,xlim=c(0,1.5*10^5), ylim=c(0,34000),xaxt='n', ylab = "species richness")
lines(x,y,lty=2,col="grey")
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
plot(x,y,xlim=c(0,1.5*10^5), ylim=c(0,10000), xlab = "sample size", ylab="species richness")
lines(x,y,lty=2,col="grey")
abline(h=c(5000,7500),v=c(10000,50000,100000), col="grey", lty=2)
simul(0.363,0.85,5000, S.init2, 19, F) # final fit
}
# top clone percentages distribution --------------------------------------
# Tabulate how large the biggest clones are (as % of sample) across the 29
# cocultured samples, fit a power-law to the cumulative clone counts, and
# derive the per-tier clone numbers / growth rates used by spawn_specificity().
if(F){
# Bin per-sample clone frequencies into the percentage cut points.
d <- counts[,6:34] %>% sweep(2, colSums(.), `/`) %>% unlist %>%
Hmisc::cut2(cuts = c(40,20,10,7,5,3,1,0.5,0.1)/100) %>% as.numeric %>% table()
names(d) <- c(0,rev(c(40,20,10,7,5,3,1,0.5,0.1)))
# table1: observed and power-law-extrapolated clone counts per percentage tier,
# plus the implied per-cycle growth rate (`a %`) for each tier.
table1 <- data.frame("observed %" = c(40,20,10,7,5,3,1,0.5,0.1),check.names = F) %>%
mutate(`n.clone in 3*10^5 * 29` = c(rev(d[5:10]),NA,NA,NA),
`cumulative n.clone` = cumsum(`n.clone in 3*10^5 * 29`),
`extrapolated.cumsum` = 805/`observed %`^1.5,
`extrapolated n.clone` = diff(c(0,805 / `observed %`^1.5)),
`proportion sum %` = `observed %` * `extrapolated n.clone` / 29,
`proportion cumsum %` = cumsum(`proportion sum %`),
`count in 10^5` = `observed %`/100 * 10^5,
`a %` = 100*(((10^5*`observed %`/100/4.755642)^(1/19))/0.85 - 1),
`n.clone in population(23k)` = `extrapolated n.clone`*length(population)/sum(drarefy(population,3*10^5))/29) %>% round(1)
table1 %>% htmlTable::htmlTableWidget()
# Diagnostic plots: frequency histogram with tier boundaries, then cumulative
# counts with candidate power-law fits (x^k * y = const) overlaid.
par(mfrow = c(3,1), mar = c(5,4,1,1),oma=c(0,0,5,0))
counts[,6:34] %>% sweep(2, colSums(.), `/`) %>% hist(breaks = 30, ylim = c(0,100), xlim = c(0,0.4), main = "",xlab = "counts[,6:34] %>% sweep(2, colSums(.), `/`) %>% hist(breaks = 30, ylim = c(0,100))", col = "grey80")
abline(v=c(0.02,0.04, 0.06, 0.08, 0.12, 0.28, 0.52), col = "red")
text(c(0.03,0.05,0.07,0.1,0.2,0.4),rep(100,6),c("3%","5%","7%","10%","20%","40%"))
counts[,6:34] %>% sweep(2, colSums(.), `/`) %>% unlist %>% Hmisc::cut2(cuts = c(0.02,0.04, 0.06, 0.08, 0.12, 0.28, 0.52)) %>% table %>%
.[2:7] %>% {text(c(0.03,0.05,0.07,0.1,0.2,0.4),rep(95,6),.);.} %>%
rev() %>% cumsum %>% {text(rev(c(0.03,0.05,0.07,0.1,0.2,0.4)),rep(90,6),.)}
plot(table1$`observed %`[1:6],table1$`cumulative n.clone`[1:6], xlim = c(0,40), ylim = c(0,250))
# points(table1$`observed %`[7],table1$`extrapolated n.clone`[7],pch=2)
(1:1500/30) %>% lines(9*20 /. , col = "black", lty = 2)
(1:1500/30) %>% lines(9*20^1.5/.^1.5, col = "red", lty = 2)
(1:1500/30) %>% lines(9*20^2 /.^2 , col = "darkgreen", lty = 2)
(1:1500/30) %>% lines(9*20^2.5/.^2.5, col = "blue", lty = 2)
text(30,110,expression(x * y == 180), col="black",cex=1.3,adj=c(0,0))
text(30,90,expression(x^1.5 * y == 805), col="red",cex=1.3,adj=c(0,0))
text(30,70,expression(x^2 * y == 3600), col="darkgreen",cex=1.3,adj=c(0,0),font=2)
text(30,50,expression(x^2.5 * y == 16100), col="blue",cex=1.3, adj = c(0,0))
# Same comparison on a reverse-cumulative histogram (counts at or above each %).
h<-counts[,6:34] %>% sweep(2, colSums(.), `/`) %>% {.*100} %>% hist(breaks = 30, plot=F)
h$counts <- rev(cumsum(rev(h$counts)))
h$density <- rev(cumsum(rev(h$density)))
plot(h, freq=TRUE, ylim = c(0,250), xlim = c(0,40), main = "", col="grey80",border="black")
(1:1500/30) %>% lines(9*20 /. , col = "black", lty = 2)
(1:1500/30) %>% lines(9*20^1.5/.^1.5, col = "red", lty = 2)
(1:1500/30) %>% lines(9*20^2 /.^2 , col = "darkgreen", lty = 2)
(1:1500/30) %>% lines(9*20^2.5/.^2.5, col = "blue", lty = 2)
text(30,110,expression(x * y == 180), col="black",cex=1.3,adj=c(0,0))
text(30,90,expression(x^1.5 * y == 805), col="red",cex=1.3,adj=c(0,0))
text(30,70,expression(x^2 * y == 3600), col="darkgreen",cex=1.3,adj=c(0,0),font=2)
text(30,50,expression(x^2.5 * y == 16100), col="blue",cex=1.3, adj = c(0,0))
mtext("Top clone percentage distribution in samples",outer=T, cex=1.3,font=2,line=1)
}
# seeding on the population & sampling & cocult ---------------------------
# from table1
# Assign per-clone growth specificity (fold expansion factors) to a population.
#
# Draws a fixed number of clones for each expansion tier (sizes/folds taken
# from table1 above) and tags them with a fold factor and a plotting color;
# every remaining clone keeps fold 1 / color "black".
#
# Args:
#   pop: numeric vector of clone abundances (default: the global `population`).
# Returns:
#   Numeric fold-factor vector, one entry per element of `pop`, with a
#   "color" attribute holding the matching plotting colors.
spawn_specificity <- function(pop = population){
  # Expansion tiers: fold factor, number of clones, display color.
  # (Plain data.frame instead of tribble: no tidyverse dependency needed here.)
  mat <- data.frame(
    fold  = c(1.893, 1.825, 1.760, 1.727, 1.697, 1.652, 1.559, 1.503),
    size  = c(3, 6, 17, 18, 29, 84, 656, 1486),
    color = c("#9E0142", "#E25249", "#FBA45C", "#FEE899",
              "#EDF7A3", "#A1D9A4", "#48A0B2", "#5E4FA2"),
    stringsAsFactors = FALSE
  )
  # Fix: size the output by the `pop` argument, not the global `population`.
  fold <- rep(1, length(pop))
  color <- rep("black", length(pop))
  I <- seq_along(pop)  # clone indices still available for sampling
  for (i in nrow(mat):1) {
    idx <- sample(I, size = mat$size[i])
    fold[idx] <- mat$fold[i]
    color[idx] <- mat$color[i]
    # Fix: `idx` holds sampled index *values*, so remove them by value.
    # The original `I[-idx]` removed by position, which after the first tier
    # deleted the wrong entries (out-of-range negatives are silently ignored)
    # and let later tiers overwrite earlier assignments.
    I <- setdiff(I, idx)
  }
  attr(fold, "color") <- color
  fold
}
# One shared specificity draw, reused across simulated replicates below.
growth.specificity1 <- spawn_specificity()
# Draw four summary histograms for a collection of count samples, writing the
# mean of each statistic in the margin: total reads (n), observed richness
# (S.obs), rarefied richness at depth 17000 (qD), and sample coverage (SC).
# NOTE(review): callers pipe in a matrix; lapply() then iterates over single
# elements rather than whole samples -- confirm a list of vectors is intended.
inspect <- function(l){
  n <- unlist(lapply(l, sum))
  S.obs <- unlist(lapply(l, function(x) sum(x > 0)))
  qD <- unlist(lapply(l, function(x) rarefy(x, 17000, se = F, MARGIN = 1)))
  SC <- unlist(lapply(l, function(x) 1 - rareslope(x, 17000)))
  hist(n)
  mtext(mean(n))
  hist(S.obs)
  mtext(mean(S.obs))
  hist(qD)
  mtext(mean(qD))
  hist(SC)
  mtext(mean(SC))
}
# Simulate `iter` rounds of co-culture: clone-specific expansion followed by
# random subsampling (decay) of each sample.
#
# Args:
#   P: clone-abundance matrix (clones recycled against `sp` down the columns).
#   r: fraction of each column total retained after every round.
#   sp: per-clone growth factors, e.g. from spawn_specificity().
#   iter: number of expansion/decay rounds.
# Returns:
#   The abundance matrix after `iter` rounds.
cocult <- function(P, r=0.85, sp = growth.specificity1, iter=19){
  # Fix: honor the `iter` argument (the loop bound was hard-coded to 19).
  for (i in seq_len(iter)) {
    P <- P * sp                                    # clone-specific expansion
    P <- t(rrarefy(t(P), round(r * colSums(P))))   # random decay to r of each column
  }
  P
}
# Starting-depth model: 100 universes drawn at depths ~N(3*10^5, 10^4),
# then a grid of amplify/rarefy combinations inspected visually.
# NOTE(review): amp() and rare() are not defined anywhere in this file --
# they presumably come from an interactive session or another script; confirm.
rnorm(100,mean = 3*10^5, sd = 10000) %>% hist
P <- foreach(size=rnorm(100,mean = 3*10^5, sd = 10000),.combine=rbind) %dopar% {
t(rrarefy(population, size))}
P.result <- cocult(P)
par(mfcol = c(4,4))
P %>% inspect
P %>% amp(0.5) %>% inspect
P %>% amp(1) %>% inspect
P %>% amp(1.5) %>% inspect
par(mfcol = c(4,3))
P %>% amp(0.5) %>% rare(0.1) %>% inspect
P %>% amp(1) %>% rare(0.1) %>% inspect
P %>% amp(1.5) %>% rare(0.1) %>% inspect
P %>% amp(0.5) %>% rare(0.1) %>% amp(0.5) %>% inspect
P %>% amp(0.5) %>% rare(0.1) %>% amp(1) %>% inspect
P %>% amp(0.5) %>% rare(0.1) %>% amp(1.5) %>% inspect
P %>% amp(1) %>% rare(0.1) %>% amp(0.5) %>% inspect
P %>% amp(1) %>% rare(0.1) %>% amp(1) %>% inspect
P %>% amp(1) %>% rare(0.1) %>% amp(1.5) %>% inspect
P %>% amp(1.5) %>% rare(0.1) %>% amp(0.5) %>% inspect
P %>% amp(1.5) %>% rare(0.1) %>% amp(1) %>% inspect
P %>% amp(1.5) %>% rare(0.1) %>% amp(1.5) %>% inspect
# one-time amplify model
# Draw a starting sample from the population, expand selected clones once by
# their fold factor, then down-sample to the final sequencing depth.
# NOTE(review): the default growth = spawn_specificity() returns a numeric
# fold vector, but the loop below expects a list of lists with components
# $idx/$nclones/$fold/$color -- confirm which growth format is intended.
stimulate <- function (pop = population,
start.size = 3*10^5,
end.size = round(rnorm(n = 1,mean = 40000, sd=10000)),
growth = spawn_specificity()
){
s.pop <- vegan::rrarefy(pop, start.size)
s.fold <- rep(1,length(s.pop))
s.color <- rep("black",length(s.pop))
for(i in growth){
if(is.null(i$idx)) {
# If no indices were fixed, pick nclones random clones present in the sample.
idx <- sample(which(s.pop>0), i$nclones)
} else {
idx <- i$idx
}
s.fold[idx] <- i$fold
s.color[idx] <- i$color
s.pop[idx] <- round(s.pop[idx] * i$fold)
}
# Final subsampling to sequencing depth; [,] drops the 1-row matrix shape.
s.pop <- vegan::rrarefy(s.pop, end.size)[,]
return(list(s.pop = s.pop, s.fold = s.fold, s.color = s.color))
}
# RARAR model
# Two-phase variant: rarefy -> amplify (sqrt of fold) -> rarefy to an
# intermediate bottleneck -> amplify (sqrt of fold) again, splitting the total
# expansion over two growth phases.
# NOTE(review): as in stimulate(), the default growth = spawn_specificity()
# does not match the list-of-lists format ($idx/$nclones/$fold) used below.
stimulate2 <- function (pop = population,
start.size = 3*10^5,
middle.size = 3*10^4,
end.size.prop = 0.5,
growth = spawn_specificity()){
s.pop <- vegan::rrarefy(pop, start.size)
# s.fold <- rep(1,length(population))
# s.color <- rep("black",length(population))
for(i in growth){
if(is.null(i$idx)) {
idx <- sample(which(s.pop>0), i$nclones)
} else {
idx <- i$idx
}
# s.fold[idx] <- i$fold
# s.color[idx] <- i$color
# First growth phase: apply the square root of the fold factor.
s.pop[idx] <- round(s.pop[idx] * i$fold ^ .5)
}
# Bottleneck: shrink to middle.size plus whatever growth added beyond start.size.
s.pop <- vegan::rrarefy(s.pop, middle.size + (sum(s.pop) - start.size))[,]
for(i in growth){
if(is.null(i$idx)) {
idx <- sample(which(s.pop>0), i$nclones)
} else {
idx <- i$idx
}
# Second growth phase with the remaining sqrt of the fold factor.
s.pop[idx] <- round(s.pop[idx] * i$fold ^ .5)
}
# s.pop <- vegan::rrarefy(s.pop, sum(s.pop) * end.size.prop)[,]
return(list(s.pop = s.pop
# , s.fold = s.fold, s.color = s.color
))
}
# simulation
# Run 50 replicates of the one-step model and 20 of the RARAR model, recording
# sample size (n), observed richness (S.obs), rarefied richness at 17000 (qD)
# and sample coverage (SC), then compare the distributions with the real
# cocultured samples (counts[,6:34]).
if (F){
registerDoMC(5)
sim <- foreach(x = 1:50) %dopar% {
stimulate() %>% .$s.pop %>% .[.>0] %>%
{
list(n = sum(.),
S.obs = length(.),
qD = rarefy(.,17000, se = FALSE, MARGIN = 1),
SC = 1-rareslope(.,17000))
}
}
sim.result <- matrix(unlist(sim), ncol = 4, byrow = TRUE)
colnames(sim.result) <- c("n", "S.obs", "qD", "SC")
sim.result <- as.data.frame(sim.result)
sim2 <- foreach(x = 1:20) %dopar% {
stimulate2() %>% .$s.pop %>% .[.>0] %>%
{
list(n = sum(.),
S.obs = length(.),
qD = rarefy(.,17000, se = FALSE, MARGIN = 1),
SC = 1-rareslope(.,17000))
}
}
sim2.result <- matrix(unlist(sim2), ncol = 4, byrow = TRUE)
colnames(sim2.result) <- c("n", "S.obs", "qD", "SC")
sim2.result <- as.data.frame(sim2.result)
# Histogram grid -- rows: n, S.obs, qD, SC; columns: real data, model 1, model 2.
par(mfrow = c(4,3))
hist(colSums(counts[,6:34]))
hist(sim.result$n)
hist(sim2.result$n)
hist(colSums(counts[,6:34] > 0))
hist(sim.result$S.obs)
hist(sim2.result$S.obs)
rarefy(counts[,6:34], 17000, se = FALSE, MARGIN = 2) %>% hist
hist(sim.result$qD)
hist(sim2.result$qD)
counts[,6:34] %>% t %>% rareslope(17000) %>% {1-.} %>% hist(main = "Histogram of sample coverage of counts[,6:34]")
hist(sim.result$SC)
hist(sim2.result$SC)
}
if(F){
# Summary statistics of the real cocultured samples, then six simulated
# samples (three sharing one specificity draw, three with independent draws)
# compared against the four synthetic base samples from the "subsample bases"
# block above.
# NOTE(review): base1..base4 and mat1 are created inside an earlier if(F)
# block, so that block must have been run interactively before this one.
counts[,6:34] %>% rarefy(17000, se = FALSE, MARGIN = 2) %>% summary
counts[,6:34] %>% t %>% rareslope(17000) %>% summary
counts[,6:34] %>% rarefy(17000, se = FALSE, MARGIN = 2) %>% hist
counts[,6:34] %>% t %>% rareslope(17000) %>% hist
growth.specificity1 <- spawn_specificity()
sim1 <- stimulate(population, growth = growth.specificity1)
sim2 <- stimulate(population, growth = growth.specificity1)
sim3 <- stimulate(population, growth = growth.specificity1)
sim4 <- stimulate(population, growth = spawn_specificity())
sim5 <- stimulate(population, growth = spawn_specificity())
sim6 <- stimulate(population, growth = spawn_specificity())
mat2 <- cbind(sim1=sim1$s.pop,
sim2=sim2$s.pop,
sim3=sim3$s.pop,
sim4=sim4$s.pop,
sim5=sim5$s.pop,
sim6=sim6$s.pop,
base1,
base2,
base3,
base4)
mat2 <- mat2[rowSums(mat2)>0,]
dim(mat2)
dim(counts)
counts[,1:10] %>% rowSums() %>% {. > 0} %>% table # need adjust of parameters
rarecurve(x = t(mat2), sample = 17000, step = 1)
# Side-by-side rarefaction curves: simulated+synthetic vs real samples.
par(mfrow = c(1,2))
rarecurve(t(mat2), step = 5, sample = min(colSums(mat1)))
rarecurve(t(counts[,1:11]), step = 5, sample = min(colSums(counts[,1:11])))
# plot(sim1$s.pop, base1, col = sim1$s.color); abline(v = 0, h = 0)
# plot(as.data.frame(mat1))
inext1 <- iNEXT(mat1,
q=c(0,1),
datatype="abundance",
size=NULL,
endpoint=NULL,
knots=30,
se=F
# conf=0.95,
# nboot=50
)
plot(inext1)
}
|
02621ee8f942dfb8f6607b14e99bd675b8ac3d33 | 7a5d49b43c5d6f9a8131e9e38178d39ecc068081 | /man/ImportDataListQT.Rd | 2f362da54b96baaa39f37bfa95de6ba19fc05434 | [] | no_license | statistikat/mzR | 85ac986612990e6fd1dfd337a657a90ba271fd4e | dca98366ffa9032c0e926abe5579a1d1053424c7 | refs/heads/master | 2023-08-16T06:05:36.632937 | 2023-08-07T10:46:01 | 2023-08-07T10:46:01 | 57,034,956 | 2 | 0 | null | 2019-06-04T13:52:18 | 2016-04-25T11:03:05 | R | UTF-8 | R | false | true | 4,352 | rd | ImportDataListQT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImportDatalistQT.R
\name{ImportDataListQT}
\alias{ImportDataListQT}
\title{Sehr spezifische Funktion die MZ-Daten einliest um damit in weiterer Folge Tabellen im
MZ-Quartalsbericht-Format zu erstellen (hausintern).}
\usage{
ImportDataListQT(timeInstant, nbw = NULL, whichVar = NULL,
weightDecimals = 2, ImportAndMerge = TRUE, curr_inFile = NULL,
prev_inFile = NULL, mergeBy = c("asbper", "ajahr", "amonat"),
mz_intern = mount_mz_intern(), mz_ergebnisse = mount_mz_ergebnisse())
}
\arguments{
\item{timeInstant}{numerischer Vektor mit 2 Elementen: c(jahr, quartal).
Hier gibt man den Zeitpunkt an auf den sich alle Ergebnisse im weitesten
Sinn beziehen sollen, also i.d.R. das aktuellste Quartal.}
\item{nbw}{numerischer Wert: Anzahl an Bootstrap-Gewichten die eingelesen
werden soll (z.B. um Rechenzeit beim Aufsetzen der Tabellen zu verkuerzen).}
\item{whichVar}{Character (vector) oder NULL. Falls ungleich NULL, Character Vektor mit Variable(n) aus
dem dg7-Mikrozensus-File die im Output-File enthalten sein sollen. Die
uebrigen Variablen werden weggelassen. Default ist NULL, dabei werden alle
Variablen behalten.}
\item{weightDecimals}{Numerischer Wert oder NULL. Anzahl der Nachkommastellen der Stichprobengewichte,
gerundet nach SPSS RND Logik (0.5 bwz. -0.5 wird dabei immer "weg von 0" gerundet).
Falls NULL, werden die Gewichte nicht gerundet.}
\item{ImportAndMerge}{TRUE/FALSE ob die Funktion \link{ImportAndMerge} angewendet werden soll.
Bei der Defaulteinstellung \code{ImportAndMerge=TRUE} und \code{curr_inFile=NULL} wird \emph{Daten_ab2004_QuartPub.sav}
zu den MZ-Daten gemerged.}
\item{curr_inFile}{Pfad der Datei die eingelesen und zu den MZ-Daten gemerged werden soll (bezogen auf
den aktuelleren der beiden Zeitpunkte falls prev_inFile ungleich NULL).
Eingelesen werden koennen Files vom Typ .sav, .csv und .csv.gz.}
\item{prev_inFile}{Falls ungleich NULL, Pfad der Datei die eingelesen und zu den MZ-Daten gemerged werden
soll (bezogen auf den weniger aktuellen Zeitpunkt). Eingelesen werden koennen
Files vom Typ .sav, .csv und .csv.gz.}
\item{mergeBy}{Character Vektor mit Variable(n) nach denen gemerged werden
soll (default=c("asbper","ajahr","amonat")).}
\item{mz_intern}{Pfad zu dem \code{mz_intern} Ordner in der STAT Infrastruktur.
Standardmäßig wird dieser mit \code{mountSTAT} generiert.}
\item{mz_ergebnisse}{Pfad zu dem \code{06 Ergebnisse} Ordner in der STAT
Infrastruktur. Standardmäßig wird dieser mit \code{mountSTAT} generiert.}
}
\value{
Output ist eine Liste deren Elemente jeweils MZ-Daten enthalten
die die selbe Grundstruktur haben wie der Output aus der Funktion \link{ImportData}.
}
\description{
Funktion liest MZ-Daten (dg7) automatisch aus dem STAT-Filemanagement ein und fuehrt diese Daten
mit dem derzeit fuer die Quartalsberichtsproduktion verwendeten SPSS-File zusammen (siehe Details).
}
\details{
Anzugeben ist bei dieser Funktion der Referenzzeitpunkt \code{timeInstant} des MZ-Quartalsberichts.
Die MZ-Daten (dg7) werden (derzeit) defaultmaessig (\code{ImportAndMerge=TRUE})
ueber \code{mergeBy = c("asbper","ajahr","amonat")} mit \emph{Daten_ab2004_QuartPub.sav}
gemerged. Dieses File wird quartalsweise vom Fachbereich erzeugt und immer im selben Ordner abgelegt.
Sollte man mit einem anderen File mergen wollen, so kann man die entsprechenden File-Pfade in
\code{curr_inFile} und \code{prev_inFile} spezifizieren aber auch \code{mergeBy} anpassen, analog zu \link{ImportAndMerge}.
Des Weiteren koennen die einzulesenden MZ-Daten mit den Funktionsparametern
\code{nbw}, \code{whichVar} und \code{weightDecimals} angepasst werden, siehe \link{ImportData}.
Dadurch kann man z.B. vermeiden, dass Variablen die sowohl in den MZ-Daten als auch in
\emph{Daten_ab2004_QuartPub.sav} vorkommen doppelt eingelesen werden.
}
\examples{
\dontrun{
## Lesen Daten fuer den AKE-QT-Referenzzeitpunkt 2014q4 ein.
# Fuer Testzwecke (um Rechenzeit zu sparen) schraenken wir die
# Anzahl der Bootstrapgewichte ein auf 5.
# Ausserdem wollen wir aus den Original-MZ-Daten (dg7) nur die
# Variable rbpkin behalten.
datalist <- ImportDataListQT(timeInstant=c(2014,4), nbw=5, whichVar=c("rbpkin"))
}
}
\seealso{
\code{\link{MakeQT},\link{MakeTable},\link{FillExcelTemplate},\link{ImportData},\link{ImportAndMerge}}
}
|
c8bb7099effcd54ca5c3563b57b98888789c5a09 | 719500684fceaf0a7a80ce663e9cf07802e10b9a | /R/make_unique.r | dceff29a2fbe8ad31a70cbeed311c4a1775d3b91 | [] | no_license | pbreheny/breheny | 964baf0670a9eb4975946eae66772d47d9affd11 | 3e15bb78a769616adb49ea50c800d026f48ef8e7 | refs/heads/master | 2023-08-09T18:34:35.865871 | 2023-08-01T15:38:54 | 2023-08-01T15:38:54 | 82,972,930 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 709 | r | make_unique.r | #' Ensure all entries in a string are uniquely labeled
#'
#' Leaves entries untouched if they are already unique; adds a suffix like '_1', etc. if necessary.
#'
#' @param x A character vector or something coerceable to a character vector.
#' @param sep Separator between label and suffix. Default: _.
#'
#' @examples
#' make_unique(LETTERS[c(1,1,2,2,2,3,4)])
#'
#' @export
make_unique <- function(x, sep='_') {
  x <- as.character(x)
  # Count occurrences; only labels appearing more than once need suffixes.
  tab <- table(x)
  tab <- tab[tab > 1]
  lentab <- length(tab)
  if (lentab > 0) {
    u <- names(tab)
    for (i in seq_len(lentab)) {
      n <- tab[i]
      # Zero-pad the suffix so all entries of this label share one width
      # (e.g. 10 duplicates become _01.._10); suffixes follow the order of
      # appearance in x.
      x[x == u[i]] <- paste0(x[x == u[i]], sep,
                             formatC(seq_len(n), width = 1 + floor(log10(n)), flag = "0"))
    }
  }
  x
}
|
4cc277991d30dc99d1b5d30c1d38406d5cd8629c | 7579eefd2cc5b98f92eba70c85aea273a67df733 | /midterm 2/q2p4.R | a8e482ec72963bb4fdbb1093259a3c475e6af8a1 | [] | no_license | sebastianbugal/stat240 | 6a5ff7066f69bf8bcde48e3d91d91e18b531b08a | fb577283923fb760972a407d1551a2cb93258012 | refs/heads/master | 2021-02-09T13:45:10.225990 | 2020-03-08T09:05:13 | 2020-03-08T09:05:13 | 244,289,189 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 395 | r | q2p4.R | library(RSQLite)
library(DBI)
# Connect to the course SQLite database file in the working directory.
dbcon = dbConnect(SQLite(), dbname="stat240.sqlite")
# Inspect the available columns of the citiesP table.
names(dbReadTable(dbcon, "citiesP"))
# DISTINCT collapses duplicate rows: citiesP stores one row per (city,
# province) pair, so cities would otherwise appear more than once.
query="SELECT DISTINCT rank2016, rank2011,name FROM citiesP "
a=dbGetQuery(dbcon, query)
nrow(a)
a
# Scatter plot of each city's 2016 ranking against its 2011 ranking.
# NOTE(review): the trailing comma in this plot() call creates an empty
# argument -- confirm it is tolerated / intended.
plot(a$rank2016,a$rank2011,ylab='Ranking of 2011', xlab= 'Ranking of 2016',pch=1, cex=0.5,)
# Without DISTINCT the table's duplicated province rows inflate the result. |
3a1daddd26c815ac6f99afe420cf5b33aa131f1a | 98ed3d4ea1681b651a14195e32c491e2e5aeaa43 | /DataScience/exploreData/densityPlot.R | 9ce0d4060d6707e62af7c5df96164cc3a71258f5 | [] | no_license | vineetyadav/DataScienceR | 6f793f5e1682844859f07ce52d2a71a3c4cb6551 | e6f8145671c10cc886d214e4a35e50cc5a30345b | refs/heads/master | 2022-05-09T03:57:16.694566 | 2022-04-17T07:45:37 | 2022-04-17T07:45:37 | 97,370,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 342 | r | densityPlot.R | # TODO: Add comment
#
# Author: bigdata
###############################################################################
# Plot a density curve of customer income with dollar-formatted x-axis labels.
# The scales package provides the dollar formatter used below.
library(ggplot2)
cusdata <- read.table('custdata.tsv', header = TRUE, sep = '\t')
# Set the x-axis labels in dollars.
# Fixes: scale_x_continuous was misspelled "scales_x_continous", and the
# label formatter is scales::dollar (the bare name "dollor" was undefined).
ggplot(cusdata) +
  geom_density(aes(x = income)) +
  scale_x_continuous(labels = scales::dollar)
5318c1f1afcc10276862e4349d2ddd9f0b494fa5 | 4ed821ee1d12c363ba500ffd3c4a5f7c04d5ba2e | /man/normal_deviates.Rd | e3a34e3be06b822d0e054c36fea4eab40e411841 | [
"CC-BY-4.0"
] | permissive | jrnold/datums | 25ea78a61bc61c027611756439c2fad50dca72ee | 66efd0d0873390420622558e126f9e22f579f464 | refs/heads/master | 2021-01-20T02:44:31.807859 | 2018-05-07T22:22:35 | 2018-05-07T22:22:35 | 81,639,460 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 731 | rd | normal_deviates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normal_deviates.R
\docType{data}
\name{normal_deviates}
\alias{normal_deviates}
\title{100,000 Random Normal Deviates}
\format{An object of class \code{numeric} of length 100000.}
\source{
\url{https://www.rand.org/content/dam/rand/pubs/monograph_reports/MR1418/MR1418.deviates.txt.zip}
}
\usage{
normal_deviates
}
\description{
100,000 random normal deviates generated by the RAND Corporation in 1955 in the book \emph{A Million Random Digits with 100,000 Normal Deviates}.
}
\references{
RAND (1955) \emph{A Million Random Digits with 100,000 Normal Deviates}. \url{https://www.rand.org/pubs/monograph_reports/MR1418/index2.html}
}
\keyword{datasets}
|
a32b4569b3f650b28092533d8bd1d28f611230aa | 9c09ff539b6697ec5e2708300ffa2a26ad26577b | /man/makeOF.Rd | 5590127660c6bb376c2ed761278976b943761dfe | [] | no_license | cran/scdensity | 605e9a8fc30eb8e20bc481b7e3077796e96c526e | ebf65d7d6aade0b297d7679839e07167c35197e5 | refs/heads/master | 2021-09-05T18:47:14.124418 | 2018-01-30T09:52:26 | 2018-01-30T09:52:26 | 119,523,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 725 | rd | makeOF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedKDE.R
\name{makeOF}
\alias{makeOF}
\title{A function factory for making the search objective function.}
\usage{
makeOF(P)
}
\arguments{
\item{P}{The list of problem details.}
}
\value{
The objective function.
}
\description{
Used when we need to search for important points.
P is the problem list. It should have already gone through BuildConCheckGrid and
BinningStep. The returned function must return a value even if WeightedKDE() fails.
In case of failure, just assign a large random value to the objective value (to keep
the search from stagnating or moving systematically in one direction).
}
\keyword{internal}
|
48ed2d443ea61535e7dd0f9395db3287110a06b3 | 24d47dffee8d3443bfdbe1dc88ee2796b6a64ec9 | /workout01/code/make-shot-charts-script.R | f1f0e05b9f7d380a8bb0893f015921f4bbb10689 | [] | no_license | stat133-sp19/hw-stat133-SeungHugh | 7335e345d8f2675ec381ed008799e13af2c6bb5f | e956a1f5815a749893eae4d9e1cb18883265f275 | refs/heads/master | 2020-04-28T19:39:40.980249 | 2019-05-03T20:34:10 | 2019-05-03T20:34:10 | 175,518,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,113 | r | make-shot-charts-script.R | #title: Players' data's chart
#description: R code for plotting players' goal data
#input: shots-data.csv
#output: andre-iguodala-shot-chart.pdf, draymond-green-shot-chart.pdf, kevin-durant-shot-chart.pdf, klay-thompson-shot-chart.pdf, stephen-curry-shot-chart.pdf, gsw-shot-charts.pdf, gsw-shot-charts.png
# Install jpeg only if it is missing (an unconditional install.packages()
# would re-download the package on every run of the script).
if (!requireNamespace("jpeg", quietly = TRUE)) {
  install.packages("jpeg")
}
library(jpeg)
library(grid)
library(ggplot2)

# Background image of an NBA half court, stretched to fill the plot panel.
court_file <- "../images/nba-court.jpg"
court_image <- rasterGrob(readJPEG(court_file), width = unit(1, "npc"), height = unit(1, "npc"))

# Shot-level data: court coordinates (x, y), shot_made_flag, and player name.
total <- read.csv("../data/shots-data.csv", stringsAsFactors = FALSE)

# Layers shared by every chart: court background, shots colored by outcome,
# clipped half-court y-range, minimal theme.
shot_chart <- function(data) {
  ggplot(data = data) +
    annotation_custom(court_image, -250, 250, -50, 420) +
    geom_point(aes(x = x, y = y, color = shot_made_flag)) +
    ylim(-50, 420) +
    theme_minimal()
}

# Write one single-player shot chart as a PDF (explicit print() is required
# for ggplot objects inside a function).
save_player_chart <- function(player, outfile) {
  pdf(outfile, width = 6.5, height = 5)
  print(shot_chart(total[total$name == player, ]) +
          ggtitle(paste0("Shot Chart: ", player, " (2016 season)")))
  dev.off()
}

save_player_chart("Andre Iguodala", "../images/andre-iguodala-shot-chart.pdf")
save_player_chart("Draymond Green", "../images/draymond-green-shot-chart.pdf")
save_player_chart("Kevin Durant", "../images/kevin-durant-shot-chart.pdf")
save_player_chart("Klay Thompson", "../images/klay-thompson-shot-chart.pdf")
save_player_chart("Stephen Curry", "../images/stephen-curry-shot-chart.pdf")

# Faceted chart with all five players, written as both PDF and hi-res PNG.
gsw_chart <- shot_chart(total) +
  ggtitle("Shot Chart: GSW (2016 season)") +
  facet_wrap(~ name) +
  theme(legend.position = "top", legend.title = element_blank())

pdf("../images/gsw-shot-charts.pdf", width = 8, height = 7)
print(gsw_chart)
dev.off()

png("../images/gsw-shot-charts.png", width = 8, height = 7, units = "in", res = 1200)
print(gsw_chart)
dev.off()
|
818d9f1c3aa9623dfce9396f2ee50556a518b747 | e49842b46d1f60d3bc4295d3b8d5bd60ab98dbf4 | /scripts/load_dataset.R | 214c832e84845a38cf814d82d283567fdbd7d044 | [] | no_license | Victinyx/GCR-ExpAI | 010b2baaaf3ec0363a6bfa44816fba80200672bf | e66cb4a0a66c3ef5470cfa54dc7ff2e95170ed3d | refs/heads/main | 2023-07-06T09:37:26.496742 | 2021-08-11T12:59:34 | 2021-08-11T12:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 239 | r | load_dataset.R | GCR <- read.csv('./data/german_credit_data.csv',
header = TRUE)
# Drop the CSV row-index column (X) and Age, which is excluded from modelling.
GCR <- GCR %>% select(-X, -Age)
# Fix the RNG seed so the train/test split is reproducible.
set.seed(123)
# Default 3:1 rsample split, stratified on the outcome Risk so both
# partitions keep a similar class balance.
GCR_split <- initial_split(GCR, strata = Risk)
GCR_train <- training(GCR_split)
GCR_test <- testing(GCR_split)
|
b7aa53bbf847af171f4a2f487d99aecacc646bcc | 8769749bb0a919299b66db7aaafa400d1d412469 | /archive/hiccup_loop/trials/refine_loop_pos.r | dc01dc9faaa50d10d5c23e9406fa79e8e17a6e54 | [] | no_license | bioinfx/cvdc_scripts | e9e113fae866d2d3f0c2515fae1b410b2d7a3eeb | d33757f9d02fa6d503b5cb65336c0e4e410caa78 | refs/heads/master | 2022-03-19T00:15:13.372417 | 2019-12-05T04:38:48 | 2019-12-05T04:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,497 | r | refine_loop_pos.r | a=read.delim("combined_loops.hiccup_refined.txt")
# Per-sample 30kb contact files (one per HiC replicate).
# NOTE(review): the loop below hard-codes 12 files -- confirm it matches
# length(files); `names` is computed but never used afterwards.
files= list.files(pattern="30k.txt",path="contacts_by_samples",full.names=T,recursive=T)
names = sub("contacts_by_samples/(D.._HiC_Rep.).contacts.30k.txt","\\1",files)
dat = list()
for (i in 1:12){
dat[[i]] = data.frame(fread(files[[i]]))
}
# Merge all samples on the loop anchor coordinates; keep rows of the first
# sample even when later samples lack them (all.x).
datm = Reduce(function(...)merge(...,by=c("chr","x1","y1","x2","y2"),all.x=T),dat)
# `a` holds the refined hiccup loop calls loaded by read.delim above; keep the
# anchor columns plus the 12 per-sample peak-call flag columns.
a = a[,c(1,2,3,16:27)]
m = merge(datm,a,by=c("chr","x1","y1"))
# only use the contacts from true peak calls.
# Columns 6:17 are per-sample contact counts, 18:29 the matching 0/1 peak
# flags; the product zeroes out contacts that are not true peak calls.
valid_contacts = m[,c(6:17)]*m[,c(18:29)]
mat = cbind(m[,1:5],rowSums(valid_contacts))
colnames(mat)[6] = "contacts"
# Group candidate loops by their left anchor (chr, x1, y1).
mat$name = paste(mat$chr,mat$x1,mat$y1)
uniqnames = unique(mat$name)
mat$dist = mat$y2-mat$x2
mat = mat[order(mat$name,-mat$dist),]
mat$rank=1
#tab=list()
num=0
# For each left anchor, rank its candidate right anchors by total contacts
# (ties broken by the distance ordering established above).
for (name in uniqnames){
# tmp = mat[which(mat$name==name),]
# tab[[name]] = tmp[which.max(tmp$contacts),]
mat$rank[which(mat$name == name)] = rank(-mat$contacts[which(mat$name== name)],
ties.method="first")
num = num + 1
if (num %% 100 ==1 ) { print(num) }
}
# Keep only the strongest candidate per anchor and write the outputs.
mat.top = mat[which(mat$rank==1),]
write.table(mat.top,"uniq_loops.details.txt",row.names=F,quote=F,sep='\t')
write.table(mat.top[,c(1,4,5)],"uniq_loops.final.txt",row.names=F,quote=F,sep='\t',col.names=F)
# Reformat for HiCCUPS: 10kb anchors on both sides plus an RGB color column.
out = mat.top[,c(1,4,5)]
colnames(out) = c("chr1","x1","y1")
out$chr2 = out$chr1
out$x2 = out$x1+10000
out$y2 = out$y1+10000
out$color = "0,0,255"
write.table(out[,c(1,2,5,4,3,6,7)],"uniq_loops.final.for_hiccups.txt",row.names=F,quote=F,sep='\t')
|
3ec5c1bc90f57a64fda3a20a3b7dc4ccdbd9135a | 479b469c906687ebc72f944166c641dc6b03979a | /src/mutmap/tools/install_r_dependencies.R | 283da0218a403c73ba4d36e48a4acc8cb971faa0 | [] | no_license | ultimatesource/dng-mutmap | bfdb74fd181ef382b76b40b6698614af51b53890 | b2d465ecef2bd15a98abb01f4fd7994c56a6630c | refs/heads/master | 2020-03-16T23:12:43.910276 | 2018-05-15T21:49:50 | 2018-05-15T21:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 226 | r | install_r_dependencies.R | #!/usr/bin/env Rscript
# Install the R package dependencies (kinship2, jsonlite, argparser) from the
# CRAN US mirror. A single vectorized install.packages() call replaces the
# three separate ones: the repository index is fetched once and the repos URL
# is not repeated.
install.packages(c("kinship2", "jsonlite", "argparser"),
                 repos = 'http://cran.us.r-project.org')
|
0c37eae5b4d4a959d82a7d20c8f2a9a67a7dc18f | 0c06657ed29a61713f9a6347094a2d974f726183 | /data_miRNAs/12tissues_RPM/00MIRNA_tissues_specific_modH.R | 19faf94fdccda6b73f79210006c14f82c3e6dd8a | [
"MIT"
] | permissive | 19zhangt/sRNA_analysis_Maize | e353b558cc2f0bb29f64ef8563994e326cf920d0 | 8a7688a30f02f0a86b05bd820a8c7d7d110b2767 | refs/heads/master | 2020-09-11T00:01:09.139234 | 2019-11-15T08:49:34 | 2019-11-15T08:49:34 | 221,872,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,206 | r | 00MIRNA_tissues_specific_modH.R | # R
# Working directory of the original analysis machine; all paths below are
# relative to it. NOTE(review): a hard-coded absolute setwd() makes the
# script non-portable -- consider relative paths.
setwd("/home/zhangt/00projects_2019/01miRNAs_large_scale_maize/data_miRNAs/12_tissues_RPM/")
# Keep character columns as character (pre-R-4.0 read.* default was factor).
options(stringsAsFactors = F)
# One-step Tukey biweight estimate of location for a numeric vector.
#
# Each observation is weighted by the biweight kernel (1 - u^2)^2, where u is
# its distance from the median scaled by c times the median absolute deviation
# (epsilon guards against a zero MAD). Points with |u| > 1 receive zero
# weight. Returns the weighted mean of x.
tukey.biweight <- function(x, c=5, epsilon=0.0001)
{
  center <- median(x)
  scale_denom <- c * median(abs(x - center)) + epsilon
  u <- (x - center) / scale_denom
  kernel_weights <- ifelse(abs(u) <= 1, (1 - u^2)^2, 0)
  sum(kernel_weights * x) / sum(kernel_weights)
}
# exp_ze_mat: build per-tissue miRNA expression matrices and the
# tissue-specificity statistics used downstream (z-scores and "modH", a
# Shannon entropy of deviations from a robust baseline).
#
# Args:
#   final_mirna:       retained MIRNAs; column 1 = MIRNA id (row keys),
#                      column 2 indexes into `location`.
#   samle_information: sample sheet; column 1 = library id matching columns
#                      of exp_mat, column 2 (named V2) = tissue label.
#   MIRNA_seq:         pre_name -> sequence-id (column 3) mapping used as row
#                      keys into exp_mat.
#   exp_mat:           expression matrix, rows = sequences, cols = libraries.
#   location:          lookup; column 1 = display name keyed by column 2.
#
# Returns a list: [[1]] tissue-averaged expression matrix, filtered to MIRNAs
# with at least one tissue value > 10; [[2]] per-MIRNA z-score matrix with two
# appended columns, "maxZ" (max tissue z-score) and "modH".
# NOTE(review): the literal V2 column name and the ">10 in >=1 tissue" cutoff
# are hard-coded -- confirm they match the sample sheets read by the callers.
exp_ze_mat <- function(final_mirna, samle_information, MIRNA_seq, exp_mat, location){
  out_list <- list()
  mirna_num <- final_mirna[,1]
  # Per-library expression of each MIRNA = column sums over its sequences.
  tpm_mat <- matrix(nrow = length(mirna_num), ncol = nrow(samle_information))
  rownames(tpm_mat) <- mirna_num
  colnames(tpm_mat) <- samle_information[,1]
  for(i in mirna_num){
    cat(which(mirna_num%in%i), "\n")
    seq_out <- MIRNA_seq[MIRNA_seq$pre_name==i, 3]
    tpm_mat[i,] <- as.numeric(colSums(exp_mat[seq_out, samle_information[,1]]))
  }
  # Average replicate libraries within each tissue (grouped by column V2).
  sample_name <- unique(samle_information[,2])
  merge_mat <- matrix(nrow = nrow(tpm_mat), ncol = length(sample_name))
  rownames(merge_mat) <- rownames(tpm_mat)
  colnames(merge_mat) <- sample_name
  for(i in mirna_num){
    value_out <- vector()
    for(j in sample_name){
      value_out <- c(value_out, mean(tpm_mat[i,which(samle_information$V2 %in% j)]))
    }
    merge_mat[i, ] <- value_out
  }
  # Keep MIRNAs expressed above 10 in at least one tissue.
  limit_mat_index <- apply(merge_mat, 1, function(x){sum(x>10)>=1})
  merge_mat <- merge_mat[limit_mat_index, ]
  # NOTE(review): the result of this range() call is discarded (leftover
  # diagnostic), and EXPZ_bc below is assigned but never used.
  range(apply(merge_mat, 1, max))
  EXP <- merge_mat
  # Robust per-MIRNA baseline, then absolute deviations from it.
  EXPtukey <- apply(EXP, 1, tukey.biweight)
  modEXP <- sapply(1:nrow(EXP), function(aa){return(abs(EXP[aa,] - EXPtukey[aa]))})
  modEXP <- t(modEXP)
  rownames(modEXP) <- rownames(EXP)
  modEXP <- apply(modEXP, 1, unlist)
  modEXP <- t(modEXP)
  # Shannon entropy of the normalized deviations; the 1e-8 offset avoids
  # log2(0). Downstream, low modH plus high maxZ flags tissue specificity.
  modH <- apply(modEXP, 1, function(aa){
    bb <- aa/sum(aa)
    sumH <- 0
    for(i in 1:length(bb)){
      sumH <- sumH + bb[i] * log2(bb[i] + 0.00000001)
    }
    return(-sumH)
  })
  # Row-wise z-scores across tissues; append maxZ and modH as extra columns
  # (the cbind of columns 1:2 just reserves space for them).
  EXPZ <- apply(EXP, 1, function(aa){return((aa - mean(aa))/sd(aa))})
  EXPZ <- t(EXPZ)
  EXPZ <- cbind(EXPZ, EXPZ[,1:2])
  EXPZ[,ncol(EXPZ)-1] <- apply(EXPZ[,1:(ncol(EXPZ)-2)], 1, max)
  EXPZ[,ncol(EXPZ)] <- modH
  colnames(EXPZ)[(ncol(EXPZ)-1):ncol(EXPZ)] <- c("maxZ", "modH")
  EXPZ_bc <- EXPZ
  # Replace MIRNA ids with their display names from the location table.
  rownames(EXPZ) <- location[final_mirna[rownames(EXPZ),2],1]
  out_list[[1]] <- merge_mat
  out_list[[2]] <- EXPZ
  return(out_list)
}
# --- Load inputs (expression matrix, MIRNA/isomiR mapping, locations, final
# --- MIRNA table) and keep only MIRNAs with a known location. ---------------
exp_mat <- read.table("../5largematrix/unique_read_RPM.txt", row.names = 1, header = T)
MIRNA_seq <- read.table("MIRNA_isos.txt", header = T)
location <- read.table("miRNAs_and_locations.txt", sep = "\t")
rownames(location) <- location[,2]
final_mirna <- read.table("finalTable.txt", sep = "\t")
final_mirna <- final_mirna[final_mirna[,2]%in%location[,2], ]
rownames(final_mirna) <- final_mirna[,1]
# merge_tissues
samle_information <- read.table("sample_information_tissues.txt", sep = "\t")
merge_tissues <- exp_ze_mat(final_mirna, samle_information, MIRNA_seq, exp_mat, location)
# split_tissues
# samle_information <- read.table("sample_information.txt", sep = "\t")
# split_tissues <- exp_ze_mat(final_mirna, samle_information, MIRNA_seq, exp_mat, location)
# Reference MIRNAs used below for eyeballing the specificity cutoffs.
MIRNA <- c("zma-MIR164e", "zma-MIR164h",
           "zma-MIR169e", "zma-MIR169j", "zma-MIR169o",
           "zma-MIR399g", "zma-MIR399b", "zma-MIR399d",
           "zma-MIR2275a", "zma-MIR2275b", "zma-MIR2275c"
)
# Columns 15:16 are maxZ and modH (14 tissue columns + 2 appended statistics).
EXPZ_tmp1 <- merge_tissues[[2]]
EXPZ_tmp1[intersect(MIRNA, rownames(EXPZ_tmp1)),15:16]
sum(EXPZ_tmp1[,15]>3&EXPZ_tmp1[,16]<1.8)
sort(rownames(EXPZ_tmp1)[EXPZ_tmp1[,15]>3&EXPZ_tmp1[,16]<1.8])
# Tissue-specific set: max z-score > 3 and entropy (modH) < 1.8.
EXPZ_tmp1_result <- EXPZ_tmp1[EXPZ_tmp1[,15]>3&EXPZ_tmp1[,16]<1.8,]
# Assign each specific MIRNA to the tissue where its z-score peaks.
tissues <- apply(EXPZ_tmp1_result[,1:14], 1, function(x){
  colnames(EXPZ_tmp1_result)[which.max(x)]
})
table(tissues)
EXPZ_tmp1_result <- cbind(EXPZ_tmp1_result, tissues)
# EXPZ_tmp2 <- split_tissues[[2]]
# EXPZ_tmp2[intersect(MIRNA, rownames(EXPZ_tmp2)),44:45]
# sum(EXPZ_tmp2[,44]>3.9&EXPZ_tmp2[,45]<3)
# Export expression values and statistics for the tissue-specific MIRNAs.
# (The "expssion" typo in the output file name is preserved deliberately:
# downstream code may read that exact path.)
EXP <- merge_tissues[[1]]
rownames(EXP) <- location[final_mirna[rownames(EXP),2],1]
EXP <- EXP[EXPZ_tmp1[,15]>3&EXPZ_tmp1[,16]<1.8, ]
write.table(EXP, file = paste0(ncol(EXP), "_samples_",
                               nrow(EXP), "_miRNAs_expssion_values_115.txt"),
            quote = FALSE, sep = "\t", row.names = T)
write.table(EXPZ_tmp1_result, file = paste0(ncol(EXP), "_samples_",
                                            nrow(EXP), "_miRNAs_shannon_zscore_115.txt"),
            quote = FALSE, sep = "\t", row.names = T)
# NOTE(review): the bare call `pnorm()` that used to sit here was removed --
# pnorm() with no arguments raises an error ('argument "q" is missing'),
# aborting the script when sourced. `qnorm(0.999)` computed a value that was
# never stored or used; both are kept below only as commented-out scratch.
# pnorm()
# qnorm(0.999)
# tukeybiweight <- function(x, c=5, epsilon=0.0001){
#   list(exprs=apply(x,2,tukey.biweight,c=c,epsilon=epsilon),se.exprs=rep(NA,ncol(x)))
# }
# EXPtukey <- tukeybiweight(t(EXP))
# --- Figure S3: heatmap of tissue-specific miRNA z-scores -------------------
# Reads back the table written above (14 tissue columns, 94 miRNAs).
input_data <- read.table("14_samples_94_miRNAs_shannon_zscore_115.txt", sep = "\t", stringsAsFactors = F)
# Column 17 holds the assigned tissue label; order the heatmap columns by it.
# Spaces are replaced with dots so the labels match the data.frame column
# names produced by read.table (check.names mangling).
con_select <- sort(unique(input_data[,17]))
con_select <- gsub(" ", ".", con_select)
input_data <- input_data[,con_select]
# Shorten row labels by dropping the "zma-" species prefix.
rownames(input_data) <- gsub("zma-", "", rownames(input_data))
pdf("FigureS3_Heatmap_tissue-specific_miRNA.pdf", height = 10, width = 8)
pheatmap::pheatmap(input_data, cluster_cols = F, border_color = "grey70",
                   color = colorRampPalette(c("white", "#FB1823"))(100),
                   cellwidth = 10, fontsize_row = 7, clustering_method = "ward.D2")
dev.off()
|
249dc620649a8011f67bb2bd80e88db5efb2acaf | 2469e9f76fb9454942ee729b4ebb86ab1bb72920 | /maps.R | d4185fc6bdb72496df00a5acc63191f96efefecf | [] | no_license | armanaghamyan/business_intelligence_project | 5be96a5927f6f9f4c7999b4e278f0a0ef9857d3f | dd92c6e2c6e803a103e18abc81cd20cbaf36cc96 | refs/heads/master | 2020-12-21T05:19:08.652829 | 2020-01-26T14:04:30 | 2020-01-26T14:04:30 | 236,320,052 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,528 | r | maps.R | library("leaflet")
# Build interactive leaflet maps (OpenStreetMap tiles, a clustered capital
# marker, and a GeoJSON country outline) for Armenia, Georgia and Azerbaijan.
# The identical six-step construction was previously repeated verbatim for
# each country; it is factored into one helper here.
# NOTE(review): relies on leaflet re-exporting the magrittr pipe (%>%) and on
# the *_full.geojson files being present in the working directory.

# Construct one country map.
#   center_lng/center_lat:   map view centre (zoom fixed at 5.5)
#   capital_lng/capital_lat: capital marker position
#   capital_popup:           marker popup text
#   geojson_file:            path to the country border GeoJSON
# Returns a leaflet widget; borders are drawn in orange (#fc6f03).
make_country_map <- function(center_lng, center_lat, capital_lng, capital_lat,
                             capital_popup, geojson_file) {
  m <- leaflet(options = leafletOptions(minZoom = 0, maxZoom = 18))
  m <- addTiles(m)
  m <- setView(m, lng = center_lng, lat = center_lat, zoom = 5.5)
  m <- addMarkers(m, lng = capital_lng, lat = capital_lat, popup = capital_popup,
                  clusterOptions = markerClusterOptions())
  borders <- readLines(geojson_file) %>% paste(collapse = "\n")
  addGeoJSON(m, geojson = borders, weight = 1, color = "#fc6f03", fill = TRUE)
}

# map of Armenia
armenia_map <- make_country_map(45.0382, 40.0691, 44.5152, 40.1872,
                                "Capital Yerevan", "Armenia_full.geojson")
# map of Georgia
georgia_map <- make_country_map(43.3569, 42.3154, 44.8271, 41.7151,
                                "Capital Tbilisi", "Georgia_full.geojson")
# map of Azerbaijan
azer_map <- make_country_map(47.5769, 40.1431, 49.8671, 40.4093,
                             "Capital Baku", "Azerbaijan_full.geojson")
|
bfed119c50b64a62fb0414aa1e8b8d6462e52acc | f6a1375e6453107cba75567ec0c3ba23a5ac7958 | /TopmedPipeline/man/writeBED.Rd | 12a98902c60e9e117f5ad0c6e60de9a6d3c16e03 | [] | no_license | UW-GAC/analysis_pipeline | 7c04b61c9cafa2bcf9ed1b25c47c089f4aec0646 | df9f8ca64ddc9995f7aef118987553b3c31301a1 | refs/heads/master | 2023-04-07T03:13:52.185334 | 2022-03-23T21:15:46 | 2022-03-23T21:15:46 | 57,252,920 | 42 | 30 | null | 2023-03-23T20:13:40 | 2016-04-27T22:25:56 | R | UTF-8 | R | false | true | 469 | rd | writeBED.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locuszoom.R
\name{writeBED}
\alias{writeBED}
\title{Write a bed file to use as a track in a LocusZoom plot}
\usage{
writeBED(x, file, track.label = "")
}
\arguments{
\item{x}{data.frame with columns "chr", "start", "end"}
\item{file}{output file name}
\item{track.label}{character string to label BED track in plot}
}
\description{
Write a bed file to use as a track in a LocusZoom plot
}
|
cb21206f73f3ea9e886bd9e52fb1dea5e1170c6b | b599e97542c6df5add3e4b53586097705b10ce74 | /man/crudat2raster.Rd | aea4b5c733f7f809d80fd863a4c8ba32b78254a5 | [] | no_license | ajijohn/NicheMapR | 09c435107b9e4aa0fd5b7982510a65e76680f1ed | 98386659036cc55df840df7339af519a766b88d2 | refs/heads/master | 2021-01-09T05:34:23.544532 | 2017-01-28T07:29:16 | 2017-01-28T07:29:16 | 80,757,456 | 0 | 1 | null | 2017-02-02T18:51:23 | 2017-02-02T18:51:23 | null | UTF-8 | R | false | true | 464 | rd | crudat2raster.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crudat2raster.R
\name{crudat2raster}
\alias{crudat2raster}
\title{crudat2raster}
\usage{
% NOTE(review): \name is crudat2raster but this usage line documents
% get.global.climate; one of the two is inconsistent -- verify against
% R/crudat2raster.R.
get.global.climate(folder)
}
\arguments{
\item{folder}{Path to the folder you want to install the global climate data in}
}
\value{
cru.raster CRU data in raster format
header Header data for the layer
}
\description{
Converts CRU integer ascii file to raster with longitude range -180 to 180
}
|
422c38c028840578090b26c67039e712e86d42a5 | cb5ce5ebc1048cba6edd5b1d1dc28f0b5c1a1df7 | /man/zzasym_compare.Rd | 63b7395de69e1398bd960e0bde136c67347c73c7 | [] | no_license | GeoBosh/Rdpack | 8ac7009f2d1838ffb99a73debd1818831bc521b8 | 1b97d7d52119204b7eef1528e9a7d5f30d4cc57c | refs/heads/master | 2023-08-31T06:56:19.147613 | 2023-08-21T15:39:43 | 2023-08-21T15:39:43 | 100,482,670 | 24 | 8 | null | 2019-07-25T10:44:22 | 2017-08-16T11:38:41 | R | UTF-8 | R | false | false | 1,719 | rd | zzasym_compare.Rd | \name{.asym_compare}
\alias{.asym_compare}
\alias{.ocompare}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Tensor comparison and asymmetric comparison between two vectors}
\description{Tensor comparison and asymmetric comparison between two
vectors.}
\usage{
.ocompare(x, y)
.asym_compare(x, y)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{
the first vector to be compared.
}
  \item{y}{
the second vector to be compared.
}
}
\details{
\code{.ocompare} (for "outer compare") returns a matrix whose (i,j)th
element is TRUE if x[i] is identical to y[j], and FALSE otherwise.
  \code{.asym_compare} calls \code{.ocompare} and interprets its result
  asymmetrically. Elements of \code{x} that are not in \code{y} are
  considered "new". Similarly, elements of \code{y} that are not in
  \code{x} are considered "removed". Elements that are in both are
  "common".
Todo: check if the above is correct or the other way round! !!!
}
\value{
For \code{.ocompare}, a matrix as described in Details.
For \code{.asym_compare} a list with indices as follows.
\item{i_new}{new elements, indices in \code{x} of elements that are not
in \code{y}.}
\item{i_removed}{removed elements, indices in \code{y} of elements that are not
in \code{x}.}
\item{i_common}{common elements, indices in \code{x} of elements that
are in both, \code{x} and \code{y}.}
}
\author{Georgi N. Boshnakov}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
##---- Should be DIRECTLY executable !! ----
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{internal}
|
94ec05b698519797ecc4b72be61c02511897b989 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/xoi/examples/ioden.Rd.R | d48aee9c39de608f65bd7c1f1e5862786021d341 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 441 | r | ioden.Rd.R | library(xoi)
# Extracted example script for xoi::ioden ("Distance between crossovers"):
# overlay the inter-crossover distance density for four parameter values
# (presumably the interference parameter -- see ?ioden) on an L = 200 map.
### Name: ioden
### Title: Distance between crossovers
### Aliases: ioden
### Keywords: distribution
### ** Examples
# Baseline curve, drawn in black with tight axes.
f1 <- ioden(1, L=200, n=201)
plot(f1, type="l", lwd=2, las=1,
     ylim=c(0,0.014), yaxs="i", xaxs="i", xlim=c(0,200))
# Remaining parameter values overlaid on the same axes.
f2 <- ioden(2.6, L=200, n=201)
lines(f2, col="blue", lwd=2)
f3 <- ioden(4.3, L=200, n=201)
lines(f3, col="red", lwd=2)
f4 <- ioden(7.6, L=200, n=201)
lines(f4, col="green", lwd=2)
|
7aa3aa8ef9b204f87dc84697442ce78b20807378 | 43bebf203c05f1b29c53716882efd80155970062 | /ui.R | f11e314092ea7ba637ac6ff393974c049926004b | [] | no_license | shengbing/shinyProject_dataproduct | 46f9ffbc3da1aba078959e1421003596c7627c1d | 71fae742dc2df3285f71dd7d1fb0dfbab7a61beb | refs/heads/master | 2020-05-19T17:19:47.141634 | 2014-11-16T05:36:28 | 2014-11-16T05:36:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,819 | r | ui.R | #ui.R
library(shiny)
# Shiny UI for a t-distribution explorer: the user picks the degrees of
# freedom and a t statistic; the matching server-side bindings (output$text1..
# output$text4, output$plot1 -- defined in the server file, not shown here)
# render the selections, the p-value, and the distribution plot.
shinyUI(fluidPage(
  titlePanel("T distribution"),
  sidebarLayout(
    # Input controls; submitButton() delays all reactivity until "Submit".
    sidebarPanel(
      helpText("Plot t-distribution with specified degree of freedom, and compute p-value for the t statistics you entered"),
      # Degrees of freedom of the t distribution (integer slider, 1..100).
      sliderInput("df",
                  label = "Degree of freedom:",
                  min = 1, max = 100, value = 1, step=1),
      # Observed t statistic whose p-value should be computed.
      numericInput("tstatistics",
                   label = "Enter t statistics (-10 to 10) :",
                   min = -10, max = 10, value = 0, step=0.0001),
      # Optionally also show the p-value under the standard normal.
      checkboxInput("checkbox1", label = "show normal p value", value = TRUE),
      submitButton("Submit")
    ),
    # Output placeholders filled by the server side.
    mainPanel(
      h5("You have selected degree of freedom:"),
      verbatimTextOutput("text1"),
      h5("You have entered t statistics:"),
      verbatimTextOutput("text2"),
      h5("p value for t distribution = "),
      verbatimTextOutput("text3"),
      textOutput("text4"),
      plotOutput('plot1')
    )
  )
))
1909929d9fb82d873e0477dfb6c22db55e640917 | b28deaaed4d864a0211dc34c0785bd4210611a52 | /Riyadh Radhi Knn.R | a25a74ed06b3a5ad11c0d2b3f4bed2337ad99e44 | [] | no_license | riyadh-radhi/The-k-nearest-neighbors_machine_learning | 6a0b2fa7704cbc6502e517af1ed311d34c5265b2 | 399c0bb617093e5a0edf8bf61bd651bb9250decd | refs/heads/master | 2020-06-08T17:45:09.777398 | 2019-06-22T20:15:36 | 2019-06-22T20:15:36 | 193,275,697 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,453 | r | Riyadh Radhi Knn.R | library(MASS)
library(dplyr)
library(ggplot2)
library(rtf)
library(scales)
library(caret)
library(class)
library(pROC)
#1) Reading data and doing the subset
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace -- avoid in
# scripts that may be sourced from another session.
rm(list=ls())
# Attrition data: keep the Attrition outcome plus 12 numeric predictors.
df <- read.csv("Attrition.csv", stringsAsFactors = F)
set.seed(2019)
dfNew <- subset(df, select = c(Attrition,Age,DistanceFromHome,HourlyRate,
                               JobSatisfaction,PerformanceRating,
                               YearsAtCompany,PercentSalaryHike,
                               TotalWorkingYears,TrainingTimesLastYear,
                               YearsInCurrentRole,YearsSinceLastPromotion,
                               YearsWithCurrManager))
#2) Standardize selected variables and dividing the data
# NOTE(review): dfNewScaled is computed but never used -- Train/Test below
# are built from the UNSCALED dfNew, so this standardization has no effect on
# the k-NN fits. Either drop the line or build the splits from it.
dfNewScaled <- scale(dfNew[,2:13])
# 70/30 split stratified on the outcome; predictors only (column 1 dropped).
dfPartition <- createDataPartition(dfNew$Attrition, p = .7,
                                  list = FALSE,
                                  times = 1)
Train <- dfNew[dfPartition,-1]
Test <- dfNew[-dfPartition,-1]
#3) Fit k-NN on the training split and evaluate on the held-out split.
# k uses the common sqrt(n) heuristic; prob = TRUE stores the proportion of
# winning-class votes as the "prob" attribute of the returned factor.
# (The duplicate test_atr assignment from the original was folded into one.)
test_atr <- as.factor(dfNew[-dfPartition,"Attrition"])
myKnn <- knn(Train, Test, dfNew[dfPartition, "Attrition"], k= floor(sqrt(nrow(Train))), prob = TRUE)
# Testing Accuracy
table(myKnn, test_atr )
confusionMatrix(myKnn,test_atr)
# BUG FIX: the original called predict(myKnn, Test, type = "prob") here.
# class::knn() returns a plain factor, which has no predict() method, so that
# call errors out and aborts the script; the vote proportions are already
# available as attr(myKnn, "prob").
mean(myKnn == test_atr)
# NOTE(review): attr(myKnn, "prob") is the winning-class vote fraction, not
# P(positive class); using it as the score for auc() is questionable -- verify.
auc(as.vector(test_atr),attributes(myKnn)$prob)
#5)
# Score one hypothetical employee with the fitted k-NN model; values are in
# raw units, consistent with the unscaled Train matrix from section 2.
new.emp <-c(Age = 40, DistanceFromHome = 3, HourlyRate = 20,JobSatisfaction = 2, 
            PerformanceRating = 3, YearsAtCompany = 8,PercentSalaryHike = 2, TotalWorkingYears = 10, 
            TrainingTimesLastYear = 1,YearsInCurrentRole = 5, YearsSinceLastPromotion = 4, 
            YearsWithCurrManager = 2)
myKnn2<-knn(Train,new.emp,dfNew[dfPartition, "Attrition"],k=floor(sqrt(nrow(Train))),prob=T)
attr(myKnn2,"prob")  # winning-class vote proportion (0.75 in the original run)
#6)
# Cross-validated tuning of k via caret (10-fold CV over a default k grid).
library(caret)
trn<-dfNew[dfPartition,]
train_control <- trainControl(method="repeatedcv", number=10)
# NOTE(review): `grid` is defined but never passed to train(); .fL/.usekernel
# look like NaiveBayes tuning parameters, not k-NN ones -- dead code.
grid <- expand.grid(.fL=c(0), .usekernel=c(FALSE))
model <- train(Attrition~., data=trn, trControl=train_control, method="knn")
# summarize results
print(model)
plot(model)
#7) Confusion Matrix
# Evaluate the caret-tuned model on the held-out rows.
test_atr_factor <- as.factor(test_atr) # I need to transfer it first to factor
predClass <- predict(model, Test) # Predict the model
confusionMatrix(predClass,test_atr_factor) # draw the confusion matrix
|
49c1a9eb6c2469105483c6d9966dc73d50779b54 | 1acbab2400012ebeec7b41487788df3bcd911095 | /r_scripts/4z_z_score_model_fit_inspection.R | d314750ee8fb3442df6c330629b941d7247479b2 | [] | no_license | FelixMay/FragFrame_1 | ac9be66ff5907f4e3215e6b41c673698fee1701d | d9fb53d8565df9a378c20bbb40ac7e550ee07b87 | refs/heads/master | 2020-12-24T06:25:03.090757 | 2020-06-02T08:41:28 | 2020-06-02T08:41:28 | 73,482,248 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,466 | r | 4z_z_score_model_fit_inspection.R | # need to execute 0_init_dirs_load_packages.R first
# code to plot visual inspection of models: trace plots, posterior predictive checks, residuals
load(paste0(path2wd, 'main_results/fragSize_z_score_ref.Rdata'))
meta <- read_delim(paste0(path2wd, 'data/new_meta_2_merge.csv'), delim =';') %>%
dplyr::rename(dataset_label = dataset_id)
frag <- read_csv(paste0(path2wd, 'intermediate_results/2_biodiv_frag_fcont_10_mabund_as_is.csv'))
frag <- left_join(frag,
meta,
by = 'dataset_label')
# change to an appropriate directory to save plots to:
plot_dir <- '~/Dropbox/1current/fragmentation_synthesis/temp/figs/visual_inspection/'
# create vector of response variable to loop through
response <- c('z_Sstd', 'z_S_PIE', 'z_Sn', 'z_S_chao', 'z_Scov')
for(i in 1:length(response)){
print(paste('model', i, 'in', length(response)))
model = paste0(response[i], '_studT_fragSize') %>% as.name()
resid <- residuals(eval(model),
type = 'pearson',
method = 'predict'
) %>%
as_tibble() %>%
bind_cols(eval(model)$data) %>%
left_join(meta,
by = 'dataset_label')
fitted <- fitted(eval(model), re_formula = NA)
predict <- predict(eval(model))
# join with the sample_design column
resid <- left_join(resid, frag %>% distinct(dataset_label, sample_design),
by = 'dataset_label')
resid$fitted <- fitted[,'Estimate']
resid$predict <- predict[,'Estimate']
png(paste0(plot_dir, '_', response[i], '_Pearson_residuals.png'), width = 240, height = 200, res = 75, units = 'mm')
par(mfrow=c(3,4), mai = c(0.5, 0.5, 0.1, 0.1))
# can we do a better job with the ones and twos? Probably not. Error distribution? Model?
with(resid,# %>% filter(Estimate<5),
plot(Estimate ~ fitted,
ylab = 'Pearson residual'));abline(h=0, lty=2)
plot(resid$fitted, as.numeric(unlist(resid[,5])),
ylab = 'z-score');abline(c(0,1), lty=2)
with(resid %>% filter(Estimate<5),
plot(Estimate ~ c.lfs,
ylab = 'Pearson residual', xlab = 'Fragment size (centred, log-scale)'));abline(h=0, lty=2)
with(resid %>% filter(Estimate < 5),
plot(Estimate ~ predict,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ dataset_label,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ biome,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ taxa,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ Matrix.category,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ time.since.fragmentation,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ continent,
ylab = 'Pearson residual'));abline(h=0, lty=2)
with(resid %>% filter(Estimate<5),
boxplot(Estimate ~ sample_design,
ylab = 'Pearson residual'));abline(h=0, lty=2)
dev.off()
}
# chain inspection
plot(z_Sstd_studT_fragSize)
plot(z_S_PIE_studT_fragSize)
plot(z_Sn_studT_fragSize)
plot(z_Scov_studT_fragSize)
plot(z_S_chao_studT_fragSize)
# posterior predictive checks
Sstd_pp <- pp_check(z_Sstd_studT_fragSize) +
labs(subtitle = expression(S[std])) +
coord_cartesian(xlim = c(-20, 20))
S_PIE_pp <- pp_check(z_S_PIE_studT_fragSize) +
labs(subtitle = expression(S[PIE])) +
coord_cartesian(xlim = c(-20, 20))
Sn_pp <- pp_check(z_Sn_studT_fragSize) +
labs(subtitle = expression(S[n])) +
coord_cartesian(xlim = c(-20, 20))
Scov_pp <- pp_check(z_Scov_studT_fragSize) +
labs(subtitle = expression(S[cov])) +
coord_cartesian(xlim = c(-20, 20))
Schao_pp <- pp_check(z_S_chao_studT_fragSize) +
labs(subtitle = expression(S[chao])) +
coord_cartesian(xlim = c(-20, 20))
cowplot::plot_grid(Sstd_pp,
S_PIE_pp,
Sn_pp,
Scov_pp,
Schao_pp,
nrow = 3, align = 'hv')
# ggsave(paste0(plot_dir, 'Posterior_predictive_z_scores.png'), width = 290, height = 200, units = 'mm')
|
af4309494a925dceed521c1193903a6b3289254a | c7ee9553b8b2e67bffeea0874db00dd1e7fdb541 | /R/ranktests.R | 98b8d68d837fc066e56a2ec38e847d051b3a01b0 | [] | no_license | cran/pdSpecEst | 2c80e79f2f5e6a64b2d017ccad56192519b3c3b8 | e8bc2a87aed91542fc804910f1556c7c24419fc1 | refs/heads/master | 2021-01-11T20:27:28.345499 | 2020-01-08T08:10:07 | 2020-01-08T08:10:07 | 79,120,097 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,181 | r | ranktests.R | #' Rank-based hypothesis tests for HPD matrices
#'
#' \code{pdRankTests} performs a number of generalized rank-based hypothesis tests in the metric space of HPD matrices equipped
#' with the affine-invariant Riemannian metric or Log-Euclidean metric for samples of HPD matrices or samples of sequences
#' (curves) of HPD matrices as described in Chapter 4 of \insertCite{C18}{pdSpecEst}.
#'
#' For samples of \eqn{(d,d)}-dimensional HPD matrices with pooled sample size \eqn{S}, the argument
#' \code{data} is a \eqn{(d,d,S)}-dimensional array of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are
#' combined along the third array dimension. For samples of sequences of \eqn{(d,d)}-dimensional HPD matrices with pooled sample
#' size \eqn{S}, the argument \code{data} is a \eqn{(d,d,n,S)}-dimensional array of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are combined along the fourth array dimension. The argument
#' \code{sample_sizes} specifies the sizes of the individual samples so that \code{sum(sample_sizes)} is equal to \code{S}. \cr
#' The available generalized rank-based testing procedures (specified by the argument \code{test}) are:
#' \describe{
#' \item{\code{"rank.sum"}}{Intrinsic Wilcoxon rank-sum test to test for homogeneity of distributions of two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"krusk.wall"}}{Intrinsic Kruskal-Wallis test to test for homogeneity of distributions of more than two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"signed.rank"}}{Intrinsic signed-rank test to test for homogeneity of distributions of independent paired or matched samples
#' of HPD matrices. The intrinsic signed-rank test is \emph{not} based on data depth induced ranks, but on a specific difference score in the Riemannian
#' manifold of HPD matrices equipped with either the affine-invariant Riemannian or Log-Euclidean metric.}
#' \item{\code{"bartels"}}{Intrinsic Bartels-von Neumann test to test for randomness (i.e., exchangeability) within a single independent sample of
#' HPD matrices or a sample of sequences of HPD matrices. The usual univariate ranks are replaced by data depth induced
#' ranks obtained with \code{\link{pdDepth}}.}
#' }
#' The function computes the generalized rank-based test statistics in the \emph{complete} metric space of HPD matrices equipped with one of the following metrics:
#' (i) the Riemannian metric (default) as detailed in e.g., \insertCite{B09}{pdSpecEst}[Chapter 6] or \insertCite{PFA05}{pdSpecEst}; or (ii) the Log-Euclidean metric,
#' the Euclidean inner product between matrix logarithms. The default Riemannian metric is invariant under congruence transformation by any invertible matrix,
#' whereas the Log-Euclidean metric is only invariant under congruence transformation by unitary matrices, see \insertCite{C18}{pdSpecEst}[Chapter 4] for more details.
#'
#' @note The intrinsic signed-rank test also provides a valid test for equivalence of spectral matrices of two multivariate stationary time
#' series based on the HPD periodogram matrices obtained via \code{\link{pdPgram}}, see \insertCite{C18}{pdSpecEst}[Chapter 4] for the details.
#'
#' @note The function does not check for positive definiteness of the input matrices, and may fail
#' if matrices are close to being singular.
#'
#' @note The data depth computations under the Riemannian metric are more involved than under the Log-Euclidean
#' metric, and may therefore result in (significantly) higher computation times.
#'
#' @param data either a \eqn{(d,d,S)}-dimensional array corresponding to an array of pooled individual samples of \eqn{(d,d)}-dimensional
#' HPD matrices, or a \eqn{(d,d,n,S)}-dimensional array corresponding to an array of pooled individual samples of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices.
#' @param sample_sizes a numeric vector specifying the individual sample sizes in the pooled sample \code{data}, such that \code{sum(sample_sizes)} is
#' equal to \code{S}. Not required for tests \code{"signed-rank"} and \code{"bartels"}, as the sample sizes are automatically determined from the input array
#' \code{data}.
#' @param test rank-based hypothesis testing procedure, one of \code{"rank.sum"}, \code{"krusk.wall"}, \code{"signed.rank"}, \code{"bartels"} explained
#' in the Details section below.
#' @param depth data depth measure used in the rank-based tests, one of \code{"gdd"}, \code{"zonoid"}, or \code{"spatial"} corresponding to the
#' geodesic distance depth, intrinsic zonoid depth and intrinsic spatial depth respectively. Defaults to \code{"gdd"}. Not required for test
#' \code{"signed.rank"}. See the documentation of the function \code{\link{pdDepth}} for additional details about the different depth measures.
#' @param metric the metric that the space of HPD matrices is equipped with, either \code{"Riemannian"} or \code{"logEuclidean"}. Defaults to
#' \code{"Riemannian"}.
#'
#' @return The function returns a list with five components:
#' \item{test }{name of the rank-based test}
#' \item{p.value }{p-value of the test}
#' \item{statistic }{computed test statistic}
#' \item{null.distr }{distribution of the test statistic under the null hypothesis}
#' \item{depth.values }{computed data depth values (if available)}
#'
#' @examples
#' ## null hypothesis is true
#' data <- replicate(100, Expm(diag(2), H.coeff(rnorm(4), inverse = TRUE)))
#' pdRankTests(data, sample_sizes = c(50, 50), test = "rank.sum") ## homogeneity 2 samples
#' pdRankTests(data, sample_sizes = rep(25, 4), test = "krusk.wall") ## homogeneity 4 samples
#' pdRankTests(data, test = "bartels") ## randomness
#'
#' ## null hypothesis is false
#' data1 <- array(c(data, replicate(50, Expm(diag(2), H.coeff(0.5 * rnorm(4), inverse = TRUE)))),
#' dim = c(2,2,150))
#' pdRankTests(data1, sample_sizes = c(100, 50), test = "rank.sum")
#' pdRankTests(data1, sample_sizes = rep(50, 3), test = "krusk.wall")
#' pdRankTests(data1, test = "bartels")
#'
#' \dontrun{
#' ## signed-rank test for equivalence of spectra of multivariate time series
#' ## ARMA(1,1) process: Example 11.4.1 in (Brockwell and Davis, 1991)
#' Phi <- array(c(0.7, 0, 0, 0.6, rep(0, 4)), dim = c(2, 2, 2))
#' Theta <- array(c(0.5, -0.7, 0.6, 0.8, rep(0, 4)), dim = c(2, 2, 2))
#' Sigma <- matrix(c(1, 0.71, 0.71, 2), nrow = 2)
#' pgram <- function(Sigma) pdPgram(rARMA(2^8, 2, Phi, Theta, Sigma)$X)$P
#'
#' ## null is true
#' pdRankTests(array(c(pgram(Sigma), pgram(Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' ## null is false
#' pdRankTests(array(c(pgram(Sigma), pgram(0.5 * Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' }
#' @seealso \code{\link{pdDepth}}, \code{\link{pdPgram}}
#'
#' @references
#' \insertAllCited{}
#'
#' @export
# Dispatch one of four depth/rank-based tests on a pooled array of HPD
# matrices, either (d,d,S) or (d,d,n,S); the statistical background is in the
# roxygen header above. Each branch validates the input dimensions, computes
# its statistic and returns a list(test, p.value, statistic, null.distr, ...).
pdRankTests <- function(data, sample_sizes, test = c("rank.sum", "krusk.wall", "signed.rank", "bartels"),
                        depth = c("gdd", "zonoid", "spatial"), metric = c("Riemannian", "logEuclidean")) {
  # Resolve defaults and validate the choice arguments via match.arg().
  if (missing(depth)) {
    depth <- "gdd"
  }
  ddim <- dim(data)
  # sample_sizes is only used by the rank-sum and Kruskal-Wallis branches;
  # the signed-rank and Bartels tests derive sizes from dim(data) instead.
  if (missing(sample_sizes)) {
    sample_sizes <- NA
  }
  metric <- match.arg(metric, c("Riemannian", "logEuclidean"))
  test <- match.arg(test, c("rank.sum", "krusk.wall", "signed.rank", "bartels"))
  depth <- match.arg(depth, c("gdd", "zonoid", "spatial"))
  # NOTE(review): "lenghts" typo lives in this user-visible error message.
  err.message <- "Incorrect input lenghts for arguments: 'samples' and/or 'sample_sizes',
  consult the function documentation for the requested inputs."
  n <- sample_sizes
  # With exactly two samples, Kruskal-Wallis reduces to the rank-sum test.
  if ((test == "krusk.wall") & (length(n) == 2)) {
    warning("Argument 'test' changed to 'rank.sum' to test for homogeneity of
            distributions of two independent samples of HPD matrices.")
    test <- "rank.sum"
  }
  ## Intrinsic rank-sum test
  if (test == "rank.sum") {
    # Require (d,d,S) or (d,d,n,S) with S == sum(n), square matrices, 2 groups.
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == sum(n))) | ((length(ddim) == 4) &
                  (ddim[4] == sum(n)))) & (ddim[1] == ddim[2]) & (length(n) == 2))) {
      stop(err.message)
    }
    # Depth-induced ranks replace the usual univariate ranks; T1 is the
    # standardized Wilcoxon rank-sum statistic of the first sample.
    dd <- pdDepth(X = data, method = depth, metric = metric)
    T1 <- (sum(rank(dd, ties.method = "random")[1:n[1]]) - n[1] * (sum(n) + 1)/2) /
      sqrt(n[1] * n[2] * (sum(n) + 1)/12)
    output <- list(test = "Intrinsic Wilcoxon rank-sum", p.value = 2 * stats::pnorm(abs(T1), lower.tail = FALSE), statistic = T1,
                   null.distr = "Standard normal distribution", depth.values = dd)
  }
  ## Intrinsic Kruskal-Wallis test
  if (test == "krusk.wall") {
    N <- sum(n)
    # Same dimension checks as above, but for more than two groups.
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == N)) | ((length(ddim) == 4) &
                  (ddim[4] == N))) & (ddim[1] == ddim[2]) & (length(n) > 2))) {
      stop(err.message)
    }
    # R_bar holds the mean depth-induced rank within each group.
    dd <- pdDepth(X = data, method = depth, metric = metric)
    R_bar <- unname(unlist(lapply(split(rank(dd, ties.method = "random"),
                                        f = rep(1:length(n), times = n)), mean)))
    T2 <- 12/(N * (N + 1)) * sum(n * (R_bar - (N + 1)/2)^2)
    # NOTE(review): pchisq is stats::-qualified in one place and unqualified
    # in the other -- inconsistent namespacing.
    output <- list(test = "Intrinsic Kruskal-Wallis", p.value = min(stats::pchisq(T2, df = 2, lower.tail = TRUE),
                   pchisq(T2, df = 2, lower.tail = FALSE)), statistic = T2,
                   null.distr = "Chi-squared distribution (df = 2)", depth.values = dd)
  }
  ## Intrinsic signed-rank test
  if (test == "signed.rank") {
    # Paired samples: one (d,d,2n) array, first n matrices vs the second n.
    if (!isTRUE((length(ddim) == 3) & (ddim[1] == ddim[2]) & (ddim[3]%%2 == 0))) {
      stop(err.message)
    }
    n <- ddim[3]/2
    d <- ddim[1]
    # Difference scores: trace of the matrix logarithm of the pairwise
    # "difference" under the chosen metric. NOTE(review): the local variable
    # `diff` shadows base::diff within this branch.
    if(metric == "Riemannian"){
      ast <- function(A, B) t(Conj(A)) %*% B %*% A
      diff <- sapply(1:n, function(i) Re(sum(diag(Logm(diag(d), ast(iSqrt(data[, , n + i]), data[, , i]))))))
    } else{
      diff <- sapply(1:n, function(i) Re(sum(diag(Logm(diag(d), data[, , n + i]) - Logm(diag(d), data[, , i])))))
    }
    # Delegate to the classical paired Wilcoxon test on the difference scores.
    T3 <- stats::wilcox.test(x = diff, y = rep(0, n), paired = TRUE, correct = TRUE)
    output <- list(test = "Intrinsic Wilcoxon signed-rank", p.value = T3$p.value, statistic = T3$statistic, null.distr = T3$method)
  }
  ## Intrinsic Bartels-von Neumann test
  if (test == "bartels") {
    # Single sample; only squareness and array rank are checked here.
    if (!isTRUE(((length(ddim) == 3) | ((length(ddim) == 4))) & (ddim[1] == ddim[2]))) {
      stop(err.message)
    }
    n <- utils::tail(ddim, 1)
    dd <- pdDepth(X = data, method = depth, metric = metric)
    # Ratio of successive squared rank differences, standardized to N(0,1).
    # NOTE(review): pnorm is unqualified here vs stats::pnorm above.
    T4 <- sum(diff(rank(dd, ties.method = "random"))^2)/(n * (n^2 - 1)/12)
    sigma <- sqrt(4 * (n - 2) * (5 * n^2 - 2 * n - 9)/(5 * n * (n + 1) * (n - 1)^2))
    output <- list(test = "Intrinsic Bartels-von Neumann", p.value = 2 * pnorm(abs((T4 - 2)/sigma), lower.tail = FALSE),
                   statistic = (T4 - 2)/sigma, null.distr = "Standard normal distribution",
                   depth.values = dd)
  }
  return(output)
}
|
6de2349f9eb88367645c1c9dd1ff412b5fcdb408 | d25ffe6711f9b621f5cf3d9e0daed2ff5dba5d8a | /R/analyzePort.R | 8046b7153bdcb01b477f3c82559d8ea775df5368 | [] | no_license | bplloyd/CoreHF | 0313e782b3f199026e3e8db7273ce9e8bef3f78a | 2aae9c6817db0c3168d104d8654d2b180e3e6ce6 | refs/heads/master | 2021-01-21T11:24:05.395756 | 2017-07-20T14:01:11 | 2017-07-20T14:01:11 | 91,341,924 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,649 | r | analyzePort.R | #' @include portCalcs.R
# Compute a set of summary statistics for a portfolio formed from the return
# series in R with the given weights (equal-weighted by default).
#
# Args:
#   R:       multivariate return series accepted by return.port().
#   weights: portfolio weights, one per column of R.
#   calcs:   names of statistic functions to evaluate on the portfolio returns.
#   ...:     extra arguments forwarded to every statistic function.
#
# Returns the statistics as produced by sapply() over `calcs`.
analyzePort = function(R, weights = rep(1/ncol(R), ncol(R)), calcs = c("return.annualized", "std.annualized"), ...){
  port_returns <- return.port(R, weights)
  extra_args <- list(...)
  sapply(calcs, function(stat_name) do.call(stat_name, c(list(port_returns), extra_args)))
}
# Evaluate many candidate portfolios: apply analyzePort() to every row of the
# weight matrix `ports`, optionally on a parallel cluster, and return the
# weights with the computed statistics bound on as extra columns.
#
# Args:
#   R:     multivariate return series, passed through to analyzePort().
#   ports: matrix/data.frame of portfolio weights, one portfolio per row.
#   calcs: names of statistic functions evaluated for each portfolio.
#   par:   if TRUE, fan the rows out over a local cluster using all cores.
#   ...:   accepted but NOT forwarded to analyzePort() -- NOTE(review):
#          likely an oversight; extra statistic arguments are silently dropped.
analyzePorts = function(R, ports, calcs = c("return.annualized", "std.annualized"), par = T, ...){
  if(par){
    ncore = parallel::detectCores()
    cl = parallel::makeCluster(ncore)
    # Stage everything the workers need in a throwaway environment, including
    # the statistic functions themselves (looked up by name via match.fun).
    cl.env = new.env()
    for(f in calcs){
      assign(x = f, value = match.fun(f), envir = cl.env)
    }
    assign(x = "R", value = R, envir = cl.env)
    assign(x = "ports", value = ports, envir = cl.env)
    assign(x = "calcs", value = calcs, envir = cl.env)
    assign(x = "analyzePort", value = analyzePort, cl.env)
    assign(x = "return.port", value = return.port, cl.env)
    varlist = c("R", "ports", "calcs", "analyzePort", "return.port")
    for(f in calcs){
      varlist = c(varlist, f)
    }
    parallel::clusterExport(cl, varlist = varlist, envir = cl.env)
    # One task per portfolio row; t() turns parSapply's column-per-portfolio
    # result into one row per portfolio.
    res = t(parallel::parSapply(cl = cl,
                              X = 1:nrow(ports),
                              FUN = function(r)analyzePort(R=R, weights=ports[r, ], calcs=calcs)))
    # NOTE(review): stopCluster() is not protected by on.exit(), so an error
    # inside parSapply() leaks the cluster. par = T should also be TRUE.
    parallel::stopCluster(cl)
    rm(cl.env)
  } else {
    res = t(sapply(1:nrow(ports),
               FUN = function(r)analyzePort(R=R, weights = ports[r, ], calcs = calcs)))
  }
  cbind(ports, res)
}
# Sort an analysis table by each metric column, once per metric.
#
# Args:
#   assets:   character vector of asset/weight column names to exclude from
#             the metric columns.
#   analysis: data.frame of portfolio weights plus computed metrics, one row
#             per portfolio (e.g. the output of analyzePorts()).
#   funcs:    metric columns to sort by; defaults to every column of
#             `analysis` that is not an asset column.
#
# Returns a named list, one element per metric, each holding `analysis`
# ordered by that metric in decreasing order.
sortAnalysis = function(assets, analysis,
                        funcs = setdiff(colnames(analysis), assets)){
  # BUG FIX: the old default, colnames(analysis[, -which(colnames(analysis)
  # %in% assets)]), selected ZERO columns whenever none of `assets` matched,
  # because -integer(0) drops everything. setdiff() is equivalent when assets
  # are present and degrades gracefully when they are not.
  sorted = lapply(funcs,
                  function(f) analysis[order(analysis[, f], decreasing = TRUE), ])
  names(sorted) = funcs
  sorted
}
|
a1fdfc6ecff879c7921812b9ac360810d67db170 | 57da4c759470db56761c7e35b86d93862d31dc87 | /exploredata2.R | 9b97d9e6082cbabd38f6a7ba0dcc610da3b9ec84 | [] | no_license | monita211/ExData_Project2 | 0e34da19ffbac2fc05ad016ed9e95f50ff49394c | e5c1a21af9f3c4663ac03be276f5e967f6b17a1e | refs/heads/master | 2021-01-15T21:45:09.083993 | 2015-04-24T14:33:48 | 2015-04-24T14:33:48 | 34,523,279 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,767 | r | exploredata2.R | #load data files
# Load the NEI emissions summary and the source-classification (SCC) table.
NEI <- readRDS("exploredataproject2/summarySCC_PM25.rds")
SCC <- readRDS("exploredataproject2/Source_Classification_Code.rds")
#load packages
# BUG FIX: this line was `load(ggplot2)`, which tries to restore an .RData
# workspace from a file whose name is held in a (nonexistent) object called
# `ggplot2` and errors; library() is the correct way to attach the package.
library(ggplot2)
#Assignment
#The overall goal of this assignment is to explore the National Emissions Inventory database and
#see what it say about fine particulate matter pollution in the United states over the 10-year
#period 1999–2008. You may use any R package you want to support your analysis.
#Questions: You must address the following questions and tasks in your exploratory analysis. For each question/task
#you will need to make a single plot. Unless specified, you can use any plotting system in R to make your plot.
#ONE: Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? Using the base plotting system,
#make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
# Sum emissions over all sources within each survey year, then plot the trend.
totalByYear <- aggregate(Emissions~year,NEI,sum)
with(totalByYear, plot(year, Emissions, type = 'b', pch = 19, col = "red", main = "Total Emissions in the United States, 1999-2008",xlab = "Year", ylab = "Emissions (PM2.5)"))
totalByYear
#TWO: Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
# Restrict to Baltimore City rows, then repeat the per-year aggregation.
bmoreNEI <- NEI[NEI$fips == "24510", ]
bmoreTotalByYear <- aggregate(Emissions~year,bmoreNEI,sum)
with(bmoreTotalByYear, plot(year, col = "red", type = 'b', pch = 19, Emissions, main = "Total Emissions in Baltimore City, 1999-2008", xlab = "Year", ylab = "Emissions (PM2.5)"))
bmoreTotalByYear
#THREE: Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable, which of these
#four sources have seen decreases in emissions from 1999–2008 for Baltimore City? Which have seen increases in
#emissions from 1999–2008? Use the ggplot2 plotting system to make a plot answer this question.
##in baltimore city, emissions totals for four years modified by type
bmoreTotalByYearType <- aggregate(Emissions~year + type,bmoreNEI,sum)
g <- ggplot(bmoreTotalByYearType, aes(year, Emissions ))
g + geom_point(alpha = 1/3, col = "red", size = 3) + geom_line(col = 'red') + facet_grid(.~type) + labs(title = "Emission Totals by Type for 1999-2008 (Baltimore City)") + labs(x = "Year") + labs(y = expression("Emissions Total " * PM[2.5])) + theme_grey(base_family = "Avenir")
#FOUR: Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
#join NEI and SCC, form new dataframe
SCC1 <- subset(SCC, select = c("SCC","EI.Sector"))
mergedData <- merge(NEI, SCC1)
##subset for appropriate levels
levels(mergedData$EI.Sector) #Fuel Comb- Comm/Institutional - Coal(13), Fuel Comb - Electric Generation - Coal(18), and Fuel Comb - Industrial Boilers, ICEs - Coal(23)
# NOTE(review): "Fuel Comb- Comm/Institutional - Coal" (no space after
# "Comb") must match the EI.Sector level printed above *exactly*; if the
# real level is "Fuel Comb - Comm/Institutional - Coal", those rows are
# silently dropped from the coal subset -- confirm against levels() output.
mergedData$coal <- mergedData$EI.Sector %in% c("Fuel Comb- Comm/Institutional - Coal","Fuel Comb - Electric Generation - Coal","Fuel Comb - Industrial Boilers, ICEs - Coal")
# `coal` is logical; `== "TRUE"` works only through implicit coercion
# (`mergedData$coal` alone would be the idiomatic filter).
coalData <- mergedData[mergedData$coal == "TRUE", ]
##aggregate by year and plot
coalByYear <- aggregate(Emissions~year, coalData, sum)
with(coalByYear, plot(year, Emissions, type = 'b', pch = 19, col = "red", main = "Total Coal Combustion-Related Emissions in the United States, 1999-2008", xlab = "Year", ylab = "Emissions (PM2.5)"))
#FIVE: How have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City?
##use merged dataframe formed in previous problem but subset for motor vehicle sources
# "Motor vehicle" here is taken broadly: all Mobile EI.Sector levels.
mergedData$motor.vehicles <- mergedData$EI.Sector %in% c("Mobile - Aircraft","Mobile - Commercial Marine Vessels","Mobile - Locomotives","Mobile - Non-Road Equipment - Diesel",
                                                         "Mobile - Non-Road Equipment - Gasoline","Mobile - Non-Road Equipment - Other",
                                                         "Mobile - On-Road Diesel Heavy Duty Vehicles","Mobile - On-Road Diesel Light Duty Vehicles",
                                                         "Mobile - On-Road Gasoline Heavy Duty Vehicles","Mobile - On-Road Gasoline Light Duty Vehicles")
motorVehicleData <- mergedData[mergedData$motor.vehicles == "TRUE", ]
#subset just baltimore
bmoreMotorVehicleData <- motorVehicleData[motorVehicleData$fips == "24510",]
##aggregate and plot
bmoreMotorVehicleByYear <- aggregate(Emissions~year, bmoreMotorVehicleData, sum)
with(bmoreMotorVehicleByYear, plot(year, col = "red", Emissions, type = 'b', pch = 19, main = "Total Motor Vehicle Emissions in Baltimore City, 1999-2008", xlab = "Year", ylab = "Emissions (PM2.5)"))
#SIX: Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in
#Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
#use joined dataframe, motorVehicleData
# BUG FIX: the original used `fips == c("06037","24510")`, which recycles
# the two-element vector against the column and silently keeps only rows
# where the value happens to land on the matching recycled position
# (roughly half of the intended rows). `%in%` is the correct set test.
bmoreLAMotorVehicle <- motorVehicleData[motorVehicleData$fips %in% c("06037","24510"),]
#relabel levels
city_names <- list("06037" = "LA","24510" = "Baltimore City")
# NOTE(review): this labeller is defined but never applied to the plot
# below, so the legend still shows raw fips codes.
city_labeller <- function(variable,value){
  return(city_names[value])
}
bmoreLAMotorVehicleByYear <- aggregate(Emissions~year + fips, bmoreLAMotorVehicle, sum)
g2 <- ggplot(bmoreLAMotorVehicleByYear, aes(year, Emissions ))
g2 + geom_point(alpha = 1, aes(colour = factor(fips))) + geom_line(alpha = 1, aes(colour = factor(fips))) +
  labs(title = "Motor Vehicle Emissions for 1999-2008 (LA vs Baltimore City)") + labs(x = "Year") +
  labs(y = expression("Emissions Total " * PM[2.5])) + theme_grey(base_family = "Avenir") +
  scale_fill_discrete(name="Cities",breaks=c("06037", "24510"),labels=c("LA", "Baltimore City"))
|
ac6cc989b93001b79bfe2ef3ab2766792be58f52 | 154309a5a3ef4b595a0852f2eeaaeb44f270478b | /Prediction/modello_hierarchical_biv.R | a9769114c43477246e193858442a41b833a50c6d | [] | no_license | araiari/OECD-Bias-in-students-evaluation | e4dba46e2a96d6b64513cd3dc885f346794605b3 | be725d6946b1cfcfce752f9300554a23a173ed63 | refs/heads/master | 2023-01-11T00:00:31.673047 | 2020-11-17T14:50:03 | 2020-11-17T14:50:03 | 227,797,349 | 2 | 1 | null | null | null | null | WINDOWS-1252 | R | false | false | 3,090 | r | modello_hierarchical_biv.R |
# Start from a clean workspace and move to the project directory.
# NOTE(review): rm(list = ls()) wipes the caller's workspace and setwd()
# hard-codes a user-specific path -- acceptable for a personal analysis
# script, but avoid both in shared/reusable code.
rm(list = ls())
setwd("~/aa POLIMI/AA_Bayesiana/Progetto Pisa/predittiva")
library(rjags)
library(coda)
# for plots
library(ggplot2)
library(MASS)
library(tidyr)
library(dplyr)
library(purrr)
library(ggsci)
library(plot.matrix)
require(gplots)
require(ggpubr)
# Fix the RNG seed so the MCMC initialisation is reproducible.
set.seed(49)
# Read the pre-processed bivariate dataset (student- and school-level data).
dati <- read.csv(file='~/aa POLIMI/AA_Bayesiana/Progetto Pisa/dati_sistemati_E_sistemazione_dati/dati_bivariato.csv')
names(dati)
# Keep only the columns used by this model (positions are specific to this
# file layout: id/group, covariates and the two response columns).
dati <- dati[,c(1,2,16,20,25,21,28,23,24,15,31,32,22,10,41,42)]
dati <- na.omit(dati)
names(dati)
N <- dim(dati)[1] # number of students (observations)
Y <- as.matrix(dati[,15:16]) # bivariate response -- TODO confirm these are the two evaluation columns
g <- as.factor(dati[,2])
M <- length(levels(g)) # number of schools (grouping levels)
# Recode the school factor to consecutive integers 1..M for JAGS indexing.
levels(g) <- c(1:M)
g <- as.numeric(g)
ZZ <- as.matrix(dati[,14]) # school-level covariate (renamed ZZ)
#ZZ <- as.numeric(ZZ[1:N,1])
XX <- as.matrix(dati[,3:13]) # student-level covariates
p = 1 # number of school-level covariates
v = dim(XX)[2] # number of student-level covariates
N = dim(dati)[1]
# Constants reused in the JAGS data list and in the initial values below.
zero2 <- c(0,0)
cinc2 <- colMeans(Y)
zerop <- rep(0,p)
Id <- diag(c(1,1))
Idp <- diag(rep(1,p))
beta_in <- matrix(0,p,2)
# Data passed to JAGS: dimensions, responses, group index, covariate
# matrices and the fixed hyper-parameter matrices defined above.
data_JAGS_EN <-list(N = N,
                    v = v,
                    M = M,
                    Y = Y,
                    g = as.vector(g),
                    X = as.matrix(XX),
                    Z = as.matrix(ZZ),
                    Id = Id,
                    cinc2 = cinc2,
                    zerop = zerop,
                    Idp = Idp)
# Initial values for the single MCMC chain. The commented-out entries are
# alternative parameterisations kept from earlier versions of the model;
# the fixed .RNG settings make the chain reproducible.
inits = function() {
  list(#theta0 = 0.0,
    #theta = rep(0,p),
    #gamma0 = rep(0,M),
    #gamma = matrix(0,M,v),
    #a1 = 10,
    #a2 = 10,
    beta1=zerop, beta2=zerop,
    #beta0=zero2,
    sigma1=60,sigma2=60, rho=0.65,
    tau2_gamma0=5,tau2_theta1=0.1,tau2_theta2=0.1,
    #a normal exp(0.1), a big exp(0.01), a small exp(10)
    .RNG.seed = 321, .RNG.name = 'base::Wichmann-Hill')
}
# Compile the hierarchical bivariate model and run 5000 adaptation steps.
model = jags.model("modello_hierarchical_biv.bug",
                   data = data_JAGS_EN,
                   n.adapt = 5000,
                   inits = inits,
                   n.chains = 1)
# 25000 sampling iterations, keeping every 10th draw.
nit <- 25000
thin <-10
# Earlier monitored-parameter sets kept for reference:
#param <- c("theta0", "theta", "gamma",'a1','a2')
#param <- c("theta0", "theta",'a1','a2')
#param <- c("theta0", "theta",'a1','a2','a')
#param <- c("theta0", "theta",'a1','a2','prec_gamma')
#param <- c( "beta",'a1','a2','prec_gamma')
#param <- c( "beta0","beta1","beta2",'sigma1','rho','gamma0')
# NOTE(review): 'tau2_gamma0 ' below contains a trailing space inside the
# string -- JAGS looks nodes up by exact name, so confirm this monitor is
# actually found and remove the stray space if not.
param <- c( "beta1","beta2",'sigma1','sigma2','rho','gamma0','gamma1','gamma2','tau2_gamma0 ','tau2_gamma1','tau2_gamma2')
#param <- c( 'beta0',"beta1","beta2",'sigma1','sigma2','rho')
#param <- c( "theta",'a1','a2','prec_gamma','gamma')
# Draw posterior samples for the monitored parameters.
output <- coda.samples(model = model,
                       variable.names = param,
                       n.iter = nit,
                       thin = thin)
# Persist the whole workspace and the coda output for later analysis.
save.image(file='mod5.Rdata')
save(output,file='modello_pred_output')
#x11()
#plot(output,ask=T)
#dev.off()
#plot(output[[1]][,7200:7277],ask=T)
#quantili=as.matrix(summary(output)$quantiles)
#CI_beta=quantili[1:8,c(1,5)]
|
79b3fea787675e335df465d215b2f94908f8e636 | b65abc4f3b8395a8cd1ad037055f1dfc4693cb94 | /R/TPEA.R | 13ef14049c7ec785001874d521570844aacb98fc | [] | no_license | cran/TPEA | 6aeabcb2d4fb9bb493a3804335f37b4474801346 | a5c1c4273bac35b2ef8ab0ff1f0367ac48b26f70 | refs/heads/master | 2021-01-18T17:40:12.141616 | 2017-06-25T14:42:32 | 2017-06-25T14:42:32 | 71,983,452 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,381 | r | TPEA.R | TPEA <-
function(DEGs,scores,n,FDR_method){
# Topology-based Pathway Enrichment Analysis.
# For each of the 109 pathways, builds a null distribution of the AUEC
# statistic from `n` random gene sets (each the size of the observed
# DEG/background overlap), then derives an empirical p-value, a normalized
# AUEC (z-score against the null) and an FDR-adjusted p-value per pathway.
#
# Args:
#   DEGs:       data frame whose first column holds the differentially
#               expressed gene identifiers.
#   scores:     per-pathway table; column 1 = pathway score, column 2 =
#               observed AUEC (109 rows, matching `pathway_names`).
#   n:          number of random permutations for the null distribution.
#   FDR_method: correction method name forwarded to p.adjust().
#
# Returns a character matrix (one row per pathway, sorted by p-value) with
# pathway names, score, p-value, normalized AUEC and FDR.
#
# Load the package data sets into a private environment so they do not
# clobber objects in the caller's workspace.
# NOTE(review): pkgEnv is freshly created on every call, so both exists()
# checks are always FALSE; if either were ever TRUE, da1/da2 would be
# undefined below.
pkgEnv <- new.env(parent=emptyenv())
if(!exists("all_genes", pkgEnv)) {
data("all_genes", package="TPEA", envir=pkgEnv)
da1<-pkgEnv[["all_genes"]]
}
if(!exists("pathway_names", pkgEnv)) {
data("pathway_names", package="TPEA", envir=pkgEnv)
da2<-pkgEnv[["pathway_names"]]
}
all_genes<-da1;
pathway_names<-da2;
number<-n;
# Column 1 is a placeholder that is overwritten with the observed AUEC
# after the permutation loop; columns 2..(n+1) hold the null AUECs.
all_rand_area<-matrix(0,109,1);
for(i in 1:number){
# Random gene set of the same size as the DEG/background overlap.
DEG1<-intersect(DEGs[,1],all_genes[,1]);
DEG1<-as.data.frame(DEG1);
num<-sample(1:nrow(all_genes),size=nrow(DEG1));
rand_genes<-all_genes[num,1];
rand_genes<-as.data.frame(rand_genes);
rand_area<-AUEC(rand_genes);
rand_area[,2]<-as.matrix(rand_area[,2]);
# NOTE(review): growing all_rand_area with cbind() on every iteration is
# O(n^2) in copies; preallocating matrix(0, 109, n + 1) would be cheaper.
all_rand_area<-cbind(all_rand_area,rand_area[,2]);
print(i);
}
all_rand_area[,1]<-scores[,2];
p_value<-data.frame();
N_AUEC<-data.frame();
for(j in 1:109){
# Empirical p-value: fraction of null AUECs >= the observed AUEC.
p<-length(which(all_rand_area[j,-1]>=all_rand_area[j,1]))/number;
p_value<-rbind(p_value,p);
# Normalized AUEC: z-score of the observed value against the null.
nor_area<-(all_rand_area[j,1]-mean(all_rand_area[j,-1]))/sd(all_rand_area[j,-1]);
N_AUEC<-rbind(N_AUEC,nor_area);
}
result1<-cbind(pathway_names,scores[,1],p_value,N_AUEC);
p_v<-as.matrix(p_value);
FDR<-p.adjust(p_v,method=FDR_method,n=109);
FDR<-as.matrix(FDR);
colnames(FDR)<-c("FDR");
# as.matrix() on the mixed data frame coerces everything to character,
# so the p-value sort below compares the string form of column 4.
result1<-as.matrix(result1);
result2<-cbind(result1,FDR);
result2<-result2[order(result2[,4]),];
return(result2);
}
|
455591d8df2ea95a1cbe4afd916f9096b8ac2d9c | 8831241719a4fa9ded7fd627ab6df8af8512ded8 | /ps1_q3.R | 7b90ae0c4b5ca3cee211262b6e8c1d1a53598558 | [] | no_license | RuiruiZh/R-Stats506 | 4eab68eced329bbd6b56e60083e54d72f13aa1e6 | 1ab273e30591a8cd009ba84da9ae13e30ccd11bb | refs/heads/master | 2020-04-12T17:34:48.078157 | 2018-12-22T19:02:15 | 2018-12-22T19:02:15 | 162,649,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,711 | r | ps1_q3.R | ## STATS506 Fall 2018
## Problem Set 1 Q3
##
## This R script documents the analysis on the dataset about:
## RECS 2015 data
##
##
## Author: Ruirui Zhang, ruiruiz@umich.edu
## Updated: October 1, 2018 - Last modified date
#! Limit lines to 80 characters with rare exceptions.
# 80: -------------------------------------------------------------------------
# Remove objects from current environment
# NOTE(review): rm(list = ls()) and the machine-specific setwd() below make
# this script non-portable; fine for a homework script, avoid elsewhere.
rm(list = ls())
ls()
# libraries: ------------------------------------------------------------------
# Install Packages ------------------------------------------------------------
#install.packages('dplyr')
# Load Libraries --------------------------------------------------------------
# Install-on-demand pattern: attach each package, installing it first when
# the initial require() fails.
if (!require("dplyr")) {
  install.packages('dplyr')
  library("dplyr")
}
if (!require("plyr")) {
  install.packages('plyr')
  library("plyr")
}
if (!require("ggplot2")) {
  install.packages('ggplot2')
  library("ggplot2")
}
# Load Data--------------------------------------------------------------------
setwd("C:/Users/zhang/Desktop/UMich F18/STATS 506/HW1")
# RECS 2015 public microdata (one row per sampled home).
recs15_Public <- read.csv(file="./recs2015_public_v3.csv", header=TRUE, sep=",")
###############################################################################
# Step 0a: a function to calculate BRR 95CI
###############################################################################
# Point estimates with 95% confidence intervals from balanced repeated
# replication (BRR) with Fay's adjustment.
#
# Args:
#   theta_hat: vector of point estimates (one per division/row).
#   theta_r:   matrix of replicate estimates, one row per entry of
#              theta_hat and one column per BRR replicate weight.
#
# Variance formula (Fay coefficient k = 0.5):
#   Var(theta) = 1 / (R * (1 - k)^2) * sum_r (theta_r - theta_hat)^2
#
# Returns a matrix with columns theta_hat, CI_L, CI_U and one row per
# estimate, labelled with the division names.
#
# NOTE(review): relies on the global `label_DIVISION` having one entry per
# row of the input -- consider passing the labels as an argument.
table_BRR_95CI = function (theta_hat, theta_r){
  R = ncol(theta_r)
  Fay_coef = 0.5
  # `theta_r - theta_hat` recycles the estimate vector down the rows, so
  # row i of every column has theta_hat[i] subtracted. This replaces the
  # original per-row loop, which also grew its SE vector with c() on every
  # iteration.
  theta_SE_by_div = sqrt(rowSums((theta_r - theta_hat)^2) / (R * (1 - Fay_coef)^2))
  # combine theta_hat and CI95 (normal-approximation interval)
  result = cbind(
    theta_hat = theta_hat,
    CI_L = theta_hat - 1.96 * theta_SE_by_div,
    CI_U = theta_hat + 1.96 * theta_SE_by_div)
  row.names(result) = label_DIVISION
  result
}
###############################################################################
# Step 0b: NWEIGHT, BRRweight
###############################################################################
num_obs = nrow(recs15_Public)
weight_sum = sum(recs15_Public$NWEIGHT)
# select "weight" columns
# Final weight (NWEIGHT, column 475) plus the 96 BRR replicate weights
# (columns 476-571), as a plain unnamed numeric matrix.
# The original built this with cbind() in a loop and divided each column by
# `sum(recs15_Public[,i])*0 + 1` -- i.e. by 1 -- a no-op that cost 97 extra
# column sums; both the loop and the dead normalisation are dropped here.
recs15_weight = unname(as.matrix(recs15_Public[1:num_obs, 475:571]))
# Total NWEIGHT per unique value of column 1 (1-row matrix, sorted order),
# replacing the original grow-by-cbind loop. Only referenced by
# commented-out code later; kept for parity.
# NOTE(review): the original iterated over unique(recs15_Public[,1]) --
# confirm column 1 really is DIVISION in this CSV; if it is DOEID this
# produces one (mostly zero) entry per household id.
weight_div = matrix(
  vapply(sort(unique(recs15_Public[, 1])),
         function(i) sum(recs15_Public[recs15_Public$DIVISION == i, 475]),
         numeric(1)),
  nrow = 1)
###############################################################################
# Step 0C: Division name
###############################################################################
# Human-readable labels for DIVISION codes 1-10 (sorted order); consumed by
# table_BRR_95CI() as row names.
label_DIVISION = c(
  "New England",
  "Middle Atlantic",
  "East North Central",
  "West North Central",
  "South Atlantic",
  "East South Central",
  "West South Central",
  "Mountain North",
  "Mountain South",
  "Pacific")
###############################################################################
# Question 3 (a) --------------------------------------------------------------
# What percent of homes have stucco construction as
# the major outside wall material
# within each division?
###############################################################################
# Step 1: calculate theta_hat and theta_r using NWEIGHT & BRRweight
# theta = percent of homes have stucco construction as
# the major outside wall material
# Column 1: division code; columns 2-98: each home's share of the total
# (replicate) weight if its major wall type is stucco (WALLTYPE == 4).
# Note: `*1+1*0` is a no-op left over from earlier experimentation.
recs15_Public_OW4 = cbind(
  recs15_Public$DIVISION,
  recs15_weight /weight_sum * ((as.numeric(recs15_Public$WALLTYPE == 4))*1+1*0)
)
# Step 2: group theta_hat, theta_r: row number = division
# Ratio estimator per division: weighted stucco total / weighted home total
# (the weight_sum factors cancel; iterating over sorted division codes
# keeps the rows aligned with label_DIVISION).
temp_recs15_Public_OW4_pct_byDiv = c()
for (i in sort(unique(recs15_Public_OW4[,1]))){
  temp_recs15_Public_OW4_pct_byDiv = rbind(
    temp_recs15_Public_OW4_pct_byDiv,
    colSums(as.numeric(recs15_Public_OW4[,1] == i) * recs15_Public_OW4[,2:98])*
      weight_sum/
      colSums(as.numeric(recs15_Public_OW4[,1] == i)*recs15_weight))
}
recs15_Public_OW4_pct_byDiv = temp_recs15_Public_OW4_pct_byDiv
# recs15_Public_OW4_pct_byDiv = c()
# recs15_Public_OW4_pct_byDiv =
#   for (i in unique(recs15_Public_OW4[,1])){
#     temp_recs15_Public_OW4_pct_byDiv[i,] * weight_sum / weight_div[i]
#   }
# Step 3: generate summary table by division
# Column 1 (NWEIGHT) is the point estimate; columns 2-97 are the BRR
# replicate estimates used for the 95% confidence interval.
recs15_Public_OW4_theta_hat = recs15_Public_OW4_pct_byDiv[,1]
recs15_Public_OW4_theta_r = recs15_Public_OW4_pct_byDiv[,-1]
recs15_Public_OW4_summary =
  table_BRR_95CI(recs15_Public_OW4_theta_hat,
                 recs15_Public_OW4_theta_r)
knitr::kable(recs15_Public_OW4_summary,
             digits=3,
             row.names = NA)
# Step z: give plot
# Bar chart with 95% BRR confidence intervals per division.
# NOTE(review): the scale_fill_hue() legend settings ("Orange juice", ...)
# are leftovers from an unrelated example and do not apply to this plot.
give_plot_Q3a =
  ggplot(data = NULL,aes(x=label_DIVISION,
                         y=recs15_Public_OW4_summary[,1],
                         fill = "pink")) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=recs15_Public_OW4_summary[,2],
                    ymax=recs15_Public_OW4_summary[,3]),
                width=.2, # Width of the error bars
                position=position_dodge(.9))+
  ggtitle("Percentage of homes have stucco construction
          as the major outside wall material") +
  xlab("Division") +
  ylab("Percentage of homes") +
  scale_fill_hue(name="Supplement type", # Legend label, use darker colors
                 breaks=c("OJ", "VC"),
                 labels=c("Orange juice", "Ascorbic acid"))
###############################################################################
# Question 3 (b) --------------------------------------------------------------
# Calculate the average total electricity usage in kilowatt hours
# in each division.
# Answer the same question stratified by urban and rural status.
###############################################################################
summary(recs15_Public$KWH)
# Step 1: calculate weighted electricity useage using NWEIGHT & BRRweight
# Column 1: division; columns 2-98: weight * KWH per home. The rural/urban
# variants zero out homes outside the stratum (UATYP10: "R" rural, "U"
# urban area; "C" urban cluster belongs to neither subset here).
recs15_Public_electri = cbind(
  recs15_Public$DIVISION,
  recs15_weight * recs15_Public$KWH
)
recs15_Public_electri_rural = cbind(
  recs15_Public$DIVISION,
  recs15_weight * recs15_Public$KWH *
    (recs15_Public$UATYP10 == "R")
)
recs15_Public_electri_urban = cbind(
  recs15_Public$DIVISION,
  recs15_weight * recs15_Public$KWH *
    (recs15_Public$UATYP10 == "U")
)
# Step 2: calculate theta_hat, theta_r: row number = division
# theta = average total electricity usage in kilowatt hours by division
# Each entry is a weighted mean: sum(w*KWH in cell) / sum(w in cell),
# computed once per weight column (NWEIGHT + 96 BRR replicates).
recs15_Public_electri_avg_byDiv = c()
recs15_Public_electri_rural_avg_byDiv = c()
recs15_Public_electri_urban_avg_byDiv = c()
for (i in sort(unique(recs15_Public_electri[,1]))){
  # !all status
  recs15_Public_electri_avg_byDiv = rbind(
    recs15_Public_electri_avg_byDiv,
    # avg total electricity usage in division i
    colSums(as.numeric(recs15_Public_electri[,1] == i) *
              recs15_Public_electri[,2:98])/
      colSums(as.numeric(recs15_Public_electri[,1] == i)*
                recs15_weight))
  # !rural status
  recs15_Public_electri_rural_avg_byDiv = rbind(
    recs15_Public_electri_rural_avg_byDiv,
    # avg total electricity usage in division i
    colSums(as.numeric(recs15_Public_electri_rural[,1] == i) *
              recs15_Public_electri_rural[,2:98])/
      colSums(as.numeric(recs15_Public_electri_rural[,1] == i) *
                as.numeric(recs15_Public$UATYP10 == "R")*
                recs15_weight))
  # !urban status
  recs15_Public_electri_urban_avg_byDiv = rbind(
    recs15_Public_electri_urban_avg_byDiv,
    # avg total electricity usage in division i
    colSums(as.numeric(recs15_Public_electri_urban[,1] == i) *
              recs15_Public_electri_urban[,2:98])/
      colSums(as.numeric(recs15_Public_electri_urban[,1] == i) *
                as.numeric(recs15_Public$UATYP10 == "U")*
                recs15_weight))
}
# Step 3: generate summary table by division
# Split each matrix into the point estimate (column 1, NWEIGHT) and the
# BRR replicate estimates (remaining columns) for the CI computation.
recs15_Public_electr_theta_hat = recs15_Public_electri_avg_byDiv[,1]
recs15_Public_electr_theta_r = recs15_Public_electri_avg_byDiv[,-1]
recs15_Public_electr_rural_theta_hat =
  recs15_Public_electri_rural_avg_byDiv[,1]
recs15_Public_electr_rural_theta_r =
  recs15_Public_electri_rural_avg_byDiv[,-1]
recs15_Public_electr_urban_theta_hat =
  recs15_Public_electri_urban_avg_byDiv[,1]
recs15_Public_electr_urban_theta_r =
  recs15_Public_electri_urban_avg_byDiv[,-1]
recs15_Public_electr_summary =
  table_BRR_95CI(recs15_Public_electr_theta_hat,
                 recs15_Public_electr_theta_r)
recs15_Public_electr_rural_summary =
  table_BRR_95CI(recs15_Public_electr_rural_theta_hat,
                 recs15_Public_electr_rural_theta_r)
recs15_Public_electr_urban_summary =
  table_BRR_95CI(recs15_Public_electr_urban_theta_hat,
                 recs15_Public_electr_urban_theta_r)
# cbind(recs15_Public_electr_summary[,1],
#       recs15_Public_electr_rural_summary[,1],
#       recs15_Public_electr_urban_summary[,1])
# step z: give plot
# One bar chart per stratum (all / rural / urban) with 95% BRR intervals.
# NOTE(review): the scale_fill_hue() legend settings ("Orange juice", ...)
# are leftovers from an unrelated example and do not apply to these plots.
give_plot_Q3b_all =
  ggplot(data = NULL,aes(x=label_DIVISION,
                         y=recs15_Public_electr_summary[,1],
                         fill = "pink")) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=recs15_Public_electr_summary[,2],
                    ymax=recs15_Public_electr_summary[,3]),
                width=.2, # Width of the error bars
                position=position_dodge(.9))+
  ggtitle("Average total electricity usage in kilowatt hours (All)") +
  xlab("Division") +
  ylab("Average total electricity usage in kilowatt hours") +
  scale_fill_hue(name="Supplement type", # Legend label
                 breaks=c("OJ", "VC"),
                 labels=c("Orange juice", "Ascorbic acid"))
give_plot_Q3b_rural =
  ggplot(data = NULL,aes(x=label_DIVISION,
                         y=recs15_Public_electr_rural_summary[,1],
                         fill = "pink")) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=recs15_Public_electr_rural_summary[,2],
                    ymax=recs15_Public_electr_rural_summary[,3]),
                width=.2, # Width of the error bars
                position=position_dodge(.9))+
  ggtitle("Average total electricity usage in kilowatt hours
          in rural areas") +
  xlab("Division") +
  ylab("Average total electricity usage in kilowatt hours") +
  scale_fill_hue(name="Supplement type", # Legend label, use darker colors
                 breaks=c("OJ", "VC"),
                 labels=c("Orange juice", "Ascorbic acid"))
give_plot_Q3b_urban =
  ggplot(data = NULL,aes(x=label_DIVISION,
                         y=recs15_Public_electr_urban_summary[,1],
                         fill = "pink")) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=recs15_Public_electr_urban_summary[,2],
                    ymax=recs15_Public_electr_urban_summary[,3]),
                width=.2, # Width of the error bars
                position=position_dodge(.9))+
  ggtitle("Average total electricity usage in kilowatt hours
          in urban areas") +
  xlab("Division") +
  ylab("Average total electricity usage in kilowatt hours") +
  scale_fill_hue(name="Supplement type", # Legend label, use darker colors
                 breaks=c("OJ", "VC"),
                 labels=c("Orange juice", "Ascorbic acid"))
###############################################################################
# Question 3 (c) --------------------------------------------------------------
# Which division has the largest disparity between urban and rural ares
# in terms of the proportion of homes with internet access.
###############################################################################
# Step 1: calculate percent of homes have internet access
# weighted by NWEIGHT & BRRweight
# BUG FIX: the original cbind()-ed UATYP10 into the numeric matrix and then
# compared the resulting factor codes with `== 3` (urban) and `== 1`.
# Under alphabetical factor levels ("C", "R", "U") code 1 is "C" (urban
# cluster), not rural "R"; and if UATYP10 is read as character (the R >=
# 4.0 read.csv default) the cbind() coerces everything to character and
# the arithmetic breaks. Comparing the labels directly is robust either way.
inet_urban = as.numeric(recs15_Public$UATYP10 == "U")
inet_rural = as.numeric(recs15_Public$UATYP10 == "R")
# Each home's share of the national (replicate) weight total if it has
# internet access (NWEIGHT + 96 BRR replicate columns).
# NOTE(review): shares are relative to the *national* weight total, not to
# each division's urban/rural total -- confirm this is the intended
# definition of "proportion of homes".
inet_weight = recs15_weight / weight_sum * as.numeric(recs15_Public$INTERNET == 1)
# Step 2: calculate grouped theta_hat, theta_r, row = division
# theta is the difference in % of homes with internet between urban and rural
# by division. Divisions are iterated in *sorted* order so the rows line up
# with label_DIVISION inside table_BRR_95CI() (the original iterated over
# unsorted unique() values, so the row labels were only correct when the
# data happened to already be ordered by division).
recs15_Public_INET_pct_byDiv = c()
for (i in sort(unique(recs15_Public$DIVISION))){
  recs15_Public_INET_pct_byDiv = rbind(
    recs15_Public_INET_pct_byDiv,
    # % for urban - % for rural
    colSums(as.numeric(recs15_Public$DIVISION == i) * inet_urban * inet_weight) -
      colSums(as.numeric(recs15_Public$DIVISION == i) * inet_rural * inet_weight)
  )
}
# Step 3: generate summary table by division
recs15_Public_INET_theta_hat = recs15_Public_INET_pct_byDiv[,1]
recs15_Public_INET_theta_r = recs15_Public_INET_pct_byDiv[,-1]
recs15_Public_INET_summary =
  table_BRR_95CI(recs15_Public_INET_theta_hat,
                 recs15_Public_INET_theta_r)
# plot(recs15_Public_INET_summary)
# qplot(rownames(recs15_Public_INET_summary),
#       (recs15_Public_INET_summary[,2]))
# Error bars represent standard error of the mean
give_plot_Q3c =
  ggplot(data = NULL,aes(x=label_DIVISION,
                         y=recs15_Public_INET_summary[,1],
                         fill = "pink")) +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(aes(ymin=recs15_Public_INET_summary[,2],
                    ymax=recs15_Public_INET_summary[,3]),
                width=.2, # Width of the error bars
                position=position_dodge(.9))+
  ggtitle("Disparity in proportion of homes with internet access
          between urban and rural (by division)") +
  xlab("Division") +
  ylab("Disparity between urban and rural") +
  scale_fill_hue(name="Supplement type", # Legend label, use darker colors
                 breaks=c("OJ", "VC"),
                 labels=c("Orange juice", "Ascorbic acid"))
aaa8cf96b6b83996c5371b6d0bec516f3c87b308 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RPEXE.RPEXT/examples/umbrella.Rd.R | 5756400dc49388af4007aea53b959f56f3d77fa2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | umbrella.Rd.R | library(RPEXE.RPEXT)
### Name: umbrella
### Title: Umbrella alternative.
### Aliases: umbrella
### ** Examples
data(pava_dfrd)
t_d = pava_dfrd[,1]
t = pava_dfrd[,2]
d = pava_dfrd[,3]
umbrella(t_d, t, d, 2)
|
6398818ea063d865eb42b8b8f839c5cf312573ec | b1608f6a7651f92b83f6e1305b0febddae4fbfc3 | /R/people.R | 96e5111c44764ecf87cb4f8f8046ba25501a909a | [] | no_license | stephstammel/consultthat | 36c0b26eea92cdeabc9ca71a391444c688c50f5d | 9e57813a5d16868164eb7506351da25a8449e779 | refs/heads/master | 2020-03-10T17:48:41.300445 | 2018-05-24T02:11:52 | 2018-05-24T02:11:52 | 129,509,135 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 863 | r | people.R | #' people()
#'
#' Return the log of personnel for the current client as a data frame. If no
#' log of personnel exists, NULL will be returned and also congratulations you're either
#' really early in this project or have a great memory. Possibly both.
#' @param path, path where client directory is located.
#' @param client, client you wish to return personnel log for.
#' @return people, a data frame.
#' @export
#'
#' @examples
#'
#' people()
#'
people <- function(path, client){
requireNamespace("utils")
people_file <- paste(path, client, "personnel_log.csv", sep = "/")
if(!file.exists(people_file)){
people_log <- NULL
stop("No people exist in this project yet. That's a surprise ;)")
} else {
people_log <- utils::read.csv(people_file, stringsAsFactors = FALSE)
people_log <- as.data.frame(people_log)
}
return(people_log)
}
|
b0c695724500515523693b8014ff8e0aeb1cfc5d | bc504192da5aa37ccf2c40464942c3d5c56193d7 | /F567.s2020.ex16.R | ed9ef41fdc06590115e95e2f9109801a0329bbb7 | [] | no_license | danniecuiuc/frm | 6125daab1ddb6ceb7f50b6f1a9f99437238102ce | 12d632bad3283188378ee3f4623bc3b2382165c6 | refs/heads/master | 2022-10-19T13:55:07.987734 | 2020-06-08T14:58:32 | 2020-06-08T14:58:32 | 269,361,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 588 | r | F567.s2020.ex16.R | #Spring 2020 solution to Daily Exercise 16
library(MASS) #needed to simulate multivariate Normal rvs
# Question 1
# Monte Carlo study (1000 trials) of a 21-day realized-volatility
# estimator built from daily returns: V is the mean squared daily return
# over the month (zero-mean convention), SD its square root.
# NOTE(review): no set.seed() call, so the numbers differ on every run.
V = rep(0,1000)
SD = rep(0,1000)
for(i in 1:1000){
  returns = mvrnorm(21, mu = 0.00, Sigma = 0.03^2)
  V[i] = sum(returns * returns)/21
  SD[i] = sqrt(V[i])
}
# Mean and sampling standard deviation of the daily-data estimator.
meanSDQ1 = mean(SD)
stdSDQ1 = sd(SD)
# Question 2
# Same experiment with one-minute returns (1440 minutes per day), with the
# minute volatility scaled so the daily volatility is still 0.03.
# Dividing the sum of squared minute returns by 21 yields the average
# daily realized variance over the month.
V = rep(0,1000)
SD = rep(0,1000)
minutesigma = 0.03/sqrt(1440)
for(i in 1:1000){
  returns = mvrnorm(21*1440, mu = 0.00, Sigma = minutesigma^2)
  V[i] = sum(returns * returns)/21
  SD[i] = sqrt(V[i])
}
# High-frequency data shrinks the sampling error of the estimator.
meanSDQ2 = mean(SD)
stdSDQ2 = sd(SD)
5cc8231ba9df191520d5b1dc85ce3def856f2bc6 | 5a4ac3a10eb6ea4e5dc6b0588ce3fa03bf3c175e | /Day017/code.R | cec79fe7aa876ddb3aa4d07268c74c82ff242a71 | [] | no_license | woons/project_woons | 9bda2dcf1afebe4c3daf9c20a15605dec9ddbae3 | 3958979aa22ddba7434289792b1544be3f884d95 | refs/heads/master | 2021-03-16T08:40:40.350667 | 2018-05-04T05:18:45 | 2018-05-04T05:18:45 | 90,750,693 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 671 | r | code.R | all <- c("충청남도", "충남", "충남", "충청남", "충청남도", "충청도")
wrong <- c("충청도", "충남", "충청남")
replacements <- c("충청도로 교체", "충남으로 교체", "충청남으로 교체")
#만약에 원래 데이터랑 오류 사전이랑 비교해서 오류 사전의 내용이 포함되어 있으면
#올바른 단어로 교체하세요
idx <- match(all, wrong)
idx #wrong에서 몇번째꺼랑 match 되는지 index번호를 알려준다
replacements[idx]
ifelse(is.na(idx), "충청남도", "충청남도") #1
ifelse(is.na(idx), all, replacements[idx] ) #1
ifelse(all != "충청남도", "충청남도", "충청남도") #2
|
4db5713e3981d5a6f5f7a13df89709ae3838f71f | 154553c5d637755a8aebb29b681480714ad4c819 | /R/my_csv_reader.R | 31ff88b41215c02205a61502f4056459244c221c | [] | no_license | amirbenmahjoub/Package_DM | fb42cfc1e66c57af177e1e400dad0c09a91ee914 | 3b9749e80894e57160ab1dced41c01312534958a | refs/heads/master | 2021-05-08T08:31:11.217927 | 2017-11-24T15:36:32 | 2017-11-24T15:36:32 | 107,041,020 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 405 | r | my_csv_reader.R |
#' import multiple csv files
#'
#' @param folder path to the folder
#'
#' @return a list
#' @export
#' @importFrom utils read.csv2
#' @import assertthat
#' @examples
#' \dontrun{
#'
#' my_csv_reader("../../path/to/folder")
#'
#' }
#'
my_csv_reader <- function(folder){
assert_that(is.dir(file))
temp <- list.files(path=folder, pattern = ".csv")
result <- lapply(temp, read.csv2)
return(result)
}
|
65afc5c073b13c301cba19d126645c5c6710ca3e | e6949144fd0304ab2cafaa4f62b90c406bcb4f0b | /tests/testthat/test-brms_tools.R | b53ec5b622849a0472fa766bf6416aaed8369273 | [
"Apache-2.0"
] | permissive | cas-bioinf/covid19retrospective | 12ca3bbeeaa9aeb2d38a88ea42026a39c45bf88e | cfdd018db6eef417771f9ee3f86209d536a372ba | refs/heads/master | 2023-07-15T23:02:44.219995 | 2021-09-07T07:21:28 | 2021-09-07T07:21:28 | 257,529,142 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,311 | r | test-brms_tools.R | test_that("normalize_stancode", {
  # normalize_stancode() should strip // and /* */ comments and collapse
  # insignificant whitespace so that equivalent Stan programs compare equal.
  expect_equal(
    normalize_stancode("// a\nb;\n b + c = 4; // kde\ndata"),
    normalize_stancode("// dasflkjldl\n // adsfadsfa\n b;\n\n \n \t\rb + c = 4;\ndata")
  )
  # Block comments inside and after code must be ignored.
  expect_equal(
    normalize_stancode("data /* adfa */ {\nint a;\n /* asdddede \n asdfas \n asf */}\n"),
    normalize_stancode("data {\nint a;\n} /* aa \n adfasdf \n asdfadsf ddd */\n")
  )
  # Extra line breaks / trailing whitespace are insignificant.
  expect_equal(
    normalize_stancode("data \n {\nint a;\n\n } \t\n"),
    normalize_stancode("data {\nint a;\n} \n")
  )
  # A stray close-comment token after a real block comment is preserved.
  expect_equal(
    normalize_stancode("/* \n\n */\na*/"),
    normalize_stancode("a*/")
  )
  # Windows line endings (\r, \r\n) and trailing line comments are handled.
  expect_equal(
    normalize_stancode("//adsfadf \ra // asdfasdf\r\n"),
    normalize_stancode("a")
  )
  # Multi-line "boxed" comments collapse away entirely.
  expect_equal(
    normalize_stancode("/* * \n * \n * fg / */hhh"),
    normalize_stancode("hhh")
  )
  expect_equal(
    normalize_stancode("a //b"),
    normalize_stancode("a")
  )
  # Genuinely different programs must NOT normalize to the same string.
  expect_false(normalize_stancode("// a\ndata {\nint a;\n}\n") ==
               normalize_stancode("// a\ndata {\nint b;\n}\n"))
  #Should not remove single whitespace
  expect_false(normalize_stancode("da ta") ==
               normalize_stancode("data"))
  #Should handle wrong nested comments
  expect_false(normalize_stancode("/* \n\n */\na*/") ==
               normalize_stancode("b*/"))
})
|
ab390b2efe3eee67291af8a60d31c55bb65141cb | 39431027afac67ebe79bf5058d7f54cb292a8bff | /rbasics/solution/step4.R | eaa9b1e775fdd9f588911243b0379c56167b28d9 | [] | no_license | shiyuangu/cloudera_hadoop | 6a4177e2f838f8c367b482a7c9bc61f2971b6942 | 09435d04ca30a7b3fb50c427b3d79b2827cf1893 | refs/heads/master | 2021-01-22T02:14:19.350499 | 2017-05-24T23:11:17 | 2017-05-24T23:11:17 | 92,344,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 95 | r | step4.R | # step 1
head(trees, 5)  # step 1: preview the first five rows of the built-in `trees` data set
# step 2: scatterplot of tree girth (x) against height (y)
plot(trees$Girth, trees$Height)
# step 3: histogram of the distribution of tree heights
hist(trees$Height)
|
151b02efb5686ceb828128e351a2d99437d96aa4 | aece5f1e0c40de4bf0eea8a0f4e8914ac18702c4 | /R/ipc.R | df49a458aae1c1ffe05b93935601788c1f45c731 | [] | no_license | Johan-rosa/bcdata | 3ef6b1daa908b16fcd9e4031a4bac385ebf7aa20 | 2316b0fc795dd148db4321cf3744289f5454503f | refs/heads/main | 2023-03-15T11:43:04.193783 | 2022-06-11T11:51:21 | 2022-06-11T11:51:21 | 251,477,606 | 2 | 1 | null | 2023-03-11T13:10:57 | 2020-03-31T02:13:07 | R | UTF-8 | R | false | false | 9,926 | r | ipc.R | #' Descarga las series del IPC
#'
#' Descarga las series del IPC de la Republica Dominicana
#' con diferentes desagregaciones
#'
#' @param desagregacion string indicando la desagregacion deseada. opciones:
#' "general", "grupos", "regiones", "subyacente", "tnt" (transable y no transable),
#' "articulos"
#'
#' @return Un tibble con las series del ipc con la desagregacion deseada
#' @examples
#' get_ipc_data()
#' get_ipc_data(desagregacion = "grupos")
#' get_ipc_data(desagregacion = "regiones")
# Funcion para descargar data del IPC
get_ipc_data <- function(desagregacion = "general"){
  # Downloads the requested CPI series from the Dominican central bank site,
  # reads the Excel file from a temp path and tidies it into a data frame.
  # NOTE(review): an unrecognised `desagregacion` value falls through every
  # branch and returns NULL invisibly — consider match.arg() validation.
  # Bind the magrittr pipe locally so dplyr need not be attached.
  `%>%` <- magrittr::`%>%`
  if(desagregacion == "general") {
    # Download the headline CPI ---------------------------
    url_descarga <- paste0("https://cdn.bancentral.gov.do/documents/",
                           "estadisticas/precios/documents/",
                           "ipc_base_2019-2020.xls")
    # temp path for the download
    file_path <- tempfile(pattern = "", fileext = ".xls")
    # fetch the file
    download.file(url_descarga, file_path, mode = "wb", quiet = TRUE)
    suppressMessages(
      # read the spreadsheet (header rows skipped)
      ipc_general <- readxl::read_excel(
        file_path,
        sheet = 1,
        col_names = FALSE,
        skip = 7)
    )
    # Tidy: name the columns, drop empty rows and build a monthly date index
    # starting 1984-01 (one row per month is assumed).
    ipc_general <- ipc_general %>%
      janitor::clean_names() %>%
      dplyr::select(x1:x7) %>%
      setNames(
        c("year", "mes", "ipc","ipc_vm", "ipc_vd", "ipc_vi", "ipc_p12")
      ) %>%
      dplyr::filter(!is.na(mes)) %>%
      dplyr::mutate(
        fecha = seq(lubridate::ymd("1984/01/01"),
                    by = "month",
                    length.out = nrow(.)),
        year = lubridate::year(fecha)
      ) %>%
      dplyr::select(fecha, year, mes, everything())
    return(ipc_general)
  } else if(desagregacion == 'grupos') {
    # CPI by goods-and-services group ------------------------------------------
    url_descarga <- paste0(
      "https://cdn.bancentral.gov.do/documents/estadisticas/",
      "precios/documents/ipc_grupos_base_2019-2020.xls"
    )
    # temp path for the download
    file_path <- tempfile(pattern = "", fileext = ".xls")
    # fetch the file
    download.file(url_descarga, file_path, mode = "wb", quiet = TRUE)
    # column names for the tidied data frame (index + m/m change per group)
    header_ipc_grupos <- c(
      "fecha", "ipc_ayb", "ipc_ayb_vm", "ipc_alcohol_tabaco",
      "ipc_alcohol_tabaco_vm", "ipc_ropa_calzado", "ipc_ropa_calzado_vm",
      "ipc_vivienda", "ipc_vivienda_vm",
      "ipc_muebles", "ipc_muebles_vm", "ipc_salud", "ipc_salud_vm",
      "ipc_transporte", "ipc_transporte_vm", "ipc_comunicaciones",
      "ipc_comunicaciones_vm", "ipc_cultura", "ipc_cultura_vm", "ipc_educacion",
      "ipc_educacion_vm", "ipc_hotel_restaurantes", "ipc_hotel_restaurantes_vm",
      "ipc_bines_servicios", "ipc_bienes_servicios_vm"
    )
    suppressMessages(
      # read the spreadsheet ("-" marks missing values)
      ipc_grupos <- readxl::read_excel(
        file_path,
        skip = 10,
        col_names = F,
        na = "-"
      ))
    # Tidy: monthly date index starts 1999-01; crear_mes() converts the month
    # number to its (Spanish) name.
    ipc_grupos <-
      ipc_grupos %>%
      janitor::clean_names() %>%
      dplyr::select(x1:x25) %>%
      setNames(header_ipc_grupos) %>%
      dplyr::filter(!is.na(ipc_ayb)) %>%
      dplyr::mutate(
        fecha = seq(lubridate::ymd('1999/01/01'),
                    by = "month",
                    length.out = nrow(.)),
        year = lubridate::year(fecha),
        mes = crear_mes(mes = lubridate::month(fecha), type = "number_to_text")) %>%
      dplyr::select(fecha, year, mes, everything())
    return(ipc_grupos)
  } else if(desagregacion == 'regiones') {
    # CPI by region ---------------------------------------
    # column names: index and m/m change per region
    header_ipc_regiones <- c(
      "year", "mes", "ipc_ozama", "ipc_ozama_vm", "ipc_cibao",
      "ipc_cibao_vm", "ipc_este", "ipc_este_vm", "ipc_sur",
      "ipc_sur_vm")
    # download url
    url_descarga <- paste0(
      "https://cdn.bancentral.gov.do/",
      "documents/estadisticas/precios/documents/",
      "ipc_regiones_base_2019-2020.xls"
    )
    # temp path for the download
    file_path <- tempfile(pattern = "", fileext = ".xls")
    # fetch the file
    download.file(url_descarga, file_path, mode = "wb", quiet = TRUE)
    suppressMessages(
      # read the regional CPI spreadsheet
      ipc_region <- readxl::read_excel(
        file_path,
        skip = 7,
        col_names = F
      ))
    # Tidy: monthly date index starts 2011-01.
    ipc_region <-
      ipc_region %>%
      purrr::set_names(header_ipc_regiones) %>%
      dplyr::filter(!is.na(mes)) %>%
      dplyr::mutate(
        fecha = seq(lubridate::ymd('2011/01/01'),
                    by = "month",
                    length.out = nrow(.)),
        year = lubridate::year(fecha),
        mes = crear_mes(mes = lubridate::month(fecha), type = "number_to_text")) %>%
      dplyr::select(fecha, year, mes, everything())
    return(ipc_region)
  } else if(desagregacion == "subyacente") {
    # Core (subyacente) CPI -----------------------
    # column names for the tidied data frame
    header_ipc_subyacente <- c(
      "year", "mes", "ipc_subyacente", "ipc_subyacente_vm",
      "ipc_subyacente_vd", "ipc_subyacente_vi"
    )
    # download url
    url_descarga <- paste0(
      "https://cdn.bancentral.gov.do/documents/",
      "estadisticas/precios/documents/",
      "ipc_subyacente_base_2019-2020.xlsx"
    )
    # temp path for the download
    file_path <- tempfile(pattern = "", fileext = ".xlsx")
    # fetch the file
    download.file(url_descarga, file_path, mode = "wb", quiet = TRUE)
    suppressMessages(
      # read the spreadsheet
      ipc_subyacente <- readxl::read_excel(
        file_path,
        skip = 25,
        col_names = F
      ))
    # Tidy: monthly date index starts 2000-01.
    ipc_subyacente <-
      ipc_subyacente %>%
      janitor::clean_names() %>%
      dplyr::select(x1:x6) %>%
      setNames(header_ipc_subyacente) %>%
      dplyr::mutate(
        fecha = seq(lubridate::ymd('2000/01/01'),
                    by = "month",
                    length.out = nrow(.)),
        year = lubridate::year(fecha),
        mes = crear_mes(mes = lubridate::month(fecha), type = "number_to_text")) %>%
      dplyr::select(fecha, year, mes, everything()) %>%
      dplyr::filter(!is.na(ipc_subyacente))
    return(ipc_subyacente)
  } else if(desagregacion == "tnt") {
    # CPI for tradable and non-tradable goods -------------------------------
    # column names: overall, tradable (t) and non-tradable (nt) series
    header_ipc_tnt <- c(
      "year", "mes", "ipc", "ipc_vm", "ipc_vd",
      "ipc_t", "ipc_t_vm", "ipc_t_vd", "ipc_nt",
      "ipc_nt_vm", "ipc_nt_vd"
    )
    # download url
    url_descarga <- paste0(
      "https://cdn.bancentral.gov.do/",
      "documents/estadisticas/precios/",
      "documents/ipc_tnt_base_2019-2020.xls"
    )
    # temp path for the download
    file_path <- tempfile(pattern = "", fileext = ".xls")
    # fetch the file
    download.file(url_descarga, file_path, mode = "wb", quiet = TRUE)
    suppressMessages(
      # read the spreadsheet ("-" marks missing values)
      ipc_tnt <- readxl::read_excel(
        file_path,
        skip = 27,
        col_names = F,
        na = "-"
      )
    )
    # Tidy: monthly date index starts 1999-02.
    ipc_tnt <- ipc_tnt %>%
      janitor::clean_names() %>%
      setNames(header_ipc_tnt) %>%
      dplyr::filter(!is.na(mes)) %>%
      dplyr::mutate(
        fecha = seq(lubridate::ymd('1999/02/01'),
                    by = "month",
                    length.out = nrow(.)),
        year = lubridate::year(fecha),
        mes = crear_mes(mes = lubridate::month(fecha), type = "number_to_text")) %>%
      dplyr::select(fecha, year, mes, everything())
    return(ipc_tnt)
  } else if(desagregacion == "articulos"){
    # Item-level CPI: one sheet per year, stacked into long format.
    # NOTE(review): `articulos_detalle` is used below but its load is commented
    # out here, so this branch only works if the object already exists in the
    # calling environment — TODO confirm / restore the read.
    #articulos_detalle <- read_rds("data/articulos_detalles.rds")
    url <- "https://cdn.bancentral.gov.do/documents/estadisticas/precios/documents/ipc_articulos_base_2019-2020.xlsx"
    temp_path <- tempfile(fileext = ".xlsx")
    download.file(url, temp_path, mode = "wb", quiet = TRUE)
    # Every sheet except the METADATOS one holds a year of data.
    sheets <- stringr::str_subset(readxl::excel_sheets(temp_path), "METADATOS", negate = TRUE)
    ipc_articulos_long <- purrr::map(
      sheets,
      ~suppressMessages(readxl::read_excel(temp_path, sheet = .x, skip = 4)) %>%
        janitor::remove_empty(which = "cols") %>%
        janitor::clean_names() %>%
        dplyr::rename(nombre = x1, ponderador = x2) %>%
        dplyr::bind_cols(dplyr::select(dplyr::ungroup(articulos_detalle), division)) %>%
        tidyr::pivot_longer(cols = -c(nombre, ponderador, division),
                            names_to = "mes", values_to = "indice")
    ) %>%
      setNames(readr::parse_number(sheets)) %>%
      dplyr::bind_rows(.id = "year")
    return(ipc_articulos_long)
  }
}
|
0f96696e1960601e9fa1f2b2a3c0dc135d956f56 | 888b76fabb8b490b2f92daa1e53b84940eaf83eb | /analysis/inundation_wooddensity_predicts_elevation/fsen_fden.R | 27811726854dcbbdb0d53c41a4535db6ff8de57b | [] | no_license | jmargrove/ForestFloodingSensitivityAnalysis | ba398074537f23c76264bb9ebffaf7390895866c | 43eaa322207aa8d8433ce42d359e72909406d1a5 | refs/heads/master | 2021-11-22T03:22:30.804283 | 2018-10-31T15:28:11 | 2018-10-31T15:28:11 | 120,295,376 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,575 | r | fsen_fden.R | ################################################################################
# title: flooding sensitivity response predicting species distributions
# author: James Margrove
# Clear work space
# NOTE(review): rm(list = ls()) wipes the whole workspace; fine interactively,
# but avoid if this script is ever source()d from other code.
rm(list = ls())
# Import packages
require(ggplot2)
source('./functions/booter.R')
# Import data: per-species risk ratios, adult wood density, predicted elevation
rr_data <- read.table("./data/riskratio_data.txt", header = TRUE)
dden_data <- read.table("./data/dden_adult_new.txt", header = TRUE)
rr_data$pe <- read.table("./data/pelev_data.txt", header = TRUE)$pe
# Sort wood density by species code before column-binding into rr_data
# (assumes rr_data is itself ordered by `sp` — TODO confirm)
dden_data <- dden_data[order(dden_data$sp), ]
rr_data$dden <- dden_data$dden_adult
# Split wood density into a two-level factor at the mean; the -.1 pads the
# lower break so cut() includes the minimum value.
rr_data$fden <- cut(rr_data$dden,
                    breaks = c(min(rr_data$dden)-.1,
                               mean(rr_data$dden),
                               max(rr_data$dden)),
                    labels = c("low","high"))
rr_data
# Exploration
p1 <- ggplot(rr_data, aes(x = fden, y = (rr))) +
  geom_point() +
  stat_smooth(method = lm, color = '#000000', size = 0.5) +
  ylab("log(water inundation sensitivity)") +
  xlab("log(adult wood density)") +
  theme_classic()
p1
# Model the data
# "high" density as reference level so the coefficient contrasts "low" with it
rr_data$fden <- relevel(rr_data$fden, ref = "high")
# NOTE(review): the `?relevel` help call below is a leftover from interactive work.
?relevel
model <- lm(rr ~ fden, data = rr_data)
summary(model)
par(mfrow=c(2,2))
plot(model)
rr_data$resid <- residuals(model)
with(rr_data, tapply(resid, fden, var))*100
# The low wood density groups residuals had an order of magnitude greater variance than
# the high wood density group. So we are going to log transform the data.
model2 <- update(model, -rr + log(rr) ~ .)
summary(model2)
plot(model2)
rr_data$resid2 <- residuals(model2)
with(rr_data, tapply(resid2, fden, var))*100
# Evaluation
preds <- data.frame(fden = c("low", "high"))
# Prediction
preds$rr <- predict(model2,
                    newdata = preds,
                    type = "response")
preds$CI <- predict(model2,
                    newdata = preds,
                    type = "response",
                    se.fit = TRUE)$se.fit
# Plot the graph (exp() back-transforms the log-scale fit; +/- 1.96*SE bars)
p1 <- ggplot(preds, aes(x = fden, y = exp(rr))) +
  geom_point() +
  geom_errorbar(aes(ymin = exp(rr - CI * 1.96),
                    ymax = exp(rr + CI * 1.96)),
                width = 0.2,
                alpha = 0.22) +
  ylab("water inundation sensitivity") +
  xlab(bquote("Wood density")) +
  theme_classic() +
  theme(legend.position = c(0.2, 0.85))
p1
# Boot strap for coef CIs
coef_CI <- booter(model = model2,
                  data = rr_data,
                  preds = preds,
                  coef = TRUE,
                  n = 5000)
coef_CI
# saving the plot
ggsave(p1, file = "./graphs/fsen_vs_fden.png",
       width = 4,
       height = 4)
### the var in low wood density group
summary(lm(rr ~ pe, subset = fden == "low", rr_data))
with(rr_data[rr_data$fden == "low",], cor(rr, pe))
with(rr_data[rr_data$fden == "high",], cor(rr, pe))
model4 <- (lm(pe ~ rr + dden, rr_data))
save(model4, file = "./models/pe_rr_dden_NoAbundance")
summary(model4)
booter(model4, rr_data, coef = T, n = 50)
### does the trait of root:shoot ratio explain any of this
traits <- read.table("./data/traits.txt", header = TRUE)
dt <- merge(rr_data, traits, by = "sp")
ggplot(dt, aes(x= slm, y = rr)) + geom_point()
# there are 9 species that overlap
m1 <- (lm(rr ~ dden.x + slm, data = dt))
anova(m1)
car::vif(m1)
ggplot(dt, aes(x = dden.y, y = rr)) + geom_point()
|
8f82e992e8b29f90f666ac489e5b1cb12221245e | 21eada57e383078a1af917e9a3a31e049feb2c76 | /Shiny-App/R-shiny-ML/ui.R | 5cb18ef3fd52403da18f46586df2a8a7c969876b | [] | no_license | PHP-2560/final-project-mlhelper | a8252b5ce29d36485f40e285e8f7c65e2b5f7111 | f3898344dbeff0d4deeb2d9cdc88805a97ad6827 | refs/heads/master | 2020-04-11T05:26:43.883618 | 2018-12-16T18:24:33 | 2018-12-16T18:24:33 | 161,549,792 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,961 | r | ui.R | library(shiny)
library(htmltools)
shinyUI(
  pageWithSidebar(
    # Header:
    headerPanel("Machine Learning Helper"),
    # Input in sidepanel:
    sidebarPanel(
      # Inline CSS to cap widget widths inside the sidebar.
      tags$head(
        tags$style(type="text/css", "select { max-width: 200px; }"),
        tags$style(type="text/css", ".span4 { max-width:200px; }"),
        tags$style(type="text/css", ".well { max-width: 300px; }")
      ),
      # Upload data:
      fileInput("file", "Upload csv data-file:"),
      # Action button to load the bundled sample dataset; styled so it
      # stands out against the default button look.
      actionButton("myLoader", "Load test dataset",
                   style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
      # Horizontal rule to separate sections.
      hr(),
      # Variable selection:
      #Independent Numeric variable selection:
      #htmlOutput("varselect_num"),
      #Independent Categorical variable selection:
      #htmlOutput("varselect_cat"),
      # Dependent (response) variable selection, rendered server-side:
      htmlOutput("response"),
      # Rule separating variable selection from the file-download section.
      hr(),
      width=3),
    # Main:
    mainPanel(
      titlePanel("Choose an Algorithmn"),
      tags$head(
        tags$style(type='text/css',
                   ".nav-tabs {font-size: 14px} ")),
      # One tab per preprocessing step / model; outputs are produced in server.R.
      tabsetPanel(type = "tabs",
                  tabPanel("LogTransformation", br(),
                           sliderInput("fp_set",label = "Adjusted Fisher-Pearson Coefficient of Skewness",0,1,0.5),
                           p(em("A number,usually between 0 and 1, it the critical value of adjusted Fisher-Pearson coefficient of skewness, when the adjusted Fisher-Pearson coefficient of skewness of the data response is larger than this value, we think the distribution of response is skewed and will do the log-transformation")),
                           br(),
                           plotOutput("LogTransformation", width = "100%", height = "580px")
                  ),
                  tabPanel("PCA", br(),
                           sliderInput("pov_set", label = "Critical value of proportion",0,1,0.5),
                           p(em("Critical value of proportion of variance that Principal Components must have, this value will decide how many principle components we will have after PCA, its value need to be in [0,1], largert it is, more principle components we will have.")),
                           br(),
                           verbatimTextOutput("pca_matrix")),
                  tabPanel("Ridge", plotOutput("mse_ridge", height = "580px"),br(),
                           p("The smallest MSE that Ridge could achieve was: "),
                           verbatimTextOutput("ridge_mse_num")),
                  tabPanel("LASSO", plotOutput("mse_lasso", height = "580px"),br(),
                           p("The smallest MSE that LASSO could achieve was: "),
                           verbatimTextOutput("lasso_mse_num")),
                  tabPanel("Elastic Net", plotOutput("mse_elnet", height = "580px"),br(),
                           p("The smallest MSE that Elastic Net could achieve was: "),
                           verbatimTextOutput("elnet_mse_num")),
                  tabPanel("Bagging", plotOutput("bag_mse", height = "580px"),br(),
                           p("The smallest MSE that Bagging could achieve was: "),
                           verbatimTextOutput("bag_mse_num")),
                  tabPanel("Random Forest", plotOutput("rf_mse", height = "580px"),br(),
                           p("The smallest MSE that Random Forest could achieve was: "),
                           verbatimTextOutput("rf_mse_num")),
                  tabPanel("Help", htmlOutput("inc"))
      )
    )
  ))
|
bdf506664af020420771e75173eb3bbb3961fb7d | d38b8fdda7bfdaf76621e20e60a381108c48f9e1 | /R/sloopoint.R | c1420cdfca90c9bf45431d819e212bbcf6019e52 | [
"HPND"
] | permissive | timcdlucas/INLAutils | e4425389d3003a9639a6e4f1e573c07717715c01 | 74d769a7279e55256d06f94e97344d702bbb8b1b | refs/heads/master | 2022-12-04T08:24:51.277408 | 2022-11-23T11:37:26 | 2022-11-23T11:37:26 | 61,026,905 | 25 | 10 | NOASSERTION | 2020-06-26T08:27:33 | 2016-06-13T10:19:39 | R | UTF-8 | R | false | false | 1,589 | r | sloopoint.R | #' A function to plot the left-out points during the spatial leave-one-out cross-validation in R using INLA.
#'
#' This function plots the left-out point(s) of a spatial leave-one-out cross-validation (SLOO-CV) of one or several models running on INLA.
#' @param points = cbind of longitude and latitude of the observed points (full sample)
#' @param test = cbind of longitude and latitude of the test point(s)
#' @param rad = Numeric value giving the radius for the spatial buffer around left-out point's location
#'
#' @name sloopoint
#'
#' @examples
#' \dontrun{
#' # sloopoint function
#' dataframe<-data.frame(long=runif(100, -40.0, 40.0),lat=runif(100, -40.0, 40.0))
#' test<-dataframe[1:10,]
#' rad = 1
#'
#' # run the function
#' sloopoint(points = cbind(dataframe$long, dataframe$lat), test = cbind(test$long, test$lat),
#' rad = rad)
#' }
sloopoint <- function(points, test, rad) {
  # Plot all observed locations, highlight the left-out (test) point(s) with
  # their iteration number, and draw the removal disc of radius `rad` around
  # each one.
  #
  # Fixes vs. the original:
  # - the signature declared self-referential defaults (points = points,
  #   test = test, rad = rad); evaluating any of them raises a "promise
  #   already under evaluation" error, so they could never work. All three
  #   arguments are therefore plainly required now.
  # - seq(1:nrow(test)) replaced with seq_len(nrow(test)), which is also
  #   correct when `test` has zero rows.
  pchname <- as.character(seq_len(nrow(test)))
  plot(points, pch = 3, xlab = "longitude", ylab = "latitude")
  points(test, pch = 19, col = "red", cex = 3.5, bg = "black")
  # White iteration numbers centered on the red test-point markers.
  graphics::text(test, labels = pchname, cex = 0.75, col = "white")
  # Discs drawn in data units (inches = FALSE) so the radius matches `rad`.
  graphics::symbols(x = test[, 1], y = test[, 2], circles = rep(rad, length(test[, 1])), fg = "red", add = TRUE, inches = FALSE)
  graphics::legend("topright", legend = c("Observed locations", paste0("Predicted location(s) and removed disc(s)\nwith iteration number at centroid:{1,...,",
    length(test[, 1]), "}")), pch = c(3, 19), col = c("black", "red"), cex = 0.75, text.col = c("black", "red"))
}
|
e84dbaf8627d23639b949a0fb890c8be82642f92 | 132e56789f866a0d1fad6eb8839982b0822c8020 | /R/dostats.R | d3cd1e7a81407fb94c7a665654f988070fb2de5e | [] | no_license | halpo/dostats | beeac5e74790c7d78095293f24a3adc232954d08 | afd933128376cc1fb6a565ceba6967113586884b | refs/heads/master | 2022-06-09T03:21:33.485624 | 2022-05-10T19:07:50 | 2022-05-10T19:07:50 | 3,388,144 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,793 | r | dostats.R | {###############################################################################
# dostats.R
# This file is part of the R package dostats
#
# Copyright 2012 Andrew Redd
# Date: 5/30/2012
#
# DESCRIPTION
# ===========
# dostats is a helper function for computing descriptive tables.
#
# LICENSE
# ========
# dostats is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# dostats is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# dostats. If not, see http://www.gnu.org/licenses/.
#
}###############################################################################
#' @importFrom utils head tail
#' @importFrom stats anova na.fail
NULL
#' Convenient interface for computing statistics on a vector
#'
#' @param x the vector
#' @param ... statistics to compute, must take a vector and return a vector
#' @param .na.action the action to take on NA values, for all statistics
#'
#' @return A one row \code{data.frame} with columns named as in \code{...}
#' @export
#' @seealso \code{\link[plyr]{ldply}}
#' @keywords utilities misc
#' @example inst/ex_dostats.R
dostats <- function(x, ..., .na.action=na.fail){
  # Apply each statistic supplied in `...` to the vector `x` and collect the
  # results as a one-row data.frame. Column names come from the argument
  # names, falling back to the expressions as written in the call
  # (e.g. dostats(x, mean, sd) -> columns "mean", "sd").
  if (any(is.na(x))) {
    x <- .na.action(x)
  }
  stat_funs <- list(...)
  # Expression names captured before evaluation; [-1] drops the "c".
  expr_names <- as.character(substitute(c(...)))[-1]
  out_names <- names(stat_funs)
  if (is.null(out_names)) {
    out_names <- expr_names
  } else {
    out_names <- ifelse(out_names != "", out_names, expr_names)
  }
  values <- lapply(stat_funs, function(f) do.call(f, list(x)))
  names(values) <- out_names
  # Multi-valued statistics are transposed so they spread across columns.
  values <- lapply(values, function(v) if (length(v) == 1) v else t(v))
  do.call(data.frame, values)
}
#' Filter by class
#' @param x vector of any class
#' @param .class string for class to filter by
#' @param ... passed to \code{\link{dostats}}
#' @return data frame of computed statistics if x is of class \code{.class}
#' otherwise returns \code{NULL}.
#' @export
#' @seealso \code{\link{dostats}}
class.stats <- function(.class){
  # Build a filtered version of dostats(): the returned closure computes the
  # statistics only when its input inherits from `.class`, otherwise NULL.
  # `.class` may be given as a string or as a bare name (e.g.
  # class.stats(numeric)); in the latter case the symbol text is captured
  # via substitute() from the still-available promise expression.
  if (!inherits(.class, "character")) {
    .class <- as.character(substitute(.class))
  }
  function(x, ...) {
    if (inherits(x, .class)) {
      dostats(x, ...)
    } else {
      NULL
    }
  }
}
# Preconfigured wrappers: each applies dostats() only to inputs of the
# matching class (see class.stats above), returning NULL otherwise.
#' @describeIn class.stats Numeric class statistics
#' @export
numeric.stats <- class.stats(numeric)
#' @describeIn class.stats Factor class statistics
#' @export
factor.stats <- class.stats(factor)
#' @describeIn class.stats Integer class statistics
#' @export
integer.stats <- class.stats(integer)
|
ffd5c9fd838f75d74f8878294460421dcf1ac74d | 3c883c8e8f1aad9cfbaeff60998ec9b0df2b7ba0 | /inst/tests/esAnnotate_correct_classes.R | c4d3010acfa07514f658ea52b9eaa94534876911 | [] | no_license | genomelab/esFunctions | ec14979d03247120d54972f9b2b9213a5cbcc3cc | e721a3859ce29abdd50c930b213eebe503e6ad26 | refs/heads/master | 2023-05-13T11:23:07.544888 | 2023-05-01T04:51:05 | 2023-05-01T04:51:05 | 17,329,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | esAnnotate_correct_classes.R | #this is a test for esAnnotate
# NOTE(review): hard-coded absolute path; this only runs on the author's
# machine. Prefer a relative path or testthat's own test discovery.
setwd("/Users/anthonycolombo60/Dropbox/TARGET PROJECT/R Package/esFunctions/inst/tests")
library("testthat")
#now to test esAnnotate
test_that("Correct Class", {
  # `eset` (ExpressionSet) and `anno` (annotation data.frame) must already
  # exist in the session; esAnnotate() combines them into an annotated set.
  Antd_eset<-esAnnotate(eset,anno) #these objects are generated from eset.R data
  expect_that(Antd_eset, is_a("ExpressionSet"))
  expect_that(anno, is_a("data.frame"))
  expect_that(eset, is_a("ExpressionSet"))
  expect_that(rownames(anno), is_a("character"))
  expect_that(featureNames(eset), is_a("character"))
})
#all the class objects work, how else to test this function?
|
7ed96855017bf3e1d32dc3ad522d4b83f44dcddd | bb0fb51530335b10a2e64557fb6c950be61b7968 | /Rscripts/MutationFreqSum.R | f1a5597078f6df7de90cf2467ac06f74daee4b87 | [] | no_license | kahot/HCV_project | bd0450d07c84906b13b3cf6b442de68cdc7f3e44 | 0047c945f9f1522ebbda2b1cb3adf7742ce01a9e | refs/heads/master | 2022-03-24T06:01:24.189668 | 2019-12-16T17:13:03 | 2019-12-16T17:13:03 | 187,297,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,645 | r | MutationFreqSum.R | ## prepare summary data for mutation frequenceis across all samples
library(plotrix)
source("Rscripts/CreateEmptyData.R")
CreateEmptyData()
for (i in 1:length(Overview_summary)){
dat<-Overview_summary[[i]]
filename<-names(Overview_summary)[i]
for (typeofsite in c("syn", "nonsyn")){
for (wtnt in c("a", "t", "c", "g")){
mutrate<- dat$freq.Ts[dat$Type==typeofsite & dat$MajNt==wtnt]
m[typeofsite,wtnt]<-mean(mutrate[!is.na(mutrate)])
se[typeofsite,wtnt]<-std.error(mutrate[!is.na(mutrate)])
m_NonCpG<-dat$freq.Ts[dat$Type==typeofsite & dat$MajNt==wtnt & dat$makesCpG==0]
m_nonCpG[typeofsite,wtnt]<-mean(m_NonCpG[!is.na(m_NonCpG)])
se_nonCpG[typeofsite,wtnt]<-std.error(m_NonCpG[!is.na(m_NonCpG)])
mu_CpG<-dat$freq.Ts[dat$Type==typeofsite & dat$MajNt==wtnt & dat$makesCpG==1]
m_CpG[typeofsite,wtnt]<-mean(mu_CpG[!is.na(mu_CpG)])
se_CpG[typeofsite,wtnt]<-std.error(mu_CpG[!is.na(mu_CpG)])
#TV1
mutrate1<-dat$freq.transv1[dat$Type.tv1==typeofsite & dat$MajNt==wtnt]
m1[typeofsite,wtnt]<-mean(mutrate1[!is.na(mutrate1)])
se1[typeofsite,wtnt]<-std.error(mutrate1[!is.na(mutrate1)])
m1_NonCpG<-dat$freq.transv1[dat$Type.tv1==typeofsite & dat$MajNt==wtnt & dat$makesCpG.tv1==0]
m1_nonCpG[typeofsite,wtnt]<-mean(m1_NonCpG[!is.na(m1_NonCpG)])
se1_nonCpG[typeofsite,wtnt]<-std.error(m1_NonCpG[!is.na(m1_NonCpG)])
mu1_CpG<-dat$freq.transv1[dat$Type.tv1==typeofsite & dat$MajNt==wtnt & dat$makesCpG.tv1==1]
m1_CpG[typeofsite,wtnt]<-mean(mu1_CpG[!is.na(mu1_CpG)])
se1_CpG[typeofsite,wtnt]<-std.error(mu1_CpG[!is.na(mu1_CpG)])
#tv2
mutrate2<-dat$freq.transv2[dat$Type.tv2==typeofsite & dat$MajNt==wtnt]
m2[typeofsite,wtnt]<-mean(mutrate2[!is.na(mutrate2)])
se2[typeofsite,wtnt]<-std.error(mutrate2[!is.na(mutrate2)])
m2_NonCpG<-dat$freq.transv2[dat$Type.tv2==typeofsite & dat$MajNt==wtnt & dat$makesCpG.tv2==0]
m2_nonCpG[typeofsite,wtnt]<-mean(m2_NonCpG[!is.na(m2_NonCpG)])
se2_nonCpG[typeofsite,wtnt]<-std.error(m2_NonCpG[!is.na(m2_NonCpG)])
mu2_CpG<-dat$freq.transv2[dat$Type.tv2==typeofsite & dat$MajNt==wtnt & dat$makesCpG.tv2==1]
m2_CpG[typeofsite,wtnt]<-mean(mu2_CpG[!is.na(mu2_CpG)])
se2_CpG[typeofsite,wtnt]<-std.error(mu2_CpG[!is.na(mu2_CpG)])
vectorname<<-paste0(typeofsite,"_",wtnt)
assign(vectorname, mutrate)
vectorname.tv1<-paste0(typeofsite,"_",wtnt,"_tv1")
vectorname.tv2<-paste0(typeofsite,"_",wtnt,"_tv2")
assign (vectorname.tv1, mutrate1)
assign (vectorname.tv2, mutrate2)
vname1<<-paste0(typeofsite,"_",wtnt,"_noncpg")
assign(vname1, m_NonCpG)
vectorname.tv1<-paste0(typeofsite,"_",wtnt,"_tv1_noncpg")
vectorname.tv2<-paste0(typeofsite,"_",wtnt,"_tv2_noncpg")
assign (vectorname.tv1, m1_NonCpG)
assign (vectorname.tv2, m2_NonCpG)
vname2<<-paste0(typeofsite,"_",wtnt,"_cpg")
assign(vname2, mu_CpG)
vectorname.tv1<-paste0(typeofsite,"_",wtnt,"_tv1_cpg")
vectorname.tv2<-paste0(typeofsite,"_",wtnt,"_tv2_cpg")
assign (vectorname.tv1, mu1_CpG)
assign (vectorname.tv2, mu2_CpG)
}
}
A_syn<<-c(A_syn,syn_a)
T_syn<<-c(T_syn,syn_t)
C_syn<<-c(C_syn,syn_c)
G_syn<<-c(G_syn,syn_g)
A_nonsyn<<-c(A_nonsyn,nonsyn_a)
T_nonsyn<<-c(T_nonsyn,nonsyn_t)
C_nonsyn<<-c(C_nonsyn,nonsyn_c)
G_nonsyn<<-c(G_nonsyn,nonsyn_g)
A_syn_noncpg<<-c(A_syn_noncpg,syn_a_noncpg)
T_syn_noncpg<<-c(T_syn_noncpg,syn_t_noncpg)
C_syn_noncpg<<-c(C_syn_noncpg,syn_c_noncpg)
G_syn_noncpg<<-c(G_syn_noncpg,syn_g_noncpg)
A_nonsyn_noncpg<<-c(A_nonsyn_noncpg,nonsyn_a_noncpg)
T_nonsyn_noncpg<<-c(T_nonsyn_noncpg,nonsyn_t_noncpg)
C_nonsyn_noncpg<<-c(C_nonsyn_noncpg,nonsyn_c_noncpg)
G_nonsyn_noncpg<<-c(G_nonsyn_noncpg,nonsyn_g_noncpg)
A_syn_noncpg<<-c(A_syn_noncpg,syn_a_noncpg)
T_syn_noncpg<<-c(T_syn_noncpg,syn_t_noncpg)
C_syn_noncpg<<-c(C_syn_noncpg,syn_c_noncpg)
G_syn_noncpg<<-c(G_syn_noncpg,syn_g_noncpg)
A_nonsyn_noncpg<<-c(A_nonsyn_noncpg,nonsyn_a_noncpg)
T_nonsyn_noncpg<<-c(T_nonsyn_noncpg,nonsyn_t_noncpg)
C_nonsyn_noncpg<<-c(C_nonsyn_noncpg,nonsyn_c_noncpg)
G_nonsyn_noncpg<<-c(G_nonsyn_noncpg,nonsyn_g_noncpg)
A_tv1_syn_cpg<<-c(A_tv1_syn_cpg,syn_a_tv1_cpg)
A_tv1_syn_noncpg<<-c(A_tv1_syn_noncpg,syn_a_tv1_noncpg)
A_tv1_nonsyn_cpg<<-c(A_tv1_nonsyn_cpg,nonsyn_a_tv1_cpg)
A_tv1_nonsyn_noncpg<<-c(A_tv1_nonsyn_noncpg,nonsyn_a_tv1_noncpg)
C_tv1_syn_noncpg<<-c(C_tv1_syn_noncpg,syn_c_tv1_noncpg)
C_tv1_nonsyn_noncpg<<-c(C_tv1_nonsyn_noncpg,nonsyn_c_tv1_noncpg)
G_tv1_syn_cpg<<-c(G_tv1_syn_cpg,syn_g_tv1_cpg)
G_tv1_syn_noncpg<<-c(G_tv1_syn_noncpg,syn_g_tv1_noncpg)
G_tv1_nonsyn_cpg<<-c(G_tv1_nonsyn_cpg,nonsyn_g_tv1_cpg)
G_tv1_nonsyn_noncpg<<-c(G_tv1_nonsyn_noncpg,nonsyn_g_tv1_noncpg)
T_tv1_syn_noncpg<<-c(T_tv1_syn_noncpg,syn_t_tv1_noncpg)
T_tv1_nonsyn_noncpg<<-c(T_tv1_nonsyn_noncpg,nonsyn_t_tv1_noncpg)
A_tv2_syn<<-c(A_tv2_syn,syn_a_tv2)
T_tv2_syn<<-c(T_tv2_syn,syn_t_tv2)
C_tv2_syn<<-c(C_tv2_syn,syn_c_tv2)
G_tv2_syn<<-c(G_tv2_syn,syn_g_tv2)
A_tv2_nonsyn<<-c(A_tv2_nonsyn,nonsyn_a_tv2)
T_tv2_nonsyn<<-c(T_tv2_nonsyn,nonsyn_t_tv2)
C_tv2_nonsyn<<-c(C_tv2_nonsyn,nonsyn_c_tv2)
G_tv2_nonsyn<<-c(G_tv2_nonsyn,nonsyn_g_tv2)
A_tv2_syn_noncpg<<-c(A_tv2_syn_noncpg,syn_a_tv2_noncpg)
A_tv2_nonsyn_noncpg<<-c(A_tv2_nonsyn_noncpg,nonsyn_a_tv2_noncpg)
C_tv2_syn_cpg<<-c(C_tv2_syn_cpg,syn_c_tv2_cpg)
C_tv2_syn_noncpg<<-c(C_tv2_syn_noncpg,syn_c_tv2_noncpg)
C_tv2_nonsyn_cpg<<-c(C_tv2_nonsyn_cpg,nonsyn_c_tv2_cpg)
C_tv2_nonsyn_noncpg<<-c(C_tv2_nonsyn_noncpg,nonsyn_c_tv2_noncpg)
G_tv2_syn_noncpg<<-c(G_tv2_syn_noncpg,syn_g_tv2_noncpg)
G_tv2_nonsyn_noncpg<<-c(G_tv2_nonsyn_noncpg,nonsyn_g_tv2_noncpg)
T_tv2_syn_cpg<<-c(T_tv2_syn_cpg,syn_t_tv2_cpg)
T_tv2_syn_noncpg<<-c(T_tv2_syn_noncpg,syn_t_tv2_noncpg)
T_tv2_nonsyn_cpg<<-c(T_tv2_nonsyn_cpg,nonsyn_t_tv2_cpg)
T_tv2_nonsyn_noncpg<<-c(T_tv2_nonsyn_noncpg,nonsyn_t_tv2_noncpg)
mut[[i]]<-m
mut.CpG[[i]]<-m_CpG
mut.nonCpG[[i]]<-m_nonCpG
names(mut)[i]<-filename
names(mut.CpG)[i]<-filename
names(mut.nonCpG)[i]<-filename
mut1[[i]]<-m1
mut1.CpG[[i]]<-m1_CpG
mut1.nonCpG[[i]]<-m1_nonCpG
names(mut1)[i]<-filename
names(mut1.CpG)[i]<-filename
names(mut1.nonCpG)[i]<-filename
mut2[[i]]<-m2
mut2.CpG[[i]]<-m2_CpG
mut2.nonCpG[[i]]<-m2_nonCpG
names(mut2)[i]<-filename
names(mut2.CpG)[i]<-filename
names(mut2.nonCpG)[i]<-filename
SE[[i]]<-se
SE.CpG[[i]]<-se_CpG
SE.nonCpG[[i]]<-se_nonCpG
names(SE)[i]<-paste0(filename,".se")
names(SE.CpG)[i]<-paste0(filename,".se")
names(SE.nonCpG)[i]<-paste0(filename,".se")
SE1[[i]]<-se1
SE1.CpG[[i]]<-se1_CpG
SE1.nonCpG[[i]]<-se1_nonCpG
names(SE1)[i]<-paste0(filename,".se")
names(SE1.CpG)[i]<-paste0(filename,".se")
names(SE1.nonCpG)[i]<-paste0(filename,".se")
SE2[[i]]<-se2
SE2.CpG[[i]]<-se2_CpG
SE2.nonCpG[[i]]<-se2_nonCpG
names(SE2)[i]<-filename
names(SE2.CpG)[i]<-filename
names(SE2.nonCpG)[i]<-filename
}
# Pool the per-sample matrices (lists built in the loop above) for each
# mutation class — transitions ("mut") and the two transversion classes
# ("mut1"/"mut2") — average them per wild-type nucleotide, and write csvs.
# `Summary` holds one row per nucleotide (A/T/C/G), filled column-by-column.
Summary<-data.frame(matrix(ncol = 0, nrow = 4),row.names =c("A","T",'C','G') )
dir="./Output1A/MutFreq/Maj/"
for (i in c("mut","mut1","mut2")){
    # Pick the matching SE list, the suffix `n` used to build "SE<n>.CpG"
    # style names, and the output-file label. (NOTE: `k` is never used.)
    if (i=="mut") { k=1
    n<-""
    error<-SE
    fname<-"Transition"}
    if (i=="mut1") {k=2
    n=1
    error<-SE1
    fname<-"Transversion_1"}
    if (i=="mut2") {k=3; n=2
    error<-SE2
    fname<-"Transversion_2"}
    # Row 1 of every per-sample matrix = synonymous, row 2 = nonsynonymous.
    syn<-do.call(rbind, lapply(get(paste0(i)), function(x) x[1,]))
    nonsyn<-do.call(rbind, lapply(get(paste0(i)), function(x) x[2,]))
    syn.se<-do.call(rbind, lapply(error, function(x) x[1,]))
    nonsyn.se<-do.call(rbind, lapply(error, function(x) x[2,]))
    SYN<-rbind(syn, syn.se)
    NonSYN<-rbind(nonsyn, nonsyn.se)
    Summary$ave.syn<-colMeans(syn)
    Summary$ave.nonsyn<-colMeans(nonsyn)
    Summary$ave.syn.se<-colMeans(syn.se)
    Summary$ave.nonsyn.se<-colMeans(nonsyn.se)
    # CpG-creating sites.
    CpG<-do.call(rbind, lapply(get(paste0(i,".CpG")), function(x) x[1,]))
    CpG.ns<-do.call(rbind, lapply(get(paste0(i,".CpG")), function(x) x[2,]))
    CpG.se<-do.call(rbind, lapply(get(paste0("SE",n,".CpG")), function(x) x[1,]))
    CpG.ns.se<-do.call(rbind, lapply(get(paste0("SE",n,".CpG")), function(x) x[2,]))
    CPG.s<-rbind(CpG,CpG.se)
    CPG.ns<-rbind(CpG.ns,CpG.ns.se)
    Summary$CpG.ave<-colMeans(CpG)
    Summary$CpG.ns.ave<-colMeans(CpG.ns)
    Summary$CpG.ave.se<-colMeans(CpG.se)
    Summary$CpG.ns.ave.se<-colMeans(CpG.ns.se)  # (fix) was colMeans(CpG.se)
    # Non-CpG sites.
    nonCpG<-do.call(rbind, lapply(get(paste0(i,".nonCpG")), function(x) x[1,]))
    nonCpG.se<-do.call(rbind, lapply(get(paste0("SE",n,".nonCpG")), function(x) x[1,]))
    nonCpG.ns<-do.call(rbind, lapply(get(paste0(i,".nonCpG")), function(x) x[2,]))
    nonCpG.ns.se<-do.call(rbind, lapply(get(paste0("SE",n,".nonCpG")), function(x) x[2,]))
    nonCPG.s<-rbind(nonCpG,nonCpG.se)
    nonCPG.ns<-rbind(nonCpG.ns,nonCpG.ns.se)
    Summary$nonCpG.ave<-colMeans(nonCpG)
    Summary$nonCpG.ns.ave<-colMeans(nonCpG.ns)
    Summary$nonCpG.ave.se<-colMeans(nonCpG.se)
    Summary$nonCpG.ns.ave.se<-colMeans(nonCpG.ns.se)  # (fix) was colMeans(nonCpG.se)
    write.csv(Summary,paste0(dir,"Summary_",fname,".csv"))
    write.csv(SYN, paste0(dir,"Synonymous_",fname,".csv"))
    write.csv(NonSYN,paste0(dir,"Nonsynonymous_",fname,".csv"))
    write.csv(CPG.s,paste0(dir, "CpG_Synonymous_",fname,".csv"))
    write.csv(CPG.ns,paste0(dir, "CpG_Nonsynonymous_",fname,".csv"))
    write.csv(nonCPG.s,paste0(dir,"nonCpG_Synonymous_", fname,".csv"))
    write.csv(nonCPG.ns,paste0(dir,"nonCpG_Nonsynonymous_", fname,".csv"))
}
|
379b40f18de576f2a7efec7292fe3b7101fab651 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /coga/inst/testfiles/pcoga_approx/libFuzzer_pcoga_approx/libfuzzer_logs/1610567672-inps.R | 72d5a597a833f5b5f32ad627457b45101f2493cc | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 449 | r | 1610567672-inps.R | list(rate = numeric(0), shape = c(0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0), x = numeric(0))
testlist <- list(rate = numeric(0), shape = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = numeric(0)) |
6b0dd5ccdc0bee91c565f30aa0595f53c582c7ed | 8db1240915746d5fd2e7eafc2aa17eb468d9dc20 | /Statistika/tasu-analüüs.R | ac907ec1c608b216dc2ee55cf79570332568b596 | [] | no_license | RRisto/Riigteenuste_analyys | 4a2f8ecdcd7895044a3c3a6ccb0b16b45fa0f142 | 7ec758d797a498835112ee1f06627cc769834782 | refs/heads/master | 2021-01-10T16:10:45.017135 | 2016-01-06T12:12:49 | 2016-01-06T12:12:49 | 46,671,347 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,797 | r | tasu-analüüs.R | #########################################kui palju on tasulisi, tasuta teenuseid
#kanalite lõikes
#teen kohandatud funktsiooni metlimiseks et saaks ka muid muutujaid kaasata
# Melt one service channel's wide statistics columns into long format.
# `kanal` is the column-name prefix (used as a regex) identifying the
# channel; `data` is the wide yearly data set. Returns NULL when the
# channel has no statistics columns, otherwise a long data frame with
# channel-neutral column names so results for all channels can be rbind-ed.
meltimine2 <- function(kanal, data) {
  library(reshape2)
  # Keep the shared id/meta columns plus the columns of this channel.
  keep_pattern <- paste0(kanal, "|nimetus|identifikaator|allasutus|sihtgrupp|tegevusvaldkond|teenusetyyp|makse")
  kanal_df <- data[, grepl(keep_pattern, names(data))]
  # Columns that stay fixed (ids) while melting.
  id_cols <- grep("nimetus|identifikaator|allasutus|sihtgrupp|tegevusvaldkond|teenusetyyp|makse|link",
                  names(kanal_df), value = TRUE)
  # Only the shared meta columns matched: this channel has no statistics.
  if (length(id_cols) <= 7) {
    return(NULL)
  }
  pikk <- melt(kanal_df, id = id_cols)
  # Rename to channel-neutral names (the melted channel columns carry
  # channel-specific names that would otherwise block rbind later).
  names(pikk) <- c("nimetus", "identifikaator", "sihtgrupp", "tegevusvaldkond",
                   "teenusetyyp", "allasutus", "makse", "link", "variable", "value")
  pikk
}
# Example: melt the e-self-service channel.
# NOTE(review): this calls meltimine(), not the meltimine2() defined above --
# meltimine() is presumably defined earlier in the full script; verify.
iseteen=meltimine("E.iseteenindus.", data=dataFlat2014)
# Next, adapt the tidy-up function as well.
# Tidy one yearly wide data set: strip the year prefix `eemalda` from the
# column names, melt every service channel into long format with
# meltimine2(), and derive `kanal` (channel) and `naitaja` (indicator)
# columns from the melted variable names. Rows without a link are dropped.
korrastaja2 <- function(andmed, eemalda) {
  library(reshape2)
  # `eemalda` is the column-name prefix (regex) to strip, e.g. "X2014.".
  names(andmed) <- gsub(pattern = eemalda, "", names(andmed))
  # Melt each channel separately; meltimine2() returns NULL for channels
  # without statistics and rbind() silently drops those NULLs.
  kanalid <- c("Veebileht...portaal.", "E.iseteenindus.", "Eesti.ee.",
               "Nutirakendus.", "Digitelevisioon.", "E.post.", "Tekstisõnum.",
               "Telefon.", "Faks.", "Post.", "Letiteenus", "Kliendi.juures.")
  koos <- do.call(rbind, lapply(kanalid, function(k) meltimine2(k, data = andmed)))
  # Collapse the extra dots inside channel names so that splitting on "."
  # below yields exactly <channel>.<indicator> (".ee." in Eesti.ee would
  # otherwise break the split).
  koos$variable <- gsub(".ee.", ".", as.character(koos$variable), fixed = TRUE)
  koos$variable <- gsub("E.iseteenindus", "Eiseteenindus", as.character(koos$variable), fixed = TRUE)
  koos$variable <- gsub("E.post", "Epost", as.character(koos$variable), fixed = TRUE)
  koos$variable <- gsub("Veebileht...portaal", "Veebileht", as.character(koos$variable), fixed = TRUE)
  stat <- gsub("Kliendi.juures", "Kliendijuures", as.character(koos$variable), fixed = TRUE)
  # Split "<channel>.<indicator>" on the literal dot and spread the pieces
  # over two columns (each split result becomes a data-frame column, then
  # the frame is transposed so rows line up with `koos`).
  tykid <- as.data.frame(t(as.data.frame(strsplit(stat, split = "\\."))))
  koos$kanal <- tykid[, 1]
  koos$naitaja <- tykid[, 2]
  # Drop the empty placeholder rows that have no link.
  koos[!is.na(koos$link), ]
}
# Tidy both yearly data sets with the helper functions defined above.
puhas2014=korrastaja2(dataFlat2014, "X2014.")
puhasEmpty=korrastaja2(dataFlatEmpty, "empty.")
# Stack the two years into one long table.
andmed=rbind(puhas2014, puhasEmpty)
# Drop `variable`; it was only kept for checking.
andmed$variable=NULL
############################### HOW MANY SERVICES ARE PAID VS FREE, per agency
# Overall counts of paid/free services.
# NOTE(review): this section uses `andmedLai`, which is not created in this
# snippet -- presumably a wide form of `andmed` built elsewhere; verify.
table(andmedLai$makse)
# Counts broken down by agency.
makseAsutus=as.data.frame(table(andmedLai$makse, andmedLai$allasutus))
library(ggplot2)
# Bars per agency (Var2), faceted by payment status (Var1).
ggplot(makseAsutus, aes(x=Var2, y=Freq))+
geom_bar(stat="identity", fill="lightblue")+
facet_wrap(~Var1)+
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# The same data transposed: payment status on the x-axis, one facet per agency.
ggplot(makseAsutus, aes(x=Var1, y=Freq))+
geom_bar(stat="identity", fill="lightblue")+
facet_wrap(~Var2)+
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# The same thing as percentages.
library(dplyr)
# Within each agency, the share of paid vs free services.
makseAsutuseLoikes=makseAsutus %>%
group_by(Var2) %>%
mutate(n=sum(Freq)) %>%
group_by(Var1, add=TRUE) %>%
mutate(protsent = Freq/n)
# Plot the shares.
ggplot(makseAsutuseLoikes, aes(x=Var2, y=protsent))+
geom_bar(stat="identity", fill="lightblue")+
facet_wrap(~Var1)+
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
coord_flip()
ggplot(makseAsutuseLoikes, aes(x=Var1, y=protsent))+
geom_bar(stat="identity",fill="lightblue")+
facet_wrap(~Var2)+
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
####################### HOW MANY SERVICES ARE PAID, PER CHANNEL
|
3c3f8b46f793a930d3b2d57898982d67302510bb | f36014b74bd33b4967a70eeb7f2d59c48bf4882c | /plot1.R | 11b58c462e1a150e07c2dc0022c675580ae3072e | [] | no_license | JackieErbe/ExData_Plotting1 | 08a9293f1c47e5565e9bb3246a6ba70b5b27b970 | 87424b1b1719c0ed35a131c4b2787c88d75f8c42 | refs/heads/master | 2021-01-01T18:00:18.300295 | 2017-07-24T18:29:02 | 2017-07-24T18:29:02 | 98,221,504 | 0 | 0 | null | 2017-07-24T18:22:00 | 2017-07-24T18:22:00 | null | UTF-8 | R | false | false | 1,793 | r | plot1.R | plot1 <- function(filetype="png"){
## This function creates a histogram of "Global Active Power" in the
## specified bitmap format graphics devices "bmp", "jpeg", "png", or
## "tiff". If nothing is specified, a "png" file will be generated.
## Create temporary file and download zip file to it
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
temp)
## Unzip file and read table
unziped <- unz(temp, "household_power_consumption.txt")
power_data <- read.table(unziped, header=T, sep=";", na.strings = "?",
stringsAsFactors = F)
## Remove temp file
unlink(temp)
## Subset data to the dates 2007-02-01 and 2007-02-02
data_subset <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]
## Convert the Date and Time character variables to Date/Time class
## and place values into datetime column
data_subset$datetime <- strptime(paste(data_subset$Date, data_subset$Time),
format = "%d/%m/%Y %H:%M:%S")
## Create file of specified type at 480 x 480 pixels
fun_call <- get(filetype) ## Gets function from filetype name
fun_call(filename = paste("plot1.", filetype, sep = ""),
width = 480, height = 480, units = "px")
## Create histogram
hist(data_subset$Global_active_power,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
## Complete and close plot
dev.off()
} |
870aeb25a4d4c89ca0507387cb7268d25f57de74 | 0f51f612697b612c5e3439a2fc907213fec08065 | /lib/validatePredict.R | bf5c62ab0266d73ee74231a2895d0ea41093c135 | [] | no_license | ISS-Analytics/orgvalues | 73ee9d70885d69625caeaf1ebcc36afb9f67d193 | 360bb7371ac20879ae9a4a4314e5bd17ea9b33e9 | refs/heads/master | 2021-07-07T07:25:31.790000 | 2017-10-04T03:03:13 | 2017-10-04T03:03:13 | 104,955,306 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,904 | r | validatePredict.R | # validatePredict
# validatePredict
# Out-of-sample evaluation of a PLS path model against per-target linear
# models, using `reps` repetitions of `kfold`-fold cross-validation and
# reporting RMSE, MAPE and MAD per target indicator (plus composite-level
# out-of-sample R^2 for the PLS model).
#
# Args:
#   dataset       data frame holding all indicator (measurement) columns.
#   smMatrix      structural model matrix: column 1 = source latent,
#                 column 2 = target latent.
#   mmMatrix      measurement model matrix with "latent" and "measurement"
#                 columns mapping constructs to indicators.
#   maxIt         maximum PLS iterations (forwarded to PLSpredict/simplePLS).
#   stopCriterion PLS convergence criterion (forwarded).
#   kfold         number of cross-validation folds (default 10).
#   reps          number of repetitions over reshuffled data (default 10).
#
# Returns a list of reps-row metric matrices: PLSRMSE, PLSMAPE, PLSMAD,
# PLSR2, LMRMSE, LMMAPE, LMMAD.
#
# NOTE: depends on PLSpredict() and simplePLS() defined elsewhere in this
# package.
validatePredict <- function(dataset, smMatrix, mmMatrix, maxIt=300, stopCriterion=7,kfold=10, reps = 10){
  # Identify variables to be tested
  uniqueTarget <- unique(smMatrix[,2])
  # (items is built here but not used further in this function)
  items <- NULL
  for (i in seq_along(uniqueTarget)){
    items <- c(items, mmMatrix[mmMatrix[, "latent"] == uniqueTarget[i],"measurement"])
  }
  uniqueSource <- unique(smMatrix[,1])
  sources <- NULL
  for (i in seq_along(uniqueSource)){
    sources <- c(sources,mmMatrix[mmMatrix[,"latent"]==uniqueSource[i],"measurement"])
  }
  # BUG FIX: the original used ifelse(), which is vectorized over its
  # length-1 condition and therefore silently truncated the result to the
  # FIRST purely endogenous construct; a scalar if/else keeps them all.
  if (length(intersect(uniqueTarget, uniqueSource)) == 0) {
    lmtarget <- uniqueTarget
  } else {
    lmtarget <- setdiff(uniqueTarget, uniqueSource)
  }
  targets <- NULL
  for (i in seq_along(lmtarget)){
    targets <- c(targets, mmMatrix[mmMatrix[, "latent"] == lmtarget[i],"measurement"])
  }
  # Initialize matrices for prediction metrics (filled with NA; per-fold
  # sums in *SSE/*SAPE/*SAD, per-repetition results in the reps-row ones)
  # Initialize RMSE holders
  PLSRMSE <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  PLSSSE <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  LMRMSE <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  LMSSSE <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  # Initialize Rsquared holders
  PLSSSR <- matrix(,nrow=kfold,ncol=length(uniqueTarget),byrow =TRUE,dimnames = list(1:kfold,uniqueTarget))
  PLSSST <- matrix(,nrow=kfold,ncol=length(uniqueTarget),byrow =TRUE,dimnames = list(1:kfold,uniqueTarget))
  PLSRsquared <- matrix(,nrow=reps,ncol=length(uniqueTarget),byrow =TRUE,dimnames = list(1:reps,uniqueTarget))
  # Initialize predMAPE
  PLSSAPE <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  PLSMAPE <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  LMMAPE <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  LMSAPE <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  # Initialize predMAD
  PLSSAD <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  PLSMAD <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  LMMAD <- matrix(,nrow=reps,ncol=length(targets),byrow =TRUE,dimnames = list(1:reps,targets))
  LMSAD <- matrix(,nrow=kfold,ncol=length(targets),byrow =TRUE,dimnames = list(1:kfold,targets))
  # Perform repetitions
  for (x in seq_len(reps)) {
    # Randomly shuffle the data
    dataset <- dataset[sample(nrow(dataset)),]
    # Extract the target and non-target variables for the linear models
    independentMatrix <- dataset[,sources]
    dependentMatrix <- dataset[,targets]
    # Create kfold equally sized folds
    folds <- cut(seq(1,nrow(dataset)),breaks=kfold,labels=FALSE)
    # Perform k-fold cross validation
    for(i in seq_len(kfold)){
      # Segment the data by fold using the which() function
      testIndexes <- which(folds==i,arr.ind=TRUE)
      testingData <- dataset[testIndexes, ]
      trainingData <- dataset[-testIndexes, ]
      indepTestData <- independentMatrix[testIndexes, ]
      indepTrainData <- independentMatrix[-testIndexes, ]
      depTestData <- dependentMatrix[testIndexes, ]
      depTrainData <- dependentMatrix[-testIndexes, ]
      # PLS training model (trained on the fold's training split)
      testHolder <- PLSpredict(trainingData, testingData ,smMatrix, mmMatrix, maxIt, stopCriterion)
      # PLS model on the full data for collecting composite "actuals"
      PLS_actual_model <- simplePLS(dataset,smMatrix,mmMatrix,maxIt,stopCriterion)
      # PLS residuals and actuals holder matrices
      PLSactuals <- testHolder$testData[,targets]
      PLS_composite_actuals <- as.matrix(PLS_actual_model$fscores[testIndexes,uniqueTarget])
      PLS_composite_predictions <- as.matrix(testHolder$compositeScores[,uniqueTarget])
      PLSresiduals <- testHolder$residuals[,targets]
      # lm residuals and actuals holder matrices
      lmprediction <- matrix(,nrow=nrow(depTestData),ncol=length(targets),byrow =TRUE,dimnames = list(1:nrow(depTestData),targets))
      lmresidual <- matrix(,nrow=nrow(depTestData),ncol=length(targets),byrow =TRUE,dimnames = list(1:nrow(depTestData),targets))
      lmactual <- matrix(,nrow=nrow(depTestData),ncol=length(targets),byrow =TRUE,dimnames = list(1:nrow(depTestData),targets))
      # One linear model per target indicator, on the same fold split
      for(l in seq_along(targets)){
        trainLM <- lm(depTrainData[,l] ~ ., indepTrainData)
        lmprediction[,l] <- predict(trainLM, newdata = indepTestData)
        lmresidual[,l] <- lmprediction[,l] - depTestData[, l]
        lmactual[,l] <- depTestData[, l]
      }
      # Iterate over endogenous constructs to calculate out-of-sample R^2.
      # The column sums only cover all folds on the last iteration, so
      # PLSRsquared[x, ] is repeatedly overwritten and only the final
      # (complete) value per repetition survives.
      for(j in seq_along(uniqueTarget)){
        # Calculate SST and SSR
        PLSSST[i,j] <- sum((PLS_composite_actuals[,j] - mean(PLS_composite_actuals[,j]))^2)
        PLSSSR[i,j] <- sum((PLS_composite_actuals[,j] - PLS_composite_predictions[,j])^2)
        PLSRsquared[x,j] <- 1 - (sum(PLSSSR[,j])/sum(PLSSST[,j]))
      }
      # Iterate over target indicators, accumulating per-fold error sums
      for(j in seq_along(targets)){
        # Calculate SSE
        PLSSSE[i,j] <- sum(PLSresiduals[,j]^2)
        LMSSSE[i,j] <- sum(lmresidual[,j]^2)
        # Calculate SAPE
        # NOTE(review): APE divides by the observed values, so any actual
        # of exactly 0 yields Inf/NaN MAPE -- confirm targets are nonzero.
        PLSSAPE[i,j] <- sum((abs(PLSresiduals[,j]/PLSactuals[,j])))
        #PLSSAPE[i,j] <- sum((abs((mean(testHolder$testData[,j]) - testHolder$predictedMeasurements[,j])/mean(testHolder$testData[,j]))))
        #PLSSAPE[i,j] <- sum((abs((testHolder$residuals[,j])/mean(testHolder$testData[,j]))))
        LMSAPE[i,j] <- sum((abs(lmresidual[,j]/lmactual[,j])))
        # Calculate SAD
        PLSSAD[i,j] <- sum(abs(PLSresiduals[,j] - mean(PLSresiduals[,j])))
        LMSAD[i,j] <- sum(abs(lmresidual[,j] - mean(lmresidual[,j])))
      }
    }
    # Final calculations: fold sums divided by the total number of rows
    denom <- nrow(dataset)
    for (k in seq_along(targets)) {
      LMRMSE[x,k] <- sqrt((sum(LMSSSE[,k]))/denom)
      PLSRMSE[x,k] <- sqrt((sum(PLSSSE[,k]))/denom)
      LMMAPE[x,k] <- 100*(sum(LMSAPE[,k])/denom)
      PLSMAPE[x,k] <- 100*(sum(PLSSAPE[,k])/denom)
      LMMAD[x,k] <- sum(LMSAD[,k])/denom
      PLSMAD[x,k] <- sum(PLSSAD[,k])/denom
    }
  }
  validateResults <- list(PLSRMSE = PLSRMSE,
                          PLSMAPE = PLSMAPE,
                          PLSMAD = PLSMAD,
                          PLSR2 = PLSRsquared,
                          LMRMSE = LMRMSE,
                          LMMAPE = LMMAPE,
                          LMMAD = LMMAD)
  return(validateResults)
}
2dd6643475e6883147c26ccadc47cace16784074 | eb9374f38182a566512cb1c97cf6634ae28e271c | /man/map_gist.Rd | c34308fa380f7cd3d1273ed41d7aa31a0ee7d38b | [
"MIT"
] | permissive | Zwens/geojsonio | 1064b5ba3d02b11c4d3e0c2adfc267a96533947c | 50d99de0f5ec2f8a346d7af6da02ade5eef5abbf | refs/heads/master | 2020-08-16T08:14:07.786273 | 2019-10-14T18:14:27 | 2019-10-14T22:31:25 | 215,478,651 | 1 | 0 | NOASSERTION | 2019-10-16T06:57:04 | 2019-10-16T06:57:04 | null | UTF-8 | R | false | true | 5,100 | rd | map_gist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapgist.R
\name{map_gist}
\alias{map_gist}
\title{Publish an interactive map as a GitHub gist}
\usage{
map_gist(input, lat = "lat", lon = "long", geometry = "point",
group = NULL, type = "FeatureCollection", file = "myfile.geojson",
description = "", public = TRUE, browse = TRUE, ...)
}
\arguments{
\item{input}{Input object}
\item{lat}{Name of latitude variable}
\item{lon}{Name of longitude variable}
\item{geometry}{(character) Are polygons in the object}
\item{group}{(character) A grouping variable to perform grouping for polygons - doesn't
apply for points}
\item{type}{(character) One of FeatureCollection or GeometryCollection}
\item{file}{File name to use to put up as the gist file}
\item{description}{Description for the GitHub gist, or leave to default (=no description)}
\item{public}{(logical) Want gist to be public or not? Default: TRUE}
\item{browse}{If TRUE (default) the map opens in your default browser.}
\item{...}{Further arguments passed on to \code{\link[httr]{POST}}}
}
\description{
There are two ways to authorize to work with your GitHub account:
\itemize{
\item PAT - Generate a personal access token (PAT) at
\url{https://help.github.com/articles/creating-an-access-token-for-command-line-use} and
record it in the GITHUB_PAT envar in your \code{.Renviron} file.
\item Interactive - Interactively login into your GitHub account and authorise with OAuth.
}
Using the PAT method is recommended.
Using the gist_auth() function you can authenticate separately first, or if you're not
authenticated, this function will run internally with each function call. If you have a
PAT, that will be used, if not, OAuth will be used.
}
\examples{
\dontrun{
# From file
file <- "myfile.geojson"
geojson_write(us_cities[1:20, ], lat='lat', lon='long', file = file)
map_gist(file=as.location(file))
# From SpatialPoints class
library("sp")
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPoints(cbind(x,y))
map_gist(s)
# from SpatialPointsDataFrame class
x <- c(1,2,3,4,5)
y <- c(3,2,5,1,4)
s <- SpatialPointsDataFrame(cbind(x,y), mtcars[1:5,])
map_gist(s)
# from SpatialPolygons class
poly1 <- Polygons(list(Polygon(cbind(c(-100,-90,-85,-100),
c(40,50,45,40)))), "1")
poly2 <- Polygons(list(Polygon(cbind(c(-90,-80,-75,-90),
c(30,40,35,30)))), "2")
sp_poly <- SpatialPolygons(list(poly1, poly2), 1:2)
map_gist(sp_poly)
# From SpatialPolygonsDataFrame class
sp_polydf <- as(sp_poly, "SpatialPolygonsDataFrame")
map_gist(sp_polydf)
# From SpatialLines class
c1 <- cbind(c(1,2,3), c(3,2,2))
c2 <- cbind(c1[,1]+.05,c1[,2]+.05)
c3 <- cbind(c(1,2,3),c(1,1.5,1))
L1 <- Line(c1)
L2 <- Line(c2)
L3 <- Line(c3)
Ls1 <- Lines(list(L1), ID = "a")
Ls2 <- Lines(list(L2, L3), ID = "b")
sl1 <- SpatialLines(list(Ls1))
sl12 <- SpatialLines(list(Ls1, Ls2))
map_gist(sl1)
# From SpatialLinesDataFrame class
dat <- data.frame(X = c("Blue", "Green"),
Y = c("Train", "Plane"),
Z = c("Road", "River"), row.names = c("a", "b"))
sldf <- SpatialLinesDataFrame(sl12, dat)
map_gist(sldf)
# From SpatialGrid
x <- GridTopology(c(0,0), c(1,1), c(5,5))
y <- SpatialGrid(x)
map_gist(y)
# From SpatialGridDataFrame
sgdim <- c(3,4)
sg <- SpatialGrid(GridTopology(rep(0,2), rep(10,2), sgdim))
sgdf <- SpatialGridDataFrame(sg, data.frame(val = 1:12))
map_gist(sgdf)
# from data.frame
## to points
map_gist(us_cities)
## to polygons
head(states)
map_gist(states[1:351, ], lat='lat', lon='long', geometry="polygon", group='group')
## From a list
mylist <- list(list(lat=30, long=120, marker="red"),
list(lat=30, long=130, marker="blue"))
map_gist(mylist, lat="lat", lon="long")
# From a numeric vector
## of length 2 to a point
vec <- c(-99.74,32.45)
map_gist(vec)
## this requires numeric class input, so inputting a list will dispatch on the list method
poly <- c(c(-114.345703125,39.436192999314095),
c(-114.345703125,43.45291889355468),
c(-106.61132812499999,43.45291889355468),
c(-106.61132812499999,39.436192999314095),
c(-114.345703125,39.436192999314095))
map_gist(poly, geometry = "polygon")
# From a json object
(x <- geojson_json(c(-99.74,32.45)))
map_gist(x)
## another example
map_gist(geojson_json(us_cities[1:10,], lat='lat', lon='long'))
# From a geo_list object
(res <- geojson_list(us_cities[1:2,], lat='lat', lon='long'))
map_gist(res)
# From SpatialPixels
pixels <- suppressWarnings(SpatialPixels(SpatialPoints(us_cities[c("long", "lat")])))
summary(pixels)
map_gist(pixels)
# From SpatialPixelsDataFrame
pixelsdf <- suppressWarnings(
SpatialPixelsDataFrame(points = canada_cities[c("long", "lat")], data = canada_cities)
)
map_gist(pixelsdf)
# From SpatialRings
library("rgeos")
r1 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="1")
r2 <- Ring(cbind(x=c(1,1,2,2,1), y=c(1,2,2,1,1)), ID="2")
r1r2 <- SpatialRings(list(r1, r2))
map_gist(r1r2)
# From SpatialRingsDataFrame
dat <- data.frame(id = c(1,2), value = 3:4)
r1r2df <- SpatialRingsDataFrame(r1r2, data = dat)
map_gist(r1r2df)
}
}
|
b7b1dca200ba0b9a63c5900d14def767901c465a | e0f0495031af791a7088ec84c55fa8bd00058761 | /tidyseq.R | cc355d2ad49245ad89cdff5ee95ae26723228356 | [] | no_license | sonicyyouth/snip | abe04cdfffe22aa51630f4a04eba6d0089aacd34 | 4d33f277e37c87a0b609d5edd005a3f8c26807c5 | refs/heads/master | 2021-01-20T06:59:48.596366 | 2015-09-20T15:15:14 | 2015-09-20T15:15:14 | 42,827,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,231 | r | tidyseq.R | setwd("~/Dropbox/Rworkspace/projects/snp")
library(survival)
library(tidyr)
library(dplyr)
library(limma)
# Kaplan-Meier plot for one gene, dichotomized at the cohort-median
# expression level.
#
# Args:
#   expset  Biobase ExpressionSet whose pData carries `time` (days) and
#           `vitalstatus` (event indicator usable by Surv()).
#   entz    Entrez id, used as a row index into exprs(expset).
#   gene    gene symbol, used only in the plot title.
#   set     cohort label for the title (default "TCGA GBM").
# Returns: a ggplot object with the two KM curves and a log-rank P / HR
#          annotation.
# NOTE: needs hazardr() (defined elsewhere in this script) and ggsurv()
# (sourced below). The unused coxph() fit of the original was removed.
genesurv = function(expset, entz, gene, set = "TCGA GBM") {
  source("~/Dropbox/Rworkspace/r scripts/ggsurv.R")
  # Split samples at the median expression of the gene.
  gbmgene = exprs(expset)[entz, ]
  gbmgene1 = median(gbmgene)
  gbmgene = ifelse(gbmgene <= gbmgene1, "Low expression", "High expression")
  # KM fit and log-rank test between the two groups.
  sv = Surv(expset$time, as.numeric(expset$vitalstatus))
  surv = survfit(sv ~ gbmgene)
  logr = survdiff(sv ~ gbmgene)
  # hazardr() returns c(upper95, HR, lower95); sorting gives
  # (lower, HR, upper) because the HR lies between its CI bounds.
  hr = hazardr(logr)
  hr = hr[order(hr)]
  # BUG FIX: round(p, 2) displayed every p < 0.005 as "0"; signif() keeps
  # two significant digits for arbitrarily small p-values.
  pval = signif(1 - pchisq(logr$chisq, 1), 2)
  lb = paste("Log-rank P = ", pval, "\nHR = ", round(hr[2], 2), "(",
             round(hr[1], 2), "-", round(hr[3], 2), ")", sep = "")
  tl = paste("Survival curve of ", gene, " in ", set, sep = "")
  f0 = ggsurv(surv) + theme_classic() +
    labs(list(title = tl, x = "Time (days)", y = "proportion survival/surviving")) +
    annotate("text", label = lb, x = 0.72 * max(expset$time), y = 0.6, size = 6) +
    theme(legend.position = c(.7, .8), text = element_text(size = 16),
          legend.title = element_blank(), legend.text = element_text(size = 20))
  return(f0)
}
load("../glioma_clin/tcgagbm133entrezmr.rda")
RNAexp <- read.table(file = "../tcgafile/LGG.rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt", head = T, stringsAsFactors = F,check.names = F, sep = "\t")
View(RNAexp)
setwd("../tcgafile")
fil <- dir("./Level_3")
## fil <- paste("./Level_3/",fil, sep = "")
filename <- read.table(file = "FILE_SAMPLE_MAP.txt",head = T,stringsAsFactors = F, sep = "\t")
rppaall <- read.table(file = paste("./Level_3/",fil[1], sep = ""),head = T, stringsAsFactors = F,sep = "\t")
xd = setdiff(x[,1],x400[,1])
colnames(rppaall)[2] = filename[match(fil[1],filename[,1]),2]
# Read every Level-3 RPPA file and relabel its value column with the TCGA
# sample barcode looked up in FILE_SAMPLE_MAP (depends on `fil` and
# `filename` created above). seq_along() is safe when the directory is
# empty, unlike the original 1:length(fil); a dead assignment to a local
# `dir` path was removed and partial argument names were spelled out.
ppaall <- lapply(seq_along(fil), function(x) {
  rppa <- read.table(paste("./Level_3/", fil[x], sep = ""),
                     header = TRUE, stringsAsFactors = FALSE, sep = "\t")
  # file name -> sample barcode for the single data column
  colnames(rppa)[2] <- filename[match(fil[x], filename[, 1]), 2]
  return(rppa)
})
pakt = lapply(ppaall,function(x)x[grep("Akt",x[,1]),])
pakta = do.call(cbind,pakt)
chk = sapply(seq(1,ncol(pakta),2),function(x)all(pakta[,x] == pakta[,1]))
length(chk) == sum(chk)
pakta = pakta[,c(1,seq(2,ncol(pakta),2))]
View(pakta)
save(pakta, file = "lggpakt.rda")
ls()
colnames(pakta) = substr(colnames(pakta), 1,15)
colnames(lggsnp) = substr(colnames(lggsnp), 1,15)
xy = intersect(colnames(pakta),colnames(lggsnp))
length(xy)
grep("rs10500560",lggsnp[,1])
lggsnp[grep("rs1050560",lggsnp[,1]),1]
length(ppaall)
chk = lapply(1:length(ppaall),function(x)all(ppaall[[x]][,1] == ppaall[[1]][,1]))
rppall <- do.call(rbind,ppaall)
dim(rppall)
head(ppaall[[1]])
dim(rppaall)
xd = setdiff(x[,1],x400[,1])
xd1 = setdiff(x400[,1],x[,1])
xd = setdiff(x[,1],x400[,1])
x = read.csv(file = "lggclin1.csv", sep = ",", check.names = F,stringsAsFactors = F)
x[,171] = gsub(",", ";",x[,171])
write.csv(x,file = "lggclin1.csv")
View(x)
rownames(x) = toupper(x[,6])
x = x[,-1]
x[1,11]
table(x[,6])
x[1,15:20]
x1 = x[,c(1,6,9,12,15,18,41,62,63,81,83:85,92,95:98,102,104,105,109,111:117,148,158,163:166,170:172)]
write.csv(x1,file = "lggclinsimple.csv")
View(x1)
getwd()
glmclin = read.csv(file = "gbmclin1.csv",head = T, stringsAsFactors = F, check.names = F)
View(glmclin)
glmrna = read.csv(file = "primyglmrna.csv",head = T, stringsAsFactors = F, quote = "",check.names = F)
View(glmrna)
colnames(glmrna) = substr(colnames(glmrna),1,15)
glmclin = glmclin[which(glmclin[,3] != ""),]
glmclin[,3] = toupper(glmclin[,3])
rownames(glmclin) = glmclin[,3]
write.csv(glmclin,file = "gbmclin1.csv")
rownames(glmrna) = glmrna[,1]
xy = intersect(colnames(glmrna), rownames(glmclin))
length(xy)
grep('CDC20',rownames(glmrna))
rownames(glmrna)[grep('MYC',rownames(glmrna))]
glmclin[which(glmclin[,"disease"] == "gbm"),"grade"] = "g4"
table(glmrna[3399,xy],glmclin[xy,"histology"])
myccdc = data.frame(glmclin[xy,],myc = log2(as.numeric(glmrna[11402,xy])),cdc20 = log2(as.numeric(glmrna[3399,xy])))
myccdc = myccdc[which(myccdc[,"grade"] != ""),]
## colnames(glmclin)[2] = "ages"
## colnames(glmclin)[4] = "cqcfhistology"
## colnames(glmclin)[20] = "gender"
## colnames(glmclin)[30] = "grade"
## colnames(glmclin)[22] = "histology"
View(myccdc)
library(ggplot2)
f1 = ggplot(myccdc,aes(x = grade,y = myc,colors = grade))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = grade),position = position_jitter(width = 0.2))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("MYC expression")
f2 = ggplot(myccdc,aes(x = histology,y = myc,colors = histology))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = histology),position = position_jitter(width = 0.2))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("MYC expression")
f3 = ggplot(myccdc,aes(x = grade,y = cdc20,colors = grade))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = grade),position = position_jitter(width = 0.2))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("CDC20 expression")
f4 = ggplot(myccdc,aes(x = histology,y = cdc20,colors = histology))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = histology),position = position_jitter(width = 0.2))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("CDC20 expression")
f1
f2
f3
f4
pdf("f1.pdf")
f1
dev.off()
pdf("f2.pdf")
f2
dev.off()
pdf("f3.pdf")
f3
dev.off()
pdf("f4.pdf")
f4
dev.off()
t.test(myccdc$cdc20~myccdc$grade)
# Hazard ratio of group 1 vs group 2 from a survdiff() result (uses only
# the $obs and $exp components), with a 95% CI computed on the log scale.
# Returns c(upper95, HR, lower95) -- note the order; callers typically
# sort() the result before use.
hazardr = function(x) {
  ratio = (x$obs[1] / x$exp[1]) / (x$obs[2] / x$exp[2])
  # Standard error of log(HR) approximated by sqrt(1/E1 + 1/E2).
  half_width = qnorm(0.975) * sqrt(1 / x$exp[1] + 1 / x$exp[2])
  c(exp(log(ratio) + half_width), ratio, exp(log(ratio) - half_width))
}
source("~/Dropbox/Rworkspace/r scripts/ggsurv.R")
myc = myccdc[,"myc"]
myc1 = median(myc)
myc[myc <= myc1] = 1
myc[myc>myc1] = 2
myc = ifelse(myc == 1, "Low expression","High expression")
sv = Surv(myccdc$time,as.numeric(myccdc$vitual_status))
surv = survfit(sv~myc)
logr = survdiff(sv ~ myc)
cox = coxph(sv ~ myc)
hr = hazardr(logr)
f5 = ggsurv(surv)+theme_classic() + labs(list(title = "Survival curve of MYC in TCGA Glioma",x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = "Log-rank P = 0.00006\nHR = 0.55(0.41-0.74)",x = 4500, y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 20),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f5.pdf")
f5
dev.off()
cdc = myccdc[,"cdc20"]
cdc1 = median(cdc)
cdc[cdc <= cdc1] = 1
cdc[cdc>cdc1] = 2
cdc = ifelse(cdc == 1, "Low expression","High expression")
sv = Surv(myccdc$time,as.numeric(myccdc$vitual_status))
surv = survfit(sv~cdc)
logr = survdiff(sv ~ cdc)
cox = coxph(sv ~ cdc)
hr = hazardr(logr)
f6 = ggsurv(surv)+theme_classic() + labs(list(title = "Survival curve of CDC20 in TCGA Glioma",x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = "Log-rank P = 0.00006\nHR = 0.55(0.41-0.74)",x = 4500, y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 20),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f6.pdf")
f6
dev.off()
load("../glioma_clin/tcgagbm133entrezmr.rda")
gbmmyc = exprs(gbm133)["4609",]
gbmmyc1 = median(gbmmyc)
gbmmyc[gbmmyc <= gbmmyc1] = 1
gbmmyc[gbmmyc>gbmmyc1] = 2
gbmmyc = ifelse(gbmmyc == 1, "Low expression","High expression")
sv = Surv(gbm133$time,as.numeric(gbm133$vitalstatus))
surv = survfit(sv~gbmmyc)
logr = survdiff(sv ~ gbmmyc)
cox = coxph(sv ~ gbmmyc)
hr = hazardr(logr)
f7 = ggsurv(surv)+theme_classic() + labs(list(title = "Survival curve of MYC in TCGA GBM",x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = "Log-rank P = 0.36\nHR = 0.91(0.75-1.10)",x = 2800, y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 20),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f7.pdf")
f7
dev.off()
gbmcdc = exprs(gbm133)["991",]
gbmcdc1 = median(gbmcdc)
gbmcdc[gbmcdc <= gbmcdc1] = 1
gbmcdc[gbmcdc>gbmcdc1] = 2
gbmcdc = ifelse(gbmcdc == 1, "Low expression","High expression")
sv = Surv(gbm133$time,as.numeric(gbm133$vitalstatus))
surv = survfit(sv~gbmcdc)
logr = survdiff(sv ~ gbmcdc)
cox = coxph(sv ~ gbmcdc)
hr = hazardr(logr)
f8 = ggsurv(surv)+theme_classic() + labs(list(title = "Survival curve of CDC20 in TCGA GBM",x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = "Log-rank P = 0.97\nHR = 1.003(0.83-1.22)",x = 2800, y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 20),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f8.pdf")
f8
dev.off()
gbmmyccdc = data.frame(sampleNames(gbm133),cdc = gbmcdc,myc = gbmmyc)
View(gbmmyccdc)
gbmmyccdc$both = 1
gbmmyccdc[which(gbmmyccdc$cdc == "High expression" & gbmmyccdc$myc == "Low expression"),"both"] = 3
gbmmyccdc[which(gbmmyccdc$myc == "High expression" & gbmmyccdc$cdc == "Low expression"),"both"] = 2
gbmmyccdc[which(gbmmyccdc$myc == "High expression" & gbmmyccdc$cdc == "High expression"),"both"] = 4
table(gbmmyccdc$both)
sv = Surv(gbm133$time,as.numeric(gbm133$vitalstatus))
surv = survfit(sv~gbmmyccdc$both)
logr = survdiff(sv ~ gbmmyccdc$both)
cox = coxph(sv ~ gbmmyccdc$both)
hr = hazardr(logr)
f10 = ggsurv(surv)+theme_classic() + labs(list(title = "Survival curve of CDC20 in TCGA GBM",x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = "Log-rank P = 0.97\nHR = 1.003(0.83-1.22)",x = 2800, y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 20),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f10.pdf")
f10
dev.off()
sampleNames(gbm133)[1:10]
xy = intersect(sampleNames(gbm133), rownames(myccdc))
length(xy)
pdf("f11.pdf")
plot(exprs(gbm133)["4609",xy],myccdc[xy,"myc"])
dev.off()
pdf("f12.pdf")
plot(exprs(gbm133)["4609",xy],exprs(gbm133)["991",xy])
dev.off()
cor.test(exprs(gbm133)["4609",xy],exprs(gbm133)["991",xy])
xy = intersect(sampleNames(gbm133),colnames(gbmmh27))
cyb5mhexp = data.frame(sample = xy,cg03826976 = gbmmh27["cg03826976",xy],CYB5R2 = exprs(gbm133)["51700",xy])
m = lm(CYB5R2~cg03826976,data = cyb5mhexp)
f14 = ggplot(cyb5mhexp,aes(x = cg03826976,y = CYB5R2))+geom_point() + geom_abline(aes(intercept = coef(m)[1],slope = coef(m)[2]))+theme_classic() +annotate("text", label = "Linear regression P=0.0004",x = 0.6, y = 9,size = 6.5)+theme(text = element_text(size = 18))+labs(x = "cg03826976 methylation",y = "CYB5R2 expression")
pdf("figure14.pdf",width = 5,height = 5)
f14
dev.off()
rembanno <- read.csv(file = "rembanno.csv", head = T, stringsAsFactors = F,check.names = F)
library(org.Hs.eg.db)
genesynb = AnnotationDbi::select(org.Hs.eg.db, keys=c('CDC20','MYC'), columns=c("SYMBOL","ENTREZID"), keytype="SYMBOL")
genesynb$probe = rembanno[match(genesynb$ENTREZID, rembanno$Entrez),"Composite Element Name"]
rownames(genesynb) = genesynb$SYMBOL
load("/home/liu/Documents/bigfile/rembrandt_caArray_eset.rda")
rmbpda = pData(rembcarma)
View(pda)
cybclass = rembcarma[,rembcarma$disease != ""]
cybclass = cybclass[,cybclass$disease != "MIXED"]
cybclass = cybclass[,cybclass$disease != "UNCLASSIFIED"]
cybclass = cybclass[,cybclass$disease != "UNKNOWN"]
table(rmbpda[which(rmbpda$disease == "GBM"),"grading"])
table(rmbpda$grading, rmbpda$disease)
table(rmbpda$disease)
rmbpda[which(rmbpda$disease == "GBM"),"grading"] = "Grade 4"
rmbpda[which(rmbpda$disease == "NON_TUMOR"),"grading"] = "NON_TUMOR"
table(rmbpda[which(rmbpda$disease == "NON_TUMOR"),"grading"])
pData(rembcarma) = rmbpda
cybgrading = rembcarma[,rembcarma$grading != ""]
mycdc = data.frame(sample = sampleNames(cybclass),class =cybclass$disease, "MYC"= exprs(cybclass)[genesynb["MYC","probe"],], "CDC20" = exprs(cybclass)[genesynb["CDC20","probe"],])
mycdcg = data.frame(sample = sampleNames(cybgrading),grading =cybgrading$grading, "MYC"= exprs(cybgrading)[genesynb["MYC","probe"],], "CDC20" = exprs(cybgrading)[genesynb["CDC20","probe"],])
# --- Figures f14-f17: MYC and CDC20 expression by histology and by grade ---
# NOTE: `f14` below overwrites the methylation figure object of the same name
# (that one was already written to figure14.pdf above).
# FIX: the original inspected `cyb5g`, which is never defined in this script;
# the per-sample grading data frame built above is `mycdcg`.
table(mycdcg$grading)
#cyb5c = cyb5c %>% filter(class %in% c("NON_TUMOR","ASTROCYTOMA","OLIGODENDROGLIOMA","GBM"))
# aes(colors = ...) is not a standard ggplot2 aesthetic and is ignored; the
# point colouring comes from geom_jitter's aes(color = ...).
f14 = ggplot(mycdc,aes(x = class,y = MYC,colors = class))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = class),position = position_jitter(width = 0.2))+scale_x_discrete(limits = c("NON_TUMOR","ASTROCYTOMA","OLIGODENDROGLIOMA","GBM"),breaks = c("NON_TUMOR","ASTROCYTOMA","OLIGODENDROGLIOMA","GBM"),labels = c("NON TUMOR","A/AA","O/AO","GBM"))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("MYC expression")
f15 = ggplot(mycdcg,aes(x = grading,y = MYC,colors = grading))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = grading),position = position_jitter(width = 0.2))+scale_x_discrete(limits = c("NON_TUMOR","Grade 2","Grade 3","Grade 4"),breaks = c("NON_TUMOR","Grade 2","Grade 3","Grade 4"),labels = c("NON TUMOR","Grade II","Grade III","Grade IV"))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 10))+ylab("MYC expression")+xlab(NULL)
pdf("f14.pdf")
f14
dev.off()
pdf("f15.pdf")
f15
dev.off()
f16 = ggplot(mycdc,aes(x = class,y = CDC20,colors = class))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = class),position = position_jitter(width = 0.2))+scale_x_discrete(limits = c("NON_TUMOR","ASTROCYTOMA","OLIGODENDROGLIOMA","GBM"),breaks = c("NON_TUMOR","ASTROCYTOMA","OLIGODENDROGLIOMA","GBM"),labels = c("NON TUMOR","A/AA","O/AO","GBM"))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("CDC20 expression")
f17 = ggplot(mycdcg,aes(x = grading,y = CDC20,colors = grading))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = grading),position = position_jitter(width = 0.2))+scale_x_discrete(limits = c("NON_TUMOR","Grade 2","Grade 3","Grade 4"),breaks = c("NON_TUMOR","Grade 2","Grade 3","Grade 4"),labels = c("NON TUMOR","Grade II","Grade III","Grade IV"))+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 10))+ylab("CDC20 expression")+xlab(NULL)
pdf("f16.pdf")
f16
dev.off()
pdf("f17.pdf")
f17
# FIX: the original had `Gdev.off()` (typo), which errors and leaves the PDF
# graphics device open.
dev.off()
# --- TCGA GBM/LGG mutation and copy-number data exploration ---
# single-sample MAF from the Broad GDAC Firehose dump (tab-separated)
x = read.table(file = "../tcgafile/gdac.broadinstitute.org_GBMLGG.Mutation_Packager_Raw_Calls.Level_3.2015060100.0.0/TCGA-02-0003-01.maf.txt", header = T, sep = "\t")
View(x)
dim(x)
write.csv(x, file = "muttest.csv")
# gene x sample mutation matrix (UCSC Xena-style genomicMatrix)
x1 = read.csv(file = "../tcgafile/TCGA_GBM_mutation_broad_gene-2015-02-24/genomicMatrix.csv", header = T)
write.csv(x1,file = "muttest1.csv")
# segmented SNP6 copy-number calls for GBM+LGG
glmcna = read.table(file = "../tcgafile/GBMLGG.snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.seg.txt", head = T, sep = "\t",quote = "")
dim(glmcna)
head(glmcna)
# per-gene CNA tables computed earlier (glmgenecn / glmgenecnall with columns
# sample, gene, segmean -- presumably; confirm against the .rda contents)
load("glioma_gene_cna.rda")
load("glioma_gene_cnaall.rda")
# gene 1029 = CDKN2A (p16) Entrez ID
p16 = glmgenecnall %>% filter(gene == "1029")
sum(is.na(p16$segmean))
hist(p16$segmean)
# TCGA barcode positions 14-15 encode sample type: 01 = primary tumor,
# 02 = recurrent tumor
p16 = p16[which(substr(p16$sample,14,15) == "01"),]
# segmean < -1 is used throughout as the deep-deletion cutoff
sum(p16$segmean < -1)
p16in = glmgenecn %>% filter(gene == "1029")
p16in = p16in[which(substr(p16in$sample,14,15) == "01" |substr(p16in$sample,14,15) == "02"),]
p16in1 = p16in[which(substr(p16in$sample,14,15) == "01"),]
# patient-level barcode = first 12 characters
rownames(p16) = substr(p16$sample,1,12)
length(unique(substr(p16in$sample,1,12)))
# --- Survival figures f18/f19: MYC and CDC20 within p16-deleted GBM ---
gbmrn = exprs(gbm133)
# samples with both expression and CDKN2A copy-number data
xy = intersect(rownames(p16in1),colnames(gbmrn))
# split matched samples into CDKN2A-deleted (segmean <= -1) vs non-deleted
xydel = rownames(p16in1[which(p16in1[,"segmean"] <= -1 & rownames(p16in1) %in% xy),])
xyndel = rownames(p16in1[which(p16in1[,"segmean"] > -1 & rownames(p16in1) %in% xy),])
expset = gbm133[,xydel]
# scratch assignments, presumably for stepping through genesurv() by hand;
# genesurv() itself is defined elsewhere in this project
entz = "4609"
gene= "MYC"
set= "TCGA p16 del GBM"
file = "f18"
# MYC (Entrez 4609) survival within the p16-deleted subset
f18 = genesurv(expset = expset,entz= "4609",gene = "MYC",set = "TCGA p16 del GBM")
pdf("f18.pdf")
f18
dev.off()
# CDC20 (Entrez 991) survival within the same subset
f19 = genesurv(expset = expset,entz= "991",gene = "CDC20",set = "TCGA p16 del GBM")
pdf("f19.pdf")
f19
dev.off()
# --- Figure f20: survival of p16-deleted vs non-deleted GBM ---
gbmcdnk = gbm133[,xy]
gbmcdnk$cdkn = ifelse(sampleNames(gbmcdnk) %in% xydel,"del","nondel")
# Kaplan-Meier + log-rank + Cox model on deletion status; hazardr() is a
# project helper (returns c(lower, HR, upper) -- presumably; confirm)
sv = Surv(gbmcdnk$time,as.numeric(gbmcdnk$vitalstatus))
surv = survfit(sv~gbmcdnk$cdkn)
logr = survdiff(sv ~ gbmcdnk$cdkn)
cox = coxph(sv ~ gbmcdnk$cdkn)
hr = hazardr(logr)
# log-rank p-value from the chi-square statistic (1 df for two groups)
pval = round(1- pchisq(logr$chisq,1),3)
lb = paste("Log-rank P = ",pval,"\nHR = ", round(hr[2],2),"(",round(hr[1],2),"-", round(hr[3],2), ")",sep = "")
tl = "Survival curve of p16 del in TCGA GBM"
# NOTE(review): the annotation x-position uses expset$time (the deleted-only
# subset) while the curve is for all of gbmcdnk -- confirm this is intended.
f0 = ggsurv(surv)+theme_classic() + labs(list(title = tl,x = "Time (days)",y = "proportion survival/surviving"))+ annotate("text", label = lb ,x = 0.72*max(expset$time), y = 0.6,size = 6) + theme(legend.position = c(.7,.8),text = element_text(size = 16),legend.title = element_blank(),legend.text = element_text(size = 20))
pdf("f20.pdf")
f0
dev.off()
# --- Figures f21/f22: MYC and CDC20 expression by p16 deletion status ---
# 991 = CDC20, 4609 = MYC (Entrez IDs)
gbmcdkn = data.frame(p16 = gbmcdnk$cdkn,cdc20 = exprs(gbmcdnk)["991",],myc = exprs(gbmcdnk)["4609",])
t.test(gbmcdkn$myc ~ gbmcdkn$p16)
# annotated P values are hard-coded from the t-tests above; re-check if the
# data change
f21 = ggplot(gbmcdkn,aes(x = p16,y = myc,colors = p16))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = p16),position = position_jitter(width = 0.2))+annotate("text", label = "P=0.006",x = 1.5, y = 12,size = 6.5)+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("MYC expression")
pdf("f21.pdf")
f21
dev.off()
t.test(gbmcdkn$cdc20 ~ gbmcdkn$p16)
f22 = ggplot(gbmcdkn,aes(x = p16,y = cdc20,colors = p16))+geom_boxplot(width = 0.7,outlier.shape = NA) + geom_jitter(aes(color = p16),position = position_jitter(width = 0.2))+annotate("text", label = "P=0.57",x = 1.5, y = 12,size = 6.5)+theme_classic()+theme(legend.position="none",text = element_text(size = 16),axis.text.x = element_text(size = 11))+ylab("CDC20 expression")
pdf("f22.pdf")
f22
dev.off()
# --- Deduplicate CDKN2A CNA calls per patient, then split by tumor grade ---
# patients with more than one aliquot/segment
du = substr(p16[duplicated(substr(p16$sample,1,12)),"sample"],1,12)
pdu = p16[which(substr(p16$sample,1,12) %in% du),]
# keep, per sample, the call with the largest |segmean| (most extreme CNA)
p16 = p16[order(abs(p16$segmean),decreasing = T),]
p16 = p16[!duplicated(p16$sample),]
p16$sample = substr(p16$sample,1,12)
rownames(p16) = p16$sample
# glmclin: glioma clinical table loaded elsewhere; column 1 = patient barcode
rownames(glmclin) = glmclin[,1]
# p16-deleted patients by WHO grade (g2/g3/g4)
p16g2del = intersect(rownames(p16[which(p16$segmean < -1),]),rownames(glmclin[which(glmclin$grade == "g2"),]))
p16g3del = intersect(rownames(p16[which(p16$segmean < -1),]),rownames(glmclin[which(glmclin$grade == "g3"),]))
p16g4del = intersect(rownames(p16[which(p16$segmean < -1),]),rownames(glmclin[which(glmclin$grade == "g4"),]))
## make the figure ji asked
# Build a per-patient table of CNA for CDKN2A (1029), PTEN (5728) and AKT3
# (10000), CDC20/MYC expression, and AKT3/PTEN mutation status.
load("glioma_gene_cnaall.rda")
ji = glmgenecnall %>% filter(gene == "1029" | gene == "5728" | gene == "10000")
# primary tumors only (barcode positions 14-15 == "01")
ji = ji[which(substr(ji$sample,14,15) == "01"),]
# keep the most extreme call per sample/gene, then reshape to one column per gene
# NOTE(review): with modern dplyr, distinct(sample, gene) drops segmean and the
# spread() would fail; this relies on old-dplyr behavior (or needs .keep_all=TRUE)
ji = ji[order(abs(ji$segmean),decreasing = T),]
ji = ji %>% distinct(sample,gene) %>% spread(gene,segmean)
ji$sample = substr(ji$sample,1,12)
rownames(ji) = ji$sample
gbmrn = exprs(gbm133)
xy = intersect(rownames(ji),colnames(gbmrn))
# 991 = CDC20, 4609 = MYC (Entrez IDs)
jiall = data.frame(ji[xy,], cdc20 = gbmrn["991",xy], myc = gbmrn["4609",xy])
# spread() named the CNA columns by Entrez ID; rename to gene symbols
colnames(jiall)[2:4] = c("akt3","p16","pten")
# mutation matrix: genes in column 1, samples in remaining columns
x1 = read.csv(file = "../tcgafile/TCGA_GBM_mutation_broad_gene-2015-02-24/genomicMatrix.csv", header = T,check.names = F)
colnames(x1) = substr(colnames(x1),1,12)
jimu = x1[which(x1[,1] == "AKT3" | x1[,1] == "PTEN"),]
# transpose so samples are rows; first row holds the gene names
jimu = t(jimu)
colnames(jimu) = jimu[1,]
jimu = jimu[-1,]
colnames(jimu) = paste(colnames(jimu),"mu",sep = "")
jimu = data.frame(sample = rownames(jimu),jimu,stringsAsFactors = F)
jif = left_join(jiall,jimu)
# drop column 7 (presumably a duplicated join column -- verify interactively)
jif = jif[,-7]
save(jif, file = "jifigure.rda")
## make figure
# Restrict to samples with mutation data, then discretize the CNA values into
# -1 (deletion), 0 (neutral), +1 (amplification) using a +/-1 segmean cutoff.
jifmu = jif[which(!is.na(jif$PTENmu)),]
# FIX: the original (a) used `$` where the logical AND operator `&` is needed
# (a syntax error), and (b) computed which() indices on the full `jif` table
# but applied them to the `jifmu` subset, mismatching rows.  Index jifmu
# consistently throughout.
jifmu[which(jifmu$akt3 <= -1),"akt3"] = -1
jifmu[which(jifmu$akt3 >= 1),"akt3"] = 1
jifmu[which(jifmu$akt3 < 1 & jifmu$akt3 > -1),"akt3"] = 0
jifmu[which(jifmu$p16 <= -1),"p16"] = -1
jifmu[which(jifmu$p16 >= 1),"p16"] = 1
jifmu[which(jifmu$p16 < 1 & jifmu$p16 > -1),"p16"] = 0
jifmu[which(jifmu$pten <= -1),"pten"] = -1
jifmu[which(jifmu$pten >= 1),"pten"] = 1
jifmu[which(jifmu$pten < 1 & jifmu$pten > -1),"pten"] = 0
sum(jif$akt3 > 1)
|
1829110778e8eca148aa6819581347e1d99f0482 | 257b5303c5276cf90bc5110c1785cc144076031f | /code/22_telomere_length_MR.R | 34887a6e13e616a4e9b70cb76c33901f9416762e | [] | no_license | xiaotianliao/mpn-gwas | 65bb7cc1f37b9c4af98a776128b7d91d06e4e5db | fb271abe98a43e140c2cdf8c200d556a477e00e0 | refs/heads/master | 2023-08-22T16:06:14.066422 | 2020-10-14T15:50:09 | 2020-10-14T15:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,060 | r | 22_telomere_length_MR.R | library(tidyverse)
library(data.table)
library(BuenColors)
library(cowplot)
library(TwoSampleMR)
library(MendelianRandomization)
library(MRPRESSO)
# Exposure data (pval < 1e-5): telomere-length GWAS summary statistics,
# formatted for TwoSampleMR (one row per SNP; column names below map the
# source file's columns onto TwoSampleMR's expected fields).
sumstats <- "file path to telomere length summary statistics"
telo_sumstats <- as.data.frame(fread(sumstats)) %>%
  mutate(Phenotype = "telomere_length")
telo_dat <- format_data(telo_sumstats,type="exposure",
                        snp_col = "rsid",
                        beta_col = "beta", se_col = "se",
                        eaf_col = "eaf",
                        effect_allele_col = "ea",
                        other_allele_col = "oa",
                        pval_col = "pvalue",
                        samplesize_col = "n_samples")
# Outcome data: MPN GWAS summary statistics, restricted to the exposure SNPs.
# samplesize.outcome = 838503 is the MPN GWAS sample size (hard-coded).
outcome_sumstats <- "file path to outcome summary statistics"
outcome_dat <- read_outcome_data(
  snps = telo_dat$SNP,
  filename = outcome_sumstats,
  sep = "\t",
  snp_col = "RSID",
  beta_col = "Effect",
  se_col = "StdErr",
  effect_allele_col = "risk",
  other_allele_col = "nonrisk",
  eaf_col = "RAF",
  pval_col = "pvalue"
) %>% mutate(outcome = "MPN",samplesize.outcome=838503)
# Harmonize data so exposure and outcome effects refer to the same allele
dat <- harmonise_data(
  exposure_dat = telo_dat,
  outcome_dat = outcome_dat
)
# LD clump to near-independent instruments (r^2 < 0.001)
dat_clumped <- clump_data(dat,clump_r2=0.001)
# Perform MR with three estimators (Egger, IVW, weighted median);
# mr_method_list() is printed only for reference
mr_method_list()
methods_to_use <- c("mr_egger_regression","mr_ivw","mr_weighted_median")
res <- mr(dat_clumped, method_list=methods_to_use)
res
# MR-PRESSO: global pleiotropy test plus outlier and distortion tests
mr_presso(data = dat_clumped,
          BetaOutcome = "beta.outcome", BetaExposure = "beta.exposure",
          SdOutcome = "se.outcome", SdExposure = "se.exposure",
          OUTLIERtest = TRUE, DISTORTIONtest = TRUE,
          NbDistribution = 1000, SignifThreshold = 0.05)
# Plots
# Local copy of TwoSampleMR's mr_scatter_plot(), presumably adapted to shrink
# point/line/error-bar sizes for publication.  Returns a list of ggplot
# objects, one per exposure/outcome pair.
# NOTE(review): blank_plot() is an internal TwoSampleMR function -- confirm it
# is accessible in this session.
mr_scatter_plot <- function(mr_results, dat)
{
  # dat <- subset(dat, paste(id.outcome, id.exposure) %in% paste(mr_results$id.outcome, mr_results$id.exposure))
  requireNamespace("ggplot2", quietly=TRUE)
  requireNamespace("plyr", quietly=TRUE)
  # one plot per (exposure, outcome) combination
  mrres <- plyr::dlply(dat, c("id.exposure", "id.outcome"), function(d)
  {
    d <- plyr::mutate(d)
    # need at least 2 usable SNPs to draw a regression line
    if(nrow(d) < 2 | sum(d$mr_keep) == 0)
    {
      return(blank_plot("Insufficient number of SNPs"))
    }
    d <- subset(d, mr_keep)
    # orient every SNP so its effect on the exposure is positive (flip the
    # outcome effect together with it)
    index <- d$beta.exposure < 0
    d$beta.exposure[index] <- d$beta.exposure[index] * -1
    d$beta.outcome[index] <- d$beta.outcome[index] * -1
    mrres <- subset(mr_results, id.exposure == d$id.exposure[1] & id.outcome == d$id.outcome[1])
    # intercept is 0 for all methods except MR Egger, whose fitted intercept
    # (b_i) is recomputed here
    mrres$a <- 0
    if("MR Egger" %in% mrres$method)
    {
      temp <- mr_egger_regression(d$beta.exposure, d$beta.outcome, d$se.exposure, d$se.outcome, default_parameters())
      mrres$a[mrres$method == "MR Egger"] <- temp$b_i
    }
    if("MR Egger (bootstrap)" %in% mrres$method)
    {
      temp <- mr_egger_regression_bootstrap(d$beta.exposure, d$beta.outcome, d$se.exposure, d$se.outcome, default_parameters())
      mrres$a[mrres$method == "MR Egger (bootstrap)"] <- temp$b_i
    }
    # scatter of per-SNP effects with +/-1 SE error bars and one fitted line
    # per MR method
    ggplot2::ggplot(data=d, ggplot2::aes(x=beta.exposure, y=beta.outcome)) +
      ggplot2::geom_errorbar(ggplot2::aes(ymin=beta.outcome-se.outcome, ymax=beta.outcome+se.outcome), colour="grey", width=0,size=0.35) +
      ggplot2::geom_errorbarh(ggplot2::aes(xmin=beta.exposure-se.exposure, xmax=beta.exposure+se.exposure), colour="grey", height=0,size=0.35) +
      ggplot2::geom_point(ggplot2::aes(text=paste("SNP:", SNP)),size=0.35) +
      ggplot2::geom_abline(data=mrres, ggplot2::aes(intercept=a, slope=b, colour=method),size=0.35, show.legend=TRUE) +
      ggplot2::scale_colour_manual(values=c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928")) +
      ggplot2::labs(colour="MR Test", x=paste("SNP effect on", d$exposure[1]), y=paste("SNP effect on", d$outcome[1])) +
      ggplot2::theme(legend.position="top", legend.direction="vertical") +
      ggplot2::guides(colour=ggplot2::guide_legend(ncol=2))
  })
  mrres
}
# p1: scatter plot of per-SNP effects with MR fit lines (first and only
# exposure/outcome pair); pretty_plot()/L_border() come from BuenColors
p1 <- mr_scatter_plot(res, dat_clumped)
p1 <- p1[[1]] + pretty_plot(fontsize=6) + L_border() + theme(legend.position ="none") +
  geom_hline(yintercept=0,linetype="dashed",size=0.35)
p1
# p2: forest plot of single-SNP (fixed-effect) estimates alongside the three
# combined methods
res_single <- mr_singlesnp(dat_clumped,single_method="mr_meta_fixed",
                           all_method=methods_to_use)
p2 <- mr_forest_plot(res_single)
p2 <- p2[[1]]+ pretty_plot(fontsize=6) + L_border() + theme(legend.position = "none")
# p3: leave-one-out IVW sensitivity analysis
res_loo <- mr_leaveoneout(dat_clumped,method = TwoSampleMR::mr_ivw)
p3 <- mr_leaveoneout_plot(res_loo)
p3 <- p3[[1]]+ pretty_plot(fontsize=6) + L_border() + theme(legend.position = "none")
# Save plots (note p1 uses cm while p2/p3 use the default inches)
if (TRUE){
  cowplot::ggsave2(p1, file="../output/telomere_length/mendelian_randomization/MR_scatterplot.pdf",
                   width=3.3, height=3.3,units="cm")
  cowplot::ggsave2(p2, file="../output/telomere_length/mendelian_randomization/MR_singleSNP.pdf",
                   width=2.5, height=3)
  cowplot::ggsave2(p3, file="../output/telomere_length/mendelian_randomization/MR_leave_one_out.pdf",
                   width=2.5, height=3)
}
7895c8b92311144318cc82d7b7410984526a68bc | 421366a39299a1a82bd0f2a42e667da7fc602b62 | /R/MergeFolderPredictions.R | 2136a17e552dadea7f9ad420199d4b8910eb0fc8 | [] | no_license | thomasferte/PredictCovidOpen | 363ef4cc9006696d5fa16c2ac4bdf9b58882a476 | 2468b0006a6f19310a9f8c7de6aa46979f19d627 | refs/heads/main | 2023-03-17T19:28:52.863817 | 2023-01-26T09:51:38 | 2023-01-26T09:51:38 | 496,952,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 817 | r | MergeFolderPredictions.R | #' MergeFolderPredictions
#'
#' @description Read the prediction and variable-importance results stored in
#' a folder and merge them.  The variable-importance table is normalised so
#' that the repeated parameter columns are stored only once, keyed by
#' \code{ID_Param}, which keeps the returned object small.
#'
#' @param folder The folder path
#'
#' @return A list with three elements: \code{df_pred} (predictions),
#' \code{df_varimp} (features importance, keyed by \code{ID_Param}) and
#' \code{df_groupVarimp} (one row per parameter combination).
#' @export
MergeFolderPredictions <- function(folder){
  folderResults <- PredVarimpFromFolder(folder = folder)
  predDf <- folderResults$df_pred
  varimpDf <- folderResults$df_varimp
  # every column except Features/Importance identifies a parameter combination
  paramCols <- colnames(varimpDf %>% select(-c(Features, Importance)))
  # lookup table: one row (and one ID_Param) per distinct combination
  paramLookup <- varimpDf %>%
    select(all_of(paramCols)) %>%
    distinct() %>%
    tibble::rowid_to_column(var = "ID_Param")
  # replace the repeated parameter columns with the compact ID_Param key
  slimVarimp <- varimpDf %>%
    right_join(paramLookup, by = paramCols) %>%
    select(-all_of(paramCols))
  return(list(df_pred = predDf,
              df_varimp = slimVarimp,
              df_groupVarimp = paramLookup))
}
|
6bd175dff4c476d8a03e8f3e4817361dd8cf0fc4 | 64a11deb0eb55a794d4da81ea7c8d8a52265755d | /tests/testthat/test_square.R | b3d3c30ceabd3281c6aef5bdd73ae8032b022d22 | [] | no_license | schivuku/travis-test | 681d5fce819279c13e319a569f9fa14bc0fcc435 | 4e52cb4fb74341fc90f621d5a21287b48fdd5352 | refs/heads/master | 2021-01-22T13:42:07.012023 | 2015-10-30T01:11:11 | 2015-10-30T01:11:11 | 45,220,588 | 0 | 0 | null | 2015-10-30T01:05:02 | 2015-10-30T01:05:02 | null | UTF-8 | R | false | false | 148 | r | test_square.R | context("square")
# square() should return the arithmetic square of its numeric input and raise
# an informative error (message matching "number") for non-numeric input.
test_that("square", {
  expect_equal(square(3), 9)
  expect_equal(square(5), 25)
  expect_error(square("a"), "number")
})
|
1df4fa18ef41cda069b6e9a78b97e71115871bbd | 5f992b73c05e4b2212e4e7ad62d7235747893bc1 | /run_analysis.R | 73126d52c6a1e92b29420cb972790a14a8193823 | [] | no_license | Subramania/CE_Data_Cleansing | 65c02640667d66dbc6f002888f393f6894be0330 | 93e98d53877cfaa65612c07258a2a1b48e6cda01 | refs/heads/master | 2020-04-23T15:17:40.304348 | 2015-02-11T13:01:22 | 2015-02-11T13:01:22 | 30,644,329 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,023 | r | run_analysis.R |
#Load all the required Libraries
library(dplyr)
#set all the path and file names
activityLabelFile<-"UCI HAR Dataset//activity_labels.txt"
featureFile<-"UCI HAR Dataset//features.txt"
trainX<-"UCI HAR Dataset//train//X_train.txt"
trainY<-"UCI HAR Dataset/train/y_train.txt"
trainSub<-"UCI HAR Dataset/train/subject_train.txt"
testX<-"UCI HAR Dataset//test//X_test.txt"
testY<-"UCI HAR Dataset/test/y_test.txt"
testSub<-"UCI HAR Dataset/test/subject_test.txt"
#Load activity labels and feature list (measures)
activiyLabel<-read.csv(activityLabelFile, sep=" ", header=FALSE,col.names=c("act","desc"))
featureList<-read.csv(featureFile, header=FALSE, sep=" ", col.names=c("id","measure"))
featureList[,2]<-gsub( "[[:punct:]]", "_",featureList[,2])
#Load training data
trainSubList<-scan(trainSub, what=numeric(), sep=" ")
trainXList<-read.table(trainX)
trainYList<-read.table(trainY)
#update the col names with feature names
colnames(trainXList)<-featureList[[2]]
#Include the subject Id & Activity
trainXList["id"]<-trainSubList
trainXList["act"]<-trainYList
#Load test data
testSubList<-scan(testSub, what=numeric(), sep=" ")
testXList<-read.table(testX)
testYList<-read.table(testY)
#update the col names with feature names
colnames(testXList)<-featureList[[2]]
#Include the subject Id & Activity
testXList["id"]<-testSubList
testXList["act"]<-testYList
#combine training & test data
allData<-rbind(trainXList,testXList)
#get only columns that are either mean or std
reqCol<-as.character(featureList[grep("mean|std|Mean|Std",featureList[[2]]),2])
allReqCol<-c("id","act",reqCol)
tidyData<-subset(allData, select=allReqCol)
#get the description of the activities
tidyData<-merge(activiyLabel,tidyData, by="act")
#remove the activity id
tidyData<-subset(tidyData, select=-act)
#group data by activity description & subject id
tidyDataGroup<-group_by(tidyData, desc,id)
#calculate mean for each of the variables
tidyDataSummary<-summarise_each(tidyDataGroup, funs(mean))
#return the final variable
tidyDataSummary |
fef02a5bc76947b95bc958c69f705927b4becc82 | 3fe1a27ecb52798609e99b5c7a2af1b95166c03d | /man/normalize.Rd | 765c80fc6f1f67285c8f46d3e6cce70115225827 | [] | no_license | mbedward/ecbtools | 6cbbfbe3c18734b75c42f0bd292aeab69f7bee69 | 16c5df2857f83fadfaac35447e21388daa69bfdb | refs/heads/master | 2023-02-16T19:29:39.773238 | 2021-01-17T20:26:25 | 2021-01-17T20:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,195 | rd | normalize.Rd | \name{normalize}
\alias{normalize}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Normalize a vector or matrix within a range.
}
\description{
Normalizes the values of a vector or matrix so that they spread between a minimum and a maximum value. By default, it normalizes between zero and one.
}
\usage{
normalize(x, low = 0, high = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A vector or matrix of numeric values.
}
\item{low}{
The minimum value desired in the output data.
}
\item{high}{
The maximum value desired in the output data.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A vector or matrix of normalized values.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Grant Williamson
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
k=matrix(runif(20),4,5)
normalize(k)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ normalize }
\keyword{ range }% __ONLY ONE__ keyword per line
|
0b92aa0a116ec89db1784895b5e46f5226d5d4f0 | 8cf5934012226d4e66914e4001d2b5ed7ff37d21 | /R/plannedparenthood.R | 625abbbff7c1e9a6363004d9575c83fa334b3aa6 | [] | no_license | aaronmams/R-spatial | e2e3d604ecf59920daa451364b24a3af54c7b347 | c992513a0ab23baabadcbc94b8a5f8a6d2989474 | refs/heads/master | 2021-01-22T05:00:44.407834 | 2017-03-02T17:37:21 | 2017-03-02T17:37:21 | 81,606,032 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,345 | r | plannedparenthood.R | library(data.table)
library(dplyr)
library(ggplot2)
#get the results of the 2012 U.S. House elections; keep winners of general
#elections only (GE.WINNER.INDICATOR == 'W', excluding special elections,
#D != 'S') and drop non-voting territories/DC
house2012 <- tbl_df(read.csv('data/HouseElectionResults2012.csv')) %>%
  filter(GE.WINNER.INDICATOR=='W' & D!='S') %>%
  filter(! STATE %in% c('American Samoa','Puerto Rico','Northern Mariana Islands','District of Columbia'))
#----------------------------------------------------
# Parse the 114th Congress member list: each raw line is whitespace-split into
# (district, first name, last name, party, state); members whose names contain
# spaces break the 5-token assumption and are entered by hand below.
house2016 <- read.csv('data/Congress114_members.txt')
house2016 <- strsplit(as.character(house2016[,1]),"\\s+")
#first just take out all the list elements that have exactly 5 elements because
# these will be easy to deal with
goods <- which(lapply(house2016,function(x)length(x))==5)
good.members <- data.frame(rbindlist(lapply(goods,function(i){
  tmp <- house2016[[i]]
  # first token looks like "<cd>.<something>"; split on the dot
  cd <- unlist(strsplit(tmp[[1]],"[.]"))
  data.frame(cd,firstname=tmp[2],lastname=tmp[3],party=tmp[4],state=tmp[5])
})))
#probably have to do the others by hand
bads <- which(lapply(house2016,function(x)length(x))!=5)
#there are only 13 bad ones at this point...
r1 <- data.frame(cd=23,firstname='Debbie', lastname='Wasserman Schultz', party='D',state='FL')
r2 <- data.frame(cd=8,firstname='Chris Van', lastname='Hollen', party='D',state='MD')
r3 <- data.frame(cd=2,firstname='Ann McLane', lastname='Kuster', party='D',state='NH')
r4 <- data.frame(cd=12,firstname='Bonnie', lastname='Watson Coleman', party='D',state='NJ')
r5 <- data.frame(cd=1,firstname='Michelle', lastname='Lujan Grisham', party='D',state='NM')
r6 <- data.frame(cd=3,firstname='Ben Ray', lastname='Lujan', party='D',state='NM')
r7 <- data.frame(cd=15,firstname='Jose E', lastname='Serrano', party='D',state='NY')
r8 <- data.frame(cd=18,firstname='Sean Patrick', lastname='Maloney', party='D',state='NY')
r9 <- data.frame(cd=18,firstname='Sheila', lastname='Jackson Lee', party='D',state='TX')
r10 <- data.frame(cd=1,firstname='G.K.', lastname='Butterfield', party='D',state='NC')
r11 <- data.frame(cd=30,firstname='Eddie Bernice', lastname='Johnson', party='D',state='TX')
r12 <- data.frame(cd=3,firstname='Jamie', lastname='Herrera Beutler', party='R',state='WA')
r13 <- data.frame(cd=5,firstname='Cathy', lastname='McMorris Rodgers', party='R',state='WA')
df <- rbind(r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13)
house2016 <- rbind(good.members,df)
#last step is to get the party affiliation: strip the surrounding parentheses,
#e.g. "(D)" -> "D" (hand-entered rows already lack parentheses and pass through)
party.afil <- lapply(house2016$party,function(x){
  tmp <- strsplit(as.character(x),'[())]')
  if(length(unlist(tmp))==2){
    party <- as.character(unlist(tmp)[2])
  }else{
    party <- as.character(tmp)
  }
  return(party)
})
house2016$party <- unlist(party.afil)
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
# Census API key (NOTE: hard-coded credential; consider moving to an env var)
key <- 'f5a32f694a14b28acf7301f4972eaab8551eafda'
#Now get 3 simple demographics for 2012 and 2016
#1. percent female
#2. percent black
#3. percent with a college degree
#4. age structure...let's just use percent 25 - 50
# by congressional district
# age and sex
# ACS table B01001 (sex by age) variable suffixes.  Males: 001E = total pop,
# 002E = total male, 007E-016E = male 18-19 through 50-54.  Females: 026E =
# total female, 031E-040E = female 18-19 through 50-54.
series.males <- c('001E','002E','007E','008E','009E','010E','011E','012E','013E','014E','015E','016E')
series.females <- c('026E','031E','032E','033E','034E','035E','036E','037E','038E','039E','040E')
series <- c(series.males,series.females)
series <- paste('B01001_',series,sep="")
series.names<- c('total pop','total male','m18_19','m20','m21','m22_24','m25_29','m30_34',
                 'm35_39','m40_44','m45_49','m50_54','total female','f18_19','f20','f21','f22_24',
                 'f25_29','f30_34',
                 'f35_39','f40_44','f45_49','f50_54')
# Fetch one B01001 series (index i into `series`) for every congressional
# district from the ACS 1-year API.  Relies on fromJSON() being available --
# no JSON package is attached by the library() calls at the top of this file;
# presumably it was loaded in the interactive session.
pop.fn <- function(i,yr){
  resURL <- paste('http://api.census.gov/data/',yr,
                  '/acs1?get=NAME,',
                  series[i],
                  '&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda',
                  sep="")
  ljson <- fromJSON(resURL)
  # drop the header row returned by the API
  ljson <- ljson[2:length(ljson)]
  tmp <- data.frame(unlist(lapply(ljson,function(x)x[1])),
                    unlist(lapply(ljson,function(x)x[2])),
                    unlist(lapply(ljson,function(x)x[3])),
                    unlist(lapply(ljson,function(x)x[4])),
                    series.names[i])
  names(tmp) <- c('name','variable','state','congressional_district','series_name')
  return(tmp)
}
# 2015: total population plus female counts by age bracket, one column each
age.sex <- pop.fn(i=1,yr=2015)
names(age.sex) <- c('name','total_pop','state','congressional_district','series')
age.sex$total_female <- pop.fn(i=13,yr=2015)[,2]
age.sex$f25 <- pop.fn(i=18,yr=2015)[,2]
age.sex$f30 <- pop.fn(i=19,yr=2015)[,2]
age.sex$f35 <- pop.fn(i=20,yr=2015)[,2]
age.sex$f40 <- pop.fn(i=21,yr=2015)[,2]
age.sex$f45 <- pop.fn(i=22,yr=2015)[,2]
age.sex$f50 <- pop.fn(i=23,yr=2015)[,2]
# 2012: same layout
df12 <- pop.fn(i=1,yr=2012)
# FIX: the original assigned these names to the *function* (`names(pop.fn)`),
# leaving df12's columns unrenamed; rename df12 instead, using
# 'congressional_district' (underscore) for consistency with age.sex above.
names(df12) <- c('name','total_pop','state','congressional_district','series')
df12$total_female <- pop.fn(i=13,yr=2012)[,2]
df12$f25 <- pop.fn(i=18,yr=2012)[,2]
df12$f30 <- pop.fn(i=19,yr=2012)[,2]
df12$f35 <- pop.fn(i=20,yr=2012)[,2]
df12$f40 <- pop.fn(i=21,yr=2012)[,2]
df12$f45 <- pop.fn(i=22,yr=2012)[,2]
df12$f50 <- pop.fn(i=23,yr=2012)[,2]
#percent black
#2015 1 yr ACS estimate
#black population (table B02001, _003E = Black or African American alone)
resURL <- 'http://api.census.gov/data/2015/acs1?get=NAME,B02001_003E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
pct.black <- data.frame(rbindlist(lapply(ljson,function(x){
  tmp <- unlist(x)
  return(data.frame(name=tmp[1],pop.black=tmp[2],state=tmp[3],cd=tmp[4]))
})))
#total population (B02001_001E); joined on the district name below.  The API
#header row is carried along here and filtered out later (row_number() > 1).
resURL <- 'http://api.census.gov/data/2015/acs1?get=NAME,B02001_001E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
total.pop <- data.frame(rbindlist(lapply(ljson,function(x){
  return(data.frame(name=unlist(x)[1],total.pop=unlist(x)[2]))
})))
pct.black <- tbl_df(pct.black) %>%
  inner_join(total.pop,by=c('name'))
#2012 1 yr ACS estimate -- same two pulls for 2012
#black population
resURL <- 'http://api.census.gov/data/2012/acs1?get=NAME,B02001_003E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
pct.black2012 <- data.frame(rbindlist(lapply(ljson,function(x){
  tmp <- unlist(x)
  return(data.frame(name=tmp[1],pop.black=tmp[2],state=tmp[3],cd=tmp[4]))
})))
#total population
resURL <- 'http://api.census.gov/data/2012/acs1?get=NAME,B02001_001E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
total.pop <- data.frame(rbindlist(lapply(ljson,function(x){
  return(data.frame(name=unlist(x)[1],total.pop=unlist(x)[2]))
})))
pct.black2012 <- tbl_df(pct.black2012) %>%
  inner_join(total.pop,by=c('name'))
#---------------------------------------------------------------------------------
#poverty percentages -- B17001_025E / _026E; presumably females below the
#poverty line in the 25-34 and 35-44 age brackets (verify against the B17001
#table definition before interpreting)
resURL <- 'http://api.census.gov/data/2015/acs1?get=NAME,B17001_025E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
poverty.f25 <- data.frame(rbindlist(lapply(ljson,function(x){
  return(data.frame(name=unlist(x)[1],pop.poverty=unlist(x)[2]))
})))
resURL <- 'http://api.census.gov/data/2015/acs1?get=NAME,B17001_026E&for=congressional+district:*&key=f5a32f694a14b28acf7301f4972eaab8551eafda'
ljson <- fromJSON(resURL)
poverty.f35 <- data.frame(rbindlist(lapply(ljson,function(x){
  return(data.frame(name=unlist(x)[1],pop.poverty=unlist(x)[2]))
})))
# drop the API header row ([2:nrow]) and combine the two brackets side by side
pov.df <- data.frame(cbind(poverty.f25[2:nrow(poverty.f25),],poverty.f35$pop.poverty[2:nrow(poverty.f35)]))
names(pov.df) <- c('name','pov25','pov35')
#---------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
# Education level by congressional district, from ACS 1-year subject table
# S1501.  Relies on fromJSON() being available in the session.
edu.fn <- function(yr){
  # helper: pull column j out of every data row (row 1 of the payload is the
  # API's header row)
  grab <- function(payload, j) unlist(lapply(payload[2:length(payload)], function(x) x[j]))
  # S1501_C01_006E: population aged 25 years and over
  resURL <- paste('http://api.census.gov/data/',yr,
                  '/acs1/subject?get=NAME,S1501_C01_006E&for=congressional+district:*&',
                  'key=f5a32f694a14b28acf7301f4972eaab8551eafda',sep="")
  payload <- fromJSON(resURL)
  out <- data.frame(name = grab(payload, 1),
                    pop25 = grab(payload, 2),
                    state = grab(payload, 3),
                    congressional_district = grab(payload, 4),
                    source = paste('ACS_1yr_', yr, sep = ""))
  # S1501_C01_012E: population aged 25+ holding a bachelor's degree
  resURL <- paste('http://api.census.gov/data/',yr,
                  '/acs1/subject?get=NAME,S1501_C01_012E&for=congressional+district:*&',
                  'key=f5a32f694a14b28acf7301f4972eaab8551eafda',sep="")
  payload <- fromJSON(resURL)
  out$pop25_bachelors <- grab(payload, 2)
  out
}
edu2015 <- edu.fn(yr=2015)
edu2014 <- edu.fn(yr=2014)
edu2012 <- edu.fn(yr=2012)
#############################################################################
# Assemble the district-level analysis table: attach state abbreviations,
# House member/party, percent black, percent bachelor's, and female poverty
# to `age.sex`.
#############################################################################
#add state abbreviations to the age.sex data frame
state.codes <- read.csv('data/state_fips_codes.csv') %>% select(code,abb)
names(state.codes) <- c('state.code','abb')
names(house2016) <- c('cd','firstname','lastname','party','abb')
house2016$cd <- as.numeric(as.character(house2016$cd))
# at-large districts parse to NA; code them as district 0 to match the census
house2016$cd[is.na(house2016$cd)] <- 0
age.sex <- age.sex %>% mutate(state.code = as.numeric(as.character(state))) %>%
  inner_join(state.codes,by=c('state.code')) %>%
  mutate(cd=as.numeric(as.character(congressional_district))) %>%
  inner_join(house2016,by=c('abb','cd'))
#now bring in the % black (row_number() > 1 drops the API header row)
pct.black <- pct.black %>% filter(row_number() > 1) %>%
  mutate(cd=as.numeric(as.character(cd)),
         state.code=as.numeric(as.character(state)),
         pop.black=as.numeric(as.character(pop.black)),
         total.pop=as.numeric(as.character(total.pop)),
         pct.black=pop.black/total.pop) %>%
  select(cd,state.code,pct.black)
age.sex <- age.sex %>% inner_join(pct.black,by=c('state.code','cd'))
#bring in education (share of 25+ population with a bachelor's degree)
edu2015 <- edu2015 %>% mutate(state.code=as.numeric(as.character(state)),
                              cd=as.numeric(as.character(congressional_district)),
                              pop25=as.numeric(as.character(pop25)),
                              pop25_bachelors=as.numeric(as.character(pop25_bachelors)),
                              pct.bachelors=pop25_bachelors/pop25) %>%
  select(state.code,cd,pct.bachelors)
age.sex <- age.sex %>% inner_join(edu2015,by=c('state.code','cd'))
#add in the female poverty: pov25 + pov35 counts over the total female
#population (columns arrive as factors/characters, hence the as.numeric chains)
age.sex <- age.sex %>% left_join(pov.df,by=c('name')) %>%
  mutate(pov.female=as.numeric(as.character(pov25))+
           as.numeric(as.character(pov35)),
         pct.pov.female=as.numeric(as.character(pov.female))/as.numeric(as.character(total_female)))
############################################################################
# Exploration: female poverty in Republican-held districts, then merge the
# 2016 presidential result per district and fit a simple logit.
############################################################################
#look at republican districts with highest female poverty rates
# NOTE(review): pct.female is not created until the mutate() further down in
# this script -- in a clean top-to-bottom run these select() calls would fail;
# the file appears to be an interactive-session transcript.
age.sex %>% select(name,party,pct.pov.female,pct.female) %>%
  filter(party=='R') %>% arrange(-pct.pov.female,-pct.female)
############################################################################
#propensity to vote for Clinton over Trump by demographic make-up
#get 2016 presidential election results by CD
#http://www.dailykos.com/story/2012/11/19/1163009/-Daily-Kos-Elections-presidential-results-by-congressional-district-for-the-2012-2008-elections
pres.2016 <- read.csv('data/2016presidential_by_cd.csv')
#1st value after party affiliation is Clinton vote share
# 2nd value is Trump vote share
head(pres.2016[pres.2016$Party=='(R)' & pres.2016$Clinton.2016>pres.2016$Trump.2016,])
pres.2016 <- pres.2016 %>% mutate(d.margin=Clinton.2016 - Trump.2016) %>%
  arrange(Party,-d.margin)
# Dem = 1 when Clinton carried the district; note this p16 overwrites the
# earlier CDKN2A object of the same name (unrelated analyses sharing a session)
p16 <- pres.2016 %>% mutate(Dem=ifelse(Clinton.2016>Trump.2016,1,0)) %>%
  select(CD,Dem)
# split "XX-NN" district codes into state abbreviation and district number
cd.new <- strsplit(as.character(p16$CD),"-")
p16$abb <- unlist(lapply(cd.new,function(x){x[1]}))
p16$cd <- unlist(lapply(cd.new,function(x){x[2]}))
#fix 'at large' values ("AL" -> district 00, matching the census coding)
p16 <- p16 %>% mutate(cd=ifelse(cd=='AL','00',cd),cd=as.numeric(cd))
#merge these results with our demographics
age.sex <- age.sex %>% left_join(p16,by=c('abb','cd'))
#something simple
#logit model: probability the district went for Clinton given education,
#female share, and black share
vote.red <- glm(Dem~pct.bachelors+pct.female+pct.black,data=age.sex,family=binomial(link='logit'))
age.sex$pr.D <- predict.glm(vote.red, newdata = age.sex, type = "response")
#sort by propensity and look
age.sex %>% arrange(-pr.D) %>% select(name,pr.D,party,pct.black,pct.bachelors,pct.female)
############################################################################
############################################################################
############################################################################
############################################################################
############################################################################
############################################################################
############################################################################
# try a classification tree application just for fun
# here we will use a classification tree to predict 2016 presidential election outcomes
# by congressional district using demographic data
library(tree)
#recod Republican districts = 1
age.sex <- age.sex %>% mutate(z = ifelse(party=='R',1,0),
female.votingage=as.numeric(as.character(f25))+
as.numeric(as.character(f30))+
as.numeric(as.character(f40))+
as.numeric(as.character(f50)),
total_pop=as.numeric(as.character(total_pop)),
pct.female=female.votingage/total_pop)
#have a quick look at some factor distributions
ggplot(age.sex,aes(x=party,y=pct.bachelors)) + geom_boxplot() + theme_bw()
ggplot(age.sex,aes(x=party,y=pct.female)) + geom_boxplot() + theme_bw()
#a classification tree
tree.model <- tree(factor(party) ~ pct.black + pct.bachelors + pct.female, data=age.sex)
tree.model
my.prediction <- predict(tree.model, age.sex) # gives the probability for each class
head(my.prediction)
my.prediction <- data.frame(pDem=my.prediction[,1],pRepub=my.prediction[,2])
#Create a simplied postestimation data frame
tree.df <- age.sex %>% select(name,party,pct.black,pct.bachelors,pct.female,party,Dem)
tree.df <- cbind(tree.df,my.prediction)
plot(tree.model)
text(tree.model)
#look at number correctly classified
maxidx <- function(arr) {
return(which(arr == max(arr)))
}
idx <- apply(my.prediction, c(1), maxidx)
prediction <- c('D', 'R')[idx]
tree.df$tree.pred <- prediction
#compare number correctly classified to what we would get from a logit model
#logit model
vote.red <- glm(z~pct.bachelors+pct.female+pct.black,data=age.sex,family=binomial(link='logit'))
tree.df$pr.R <- predict.glm(vote.red, newdata = tree.df, type = "response")
tree.df <- tree.df %>% mutate(logit.correct=ifelse(pr.R>0.5 & party=='R',1,
ifelse(pr.R<0.5 & party=='D',1,0)),
tree.correct=ifelse(tree.pred==party,1,0))
logit.bad <- nrow(tree.df) - sum(tree.df$logit.correct)
tree.bad <- nrow(tree.df) - sum(tree.df$tree.correct)
#First priority: Red districts where the prediction didn't work well & that
# voted for Clinton over Trump
tree.df %>% filter(tree.pred=='D' & party=='R' & Dem==1) %>% arrange(-pDem)
#which Republican Districts were predicted to vote Democrate but voted Trump in the National Election:
tree.df %>% filter(tree.correct==0 & party=='R' & Dem==0) %>% arrange(-pDem)
|
84421c175523d13c54e8d3fbfd08115e179c76c9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/modes/examples/bimodality_coefficient.Rd.R | 0d0d565d3d530a85b0e00d572ab96f125b33000d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | bimodality_coefficient.Rd.R | library(modes)
### Name: bimodality_coefficient
### Title: Bimodality Coefficient
### Aliases: bimodality_coefficient
### Keywords: bimodality, measure, modality nonparametric
### ** Examples
data<-c(rnorm(15,0,1),rnorm(21,5,1))
hist(data)
bimodality_coefficient(data,TRUE)
|
42664a199d5c83d95e130f0ab3e22c327c0f3337 | 0bb9a61ef4f65de46b86b501537c871547ae9ce3 | /Expert_Parcer/Script.R | 3ec212be7c79a148b82060e01ab2f07b02299c4d | [] | no_license | StepanSushko/TopInustrialCompanies | 2170770af8fd4e1bd9ec61084d8e9c8120d1e434 | 0c29d933dcc086e8ab06c30286262dbc99f15f3a | refs/heads/master | 2020-03-18T10:36:08.860326 | 2018-05-23T21:09:57 | 2018-05-23T21:09:57 | 134,622,153 | 0 | 0 | null | null | null | null | WINDOWS-1251 | R | false | false | 6,199 | r | Script.R | if (!require("XML")) { install.packages("XML"); require("XML") }
if (!require("ggplot2")) { install.packages("ggplot2"); require("ggplot2") }
if (!require("RCurl")) { install.packages("RCurl"); require("RCurl") }
if (!require("qdap")) { install.packages("qdap"); require("qdap") }
#if (!require("gridExtra")) { install.packages("gridExtra"); require("gridExtra") }
#if (!require("xlsx")) { install.packages("xlsx"); require("xlsx") }
#if (!require("rJava")) { install.packages("rJava"); require("rJava") }
#if (!require("mclust")) { install.packages("mclust"); require("mclust") }
plotDir = "C:/Users/stepa/OneDrive/DataScience/Expert_Parcer/Expert_Parcer/Plots"
# Parsing -----
# u = "http://en.wikipedia.org/wiki/World_population"
u = "http://expert.ru/ratings/300-krupnejshih-proizvodstvennyih-kompanij-yuga-rossii-po-itogam-2016-goda/"
u <- getURL(u)
doc = htmlParse(u, encoding = "UTF8", asText = T)
tableNodes = getNodeSet(doc, "//table")
# Attempt to parse a scraped value as an integer.
# Decimal commas are first normalised to dots; if the coercion still
# yields NA (non-numeric text), the original value is returned unchanged.
tryAsInteger <- function(val) {
  normalised <- gsub(",", ".", val, fixed = TRUE)
  parsed <- as.integer(normalised)
  if (is.na(parsed)) {
    return(val)
  }
  parsed
}
# Strip bracketed reference markers (e.g. Wikipedia-style "[1]") from text;
# qdap::genX() removes every substring enclosed by "[" and "]".
del_ref = function(val) { genX(val, "[", "]") }
tb = readHTMLTable(tableNodes[[1]])
colnames(tb) = as.character( unlist( tb[1,] ) )
tb = tb[-c(1),]
#tb = apply(tb, MARGIN = 2, del_ref)
tb = as.data.frame(apply(tb, MARGIN = 2, tryAsInteger))
tb[, c(1, 2, 7, 8, 9, 10, 11)] = apply(tb[, c(1, 2, 7, 8, 9, 10, 11)], MARGIN = 2, as.integer)
#head( tb[,c(1,2,7,8,9,10,11)] )
tb$`Регион` = as.character(tb$`Регион`)
tb[tb$`Регион` == "Крым",4] = "Республика Крым"
tb[tb$`Регион` == "Чечня", 4] = "Чеченская Республика"
head(tb, n = 10)
#write.csv2( tb, file = "Выручка.csv", sep = ",", col.names = NA)
#tb = tb[tb$`Отрасль` != "АПК и пищевая промышленность",]
# Analysis -----
#Отрасли
Показатель = tb$`Выручка в 2016 году, млн руб.`
Показатель = tb$`Чистая прибыль в 2016 году, млн руб.`
Показатель = tb$`Прирост выручки в 2016 году, %`
df = aggregate( Показатель ~ `Отрасль`, tb, sum)
df = df[order( df$Показатель, decreasing = T),]
ggplot(df) +
geom_bar(aes(x = 1:dim(df)[1], y = Показатель), stat = "identity") + xlab("") +
theme(axis.text.x = element_text(angle = 270, vjust = 0.25, hjust = 0, size = 7),
axis.text.y = element_text(size = 7)) +
scale_x_reverse( #name = "",
breaks = 1:dim(df)[1],
labels = df$`Отрасль`) +
coord_flip() +
scale_y_continuous(breaks = seq(0, 3000000, 100000),
labels = as.character( as.character(seq(00, 3000000, 100000))))
# Регионы
Показатель = tb$`Выручка в 2016 году, млн руб.`
Показатель = tb$`Чистая прибыль в 2016 году, млн руб.`
Показатель = tb$`Прирост выручки в 2016 году, %`
df = aggregate(Показатель ~ `Регион`, tb, sum)
df = df[order(df$Показатель, decreasing = T),]
png(filename = file.path(plotDir, "Regions_ranking.png"), width = 600, height = 400, units = "px", pointsize = 24, bg = "white", res = 100, family = "", restoreConsole = TRUE) #, type = c("cairo-png"))
ggplot(df) +
geom_bar(aes(x = 1:dim(df)[1], y = Показатель), stat = "identity") + xlab("") +
theme(axis.text.x = element_text(angle = 270, vjust = 0.25, hjust = 0)) +
scale_x_continuous(#name = "",
breaks = 1:dim(df)[1],
labels = df$`Регион`) + ggtitle("Совокупная Выручка в 2016 году, млн руб.") + ylab("") + annotate(geom = "text", x = 7, y = 250000, xend = Inf, yend = Inf, label = 'Степан Сушко', color = 'white', angle = 45, fontface = 'bold', size = 6, alpha = 0.5, family = 'Verdana')
dev.off()
# Структура по региону
Region = "Ростовская область"
Region = "Волгоградская область"
Region = "Краснодарский край"
Показатель = (tb[tb$`Регион` == Region,])$`Выручка в 2016 году, млн руб.`
#Показатель = (tb[tb$`Регион` == Region,])$`Чистая прибыль в 2016 году, млн руб.`
df = aggregate(Показатель ~ `Отрасль`, tb[tb$`Регион` == Region,], sum)
df = df[order(df$Показатель, decreasing = T),]
png(filename = file.path(plotDir, "Region_vs_revenue_VO.png"), width = 600, height = 400, units = "px", pointsize = 24, bg = "white", res = 100, family = "", restoreConsole = TRUE) #, type = c("cairo-png"))
ggplot(df) +
geom_bar(aes(x = 1:dim(df)[1], y = Показатель), stat = "identity") + xlab("") +
theme(axis.text.x = element_text(angle = 270, vjust = 0.25, hjust = 0, size = 7),
axis.text.y = element_text(size = 7)) +
scale_x_reverse(#name = "",
breaks = 1:dim(df)[1],
labels = df$`Отрасль`) +
coord_flip() +
#scale_y_continuous(
# breaks = seq(500000, 3000000, 500000),
# labels = as.character(seq(500000, 3000000, 500000))) +
ggtitle(Region) + ylab("Выручка в 2016 году, млн руб.") +
annotate(geom = "text", x = 4, y = 55000, xend = Inf, yend = Inf, label = 'Степан Сушко', color = 'white', angle = 45, fontface = 'bold', size = 6, alpha = 0.5, family = 'Verdana')
dev.off()
# Частота
unique(tb$`Регион`)
df = data.frame(table(as.character(tb$`Регион`)))
df = df[order(df$Freq, decreasing = T),]
ggplot(data = df) +
geom_bar(aes(x = 1:dim(df)[1], y = Freq), stat = "identity") + xlab("") +
theme(axis.text.x = element_text(angle = 270, vjust = 0.25, hjust = 0)) +
scale_x_continuous(#name = "",
breaks = 1:dim(df)[1],
labels = df$Var1) + ylab("Число компаний")
|
f8b74cea6f6f53f323ebdbcb1cc6428635e0bbf4 | 872e9ddfecc5d9456142b7d98b4a067620c00ded | /script.R | 88b5c99511132208bf878a39ba90e4635bae4cdb | [] | no_license | rscherrer/egstools | 4d9b7042f25c37e6396925543c5eca4cb5bc80df | 90977be2ca98e50553847d7c80319b7bdf4a4dd4 | refs/heads/master | 2020-11-26T13:43:55.892998 | 2020-03-29T19:53:51 | 2020-03-29T19:53:51 | 229,089,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,563 | r | script.R |
#disassortment <- mrep(c(1,-1), sapply(d, nrow))
#d <- do.call(rbind, d)
#d$disassortment <- as.factor(disassortment)
d2 <- d %>%
filter(disassortment == 1) %>%
group_by(id) %>%
summarize(RI = RI[n()])
length(which(d2$RI < -0.2))
hist(d2$RI)
d <- egstools::collect(
dir = "/media/raphael/bigass/simulations_epistatic_large",
files = files
)
d2 <- d %>%
group_by(id) %>%
summarize(RI = RI[n()])
folders <- d2$id[which(d2$RI > 0.5)]
folders <- list.files("/media/raphael/bigass/simulations_disassortative_longrun", full.names = TRUE)
folders <- folders[grep("sim_", folders)]
lapply(seq_len(33), function(i) {
folder <- folders[i]
file.copy(
paste0(folder, "/architecture.txt"),
paste0("/media/raphael/bigass/architectures_assortative/architecture_", i, ".txt")
)
})
# Phase plane
p <- plot_phase(
d,
xname = "EI",
yname = "RI",
#tname = "t",
labs = c("Ecological divergence", "Reproductive isolation"),
#colvar = "disassortment",
#collab = "Disassortative"
splitvar = "ecosel",
splitvar2 = "hsymmetry"
)
p
ggsave("RSI.pdf", p)
# Heatmap
h <- plot_heatmap(
d,
labs = c("Habitat symmetry", "Ecological selection"),
xname = "hsymmetry",
yname = "ecosel",
zname = "EI",
tname = "t",
summary = "value",
aggregate = "number",
threshold = 0.9,
collab = "Ecological divergence",
colors = c("black", "lightgreen"),
splitvar = "epistasis"
)
h
ggsave("heatmap_EI_lowdispersal.pdf", h)
############
############
# Speciation cube
#cube <- plot_cube(d, labs, phi = 30, theta = 300)
|
ffa8175994a270a7a8d069cbceef3e31be25b013 | d4e84759fa1e0d22c9468fa53f1254cc024e0a3f | /R/ip_api.R | 72b5a00fa161f0d5d307a81f84301f1fe0b39bc8 | [
"Apache-2.0"
] | permissive | Ironholds/rgeolocate | 795c9174aca9102ac116f983a65284108c3a093b | d2f550ee9ec3bfe1a24449eae0f40c65b7ee0042 | refs/heads/master | 2023-05-12T13:02:14.751389 | 2023-01-30T20:55:03 | 2023-01-30T20:55:03 | 35,774,653 | 71 | 31 | NOASSERTION | 2023-04-26T08:24:08 | 2015-05-17T17:03:30 | C | UTF-8 | R | false | false | 2,023 | r | ip_api.R | query_ip_api <- function(ip, sleep){
if(sleep){
    # 400ms pause keeps batch jobs under ip-api.com's rate limit of
    # 150 requests per minute.
    Sys.sleep(0.40)
  }
  url <- paste0("https://ip-api.com/json/", ip)
  # Namespace user_agent() like every other httr call in this function, so it
  # works even when httr is not attached to the search path.
  result <- httr::GET(url, httr::user_agent("rgeolocate - https://github.com/Ironholds/rgeolocate"))
  # `result$status` relied on partial matching against the `status_code`
  # field; use httr's explicit accessor instead.
  if(httr::status_code(result) > 300){
    return("Error")
  }
  parsed_results <- httr::content(result, as = "parsed", type = "application/json")
  # ip-api.com answers HTTP 200 with status == "fail" for invalid/unroutable
  # addresses, so this must be checked separately from the HTTP status.
  if(parsed_results$status == "fail"){
    return("Error")
  }
  return(unlist(parsed_results))
}
#'@title Geolocate IP Addresses Through ip-api.com
#'@description \code{ip_api} consumes a vector of IP addresses
#'and geolocates them via \href{https://ip-api.com}{ip-api.com}.
#'
#'@param ip_addresses a character vector of IP addresses
#'
#'@param as_data_frame whether to return the results as a data.frame or not.
#'Set to TRUE by default.
#'
#'@param delay whether or not to delay each request by 400ms. ip-api.com has a
#'maximum threshold of 150 requests a minute; if you're parallelising calls, you
#'might run into this. \code{delay} allows you to set a delay between requests, taking
#'advantage of parallelisation while avoiding running into this threshold. Set to
#'FALSE by default
#'
#'@return either a data.frame or a list of vectors. If an IP cannot be geolocated, it
#'will provide an error message: see the examples for field names and examples of each
#'possible output.
#'
#'@seealso \code{\link{ip_info}} and \code{\link{db_ip}} for other
#'online geolocation APIs.
#'
#'@examples
#'\dontrun{
#'#Valid, data.frame output
#'result <- ip_api("2607:FB90:426:DC1D:CFC4:4875:8BC2:4D93")
#'
#'#Invalid, data.frame output
#'result <- ip_api("argh")
#'
#'#Valid list output
#'result <- ip_api("2607:FB90:426:DC1D:CFC4:4875:8BC2:4D93", as_data_frame = FALSE)
#'
#'#Invalid list output
#'result <- ip_api("argh", as_data_frame = FALSE)
#'}
#'@export
ip_api <- function(ip_addresses, as_data_frame = TRUE, delay = FALSE){
  # Geolocate each address in turn, forwarding `delay` so batch calls can
  # stay under ip-api.com's rate limit.
  located <- lapply(ip_addresses, function(address) query_ip_api(address, delay))
  if (!as_data_frame) {
    return(located)
  }
  ip_to_df(located)
}
|
59ced68c881881b628b359576098a6a0f1b07ce0 | 4e165402f4e3f7d161558d62e598fa1f7bb696cf | /codes/Train.R | 21795bfae0235e86b6629fca02f0350fab105063 | [] | no_license | BCEM-UniAndes/Reproducibility-Guidelines | 55acafbd77f6968f778846a90ce406db2e7d7c67 | 3f707e08c0507c701a3768cf789816dfe1442856 | refs/heads/main | 2023-08-10T16:41:05.437415 | 2021-09-30T13:19:37 | 2021-09-30T13:19:37 | 347,115,094 | 2 | 2 | null | 2021-04-07T21:38:11 | 2021-03-12T15:39:48 | null | UTF-8 | R | false | false | 1,793 | r | Train.R | #######################################
###### script description ######
#######################################
# To Test the random forest model, the train and test datasets must be loaded
#Written by Laura Carolina Camelo Valera
#Written by Laura Carolina Camelo Valera at Computational Biology and Microbial Ecology lab (BCEM)
#Institution: Los Andes University, Colombia
#email. lc.camelo10@uniandes.edu.co
######### Parameters #########
#1 Train matrix, phage-bacteria pairs
#Activate arguments reading
args = commandArgs(trailingOnly=TRUE)
# test if there is at least one argument: if not, return an error
if (length(args)==0) {
stop("At least one argument must be supplied (input file).n", call.=FALSE)
}
#### Arguments Setting ####
train_PhageBacteriaPairs = args[1]
#Required libraries
library(caret)
library(randomForest)
library(tictoc)
library(parallel)
library(doMC)
#Número de cores a usar
registerDoMC(cores=15)
#Function File
source("~/JovenInvestigador/RScripts/functionsProject.R")
#Required data
Data_PairedRelations <- read.table(train_PhageBacteriaPairs,h=T,sep="\t")
#Data_PairedRelations_test <- read.table(test_PhageBacteriaPairs,h=T,sep="\t")
print("Datasets loaded")
#load("/hpcfs/home/lc.camelo10/JovenInvestigador/DataPairsTest.RData")
#load("/hpcfs/home/lc.camelo10/JovenInvestigador/DataPairsTrain.RData")
#to tetramers
Data_PairedRelations$Interaction<-as.factor(Data_PairedRelations$Interaction)
tic()
rf_PairedDistances=randomForest(y=Data_PairedRelations$Interaction,
x=Data_PairedRelations[,10:152],
subset=1:nrow(Data_PairedRelations))
toc()
saveRDS(rf_PairedDistances, "/hpcfs/home/lc.camelo10/JovenInvestigador/Outputs/model/rf_model.RDS")
|
b857a55e6c12c192a3f14deb5393ae792a30ab95 | eb79f12e402744ae5ff3c66d5cc14bcaf8c3632f | /Datacleaning.R | 8d5eb35dfd8bcff5677111038d77d81da4acd41f | [] | no_license | Darki92/Whale-Analysis | ecbb098c28300cabfe9f884bf3a2143c5a0462cc | 530f944b348ac83523c844666b1bfde55b06fb4e | refs/heads/master | 2021-09-10T18:01:10.805019 | 2018-03-30T15:45:32 | 2018-03-30T15:45:32 | 110,442,059 | 0 | 1 | null | 2018-03-31T13:29:16 | 2017-11-12T15:04:30 | R | UTF-8 | R | false | false | 4,615 | r | Datacleaning.R | install.packages("CircStats")
install.packages("boot")
install.packages("MASS")
install.packages("reshape2")
install.packages("stats")
install.packages("moveHMM")
require(CircStats) # for von Mises distribution
require(boot) # for logit
require(MASS)
require(reshape2)
require(stats)
require(moveHMM)
setwd("~/Uni/(M.Sc.) 3. Semester/Statistical Consulting/Minke whale project")
whalegps <- read.table("GPS_data_170921.txt", header=T, sep= "\t") # read the gps data
whaledivestats <- read.csv("dive_stats_170921.csv")
#function which computes the time in seconds in our case for the gps data. datetime must be in the format year-month-day hour:minute:second
# Seconds elapsed within the month for "year-month-day hour:minute:second"
# stamps. Year and month are constant across this data set, so only the
# day, hour, minute and second fields contribute to the total.
overallsec <- function(datetime){
  # Splitting on "-", " " and ":" yields the six fields in order:
  # year, month, day, hour, minute, second.
  pieces <- strsplit(as.character(datetime), "[- :]")
  vapply(pieces, function(p) {
    p <- as.integer(p)
    # Year (p[1]) and month (p[2]) are deliberately ignored.
    p[6L] + 60L * p[5L] + 3600L * p[4L] + 86400L * p[3L]
  }, integer(1))
}
whalegps$overallsec <- overallsec(whalegps$datetime) # create the overallsec computation to the gps data
#function which computes the time in seconds in our case for the stat data. datetime must be in the format year-month-day hour:minute:second UTC
# Seconds elapsed within the month for stamps of the form
# "year-month-day hour:minute:second UTC". Year and month are constant
# across this data set and are ignored, as in overallsec().
overallsecwithUTC <- function(datetime){
  # Splitting on "-", " " and ":" yields
  # year, month, day, hour, minute, second, "UTC" (in that order).
  pieces <- strsplit(as.character(datetime), "[- :]")
  vapply(pieces, function(p) {
    # The trailing "UTC" token coerces to NA; it is never indexed below,
    # and the coercion warning is silenced to keep the function quiet.
    vals <- suppressWarnings(as.integer(p))
    vals[6L] + 60L * vals[5L] + 3600L * vals[4L] + 86400L * vals[3L]
  }, integer(1))
}
whaledivestats$enddescsec <- overallsecwithUTC(whaledivestats$enddesc)
whaledivestats$begdescsec <- overallsecwithUTC(whaledivestats$begdesc)
#plot the time against lat and long
plot(whalegps$overallsec,whalegps$lat)
plot(whalegps$overallsec,whalegps$long)
#seems that linear interpolation is ok or?
# Linearly interpolate `argument` (known at the ascending time points `time`)
# at each query time in `tvec`.
#
# @param tvec Numeric vector of times at which interpolated values are wanted.
# @param time Ascending numeric vector of times where `argument` is known.
# @param argument Values of the variable of interest, parallel to `time`.
# @param k Maximum allowed gap between the two bracketing time points; wider
#   gaps yield NA instead of an interpolated value.
# @return Numeric vector of length(tvec); NA where the query time falls
#   outside `time` or the bracketing gap exceeds `k`.
interpolationvalue <- function(tvec, time, argument, k){
  b <- rep(NA_real_, length(tvec))
  n <- length(time)
  for (i in seq_along(tvec)) {
    t <- tvec[i]
    # Outside the observed range there is nothing to interpolate.
    if (is.na(t) || t < time[1] || t > time[n]) {
      next
    }
    # Index of the largest time point not exceeding t.
    t1 <- findInterval(t, time)
    if (t1 == n) {
      # t coincides with the final time point. The original code computed
      # t2 = n + 1 here and crashed on `if (NA <= k)`; the known value at
      # the endpoint is the correct answer.
      b[i] <- argument[n]
      next
    }
    t2 <- t1 + 1
    if (time[t2] - time[t1] <= k) {
      b[i] <- argument[t1] +
        (t - time[t1]) / (time[t2] - time[t1]) * (argument[t2] - argument[t1])
    }
  }
  b
}
# calculation for the lat and long variables by linear interpolation in the dive set
whaledivestats$lat <- interpolationvalue(whaledivestats$begdescsec, whalegps$overallsec,whalegps$lat,10000000000)
whaledivestats$long <- interpolationvalue(whaledivestats$begdescsec, whalegps$overallsec,whalegps$long,10000000000)
# calculation of step and angle with moveHMM
d <- prepData(data.frame(lat=whaledivestats$lat,long=whaledivestats$long), type="LL", coordNames = c("lat","long"))
whaledivestats$step <- d$step
whaledivestats$angle <- d$angle
plot(d)
#
# Split `dataset` (a data frame) into sub-frames at every row where
# `argument` reaches the threshold `k`; the triggering rows themselves
# are dropped.
#
# @param k Threshold; rows with argument >= k act as separators.
# @param argument Numeric vector, one value per row of `dataset`.
# @param dataset Data frame to split.
# @return List of data frames, one per run of rows between separators
#   (zero-row where separators are adjacent). If no value reaches `k`,
#   a one-element list holding `dataset` unchanged.
splitset <- function(k, argument, dataset){
  a <- which(argument >= k)
  if (length(a) == 0) {
    return(list(dataset))
  }
  # Sentinel bounds: virtual separators before row 1 and after the last row.
  # The original code errored with a single separator (`for (i in 2:1)`
  # indexed a[2] = NA) and built descending row indices for adjacent
  # separators, silently returning the wrong rows.
  bounds <- c(0L, a, length(argument) + 1L)
  zr <- vector("list", length = length(a) + 1L)
  for (i in seq_along(zr)) {
    lo <- bounds[i] + 1L
    hi <- bounds[i + 1L] - 1L
    zr[[i]] <- if (lo <= hi) dataset[lo:hi, ] else dataset[0, ]
  }
  zr
}
#k=20000
#argument <- whaledivestats$postdive.dur
#newdata <- splitset(k, argument, dataset)
data <- whaledivestats
# transform steplength into meters
data[,12] <- data[,12]*1000
### create csv
#write to new csv-file to prevent repeating the steps above everytime
write.csv(data, file = "whale_data_cleaned.csv")
|
30bb7914b600647c5dd74fe6c5baf43a76e6c1b2 | 74453745dd2a15c8e310e8f4446ccada9702435e | /man/restore.Rd | 790f49002802f8f25505c14f32df0eba07494943 | [
"MIT"
] | permissive | rstudio/renv | ffba012525e8b1e42094899c3df9952b54ecb945 | 8c10553e700cad703ddf4dd086104f9d80178f3a | refs/heads/main | 2023-08-29T08:45:28.288471 | 2023-08-28T22:01:19 | 2023-08-28T22:01:19 | 159,560,389 | 958 | 169 | MIT | 2023-09-14T00:55:28 | 2018-11-28T20:25:39 | R | UTF-8 | R | false | true | 3,864 | rd | restore.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/restore.R
\name{restore}
\alias{restore}
\title{Restore project library from a lockfile}
\usage{
restore(
project = NULL,
...,
library = NULL,
lockfile = NULL,
packages = NULL,
exclude = NULL,
rebuild = FALSE,
repos = NULL,
clean = FALSE,
prompt = interactive()
)
}
\arguments{
\item{project}{The project directory. If \code{NULL}, then the active project will
be used. If no project is currently active, then the current working
directory is used instead.}
\item{...}{Unused arguments, reserved for future expansion. If any arguments
are matched to \code{...}, renv will signal an error.}
\item{library}{The library paths to be used during restore. See \strong{Library}
for details.}
\item{lockfile}{Path to a lockfile. When \code{NULL} (the default), the
\code{renv.lock} located in the root of the current project will be used.}
\item{packages}{A subset of packages recorded in the lockfile to restore.
When \code{NULL} (the default), all packages available in the lockfile will be
restored. Any required recursive dependencies of the requested packages
will be restored as well.}
\item{exclude}{A subset of packages to be excluded during restore. This can
be useful for when you'd like to restore all but a subset of packages from
a lockfile. Note that if you attempt to exclude a package which is required
as the recursive dependency of another package, your request will be
ignored.}
\item{rebuild}{Force packages to be rebuilt, thereby bypassing any installed
versions of the package available in the cache? This can either be a
boolean (indicating that all installed packages should be rebuilt), or a
vector of package names indicating which packages should be rebuilt.}
\item{repos}{The repositories to use when restoring packages installed
from CRAN or a CRAN-like repository. By default, the repositories recorded
in the lockfile will be used, ensuring that (e.g.) CRAN packages are
re-installed from the same CRAN mirror.
Use \code{repos = getOption("repos")} to override with the repositories set
in the current session, or see the \code{repos.override} option in \link{config} for
an alternate way to override.}
\item{clean}{Boolean; remove packages not recorded in the lockfile from
the target library? Use \code{clean = TRUE} if you'd like the library state
to exactly reflect the lockfile contents after \code{restore()}.}
\item{prompt}{Boolean; prompt the user before taking any action? For backwards
compatibility, \code{confirm} is accepted as an alias for \code{prompt}.}
}
\value{
A named list of package records which were installed by renv.
}
\description{
Restore a project's dependencies from a lockfile, as previously generated by
\code{\link[=snapshot]{snapshot()}}. \code{renv::restore()} compares packages recorded in the lockfile to
the packages installed in the project library. Where there are differences
it resolves them by installing the lockfile-recorded package into the
project library. If \code{clean = TRUE}, \code{restore()} will additionally delete any
packages in the project library that don't appear in the lockfile.
}
\examples{
\dontrun{
# disable automatic snapshots
auto.snapshot <- getOption("renv.config.auto.snapshot")
options(renv.config.auto.snapshot = FALSE)
# initialize a new project (with an empty R library)
renv::init(bare = TRUE)
# install digest 0.6.19
renv::install("digest@0.6.19")
# save library state to lockfile
renv::snapshot()
# remove digest from library
renv::remove("digest")
# check library status
renv::status()
# restore lockfile, thereby reinstalling digest 0.6.19
renv::restore()
# restore automatic snapshots
options(renv.config.auto.snapshot = auto.snapshot)
}
}
\seealso{
Other reproducibility:
\code{\link{lockfiles}},
\code{\link{snapshot}()}
}
\concept{reproducibility}
|
7ef0fde98dec5be5cb631a40b0450727d975babb | 0c8be292fd40767f10f28271088df34cb3027286 | /Solved/Problem39.R | b9dd313d36b2391a432d95f3d62a108b9dfb0320 | [] | no_license | RossiLorenzo/Project_Euler | 01a3feca7de0c72cc9d7a895b236f4350868e9b7 | d0329ee1e230874ccadb8e05ff84a49638f0e559 | refs/heads/master | 2021-01-10T15:32:17.856291 | 2015-12-06T19:07:34 | 2015-12-06T19:07:34 | 47,507,194 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,176 | r | Problem39.R | # If p is the perimeter of a right angle triangle
# with integral length sides, {a,b,c}, there are exactly three
# solutions for p = 120.
#
# {20,48,52}, {24,45,51}, {30,40,50}
#
# For which value of p ≤ 1000, is the
# number of solutions maximised?
# Libraries
library(parallel)
# Let's consider a triangle with sides a <= b <= c
# The perimiter is a + b + c.
# Since is right angle c^2 = a^2 + b^2
# So c = sqrt(a^2 + b^2) and p = a + b + sqrt(a^2 + b^2)
# Very easily p = a + b + sqrt(a^2 + b^2) <= 2a + 2b
# p <= 2a+2b <= 4b -> b >= p/4
# For every possible perimeter get possible sides
all_p = 8:1000
cores = makeCluster(10)
X = unlist(parLapplyLB(cores, all_p, function(p){
library(dplyr)
# Calculate possible a, b
b_min = p/4
a = seq(1, b_min, 1)
b = seq(b_min, p - b_min)
triangle = expand.grid(a, b)
# Calculate c from a, b with c^2 = a^2 + b^2
triangle$Var3 = sqrt(triangle$Var1^2 + triangle$Var2^2)
# Check that permiter is right and that c is a perfect square
triangle = filter(triangle,
Var3 + Var2 + Var1 == p,
round(Var3) == Var3)
nrow(triangle)
}))
stopCluster(cores)
# Print results
all_p[which(X == max(X))]
|
c4b7774cb0dbd38234563af815521ef5f1b066cf | 899374f8aecd9427b6f1f945d91c60e647cb0c58 | /Valeria_Stavrova/Task4/task4.R | 418d1d373c092ce6fbf71962e55024ff95e67ab9 | [] | no_license | shulman0/M2019_4142 | a1e081a2441c8b9e2ff84878c4390aac97575dbe | d12e48d50efa84a2fa2d82b9a2745a045cd490ce | refs/heads/master | 2020-09-22T12:26:49.271898 | 2019-11-29T20:53:14 | 2019-11-29T20:53:14 | 225,193,826 | 0 | 0 | null | 2019-12-01T16:38:56 | 2019-12-01T16:38:55 | null | UTF-8 | R | false | false | 1,684 | r | task4.R | #EXPLORYING_DATA
class(weather)
dim(weather)
names(weather)
str(weather)
summary(weather)
head(weather)
tail(weather)
print(weather)
View(weather)
#TIDYING_DATA
library(tidyr)
library(dplyr)
newweather <-weather[,-1] %>% gather(key = day, value = val, X1 : X31) %>%
pivot_wider(names_from = measure, values_from = val)
View(newweather)
#PREPARING_FOR_ANALYSIS
#i decided to keep days,months and years in different columns, because
#it may be convenient to select certain years/months/days as in the examples below
library(data.table)
draftday<-data.table(newweather$day)
day<- separate(draftday, V1, c("X", "day"),sep = 1)[,2]
finalweather<-cbind(day, newweather[,-3])[,c(2,3,1,4:25)]
finalweather1 <- finalweather %>% mutate_at(vars(-Events), as.numeric)
View(finalweather1)
#examples
finalweather1 %>% select(c(1:5)) %>% subset(year==2014)
finalweather1 %>% select(year, month, day, Events) %>% subset(month==12) %>% subset(day==1)
#what about MISSING_VALUES?
is.na(finalweather1)
any(is.na(finalweather1))
sum(is.na(finalweather1))
#in case if missing values aren't meaningful
finalweather2<-na.omit(finalweather1)
any(is.na(finalweather2))
#OUTLIERS_&_ERRORS_DETECTION
summary(finalweather1)
summary(finalweather2)
boxplot(finalweather1$Max.TemperatureF, horizontal = T)
boxplot(finalweather1$Max.Dew.PointF,finalweather1$MeanDew.PointF,finalweather1$Min.DewpointF)
hist(finalweather1$Min.Humidity)
boxplot(finalweather2$Mean.VisibilityMiles, horizontal = T)
hist(finalweather2$PrecipitationIn)
#SAVING_DATA_SETS
saveRDS(finalweather1, "finalweather1.rds")
saveRDS(finalweather2, "finalweather2NONA.rds")
|
3313e872454e66d6ace8e17e597654b518e2570f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mrds/examples/plot.io.Rd.R | 9d493a114f3fdc9fd8149f02965df5a21383c056 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 695 | r | plot.io.Rd.R | library(mrds)
### Name: plot.io
### Title: Plot fit of detection functions and histograms of data from
### distance sampling independent observer ('io') model
### Aliases: plot.io
### Keywords: plot
### ** Examples
## No test:
library(mrds)
data(book.tee.data)
egdata <- book.tee.data$book.tee.dataframe
result.io <- ddf(dsmodel=~cds(key = "hn"), mrmodel=~glm(~distance),
data=egdata, method="io", meta.data=list(width=4))
# just plot everything
plot(result.io)
# Plot primary and secondary unconditional detection functions on one page
# and primary and secondary conditional detection functions on another
plot(result.io,which=c(1,2,5,6),pages=2)
## End(No test)
|
9d6fa73abf9c2ae5e9977bcce5d9c24d4d72cad4 | f9d80df2d3108c24255c4c2ba0fbce25a3acfad5 | /code/examples/Example2.R | 0c22f35d643007b2e62fdd87d9e99c34fdcc9eda | [] | no_license | certifiedwaif/phd | 12ed3c3113a5f662cf14538776f02acecd11338b | 0f6f7b48917961f6821ff7aaf27357c1be1c6d5a | refs/heads/master | 2021-10-24T22:07:44.383587 | 2019-03-29T03:12:55 | 2019-03-29T03:12:55 | 11,830,694 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,499 | r | Example2.R |
set.seed(1)
###############################################################################
x <- seq(-5,5,,1000)
f <- dt(x,3)
plot(x,f,type="l",ylim=c(0,0.4))
h <- abs(x)
lines(x,f*h,col="blue")
g1 <- dt(x,1)
lines(x,g1,col="green3")
g2 <- dnorm(x)
lines(x,g2,col="red")
###############################################################################
MAXITER <- 1000000
N <- 1000000
print("Classical Monte Carlo")
h <- c()
for (ITER in 1:MAXITER)
{
x <- rt(N,3)
h <- c(h,abs(x))
I1.hat <- mean(h)
se <- sqrt(var(h)/length(h))
if ((se)<(5.0E-4)) {
break;
}
print(c(length(h),I1.hat,se))
}
###############################################################################
print("Importance Sampling with t3 proposal")
N <- 100000
h <- c()
w <- c()
for (ITER in 1:MAXITER)
{
x <- rt(N,1)
f <- dt(x,3)
g <- dt(x,1)
w <- c(w,f/g)
h <- c(h,abs(x))
I2.hat <- mean(w*h)
se <- sqrt(var(w*h)/length(h))
if ((se)<(5.0E-4)) {
break;
}
print(c(length(h),I2.hat,se))
}
###############################################################################
print("Importance Sampling with standard normal proposal")
N <- 1000000
h <- c()
w <- c()
for (ITER in 1:100)
{
x <- rnorm(N)
f <- dt(x,3)
g <- dnorm(x)
w <- c(w,f/g)
h <- c(h,abs(x))
I3.hat <- mean(w*h)
se <- sqrt(var(w*h)/length(h))
if ((se)<(5.0E-4)) {
break;
}
print(c(length(h),I3.hat,se))
}
|
10040399c0556a9c6f8f77d7c18a782ad0ea1ad5 | 0bf88d85ff253b7f364c8a97b4711f00f88cff6d | /openings.R | e54c639fed83d600f6b75471e7f90667a07a60c0 | [] | no_license | Georgits/web | ed8bd8904a95d440325e476bb3f7cca670e79c9a | 72dbc519fcce150f8f914a636d092d0664325639 | refs/heads/master | 2020-03-24T17:22:04.313505 | 2019-02-18T21:56:08 | 2019-02-18T21:56:08 | 142,857,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 889 | r | openings.R | url<- "https://www.bad-homburg.de/rathaus/arbeitgeber-stadt/stellenausschreibungen-Stadt.php"
library(stringr)
library(rvest)
library(RSelenium)
remDr <- remoteDriver(
remoteServerAddr = "localhost",
port = 4445L,
browserName = "firefox"
)
remDr$open()
remDr$navigate(url)
html <- read_html(remDr$getPageSource()[[1]])
webElems <- remDr$findElements(using = "css selector", "li.download")
HG <- unlist(lapply(webElems, function(x) {x$getElementText()}))
HG <- html %>%
# The relevant tag
html_nodes('li.download') %>%
html_text() %>%
# Trim additional white space
str_trim() %>%
# Convert the list into a vector
unlist()
HG_raw<-map_dfc(HG, ~str_split(.x, c("\n")))
test <- HG_raw[c(2,6,9,12),]
# liste <- lapply(openings, function(x) {str_split(x, "\n")})
# lapply(liste, function(x) {x[[1]][[1]][1] <- NULL})
|
e15be6348404b7e855b3a517b6755aab914bdfcc | 27f5233f79c8cfd30d5eb027697e468ef0b1102a | /src/plot_many.R | 45ac9bd0b5b2492e6ba6412c43e577319490f85f | [] | no_license | jebyrnes/r-spatial_dc_workshop_2019-01-22 | b2ddf960c86b83c6c5bfa46cbc1f7e9a6efbf60a | 2cc5996aa3a0c086b0b286f490642f7d1e176fd3 | refs/heads/master | 2020-04-17T23:32:50.646154 | 2019-01-24T01:58:15 | 2019-01-24T01:58:15 | 167,039,731 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,110 | r | plot_many.R | #'-----------------------------
#' Plotting All the Things
#'
#' Reads NEON Harvard Forest site-layout shapefiles plus a canopy-height
#' raster and overlays them in ggplot2 (vector-only plot, then raster+vector).
#'-----------------------------
#libraries
library(sf)
library(ggplot2)
library(dplyr)
# Load vector layers: site boundary polygon, roads (lines), tower (point).
aoi_boundary_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HarClip_UTMZ18.shp")
lines_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HARV_roads.shp")
point_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HARVtower_UTM18N.shp")
# Plot all three vector layers together; coord_sf() keeps a common CRS/aspect.
ggplot() +
  geom_sf(data = aoi_boundary_HARV, fill = "grey",
          color = "green") +
  geom_sf(data = lines_HARV, aes(color = TYPE), size = 1) +
  geom_sf(data = point_HARV) +
  coord_sf()
# Combine a raster and the vector layers: geom_raster() needs a data frame,
# hence the as.data.frame(..., xy = TRUE) conversion of the raster.
library(raster)
chm_HARV <- raster("data/NEON-DS-Airborne-Remote-Sensing/HARV/CHM/HARV_chmCrop.tif")
chm_HARV_df <- as.data.frame(chm_HARV, xy = TRUE)
ggplot() +
  geom_raster(data = chm_HARV_df,
              aes(x = x, y = y, fill = HARV_chmCrop)) +
  geom_sf(data = aoi_boundary_HARV, fill = "grey",
          color = "green") +
  geom_sf(data = lines_HARV,
          aes(color = TYPE), size = 1) +
  geom_sf(data = point_HARV) +
  coord_sf()
|
e26f959c2d291c30ff2c9337460331365cdef8d4 | 67c2a90c7edfac3cfd891cb332c45e71cf4a6ad1 | /R/plot.slca.R | 3b27ab9eeea0f67a29bee5b2320d473e3cffaed3 | [] | no_license | alexanderrobitzsch/CDM | 48316397029327f213967dd6370a709dd1bd2e0a | 7fde48c9fe331b020ad9c7d8b0ec776acbff6a52 | refs/heads/master | 2022-09-28T18:09:22.491208 | 2022-08-26T11:36:31 | 2022-08-26T11:36:31 | 95,295,826 | 21 | 11 | null | 2019-06-19T09:40:01 | 2017-06-24T12:19:45 | R | UTF-8 | R | false | false | 393 | r | plot.slca.R | ## File Name: plot.slca.R
## File Version: 0.09
###################################################
# plot slca
#' S3 plot method for "slca" objects: bar plot of the latent class
#' distribution (`pi.k`) for one group.
#'
#' @param x fitted slca object; `x$pi.k` is a classes-by-groups matrix of
#'   class probabilities.
#' @param group column of `pi.k` to plot (default 1).
#' @param ... further arguments passed on to graphics::barplot().
plot.slca <- function( x, group=1, ... )
{
    class_probs <- x$pi.k[, group]
    n_classes <- nrow(x$pi.k)
    # one bar per latent class, labelled 1..n_classes
    graphics::barplot( class_probs, xlab="Class", ylab="Probability",
            names.arg=seq_len(n_classes),
            main=paste0("Class Distribution | Group ", group ), ... )
}
|
b2859e18c04e0739f592e451f8bf7b18a03e641a | 672f1434b4dde4cad051e66994a90887656fad8b | /global.R | 1bd35a416e215e53f0bfcc322218aee9280c10dd | [
"MIT"
] | permissive | samuelbharti/sMAP | df527b56c6f5f240621056d4abffa77072c96408 | 5ef4d2629cfaa0b123de29c2d306ce80a8cbe12c | refs/heads/main | 2023-07-22T15:32:52.830315 | 2021-08-22T23:12:23 | 2021-08-22T23:12:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 827 | r | global.R | # Load the libraries and import function files here.
# Global.R is run one time at app initiallization.
library(bs4Dash)
library(shiny)
library(shinybusy)
library(fresh)
library(oligo)
library(GEOquery)
library(affy)
library(limma)
library(arrayQualityMetrics)
library(sva)
library(Biobase)
library(affyPLM)
library(EnhancedVolcano)
library(AnnotationDbi)
library(hgu133plus2.db)
library(WGCNA)
library(clusterProfiler)
library(msigdbr)
library(impute)
library(org.Hs.eg.db)
library(ggplot2)
library(pheatmap)
library(shinyWidgets)
library(DT)
library(data.table)
library(magrittr)
library(tidyr)
library(enrichplot)
packages12<-c("stringr","R.utils","shinyWidgets")
for(y in packages12){
library(y,character.only=TRUE)
}
#Import Functions from External R scripts.
source("functions/testFunction.R", local = TRUE)$value
|
e17534c08da41b3944f6132d80a59c052680c6c0 | 11e9f9ab4a752042a2c41ea6139cdd54a30b9724 | /cachematrix.R | 4ada1cde40a6c3a0344a88005865d46141b00ea9 | [] | no_license | ldriggen/ProgrammingAssignment2 | 4908bc5cb8408a74023bbc3bd9e07681f77d68ff | c23c7f59d8ce98b97b70679bf39a61baa71233d3 | refs/heads/master | 2021-01-17T21:35:25.629591 | 2015-08-21T19:10:23 | 2015-08-21T19:10:23 | 41,004,417 | 0 | 0 | null | 2015-08-19T00:31:44 | 2015-08-19T00:31:42 | null | UTF-8 | R | false | false | 2,091 | r | cachematrix.R | ## File: cachematrix.R
## Author: Larry Riggen
## Creation Date: 2015-08-20
## File Contents: functions makeCacheMatrix and cacheSolve
## Purpose:
##
## makeCacheMatrix and cacheSolve work in tandem to cache a matrix inverse and
## and reduce overhead by being able to recall the solution.
##
##
## Sample usage (note: the the second cacheSolve(b) execution returns the inverse from cache):
## ------------------------------------------------------
## > b<-makeCacheMatrix(matrix(c(1,3,1,2),nrow=2,ncol=2))
## > cacheSolve(b)
## ------------------------------------------------------
##
## [,1] [,2]
## [1,] -2 1
## [2,] 3 -1
##-------------------------------------------------------
## > cacheSolve(b)
##-------------------------------------------------------
## [1] "getting cached data"
## [,1] [,2]
## [1,] -2 1
## [2,] 3 -1
## makeCacheMatrix creates a special "matrix", which is really a list
## containing a function to:
##
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the matrix inverse
## 4. get the value of the matrix inverse
## Create a cache-aware matrix wrapper: a list of four closures sharing the
## matrix `x` and a cached inverse. `set()` replaces the matrix and clears
## the cache; `setmatrix()`/`getmatrix()` store/retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    # replacing the matrix invalidates any previously cached inverse
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() x
  setmatrix <- function(solve) inv_cache <<- solve
  getmatrix <- function() inv_cache
  # expose the four accessors under the names cacheSolve() expects
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve function calculates the inverse of the special "vector" created with the makeCacheMatrix function above.
## However, it first checks to see if the inverse has already been calculated.
## If so, it gets the inverse from the cache and skips the computation.
## Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the setmatrix function.
## Return the inverse of the special "matrix" `x` built by makeCacheMatrix.
## A previously computed inverse is returned from the cache; otherwise the
## inverse is computed with solve(), stored via x$setmatrix(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    print("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  # memoise the result so subsequent calls skip the computation
  x$setmatrix(inv)
  inv
}
|
e457b3eb91ca11e6ecb36e6f3b32b2ca0c13d6c4 | 505210587ef159312129478d0067e45d9fc5c997 | /man/subset_samples_safe.Rd | 16cb64936454752451cfd4bad8316dd9cb5116bc | [
"MIT"
] | permissive | mworkentine/mattsUtils | 5c0d4dd140eca3b252ecba61c70f04d034dbb1d8 | f09704d993398b2c6920c84fe1b61dbbee6542fa | refs/heads/master | 2021-01-19T12:00:18.865239 | 2019-02-25T19:59:39 | 2019-02-25T20:02:50 | 69,693,229 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 480 | rd | subset_samples_safe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microbiome_helpers.R
\name{subset_samples_safe}
\alias{subset_samples_safe}
\title{Subset samples 2}
\usage{
subset_samples_safe(physeq, var, value)
}
\arguments{
\item{physeq}{a valid phyloseq object}
\item{var}{variable (column name) of sample data}
\item{value}{filter to keep only this value of the variable}
}
\description{
A programming safe version of \code{\link{phyloseq::subset_samples}}
}
|
38336cae397bbf419614c1f32fd8986e73b2f95a | 875ad4e5b8d6b03b0bb19794fb4678790530647b | /assignment2/hw2_104761501.R | 114456950282abf4ca81981059a698d2afdf5f17 | [] | no_license | chihming/1052DataScience | 5cc8a60a73e9af138aacf198b4589051b4aa97a1 | feba123b349d461138e744547f380b094b6341e3 | refs/heads/master | 2020-02-26T15:05:53.669548 | 2017-06-22T02:20:27 | 2017-06-22T02:20:27 | 83,531,085 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,315 | r | hw2_104761501.R | list.of.packages <- c("ROCR")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages, repos="http://cran.rstudio.com/")
library(ROCR)
# Compute classification metrics (sensitivity, specificity, F1, AUC) for one
# method, treating `query_m` ("male" or "female") as the positive class.
#
# Args:
#   query_m: positive class, "male" or "female"; anything else stops().
#   name:    method label for the output row.
#   pred:    predicted class labels ("male"/"female").
#   ref:     reference (true) class labels.
#   score:   prediction score, assumed to be P(male); flipped when the
#            positive class is "female".
# Returns: one-row data.frame(method, sensitivity, specificity, F1, AUC).
eval_func<-function(query_m, name, pred, ref, score)
{
  # The original duplicated the whole computation per class; here the two
  # branches only choose the positive/negative labels (and flip the score
  # for "female", since pred.score is P(male)).
  if(query_m == "male"){
    pos <- "male"
    neg <- "female"
  } else if (query_m == "female") {
    pos <- "female"
    neg <- "male"
    score <- 1.0-score
  } else {
    stop(paste("ERROR: unknown query function", query_m))
  }
  T_IDX <- which(pred==pos)
  F_IDX <- which(pred==neg)
  TP <- length(which(ref[T_IDX]==pos))
  FP <- length(which(ref[T_IDX]==neg))
  TN <- length(which(ref[F_IDX]==neg))
  FN <- length(which(ref[F_IDX]==pos))
  sens <- round((TP/(TP+FN)), 2)
  spec <- round((TN/(TN+FP)), 2)
  f1 <- round((2*TP/(2*TP+FP+FN)), 2)
  # AUC via ROCR: reference recoded to 1 = positive class, 0 = negative.
  ref_v <- as.numeric(ifelse(ref==pos, 1, 0))
  auc_pred <- prediction(predictions=score, labels=ref_v)
  auc_perf <- performance(auc_pred, "auc")
  auc_score <- round(as.numeric(auc_perf@y.values),2)
  data.frame('method'=name, 'sensitivity'=sens, 'specificity'=spec, 'F1'=f1, 'AUC'=auc_score)
}
# read parameters
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
stop("USAGE: Rscript hw2_104761501.R --target male/female --files file1 file2 ... filen --out result.csv", call.=FALSE)
}
# parse parameters
i<-1
while(i < length(args))
{
if(args[i] == "--target"){
query_m<-args[i+1]
i<-i+1
}else if(args[i] == "--files"){
j<-grep("-", c(args[(i+1):length(args)], "-"))[1]
files<-args[(i+1):(i+j-1)]
i<-i+j-1
}else if(args[i] == "--out"){
out_f<-args[i+1]
i<-i+1
}else{
stop(paste("Unknown flag", args[i]), call.=FALSE)
}
i<-i+1
}
print("PROCESS")
print(paste("query mode :", query_m))
print(paste("output file:", out_f))
print(paste("files :", files))
# read files
rows <- data.frame()
for(file in files)
{
name<-gsub(".csv", "", basename(file))
d<-read.table(file, header=T,sep=",")
eval_res <- eval_func(query_m, name, d$prediction, d$reference, d$pred.score)
rows <- rbind(rows, eval_res)
}
top_sens <- rows[ which.max(rows$sensitivity), ]$method
top_spec <- rows[ which.max(rows$specificity), ]$method
top_f1 <- rows[ which.max(rows$F1), ]$method
top_auc <- rows[ which.max(rows$AUC), ]$method
rows <- rbind(rows, (data.frame("method"="highest", "sensitivity"=top_sens, "specificity"=top_spec, "F1"=top_f1, "AUC"=top_auc)))
print (rows)
write.table(rows, file=out_f, row.names = F, quote = F, sep=',')
|
5568fafe3d20e79276bf4674c912f301a16b5524 | f2d3a834eb614c444e4c4d2f863577e804d9fb70 | /man/may.numeric.Rd | ec055658fb68c3a97f9cfab7b071f43d55268332 | [] | no_license | David-Hervas/clickR | 150669cc67575659258e2bb44f429544e52e809c | cb738e505375376d91ac37eb01813ac3fb0e1432 | refs/heads/master | 2023-08-14T05:06:15.095067 | 2023-08-07T17:01:53 | 2023-08-07T17:01:53 | 90,495,146 | 3 | 3 | null | null | null | null | UTF-8 | R | false | true | 332 | rd | may.numeric.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_quality.R
\name{may.numeric}
\alias{may.numeric}
\title{Checks if each value might be numeric}
\usage{
may.numeric(x)
}
\arguments{
\item{x}{A vector}
}
\value{
A logical vector
}
\description{
Checks if each value from a vector might be numeric
}
|
3fa42fb2f82c568e76e40218703fd3c5e11dc57b | 3d5233d7cfc6d2426c600e4c612afb6cea2416c5 | /tests/testthat/test_local_convertOMLTaskToMlr.R | 823b132b99ba4feec3274430eeff809390c2f138 | [] | no_license | mutual-ai/openml-r | 72ecdc54b5e34f1eb31e8a93633f3d1f93c3a6d1 | 599a378dd67f37b06d23928d16b01bbd13102c4b | refs/heads/master | 2020-04-01T14:04:05.019082 | 2016-07-19T08:11:01 | 2016-07-19T08:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 364 | r | test_local_convertOMLTaskToMlr.R | context("convertOMLTaskToMlr")
test_that("convertOMLTaskToMlr", {
with_test_cache({
task = getOMLTask(59)
mlr.task = convertOMLTaskToMlr(task)
expect_is(mlr.task$mlr.task, "Task")
expect_is(mlr.task$mlr.rin, "ResampleInstance")
for (i in seq_along(mlr.task$mlr.measures))
expect_is(mlr.task$mlr.measures[[i]], "Measure")
})
})
|
d80a1ebb927df5043d137ce87710c40a041c56a1 | 61ad1512559407a9b22823188c9984769d5da25e | /man/init_presta_manager.Rd | 0b08c93d18b0187ed0c36d8ba9868ad3efbc1039 | [
"MIT"
] | permissive | Cervangirard/todoist | 0e7cd0ed651744f1d843060527b50fd94c28d2f7 | 586b117e577c9cffde5149b7c329b4b0c7cfe970 | refs/heads/master | 2021-04-05T07:20:42.763590 | 2020-04-17T13:41:51 | 2020-04-17T13:41:51 | 248,532,463 | 0 | 0 | NOASSERTION | 2020-03-19T15:03:56 | 2020-03-19T15:03:55 | null | UTF-8 | R | false | true | 909 | rd | init_presta_manager.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/form_presta.R
\name{init_presta_manager}
\alias{init_presta_manager}
\title{Init presta manager}
\usage{
init_presta_manager(
project_id,
tasks_list = list("Proposition - Devis", "Gestion projet",
"S'assurer d'avoir un nom de projet coherent avec Slack",
"S'assigner et mettre des dates sur certaines taches pour pas les oublier",
"Rediger la reference de la mission dans {reference}"),
try_again = 3,
time_try_again = 3,
verbose = TRUE,
responsible = NULL,
token = get_todoist_api_token()
)
}
\arguments{
\item{project_id}{id of project}
\item{tasks_list}{lists of tasks}
\item{try_again}{number of times to retry the HTTP request}
\item{time_try_again}{time to wait between retries}
\item{verbose}{make the function verbose}
\item{responsible}{add people in project}
\item{token}{token}
}
\description{
Init presta manager
}
|
f02e603bc4b1002b231b5d484f3a669276c5aff6 | 4515a918188cf18e299963f6c4d87202c7cd212f | /scripts/old/99_plot_slim_old.R | ebf958153235d7820f39963eaf413ceb4457f5ff | [] | no_license | mastoffel/sheep_roh | 94132c4531eaff49dc77555f0c9b6c132bebc115 | b39bb4e33a1362d9a58eebd4341e2a77c0c599e8 | refs/heads/master | 2023-07-06T00:33:51.772215 | 2021-08-06T10:41:33 | 2021-08-06T10:41:33 | 269,019,067 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,977 | r | 99_plot_slim_old.R | library(tidyverse)
library(ggplot2)
library(gghalves)
source("../sheep_ID/theme_simple.R")
mut_df1 <- read_delim("output/qm_slim/slim1000200/out/par_combs_popsize1_1000_popsize2_200.txt", " ") %>%
mutate(popsize = 1000)
mut_df2 <- read_delim("output/qm_slim/slim5000200/out/par_combs_popsize1_5000_popsize2_200.txt", " ")%>%
mutate(popsize = 5000)
mut_df3 <- read_delim("output/qm_slim/slim10000200/out/par_combs_popsize1_10000_popsize2_200.txt", " ")%>%
mutate(popsize = 10000)
mut_all <- bind_rows(mut_df1, mut_df2, mut_df3)
# filter out one parameter combination
mut_p <- mut_all %>%
filter(roh_class != "outside_roh") %>%
group_by(popsize, mut1_dom_coeff, mut1_gam_mean, seed, roh_class) %>%
summarise(s_sum_per_MB = mean(s_sum_per_MB, na.rm = TRUE),
num_mut_per_MB = mean(num_mut_per_MB, na.rm = TRUE)) %>%
mutate(roh_class = factor(roh_class, levels = rev(c("long", "medium","short"))))
#mutate(roh_class = factor(roh_class, levels = rev(c("long", "medium","short"))))
# % deleteriousness lost
mut_p %>%
group_by(mut1_dom_coeff, roh_class) %>%
summarise(s_sum_per_MB = mean(s_sum_per_MB, na.rm = TRUE),
num_mut_per_MB = mean(num_mut_per_MB, na.rm = TRUE))
p1 <- mut_p %>%
filter(!is.na(roh_class)) %>%
mutate(popsize = case_when(
popsize == 1000 ~ "1k",
popsize == 5000 ~ "5k",
popsize == 10000 ~ "10k",
)) %>%
mutate(popsize = factor(popsize, levels = c("1k", "5k", "10k"))) %>%
rename(s = mut1_gam_mean,
h = mut1_dom_coeff,
Nanc = popsize) %>%
ggplot(aes(roh_class, s_sum_per_MB, fill = roh_class)) +
geom_half_point(side = "r", shape = 21, alpha = 0.8, stroke = 0.1, size =2,
transformation_params = list(height = 0, width = 1.3, seed = 1)) +
geom_half_boxplot(side = "l", outlier.color = NA,
width = 0.8, lwd = 0.5, color = "black",
alpha = 0.8) +
#scale_y_continuous(limits = c(-0.02, 0)) +
scale_fill_viridis_d("ROH length class", direction = -1,
guide = guide_legend(reverse = TRUE),
labels=rev(c("long (>6.25cM)", "medium (>1.56cM & <6.25cM)", "short (<1.56cM)"))) +
ylab("Selection coefficient per cM") +
xlab("ROH length class") +
scale_x_discrete(labels = c(expression(ROH[long]),expression(ROH[medium]), expression(ROH[short]))) +
scale_y_continuous(breaks = c(0, -0.005, -0.01),
labels = c("0", "-0.005", "-0.01")) +
facet_grid(h + Nanc ~ s,
labeller = label_both, #scales = "free",
switch = "y") +
# ggtitle("Sim: weakly del\nROH cutoff 4900KB, \nmut1. dist: -0.03, 2, dom coeff 0.1 \nmut2. dist: -0.2, 3, dom coeff 0.01") +
# geom_jitter(size = 2, alpha = 0.3, width = 0.2) +
theme_simple(grid_lines = FALSE, axis_lines = TRUE, base_size = 12) +
theme(
# panel.grid.major = element_blank(),
#panel.grid.minor = element_blank(),
axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
#strip.text.y.right = element_text(angle = 0),
# legend.position = "none",
axis.text = element_text(color = "black"),
legend.position = "top"
) +
#ggtitle("Ancestral N = 1000, current N = 200")
coord_flip()
p1
ggsave("figs/sim_s_h_ne_all.jpg", p1, height = 8, width = 7)
# % differences
mut_p %>%
group_by(roh_class) %>%
summarise(mean(s_sum_per_MB))
# % decrease long to medium
1 - (-0.00249 / -0.00339)
1 - ( -0.00136 / -0.00339)
mut_p %>%
group_by(roh_class, mut1_gam_mean) %>%
summarise(mean(s_sum_per_MB))
1- (-0.00133 / -0.00402)
1- (-0.00140 / -0.00264)
mut_p %>%
group_by(roh_class, mut1_dom_coeff) %>%
summarise(mean(s_sum_per_MB))
1- (-0.00194 / -0.00477)
1- (-0.000800 / -0.00195 )
# Rebuild mut_p from the Ne = 5000 run only (mut_df2), averaging within each
# parameter combination / seed / ROH class.
mut_p <- mut_df2 %>%
  filter(roh_class != "outside_roh") %>%
  group_by(mut1_dom_coeff, mut1_gam_mean, seed, roh_class) %>%
  summarise(s_sum_per_MB = mean(s_sum_per_MB, na.rm = TRUE),
            num_mut_per_MB = mean(num_mut_per_MB, na.rm = TRUE)) %>%
  mutate(roh_class = factor(roh_class, levels = c("long", "medium","short")))
#mutate(roh_class = factor(roh_class, levels = rev(c("long", "medium","short"))))
# p2: same style of plot as p1, faceted by selection and dominance only.
# BUG FIX: the original piped through `filter(popsize = 5000)`, which errors
# in dplyr (`=` must be `==`) and would fail anyway because `popsize` is
# dropped by summarise() above; it is also redundant since mut_df2 holds only
# the Ne = 5000 simulations, so the filter is removed.
p2 <- mut_p %>%
  rename(selection = mut1_gam_mean,
         dominance = mut1_dom_coeff) %>%
  ggplot(aes(roh_class, s_sum_per_MB, fill = roh_class)) +
  geom_half_point(side = "r", shape = 21, alpha = 0.8, stroke = 0.1, size =2,
                  transformation_params = list(height = 0, width = 1.3, seed = 1)) +
  geom_half_boxplot(side = "l", outlier.color = NA,
                    width = 0.5, lwd = 0.5, color = "black",
                    alpha = 0.8) +
  #scale_y_continuous(limits = c(-0.02, 0)) +
  scale_fill_viridis_d(direction = 1) +
  ylab("Selection coefficient per cM") +
  # xlab("ROH length class") +
  scale_x_discrete(labels = c(expression(ROH[long]),expression(ROH[medium]), expression(ROH[short]))) +
  facet_grid(dominance ~ selection, labeller = label_both, scales = "free") +
  # ggtitle("Sim: weakly del\nROH cutoff 4900KB, \nmut1. dist: -0.03, 2, dom coeff 0.1 \nmut2. dist: -0.2, 3, dom coeff 0.01") +
  # geom_jitter(size = 2, alpha = 0.3, width = 0.2) +
  theme_simple(grid_lines = FALSE, axis_lines = TRUE, base_size = 12) +
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.line.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank(),
    legend.position = "none",
    axis.text = element_text(color = "black")
  ) #+
#coord_flip()
# Combine the two panels side by side.
library(patchwork)
p1 + p2
|
6e0de75c52119205912505f42911f0b041258905 | b43718436c098a8088ff1329adac36e1e8063e54 | /man/xml_to_ds_spec.Rd | ba49407d10db1b668ef1b994dc98dcafcacf02df | [
"MIT"
] | permissive | thomas-neitmann/metacore | 658d6f20e7722db882a505a3eb60e5f89f8f576d | ec62d598f69d0b3a9f99e3cb24dbe6a8b222bbba | refs/heads/main | 2023-06-29T02:25:15.744144 | 2021-06-11T09:54:28 | 2021-06-11T09:54:28 | 390,272,220 | 0 | 0 | NOASSERTION | 2021-07-28T08:35:57 | 2021-07-28T08:22:34 | null | UTF-8 | R | false | true | 614 | rd | xml_to_ds_spec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xml_builders.R
\name{xml_to_ds_spec}
\alias{xml_to_ds_spec}
\title{XML to Data Set Spec}
\usage{
xml_to_ds_spec(doc)
}
\arguments{
\item{doc}{xml document}
}
\value{
data frame with the data set specifications
}
\description{
Creates a dataset specification, which has the domain name and label for each dataset
}
\seealso{
Other xml builder:
\code{\link{xml_to_codelist}()},
\code{\link{xml_to_derivations}()},
\code{\link{xml_to_ds_vars}()},
\code{\link{xml_to_value_spec}()},
\code{\link{xml_to_var_spec}()}
}
\concept{xml builder}
|
5c1a7250595ae35135fddee002f54a8f999dbba2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ncf/examples/spline.correlog.Rd.R | ba30de826b18e4eb46caec4820dab37e65051867 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,042 | r | spline.correlog.Rd.R | library(ncf)
### Name: spline.correlog
### Title: Uni- and multivariate spline correlograms
### Aliases: spline.correlog
### Keywords: smooth spatial
### ** Examples
# first generate some sample data: a 20 x 5 grid of coordinates
x <- expand.grid(1:20, 1:5)[, 1]
y <- expand.grid(1:20, 1:5)[, 2]
# z data from an exponential random field (two realisations, column-bound)
z <- cbind(
  rmvn.spa(x = x, y = y, p = 2, method = "exp"),
  rmvn.spa(x = x, y = y, p = 2, method = "exp")
)
# w data from a gaussian random field
w <- cbind(
  rmvn.spa(x = x, y = y, p = 2, method = "gaus"),
  rmvn.spa(x = x, y = y, p = 2, method = "gaus")
)
# univariate spline correlogram (first realisation only, 100 bootstraps)
fit1 <- spline.correlog(x = x, y = y, z = z[, 1], resamp = 100)
## Not run: plot.spline.correlog(fit1)
summary(fit1)
# multivariate spline correlogram (both realisations of z)
fit2 <- spline.correlog(x = x, y = y, z = z, resamp = 100)
## Not run: plot.spline.correlog(fit2)
summary(fit2)
# multivariate spline cross-correlogram between z and w
fit3 <- spline.correlog(x = x, y = y, z = z, w = w, resamp = 100)
## Not run: plot.spline.correlog(fit3)
summary(fit3)
|
f2177272b1360350591d8806cbd84aa2a3f5b9fc | dfe2c8e283a4dcec74ed3511d214f014e21a15ea | /R/makeStats.R | a184b90e4b851a429fe10e889e5016f5d33a0b0e | [
"MIT"
] | permissive | JiaziChen111/modelconf | 0b8dabcb32056aaf701bc45ecef7605827e2f078 | e05eaad92e8499feb6f2befba54b226a979da8a7 | refs/heads/master | 2022-03-30T09:15:30.208959 | 2020-01-21T21:53:46 | 2020-01-21T21:53:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 530 | r | makeStats.R | makeStats <-
function(data, boot.index) {
data.mean <- colMeans(data)
n <- nrow(data)
B <- ncol(boot.index)
weights <- vapply(1L:B,
function(j) tabulate(boot.index[, j], nbins = n),
integer(n)) * (1 / n)
if (all(apply(weights, 1, function(x) length(unique(x)) == 1))){
warning("makeStats: Insufficient observations or block length too large.")
}
boot.data.mean <- t(data) %*% weights
return(list(data.mean = data.mean,
boot.data.mean = boot.data.mean))
}
|
7f7e8836f944c0f186ab9fd847476bef69635a5e | cb57199836e5e5de9f597c13404e2a089285bc44 | /man/format_topics.Rd | 18de15420c6d363b3481e4de4885f07db5165496 | [
"MIT"
] | permissive | kevinrue/BiocChallenges | 57971f16cc828cfc657a52ab1291983adc595d7b | 8f1a9628b7816c69876b9ffa3610fd0109bb6fa5 | refs/heads/main | 2023-08-28T09:53:56.134237 | 2021-10-31T21:29:41 | 2021-10-31T21:29:41 | 294,656,697 | 0 | 0 | MIT | 2020-09-11T15:39:06 | 2020-09-11T09:50:25 | R | UTF-8 | R | false | true | 622 | rd | format_topics.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topics.R
\docType{data}
\name{topic_tags}
\alias{topic_tags}
\alias{format_topics}
\title{Challenge Topics}
\format{
\code{topic_tags} is a character vector of valid topic tags.
}
\usage{
topic_tags
format_topics(params)
}
\arguments{
\item{params}{Challenge parameters as \code{list}.}
}
\value{
\code{format_topics()} returns a character value indicating the challenge topic tags.
}
\description{
Challenge Topics
}
\examples{
topic_tags
params <- list(topics = c("Challenges", "Community"))
cat(format_topics(params))
}
\keyword{datasets}
|
eeb717c91ee4a88191609d2d6722d910a32e6115 | 4ce0cc121642cc9a3af8fd16e2c87280d4d6dc50 | /R/hello.R | 5149c454550c82dad782e6bccc88473a1c7e1f91 | [] | no_license | roberamo/daphnia | 38a26506456c852c88557690986235c146bcb66f | 629b4fc733c20d653778e8fa9df3dd2f5c1b7962 | refs/heads/master | 2020-04-06T17:52:49.091392 | 2018-11-15T09:38:32 | 2018-11-15T09:38:32 | 157,676,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 252 | r | hello.R | #' Say hello to someone.
#'
#' @param name A string of characters. (Default = "Robert")
#' @return Prints a friendly greeting.
#' @examples
#' hello(name = "Andreas")
#' @export
#'
hello <- function(name = "Robert") {
print(paste("Hello,", name))
}
|
50582452d49e7f752eb197fc91a75e4d1897bf50 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rdimtools/examples/nonlinear_LAMP.Rd.R | 89fe8d4641e648ee47c5d2a36ba12f483d1e1823 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 484 | r | nonlinear_LAMP.Rd.R | library(Rdimtools)
### Name: do.lamp
### Title: Local Affine Multidimensional Projection
### Aliases: do.lamp
### ** Examples
## Not run:
##D ## load iris data
##D data(iris)
##D X <- as.matrix(iris[,1:4])
##D
##D ## let's compare with PCA
##D out1 <- do.pca(X, ndim=2) # PCA
##D out2 <- do.lamp(X, ndim=2) # LAMP
##D
##D ## visualize
##D par(mfrow=c(1,2))
##D plot(out1$Y[,1], out1$Y[,2], main="PCA")
##D plot(out2$Y[,1], out2$Y[,2], main="LAMP")
## End(Not run)
|
642fb9603cdfebf483afd42c5701d4d3b8bbca0f | 10ba1d80002e305998364b1e46e744708ef89043 | /Naive Bay_Yulong Gong.R | e3016e3ec60a043df254b07f8dfe028353004008 | [] | no_license | AntonioMoralCevallos/BA810-Classification_of_Cellphone_Price_Ranges | 1c9eb53253275d9a70fd12d2b14d6bcf517112d9 | 6403a90983aec8221cd37934889f6b780e11af08 | refs/heads/main | 2023-08-07T00:16:34.496827 | 2021-08-28T05:54:40 | 2021-08-28T05:54:40 | 340,150,260 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,131 | r | Naive Bay_Yulong Gong.R | install.packages(c('tidyverse','caret','caretEnsemble','psych','Amelia','mice','GGally','rpart','randomForest'))
library(tidyverse)
library(ggplot2)
library(caret)
library(psych)
library(Amelia)
library(mice)
library(GGally)
library(rpart)
library(randomForest)
library(data.table)
# import the data
data <- fread('/Users/yulong/Desktop/BA810/train.csv')
str(data)
describe(data)
# price high_low
data[,result:=0]
# 0,1,2, low
# 3 high
data[price_range == 3, result:=1]
# drop original
data[,price_range:=NULL]
# build the model
# split the train & test. 20 as test, 80 as train.
data[,test:=0]
data[sample(nrow(data),1600),test := 1]
Train <- data[test==1]
Test <- data[test==0]
Train[,test:= NULL]
Test[,test:=NULL]
# Check the distribution of the result
prop.table(table(Train$result))
# split the feature.
x = Train[,-21]
y = factor(Train$result)
# Naive Bay
install.packages('e1071')
library(e1071)
model = train(x,y,'nb',trControl=trainControl(method='cv',number=10))
# predict
predict<- predict(model, newdata = Test)
pre <- ifelse(predict > 0.5, 1,0)
confusionMatrix(factor(preds), factor(Test$result))
|
0e178b5163512df758b92897c773d5002754e9e9 | 70aeaefa7dccc959a192d46f659d9153cad62a5b | /R/add.cases.r | 9ff0044a3c6228e178ae22892e4ccbb1707fbc27 | [] | no_license | isubirana/rutinesPkg | ec5929925c6291f4d440b5e10c50e29f7ecad306 | 598f879321f7cbb8f753c1b80b1ee39810d3f3b6 | refs/heads/master | 2020-04-02T13:39:45.823880 | 2019-03-04T07:24:41 | 2019-03-04T07:24:41 | 154,490,840 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,909 | r | add.cases.r | #' FUNCIO QUE AFEGEIX CASOS
#' funcio que afegeix els casos de y sota dels de x.
#' @param x primera base de dades
#' @param y segona base de dades que es posara a sota
#' @param font NULL
#' @param all 1: posa les variables comunes de x i de y; 2: posa totes les variables de x; 3: posa totes les variables de x i totes les variables de y.
#' @note per a l'opcio all=1 i all=2 manen les etiquetes i formats de x
#' per a l'opcio all=3 manen les etiquetes de x per a les variables comunes i les de y per a la resta. Els formats resultants s?n
#' coherents amb la fusio (realitza un arregla.formats)
#' @export
add.cases<-function(x,y,font=NULL,all=3,show.warnings=TRUE){
comunes<-intersect(names(x),names(y))
if (!is.null(font)){
eval(parse(text=paste("x$",font,"<-0",sep="")))
eval(parse(text=paste("y$",font,"<-1",sep="")))
}
if (all==1){ # totes les variables comunes de x i y.
if (length(comunes)==0){
if (show.warnings) cat("\n\n>Advertencia: no hi ha cap variable comuna\n\n")
return(NULL)
}
if (length(comunes)>0){
return(rbind(x[,comunes],y[,comunes]))
}
}
if (all==2){ # totes les variables de x
falten<-names(x)[!names(x)%in%comunes]
if (length(falten)){
if (show.warnings) cat("\n\n>Advertencia: les seguents variables quedaran buides\n ",paste(falten,collapse=", "),"\n\n")
for (i in 1:length(falten)) eval(parse(text=paste("y$'",falten[i],"'<-NA",sep="")))
}
return(rbind(x,y[,names(x)]))
}
if (all==3){ # totes les variables de y, x
attr.x<-lapply(x,attributes)
attr.y<-lapply(y,attributes)
falten.y<-names(x)[!names(x)%in%names(y)]
falten.x<-names(y)[!names(y)%in%names(x)]
if (length(falten.y)){
if (show.warnings) cat("\n\n>Advertencia: les seguents variables quedaran buides per a y\n ",paste(falten.y,collapse=", "),"\n\n")
for (i in 1:length(falten.y)) eval(parse(text=paste("y$'",falten.y[i],"'<-NA",sep="")))
y<-as.data.frame(y)
}
if (length(falten.x)){
if (show.warnings) cat("\n\n>Advertencia: les seguents variables quedaran buides per a x\n ",paste(falten.x,collapse=", "),"\n\n")
for (i in 1:length(falten.x)) eval(parse(text=paste("x$'",falten.x[i],"'<-NA",sep="")))
x<-as.data.frame(x)
}
fusio<-rbind(
as.data.frame(lapply(x[,names(x)],function(temp) I(as.character(temp)))),
as.data.frame(lapply(y[,names(x)],function(temp) I(as.character(temp))))
)
fusio<-arregla.formats(fusio,force=TRUE)
names(fusio)<-names(x)
if (length(attr.x)){
for (i in 1:length(attr.x)){
attributes(fusio[,names(attr.x)[i]])<-attr.x[[i]]
}
}
if (length(attr.y)){
for (i in 1:length(attr.y)){ # queden els atributs de les variables de x.
if (!names(attr.y)[i]%in%names(attr.x)) attributes(fusio[,names(attr.y)[i]])<-attr.y[[i]]
}
}
return(fusio)
}
}
|
58f86d154fa867b15ae0708dfe3c4e5f94272abf | 628380401240c432584c9af7145c993e5d1ff740 | /R/check_fun.R | 23a1cc3efd3daaafe5343c1f6ac56f3780eae6d5 | [] | no_license | bamaecohydro/waterLevel_Demo | a3fa05a2edcd0b762d406db2e5669a5864c272c9 | 016934c0c77cbd5b084de939d978bfe6ccfaaf9d | refs/heads/master | 2023-03-09T09:52:17.518236 | 2021-02-17T18:25:28 | 2021-02-17T18:25:28 | 339,799,582 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,979 | r | check_fun.R | #Grab info from files
file_fun<-function(n){
#Download data
temp<-read_csv(pt_files[n], skip=1)
#Determine serial number
serial_number<-colnames(temp)[grep("LGR",colnames(temp))][1] #Find collumn name with serial number
serial_number<-substr(serial_number, #isolate serial number
gregexpr("SEN.S.N",serial_number)[[1]][1]+9, #Start
nchar(serial_number)-1) #stop
serial_number<-as.numeric(serial_number)
#Determine TZ
time_zone<-colnames(temp)[grep("GMT",colnames(temp))] #Grab collumn name w/ time offset
time_zone<-substr(time_zone,
regexpr('GMT', time_zone)[1],
nchar(time_zone))
time_zone<-if_else(time_zone=="GMT-04:00",
"EST",
if_else(time_zone=="GMT-05:00",
"EDT",
"-9999"))
#Determin units
units<-colnames(temp)[grep("Abs Pres,",colnames(temp))]
units<-substr(units,
regexpr("Abs Pres,", units)+10,
regexpr("Abs Pres,", units)+12)
#Organize
colnames(temp)<-c("ID","Timestamp","pressureAbsolute", "temp")
temp<-temp[,c("Timestamp","pressureAbsolute", "temp")]
temp<-temp %>%
#Select collumns of interest
dplyr::select(Timestamp, pressureAbsolute, temp) %>%
#Convert to POSIX
dplyr::mutate(Timestamp = as.POSIXct(strptime(Timestamp, "%m/%d/%y %I:%M:%S %p"), tz = time_zone)) %>%
#Convert to GMT
dplyr::mutate(Timestamp = with_tz(Timestamp, "GMT")) %>%
#Order the intput
dplyr::arrange(Timestamp)
#create output
tibble(path = pt_files[n],
Sonde_ID = serial_number,
units = units,
start_date = min(temp$Timestamp),
end_date = max(temp$Timestamp))
}
# Cross-check the sonde serial numbers found in the downloaded data files
# against those recorded in the field log, and report any mismatches.
check_fun<-function(pt_files,field_log){
  # Collect sonde serial numbers from the sonde download files.
  # NOTE(review): file_fun() reads the global `pt_files`, so the `pt_files`
  # argument here must be the same object the files were listed into.
  sonde_files<-lapply(X = seq(1,length(pt_files)), FUN = file_fun)
  sonde_files<-bind_rows(sonde_files) %>% select(Sonde_ID)
  # Collect sonde serial numbers from the field sheet
  field_sheet<-field_log %>% select(Sonde_ID)
  # Tag each side, then full-join on Sonde_ID: a row missing x or y means
  # the serial appears on only one side.
  sonde_files <- sonde_files %>% mutate(x = 'sonde_files')
  field_sheet <- field_sheet %>% mutate(y = 'field_sheet')
  output<-full_join(sonde_files,field_sheet)
  # Keep only serials missing from one of the two sources.
  output<-output %>% filter(is.na(x) | is.na(y))
  # Report: clean bill of health, or the table of mismatched serials.
  if(nrow(output)==0){
    print("#--------------------------------------------------")
    print("#--------------------------------------------------")
    print("#--------------------------------------------------")
    print("Looks like your field sheets match the downloads")
  }else{
    print("#--------------------------------------------------")
    print("#--------------------------------------------------")
    print("#--------------------------------------------------")
    print("Ahh shit...you messed something up you goof! ")
    output
  }
}
|
748ec69a226ef2ac81a4d6bfbfd8e3355665a33b | 684ecc2d505f5ba61b6f9bf93cc8f07971323c17 | /man/larkspur.Rd | a0e0f8583c3062a46a25fb88b05495453caaf7ff | [] | no_license | trobinj/trtools | 1997997ff220a355c911c6d3746ebfa5012cc919 | b6fcac2f13a4ab3c8a343c0ce5fc255fc16db779 | refs/heads/master | 2023-01-13T22:37:28.268364 | 2023-01-06T15:51:26 | 2023-01-06T15:51:26 | 67,811,702 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,079 | rd | larkspur.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/larkspur.R
\docType{data}
\name{larkspur}
\alias{larkspur}
\title{Sample of transects of pale white larkspur.}
\format{
A data frame with 20 observations and two variables:
\describe{
\item{plants}{abundance in a sampled transect}
\item{area}{sampled transect area in square meters}
}
}
\source{
Stehman, S. V., & Salzer, D. W. 2000. Estimating density from surveys employing unequal-area belt transects. \emph{Wetlands}, \emph{20}, 512-519.
Gregoire, T. G., & Valentine, H. T. (2007). \emph{Sampling strategies for natural resources and the environment}. Boca Raton, FL: Chapman & Hall/CRC.
}
\usage{
larkspur
}
\description{
Sample of 20 one-meter belt transects to estimate the abundance of pale white larkspur in a given region. The total number of sampling units in the sampling frame is 150 belt transects with a total area of 5897 square meters.
}
\note{
Although not stated explicitly in the source, the sample was presumably obtained using simple random sampling.
}
\keyword{datasets}
|
2c978423a117e9ebb8620a37a26f7b46117454cb | a8b6bfc9e1e34d5d260b833587b9fc7ff84215da | /man/nbss.Rd | 7bb304fddcf400c41f1f0a21e3d86d62a67c195b | [] | no_license | jiho/nbssr | d93d92efaa92fe3099187c74527eed22aa77844d | 838ddfe8829829aed2a0023d63798d377e5090b1 | refs/heads/master | 2022-07-26T15:18:55.354762 | 2020-05-14T23:40:21 | 2020-05-14T23:40:21 | 262,024,579 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,321 | rd | nbss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nbss.R
\name{nbss}
\alias{nbss}
\title{Normalised Biomass Size Spectrum}
\usage{
nbss(x, w = rep(1, length(x)), type = c("biomass", "abundance"),
base = 10, binwidth = 0.1)
}
\arguments{
\item{x}{vector of biomasses, biovolumes, or lengths.}
\item{w}{vector of weights, typically concentrations associated with individual measurements in \code{x}.}
\item{type}{whether to compute a biomass/biovolume or abundance spectrum.}
\item{base}{base of the logarithm for the computation of bins.}
\item{binwidth}{width of bins in log10 scale.}
}
\value{
a data.frame with columns
\itemize{
\item \code{bin_log} value of the bin center in log scale;
\item \code{bin} value of the bin center in original scale;
\item \code{binwidth} width of the bin center in original scale;
\item \code{y} sum of biomass/biovolume for a biomass spectrum; count of the objects in the bin for an abundance spectrum;
\item \code{norm_y} \code{y/binwidth}
}
}
\description{
Normalised Biomass Size Spectrum
}
\examples{
# Biovolume spectrum
ss <- nbss(uvp$volume_mm3)
head(ss)
autoplot(ss) + labs(
x=expression("Biovolume (mm"^3*")"),
y="Normalised biovolume"
)
# Abundance spectrum
ss <- nbss(uvp$length_mm, binwidth=0.05)
autoplot(ss) + labs(x="Length (mm)")
}
|
80492a9100f227ac1eeb09aec6dd477bb0e67b18 | 5434a6fc0d011064b575b321e93a3519db5e786a | /man/clearNodeOpacityBypass.Rd | 2d07142657f4ece3f36535e2fb94ba6615681d84 | [
"MIT"
] | permissive | cytoscape/RCy3 | 4813de06aacbaa9a3f0269c0ab8824a6e276bad9 | 18d5fac035e1f0701e870150c55231c75309bdb7 | refs/heads/devel | 2023-09-01T18:23:28.246389 | 2023-08-23T07:57:19 | 2023-08-23T07:57:19 | 118,533,442 | 47 | 22 | MIT | 2023-04-03T17:52:34 | 2018-01-23T00:21:43 | R | UTF-8 | R | false | true | 1,003 | rd | clearNodeOpacityBypass.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{clearNodeOpacityBypass}
\alias{clearNodeOpacityBypass}
\title{Clear Node Opacity Bypass}
\usage{
clearNodeOpacityBypass(node.names, network = NULL, base.url = .defaultBaseUrl)
}
\arguments{
\item{node.names}{List of node names or SUIDs}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Clear the bypass value for node fill, label and border opacity
for the specified node or nodes, effectively restoring any previously defined
style defaults or mappings.
}
\examples{
\donttest{
clearNodeOpacityBypass(c('Node 1','Node 2'))
}
}
\seealso{
{
\link{setNodeOpacityBypass}
}
}
|
796b77e0607234f3999cb031dda17dee87012c09 | 501221e119380024b9be0cb3259a98080212c50d | /cachematrix.R | e5232323964b314d8270098b6f77736bbee09483 | [] | no_license | apocalypsemeow/ProgrammingAssignment2 | 58535b294b759cd66490fc71aa82882d02102038 | d6bd8d9458f4e0ea313e0105d60909f7ea421016 | refs/heads/master | 2021-01-20T15:49:45.987447 | 2014-08-22T02:54:25 | 2014-08-22T02:54:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,258 | r | cachematrix.R | ## These functions compute the inverse of a matrix
## if it has not already been computed.
## makeCacheMatrix takes as input a matrix, and returns a list
## of functions that can modify and retrieve its inverse
## Build a caching wrapper around a matrix. Returns a list of four accessor
## functions (set, get, setinverse, getinverse) that share the matrix `x`
## and its cached inverse through their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # NULL means "inverse not computed yet"
  list(
    ## Replace the stored matrix; any cached inverse is now stale
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    ## Return the stored matrix
    get = function() x,
    ## Store a precomputed inverse
    setinverse = function(inverse) cached_inverse <<- inverse,
    ## Return the cached inverse (NULL if never set)
    getinverse = function() cached_inverse
  )
}
## cacheSolve takes in the list created by cacheMatrix
## and computes the inverse of the matrix,
## if not already initialized
## Return the inverse of the matrix held in the cache object `x` (as built
## by makeCacheMatrix). The inverse is computed with solve() on the first
## call and served from the cache afterwards; extra arguments are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  ## Cache hit: announce it and reuse the stored inverse
  message("getting cached data")
  cached
}
|
1fce6ff60f1cd01d9a8587e0fca252480ba2eb8f | e7eccb62a1ee411b0c9bc659b0bcc52c58eb6eb1 | /Neural_net_portion_Fred.R | 1a73f869782eebc9a21f555b111bbfc096aba9fc | [] | no_license | cmbrook/Big-Data-Spring-2017-USU | f639be516aa75f3f0f869c2babd3aa63dbb156ab | b073687d1218deb9a631d4663c500fab8143a4cd | refs/heads/master | 2021-01-11T16:41:33.365368 | 2017-04-25T04:29:04 | 2017-04-25T04:29:04 | 80,140,198 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,235 | r | Neural_net_portion_Fred.R |
train_dum<-encode_categorical_variables(train2)
ntrain<-1460
train_dum_reduced<-do_variable_selection(train_dum)
vrs<-names(train_dum_reduced)
train_dum_reduced_log<-mutate(train_dum_reduced,SalePrice=log(SalePrice+1))
nn_form<-as.formula(paste("SalePrice ~", paste(vrs[!vrs %in% "SalePrice"],collapse= " + ")))
train_dum_scaled<-as.data.frame(scale(train_dum_reduced_log, center=T,scale=T))
nn_onenode<-neuralnet(nn_form,dat=train_dum_scaled, hidden=c(1),linear.output=T,rep=5,stepmax=1e+05,lifesign="full")
#doesn't converge with logistic activation function
#hnn_20nodes<-neuralnet(nn_form,data=train_dum_scaled,hidden=c(10,10),linear.output=T,rep=1,stepmax=3e+05,lifesign="full")
#tried with tanh activation function
#hnn_20nodes_t<-neuralnet(nn_form,data=train_dum_scaled,hidden=c(10,10),linear.output=T,rep=5,stepmax=5e+05,lifesign="full",act.fct = "tanh")
#hnn_20nodes_l_cv<-tenFoldCrossVal(formula=nn_form,data=train_dum_scaled,type="neunet",nnn=c(10,10))
#results<-hnn_20nodes_t$result.matrix
#this one works after 11 minutes
#hnn_25nodes<-neuralnet(nn_form,data=train_dum_scaled,hidden=c(15,10),linear.output=T,rep=1,stepmax=3e+05,lifesign="full")
# but cross validation doesn't work
#nn_cv_25node<-tenFoldCrossVal(formula=nn_form,data=train_dum_scaled,type="neunet",nnn=c(15,10))
#didn't converge
#hnn_25n4L<-neuralnet(nn_form,data=train_dum_scaled,hidden=c(12,8,4,2),linear.output=T,rep=1,stepmax=3e+05,lifesign="full")
#converged
#hhn_48n<-neuralnet(nn_form,data=train_dum_scaled,hidden=c(15),linear.output=T,rep=1,stepmax=3e+05,lifesign="full")
#but cross validation doesn't work
#hhn_15_cv<-tenFoldCrossVal(formula=nn_form,data=train_dum_scaled,type="neunet",nnn=c(15),output=T,stepmax=3e+05,lifesign="full")
#nn_15_prediction_train<-unscale_unlog_nn_prediction(compute(hhn_48n,covariate=select(train_dum_scaled,-SalePrice))$net.result,train_dum_reduced_log$SalePrice)
RMSE<-sqrt(sum((train_dum_reduced$SalePrice - nn_15_prediction_train)**2)/1460)
#RMSE is extremely bad
#Code to predict from test data and create submission csv
test1<-deal_missing_values(test)
test_dum<-encode_categorical_variables(test1)
reduced_vars<-which(colnames(test_dum) %in% colnames(train_dum_reduced))
test_dum_reduced<-test_dum[,reduced_vars]
test_dum_scaled<-scale(test_dum_reduced)
prediction_nn_un<-compute(nn_onenode,covariate=test_dum_scaled)$net.result
prediction_nn<-unscale_unlog_nn_prediction(prediction_nn_un,train_dum_reduced_log$SalePrice)
submission_nn<-data.frame(test$Id,prediction_nn)
colnames(submission_nn)<-c("ID","SalePrice")
write.csv(submission_nn,"submission_nn_15.csv",row.names = FALSE)
# We tried a variety of configurations for a neural net using a subset of predictors, and found very few configurations that converged consistently.
#
# Even when the models converged with the entirety of the data, we could not achieve convergence for most of models using a 10-fold cross validation method. Since these models took an extremely long time to run and did not appear to have better predictive power than other methods, we decided not to use a neural net model for our final submission. The RMLSE for an untuned 1-node neural net model was .16521, worse than the linear model. |
91c32412a0237c2f13af7335c336eaa54576246a | 29585dff702209dd446c0ab52ceea046c58e384e | /phytotools/R/fitEP.R | b4c4cea2ca85eafa1b6285f44500a59f93534b81 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,134 | r | fitEP.R | fitEP <- function(x, #E
                  y,       #Quantum Efficiency, rETR or P
                  normalize=FALSE,           #Should curve be normalized to E (Default=TRUE for modeling Quantum Efficiency)
                  lowerlim=c(0,0,0),         #Lower bounds of parameter estimates (alpha,Eopt,Ps)
                  upperlim=c(100,2000,2000), #Upper bounds of parameter estimates (alpha,Eopt,Ps)
                  fitmethod=c("Nelder-Mead")) #Fitting method passed to modFit
{
  #Fits the Eilers-Peeters photosynthesis-irradiance model to (x, y) by
  #least squares via FME::modFit. Returns a list with parameter summary rows
  #(alpha, eopt, ps), the residual sum of squares and the residuals; every
  #field is NA if the optimizer fails to converge.

  #If normalize =T, assign E = 0 to very small number
  #(avoids division by zero since the normalized model divides by E)
  if (normalize==T) x[x==0] <- 1e-9

  #Remove NA values: keep only pairs where both E and the response are finite.
  #NOTE(review): `res` is assigned here but never used below.
  ind <- is.finite(x) & is.finite(y)
  res <- rep(NA,length(x))
  x <- x[ind==T]
  y <- y[ind==T]

  #Initial Parameter Estimates (crude starting values for the optimizer)
  if (normalize==T){
    alpha <- max(y)
    eopt <- mean(x)
    ps <- max(x*y)
  }
  if (normalize==F){
    PE <- y/x
    alpha <- max(PE[is.finite(PE)])
    eopt <- mean(x)
    ps <- max(y)
  }

  #Load the model: EP is the raw-response form, EP.E the E-normalized form;
  #p = c(alpha, eopt, ps)
  EP <- function(p,x) return(data.frame(x = x, y = x/((1/(p[1]*p[2]^2))*x^2+(1/p[3]-2/(p[1]*p[2]))*x+(1/p[1]))))
  EP.E <- function(p,x) return(data.frame(x = x, y = 1/((1/(p[1]*p[2]^2))*x^2+(1/p[3]-2/(p[1]*p[2]))*x+(1/p[1]))))
  #Residual function handed to modFit (least-squares objective)
  if (normalize==F) model.1 <- function(p) (y - EP(p, x)$y)
  if (normalize==T) model.1 <- function(p) (y - EP.E(p, x)$y)

  #In case of non-convergence, NAs are returned.
  #NOTE(review): when the fit succeeds, modFit is run twice (once inside
  #try() to probe for errors, then again for the real fit).
  if (class(try(modFit(f = model.1,p = c(alpha,eopt,ps), method = fitmethod,
                       lower=lowerlim, upper=upperlim,
                       hessian = TRUE),silent=T))=="try-error"){
    fit <- list(alpha=NA,eopt=NA,ps=NA,ssr=NA,residuals=rep(NA,c(length(x))))
  }else{
    fit <- modFit(f = model.1,p = c(alpha,eopt,ps),method = fitmethod,
                  lower=lowerlim, upper=upperlim, hessian = TRUE)
    #Each parameter entry is the corresponding row of the modFit summary
    #(estimate, std. error, t value, p value)
    fit <- list(alpha=summary(fit)$par[1,],eopt=summary(fit)$par[2,],ps=summary(fit)$par[3,],
                ssr=fit$ssr,residuals=fit$residuals,model="EP",normalize=normalize)
  }
  return(fit)
}
|
2133e0ad9fd20e0abcd4042c93d4dde986388815 | 52f97cd52c3a4a06b52d5166083a377340adb114 | /JingFeng_ML_Thesis/mango/wheat_tSNE.R | 73904b0721a6c6bbd56f130b7bcab5a6faaa479f | [] | no_license | xiaoci12aa/Jing_Feng_ML_Thesis | fca032096b6747c48ac7258c29090d81b52f1226 | 401c59eb77b4eb256d41051346eac0e625ba743d | refs/heads/master | 2021-01-23T08:33:54.995827 | 2017-09-05T22:13:12 | 2017-09-05T22:13:12 | 102,534,344 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | wheat_tSNE.R | library(tidyverse)
## t-SNE embedding of the wheat data set; tidyverse (loaded above) supplies
## read_tsv and ggplot2, the tsne package supplies tsne().
library(tsne)
## Read the raw data (absolute path -- only works on the original machine)
wheat_data <- read_tsv(file = "/Users/XC/DOCUMENTS_COPY/UCL/Project/Final_Project/wheat/data sheet 1.csv")
## Feature matrix: drop columns 1:4 and 35 (presumably the ID/label columns
## such as Year/Variety/Growth used below -- confirm against the sheet)
X_wheat <- wheat_data[,-c(1:4,35)]
# Y <- wheat_data[,3]
## 2-D t-SNE embedding, perplexity 30, capped at 500 iterations
tsne_wheat <- tsne(X_wheat, perplexity = 30, k = 2, max_iter = 500)
# add necessary label variables (Year/Variety/Growth) back onto the embedding
tsne_wheat_df <- as.data.frame(tsne_wheat)
tsne_wheat_df$Year <- wheat_data$Year
tsne_wheat_df$Variety <- wheat_data$Variety
tsne_wheat_df$Growth <- wheat_data$Growth
# pic 1: t-SNE embedding with shape = Year, colour = Variety
ggplot(data = tsne_wheat_df) +
  geom_point(mapping = aes(x = V1, y = V2, shape = Year, colour = Variety))
# pic 2: t-SNE embedding with shape = Year, colour = Growth
ggplot(data = tsne_wheat_df) +
  geom_point(mapping = aes(x = V1, y = V2, shape = Year, colour = Growth))
|
24dd793b0a8c67b98302fa0bf87a8e3dcc6faee1 | 1e899747d72292a3a205251602d420d4ffcb0abc | /workflow/shrinkage_plot.R | 7d78924137332da45af4b341b26288d96dc2348c | [] | no_license | emittman/BB_data_analysis | 21253c573aedb8fd28be4b55ff85d6ff99536b3f | 5c5ec61a9a316623873c3a7219d55d527bc353c3 | refs/heads/master | 2021-09-21T00:57:15.512850 | 2018-08-18T04:56:19 | 2018-08-18T04:56:19 | 69,043,249 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,254 | r | shrinkage_plot.R | setwd("workflow/")
dm=1
dat <- readRDS("../BB_data/clean_unit_summaries.rds")
dat$model <- as.integer(dat$model)
library(plyr)
library(dplyr)
library(rstan)
library(ggplot2)
overview <- ddply(dat, .(model), summarise,
n=length(model),
f=sum(failed>0),
early_f = sum(failed>0 & endtime<365*24*1),
late_f = sum(failed>0 & endtime>365*24*2))
# id <- unique(dat$model)
id <- with(overview, which(overview$early >= 0 & overview$late_f >= 0 & f >=3))
overview$stan_id <- NA
overview[id,]$stan_id <- 1:length(id)
# s <- readRDS("../workflow/samples_lor_only3fails.rds")
sfull <- readRDS("../workflow/samples_lor_only3fails.rds")
tr_adj <- readRDS("../BB_data/tr_adj_tp2s2pi.rds")$median
sampfull <- extract(sfull)
source("../plotting_fns/KM_plot2.R")
source("../predictive/sample_from_predictive.R")
source("../plotting_fns/lifetime_plot.R")
xlabels=c(1,2,5,10, 20)
ylabels=c(.001,.01,.1,.5,.9, .99, .999)
xlimits=c(5000, 170000)
ylimits=c(.001,.999)
datdm <- filter(dat, model==overview$model[which(overview$stan_id==dm)])
lt_plot <- lifetime_plot3(datdm, xlabels = xlabels, in_years=TRUE,
lab=paste(c("Drive-model ",dm),collapse=""), trans="log", xlimits = xlimits)
kmdm <- KM.survfit(datdm, greenwood = FALSE)
kmdm <- tr.adj(kmdm, tr_adj[dm])
bp <- baseKMplot(fit=kmdm, xlimits=xlimits, ylimits=ylimits, color="black",
linetype = "solid", alpha = 1, logscale = TRUE, label="nonparametric", prob = .9)
sampdm <- with(sampfull, list(mu1=mu1, sigma1=sigma1, log_pi=log_pi[,dm], mu2=mu2[,dm], sigma2=sigma2[,dm]))
banddm <- bandFromPSamp(samples=sampdm, range=xlimits, length.out = 70, N=200, logscale = TRUE)
bpp <- addBandToBaseplot(baseplot=bp, bandObj=banddm, color="black",
linetype="dashed", label="posterior median\n(90% interval)", alpha = .3)
sampGlob <- with(sampfull, list(mu1=mu1, sigma1=sigma1, log_pi=eta_pi, mu2=eta_tp2 - qsev(.5)*exp(eta_s2), sigma2=exp(eta_s2)))
bandGlob <- bandFromPSamp(samples=sampGlob, range=xlimits, length.out=50, N=300, logscale = TRUE)
bandGlob$band <- bandGlob$band[which(bandGlob$band$est<ylimits[2]),]
bppp <- addBandToBaseplot(baseplot = bpp, bandObj=bandGlob, color = "black", linetype = "dotted", alpha = 0, label = "global")
combined <- plotFinally(plotList=bppp, xbrks=xlabels*24*365, ybrks=ylabels, years=TRUE, greenwood = FALSE) +
guides(fill=FALSE)
library(cowplot)
# now extract the legends
legendLT <- get_legend(lt_plot + theme(legend.text = element_text(size=7)))
legendcomb <- get_legend(combined+ theme(legend.text = element_text(size=7)))
# and replot suppressing the legend
lt_plot <- lt_plot + theme(legend.position = "none")
combined <- combined + theme(legend.position='none')
# Now plots are aligned vertically with the legend to the right
pdf(paste(c("../paper/fig/dm",dm,"-shrinkage.pdf"), collapse=""), width=6, height=6)
ggdraw(plot_grid(plot_grid(lt_plot, combined, ncol=1, align='v'),
plot_grid(legendLT, legendcomb, ncol=1),
rel_widths=c(1, 0.2)))
dev.off()
|
60f8be82c7d2704b042728b78695408ad8bf9f81 | b1ddc1aa692dd03a39c6eac2b88480e8f2c934d6 | /man/makeCustomRun.Rd | 5bd26b7ad59fb2aff1e8446c28fa07cc7d593b33 | [] | no_license | ekhco/LCsim | 73375ee41ecef7efb664a29011cbc6549a9a7f59 | 990030483962dddb28a25af86470b883f5f11f0e | refs/heads/master | 2022-02-15T09:25:00.706297 | 2019-07-28T01:58:45 | 2019-07-28T01:58:45 | 103,585,907 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 972 | rd | makeCustomRun.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSHG.R
\name{makeCustomRun}
\alias{makeCustomRun}
\title{Custom SHG Call}
\usage{
makeCustomRun(birth_cohort)
}
\arguments{
\item{birth_cohort}{the year of birth for the cohort (ie: 1950)}
}
\description{
The SHG is written in Python and is maintained seperately from this package by CISNET.
This function writes a custom "run_tests.py" into the current director for the SHG so
that only selected birth cohort years can be run instead of all of the 1890 - 2110 range
that is hardcoded into the current run_tests.py distributed with SHG 6.3.4. This function
is called by runSHG() and does not need to be called directly. It creates a python script
in the SHG directory, called 'run_custom_tests.py'
}
\examples{
setwd("~/SHG/") # navigate to the SHG directory first
SHG_output <- makeCustomRun(birth_cohort = 1950)
}
\keyword{generator}
\keyword{history}
\keyword{run_tests}
\keyword{smoking}
|
f6d5c5c872010a41a0f6fbe63f395c7412c68059 | 3e88ff6e452f3ea6e3b0f650d78fb317fe23cfe6 | /plot4.R | c99e485bbc8130b1a1c745e67e9dc504bf144afb | [] | no_license | izaakjephsonbw/ExData_Plotting1 | ba59ec3092f0f5d34cfb61ccaaa1dc0d9d5129a7 | c3381d7d8d4722d7ee2c93afcb11cef9b683aa3b | refs/heads/master | 2020-09-08T00:38:08.965818 | 2019-11-11T12:16:28 | 2019-11-11T12:16:28 | 220,959,148 | 0 | 0 | null | 2019-11-11T10:45:59 | 2019-11-11T10:45:58 | null | UTF-8 | R | false | false | 1,421 | r | plot4.R | ## Creates 4 plots of Global Active Power, Voltage, Energy Sub Metering and Global Reactive Power against time (Plot 4)
## Import libraries (read.table/paste are base; as.tbl, %>%, mutate, dmy_hms
## and as_vector come in via the tidyverse attach)
library(tidyverse)
## Read data into R: the first row of the file supplies the column names;
## then 2880 observations are read starting after row 66637 (presumably the
## two-day window the assignment targets -- confirm against the raw file)
colnames <- read.table("./data/household_power_consumption.txt", sep = ";", nrows = 1)
electric <- as.tbl(read.table("./data/household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880, col.names = as_vector(colnames)))
rm(colnames)
## Convert Date and Time to single Date/Time variable (lubridate::dmy_hms)
electric <- electric %>% mutate(Date_Time = paste(electric$Date, electric$Time, sep = " ") %>% dmy_hms())
## Produce line plot and save as png file: a 2x2 panel of the four series
png(file = "plot4.png")
par(mfrow = c(2,2))
with(electric, {
        plot(Date_Time, Global_active_power, ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
        plot(Date_Time, Voltage, ylab = "Voltage", xlab = "", type = "l")
        ## Third panel: the three sub-metering series overlaid on one axis
        plot(Date_Time, Sub_metering_1, ylab = "Energy sub metering", xlab = "", type = "l", col = "black")
        points(Date_Time, Sub_metering_2, type = "l", col = "red")
        points(Date_Time, Sub_metering_3, type = "l", col = "blue")
        legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub metering 1","Sub metering 2","Sub metering 3"), bty = "n")
        plot(Date_Time, Global_reactive_power, ylab = "Global Reactive Power", xlab = "", type = "l")
})
## Close the png device, flushing the image to disk
dev.off()
#dev.off() |
8611ca9b70e22b0eba9336326e15473477f48487 | 4b18c286684d30b23bc0b81ac6faff63167c7905 | /man/geneexpr.Rd | 6fc5f0b45fd1e4b730f73d718ca180bc6e0be508 | [] | no_license | smeister/test2 | 63169293554590460551b2aab2e53f36df8487b3 | 49e93d1af927559ea7197ce6dd405765443fa30c | refs/heads/master | 2020-03-11T09:24:28.283235 | 2018-04-17T13:44:21 | 2018-04-17T13:44:21 | 129,909,791 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 206 | rd | geneexpr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneexpr.R
\name{geneexpr}
\alias{geneexpr}
\title{Geneexpr function}
\usage{
geneexpr(...)
}
\description{
Geneexpr function
}
|
0b7aaf180cbf0855577ccf6b6dd99928d7f02dc3 | d187f91ad1a76fb2118d537dfdaecab1f1483529 | /plot5.R | 94acc274ed13060e2046102127c40452220f1b7e | [] | no_license | IndC/Exploratory-Data-Analytics | 0aa1bf7b1c3ca47d2a6c606e20cd76182ee5f8f9 | 8e1c8fbf8df81d413ab86143a3b21b2a8ff29cb1 | refs/heads/master | 2020-03-26T20:18:30.792395 | 2015-01-26T03:15:31 | 2015-01-26T03:15:31 | 29,799,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | plot5.R | # How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
## Import libraries: reshape2 for melt(), ggplot2 for plotting
library(reshape2)
library(ggplot2)
## Load the NEI emissions records and the source-classification lookup table
NEI <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exdata-data-NEI_data/Source_Classification_Code.rds")
## Motor-vehicle sources: classification rows whose Level Two description
## mentions "highway"; keep columns 1 and 4 (SCC code and EI.Sector)
SMV <- SCC[grep("highway", SCC$SCC.Level.Two, ignore.case = TRUE), c(1,4)]
SMV$EI.Sector <- factor(SMV$EI.Sector)
## Baltimore City records (fips 24510) for those sources; columns 2, 4, 6
## are presumably SCC, Emissions and year -- confirm against the NEI layout
a <- NEI[NEI$SCC %in% SMV$SCC & NEI$fips == "24510",c(2,4,6)]
b <- merge(a, SMV, by = "SCC")
## Total emissions per sector and year, then melt to long form for ggplot
y <- with(b, tapply(Emissions, list("Sector" = EI.Sector, "Year" = year),sum, na.rm = TRUE))
y <- melt(y, id = c("Year", "Sector"), na.rm = TRUE)
## Scatter of yearly totals per sector with a linear trend, saved as png.
## NOTE(review): the final bare `g + labs(...)` relies on top-level
## auto-printing; wrap it in print() if this script is run via source().
png("./Exploratory-Data-Analytics/plot5.png", width=800,height=400)
g <- ggplot(y, aes(Year, value))
g <- g + geom_point() + facet_grid(. ~ Sector) + geom_smooth(method = "lm", se = FALSE)
g + labs(title = "PM2.5 Emissions from Motor Vehicles by year.") + labs(x = "Year", y = "PM2.5 Emission")
dev.off() |
7a85cef5a9da64f099fb58dae4615c0a82a2557c | 29585dff702209dd446c0ab52ceea046c58e384e | /spatcounts/R/est.sc.R | 2a455631969954119464d072cd49b50996b49c95 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,375 | r | est.sc.R | `est.sc` <-
function (Yin, fm.X, region, model = "Poi", gmat, nmat, totalit, fm.ga = TRUE, t.i = NULL, phi0 = 1, omega0 = 0, r0 = 1, beta0 = NULL, gamma0 = NULL, sigma0 = 1, psi0 = 1, Tau = 10, alpha = 2){
    ## Fit a spatial count regression by dispatching to the sampler for the
    ## requested family ("Poi", "NB", "GP", "ZIP" or "ZIGP"); the *ind()
    ## workhorses are defined elsewhere in this package.
    ## Build the design matrix: a single intercept column when fm.X is ~1,
    ## otherwise the model matrix of the supplied formula. The region index
    ## is bound on as the first column (presumably split off again inside
    ## the samplers -- confirm against poiind() et al.).
    ## NOTE(review): `fm.X == ~1` compares formula objects with `==`, which
    ## errors on recent R versions; identical()/all.vars() would be safer.
    if (fm.X == ~1) 
        Xin <- matrix(1, length(Yin), 1)
    else Xin <- model.matrix(fm.X)
    Xin <- cbind(region, Xin)
    ## Default starting values when not supplied: zero regression
    ## coefficients (one per design column excluding the region column),
    ## zero spatial effects (one per element of gmat) and unit exposures.
    ## (The `== TRUE` comparisons are redundant but harmless.)
    if (is.null(beta0) == TRUE) 
        beta0 <- rep(0, (dim(Xin)[2] - 1))
    if (is.null(gamma0) == TRUE) 
        gamma0 <- rep(0, length(gmat))
    if (is.null(t.i) == TRUE) 
        t.i <- rep(1, dim(Yin)[1])
    ## Dispatch on the family name. An unrecognised `model` string falls
    ## through every branch and the function returns NULL invisibly.
    if (model == "Poi") {
        poi <- poiind(Yin, Xin, t.i, fm.ga, gmat, nmat, totalit, phi0, beta0, gamma0, sigma0, psi0, Tau, alpha)
        return(poi)
    }
    if (model == "NB") {
        ## Negative binomial uses r0 in place of phi0
        nb <- nbind(Yin, Xin, t.i, fm.ga, gmat, nmat, totalit, r0, beta0, gamma0, sigma0, psi0, Tau, alpha)
        return(nb)
    }
    if (model == "GP") {
        gp <- gpind(Yin, Xin, t.i, fm.ga, gmat, nmat, totalit, phi0, beta0, gamma0, sigma0, psi0, Tau, alpha)
        return(gp)
    }
    if (model == "ZIP") {
        ## Zero-inflated Poisson uses the zero-inflation start value omega0
        zip <- zipind(Yin, Xin, t.i, fm.ga, gmat, nmat, totalit, omega0, beta0, gamma0, sigma0, psi0, Tau, alpha)
        return(zip)
    }
    if (model == "ZIGP") {
        ## Zero-inflated generalized Poisson takes both phi0 and omega0
        zigp <- zigpind(Yin, Xin, t.i, fm.ga, gmat, nmat, totalit, phi0, omega0, beta0, gamma0, sigma0, psi0, Tau, alpha)
        return(zigp)
    }
}
|
e989ea1472737db1a5d2ff1fbec0d69cb50752eb | 410a2dfcbe74978eb10912a3dfe74bff3d136357 | /tests/testthat/testOneStructure.r | 8a54483601d1c542d907a858b85430560d240fb7 | [] | no_license | cran/dae | 7ee9c2ad4fcb3c97b846eb76ed560c20c591a977 | df5b45cc69b9984473f254891510687f2eb114ef | refs/heads/master | 2023-08-18T19:51:15.580290 | 2023-08-07T15:50:14 | 2023-08-07T17:30:45 | 17,695,368 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,642 | r | testOneStructure.r | #devtools::test("dae")
context("analysis")
cat("#### Test for designAnatomy with single structure\n")
test_that("OneStructure", {
skip_on_cran()
library(dae)
#'### Make a Latin square
ls.ran <- designRandomize(allocated = data.frame(Trt = factor(designLatinSqrSys(7))),
recipient = list(Row = 7, Column = 7),
seed = 354131)
lsadd.canon <- designAnatomy(list(plt = ~ Row+Column, trt = ~ Trt), data = ls.ran)
summadd <- summary(lsadd.canon)
testthat::expect_warning(print(summadd))
testthat::expect_equal(length(summadd),2)
testthat::expect_true(all(summadd$decomp$Source.plt == c("Row", "Column")))
testthat::expect_true(all(summadd$decomp$df1 == 6))
testthat::expect_true(all(is.na(summadd$decomp$Source.trt)))
testthat::expect_true(all(is.na(summadd$decomp$df2)))
ls.canon <- designAnatomy(list(plt = ~ Row*Column, trt = ~ Trt), data = ls.ran)
summ <- summary(ls.canon)
testthat::expect_equal(attr(summ$decomp, which = "n"), 49)
testthat::expect_equal(length(summ),2)
testthat::expect_true(all(summ$decomp$Source.plt == c("Row", "Column", "Row#Column", "Row#Column")))
testthat::expect_true(all(summ$decomp$df1 == c(6,6,36,36)))
testthat::expect_true(all(summ$decomp$Source.trt[3:4] == c("Trt", "Residual")))
testthat::expect_true(all(summ$decomp$df2[3:4] == c(6,30)))
ls1.canon <- designAnatomy(list(plt = ~ Row+Column), data = ls.ran)
summ1 <- summary(ls1.canon)
testthat::expect_equal(length(summ1),2)
testthat::expect_true(all(summ1$decomp$Source.plt == c("Row", "Column")))
testthat::expect_true(all(summ1$decomp$df == 6))
struct <- pstructure(~ Row+Column, data = ls.ran)
})
cat("#### Test for pstructure with factor nesting\n")
test_that("pstucture_fac.multinested", {
skip_on_cran()
library(dae)
#'## Set constants
nblks <- 6
treat.levs <- c("Control","Dr","Na","LN")
(ntreats <- length(treat.levs))
lines.lev <- c("O. aust", "Calrose", paste0("Transgenic", 1:7))
(nlines <- length(lines.lev))
#'### Systematic allocation
sys.lay <- cbind(
fac.gen(list(Block = nblks, MainUnit = ntreats, Cart = nlines)),
fac.gen(list(Treatment = treat.levs, Line = lines.lev), times = nblks))
#'### Randomization
rand.lay <- designRandomize(recipient = sys.lay[,1:3],
allocated = sys.lay[,4:5],
nested.recipients = list(MainUnit = "Block",
Cart = c("MainUnit", "Block")),
seed = 82604)
#'## Add nested factors
#'### Line nested within Treatments
rand.lay <- cbind(rand.lay,
with(rand.lay, fac.multinested(nesting.fac = Treatment, nested.fac = Line,
fac.prefix = "Line")))
#Test same levels order for all nested factors
testthat::expect_true(all(unlist(lapply(rand.lay[c("LineControl","LineDr","LineNa","LineLN")],
function(fac, levs) all(levels(fac) == c("rest", levs)),
levs = levels(rand.lay$Line)))))
#'### Factors that remove contrast involving O. aust
rand.lay <- within(rand.lay,
{
OaVsRest <- fac.uselogical(Line == "O. aust", labels = c("O. aust", "Other"))
OaTreat <- fac.recode(fac.combine(list(Line, Treatment)),
c(levels(Treatment), rep("Other", 32)))
})
#'### Factors for Lines within Treatments, excluding O. aust
rand.lay <- within(rand.lay,
{
OaDr <- fac.uselogical(LineDr == "O. aust", labels = c("O. aust", "Other"))
OaControl <- fac.uselogical(LineControl == "O. aust", labels = c("O. aust", "Other"))
OaLN <- fac.uselogical(LineLN == "O. aust", labels = c("O. aust", "Other"))
OaNa <- fac.uselogical(LineNa == "O. aust", labels = c("O. aust", "Other"))
})
#'## Investigate Treatment terms
#'### Removal of O. aust from the Treatments*Line
print(trt.str <- pstructure(~ OaVsRest/OaTreat + Treatment*Line, data = rand.lay),
which = "proj")
testthat::expect_true(all(names(trt.str$Q) == c("OaVsRest", "OaTreat[OaVsRest]", "Treatment",
"Line[OaVsRest]", "Treatment#Line")))
testthat::expect_true(all(trt.str$aliasing$Source == c("Treatment", "Treatment")))
testthat::expect_true(all(trt.str$aliasing$Alias == c("OaTreat[OaVsRest]",
"## Information remaining")))
#'### Removal of O. aust from remaining Lines nested within Treats
print(trt.str <- pstructure(~ OaVsRest/OaTreat + Treatment/(LineControl + LineDr +
LineLN + LineNa),
which.criteria = c("aeff", "xeff", "eeff", "ord"),
data = rand.lay), which = c("proj", "alias"))
testthat::expect_true(all(names(trt.str$Q) == c("OaVsRest", "OaTreat[OaVsRest]", "Treatment",
"LineControl[Treatment]", "LineDr[Treatment]",
"LineLN[Treatment]", "LineNa[Treatment]")))
testthat::expect_true(all(trt.str$aliasing$df == c(3,3, rep(c(1,1,1,7), times = 4))))
testthat::expect_true(all(trt.str$aliasing$Alias[c(2,6,10,14,18)] == "## Information remaining"))
#'### Treaments pooled over ALL lines but then separation of O. aust from remaining Lines, both nested within Treats
print(trt.str <- pstructure(~ Treatment/(OaControl + LineControl + OaDr + LineDr +
OaLN + LineLN + OaNa + LineNa), data = rand.lay),
which = "proj")
testthat::expect_true(all(names(trt.str$Q) == c("Treatment", "OaControl[Treatment]",
"LineControl[Treatment:OaControl]",
"OaDr[Treatment]", "LineDr[Treatment:OaDr]",
"OaLN[Treatment]", "LineLN[Treatment:OaLN]",
"OaNa[Treatment]", "LineNa[Treatment:OaNa]")))
testthat::expect_true(all(trt.str$aliasing$df == c(3, rep(c(1,7), times = 4))))
testthat::expect_true(is.null(trt.str$aliasing))
})
cat("#### Test for partially aliased terms\n")
test_that("AliasStructure", {
skip_on_cran()
library(dae)
nblks <- 7
nclones <- 3
nsoils <- 3
# Generate a systematic design
Trts.sys <- fac.gen(list(Clone=1:nclones, Soil=nsoils), times = nblks-1)
Trts.sys <- rbind(Trts.sys, Trts.sys[setdiff(1:9, c(2,4,9)),]) # treats absent from partial rep (final block)
pstr <- pstructure(formula = ~ Clone*Soil, data = Trts.sys)
testthat::expect_equal(nrow(pstr$aliasing),2)
testthat::expect_true((all(pstr$aliasing$Alias == c("Clone", "## Information remaining"))))
testthat::expect_true(all(abs(pstr$aliasing$aefficiency - c(0.0024,0.9975)) < 1e-04))
testthat::expect_true(all( pstr$marginality[upper.tri(pstr$marginality, diag = TRUE)] == c(1,0,1,1,1,1)))
})
cat("#### Test for pstructure with generalized factors\n")
test_that("pstucture_genfac", {
skip_on_cran()
library(dae)
pepalt.sys <- fac.gen(list(Rep = 2, Plate = 3, Side = 2, Boxrow = 2, Shelf = 4))
pepalt.str <- pstructure( ~ (Shelf:Boxrow)*(Rep/(Side:Plate)), data = pepalt.sys)
(sources <- pepalt.str$sources)
testthat::expect_true(all(sources == c("Shelf:Boxrow", "Rep", "Side:Plate[Rep]",
"(Shelf:Boxrow)#Rep", "(Shelf:Boxrow)#(Side:Plate)[Rep]")))
pepalt.str <- pstructure( ~ (Rep/Plate)*(Boxrow/(Shelf:Side)), data = pepalt.sys)
(sources <- pepalt.str$sources)
testthat::expect_true(all(sources == c("Rep", "Plate[Rep]", "Boxrow", "Shelf:Side[Boxrow]",
"Rep#Boxrow", "Rep#(Shelf:Side)[Boxrow]",
"Plate#Boxrow[Rep]", "Plate#(Shelf:Side)[Rep:Boxrow]")))
})
cat("#### Test for pstructure with difficult marginality single structure\n")
# Two-phase sensory-style design: checks dae::designAnatomy term and source
# naming for formulae with difficult marginality (plaid interactions).
test_that("PlaidInteractions", {
  skip_on_cran()
  library(dae)
  # Generate first-phase systematic design
  ph1.sys <- cbind(fac.gen(list(Expressive = c("Yes", "No"), Patients = 4, Occasions = 2)),
                   fac.gen(list(Motions = c("active", "passive")), times = 8))
  # Generate the two-phase systematic design
  ph2.sys <- cbind(fac.gen(list(Raters = 74, Viewings = 16)),
                   fac.gen(list(Trainings = 2, 16), times = 37),
                   rep.data.frame(ph1.sys, times =74))
  # Randomize the two-phase design (seed fixed so the layout is reproducible)
  ph2.lay <- designRandomize(allocated = ph2.sys[c("Trainings", "Expressive", "Patients",
                                                   "Occasions", "Motions")],
                             recipient = ph2.sys[c("Raters", "Viewings")],
                             except = "Viewings",
                             seed = 15674)
  # Convert names of the factors to single capital letters
  ph2.L.lay <- ph2.lay
  names(ph2.L.lay)[match(c("Raters", "Viewings", "Trainings", "Expressive", "Patients",
                           "Occasions", "Motions"), names(ph2.L.lay))] <- c("R", "V", "T",
                                                                            "E", "P", "O", "M")
  # Test the neat formula (13 terms expected)
  terms <- attr(terms(~ T * M * E + T:M:E:P + R:(M * (E / P)), data = ph2.L.lay),
                which = "term.labels")
  testthat::expect_equal(length(terms), 13)
  alloc.canon <- designAnatomy(list(alloc = ~ T * M * E + T:M:E:P + R:(M * (E / P))),
                               keep.order = TRUE, data = ph2.L.lay)
  testthat::expect_true(all(alloc.canon$terms$alloc %in% terms))
  testthat::expect_true(all(names(alloc.canon$sources$alloc) %in% terms))
  testthat::expect_true(all(alloc.canon$sources$alloc %in% c("T", "M", "T#M", "E", "T#E",
                                                             "M#E", "T#M#E", "P[T:M:E]",
                                                             "R[T:M]", "R[T:E]", "P[T:E:R]",
                                                             "M#E#R[T]", "M#P#R[T:E]")))
  # Test the simple formula (17 terms expected)
  terms <- attr(terms(~ (T + R) * M * (E / P), keep.order = TRUE, data = ph2.L.lay),
                which = "term.labels")
  testthat::expect_equal(length(terms), 17)
  alloc.canon <- designAnatomy(list(alloc = ~ (T + R) * M * (E / P)),
                               data = ph2.L.lay)
  testthat::expect_true(all(alloc.canon$terms$alloc %in% terms))
  testthat::expect_true(all(names(alloc.canon$sources$alloc) %in% terms))
  testthat::expect_true(all(alloc.canon$sources$alloc %in% c("T", "R[T]", "M", "T#M", "R#M[T]",
                                                             "E", "P[E]", "T#E", "T#P[E]",
                                                             "R#E[T]", "R#P[T:E]",
                                                             "M#E", "M#P[E]", "T#M#E",
                                                             "T#M#P[E]", "R#M#E[T]",
                                                             "R#M#P[T:E]")))
})
|
a60fb66ef9c5291c1735147af6c0df59ccb11770 | 6c95b178376e0f406a4833296e57b84cb802e4b6 | /server.R | 9c6cf9195ae77aaed46fe36fffe318561da9ff64 | [
"MIT"
] | permissive | booleangabs/Airlines-Data-Dashboard | b4c98d1f71b1bbe92af848fee751925ce81bf9fe | fdd5ab6b97a8e2134a2d873406fb12975aa415e1 | refs/heads/main | 2023-07-20T13:42:05.749546 | 2021-08-18T00:40:58 | 2021-08-18T00:40:58 | 394,472,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,894 | r | server.R | getMode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
# Shiny server for the airlines dashboard.
# Two tabs: a single-airline view (inputs suffixed with nothing / "_2",
# triggered by input$go) and a two-airline comparison view (inputs suffixed
# "_c*" / "_comp", triggered by input$go_comp).  Data sets are CSV files under
# data/ ("Despesas" = expenses, "Receita" = revenue, otherwise passengers).
server <- function(input, output) {
  ################### INPUT ####################
  # Airline selected on the single-airline tab; re-evaluates only when the
  # "go" button is pressed.
  select_airline <- eventReactive(input$go, {
    airline_name <- input$airline
    return(airline_name)
  })
  # Airlines selected on the comparison tab
  get_airline1 <- eventReactive(input$go_comp, {
    airline_name1 <- input$airline_c1
    return(airline_name1)
  })
  get_airline2 <- eventReactive(input$go_comp, {
    airline_name2 <- input$airline_c2
    return(airline_name2)
  })
  # Year range on the single-airline tab.
  # NOTE(review): selectInput values arrive as character strings, so the
  # range comparisons below are string comparisons (fine for 4-digit years,
  # but worth confirming).
  select_range <- eventReactive(input$go, {
    start <- input$true_date
    end <- input$true_date_2
    return(list(start = start, end = end))
  })
  # Year range on the comparison tab
  select_range_comp <- eventReactive(input$go_comp, {
    start <- input$true_date_c
    end <- input$true_date_c2
    return(list(start = start, end = end))
  })
  # Loads the data set chosen on the single-airline tab
  select_dt <- eventReactive(input$go, {
    data <- input$data
    if(data == "Despesas"){
      return(master_df = read.csv('data/airline_expenses.csv'))
    }else{
      if(data == "Receita"){
        return(master_df = read.csv('data/airline_revenues.csv'))
      }else{
        return(master_df= read.csv('data/airline_passengers.csv'))
      }
    }
  })
  # Name of the information selected on the single-airline tab
  info_selected <- eventReactive(input$go, {
    data <- input$data
    return(data)
  })
  # Loads the data set chosen on the comparison tab
  select_dt_c <- eventReactive(input$go_comp, {
    data <- input$data_comp
    if(data == "Despesas"){
      return(master_df = read.csv('data/airline_expenses.csv'))
    }else{
      if(data == "Receita"){
        return(master_df = read.csv('data/airline_revenues.csv'))
      }else{
        return(master_df= read.csv('data/airline_passengers.csv'))
      }
    }
  })
  # Name of the information selected on the comparison tab
  info_selected_c <- eventReactive(input$go_comp, {
    data <- input$data_comp
    return(data)
  })
  # Year-range selector widgets for the single-airline tab (choices come from
  # the Year column of the expenses file, assumed common to all data sets)
  output$starttime <- renderUI({
    airline_name <- input$airline
    df <- read.csv('data/airline_expenses.csv')
    years <- df$Year
    names(years) <- years
    selectInput("true_date", "Inicio da análise", choices= years)
  })
  output$endtime <- renderUI({
    airline_name <- input$airline
    df <- read.csv('data/airline_expenses.csv')
    years <- df$Year
    names(years) <- years
    selectInput("true_date_2", "Fim da análise", choices= years)
  })
  # Year-range selector widgets for the comparison tab
  output$starttime_comp <- renderUI({
    airline_name <- input$airline
    df <- read.csv('data/airline_expenses.csv')
    years <- df$Year
    names(years) <- years
    selectInput("true_date_c", "Inicio da análise", choices= years)
  })
  output$endtime_comp <- renderUI({
    airline_name <- input$airline
    df <- read.csv('data/airline_expenses.csv')
    years <- df$Year
    names(years) <- years
    selectInput("true_date_c2", "Fim da análise", choices= years)
  })
  ################ OUTPUT #####################
  # Builds the summary-statistics table for the single-airline tab; returns a
  # one-column "PERIODO INVALIDO" frame when start > end.
  Info_DataTable <- eventReactive(input$go,{
    master_df <- select_dt()
    range <- select_range()
    if(range[[1]]> range[[2]]){
      Erro <- "PERIODO INVALIDO"
      df_tb <- data.frame(Erro)
      df_tb <- as.data.frame(t(df_tb))
      return(df_tb)
    }
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    df <- df %>% select(select_airline())
    mean <- df %>% colMeans()
    median <- median(as.numeric(unlist(df)))
    mode <- getMode(unlist(df))
    # Sample standard deviation
    standard_deviation <- sd(unlist(df))
    Media <- mean[[1]]
    Mediana <- median[[1]]
    Moda <- mode[[1]]
    Desvio_Padrao <- standard_deviation[[1]]
    Max <- max(df)
    Min <- min(df)
    Airline <- input$airline
    # Transposed so statistics appear as rows rather than columns.
    df_tb <- data.frame(Airline, Media, Mediana, Moda, Desvio_Padrao, Max, Min)
    df_tb <- as.data.frame(t(df_tb))
    return(df_tb)
  })
  # Renders the summary-statistics table (pt-BR DataTables localization)
  output$info <- renderDT({
    Info_DataTable() %>%
      as.data.frame() %>%
      DT::datatable(options=list(
        language=list(
          url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
        )
      ))
  })
  # Time-series plot on the single-airline tab.
  # NOTE(review): aes_() is deprecated in current ggplot2; aes(.data[[...]])
  # is the modern equivalent — confirm the deployed ggplot2 version.
  output$sh <- renderPlot({
    # All the inputs
    airline_name <- select_airline()
    master_df <- select_dt()
    range <- select_range()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    info <- info_selected()
    y_lab <- 'Despesas em $'
    if(info == 'Receita'){
      y_lab <- 'Receita em $'
    }else{
      if(info == 'Passageiros'){
        y_lab <- 'Passageiros'
      }
    }
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    a <- df %>%
      ggplot(aes_(x=as.name("Year"), y = as.name(airline_name))) +
      geom_path()+
      ylab(as.name(y_lab)) +
      xlab('Ano')+
      scale_x_continuous(breaks = graph_range)
    a
  })
  # Histogram on the single-airline tab
  output$hs <- renderPlot({
    airline_name <- select_airline()
    master_df <- select_dt()
    range <- select_range()
    # NOTE(review): this handler rejects only start > end, while the series
    # plot above rejects start >= end — confirm which is intended.
    if(range[[1]] > range[[2]]){
      return()
    }
    hist_range <- range[[1]]:range[[2]]
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    dif <- as.numeric(range[[2]]) - as.numeric(range[[1]])
    # Square-root rule for the number of bins.
    # NOTE(review): sqrt() is usually fractional; geom_histogram(bins = ...)
    # may want an integer — consider ceiling().
    classes <- (sqrt(dif))
    a <- df %>%
      ggplot(aes_(x=as.name(airline_name))) +
      geom_histogram(color = 'white', fill = 'lightblue', bins = classes) +
      theme_classic(base_size = 18) +
      xlab("Valores") +
      ylab("Frequencia")
    a
  })
  # Box plot on the single-airline tab
  output$bp <- renderPlot({
    airline_name <- select_airline()
    master_df <- select_dt()
    range <- select_range()
    if(range[[1]] > range[[2]]){
      return()
    }
    hist_range <- range[[1]]:range[[2]]
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    a <- df %>%
      ggplot(aes_(x=as.name("Year"), y = as.name(airline_name)))+
      geom_boxplot()
    a
  })
  # Renders the box holding airline 1's series on the comparison tab
  output$serie1 <- renderUI({
    name <- get_airline1()
    box(title = paste('Série da Empresa', name), width = 12, solidHeader = TRUE,
        plotOutput('comp_sh1'))
  })
  # Renders the box holding airline 2's series on the comparison tab
  output$serie2 <- renderUI({
    name <- get_airline2()
    box(title = paste('Série da Empresa', name), width = 12, solidHeader = TRUE,
        plotOutput('comp_sh2'))
  })
  # Series plot for airline 1 on the comparison tab
  output$comp_sh1 <- renderPlot({
    airline1 <- get_airline1()
    master_df <- select_dt_c()
    range <- select_range_comp()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    info <- info_selected_c()
    y_lab <- 'Despesas em $'
    if(info == 'Receita'){
      y_lab <- 'Receita em $'
    }else{
      if(info == 'Passageiros'){
        y_lab <- 'Passageiros'
      }
    }
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    a <- df %>%
      ggplot(aes_(x=as.name("Year"), y = as.name(airline1))) +
      geom_path()+
      ylab(as.name(y_lab)) +
      xlab('Ano')+
      scale_x_continuous(breaks = graph_range)
    a
  })
  # Series plot for airline 2 on the comparison tab (mirror of comp_sh1)
  output$comp_sh2 <- renderPlot({
    airline2 <- get_airline2()
    master_df <- select_dt_c()
    range <- select_range_comp()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    info <- info_selected_c()
    y_lab <- 'Despesas em $'
    if(info == 'Receita'){
      y_lab <- 'Receita em $'
    }else{
      if(info == 'Passageiros'){
        y_lab <- 'Passageiros'
      }
    }
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    a <- df %>%
      ggplot(aes_(x=as.name("Year"), y = as.name(airline2))) +
      geom_path()+
      ylab(as.name(y_lab)) +
      xlab('Ano')+
      scale_x_continuous(breaks = graph_range)
    a
  })
  # Scatter plot of airline 1 vs airline 2 on the comparison tab;
  # silently skipped when the same airline is selected twice.
  output$comp_sc <- renderPlot({
    airline1 <- get_airline1()
    airline2 <- get_airline2()
    if (airline1==airline2) {
      return()
    }
    master_df <- select_dt_c()
    range <- select_range_comp()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    df %>% ggplot(aes_(x = as.name(airline1), y = as.name(airline2))) + geom_point(size = 2)
  })
  # Bar chart of the two airlines' means on the comparison tab
  output$comp_bm <- renderPlot({
    airline1 <- get_airline1()
    airline2 <- get_airline2()
    if (airline1==airline2) {
      return()
    }
    master_df <- select_dt_c()
    range <- select_range_comp()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    df1 <- df %>% select(airline1)
    df2 <- df %>% select(airline2)
    mean1 <- df1 %>% colMeans()
    mean2 <- df2 %>% colMeans()
    data <- data.frame(
      airline=c(airline1, airline2) ,
      mean=c(mean1, mean2)
    )
    ggplot(data, aes(x=airline, y=mean)) + geom_bar(stat = "identity")
  })
  # Correlation table between the two selected series
  output$corr_cm <- renderDT({
    airline1 <- get_airline1()
    airline2 <- get_airline2()
    if (airline1==airline2) {
      return()
    }
    master_df <- select_dt_c()
    range <- select_range_comp()
    if(range[[1]] >= range[[2]]){
      return()
    }
    graph_range <- range[[1]]:range[[2]]
    df <- master_df %>% filter(Year >= range[[1]])
    df <- df %>% filter(Year <= range[[2]])
    df <- df %>% select(airline1, airline2)
    # cor(df) returns the full 2x2 correlation matrix.
    corr_dt <- data.frame(
      Correlacao_com_=cor(df)
    )
    corr_dt %>%
      as.data.frame() %>%
      DT::datatable(options=list(
        language=list(
          url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
        )
      ))
  })
}
|
5a2435d61471d801c8837ee1ceaa307cd1da4b25 | 36a745e91445b312088d2ec5beabbc10e4d14160 | /analysis_kim_adenocarcinoma/Kim_run_DM_package_version5.R | 21b36bd4d978b3d92f4076533226a112750e7213 | [] | no_license | gosianow/multinomial_project | a111be129f3790048f1f2c9baac5c39651c6605b | f5e5b96195fd99a8fe4aa80cdc0f12196e595823 | refs/heads/master | 2020-12-25T22:30:58.509784 | 2016-03-31T15:48:22 | 2016-03-31T15:48:22 | 28,908,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,956 | r | Kim_run_DM_package_version5.R | ######################################################
# BioC 2.14
# Created 04 Nov 2014
# Analysis of Kim_adenocarcinoma data
# Update 19 Nov 2014:
# plots of TREND dispersion vs. mean
#######################################################
# Analysis script: differential splicing with the DM (Dirichlet-multinomial)
# package v5 on the Kim adenocarcinoma featureCounts data.
# NOTE(review): hard-coded absolute paths and setwd() tie this to one machine.
setwd("/home/Shared/data/seq/Kim_adenocarcinoma/")
metadata <- read.table("metadata/Malgorzata Nowicka2014-11-04GSE37764.csv", stringsAsFactors=F, sep=",", header=T)
# Keep only the RNA-seq rows; metadataOrg preserves the full RNA-seq set
# because `metadata` is re-subset per model below.
metadataOrg <- metadata <- metadata[metadata$X == "RNA-seq",]
##########################################################################
# run new DM (common dispersion, and adjustement) on FC
##########################################################################
library(edgeR)
library(parallel)
library(dirmult)
library(limma)
# Project-local DM v5 functions (dmEstimate*/dmFit/dmTest are defined here).
Rdir <- "/home/gosia/R/R_Multinomial_project/DM_package/version5_sQTL/"
source("/home/gosia/R/R_Multinomial_project/DM_commonDisp/dirmult_code.R")
source(paste0(Rdir, "dmFunctions_v5.R"))
##############################################################################################################
# run DM version 5 (common dispersion + adjustement) on FC
##############################################################################################################
#### load FC
# featureCounts table; first column is "gene:exon" feature ids.
fc <- read.table("featureCounts/fc.txt", header = T)
# Strip the BAM path prefix/suffix from the sample column names.
colnames(fc) <- gsub("_s.bam","",gsub("X.home.Shared.data.seq.Kim_adenocarcinoma.bam_insilicodb.", "",colnames(fc)))
# Null_normal1: null comparison among the normal samples (3 vs 3).
model <- "Null_normal1"
out.dir <- paste0("DM_v5/fc/diff_out_", model)
dir.create(out.dir, showWarnings=F, recursive=T)
metadata <- metadataOrg[metadataOrg$Tissue.Type == "normal", ]
metadata$condition = c(rep("C1", 3), rep("C2", 3))
counts <- fc[, metadata[, "ids"]]
# Feature ids are "gene:exon"; split off the gene part for grouping.
gene_id <- strsplit2(fc[,1], ":")[,1]
ete_id <- fc[,1]
dgeOrg <- DGEList(counts=counts, group = metadata$condition, genes=data.frame(gene_id=gene_id, ete_id=ete_id))
name1 <- "fc"
dge <- dgeOrg
# write.table(data.frame(dge$genes,dge$counts), paste0(out.dir, "/dge_counts_",name1,"_NOT_FILTERED.xls"), quote=F, sep="\t", row.names=F, col.names=T)
###fc_g0_s4_keep0s_subsetInf_DM5adj############################################################
# Run 1 (normal samples): common dispersion with adjustment.
# Filter: keep exons expressed (cpm > 0) in at least 4 of the 6 samples.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5adj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
# Reshape counts into a per-gene list of exon x sample matrices, as expected
# by the DM functions.
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
nlevels(dge$genes$gene_id)
mcCores <- 20
## run DM pipeline: estimate dispersion -> fit -> test -> save
dgeDM <- dmEstimateCommonDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE)
write.table(dgeDM$commonDispersion, paste0(out.dir, "/",name1,"_commonDispersion.txt"), quote=F, sep="\t", row.names=F, col.names=F)
# NOTE(review): the duplicated "dgeDM <- dgeDM <-" below is harmless but redundant.
dgeDM <- dgeDM <- dmFit(dgeDM, group=NULL, dispersion="commonDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGadj############################################################
# Run 2 (normal samples): tagwise (per-gene) dispersion, grid mode.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGadj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
mcCores <- 20
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGoadj############################################################
# Run 3 (normal samples): tagwise dispersion via optim, fixed init = 100.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGoadj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
mcCores <- 30
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE, modeDisp="optim", initDisp = 100)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGoWadj############################################################
# Run 4 (normal samples): tagwise dispersion via optim, Weir MoM initialization.
# NOTE(review): unlike the runs above, this one reuses the `dge` object filtered
# in the previous section (no fresh subset from dgeOrg) — presumably intentional.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGoWadj"
mcCores <- 20
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE, modeDisp="optim", initDisp = 10, initWeirMoM = TRUE)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
write.table(dgeDM$initDispersion, paste0(out.dir, "/",name1,"_initDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
########### Null_tumor1 ###########
# Same four DM runs as above, repeated on the tumor samples (null 3 vs 3 split).
model <- "Null_tumor1"
out.dir <- paste0("DM_v5/fc/diff_out_", model)
dir.create(out.dir, showWarnings=F, recursive=T)
metadata <- metadataOrg[metadataOrg$Tissue.Type == "tumor", ]
metadata$condition = c(rep("C1", 3), rep("C2", 3))
counts <- fc[, metadata[, "ids"]]
gene_id <- strsplit2(fc[,1], ":")[,1]
ete_id <- fc[,1]
dgeOrg <- DGEList(counts=counts, group = metadata$condition, genes=data.frame(gene_id=gene_id, ete_id=ete_id))
name1 <- "fc"
dge <- dgeOrg
# write.table(data.frame(dge$genes,dge$counts), paste0(out.dir, "/dge_counts_",name1,"_NOT_FILTERED.xls"), quote=F, sep="\t", row.names=F, col.names=T)
###fc_g0_s4_keep0s_subsetInf_DM5adj############################################################
# Run 1 (tumor samples): common dispersion with adjustment.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5adj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
nlevels(dge$genes$gene_id)
mcCores <- 20
## run DM pipeline
dgeDM <- dmEstimateCommonDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE)
write.table(dgeDM$commonDispersion, paste0(out.dir, "/",name1,"_commonDispersion.txt"), quote=F, sep="\t", row.names=F, col.names=F)
dgeDM <- dgeDM <- dmFit(dgeDM, group=NULL, dispersion="commonDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGadj############################################################
# Run 2 (tumor samples): tagwise dispersion, grid mode.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGadj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
mcCores <- 20
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGoadj############################################################
# Run 3 (tumor samples): tagwise dispersion via optim, fixed init = 100.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGoadj"
dge <- dgeOrg
keep <- rowSums(cpm(dge) > 0) >= 4
dge <- dge[keep,]
dge$genes$gene_id <- as.factor(as.character(dge$genes$gene_id)) # otherwise the number of levels stays as before subsetting
dge$counts <- split(data.frame(dge$counts), factor(dge$genes$gene_id, levels = unique(dge$genes$gene_id)))
dge$counts <- lapply(dge$counts, as.matrix) ## !!! have to convert into matrix, otherwise ERROR
mcCores <- 30
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE, modeDisp="optim", initDisp = 100)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
###fc_g0_s4_keep0s_subsetInf_DM5TGoWadj############################################################
# Run 4 (tumor samples): tagwise dispersion via optim, Weir MoM initialization.
# NOTE(review): reuses the `dge` filtered in the previous section, as in the
# normal-sample block — presumably intentional.
name1 <- "fc_g0_s4_keep0s_subsetInf_DM5TGoWadj"
mcCores <- 10
## run DM pipeline
dgeDM <- dmEstimateTagwiseDisp(dge, group=NULL, adjust = TRUE, mode = "constrOptim2G", epsilon = 1e-05, maxIte = 1000, interval = c(0, 1e+5), tol = 1e-00, mcCores=mcCores, verbose=FALSE, modeDisp="optim", initDisp = 10, initWeirMoM = TRUE)
write.table(dgeDM$tagwiseDispersion, paste0(out.dir, "/",name1,"_tagwiseDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
write.table(dgeDM$initDispersion, paste0(out.dir, "/",name1,"_initDispersion.txt"), quote=F, sep="\t", row.names=T, col.names=F)
dgeDM <- dmFit(dgeDM, group=NULL, dispersion="tagwiseDispersion", mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode="constrOptim2G", epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
write.table(dgeDM$table, paste0(out.dir, "/",name1,"_results.xls"), quote=F, sep="\t", row.names=F, col.names=T)
save(dgeDM, file=paste0(out.dir, "/",name1,"_dgeDM.RData"))
#######################################################
### calculate the mean gene expression
#######################################################
out.dir <- "PLOTS_DM_v5_TREND_dispVSmean/"
dir.create(out.dir, recursive = T, showWarnings = FALSE)
# model <- "diff_out_Null_normal1"
model <- "diff_out_Null_tumor1"
dir.create(paste0(out.dir,"/", model))
# Reload one fitted object just to get the per-gene count matrices.
load(paste0("DM_v5/fc/", model ,"/fc_g0_s4_keep0s_subsetInf_DM5adj_dgeDM.RData"))
# Mean total gene expression = mean over samples of the per-sample exon sums.
meanExpr <- sapply(dgeDM$counts, function(g){ mean(colSums(g)) } )
meanExpr <- data.frame(gene_id = names(meanExpr), meanExpr = meanExpr)
head(meanExpr)
# NOTE(review): `table` shadows base::table for the rest of this script.
table <- meanExpr
#######################################################
# plot dispersion vs mean
#######################################################
### load common dispersions
cDisp <- read.table(paste0("DM_v5/fc/",model,"/fc_g0_s4_keep0s_subsetInf_DM5adj_commonDispersion.txt"))
# Collect the tagwise ("TG") runs from the results files present on disk.
files <- list.files(path = paste0("DM_v5/fc/", model), pattern = "_results.xls" )
files <- files[grepl(pattern = "TG", files)]
TGmethods <- gsub(pattern = "_results.xls", replacement = "" , files)
# NOTE(review): 1:length(TGmethods) misbehaves if no TG files exist;
# seq_along() would be safer.
for( i in 1:length(TGmethods)){
  # i = 1
  tDisp <- read.table(paste0("DM_v5/fc/", model ,"/",TGmethods[i],"_tagwiseDispersion.txt"))
  tName <- paste0(TGmethods[i],"_tagwiseDispersion")
  colnames(tDisp) <- c("gene_id", tName)
  table <- unique(merge(table, tDisp, by = "gene_id", all.x=TRUE))
  # One smooth-scatter of log10 dispersion vs log10 mean expression per method;
  # the red line marks the common dispersion.
  # NOTE(review): "TREMD" in the output file name looks like a typo for
  # "TREND"; kept as-is because it is a runtime string (output path).
  pdf(paste0(out.dir, "/", model, "/TREMD_mean_vs_gamma-",TGmethods[i],".pdf"))
  smoothScatter(log10(table$meanExpr), log10(table[,tName]), xlab="log10 mean gene expression", ylab="log10 gamma +", nrpoints = Inf, colramp=colorRampPalette(c("white", "grey40")), pch = 19, cex=0.6)
  abline(h = log10(cDisp), col = "red")
  dev.off()
}
7e31b5b06ca0f0181decce6debe43c88ec270701 | c459dd32d88158cb064c3af2bc2ea8c7ab77c667 | /sample_info/clinical/extract_cptac_discovery_ccRCC_survival_time.R | ffe0c75863daa36053ae0bb678aa1b2d3a55da1b | [] | no_license | ding-lab/ccRCC_snRNA_analysis | d06b8af60717779671debe3632cad744467a9668 | ac852b3209d2479a199aa96eed3096db0b5c66f4 | refs/heads/master | 2023-06-21T15:57:54.088257 | 2023-06-09T20:41:56 | 2023-06-09T20:41:56 | 203,657,413 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 9,909 | r | extract_cptac_discovery_ccRCC_survival_time.R | # Yige Wu @WashU Mar 2022
## reference: https://www.emilyzabor.com/tutorials/survival_analysis_in_r_tutorial.html
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA"
setwd(dir_base)
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table"
)
for (pkg_name_tmp in packages) {
library(package = pkg_name_tmp, character.only = T)
}
# input dependencies ------------------------------------------------------
## input clinical info
clinical_df <- readxl::read_excel(path = "../../../../CPTAC_ccRCC/Data_Freeze_1.1/CPTAC_ccRCC_Combined/Clinical_data/CCRCC_Oct2021_clinical_data.xlsx")
## input the ccRCC/non-cc RCC info
class_df <- fread(data.table = F, input = "./Resources/Bulk_Processed_Data/Case_ID/CPTAC_ccRCC_discovery_caseID_v1.0.tsv")
## inpu the specimen clinical info
clinical_specimen_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/clinical/extract_ccRCC_specimen_clinical_data/20210706.v1/ccRCC_Specimen_Clinicl_Data.20210706.v1.tsv")
# extract info ------------------------------------------------------------
colnames(clinical_df)
clinical_df$`follow-up/vital_status_at_date_of_last_contact`
clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_death`
clinical_df$`follow-up/new_tumor_after_initial_treatment`
clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_new_tumor_after_initial_treatment`
clinical_df$`follow-up/measure_of_success_of_outcome_at_date_of_last_contact_or_death`
clinical_df$`follow-up/measure_of_success_of_outcome_at_the_completion_of_initial_first_course_treatment`
## get the latest survival time
## The "follow-up/*" columns hold "|"-delimited per-visit histories; each of
## the fields below only needs the most recent (last) entry.  The original
## repeated the same anonymous function six times; extract it once.
## str_split(NA, ...) yields NA_character_, so missing values propagate as NA.
last_followup_entry <- function(x) {
  entries <- str_split(string = x, pattern = "\\|")[[1]]
  entries[length(entries)]
}
## Days from initial diagnosis to the latest contact.
clinical_df$days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact <- sapply(X = clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact`, FUN = last_followup_entry)
## Vital status recorded at that latest contact.
clinical_df$vital_status_at_date_of_last_contact <- sapply(X = clinical_df$`follow-up/vital_status_at_date_of_last_contact`, FUN = last_followup_entry)
## Days to death (NA while the patient is alive).
clinical_df$days_from_date_of_initial_pathologic_diagnosis_to_date_of_death <- sapply(X = clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_death`, FUN = last_followup_entry)
## Whether a new tumor appeared after the initial treatment.
clinical_df$new_tumor_after_initial_treatment <- sapply(X = clinical_df$`follow-up/new_tumor_after_initial_treatment`, FUN = last_followup_entry)
## Days to the new tumor after initial treatment (if any).
clinical_df$days_from_date_of_initial_pathologic_diagnosis_to_date_of_new_tumor_after_initial_treatment <- sapply(X = clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_new_tumor_after_initial_treatment`, FUN = last_followup_entry)
## Tumor status at the last contact or at death.
clinical_df$tumor_status_at_date_of_last_contact_or_death <- sapply(X = clinical_df$`follow-up/tumor_status_at_date_of_last_contact_or_death`, FUN = last_followup_entry)
## Days from initial diagnosis to the FIRST follow-up at which the patient was
## recorded "With Tumor" (NA if never recorded with tumor).  The two follow-up
## columns are parallel "|"-delimited histories, so the i-th status entry
## lines up with the i-th follow-up time.
clinical_df$days_from_date_of_initial_pathologic_diagnosis_to_withtumor <- sapply(X = seq_len(nrow(clinical_df)), function(i, fu_time, tumor_status) {
  tumorstatus_vec <- str_split(string = tumor_status[i], pattern = "\\|")[[1]]
  fudays_vec <- str_split(string = fu_time[i], pattern = "\\|")[[1]]
  ## Bug fix: the original condition `!is.na(tumorstatus_vec) & any(...)` is a
  ## vector of length > 1 whenever a patient has several follow-ups, which is
  ## an error in `if` since R 4.2 (and silently used only the first element
  ## before that).  `any(..., na.rm = TRUE)` is scalar and treats an all-NA
  ## history as "never with tumor", matching the original intent.
  if (any(tumorstatus_vec == "With Tumor", na.rm = TRUE)) {
    fudays_vec[which(tumorstatus_vec == "With Tumor")[1]]
  } else {
    NA
  }
},
fu_time = clinical_df$`follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact`,
tumor_status = clinical_df$`follow-up/tumor_status_at_date_of_last_contact_or_death`)
# Interactive sanity checks below: auto-print / View() only, no state change.
as.vector(clinical_df$days_from_date_of_initial_pathologic_diagnosis_to_withtumor)
# clinical_df %>%
#   select(days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact,
#          vital_status_at_date_of_last_contact,
#          days_from_date_of_initial_pathologic_diagnosis_to_date_of_death,
#          new_tumor_after_initial_treatment,
#          days_from_date_of_initial_pathologic_diagnosis_to_date_of_new_tumor_after_initial_treatment,
#          tumor_status_at_date_of_last_contact_or_death,
#          `follow-up/days_from_date_of_initial_pathologic_diagnosis_to_date_of_additional_surgery_for_new_tumor_metastasis`) %>%
#   View()
## there are some cases where there was new tumor after initial treatment, but at the last contact or death, they are tumor free
clinical_df %>%
  filter(tumor_status_at_date_of_last_contact_or_death == "With Tumor" & new_tumor_after_initial_treatment == "No") %>%
  View()
clinical_df %>%
  filter(tumor_status_at_date_of_last_contact_or_death == "With Tumor") %>%
  View()
# make data frame ---------------------------------------------------------
## add basic information (age, sex, pathological stage) for survival analysis
### keep only ccRCC cases
case2survival_df <- class_df %>%
  filter(Histologic_Type == "Clear cell renal cell carcinoma") %>%
  select(CASE_ID)
## NOTE(review): plyr::mapvalues passes unmatched CASE_IDs through unchanged,
## so a case missing from clinical_df keeps its own ID as "age"/"sex"/stage;
## the ID then becomes NA (with a warning) at as.numeric() — confirm every
## CASE_ID has a clinical record.
case2survival_df$age <- mapvalues(x = case2survival_df$CASE_ID, from = clinical_df$case_id, to = as.vector(clinical_df$`consent/age`))
## Ages are top-coded as ">=90" in the clinical table; recode before coercion
## so as.numeric() does not turn those cases into NA.
case2survival_df$age[case2survival_df$age == ">=90"] <- "90"
case2survival_df$age <- as.numeric(case2survival_df$age)
case2survival_df$sex <- mapvalues(x = case2survival_df$CASE_ID, from = clinical_df$case_id, to = as.vector(clinical_df$`consent/sex`))
## 0/1 indicator for male sex (convenient for regression models).
case2survival_df$sex.ismale <- as.numeric(case2survival_df$sex == "Male")
case2survival_df$tumor_stage_pathological <- mapvalues(x = case2survival_df$CASE_ID, from = clinical_df$case_id, to = as.vector(clinical_df$`baseline/tumor_stage_pathological`))
## Strip the "Stage " prefix and map the roman numeral to 1-4.
## case_when replaces the original nested ifelse; the explicit is.na() branch
## preserves the old NA propagation (ifelse(NA == "I", ...) returned NA, so
## the fall-through "4" arm only ever saw non-NA values).
case2survival_df <- case2survival_df %>%
  mutate(tumor_stage_pathological = gsub(x = tumor_stage_pathological, pattern = "Stage ", replacement = "")) %>%
  mutate(stage.numeric = case_when(
    is.na(tumor_stage_pathological) ~ NA_real_,
    tumor_stage_pathological == "I" ~ 1,
    tumor_stage_pathological == "II" ~ 2,
    tumor_stage_pathological == "III" ~ 3,
    TRUE ~ 4
  ))
### add tumor grade: highest histologic grade (G1-G4 -> 1-4) among a case's
### tumor specimens. Sorting by grade (descending) and then keeping the first
### row per case picks the maximum grade.
clinical_specimen_filtered_df <- clinical_specimen_df %>%
  filter(Tissue_Type == "tumor") %>%
  mutate(Histologic_Grade.numeric = gsub(x = Histologic_Grade, pattern = "G", replacement = "")) %>%
  arrange(Case, desc(Histologic_Grade.numeric)) %>%
  select(Case, Histologic_Grade.numeric) %>%
  distinct(Case, .keep_all = TRUE)
## Map each case to its (max) grade and coerce to numeric in one step.
case2survival_df$grade.numeric <- as.numeric(
  mapvalues(x = case2survival_df$CASE_ID,
            from = clinical_specimen_filtered_df$Case,
            to = as.vector(clinical_specimen_filtered_df$Histologic_Grade.numeric)))
## add survival time: attach the follow-up timing/status columns from the
## clinical table. Left join (all.x = TRUE) keeps every ccRCC case even when
## no clinical record matches.
followup_cols_df <- clinical_df %>%
  select(case_id, days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact,
         vital_status_at_date_of_last_contact,
         days_from_date_of_initial_pathologic_diagnosis_to_date_of_death,
         days_from_date_of_initial_pathologic_diagnosis_to_withtumor,
         new_tumor_after_initial_treatment,
         days_from_date_of_initial_pathologic_diagnosis_to_date_of_new_tumor_after_initial_treatment,
         tumor_status_at_date_of_last_contact_or_death)
case2survival_df <- merge(x = case2survival_df, y = followup_cols_df,
                          by.x = "CASE_ID", by.y = "case_id", all.x = TRUE)
## Day counts arrive as character; convert the three columns used downstream
## for OS/PFS to numeric.
for (col_tmp in c("days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact",
                  "days_from_date_of_initial_pathologic_diagnosis_to_date_of_death",
                  "days_from_date_of_initial_pathologic_diagnosis_to_withtumor")) {
  case2survival_df[[col_tmp]] <- as.numeric(case2survival_df[[col_tmp]])
}
## Interactive spot-check of the merged tumor-status values.
case2survival_df$tumor_status_at_date_of_last_contact_or_death
# make components for overall survival ------------------------------------
## OS: time to last contact; event = vital status "Deceased".
## PFS proxy: time to the first "With Tumor" follow-up record if any,
## otherwise time to last contact; an event is a "With Tumor" record OR death.
case2survival_df <- case2survival_df %>%
  mutate(OS_time = days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact) %>%
  mutate(OS_status = ifelse(vital_status_at_date_of_last_contact == "Deceased", "dead", "censored")) %>%
  mutate(PFS_time = ifelse(!is.na(days_from_date_of_initial_pathologic_diagnosis_to_withtumor), days_from_date_of_initial_pathologic_diagnosis_to_withtumor,
                           days_from_date_of_initial_pathologic_diagnosis_to_date_of_last_contact)) %>%
  ## BUG FIX: was the typo "Deseased", which never matched any vital status
  ## (OS_status above uses "Deceased"), so deaths without a "With Tumor"
  ## record were silently censored instead of counted as progression events.
  mutate(PFS_status = ifelse(!is.na(days_from_date_of_initial_pathologic_diagnosis_to_withtumor) | vital_status_at_date_of_last_contact == "Deceased", "progression", "censored"))
# write output ------------------------------------------------------------
## makeOutDir() comes from the project's shared functions file.
source("./ccRCC_snRNA_analysis/functions.R")
## set run id (date-stamped + manual version bump)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
## Robustness fix: create missing parent directories and do not warn when the
## script is rerun on the same day (directory already exists).
dir.create(dir_out, showWarnings = FALSE, recursive = TRUE)
## NOTE(review): filename has no separator between "Survival_Time" and the
## run id — presumably intentional; kept as-is so downstream paths still match.
file2write <- paste0(dir_out, "CPTAC_Discovery_ccRCC_Survival_Time", run_id, ".tsv")
write.table(file = file2write, x = case2survival_df, quote = FALSE, sep = "\t", row.names = FALSE)
## NOTE(review): the lines below are non-R residue appended from a web page
## ("dataset viewer" boilerplate); commented out so the file parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.