content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(x = c(NaN, NaN, 3.13290399277814e-305, -9.04325940472529e+304, -2.3275419129442e+197, -2.327541784346e+197, -2.327541784346e+197, -2.327541784346e+197, -2.70503619502547e-11, -1.7559039189425e+306, -4.83169060316842e-13, NaN, 1.63897169200378e-260, 0, 1.44673259278062e-307, 6.20225906577851e+250, 3.99608007991712e+149, NaN, NaN, 3.25938554769722e-311, -1.74846700476429e-283, Inf, 2.8398992681996e-29, -3.01021693352041e+105, 1.21626821479095e+58, 8.04006479379062e-315, 1.390671161567e-309, 3.39850951637185e-315, 5.20413872813422e-258, -6.40658036636793e-145, 4.52513516640491e+154, 7.06416463537717e-304, NaN, NaN, -7.20321336226232e+303, 4.14464901596874e-317, 2.14325735979507e-312, -Inf, 7.39437241408225e-304, 6.89707709752781e-307, -7.68164101933785e+304, 5.41117184707469e-312, NaN, NaN, NaN, -5.80251977845898e-50, 3.82993203612961e-306, -5.80251977845881e-50, 1.06102087793428e-314, 3.47355704226781e-306, -1.06523955577036e-314, -3.38211226493095e-306, 1.42873381341084e-101, 3.65190519703948e-306, 8.28904605845809e-317, 1.7487302291371e-283, 2.78134232345779e-309, -8.37116099364271e+298, NaN, -Inf, -5.80251977845898e-50, -3.73056436676488e+305, -3.38211227038164e-306, 1.06099789548264e-314, 1.06523955577036e-314, 4.55654551346028e-305, -2.48469051960539e+77, 7.74681462431717e-304, 1.80350548608666e-309, -7.68185448396664e+304, NaN, 1.39107858475887e-308, -6.40666335849475e-145, -4.49642301433951e+307, -1.36845553156102e-48, 3.09157869946877e-305, -1.7582545656058e-24, -3.07840260097378e-289, 5.07505857093848e-299, NaN, -2.78677463235887e-309, Inf, 0, -3.47355843762567e-306, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result) | /diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609960140-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,708 | r | testlist <- list(x = c(NaN, NaN, 3.13290399277814e-305, -9.04325940472529e+304, -2.3275419129442e+197, -2.327541784346e+197, -2.327541784346e+197, -2.327541784346e+197, -2.70503619502547e-11, -1.7559039189425e+306, -4.83169060316842e-13, NaN, 1.63897169200378e-260, 0, 1.44673259278062e-307, 6.20225906577851e+250, 3.99608007991712e+149, NaN, NaN, 3.25938554769722e-311, -1.74846700476429e-283, Inf, 2.8398992681996e-29, -3.01021693352041e+105, 1.21626821479095e+58, 8.04006479379062e-315, 1.390671161567e-309, 3.39850951637185e-315, 5.20413872813422e-258, -6.40658036636793e-145, 4.52513516640491e+154, 7.06416463537717e-304, NaN, NaN, -7.20321336226232e+303, 4.14464901596874e-317, 2.14325735979507e-312, -Inf, 7.39437241408225e-304, 6.89707709752781e-307, -7.68164101933785e+304, 5.41117184707469e-312, NaN, NaN, NaN, -5.80251977845898e-50, 3.82993203612961e-306, -5.80251977845881e-50, 1.06102087793428e-314, 3.47355704226781e-306, -1.06523955577036e-314, -3.38211226493095e-306, 1.42873381341084e-101, 3.65190519703948e-306, 8.28904605845809e-317, 1.7487302291371e-283, 2.78134232345779e-309, -8.37116099364271e+298, NaN, -Inf, -5.80251977845898e-50, -3.73056436676488e+305, -3.38211227038164e-306, 1.06099789548264e-314, 1.06523955577036e-314, 4.55654551346028e-305, -2.48469051960539e+77, 7.74681462431717e-304, 1.80350548608666e-309, -7.68185448396664e+304, NaN, 1.39107858475887e-308, -6.40666335849475e-145, -4.49642301433951e+307, -1.36845553156102e-48, 3.09157869946877e-305, -1.7582545656058e-24, -3.07840260097378e-289, 5.07505857093848e-299, NaN, -2.78677463235887e-309, Inf, 0, -3.47355843762567e-306, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result) |
# Exploratory analysis: county-level population change vs. social capital
# (SoCI bonding/bridging) and environmental exposure variables.
library(lattice)
library(ggplot2)
library(car)
# Master dataset assembled upstream; one row per geography-year
# (NOTE(review): unit id column not visible here -- confirm row sort order).
master <- read.csv("/home/shares/oss2017/social/DATA/master_20170726.csv",
                   header = TRUE,
                   stringsAsFactors = FALSE)
names(master)   # quick look at available columns
View(master[master$year == 2010,])   # interactive inspection of the 2010 slice
# Proportional population change from year t to year t+lag for every row.
# The original per-year loops assumed each year has a matching slice of rows
# in identical unit order; for the final year(s) `which(year == i + lag)` is
# empty and the zero-length replacement stops the script with an error.
# Years without a complete matching slice are now left as NA instead.
# NOTE(review): row alignment across years is still assumed -- confirm the
# data are sorted identically by unit within each year.
compute_pop_change <- function(df, lag) {
  out <- rep(NA_real_, nrow(df))
  for (yr in unique(df$year)) {
    cur <- which(df$year == yr)
    nxt <- which(df$year == (yr + lag))
    if (length(cur) > 0 && length(nxt) == length(cur)) {
      out[cur] <- (df$Pop[nxt] - df$Pop[cur]) / df$Pop[cur]
    }
  }
  out
}
master$pop_change1 <- compute_pop_change(master, 1)  # change to next year
master$pop_change2 <- compute_pop_change(master, 2)  # change over two years
# Derived land-cover totals: overall developed fraction and estuarine wetland fraction.
master$perc_total_dev <- master$perc_dev_high + master$perc_dev_med + master$perc_dev_low
master$perc_total_est_wet <- with(master, perc_est_forest_wet +
                                    perc_est_scrub_wet +
                                    perc_est_emerg_wet)
# 2010 cross-section used for most models below.
master2010 <- master[master$year == 2010,]
master2010$STATEFP <- as.factor(master2010$STATEFP)
#check the distribution to see how not normal the y variable is:
# NOTE(review): master$pop_change does not exist (only pop_change1/pop_change2
# are created above) -- this was probably meant to be pop_change1.
hist(master$pop_change, breaks = 20)
#Not too bad. Let's go with it.
par(mar = c(4, 4, 4, 4))
plot(master$mean.el, master$Pop, col = "blue")
hist(master$Pop)
hist(master$mean.el)
class(master$Pop)
master$Pop <- as.numeric(master$Pop)   # ensure numeric for modeling
# EXPLORE VARIABLES ####
# Scatterplot matrix of selected columns referenced by position
# (NOTE(review): hard-coded column indices are fragile if the CSV changes).
splom(~ master[,c(64, 41, 8, 27, 14, 63)])
plot(master[,41], master[,63],
     xlab ="sea level",
     ylab = "bonding")
ggplot(master, aes(Sea_level.mm, bond_SoCI, col = factor(year))) +
  geom_point()
#
ggplot(master, aes(perc_pal_emerg_wet, perc_dev_high)) +
  geom_point()
# Baseline GLM of population change on exposure + social capital.
# NOTE(review): `pop_change` is not created in this script (see pop_change1/2
# above) -- confirm it exists in the CSV or rename the response here.
m1 <- glm(pop_change ~ Sea_level.mm +
            Mean.Temp.K +
            perc_pal_emerg_wet +
            perc_dev_high +
            bond_SoCI,
          data = master)
summary(m1)
# Distribution of the 1- and 2-year population-change outcomes (2010 slice).
summary(master2010$pop_change1)
summary(master2010$pop_change2)
# Bivariate looks at social bonding and sea level against each outcome.
plot(master2010$SoCI_bond, master2010$pop_change1)
plot(master2010$SoCI_bond, master2010$pop_change2)
plot(master2010$Sea_level.mm, master2010$pop_change1)
plot(master2010$Sea_level.mm, master2010$pop_change2)
# Gaussian GLM with a sea level x bonding interaction.
m2 <- glm(pop_change2 ~ Sea_level.mm*SoCI_bond, data = master2010)
summary(m2)
Anova(m2)   # Type-II tests from the car package
# Fixed: was plot(plot(...)) -- the outer call plotted the NULL returned by
# the inner plot() and produced a spurious extra (empty) plot.
plot(master2010$Wind.speed.m.s, master2010$pop_change1)
hist(master2010$Wind.speed.m.s)
# Four-panels-per-page scan of every candidate predictor against the
# 1-year population change outcome.
pdf("data-explore.pdf")
par(mfrow = c(2, 2))
for(i in c(3, 4, 6:41, 44:62)){   # predictor columns selected by position
  plot(master2010[,i], master2010$pop_change1,
       xlab = names(master2010)[i],
       ylab = "% Population Change to Next Year",
       bty = "l",
       pch = 16, col = rgb(0, 0, 0, 1/4))
  abline(h = 0, lty = 2, lwd = 2)   # zero-change reference line
}
dev.off()
# Same scan against the 2-year outcome.
pdf("data-explore2.pdf")
par(mfrow = c(2, 2))
for(i in c(3, 4, 6:41, 44:62)){
  plot(master2010[,i], master2010$pop_change2,
       xlab = names(master2010)[i],
       ylab = "% Population Change to Next 2 Years",
       bty = "l",
       pch = 16, col = rgb(0, 0, 0, 1/4))
  abline(h = 0, lty = 2, lwd = 2)
}
dev.off()
# Candidate GLMs on trimmed subsets of the 2010 cross-section.
# NOTE(review): m3/m5 model pop_change1 but filter on pop_change2 -- confirm
# this cross-outcome trimming is intentional.
m3 <- glm(pop_change1 ~ SoCI_bond,
          data = master2010[which(master2010$pop_change2 > 0.02),])
m4 <- glm(pop_change2 ~ SoCI_bond,
          data = master2010[which(master2010$pop_change2 < 0.05),])
m5 <- glm(pop_change1 ~ SoCI_bond * Sea_level.mm,
          data = master2010[which(master2010$pop_change2 > 0.02),])
summary(m5)
Anova(m5, type = "III")
# Full interaction models: social capital (bonding + bridging) crossed with
# the environmental exposure variables.
m5.2 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) *
              (Sea_level.mm + Mean.Precipitation.kg.m.2 + perc_total_dev + perc_total_est_wet),
            data = master2010)
summary(m5.2)
Anova(m5.2, type = "III")
m5.3 <- glm(pop_change2 ~ (SoCI_bond + SoCI_bridge) *
              (Sea_level.mm + Mean.Precipitation.kg.m.2 + perc_total_dev + perc_total_est_wet),
            data = master2010)
summary(m5.3)
Anova(m5.3, type = "III")
plot(master2010$perc_total_dev, master2010$perc_total_est_wet)   # collinearity check
#work with 1 year lag:
m5.4 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) * perc_total_est_wet +
              Sea_level.mm + Mean.Precipitation.kg.m.2,
            data = master2010)
summary(m5.4)
Anova(m5.4, type = "III")
m5.5 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) * perc_total_est_wet +
              Sea_level.mm,
            data = master2010)
summary(m5.5)
Anova(m5.5, type = "III")
m5.6 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) *
              Sea_level.mm,
            data = master2010)
summary(m5.6)
Anova(m5.6, type = "III")
m5.7 <- glm(pop_change1 ~ SoCI_bond * (perc_total_est_wet) + SoCI_bridge,
            data = master2010)
summary(m5.7)
Anova(m5.7, type = "III")
#make some plots
ggplot(master2010,
       aes(SoCI_bond, pop_change1, col = perc_total_est_wet)) +
  geom_point()
ggplot(master2010,
       aes(SoCI_bond, pop_change1, col = Sea_level.mm)) +
  geom_point()
ggplot(master2010,
       aes(SoCI_bond, pop_change1, col = max.el)) +
  geom_point()
# master$urban <- master$perc_dev_high +
#   master$perc_dev_low +
#   master$perc_dev_med +
#   master$perc_open_sp_dev
# hist(master$urban)
| /models-explore.R | no_license | utn100/NCEAS-OSS2017-Project2 | R | false | false | 4,986 | r |
library(lattice)
library(ggplot2)
library(car)
master <- read.csv("/home/shares/oss2017/social/DATA/master_20170726.csv",
header = TRUE,
stringsAsFactors = FALSE)
names(master)
View(master[master$year == 2010,])
# Proportional population change from year t to year t+lag for every row.
# The original per-year loops assumed each year has a matching slice of rows
# in identical unit order; for the final year(s) `which(year == i + lag)` is
# empty and the zero-length replacement stops the script with an error.
# Years without a complete matching slice are now left as NA instead.
# NOTE(review): row alignment across years is still assumed -- confirm the
# data are sorted identically by unit within each year.
compute_pop_change <- function(df, lag) {
  out <- rep(NA_real_, nrow(df))
  for (yr in unique(df$year)) {
    cur <- which(df$year == yr)
    nxt <- which(df$year == (yr + lag))
    if (length(cur) > 0 && length(nxt) == length(cur)) {
      out[cur] <- (df$Pop[nxt] - df$Pop[cur]) / df$Pop[cur]
    }
  }
  out
}
master$pop_change1 <- compute_pop_change(master, 1)  # change to next year
master$pop_change2 <- compute_pop_change(master, 2)  # change over two years
master$perc_total_dev <- master$perc_dev_high + master$perc_dev_med + master$perc_dev_low
master$perc_total_est_wet <- with(master, perc_est_forest_wet +
perc_est_scrub_wet +
perc_est_emerg_wet)
master2010 <- master[master$year == 2010,]
master2010$STATEFP <- as.factor(master2010$STATEFP)
#check the distribution to see how not normal the y variable is:
hist(master$pop_change, breaks = 20)
#Not too bad. Let's go with it.
par(mar = c(4, 4, 4, 4))
plot(master$mean.el, master$Pop, col = "blue")
hist(master$Pop)
hist(master$mean.el)
class(master$Pop)
master$Pop <- as.numeric(master$Pop)
# EXPLORE VARIABLES ####
splom(~ master[,c(64, 41, 8, 27, 14, 63)])
plot(master[,41], master[,63],
xlab ="sea level",
ylab = "bonding")
ggplot(master, aes(Sea_level.mm, bond_SoCI, col = factor(year))) +
geom_point()
#
ggplot(master, aes(perc_pal_emerg_wet, perc_dev_high)) +
geom_point()
m1 <- glm(pop_change ~ Sea_level.mm +
Mean.Temp.K +
perc_pal_emerg_wet +
perc_dev_high +
bond_SoCI,
data = master)
summary(m1)
summary(master2010$pop_change1)
summary(master2010$pop_change2)
plot(master2010$SoCI_bond, master2010$pop_change1)
plot(master2010$SoCI_bond, master2010$pop_change2)
plot(master2010$Sea_level.mm, master2010$pop_change1)
plot(master2010$Sea_level.mm, master2010$pop_change2)
m2 <- glm(pop_change2 ~ Sea_level.mm*SoCI_bond, data = master2010)
summary(m2)
Anova(m2)
# Fixed: was plot(plot(...)) -- the outer call plotted the NULL returned by
# the inner plot() and produced a spurious extra (empty) plot.
plot(master2010$Wind.speed.m.s, master2010$pop_change1)
hist(master2010$Wind.speed.m.s)
pdf("data-explore.pdf")
par(mfrow = c(2, 2))
for(i in c(3, 4, 6:41, 44:62)){
plot(master2010[,i], master2010$pop_change1,
xlab = names(master2010)[i],
ylab = "% Population Change to Next Year",
bty = "l",
pch = 16, col = rgb(0, 0, 0, 1/4))
abline(h = 0, lty = 2, lwd = 2)
}
dev.off()
pdf("data-explore2.pdf")
par(mfrow = c(2, 2))
for(i in c(3, 4, 6:41, 44:62)){
plot(master2010[,i], master2010$pop_change2,
xlab = names(master2010)[i],
ylab = "% Population Change to Next 2 Years",
bty = "l",
pch = 16, col = rgb(0, 0, 0, 1/4))
abline(h = 0, lty = 2, lwd = 2)
}
dev.off()
m3 <- glm(pop_change1 ~ SoCI_bond,
data = master2010[which(master2010$pop_change2 > 0.02),])
m4 <- glm(pop_change2 ~ SoCI_bond,
data = master2010[which(master2010$pop_change2 < 0.05),])
m5 <- glm(pop_change1 ~ SoCI_bond * Sea_level.mm,
data = master2010[which(master2010$pop_change2 > 0.02),])
summary(m5)
Anova(m5, type = "III")
m5.2 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) *
(Sea_level.mm + Mean.Precipitation.kg.m.2 + perc_total_dev + perc_total_est_wet),
data = master2010)
summary(m5.2)
Anova(m5.2, type = "III")
m5.3 <- glm(pop_change2 ~ (SoCI_bond + SoCI_bridge) *
(Sea_level.mm + Mean.Precipitation.kg.m.2 + perc_total_dev + perc_total_est_wet),
data = master2010)
summary(m5.3)
Anova(m5.3, type = "III")
plot(master2010$perc_total_dev, master2010$perc_total_est_wet)
#work with 1 year lag:
m5.4 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) * perc_total_est_wet +
Sea_level.mm + Mean.Precipitation.kg.m.2,
data = master2010)
summary(m5.4)
Anova(m5.4, type = "III")
m5.5 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) * perc_total_est_wet +
Sea_level.mm,
data = master2010)
summary(m5.5)
Anova(m5.5, type = "III")
m5.6 <- glm(pop_change1 ~ (SoCI_bond + SoCI_bridge) *
Sea_level.mm,
data = master2010)
summary(m5.6)
Anova(m5.6, type = "III")
m5.7 <- glm(pop_change1 ~ SoCI_bond * (perc_total_est_wet) + SoCI_bridge,
data = master2010)
summary(m5.7)
Anova(m5.7, type = "III")
#make some plots
ggplot(master2010,
aes(SoCI_bond, pop_change1, col = perc_total_est_wet)) +
geom_point()
ggplot(master2010,
aes(SoCI_bond, pop_change1, col = Sea_level.mm)) +
geom_point()
ggplot(master2010,
aes(SoCI_bond, pop_change1, col = max.el)) +
geom_point()
# master$urban <- master$perc_dev_high +
# master$perc_dev_low +
# master$perc_dev_med +
# master$perc_open_sp_dev
# hist(master$urban)
|
# 1-D spatial birth-death simulation (MathBioSim): for each parameter set run
# until the per-cell population profile converges (L2 change < epsilon) or a
# wall-clock budget expires, saving population trajectories and pair
# correlation functions to CSV.
library(pacman)
p_load(tidyverse,spatstat,future,promises,listenv)
library(MathBioSim)
result_dir = './30_03_2020/'
dir.create(result_dir, showWarnings = FALSE)
dir.create(paste0(result_dir,"pop/"), showWarnings = FALSE)
dir.create(paste0(result_dir,"pcfs/"), showWarnings = FALSE)
plan(sequential)            # futures evaluate one after another
n_samples = 2000            # maximum sampling iterations per run
initial_population = 10000
time_limit = 36000          # wall-clock budget, intended as seconds
min_norm = Inf              # smallest convergence norm seen so far
cell_count_x = 100          # number of spatial discretisation cells
epsilon = 1e-6              # convergence threshold for the L2 norm
# One row per parameter set; sm/sw are birth/death kernel standard deviations.
params_all <-
  data.frame(id=c(1,2,3),
             sm=c(0.84,0.96,0.56),
             sw=c(0.09,0.09,0.13),
             b=1,d=0.1,dd=3/(2*(log(2))),
             samples=n_samples,
             start_pop=initial_population,
             seed=1234)%>%
  mutate(area=pmax(sm, sw) * 1000)
# Launch one simulation per parameter set as a future (sequential plan above).
all_runs = listenv()
for (i in 1:nrow(params_all)) {
  params=params_all[i,]
  all_runs[[i]]%<-%
    {
      start_time = Sys.time()
      # Tabulate Gaussian death/birth kernels out to 10 SD on 1001 nodes.
      params$death_kernel_r = 10 * params$sw
      params$death_kernel_nodes = 1001
      x_grid_death = seq(0,params$death_kernel_r,
                         length.out = params$death_kernel_nodes)
      params$birth_kernel_r = 10 * params$sm
      params$birth_kernel_nodes = 1001
      x_grid_birth = seq(0,params$birth_kernel_r,
                         length.out = params$birth_kernel_nodes)
      params$area_length_x = params$area
      params$init_density = params$start_pop / params$area_length_x
      # Configuration list handed to the MathBioSim poisson_1d simulator.
      sim_params <-
        list("area_length_x"=params$area_length_x,
             "cell_count_x"=cell_count_x,
             "b"=params$b,
             "d"=params$d,
             "dd"=params$dd,
             "seed"=params$seed,
             "init_density"=params$init_density,
             "death_kernel_r"=params$death_kernel_r,
             "death_kernel_y"=dnorm(x_grid_death, sd = params$sw),
             "birth_kernel_r"=params$birth_kernel_r,
             "birth_kernel_y"=dnorm(x_grid_birth, sd = params$sm),
             "spline_precision" = 1e-9
        )
      # Radii at which the pair correlation function is estimated.
      pcf_grid = seq(0,max(c(params$sw,params$sm))*10,length.out = 1001)
      sim<-new(poisson_1d,sim_params)
      pop<-numeric(n_samples)
      time<-numeric(n_samples)
      pcf_estimate<-list()
      calculated_limit = n_samples   # lowered if we converge / time out early
      for(j in 1:n_samples){
        # Advance the simulation by one event per current individual.
        sim$run_events(sim$total_population)
        pop[j]=sim$total_population
        time[j]=sim$time
        # Point pattern of current coordinates (duplicates removed for spatstat).
        points<-unique.ppp(ppp(sim$get_all_coordinates(),
                               rep(0,sim$total_population),
                               c(0,sim$area_length_x),
                               c(-sim$area_length_x/2,sim$area_length_x/2)))
        # Pair correlation estimated from the derivative of Ripley's K.
        K_estimate<-Kest(points,r=pcf_grid,correction="Ripley")
        pcf_estimate[[j]]=data.frame(Kest=K_estimate$iso/2,x=pcf_grid)%>%
          mutate(pfc=(Kest-lag(Kest))/(pcf_grid-lag(pcf_grid))/sim$area_length_x)%>%
          pull(pfc)
        # L2 norm of the change in the per-cell population profile since the
        # previous sample -- the convergence criterion.
        l2_norm = 0.0
        populations = sim$get_cell_populations()
        if (j > 1){
          for(k in 1:cell_count_x){
            #cat(j,' ', k, ' ', populations[k], ' ', populations_old[k], '\n')
            l2_norm = l2_norm + (populations[k] - populations_old[k])^2
          }
        }
        populations_old = populations
        # NOTE(review): min_norm is shared across parameter sets, so this
        # progress message compares against the global best norm seen so far.
        if (j > 1 && l2_norm < min_norm) {
          min_norm = l2_norm
          cat('parameter id #', i, ', simulation #', j, '; current norm is ', l2_norm, '\n')
        }
if ((j > 1) && (l2_norm < epsilon)) {
calculated_limit = j
break
}
if (Sys.time()-start_time>time_limit){
calculated_limit = j
break
}
}
pcf_est_av<-numeric(length(pcf_grid))
for(j in 1:length(pcf_estimate[[1]])){
jrow=numeric(n_samples)
for (k in 1:calculated_limit){
jrow[k]=pcf_estimate[[k]][j]
}
pcf_est_av[j]=mean(jrow)
}
      # Persist this parameter set's results as per-run CSVs.
      pcfs<-data.frame(id=i,r=pcf_grid,y=pcf_est_av)
      pops<-data.frame(id=i,time=time,pop=pop)
      write_csv(pops,paste0(result_dir,"pop/",i,".csv"))
      write_csv(pcfs,paste0(result_dir,"pcfs/",i,".csv"))
    }%stdout%TRUE
}
# Force evaluation of all futures, then gather per-run CSVs into combined files.
all_runs%>%as.list()
write_csv(params_all,paste0(result_dir,"params.csv"))
list.files(paste0(result_dir,"pop/"),full.names = TRUE)%>%
  map_dfr(read_csv)%>%
  write_csv(paste0(result_dir,"pop.csv"))
list.files(paste0(result_dir,"pcfs/"),full.names = TRUE)%>%
  map_dfr(read_csv)%>%
  write_csv(paste0(result_dir,"pcfs.csv"))
| /examples/1d_epsilon_test.R | no_license | nietoo/RcppSim | R | false | false | 4,702 | r | library(pacman)
p_load(tidyverse,spatstat,future,promises,listenv)
library(MathBioSim)
result_dir = './30_03_2020/'
dir.create(result_dir, showWarnings = FALSE)
dir.create(paste0(result_dir,"pop/"), showWarnings = FALSE)
dir.create(paste0(result_dir,"pcfs/"), showWarnings = FALSE)
plan(sequential)
n_samples = 2000
initial_population = 10000
time_limit = 36000
min_norm = Inf
cell_count_x = 100
epsilon = 1e-6
params_all <-
data.frame(id=c(1,2,3),
sm=c(0.84,0.96,0.56),
sw=c(0.09,0.09,0.13),
b=1,d=0.1,dd=3/(2*(log(2))),
samples=n_samples,
start_pop=initial_population,
seed=1234)%>%
mutate(area=pmax(sm, sw) * 1000)
all_runs = listenv()
for (i in 1:nrow(params_all)) {
params=params_all[i,]
all_runs[[i]]%<-%
{
start_time = Sys.time()
params$death_kernel_r = 10 * params$sw
params$death_kernel_nodes = 1001
x_grid_death = seq(0,params$death_kernel_r,
length.out = params$death_kernel_nodes)
params$birth_kernel_r = 10 * params$sm
params$birth_kernel_nodes = 1001
x_grid_birth = seq(0,params$birth_kernel_r,
length.out = params$birth_kernel_nodes)
params$area_length_x = params$area
params$init_density = params$start_pop / params$area_length_x
sim_params <-
list("area_length_x"=params$area_length_x,
"cell_count_x"=cell_count_x,
"b"=params$b,
"d"=params$d,
"dd"=params$dd,
"seed"=params$seed,
"init_density"=params$init_density,
"death_kernel_r"=params$death_kernel_r,
"death_kernel_y"=dnorm(x_grid_death, sd = params$sw),
"birth_kernel_r"=params$birth_kernel_r,
"birth_kernel_y"=dnorm(x_grid_birth, sd = params$sm),
"spline_precision" = 1e-9
)
pcf_grid = seq(0,max(c(params$sw,params$sm))*10,length.out = 1001)
sim<-new(poisson_1d,sim_params)
pop<-numeric(n_samples)
time<-numeric(n_samples)
pcf_estimate<-list()
calculated_limit = n_samples
for(j in 1:n_samples){
sim$run_events(sim$total_population)
pop[j]=sim$total_population
time[j]=sim$time
points<-unique.ppp(ppp(sim$get_all_coordinates(),
rep(0,sim$total_population),
c(0,sim$area_length_x),
c(-sim$area_length_x/2,sim$area_length_x/2)))
K_estimate<-Kest(points,r=pcf_grid,correction="Ripley")
pcf_estimate[[j]]=data.frame(Kest=K_estimate$iso/2,x=pcf_grid)%>%
mutate(pfc=(Kest-lag(Kest))/(pcf_grid-lag(pcf_grid))/sim$area_length_x)%>%
pull(pfc)
l2_norm = 0.0
populations = sim$get_cell_populations()
if (j > 1){
for(k in 1:cell_count_x){
#cat(j,' ', k, ' ', populations[k], ' ', populations_old[k], '\n')
l2_norm = l2_norm + (populations[k] - populations_old[k])^2
}
}
populations_old = populations
if (j > 1 && l2_norm < min_norm) {
min_norm = l2_norm
cat('parameter id #', i, ', simulation #', j, '; current norm is ', l2_norm, '\n')
}
if ((j > 1) && (l2_norm < epsilon)) {
calculated_limit = j
break
}
if (Sys.time()-start_time>time_limit){
calculated_limit = j
break
}
}
pcf_est_av<-numeric(length(pcf_grid))
for(j in 1:length(pcf_estimate[[1]])){
jrow=numeric(n_samples)
for (k in 1:calculated_limit){
jrow[k]=pcf_estimate[[k]][j]
}
pcf_est_av[j]=mean(jrow)
}
pcfs<-data.frame(id=i,r=pcf_grid,y=pcf_est_av)
pops<-data.frame(id=i,time=time,pop=pop)
write_csv(pops,paste0(result_dir,"pop/",i,".csv"))
write_csv(pcfs,paste0(result_dir,"pcfs/",i,".csv"))
}%stdout%TRUE
}
all_runs%>%as.list()
write_csv(params_all,paste0(result_dir,"params.csv"))
list.files(paste0(result_dir,"pop/"),full.names = TRUE)%>%
map_dfr(read_csv)%>%
write_csv(paste0(result_dir,"pop.csv"))
list.files(paste0(result_dir,"pcfs/"),full.names = TRUE)%>%
map_dfr(read_csv)%>%
write_csv(paste0(result_dir,"pcfs.csv"))
|
\name{BivariateAssoc}
\alias{BivariateAssoc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Bivariate association measures for supervised learning tasks.
}
\description{
Computes bivariate association measures between a response and predictor variables (and, optionally, between every pair of predictor variables).
}
\usage{
BivariateAssoc(Y, X, xx = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Y}{the response variable}
\item{X}{the predictor variables}
\item{xx}{whether the association measures should be computed for couples of predictor variables (default) or not. With a lot of predictors, consider setting xx to FALSE (for reasons of computation time).}
}
\details{
For each pair of variables, a permutation test is computed, following the framework used in conditional inference trees to choose a splitting variable. This test produces a p-value, transformed as -log(1-p) for reasons of comparison stability. The function also computes a "standard" association measure: Kendall's tau correlation for pairs of numeric variables, Cramer's V for pairs of factors and eta-squared for numeric-factor pairs.}
\value{
A list of the following items :
\item{YX}{: a table with the association measures between the response and predictor variables}
\item{XX}{: a table with the association measures between every couples of predictor variables}
In each table :
\item{measure}{: name of the "standard" association measure}
\item{assoc}{: value of the "standard" association measure}
\item{p.value}{: p-value from the permutation test}
\item{criterion}{: p-value from the permutation test transformed as -log(1-p), which serves to sort rows}
}
\references{
Hothorn T, Hornik K, Van De Wiel MA, Zeileis A. "A lego system for conditional inference". \emph{The American Statistician}. 60:257–263, 2006.
Hothorn T, Hornik K, Zeileis A. "Unbiased Recursive Partitioning: A Conditional Inference Framework". \emph{Journal of Computational and Graphical Statistics}, 15(3):651-674, 2006.
}
\author{
Nicolas Robette
}
\note{
see also https://stats.stackexchange.com/questions/171301/interpreting-ctree-partykit-output-in-r
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{ctree}
}
\examples{
data(iris)
iris2 = iris
iris2$Species = factor(iris$Species == "versicolor")
BivariateAssoc(iris2$Species,iris2[,1:4])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/BivariateAssoc.Rd | no_license | nicolas-robette/moreparty | R | false | false | 2,626 | rd | \name{BivariateAssoc}
\alias{BivariateAssoc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Bivariate association measures for supervised learning tasks.
}
\description{
Computes bivariate association measures between a response and predictor variables (and, optionally, between every pair of predictor variables).
}
\usage{
BivariateAssoc(Y, X, xx = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Y}{the response variable}
\item{X}{the predictor variables}
\item{xx}{whether the association measures should be computed for couples of predictor variables (default) or not. With a lot of predictors, consider setting xx to FALSE (for reasons of computation time).}
}
\details{
For each pair of variables, a permutation test is computed, following the framework used in conditional inference trees to choose a splitting variable. This test produces a p-value, transformed as -log(1-p) for reasons of comparison stability. The function also computes a "standard" association measure: Kendall's tau correlation for pairs of numeric variables, Cramer's V for pairs of factors and eta-squared for numeric-factor pairs.}
\value{
A list of the following items :
\item{YX}{: a table with the association measures between the response and predictor variables}
\item{XX}{: a table with the association measures between every couples of predictor variables}
In each table :
\item{measure}{: name of the "standard" association measure}
\item{assoc}{: value of the "standard" association measure}
\item{p.value}{: p-value from the permutation test}
\item{criterion}{: p-value from the permutation test transformed as -log(1-p), which serves to sort rows}
}
\references{
Hothorn T, Hornik K, Van De Wiel MA, Zeileis A. "A lego system for conditional inference". \emph{The American Statistician}. 60:257–263, 2006.
Hothorn T, Hornik K, Zeileis A. "Unbiased Recursive Partitioning: A Conditional Inference Framework". \emph{Journal of Computational and Graphical Statistics}, 15(3):651-674, 2006.
}
\author{
Nicolas Robette
}
\note{
see also https://stats.stackexchange.com/questions/171301/interpreting-ctree-partykit-output-in-r
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{ctree}
}
\examples{
data(iris)
iris2 = iris
iris2$Species = factor(iris$Species == "versicolor")
BivariateAssoc(iris2$Species,iris2[,1:4])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Simulation check for a robust-design-style capture-recapture model:
# simulate a superpopulation, generate PIT-tag capture histories, then fit a
# data-augmented closed-population model in JAGS and compare Nhat to Ntrue.
library(R2jags)
omega<-0.6    # inclusion (membership) probability used to simulate truth
lambda<-1     # probability an included individual is in state 1 (available)
nocc<-4       # secondary occasions per primary period
n<-250        # simulated superpopulation size
Z<-rbinom(n,1,omega)   # latent membership indicators
indx<- which(Z==1)
Ntrue<-sum(Z)          # true abundance, for comparison with Nhat
nprim<- 4              # primary periods
psi<-c(lambda,
       (1-lambda))     # state probabilities (state 1 vs state 2)
S<-matrix(3,n,nprim)   # states; 3 = not a member of the superpopulation
for(i in 1:nprim)
{
  S[indx,i]<-apply(rmultinom(
    sum(Z),1,psi),
    2,which.max)
}
p<-0.3                 # per-occasion detection probability
# NOTE(review): `states` is never defined in this script, so nrow(states)
# errors here -- this (and length(states) below) was presumably meant to be n.
ch<-array(0,c(nrow(states),nocc,nprim))
for(j in 1:nprim)
{
  for(i in 1:nocc)
  {
    indx<- which(S[,j]==1)
    ch[indx,i,j]<-rbinom(
      length(indx),
      1,p)
  }
}
# Individuals captured at least once in primary period 1.
captured1<- apply(ch[,,1],1,max)
# 10 of them receive acoustic tags in addition to PIT tags.
acoustic<- sample(which(captured1==1),10,
                  replace=FALSE)
tagged<-rep(0,length(states))   # NOTE(review): `states` undefined -- see above
tagged[which(apply(ch,1,max)==1)]<-1   # 1 = PIT-tagged (ever captured)
tagged[acoustic]<-2                    # 2 = acoustic + PIT
reciever<-c(0,1)                       # NOTE(review): defined but never used
ch_acoustic<- matrix(0,length(states),nprim)
ch_acoustic[which(S[,2]==1),2]<-1      # acoustic detections in primary period 2
# subset only captured fish
captured<- apply(ch,1,max)
indx<-which(captured==1)
chpit<- ch[indx,,]
chac<- ch_acoustic[indx,]
tagged<-tagged[indx]
# JAGS model (written as an R function; R2jags serialises the body as BUGS
# code, where `#` is still a comment). A simple M0-style closed-population
# model with data augmentation: Z is membership, p is constant detection.
primary<- function()
{
  # latent membership indicators for the augmented list of M individuals
  for(i in 1:M)
  {
    Z[i]~dbern(omega)
  }
  # detection is only possible for members (Z[i] = 1)
  for(i in 1:M)
  {
    for(j in 1:nocc)
    {
      cap_p[i,j]<-Z[i]*p
      chpit[i,j]~dbern(cap_p[i,j])
    }
  }
  ## DERIVED PARAMETERS
  #N[i]<-equals(S[i],1) + equals(S[i],2)
  Nhat<- sum(Z[])   # estimated abundance
  ## PRIORS
  omega~dunif(0,1)
  #lambda~dunif(0,1)
  p~dunif(0,1)
}
# Data augmentation: append ndaug all-zero capture histories to the observed ones.
ndaug<-250
daug<- matrix(0,ndaug,nocc)   # NOTE(review): built but never used
chpit<-chpit[,,1]             # keep primary period 1 only; chpit is now 2-D
chpit<-chpit[which(rowSums(chpit)>0),]   # drop never-captured rows
chpit<-rbind(chpit,matrix(0,ndaug,nocc)) # append augmented zeros
library(abind)
#chpit<-abind(chpit,
#             array(0, c(ndaug, nocc,nprim)),
#             along = 1)
chac<-rbind(chac,matrix(0,ndaug,nprim))
S<-matrix(3,nrow(chac),ncol(chac))
## set captured fish as 2
# NOTE(review): chpit and chac can have different row counts here (chpit was
# filtered by rowSums above), and chpit is 2-D at this point, so the
# apply(chpit,c(1,3),max) below will error -- leftovers from the 3-D version.
S[which(apply(chpit,c(1),max)==1),]<-2
S[which(apply(chpit,c(1,3),max)+chac>0)]<-1
tagged<-c(tagged,rep(0,ndaug))
# Data passed to JAGS: augmented PIT capture histories only.
dat<-list(
  M=nrow(chpit),
  chpit=chpit,
  #chac=chac,
  #tagged=tagged,
  #nprim=nprim,
  nocc=nocc)
Z<-apply(chpit,1,max)   # initial membership values: ever-observed => member
inits<- function()
{
  list(omega=0.5,p=0.1,Z=Z)
}
params<-c("omega","p","Nhat")
# Fit: 3 chains, 15k iterations, 6k burn-in, thinning by 2.
out <- jags(data=dat,
            inits=inits,
            parameters=params,
            model.file=primary,
            n.chains = 3,
            n.iter = 15000,
            n.burnin = 6000,
            n.thin=2,
            working.directory=getwd())
out
Ntrue   # compare the true N with the posterior summary of Nhat in `out`
omega<-0.6
lambda<-1
nocc<-4
n<-250
Z<-rbinom(n,1,omega)
indx<- which(Z==1)
Ntrue<-sum(Z)
nprim<- 4
psi<-c(lambda,
(1-lambda))
S<-matrix(3,n,nprim)
for(i in 1:nprim)
{
S[indx,i]<-apply(rmultinom(
sum(Z),1,psi),
2,which.max)
}
p<-0.3
ch<-array(0,c(nrow(states),nocc,nprim))
for(j in 1:nprim)
{
for(i in 1:nocc)
{
indx<- which(S[,j]==1)
ch[indx,i,j]<-rbinom(
length(indx),
1,p)
}
}
captured1<- apply(ch[,,1],1,max)
acoustic<- sample(which(captured1==1),10,
replace=FALSE)
tagged<-rep(0,length(states))
tagged[which(apply(ch,1,max)==1)]<-1
tagged[acoustic]<-2
reciever<-c(0,1)
ch_acoustic<- matrix(0,length(states),nprim)
ch_acoustic[which(S[,2]==1),2]<-1
# subset only captured fish
captured<- apply(ch,1,max)
indx<-which(captured==1)
chpit<- ch[indx,,]
chac<- ch_acoustic[indx,]
tagged<-tagged[indx]
primary<- function()
{
for(i in 1:M)
{
Z[i]~dbern(omega)
}
for(i in 1:M)
{
for(j in 1:nocc)
{
cap_p[i,j]<-Z[i]*p
chpit[i,j]~dbern(cap_p[i,j])
}
}
## DERIVED PARAMETERS
#N[i]<-equals(S[i],1) + equals(S[i],2)
Nhat<- sum(Z[])
## PRIORS
omega~dunif(0,1)
#lambda~dunif(0,1)
p~dunif(0,1)
}
ndaug<-250
daug<- matrix(0,ndaug,nocc)
chpit<-chpit[,,1]
chpit<-chpit[which(rowSums(chpit)>0),]
chpit<-rbind(chpit,matrix(0,ndaug,nocc))
library(abind)
#chpit<-abind(chpit,
# array(0, c(ndaug, nocc,nprim)),
# along = 1)
chac<-rbind(chac,matrix(0,ndaug,nprim))
S<-matrix(3,nrow(chac),ncol(chac))
## set captured fish as 2
S[which(apply(chpit,c(1),max)==1),]<-2
S[which(apply(chpit,c(1,3),max)+chac>0)]<-1
tagged<-c(tagged,rep(0,ndaug))
dat<-list(
M=nrow(chpit),
chpit=chpit,
#chac=chac,
#tagged=tagged,
#nprim=nprim,
nocc=nocc)
Z<-apply(chpit,1,max)
inits<- function()
{
list(omega=0.5,p=0.1,Z=Z)
}
params<-c("omega","p","Nhat")
out <- jags(data=dat,
inits=inits,
parameters=params,
model.file=primary,
n.chains = 3,
n.iter = 15000,
n.burnin = 6000,
n.thin=2,
working.directory=getwd())
out
Ntrue |
###### Mike Petroni 5/20/2021
# This study ranks EGU's based on health costs, EJ considerations, and Carbon Emissions
###
######## Maps
###
library(tmap)
library(sf)
#generator map
#here we do a four panel map with marginal emissions costs
# dat_fac comes from the upstream data-acquisition script (not shown here).
facmap <- st_as_sf(as.data.frame(dat_fac), coords = c("LON", "LAT"))
summary(facmap$marg_pm)
library(spData)
library(tmaptools)
us_states2163 = st_transform(us_states, 2163)   # US National Atlas equal-area CRS
# Panel 1 of the 4-panel map: marginal NOx health cost per ton per facility.
# Per-panel legends are suppressed; a shared legend is assembled below.
nox <- tm_shape(us_states2163) +
  tm_polygons() +
  tm_shape(facmap) +
  tm_dots(size = .1, shape = 18, col = "marg_nox",
          breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
          palette = "YlOrRd",
          legend.show = FALSE,
          title = "2018 Marginal NOx Health Costs") +
  tm_layout(title= 'Nitrogen Oxides',
            title.position = c('left', 'bottom'),
            title.size = .8)
# Panel 2: marginal PM2.5 health cost per ton. Fixed copy-pasted title (it
# said "NOx"); the title is currently inert because legend.show = FALSE, but
# keep it correct in case the shared legend is ever dropped.
pm <- tm_shape(us_states2163) +
  tm_polygons() +
  tm_shape(facmap) +
  tm_dots(size = .1, shape = 18, col = "marg_pm",
          breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
          palette = "YlOrRd",
          legend.show = FALSE,
          title = "2018 Marginal PM2.5 Health Costs") +
  tm_layout(title= 'Primary Particulate Matter 2.5',
            title.position = c('left', 'bottom'),
            title.size = .8)
# Panel 3: marginal SO2 health cost per ton. Fixed copy-pasted title (it
# said "NOx"); inert while legend.show = FALSE, but kept correct.
so2 <- tm_shape(us_states2163) +
  tm_polygons() +
  tm_shape(facmap) +
  tm_dots(size = .1, shape = 18, col = "marg_so2",
          breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
          palette = "YlOrRd",
          legend.show = FALSE,
          title = "2018 Marginal SO2 Health Costs") +
  tm_layout(title= 'Sulphur Dioxide',
            title.position = c('left', 'bottom'),
            title.size = .8)
# Shared legend panel: an invisible border layer carrying a manual legend
# whose 7 fill classes match the break points used in the three dot panels.
legend <- tm_shape(us_states2163) +
  tm_borders(col = "#ffffff") + tm_add_legend(type = "fill",
            col = c(tmaptools::get_brewer_pal("YlOrRd", n = 7, plot = F)),
            labels = c("$1,000 to $5,000", "$5,000 to $10,000",
                       "$10,000 to $50,000", "$50,000 to $100,000",
                       "$100,000 to $500,000", "$500,000 to $1,000,000",
                       "$1,000,000 to $5,000,000"),
            title = "Marginal Emissions Cost Per Ton") +
  tm_legend(legend.outside = F,
            legend.title.size = 3.8,
            legend.text.size = 2.3,
            legend.bg.color = "white",
            legend.bg.alpha = 1,
            legend.position = c("left", "top")) +
  tm_layout(frame = FALSE,
            legend.width = .95)
# Assemble the four panels into one figure.
tmap_arrange(nox, pm, so2, legend)
#Population characteristics map
RColorBrewer::brewer.pal(6, "YlOrRd")   # NOTE(review): result is unused
#full on
us_states2163 = st_transform(us_states, 2163)
# Dot map: size and colour both encode the vulnerable population within 50 km.
tm_shape(us_states2163) +
  tm_polygons(col = "#989898") +
  tm_shape(facmap) +
  tm_dots(size = "VUL_POP_50k", shape = 18, alpha = .5,
          col = "VUL_POP_50k",
          palette = "viridis",
          style = "pretty",
          title = "Total Vulnerable Population Within 50km",
          title.size = "Total Vulnerable Population Within 50km") +
  tm_layout(legend.outside = T,
            frame = FALSE)
# NOTE(review): this tm_fill() is not attached to any map object (there is no
# trailing `+` above); as written it only builds and discards a layer spec.
tm_fill(col = "tot_cost_em",
        style = "quantile",
        title = "Emissions Health Costs 2018",
        legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), "%")))
#EJ map
################## data ########
# we use the generator, ej, and bg pop center point data from the data
# acquisition file (`dat12`, `dat_fac`, `points`, `EJscreen`, `block_groups`
# are all created/loaded elsewhere)
generators2 <- dat12 %>% filter(STATE_CODE == 'AL') %>%
dplyr::select(LON, LAT, ORISPL, PNAME) %>% distinct()
############ make a map of the buffer analysis
#select facility
sdf1 <- dat_fac %>% filter(PNAME == "James H Miller Jr")
#make facility spatial (sp object via coordinates<-; sdf1 kept as a plain df)
sdf <- dat_fac %>% filter(PNAME == "James H Miller Jr")
coordinates(sdf) <- c("LON", "LAT")
#obtain the polygons for block groups in facility state
bama <- block_groups(state = "AL")
#make buffers
# NOTE(review): buffer() is presumably raster::buffer on the sp object;
# widths are metres only if the CRS supports it -- verify.
mybuff10k <- st_as_sf(buffer(sdf, width = 10000))
mybuff5k <- st_as_sf(buffer(sdf, width = 5000))
mybuff50k <- st_as_sf(buffer(sdf, width = 50000))
#get intersections for the 50k intersection
inter50k <- st_intersection(points, mybuff50k)
# Drop geometry so inter50k becomes a plain data.frame
st_geometry(inter50k) = NULL
# Build the 12-digit block-group GEOID from its FIPS components
inter50k <- inter50k %>%
mutate(GEOID = paste0(STATEFP,COUNTYFP,TRACTCE,BLKGRPCE))
bama2 <- subset(bama, bama$GEOID %in% inter50k$GEOID)
bama2 <- merge(bama2, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
# make buffers outside of SF
# NOTE(review): mybuff* are reassigned as sp objects here, then passed to
# st_intersection() below, which expects sf input -- confirm this works with
# the package versions in use.
mybuff10k <- buffer(sdf, width = 10000)
mybuff5k <- buffer(sdf, width = 5000)
mybuff50k <- buffer(sdf, width = 50000)
#intersect buffer with blockgroup population centerpoints
inter10k <- st_intersection(points, mybuff10k)
inter5k <- st_intersection(points, mybuff5k)
inter50k <- st_intersection(points, mybuff50k)
# NOTE(review): unlike the first pass, GEOID is NOT rebuilt via mutate()
# here; if `points` lacks a GEOID column these subsets select nothing --
# verify.
bama1 <- subset(bama, bama$GEOID %in% inter10k$GEOID)
bama2 <- subset(bama, bama$GEOID %in% inter50k$GEOID)
bama1 <- merge(bama1, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
bama2 <- merge(bama2, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
###make a sf obj of the facility
sdf2 <- st_as_sf(as.data.frame(sdf1), coords = c("LON", "LAT"))
# Convert the EJSCREEN demographic index to a percentage for display
bama2$VULEOPCT <- bama2$VULEOPCT*100
################# maps ############
#set the border
# Expand the bounding box to the right so the legend sits beside the
# polygons (alternative expansions kept commented for reference)
bbox_new <- st_bbox(bama2) # current bounding box
xrange <- bbox_new$xmax - bbox_new$xmin # range of x values
yrange <- bbox_new$ymax - bbox_new$ymin # range of y values
# bbox_new[1] <- bbox_new[1] - (0.25 * xrange) # xmin - left
bbox_new[3] <- bbox_new[3] + (1 * xrange) # xmax - right
# bbox_new[2] <- bbox_new[2] - (0.25 * yrange) # ymin - bottom
#bbox_new[4] <- bbox_new[4] + (0.2 * yrange) # ymax - top
bbox_new <- bbox_new %>% # take the bounding box ...
st_as_sfc() # ... and make it a sf polygon
# Choropleth of the EJSCREEN demographic index, with the 5/10/50 km buffer
# rings and the facility point overlaid
map1a <- tm_shape(bama2, bbox = bbox_new) + tm_borders() +
tm_fill(col = "VULEOPCT",
style = "quantile",
title = "EJSCREEN Demographic Index - \nLow Income and Minority Percentage",
legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), "%"))) +
tm_shape(mybuff50k) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_borders(lwd = 2) +
tm_shape(mybuff10k) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_borders(lwd = 2) +
tm_shape(mybuff5k) +
tm_borders(lwd = 2) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_shape(sdf2) + tm_symbols(size = .5, shape = 18, col = "#000000") +
tm_compass(type = "4star", position = c(0.01, 0.83),
size = 3) +
tm_scale_bar(breaks = c(0, 10),
position = c(0.4, 0.01),
text.size = 1) +
# NOTE(review): main.title/title.position are tm_layout() arguments; here
# they are passed through tm_legend()'s ... -- confirm they take effect.
tm_legend(#legend.outside = TRUE,
legend.title.size = 2,
legend.text.size = 1.5,
legend.bg.color = "white",
legend.bg.alpha = 1,
legend.position = c("right", "top"),
main.title= 'Alabama Power Company (Miller Power Plant)',
title.position = c('left', 'top'))
#tm_text("Name", just = "bottom", xmod = 0.5, size = 0.8)
# Print (render) the map
map1a
## NEW YORK!
#state shape
# NOTE(review): absolute, user-specific local paths -- parameterise for
# portability.
shapeny <- st_read(dsn = "C:/Users/Mike Petroni/Documents/GitHub/NYenviroScreen/data/NYS_Civil_Boundaries_SHP",
layer = "State_Shoreline")
#counties
nycounties <- st_read(dsn = "C:/Users/Mike Petroni/Documents/GitHub/NYenviroScreen/data/NYS_Civil_Boundaries_SHP",
layer = "Counties_Shoreline")
# Potential Environmental Justice Areas; readOGR (rgdal) returns an sp object
pejas_shp <- readOGR(dsn = "./Data/PEJA_NY_2021", layer = "PEJA")
pejas_shp <- subset(pejas_shp, pejas_shp$PEJA == "Yes")
# Facility points (NY_facs built elsewhere) as an sf object
NY_facs_mp <- st_as_sf(as.data.frame(NY_facs), coords = c("LON", "LAT"))
# Express the EJ benefit share as a percentage for the legend
NY_facs_mp$EJ_benefit_per <- NY_facs_mp$EJ_benefit_per*100
# NY map: dot size = total EJ benefit ($), colour = % of benefit in EJ areas
tm_shape(shapeny) +
tm_polygons(col = "#cbcbcb") +
tm_shape(pejas_shp) +
tm_fill(col = "#8a6363") +
tm_shape(NY_facs_mp) +
tm_dots(size = "EJ_benefit", shape = 18, alpha = .9,
scale = 2,
shapes.style = "pretty",
sizes.legend = c(200000, 2000000, 20000000, 200000000),
sizes.legend.labels = c("$200,000", "$2Mil", "$20Mil", "$200Mil"),
shapes.legend.fill = "grey80",
col = "EJ_benefit_per",
palette = "viridis",
style = "pretty",
legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), " %")),
title = "Share of Total Co-Pollutant Reduction \nBenefit in EJ Communities Within 50Km",
title.size = "Generator Co-Pollutant Reduction \nEJ Benefit Within 50km") +
tm_add_legend(type = "fill",
col = "#8a6363",
labels = c("Potential Environmental Justice Area")) +
tm_layout(legend.outside = T,
frame = FALSE) +
tm_legend(legend.title.size = 2,
legend.text.size = 1.5)
| /Mapping.R | permissive | mdpetron/Super-Health-Polluters | R | false | false | 8,595 | r | ###### Mike Petroni 5/20/2021
# This study ranks EGU's based on health costs, EJ considerations, and Carbon Emissions
###
######## Maps
###
library(tmap)
library(sf)
#generator map
#here we do a four panel map with marginal emissions costs
facmap <- st_as_sf(as.data.frame(dat_fac), coords = c("LON", "LAT"))
summary(facmap$marg_pm)
library(spData)
library(tmaptools)
us_states2163 = st_transform(us_states, 2163)
nox <- tm_shape(us_states2163) +
tm_polygons() +
tm_shape(facmap) +
tm_dots(size = .1, shape = 18, col = "marg_nox",
breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
palette = "YlOrRd",
legend.show = FALSE,
title = "2018 Marginal NOx Health Costs") +
tm_layout(title= 'Nitrogen Oxides',
title.position = c('left', 'bottom'),
title.size = .8)
pm <- tm_shape(us_states2163) +
tm_polygons() +
tm_shape(facmap) +
tm_dots(size = .1, shape = 18, col = "marg_pm",
breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
palette = "YlOrRd",
legend.show = FALSE,
title = "2018 Marginal NOx Health Costs") +
tm_layout(title= 'Primary Particulate Matter 2.5',
title.position = c('left', 'bottom'),
title.size = .8)
so2 <- tm_shape(us_states2163) +
tm_polygons() +
tm_shape(facmap) +
tm_dots(size = .1, shape = 18, col = "marg_so2",
breaks = c(1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000),
palette = "YlOrRd",
legend.show = FALSE,
title = "2018 Marginal NOx Health Costs") +
tm_layout(title= 'Sulphur Dioxide',
title.position = c('left', 'bottom'),
title.size = .8)
legend <- tm_shape(us_states2163) +
tm_borders(col = "#ffffff") + tm_add_legend(type = "fill",
col = c(tmaptools::get_brewer_pal("YlOrRd", n = 7, plot = F)),
labels = c("$1,000 to $5,000", "$5,000 to $10,000",
"$10,000 to $50,000", "$50,000 to $100,000",
"$100,000 to $500,000", "$500,000 to $1,000,000",
"$1,000,000 to $5,000,000"),
title = "Marginal Emissions Cost Per Ton") +
tm_legend(legend.outside = F,
legend.title.size = 3.8,
legend.text.size = 2.3,
legend.bg.color = "white",
legend.bg.alpha = 1,
legend.position = c("left", "top")) +
tm_layout(frame = FALSE,
legend.width = .95)
tmap_arrange(nox, pm, so2, legend)
#Population characteristics map
RColorBrewer::brewer.pal(6, "YlOrRd")
#full on
us_states2163 = st_transform(us_states, 2163)
tm_shape(us_states2163) +
tm_polygons(col = "#989898") +
tm_shape(facmap) +
tm_dots(size = "VUL_POP_50k", shape = 18, alpha = .5,
col = "VUL_POP_50k",
palette = "viridis",
style = "pretty",
title = "Total Vulnerable Population Within 50km",
title.size = "Total Vulnerable Population Within 50km") +
tm_layout(legend.outside = T,
frame = FALSE)
tm_fill(col = "tot_cost_em",
style = "quantile",
title = "Emissions Health Costs 2018",
legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), "%")))
#EJ map
################## data ########
# we use the generator, ej, and bg pop center point data from the data aqusion file
generators2 <- dat12 %>% filter(STATE_CODE == 'AL') %>%
dplyr::select(LON, LAT, ORISPL, PNAME) %>% distinct()
############ make a map of the buffer analysis
#select facility
sdf1 <- dat_fac %>% filter(PNAME == "James H Miller Jr")
#make facility spatial
sdf <- dat_fac %>% filter(PNAME == "James H Miller Jr")
coordinates(sdf) <- c("LON", "LAT")
#obtain the polygons for block groups in facility state
bama <- block_groups(state = "AL")
#make buffers
mybuff10k <- st_as_sf(buffer(sdf, width = 10000))
mybuff5k <- st_as_sf(buffer(sdf, width = 5000))
mybuff50k <- st_as_sf(buffer(sdf, width = 50000))
#get intersections for the 50k intersection
inter50k <- st_intersection(points, mybuff50k)
st_geometry(inter50k) = NULL
inter50k <- inter50k %>%
mutate(GEOID = paste0(STATEFP,COUNTYFP,TRACTCE,BLKGRPCE))
bama2 <- subset(bama, bama$GEOID %in% inter50k$GEOID)
bama2 <- merge(bama2, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
# make buffers outside of SF
mybuff10k <- buffer(sdf, width = 10000)
mybuff5k <- buffer(sdf, width = 5000)
mybuff50k <- buffer(sdf, width = 50000)
#intersect buffer with blockgroup population centerpoints
inter10k <- st_intersection(points, mybuff10k)
inter5k <- st_intersection(points, mybuff5k)
inter50k <- st_intersection(points, mybuff50k)
bama1 <- subset(bama, bama$GEOID %in% inter10k$GEOID)
bama2 <- subset(bama, bama$GEOID %in% inter50k$GEOID)
bama1 <- merge(bama1, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
bama2 <- merge(bama2, EJscreen, by.x = "GEOID", by.y = "ID", all.x = T)
###make a sf obj of the facility
sdf2 <- st_as_sf(as.data.frame(sdf1), coords = c("LON", "LAT"))
bama2$VULEOPCT <- bama2$VULEOPCT*100
################# maps ############
#set the border
bbox_new <- st_bbox(bama2) # current bounding box
xrange <- bbox_new$xmax - bbox_new$xmin # range of x values
yrange <- bbox_new$ymax - bbox_new$ymin # range of y values
# bbox_new[1] <- bbox_new[1] - (0.25 * xrange) # xmin - left
bbox_new[3] <- bbox_new[3] + (1 * xrange) # xmax - right
# bbox_new[2] <- bbox_new[2] - (0.25 * yrange) # ymin - bottom
#bbox_new[4] <- bbox_new[4] + (0.2 * yrange) # ymax - top
bbox_new <- bbox_new %>% # take the bounding box ...
st_as_sfc() # ... and make it a sf polygon
map1a <- tm_shape(bama2, bbox = bbox_new) + tm_borders() +
tm_fill(col = "VULEOPCT",
style = "quantile",
title = "EJSCREEN Demographic Index - \nLow Income and Minority Percentage",
legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), "%"))) +
tm_shape(mybuff50k) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_borders(lwd = 2) +
tm_shape(mybuff10k) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_borders(lwd = 2) +
tm_shape(mybuff5k) +
tm_borders(lwd = 2) +
#tm_fill(col = "#f2f2f2", alpha = .3) +
tm_shape(sdf2) + tm_symbols(size = .5, shape = 18, col = "#000000") +
tm_compass(type = "4star", position = c(0.01, 0.83),
size = 3) +
tm_scale_bar(breaks = c(0, 10),
position = c(0.4, 0.01),
text.size = 1) +
tm_legend(#legend.outside = TRUE,
legend.title.size = 2,
legend.text.size = 1.5,
legend.bg.color = "white",
legend.bg.alpha = 1,
legend.position = c("right", "top"),
main.title= 'Alabama Power Company (Miller Power Plant)',
title.position = c('left', 'top'))
#tm_text("Name", just = "bottom", xmod = 0.5, size = 0.8)
map1a
## NEW YORK!
#state shape
shapeny <- st_read(dsn = "C:/Users/Mike Petroni/Documents/GitHub/NYenviroScreen/data/NYS_Civil_Boundaries_SHP",
layer = "State_Shoreline")
#counties
nycounties <- st_read(dsn = "C:/Users/Mike Petroni/Documents/GitHub/NYenviroScreen/data/NYS_Civil_Boundaries_SHP",
layer = "Counties_Shoreline")
pejas_shp <- readOGR(dsn = "./Data/PEJA_NY_2021", layer = "PEJA")
pejas_shp <- subset(pejas_shp, pejas_shp$PEJA == "Yes")
NY_facs_mp <- st_as_sf(as.data.frame(NY_facs), coords = c("LON", "LAT"))
NY_facs_mp$EJ_benefit_per <- NY_facs_mp$EJ_benefit_per*100
tm_shape(shapeny) +
tm_polygons(col = "#cbcbcb") +
tm_shape(pejas_shp) +
tm_fill(col = "#8a6363") +
tm_shape(NY_facs_mp) +
tm_dots(size = "EJ_benefit", shape = 18, alpha = .9,
scale = 2,
shapes.style = "pretty",
sizes.legend = c(200000, 2000000, 20000000, 200000000),
sizes.legend.labels = c("$200,000", "$2Mil", "$20Mil", "$200Mil"),
shapes.legend.fill = "grey80",
col = "EJ_benefit_per",
palette = "viridis",
style = "pretty",
legend.format=list(fun=function(x) paste0(formatC(x, digits=0, format="f"), " %")),
title = "Share of Total Co-Pollutant Reduction \nBenefit in EJ Communities Within 50Km",
title.size = "Generator Co-Pollutant Reduction \nEJ Benefit Within 50km") +
tm_add_legend(type = "fill",
col = "#8a6363",
labels = c("Potential Environmental Justice Area")) +
tm_layout(legend.outside = T,
frame = FALSE) +
tm_legend(legend.title.size = 2,
legend.text.size = 1.5)
|
## This function creates a special "matrix" object that can cache its inverse
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing `x` (the matrix) and
## `inv` (the cached inverse, NULL until computed).
makeCacheMatrix <- function(x=matrix()){
  inv <- NULL
  # Replace the stored matrix and invalidate any stale cached inverse
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Retrieve the stored matrix
  get <- function() {
    x
  }
  # Store a computed inverse in the cache
  setInverse <- function(Inverse) {
    inv <<- Inverse
  }
  # Retrieve the cached inverse (NULL if not yet computed)
  getInverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## Compute the inverse of the special "matrix" returned by makeCacheMatrix,
## reusing the cached inverse when one is already stored.
##
## x   : a cache-matrix list exposing get/set/getInverse/setInverse.
## ... : further arguments forwarded to solve() (previously dropped -- fixed).
## Returns the inverse matrix (instead of print()ing it, so assignment does
## not spam the console; the value is unchanged for existing callers).
cacheSolve <- function(x, ...) {
  ## Fast path: return the cached inverse if already computed
  inv <- x$getInverse()
  if(!is.null(inv)) {
    message("Cached Data...........")
    return(inv)
  }
  mat <- x$get()
  ##using solve() function to get inverse of matrix, forwarding extra args
  inv <- solve(mat, ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | sidhant-152/ProgrammingAssignment2 | R | false | false | 861 | r | ## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x=matrix()){
inv <- NULL
##set function
set <- function(y){
x <<-y
inv <<-NULL
}
##get function
get <-function()x
##setInverse function
setInverse <- function(Inverse)inv <<- Inverse
##getInverse function
getInverse <- function()inv
list(set=set,get=get,setInverse=setInverse,getInverse=getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
cacheSolve <- function(x, ...) {
##Inverse of matrix
inv <- x$getInverse()
if(!is.null(inv)) {
message("Cached Data...........")
return(inv)
}
mat <- x$get()
##using solve() function to get inverse of matrix
inv <- solve(mat)
x$setInverse(inv)
print(inv)
}
|
###########################################################
# Diversity measures 1, 2, 4=5 (depending on option used) #
###########################################################
## Pairwise diversity between two integer copy-number profiles.
##
## x, y            : equal-length copy-number state vectors (one value per bin).
## only_alt_bins   : if TRUE (default), divide the number of differing bins by
##                   the number of bins where at least one profile deviates
##                   from `ploidy`; if FALSE, divide by the total bin count.
## absolute_number : if TRUE, return the raw count of differing bins
##                   (overrides both normalisations, as before).
## ploidy          : baseline copy number (default 2).
## Fixes: TRUE/FALSE instead of reassignable T/F defaults; the row-wise
## apply() over cbind(x, y) is replaced by an equivalent vectorised test
## (length(which()) drops NA comparisons in both formulations).
diff_state_dist <- function(x, y, only_alt_bins = TRUE, absolute_number = FALSE, ploidy = 2) {
  # Bins where the two profiles disagree
  n_diff <- length(which(x != y))
  if (absolute_number) {
    return(n_diff)
  }
  if (only_alt_bins) {
    # Denominator: bins where at least one profile deviates from ploidy
    n_aberrant <- length(which(x != ploidy | y != ploidy))
    return(n_diff / n_aberrant)
  }
  # Fraction of all bins that differ
  n_diff / length(x)
}
#########################################################
# Salpie's divergence measure with ploidy normalisation #
#########################################################
## Salpie's divergence measure between two copy-number profiles.
##
## to_compare : matrix with the two samples as rows and bins as columns.
## ploidy     : baseline copy number (default 2).
## Returns (private losses + private gains) / total aneuploid entries, where
## "private" means exactly one of the two samples carries the event in that
## bin. Returns NaN (0/0) when there is no aneuploidy at all, as before.
## The original built frequency tables and nested `if (length(...))` guards;
## counting columns directly is equivalent and drops the unused `scores`,
## `loss_anueploidy` and `gain_anueploidy` variables.
calculateDivergence <- function(to_compare, ploidy = 2) {
  # Bins where exactly one of the two samples shows a loss / a gain
  score_loss <- sum(colSums(to_compare < ploidy) == 1)
  score_gain <- sum(colSums(to_compare > ploidy) == 1)
  # Every matrix entry deviating from the baseline ploidy
  anueploidy <- sum(to_compare != ploidy)
  (score_loss + score_gain) / anueploidy
}
###################################################
# Diversity measures 3 (depending on option used) #
###################################################
## Manhattan (L1) distance between two copy-number profiles.
##
## x, y                    : equal-length numeric copy-number vectors.
## normalise_by_bin_number : if TRUE (default), report the mean absolute
##                           difference per bin instead of the raw sum.
## Fix: TRUE instead of the reassignable T as the default value.
genetic_distance <- function(x, y, normalise_by_bin_number = TRUE) {
  dist <- sum(abs(x - y))
  if (normalise_by_bin_number) {
    dist <- dist / length(x)
  }
  dist
}
##############################################
# L2RSS - a comparison of log2ratio profiles #
##############################################
## L2RSS: compare two log2-ratio segment profiles across a grid of purity
## pairs and return the smallest sum-of-squared copy-number differences.
##
## segs_col_a/b     : per-bin log2 ratios for the two samples (equal length).
## exp_distance     : cohort normaliser (median distance of non-same-patient
##                    comparisons; hard-coded default from the cohort).
## normalise_to_exp : divide by exp_distance and cap at 1 (default TRUE;
##                    fixed from the reassignable T).
## min_purity       : lowest purity considered in the grid.
log2ratio_comparison <- function(segs_col_a, segs_col_b, exp_distance = 1848.691, normalise_to_exp = TRUE, min_purity = 0.2) {
  # Convert log2 ratios to continuous copy number for purity rho, tumour
  # ploidy psit and platform compression gamma
  calcCN <- function(lrrs, rho, psit, gamma = 1) {
    psi <- (2 * (1 - rho)) + (rho * psit)
    ((psi * (2^(lrrs / gamma))) - (2 * (1 - rho))) / rho
  }
  # Purity-pair grid: vary A's purity with B fixed at 1, then vice versa
  parameter_comparison <- rbind(cbind(seq(min_purity, 0.99, by = 0.01), 1),
                                cbind(1, seq(1, min_purity, by = -0.01)))
  # Sum of squared CN differences for every purity pair
  # (vapply replaces lapply + unlist; identical arithmetic and grid order)
  search <- vapply(seq_len(nrow(parameter_comparison)), function(r) {
    CNa <- calcCN(lrrs = segs_col_a, rho = parameter_comparison[r, 1], psit = 2)
    CNb <- calcCN(lrrs = segs_col_b, rho = parameter_comparison[r, 2], psit = 2)
    sum((CNa - CNb)^2)
  }, numeric(1))
  # Shortest distance over the grid (first minimum, as which.min before)
  d <- search[which.min(search)]
  if (normalise_to_exp) {
    # Normalise to the cohort distance and cap at 1
    d <- d / exp_distance
    if (d > 1) d <- 1
  }
  d
}
#######################################
# Diversity measure 6 helper function #
#######################################
## Per-chromosome-arm summary of log2 ratios.
##
## df        : bin-level data frame with a `chromosome` column; the value to
##             summarise is taken from column index `l2r_col`.
## pqs       : character vector ("p"/"q") assigning each row of df to an arm.
## method    : summary statistic, "median" (default) or "mean".
## l2r_col   : column index holding the log2-ratio values.
## report_NA : keep arms with no bins (NA/NaN summaries) in the output?
##             (default FALSE; fixed from the reassignable F)
## Returns a named numeric vector with names like "1p", "1q", ...
armCN <- function(df, pqs, method = c("median", "mean"), l2r_col = 4, report_NA = FALSE) {
  method <- match.arg(method)
  # Select the summary function once instead of branching per chromosome
  stat <- if (method == "median") median else mean
  chrs <- unique(df$chromosome)
  per_chr <- lapply(chrs, function(c) {
    p <- stat(df[df$chromosome == c & pqs == "p", l2r_col], na.rm = TRUE)
    q <- stat(df[df$chromosome == c & pqs == "q", l2r_col], na.rm = TRUE)
    out <- c(p, q)
    names(out) <- paste0(c, c("p", "q"))
    out
  })
  out <- unlist(per_chr)
  # Drop empty arms unless the caller asked for them
  if (!report_NA) out <- out[!is.na(out)]
  out
}
############################################################
# Using biomaRt to calculate number of genes per bin #
# - may be really slow, might be better to download refseq #
# - only compatible with hg38 right now #
############################################################
# Count protein-coding genes overlapping each genomic bin via Ensembl biomaRt.
# bins   : data.frame whose first three columns (chromosome, start, end) are
#          echoed in the output and used to build the query regions.
# genome : only "hg38" is accepted (match.arg on a one-element choice).
# Returns data.frame(chromosome, start, end, gene_number).
# NOTE(review): issues one web query per bin -- slow and network-dependent,
# as the banner above warns; a single bulk query (cf. getGeneBinIndex) or a
# local annotation table would be much faster.
countGenesPerBin = function(bins, genome = "hg38") {
genome = match.arg(genome)
require("biomaRt")
# Set up BiomaRt
listMarts(host="www.ensembl.org") # result discarded; presumably a connectivity check -- confirm
ensembl = useMart(biomart = "ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl")
filters = listFilters(ensembl) # NOTE(review): assigned but never used
# One "chr:start:end" region string per bin
coords = paste0(bins$chromosome,":",bins$start,":",bins$end)
coords = as.list(coords)
count_entries = lapply(coords, function(b) {
# Get overlapping genes from biomaRt
results=getBM(attributes = c("chromosome_name", "start_position", "end_position", "hgnc_symbol"),
filters = c("chromosomal_region", "biotype"),
values = list(chromosomal_region=b, biotype="protein_coding"),
mart = ensembl)
nrow(results)
})
count_entries = unlist(count_entries)
out = data.frame(bins[,1:3], gene_number = count_entries)
return(out)
}
####################################################
# Using biomaRt to assign each gene to a bin #
# - only compatible with hg38 right now #
####################################################
# Assign each human protein-coding gene (Ensembl) to the genomic bin that
# contains the gene's midpoint.
# bins_locs : data.frame with chr/start/end columns defining the bins.
# genome    : only "hg38" is accepted.
# chrs      : chromosomes to keep (autosomes 1:22 by default).
# saveFile  : on-disk cache of the biomaRt query result.
# Returns an integer vector of bin indexes; genes whose midpoint falls in no
# bin are silently dropped by the final unlist().
# NOTE(review): the default cache path is user-specific (~/Downloads).
getGeneBinIndex = function(bins_locs, genome = "hg38", chrs = 1:22, saveFile="~/Downloads/human_protein_encoding_genes_ensembl.rds") {
genome = match.arg(genome)
require("biomaRt")
# Query Ensembl once and cache the result on disk for later runs
if (!file.exists(saveFile)){
print("Querying Biomart for protein coding genes")
ensembl = useMart(biomart = "ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl")
humanProteinCodingGenes = getBM(attributes = c("chromosome_name", "start_position", "end_position", "hgnc_symbol"),
filters = c("biotype"),
values = list(biotype="protein_coding"),
mart = ensembl)
saveRDS(humanProteinCodingGenes,file=saveFile)
} else {
print("Loading genes from savefile")
humanProteinCodingGenes = readRDS(saveFile)
}
# Subset the autosomes
hPCG_autosomes = humanProteinCodingGenes[humanProteinCodingGenes$chromosome_name %in% chrs,]
hPCG_autosomes = hPCG_autosomes[order(hPCG_autosomes$chromosome_name, hPCG_autosomes$start_position),]
rownames(hPCG_autosomes) = NULL
# Midpoint decides which bin a gene belongs to
hPCG_autosomes$mid_point = as.numeric(round(((hPCG_autosomes$end_position - hPCG_autosomes$start_position)/2) + hPCG_autosomes$start_position))
# Run through as a list, one lookup per gene
gene_bin_indexes = lapply(1:nrow(hPCG_autosomes), function(i) {
g = hPCG_autosomes[i,]
chr = g[,"chromosome_name"]
mid = g[,"mid_point"]
which(bins_locs$chr==chr & bins_locs$start<=mid & bins_locs$end>=mid)
})
# Produce the bin indexes
return(unlist(gene_bin_indexes))
}
#######################################
# Breakpoint functions from Salpie :) #
#######################################
## Convert a bin-by-sample copy-number table into a 0/1 breakpoint table.
## A bin is flagged 1 when its copy number differs from the previous bin in
## the same column (i.e. a new segment starts there); all other bins are 0.
## Fixes: seq_len() instead of 1:ncol() (safe for zero columns), and the
## table is fully zeroed up front -- the original `y[y > 0] <- 0` left any
## non-positive entries untouched.
convertToBreakpoints = function(cnTable){
  y = cnTable
  y[] <- 0  # start from an all-zero table of the same shape/type
  for (column in seq_len(ncol(cnTable))) {
    # Index of the first bin of each new copy-number run (never bin 1)
    breakpoints = (which(!!diff(as.numeric(cnTable[,column])))+1) #get indexes
    y[c(breakpoints),column] <- 1
  }
  return(y)
}
## Score every sample pair with getScoreCN and bind the scores to the pairs.
##
## cnTable : breakpoint table (Chr/Start/End + one 0/1 column per sample).
## pairs   : matrix/data.frame with one sample pair per row.
## maxgap  : breakpoint-matching slack (bp) forwarded to getScoreCN.
## Fix: the original called getScoreCN(cnTable, populationBreakpoints,
## maxgap, ...) -- `populationBreakpoints` is undefined in this file and
## getScoreCN only takes (cnTable, maxgap, pairs), so every call errored.
calculateRelatednessCn = function(cnTable, pairs, maxgap){
  pair_scores <- apply(pairs, 1, function(x){getScoreCN(cnTable, maxgap, as.character(x))})
  results <- cbind.data.frame(pairs, pair_scores)
  return(results)
}
# Breakpoint-discordance score between two samples' 0/1 breakpoint columns.
# cnTable : data.frame with Chr/Start/End plus one breakpoint column per
#           sample (as produced by convertToBreakpoints).
# maxgap  : slack (bp) allowed when matching breakpoints via findOverlaps.
# pairs   : character vector of the two sample column names to compare.
# Returns (total breakpoints - matched breakpoints) / total breakpoints,
# i.e. 0 = all breakpoints shared, 1 = none shared.
# NOTE(review): empty() is presumably plyr::empty and
# makeGRangesFromDataFrame/findOverlaps need GenomicRanges -- confirm both
# are attached by the calling script.
getScoreCN = function(cnTable, maxgap, pairs){
sample1 <- cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == "Start" | colnames(cnTable) == "End" | colnames(cnTable) == pairs[1])]
sample2 <- cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == "Start" | colnames(cnTable) == "End" | colnames(cnTable) == pairs[2])]
# Keep only rows where every column (including the breakpoint flag) is
# non-zero, i.e. the bins where this sample has a breakpoint
row_sample1 = apply(sample1, 1, function(row) all(row !=0 ))
sample1 <- sample1[row_sample1,]
row_sample2 = apply(sample2, 1, function(row) all(row !=0 ))
sample2 <- sample2[row_sample2,]
if (empty(sample1) | empty(sample2)){
# NOTE(review): a sample with no breakpoints yields 0 = "fully
# concordant", which may overstate similarity -- confirm intended.
score = 0
} else {
# Match breakpoint bins between the two samples within maxgap bp, at
# both the start and end coordinates
sample1_granges <- makeGRangesFromDataFrame(sample1[,c("Chr", "Start", "End")], start.field = "Start", end.field = "End")
sample2_granges <- makeGRangesFromDataFrame(sample2[,c("Chr", "Start", "End")], start.field = "Start", end.field = "End")
hits_start <- suppressWarnings(queryHits(findOverlaps(sample1_granges, sample2_granges, type = "start", maxgap = maxgap)))
hits_end <- suppressWarnings(queryHits(findOverlaps(sample1_granges, sample2_granges, type = "end", maxgap = maxgap)))
# Each start/end hit accounts for a breakpoint in BOTH samples
nconcordant_adj <- 2*(length(hits_start)+length(hits_end))
total_breakpoints <- sum(2*length(sample1_granges)+2*length(sample2_granges))
score = (total_breakpoints-nconcordant_adj)/total_breakpoints
}
return(score)
}
###################################
# Average mean length differences #
###################################
## Mean (or max) run length, in bins, of the regions where two samples'
## copy numbers differ, computed per chromosome.
##
## cnTable  : data.frame with a "Chr" column plus one column per sample.
## pairs    : character vector of the two sample column names to compare.
## pp       : unused; kept for interface compatibility.
## max_size : if TRUE return the longest differing run instead of the mean.
## Returns 0 when the two samples never differ.
## Fix: the original detected a differing bin by splitting paste0(a, b)
## into characters and checking for duplicates, which misreads any
## multi-digit copy number (e.g. 1 vs 12 looked "equal"). Runs are now
## keyed with a separator and compared numerically.
get_diffLengths = function(cnTable, pairs, pp = 2, max_size = FALSE) {
  a <- cnTable[, c(colnames(cnTable) == "Chr" | colnames(cnTable) == pairs[1])]
  b <- cnTable[, c(colnames(cnTable) == "Chr" | colnames(cnTable) == pairs[2])]
  a[, 2] <- as.numeric(as.character(a[, 2]))
  b[, 2] <- as.numeric(as.character(b[, 2]))
  len <- NULL
  for (chr in unique(a$Chr)) {
    sub_a <- a[a$Chr == chr, ]
    sub_b <- b[b$Chr == chr, ]
    # Runs of identical (a, b) state pairs; "_" avoids digit ambiguity
    runs <- rle(paste(sub_a[, 2], sub_b[, 2], sep = "_"))
    # First bin index of each run, used to test whether that run differs
    run_starts <- cumsum(c(1, head(runs$lengths, -1)))
    va <- sub_a[run_starts, 2]
    vb <- sub_b[run_starts, 2]
    # Differ when values are unequal, or exactly one of them is NA
    is_diff <- (va != vb) | xor(is.na(va), is.na(vb))
    is_diff[is.na(is_diff)] <- FALSE
    len <- c(len, runs$lengths[is_diff])
  }
  # No differing runs at all -> report zero
  if (length(len) == 0) len <- 0
  if (max_size) max(len) else mean(len)
}
#################################################
# Pairwise events comparison, used for rebuttal #
#################################################
## Pairwise per-chromosome event comparison between two copy-number calls.
## Collapses the per-bin (sample_1, sample_2) state pairs into runs and
## flags the runs where the two samples disagree.
##
## sample_1, sample_2 : per-bin copy-number state vectors (same order as
##                      bin_names).
## bin_names          : QDNAseq-style "chr:start-end" bin identifiers.
## chrs               : chromosomes to process (default 1:22).
## Returns a data.frame with chr, run length, the "a_b" state string, and a
## logical Change flag per run.
getEvents = function(sample_1, sample_2, bin_names, chrs = 1:22) {
  per_chr <- lapply(chrs, function(chr) {
    # Bins belonging to this chromosome
    idx <- grep(paste0("^", chr, ":"), bin_names)
    # One "a_b" string per bin, collapsed into runs of identical pairs
    pair_states <- paste0(sample_1[idx], "_", sample_2[idx])
    runs <- rle(pair_states)
    # A run is a "change" when the two samples' states differ
    is_change <- unlist(lapply(strsplit(as.character(runs$values), "_"),
                               function(i) diff(as.numeric(i)) != 0))
    data.frame(chr = chr, len = runs$lengths, CNs = runs$values, Change = is_change)
  })
  do.call(rbind, per_chr)
}
| /Diversity_Metrics/DiversityMetrics_backup_GC.R | no_license | BCI-EvoCa/CNA_stability | R | false | false | 11,924 | r | ###########################################################
# Diversity measures 1, 2, 4=5 (depending on option used) #
###########################################################
diff_state_dist = function(x, y, only_alt_bins = T, absolute_number = F, ploidy = 2) {
frac_genome_alt = length(which(x!=y)) / length(x)
if(only_alt_bins) {
bins_diff = length(which(x!=y))
c = cbind(x, y)
bins_ab = length(which(apply(c, 1, function(i) any(i!=ploidy))))
output = bins_diff / bins_ab
} else {output = frac_genome_alt}
if(absolute_number) {output = length(which(x!=y))}
return(output)
}
#########################################################
# Salpie's divergence measure with ploidy normalisation #
#########################################################
calculateDivergence <- function(to_compare, ploidy = 2) {
fab <- as.data.frame(table(colSums(to_compare<ploidy)))
if (length(fab$Freq[fab$Var1==1] > 0)) {
if (fab$Freq[fab$Var1==1] > 0) {
score_loss <- fab$Freq[fab$Var1==1]
} else {
score_loss <- 0
}
} else {
score_loss <- 0
}
fab <- as.data.frame(table(colSums(to_compare>ploidy)))
if (length(fab$Freq[fab$Var1==1] > 0)) {
if (fab$Freq[fab$Var1==1] > 0) {
score_gain <- fab$Freq[fab$Var1==1]
} else {
score_gain <- 0
}
} else {
score_gain <- 0
}
anueploidy <- length(to_compare[to_compare<ploidy|to_compare>ploidy])
loss_anueploidy <- length(to_compare[to_compare<ploidy])
gain_anueploidy <- length(to_compare[to_compare>ploidy])
scores <- c(paste(rownames(head(to_compare))[1],rownames(head(to_compare))[2], sep="_"), score_loss, score_gain, anueploidy, loss_anueploidy, gain_anueploidy)
divergence_score = (score_loss + score_gain) / anueploidy
return(divergence_score)
}
###################################################
# Diversity measures 3 (depending on option used) #
###################################################
genetic_distance = function(x, y, normalise_by_bin_number = T) {
dist = sum(abs(x-y))
if(normalise_by_bin_number) {dist = dist / length(x)}
return(dist)
}
##############################################
# L2RSS - a comparison of log2ratio profiles #
##############################################
log2ratio_comparison = function(segs_col_a, segs_col_b, exp_distance = 1848.691, normalise_to_exp = T, min_purity = 0.2) {
# Calculate contineous copy number
calcCN = function(lrrs, rho, psit, gamma = 1) {
psi = (2*(1 - rho)) + (rho*psit)
n = ((psi*(2^(lrrs/gamma))) - (2 * (1 - rho))) / rho
return(n)
}
# What is our parameter search of purities?
parameter_comparison = rbind(cbind(seq(min_purity, 0.99, by = 0.01), 1),
cbind(1, seq(1, min_purity, by = -0.01)))
# Here we do a search of purity pairs
search = lapply(1:nrow(parameter_comparison), function(r) {
# Selected parameters for iteration
rhoA = parameter_comparison[r,1]
rhoB = parameter_comparison[r,2]
# Continuous copy number calculation
CNa = calcCN(lrrs = segs_col_a, rho = rhoA, psit = 2)
CNb = calcCN(lrrs = segs_col_b, rho = rhoB, psit = 2)
# Sum of squared differences (maybe normalise for number of bins?)
dist = sum((CNa - CNb)^2)
return(dist)
})
# Distance results for parameter comparisons
res = cbind(parameter_comparison, unlist(search))
# Which has the shortest distance
R = which.min(res[,3])
# Get the d
d = res[R,3]
if(normalise_to_exp) {
# Normalise the distance to the cohort (hard coded for now)
d = d / exp_distance # This number is the median dist in non-same patient comparisons
if(d>1) {d = 1} # Cap at 1
}
return(d)
}
#######################################
# Diversity measure 6 helper function #
#######################################
armCN = function(df, pqs, method = c("median", "mean"), l2r_col = 4, report_NA = F) {
method = match.arg(method)
chrs = unique(df$chromosome)
per_chr = lapply(chrs, function(c) {
chrp = df[df$chromosome==c & pqs=="p",l2r_col]
chrq = df[df$chromosome==c & pqs=="q",l2r_col]
if(method == "median") {
p = median(chrp, na.rm = T)
q = median(chrq, na.rm = T)
}
if(method == "mean") {
p = mean(chrp, na.rm = T)
q = mean(chrq, na.rm = T)
}
out = c(p, q)
names(out) = paste0(c,c("p","q"))
return(out)
})
out = unlist(per_chr)
if(!report_NA) {out = out[!is.na(out)]}
return(out)
}
############################################################
# Using biomaRt to calculate number of genes per bin #
# - may be really slow, might be better to download refseq #
# - only compatible with hg38 right now #
############################################################
countGenesPerBin = function(bins, genome = "hg38") {
genome = match.arg(genome)
require("biomaRt")
# Set up BiomaRt
listMarts(host="www.ensembl.org")
ensembl = useMart(biomart = "ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl")
filters = listFilters(ensembl)
coords = paste0(bins$chromosome,":",bins$start,":",bins$end)
coords = as.list(coords)
count_entries = lapply(coords, function(b) {
# Get overlapping genes from biomaRt
results=getBM(attributes = c("chromosome_name", "start_position", "end_position", "hgnc_symbol"),
filters = c("chromosomal_region", "biotype"),
values = list(chromosomal_region=b, biotype="protein_coding"),
mart = ensembl)
nrow(results)
})
count_entries = unlist(count_entries)
out = data.frame(bins[,1:3], gene_number = count_entries)
return(out)
}
####################################################
# Using biomaRt to assign each gene to a bin #
# - only compatible with hg38 right now #
####################################################
getGeneBinIndex = function(bins_locs, genome = "hg38", chrs = 1:22, saveFile="~/Downloads/human_protein_encoding_genes_ensembl.rds") {
genome = match.arg(genome)
require("biomaRt")
if (!file.exists(saveFile)){
print("Querying Biomart for protein coding genes")
ensembl = useMart(biomart = "ENSEMBL_MART_ENSEMBL",dataset="hsapiens_gene_ensembl")
humanProteinCodingGenes = getBM(attributes = c("chromosome_name", "start_position", "end_position", "hgnc_symbol"),
filters = c("biotype"),
values = list(biotype="protein_coding"),
mart = ensembl)
saveRDS(humanProteinCodingGenes,file=saveFile)
} else {
print("Loading genes from savefile")
humanProteinCodingGenes = readRDS(saveFile)
}
# Subset the autosomes
hPCG_autosomes = humanProteinCodingGenes[humanProteinCodingGenes$chromosome_name %in% chrs,]
hPCG_autosomes = hPCG_autosomes[order(hPCG_autosomes$chromosome_name, hPCG_autosomes$start_position),]
rownames(hPCG_autosomes) = NULL
hPCG_autosomes$mid_point = as.numeric(round(((hPCG_autosomes$end_position - hPCG_autosomes$start_position)/2) + hPCG_autosomes$start_position))
# Run through as a list
gene_bin_indexes = lapply(1:nrow(hPCG_autosomes), function(i) {
g = hPCG_autosomes[i,]
chr = g[,"chromosome_name"]
mid = g[,"mid_point"]
which(bins_locs$chr==chr & bins_locs$start<=mid & bins_locs$end>=mid)
})
# Produce the bin indexes
return(unlist(gene_bin_indexes))
}
#######################################
# Breakpoint functions from Salpie :) #
#######################################
convertToBreakpoints = function(cnTable){
y = cnTable
y[y > 0] <- 0
for (column in 1:ncol(cnTable)) {
breakpoints = (which(!!diff(as.numeric(cnTable[,column])))+1) #get indexes
y[c(breakpoints),column] <- 1
}
return(y)
}
calculateRelatednessCn = function(cnTable, pairs, maxgap){
pair_scores <- apply(pairs, 1, function(x){getScoreCN(cnTable, populationBreakpoints, maxgap, as.character(x))})
results <- cbind.data.frame(pairs, pair_scores)
return(results)
}
# Score breakpoint dissimilarity between two samples' copy-number segment
# tables. Output is in [0, 1]: 0 when every breakpoint in one sample
# matches one in the other within `maxgap`, 1 when none match.
# Relies on plyr::empty() and GenomicRanges/IRanges
# (makeGRangesFromDataFrame, findOverlaps, queryHits) being attached.
#
# Args:
#   cnTable: table with Chr/Start/End columns plus one column per sample.
#   maxgap:  maximum gap (bp) for two segment boundaries to be counted as
#            concordant by findOverlaps().
#   pairs:   character vector of length two naming the two sample columns.
getScoreCN = function(cnTable, maxgap, pairs){
# Keep the coordinate columns plus the one sample column per pair member.
sample1 <- cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == "Start" | colnames(cnTable) == "End" | colnames(cnTable) == pairs[1])]
sample2 <- cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == "Start" | colnames(cnTable) == "End" | colnames(cnTable) == pairs[2])]
# Drop rows with a 0 in ANY column (presumably no-call segments).
# NOTE(review): apply() coerces each mixed-type row to character, so the
# comparison is against the string "0"; this assumes Chr/Start/End are
# never literally 0 -- confirm for integer chromosome naming schemes.
row_sample1 = apply(sample1, 1, function(row) all(row !=0 ))
sample1 <- sample1[row_sample1,]
row_sample2 = apply(sample2, 1, function(row) all(row !=0 ))
sample2 <- sample2[row_sample2,]
if (empty(sample1) | empty(sample2)){
# One sample has no called segments: define the pair's score as 0.
score = 0
} else {
#tryCatch creates an empty GRanges object if the list is empty - would error out otherwise
sample1_granges <- makeGRangesFromDataFrame(sample1[,c("Chr", "Start", "End")], start.field = "Start", end.field = "End")
sample2_granges <- makeGRangesFromDataFrame(sample2[,c("Chr", "Start", "End")], start.field = "Start", end.field = "End")
# Count boundaries shared (within maxgap) at segment starts and ends.
hits_start <- suppressWarnings(queryHits(findOverlaps(sample1_granges, sample2_granges, type = "start", maxgap = maxgap)))
hits_end <- suppressWarnings(queryHits(findOverlaps(sample1_granges, sample2_granges, type = "end", maxgap = maxgap)))
# Each hit covers a breakpoint in both samples, hence the factor of 2.
nconcordant_adj <- 2*(length(hits_start)+length(hits_end))
# Every segment contributes a start and an end breakpoint per sample.
total_breakpoints <- sum(2*length(sample1_granges)+2*length(sample2_granges))
# Fraction of breakpoints NOT shared between the two samples.
score = (total_breakpoints-nconcordant_adj)/total_breakpoints
}
return(score)
}
###################################
# Average mean length differences #
###################################
get_diffLengths = function(cnTable, pairs, pp = 2, max_size = FALSE) {
# Mean (or maximum) run length of consecutive bins in which two samples'
# copy-number states differ, computed within each chromosome.
#
# Args:
#   cnTable:  table with a Chr column plus one column per sample.
#   pairs:    character vector of length 2 naming the two sample columns.
#   pp:       unused; retained for backward compatibility with callers.
#   max_size: if TRUE return the longest differing run, else the mean.
# Returns: a single number; 0 when the samples never differ.
# Subset to Chr plus one sample column each
a = cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == pairs[1])]
b = cnTable[,c(colnames(cnTable) == "Chr" | colnames(cnTable) == pairs[2])]
a[,2] = as.numeric(as.character(a[,2]))
b[,2] = as.numeric(as.character(b[,2]))
len = NULL
for (chrs in seq_along(unique(a$Chr))) {
sub_a = subset(a, a$Chr == unique(a$Chr)[chrs])
sub_b = subset(b, b$Chr == unique(a$Chr)[chrs])
# BUG FIX: encode each bin's pair of states with a separator. The
# original paste0() concatenation conflated multi-digit/signed states
# (e.g. pairs 1|12 and 11|2 both became "112"), and the character
# de-duplication test then misclassified differing bins.
pasted = paste(sub_a[,2], sub_b[,2], sep = "_")
runs = rle(pasted)
# A run is a "difference" run when the two states in its pair differ.
diff_bins = vapply(
strsplit(runs$values, split = "_"),
function(i) i[1] != i[2],
logical(1)
)
lengths = runs$lengths[diff_bins]
len = c(len,lengths)
}
# Catch times when there is no difference and record it as zero
if(length(len)==0) {len = 0}
if(max_size) {output = max(len)} else {output = mean(len)}
return(output)
}
#################################################
# Pairwise events comparison, used for rebuttal #
#################################################
getEvents = function(sample_1, sample_2, bin_names, chrs = 1:22) {
# Summarise, chromosome by chromosome, the runs of paired copy-number
# states for two samples, flagging runs where the two samples disagree.
#
# Args:
#   sample_1, sample_2: per-bin copy-number calls for the two samples,
#                       aligned to bin_names.
#   bin_names: QDNAseq-style bin names of the form "chr:start-end".
#   chrs:      chromosomes to process (defaults to autosomes 1:22).
# Returns: data.frame with columns chr, len (run length), CNs (the
#          paired states as "a_b") and Change (do the two calls differ?).
per_chr = lapply(chrs, function(cc) {
# Bins belonging to this chromosome ("<chr>:" prefix match)
idx = grep(paste0("^", cc, ":"), bin_names)
# Encode each bin's pair of calls as "a_b" and collapse into runs
pair_runs = rle(paste0(sample_1[idx], "_", sample_2[idx]))
# A run counts as a change when its two calls are not equal
is_change = unlist(lapply(
strsplit(as.character(pair_runs$values), "_"),
function(cn) diff(as.numeric(cn)) != 0
))
data.frame(chr = cc, len = pair_runs$lengths,
CNs = pair_runs$values, Change = is_change)
})
# Stack the per-chromosome summaries into one table
return(do.call(rbind, per_chr))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{calendar_plot}
\alias{calendar_plot}
\title{Create calendar plot}
\usage{
calendar_plot(df, type = "continuous", labels = NULL,
legend_name = "Exposure")
}
\arguments{
\item{df}{Data frame with one column named \code{date} for date with entries in the format
"yyyy-mm-dd" and one column for the daily values of the variable to plot.}
\item{type}{Character string specifying whether the exposure is continuous or
discrete}
\item{labels}{Vector of character strings naming the levels of a discrete
variable to be used in the figure legend.}
\item{legend_name}{Character string specifying the title to be used in the figure
legend.}
}
\description{
Creates a calendar plot of a time series of continuous or discrete data. The time series
data frame input to this function must have only two columns, one for the date and one with
the values to plot.
}
\details{
The output of this function is a \code{ggplot} object, so you can customize
this output object as with any \code{ggplot} object.
}
\examples{
testdat <- sim_exposure(n = 1000, central = 0.1,
exposure_type = "binary")
testdat$x[c(89,101,367,500,502,598,678,700,895)] <- 3
calendar_plot(testdat, type = "discrete", labels = c("no", "yes", "maybe"))
}
| /man/calendar_plot.Rd | no_license | sakoehler7/eesim | R | false | true | 1,320 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{calendar_plot}
\alias{calendar_plot}
\title{Create calendar plot}
\usage{
calendar_plot(df, type = "continuous", labels = NULL,
legend_name = "Exposure")
}
\arguments{
\item{df}{Data frame with one column named \code{date} for date with entries in the format
"yyyy-mm-dd" and one column for the daily values of the variable to plot.}
\item{type}{Character string specifying whether the exposure is continuous or
discrete}
\item{labels}{Vector of character strings naming the levels of a discrete
variable to be used in the figure legend.}
\item{legend_name}{Character string specifying the title to be used in the figure
legend.}
}
\description{
Creates a calendar plot of a time series of continuous or discrete data. The time series
data frame input to this function must have only two columns, one for the date and one with
the values to plot.
}
\details{
The output of this function is a \code{ggplot} object, so you can customize
this output object as with any \code{ggplot} object.
}
\examples{
testdat <- sim_exposure(n = 1000, central = 0.1,
exposure_type = "binary")
testdat$x[c(89,101,367,500,502,598,678,700,895)] <- 3
calendar_plot(testdat, type = "discrete", labels = c("no", "yes", "maybe"))
}
|
##' Survey bootstrap variance estimators
##'
##' \code{surveybootstrap} has methods for analyzing data that were collected
##' using network reporting techniques. It includes estimators appropriate for
##' the simple bootstrap and the rescaled bootstrap.
##'
##' @docType package
##' @name surveybootstrap
##' @aliases surveybootstrap package-surveybootstrap
##' @import dplyr
##' @importFrom stats rmultinom setNames terms update update.formula xtabs
NULL
##' @useDynLib surveybootstrap
##' @importFrom Rcpp sourceCpp
NULL
##' MU284 population
##'
##' Data used in unit tests for variance estimation.
##' See TODO-Sarndal TODO-sampling package
##' TODO-doc describing unit tests
##'
##' @name MU284
##' @docType data
NULL
| /R/surveybootstrap_package.r | no_license | Huneel7/surveybootstrap | R | false | false | 725 | r | ##' Survey bootstrap variance estimators
##'
##' \code{surveybootstrap} has methods for analyzing data that were collected
##' using network reporting techniques. It includes estimators appropriate for
##' the simple bootstrap and the rescaled bootstrap.
##'
##' @docType package
##' @name surveybootstrap
##' @aliases surveybootstrap package-surveybootstrap
##' @import dplyr
##' @importFrom stats rmultinom setNames terms update update.formula xtabs
NULL
##' @useDynLib surveybootstrap
##' @importFrom Rcpp sourceCpp
NULL
##' MU284 population
##'
##' Data used in unit tests for variance estimation.
##' See TODO-Sarndal TODO-sampling package
##' TODO-doc describing unit tests
##'
##' @name MU284
##' @docType data
NULL
|
######################################
#
# Make the two plots for the intercept and
# two slope terms.
#
# Written by M. Fidino
#
######################################
library(dplyr)
library(lubridate)
library(runjags)
library(parallel)
library(coda)
library(knitr)
# --- Script configuration -------------------------------------------------
# Path to the JAGS model definition used for the fit being plotted.
model <- "./JAGS/multi_scale_occupancy_simpler_RE.R"
# Minimum detections for a species to be included
# (consumed by prep_data_for_model.R -- TODO confirm).
min_dets <- 19
# Include interaction terms? (consumed downstream -- TODO confirm).
include_inxs <- FALSE
# Whether or not you want to check out the gelman rubin diagnostic
# of parameters
check_grdiag <- FALSE
# Builds data_list, species_map, city_map etc. used throughout the script.
source("./R/prep_data_for_model.R")
########################################################
# Setting stuff up for the two plots
########################################################
# get some values we are going to call multiple times
ncity <- data_list$ncity
nspecies <- data_list$nspecies
# read in the results
m1 <- readRDS("./results/final_model_fit.RDS")
# convert the jags output to a matrix
# (chains = TRUE prepends a CHAIN index column, which [,-1] drops)
mc <- as.matrix(
as.mcmc.list(m1),
chains = TRUE
)[,-1]
# Get just the results of whether a species was available
# in the city
# NOTE(review): the pattern "x" matches ANY column name containing an x;
# this relies on only the availability indicators having an x in their
# monitored-parameter names -- confirm against the JAGS monitor list.
mx <- mc[,grep("x", colnames(mc))]
# and then remove that from the rest of the matrix
mc <- mc[,-grep("x", colnames(mc))]
# pull the median and 95% quantiles
msum <- apply(mc, 2, quantile, probs = c(0.025,0.5,0.975))
# transpose so it's long on rows, not columns
# (columns become "2.5%", "50%", "97.5%"; column 2 is the median)
msum <- t(msum)
# get the global averages and global standard deviations
species_average_occ <- msum[
grep(
"b_within\\[",
row.names(msum)
),
]
tau_within <- msum[
grep(
"tau_within",
row.names(msum)
),
]
# Convert JAGS precision (tau) to a standard deviation.
sd_within <- sqrt(1 / tau_within)
# get the species specific estimates. We need the mcmc
# draws for this.
bspci <- mc[
,
grep(
"b_species_city",
colnames(mc)
)
]
# replicate the mx mat by the number of parameters
# in this case there are 3 (intercept, income, URB)
mx3 <- mx[
,
rep(
1:ncol(mx),
each = 3
)
]
# For the city specific estimates
species_city_est <- matrix(
NA,
ncol = 3,
nrow = ncol(bspci)
)
row.names(species_city_est) <- colnames(bspci)
# Quantiles are computed only over draws where the species was available
# (mx3 == 1); species never available in a city keep their NA row.
for(i in 1:ncol(bspci)){
if(median(mx3[,i]) == 0){
next
}
tmpmc <- bspci[mx3[,i] == 1,i]
species_city_est[i,] <- quantile(tmpmc, probs = c(0.025,0.5,0.975))
}
########################################################
# For the average occupancy plot
########################################################
# get the among city averages
species_mu <- msum[
grep(
"^b_species\\[1",
row.names(msum)
),
]
# give them some names
row.names(species_mu) <- species_map$Species
# get the species specific standard deviations
# among cities
tau_species <- msum[
grep(
"tau_species\\[1"
, row.names(msum)
),
]
sd_species <- sqrt(1 / tau_species)
row.names(sd_species) <- species_map$Species
# a matrix for the 95% predictive interval
species_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
# (predictive interval for an UNSAMPLED city: normal on the logit scale
# around the species median, mapped to probability with plogis)
for(i in 1:nspecies){
species_95pi[i,] <- plogis(
qnorm(
c(0.025,0.975),
species_mu[i,2],
sd_species[i,2]
)
)
}
# convert species_mu to probability (for intercept only)
species_mu <- plogis(species_mu)
# get the ordering, from lowest to highest
species_order <- order(
species_mu[,2],
decreasing = FALSE
)
# sort the mu and 95% PI
species_mu <- species_mu[species_order,]
species_95pi <- species_95pi[species_order,]
species_city <- species_city_est[
grep(
"b_species_city\\[1",
row.names(species_city_est)
),
2
]
# split species city by species
# NOTE(review): matrix()/array() fills column-major, so this assumes the
# estimates are ordered species-fastest within city -- confirm against
# the JAGS parameter ordering.
species_city <- matrix(
species_city,
ncol = ncity,
nrow = nspecies
)
species_city <- plogis(species_city)
species_city <- species_city[species_order,]
# get 95% CI as well
species_city_quantiles <- species_city_est[
grep(
"b_species_city\\[1",
row.names(species_city_est)
),
]
# split species city by species
species_city_quantiles <- array(
species_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
species_city_quantiles <- plogis(species_city_quantiles)
# Exploratory console look-up of one species/city (kept from the
# original; prints only when the script is run interactively).
condition <- c("raccoon", "phaz")
species_city_quantiles[
species_map$Species == condition[1],
city_map$City == condition[2],
]
# And the figure!
# --- Figure 4: species-level average occupancy ----------------------------
# One row per species (sorted by median occupancy): 95% predictive
# interval for an unsampled city, 95% CI, per-city medians (jittered open
# circles) and the among-city median (blue point).
tiff(
"./plots/among_model/figure_4.tiff",
height = 7,
width = 6,
units = "in",
res = 800,
compression = "lzw"
)
par(mar=c(3.5,10,0,2))
par(xpd = NA)
# y_min modifies the vertical placement of the legend in this
# plot. I used this to play around with the appropriate placement
y_min <- 16.5
{plot(1~1, type = "n", xlim = c(0,1), ylim = c(0.5,nspecies+0.5),
bty = "l", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
# Fixed seed so the jittered per-city points are reproducible.
set.seed(90210)
# Shaded band + dotted line: community-level 95% CI and median occupancy.
rect(xleft = plogis(species_average_occ[1,1]),
xright = plogis(species_average_occ[1,3]),
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(plogis(species_average_occ[1,2]),2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(0,1, 0.2), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(0,1, 0.2/2), labels = F, tck = -0.0125, lwd = 2)
axis(2, at = seq(0.5, nspecies+0.5),
labels = F, tck = 0, lwd = 2)
axis(2, at = seq(1, nspecies),
labels = F, tck = -0.0125, lwd = 2)
# Clean up species names for axis labels.
tp <- gsub("_"," ", row.names(species_mu))
tp <- gsub("north american ", "", tp)
tp <- gsub("virginia", "Virginia", tp, ignore.case = FALSE)
tp <- gsub("white ", "white-", tp, ignore.case = FALSE)
mtext(
sprintf("%.1f", seq(0,1, 0.2)),
1,
at = seq(0,1,0.2),
line = 0.75,
cex = 1.3
)
mtext(tp,
2,
0.5, at = seq(1, nspecies),
cex = 1.4, las = 1)
mtext("Occupancy", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = species_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
species_mu[i,1],
i - 0.085,
species_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
species_mu[i,1],
i - 0.085,
species_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
species_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = species_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
# Add the legend, and to do so allow for plotting outside
# into the plot margin. We've got two different legends
# here so we can split up the species-specific and community
# level stuff.
par(xpd = NA)
legend(
x = 0.45,
y = 26.5-y_min,
legend = c(
"E(among city)",
"E(sampled city)",
"95% among city CI",
"95% unsampled city PI"
),
lty = c(NA, NA, NA, 1),
lwd = c(NA, NA, NA, 2),
pch = c(21, 21, 15, NA),
pt.cex = c(1.5, 1.3, 1.3, NA),
col = c(
"black", "black",
scales::alpha("gray20", 0.75),
scales::alpha("gray20", 0.75)
),
pt.bg = c(
"#00AADE",
"white",
scales::alpha("gray20", 0.75),
NA
),
cex = 1,
# box.col = "white",
bty = "n",
title = "Species specific",
title.col = "white"
)
# And the community stuff
legend(
x = 0.45,
y = 21.5-y_min,
legend = c(
"E(among species)",
"95% among species CI"
),
lty = c(3, NA),
lwd = c(2, NA),
pch = c(NA, 15),
pt.cex = c(NA, 1.3),
col = c("gray30", "gray80"),
cex = 1,
# box.col = "white",
bty = "n",
title = "Community",
title.col = "white"
)
# Doing my own titles, plus an underline.
# (legend titles are drawn white above, then re-drawn here with text())
lines(
x = c(0.58, 0.95),
y = rep(25.25-y_min,2)
)
text(
x = 0.77,
y = 26.5-y_min,
labels = "Species specific",
pos = 1,
cex = 1.1
)
lines(
x = c(0.58, 0.95),
y = rep(20.2-y_min,2)
)
text(
x = 0.71,
y = 21.4-y_min,
labels = "Community",
pos = 1,
cex = 1.1
)
}
dev.off()
# Now do the second figure, which is going to be a two-panel
# (income slopes and urbanization slopes, logit scale).
# get the among city averages
income_mu <- msum[
grep(
"^b_species\\[2",
row.names(msum)
),
]
# give them some names
row.names(income_mu) <- species_map$Species
urb_mu <- msum[
grep(
"^b_species\\[3",
row.names(msum)
),
]
# give them some names
row.names(urb_mu) <- species_map$Species
# get the species specific standard deviations
# among cities
tau_income <- msum[
grep(
"tau_species\\[2"
, row.names(msum)
),
]
sd_income <- sqrt(1 / tau_income)
row.names(sd_income) <- species_map$Species
tau_urb <- msum[
grep(
"tau_species\\[3"
, row.names(msum)
),
]
sd_urb <- sqrt(1 / tau_urb)
row.names(sd_urb) <- species_map$Species
# a matrix for the 95% predictive interval
income_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
# (slopes stay on the link scale, so no plogis here)
for(i in 1:nspecies){
income_95pi[i,] <- qnorm(
c(0.025,0.975),
income_mu[i,2],
sd_income[i,2]
)
}
# a matrix for the 95% predictive interval
urb_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
for(i in 1:nspecies){
urb_95pi[i,] <- qnorm(
c(0.025,0.975),
urb_mu[i,2],
sd_urb[i,2]
)
}
# get the ordering, from lowest to highest for
# urbanization
species_order <- order(
urb_mu[,2],
decreasing = TRUE
)
# sort the mu and 95% PI
income_mu <- income_mu[species_order,]
income_95pi <- income_95pi[species_order,]
urb_mu <- urb_mu[species_order,]
urb_95pi <- urb_95pi[species_order,]
income_city <- species_city_est[
grep(
"b_species_city\\[2",
row.names(species_city_est)
),
2
]
urb_city <- species_city_est[
grep(
"b_species_city\\[3",
row.names(species_city_est)
),
2
]
# split species city by species
# NOTE(review): matrix() fills column-major -- assumes estimates are
# ordered species-fastest within city; same assumption as figure 4.
income_city <- matrix(
income_city,
ncol = ncity,
nrow = nspecies
)
income_city <- income_city[species_order,]
urb_city <- matrix(
urb_city,
ncol = ncity,
nrow = nspecies
)
urb_city <- urb_city[species_order,]
income_city_quantiles <- species_city_est[
grep(
"b_species_city\\[2",
row.names(species_city_est)
),
]
# split species city by species
income_city_quantiles <- array(
income_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
# --- Exploratory console checks (kept from the original) ------------------
# Which species/city income slopes are credibly negative / positive?
hm <- which(income_city_quantiles[,,3] < 0, arr.ind = TRUE)
hm[order(hm[,1]),]
hm <- which(income_city_quantiles[,,1] > 0, arr.ind = TRUE)
hm[order(hm[,1]),]
income_city_quantiles[,,3]
condition <- c("cottontail_sp", "mawi")
round(income_city_quantiles[
species_map$Species == condition[1],
c(1,3,6,8,10),
],2)
table(row.names(hm))
# Build the per-species, per-city urbanization-slope quantile array
# BEFORE querying it.
# BUG FIX: the original script indexed urb_city_quantiles several lines
# before defining it, which errors with "object not found" in a fresh
# session (the dead pre-definition look-up has been removed).
urb_city_quantiles <- species_city_est[
grep(
"b_species_city\\[3",
row.names(species_city_est)
),
]
# split species city by species
# (array() fills column-major -- same species-fastest ordering
# assumption as the income quantiles above)
urb_city_quantiles <- array(
urb_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
# Exploratory console look-up (prints when run interactively).
condition <- c("cougar", "phaz")
urb_city_quantiles[
species_map$Species == condition[1],,
#city_map$City == condition[2],
]
# Exploratory counts: how many species/city urbanization slopes are
# credibly positive / negative?
dim(which(urb_city_quantiles[,,1] > 0, arr.ind = TRUE))
dim(which(urb_city_quantiles[,,3] < 0, arr.ind = TRUE))
# --- Figure 5: two-panel slope plot (income | urbanization) ---------------
tiff(
"./plots/among_model/figure_5.tiff",
height = 7,
width = 8,
units = "in",
res = 800,
compression = "lzw"
)
layout(matrix(c(1,2), ncol = 2, nrow = 1))
par(mar=c(3.5,2,6,2))
par(xpd = NA)
# Left panel: income slopes.
plot(1~1, type = "n", xlim = c(-2,2), ylim = c(0.5,nspecies+0.5),
bty = "n", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
# Fixed seed so the jittered per-city points are reproducible.
set.seed(90210)
# Shaded band + dotted line: community-level 95% CI and median slope.
rect(xleft = species_average_occ[2,1],
xright = species_average_occ[2,3],
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(species_average_occ[2,2],2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(-2,2, 0.5), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(-2,2, 0.5/2), labels = F, tck = -0.0125, lwd = 2)
# axis(4, at = seq(0.5, nspecies+0.5),
# labels = F, tck = 0, lwd = 2)
# axis(4, at = seq(1, nspecies),
# labels = F, tck = -0.0125, lwd = 2)
#
# tp <- gsub("_"," ", row.names(income_mu))
#
# tp <- gsub("north american ", "", tp)
mtext(
sprintf("%.1f", seq(-2,2, 1)),
1,
at = seq(-2,2,1),
line = 0.75,
cex = 1.3
)
#
# mtext(tp,
# 4,
# 0.5, at = seq(1, nspecies),
# cex = 1.4, las = 1)
mtext("Response to income", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = income_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
income_mu[i,1],
i - 0.085,
income_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
income_mu[i,1],
i - 0.085,
income_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
income_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = income_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
# Vertical reference line at a slope of zero.
lines(x = c(0,0), y = c(0.5, nspecies+0.5), lty = 2, lwd = 2)
############################################
# Urbanization part of the plot
#
# tiff(
# "./plots/among_model/figure_3.tiff",
# height = 7,
# width = 6,
# units = "in",
# res = 800,
# compression = "lzw"
# )
#windows(6,7)
par(mar=c(3.5,2,6,2))
par(xpd = NA)
plot(1~1, type = "n", xlim = c(-2,2), ylim = c(0.5,nspecies+0.5),
bty = "n", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
set.seed(90210)
rect(xleft = species_average_occ[3,1],
xright = species_average_occ[3,3],
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(species_average_occ[3,2],2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(-2,2, 0.5), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(-2,2, 0.5/2), labels = F, tck = -0.0125, lwd = 2)
# axis(2, at = seq(0.5, nspecies+0.5),
# labels = F, tck = 0, lwd = 2)
# axis(2, at = seq(1, nspecies),
# labels = F, tck = -0.0125, lwd = 2)
# Clean up species names; they are drawn between the two panels below.
tp <- gsub("_"," ", row.names(urb_mu))
tp <- gsub("north american ", "", tp)
tp <- gsub("virginia", "Virginia", tp, ignore.case = FALSE)
tp <- gsub("white ", "white-", tp, ignore.case = FALSE)
mtext(
sprintf("%.1f", seq(-2,2, 1)),
1,
at = seq(-2,2,1),
line = 0.75,
cex = 1.3
)
# Species labels in the gutter between panels (deer gets a manual nudge
# because its name is longer).
for(i in 1:nspecies){
if(tp[i] == "white-tailed deer"){
text(x = -2.7, y = i+0.05, tp[i], adj = 0.5, cex = 1.3 )
}else{
text(x = -2.5, y = i+0.05, tp[i], adj = 0.5, cex = 1.3 )
}
}
# mtext(tp,
# 2,
# 0.5, at = seq(1, nspecies),
# cex = 1.4, las = 1)
mtext("Response to urbanization", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = urb_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
urb_mu[i,1],
i - 0.085,
urb_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
urb_mu[i,1],
i - 0.085,
urb_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
urb_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = urb_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
lines(x = c(0,0), y = c(0.5, nspecies+0.5), lty = 2, lwd = 2)
# Add the legend, and to do so allow for plotting outside
# into the plot margin. We've got two different legends
# here so we can split up the species-specific and community
# level stuff.
par(xpd = NA)
legend(
x = -5.4,
y = 33,
legend = c(
"E(among city)",
"E(sampled city)",
"95% among city CI",
"95% unsampled city PI"
),
lty = c(NA, NA, NA, 1),
lwd = c(NA, NA, NA, 2),
pch = c(21, 21, 15, NA),
pt.cex = c(1.5, 1.3, 1.3, NA),
col = c(
"black", "black",
scales::alpha("gray20", 0.75),
scales::alpha("gray20", 0.75)
),
pt.bg = c(
"#00AADE",
"white",
scales::alpha("gray20", 0.75),
NA
),
cex = 1,
# box.col = "white",
bty = "n",
title = "Species specific",
title.col = "white"
)
# And the community stuff
legend(
x = -2.5,
y = 33,
legend = c(
"E(among species)",
"95% among species CI"
),
lty = c(3, NA),
lwd = c(2, NA),
pch = c(NA, 15),
pt.cex = c(NA, 1.3),
col = c("gray30", "gray80"),
cex = 1,
# box.col = "white",
bty = "n",
title = "Community",
title.col = "white"
)
# Doing my own titles, plus an underline.
lines(
x = c(-4.8, -3.2),
y = rep(31.45,2)
)
text(
x = -4,
y = 33,
labels = "Species specific",
pos = 1,
cex = 1.1
)
lines(
x = c(-1.85, -0.75),
y = rep(31.45,2)
)
text(
x = -1.325,
y = 33,
labels = "Community",
pos = 1,
cex = 1.1
)
dev.off()
| /R/plotting_scripts/intercept_slope_plot.R | no_license | mfidino/uwin-luxury-effect | R | false | false | 18,144 | r | ######################################
#
# Make the two plots for the intercept and
# two slope terms.
#
# Written by M. Fidino
#
######################################
library(dplyr)
library(lubridate)
library(runjags)
library(parallel)
library(coda)
library(knitr)
model <- "./JAGS/multi_scale_occupancy_simpler_RE.R"
min_dets <- 19
include_inxs <- FALSE
# Whether or not you want to check out the gelman rubin diagnostic
# of parameters
check_grdiag <- FALSE
source("./R/prep_data_for_model.R")
########################################################
# Setting stuff up for the two plots
########################################################
# get some values we are going to call multiple times
ncity <- data_list$ncity
nspecies <- data_list$nspecies
# read in the results
m1 <- readRDS("./results/final_model_fit.RDS")
# convert the jags output to a matrix
mc <- as.matrix(
as.mcmc.list(m1),
chains = TRUE
)[,-1]
# Get just the results of whether a species was available
# in the city
mx <- mc[,grep("x", colnames(mc))]
# and then remove that from the rest of the matrix
mc <- mc[,-grep("x", colnames(mc))]
# pull the median and 95% quantiles
msum <- apply(mc, 2, quantile, probs = c(0.025,0.5,0.975))
# transpose so it's long on rows, not columns
msum <- t(msum)
# get the global averages and global standard deviations
species_average_occ <- msum[
grep(
"b_within\\[",
row.names(msum)
),
]
tau_within <- msum[
grep(
"tau_within",
row.names(msum)
),
]
sd_within <- sqrt(1 / tau_within)
# get the species specific estimates. We need the mcmc
# draws for this.
bspci <- mc[
,
grep(
"b_species_city",
colnames(mc)
)
]
# replicate the mx mat by the number of parameters
# in this case there are 3 (intercept, income, URB)
mx3 <- mx[
,
rep(
1:ncol(mx),
each = 3
)
]
# For the city specific estimates
species_city_est <- matrix(
NA,
ncol = 3,
nrow = ncol(bspci)
)
row.names(species_city_est) <- colnames(bspci)
for(i in 1:ncol(bspci)){
if(median(mx3[,i]) == 0){
next
}
tmpmc <- bspci[mx3[,i] == 1,i]
species_city_est[i,] <- quantile(tmpmc, probs = c(0.025,0.5,0.975))
}
########################################################
# For the average occupancy plot
########################################################
# get the among city averages
species_mu <- msum[
grep(
"^b_species\\[1",
row.names(msum)
),
]
# give them some names
row.names(species_mu) <- species_map$Species
# get the species specific standard deviations
# among cities
tau_species <- msum[
grep(
"tau_species\\[1"
, row.names(msum)
),
]
sd_species <- sqrt(1 / tau_species)
row.names(sd_species) <- species_map$Species
# a matrix for the 95% predictive interval
species_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
for(i in 1:nspecies){
species_95pi[i,] <- plogis(
qnorm(
c(0.025,0.975),
species_mu[i,2],
sd_species[i,2]
)
)
}
# convert species_mu to probability (for intercept only)
species_mu <- plogis(species_mu)
# get the ordering, from lowest to highest
species_order <- order(
species_mu[,2],
decreasing = FALSE
)
# sort the mu and 95% PI
species_mu <- species_mu[species_order,]
species_95pi <- species_95pi[species_order,]
species_city <- species_city_est[
grep(
"b_species_city\\[1",
row.names(species_city_est)
),
2
]
# split species city by species
species_city <- matrix(
species_city,
ncol = ncity,
nrow = nspecies
)
species_city <- plogis(species_city)
species_city <- species_city[species_order,]
# get 95% CI as well
species_city_quantiles <- species_city_est[
grep(
"b_species_city\\[1",
row.names(species_city_est)
),
]
# split species city by species
species_city_quantiles <- array(
species_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
species_city_quantiles <- plogis(species_city_quantiles)
condition <- c("raccoon", "phaz")
species_city_quantiles[
species_map$Species == condition[1],
city_map$City == condition[2],
]
# And the figure!
tiff(
"./plots/among_model/figure_4.tiff",
height = 7,
width = 6,
units = "in",
res = 800,
compression = "lzw"
)
par(mar=c(3.5,10,0,2))
par(xpd = NA)
# y_min modifies the vertical placement of the legend in this
# plot. I used this to play around with the appropriate placement
y_min <- 16.5
{plot(1~1, type = "n", xlim = c(0,1), ylim = c(0.5,nspecies+0.5),
bty = "l", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
set.seed(90210)
rect(xleft = plogis(species_average_occ[1,1]),
xright = plogis(species_average_occ[1,3]),
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(plogis(species_average_occ[1,2]),2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(0,1, 0.2), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(0,1, 0.2/2), labels = F, tck = -0.0125, lwd = 2)
axis(2, at = seq(0.5, nspecies+0.5),
labels = F, tck = 0, lwd = 2)
axis(2, at = seq(1, nspecies),
labels = F, tck = -0.0125, lwd = 2)
tp <- gsub("_"," ", row.names(species_mu))
tp <- gsub("north american ", "", tp)
tp <- gsub("virginia", "Virginia", tp, ignore.case = FALSE)
tp <- gsub("white ", "white-", tp, ignore.case = FALSE)
mtext(
sprintf("%.1f", seq(0,1, 0.2)),
1,
at = seq(0,1,0.2),
line = 0.75,
cex = 1.3
)
mtext(tp,
2,
0.5, at = seq(1, nspecies),
cex = 1.4, las = 1)
mtext("Occupancy", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = species_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
species_mu[i,1],
i - 0.085,
species_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
species_mu[i,1],
i - 0.085,
species_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
species_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = species_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
# Add the legend, and to do so allow for plotting outside
# into the plot margin. We've got two different legends
# here so we can split up the species-specific and community
# level stuff.
par(xpd = NA)
legend(
x = 0.45,
y = 26.5-y_min,
legend = c(
"E(among city)",
"E(sampled city)",
"95% among city CI",
"95% unsampled city PI"
),
lty = c(NA, NA, NA, 1),
lwd = c(NA, NA, NA, 2),
pch = c(21, 21, 15, NA),
pt.cex = c(1.5, 1.3, 1.3, NA),
col = c(
"black", "black",
scales::alpha("gray20", 0.75),
scales::alpha("gray20", 0.75)
),
pt.bg = c(
"#00AADE",
"white",
scales::alpha("gray20", 0.75),
NA
),
cex = 1,
# box.col = "white",
bty = "n",
title = "Species specific",
title.col = "white"
)
# And the community stuff
legend(
x = 0.45,
y = 21.5-y_min,
legend = c(
"E(among species)",
"95% among species CI"
),
lty = c(3, NA),
lwd = c(2, NA),
pch = c(NA, 15),
pt.cex = c(NA, 1.3),
col = c("gray30", "gray80"),
cex = 1,
# box.col = "white",
bty = "n",
title = "Community",
title.col = "white"
)
# Doing my own titles, plus an underline.
lines(
x = c(0.58, 0.95),
y = rep(25.25-y_min,2)
)
text(
x = 0.77,
y = 26.5-y_min,
labels = "Species specific",
pos = 1,
cex = 1.1
)
lines(
x = c(0.58, 0.95),
y = rep(20.2-y_min,2)
)
text(
x = 0.71,
y = 21.4-y_min,
labels = "Community",
pos = 1,
cex = 1.1
)
}
dev.off()
# Now do the second figure, which is going to be a two-panel
# get the among city averages
income_mu <- msum[
grep(
"^b_species\\[2",
row.names(msum)
),
]
# give them some names
row.names(income_mu) <- species_map$Species
urb_mu <- msum[
grep(
"^b_species\\[3",
row.names(msum)
),
]
# give them some names
row.names(urb_mu) <- species_map$Species
# get the species specific standard deviations
# among cities
tau_income <- msum[
grep(
"tau_species\\[2"
, row.names(msum)
),
]
sd_income <- sqrt(1 / tau_income)
row.names(sd_income) <- species_map$Species
tau_urb <- msum[
grep(
"tau_species\\[3"
, row.names(msum)
),
]
sd_urb <- sqrt(1 / tau_urb)
row.names(sd_urb) <- species_map$Species
# a matrix for the 95% predictive interval
income_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
for(i in 1:nspecies){
income_95pi[i,] <- qnorm(
c(0.025,0.975),
income_mu[i,2],
sd_income[i,2]
)
}
# a matrix for the 95% predictive interval
urb_95pi <- matrix(
NA,
ncol = 2,
nrow = nspecies
)
# fill it in
for(i in 1:nspecies){
urb_95pi[i,] <- qnorm(
c(0.025,0.975),
urb_mu[i,2],
sd_urb[i,2]
)
}
# get the ordering, from lowest to highest for
# urbanization
species_order <- order(
urb_mu[,2],
decreasing = TRUE
)
# sort the mu and 95% PI
income_mu <- income_mu[species_order,]
income_95pi <- income_95pi[species_order,]
urb_mu <- urb_mu[species_order,]
urb_95pi <- urb_95pi[species_order,]
income_city <- species_city_est[
grep(
"b_species_city\\[2",
row.names(species_city_est)
),
2
]
urb_city <- species_city_est[
grep(
"b_species_city\\[3",
row.names(species_city_est)
),
2
]
# split species city by species
income_city <- matrix(
income_city,
ncol = ncity,
nrow = nspecies
)
income_city <- income_city[species_order,]
urb_city <- matrix(
urb_city,
ncol = ncity,
nrow = nspecies
)
urb_city <- urb_city[species_order,]
income_city_quantiles <- species_city_est[
grep(
"b_species_city\\[2",
row.names(species_city_est)
),
]
# split species city by species
income_city_quantiles <- array(
income_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
# Exploratory checks: species-city pairs whose upper bound is below zero
# (clearly negative income effects)...
hm <- which(income_city_quantiles[,,3] < 0, arr.ind = TRUE)
hm[order(hm[,1]),]
# ...and pairs whose lower bound is above zero (clearly positive).
hm <- which(income_city_quantiles[,,1] > 0, arr.ind = TRUE)
hm[order(hm[,1]),]
income_city_quantiles[,,3]
# Ad-hoc inspection of one species across a subset of cities.
condition <- c("cottontail_sp", "mawi")
round(income_city_quantiles[
species_map$Species == condition[1],
c(1,3,6,8,10),
],2)
table(row.names(hm))
# NOTE(review): condition[1] == "c" presumably matches no species, and
# urb_city_quantiles is not created until a few lines below, so the
# next expression fails if this file is run top-to-bottom -- this looks
# like leftover interactive exploration.
condition <- c("c", "phaz")
urb_city_quantiles[
species_map$Species == condition[1],,
#city_map$City == condition[2],
]
# Urbanization-slope quantiles, same construction as for income above.
urb_city_quantiles <- species_city_est[
grep(
"b_species_city\\[3",
row.names(species_city_est)
),
]
# split species city by species
urb_city_quantiles <- array(
urb_city_quantiles,
dim = c(nspecies, ncity, 3),
dimnames = list(species_map$Species, city_map$City, c("lo", "med", "hi"))
)
condition <- c("cougar", "phaz")
urb_city_quantiles[
species_map$Species == condition[1],,
#city_map$City == condition[2],
]
# Counts of species-city pairs whose 95% interval excludes zero.
dim(which(urb_city_quantiles[,,1] > 0, arr.ind = TRUE))
dim(which(urb_city_quantiles[,,3] < 0, arr.ind = TRUE))
tiff(
"./plots/among_model/figure_5.tiff",
height = 7,
width = 8,
units = "in",
res = 800,
compression = "lzw"
)
layout(matrix(c(1,2), ncol = 2, nrow = 1))
par(mar=c(3.5,2,6,2))
par(xpd = NA)
plot(1~1, type = "n", xlim = c(-2,2), ylim = c(0.5,nspecies+0.5),
bty = "n", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
set.seed(90210)
rect(xleft = species_average_occ[2,1],
xright = species_average_occ[2,3],
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(species_average_occ[2,2],2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(-2,2, 0.5), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(-2,2, 0.5/2), labels = F, tck = -0.0125, lwd = 2)
# axis(4, at = seq(0.5, nspecies+0.5),
# labels = F, tck = 0, lwd = 2)
# axis(4, at = seq(1, nspecies),
# labels = F, tck = -0.0125, lwd = 2)
#
# tp <- gsub("_"," ", row.names(income_mu))
#
# tp <- gsub("north american ", "", tp)
mtext(
sprintf("%.1f", seq(-2,2, 1)),
1,
at = seq(-2,2,1),
line = 0.75,
cex = 1.3
)
#
# mtext(tp,
# 4,
# 0.5, at = seq(1, nspecies),
# cex = 1.4, las = 1)
mtext("Response to income", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = income_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
income_mu[i,1],
i - 0.085,
income_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
income_mu[i,1],
i - 0.085,
income_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
income_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = income_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
lines(x = c(0,0), y = c(0.5, nspecies+0.5), lty = 2, lwd = 2)
############################################
# Urbanization part of the plot
#
# tiff(
# "./plots/among_model/figure_3.tiff",
# height = 7,
# width = 6,
# units = "in",
# res = 800,
# compression = "lzw"
# )
#windows(6,7)
par(mar=c(3.5,2,6,2))
par(xpd = NA)
plot(1~1, type = "n", xlim = c(-2,2), ylim = c(0.5,nspecies+0.5),
bty = "n", xlab = "",
ylab = "", yaxt = "n",
xaxt = "n",
yaxs = "i",xaxs = "i")
set.seed(90210)
rect(xleft = species_average_occ[3,1],
xright = species_average_occ[3,3],
ybottom = 0.5,
ytop = nspecies+0.5,
col = "gray80",
border = "gray80")
lines(x = rep(species_average_occ[3,2],2),
y = c(0.5, nspecies+0.5),
lwd = 2,
lty = 3,
col = "gray30")
axis(1, at= seq(-2,2, 0.5), labels = F, tck = -0.025, lwd = 2)
axis(1, at= seq(-2,2, 0.5/2), labels = F, tck = -0.0125, lwd = 2)
# axis(2, at = seq(0.5, nspecies+0.5),
# labels = F, tck = 0, lwd = 2)
# axis(2, at = seq(1, nspecies),
# labels = F, tck = -0.0125, lwd = 2)
tp <- gsub("_"," ", row.names(urb_mu))
tp <- gsub("north american ", "", tp)
tp <- gsub("virginia", "Virginia", tp, ignore.case = FALSE)
tp <- gsub("white ", "white-", tp, ignore.case = FALSE)
mtext(
sprintf("%.1f", seq(-2,2, 1)),
1,
at = seq(-2,2,1),
line = 0.75,
cex = 1.3
)
for(i in 1:nspecies){
if(tp[i] == "white-tailed deer"){
text(x = -2.7, y = i+0.05, tp[i], adj = 0.5, cex = 1.3 )
}else{
text(x = -2.5, y = i+0.05, tp[i], adj = 0.5, cex = 1.3 )
}
}
# mtext(tp,
# 2,
# 0.5, at = seq(1, nspecies),
# cex = 1.4, las = 1)
mtext("Response to urbanization", 1, 2.2, cex = 1.6)
for(i in 1:nspecies){
# plot predictive interval first
lines(
x = urb_95pi[i,],
y = rep(i,2),
col =scales::alpha("gray20", 0.75),
lwd = 2
)
# followed by a gray box to act as a base for the
# 95% CI so we don't see the 95% PI underneath it
rect(
urb_mu[i,1],
i - 0.085,
urb_mu[i,3],
i + 0.085,
col = "gray80",
border = "gray80"
)
# Then the 95% CI
rect(
urb_mu[i,1],
i - 0.085,
urb_mu[i,3],
i + 0.085,
col = scales::alpha("gray20", 0.75),
border = scales::alpha("gray20", 1)
)
# And now the city-specific estimates for each species
# This is the reason we set the seed at the beginning.
# We are jittering the y-axis a TINY bit.
points(
urb_city[i,],
y = jitter(rep(i, ncity), 0.4),
pch = 21, bg = scales::alpha("white", 0.3),
col = scales::alpha("black", 0.6),
cex = 1
)
}
# Add among-city average occupancy for each species
points(
x = urb_mu[,2],
y = 1:nspecies,
cex = 1.5,
pch=21,
bg = "#00AADE"
)
lines(x = c(0,0), y = c(0.5, nspecies+0.5), lty = 2, lwd = 2)
# Add the legend, and to do so allow for plotting outside
# into the plot margin. We've got two different legends
# here so we can split up the species-specific and community
# level stuff.
par(xpd = NA)
legend(
x = -5.4,
y = 33,
legend = c(
"E(among city)",
"E(sampled city)",
"95% among city CI",
"95% unsampled city PI"
),
lty = c(NA, NA, NA, 1),
lwd = c(NA, NA, NA, 2),
pch = c(21, 21, 15, NA),
pt.cex = c(1.5, 1.3, 1.3, NA),
col = c(
"black", "black",
scales::alpha("gray20", 0.75),
scales::alpha("gray20", 0.75)
),
pt.bg = c(
"#00AADE",
"white",
scales::alpha("gray20", 0.75),
NA
),
cex = 1,
# box.col = "white",
bty = "n",
title = "Species specific",
title.col = "white"
)
# And the community stuff
legend(
x = -2.5,
y = 33,
legend = c(
"E(among species)",
"95% among species CI"
),
lty = c(3, NA),
lwd = c(2, NA),
pch = c(NA, 15),
pt.cex = c(NA, 1.3),
col = c("gray30", "gray80"),
cex = 1,
# box.col = "white",
bty = "n",
title = "Community",
title.col = "white"
)
# Doing my own titles, plus an underline.
lines(
x = c(-4.8, -3.2),
y = rep(31.45,2)
)
text(
x = -4,
y = 33,
labels = "Species specific",
pos = 1,
cex = 1.1
)
lines(
x = c(-1.85, -0.75),
y = rep(31.45,2)
)
text(
x = -1.325,
y = 33,
labels = "Community",
pos = 1,
cex = 1.1
)
dev.off()
|
library(shiny)
library(bs4Dash)
# Minimal bs4Dash demo: two accordions plus a radio-button "controller".
# Selecting a radio value programmatically opens the matching item in
# accordion1 and pops a toast reporting the open item.
shinyApp(
ui = dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
# Drives updateAccordion() below; choices map to accordion item numbers.
radioButtons("controller", "Controller", choices = c(1, 2)),
br(),
# First accordion: item 2 starts open (collapsed = FALSE).
accordion(
id = "accordion1",
accordionItem(
title = "Accordion 1 Item 1",
status = "danger",
collapsed = TRUE,
"This is some text!"
),
accordionItem(
title = "Accordion 1 Item 2",
status = "warning",
collapsed = FALSE,
"This is some text!"
)
),
# Second accordion is static -- the server never updates it.
accordion(
id = "accordion2",
accordionItem(
title = "Accordion 2 Item 1",
status = "info",
collapsed = TRUE,
"This is some text!"
),
accordionItem(
title = "Accordion 2 Item 2",
status = "success",
collapsed = FALSE,
"This is some text!"
)
)
),
title = "Accordion"
),
server = function(input, output, session) {
# Open the accordion1 item matching the radio selection.
observeEvent(input$controller, {
updateAccordion(id = "accordion1", selected = input$controller)
})
# Log the currently open item to the console.
observe(print(input$accordion1))
# Notify the user whenever the open item changes.
observeEvent(input$accordion1, {
toast(sprintf("You selected accordion N° %s", input$accordion1))
})
}
) | /shiny/bs4Dash/bs4Dash_accordion.R | no_license | piyayut-ch/tutorial | R | false | false | 1,327 | r | library(shiny)
library(bs4Dash)
shinyApp(
ui = dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
radioButtons("controller", "Controller", choices = c(1, 2)),
br(),
accordion(
id = "accordion1",
accordionItem(
title = "Accordion 1 Item 1",
status = "danger",
collapsed = TRUE,
"This is some text!"
),
accordionItem(
title = "Accordion 1 Item 2",
status = "warning",
collapsed = FALSE,
"This is some text!"
)
),
accordion(
id = "accordion2",
accordionItem(
title = "Accordion 2 Item 1",
status = "info",
collapsed = TRUE,
"This is some text!"
),
accordionItem(
title = "Accordion 2 Item 2",
status = "success",
collapsed = FALSE,
"This is some text!"
)
)
),
title = "Accordion"
),
server = function(input, output, session) {
observeEvent(input$controller, {
updateAccordion(id = "accordion1", selected = input$controller)
})
observe(print(input$accordion1))
observeEvent(input$accordion1, {
toast(sprintf("You selected accordion N° %s", input$accordion1))
})
}
) |
rm(list=ls())
setwd('C:\\Users\\zhu2\\Documents\\network_final')
# load("~/getpathway/gene39761.rda.RData")
# Y.exp <- expinpath[-3]
rm(list=ls()[-grep('Y.exp',ls())])
load('pathnet.rda'); load('rlt_expnetwork.rda')
#inputdata
setwd('C:\\Users\\zhu2\\Documents\\signaling\\codes\\')
source('sparse_2sem_final.R')
source('local_cnif_macro.R')
source('CNIF.R')
sourceCpp("score_function_regression.cpp")
sourceCpp("simple_cycle.cpp")
sourceCpp("initial_sem.cpp")
setwd('C:\\Users\\zhu2\\Documents\\getpathway')
library(flare)
library(grplasso)
library(data.table)
library(dplyr)
library(igraph)
# rlt <- rlt[sapply(rlt,is.list)]
#Batch
# Fit a sparse two-stage SEM for pathway i against every pathway linked
# to it in the pathway network.
#
# Relies on objects from the enclosing script: `rlt` (per-pathway list
# with $data and $adj$eq_matrix), `pathnet$pathnet` (pathway-by-pathway
# weight matrix), and sparse_2sem() from the sourced helper files.
#
# Args:
#   i: index of the focal pathway in `rlt`.
# Returns: a list with one fitted eq_matrix (or a try-error) per
#   connected pathway.
test <- function(i) {
  print(i)
  print('setup')
  pathi <- names(rlt)[i]
  # Pathways whose connection weight to the focal pathway is >= 0.1.
  net <- pathnet$pathnet
  is_linked <- net[grep(pathi, rownames(net)), , drop = F] >= .1
  x.path <- colnames(net)[is_linked]
  print(x.path)
  # Response matrix and its fixed adjacency constraints.
  Y <- as.matrix(rlt[[i]]$data)
  Y.fixed <- rlt[[i]]$adj$eq_matrix
  # One predictor matrix per connected pathway.
  Xs <- lapply(match(x.path, names(rlt)), function(k) {
    as.matrix(rlt[[k]]$data)
  })
  j <- 0
  print('modeling')
  # Fit each model, logging a running counter; try() keeps one failed
  # fit from aborting the whole batch.
  temps <- lapply(Xs, function(x) {
    print(paste(j <<- j + 1, x.path[j]))
    try(sparse_2sem(Y = Y, Y.fixed = Y.fixed, X = x, lambda = 0.2)$eq_matrix)
  })
  return(temps)
}
rlt <- lapply(1:length(rlt),test)
#Processing Result
| /projects/pathbased_genenet.R | no_license | wenrurumon/directed_network | R | false | false | 1,271 | r |
rm(list=ls())
setwd('C:\\Users\\zhu2\\Documents\\network_final')
# load("~/getpathway/gene39761.rda.RData")
# Y.exp <- expinpath[-3]
rm(list=ls()[-grep('Y.exp',ls())])
load('pathnet.rda'); load('rlt_expnetwork.rda')
#inputdata
setwd('C:\\Users\\zhu2\\Documents\\signaling\\codes\\')
source('sparse_2sem_final.R')
source('local_cnif_macro.R')
source('CNIF.R')
sourceCpp("score_function_regression.cpp")
sourceCpp("simple_cycle.cpp")
sourceCpp("initial_sem.cpp")
setwd('C:\\Users\\zhu2\\Documents\\getpathway')
library(flare)
library(grplasso)
library(data.table)
library(dplyr)
library(igraph)
# rlt <- rlt[sapply(rlt,is.list)]
#Batch
test <- function(i){
print(i)
print('setup')
pathi <- names(rlt)[i]
x.path <- colnames(pathnet$pathnet)[pathnet$pathnet[grep(pathi,rownames(pathnet$pathnet)),,drop=F]>=.1]
print(x.path)
Y <- as.matrix(rlt[[i]]$data)
Y.fixed <- rlt[[i]]$adj$eq_matrix
Xs <- lapply(match(x.path,names(rlt)),function(i){
X <- rlt[[i]]$data
as.matrix(X)
})
j <- 0
print('modeling')
temps <- lapply(Xs,function(x){
print(paste(j<<-j+1,x.path[j]))
temp <- try(sparse_2sem(Y=Y,Y.fixed=Y.fixed,X=x,lambda=0.2)$eq_matrix)
temp
})
return(temps)
}
rlt <- lapply(1:length(rlt),test)
#Processing Result
|
# Qualitative bankruptcy classification with a naive Bayes model.
# NOTE(review): absolute Windows paths make this script machine-specific.
brupt <- read.csv("F:\\Statistics\\Cases\\Qualitative_Bankruptcy\\Qualitative_Bankruptcy.data.txt")
library(caret)
# Fixed seed so the 70/30 train/validation split is reproducible.
set.seed(333)
intrain <- createDataPartition(y=brupt$Class,p=0.7,list = FALSE)
training <- brupt[intrain, ]
validation <- brupt[-intrain,]
library(e1071)
# Fit naive Bayes: columns 1-6 are the predictors, column 7 the class.
classifier <- naiveBayes(training[,1:6], training[,7])
# Hard class predictions and raw posterior probabilities on the
# hold-out set.
PredY <- predict(classifier, newdata=validation[,-7],
type="class")
PredYProb <- predict(classifier, newdata=validation[,-7],type="raw")
# Confusion matrix: two equivalent ways to build it.
tbl <- table(PredY, validation[,7],dnn=list('predicted','actual'))
confusionMatrix(tbl)
# OR
confusionMatrix(PredY, validation[,7],dnn=list('predicted','actual'))
postResample(PredY, validation[,7]) # For factor variables
library(pROC)
# ROC curve from the posterior probabilities of the first class column.
plot.roc(validation[,7],PredYProb[,1],
legacy.axes=TRUE,print.auc=TRUE )
#### Predicting
# Score new, unlabeled cases with the fitted classifier.
tp <- read.csv("F:\\Statistics\\Cases\\Qualitative_Bankruptcy\\ToPredict.csv")
predBR <- predict(classifier, newdata = tp,type = "class")
predicted <- data.frame(tp,predBR)
| /Naive Bayes/Q_Bankrupt.R | no_license | nileshsbartakke/machine_learning | R | false | false | 1,042 | r | brupt <- read.csv("F:\\Statistics\\Cases\\Qualitative_Bankruptcy\\Qualitative_Bankruptcy.data.txt")
library(caret)
set.seed(333)
intrain <- createDataPartition(y=brupt$Class,p=0.7,list = FALSE)
training <- brupt[intrain, ]
validation <- brupt[-intrain,]
library(e1071)
classifier <- naiveBayes(training[,1:6], training[,7])
PredY <- predict(classifier, newdata=validation[,-7],
type="class")
PredYProb <- predict(classifier, newdata=validation[,-7],type="raw")
tbl <- table(PredY, validation[,7],dnn=list('predicted','actual'))
confusionMatrix(tbl)
# OR
confusionMatrix(PredY, validation[,7],dnn=list('predicted','actual'))
postResample(PredY, validation[,7]) # For factor variables
library(pROC)
plot.roc(validation[,7],PredYProb[,1],
legacy.axes=TRUE,print.auc=TRUE )
#### Predicting
tp <- read.csv("F:\\Statistics\\Cases\\Qualitative_Bankruptcy\\ToPredict.csv")
predBR <- predict(classifier, newdata = tp,type = "class")
predicted <- data.frame(tp,predBR)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oXim-main.R
\name{getOxyrange}
\alias{getOxyrange}
\title{Takes a matrix of echogram and calculate Oxycline.}
\usage{
getOxyrange(fluidMatrix, filterSettings = NULL, stepBYstep = FALSE, ...)
}
\arguments{
\item{fluidMatrix}{Object of class \code{echoData} (from \code{\link{readEchograms}} function) with echogram}
\item{filterSettings}{List with combination of filters.}
\item{stepBYstep}{\code{logical}. If \code{FALSE} (default), returns just original and final echogram, otherwise each
echogram (after applying filters one by one) will be returned.}
\item{...}{Arguments passed to \code{\link{smooth.spline}} function. See Details.}
}
\description{
This function takes a filter configuration and applies it to the echograms given in an \code{echoData} object.
}
\details{
If \code{filterSettings = NULL}, oXim will use the filter configuration present in the \code{defaultFilterSettings}
data set. For extra details about image filters, see the \code{\link{createFilterSetting}} help.
Application of filters may produce some gaps in the final matrix. In order to fill them, the function uses
\code{\link{smooth.spline}} whose arguments can be passed using by \code{...}.
}
\examples{
fileMode <- list(fish38_file = system.file("extdata", "fish38.mat", package = "oXim"),
fluid120_file = system.file("extdata", "fluid120.mat", package = "oXim"),
blue38_file = system.file("extdata", "blue38.mat", package = "oXim"))
echoData <- readEchograms(fileMode = fileMode)
oxyLimits <- getOxyrange(fluidMatrix = echoData)
}
| /man/getOxyrange.Rd | no_license | cran/oXim | R | false | true | 1,655 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oXim-main.R
\name{getOxyrange}
\alias{getOxyrange}
\title{Takes a matrix of echogram and calculate Oxycline.}
\usage{
getOxyrange(fluidMatrix, filterSettings = NULL, stepBYstep = FALSE, ...)
}
\arguments{
\item{fluidMatrix}{Object of class \code{echoData} (from \code{\link{readEchograms}} function) with echogram}
\item{filterSettings}{List with combination of filters.}
\item{stepBYstep}{\code{logical}. If \code{FALSE} (default), returns just original and final echogram, otherwise each
echogram (after applying filters one by one) will be returned.}
\item{...}{Arguments passed to \code{\link{smooth.spline}} function. See Details.}
}
\description{
This function takes a filter configuration and applies it to the echograms given in an \code{echoData} object.
}
\details{
If \code{filterSettings = NULL}, oXim will use filter configuration present on \code{defaultFilterSettings}
data set. For extra details about image filters, see \code{\link{createFilterSetting}} help.
Application of filters may produce some gaps in the final matrix. In order to fill them, the function uses
\code{\link{smooth.spline}} whose arguments can be passed using by \code{...}.
}
\examples{
fileMode <- list(fish38_file = system.file("extdata", "fish38.mat", package = "oXim"),
fluid120_file = system.file("extdata", "fluid120.mat", package = "oXim"),
blue38_file = system.file("extdata", "blue38.mat", package = "oXim"))
echoData <- readEchograms(fileMode = fileMode)
oxyLimits <- getOxyrange(fluidMatrix = echoData)
}
|
source("C:/Users/User/Documents/Programming/Maths/Project Euler/Useful.R")
# Sum of the squared decimal digits of a non-negative integer.
# Rewritten with modular arithmetic (%% / %/%) so it no longer depends
# on the sourced helper num.to.vec.fast() and allocates no digit vector.
# Example: iterate(89) == 8^2 + 9^2 == 145.
iterate = function(x) {
  total <- 0
  while (x > 0) {
    digit <- x %% 10
    total <- total + digit * digit
    x <- x %/% 10
  }
  total
}
# Project Euler 92: count starting numbers below ten million whose
# square-digit chain terminates at 89 (every chain ends at 1 or 89).
tm = 10000000
# book[i] memoizes the terminal value (1 or 89) of i's chain; the two
# fixed points seed the table.
book = rep(NA,tm)
book[1] = 1
book[89] = 89
# Crude progress reporting in 0.1% increments.
progress = 0
old_progress = 0
for (i in 1:tm) {
progress = 100*i/tm
if (progress > old_progress + 0.1) {
cat(progress, "%", "\n")
old_progress = progress
}
# Walk the chain from i until a value with a known endpoint appears.
# iterate() of any number <= 9999999 is at most 7 * 81 = 567, so book[j]
# always stays in bounds. Only book[i] is cached, not the intermediate
# chain values.
j = i
while (TRUE) {
j = iterate(j)
if (!is.na(book[j])) {
book[i] = book[j]
break
}
}
}
# The answer: how many chains terminate at 89.
sum(book == 89)
| /Problem 92/Problem 92.R | no_license | samuelweller21/Project-Euler | R | false | false | 617 | r | source("C:/Users/User/Documents/Programming/Maths/Project Euler/Useful.R")
# Sum of the squared decimal digits of a non-negative integer.
# Rewritten with modular arithmetic (%% / %/%) so it no longer depends
# on the sourced helper num.to.vec.fast() and allocates no digit vector.
# Example: iterate(89) == 8^2 + 9^2 == 145.
iterate = function(x) {
  total <- 0
  while (x > 0) {
    digit <- x %% 10
    total <- total + digit * digit
    x <- x %/% 10
  }
  total
}
tm = 10000000
book = rep(NA,tm)
book[1] = 1
book[89] = 89
progress = 0
old_progress = 0
for (i in 1:tm) {
progress = 100*i/tm
if (progress > old_progress + 0.1) {
cat(progress, "%", "\n")
old_progress = progress
}
j = i
while (TRUE) {
j = iterate(j)
if (!is.na(book[j])) {
book[i] = book[j]
break
}
}
}
sum(book == 89)
|
# Description
# Author: Timothy Keyes
# Version: 2020-02-17
# Libraries
library(flowCore)
library(tidyverse)
# Parameters
input_path <- file.path("~", "Desktop", "new_healthies")
out_data <- here::here("c01-own", "data")
marker_path <- here::here("c01-own", "docs", "ALL_panel.csv")
#===============================================================================
#read in names of markers in the dataset
# Read the marker panel and strip parentheses from the metal names so
# they match the cleaned FCS column names used later in the script.
marker_names <-
  marker_path %>%
  read_csv() %>%
  mutate(Metal = str_replace_all(Metal, "[()]", ""))
# BUG FIX: the next line referenced `temp`, which is never defined in
# this script, so running the file top-to-bottom stopped here with
# "object 'temp' not found". It looks like a leftover interactive
# experiment with the regex used below; kept for reference, disabled.
# str_extract(string = temp, pattern = "\\d{2}_([:alpha:]+_?)+(\\d?)+")
my_data <-
input_path %>%
list.files(path = ., full.names = TRUE) %>%
str_extract(string = ., pattern = "\\d{2}_([:alpha:]+_?)+(\\d?)+") %>%
str_sub(start = 4L) %>%
tibble(population = .) %>%
transmute(
file_name = list.files(path = input_path, full.names = TRUE),
population,
data =
map(
file_name,
~
read.FCS(
filename = .,
transformation = FALSE,
truncate_max_range = FALSE
) %>%
flowCore::exprs() %>%
as_tibble()
)
)
col_names <-
map(my_data$data, colnames) %>%
unlist() %>%
str_replace_all(pattern = "[()]", replacement = "") %>%
table() %>%
enframe() %>%
arrange(value)
#may have to change this data structure to use regular expressions
lookup_table <-
setNames(object = marker_names$Metal, nm = marker_names$protein)
#lookup_table["CD34"] <- "Sm148Di"
# Subset `data` to the channel columns listed in lookup_table (a named
# character vector built with setNames(): names are protein names,
# values are metal/channel column names) and rename them metal -> protein.
# Parentheses are stripped from column names first so they match the
# cleaned lookup values.
tof_rename <- function(data, lookup_table) {
colnames(data) <- str_replace_all(colnames(data), "[()]", "")
# Keep only the lookup entries whose channel actually appears in data.
my_lookups <- (lookup_table[which(lookup_table %in% colnames(data))])
data %>%
select(which(colnames(.) %in% lookup_table)) %>%
rename(!!! my_lookups)
}
my_data <-
my_data %>%
group_by(file_name, population) %>%
unnest(cols = data) %>%
ungroup() %>%
rename(
Tdt = TdT
)
write_rds(
x = my_data, path = file.path(out_data, "population_data.rds"), compress = "gz"
)
| /CS_230/course_project/r_scripts/read_healthy_data.R | no_license | keyes-timothy/classes | R | false | false | 2,040 | r | # Description
# Author: Timothy Keyes
# Version: 2020-02-17
# Libraries
library(flowCore)
library(tidyverse)
# Parameters
input_path <- file.path("~", "Desktop", "new_healthies")
out_data <- here::here("c01-own", "data")
marker_path <- here::here("c01-own", "docs", "ALL_panel.csv")
#===============================================================================
#read in names of markers in the dataset
# Read the marker panel and strip parentheses from the metal names so
# they match the cleaned FCS column names used later in the script.
marker_names <-
  marker_path %>%
  read_csv() %>%
  mutate(Metal = str_replace_all(Metal, "[()]", ""))
# BUG FIX: the next line referenced `temp`, which is never defined in
# this script, so running the file top-to-bottom stopped here with
# "object 'temp' not found". It looks like a leftover interactive
# experiment with the regex used below; kept for reference, disabled.
# str_extract(string = temp, pattern = "\\d{2}_([:alpha:]+_?)+(\\d?)+")
my_data <-
input_path %>%
list.files(path = ., full.names = TRUE) %>%
str_extract(string = ., pattern = "\\d{2}_([:alpha:]+_?)+(\\d?)+") %>%
str_sub(start = 4L) %>%
tibble(population = .) %>%
transmute(
file_name = list.files(path = input_path, full.names = TRUE),
population,
data =
map(
file_name,
~
read.FCS(
filename = .,
transformation = FALSE,
truncate_max_range = FALSE
) %>%
flowCore::exprs() %>%
as_tibble()
)
)
col_names <-
map(my_data$data, colnames) %>%
unlist() %>%
str_replace_all(pattern = "[()]", replacement = "") %>%
table() %>%
enframe() %>%
arrange(value)
#may have to change this data structure to use regular expressions
lookup_table <-
setNames(object = marker_names$Metal, nm = marker_names$protein)
#lookup_table["CD34"] <- "Sm148Di"
tof_rename <- function(data, lookup_table) {
colnames(data) <- str_replace_all(colnames(data), "[()]", "")
my_lookups <- (lookup_table[which(lookup_table %in% colnames(data))])
data %>%
select(which(colnames(.) %in% lookup_table)) %>%
rename(!!! my_lookups)
}
my_data <-
my_data %>%
group_by(file_name, population) %>%
unnest(cols = data) %>%
ungroup() %>%
rename(
Tdt = TdT
)
write_rds(
x = my_data, path = file.path(out_data, "population_data.rds"), compress = "gz"
)
|
#setwd("~/Projects/DFS/")
#setwd("~/Documents/PrincetonFall16/fantasyfootball/DFS/")
# Downloads a DraftKings NFL contest-standings export and files it under
# the matching contest folder in NFL/data_warehouse/<date>/.
#
# Args:
#   contest_number: DK contest id used in the export URL and file names.
#   date:           folder name (a date) under NFL/data_warehouse.
#   contest_name:   contest name matched (uppercased) against the
#                   cleaned folder names in that date directory.
#
# Side effects: opens the default browser, polls ~/Downloads for the
# export, deletes the downloaded file, and writes contest-standings.csv
# into the matched folder. The working directory is changed repeatedly;
# NOTE(review): it is only restored on the success path (no on.exit()),
# and the while loops poll forever if the download never appears.
download_DK_contest_file_NFL <- function(contest_number, date, contest_name) {
### Local Function
# TRUE if haystack ends with needle (exact suffix comparison).
strEndsWith <- function(haystack, needle)
{
hl <- nchar(haystack)
nl <- nchar(needle)
if(nl>hl)
{
return(F)
} else
{
return(substr(haystack, hl-nl+1, hl) == needle)
}
}
original_wd <- getwd()
# Trigger the export download in the default browser.
browseURL(paste0('https://www.draftkings.com/contest/exportfullstandingscsv/', contest_number))
setwd('~/Downloads')
# Difference is due to Default Brower being Chrome vs. Safari for Alan and Michael Respectively.
#For Alans computer run this line:
if(original_wd != "/Users/Michael/Projects/DFS") {
# Chrome saves a .zip: wait for it, unzip, then discard the archive.
while(!file.exists(paste0("contest-standings-", contest_number, ".zip"))){
Sys.sleep(1)
}
unzip(paste0("contest-standings-", contest_number, ".zip"))
file.remove(paste0("contest-standings-", contest_number, ".zip"))
# Michael's Computer runs this line
} else {
# Safari saves the bare .csv: just wait for it to appear.
while(!file.exists(paste0("contest-standings-", contest_number, ".csv"))){
Sys.sleep(1)
}
}
contest <- read.csv(paste0("contest-standings-", contest_number, ".csv"), stringsAsFactors = F)
file.remove(paste0("contest-standings-", contest_number, ".csv"))
setwd(original_wd)
setwd(paste0('NFL/data_warehouse/', date))
# Normalize folder names (uppercase, spaces stripped) before matching.
file.sources = list.files()
cleaned_files <- toupper(file.sources)
cleaned_files <- gsub(" ", "", cleaned_files, fixed = TRUE)
folder <- grep(toupper(get('contest_name')), cleaned_files, value=TRUE)
# Pick the folder whose cleaned name ends with the uppercased contest
# name. NOTE(review): get('contest_name') is unnecessary indirection for
# a plain argument, and correct_index stays 0 (an invalid subscript)
# when nothing matches -- TODO confirm the intended failure mode.
correct_index = 0
for(index in 1:length(cleaned_files)) {
if(strEndsWith(cleaned_files[index], toupper(get('contest_name')))) {
correct_index = index
}
}
setwd(file.sources[correct_index])
write.csv(contest, file = 'contest-standings.csv', row.names = F)
setwd(original_wd)
}
| /scrapingContestData/download_DK_contest_file_NFL.R | no_license | alandu20/dailyfantasy | R | false | false | 1,922 | r | #setwd("~/Projects/DFS/")
#setwd("~/Documents/PrincetonFall16/fantasyfootball/DFS/")
download_DK_contest_file_NFL <- function(contest_number, date, contest_name) {
### Local Function
strEndsWith <- function(haystack, needle)
{
hl <- nchar(haystack)
nl <- nchar(needle)
if(nl>hl)
{
return(F)
} else
{
return(substr(haystack, hl-nl+1, hl) == needle)
}
}
original_wd <- getwd()
browseURL(paste0('https://www.draftkings.com/contest/exportfullstandingscsv/', contest_number))
setwd('~/Downloads')
# Difference is due to Default Brower being Chrome vs. Safari for Alan and Michael Respectively.
#For Alans computer run this line:
if(original_wd != "/Users/Michael/Projects/DFS") {
while(!file.exists(paste0("contest-standings-", contest_number, ".zip"))){
Sys.sleep(1)
}
unzip(paste0("contest-standings-", contest_number, ".zip"))
file.remove(paste0("contest-standings-", contest_number, ".zip"))
# Michael's Computer runs this line
} else {
while(!file.exists(paste0("contest-standings-", contest_number, ".csv"))){
Sys.sleep(1)
}
}
contest <- read.csv(paste0("contest-standings-", contest_number, ".csv"), stringsAsFactors = F)
file.remove(paste0("contest-standings-", contest_number, ".csv"))
setwd(original_wd)
setwd(paste0('NFL/data_warehouse/', date))
file.sources = list.files()
cleaned_files <- toupper(file.sources)
cleaned_files <- gsub(" ", "", cleaned_files, fixed = TRUE)
folder <- grep(toupper(get('contest_name')), cleaned_files, value=TRUE)
correct_index = 0
for(index in 1:length(cleaned_files)) {
if(strEndsWith(cleaned_files[index], toupper(get('contest_name')))) {
correct_index = index
}
}
setwd(file.sources[correct_index])
write.csv(contest, file = 'contest-standings.csv', row.names = F)
setwd(original_wd)
}
|
library(testthat)
context("map_fields")
# map_fields() should rename columns x/y to the requested names without
# changing the column count.
test_that("map_fields throws no errors", {
df <- map_fields(data.frame(x=1:3, y=4:6),
list(decimalLongitude="x", decimalLatitude="y"))
# Both mapped names are present...
expect_true(all(c("decimalLongitude", "decimalLatitude") %in% names(df)))
# ...and no columns were added or dropped.
expect_equal(ncol(df), 2)
})
| /tests/testthat/test_map_fields.R | permissive | iobis/obistools | R | false | false | 305 | r | library(testthat)
context("map_fields")
test_that("map_fields throws no errors", {
df <- map_fields(data.frame(x=1:3, y=4:6),
list(decimalLongitude="x", decimalLatitude="y"))
expect_true(all(c("decimalLongitude", "decimalLatitude") %in% names(df)))
expect_equal(ncol(df), 2)
})
|
\name{rmeanplot}
\alias{rmeanplot}
\title{
Running Mean Plots of Multiple Parameters
}
\description{
This function produces running mean plots from an MCMC simulation on a single plot for all parameters (by default) or those parameters indicated by the \code{parms} argument.
}
\usage{
rmeanplot(mcmcout, parms = NULL, regex = NULL, random = NULL,
leaf.marker = "[\\\\[_]", ylim = NULL, auto.layout = TRUE,
mar = c(2, 2, 1.5, 0.25) + 0.1, col = NULL, lty = 1,
plot.title = NULL, main = NULL, greek = FALSE,
style = c("gray", "plain"), ...)
}
\arguments{
\item{mcmcout}{ an object that can be coerced to an \code{mcmc} or \code{mcmc.list} object }
\item{parms}{ character vector specifying which subsets of parameters to plot. If \code{NULL}, \code{mcmcplot} will plot all parameters. Regular expressions are used to strip all numbers and punctuation out of the parameter names to find the parameters that match the character strings in \code{parms}.}
\item{regex}{ character vector of regular expressions denoting groups of parameters to plot.}
\item{random}{ an integer indicating the maximum number of parameters to randomly select for plotting from each group of parameters as specified by the \code{parms} argument. }
\item{leaf.marker}{ a regular expression with a character class that marks the beginning of the ``leaf'' portion of a parameter name. The default character class includes \code{[} and \code{_}}
\item{ylim}{ limits for the y-axis.}
\item{auto.layout}{automatically creates a plot layout using \code{mult.fig} if \code{TRUE}.}
\item{mar}{ argument passed to \code{multi.fig} if \code{auto.layout=TRUE}}
\item{col}{colors to be used in plotting the densities. Default is \code{mcmcplotsPalette(nchains)}.}
\item{lty}{line types to be used in plotting.}
\item{plot.title}{ title to put in the outer margin. Default is no title.}
\item{main}{ main title for the plots. Default is to use parameter names.}
\item{greek}{ if \code{TRUE}, the names of greek letters in the \code{labels} will be displayed as greek characters on the plot.}
\item{style}{ if "gray", then the plotting region is printed with a gray background, otherwise the default plotting region is used.}
\item{\ldots}{ further arguments passed to the plotting function. }
}
\value{
Creates a plot.
}
\author{
Evangelos Evangelou
}
\examples{
## Create fake MCMC output
nc <- 10; nr <- 1000
pnames <- c(paste("alpha[", 1:5, "]", sep=""), paste("gamma[", 1:5, "]", sep=""))
means <- rpois(10, 20)
fakemcmc <- coda::as.mcmc.list(
lapply(1:3,
function(i) coda::mcmc(matrix(rnorm(nc*nr, rep(means,each=nr)),
nrow=nr, dimnames=list(NULL,pnames)))))
## Plot traces of the fake MCMC output
rmeanplot(fakemcmc)
rmeanplot(fakemcmc, style="plain")
rmeanplot(fakemcmc, "gamma", greek=TRUE)
}
\keyword{ hplot }
| /man/rmeanplot.Rd | no_license | cran/mcmcplots | R | false | false | 2,945 | rd | \name{rmeanplot}
\alias{rmeanplot}
\title{
Running Mean Plots of Multiple Parameters
}
\description{
This function produces running mean plots from an MCMC simulation on a single plot for all parameters (by default) or those parameters indicated by the \code{parms} argument.
}
\usage{
rmeanplot(mcmcout, parms = NULL, regex = NULL, random = NULL,
leaf.marker = "[\\\\[_]", ylim = NULL, auto.layout = TRUE,
mar = c(2, 2, 1.5, 0.25) + 0.1, col = NULL, lty = 1,
plot.title = NULL, main = NULL, greek = FALSE,
style = c("gray", "plain"), ...)
}
\arguments{
\item{mcmcout}{ an object that can be coerced to an \code{mcmc} or \code{mcmc.list} object }
\item{parms}{ character vector specifying which subsets of parameters to plot. If \code{NULL}, \code{mcmcplot} will plot all parameters. Regular expressions are used to strip all numbers and punctuation out of the parameter names to find the parameters that match the character strings in \code{parms}.}
\item{regex}{ character vector of regular expressions denoting groups of parameters to plot.}
\item{random}{ an integer indicating the maximum number of parameters to randomly select for plotting from each group of parameters as specified by the \code{parms} argument. }
\item{leaf.marker}{ a regular expression with a character class that marks the beginning of the ``leaf'' portion of a parameter name. The default character class includes \code{[} and \code{_}}
\item{ylim}{ limits for the y-axis.}
\item{auto.layout}{automatically creates a plot layout using \code{mult.fig} if \code{TRUE}.}
\item{mar}{ argument passed to \code{multi.fig} if \code{auto.layout=TRUE}}
\item{col}{colors to be used in plotting the densities. Default is \code{mcmcplotsPalette(nchains)}.}
\item{lty}{line types to be used in plotting.}
\item{plot.title}{ title to put in the outer margin. Default is no title.}
\item{main}{ main title for the plots. Default is to use parameter names.}
\item{greek}{ if \code{TRUE}, the names of greek letters in the \code{labels} will be displayed as greek characters on the plot.}
\item{style}{ if "gray", then the plotting region is printed with a gray background, otherwise the default plotting region is used.}
\item{\ldots}{ further arguments passed to the plotting function. }
}
\value{
Creates a plot.
}
\author{
Evangelos Evangelou
}
\examples{
## Create fake MCMC output
nc <- 10; nr <- 1000
pnames <- c(paste("alpha[", 1:5, "]", sep=""), paste("gamma[", 1:5, "]", sep=""))
means <- rpois(10, 20)
fakemcmc <- coda::as.mcmc.list(
lapply(1:3,
function(i) coda::mcmc(matrix(rnorm(nc*nr, rep(means,each=nr)),
nrow=nr, dimnames=list(NULL,pnames)))))
## Plot traces of the fake MCMC output
rmeanplot(fakemcmc)
rmeanplot(fakemcmc, style="plain")
rmeanplot(fakemcmc, "gamma", greek=TRUE)
}
\keyword{ hplot }
|
\name{mstage}
\alias{mstage}
\title{Multistage sampling}
\description{Implements multistage sampling with equal/unequal probabilities.}
\usage{mstage(data, stage=c("stratified","cluster",""), varnames, size,
method=c("srswor","srswr","poisson","systematic"), pik, description=FALSE)}
\arguments{
\item{data}{data frame or data matrix; its number of rows is N, the population size.}
\item{stage}{list of sampling types at each stage; the possible values are: "stratified", "cluster"
and "" (without stratification or clustering). For multistage element sampling, this argument is not necessary.}
\item{varnames}{list of stratification or clustering variables.}
\item{size}{list of sample sizes (in the order in which the samples appear in the multistage sampling).}
\item{method}{list of methods to select units at each stage; the following methods are implemented: simple random
sampling without replacement (srswor), simple random sampling with replacement (srswr),
Poisson sampling (poisson), systematic sampling (systematic); if the method is not specified,
by default the method is "srswor". The method can be different at each stage.}
\item{pik}{list of selection probabilities or auxiliary information used to compute them;
this argument is only used for unequal probability sampling (Poisson, systematic). If an
auxiliary information is provided, the function uses the \link{inclusionprobabilities} function for
computing these probabilities.}
\item{description}{a message is printed if its value is TRUE; the message gives the number
of selected units and the number of the units in the population.
By default, its value is FALSE.}
}
\value{
The function returns a list, which contains the stages (if m is this list, the stage i is m$'i' etc)
and the following information:
\item{ID_unit}{the identifier of selected units at each stage.}
\item{Prob_ number _stage}{the inclusion probability at stage 'number'.}
\item{Prob}{the final unit inclusion probability given in the last stage; it is the product of unit inclusion probabilities at each stage.}
}
\details{The data should be sorted in ascending order by the columns given in the varnames argument before applying the function. Use, for example, data[order(data$state,data$region),].
}
\seealso{
\code{\link{cluster}}, \code{\link{strata}}, \code{\link{getdata}}}
\examples{
############
## Example 1
############
# Two-stage cluster sampling
# Uses the 'swissmunicipalities' data
data(swissmunicipalities)
b=swissmunicipalities
b=b[order(b$REG,b$CT),]
attach(b)
# the variable 'REG' (region) has 7 categories;
# it is used as clustering variable in the first-stage sample
# the variable 'CT' (canton) has 26 categories;
# it is used as clustering variable in the second-stage sample
# 4 clusters (regions) are selected in the first-stage
# 1 canton is selected in the second-stage from each sampled region
# the method is simple random sampling without replacement in each stage
# (equal probability, without replacement)
m=mstage(b,stage=list("cluster","cluster"), varnames=list("REG","CT"),
size=list(4,c(1,1,1,1)), method=list("srswor","srswor"))
# the first stage is m[[1]], the second stage is m[[2]]
#the selected regions
unique(m[[1]]$REG)
#the selected cantons
unique(m[[2]]$CT)
# extracts the observed data
x=getdata(b,m)[[2]]
# check the output
table(x$REG,x$CT)
############
## Example 2
############
# Two-stage element sampling
# Generates artificial data (a 235X3 matrix with 3 columns: state, region, income).
# The variable "state" has 2 categories ('n','s').
# The variable "region" has 5 categories ('A', 'B', 'C', 'D', 'E').
# The variable "income" is generated using the U(0,1) distribution.
data=rbind(matrix(rep('n',165),165,1,byrow=TRUE),matrix(rep('s',70),70,1,byrow=TRUE))
data=cbind.data.frame(data,c(rep('A',115),rep('D',10),rep('E',40),rep('B',30),rep('C',40)),
100*runif(235))
names(data)=c("state","region","income")
data=data[order(data$state,data$region),]
table(data$state,data$region)
# the method is simple random sampling without replacement
# 25 units are drawn in the first-stage
# in the second-stage, 10 units are drawn from the already 25 selected units
m=mstage(data,size=list(25,10),method=list("srswor","srswor"))
# the first stage is m[[1]], the second stage is m[[2]]
# extracts the observed data
xx=getdata(data,m)[[2]]
xx
# check the result
table(xx$state,xx$region)
############
## Example 3
############
# Stratified one-stage cluster sampling
# The same data as in Example 2
# the variable 'state' is used as stratification variable
# 165 units are in the first stratum and 70 in the second one
# the variable 'region' is used as clustering variable
# 1 cluster (region) is drawn in each state using "srswor"
m=mstage(data, stage=list("stratified","cluster"), varnames=list("state","region"),
size=list(c(165,70),c(1,1)),method=list("","srswor"))
# check the first stage
table(m[[1]]$state)
# check the second stage
table(m[[2]]$region)
# extracts the observed data
xx=getdata(data,m)[[2]]
# check the result
table(xx$state,xx$region)
############
## Example 4
############
# Two-stage cluster sampling
# The same data as in Example 1
# in the first-stage, the clustering variable is 'REG' (region) with 7 categories
# 4 clusters (regions) are drawn in the first-stage
# each region is selected with the probability 4/7
# in the second-stage, the clustering variable is 'CT'(canton) with 26 categories
# 1 cluster (canton) is drawn in the second-stage from each selected region
# in region 1, there are 3 cantons; one canton is selected with prob. 0.2, 0.4, 0.4, resp.
# in region 2, there are 5 cantons; each canton is selected with the prob. 1/5
# in region 3, there are 3 cantons; each canton is selected with the prob. 1/3
# in region 4, there is 1 canton, which is selected with the prob. 1
# in region 5, there are 7 cantons; each canton is selected with the prob. 1/7
# in region 6, there are 6 cantons; each canton is selected with the prob. 1/6
# in region 7, there is 1 canton, which is selected with the prob. 1
# it is necessary to use a list of selection probabilities at each stage
# prob is the list of the selection probabilities
# the method is systematic sampling (unequal probabilities, without replacement)
# ls is the list of sizes
ls=list(4,c(1,1,1,1))
prob=list(rep(4/7,7),list(c(0.2,0.4,0.4),rep(1/5,5),rep(1/3,3),rep(1,1),rep(1/7,7),
rep(1/6,6),rep(1,1)))
m=mstage(b,stage=list("cluster","cluster"),varnames=list("REG","CT"),
size=ls, method=c("systematic","systematic"),pik=prob)
#the selected regions
unique(m[[1]]$REG)
#the selected cantons
unique(m[[2]]$CT)
# extracts the observed data
xx=getdata(b,m)[[2]]
# check the result
table(xx$REG,xx$CT)
############
## Example 5
############
# Stratified two-stage cluster sampling
# The same data as in Example 1
# the variable 'REG' is used as stratification variable
# there are 7 strata
# the variable 'CT' is used as first clustering variable
# first stage, clusters (cantons) are drawn from each region using "srswor"
# 3 clusters are drawn from the regions 1,2,3,5, and 6, respectively
# 1 cluster is drawn from the regions 4 and 7, respectively
# the variable 'COM' is used as second clustering variable
# second stage, 2 clusters (municipalities) are drawn from each selected canton using "srswor"
m=mstage(b,stage=list("stratified","cluster","cluster"), varnames=list("REG","CT","COM"),
size=list(size1=table(b$REG),size2=c(rep(3,3),1,3,3,1), size3=rep(2,17)),
method=list("","srswor","srswor"))
# extracts the observed data
getdata(b,m)[[3]]
}
\keyword{survey}
| /man/mstage.Rd | no_license | cran/sampling | R | false | false | 7,756 | rd | \name{mstage}
\alias{mstage}
\title{Multistage sampling}
\description{Implements multistage sampling with equal/unequal probabilities.}
\usage{mstage(data, stage=c("stratified","cluster",""), varnames, size,
method=c("srswor","srswr","poisson","systematic"), pik, description=FALSE)}
\arguments{
\item{data}{data frame or data matrix; its number of rows is N, the population size.}
\item{stage}{list of sampling types at each stage; the possible values are: "stratified", "cluster"
and "" (without stratification or clustering). For multistage element sampling, this argument is not necessary.}
\item{varnames}{list of stratification or clustering variables.}
\item{size}{list of sample sizes (in the order in which the samples appear in the multistage sampling).}
\item{method}{list of methods to select units at each stage; the following methods are implemented: simple random
sampling without replacement (srswor), simple random sampling with replacement (srswr),
Poisson sampling (poisson), systematic sampling (systematic); if the method is not specified,
by default the method is "srswor". The method can be different at each stage.}
\item{pik}{list of selection probabilities or auxiliary information used to compute them;
this argument is only used for unequal probability sampling (Poisson, systematic). If an
auxiliary information is provided, the function uses the \link{inclusionprobabilities} function for
computing these probabilities.}
\item{description}{a message is printed if its value is TRUE; the message gives the number
of selected units and the number of the units in the population.
By default, its value is FALSE.}
}
\value{
The function returns a list, which contains the stages (if m is this list, the stage i is m$'i' etc)
and the following information:
\item{ID_unit}{the identifier of selected units at each stage.}
\item{Prob_ number _stage}{the inclusion probability at stage 'number'.}
\item{Prob}{the final unit inclusion probability given in the last stage; it is the product of unit inclusion probabilities at each stage.}
}
\details{The data should be sorted in ascending order by the columns given in the varnames argument before applying the function. Use, for example, data[order(data$state,data$region),].
}
\seealso{
\code{\link{cluster}}, \code{\link{strata}}, \code{\link{getdata}}}
\examples{
############
## Example 1
############
# Two-stage cluster sampling
# Uses the 'swissmunicipalities' data
data(swissmunicipalities)
b=swissmunicipalities
b=b[order(b$REG,b$CT),]
attach(b)
# the variable 'REG' (region) has 7 categories;
# it is used as clustering variable in the first-stage sample
# the variable 'CT' (canton) has 26 categories;
# it is used as clustering variable in the second-stage sample
# 4 clusters (regions) are selected in the first-stage
# 1 canton is selected in the second-stage from each sampled region
# the method is simple random sampling without replacement in each stage
# (equal probability, without replacement)
m=mstage(b,stage=list("cluster","cluster"), varnames=list("REG","CT"),
size=list(4,c(1,1,1,1)), method=list("srswor","srswor"))
# the first stage is m[[1]], the second stage is m[[2]]
#the selected regions
unique(m[[1]]$REG)
#the selected cantons
unique(m[[2]]$CT)
# extracts the observed data
x=getdata(b,m)[[2]]
# check the output
table(x$REG,x$CT)
############
## Example 2
############
# Two-stage element sampling
# Generates artificial data (a 235X3 matrix with 3 columns: state, region, income).
# The variable "state" has 2 categories ('n','s').
# The variable "region" has 5 categories ('A', 'B', 'C', 'D', 'E').
# The variable "income" is generated using the U(0,1) distribution.
data=rbind(matrix(rep('n',165),165,1,byrow=TRUE),matrix(rep('s',70),70,1,byrow=TRUE))
data=cbind.data.frame(data,c(rep('A',115),rep('D',10),rep('E',40),rep('B',30),rep('C',40)),
100*runif(235))
names(data)=c("state","region","income")
data=data[order(data$state,data$region),]
table(data$state,data$region)
# the method is simple random sampling without replacement
# 25 units are drawn in the first-stage
# in the second-stage, 10 units are drawn from the already 25 selected units
m=mstage(data,size=list(25,10),method=list("srswor","srswor"))
# the first stage is m[[1]], the second stage is m[[2]]
# extracts the observed data
xx=getdata(data,m)[[2]]
xx
# check the result
table(xx$state,xx$region)
############
## Example 3
############
# Stratified one-stage cluster sampling
# The same data as in Example 2
# the variable 'state' is used as stratification variable
# 165 units are in the first stratum and 70 in the second one
# the variable 'region' is used as clustering variable
# 1 cluster (region) is drawn in each state using "srswor"
m=mstage(data, stage=list("stratified","cluster"), varnames=list("state","region"),
size=list(c(165,70),c(1,1)),method=list("","srswor"))
# check the first stage
table(m[[1]]$state)
# check the second stage
table(m[[2]]$region)
# extracts the observed data
xx=getdata(data,m)[[2]]
# check the result
table(xx$state,xx$region)
############
## Example 4
############
# Two-stage cluster sampling
# The same data as in Example 1
# in the first-stage, the clustering variable is 'REG' (region) with 7 categories
# 4 clusters (regions) are drawn in the first-stage
# each region is selected with the probability 4/7
# in the second-stage, the clustering variable is 'CT'(canton) with 26 categories
# 1 cluster (canton) is drawn in the second-stage from each selected region
# in region 1, there are 3 cantons; one canton is selected with prob. 0.2, 0.4, 0.4, resp.
# in region 2, there are 5 cantons; each canton is selected with the prob. 1/5
# in region 3, there are 3 cantons; each canton is selected with the prob. 1/3
# in region 4, there is 1 canton, which it is selected with the prob. 1
# in region 5, there are 7 cantons; each canton is selected with the prob. 1/7
# in region 6, there are 6 cantons; each canton is selected with the prob. 1/6
# in region 7, there is 1 canton, which is selected with the prob. 1
# it is necessary to use a list of selection probabilities at each stage
# prob is the list of the selection probabilities
# the method is systematic sampling (unequal probabilities, without replacement)
# ls is the list of sizes
ls=list(4,c(1,1,1,1))
prob=list(rep(4/7,7),list(c(0.2,0.4,0.4),rep(1/5,5),rep(1/3,3),rep(1,1),rep(1/7,7),
rep(1/6,6),rep(1,1)))
m=mstage(b,stage=list("cluster","cluster"),varnames=list("REG","CT"),
size=ls, method=c("systematic","systematic"),pik=prob)
#the selected regions
unique(m[[1]]$REG)
#the selected cantons
unique(m[[2]]$CT)
# extracts the observed data
xx=getdata(b,m)[[2]]
# check the result
table(xx$REG,xx$CT)
############
## Example 5
############
# Stratified two-stage cluster sampling
# The same data as in Example 1
# the variable 'REG' is used as stratification variable
# there are 7 strata
# the variable 'CT' is used as first clustering variable
# first stage, clusters (cantons) are drawn from each region using "srswor"
# 3 clusters are drawn from the regions 1,2,3,5, and 6, respectively
# 1 cluster is drawn from the regions 4 and 7, respectively
# the variable 'COM' is used as second clustering variable
# second stage, 2 clusters (municipalities) are drawn from each selected canton using "srswor"
m=mstage(b,stage=list("stratified","cluster","cluster"), varnames=list("REG","CT","COM"),
size=list(size1=table(b$REG),size2=c(rep(3,3),1,3,3,1), size3=rep(2,17)),
method=list("","srswor","srswor"))
# extracts the observed data
getdata(b,m)[[3]]
}
\keyword{survey}
|
# Mosaic-plot demos: the built-in hair/eye colour data, then a small
# "belief in Santa" survey cross-tabulated against older-sibling status.
mosaicplot(HairEyeColor, color = TRUE, off = 5)

# Sixteen respondents: belief status paired with older-sibling category.
belief <- c('no belief', 'no belief', 'no belief', 'no belief',
            'belief', 'belief', 'belief', 'belief',
            'belief', 'belief', 'no belief', 'no belief',
            'belief', 'belief', 'no belief', 'no belief')
sibling <- c('older brother', 'older brother', 'older brother', 'older sister',
             'no older sibling', 'no older sibling', 'no older sibling',
             'older sister', 'older brother', 'older sister', 'older brother',
             'older sister', 'no older sibling', 'older sister', 'older brother',
             'no older sibling')
santa <- data.frame(belief = belief, sibling = sibling)

# Cross-tabulate and draw the mosaic twice: once from the table object,
# once via the formula interface (both give the same picture).
tbl <- table(santa)
mosaicplot(tbl, color = TRUE, off = 1)
mosaicplot(~ belief + sibling, data = santa, color = TRUE, off = 1)
| /R_kmooc2/7weeks/[Ex]7_4.R | no_license | HeeSeok-Kwon/R_study | R | false | false | 697 | r | #print(HairEyeColor)
# Mosaic plot of the built-in 3-way HairEyeColor contingency table.
mosaicplot(HairEyeColor, color=T,off=5)
# Sixteen survey respondents: Santa-belief status vs older-sibling category.
santa <- data.frame(belief=c('no belief','no belief','no belief','no belief',
                    'belief','belief','belief','belief',
                    'belief','belief','no belief','no belief',
                    'belief','belief','no belief','no belief'),
                    sibling=c('older brother','older brother','older brother','older sister',
                    'no older sibling','no older sibling','no older sibling',
                    'older sister','older brother','older sister','older brother',
                    'older sister','no older sibling','older sister','older brother',
                    'no older sibling')
)
# Cross-tabulate belief against sibling status, then plot it two ways:
# from the table object and via the formula interface (same picture).
tbl <- table(santa)
#print(santa)
#mosaicplot(santa,color=T,off=1)
mosaicplot(tbl,color=T,off=1)
mosaicplot(~belief+sibling,data=santa,color=T,off=1)
|
## Random-forest tuning on the SPAM data:
## (1) sweep mtry (candidate predictors per split) over 4:8 at ntree = 500,
## (2) sweep ntree over 100..1000 at mtry = 5,
## recording test-set accuracy and mean out-of-bag (OOB) error for each fit.
## (The original began with rm(list = ls()); dropped -- scripts should not
## clear the caller's workspace.)
set.seed(777)
library(randomForest)
library(caret)  # confusionMatrix(); trainControl() is also exported by caret
# BUG FIX: removed library(trainControl) -- no such package exists;
# trainControl() is a caret function, so that call aborted the script.

# Columns V1..V57 are predictors; V58 is the spam/ham label.
SPAM <- read.csv("Data/SPAM.csv", header = FALSE, sep = ' ')

# 60/40 train/test split.
train_ind <- sample(1:nrow(SPAM), nrow(SPAM) * 0.60)
train <- SPAM[train_ind, ]
test <- SPAM[-train_ind, ]

## ---- Sweep 1: mtry in 4:8, ntree fixed at 500 -----------------------------
error_store_test <- c()  # test-set accuracy per mtry
error_store <- c()       # mean OOB error per mtry
for (i in 4:8) {
  model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                        ntree = 500, mtry = i, na.action = na.omit)
  y_hat <- predict(model, newdata = test[1:57], type = "response")
  yy <- as.factor(test$V58)
  conf_mat <- confusionMatrix(y_hat, yy)
  acc_RF <- conf_mat$overall[1]
  error_store_test <- c(error_store_test, unname(acc_RF))
  print(model$err.rate[, 1])
  # err.rate[, 1] is the cumulative OOB error after each tree;
  # mean() over the 500 trees equals the original sum(...)/500.
  error_store <- c(error_store, mean(model$err.rate[, 1]))
}

# A single large-mtry fit with variable importance, for comparison.
model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                      ntree = 500, mtry = 30, importance = TRUE)
print(mean(model$err.rate[, 1]))

# tuneRF's own search, starting from the default mtry = sqrt(p).
metric <- "Accuracy"
mtry <- sqrt(57)
control <- trainControl(method = "boot", search = "random")
bestmtry <- tuneRF(train[1:57], as.factor(train[, 58]), stepFactor = 1.3,
                   mtryStart = mtry, improve = 1e-5, ntree = 500)

mtry_num <- 4:8
plot(x = mtry_num, y = error_store_test, main = "test error by mtry",
     ylab = "Accuracy")
plot(x = mtry_num, y = error_store, main = "train error by mtry",
     ylab = "Accuracy")

## ---- Sweep 2: ntree in 100, 200, ..., 1000, mtry fixed at 5 ---------------
error_store_test_ntree <- c()  # test-set accuracy per ntree
error_store_ntree <- c()       # mean OOB error per ntree
tree_num <- 1:10 * 100
for (i in 1:10) {
  model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                        ntree = i * 100, mtry = 5, na.action = na.omit)
  y_hat <- predict(model, newdata = test[1:57], type = "response")
  yy <- as.factor(test$V58)
  conf_mat <- confusionMatrix(y_hat, yy)
  acc_RF <- conf_mat$overall[1]
  # BUG FIX: the original appended to error_store_test / error_store (the
  # mtry-sweep accumulators), so the ntree results were never collected.
  error_store_test_ntree <- c(error_store_test_ntree, unname(acc_RF))
  print(model$err.rate[, 1])
  # mean OOB error, so values are comparable across different ntree.
  error_store_ntree <- c(error_store_ntree, mean(model$err.rate[, 1]))
}
# BUG FIX: title said "by mtry" (copy-paste); this is the ntree sweep.
plot(x = tree_num, y = error_store_test_ntree, main = "test error by ntree",
     ylab = "Accuracy")
| /6_3_Mtry_OOBerror_RF(spam)/code/Question_6_3.R | no_license | WL0118/Supervised_Learning | R | false | false | 1,966 | r | rm(list = ls())
# Random-forest tuning on the SPAM data: sweep mtry (4:8 at ntree = 500),
# then ntree (100..1000 at mtry = 5), tracking test accuracy and OOB error.
set.seed(777)
library(randomForest)
# NOTE(review): 'trainControl' is a caret function, not a package -- this
# library() call fails at runtime; confirm it should be removed.
library(trainControl)
library(caret)
# Columns V1..V57 are predictors; V58 is the spam indicator.
SPAM = read.csv("Data/SPAM.csv",header = FALSE,sep=' ')
# 60/40 train/test split.
train_ind = sample(1:nrow(SPAM), nrow(SPAM)*0.60)
train = SPAM[train_ind,]
test = SPAM[-train_ind,]
# Accumulators for the mtry sweep: test accuracy and mean OOB error.
error_store_test <- c()
error_store <- c()
for(i in 4:8){
model = randomForest(x=train[1:57], y=as.factor(train[,58]), ntree=500, mtry = i, na.action = na.omit )
y_hat <- predict(model, newdata = test[1:57], type = "response")
predicted_y<-y_hat
yy <- as.factor(test$V58)
conf_mat<-confusionMatrix(y_hat, yy)
acc_RF<-conf_mat$overall[1]
error_store_test <- c(error_store_test, unname(acc_RF))
print(model$err.rate[,1])
# err.rate[,1] is the cumulative OOB error after each tree; sum/500 = mean.
error_store <- c(error_store, sum(model$err.rate[,1])/500)
}
# One large-mtry fit with variable importance, for comparison.
model <- randomForest(x=train[1:57], y=as.factor(train[,58]), ntree=500, mtry = 30, importance=TRUE )
print(sum(model$err.rate[,1])/500)
metric <- "Accuracy"
mtry<- sqrt(57)
control <- trainControl(method="boot", search="random")
bestmtry<-tuneRF(train[1:57],as.factor(train[,58]), stepFactor = 1.3,mtryStart =mtry,improve = 1e-5, ntree=500)
mtry_num<- 4:8
plot(x=mtry_num,y=error_store_test, main = "test error by mtry", ylab = "Accuracy")
plot(y=error_store,x= mtry_num, main = "train error by mtry", ylab = "Accuracy")
# Accumulators for the ntree sweep.
error_store_test_ntree <- c()
error_store_ntree <- c()
tree_num<- 1:10*100
for(i in 1:10){
model = randomForest(x=train[1:57], y=as.factor(train[,58]), ntree=i*100, mtry = 5, na.action = na.omit )
y_hat <- predict(model, newdata = test[1:57], type = "response")
predicted_y<-y_hat
yy <- as.factor(test$V58)
conf_mat<-confusionMatrix(y_hat, yy)
acc_RF<-conf_mat$overall[1]
# NOTE(review): appends to error_store_test (the mtry results) instead of
# error_store_test_ntree -- looks like a copy-paste bug; confirm and fix.
error_store_test_ntree <- c(error_store_test, unname(acc_RF))
print(model$err.rate[,1])
# NOTE(review): same issue -- reads error_store instead of error_store_ntree.
error_store_ntree <- c(error_store, sum(model$err.rate[,1]))
}
# NOTE(review): title says "by mtry" but this plots the ntree sweep.
plot(error_store_test_ntree, main = "test error by mtry", ylab = "Accuracy")
|
# Read a CSV file located at directory/filename and return it as a data frame.
#
# Args:
#   directory: path to the folder containing the file.
#   filename:  name of the CSV file within that folder.
# Returns: the parsed data frame (now returned visibly; the original ended
#   with an assignment, which made the return value invisible).
readfile <- function(directory, filename) {
  # file.path() joins with "/" -- equivalent to the old paste(..., sep = "").
  full_path <- file.path(directory, filename)
  read.csv(full_path)
}
# Attach `package`, installing it first if it is not already available.
# Stops with an error if the package still cannot be loaded after install.
loadPackage <- function(package) {
  # require() returns FALSE (with a warning) rather than erroring when the
  # package is missing, which lets us fall back to installing it.
  is_attached <- require(package, character.only = TRUE)
  if (!is_attached) {
    install.packages(package)
    # Second attempt after installation; give up if it still fails.
    if (!require(package, character.only = TRUE)) {
      stop("Package not found")
    }
  }
}
| /r/pa1/utils.R | no_license | nexusventuri/datasciencecoursera | R | false | false | 316 | r | readfile <- function(directory, filename) {
full_path = paste(directory, "/", filename, sep="")
data = read.csv(full_path)
}
# Attach `package`; if it is not installed, install it from the default
# repository and try again, stopping with an error if it still cannot load.
loadPackage <- function(package) {
  # require() returns FALSE (with a warning) instead of erroring when the
  # package is missing, which lets us fall back to installing it.
  if(!require(package, character.only=TRUE)) {
    install.packages(package)
    # Second attempt after installation; give up if it still fails.
    if(!require(package,character.only = TRUE)) stop("Package not found")
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_Data200.R
\name{load_Data200}
\alias{load_Data200}
\alias{save_Data200}
\alias{load_Data200_info}
\title{Load or save from Data200}
\usage{
load_Data200(layer, WGS84 = FALSE)
save_Data200(path, layer = NULL, type = NULL)
load_Data200_info(english_names = FALSE)
}
\arguments{
\item{layer}{identification of data to extract as character, see details.}
\item{WGS84}{convert data to WGS-84 coordinate system? Default \code{FALSE}.}
\item{path}{\code{character} path to store the files to.}
\item{type}{\code{character} type of layers to save. See details, types are listed in brackets.}
\item{english_names}{change the names of the columns to English. Default \code{FALSE}.}
}
\value{
\code{"load_Data200"} - \code{data.frame} with spatial objects (\code{\link[sf]{sf}}) of the specified layer.
For \code{layer} either \code{"DMR"} or \code{"DMRShaded"} the output is actually a (\code{\link[raster]{raster}}).
\code{"save_Data200"} - \code{path} to the unzipped files (for layer) or folder (for type),
the zipped file is also stored at path (mainly for further use)
}
\description{
Load data from Data200 data source (\url{https://geoportal.cuzk.cz/(S(ijginumejzilvacbfijkylwj))/Default.aspx?mode=TextMeta&side=mapy_data200&text=dSady_mapyData200&head_tab=sekce-02-gp&menu=229}).
The data can be used only after correctly citing the creator (as per terms of use \url{https://geoportal.cuzk.cz/Dokumenty/Podminky_EN.pdf}).
The citation is in form "Mapový podklad – Data200, {{insert year}} © Český úřad zeměměřický a katastrální, www.cuzk.cz".
}
\details{
The \code{layer} can have values from following set, in the bracket is the name of general category
(can be used as type in saving the data):
\enumerate{
\item \code{"AdministrativniHraniceLinie"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiCentroid"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiUTJ"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiObce"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiOkresy"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiKraje"} (\code{"Hranice"})
\item \code{"HrazJezNad50m"} (\code{"Vodstvo"})
\item \code{"HrazJezPod50m"} (\code{"Vodstvo"})
\item \code{"JezeroRybnikVodniNadrz"} (\code{"Vodstvo"})
\item \code{"VodniTokPod50m"} (\code{"Vodstvo"})
\item \code{"VodniTokNad50m"} (\code{"Vodstvo"})
\item \code{"Ostrovy"} (\code{"Vodstvo"})
\item \code{"MokrinaBazina"} (\code{"Vodstvo"})
\item \code{"Vodopad"} (\code{"Vodstvo"})
\item \code{"Prameny1"} (\code{"Vodstvo"})
\item \code{"Prameny2"} (\code{"Vodstvo"})
\item \code{"OrografickeNazvy"} (\code{"Popis"})
\item \code{"GeomorfologickeOblasti"} (\code{"Popis"})
\item \code{"GeomorfologickeCelky"} (\code{"Popis"})
\item \code{"GeomorfologickePodcelky"} (\code{"Popis"})
\item \code{"NarodniParkPrirodniRezervace"} (\code{"RuzneObjekty"})
\item \code{"Produktovod"} (\code{"RuzneObjekty"})
\item \code{"Vysilac"} (\code{"RuzneObjekty"})
\item \code{"VyznamneObjekty"} (\code{"RuzneObjekty"})
\item \code{"ProduktovodVyznamneBody"} (\code{"RuzneObjekty"})
\item \code{"Věž"} (\code{"RuzneObjekty"})
\item \code{"DulLom"} (\code{"RuzneObjekty"})
\item \code{"Budova"} (\code{"RuzneObjekty"})
\item \code{"ElektrickeVedeni"} (\code{"RuzneObjekty"})
\item \code{"Elektrarna"} (\code{"RuzneObjekty"})
\item \code{"ObceBody"} (\code{"Sidla"})
\item \code{"ObcePolygony"} (\code{"Sidla"})
\item \code{"Privoz"} (\code{"Doprava"})
\item \code{"PrivozStanice"} (\code{"Doprava"})
\item \code{"ZeleznicniPrejezd"} (\code{"Doprava"})
\item \code{"Heliport"} (\code{"Doprava"})
\item \code{"LanovaDraha"} (\code{"Doprava"})
\item \code{"DalnicniOdpocivka"} (\code{"Doprava"})
\item \code{"KrizovatkaMimourovnova"} (\code{"Doprava"})
\item \code{"LetisteNad40Ha"} (\code{"Doprava"})
\item \code{"LetisteNad40HaBod"} (\code{"Doprava"})
\item \code{"ZelezniceZastavky"} (\code{"Doprava"})
\item \code{"LetistePod40Ha"} (\code{"Doprava"})
\item \code{"LodniPristav"} (\code{"Doprava"})
\item \code{"PristavaciDraha"} (\code{"Doprava"})
\item \code{"Zeleznice"} (\code{"Doprava"})
\item \code{"Silnice"} (\code{"Doprava"})
\item \code{"LesyPlantaze"} (\code{"Vegetace"})
\item \code{"KotovaneBody"} (\code{"Relief"})
\item \code{"Vrstevnice"} (\code{"Relief"})
\item \code{"SkalniStenaSraz"} (\code{"Relief"})
\item \code{"Jeskyne"} (\code{"Relief"})
\item \code{"DMR"} (\code{"Relief"})
\item \code{"DMRShaded"} (\code{"Relief"})
}
}
\section{Functions}{
\itemize{
\item \code{load_Data200}: Loads single dataset
\item \code{save_Data200}: Download and store layer (and zipped general category) or complete category
\item \code{load_Data200_info}: Load information about layers in Data200.
}}
\examples{
\dontrun{
waterfalls <- load_Data200(layer = "Vodopad")
}
\dontrun{
folder_water_objects <- save_Data200("~/data/water", type = "Vodstvo")
}
}
| /man/load_Data200.Rd | permissive | JanCaha/CzechData | R | false | true | 4,920 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_Data200.R
\name{load_Data200}
\alias{load_Data200}
\alias{save_Data200}
\alias{load_Data200_info}
\title{Load or save from Data200}
\usage{
load_Data200(layer, WGS84 = FALSE)
save_Data200(path, layer = NULL, type = NULL)
load_Data200_info(english_names = FALSE)
}
\arguments{
\item{layer}{identification of data to extract as character, see details.}
\item{WGS84}{convert data to WGS-84 coordinate system? Default \code{FALSE}.}
\item{path}{\code{character} path to store the files to.}
\item{type}{\code{character} type of layers to save. See details, types are listed in brackets.}
\item{english_names}{change the names of the columns to English. Default \code{FALSE}.}
}
\value{
\code{"load_Data200"} - \code{data.frame} with spatial objects (\code{\link[sf]{sf}}) of the specified layer.
For \code{layer} either \code{"DMR"} or \code{"DMRShaded"} the output is actually a (\code{\link[raster]{raster}}).
\code{"save_Data200"} - \code{path} to the unzipped files (for layer) or folder (for type),
the zipped file is also stored at path (mainly for further use)
}
\description{
Load data from Data200 data source (\url{https://geoportal.cuzk.cz/(S(ijginumejzilvacbfijkylwj))/Default.aspx?mode=TextMeta&side=mapy_data200&text=dSady_mapyData200&head_tab=sekce-02-gp&menu=229}).
The data can be used only after correctly citing the creator (as per terms of use \url{https://geoportal.cuzk.cz/Dokumenty/Podminky_EN.pdf}).
The citation is in form "Mapový podklad – Data200, {{insert year}} © Český úřad zeměměřický a katastrální, www.cuzk.cz".
}
\details{
The \code{layer} can have values from following set, in the bracket is the name of general category
(can be used as type in saving the data):
\enumerate{
\item \code{"AdministrativniHraniceLinie"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiCentroid"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiUTJ"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiObce"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiOkresy"} (\code{"Hranice"})
\item \code{"AdministrativniUzemiKraje"} (\code{"Hranice"})
\item \code{"HrazJezNad50m"} (\code{"Vodstvo"})
\item \code{"HrazJezPod50m"} (\code{"Vodstvo"})
\item \code{"JezeroRybnikVodniNadrz"} (\code{"Vodstvo"})
\item \code{"VodniTokPod50m"} (\code{"Vodstvo"})
\item \code{"VodniTokNad50m"} (\code{"Vodstvo"})
\item \code{"Ostrovy"} (\code{"Vodstvo"})
\item \code{"MokrinaBazina"} (\code{"Vodstvo"})
\item \code{"Vodopad"} (\code{"Vodstvo"})
\item \code{"Prameny1"} (\code{"Vodstvo"})
\item \code{"Prameny2"} (\code{"Vodstvo"})
\item \code{"OrografickeNazvy"} (\code{"Popis"})
\item \code{"GeomorfologickeOblasti"} (\code{"Popis"})
\item \code{"GeomorfologickeCelky"} (\code{"Popis"})
\item \code{"GeomorfologickePodcelky"} (\code{"Popis"})
\item \code{"NarodniParkPrirodniRezervace"} (\code{"RuzneObjekty"})
\item \code{"Produktovod"} (\code{"RuzneObjekty"})
\item \code{"Vysilac"} (\code{"RuzneObjekty"})
\item \code{"VyznamneObjekty"} (\code{"RuzneObjekty"})
\item \code{"ProduktovodVyznamneBody"} (\code{"RuzneObjekty"})
\item \code{"Věž"} (\code{"RuzneObjekty"})
\item \code{"DulLom"} (\code{"RuzneObjekty"})
\item \code{"Budova"} (\code{"RuzneObjekty"})
\item \code{"ElektrickeVedeni"} (\code{"RuzneObjekty"})
\item \code{"Elektrarna"} (\code{"RuzneObjekty"})
\item \code{"ObceBody"} (\code{"Sidla"})
\item \code{"ObcePolygony"} (\code{"Sidla"})
\item \code{"Privoz"} (\code{"Doprava"})
\item \code{"PrivozStanice"} (\code{"Doprava"})
\item \code{"ZeleznicniPrejezd"} (\code{"Doprava"})
\item \code{"Heliport"} (\code{"Doprava"})
\item \code{"LanovaDraha"} (\code{"Doprava"})
\item \code{"DalnicniOdpocivka"} (\code{"Doprava"})
\item \code{"KrizovatkaMimourovnova"} (\code{"Doprava"})
\item \code{"LetisteNad40Ha"} (\code{"Doprava"})
\item \code{"LetisteNad40HaBod"} (\code{"Doprava"})
\item \code{"ZelezniceZastavky"} (\code{"Doprava"})
\item \code{"LetistePod40Ha"} (\code{"Doprava"})
\item \code{"LodniPristav"} (\code{"Doprava"})
\item \code{"PristavaciDraha"} (\code{"Doprava"})
\item \code{"Zeleznice"} (\code{"Doprava"})
\item \code{"Silnice"} (\code{"Doprava"})
\item \code{"LesyPlantaze"} (\code{"Vegetace"})
\item \code{"KotovaneBody"} (\code{"Relief"})
\item \code{"Vrstevnice"} (\code{"Relief"})
\item \code{"SkalniStenaSraz"} (\code{"Relief"})
\item \code{"Jeskyne"} (\code{"Relief"})
\item \code{"DMR"} (\code{"Relief"})
\item \code{"DMRShaded"} (\code{"Relief"})
}
}
\section{Functions}{
\itemize{
\item \code{load_Data200}: Loads single dataset
\item \code{save_Data200}: Download and store layer (and zipped general category) or complete category
\item \code{load_Data200_info}: Load information about layers in Data200.
}}
\examples{
\dontrun{
waterfalls <- load_Data200(layer = "Vodopad")
}
\dontrun{
folder_water_objects <- save_Data200("~/data/water", type = "Vodstvo")
}
}
|
#### Program information ######################################################
# Package: MenA_VaccSims #
# Source file name: ModelInputUtilities.R #
# Contact: chris.c.stewart@kp.org, michael.l.jackson@kp.org #
# Version Date 12/17/19 #
#_______________________________________ _____________________________________#
# Input datasets: specify folder containing downloads from #
# https://montagu.vaccineimpact.org/ #
#_____________________________________________________________________________#
# Functions in this program: #
# (1) GetMontaguDemogData #
# (2) GetDemographicParameters #
# (3) checkVIMCdates #
# (4) GetPopAgeDist #
# (5) GetVaccScenario #
# (6) GetDiseaseStateDist #
# (7) GetWAIFWmatrix #
# (8) GetModelParams #
# (9) GetLifeEx #
#_____________________________________________________________________________#
# Parameters: #
# Start and end dates of simulation - to be specified in calling program #
# 3-letter country code #
# directory containing downloaded file #
# threshold - number of years to fill in if data is missing, defaults to 1 #
# (Note: this is because although total pop goes to 2100, cbr, cdr, imr may #
# end at 2099- I just copy the nearest year's value) #
#_____________________________________________________________________________#
# Purpose: return a vector or dataframe of parameters for: #
# -distribution for initializing population #
# -demographics: birth, death, infant mortality rate for simulation #
# -vaccination scenarios #
#_____________________________________________________________________________#
### (1) Download data from Montagu API ########################################
# Not currently used. #
# PURPOSE: GET DATA FROM API at "montagu.vaccineimpact.org" #
# This function does not work from KPWA network but tested from my laptop #
# 11/16/18, "downloads the 5 data files we need by reading into dataframe #
# then writing csv to the directory specified. Reconstructs near-real #
# filenames(compared to manual download),will be recognized by downstream fxn #
# requires R package "montagu", install with: #
# install.packages("drat") # if needed #
# drat:::add("vimc") #
# install.packages("montagu") #
#_____________________________________________________________________________#
# Chloe 7/12: note that death rates by age (TDB) and numbers of births will need to be added to this function (or something like it) to be used
# in the future; existence of that data in input folder currently assumed in functions below.
# Download the five demographic data files the model needs from the Montagu
# API and, when `destpath` is given, write each one out as a CSV.
#
# Args:
#   username, password: Montagu credentials for montagu.vaccineimpact.org.
#   touchstone: touchstone id to pull data for (default "201710gavi-5").
#   destpath:   folder to write CSVs into; NULL (default) skips writing.
# Side effects: sets the global default montagu server; writes CSVs; prints
#   each filename written, or the available touchstone list on a bad id.
# Requires the 'montagu' package and network access (see banner above).
GetMontaguDemogData <- function(username = NULL, password = NULL,
                                touchstone = "201710gavi-5", destpath = NULL) {
  svr <- montagu_server(name = "production",
                        hostname = "montagu.vaccineimpact.org",
                        username = username, password = password)
  montagu_server_global_default_set(svr)
  tchlist <- montagu_touchstones_list(svr)
  if (touchstone %in% as.vector(tchlist$id)) {
    dlist <- montagu_demographics_list(touchstone_id = touchstone)
    demogidlist <- as.vector(dlist$id)
    needed_data <- c("cbr", "cdr", "unwpp_imr", "qq_pop", "tot_pop")
    for (i in seq_along(needed_data)) {
      if (needed_data[i] %in% demogidlist) {
        # BUG FIX: was 'touchstone_id = tch'; 'tch' is undefined in this
        # function's scope -- the intended value is the 'touchstone' argument.
        dat <- montagu::montagu_demographic_data(type = needed_data[i],
                                                 touchstone_id = touchstone)
        datrow <- dlist[dlist$id == needed_data[i], ]
        # Filename like: 201710gavi-5_dds-201710_unwpp_imr_both.csv
        # [touchstone id + source + demog id + _both]. Exact format is not
        # critical: downstream readers only look for the demog id in the name.
        if (length(destpath) > 0) {
          # BUG FIX: was paste0(datadir, ...); 'datadir' is undefined here --
          # the destination folder argument is 'destpath'.
          filename <- file.path(destpath,
                                paste0(touchstone, "_", datrow$source, "_",
                                       datrow$id, "_both.csv"))
          write.csv(dat, filename)
          print(filename)
        }
      }
    }
  }
  else {
    print("Touchstone not found. Touchstone list:")
    print(tchlist)
  }
}
### (2) GetDemographicParameters ##############################################
# This function returns a dataset with a row for each year of simulation, #
# with total pop, death rate, birth rate, and infant mortality rate #
# ASSUMES FILENAMES CONTAIN: "tot_pop_both", "cdr_both", "cbr_both","imr_both"#
# Chloe 7/12/19: now assume death rates and numbers of deaths from files #
# called "p_dying_both" and "births" respectively. #
# Mike 12/10/19: Re-write so that death probabilities are estimated from #
# annual age-specific UN interpolated population sizes, rather than probs #
# from Montagu. For the 2019 model year, keep the current approach of using #
# average death probs in 5-year age buckets, to keep things consistent with #
# the MenA_OneSim.R program. Future work may move this to single-year buckets.#
#_____________________________________________________________________________#
# Build one row per simulation year for one country with total population,
# crude birth rate, number of births, and death probabilities by age bucket.
# Inputs:
#   path - directory containing the Montagu demographic csv downloads
#   mycountry - 3-letter country code
#   start, end - simulation start/end dates (year() is applied to each)
#   fillThreshold - max years of missing data to pad (see checkVIMCdates)
# Output: data.frame keyed by country_code/year with columns totalpop,
#   birthrate, births, and one death-probability column per age bucket.
# Relies on globals set by helpers: mymsg (GetFilename), filemsg
# (CheckDemogFileStructure), datemsg (checkVIMCdates).
GetDemographicParameters<-function(path, mycountry, start, end, fillThreshold=1) {
setwd(path)
#### (1) Get total population size
totpop<-GetFilename(path, "tot_pop_both")
if (is.character(totpop)==FALSE) { stop(mymsg) }
dfpop<-read.csv(totpop)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=dfpop, dfdesc="tot_pop_both")==FALSE) { stop (filemsg)}
ctrypop<-dfpop[dfpop$country_code==mycountry, c("country_code", "year", "value")]
# Pad (or reject) the series so it spans the full simulation window
ctrypopfull<-checkVIMCdates(mydata=ctrypop, startyr=year(start), endyr=year(end), threshold=fillThreshold)
if (is.data.frame(ctrypopfull)==FALSE) { stop(paste(datemsg, " tot_pop_both")) }
# NOTE(review): the summary below is neither assigned nor printed inside a
# function, so it has no effect; looks like a leftover interactive check
ctrypopfull%>%group_by(country_code)%>%summarize(min(year), max(year))
#### (2) Get birthrate and number of births each year
cbr<-GetFilename(path, "cbr_both")
births <- GetFilename(path, "births")
if (is.character(cbr)==FALSE) { stop(mymsg) }
if (is.character(births)==FALSE) { stop(mymsg) }
dfbirth<-read.csv(cbr)
numbirth<-read.csv(births)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=dfbirth, dfdesc="cbr_both")==FALSE) { stop (filemsg)}
if (CheckDemogFileStructure(mycountry=mycountry, mydf=numbirth, dfdesc="births")==FALSE) { stop (filemsg)}
ctrybirth<-dfbirth[dfbirth$country_code==mycountry, c("country_code", "year", "value")]
numbirth_ctry<-numbirth[numbirth$country_code==mycountry, c("country_code", "year", "value")]
ctrybirthfull<-checkVIMCdates(mydata=ctrybirth, startyr=year(start), endyr=year(end), threshold=fillThreshold)
numbirthfull<-checkVIMCdates(mydata=numbirth_ctry, startyr=year(start), endyr=year(end), threshold=fillThreshold)
if (is.data.frame(ctrybirthfull)==FALSE) { stop(paste(datemsg, " cbr_both")) }
if (is.data.frame(numbirthfull)==FALSE) { stop(paste(datemsg, " births")) }
# NOTE(review): the next two summaries are also unused (see note above)
ctrybirthfull%>%group_by(country_code)%>%summarize(min(year), max(year))
numbirthfull%>%group_by(country_code)%>%summarize(min(year), max(year))
# Merge population, birth rate, and births into one table per country/year
build0<-merge(x=ctrypopfull, y=ctrybirthfull, by=c("country_code", "year"), all=TRUE)
colnames(build0)[colnames(build0)=="value.x"] <- "totalpop"
colnames(build0)[colnames(build0)=="value.y"] <- "birthrate"
build1 <- merge(x=build0, y=numbirthfull, by=c("country_code", "year"), all=TRUE)
colnames(build1)[colnames(build1)=="value"] <- "births"
#### (3) Calculate yearly probability of death by age group
# Death probabilities are derived from the UN interpolated populations: the
# fraction of a cohort that does not appear one year older in the next year.
dr <- GetFilename(path, "int_pop_both")
if (is.character(dr)==FALSE){ stop(mymsg) }
int.pop.all <- read.csv(dr)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=int.pop.all, dfdesc="int_pop_both")==FALSE) {
stop (filemsg)
}
int.pop <- int.pop.all[int.pop.all$country_code==mycountry & int.pop.all$year>=1950,
                       c("country_code", "year", "age_from", "age_to", "value")]
# Error checking: should have 15251 rows (101 age groups x 151 years)
if(dim(int.pop)[1] != 15251){
stop("Incorrect number of entries in int_pop_both")
}
int.pop <- int.pop[order(int.pop$year, int.pop$age_from),]
# For each age group, calculate deaths each year
# First, ignore the age 100-120 (will set death rate to 1.0)
# and ignore year 2100 (since we don't have 2101 data for comparison)
# With rows sorted by year then age, an offset of 102 rows (= 101 ages + 1) is
# the same birth cohort one year later (next year, one year older). ifelse()
# recycles the shorter shifted vector; the wrapped tail positions appear to be
# only year==2100 / age_to==120 rows, which take NA instead -- TODO confirm
int.pop$next_value <- ifelse(int.pop$year==2100, NA,
                             ifelse(int.pop$age_to==120, NA, int.pop$value[103:15251]))
int.pop$death.prob <- (int.pop$value - int.pop$next_value)/int.pop$value
# Set death.prob to 1.0 for age 100-120
int.pop$death.prob <- ifelse(int.pop$age_to==120, 1, int.pop$death.prob)
# For 2100, use death probabilities from 2099
int.pop$death.prob[int.pop$year==2100] <- int.pop$death.prob[int.pop$year==2099]
# Fix death.prob where value is zero (set death prob to zero)
int.pop$death.prob[int.pop$value==0] <- 0
# In a handful of cases (at the upper age ranges) population size increases from
# year to year. Rather than assume a negative death prob (which might break the
# simulation in an unpredictable way) set death prob to zero for these.
int.pop$death.prob[int.pop$death.prob < 0] <- 0
# For every 5-year age bucket (except infant mortality) assume midpoint is death prob
# (age 0 = imr; ages 2, 7, 12, ..., 82 are the midpoints of the 5-year buckets)
int.pop.small <- int.pop[int.pop$age_from %in% c(0, 2, seq(7, 82, by=5)),]
# Bucket labels: age 0 -> "imr"; midpoint m -> "dr<m-2><m+2>" (e.g. 7 -> "dr59")
int.pop.small$name <- ifelse(int.pop.small$age_from==0, "imr",
                             paste("dr", int.pop.small$age_from-2, int.pop.small$age_from+2, sep=""))
# The 0-4 bucket is labelled dr14 (infants are covered separately by imr)
int.pop.small$name <- ifelse(int.pop.small$name=="dr04", "dr14", int.pop.small$name)
# Wide format: one death-probability column per bucket, one row per year
build2a <- spread(int.pop.small[,c("year", "death.prob", "name")], name, death.prob)
# Reorder spread()'s alphabetical columns into ascending-age order
# (year, imr, dr14, dr59, ...) -- NOTE(review): hard-coded indices; verify if
# the set of age buckets ever changes
build2a <- build2a[, c(1, 19, 3, 13, 2, 4:12, 14:18)]
build2 <- merge(x=build1, y=build2a, by="year", all=TRUE)
return(build2)
}
### (3) checkVIMCdates ########################################################
# Function to check dates. #
checkVIMCdates<-function(mydata, startyr, endyr, threshold=1) {
  # Verify that mydata (expected columns: country_code, year, value) covers the
  # simulation window [startyr, endyr]. Gaps of up to 'threshold' years at
  # either end are padded by copying rows from the nearest available year.
  # Returns the (possibly padded) data.frame, or FALSE when a gap exceeds the
  # threshold, in which case the global 'datemsg' explains why.
  # NOTE: assumes mydata is already subset to a single country, as all callers
  # in this file do; the previous dplyr group_by/summarize was replaced with
  # plain min/max, which also avoids a length>1 condition in if().
  datemsg<<-""
  minyear <- min(mydata$year)
  maxyear <- max(mydata$year)
  if (minyear > startyr) {
    prediff <- minyear - startyr
    if (prediff <= threshold) {
      # Pad backwards with copies of the earliest available year's rows
      prefill <- mydata[mydata$year==minyear,]
      for (i in seq_len(prediff)) {
        prefill$year <- minyear - i
        mydata <- rbind(mydata, prefill)
      }
    } else {
      datemsg<<- paste0("There is a ", prediff, "-year gap in the demographic data compared to your simulation begin date. You can increase the threshold parameter to fill in data from the closest year.")
      return(FALSE)
    }
  }
  # Bug fix: the original re-tested the *begin* gap (minyear-startyr) before
  # handling the end gap; that test was always true at this point and has been
  # removed.
  if (maxyear < endyr) {
    postdiff <- endyr - maxyear
    if (postdiff <= threshold) {
      # Pad forwards with copies of the latest available year's rows
      postfill <- mydata[mydata$year==maxyear,]
      for (i in seq_len(postdiff)) {
        postfill$year <- maxyear + i
        mydata <- rbind(mydata, postfill)
      }
    } else {
      datemsg<<-paste0("There is a ", postdiff, "-year gap in the demographic data compared to your simulation end date. You can increase the threshold parameter to fill in data from the closest year.")
      return(FALSE)
    }
  }
  return(mydata)
}
### (4) GetPopAgeDist #########################################################
# This function returns a vector with 7 values, one for each 5-year age #
# band up to 30, calculated from quinquennial file, for the year closest to #
# specified start of simulation. Added names=age band 11/15/18 #
# ASSUMES FILENAME CONTAINS "qq_pop_both" #
# Called by InitializePopulation.R #
#_____________________________________________________________________________#
GetPopAgeDist<-function(path, mycountry, start) {
  # Return a named vector with the fraction of the population in each 5-year
  # age band, taken from the quinquennial population file for the 5-year point
  # closest to the simulation start. Returns FALSE (with the global 'disterr'
  # set) on any validation failure.
  disterr<<-""
  setwd(path)
  qqfile<-GetFilename(path, "qq_pop_both")
  if (is.character(qqfile)==FALSE) { stop(mymsg) }
  qqdf<-read.csv(qqfile)
  if (CheckDemogFileStructure(mycountry=mycountry, mydf=qqdf, dfdesc="qq_pop_both")==FALSE) { stop (filemsg)}
  # Quinquennial data has one record every 5th year; round to the nearest
  mround <- function(x, base) {
    base * round(x / base)
  }
  popyr<-mround(year(start), 5)
  qqkeep<-qqdf[qqdf$country_code==mycountry & qqdf$year==popyr,]
  # Guard clauses replace the original deeply-nested if/else structure
  if (nrow(qqkeep) == 0) {
    disterr<<-"No age distribution input found for this country and year"
    return(FALSE)
  }
  if (!(DemogNumVarExists("age_from", qqkeep) & DemogNumVarExists("age_to", qqkeep))) {
    disterr<<- "Non-numeric data in age band variables"
    return(FALSE)
  }
  # Bug fix: original condition was min(qqkeep$age_from > 0 || max(...) < 90)
  # with a misplaced parenthesis (and a length>1 ||, an error in R >= 4.3).
  # Intended check: bands must start at age 0 and reach at least age 90.
  if (min(qqkeep$age_from) > 0 || max(qqkeep$age_to) < 90) {
    disterr<<-"Incomplete age bands for this country and year"
    return(FALSE)
  }
  # Chloe edit 3/22/19: label every band individually rather than lumping 30+
  qqkeep$ageband<-paste0("Age_", qqkeep$age_from, "_", qqkeep$age_to)
  bands<-qqkeep%>%group_by(country_code, ageband)%>%summarize(tot=sum(value), minage=min(age_from))
  totpop<-qqkeep%>%group_by(country_code)%>%summarize(totpop=sum(value))
  if (totpop[,2] <= 0) {
    disterr<<-"Population value is zero. Please check qqpop file."
    return(FALSE)
  }
  # Convert band totals into fractions of the whole population, age-ordered
  numsall<-merge(x=bands, y=totpop, by="country_code")
  numsall$fraction<-numsall$tot/numsall$totpop
  agedist <- numsall[order(numsall$minage),]
  dist<-agedist$fraction
  names(dist)<-agedist$ageband
  return(dist)
}
### (5) GetVaccScenario #######################################################
# This function returns a dataset with a row for each year of simulation, #
# with DosesCampaign, CoverRoutine, and AgeLimCampaign #
# NOTE: ASSUMES FILENAMES CONTAIN: "mena-routine" and "mena-campaign" #
# NOTE: NOT CURRENTLY LIMITED TO YEARS OF SIM, could use GetVIMCdates? #
# 12/6/18 copying destring from taRifx to deal with "<NA>" in vacc files #
# 11Dec2019: Added sub-scenario option to select between more options #
#_____________________________________________________________________________#
# Strip every character not matching 'keep' from x, then coerce to numeric.
# Entries with nothing left (e.g. "<NA>") become NA, with a coercion warning.
destring <- function(x, keep="0-9.-") {
  pattern <- paste0("[^", keep, "]+")
  cleaned <- gsub(pattern, "", x)
  as.numeric(cleaned)
}
GetVaccScenario<-function(mycountry, scenario, sub.scenario, directory) {
  # Read the vaccination file for the requested scenario and return a
  # data.frame with one row per country/year/activity containing CoverRoutine
  # (routine coverage) and DosesCampaign (campaign target doses).
  # Inputs:
  #   mycountry - 3-letter country code
  #   scenario - one of "none", "routine", "campaign", "both"
  #   sub.scenario - selects between vaccination scenario files, e.g.
  #     "default" vs "bestcase"
  #   directory - folder containing the mena-routine / mena-campaign files
  # Returns FALSE (with global 'vaccmsg' set) when required columns are absent.
  vaccmsg<<-""
  setwd(directory)
  if (scenario=="none") sub.scenario <- "NA"  # no sub-scenario without vaccinations
  filename <- NULL
  if (scenario=="routine" || scenario=="both") {
    filename<-GetFilename(directory, "mena-routine", sub.scenario)
  }
  if (scenario=="campaign") {
    filename<-GetFilename(directory, "mena-campaign", sub.scenario)
  }
  # Bug fix: 'filename' was never assigned when scenario=="none", producing an
  # uninformative "object not found" error; fail with a clear message instead
  if (is.null(filename)) {
    vaccmsg<<-paste0("No vaccination file is defined for scenario '", scenario, "'")
    stop(vaccmsg)
  }
  if (is.character(filename)==FALSE) { stop(mymsg) }
  dfvacc<-read.csv(filename, stringsAsFactors = FALSE)
  if (IsCountryAndColAvailable(country_code=mycountry, mydf=dfvacc, forVacc=1)==FALSE) { stop(countrymsg) }
  # target and year are validated above; AgeLimCampaign is not used downstream
  if (scenario=="routine" || scenario=="both") {
    if (!(DemogNumVarExists("coverage", dfvacc))) {
      vaccmsg<<-"coverage variable missing from vaccination file"
      return(FALSE)
    }
  }
  ctryvacc<-dfvacc[dfvacc$country_code==mycountry, c("country_code", "year", "activity_type", "target" , "coverage")]
  colnames(ctryvacc)[colnames(ctryvacc)=="coverage"] <-"CoverRoutine"
  # target contains "<NA>" where activity_type=="routine"; destring turns those
  # into NA (a coercion warning is expected, even under the routine option)
  ctryvacc$DosesCampaign<-destring(ctryvacc$target)
  newdf<-subset(ctryvacc, select=-c(target))
  # Bug fix: return the result explicitly; the original ended on an assignment
  # and relied on its invisible value
  return(newdf)
}
### (6) GetDiseaseStateDist ###################################################
# Function GetDiseaseStateDist, called by InitializePopulation.R #
# Reads dist_both.csv, which is supplied with scripts; format should not vary #
#_____________________________________________________________________________#
GetDiseaseStateDist<-function(directory, region) {
  # Read dist_both.csv (packaged with the scripts) and return the fraction of
  # each disease state in each of the 7 population groups.
  # Inputs:
  #   directory - folder containing dist_both.csv
  #   region - "hyper" selects column 4 of the file; anything else column 3
  setwd(directory)
  dxfile<-GetFilename(directory, "dist_both.csv")
  if (is.character(dxfile)==FALSE) {
    # Bug fix: the explanatory print() was placed after stop() and never ran
    print("File [dist_both.csv] is packaged with the R scripts and should be in the same directory.")
    stop(mymsg)
  }
  dist<-read.csv(dxfile, stringsAsFactors = TRUE)
  # Plain if/else: region is a scalar, so vectorized ifelse() was unnecessary
  distcol <- if (region == "hyper") 4 else 3
  statefract<-as.vector(dist[,distcol]) # fraction of each disease state in each of 7 population groups
  return(statefract)
}
### (7) GetWAIFWmatrix #################################################################
# Get WAIFWmatrix: constructs expanded WAIFW matrix from supplied parameter data frame;#
#______________________________________________________________________________________#
# Chloe edit 3/29: need to expand further to account for higher ages part of sim now.
# Chloe edit 5/5: left all these new additions to the GetWAIFmatrix() function below.
# EJ 10/23: simplify the WAIFW construction, base it on the parameter input to OneSim
# instead of external csv. Expand in one step instead of two. Implement five age group
# WAIFW version.
# MJ 1/29/2020: Critical correction, the bd parameters were assigned incorrectly
GetWAIFWmatrix<-function(params) {
  # Build the seasonal WAIFW (who-acquires-infection-from-whom) arrays from the
  # fitted parameters. The 5x5 dry-season matrix has one row per contact age
  # group (0-4, 5-9, 10-14, 15-19, 20+), with bd6/bd7 on the diagonal of
  # groups 3 and 4; the rainy-season matrix is the dry matrix scaled by br.
  # Rows are then replicated to one row per month of age (ages 0-120), giving
  # a 1441 x 5 x 2 array with the third dimension named c("rainy", "dry").
  dry <- rbind(
    rep(params$bd1, 5),
    rep(params$bd2, 5),
    c(params$bd3, params$bd3, params$bd6, params$bd3, params$bd3),
    c(params$bd4, params$bd4, params$bd4, params$bd7, params$bd4),
    rep(params$bd5, 5)
  )
  rainy <- dry * params$br
  # Months of age per group: 60 each for the four 5-year groups, 1201 for 20+
  group.months <- c(60, 60, 60, 60, 1201)
  row.idx <- rep(seq_len(5), times = group.months)
  out <- array(data = NA, dim = c(sum(group.months), 5, 2),
               dimnames = list(NULL, NULL, c("rainy", "dry")))
  out[, , "rainy"] <- rainy[row.idx, ]
  out[, , "dry"] <- dry[row.idx, ]
  return(out)
}
### (8) GetModelParams ########################################################
# GetModelParams: Reads posterior_parameters.csv, which is supplied with #
# scripts. Subset to hyper or non-hyper region. #
# Goal is to prevent hard-coding of parameters in model #
#_____________________________________________________________________________#
GetModelParams<-function(path=scripts.dir, region.val,
                         scaling=c(1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)) {
  # Read posterior_parameters.csv (packaged with the scripts) and return the
  # parameter draws, scaled down for non-hyperendemic regions.
  # Inputs:
  #   path - character scalar with directory where the parameter file exists
  #   region.val - character scalar indicating region type (hyper vs not)
  #   scaling - numeric vector to scale hyper bd1-bd7 down to not-hyper
  # Output: data.frame of model parameters
  if (!(region.val %in% c("hyper", "not_hyper"))){
    stop("region must be hyper or not_hyper")
  }
  setwd(path)
  param.file <-GetFilename(path, "posterior_parameters.csv")
  if (is.character(param.file)==FALSE) {
    # Bug fix: the explanatory print() was placed after stop() and never ran
    print("File [posterior_parameters.csv] is packaged with the R scripts and should be in the same directory.")
    stop(mymsg)
  }
  params <- read.csv(param.file, stringsAsFactors = FALSE) #data frame
  if (region.val=="not_hyper"){
    # Scale the transmission parameters bd1..bd7 (replaces seven copy-pasted
    # assignments with one loop over the scaling vector)
    for (i in seq_along(scaling)) {
      bd <- paste0("bd", i)
      params[[bd]] <- params[[bd]] * scaling[i]
    }
  }
  return(params)
}
### (9) GetLifeExp ############################################################
# GetLifeExp: reads the life_ex_both file and creates a data.frame of life #
# expectancy by age and year. #
#_____________________________________________________________________________#
GetLifeExp<-function(path, mycountry.s=mycountry) {
  # Inputs:
  # path - character scalar with directory where life expectancy data exist
  # mycountry.s - character scalar with code for current country
  # Output:
  # data.frame with life expectancy (Life.Ex) by single year and single age
  # (AgeInYears), expanded from the 5-year age groups / 5-year time points in
  # the life_ex_both file.
  setwd(path)
  lifex <- GetFilename(path, "life_ex_both")
  if (is.character(lifex)==FALSE) {stop(mymsg)}
  lifex.df <- read.csv(lifex)
  if (CheckDemogFileStructure(mycountry=mycountry.s, mydf=lifex.df, dfdesc="life_ex_both")==FALSE) { stop (filemsg)}
  lifex.df <- lifex.df[lifex.df$country_code==mycountry.s,
                       c("age_from", "age_to", "year", "value")]
  # Expand to individual ages instead of age groups
  # First expand for the "missing" ages (e.g. 2-4 from the 1-4 group)
  # Then add the "present" ages (values of age_from)
  # count = how many extra single ages each group contributes beyond age_from
  lifex.df$count <- lifex.df$age_to - lifex.df$age_from
  # Each group row is repeated 'count' times; counter (1..count) offsets the
  # starting age to generate the in-between ages, all sharing the group value
  le.df <- data.frame(year=rep(lifex.df$year, times=lifex.df$count),
                      value=rep(lifex.df$value, times=lifex.df$count),
                      age_strt=rep(lifex.df$age_from, times=lifex.df$count),
                      counter=sequence(lifex.df$count))
  le.df$age_from <- le.df$age_strt + le.df$counter
  # Stack the original age_from rows on top of the generated in-between ages
  le.df <- rbind(lifex.df[, c("year", "value", "age_from")],
                 le.df[, c("year", "value", "age_from")])
  le.df <- le.df[order(le.df$year, le.df$age_from),]
  # Expand to every year instead of every 5 years
  # Each quinquennial time point y is copied to calendar years y .. y+4
  results <- data.frame(year=rep(le.df$year, times=5),
                        value=rep(le.df$value, times=5),
                        age_from=rep(le.df$age_from, times=5),
                        counter=c(rep(0, times=length(le.df$year)),
                                  rep(1, times=length(le.df$year)),
                                  rep(2, times=length(le.df$year)),
                                  rep(3, times=length(le.df$year)),
                                  rep(4, times=length(le.df$year))))
  results$year <- results$year + results$counter
  results <- results[order(results$year, results$age_from),
                     c("year", "age_from", "value")]
  names(results) <- c("year", "AgeInYears", "Life.Ex")
  # Duplicate 2099 to create 2100 (the expansion above stops short of 2100)
  res2 <- results[results$year==2099,]
  res2$year <- 2100
  results <- rbind(results, res2)
  return(results)
}
### (10) Get proportion to model ##############################################
# For many countries, the target population for vaccination is only a portion #
# of the full country. This function imports data on the percent of the pop #
# to use in the model.
GetModelPct <- function(path=input.dir, mycountry.s=mycountry){
  # Inputs
  #   path - character scalar with directory where modeled percent data exist
  #   mycountry.s - character scalar with code for current country
  # Output: scalar with the proportion of the country's population to model
  popfile <- file.path(path, "percent_pop_modeled.csv")
  modeled.df <- read.csv(popfile, stringsAsFactors = FALSE)
  is.target <- modeled.df$country_code == mycountry.s
  return(modeled.df$pct_pop_modeled[is.target])
}
### (11) Combine central estimate files #######################################
# For a defined vaccination program and sub-program, read in output files #
# from all countries, format per VIMC specifications, and write the combined #
# file to the output directory. #
combineOutputFiles <- function(path=output.dir, vacc_program="none",
                               vacc_subprogram="default",
                               deliv.path="G:/CTRHS/Modeling_Infections/GAVI MenA predictions/Deliverables/Deliverables 2019"
                               ){
  # Inputs
  #   path - character scalar indicating location of output files
  #   vacc_program - character scalar for program, as "none", "campaign", "both"
  #   vacc_subprogram - character scalar for sub-program, as "default" or "bestcase"
  #   deliv.path - character scalar for deliverables folder for compiled results
  # Output: writes a .csv file combining all per-country results
  # (A) Check inputs
  if (!dir.exists(path)) {
    stop(paste(path, "is not a valid directory", sep=" "))
  }
  if (!(vacc_program %in% c("none", "campaign", "routine", "both"))){
    stop("vacc_program must be one of 'none', 'campaign', 'routine', or 'both'")
  }
  if (!(vacc_subprogram %in% c("default", "bestcase"))){
    stop("vacc_subprogram must be one of 'default', 'bestcase'")
  }
  if (!dir.exists(deliv.path)){
    # Bug fix: the message reported 'path' instead of the directory checked
    stop(paste(deliv.path, "is not a valid directory", sep=" "))
  }
  # (B) Set up some needed data.frames and vectors
  names.df <- data.frame(
    country_code=c("BDI", "BEN", "BFA", "CAF", "CIV", "CMR", "COD", "ERI", "ETH", "GHA", "GIN", "GMB",
                   "GNB", "KEN", "MLI", "MRT", "NER", "NGA", "RWA", "SDN", "SEN", "SSD", "TCD", "TGO",
                   "TZA", "UGA"),
    country=c("Burundi", "Benin", "Burkina Faso", "Central African Republic",
              "Cote d'Ivoire", "Cameroon", "Congo, the Democratic Republic of the",
              "Eritrea", "Ethiopia", "Ghana", "Guinea", "Gambia", "Guinea-Bissau", "Kenya",
              "Mali", "Mauritania", "Niger", "Nigeria", "Rwanda", "Sudan", "Senegal",
              "South Sudan", "Chad", "Togo", "Tanzania, United Republic of", "Uganda" ))
  oldnames <- c("AgeInYears", "Cases", "Deaths", "DALYs", "cohortsize")
  newnames <- c("age", "cases", "deaths", "dalys", "cohort_size")
  output.df <- data.frame(disease=character(0), year=numeric(0), age=numeric(0),
                          country=character(0), country_name=character(0),
                          cohort_size=numeric(0), cases=numeric(0),
                          dalys=numeric(0), deaths=numeric(0), stringsAsFactors = FALSE)
  # (C) Get list of all relevant output files and cycle through each
  # Read the file in, verify size, rename variables, and bind rows to output
  files.v <- list.files(path, pattern=paste(vacc_program, vacc_subprogram, sep="_"))
  files.v <- files.v[grep("^(?!PSA)", files.v, perl=TRUE)]  # drop PSA output files
  # Robustness fix: 1:length(files.v) iterated c(1, 0) on an empty match and
  # crashed obscurely; fail early with a clear message instead
  if (length(files.v) == 0) {
    stop("No output files found for this program/sub-program combination")
  }
  for (f in seq_along(files.v)){
    country_code <- substr(files.v[f], 1, 3)
    # Bug fix: read from the 'path' argument, not the global 'output.dir'
    result.df <- read.csv(paste(path, files.v[f], sep="/"))
    size <- length(result.df$year)
    if (size != 7171){
      stop(paste("Input file is", size, "records and should be 7171", sep=" "))
    }
    result.df$disease <- rep("MenA", times=size)
    result.df$country <- rep(country_code, times=size)
    result.df$country_name <- as.character(rep(names.df$country[names.df$country_code==country_code], times=size))
    result.df <- result.df %>%
      rename_at(vars(oldnames), ~newnames)
    output.df <- bind_rows(output.df, result.df[, names(output.df)])
  }
  if(vacc_program=="none"){
    filename <- paste(deliv.path, "/mena-no-vaccination-201910gavi-3.MenA_KPW-Jackson.csv", sep="")
  } else {
    filename <- paste(deliv.path, "/mena-", vacc_program, "-", vacc_subprogram,
                      "-201910gavi-3.MenA_KPW-Jackson.csv", sep="")
  }
  # (D) Output to the appropriate directory, with error-handling
  # write.csv() returns NULL invisibly on success, so outval is NULL iff the
  # write succeeded without warning or error
  outval <- tryCatch({
    write.csv(x=output.df, file=filename, row.names=FALSE)
  }, warning=function(cond){
    message("Trying to output gave a warning:")
    message(cond)
    return(NA)
  }, error=function(cond){
    message("Trying to output gave an error:")
    message(cond)
    return(NA)
  }
  )
  if (is.null(outval)){print(paste("Output written to:", filename, sep=" "))}
}
| /ModelInputUtilities.R | no_license | ThatFluGuy/MenA_R_Programming | R | false | false | 29,198 | r | #### Program information ######################################################
# Package: MenA_VaccSims #
# Source file name: ModelInputUtilities.R #
# Contact: chris.c.stewart@kp.org, michael.l.jackson@kp.org #
# Version Date 12/17/19 #
#_______________________________________ _____________________________________#
# Input datasets: specify folder containing downloads from #
# https://montagu.vaccineimpact.org/ #
#_____________________________________________________________________________#
# Functions in this program: #
# (1) GetMontaguDemogData #
# (2) GetDemographicParameters #
# (3) checkVIMCdates #
# (4) GetPopAgeDist #
# (5) GetVaccScenario #
# (6) GetDiseaseStateDist #
# (7) GetWAIFWmatrix #
# (8) GetModelParams #
# (9) GetLifeExp                                                              #
#_____________________________________________________________________________#
# Parameters: #
# Start and end dates of simulation - to be specified in calling program #
# 3-letter country code #
# directory containing downloaded file #
# threshold - number of years to fill in if data is missing, defaults to 1 #
# (Note: this is because although total pop goes to 2100, cbr, cdr, imr may #
# end at 2099- I just copy the nearest year's value) #
#_____________________________________________________________________________#
# Purpose: return a vector or dataframe of parameters for: #
# -distribution for initializing population #
# -demographics: birth, death, infant mortality rate for simulation #
# -vaccination scenarios #
#_____________________________________________________________________________#
### (1) Download data from Montagu API ########################################
# Not currently used. #
# PURPOSE: GET DATA FROM API at "montagu.vaccineimpact.org" #
# This function does not work from KPWA network but tested from my laptop #
# 11/16/18, "downloads the 5 data files we need by reading into dataframe #
# then writing csv to the directory specified. Reconstructs near-real #
# filenames(compared to manual download),will be recognized by downstream fxn #
# requires R package "montagu", install with: #
# install.packages("drat") # if needed #
# drat:::add("vimc") #
# install.packages("montagu") #
#_____________________________________________________________________________#
# Chloe 7/12: note that death rates by age (TDB) and numbers of births will need to be added to this function (or something like it) to be used
# in the future; existence of that data in input folder currently assumed in functions below.
GetMontaguDemogData<-function( username=NULL, password=NULL, touchstone="201710gavi-5", destpath=NULL) {
  # Download the demographic files needed by the simulation from the Montagu
  # API and (optionally) write each one as a .csv to destpath.
  # Inputs:
  #   username, password - Montagu credentials
  #   touchstone - Montagu touchstone id to pull data for
  #   destpath - directory prefix for output files (should include a trailing
  #     separator -- TODO confirm against callers); if NULL, nothing is written
  # Output: none; side effects are the written csv files and printed names
  svr<-montagu_server(name="production", hostname="montagu.vaccineimpact.org", username=username, password=password)
  montagu_server_global_default_set(svr)
  tchlist<-montagu_touchstones_list(svr)
  if (touchstone %in% as.vector(tchlist$id)) {
    dlist<-montagu_demographics_list(touchstone_id = touchstone)
    demogidlist<-as.vector(dlist$id)
    needed_data<-c("cbr", "cdr", "unwpp_imr", "qq_pop", "tot_pop")
    for (i in seq_along(needed_data)) {
      if (needed_data[i] %in% demogidlist) {
        # Bug fix: was touchstone_id=tch; 'tch' is undefined in this scope
        dat<-montagu::montagu_demographic_data(type=needed_data[i], touchstone_id=touchstone)
        datrow<-dlist[dlist$id==needed_data[i],]
        # Filename like: 201710gavi-5_dds-201710_unwpp_imr_both.csv
        # [touchstone id + source + demog id + _both]
        # The exact name doesn't matter; GetFilename searches for the demog id
        if (length(destpath)>0) {
          # Bug fix: was paste0(datadir, ...); 'datadir' is undefined here --
          # use the destpath argument instead
          filename<-paste0(destpath, touchstone, datrow$source, datrow$id, "_both.csv")
          write.csv(dat, filename)
          print(filename)
        }
      }
    }
  } else {
    print("Touchstone not found. Touchstone list:")
    print(tchlist)
  }
}
### (2) GetDemographicParameters ##############################################
# This function returns a dataset with a row for each year of simulation, #
# with total pop, death rate, birth rate, and infant mortality rate #
# ASSUMES FILENAMES CONTAIN: "tot_pop_both", "cdr_both", "cbr_both","imr_both"#
# Chloe 7/12/19: now assume death rates and numbers of deaths from files #
# called "p_dying_both" and "births" respectively. #
# Mike 12/10/19: Re-write so that death probabilities are estimated from #
# annual age-specific UN interpolated population sizes, rather than probs #
# from Montagu. For the 2019 model year, keep the current approach of using #
# average death probs in 5-year age buckets, to keep things consistent with #
# the MenA_OneSim.R program. Future work may move this to single-year buckets.#
#_____________________________________________________________________________#
# Build one row per simulation year for one country with total population,
# crude birth rate, number of births, and death probabilities by age bucket.
# Inputs:
#   path - directory containing the Montagu demographic csv downloads
#   mycountry - 3-letter country code
#   start, end - simulation start/end dates (year() is applied to each)
#   fillThreshold - max years of missing data to pad (see checkVIMCdates)
# Output: data.frame keyed by country_code/year with columns totalpop,
#   birthrate, births, and one death-probability column per age bucket.
# Relies on globals set by helpers: mymsg (GetFilename), filemsg
# (CheckDemogFileStructure), datemsg (checkVIMCdates).
GetDemographicParameters<-function(path, mycountry, start, end, fillThreshold=1) {
setwd(path)
#### (1) Get total population size
totpop<-GetFilename(path, "tot_pop_both")
if (is.character(totpop)==FALSE) { stop(mymsg) }
dfpop<-read.csv(totpop)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=dfpop, dfdesc="tot_pop_both")==FALSE) { stop (filemsg)}
ctrypop<-dfpop[dfpop$country_code==mycountry, c("country_code", "year", "value")]
# Pad (or reject) the series so it spans the full simulation window
ctrypopfull<-checkVIMCdates(mydata=ctrypop, startyr=year(start), endyr=year(end), threshold=fillThreshold)
if (is.data.frame(ctrypopfull)==FALSE) { stop(paste(datemsg, " tot_pop_both")) }
# NOTE(review): the summary below is neither assigned nor printed inside a
# function, so it has no effect; looks like a leftover interactive check
ctrypopfull%>%group_by(country_code)%>%summarize(min(year), max(year))
#### (2) Get birthrate and number of births each year
cbr<-GetFilename(path, "cbr_both")
births <- GetFilename(path, "births")
if (is.character(cbr)==FALSE) { stop(mymsg) }
if (is.character(births)==FALSE) { stop(mymsg) }
dfbirth<-read.csv(cbr)
numbirth<-read.csv(births)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=dfbirth, dfdesc="cbr_both")==FALSE) { stop (filemsg)}
if (CheckDemogFileStructure(mycountry=mycountry, mydf=numbirth, dfdesc="births")==FALSE) { stop (filemsg)}
ctrybirth<-dfbirth[dfbirth$country_code==mycountry, c("country_code", "year", "value")]
numbirth_ctry<-numbirth[numbirth$country_code==mycountry, c("country_code", "year", "value")]
ctrybirthfull<-checkVIMCdates(mydata=ctrybirth, startyr=year(start), endyr=year(end), threshold=fillThreshold)
numbirthfull<-checkVIMCdates(mydata=numbirth_ctry, startyr=year(start), endyr=year(end), threshold=fillThreshold)
if (is.data.frame(ctrybirthfull)==FALSE) { stop(paste(datemsg, " cbr_both")) }
if (is.data.frame(numbirthfull)==FALSE) { stop(paste(datemsg, " births")) }
# NOTE(review): the next two summaries are also unused (see note above)
ctrybirthfull%>%group_by(country_code)%>%summarize(min(year), max(year))
numbirthfull%>%group_by(country_code)%>%summarize(min(year), max(year))
# Merge population, birth rate, and births into one table per country/year
build0<-merge(x=ctrypopfull, y=ctrybirthfull, by=c("country_code", "year"), all=TRUE)
colnames(build0)[colnames(build0)=="value.x"] <- "totalpop"
colnames(build0)[colnames(build0)=="value.y"] <- "birthrate"
build1 <- merge(x=build0, y=numbirthfull, by=c("country_code", "year"), all=TRUE)
colnames(build1)[colnames(build1)=="value"] <- "births"
#### (3) Calculate yearly probability of death by age group
# Death probabilities are derived from the UN interpolated populations: the
# fraction of a cohort that does not appear one year older in the next year.
dr <- GetFilename(path, "int_pop_both")
if (is.character(dr)==FALSE){ stop(mymsg) }
int.pop.all <- read.csv(dr)
if (CheckDemogFileStructure(mycountry=mycountry, mydf=int.pop.all, dfdesc="int_pop_both")==FALSE) {
stop (filemsg)
}
int.pop <- int.pop.all[int.pop.all$country_code==mycountry & int.pop.all$year>=1950,
                       c("country_code", "year", "age_from", "age_to", "value")]
# Error checking: should have 15251 rows (101 age groups x 151 years)
if(dim(int.pop)[1] != 15251){
stop("Incorrect number of entries in int_pop_both")
}
int.pop <- int.pop[order(int.pop$year, int.pop$age_from),]
# For each age group, calculate deaths each year
# First, ignore the age 100-120 (will set death rate to 1.0)
# and ignore year 2100 (since we don't have 2101 data for comparison)
# With rows sorted by year then age, an offset of 102 rows (= 101 ages + 1) is
# the same birth cohort one year later (next year, one year older). ifelse()
# recycles the shorter shifted vector; the wrapped tail positions appear to be
# only year==2100 / age_to==120 rows, which take NA instead -- TODO confirm
int.pop$next_value <- ifelse(int.pop$year==2100, NA,
                             ifelse(int.pop$age_to==120, NA, int.pop$value[103:15251]))
int.pop$death.prob <- (int.pop$value - int.pop$next_value)/int.pop$value
# Set death.prob to 1.0 for age 100-120
int.pop$death.prob <- ifelse(int.pop$age_to==120, 1, int.pop$death.prob)
# For 2100, use death probabilities from 2099
int.pop$death.prob[int.pop$year==2100] <- int.pop$death.prob[int.pop$year==2099]
# Fix death.prob where value is zero (set death prob to zero)
int.pop$death.prob[int.pop$value==0] <- 0
# In a handful of cases (at the upper age ranges) population size increases from
# year to year. Rather than assume a negative death prob (which might break the
# simulation in an unpredictable way) set death prob to zero for these.
int.pop$death.prob[int.pop$death.prob < 0] <- 0
# For every 5-year age bucket (except infant mortality) assume midpoint is death prob
# (age 0 = imr; ages 2, 7, 12, ..., 82 are the midpoints of the 5-year buckets)
int.pop.small <- int.pop[int.pop$age_from %in% c(0, 2, seq(7, 82, by=5)),]
# Bucket labels: age 0 -> "imr"; midpoint m -> "dr<m-2><m+2>" (e.g. 7 -> "dr59")
int.pop.small$name <- ifelse(int.pop.small$age_from==0, "imr",
                             paste("dr", int.pop.small$age_from-2, int.pop.small$age_from+2, sep=""))
# The 0-4 bucket is labelled dr14 (infants are covered separately by imr)
int.pop.small$name <- ifelse(int.pop.small$name=="dr04", "dr14", int.pop.small$name)
# Wide format: one death-probability column per bucket, one row per year
build2a <- spread(int.pop.small[,c("year", "death.prob", "name")], name, death.prob)
# Reorder spread()'s alphabetical columns into ascending-age order
# (year, imr, dr14, dr59, ...) -- NOTE(review): hard-coded indices; verify if
# the set of age buckets ever changes
build2a <- build2a[, c(1, 19, 3, 13, 2, 4:12, 14:18)]
build2 <- merge(x=build1, y=build2a, by="year", all=TRUE)
return(build2)
}
### (3) checkVIMCdates ########################################################
# Function to check dates. #
checkVIMCdates<-function(mydata, startyr, endyr, threshold=1) {
  # Verify that mydata (expected columns: country_code, year, value) covers the
  # simulation window [startyr, endyr]. Gaps of up to 'threshold' years at
  # either end are padded by copying rows from the nearest available year.
  # Returns the (possibly padded) data.frame, or FALSE when a gap exceeds the
  # threshold, in which case the global 'datemsg' explains why.
  # NOTE: assumes mydata is already subset to a single country, as all callers
  # in this file do; the previous dplyr group_by/summarize was replaced with
  # plain min/max, which also avoids a length>1 condition in if().
  datemsg<<-""
  minyear <- min(mydata$year)
  maxyear <- max(mydata$year)
  if (minyear > startyr) {
    prediff <- minyear - startyr
    if (prediff <= threshold) {
      # Pad backwards with copies of the earliest available year's rows
      prefill <- mydata[mydata$year==minyear,]
      for (i in seq_len(prediff)) {
        prefill$year <- minyear - i
        mydata <- rbind(mydata, prefill)
      }
    } else {
      datemsg<<- paste0("There is a ", prediff, "-year gap in the demographic data compared to your simulation begin date. You can increase the threshold parameter to fill in data from the closest year.")
      return(FALSE)
    }
  }
  # Bug fix: the original re-tested the *begin* gap (minyear-startyr) before
  # handling the end gap; that test was always true at this point and has been
  # removed.
  if (maxyear < endyr) {
    postdiff <- endyr - maxyear
    if (postdiff <= threshold) {
      # Pad forwards with copies of the latest available year's rows
      postfill <- mydata[mydata$year==maxyear,]
      for (i in seq_len(postdiff)) {
        postfill$year <- maxyear + i
        mydata <- rbind(mydata, postfill)
      }
    } else {
      datemsg<<-paste0("There is a ", postdiff, "-year gap in the demographic data compared to your simulation end date. You can increase the threshold parameter to fill in data from the closest year.")
      return(FALSE)
    }
  }
  return(mydata)
}
### (4) GetPopAgeDist #########################################################
# This function returns a vector with 7 values, one for each 5-year age #
# band up to 30, calculated from quinquennial file, for the year closest to #
# specified start of simulation. Added names=age band 11/15/18 #
# ASSUMES FILENAME CONTAINS "qq_pop_both" #
# Called by InitializePopulation.R #
#_____________________________________________________________________________#
GetPopAgeDist<-function(path, mycountry, start) {
  # Return a named vector with the fraction of the population in each 5-year
  # age band, taken from the quinquennial population file for the 5-year point
  # closest to the simulation start. Returns FALSE (with the global 'disterr'
  # set) on any validation failure.
  disterr<<-""
  setwd(path)
  qqfile<-GetFilename(path, "qq_pop_both")
  if (is.character(qqfile)==FALSE) { stop(mymsg) }
  qqdf<-read.csv(qqfile)
  if (CheckDemogFileStructure(mycountry=mycountry, mydf=qqdf, dfdesc="qq_pop_both")==FALSE) { stop (filemsg)}
  # Quinquennial data has one record every 5th year; round to the nearest
  mround <- function(x, base) {
    base * round(x / base)
  }
  popyr<-mround(year(start), 5)
  qqkeep<-qqdf[qqdf$country_code==mycountry & qqdf$year==popyr,]
  # Guard clauses replace the original deeply-nested if/else structure
  if (nrow(qqkeep) == 0) {
    disterr<<-"No age distribution input found for this country and year"
    return(FALSE)
  }
  if (!(DemogNumVarExists("age_from", qqkeep) & DemogNumVarExists("age_to", qqkeep))) {
    disterr<<- "Non-numeric data in age band variables"
    return(FALSE)
  }
  # Bug fix: original condition was min(qqkeep$age_from > 0 || max(...) < 90)
  # with a misplaced parenthesis (and a length>1 ||, an error in R >= 4.3).
  # Intended check: bands must start at age 0 and reach at least age 90.
  if (min(qqkeep$age_from) > 0 || max(qqkeep$age_to) < 90) {
    disterr<<-"Incomplete age bands for this country and year"
    return(FALSE)
  }
  # Chloe edit 3/22/19: label every band individually rather than lumping 30+
  qqkeep$ageband<-paste0("Age_", qqkeep$age_from, "_", qqkeep$age_to)
  bands<-qqkeep%>%group_by(country_code, ageband)%>%summarize(tot=sum(value), minage=min(age_from))
  totpop<-qqkeep%>%group_by(country_code)%>%summarize(totpop=sum(value))
  if (totpop[,2] <= 0) {
    disterr<<-"Population value is zero. Please check qqpop file."
    return(FALSE)
  }
  # Convert band totals into fractions of the whole population, age-ordered
  numsall<-merge(x=bands, y=totpop, by="country_code")
  numsall$fraction<-numsall$tot/numsall$totpop
  agedist <- numsall[order(numsall$minage),]
  dist<-agedist$fraction
  names(dist)<-agedist$ageband
  return(dist)
}
### (5) GetVaccScenario #######################################################
# This function returns a dataset with a row for each year of simulation, #
# with DosesCampaign, CoverRoutine, and AgeLimCampaign #
# NOTE: ASSUMES FILENAMES CONTAIN: "mena-routine" and "mena-campaign" #
# NOTE: NOT CURRENTLY LIMITED TO YEARS OF SIM, could use GetVIMCdates? #
# 12/6/18 copying destring from taRifx to deal with "<NA>" in vacc files #
# 11Dec2019: Added sub-scenario option to select between more options #
#_____________________________________________________________________________#
destring <- function(x, keep="0-9.-") {
  # Strip every character NOT in 'keep' (default: digits, decimal point,
  # minus sign) and coerce what remains to numeric. Entries with nothing
  # usable left (e.g. "<NA>") become NA, with a coercion warning.
  pattern <- paste0("[^", keep, "]+")
  as.numeric(gsub(pattern, "", x))
}
GetVaccScenario<-function(mycountry, scenario, sub.scenario, directory) {
# Returns a data.frame of vaccination inputs for one country, one row per
# year/activity: country_code, year, activity_type, CoverRoutine, and
# DosesCampaign (numeric version of the 'target' column).
# Inputs:
#   mycountry    - country code to extract
#   scenario     - "routine", "campaign", or "both"; anything else (including
#                  "none") is rejected because there is no file to read
#   sub.scenario - selects between scenario files, e.g. "default"/"bestcase"
#   directory    - location of the mena-routine / mena-campaign files
vaccmsg<<-""
setwd(directory)
if (scenario=="none") sub.scenario <- "NA" #can't have a sub-scenario when there's no vaccinations
if (scenario=="routine" || scenario=="both") {
filename<-GetFilename(directory, "mena-routine", sub.scenario)
} else if (scenario=="campaign") {
filename<-GetFilename(directory, "mena-campaign", sub.scenario)
} else {
# Bug fix: the original fell through with 'filename' undefined, producing
# an obscure "object 'filename' not found" error for scenario "none".
stop("GetVaccScenario requires scenario 'routine', 'campaign', or 'both'")
}
if (is.character(filename)==FALSE) { stop(mymsg) }
dfvacc<-read.csv(filename, stringsAsFactors = FALSE)
if (IsCountryAndColAvailable(country_code=mycountry,mydf=dfvacc, forVacc=1)==FALSE) { stop(countrymsg) }
#target and year validated above. Do we need AgeLimCampaign? No its not used.
if (scenario=="routine" || scenario=="both") {
if (!(DemogNumVarExists("coverage", dfvacc))) {
vaccmsg<<-"coverage variable missing from vaccination file"
return(FALSE)
}
}
ctryvacc<-dfvacc[dfvacc$country_code==mycountry, c("country_code", "year", "activity_type", "target" , "coverage")]
colnames(ctryvacc)[colnames(ctryvacc)=="coverage"] <-"CoverRoutine"
# 'target' holds "<NA>" where activity_type is routine; destring() turns those
# into NA (with a coercion warning) and real campaign targets into numbers.
ctryvacc$DosesCampaign<-destring(ctryvacc$target)
newdf<-subset(ctryvacc, select=-c(target))
# Return explicitly; the original relied on the value of the last assignment.
return(newdf)
}
### (6) GetDiseaseStateDist ###################################################
# Function GetDiseaseStateDist, called by InitializePopulation.R #
# Reads dist_both.csv, which is supplied with scripts; format should not vary #
#_____________________________________________________________________________#
GetDiseaseStateDist<-function(directory, region) {
# Reads dist_both.csv (packaged with the scripts) and returns the fraction of
# each disease state in each of the 7 population groups.
# Inputs:
#   directory - location of dist_both.csv
#   region    - "hyper" selects column 4, anything else column 3
setwd(directory)
dxfile<-GetFilename(directory, "dist_both.csv")
if (is.character(dxfile)==FALSE) {
# Bug fix: the hint must come BEFORE stop(); in the original the print()
# followed stop() and was unreachable.
message("File [dist_both.csv] is packaged with the R scripts and should be in the same directory.")
stop(mymsg)
}
dist<-read.csv(dxfile, stringsAsFactors = TRUE)
distcol<-ifelse(region=='hyper', 4, 3)
statefract<-as.vector(dist[,distcol]) # fraction of each disease state in each of 7 population groups
return(statefract)
}
### (7) GetWAIFWmatrix #################################################################
# Get WAIFWmatrix: constructs expanded WAIFW matrix from supplied parameter data frame;#
#______________________________________________________________________________________#
# Chloe edit 3/29: need to expand further to account for higher ages part of sim now.
# Chloe edit 5/5: left all these new additions to the GetWAIFmatrix() function below.
# EJ 10/23: simplify the WAIFW construction, base it on the parameter input to OneSim
# instead of external csv. Expand in one step instead of two. Implement five age group
# WAIFW version.
# MJ 1/29/2020: Critical correction, the bd parameters were assigned incorrectly
GetWAIFWmatrix<-function(params) {
  # Builds the expanded WAIFW (who-acquires-infection-from-whom) matrices for
  # the dry and rainy seasons from the bd1-bd7 and br entries of 'params'.
  # Output: a 1441 x 5 x 2 array -- one row per month of age (ages 0-120),
  # one column per contact age group, third dimension named "rainy"/"dry".
  dry <- rbind(
    rep(params$bd1, 5),
    rep(params$bd2, 5),
    c(params$bd3, params$bd3, params$bd6, params$bd3, params$bd3),
    c(params$bd4, params$bd4, params$bd4, params$bd7, params$bd4),
    rep(params$bd5, 5)
  )
  rainy <- dry * params$br
  # Replicate each age-group row once per month of age: 60 months each for
  # 0-4, 5-9, 10-14, 15-19, then 1201 months for ages 20-120.
  months.per.group <- c(60, 60, 60, 60, 1201)
  row.index <- rep(seq_len(nrow(dry)), times=months.per.group)
  wboth <- array(data=NA, dim=c(sum(months.per.group), 5, 2),
                 dimnames=list(NULL, NULL, c("rainy", "dry")))
  wboth[,,"rainy"] <- rainy[row.index,]
  wboth[,,"dry"] <- dry[row.index,]
  return(wboth)
}
### (8) GetModelParams ########################################################
# GetModelParams: Reads posterior_parameters.csv, which is supplied with #
# scripts. Subset to hyper or non-hyper region. #
# Goal is to prevent hard-coding of parameters in model #
#_____________________________________________________________________________#
GetModelParams<-function(path=scripts.dir, region.val,
                         scaling=c(1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)) {
  # Reads posterior_parameters.csv (packaged with the scripts) and returns the
  # parameter data frame; bd1-bd7 are scaled down for non-hyperendemic regions
  # so that parameters are not hard-coded in the model.
  # Inputs:
  #   path       - character scalar with directory where parameter file exists
  #   region.val - character scalar indicating region type ("hyper"/"not_hyper")
  #   scaling    - numeric vector of length 7 scaling hyper bd1-bd7 to not-hyper
  if (!(region.val %in% c("hyper", "not_hyper"))){
    stop("region must be hyper or not_hyper")
  }
  stopifnot(length(scaling) == 7)
  setwd(path)
  param.file <-GetFilename(path, "posterior_parameters.csv")
  if (is.character(param.file)==FALSE) {
    # Bug fix: emit the hint before stopping; in the original the print()
    # followed stop() and never ran.
    print("File [posterior_parameters.csv] is packaged with the R scripts and should be in the same directory.")
    stop(mymsg)
  }
  params <- read.csv(param.file, stringsAsFactors = FALSE) #data frame
  if (region.val=="not_hyper"){
    # Scale each transmission parameter by its matching entry (replaces
    # seven near-identical copy-paste assignments).
    for (i in seq_len(7)) {
      col <- paste0("bd", i)
      params[[col]] <- params[[col]] * scaling[i]
    }
  }
  return(params)
}
### (9) GetLifeExp ############################################################
# GetLifeExp: reads the life_ex_both file and creates a data.frame of life #
# expectancy by age and year. #
#_____________________________________________________________________________#
GetLifeExp<-function(path, mycountry.s=mycountry) {
# Reads the life_ex_both file and expands its age-group / 5-yearly records
# into a data.frame of life expectancy for every single age and every year.
# Inputs:
# path - character scalar with directory where life expectancy data exist
# mycountry.s - character scalar with code for current country
# Output:
# data.frame with life expectancy by age and year
# (columns: year, AgeInYears, Life.Ex)
setwd(path)
lifex <- GetFilename(path, "life_ex_both")
if (is.character(lifex)==FALSE) {stop(mymsg)}
lifex.df <- read.csv(lifex)
if (CheckDemogFileStructure(mycountry=mycountry.s, mydf=lifex.df, dfdesc="life_ex_both")==FALSE) { stop (filemsg)}
lifex.df <- lifex.df[lifex.df$country_code==mycountry.s,
c("age_from", "age_to", "year", "value")]
# Expand to individual ages instead of age groups
# First expand for the "missing" ages (e.g. 2-4 from the 1-4 group)
# Then add the "present" ages (values of age_from)
# 'count' = number of extra ages each group row contributes beyond age_from.
lifex.df$count <- lifex.df$age_to - lifex.df$age_from
# Each group row is repeated 'count' times; sequence() yields 1..count per
# row, so age_strt + counter enumerates the missing ages within the group.
le.df <- data.frame(year=rep(lifex.df$year, times=lifex.df$count),
value=rep(lifex.df$value, times=lifex.df$count),
age_strt=rep(lifex.df$age_from, times=lifex.df$count),
counter=sequence(lifex.df$count))
le.df$age_from <- le.df$age_strt + le.df$counter
le.df <- rbind(lifex.df[, c("year", "value", "age_from")],
le.df[, c("year", "value", "age_from")])
le.df <- le.df[order(le.df$year, le.df$age_from),]
# Expand to every year instead of every 5 years
# rep(x, times=5) repeats the whole vector end-to-end; the blockwise counter
# of 0..4 shifts each repetition, filling the 4 years after each data year.
# NOTE(review): assumes the file's years are 5 apart -- confirm with the
# demographic inputs.
results <- data.frame(year=rep(le.df$year, times=5),
value=rep(le.df$value, times=5),
age_from=rep(le.df$age_from, times=5),
counter=c(rep(0, times=length(le.df$year)),
rep(1, times=length(le.df$year)),
rep(2, times=length(le.df$year)),
rep(3, times=length(le.df$year)),
rep(4, times=length(le.df$year))))
results$year <- results$year + results$counter
results <- results[order(results$year, results$age_from),
c("year", "age_from", "value")]
names(results) <- c("year", "AgeInYears", "Life.Ex")
# Duplicate 2099 to create 2100
# (the 5-yearly expansion above ends at 2099, so 2100 must be added by hand)
res2 <- results[results$year==2099,]
res2$year <- 2100
results <- rbind(results, res2)
return(results)
}
### (10) Get proportion to model ##############################################
# For many countries, the target population for vaccination is only a portion #
# of the full country. This function imports data on the percent of the pop #
# to use in the model.
GetModelPct <- function(path=input.dir, mycountry.s=mycountry){
  # Looks up the proportion of a country's population that the model should
  # cover (for many countries the vaccination target is only part of the
  # full country).
  # Inputs
  #   path - character scalar with directory where modeled percent data exist
  #   mycountry.s - character scalar with code for current country
  # Output: scalar proportion of the population to model
  pct.file <- paste(path, "percent_pop_modeled.csv", sep="/")
  popmod.df <- read.csv(pct.file, stringsAsFactors = FALSE)
  matched <- popmod.df$country_code == mycountry.s
  return(popmod.df$pct_pop_modeled[matched])
}
### (11) Combine central estimate files #######################################
# For a defined vaccination program and sub-program, read in output files #
# from all countries, format per VIMC specifications, and write the combined #
# file to the output directory. #
combineOutputFiles <- function(path=output.dir, vacc_program="none",
                               vacc_subprogram="default",
                               deliv.path="G:/CTRHS/Modeling_Infections/GAVI MenA predictions/Deliverables/Deliverables 2019"
){
  # Combine per-country central-estimate output files for one vaccination
  # program/sub-program, reformat per VIMC specifications, and write the
  # combined csv into the deliverables folder.
  # Inputs
  #   path - character scalar indicating location of output files
  #   vacc_program - character scalar: "none", "campaign", "routine", "both"
  #   vacc_subprogram - character scalar: "default" or "bestcase"
  #   deliv.path - character scalar for deliverables folder for compiled results
  # Output: writes a .csv file (called for its side effect)
  # (A) Check inputs
  if (!dir.exists(path)) {
    stop(paste(path, "is not a valid directory", sep=" "))
  }
  if (!(vacc_program %in% c("none", "campaign", "routine", "both"))){
    stop("vacc_program must be one of 'none', 'campaign', 'routine', or 'both'")
  }
  if (!(vacc_subprogram %in% c("default", "bestcase"))){
    stop("vacc_subprogram must be one of 'default', 'bestcase'")
  }
  if (!dir.exists(deliv.path)){
    # Bug fix: report the directory actually being checked (the original
    # echoed 'path' in this message)
    stop(paste(deliv.path, "is not a valid directory", sep=" "))
  }
  # (B) Set up some needed data.frames and vectors
  names.df <- data.frame(
    country_code=c("BDI", "BEN", "BFA", "CAF", "CIV", "CMR", "COD", "ERI", "ETH", "GHA", "GIN", "GMB",
                   "GNB", "KEN", "MLI", "MRT", "NER", "NGA", "RWA", "SDN", "SEN", "SSD", "TCD", "TGO",
                   "TZA", "UGA"),
    country=c("Burundi", "Benin", "Burkina Faso", "Central African Republic",
              "Cote d'Ivoire", "Cameroon", "Congo, the Democratic Republic of the",
              "Eritrea", "Ethiopia", "Ghana", "Guinea", "Gambia", "Guinea-Bissau", "Kenya",
              "Mali", "Mauritania", "Niger", "Nigeria", "Rwanda", "Sudan", "Senegal",
              "South Sudan", "Chad", "Togo", "Tanzania, United Republic of", "Uganda" ))
  oldnames=c("AgeInYears", "Cases", "Deaths", "DALYs", "cohortsize")
  newnames=c("age", "cases", "deaths", "dalys", "cohort_size")
  output.df <- data.frame(disease=character(0), year=numeric(0), age=numeric(0),
                          country=character(0), country_name=character(0),
                          cohort_size=numeric(0), cases=numeric(0),
                          dalys=numeric(0), deaths=numeric(0), stringsAsFactors = FALSE)
  # (C) Get list of all relevant output files and cycle through each
  # Read the file in, verify size, rename variables, and bind rows to output
  files.v <- list.files(path, pattern=paste(vacc_program, vacc_subprogram, sep="_"))
  files.v <- files.v[grep("^(?!PSA)", files.v, perl=TRUE)]  # drop PSA runs
  if (length(files.v) == 0) {
    # seq_along below makes an empty match a silent no-op; fail loudly instead
    stop(paste("No output files found for this program/sub-program in", path, sep=" "))
  }
  for (f in seq_along(files.v)){
    # The first three characters of each filename are the country code
    country_code <- substr(files.v[f], 1, 3)
    # Bug fix: read from 'path'; the original used the global output.dir and
    # silently ignored a non-default 'path' argument
    result.df <- read.csv(paste(path, files.v[f], sep="/"))
    size <- length(result.df$year)
    if (size != 7171){
      stop(paste("Input file is", size, "records and should be 7171", sep=" "))
    }
    result.df$disease <- rep("MenA", times=size)
    result.df$country <- rep(country_code, times=size)
    result.df$country_name <- as.character(rep(names.df$country[names.df$country_code==country_code], times=size))
    result.df <- result.df %>%
      rename_at(vars(oldnames), ~newnames)
    output.df <- bind_rows(output.df, result.df[, names(output.df)])
  }
  if(vacc_program=="none"){
    filename <- paste(deliv.path, "/mena-no-vaccination-201910gavi-3.MenA_KPW-Jackson.csv", sep="")
  } else {
    filename <- paste(deliv.path, "/mena-", vacc_program, "-", vacc_subprogram,
                      "-201910gavi-3.MenA_KPW-Jackson.csv", sep="")
  }
  # (D) Output to the appropriate directory, with error-handling
  outval <- tryCatch({
    write.csv(x=output.df, file=filename, row.names=FALSE)  # NULL on success
  }, warning=function(cond){
    message("Trying to output gave a warning:")
    message(cond)
    return(NA)
  }, error=function(cond){
    message("Trying to output gave an error:")
    message(cond)
    return(NA)
  }
  )
  # write.csv returns NULL invisibly on success, so NULL here means "written"
  if (is.null(outval)){print(paste("Output written to:", filename, sep=" "))}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/browse_elements.R
\name{browse_elements}
\alias{browse_elements}
\title{Browse elements for description}
\usage{
browse_elements(pattern)
}
\arguments{
\item{pattern}{A case-insensitive perl expression or expressions to match in the long name of \code{\link{heims_data_dict}}.}
}
\value{
A \code{data.table} of all element-long name combinations matching the perl regular expression.
}
\description{
Browse elements for description
}
\examples{
browse_elements(c("ProViDer", "Maj"))
}
| /man/browse_elements.Rd | no_license | cran/heims | R | false | true | 583 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/browse_elements.R
\name{browse_elements}
\alias{browse_elements}
\title{Browse elements for description}
\usage{
browse_elements(pattern)
}
\arguments{
\item{pattern}{A case-insensitive perl expression or expressions to match in the long name of \code{\link{heims_data_dict}}.}
}
\value{
A \code{data.table} of all element-long name combinations matching the perl regular expression.
}
\description{
Browse elements for description
}
\examples{
browse_elements(c("ProViDer", "Maj"))
}
|
myTestRule {
# Demonstrates msiReplColl: puts one object into a collection, then
# replicates the whole collection to a second resource.
#Input parameters are:
#  Collection path name
#  Destination resource name for replicas
#  Option string containing
#    all -
#    irodsAdmin - for administrator initiated replication
#    backupMode - will not throw an error if a good copy
#                 already exists
#Output parameter is:
#  Status
# Output from running the example is:
#  Replicate collection /tempZone/home/rods/sub1 to location destRescName=testResc
# Put a file in the collection
# (forceFlag= makes the put overwrite any existing copy of *Path)
msiDataObjPut(*Path,*Resource,"localPath=*LocalFile++++forceFlag=",*Status);
# Derive the parent collection (*Coll) from the full object path
msiSplitPath(*Path, *Coll, *File);
#Replicate the collection
msiReplColl(*Coll,*Dest,*Flag,*Status);
writeLine("stdout","Replicate collection *Coll to location *Dest");
}
INPUT *Path="/tempZone/home/rods/sub1/foo1",*Resource="demoResc", *Dest="testResc", *LocalFile="foo1", *Flag="backupMode"
OUTPUT ruleExecOut
| /irods-3.3.1-cyverse/iRODS/clients/icommands/test/rules3.0/rulemsiReplColl.r | no_license | bogaotory/irods-cyverse | R | false | false | 883 | r | myTestRule {
#Input parameters are:
# Collection path name
# Destination resource name for replicas
# Option string containing
# all -
# irodsAdmin - for administrator initiated replication
# backupMode - will not throw an error if a good copy
# already exists
#Output parameter is:
# Status
# Output from running the example is:
# Replicate collection /tempZone/home/rods/sub1 to location destRescName=testResc
# Put a file in the collection
msiDataObjPut(*Path,*Resource,"localPath=*LocalFile++++forceFlag=",*Status);
msiSplitPath(*Path, *Coll, *File);
#Replicate the collection
msiReplColl(*Coll,*Dest,*Flag,*Status);
writeLine("stdout","Replicate collection *Coll to location *Dest");
}
INPUT *Path="/tempZone/home/rods/sub1/foo1",*Resource="demoResc", *Dest="testResc", *LocalFile="foo1", *Flag="backupMode"
OUTPUT ruleExecOut
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/case_id.r
\name{case_id}
\alias{case_id}
\title{Case classifier}
\usage{
case_id(eventlog)
}
\arguments{
\item{eventlog}{An object of class \code{eventlog}.}
}
\description{
Get the case classifier of an object of class \code{eventlog}
}
\seealso{
\code{\link{eventlog}}, \code{\link{activity_id}},
\code{\link{lifecycle_id}}, \code{\link{activity_instance_id}}
}
| /man/case_id.Rd | no_license | BijsT/bupaR | R | false | true | 460 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/case_id.r
\name{case_id}
\alias{case_id}
\title{Case classifier}
\usage{
case_id(eventlog)
}
\arguments{
\item{eventlog}{An object of class \code{eventlog}.}
}
\description{
Get the case classifier of an object of class \code{eventlog}
}
\seealso{
\code{\link{eventlog}}, \code{\link{activity_id}},
\code{\link{lifecycle_id}}, \code{\link{activity_instance_id}}
}
|
library(data.table)
library(vioplot)
library(aplpack)
library(datasets)
library(lattice)
library(ggplot2)
library(nlme)
library(lattice)
library(RColorBrewer)
library(grid)
library(gridExtra)
library(plyr)
plot1 <- function(){
# Bar chart of total US PM2.5 emissions per year (scaled to thousands),
# summed over all sources; the plot is then copied to plot1.png.
# NOTE(review): assumes the global data.frame 'NEI' is already loaded
# (see the commented readRDS lines) -- confirm callers load it first.
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
# Sum emissions within each year; tapply returns a named vector keyed by year.
dataset <-tapply(NEI$Emissions/1000, NEI$year, sum)
barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]*' in 1000'),
main="US Emission from All Sources",col=c("darkblue","red", "gold", "blue"))
# Copy the current screen device to a PNG file, then close that PNG device.
dev.copy(png, file="plot1.png")
dev.off()
}
plot2 <- function(){
# Bar chart of total Baltimore City (fips 24510) PM2.5 emissions per year,
# summed over all sources. The PNG export is currently commented out, so
# the plot only goes to the active device.
# NOTE(review): assumes the global data.frame 'NEI' is already loaded.
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
# Sum emissions within each year for the Baltimore subset.
dataset <-tapply(NEI.baltimore$Emissions, NEI.baltimore$year, sum)
barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]),
main="Baltimore Emission from All Sources",col=c("darkblue","red", "gold", "blue"))
#dev.copy(png, file="plot2.png")
#dev.off()
}
plot3 <- function(){
# ggplot bar chart of Baltimore PM2.5 emissions by year, faceted by source
# type. Returns the ggplot object (the last expression), which auto-prints
# only when the function is called at top level.
# NOTE(review): assumes the global data.frame 'NEI' is already loaded.
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
# Treat year as categorical so each year gets its own discrete bar/fill.
NEI.baltimore$year = as.character(NEI.baltimore$year)
qp<- qplot(year, Emissions, data=NEI.baltimore, stat="summary", fun.y="sum",facets= .~type, geom="bar", fill=year)
qp + ggtitle("Baltimore Emission from all sources by types")
#dev.copy(png, file="plot3.png")
#dev.off()
}
plot4 <- function(){
# ggplot bar chart of US coal-combustion PM2.5 emissions (in thousands) per
# year, faceted by EI.Sector. Returns the ggplot object (last expression).
# NOTE(review): par(mfrow=c(2,1)) has no effect on ggplot output -- it only
# applies to base graphics; probably left over from an earlier version.
# NOTE(review): assumes the globals 'NEI' and 'SCC' are already loaded.
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
par(mfrow=c(2,1))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
# Select SCC codes whose EI.Sector mentions Coal, then subset NEI to them.
sccIndex_v <-grep('*Coal',SCC$EI.Sector)
scc.coal <- SCC[sccIndex_v,]
NEI.us.coal <- NEI[which(NEI$SCC %in% scc.coal$SCC),]
NEI.us.coal$year = as.character(NEI.us.coal$year)
# Merge brings in EI.Sector for faceting.
mergedData = merge(NEI.us.coal, SCC, by="SCC")
mergedData$year = as.character(mergedData$year)
#dataset <-tapply( NEI.us.coal$Emissions/1000, NEI.us.coal$year, sum)
#plot1<-barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]),
# main="US Emission from coal combustion",col=c("darkblue","red", "gold", "blue"))
qp<- qplot(year, Emissions/1000, data=mergedData, stat="summary", fun.y="sum",facets= .~EI.Sector, geom="bar", fill=year)
qp + ggtitle("US Emission from coal combustion by Combustion Types(EI.Sector)")
#dev.copy(png, file="plot4.png")
#dev.off()
}
plot5 <- function(){
  # Bar chart of Baltimore motor-vehicle PM2.5 emissions by year, faceted by
  # EI.Sector (on-road diesel vs gasoline), saved to plot5.png.
  # NOTE(review): assumes the globals 'NEI' and 'SCC' are already loaded.
  par(mar=c(4, 4, 4, 4))
  par(oma=c(2, 2, 2, 2))
  # On-road diesel/gasoline mobile sources define "motor vehicle" here.
  sccIndex_v <-grep("^Mobile.*On-Road.*(Diesel|Gasoline)",SCC$EI.Sector)
  scc.motor <- SCC[sccIndex_v,]
  NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
  NEI.baltimore.motor <- NEI.baltimore[which(NEI.baltimore$SCC %in% scc.motor$SCC),]
  # Merge brings in EI.Sector for faceting.
  mergedData = merge(NEI.baltimore.motor, SCC, by="SCC")
  mergedData$year = as.character(mergedData$year)
  qp<- qplot(year, Emissions, data=mergedData, stat="summary", fun.y="sum",facets= .~EI.Sector, geom="bar", fill=year)
  # Bug fix: inside a function a ggplot object is NOT auto-printed, so the
  # original never rendered the plot before dev.copy -- the PNG captured
  # whatever was already on the device. print() forces rendering.
  print(qp + ggtitle("Baltimore Emission from motor by Mobile Types(EI.Sector)"))
  dev.copy(png, file="plot5.png")
  dev.off()
}
stat_sum_single <- function(fun, geom="point", ...) {
  # Convenience wrapper: red summary layer applying scalar summary 'fun'
  # at each x position; extra arguments pass through to stat_summary().
  stat_summary(fun.y = fun, geom = geom, colour = "red", size = 3, ...)
}
stat_sum_df <- function(fun, geom="crossbar", ...) {
  # Convenience wrapper: red summary layer built from a data-frame-returning
  # summary 'fun'; extra arguments pass through to stat_summary().
  stat_summary(fun.data = fun, geom = geom, colour = "red", width = 0.2, ...)
}
plot6 <- function(){
# Compares yearly PM2.5 emissions between LA (fips 06037) and Baltimore
# (fips 24510) as faceted bar charts. Returns the ggplot object.
# NOTE(review): 'scc.motor' is computed below but never used, so the plotted
# data are NOT restricted to vehicle SCC codes -- confirm whether NEI.two
# should also be filtered on scc.motor$SCC.
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
sccIndex_v <-grep("*Vehicles*", SCC$SCC.Level.Two)
scc.motor <- SCC[sccIndex_v,]
#NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
#NEI.LA <- NEI[which(NEI$fips=='06037'),]
#NEI.baltimore.motor <- NEI.baltimore[which(NEI.baltimore$SCC %in% scc.motor$SCC),]
#NEI.LA.motor <- NEI.LA[which(NEI.LA$SCC %in% scc.motor$SCC),]
#array.baltimore <-tapply( NEI.baltimore.motor$Emissions, NEI.baltimore.motor$year, sum)
#array.LA <-tapply( NEI.LA.motor$Emissions, NEI.LA.motor$year, sum)
#xnames<-names(array.baltimore)
#df.baltimore <- data.frame(xnames, as.vector(array.baltimore))
#df.LA <- data.frame(xnames, as.vector(array.LA))
#df<-data.frame(xnames, as.vector(array.LA),as.vector(array.baltimore))
NEI.two <-NEI[which(NEI$fips=='06037' | NEI$fips=='24510'),]
NEI.two$year = as.character(NEI.two$year)
#c3[,1][which(c3[,1]<12)] <- -1
# Relabel the fips codes with city names so the facet strips read LA/Baltimore.
# NOTE(review): this assumes fips is column 1 of NEI -- confirm column order.
NEI.two[,1][ which(NEI.two$fips=='06037')] <- 'LA'
NEI.two[,1][ which(NEI.two$fips=='24510')] <- 'Baltimore'
qp<- qplot(year, Emissions, data=NEI.two, stat="summary", fun.y="sum",facets= .~fips, geom="bar", fill=year)
# NOTE(review): stat_sum_df("normalize") passes the string "normalize" as
# fun.data; unless a function of that name exists when the plot renders,
# this layer will fail -- verify intent.
qp + ggtitle("LA vs Baltimore Emission")+stat_sum_df("normalize")
#dev.copy(png, file="plot6.png")
#dev.off()
#x<-xnames
#y1<-df[,2]
#y2<-df[,3]
#barplot(counts,xlab="Year", ylab=expression('PM'[2.5]),
# main="Emission from Baltimore Motors",col=c("darkblue","red", "gold", "blue"))
#plot(x,y1,
# type='n', ylab=expression('PM'[2.5]), xlab="Year", ylim=range(200,7500)
# ,main="Emission from LA/Baltimore Motors for year 1999, 2002, 2005, and 2008", col="blue")
#lines(x,y2, type='l', col="blue")
#lines(x,y1, type='l', col="red")
}
plot_v2 <- function() {
  # Overlay two normal CDFs on [-2, 2]: the standard normal in red and a
  # mean-1 / sd-1 normal in green. An empty plot sets up the axes first.
  grid.x <- seq(-2, 2, 0.05)
  cdf.std <- pnorm(grid.x)
  cdf.shifted <- pnorm(grid.x, 1, 1)
  plot(grid.x, cdf.std, type = "n")
  lines(grid.x, cdf.shifted, col = "green")
  lines(grid.x, cdf.std, col = "red")
}
plot_v0 <-function()
{
  # Violin plots of the US-wide emissions distribution for each measured year.
  # NOTE(review): relies on the global 'NEI' and the vioplot package.
  # Bug fix: the original subset with NEI$Emission, which only worked via
  # data.frame $ partial matching; the real column name is 'Emissions'.
  x1 <- NEI$Emissions[NEI$year==1999]
  x2 <- NEI$Emissions[NEI$year==2002]
  x3 <- NEI$Emissions[NEI$year==2005]
  x4 <- NEI$Emissions[NEI$year==2008]
  vioplot(x1, x2, x3, x4,
          names=c("1999", "2002", "2005", "2008"), col=c("red","blue","yellow", "pink"))
  title("US Emission Violin plot")
}
plot_v <-function()
{
  # Violin plots of the Baltimore City emissions distribution for each year.
  # NOTE(review): relies on the global 'NEI' and the vioplot package.
  # Bug fixes: NEI$Emission worked only via $ partial matching (real column
  # is 'Emissions'), and fips is compared as a string '24510' elsewhere in
  # this file, so use the same form here for consistency.
  NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
  x1 <- NEI.baltimore$Emissions[NEI.baltimore$year==1999]
  x2 <- NEI.baltimore$Emissions[NEI.baltimore$year==2002]
  x3 <- NEI.baltimore$Emissions[NEI.baltimore$year==2005]
  x4 <- NEI.baltimore$Emissions[NEI.baltimore$year==2008]
  vioplot(x1, x2, x3, x4,
          names=c("1999", "2002", "2005", "2008"), col=c("red","blue","yellow", "pink"))
  title("Baltimore Emission Violin plot")
}
plot_v1 <- function()
{
  # Violin plots of miles-per-gallon from the built-in mtcars data,
  # one violin per cylinder count (4, 6, 8).
  mpg.by.cyl <- split(mtcars$mpg, mtcars$cyl)
  vioplot(mpg.by.cyl[["4"]], mpg.by.cyl[["6"]], mpg.by.cyl[["8"]],
          names=c("4 cyl", "6 cyl", "8 cyl"), col=c("red","blue","yellow"))
  title("Violin Plots of Miles Per Gallon")
}
library(vioplot)
library(aplpack)
library(datasets)
library(lattice)
library(ggplot2)
library(nlme)
library(lattice)
library(RColorBrewer)
library(grid)
library(gridExtra)
library(plyr)
plot1 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
dataset <-tapply(NEI$Emissions/1000, NEI$year, sum)
barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]*' in 1000'),
main="US Emission from All Sources",col=c("darkblue","red", "gold", "blue"))
dev.copy(png, file="plot1.png")
dev.off()
}
plot2 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
dataset <-tapply(NEI.baltimore$Emissions, NEI.baltimore$year, sum)
barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]),
main="Baltimore Emission from All Sources",col=c("darkblue","red", "gold", "blue"))
#dev.copy(png, file="plot2.png")
#dev.off()
}
plot3 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
NEI.baltimore$year = as.character(NEI.baltimore$year)
qp<- qplot(year, Emissions, data=NEI.baltimore, stat="summary", fun.y="sum",facets= .~type, geom="bar", fill=year)
qp + ggtitle("Baltimore Emission from all sources by types")
#dev.copy(png, file="plot3.png")
#dev.off()
}
plot4 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
par(mfrow=c(2,1))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
sccIndex_v <-grep('*Coal',SCC$EI.Sector)
scc.coal <- SCC[sccIndex_v,]
NEI.us.coal <- NEI[which(NEI$SCC %in% scc.coal$SCC),]
NEI.us.coal$year = as.character(NEI.us.coal$year)
mergedData = merge(NEI.us.coal, SCC, by="SCC")
mergedData$year = as.character(mergedData$year)
#dataset <-tapply( NEI.us.coal$Emissions/1000, NEI.us.coal$year, sum)
#plot1<-barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]),
# main="US Emission from coal combustion",col=c("darkblue","red", "gold", "blue"))
qp<- qplot(year, Emissions/1000, data=mergedData, stat="summary", fun.y="sum",facets= .~EI.Sector, geom="bar", fill=year)
qp + ggtitle("US Emission from coal combustion by Combustion Types(EI.Sector)")
#dev.copy(png, file="plot4.png")
#dev.off()
}
plot5 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
sccIndex_v <-grep("^Mobile.*On-Road.*(Diesel|Gasoline)",SCC$EI.Sector)
scc.motor <- SCC[sccIndex_v,]
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
NEI.baltimore.motor <- NEI.baltimore[which(NEI.baltimore$SCC %in% scc.motor$SCC),]
mergedData = merge(NEI.baltimore.motor, SCC, by="SCC")
mergedData$year = as.character(mergedData$year)
qp<- qplot(year, Emissions, data=mergedData, stat="summary", fun.y="sum",facets= .~EI.Sector, geom="bar", fill=year)
qp + ggtitle("Baltimore Emission from motor by Mobile Types(EI.Sector)")
#dataset <-tapply( NEI.baltimore.motor$Emissions, NEI.baltimore.motor$year, sum)
#barplot(dataset,xlab="Year", ylab=expression('PM'[2.5]),
# main="Emission from Baltimore Motors",col=c("darkblue","red", "gold", "blue"))
dev.copy(png, file="plot5.png")
dev.off()
}
stat_sum_single <- function(fun, geom="point", ...) {
stat_summary(fun.y=fun, colour="red", geom=geom, size = 3, ...)
}
stat_sum_df <- function(fun, geom="crossbar", ...) {
stat_summary(fun.data=fun, colour="red", geom=geom, width=0.2, ...)
}
# Compare PM2.5 emissions for Los Angeles (fips 06037) and Baltimore
# (fips 24510) across years, faceted by city. Expects the `NEI` and `SCC`
# data frames to exist in the calling environment, e.g.:
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
plot6 <- function(){
par(mar=c(4, 4, 4, 4))
par(oma=c(2, 2, 2, 2))
# "*Vehicles*" was a glob pattern, not a regex (a leading "*" has nothing to
# repeat); grep() expects a regex, so match the literal substring instead.
sccIndex_v <- grep("Vehicles", SCC$SCC.Level.Two, fixed = TRUE)
scc.motor <- SCC[sccIndex_v,]
# NOTE(review): scc.motor is computed but never used below -- the plot is
# NOT restricted to motor-vehicle sources; confirm whether the commented-out
# `%in% scc.motor$SCC` filtering was meant to be applied.
NEI.two <-NEI[which(NEI$fips=='06037' | NEI$fips=='24510'),]
NEI.two$year = as.character(NEI.two$year)
# fips is column 1: replace the raw codes with readable city names so the
# facet strips are labelled 'LA' / 'Baltimore'.
NEI.two[,1][ which(NEI.two$fips=='06037')] <- 'LA'
NEI.two[,1][ which(NEI.two$fips=='24510')] <- 'Baltimore'
qp<- qplot(year, Emissions, data=NEI.two, stat="summary", fun.y="sum",facets= .~fips, geom="bar", fill=year)
# NOTE(review): "normalize" is not a standard fun.data summary function
# (cf. mean_cl_normal); confirm it is defined somewhere before relying on it.
qp + ggtitle("LA vs Baltimore Emission")+stat_sum_df("normalize")
}
# Overlay two normal CDFs on [-2, 2]: the standard normal (red) and a
# N(mean = 1, sd = 1) curve (green), on a shared set of empty axes.
plot_v2 <- function(){
  grid_x <- seq(-2, 2, 0.05)
  cdf_standard <- pnorm(grid_x)
  cdf_shifted <- pnorm(grid_x, 1, 1)
  plot(grid_x, cdf_standard, type = "n") # set up axes without drawing points
  lines(grid_x, cdf_shifted, col = "green")
  lines(grid_x, cdf_standard, col = "red")
}
# Violin plots of US-wide PM2.5 emissions for each measured year.
# Requires the vioplot package and an `NEI` data frame in the calling
# environment (columns `Emissions` and `year`).
plot_v0 <-function()
{
# Use the full column name: `NEI$Emission` only resolved through `$`
# partial matching of "Emissions", which is fragile.
x1 <- NEI$Emissions[NEI$year==1999]
x2 <- NEI$Emissions[NEI$year==2002]
x3 <- NEI$Emissions[NEI$year==2005]
x4 <- NEI$Emissions[NEI$year==2008]
vioplot(x1, x2, x3, x4,
names=c("1999", "2002", "2005", "2008"), col=c("red","blue","yellow", "pink"))
title("US Emission Violin plot")
}
# Violin plots of Baltimore PM2.5 emissions for each measured year.
# Requires the vioplot package and an `NEI` data frame in the calling
# environment (columns `fips`, `Emissions` and `year`).
plot_v <-function()
{
# fips is a character column elsewhere in this file ('24510'); compare as a
# string rather than relying on implicit numeric-to-character coercion.
NEI.baltimore <- NEI[which(NEI$fips=='24510'),]
# Use the full column name: `$Emission` only worked via partial matching.
x1 <- NEI.baltimore$Emissions[NEI.baltimore$year==1999]
x2 <- NEI.baltimore$Emissions[NEI.baltimore$year==2002]
x3 <- NEI.baltimore$Emissions[NEI.baltimore$year==2005]
x4 <- NEI.baltimore$Emissions[NEI.baltimore$year==2008]
vioplot(x1, x2, x3, x4,
names=c("1999", "2002", "2005", "2008"), col=c("red","blue","yellow", "pink"))
title("Baltimore Emission Violin plot")
}
# Violin plots of mtcars fuel economy (mpg), grouped by cylinder count.
# Requires the vioplot package; mtcars ships with base R.
plot_v1 <- function()
{
  # split() by cyl yields the same per-group mpg vectors as the logical
  # subsets mtcars$mpg[mtcars$cyl == k], keyed by "4"/"6"/"8".
  mpg_by_cyl <- split(mtcars$mpg, mtcars$cyl)
  vioplot(mpg_by_cyl[["4"]], mpg_by_cyl[["6"]], mpg_by_cyl[["8"]],
          names = c("4 cyl", "6 cyl", "8 cyl"), col = c("red", "blue", "yellow"))
  title("Violin Plots of Miles Per Gallon")
}
# Check if works w/ M/F
# redo names?
#' Plots a dot plot
#'
#' @examples
#' # generate random data
#' df = data.frame(year = c(rep(2007, 6), rep(2016, 6)), value = sample(1:100, 12), region = rep(letters[1:6], 2), facet = rep(c('group1', 'group2'), 6))
#'
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value')
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', include_arrows = FALSE)
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', sort_by = 'first', fill_value = FALSE, value_label_offset = 0.25, sort_desc = FALSE)
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', sort_by = 'first', fill_value = FALSE, value_label_offset = 0.25, sort_desc = FALSE)
#'
#' # example with categorical data
#' df2 = data.frame(group = c(rep('group1', 6), rep('group2', 6)), value = sample(1:100, 12), region = rep(letters[1:6], 2), facet = rep(c('group1', 'group2'), 6))
#'
#' plot_dot_diff(df2, group_var = 'group', region_var = 'region', value_var = 'value')
plot_dot_diff = function(df,
group_var = 'year',
region_var = 'region',
value_var = 'avg',
sort_desc = TRUE,
sort_by = 'diff', # one of: 'diff', 'first', 'last', 'none'
facet_var = NULL,
ncol = NULL,
nrow = NULL,
scales = 'fixed',
include_arrows = TRUE,
arrow_arg = arrow(length = unit(0.03, "npc")),
connector_length = 0.85, # fraction of total difference
dot_size = 6,
dot_shape = c(21, 23, 22, 24),
fill_value = TRUE,
dot_fill_discrete = c('#D3DEED', '#3288BD'), # first year, second year tuple
dot_fill_cont = brewer.pal(9, 'YlGnBu'),
connector_stroke = 0.25,
connector_colour = grey75K,
label_vals = TRUE,
label_size = 3,
label_colour = grey75K,
label_digits = 1,
percent_vals = FALSE,
value_label_offset = 0,
label_group = TRUE,
label_group_size = 4,
group_label_offset = 0.25,
horiz = TRUE,
file_name = NULL,
width = 10,
height = 6,
saveBoth = FALSE,
font_normal = 'Lato',
font_semi = 'Lato',
font_light = 'Lato Light',
panel_spacing = 1, # panel spacing, in lines
font_axis_label = 12,
font_axis_title = font_axis_label * 1.15,
font_facet = font_axis_label * 1.15,
font_legend_title = font_axis_label,
font_legend_label = font_axis_label * 0.8,
font_subtitle = font_axis_label * 1.2,
font_title = font_axis_label * 1.3,
legend.position = 'none',
legend.direction = 'horizontal',
grey_background = FALSE,
background_colour = grey10K,
projector = FALSE){
# -- Check inputs --
if(!is.list(arrow_arg)){
if(is.null(arrow)) {
warning('arrow should be either an arrow object or NULL. Switching to NULL')
arrow_arg = NULL
} else {
warning('Provide a valid arrow argument (see function "arrow")')
}
}
if(include_arrows == FALSE) {
# make sure the line goes all the way to the dot
connector_length = 1
arrow_arg = NULL
}
# Assumes data come in tidy form and pre-calculated averages.
# -- find latest year / group --
# for factors, set to the first value.
if(is.numeric(df[[group_var]])) {
min_time = min(df[[group_var]])
max_time = max(df[[group_var]])
} else {
min_time = as.character(unique(df[[group_var]])[1])
max_time = as.character(unique(df[[group_var]])[2])
}
# -- Spread wide for connector line / sorting --
df_untidy = if(is.null(facet_var)) {
df_untidy = df %>%
select_(group_var, region_var, value_var) %>%
spread_(group_var, value_var) %>%
rename_('time1' = as.name(min_time),
'time2' = as.name(max_time)) %>%
mutate(diff = (time2 - time1),
pct_diff = diff/time1)
} else {
df_untidy = df %>%
select_(group_var, region_var, value_var, facet_var) %>%
spread_(group_var, value_var) %>%
rename_('time1' = as.name(min_time),
'time2' = as.name(max_time)) %>%
mutate(diff = (time2 - time1),
pct_diff = diff/time1)
}
# -- refactor y-vars --
# decide how to order the var
if(sort_by != 'none') {
if(sort_by == 'last') {
facet_order = df %>%
filter_(paste0(group_var, '==', max_time))
sort_var = value_var
} else if (sort_by == 'first'){
facet_order = df %>%
filter_(paste0(group_var, '==', min_time))
sort_var = value_var
} else if(sort_by == 'diff'){
facet_order = df_untidy
sort_var = 'diff'
} else if(sort_by == 'pct_diff'){
facet_order = df_untidy
sort_var = 'pct_diff'
} else {
facet_order = df_untidy
sort_var = 'diff'
warning('sorting values by difference')
}
# sort ascending or descending
if(sort_desc == TRUE) {
facet_order = facet_order %>%
arrange_(sort_var)
} else{
facet_order = facet_order %>%
arrange_(paste0('desc(', sort_var, ')'))
}
# relevel
df[[region_var]] = factor(df[[region_var]],
levels = facet_order[[region_var]])
df_untidy[[region_var]] = factor(df_untidy[[region_var]],
levels = facet_order[[region_var]])
} else {
facet_order = df_untidy
}
# -- define the value of the top element --
top_region = facet_order %>% slice(n())
top_region = top_region[[region_var]]
# -- PLOT --
p = ggplot(df) +
# -- bar between dots --
geom_segment(aes_string(x = 'time1', xend = 'diff * connector_length + time1',
y = region_var, yend = region_var),
size = connector_stroke,
arrow = arrow_arg,
colour = connector_colour,
data = df_untidy) +
theme_xgrid(font_normal = font_normal, font_semi = font_semi,
font_light = font_light, legend.position = legend.position,
legend.direction = legend.direction, panel_spacing = panel_spacing,
font_axis_label = font_axis_label, font_axis_title = font_axis_title,
font_facet = font_facet, font_legend_title = font_legend_title,
font_legend_label = font_legend_label, font_subtitle = font_subtitle,
font_title = font_title, grey_background = grey_background,
background_colour = background_colour, projector = projector
) +
scale_shape_manual(values = dot_shape) +
theme(axis.title.x = element_blank(),
axis.text.y = element_text(family = font_normal, size = font_axis_label * 1.25))
# -- scale fill of points --
if(fill_value == TRUE){
p = p +
geom_point(aes_string(x = value_var, y = region_var,
shape = paste0('as.factor(', group_var, ')'),
fill = value_var),
size = dot_size, colour = grey90K) +
scale_fill_gradientn(colours = dot_fill_cont)
} else {
p = p +
geom_point(aes_string(x = value_var, y = region_var,
shape = paste0('as.factor(', group_var, ')'),
fill = paste0('as.factor(', group_var, ')')),
size = dot_size, colour = grey90K) +
scale_fill_manual(values = dot_fill_discrete)
}
# -- flip coords --
if(horiz == FALSE) {
p = p + coord_flip()
}
# -- group label --
if(label_group == TRUE) {
p = p +
geom_text(aes_string(x = value_var, y = region_var,
label = group_var),
family = font_light,
size = label_group_size,
nudge_y = group_label_offset,
data = df %>% filter_(paste0(region_var, '=="', top_region, '"')))
}
# -- value labels --
if (label_vals == TRUE) {
# -- calculate y-offset for labels, if needed --
if (is.null(value_label_offset)) {
if(is.null(facet_var)) {
y_offset = 0.05
} else {
y_offset = 0.25
}
# set a reasonable y-offset
value_label_offset = diff(range(df[[value_var]])) * y_offset
}
if(percent_vals == TRUE) {
df = df %>%
mutate_(.dots = setNames(paste0('llamar::percent(', value_var, ', 0)'), 'value_label'))
} else {
df = df %>%
mutate_(.dots = setNames(paste0('llamar::round_exact(', value_var, ',', label_digits, ')'), 'value_label'))
}
if(value_label_offset != 0) {
# text is above/below the dots
p = p +
geom_text(aes_string(x = value_var,
y = region_var,
label = 'value_label'),
size = label_size,
family = font_light,
nudge_y = value_label_offset,
colour = grey60K,
data = df)
} else if (fill_value == TRUE) {
# continuous variable
p = p +
geom_text(aes_string(x = value_var,
y = region_var,
label = 'value_label',
colour = value_var),
size = label_size,
family = font_light,
nudge_y = value_label_offset,
data = df) +
scale_colour_text(df[[value_var]])
} else {
# discrete variable
p = p +
geom_text(aes_string(x = value_var,
y = region_var,
label = 'value_label',
colour = paste0('as.factor(', group_var, ')')),
size = label_size,
family = font_light,
nudge_y = value_label_offset,
data = df) +
scale_colour_manual(values = c(grey90K, 'white'))
}
}
# -- facetting --
# + facet, single slope graph per facet
if(!is.null(facet_var)) {
p = p +
facet_wrap(as.formula(paste0('~', facet_var)),
ncol = ncol, nrow = nrow,
scales = scales)
}
# -- scales --
if(percent_vals == TRUE) {
p = p +
scale_x_continuous(labels = percent)
}
# -- save plot --
if(!is.null(file_name)) {
save_plot(file_name, saveBoth = saveBoth, width = width, height = height)
}
# -- return --
return(p)
} | /R/plot_dot_diff.R | permissive | flaneuse/llamar | R | false | false | 11,674 | r | # Check if works w/ M/F
# redo names?
#' Plots a dot plot
#'
#' @examples
#' # generate random data
#' df = data.frame(year = c(rep(2007, 6), rep(2016, 6)), value = sample(1:100, 12), region = rep(letters[1:6], 2), facet = rep(c('group1', 'group2'), 6))
#'
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value')
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', include_arrows = FALSE)
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', sort_by = 'first', fill_value = FALSE, value_label_offset = 0.25, sort_desc = FALSE)
#' plot_dot_diff(df, group_var = 'year', region_var = 'region', value_var = 'value', sort_by = 'first', fill_value = FALSE, value_label_offset = 0.25, sort_desc = FALSE)
#'
#' # example with categorical data
#' df2 = data.frame(group = c(rep('group1', 6), rep('group2', 6)), value = sample(1:100, 12), region = rep(letters[1:6], 2), facet = rep(c('group1', 'group2'), 6))
#'
#' plot_dot_diff(df2, group_var = 'group', region_var = 'region', value_var = 'value')
# Dot ("dumbbell") plot comparing a value between two time points / groups
# for each region, with an optional connector arrow between the paired dots.
# Assumes tidy input with pre-calculated averages. Depends on llamar helpers
# (theme_xgrid, save_plot, scale_colour_text, llamar::percent,
# llamar::round_exact) and the greyNNK palette constants defined elsewhere
# in the package.
# NOTE(review): `label_colour` is accepted but never used -- the value-label
# colour is hard-coded to grey60K below; confirm which is intended.
plot_dot_diff = function(df,
                         group_var = 'year',
                         region_var = 'region',
                         value_var = 'avg',
                         sort_desc = TRUE,
                         sort_by = 'diff', # one of: 'diff', 'pct_diff', 'first', 'last', 'none'
                         facet_var = NULL,
                         ncol = NULL,
                         nrow = NULL,
                         scales = 'fixed',
                         include_arrows = TRUE,
                         arrow_arg = arrow(length = unit(0.03, "npc")),
                         connector_length = 0.85, # fraction of total difference
                         dot_size = 6,
                         dot_shape = c(21, 23, 22, 24),
                         fill_value = TRUE,
                         dot_fill_discrete = c('#D3DEED', '#3288BD'), # first year, second year tuple
                         dot_fill_cont = brewer.pal(9, 'YlGnBu'),
                         connector_stroke = 0.25,
                         connector_colour = grey75K,
                         label_vals = TRUE,
                         label_size = 3,
                         label_colour = grey75K,
                         label_digits = 1,
                         percent_vals = FALSE,
                         value_label_offset = 0,
                         label_group = TRUE,
                         label_group_size = 4,
                         group_label_offset = 0.25,
                         horiz = TRUE,
                         file_name = NULL,
                         width = 10,
                         height = 6,
                         saveBoth = FALSE,
                         font_normal = 'Lato',
                         font_semi = 'Lato',
                         font_light = 'Lato Light',
                         panel_spacing = 1, # panel spacing, in lines
                         font_axis_label = 12,
                         font_axis_title = font_axis_label * 1.15,
                         font_facet = font_axis_label * 1.15,
                         font_legend_title = font_axis_label,
                         font_legend_label = font_axis_label * 0.8,
                         font_subtitle = font_axis_label * 1.2,
                         font_title = font_axis_label * 1.3,
                         legend.position = 'none',
                         legend.direction = 'horizontal',
                         grey_background = FALSE,
                         background_colour = grey10K,
                         projector = FALSE){
  # -- Check inputs --
  if(!is.list(arrow_arg)){
    # BUG FIX: this previously tested is.null(arrow) -- `arrow` is the grid
    # function, which is never NULL -- so the check was dead. Test the
    # argument itself.
    if(is.null(arrow_arg)) {
      warning('arrow should be either an arrow object or NULL. Switching to NULL')
      arrow_arg = NULL
    } else {
      warning('Provide a valid arrow argument (see function "arrow")')
    }
  }
  if(include_arrows == FALSE) {
    # make sure the line goes all the way to the dot
    connector_length = 1
    arrow_arg = NULL
  }
  # Assumes data come in tidy form and pre-calculated averages.
  # -- find earliest / latest year (group) --
  # for factors / characters, use the first two unique values in order of
  # appearance.
  if(is.numeric(df[[group_var]])) {
    min_time = min(df[[group_var]])
    max_time = max(df[[group_var]])
  } else {
    min_time = as.character(unique(df[[group_var]])[1])
    max_time = as.character(unique(df[[group_var]])[2])
  }
  # -- Spread wide for connector line / sorting --
  # (the inner `df_untidy =` assignments were redundant; the if() expression
  # already yields the value)
  df_untidy = if(is.null(facet_var)) {
    df %>%
      select_(group_var, region_var, value_var) %>%
      spread_(group_var, value_var) %>%
      rename_('time1' = as.name(min_time),
              'time2' = as.name(max_time)) %>%
      mutate(diff = (time2 - time1),
             pct_diff = diff/time1)
  } else {
    df %>%
      select_(group_var, region_var, value_var, facet_var) %>%
      spread_(group_var, value_var) %>%
      rename_('time1' = as.name(min_time),
              'time2' = as.name(max_time)) %>%
      mutate(diff = (time2 - time1),
             pct_diff = diff/time1)
  }
  # -- refactor y-vars --
  # decide how to order the var
  if(sort_by != 'none') {
    if(sort_by == 'last') {
      facet_order = df %>%
        filter_(paste0(group_var, '==', max_time))
      sort_var = value_var
    } else if (sort_by == 'first'){
      facet_order = df %>%
        filter_(paste0(group_var, '==', min_time))
      sort_var = value_var
    } else if(sort_by == 'diff'){
      facet_order = df_untidy
      sort_var = 'diff'
    } else if(sort_by == 'pct_diff'){
      facet_order = df_untidy
      sort_var = 'pct_diff'
    } else {
      # unknown option: fall back to sorting by absolute difference
      facet_order = df_untidy
      sort_var = 'diff'
      warning('sorting values by difference')
    }
    # sort ascending or descending; with horiz = TRUE, ascending factor
    # levels place the largest value at the top of the plot.
    if(sort_desc == TRUE) {
      facet_order = facet_order %>%
        arrange_(sort_var)
    } else{
      facet_order = facet_order %>%
        arrange_(paste0('desc(', sort_var, ')'))
    }
    # relevel both the tidy and the spread copies to the chosen order
    df[[region_var]] = factor(df[[region_var]],
                              levels = facet_order[[region_var]])
    df_untidy[[region_var]] = factor(df_untidy[[region_var]],
                                     levels = facet_order[[region_var]])
  } else {
    facet_order = df_untidy
  }
  # -- define the value of the top element (used to place the group labels) --
  top_region = facet_order %>% slice(n())
  top_region = top_region[[region_var]]
  # -- PLOT --
  p = ggplot(df) +
    # -- bar between dots --
    geom_segment(aes_string(x = 'time1', xend = 'diff * connector_length + time1',
                            y = region_var, yend = region_var),
                 size = connector_stroke,
                 arrow = arrow_arg,
                 colour = connector_colour,
                 data = df_untidy) +
    theme_xgrid(font_normal = font_normal, font_semi = font_semi,
                font_light = font_light, legend.position = legend.position,
                legend.direction = legend.direction, panel_spacing = panel_spacing,
                font_axis_label = font_axis_label, font_axis_title = font_axis_title,
                font_facet = font_facet, font_legend_title = font_legend_title,
                font_legend_label = font_legend_label, font_subtitle = font_subtitle,
                font_title = font_title, grey_background = grey_background,
                background_colour = background_colour, projector = projector
    ) +
    scale_shape_manual(values = dot_shape) +
    theme(axis.title.x = element_blank(),
          axis.text.y = element_text(family = font_normal, size = font_axis_label * 1.25))
  # -- scale fill of points --
  if(fill_value == TRUE){
    # continuous fill: dot colour encodes the value itself
    p = p +
      geom_point(aes_string(x = value_var, y = region_var,
                            shape = paste0('as.factor(', group_var, ')'),
                            fill = value_var),
                 size = dot_size, colour = grey90K) +
      scale_fill_gradientn(colours = dot_fill_cont)
  } else {
    # discrete fill: dot colour encodes the time point / group
    p = p +
      geom_point(aes_string(x = value_var, y = region_var,
                            shape = paste0('as.factor(', group_var, ')'),
                            fill = paste0('as.factor(', group_var, ')')),
                 size = dot_size, colour = grey90K) +
      scale_fill_manual(values = dot_fill_discrete)
  }
  # -- flip coords --
  if(horiz == FALSE) {
    p = p + coord_flip()
  }
  # -- group label (only drawn next to the top-most region) --
  if(label_group == TRUE) {
    p = p +
      geom_text(aes_string(x = value_var, y = region_var,
                           label = group_var),
                family = font_light,
                size = label_group_size,
                nudge_y = group_label_offset,
                data = df %>% filter_(paste0(region_var, '=="', top_region, '"')))
  }
  # -- value labels --
  if (label_vals == TRUE) {
    # -- calculate y-offset for labels, if needed --
    # (only triggers when the caller explicitly passes value_label_offset = NULL)
    if (is.null(value_label_offset)) {
      if(is.null(facet_var)) {
        y_offset = 0.05
      } else {
        y_offset = 0.25
      }
      # set a reasonable y-offset
      value_label_offset = diff(range(df[[value_var]])) * y_offset
    }
    if(percent_vals == TRUE) {
      df = df %>%
        mutate_(.dots = setNames(paste0('llamar::percent(', value_var, ', 0)'), 'value_label'))
    } else {
      df = df %>%
        mutate_(.dots = setNames(paste0('llamar::round_exact(', value_var, ',', label_digits, ')'), 'value_label'))
    }
    if(value_label_offset != 0) {
      # text is above/below the dots
      p = p +
        geom_text(aes_string(x = value_var,
                             y = region_var,
                             label = 'value_label'),
                  size = label_size,
                  family = font_light,
                  nudge_y = value_label_offset,
                  colour = grey60K,
                  data = df)
    } else if (fill_value == TRUE) {
      # labels sit on the dots; pick a readable text colour per value
      p = p +
        geom_text(aes_string(x = value_var,
                             y = region_var,
                             label = 'value_label',
                             colour = value_var),
                  size = label_size,
                  family = font_light,
                  nudge_y = value_label_offset,
                  data = df) +
        scale_colour_text(df[[value_var]])
    } else {
      # labels sit on discretely-filled dots
      p = p +
        geom_text(aes_string(x = value_var,
                             y = region_var,
                             label = 'value_label',
                             colour = paste0('as.factor(', group_var, ')')),
                  size = label_size,
                  family = font_light,
                  nudge_y = value_label_offset,
                  data = df) +
        scale_colour_manual(values = c(grey90K, 'white'))
    }
  }
  # -- facetting --
  # + facet, single slope graph per facet
  if(!is.null(facet_var)) {
    p = p +
      facet_wrap(as.formula(paste0('~', facet_var)),
                 ncol = ncol, nrow = nrow,
                 scales = scales)
  }
  # -- scales --
  if(percent_vals == TRUE) {
    p = p +
      scale_x_continuous(labels = percent)
  }
  # -- save plot --
  if(!is.null(file_name)) {
    save_plot(file_name, saveBoth = saveBoth, width = width, height = height)
  }
  # -- return --
  return(p)
}
#' Implement a landing page
#'
#' @param navbar_title The title for the Navigation Bar
#' @param task_title The task title
#' @param description_text Task description text -- see
#' \code{\link{make_description}}
#' @param informed_consent_text Informed consent language -- see
#' \code{\link{make_informed_consent}}
#' @param task_url The URL for the task where a user will be sent upon agreeing
#' to participate.
#'
#' @return HTML, CSS, and JavaScript necessary to create a task's landing page.
#' @export
#'
#' @details
#'
#' If the `task_url` parameter is static, and every visitor to the landing page
#' will go to one URL, then it may be a simple URL character string. However, if
#' the visitors to the landing page may be subject to randomization (treatment
#' groups), you may wish to utilize the [shinyrandomize::random_link()]
#' function. This will allow you to define a data frame of group-link
#' (key-value) pairs which will get updated every time the Shiny app loads. This
#' is a fine solution. However, this requires the deployed app be dormant
#' intermittently so the cloud computer hosting the application will reload the
#' app and update the UI.
#'
#' For most cases, I would recommend using \code{\link{autoRandomize}}, which is
#' placed in the server component of the application. The examples section below
#' highlights that best practice. For alternatives, see the documentation for
#' \code{\link{autoRandomize}}.
#'
#'
#' @examples
#'
#' # An example of a landing page that will update whenever a user visits the landing page.
#' # Note the use of the server here. This is the recommended practice to ensure randomization.
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinyland)
#'
#' # Create a data frame of group-link pairs.
#' group_link_pairs <- data.frame(
#' group = c("personal", "github"),
#' link = c("https://jdtrat.com/",
#' "https://github.com/jdtrat")
#' )
#'
#' ui <- fluidPage(
#' shinyland(
#' navbar_title = "{shinyland} presents",
#' task_title = "A demo task",
#' description_text = make_description(
#' rawHTML = TRUE,
#' '
#' <div class = "col-sm-3 box mx-4 p-4 text-center">
#' This is an example for creating a landing page.
#' This is where a description of the text would go.
#' </div>
#' '),
#' informed_consent_text = make_informed_consent(
#' rawHTML = FALSE,
#' tags$div("This is an example for creating a landing page.
#' This is where Informed Consent language would go.
#' In this case, clicking 'Yes, I agree to participate'
#' will redirect you to the website https://jdtrat.com."
#' )
#' ),
#' task_url = "https://website-does-not-matter-because-it-will-be-replaced.com"
#' )
#' )
#'
#' server <- function(input, output, session) {
#' autoRandomize(groupLinkPairs = group_link_pairs)
#' }
#'
#' shinyApp(ui, server)
#'
#' }
#'
#'
#'
# Assemble the landing-page UI: attach the bundled JS/CSS assets, then render
# the page itself via the package-internal make_landing(). The roxygen block
# above documents the exported interface.
shinyland <- function(navbar_title, task_title, description_text, informed_consent_text, task_url) {
shiny::tagList(
# Ship jQuery, Bootstrap 4.6 and the link-randomisation script with the page;
# versioned by the installed shinyland package version.
htmltools::htmlDependency(
name = "landingPage",
version = utils::packageVersion("shinyland"),
package = "shinyland",
src = "assets",
script = c("js/jquery-1.12.4.min.js", "js/bootstrap-4.6.0.min.js", "js/updateRandomLink.js"),
stylesheet = c("css/bootstrap-4.6.0.min.css")
),
# make_landing() builds the actual HTML for the page body.
make_landing(navbar_title = navbar_title,
task_title = task_title,
description_text = description_text,
informed_consent_text = informed_consent_text,
task_url = task_url)
)
}
| /R/landing.R | permissive | jdtrat/shinyland | R | false | false | 3,724 | r | #' Implement a landing page
#'
#' @param navbar_title The title for the Navigation Bar
#' @param task_title The task title
#' @param description_text Task description text -- see
#' \code{\link{make_description}}
#' @param informed_consent_text Informed consent language -- see
#' \code{\link{make_informed_consent}}
#' @param task_url The URL for the task where a user will be sent upon agreeing
#' to participate.
#'
#' @return HTML, CSS, and JavaScript necessary to create a task's landing page.
#' @export
#'
#' @details
#'
#' If the `task_url` parameter is static, and every visitor to the landing page
#' will go to one URL, then it may be a simple URL character string. However, if
#' the visitors to the landing page may be subject to randomization (treatment
#' groups), you may wish to utilize the [shinyrandomize::random_link()]
#' function. This will allow you to define a data frame of group-link
#' (key-value) pairs which will get updated every time the Shiny app loads. This
#' is a fine solution. However, this requires the deployed app be dormant
#' intermittently so the cloud computer hosting the application will reload the
#' app and update the UI.
#'
#' For most cases, I would recommend using \code{\link{autoRandomize}}, which is
#' placed in the server component of the application. The examples section below
#' highlights that best practice. For alternatives, see the documentation for
#' \code{\link{autoRandomize}}.
#'
#'
#' @examples
#'
#' # An example of a landing page that will update whenever a user visits the landing page.
#' # Note the use of the server here. This is the recommended practice to ensure randomization.
#'
#' if (interactive()) {
#'
#' library(shiny)
#' library(shinyland)
#'
#' # Create a data frame of group-link pairs.
#' group_link_pairs <- data.frame(
#' group = c("personal", "github"),
#' link = c("https://jdtrat.com/",
#' "https://github.com/jdtrat")
#' )
#'
#' ui <- fluidPage(
#' shinyland(
#' navbar_title = "{shinyland} presents",
#' task_title = "A demo task",
#' description_text = make_description(
#' rawHTML = TRUE,
#' '
#' <div class = "col-sm-3 box mx-4 p-4 text-center">
#' This is an example for creating a landing page.
#' This is where a description of the text would go.
#' </div>
#' '),
#' informed_consent_text = make_informed_consent(
#' rawHTML = FALSE,
#' tags$div("This is an example for creating a landing page.
#' This is where Informed Consent language would go.
#' In this case, clicking 'Yes, I agree to participate'
#' will redirect you to the website https://jdtrat.com."
#' )
#' ),
#' task_url = "https://website-does-not-matter-because-it-will-be-replaced.com"
#' )
#' )
#'
#' server <- function(input, output, session) {
#' autoRandomize(groupLinkPairs = group_link_pairs)
#' }
#'
#' shinyApp(ui, server)
#'
#' }
#'
#'
#'
# Assemble the landing-page UI: attach the bundled JS/CSS assets, then render
# the page itself via the package-internal make_landing(). The roxygen block
# above documents the exported interface.
shinyland <- function(navbar_title, task_title, description_text, informed_consent_text, task_url) {
  # Bundle jQuery, Bootstrap 4.6 and the link-randomisation script, versioned
  # by the installed shinyland package version.
  landing_assets <- htmltools::htmlDependency(
    name = "landingPage",
    version = utils::packageVersion("shinyland"),
    package = "shinyland",
    src = "assets",
    script = c("js/jquery-1.12.4.min.js", "js/bootstrap-4.6.0.min.js", "js/updateRandomLink.js"),
    stylesheet = c("css/bootstrap-4.6.0.min.css")
  )
  # Return the asset dependency plus the rendered landing-page markup.
  shiny::tagList(
    landing_assets,
    make_landing(
      navbar_title = navbar_title,
      task_title = task_title,
      description_text = description_text,
      informed_consent_text = informed_consent_text,
      task_url = task_url
    )
  )
}
|
/L12_1.R | no_license | congca/R-case-code | R | false | false | 9,011 | r | ||
# Buffer large clumps of NA cells in a raster inward to remove artifacts
# around edges. A clump is only buffered if its cell count exceeds
# `threshold`; `drop` is the number of one-cell buffering passes to apply.
# Returns the input raster with the buffered cells (and the outermost `drop`
# rows/columns) set to NA.
buffer_inward = function(rast, drop, threshold = 1e4){
  # require() only warns on failure; fail loudly for a hard dependency.
  if (!requireNamespace("raster", quietly = TRUE)) {
    stop("buffer_inward() requires the 'raster' package", call. = FALSE)
  }
  # Binary mask of NA cells, then label contiguous NA regions.
  rast_NA = rast
  rast_NA[] = as.numeric(is.na(rast[]))
  rast_clump = raster::clump(rast_NA, directions = 8) # NA and 0 are bg values for clumping
  # Start from an all-zero raster, then set cells in large clumps to NA.
  big_clumps = names(which(table(rast_clump[]) > threshold))
  outside = rast
  outside[] = 0
  outside[rast_clump[] %in% big_clumps] = NA
  for(x in seq_len(drop)){ # drop outer cells this many times
    print(x)
    edge = raster::boundaries(outside, type = 'inner', classes = FALSE, directions = 8)
    outside[edge[] %in% 1] = NA
  } # expands the NA region inward by `drop` cells
  rast[is.na(outside)] = NA
  # we can also have errors along borders at the extents of the raster -- just drop these parts directly:
  for(drop_row in c(1:drop, nrow(rast):(nrow(rast)-drop+1))) rast[drop_row,] = NA
  for(drop_col in c(1:drop, ncol(rast):(ncol(rast)-drop+1))) rast[,drop_col] = NA
  return(rast)
}
# Map raw response / predictor variable names onto pretty labels for plots.
# Names without a mapping pass through unchanged (revalue warns about the
# reverse case: mapping keys absent from `names`).
format_names = function(names){
  pretty_labels = c(
    "Apr_Jun_mean_min" = "Spring min",
    "Apr_Jun_mean_max" = "Spring max",
    "Apr_Jun_mean_mean" = "Spring mean",
    "Jul_Sep_mean_max" = "Summer max",
    "Jul_Sep_mean_mean" = "Summer mean",
    "GDD_winter_5" = paste0("Winter GDD (5 ", "\U00B0", "C)"),
    "Closure_2" = "Closure (> 2 m)",
    "Closure_10" = "Closure (> 10 m)",
    "Closure_40" = "Closure (> 40 m)",
    "Density_0_2" = "Density (0-2 m)",
    "Density_2_10" = "Density (2-10 m)",
    "Height_mean" = "Height (mean)",
    "Height_75" = "Height (75%)",
    "Height_95" = "Height (95%)",
    "Height_SD" = "Height (s.d.)",
    "Height_SD_7x7" = "Height (7x7 s.d.)",
    "Height_80_1m" = "Height (80%, 1 m)",
    "Height_95_1m" = "Height (95%, 1 m)",
    "Height_SD_1m" = "Height (s.d., 1 m)",
    "Biomass_Mg_Ha" = "Biomass (Mg/Ha)",
    "Veg_height_DEM" = "Veg. height (DEM)",
    "Elevation" = "Elevation",
    "Slope" = "Slope",
    "Aspect_eastness" = "Aspect (eastness)",
    "Aspect_northness" = "Aspect (northness)",
    "Convergence_index" = "Convergence index",
    "Position_index" = "Position index",
    "PC1" = "PC 1",
    "PC2" = "PC 2"
  )
  plyr::revalue(names, replace = pretty_labels, warn_missing = TRUE)
}
# Return a copy of `mat` with its diagonal entries set to NA; the input
# matrix is left untouched (copy-on-modify semantics).
remove_diag = function(mat) {
  diag(mat) <- NA
  mat
}
| /utility_functions.R | no_license | wolfch2/HJA_microclimate | R | false | false | 2,573 | r | # Buffer large clumps of NA cells in raster to remove artifacts
# around edges. Clumps only buffered if their size exceeds threshold.
# "drop" is the number of times to repeat buffering
# Buffer large clumps of NA cells in a raster inward to remove edge
# artifacts: clumps larger than `threshold` cells are grown inward by `drop`
# one-cell passes, and the outermost `drop` rows/columns are dropped too.
# Returns the input raster with those cells set to NA.
# NOTE(review): require() only warns on failure; requireNamespace()/library()
# would fail loudly for this hard dependency.
buffer_inward = function(rast,drop,threshold=1e4){
require(raster)
# Binary mask of the NA cells, then label contiguous NA regions.
rast_NA = rast
rast_NA[] = as.numeric(is.na(rast[]))
rast_clump = clump(rast_NA, directions=8) # NA and 0 are bg values for clumping
outside = rast
outside[] = 0 # start w/ 0 rast, then set values in big clumps to NA..
# Mark every cell belonging to a clump whose cell count exceeds threshold.
outside[rast_clump[] %in% names(which(table(rast_clump[]) > threshold))] = NA
for(x in 1:drop){ # drop outer cells this many times
print(x)
# Each pass: find the inner boundary of the NA region and absorb it.
edge = boundaries(outside, type='inner', classes=FALSE, directions=8)
outside[edge[] %in% 1] = NA
} # expands outside inward by Drop_cells # of cells
rast[is.na(outside)] = NA
# we can also have errors along borders at the extents of the raster -- just drop these parts directly:
for(drop_row in c(1:drop, nrow(rast):(nrow(rast)-drop+1))) rast[drop_row,] = NA
for(drop_col in c(1:drop, ncol(rast):(ncol(rast)-drop+1))) rast[,drop_col] = NA
return(rast)
}
# pretty formatting for response and predictor variable names
# Map raw response / predictor variable names onto pretty labels for plots.
# Names without a mapping pass through unchanged; warn_missing = TRUE warns
# about mapping keys absent from `names`.
format_names = function(names){
plyr::revalue(names, replace = c(
"Apr_Jun_mean_min"="Spring min",
"Apr_Jun_mean_max"="Spring max",
"Apr_Jun_mean_mean"="Spring mean",
"Jul_Sep_mean_max"="Summer max",
"Jul_Sep_mean_mean"="Summer mean",
"GDD_winter_5"=paste0("Winter GDD (5 ","\U00B0", "C)"),
"Closure_2"="Closure (> 2 m)",
"Closure_10"="Closure (> 10 m)",
"Closure_40"="Closure (> 40 m)",
"Density_0_2"="Density (0-2 m)",
"Density_2_10"="Density (2-10 m)",
"Height_mean"="Height (mean)",
"Height_75"="Height (75%)",
"Height_95"="Height (95%)",
"Height_SD"="Height (s.d.)",
"Height_SD_7x7"="Height (7x7 s.d.)",
"Height_80_1m"="Height (80%, 1 m)",
"Height_95_1m"="Height (95%, 1 m)",
"Height_SD_1m"="Height (s.d., 1 m)",
"Biomass_Mg_Ha"="Biomass (Mg/Ha)",
"Veg_height_DEM"="Veg. height (DEM)",
"Elevation"="Elevation",
"Slope"="Slope",
"Aspect_eastness"="Aspect (eastness)",
"Aspect_northness"="Aspect (northness)",
"Convergence_index"="Convergence index",
"Position_index"="Position index",
"PC1"="PC 1",
"PC2"="PC 2"
), warn_missing = TRUE)
}
# Return a copy of `mat` with every diagonal entry (row index == column
# index) replaced by NA; the input is left untouched.
remove_diag = function(mat){
  mat[row(mat) == col(mat)] <- NA
  return(mat)
}
|
#########################################################################################
#Name: Pallavi Dwivedi
#Date: October 16,2016
#Project description: Prediction of Diabetes risk based on blood glucose, blood pressure,
# skin thickness, insulin, number of pregnancies, Body mass index
# and age (Data source: Kaggle)
#########################################################################################
# NOTE(review): the original rm(list = ls()) wiped the user's workspace as a
# side effect; removed -- run the script in a fresh R session instead.
# plyr is loaded before dplyr so that dplyr's verbs are not masked.
library(plyr)
library(caret)
library(randomForest)
library(rpart)
library(dplyr)
library(rattle)
library(pROC)
library(party)
## Read the dataset (renamed: `file` shadowed base::file())
diabetes <- read.csv("diabetes.csv")
diabetes$Outcome <- ifelse(diabetes$Outcome==1,'yes','no')
diabetes$Outcome <- as.factor(diabetes$Outcome)
colnames(diabetes)[3] <- "BP" # Blood pressure
colnames(diabetes)[4] <- "Skin" # Skin thickness
colnames(diabetes)[7] <- "DPF" # Diabetes Pedigree Function
predictors <- names(diabetes)[names(diabetes) != "Outcome"]
#### Gradient boosting ############################################
set.seed(1234)
# 75/25 stratified train/test split on the outcome
splitIndex <- createDataPartition(diabetes[,"Outcome"], p = .75, list = FALSE, times = 1)
training <- diabetes[ splitIndex,]
testing <- diabetes[-splitIndex,]
Control <- trainControl(method='cv', number=3, returnResamp='none', summaryFunction = twoClassSummary,
classProbs = TRUE)
Model <- train(training[,predictors], training[,"Outcome"],
method='gbm',
trControl= Control,
metric = "ROC",
preProc = c("center", "scale"))
summary(Model)
print(Model)
# Class predictions and hold-out performance
predictions <- predict(Model, testing[,predictors], type='raw')
head(predictions)
print(postResample(pred=predictions, obs=as.factor(testing[,"Outcome"])))
# Class probabilities and AUC on the hold-out set
predictions <- predict(Model, testing[,predictors], type='prob')
head(predictions)
auc <- roc(ifelse(testing[,"Outcome"]=="yes",1,0), predictions[[2]])
print(auc$auc)
#### Random forest ################################################################################
# NOTE(review): `number=5` here is passed to train()'s `...`, not to
# trainControl(); it was likely intended as trainControl(method="cv", number=5).
model_rf_1 <- train(Outcome ~., data = training, method="rf", trControl = trainControl(method="cv"), number=5)
predict_rf_1 <- predict(model_rf_1, testing, type= "raw")
confusionMatrix(predict_rf_1, testing$Outcome)
| /Diabetes/prj_1.R | no_license | mohcinemadkour/ML-Workshops | R | false | false | 2,371 | r | #########################################################################################
#########################################################################################
#Name: Pallavi Dwivedi
#Date: October 16,2016
#Project description: Prediction of Diabetes risk based on blood glucose, blood pressure,
#                     skin thickness, insulin, number of pregnancies, Body mass index
#                     and age (Data source: Kaggle)
#########################################################################################
## NOTE(review): removed `rm(list=ls())` -- clearing the workspace of whoever
## sources this script is an unwanted side effect; restart R for a clean session.
library(caret)
library(randomForest)
library(rpart)
library(dplyr)
library(rattle)
library(pROC)
library(plyr)
library(party)

## Read the dataset and recode the target as a two-level factor
## (caret's twoClassSummary needs syntactically valid level names, hence yes/no).
file <- read.csv("diabetes.csv")
file$Outcome <- ifelse(file$Outcome == 1, "yes", "no")
file$Outcome <- as.factor(file$Outcome)
colnames(file)[3] <- "BP"   # Blood pressure
colnames(file)[4] <- "Skin" # Skin thickness
colnames(file)[7] <- "DPF"  # Diabetes Pedigree Function
predictors <- names(file)[names(file) != "Outcome"]

#### Gradient boosting ############################################
set.seed(1234)  # reproducible train/test split
splitIndex <- createDataPartition(file[, "Outcome"], p = .75, list = FALSE, times = 1)
training <- file[splitIndex, ]
testing <- file[-splitIndex, ]

## 3-fold CV optimising ROC AUC (requires class probabilities).
Control <- trainControl(method = "cv", number = 3, returnResamp = "none",
                        summaryFunction = twoClassSummary, classProbs = TRUE)
Model <- train(training[, predictors], training[, "Outcome"],
               method = "gbm",
               trControl = Control,
               metric = "ROC",
               preProc = c("center", "scale"))
summary(Model)
print(Model)

## Hard-class predictions and accuracy/kappa on the hold-out set.
predictions <- predict(Model, testing[, predictors], type = "raw")
head(predictions)
print(postResample(pred = predictions, obs = as.factor(testing[, "Outcome"])))

# probabilites (pROC is already attached above; the duplicate library() call
# was removed)
predictions <- predict(Model, testing[, predictors], type = "prob")
head(predictions)
## AUC score: select the "yes" probability column by name rather than by
## position (factor levels sort alphabetically as "no","yes", so this is the
## same column the original `[[2]]` picked, but robust to reordering).
auc <- roc(ifelse(testing[, "Outcome"] == "yes", 1, 0), predictions[["yes"]])
print(auc$auc)

#### Random forest ################################################################################
## BUG FIX: `number = 5` was previously passed to train() (where it falls into
## `...` and is silently ignored) instead of trainControl(); it belongs to the
## resampling specification, i.e. 5-fold CV.
model_rf_1 <- train(Outcome ~ ., data = training, method = "rf",
                    trControl = trainControl(method = "cv", number = 5))
#predict_rf <- predict(model_rf, testing, type= "class")
predict_rf_1 <- predict(model_rf_1, testing, type = "raw")
#confusionMatrix(predict_rf, testing$classe)
confusionMatrix(predict_rf_1, testing$Outcome)
|
## Example for netjack::diff_test -- test whether jackknifed network
## statistics differ from the statistic of the original network.
library(netjack)
### Name: diff_test
### Title: Test for differences from original statistic
### Aliases: diff_test
### ** Examples
data(GroupA)
## Nodes fall into two communities of 10; samples into two groups of 10
## (both grouping vectors happen to use the same 1/2 coding).
membership <- c(rep(1, 10), rep(2, 10))
GroupA_Net <- as_NetSample(GroupA, 1:20,
                           node.variables = list(community = membership),
                           sample.variables = list(group = membership))
## Leave-one-node-out jackknife, then global efficiency for each
## jackknifed network.
Jackknife_GroupA_Net <- net_apply(GroupA_Net, node_jackknife)
GlobEff_GroupA_Net <- net_stat_apply(Jackknife_GroupA_Net, global_efficiency)
diff_test(GlobEff_GroupA_Net)
| /data/genthat_extracted_code/netjack/examples/diff_test.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 482 | r | library(netjack)
### Name: diff_test
### Title: Test for differences from original statistic
### Aliases: diff_test
### ** Examples
## Example network data shipped with netjack.
data(GroupA)
## Build a network sample (ids 1:20) with two node communities of 10 and two
## sample groups of 10; both grouping vectors use the same 1/2 coding.
GroupA_Net = as_NetSample(GroupA, 1:20, node.variables = list(community = c(rep(1, 10), rep(2,10))),
sample.variables = list(group = c(rep(1, 10), rep(2,10))))
## Leave-one-node-out jackknife, then global efficiency per jackknifed network.
Jackknife_GroupA_Net = net_apply(GroupA_Net, node_jackknife)
GlobEff_GroupA_Net = net_stat_apply(Jackknife_GroupA_Net, global_efficiency)
## Test whether the jackknifed statistics differ from the original statistic.
diff_test(GlobEff_GroupA_Net)
|
## ARCH and GARCH estimation in R
## (fGarch models fitted to demeaned S&P 500 daily log returns).
## FIX: the original opened with a Python-style ''' docstring, which is not an
## R comment; replaced with proper # comments.
library(quantmod)
library(fGarch)

Symb <- "^GSPC"
## Download the S&P 500 adjusted close price from Yahoo Finance (column 6 =
## adjusted close).
P <- getSymbols(Symbols = Symb, src = "yahoo", from = "2009-01-01",
                to = Sys.Date(), auto.assign = FALSE, periodicity = "daily")[, 6]
log_return <- diff(log(P)) * 100  # daily log returns, in percent
Zero_mean_return <- log_return - mean(as.vector(log_return), na.rm = TRUE)

## ARCH(1): garch(1, 0) in fGarch notation.
garch01 <- garchFit(formula = ~garch(1, 0), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
names(garch01@fit)
garch01@fit$coef
garch01@fit$llh
plot(garch01)

## ARCH(4).
garch40 <- garchFit(formula = ~garch(4, 0), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch40@fit$coef
garch40@fit$llh

## GARCH(4,1).
garch41 <- garchFit(formula = ~garch(4, 1), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch41@fit$coef
garch41@fit$llh

# GARCH(1,1): (sigma_t)^2= omega + alpha_1 * (return_t-1)^2 + Beta_1 * (sigma_t-1)^2
garch11 <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch11@fit$coef
garch11@fit$llh

## GARCH(1,1) with Student-t innovations.
garch11_t <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, cond.dist = "std", trace = FALSE)
garch11_t@fit$coef
garch11_t@fit$llh

## GARCH(1,1) with skewed Student-t innovations.
garch11_s_t <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                        include.mean = FALSE, cond.dist = "sstd", trace = FALSE)
garch11_s_t@fit$coef
garch11_s_t@fit$llh
plot(garch11_s_t)

# Advaned ARCH and GARCH estimation in R
# APARCH : (sig_t+1)^delta = omega + SUM(i=1 to L_1)[alpha_i *(|return_t-i| - gama *(return_t-i))]
#          + SUM(j=1 to L_2) [Beta_j * (sig_t-j)^delta ]
## BUG FIX: the original fitted the normal APARCH(1,1) twice, chained an
## invalid `.predict()` call onto a garchFit() result (a syntax error in R),
## and left a stray argument-less garchFit() call; the broken draft lines were
## removed and the clean fits below kept.

## Normal APARCH(1,1).
aparch_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, trace = FALSE)
aparch_11@fit$coef

## APARCH(1,1) with delta fixed at 2.
aparch_delta2_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                             include.mean = FALSE, include.delta = FALSE,
                             delta = 2, trace = FALSE)
aparch_delta2_11@fit$coef

## APARCH(1,1) with Student-t innovations.
aparch_student_t_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                                include.mean = FALSE, cond.dist = "std", trace = FALSE)
aparch_student_t_11@fit$coef

# Normal Aparch(2,2)
aparch_22 <- garchFit(formula = ~aparch(2, 2), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, trace = FALSE)
aparch_22@fit$coef
| /Arch and Garch estimation in R.R | no_license | Sajades/ARCH-and-GARCH-modeling-in-R-part-I | R | false | false | 2,880 | r | '''
## ARCH and GARCH estimation in R
## (fGarch models fitted to demeaned S&P 500 daily log returns).
## FIX: the original opened with a Python-style ''' docstring, which is not an
## R comment; replaced with proper # comments.
library(quantmod)
library(fGarch)

Symb <- "^GSPC"
## Download the S&P 500 adjusted close price from Yahoo Finance (column 6 =
## adjusted close).
P <- getSymbols(Symbols = Symb, src = "yahoo", from = "2009-01-01",
                to = Sys.Date(), auto.assign = FALSE, periodicity = "daily")[, 6]
log_return <- diff(log(P)) * 100  # daily log returns, in percent
Zero_mean_return <- log_return - mean(as.vector(log_return), na.rm = TRUE)

## ARCH(1): garch(1, 0) in fGarch notation.
garch01 <- garchFit(formula = ~garch(1, 0), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
names(garch01@fit)
garch01@fit$coef
garch01@fit$llh
plot(garch01)

## ARCH(4).
garch40 <- garchFit(formula = ~garch(4, 0), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch40@fit$coef
garch40@fit$llh

## GARCH(4,1).
garch41 <- garchFit(formula = ~garch(4, 1), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch41@fit$coef
garch41@fit$llh

# GARCH(1,1): (sigma_t)^2= omega + alpha_1 * (return_t-1)^2 + Beta_1 * (sigma_t-1)^2
garch11 <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                    include.mean = FALSE)
garch11@fit$coef
garch11@fit$llh

## GARCH(1,1) with Student-t innovations.
garch11_t <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, cond.dist = "std", trace = FALSE)
garch11_t@fit$coef
garch11_t@fit$llh

## GARCH(1,1) with skewed Student-t innovations.
garch11_s_t <- garchFit(formula = ~garch(1, 1), data = na.omit(Zero_mean_return),
                        include.mean = FALSE, cond.dist = "sstd", trace = FALSE)
garch11_s_t@fit$coef
garch11_s_t@fit$llh
plot(garch11_s_t)

# Advaned ARCH and GARCH estimation in R
# APARCH : (sig_t+1)^delta = omega + SUM(i=1 to L_1)[alpha_i *(|return_t-i| - gama *(return_t-i))]
#          + SUM(j=1 to L_2) [Beta_j * (sig_t-j)^delta ]
## BUG FIX: the original fitted the normal APARCH(1,1) twice, chained an
## invalid `.predict()` call onto a garchFit() result (a syntax error in R),
## and left a stray argument-less garchFit() call; the broken draft lines were
## removed and the clean fits below kept.

## Normal APARCH(1,1).
aparch_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, trace = FALSE)
aparch_11@fit$coef

## APARCH(1,1) with delta fixed at 2.
aparch_delta2_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                             include.mean = FALSE, include.delta = FALSE,
                             delta = 2, trace = FALSE)
aparch_delta2_11@fit$coef

## APARCH(1,1) with Student-t innovations.
aparch_student_t_11 <- garchFit(formula = ~aparch(1, 1), data = na.omit(Zero_mean_return),
                                include.mean = FALSE, cond.dist = "std", trace = FALSE)
aparch_student_t_11@fit$coef

# Normal Aparch(2,2)
aparch_22 <- garchFit(formula = ~aparch(2, 2), data = na.omit(Zero_mean_return),
                      include.mean = FALSE, trace = FALSE)
aparch_22@fit$coef
## Interactive development workflow for the afex package.
## Run line by line from the package root; NOT meant to be sourced top-to-bottom
## (several lines below deliberately toggle the same option back and forth).
library(devtools)  # library() instead of require(): fail loudly if missing
library(testthat)
load_all()
options(error = NULL)
devtools::test()
## BUG FIX: the original passed "-compact-vignettes=both" (single dash), which
## R CMD build does not recognise; the flag needs a double dash, consistent
## with the identical call further below and the trailing comment.
devtools::build(args = "--compact-vignettes=both") # R CMD build afex --compact-vignettes="gs+qpdf"
document()
check()
## Run subsets of the test suite by filename filter.
test_package("afex", filter = "aov")
test_package("afex", filter = "mixed")
test_package("afex", filter = "mixed-structure")
test_package("afex", filter = "mixed-bugs")
test_package("afex", filter = "mixed-mw")
test_package("afex", filter = "emmeans")
## Debugging toggles: drop into the debugger on error / treat warnings as
## errors, with the lines to switch them back off again.
options(error = recover)
options(error = NULL)
options(warn = 2)
options(warn = 0)
## check for non-ASCII characters in examples:
for (f in list.files("examples/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
## check for non-ASCII characters in R files:
for (f in list.files("R/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
## check for non-ASCII characters in Rd files:
for (f in list.files("man/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
#install.packages("afex", dependencies = TRUE)
#devtools::build()
devtools::build_vignettes()
clean_vignettes(pkg = ".")
devtools::build(args = "--compact-vignettes=both")
### add packages
usethis::use_package("glmmTMB", "Suggests")
usethis::use_package("rstanarm", "Suggests")
usethis::use_package("brms", "Suggests")
usethis::use_package("cowplot", "Suggests")
usethis::use_package("nlme", "Suggests")
#usethis::use_package("GLMMadaptive", "Suggests")
### check reverse dependencies:
library(revdepcheck) # see https://github.com/r-lib/revdepcheck
revdep_check(num_workers = 2)
revdep_summary()
revdep_details(revdep = "r2glmm")
| /dev.R | no_license | cedricbatailler/afex | R | false | false | 1,620 | r | require(devtools)
## Interactive development workflow for the afex package.
## Run line by line from the package root; NOT meant to be sourced top-to-bottom
## (several lines below deliberately toggle the same option back and forth).
library(devtools)  # library() instead of require(): fail loudly if missing
library(testthat)
load_all()
options(error = NULL)
devtools::test()
## BUG FIX: the original passed "-compact-vignettes=both" (single dash), which
## R CMD build does not recognise; the flag needs a double dash, consistent
## with the identical call further below and the trailing comment.
devtools::build(args = "--compact-vignettes=both") # R CMD build afex --compact-vignettes="gs+qpdf"
document()
check()
## Run subsets of the test suite by filename filter.
test_package("afex", filter = "aov")
test_package("afex", filter = "mixed")
test_package("afex", filter = "mixed-structure")
test_package("afex", filter = "mixed-bugs")
test_package("afex", filter = "mixed-mw")
test_package("afex", filter = "emmeans")
## Debugging toggles: drop into the debugger on error / treat warnings as
## errors, with the lines to switch them back off again.
options(error = recover)
options(error = NULL)
options(warn = 2)
options(warn = 0)
## check for non-ASCII characters in examples:
for (f in list.files("examples/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
## check for non-ASCII characters in R files:
for (f in list.files("R/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
## check for non-ASCII characters in Rd files:
for (f in list.files("man/", full.names = TRUE)) {
  cat(f, "\n")
  tools::showNonASCIIfile(f)
}
#install.packages("afex", dependencies = TRUE)
#devtools::build()
devtools::build_vignettes()
clean_vignettes(pkg = ".")
devtools::build(args = "--compact-vignettes=both")
### add packages
usethis::use_package("glmmTMB", "Suggests")
usethis::use_package("rstanarm", "Suggests")
usethis::use_package("brms", "Suggests")
usethis::use_package("cowplot", "Suggests")
usethis::use_package("nlme", "Suggests")
#usethis::use_package("GLMMadaptive", "Suggests")
### check reverse dependencies:
library(revdepcheck) # see https://github.com/r-lib/revdepcheck
revdep_check(num_workers = 2)
revdep_summary()
revdep_details(revdep = "r2glmm")
|
## Extracted example for icosa::vicinity.
library(icosa)
### Name: vicinity
### Title: The neighbouring faces of faces in an icosahedral grid
### Aliases: vicinity vicinity,trigrid,character-method
### ** Examples
## Triangular icosahedral grid with tessellation parameter 3.
g <- trigrid(3)
## Faces neighbouring faces "F4" and "F10".
ne <- vicinity(g, c("F4", "F10"))
ne
| /data/genthat_extracted_code/icosa/examples/vicinity-methods.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 236 | r | library(icosa)
### Name: vicinity
### Title: The neighbouring faces of faces in an icosahedral grid
### Aliases: vicinity vicinity,trigrid,character-method
### ** Examples
## Triangular icosahedral grid with tessellation parameter 3.
g <- trigrid(3)
## Faces neighbouring faces "F4" and "F10".
ne <- vicinity(g, c("F4", "F10"))
ne
|
## Auto-generated fuzzing fixture: an integer vector passed (as argument `b`)
## to mcga's internal C routine ByteVectorToDoubles via do.call().
testlist <- list(b = c(-62195L, -1667457892L, -852010L, -1131245043L, -1835887972L, -1667457892L, -1667457892L, 202116108L, 202116108L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
## Invoke the (non-exported) byte-to-double conversion with the fixture.
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613108884-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 383 | r | testlist <- list(b = c(-62195L, -1667457892L, -852010L, -1131245043L, -1835887972L, -1667457892L, -1667457892L, 202116108L, 202116108L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
## Invoke mcga's internal byte-to-double conversion on the fixture above
## (duplicate call retained from the original dataset dump).
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
library(ecospat)
library(FactoMineR)
library(factoextra)
library(corrplot)
library(dplyr)
library(stringr)
# Global -----------------------------------------------------------------
## NOTE(review): hard-coded absolute path + setwd() makes this script
## machine-specific; consider relative paths or here::here().
workingdirectory <- "D:/Koma/_PhD/Sync/_Amsterdam/_PhD/Chapter3_wetlandniche/3_Dataprocessing/Niche_v13/"
setwd(workingdirectory)

# Import data ------------------------------------------------------------
## lgn8 land-cover codes used for the (currently unused) per-species subsets;
## factored out of the four repeated subset() calls below.
lgn8_codes <- c(16, 17, 30, 322, 332, 41, 42, 43)
GrW <- read.csv("GrW_wlandsc.csv")
GrW_lgn8 <- subset(GrW, lgn8 %in% lgn8_codes)
KK <- read.csv("KK_wlandsc.csv")
KK_lgn8 <- subset(KK, lgn8 %in% lgn8_codes)
Sn <- read.csv("Sn_wlandsc.csv")
Sn_lgn8 <- subset(Sn, lgn8 %in% lgn8_codes)
Bgr <- read.csv("Bgr_wlandsc.csv")
Bgr_lgn8 <- subset(Bgr, lgn8 %in% lgn8_codes)

#data_merged=rbind(GrW_lgn8,KK_lgn8,Sn_lgn8,Bgr_lgn8)
data_merged <- rbind(GrW, KK, Sn, Bgr)
noffea <- 9  # number of feature columns: the first `noffea` columns below

# 200 m only reed: keep/reorder the relevant columns and rename them.
data_merged <- subset(data_merged, select = c(11, 10, 9, 7, 8, 12, 14, 13, 18, 15, 16, 4, 5, 2))
names(data_merged) <- c("VV_p95", "VV_FHD", "VD_0_1", "VD_1_2", "VD_2_3",
                        "HV_sd", "HV_reedveg_sd", "HV_reedveg_prop", "HV_reedveg_patch",
                        "species", "occurrence", "x", "y", "id")
## Keep rows with VV_p95 below 30, then replace remaining NAs by 0.
data_merged <- data_merged[data_merged$VV_p95 < 30, ]
## FIX: `is.na(x) == TRUE` was redundant -- is.na() already returns the
## logical mask.
data_merged[is.na(data_merged)] <- 0
#### Ecospat
## PCA on the feature columns only (first noffea columns); keep 3 axes.
pca.env<-dudi.pca(data_merged[,1:noffea],scannf=FALSE,center=TRUE,nf=3)
## Flip the sign of the axis-2 loadings (PCA axis orientation is arbitrary;
## presumably done to match figure orientation -- TODO confirm).
pca.env$co[2]=pca.env$co[2]*-1
fviz_pca_var(pca.env,axes = c(1, 2), col.var = "black",repel = TRUE,fontsize=14)
var <- get_pca_var(pca.env)
## Numeric plot of the variable loadings/correlations on the kept axes.
corrplot(as.matrix(var$cor), is.corr=FALSE,method="number",col=colorRampPalette(c("dodgerblue4","white","firebrick"))(200))
## Split observations by species (Dutch names) plus the background class.
grotekarakiet=dplyr::filter(data_merged,str_detect(data_merged$species,"Grote Karekiet"))
kleinekarakiet=dplyr::filter(data_merged,str_detect(data_merged$species,"Kleine Karekiet"))
snor=dplyr::filter(data_merged,str_detect(data_merged$species,"Snor"))
bgr=dplyr::filter(data_merged,str_detect(data_merged$species,"Background"))
## Row scores of all observations, plus each subset projected onto the PCA
## axes as supplementary rows (suprow).
scores.globclim<-pca.env$li
scores.sp.grotekarakiet<-suprow(pca.env,grotekarakiet[,1:noffea])$li
scores.sp.kleinekarakiet<-suprow(pca.env,kleinekarakiet[,1:noffea])$li
scores.sp.snor<-suprow(pca.env,snor[,1:noffea])$li
scores.clim.background<-suprow(pca.env,bgr[,1:noffea])$li
# rotate
## Apply the same axis-2 sign flip to every set of row scores, keeping them
## consistent with the flipped loadings above.
scores.globclim$Axis2=scores.globclim$Axis2*-1
scores.sp.grotekarakiet$Axis2=scores.sp.grotekarakiet$Axis2*-1
scores.sp.kleinekarakiet$Axis2=scores.sp.kleinekarakiet$Axis2*-1
scores.sp.snor$Axis2=scores.sp.snor$Axis2*-1
scores.clim.background$Axis2=scores.clim.background$Axis2*-1
# PCA 1 vs PCA 2
## Occurrence-density grids (R = 500 cells per axis) in environmental space
## for each species, with the background scores as the available environment;
## one set of grids per pair of PCA axes, each saved to .rds for later reuse.
grid.clim.grotekarakiet<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.grotekarakiet[,c(1,2)], R=500)
grid.clim.kleinekarakiet<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.kleinekarakiet[,c(1,2)], R=500)
grid.clim.snor<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.snor[,c(1,2)], R=500)
saveRDS(grid.clim.grotekarakiet, "grw_kdens.rds")
saveRDS(grid.clim.kleinekarakiet, "kk_kdens.rds")
saveRDS(grid.clim.snor, "sn_kdens.rds")
# PCA 1 vs PCA 3
grid.clim.grotekarakiet2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.grotekarakiet[,c(1,3)], R=500)
grid.clim.kleinekarakiet2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.kleinekarakiet[,c(1,3)], R=500)
grid.clim.snor2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.snor[,c(1,3)], R=500)
saveRDS(grid.clim.grotekarakiet2, "grw_kdens2.rds")
saveRDS(grid.clim.kleinekarakiet2, "kk_kdens2.rds")
saveRDS(grid.clim.snor2, "sn_kdens2.rds")
# PCA 2 vs PCA 3
grid.clim.grotekarakiet3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.grotekarakiet[,c(2,3)], R=500)
grid.clim.kleinekarakiet3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.kleinekarakiet[,c(2,3)], R=500)
grid.clim.snor3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.snor[,c(2,3)], R=500)
saveRDS(grid.clim.grotekarakiet3, "grw_kdens3.rds")
saveRDS(grid.clim.kleinekarakiet3, "kk_kdens3.rds")
saveRDS(grid.clim.snor3, "sn_kdens3.rds")
# Overlaps
## Pairwise niche overlap for every species pair, for each of the three axis
## combinations (suffix-less = PC1/PC2, 2 = PC1/PC3, 3 = PC2/PC3);
## cor=TRUE corrects the densities for the availability of the background.
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.snor, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet, grid.clim.snor, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.snor2, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet2, grid.clim.snor2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.snor3, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet3, grid.clim.snor3, cor=TRUE)
# Tests PCA 1 vs PCA 2
## Niche equivalency (alternative="lower") and similarity (rand.type=2)
## tests with 1000 permutations on 15 cores; each result is plotted for the
## D metric and saved to .rds. Naming: gr = Grote Karekiet, k = Kleine
## Karekiet, s = Snor; e.g. sim.test_k_gr tests k against gr.
# Grote Karakiet
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.snor, cor=TRUE)
eq.test_gr_k<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k<-ecospat.niche.similarity.test(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k, "D", "Similarity")
eq.test_gr_s<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet, grid.clim.snor, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s<-ecospat.niche.similarity.test(grid.clim.grotekarakiet, grid.clim.snor, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s, "D", "Similarity")
saveRDS(eq.test_gr_k, "eq.test_gr_k.rds")
saveRDS(sim.test_gr_k, "sim.test_gr_k.rds")
saveRDS(eq.test_gr_s, "eq.test_gr_s.rds")
saveRDS(sim.test_gr_s, "sim.test_gr_s.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet, grid.clim.snor, cor=TRUE)
sim.test_k_gr<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet, grid.clim.grotekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr, "D", "Similarity")
eq.test_k_s<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet, grid.clim.snor, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet, grid.clim.snor, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s, "D", "Similarity")
saveRDS(sim.test_k_gr, "sim.test_k_gr.rds")
saveRDS(eq.test_k_s, "eq.test_k_s.rds")
saveRDS(sim.test_k_s, "sim.test_k_s.rds")
# Snor
sim.test_s_gr<-ecospat.niche.similarity.test(grid.clim.snor, grid.clim.grotekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr, "D", "Similarity")
sim.test_s_k<-ecospat.niche.similarity.test(grid.clim.snor, grid.clim.kleinekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k, "D", "Similarity")
saveRDS(sim.test_s_gr, "sim.test_s_gr.rds")
saveRDS(sim.test_s_k, "sim.test_s_k.rds")
# Tests PCA 1 vs PCA 3
## Same battery of equivalency/similarity tests as the PCA1/PCA2 section,
## but on the PC1-vs-PC3 grids (objects and files carry the suffix "2").
# Grote Karakiet
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.snor2, cor=TRUE)
eq.test_gr_k2<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k2<-ecospat.niche.similarity.test(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k2, "D", "Similarity")
eq.test_gr_s2<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet2, grid.clim.snor2, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s2<-ecospat.niche.similarity.test(grid.clim.grotekarakiet2, grid.clim.snor2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s2, "D", "Similarity")
saveRDS(eq.test_gr_k2, "eq.test_gr_k2.rds")
saveRDS(sim.test_gr_k2, "sim.test_gr_k2.rds")
saveRDS(eq.test_gr_s2, "eq.test_gr_s2.rds")
saveRDS(sim.test_gr_s2, "sim.test_gr_s2.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet2, grid.clim.snor2, cor=TRUE)
sim.test_k_gr2<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet2, grid.clim.grotekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr2, "D", "Similarity")
eq.test_k_s2<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet2, grid.clim.snor2, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s2<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet2, grid.clim.snor2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s2, "D", "Similarity")
saveRDS(sim.test_k_gr2, "sim.test_k_gr2.rds")
saveRDS(eq.test_k_s2, "eq.test_k_s2.rds")
saveRDS(sim.test_k_s2, "sim.test_k_s2.rds")
# Snor
sim.test_s_gr2<-ecospat.niche.similarity.test(grid.clim.snor2, grid.clim.grotekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr2, "D", "Similarity")
sim.test_s_k2<-ecospat.niche.similarity.test(grid.clim.snor2, grid.clim.kleinekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k2, "D", "Similarity")
saveRDS(sim.test_s_gr2, "sim.test_s_gr2.rds")
saveRDS(sim.test_s_k2, "sim.test_s_k2.rds")
# Tests PCA 2 vs PCA 3
## Same battery of equivalency/similarity tests as the sections above, on the
## PC2-vs-PC3 grids (objects and files carry the suffix "3").
# Grote Karakiet
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.snor3, cor=TRUE)
eq.test_gr_k3<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k3<-ecospat.niche.similarity.test(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k3, "D", "Similarity")
eq.test_gr_s3<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet3, grid.clim.snor3, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s3<-ecospat.niche.similarity.test(grid.clim.grotekarakiet3, grid.clim.snor3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s3, "D", "Similarity")
saveRDS(eq.test_gr_k3, "eq.test_gr_k3.rds")
saveRDS(sim.test_gr_k3, "sim.test_gr_k3.rds")
saveRDS(eq.test_gr_s3, "eq.test_gr_s3.rds")
saveRDS(sim.test_gr_s3, "sim.test_gr_s3.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet3, grid.clim.snor3, cor=TRUE)
sim.test_k_gr3<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet3, grid.clim.grotekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr3, "D", "Similarity")
eq.test_k_s3<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet3, grid.clim.snor3, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s3<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet3, grid.clim.snor3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s3, "D", "Similarity")
saveRDS(sim.test_k_gr3, "sim.test_k_gr3.rds")
saveRDS(eq.test_k_s3, "eq.test_k_s3.rds")
saveRDS(sim.test_k_s3, "sim.test_k_s3.rds")
# Snor
sim.test_s_gr3<-ecospat.niche.similarity.test(grid.clim.snor3, grid.clim.grotekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr3, "D", "Similarity")
sim.test_s_k3<-ecospat.niche.similarity.test(grid.clim.snor3, grid.clim.kleinekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k3, "D", "Similarity")
saveRDS(sim.test_s_gr3, "sim.test_s_gr3.rds")
saveRDS(sim.test_s_k3, "sim.test_s_k3.rds")
| /data_analysis/analysis/ecospat_territorydata.R | permissive | eEcoLiDAR/Niche_separation_wetland_ALS | R | false | false | 12,723 | r | library(ecospat)
library(FactoMineR)
library(factoextra)
library(corrplot)
library(dplyr)
library(stringr)
# Global settings
# NOTE(review): machine-specific hard-coded Windows path; adjust before reuse.
workingdirectory="D:/Koma/_PhD/Sync/_Amsterdam/_PhD/Chapter3_wetlandniche/3_Dataprocessing/Niche_v13/"
setwd(workingdirectory)
# Import data: per-species tables plus background points. The *_lgn8 subsets
# keep only a fixed set of lgn8 codes (presumably LGN land-cover classes --
# TODO confirm); note they are created but NOT used below (see rbind).
GrW=read.csv("GrW_wlandsc.csv")
GrW_lgn8 <- subset(GrW, lgn8 %in% c(16,17,30,322,332,41,42,43))
KK=read.csv("KK_wlandsc.csv")
KK_lgn8 <- subset(KK, lgn8 %in% c(16,17,30,322,332,41,42,43))
Sn=read.csv("Sn_wlandsc.csv")
Sn_lgn8 <- subset(Sn, lgn8 %in% c(16,17,30,322,332,41,42,43))
Bgr=read.csv("Bgr_wlandsc.csv")
Bgr_lgn8 <- subset(Bgr, lgn8 %in% c(16,17,30,322,332,41,42,43))
#data_merged=rbind(GrW_lgn8,KK_lgn8,Sn_lgn8,Bgr_lgn8)
data_merged=rbind(GrW,KK,Sn,Bgr)  # unfiltered variant is the active one
noffea=9  # number of leading feature columns fed into the PCA
# 200 m only reed: select and rename the predictor + metadata columns
data_merged=subset(data_merged,select=c(11,10,9,7,8,12,14,13,18,15,16,4,5,2))
names(data_merged) <- c("VV_p95","VV_FHD","VD_0_1","VD_1_2","VD_2_3",
"HV_sd","HV_reedveg_sd", "HV_reedveg_prop","HV_reedveg_patch",
"species","occurrence","x","y","id")
data_merged=data_merged[(data_merged$VV_p95<30),]  # keep rows with VV_p95 < 30
data_merged[is.na(data_merged)==TRUE] <- 0         # replace all NAs with 0
#### Ecospat: PCA on the environmental space ####
# dudi.pca is from ade4 (loaded as a dependency of ecospat -- TODO confirm).
pca.env<-dudi.pca(data_merged[,1:noffea],scannf=FALSE,center=TRUE,nf=3)
pca.env$co[2]=pca.env$co[2]*-1  # flip sign of axis 2 loadings for presentation
fviz_pca_var(pca.env,axes = c(1, 2), col.var = "black",repel = TRUE,fontsize=14)
var <- get_pca_var(pca.env)
corrplot(as.matrix(var$cor), is.corr=FALSE,method="number",col=colorRampPalette(c("dodgerblue4","white","firebrick"))(200))
# Split the merged table into per-species presence sets and the background set.
grotekarakiet=dplyr::filter(data_merged,str_detect(data_merged$species,"Grote Karekiet"))
kleinekarakiet=dplyr::filter(data_merged,str_detect(data_merged$species,"Kleine Karekiet"))
snor=dplyr::filter(data_merged,str_detect(data_merged$species,"Snor"))
bgr=dplyr::filter(data_merged,str_detect(data_merged$species,"Background"))
# PCA scores: all rows (global), each species projected as supplementary rows,
# and the background sample.
scores.globclim<-pca.env$li
scores.sp.grotekarakiet<-suprow(pca.env,grotekarakiet[,1:noffea])$li
scores.sp.kleinekarakiet<-suprow(pca.env,kleinekarakiet[,1:noffea])$li
scores.sp.snor<-suprow(pca.env,snor[,1:noffea])$li
scores.clim.background<-suprow(pca.env,bgr[,1:noffea])$li
# rotate: flip axis 2 scores to match the sign flip applied to pca.env$co above
scores.globclim$Axis2=scores.globclim$Axis2*-1
scores.sp.grotekarakiet$Axis2=scores.sp.grotekarakiet$Axis2*-1
scores.sp.kleinekarakiet$Axis2=scores.sp.kleinekarakiet$Axis2*-1
scores.sp.snor$Axis2=scores.sp.snor$Axis2*-1
scores.clim.background$Axis2=scores.clim.background$Axis2*-1
# Occurrence-density grids (R = 500 cells per axis) for each species on each
# pair of PCA axes; every grid is written to disk for later reuse.
# PCA 1 vs PCA 2
grid.clim.grotekarakiet<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.grotekarakiet[,c(1,2)], R=500)
grid.clim.kleinekarakiet<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.kleinekarakiet[,c(1,2)], R=500)
grid.clim.snor<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,2)], glob1=scores.clim.background[,c(1,2)], sp=scores.sp.snor[,c(1,2)], R=500)
saveRDS(grid.clim.grotekarakiet, "grw_kdens.rds")
saveRDS(grid.clim.kleinekarakiet, "kk_kdens.rds")
saveRDS(grid.clim.snor, "sn_kdens.rds")
# PCA 1 vs PCA 3
grid.clim.grotekarakiet2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.grotekarakiet[,c(1,3)], R=500)
grid.clim.kleinekarakiet2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.kleinekarakiet[,c(1,3)], R=500)
grid.clim.snor2<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(1,3)], glob1=scores.clim.background[,c(1,3)], sp=scores.sp.snor[,c(1,3)], R=500)
saveRDS(grid.clim.grotekarakiet2, "grw_kdens2.rds")
saveRDS(grid.clim.kleinekarakiet2, "kk_kdens2.rds")
saveRDS(grid.clim.snor2, "sn_kdens2.rds")
# PCA 2 vs PCA 3
grid.clim.grotekarakiet3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.grotekarakiet[,c(2,3)], R=500)
grid.clim.kleinekarakiet3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.kleinekarakiet[,c(2,3)], R=500)
grid.clim.snor3<-ecospat.grid.clim.dyn(glob=scores.globclim[,c(2,3)], glob1=scores.clim.background[,c(2,3)], sp=scores.sp.snor[,c(2,3)], R=500)
saveRDS(grid.clim.grotekarakiet3, "grw_kdens3.rds")
saveRDS(grid.clim.kleinekarakiet3, "kk_kdens3.rds")
saveRDS(grid.clim.snor3, "sn_kdens3.rds")
# Overlaps
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.snor, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet, grid.clim.snor, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.snor2, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet2, grid.clim.snor2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.snor3, cor=TRUE)
ecospat.niche.overlap(grid.clim.kleinekarakiet3, grid.clim.snor3, cor=TRUE)
# Tests PCA 1 vs PCA 2
# One-sided niche equivalency tests (alternative = "lower") and similarity
# tests (alternative = "greater", rand.type = 2) for each species pair;
# each result is plotted (Schoener's D) and saved as .rds.
# Grote Karakiet
# NOTE(review): the next two overlap calls repeat the ones already printed
# in the "# Overlaps" section above.
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet, grid.clim.snor, cor=TRUE)
eq.test_gr_k<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k<-ecospat.niche.similarity.test(grid.clim.grotekarakiet, grid.clim.kleinekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k, "D", "Similarity")
eq.test_gr_s<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet, grid.clim.snor, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s<-ecospat.niche.similarity.test(grid.clim.grotekarakiet, grid.clim.snor, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s, "D", "Similarity")
saveRDS(eq.test_gr_k, "eq.test_gr_k.rds")
saveRDS(sim.test_gr_k, "sim.test_gr_k.rds")
saveRDS(eq.test_gr_s, "eq.test_gr_s.rds")
saveRDS(sim.test_gr_s, "sim.test_gr_s.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet, grid.clim.snor, cor=TRUE)
sim.test_k_gr<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet, grid.clim.grotekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr, "D", "Similarity")
eq.test_k_s<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet, grid.clim.snor, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet, grid.clim.snor, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s, "D", "Similarity")
saveRDS(sim.test_k_gr, "sim.test_k_gr.rds")
saveRDS(eq.test_k_s, "eq.test_k_s.rds")
saveRDS(sim.test_k_s, "sim.test_k_s.rds")
# Snor
sim.test_s_gr<-ecospat.niche.similarity.test(grid.clim.snor, grid.clim.grotekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr, "D", "Similarity")
sim.test_s_k<-ecospat.niche.similarity.test(grid.clim.snor, grid.clim.kleinekarakiet, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k, "D", "Similarity")
saveRDS(sim.test_s_gr, "sim.test_s_gr.rds")
saveRDS(sim.test_s_k, "sim.test_s_k.rds")
# Tests PCA 1 vs PCA 3
# Same battery of equivalency/similarity tests as above, on the "2"-suffixed
# grids (PCA axis 1 vs axis 3).
# Grote Karakiet
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet2, grid.clim.snor2, cor=TRUE)
eq.test_gr_k2<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k2<-ecospat.niche.similarity.test(grid.clim.grotekarakiet2, grid.clim.kleinekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k2, "D", "Similarity")
eq.test_gr_s2<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet2, grid.clim.snor2, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s2<-ecospat.niche.similarity.test(grid.clim.grotekarakiet2, grid.clim.snor2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s2, "D", "Similarity")
saveRDS(eq.test_gr_k2, "eq.test_gr_k2.rds")
saveRDS(sim.test_gr_k2, "sim.test_gr_k2.rds")
saveRDS(eq.test_gr_s2, "eq.test_gr_s2.rds")
saveRDS(sim.test_gr_s2, "sim.test_gr_s2.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet2, grid.clim.snor2, cor=TRUE)
sim.test_k_gr2<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet2, grid.clim.grotekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr2, "D", "Similarity")
eq.test_k_s2<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet2, grid.clim.snor2, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s2<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet2, grid.clim.snor2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s2, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s2, "D", "Similarity")
saveRDS(sim.test_k_gr2, "sim.test_k_gr2.rds")
saveRDS(eq.test_k_s2, "eq.test_k_s2.rds")
saveRDS(sim.test_k_s2, "sim.test_k_s2.rds")
# Snor
sim.test_s_gr2<-ecospat.niche.similarity.test(grid.clim.snor2, grid.clim.grotekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr2, "D", "Similarity")
sim.test_s_k2<-ecospat.niche.similarity.test(grid.clim.snor2, grid.clim.kleinekarakiet2, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k2, "D", "Similarity")
saveRDS(sim.test_s_gr2, "sim.test_s_gr2.rds")
saveRDS(sim.test_s_k2, "sim.test_s_k2.rds")
# Tests PCA 2 vs PCA 3
# Same battery of equivalency/similarity tests on the "3"-suffixed grids
# (PCA axis 2 vs axis 3).
# Grote Karakiet
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, cor=TRUE)
ecospat.niche.overlap(grid.clim.grotekarakiet3, grid.clim.snor3, cor=TRUE)
eq.test_gr_k3<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_k3<-ecospat.niche.similarity.test(grid.clim.grotekarakiet3, grid.clim.kleinekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_k3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_k3, "D", "Similarity")
eq.test_gr_s3<-ecospat.niche.equivalency.test(grid.clim.grotekarakiet3, grid.clim.snor3, rep=1000, alternative = "lower",ncores=15)
sim.test_gr_s3<-ecospat.niche.similarity.test(grid.clim.grotekarakiet3, grid.clim.snor3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_gr_s3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_gr_s3, "D", "Similarity")
saveRDS(eq.test_gr_k3, "eq.test_gr_k3.rds")
saveRDS(sim.test_gr_k3, "sim.test_gr_k3.rds")
saveRDS(eq.test_gr_s3, "eq.test_gr_s3.rds")
saveRDS(sim.test_gr_s3, "sim.test_gr_s3.rds")
# Kleine Karakiet
ecospat.niche.overlap(grid.clim.kleinekarakiet3, grid.clim.snor3, cor=TRUE)
sim.test_k_gr3<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet3, grid.clim.grotekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_k_gr3, "D", "Similarity")
eq.test_k_s3<-ecospat.niche.equivalency.test(grid.clim.kleinekarakiet3, grid.clim.snor3, rep=1000, alternative = "lower",ncores=15)
sim.test_k_s3<-ecospat.niche.similarity.test(grid.clim.kleinekarakiet3, grid.clim.snor3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(eq.test_k_s3, "D", "Equivalency")
ecospat.plot.overlap.test(sim.test_k_s3, "D", "Similarity")
saveRDS(sim.test_k_gr3, "sim.test_k_gr3.rds")
saveRDS(eq.test_k_s3, "eq.test_k_s3.rds")
saveRDS(sim.test_k_s3, "sim.test_k_s3.rds")
# Snor
sim.test_s_gr3<-ecospat.niche.similarity.test(grid.clim.snor3, grid.clim.grotekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_gr3, "D", "Similarity")
sim.test_s_k3<-ecospat.niche.similarity.test(grid.clim.snor3, grid.clim.kleinekarakiet3, rep=1000, alternative = "greater", rand.type = 2,ncores=15)
ecospat.plot.overlap.test(sim.test_s_k3, "D", "Similarity")
saveRDS(sim.test_s_gr3, "sim.test_s_gr3.rds")
saveRDS(sim.test_s_k3, "sim.test_s_k3.rds")
|
# HW 8 - Due Wednesday Nov 18, 2015 in moodle and hardcopy in office hours.
# Upload R file to Moodle with name: HW8_490IDS_YOURUNI.R
# Do Not remove any of the comments. These are marked by #
### This assignment will provide some practice carrying out and interpreting PCA
### Part 1. Iris Data
# a) [2 points] Using the Iris dataset from class, carry out PCA on the covariates
# (these are Sepal.Length Sepal.Width Petal.Length Petal.Width). Create a new R
# object containing the output, and apply the summary command. Copy your commands
# and results of the summary command below.
# b) [2 points] How do you interpret the contents of the summary command output?
# c) [2 points] What are the loadings and what are the scores? Output them below and
# provide an interpretation.
# d) [2 points] Plot the labeled iris data in the new basis, as discussed in lecture.
# e) [2 points] Create a scree plot as discussed in class. Is there a natural
# break point? Where? How much dimensionality reduction do you recommend in this
# dataset? Why did you expect this outcome?
### Part 2. Wine Data
# Carry out parts a) through e) as above on the wines.txt dataset uploaded to moodle.
# In that dataset each row is a different wine, and each column is a different chemical
# measured in each wine.
| /HW/HW8_490IDS.R | no_license | Superdu712/Data-Science | R | false | false | 1,307 | r | # HW 8 - Due Wednesday Nov 18, 2015 in moodle and hardcopy in office hours.
# Upload R file to Moodle with name: HW8_490IDS_YOURUNI.R
# Do Not remove any of the comments. These are marked by #
### This assignment will provide some practice carrying out and interpreting PCA
### Part 1. Iris Data
# a) [2 points] Using the Iris dataset from class, carry out PCA on the covariates
# (these are Sepal.Length Sepal.Width Petal.Length Petal.Width). Create a new R
# object containing the output, and apply the summary command. Copy your commands
# and results of the summary command below.
# b) [2 points] How do you interpret the contents of the summary command output?
# c) [2 points] What are the loadings and what are the scores? Output them below and
# provide an interpretation.
# d) [2 points] Plot the labeled iris data in the new basis, as discussed in lecture.
# e) [2 points] Create a scree plot as discussed in class. Is there a natural
# break point? Where? How much dimensionality reduction do you recommend in this
# dataset? Why did you expect this outcome?
### Part 2. Wine Data
# Carry out parts a) through e) as above on the wines.txt dataset uploaded to moodle.
# In that dataset each row is a different wine, and each column is a different chemical
# measured in each wine.
|
# Deliverables:
# - vaccine types usage
# - filter by December, January, February (data updated: 02/20/21)
# - which country has the highest count of vaccinations
# - daily vaccinations per country
library('dplyr')
# Connecting data 1: per-country, per-day vaccination records
getwd()
setwd("/Users/hyojae/Desktop/R")
vaccine <- read.csv("country_vaccinations.csv")
head(vaccine)
summary(vaccine)
str(vaccine)
vaccine$vaccines <- as.factor(vaccine$vaccines)  # vaccine-type combinations as factor
# Connecting data 2: population by country (2020)
population <- read.csv("population_by_country_2020.csv")
head(population)
names(population)[1] <- "Country"
names(population)[2] <- "Population_2020"
# Convert the date column to POSIXct
vaccine$PosixTime <- as.POSIXct(vaccine$date, format = "%m/%d/%y")
head(vaccine, 5)
# Remove columns no longer needed
vaccine$date <- NULL
vaccine$source_website <- NULL
head(vaccine)
# Extract year and month strings from PosixTime
vaccine$year <- format(vaccine$PosixTime, format="%Y")
# NOTE(review): the 'format' argument of as.Date() is ignored for POSIXct
# input; harmless here, as.Date(vaccine$PosixTime) would be equivalent.
vaccine$month <- format(as.Date(vaccine$PosixTime, format="%m/%d/%y"),"%m")
#Merging two dataframes
# Join vaccination records with the 2020 population figures (inner join on
# country name), then drop population columns not used downstream.
df <- merge(vaccine, population, by.x = "country", by.y = "Country")
unused_cols <- c("source_name", "Yearly.Change", "Net.Change",
"Density..P.Km..", "Land.Area..Km..", "Migrants..net.",
"Fert..Rate", "World.Share")
# intersect() keeps this robust if a column is absent (matching the silent
# no-op behaviour of the original per-column `df$x <- NULL` statements).
df[intersect(unused_cols, names(df))] <- NULL
head(df)
# Filter vaccinations by month: December, January, February
dec <- df[df$month == '12',]
jan <- df[df$month == '01',]
feb <- df[df$month == '02',]
# Keep only the country and daily-vaccination columns
daily <- vaccine[c('country', 'daily_vaccinations')]
# Sum of daily vaccinations per country, computed ONCE and reused
# (the original aggregated twice: once just to print, once for the top 10).
library('data.table')
daily <- as.data.table(daily)
country_totals <- daily[, lapply(.SD, sum, na.rm = TRUE), by = 'country']
print(country_totals)
# 10 countries with the highest cumulative daily vaccinations.
# slice_max() replaces the superseded top_n() and names the ranking column
# explicitly instead of implicitly using the last column.
top10_daily <- country_totals %>% slice_max(daily_vaccinations, n = 10)
# Plot: bar chart of the top-10 countries by cumulative daily vaccinations,
# with comma-formatted axis and bar labels; long country names abbreviated.
library('ggplot2')
p <- ggplot(data = top10_daily, aes(x = country, y = daily_vaccinations))
q <- p + geom_bar(stat = 'identity', position = 'dodge', fill = '#234F1E') +
scale_y_continuous(labels = scales::comma, breaks = scales::pretty_breaks(n = 10)) +
scale_x_discrete(labels = c('United Arab Emirates' = 'UAE', 'United Kingdom' = 'UK', 'United States' = 'US')) +
geom_text(stat = 'identity', aes(label = scales::comma(daily_vaccinations)), color = '#234F1E',
vjust = -0.9, nudge_y = 0.5, size = 2.5)
# Titles, axis labels and theme tweaks; the expression auto-prints the plot.
q + ggtitle('Daily Vaccination Counts', subtitle = 'as of 2/20/21') + xlab('Country') +
ylab('Cumulative daily vaccine counts') +
theme(
plot.title = element_text(color = '#234F1E', size = 16, face = 'bold.italic', hjust = 0.5),
plot.subtitle = element_text(size = 8.5, face = 'italic', hjust = 0.65),
axis.title.x = element_text(size = 10, face = 'bold'),
axis.title.y = element_text(size = 10, face = 'bold')
)
| /Coronavirus Vaccines.R | no_license | hyojaeee/R | R | false | false | 2,759 | r | #Deliverables:
# Deliverables (continued):
# - vaccine types usage
# - filter by December, January, February (data updated: 02/20/21)
# - which country has the highest count of vaccinations
# - daily vaccinations per country
library('dplyr')
# Connecting data 1: per-country, per-day vaccination records
getwd()
setwd("/Users/hyojae/Desktop/R")
vaccine <- read.csv("country_vaccinations.csv")
head(vaccine)
summary(vaccine)
str(vaccine)
vaccine$vaccines <- as.factor(vaccine$vaccines)  # vaccine-type combinations as factor
# Connecting data 2: population by country (2020)
population <- read.csv("population_by_country_2020.csv")
head(population)
names(population)[1] <- "Country"
names(population)[2] <- "Population_2020"
# Convert the date column to POSIXct
vaccine$PosixTime <- as.POSIXct(vaccine$date, format = "%m/%d/%y")
head(vaccine, 5)
# Remove columns no longer needed
vaccine$date <- NULL
vaccine$source_website <- NULL
head(vaccine)
# Extract year and month strings from PosixTime
vaccine$year <- format(vaccine$PosixTime, format="%Y")
# NOTE(review): the 'format' argument of as.Date() is ignored for POSIXct
# input; harmless here, as.Date(vaccine$PosixTime) would be equivalent.
vaccine$month <- format(as.Date(vaccine$PosixTime, format="%m/%d/%y"),"%m")
#Merging two dataframes
# Join vaccination records with the 2020 population figures (inner join on
# country name), then drop population columns not used downstream.
df <- merge(vaccine, population, by.x = "country", by.y = "Country")
unused_cols <- c("source_name", "Yearly.Change", "Net.Change",
"Density..P.Km..", "Land.Area..Km..", "Migrants..net.",
"Fert..Rate", "World.Share")
# intersect() keeps this robust if a column is absent (matching the silent
# no-op behaviour of the original per-column `df$x <- NULL` statements).
df[intersect(unused_cols, names(df))] <- NULL
head(df)
# Filter vaccinations by month: December, January, February
dec <- df[df$month == '12',]
jan <- df[df$month == '01',]
feb <- df[df$month == '02',]
# Keep only the country and daily-vaccination columns
daily <- vaccine[c('country', 'daily_vaccinations')]
# Sum of daily vaccinations per country, computed ONCE and reused
# (the original aggregated twice: once just to print, once for the top 10).
library('data.table')
daily <- as.data.table(daily)
country_totals <- daily[, lapply(.SD, sum, na.rm = TRUE), by = 'country']
print(country_totals)
# 10 countries with the highest cumulative daily vaccinations.
# slice_max() replaces the superseded top_n() and names the ranking column
# explicitly instead of implicitly using the last column.
top10_daily <- country_totals %>% slice_max(daily_vaccinations, n = 10)
# Plot: bar chart of the top-10 countries by cumulative daily vaccinations,
# with comma-formatted axis and bar labels; long country names abbreviated.
library('ggplot2')
p <- ggplot(data = top10_daily, aes(x = country, y = daily_vaccinations))
q <- p + geom_bar(stat = 'identity', position = 'dodge', fill = '#234F1E') +
scale_y_continuous(labels = scales::comma, breaks = scales::pretty_breaks(n = 10)) +
scale_x_discrete(labels = c('United Arab Emirates' = 'UAE', 'United Kingdom' = 'UK', 'United States' = 'US')) +
geom_text(stat = 'identity', aes(label = scales::comma(daily_vaccinations)), color = '#234F1E',
vjust = -0.9, nudge_y = 0.5, size = 2.5)
# Titles, axis labels and theme tweaks; the expression auto-prints the plot.
q + ggtitle('Daily Vaccination Counts', subtitle = 'as of 2/20/21') + xlab('Country') +
ylab('Cumulative daily vaccine counts') +
theme(
plot.title = element_text(color = '#234F1E', size = 16, face = 'bold.italic', hjust = 0.5),
plot.subtitle = element_text(size = 8.5, face = 'italic', hjust = 0.65),
axis.title.x = element_text(size = 10, face = 'bold'),
axis.title.y = element_text(size = 10, face = 'bold')
)
|
# plot how an environmental perturbation works for
# different maternal effects
# Read the simulation summary (semicolon-separated).
the.data <- read.table("summary_three_x.csv"
,sep=";"
,header=T)
# Level plot of generation against perturbation rate/intensity, panelled by
# the four maternal-effect mutation parameters.
# NOTE(review): levelplot() (lattice) and matlab.like (colorRamps) must be
# loaded elsewhere before this script runs -- neither is attached here.
pdf("overview_perturb.pdf")
print(levelplot(generation ~ rate_t0 * intptb |
mu_m_g * mu_m_e * mu_m_m * mu_b,
data=the.data,
strip=function(strip.levels,...) {
strip.default(strip.levels=T,...)
},
col.regions=matlab.like))
dev.off()
| /figs/step_change/overview.r | no_license | bramkuijper/three_maternal_effects | R | false | false | 552 | r | # plot how an environmental perturbation works for
# different maternal effects
# Read the simulation summary (semicolon-separated).
the.data <- read.table("summary_three_x.csv"
,sep=";"
,header=T)
# Level plot of generation against perturbation rate/intensity, panelled by
# the four maternal-effect mutation parameters.
# NOTE(review): levelplot() (lattice) and matlab.like (colorRamps) must be
# loaded elsewhere before this script runs -- neither is attached here.
pdf("overview_perturb.pdf")
print(levelplot(generation ~ rate_t0 * intptb |
mu_m_g * mu_m_e * mu_m_m * mu_b,
data=the.data,
strip=function(strip.levels,...) {
strip.default(strip.levels=T,...)
},
col.regions=matlab.like))
dev.off()
|
library(dataCompareR)
### Name: makeValidKeys
### Title: makeValidKeys
### Aliases: makeValidKeys
### ** Examples
## Not run: makeValidKeys(c(" hello", "__BAD NAME___"))
| /data/genthat_extracted_code/dataCompareR/examples/makeValidKeys.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 176 | r | library(dataCompareR)
### Name: makeValidKeys
### Title: makeValidKeys
### Aliases: makeValidKeys
### ** Examples
## Not run: makeValidKeys(c(" hello", "__BAD NAME___"))
|
#### SDM (species distribution model) based on GBIF and BIOCLIM data for ...
#### Mantis religiosa
#### Model evaluation
# NOTE(review): rm(list = ls()) plus the hard-coded setwd() below make this
# script machine-specific; consider removing both for portability.
rm(list = ls())
#28.11.
#### Start ####
start_time <- date() # save start time (reported again at the end of the script)
### 0.1- Set working directory (path can also come from the clipboard) ####
wdname <- "C:/Users/Denis/Documents/Makrooekologie/Workspace"
# wdname <- gsub( "\\\\", "/", readClipboard())
setwd(wdname); getwd(); rm(wdname)
### 0.2- Create the required folder structure in the working directory ####
if("data" %in% list.files() == FALSE){dir.create("data/")} ## data folder
if("gbif" %in% list.files("data/") == FALSE){dir.create("data/gbif/")} ## subfolder for GBIF data
if("bioclim" %in% list.files("data/") == FALSE){dir.create("data/bioclim/")} ## subfolder for Bioclim data
if("figures" %in% list.files() == FALSE){dir.create("figures/")} ## folder for figures
if("models" %in% list.files() == FALSE){dir.create("models/")} ## model folder
if("maxent" %in% list.files("models/") == FALSE){dir.create("models/maxent/")} ## subfolder for MaxEnt models
### 0.3- Load packages ####
library(rgbif) ## Global Biodiversity Information Facility, database of species occurrences
library(raster) ## raster processing and Bioclim data
library(dismo) ## for MaxEnt modelling
library(maptools) ## for world-map polygons
library(colorRamps)
library(classInt)
library(rJava) ## Java bindings (required by MaxEnt)
library(MaxentVariableSelection)
library(corrplot)
library(rgdal)
library(gbm)
library(hier.part)
### 1- Read data ####
## 1.1- Read datasets ##
## Species occurrence data
specDataReg <- readRDS("data/gbif/Mantis_religiosa_europe_dataset.rds")
## Environmental data
enviData <- readRDS("data/bioclim/bioclim_europe_Var_bio_Res_5.rds")
## 1.2- Unpack the dataset list ##
species <- specDataReg[[1]]
region <- specDataReg[[2]]
presences_region <- specDataReg[[3]]
background <- specDataReg[[4]]
rm(specDataReg)
## 1.3- Bioclim environmental variables (reference only) ####
##BIO1 = Annual Mean Temperature, ##BIO2 = Mean Diurnal Range (Mean of monthly (max temp - min temp)), ##BIO3 = Isothermality (BIO2/BIO7) (* 100), BIO4 = Temperature Seasonality (standard deviation *100)
##BIO5 = Max Temperature of Warmest Month, ##BIO6 = Min Temperature of Coldest Month, ##BIO7 = Temperature Annual Range (BIO5-BIO6)
##BIO8 = Mean Temperature of Wettest Quarter, ##BIO9 = Mean Temperature of Driest Quarter, ##BIO10 = Mean Temperature of Warmest Quarter
##BIO11 = Mean Temperature of Coldest Quarter, ##BIO12 = Annual Precipitation, ##BIO13 = Precipitation of Wettest Month
##BIO14 = Precipitation of Driest Month, ##BIO15 = Precipitation Seasonality (Coefficient of Variation), ##BIO16 = Precipitation of Wettest Quarter
##BIO17 = Precipitation of Driest Quarter,##BIO18 = Precipitation of Warmest Quarter, ##BIO19 = Precipitation of Coldest Quarter
## 1.4- Read fitted MaxEnt models ####
maxMods <- readRDS("models/maxent/Mantis_religiosa_europe_MaxEntModels.rds")
me <- unlist(maxMods)
#### 2- Contribution ####
# One panel per fitted MaxEnt model (variable-contribution plot) on a single
# PNG sheet; panel titles show each model's results slot 5, which this
# script uses as the AUC throughout.
#x11()
png(paste("figures/Cont_", species, "_", "europe", ".png", sep= ""), width = 1200, height = 800, res = 120)
par(mfrow=c(3,5))
# seq_along() instead of 1:length(me): safe even for an empty model list.
for(i in seq_along(me)){
plot(me[[i]], main = paste("AUC", me[[i]]@results[5]))
}
dev.off()
### 3- Evaluation ####
## 3.1- Select which models to inspect ####
meAll <- me[[length(me)]]  # model with all predictors (last element)
meSing <- me[[1]]
for(i in seq_len(5)){ ## pick the best single-predictor model (highest value in results slot 5, used as AUC)
if(me[[i]]@results[5] > meSing@results[5]){meSing <- me[[i]]}
}
meSing
meImp <- me[[10]]  # NOTE(review): fixed positions 10/11 -- confirm they match the intended models
me1 <- me[[11]]
## 3.2- Evaluate each selected model on the presence/background data ##
# BUG FIX: eAll/eSing previously evaluated the *other* model (meSing and
# meAll were swapped), so every later use of eAll/eSing (thresholds, ROC
# plots) referred to the wrong model. Each evaluation now matches its name.
eAll <- evaluate(presences_region, background, meAll, enviData)
eSing <- evaluate(presences_region, background, meSing, enviData)
eImp <- evaluate(presences_region, background, meImp, enviData)
e1 <- evaluate(presences_region, background, me1, enviData) ## good model with only 3 predictors (barely correlated with each other)
eAll
eSing
eImp
e1
### 4- Inspect the evaluation ####
## 4.1- Thresholds (dismo::threshold returns a table of candidate cut-offs) ##
thrAll <- threshold(eAll)
thrSing <- threshold(eSing)
thrImp <- threshold(eImp)
thr1 <- threshold(e1)
## 4.2- ROC curves and density plots, one window per model ##
x11()
par(mfrow=c(1,2))
plot(eAll, "ROC", sub= "eAll")
density(eAll)
x11()
par(mfrow=c(1,2))
plot(eSing, "ROC", sub= "eSing")
density(eSing)
x11()
par(mfrow=c(1,2))
plot(eImp, "ROC", sub= "eImp")
density(eImp)
x11()
par(mfrow=c(1,2))
plot(e1, "ROC", sub= "e1")
density(e1)
#### 5- Response curves ####
### 5.1- 1D response curves ###
x11()
response(meAll)
response(meImp)
response(me1)
### 5.2- 2D response curves (work in progress) ###
# Predict suitability of model 'me1' on a regular bio10 x bio18 grid.
np <- 30
newdata <- expand.grid(bio10=seq(145, 200, len=np), bio18=seq(0, 240, len=np))
newdata$pred <- predict(me1, newdata)
## Use the sensitivity threshold to blank out unsuitable cells.
# BUG FIX: 'thr' and 'e' were never defined (only thrAll/thrSing/thrImp/thr1
# and eAll/eSing/eImp/e1 exist), so this section errored. Since it predicts
# with model me1, the matching objects are thr1 and e1.
newdata$pred[newdata$pred<thr1$sensitivity] <- NA
## Create classes of site suitability
cInt <- classIntervals((newdata$pred))
xdiff <-diff(unique(newdata$bio10))[1]
ydiff <-diff(unique(newdata$bio18))[1]
mypalette <- colorRampPalette(c("lightgreen", "darkgreen"))
newdata$colors <- findColours(cInt, mypalette(length(cInt$brks)))
par(mfrow=c(1,1), mar=c(5,5,1,1))
symbols(x=newdata$bio10, y=newdata$bio18, rectangles=matrix(rep(c(xdiff, ydiff), nrow(newdata)), ncol=2, byrow=T), bg=newdata$colors, fg="white", inches=F, xlab="Temperature of warmest quarter (°dC)", ylab="Precipitation of warmest quarter (mm)")
contour(x=unique(newdata$bio10), y=unique(newdata$bio18), z=matrix(newdata$pred, nrow=np), add=T, levels=unique(round(cInt$brks,1)), labcex = 1.3)
mtext(species, side=3, line=-1.3, font=3)
mtext(paste0("AUC = " , round(e1@auc, 2), " "), side=1, line=-2.3, adj=1)
mtext(paste0("Pearson r = " , round(e1@cor, 2), " "), side=1, line=-1.3, adj=1)
#### Ende ####
start_time; date() ## Start; und Endzeit abfragen ## Dauer: ca. 1 Min | /Skripte/3-Evaluation_Mantis_religiosa.R | no_license | debroize/Makrooekologie | R | false | false | 5,875 | r | #### SDM auf Basis von GBIF und BIOCLIM f?r ...
#### Mantis religiosa
#### Model evaluation
# NOTE(review): rm(list = ls()) plus the hard-coded setwd() below make this
# script machine-specific; consider removing both for portability.
rm(list = ls())
#28.11.
#### Start ####
start_time <- date() # save start time (reported again at the end of the script)
### 0.1- Set working directory (path can also come from the clipboard) ####
wdname <- "C:/Users/Denis/Documents/Makrooekologie/Workspace"
# wdname <- gsub( "\\\\", "/", readClipboard())
setwd(wdname); getwd(); rm(wdname)
### 0.2- Create the required folder structure in the working directory ####
if("data" %in% list.files() == FALSE){dir.create("data/")} ## data folder
if("gbif" %in% list.files("data/") == FALSE){dir.create("data/gbif/")} ## subfolder for GBIF data
if("bioclim" %in% list.files("data/") == FALSE){dir.create("data/bioclim/")} ## subfolder for Bioclim data
if("figures" %in% list.files() == FALSE){dir.create("figures/")} ## folder for figures
if("models" %in% list.files() == FALSE){dir.create("models/")} ## model folder
if("maxent" %in% list.files("models/") == FALSE){dir.create("models/maxent/")} ## subfolder for MaxEnt models
### 0.3- Load packages ####
library(rgbif) ## Global Biodiversity Information Facility, database of species occurrences
library(raster) ## raster processing and Bioclim data
library(dismo) ## for MaxEnt modelling
library(maptools) ## for world-map polygons
library(colorRamps)
library(classInt)
library(rJava) ## Java bindings (required by MaxEnt)
library(MaxentVariableSelection)
library(corrplot)
library(rgdal)
library(gbm)
library(hier.part)
### 1- Read data ####
## 1.1- Read datasets ##
## Species occurrence data
specDataReg <- readRDS("data/gbif/Mantis_religiosa_europe_dataset.rds")
## Environmental data
enviData <- readRDS("data/bioclim/bioclim_europe_Var_bio_Res_5.rds")
## 1.2- Unpack the dataset list ##
species <- specDataReg[[1]]
region <- specDataReg[[2]]
presences_region <- specDataReg[[3]]
background <- specDataReg[[4]]
rm(specDataReg)
## 1.3- Bioclim environmental variables (reference only) ####
##BIO1 = Annual Mean Temperature, ##BIO2 = Mean Diurnal Range (Mean of monthly (max temp - min temp)), ##BIO3 = Isothermality (BIO2/BIO7) (* 100), BIO4 = Temperature Seasonality (standard deviation *100)
##BIO5 = Max Temperature of Warmest Month, ##BIO6 = Min Temperature of Coldest Month, ##BIO7 = Temperature Annual Range (BIO5-BIO6)
##BIO8 = Mean Temperature of Wettest Quarter, ##BIO9 = Mean Temperature of Driest Quarter, ##BIO10 = Mean Temperature of Warmest Quarter
##BIO11 = Mean Temperature of Coldest Quarter, ##BIO12 = Annual Precipitation, ##BIO13 = Precipitation of Wettest Month
##BIO14 = Precipitation of Driest Month, ##BIO15 = Precipitation Seasonality (Coefficient of Variation), ##BIO16 = Precipitation of Wettest Quarter
##BIO17 = Precipitation of Driest Quarter,##BIO18 = Precipitation of Warmest Quarter, ##BIO19 = Precipitation of Coldest Quarter
## 1.4- Read fitted MaxEnt models ####
maxMods <- readRDS("models/maxent/Mantis_religiosa_europe_MaxEntModels.rds")
me <- unlist(maxMods)
#### 2- Contribution ####
# One panel per fitted MaxEnt model (variable-contribution plot) on a single
# PNG sheet; panel titles show each model's results slot 5, which this
# script uses as the AUC throughout.
#x11()
png(paste("figures/Cont_", species, "_", "europe", ".png", sep= ""), width = 1200, height = 800, res = 120)
par(mfrow=c(3,5))
# seq_along() instead of 1:length(me): safe even for an empty model list.
for(i in seq_along(me)){
plot(me[[i]], main = paste("AUC", me[[i]]@results[5]))
}
dev.off()
### 3- Evaluation ####
## 3.1- Auswählen welche Modelle betrachtet werden sollen ####
meAll <- me[[length(me)]]
meSing <- me[[1]]
for(i in 1:5){ ## Wählt besten einzelnen Parameter aus
if(me[[i]]@results[5] > meSing@results[5]){meSing <- me[[i]]}
}
meSing
meImp <- me[[10]]
me1 <- me[[11]]
## 3.2- Evaluieren ####
eAll <- evaluate(presences_region, background, meSing, enviData)
eSing <- evaluate(presences_region, background, meAll, enviData)
eImp <- evaluate(presences_region, background, meImp, enviData)
e1 <- evaluate(presences_region, background, me1, enviData) ## Gutes Modell mit nur 3 Predictoren (untereinander kaum korreliert)
eAll
eSing
eImp
e1
### 4- Betrachten der Evaluation ####
## 4.1- Thresholds ##
thrAll <- threshold(eAll)
thrSing <- threshold(eSing)
thrImp <- threshold(eImp)
thr1 <- threshold(e1)
## 4.2- ROC-Kurven und Density-Plots ##
x11()
par(mfrow=c(1,2))
plot(eAll, "ROC", sub= "eAll")
density(eAll)
x11()
par(mfrow=c(1,2))
plot(eSing, "ROC", sub= "eSing")
density(eSing)
x11()
par(mfrow=c(1,2))
plot(eImp, "ROC", sub= "eImp")
density(eImp)
x11()
par(mfrow=c(1,2))
plot(e1, "ROC", sub= "e1")
density(e1)
#### 5- Response Kurven ####
### 5.1- 1D Response Kurven###
x11()
response(meAll)
response(meImp)
response(me1)
### 5.2- 2D Response Kurven (in Bearbeitung)###
np <- 30
newdata <- expand.grid(bio10=seq(145, 200, len=np), bio18=seq(0, 240, len=np))
newdata$pred <- predict(me1, newdata)
## 3.4.1 Use threshold to show distribution
newdata$pred[newdata$pred<thr$sensitivity] <- NA
## 3.4.2- Create classes of site suitability
cInt <- classIntervals((newdata$pred))
xdiff <-diff(unique(newdata$bio10))[1]
ydiff <-diff(unique(newdata$bio18))[1]
mypalette <- colorRampPalette(c("lightgreen", "darkgreen"))
newdata$colors <- findColours(cInt, mypalette(length(cInt$brks)))
par(mfrow=c(1,1), mar=c(5,5,1,1))
symbols(x=newdata$bio10, y=newdata$bio18, rectangles=matrix(rep(c(xdiff, ydiff), nrow(newdata)), ncol=2, byrow=T), bg=newdata$colors, fg="white", inches=F, xlab="Temperature of warmest quarter (°dC)", ylab="Precipitation of warmest quarter (mm)")
contour(x=unique(newdata$bio10), y=unique(newdata$bio18), z=matrix(newdata$pred, nrow=np), add=T, levels=unique(round(cInt$brks,1)), labcex = 1.3)
mtext(species, side=3, line=-1.3, font=3)
mtext(paste0("AUC = " , round(e@auc, 2), " "), side=1, line=-2.3, adj=1)
mtext(paste0("Pearson r = " , round(e@cor, 2), " "), side=1, line=-1.3, adj=1)
#### Ende ####
start_time; date() ## Start; und Endzeit abfragen ## Dauer: ca. 1 Min |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDBinom.r
\name{rg_nieparam}
\alias{rg_nieparam}
\title{Rysuje lokalnie wygładzoną funkcję (dev)
Rysuje i zwraca statystyki dla bucketów.}
\usage{
rg_nieparam(
score,
default,
buckets = 100,
pred = NULL,
weights = rep(1, length(score)),
wytnij = 0,
span = 0.7,
degree = 2,
plot = TRUE,
plt_type = "br",
new = TRUE,
col_points = "black",
col_line = "darkblue",
col_pred = "green",
index = FALSE,
glm = FALSE,
col_glm = "green",
...
)
}
\arguments{
\item{score}{Wektor zmiennych numerycznych.}
\item{default}{Wektor zmiennej dwumianowej.}
\item{buckets}{Liczba bucketów, na ile należy podzielić \code{score}.}
\item{pred}{predykcja modelu.}
\item{wytnij}{Ile krańcowych obserwacji wyciąć.}
\item{span}{Współczynnik wygładzania. Szczegóły w funkcji \code{\link[locfit]{locfit}}}
\item{degree}{Stopień wielomianu do lokalnego wygładzania. Szczegóły w funkcji \code{\link[locfit]{locfit}}}
\item{plot}{Czy rysować wykres.}
\item{plt_type}{jeśli \code{br}, to na osi OY będzie BR. W przeciwnym razie będzie logit(BR)}
\item{new}{Czy rysować wykres od nowa.}
\item{col_points}{Kolor punktów.}
\item{col_line}{Kolor linii.}
\item{index}{jeśli \code{TRUE}, na osi OX będą numery kolejnych bucketów.
W przeciwnym razie na osi OX będą wartości \code{score}.}
\item{glm}{czy rysować dopasowanie modelu logistycznego do zmiennej.}
\item{col_glm}{kolor wykresu z modelu logistycznego.}
\item{...}{dodatkowe parametry.}
}
\description{
Rysuje lokalnie wygładzoną funkcję (dev)
Rysuje i zwraca statystyki dla bucketów.
}
\examples{
n<-1000;
x1<-rnorm(n);
x2<-x1+rnorm(n);
y<-(x1+rnorm(n))<0;
rg_nieparam(x1,y, buckets=20)
rg_nieparam(x2,y, buckets=20, new=FALSE, col_line="green",col_points="green")
}
\author{
Michał Danaj
}
| /man/rg_nieparam.Rd | no_license | michaldanaj/MDBinom | R | false | true | 1,888 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDBinom.r
\name{rg_nieparam}
\alias{rg_nieparam}
\title{Rysuje lokalnie wygładzoną funckję (dev)
Rysuje i zwraca statystyki dla bucketów.}
\usage{
rg_nieparam(
score,
default,
buckets = 100,
pred = NULL,
weights = rep(1, length(score)),
wytnij = 0,
span = 0.7,
degree = 2,
plot = TRUE,
plt_type = "br",
new = TRUE,
col_points = "black",
col_line = "darkblue",
col_pred = "green",
index = FALSE,
glm = FALSE,
col_glm = "green",
...
)
}
\arguments{
\item{score}{Wektor zmiennych numerycznych.}
\item{default}{Wektor zmiennej dwumianowej.}
\item{buckets}{Liczba bucketów, na ile neleży podzielić \code{score}.}
\item{pred}{predykcja modelu.}
\item{wytnij}{Ile krańcowych obserwacji wyciąć.}
\item{span}{Współczynnik wygładzania. Szegóły w funkcji \code{\link[locfit]{locfit}}}
\item{degree}{Stopień wielomianu do lokalnego wygładzania. Szegóły w funkcji \code{\link[locfit]{locfit}}}
\item{plot}{Czy rysować wykres.}
\item{plt_type}{jeśli \code{br}, to na osi OY będzie BR. W przeciwnym razie będzie logit(BR)}
\item{new}{Czy rysować wykres od nowa.}
\item{col_points}{Kolor punktów.}
\item{col_line}{Kolor lini.}
\item{index}{jeśli \code{TRUE}, na osi OX będą numery kolejnych bucketów.
W przeciwnym razie na osi OX będą wartości \code{score}.}
\item{glm}{czy rysować dopasowanie modelu logistycznego do zmiennej.}
\item{col_glm}{kolor wykresu z modelu logistycznego.}
\item{...}{dodatkowe parametry.}
}
\description{
Rysuje lokalnie wygładzoną funckję (dev)
Rysuje i zwraca statystyki dla bucketów.
}
\examples{
n<-1000;
x1<-rnorm(n);
x2<-x1+rnorm(n);
y<-(x1+rnorm(n))<0;
reg_nieparam(x1,y, buckets=20)
reg_nieparam(x2,y, buckets=20, new=FALSE, col_line="green",col_points="green")
}
\author{
Michał Danaj
}
|
#Rscript nb_regression_outlier_filtering.R high_vs_low_otu_table.txt high_low_mapfile.txt High Low Treatment ZINB_NB_Output_result.txt 2
start.time <- Sys.time()
library(pscl)
library(MASS)
library(foreach)
library(doMC)
data_load <- function(MYdata, mapfile, treatment, trt1, trt2) {
  ## Align the OTU table with the sample metadata ("mapping") file and return
  ## one data frame with samples as rows: OTU counts for the two treatment
  ## groups (trt1 columns first, then trt2) merged with the metadata.
  ##
  ## MYdata:    OTU table, one column per sample; the alphabetically-last
  ##            column (taxonomy in the expected layout) is dropped.
  ## mapfile:   path to a tab-separated mapping file with a "#SampleID"
  ##            column and a treatment column.
  ## treatment: name of the metadata column holding the group labels.
  ## trt1/trt2: the two group labels to keep.
  MYmeta <- read.table(mapfile, header = TRUE, sep = "\t",
                       check.names = FALSE, comment.char = "")
  allcols <- length(colnames(MYdata))
  ## Sort sample columns and metadata rows so the two line up by sample ID;
  ## the last column of the sorted table (taxonomy) is dropped.
  MYdata2 <- MYdata[, order(names(MYdata))][, 1:(allcols - 1)]
  MYmeta <- MYmeta[order(MYmeta[, "#SampleID"]), ]
  ## BUG FIX: the original selected columns with `c(as.factor(ids))`, whose
  ## meaning depends on the R version (integer factor codes vs. labels).
  ## Select explicitly by sample name instead.
  ids1 <- as.character(MYmeta[, "#SampleID"][MYmeta[, treatment] == trt1])
  ids2 <- as.character(MYmeta[, "#SampleID"][MYmeta[, treatment] == trt2])
  matrix3 <- cbind(MYdata2[ids1], MYdata2[ids2])
  ## Transpose to samples-in-rows and attach the metadata (row names of the
  ## transposed counts are matched against "#SampleID").
  merge(t(matrix3), MYmeta, by.x = 0, by.y = "#SampleID")
}
zinb_nb_test <- function(both, MYdata, trt, categ1, categ2) {
  ## For every OTU column of `both` (columns 2 .. nOTU+1), fit three count
  ## models of abundance on the treatment column `trt`: Poisson, negative
  ## binomial (NB) and zero-inflated NB (ZINB), plus t-test / Kruskal-Wallis /
  ## Shapiro-Wilk statistics and refits on outlier-filtered data.
  ## Every fit is wrapped in tryCatch so a failing OTU contributes NA instead
  ## of aborting the run. Iterations run in parallel via %dopar% (backend
  ## registered by the caller) and are rbind-ed into a matrix whose column
  ## positions (noted below) are relied upon by final_steps().
  all_data <- foreach(i = 2:(length(rownames(MYdata)) + 1), .combine = rbind) %dopar% {
    final_vec <- c()
    formula1 <- as.formula(paste("both[,i] ~ ", trt, " | 1", sep = "")) # ZINB: constant zero-inflation part
    formula2 <- as.formula(paste("both[,i] ~ ", trt, sep = ""))
    result.pois <- tryCatch(glm(formula2, family = "poisson", data = both), error = function(e) NA)
    result.zinb <- tryCatch(zeroinfl(formula1, data = both, dist = "negbin"), error = function(e) NA)
    result.nb <- tryCatch(glm.nb(formula2, data = both), error = function(e) NA)
    pois.coeff <- tryCatch(exp(summary(result.pois)$coefficients[2, 1]), error = function(e) NA) # Column 1
    pois.pval <- tryCatch(summary(result.pois)$coefficients[2, 4], error = function(e) NA) # Column 2
    nb.coeff <- tryCatch(exp(summary(result.nb)$coefficients[2, 1]), error = function(e) NA) # Column 3
    nb.pval <- tryCatch(summary(result.nb)$coefficients[2, 4], error = function(e) NA) # Column 4
    zinb.coeff <- tryCatch(exp(summary(result.zinb)$coefficients$count[2, 1]), error = function(e) NA) # Column 5
    zinb.pval <- tryCatch(summary(result.zinb)$coefficients$count[2, 4], error = function(e) NA) # Column 6
    aic.pois <- tryCatch(AIC(result.pois), error = function(e) NA) # Column 7
    aic.nb <- tryCatch(AIC(result.nb), error = function(e) NA) # Column 8
    aic.zinb <- tryCatch(AIC(result.zinb), error = function(e) NA) # Column 9
    bic.pois <- tryCatch(BIC(result.pois), error = function(e) NA) # Column 10
    bic.nb <- tryCatch(BIC(result.nb), error = function(e) NA) # Column 11
    bic.zinb <- tryCatch(BIC(result.zinb), error = function(e) NA) # Column 12
    final_vec <- c(pois.coeff, pois.pval, nb.coeff, nb.pval, zinb.coeff, zinb.pval, aic.pois, aic.nb, aic.zinb, bic.pois, bic.nb, bic.zinb) # Appended data from Columns 1-12
    shap_wilk_pval <- tryCatch(shapiro.test(both[, i])$p.value, error = function(e) NA) # Significant p-value indicates data is not normally distributed. (Column 15)
    pval_ttest <- tryCatch(t.test(formula2, data = both)$p.value, error = function(e) NA) # Column 14
    estimate_tab <- tryCatch(t.test(formula2, data = both)$estimate, error = function(e) NA) # Column 17-20 drawn from this object
    heading <- paste(gsub(" ", "", strsplit(names(estimate_tab)[1], "mean in group")[[1]][2], fixed = TRUE), "_minus_", gsub(" ", "", strsplit(names(estimate_tab)[2], "mean in group")[[1]][2], fixed = TRUE), "_mean", sep = "") # Column 16
    mean_diff <- tryCatch((estimate_tab[1][[1]] - estimate_tab[2][[1]]), error = function(e) NA) # Column 13
    kwtest <- tryCatch(kruskal.test(formula2, data = both)$p.value, error = function(e) NA) # Column 21
    warn.nb <- tryCatch(glm.nb(formula2, data = both), error = function(e) NA, warning = function(w) w)
    valwarn.nb <- ifelse(class(warn.nb)[1] == "simpleWarning", 'yes', 'no') # Column 22: did the NB fit emit a warning?
    trt1vals <- both[, i][which(as.vector(both[, trt]) == categ1)]
    trt2vals <- both[, i][which(as.vector(both[, trt]) == categ2)]
    zerotrt1 <- sum(trt1vals == 0) # Column 23
    zerotrt2 <- sum(trt2vals == 0) # Column 24
    nonzerotrt1 <- length(trt1vals) - zerotrt1 # Column 25
    nonzerotrt2 <- length(trt2vals) - zerotrt2 # Column 26
    totaltrt1 <- sum(trt1vals) # Column 27
    totaltrt2 <- sum(trt2vals) # Column 28
    mean.otu <- mean(both[, i]) # Column 29
    var.otu <- var(both[, i]) # Column 30
    var.mean.ratio <- var.otu / mean.otu # Column 31: overdispersion indicator
    ## BUG FIX: the original filtered outliers with `x[-(which(x > 5*IQR(x)))]`,
    ## which returns an EMPTY vector when no value exceeds the cutoff (negative
    ## subscripting with integer(0) keeps nothing). A logical mask keeps all
    ## values in that case. Assumes no NA counts.
    keep <- !(both[, i] > 5 * IQR(both[, i]))
    newd <- both[, i][keep]    # counts with values > 5*IQR removed
    treatment <- as.vector(both[, trt])
    newmeta <- treatment[keep] # matching treatment labels
    newpval_pois <- tryCatch(summary(glm(newd ~ newmeta, family = "poisson"))$coefficients[2, 4], error = function(e) NA) # Column 32
    newpval_nb <- tryCatch(summary(glm.nb(newd ~ newmeta))$coefficients[2, 4], error = function(e) NA) # Column 33
    newpval_zinb <- tryCatch(summary(zeroinfl(newd ~ newmeta | 1, dist = "negbin"))$coefficients$count[2, 4], error = function(e) NA) # Column 34
    aic.filt.pois <- tryCatch(AIC(glm(newd ~ newmeta, family = "poisson")), error = function(e) NA) # Column 35
    aic.filt.nb <- tryCatch(AIC(glm.nb(newd ~ newmeta)), error = function(e) NA) # Column 36
    aic.filt.zinb <- tryCatch(AIC(zeroinfl(newd ~ newmeta | 1, dist = "negbin")), error = function(e) NA) # Column 37
    bic.filt.pois <- tryCatch(BIC(glm(newd ~ newmeta, family = "poisson")), error = function(e) NA) # Column 38
    bic.filt.nb <- tryCatch(BIC(glm.nb(newd ~ newmeta)), error = function(e) NA) # Column 39
    bic.filt.zinb <- tryCatch(BIC(zeroinfl(newd ~ newmeta | 1, dist = "negbin")), error = function(e) NA) # Column 40
    ## Best model = lowest AIC/BIC, before and after outlier filtering.
    bestmod <- c("Poisson", "NB", "ZINB")
    all.aic.nonfilt <- c(aic.pois, aic.nb, aic.zinb)
    all.bic.nonfilt <- c(bic.pois, bic.nb, bic.zinb)
    all.aic.filt <- c(aic.filt.pois, aic.filt.nb, aic.filt.zinb)
    all.bic.filt <- c(bic.filt.pois, bic.filt.nb, bic.filt.zinb)
    aic.nonfilt.best <- bestmod[which(all.aic.nonfilt == min(all.aic.nonfilt, na.rm = TRUE))][1] # Column 41
    bic.nonfilt.best <- bestmod[which(all.bic.nonfilt == min(all.bic.nonfilt, na.rm = TRUE))][1] # Column 42
    aic.filt.best <- bestmod[which(all.aic.filt == min(all.aic.filt, na.rm = TRUE))][1] # Column 43
    bic.filt.best <- bestmod[which(all.bic.filt == min(all.bic.filt, na.rm = TRUE))][1] # Column 44
    bestmodel <- c(aic.nonfilt.best, bic.nonfilt.best, aic.filt.best, bic.filt.best)
    final_vec <- c(final_vec, mean_diff, pval_ttest, shap_wilk_pval, heading, estimate_tab[1][[1]], names(estimate_tab)[1], estimate_tab[2][[1]], names(estimate_tab)[2], kwtest, valwarn.nb, zerotrt1, zerotrt2, nonzerotrt1, nonzerotrt2, totaltrt1, totaltrt2, mean.otu, var.otu, var.mean.ratio, newpval_pois, newpval_nb, newpval_zinb, aic.filt.pois, aic.filt.nb, aic.filt.zinb, bic.filt.pois, bic.filt.nb, bic.filt.zinb, bestmodel)
    final_vec
  }
  return(all_data)
}
final_steps <- function(otutable,mapfile,categ1,categ2,trt,outputname){
## Top-level driver: reads the OTU table (tab-separated; first line skipped,
## OTU IDs as row names, last column = taxonomy), aligns it with the mapping
## file via data_load(), fits the per-OTU models via zinb_nb_test(),
## FDR-adjusts each p-value column, then reorders, labels and writes the
## result table to `outputname`.
MYdata <- read.table(otutable,header = T, sep = "\t", check.names = F, row.names =1, comment.char= "", skip =1,quote="")
both <- data_load(MYdata,mapfile,trt,categ1,categ2)
all_data <- zinb_nb_test(both,MYdata,trt,categ1,categ2)
allcols <- length(colnames(MYdata))
## Benjamini-Hochberg ("fdr") q-values for each raw p-value column of all_data.
pois.qval <- p.adjust(all_data[,2], method = "fdr") # Column 2 of all_data
nb.qval <- p.adjust(all_data[,4], method = "fdr") # Column 4 of all_data
zinb.qval <- p.adjust(all_data[,6], method = "fdr") # Column 6 of all_data
ttest.qval <- p.adjust(all_data[,14], method = "fdr") # Column 14 of all_data
kw.qval <- p.adjust(all_data[,21], method = "fdr") # Column 21 of all_data
pois.filt.qval <- p.adjust(all_data[,32], method = "fdr") # Column 32 from all_data
nb.filt.qval <- p.adjust(all_data[,33], method = "fdr") # Column 33 from all_data
zinb.filt.qval <- p.adjust(all_data[,34], method = "fdr") # Column 34 from all_data
taxonomy <- MYdata[allcols] # last column of the OTU table (taxonomy strings)
## NOTE(review): this assumes the mapping file contributes exactly one
## metadata column to `both` (Row.names first, treatment last) -- confirm.
otuids <- colnames(both)[2:(length(colnames(both))-1)]
taxlabels <- as.vector(taxonomy[,1])
difflabel <- unique(all_data[,16]) # It's the unique entry of 'heading' column in all_data
mean1_head <- unique(all_data[,18]) # It's the unique 'names(estimate_tab)[1]' column in all_data
mean2_head <- unique(all_data[,20]) # It's the unique entry of 'names(estimate_tab)[2]' column in all_data
## Human-readable per-group count headers.
zrtrt1 <- paste("# of 0's in ",categ1,sep="")
zrtrt2 <- paste("# of 0's in ",categ2,sep="")
nzrtrt1 <- paste("# of non-zeroes in ",categ1,sep="")
nzrtrt2 <- paste("# of non-zeroes in ",categ2,sep="")
tottrt1 <- paste("Total count in ",categ1,sep="")
totttrt2 <- paste("Total count in ",categ2,sep="")
## Reorder/augment columns for output. NOTE(review): because all_data mixes
## numbers and strings it is a character matrix, so numeric values are
## written as text; downstream readers must re-parse them.
all_data <- cbind(otuids,all_data[,1:2],pois.qval,all_data[,3:4],nb.qval,all_data[,5:6],zinb.qval,all_data[,17],all_data[,19],all_data[,13],all_data[,14],ttest.qval,all_data[,21],kw.qval,all_data[,22:31],all_data[,15],taxlabels,all_data[,32],pois.filt.qval,all_data[,33],nb.filt.qval,all_data[,34],zinb.filt.qval,all_data[,7:12],all_data[,35:44])
colnames(all_data) <- c("OTU_IDs","Poiss_Coeff","Poiss_pval","Poiss_qval","NB_Coeff","NB_pval","NB_qval","ZINB_Coeff","ZINB_pval","ZINB_qval",mean1_head,mean2_head,difflabel,"ttest_pval","ttest_qval","KW_pval","KW_qval","NB_Coeff_Estimate_Error",zrtrt1,zrtrt2,nzrtrt1,nzrtrt2,tottrt1,totttrt2,"mean_otu","variance_otu","var/mean ratio","Shapiro_Wilk_Normality_pvalue","taxonomy","pois_filt_pval","pois_filt_qval","nb_filt_pval","nb_filt_qval","zinb_filt_pval","zinb_filt_qval","aic.pois", "aic.nb", "aic.zinb", "bic.pois", "bic.nb", "bic.zinb","aic.filt.pois","aic.filt.nb","aic.filt.zinb","bic.filt.pois","bic.filt.nb","bic.filt.zinb","aic.nonfilt.best","bic.nonfilt.best","aic.filt.best","bic.filt.best") #change Difference to groups being tested
## append = TRUE keeps adding rows if the output file already exists across
## runs; the suppressed warning is the one about appending column names.
suppressWarnings(write.table(as.matrix(all_data),file=outputname,sep="\t",append = TRUE,col.names=TRUE,row.names=FALSE,quote=FALSE))
}
## Command-line interface:
##   Rscript <script> <otu_table> <mapfile> <group1> <group2> <treatment_col> <output_file> <n_cores>
argv <- commandArgs(TRUE)
registerDoMC(as.numeric(argv[7])) # number of parallel workers, taken from the 7th command-line argument
final_steps(argv[1],argv[2],argv[3],argv[4],argv[5],argv[6])
print (Sys.time() - start.time) # total wall-clock runtime
| /src/old_code/old_script_with_bic_rfunction.R | no_license | alifar76/NegBinSig-Test | R | false | false | 10,432 | r | #Rscript nb_regression_outlier_filtering.R high_vs_low_otu_table.txt high_low_mapfile.txt High Low Treatment ZINB_NB_Output_result.txt 2
start.time <- Sys.time()
library(pscl)
library(MASS)
library(foreach)
library(doMC)
data_load <- function(MYdata,mapfile,treatment,trt1,trt2){
colnames(MYdata)
MYmeta <- read.table(mapfile,header = T, sep = "\t", check.names = F, comment.char= "") #change Group header to Treatment
colnames(MYmeta)
allcols <- length(colnames(MYdata))
MYdata <- MYdata[,order(names(MYdata))]
MYdata2 <- MYdata[,1:(allcols-1)]
MYdata2 <- MYdata2[,order(names(MYdata2))]
MYmeta <- MYmeta[order(MYmeta[,"#SampleID"]), ]
matrix1 <- MYdata2[c(as.factor(MYmeta[,"#SampleID"][MYmeta[,treatment]==trt1]))] # change whichever group you are testing
matrix2 <- MYdata2[c(as.factor(MYmeta[,"#SampleID"][MYmeta[,treatment]==trt2]))] # change whichever group you are testing
matrix3 <- cbind(matrix1,matrix2)
mat.array <- t(matrix3)
both <- merge(mat.array,MYmeta,by.x=0,by.y="#SampleID")
}
zinb_nb_test <- function(both,MYdata,trt,categ1,categ2){
all_data <- foreach(i=2:(length(rownames(MYdata))+1), .combine = rbind) %dopar% {
final_vec <- c()
formula1 <- as.formula(paste("both[,i] ~ ",trt," | 1",sep=""))
formula2 <- as.formula(paste("both[,i] ~ ",trt,sep=""))
result.pois <- tryCatch(glm(formula2, family="poisson", data = both),error=function(e) NA)
result.zinb <- tryCatch(zeroinfl(formula1, data = both, dist = "negbin"),error=function(e) NA)
result.nb <- tryCatch(glm.nb(formula2, data = both),error=function(e) NA)
pois.coeff <- tryCatch(exp(summary(result.pois)$coefficients[2,1]),error=function(e) NA) # Column 1
pois.pval <- tryCatch(summary(result.pois)$coefficients[2,4],error=function(e) NA) # Column 2
nb.coeff <- tryCatch(exp(summary(result.nb)$coefficients[2,1]),error=function(e) NA) # Column 3
nb.pval <- tryCatch(summary(result.nb)$coefficients[2,4],error=function(e) NA) # Column 4
zinb.coeff <- tryCatch(exp(summary(result.zinb)$coefficients$count[2,1]),error=function(e) NA) # Column 5
zinb.pval <- tryCatch(summary(result.zinb)$coefficients$count[2,4],error=function(e) NA) # Column 6
aic.pois <- tryCatch(AIC(result.pois),error=function(e) NA) # Column 7
aic.nb <- tryCatch(AIC(result.nb),error=function(e) NA) # Column 8
aic.zinb <- tryCatch(AIC(result.zinb),error=function(e) NA) # Column 9
bic.pois <- tryCatch(BIC(result.pois),error=function(e) NA) # Column 10
bic.nb <- tryCatch(BIC(result.nb),error=function(e) NA) # Column 11
bic.zinb <- tryCatch(BIC(result.zinb),error=function(e) NA) # Column 12
final_vec <- c(pois.coeff,pois.pval,nb.coeff, nb.pval, zinb.coeff, zinb.pval, aic.pois, aic.nb, aic.zinb, bic.pois, bic.nb, bic.zinb) # Appended data from Columns 1-12
shap_wilk_pval <- tryCatch(shapiro.test(both[,i])$p.value,error=function(e) NA) # Significant p-value indicates data is not normally distributed. (Column 15)
pval_ttest <- tryCatch(t.test(formula2, data=both)$p.value,error=function(e) NA) # Column 14
estimate_tab <- tryCatch(t.test(formula2, data=both)$estimate,error=function(e) NA) # Column 17-20 drawn from this object
heading <- paste(gsub(" ","",strsplit(names(estimate_tab)[1],"mean in group")[[1]][2],fixed=TRUE),"_minus_",gsub(" ","",strsplit(names(estimate_tab)[2],"mean in group")[[1]][2],fixed=TRUE),"_mean",sep="") # Column 16
mean_diff <- tryCatch((estimate_tab[1][[1]] - estimate_tab[2][[1]]),error=function(e) NA) # Column 13
kwtest <- tryCatch(kruskal.test(formula2,data=both)$p.value,error=function(e) NA) # Column 21
warn.nb <- tryCatch(glm.nb(formula2, data = both),error=function(e) NA,warning=function(w) w)
valwarn.nb <- ifelse(class(warn.nb)[1] == "simpleWarning", 'yes', 'no') # Column 22
trt1vals <- both[,i][which(as.vector(both[,trt]) == categ1)]
trt2vals <- both[,i][which(as.vector(both[,trt]) == categ2)]
zerotrt1 <- sum(trt1vals == 0) # Column 23
zerotrt2 <- sum(trt2vals == 0) # Column 24
nonzerotrt1 <- length(trt1vals) - zerotrt1 # Column 25
nonzerotrt2 <- length(trt2vals) - zerotrt2 # Column 26
totaltrt1 <- sum(trt1vals) # Column 27
totaltrt2 <- sum(trt2vals) # Column 28
mean.otu <- mean(both[,i]) # Column 29
var.otu <- var(both[,i]) # Column 30
var.mean.ratio <- var.otu/mean.otu # Column 31
newd <- both[,i][-(which(both[,i] > 5*IQR(both[,i])))] # Select indices of values that are not greater than 5 times the IQR (i.e., values > 5*IQR will be removed)
treatment <- as.vector(both[,trt])
newmeta <- treatment[-(which(both[,i] > 5*IQR(both[,i])))] # Select values greater than 5 times the IQR
newpval_pois <- tryCatch(summary(glm(newd ~ newmeta, family="poisson"))$coefficients[2,4],error=function(e) NA) # Column 32
newpval_nb <- tryCatch(summary(glm.nb(newd ~ newmeta))$coefficients[2,4],error=function(e) NA) # Column 33
newpval_zinb <- tryCatch(summary(zeroinfl(newd ~ newmeta | 1, dist = "negbin"))$coefficients$count[2,4],error=function(e) NA) # Column 34
aic.filt.pois <- tryCatch(AIC(glm(newd ~ newmeta, family="poisson")),error=function(e) NA) # Column 35
aic.filt.nb <- tryCatch(AIC(glm.nb(newd ~ newmeta)),error=function(e) NA) # Column 36
aic.filt.zinb <- tryCatch(AIC(zeroinfl(newd ~ newmeta | 1, dist = "negbin")),error=function(e) NA) # Column 37
bic.filt.pois <- tryCatch(BIC(glm(newd ~ newmeta, family="poisson")),error=function(e) NA) # Column 38
bic.filt.nb <- tryCatch(BIC(glm.nb(newd ~ newmeta)),error=function(e) NA) # Column 39
bic.filt.zinb <- tryCatch(BIC(zeroinfl(newd ~ newmeta | 1, dist = "negbin")),error=function(e) NA) # Column 40
bestmod <- c("Poisson","NB","ZINB")
all.aic.nonfilt <- c(aic.pois,aic.nb,aic.zinb)
all.bic.nonfilt <- c(bic.pois,bic.nb,bic.zinb)
all.aic.filt <- c(aic.filt.pois,aic.filt.nb,aic.filt.zinb)
all.bic.filt <- c(bic.filt.pois,bic.filt.nb,bic.filt.zinb)
aic.nonfilt.best <- bestmod[which(all.aic.nonfilt == min(all.aic.nonfilt, na.rm=TRUE))][1] # Column 41
bic.nonfilt.best <- bestmod[which(all.bic.nonfilt == min(all.bic.nonfilt, na.rm=TRUE))][1] # Column 42
aic.filt.best <- bestmod[which(all.aic.filt == min(all.aic.filt, na.rm=TRUE))][1] # Column 43
bic.filt.best <- bestmod[which(all.bic.filt == min(all.bic.filt, na.rm=TRUE))][1] # Column 44
bestmodel <- c(aic.nonfilt.best,bic.nonfilt.best,aic.filt.best,bic.filt.best)
final_vec <- c(final_vec,mean_diff,pval_ttest,shap_wilk_pval,heading,estimate_tab[1][[1]],names(estimate_tab)[1],estimate_tab[2][[1]],names(estimate_tab)[2],kwtest,valwarn.nb,zerotrt1,zerotrt2,nonzerotrt1,nonzerotrt2,totaltrt1,totaltrt2,mean.otu,var.otu,var.mean.ratio,newpval_pois,newpval_nb,newpval_zinb,aic.filt.pois,aic.filt.nb,aic.filt.zinb,bic.filt.pois,bic.filt.nb,bic.filt.zinb,bestmodel) #
final_vec
}
return (all_data)
}
final_steps <- function(otutable,mapfile,categ1,categ2,trt,outputname){
MYdata <- read.table(otutable,header = T, sep = "\t", check.names = F, row.names =1, comment.char= "", skip =1,quote="")
both <- data_load(MYdata,mapfile,trt,categ1,categ2)
all_data <- zinb_nb_test(both,MYdata,trt,categ1,categ2)
allcols <- length(colnames(MYdata))
pois.qval <- p.adjust(all_data[,2], method = "fdr") # Column 2 of all_data
nb.qval <- p.adjust(all_data[,4], method = "fdr") # Column 4 of all_data
zinb.qval <- p.adjust(all_data[,6], method = "fdr") # Column 6 of all_data
ttest.qval <- p.adjust(all_data[,14], method = "fdr") # Column 14 of all_data
kw.qval <- p.adjust(all_data[,21], method = "fdr") # Column 21 of all_data
pois.filt.qval <- p.adjust(all_data[,32], method = "fdr") # Column 32 from all_data
nb.filt.qval <- p.adjust(all_data[,33], method = "fdr") # Column 33 from all_data
zinb.filt.qval <- p.adjust(all_data[,34], method = "fdr") # Column 34 from all_data
taxonomy <- MYdata[allcols]
otuids <- colnames(both)[2:(length(colnames(both))-1)]
taxlabels <- as.vector(taxonomy[,1])
difflabel <- unique(all_data[,16]) # It's the unique entry of 'heading' column in all_data
mean1_head <- unique(all_data[,18]) # It's the unique 'names(estimate_tab)[1]' column in all_data
mean2_head <- unique(all_data[,20]) # It's the unique entry of 'names(estimate_tab)[2]' column in all_data
zrtrt1 <- paste("# of 0's in ",categ1,sep="")
zrtrt2 <- paste("# of 0's in ",categ2,sep="")
nzrtrt1 <- paste("# of non-zeroes in ",categ1,sep="")
nzrtrt2 <- paste("# of non-zeroes in ",categ2,sep="")
tottrt1 <- paste("Total count in ",categ1,sep="")
totttrt2 <- paste("Total count in ",categ2,sep="")
all_data <- cbind(otuids,all_data[,1:2],pois.qval,all_data[,3:4],nb.qval,all_data[,5:6],zinb.qval,all_data[,17],all_data[,19],all_data[,13],all_data[,14],ttest.qval,all_data[,21],kw.qval,all_data[,22:31],all_data[,15],taxlabels,all_data[,32],pois.filt.qval,all_data[,33],nb.filt.qval,all_data[,34],zinb.filt.qval,all_data[,7:12],all_data[,35:44])
colnames(all_data) <- c("OTU_IDs","Poiss_Coeff","Poiss_pval","Poiss_qval","NB_Coeff","NB_pval","NB_qval","ZINB_Coeff","ZINB_pval","ZINB_qval",mean1_head,mean2_head,difflabel,"ttest_pval","ttest_qval","KW_pval","KW_qval","NB_Coeff_Estimate_Error",zrtrt1,zrtrt2,nzrtrt1,nzrtrt2,tottrt1,totttrt2,"mean_otu","variance_otu","var/mean ratio","Shapiro_Wilk_Normality_pvalue","taxonomy","pois_filt_pval","pois_filt_qval","nb_filt_pval","nb_filt_qval","zinb_filt_pval","zinb_filt_qval","aic.pois", "aic.nb", "aic.zinb", "bic.pois", "bic.nb", "bic.zinb","aic.filt.pois","aic.filt.nb","aic.filt.zinb","bic.filt.pois","bic.filt.nb","bic.filt.zinb","aic.nonfilt.best","bic.nonfilt.best","aic.filt.best","bic.filt.best") #change Difference to groups being tested
suppressWarnings(write.table(as.matrix(all_data),file=outputname,sep="\t",append = TRUE,col.names=TRUE,row.names=FALSE,quote=FALSE))
}
argv <- commandArgs(TRUE)
registerDoMC(as.numeric(argv[7])) #change the 4 to your number of CPU cores
final_steps(argv[1],argv[2],argv[3],argv[4],argv[5],argv[6])
print (Sys.time() - start.time)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/directconnect_operations.R
\name{directconnect_create_public_virtual_interface}
\alias{directconnect_create_public_virtual_interface}
\title{Creates a public virtual interface}
\usage{
directconnect_create_public_virtual_interface(connectionId,
newPublicVirtualInterface)
}
\arguments{
\item{connectionId}{[required] The ID of the connection.}
\item{newPublicVirtualInterface}{[required] Information about the public virtual interface.}
}
\description{
Creates a public virtual interface. A virtual interface is the VLAN that
transports AWS Direct Connect traffic. A public virtual interface
supports sending traffic to public services of AWS such as Amazon S3.
When creating an IPv6 public virtual interface (\code{addressFamily} is
\code{ipv6}), leave the \code{customer} and \code{amazon} address fields blank to use
auto-assigned IPv6 space. Custom IPv6 addresses are not supported.
}
\section{Request syntax}{
\preformatted{svc$create_public_virtual_interface(
connectionId = "string",
newPublicVirtualInterface = list(
virtualInterfaceName = "string",
vlan = 123,
asn = 123,
authKey = "string",
amazonAddress = "string",
customerAddress = "string",
addressFamily = "ipv4"|"ipv6",
routeFilterPrefixes = list(
list(
cidr = "string"
)
),
tags = list(
list(
key = "string",
value = "string"
)
)
)
)
}
}
\keyword{internal}
| /cran/paws.networking/man/directconnect_create_public_virtual_interface.Rd | permissive | sanchezvivi/paws | R | false | true | 1,505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/directconnect_operations.R
\name{directconnect_create_public_virtual_interface}
\alias{directconnect_create_public_virtual_interface}
\title{Creates a public virtual interface}
\usage{
directconnect_create_public_virtual_interface(connectionId,
newPublicVirtualInterface)
}
\arguments{
\item{connectionId}{[required] The ID of the connection.}
\item{newPublicVirtualInterface}{[required] Information about the public virtual interface.}
}
\description{
Creates a public virtual interface. A virtual interface is the VLAN that
transports AWS Direct Connect traffic. A public virtual interface
supports sending traffic to public services of AWS such as Amazon S3.
When creating an IPv6 public virtual interface (\code{addressFamily} is
\code{ipv6}), leave the \code{customer} and \code{amazon} address fields blank to use
auto-assigned IPv6 space. Custom IPv6 addresses are not supported.
}
\section{Request syntax}{
\preformatted{svc$create_public_virtual_interface(
connectionId = "string",
newPublicVirtualInterface = list(
virtualInterfaceName = "string",
vlan = 123,
asn = 123,
authKey = "string",
amazonAddress = "string",
customerAddress = "string",
addressFamily = "ipv4"|"ipv6",
routeFilterPrefixes = list(
list(
cidr = "string"
)
),
tags = list(
list(
key = "string",
value = "string"
)
)
)
)
}
}
\keyword{internal}
|
# Params.
# Simulates a Bernoulli series with a change point at tau (cf. Hinkley's
# change-point test -- NOTE(review): inferred from the file name; confirm).
T = 1000 # series length; NOTE(review): this masks base R's TRUE shorthand `T`
tau = 1000 # change-point position; tau == T leaves the second regime empty
theta0 = 0.4 # success probability before the change point
theta1 = 0.2 # success probability after the change point
phi = (log(1-theta1)-log(1-theta0)) / (log(theta0)-log(theta1))
# Random series.
#set.seed(123)
R = 0 + c(runif(tau) < theta0, runif(T-tau) < theta1) # `0 +` coerces the logical draws to 0/1
R_ = rev(R)
Rbar = cumsum(R) / (1:T) # running mean of the forward series
Rbar_ = cumsum(R_) / (1:T) # running mean of the reversed series
# p * log(p) evaluated elementwise, with the convention 0 * log(0) = 0
# (the limit value); used for the entropy terms of the likelihood below.
xlogx <- function(p) {
  out <- rep(0, length(p))
  nonzero <- p != 0
  out[nonzero] <- p[nonzero] * log(p[nonzero])
  return(out)
}
# Likelihood
#Lt = diffinv(R*log(theta0)+(1-R)*log(1-theta0)) +
# rev(diffinv(R_*log(theta1)+(1-R_)*log(1-theta1)))
#plot(Lt, type='l')
#Xt = diffinv(R*log(theta0/theta1) + (1-R)*log((1-theta0)/(1-theta1)))
# Profile log-likelihood (up to constants) of a change at each index t:
# entropy-type terms of the forward mean over 1..t plus those of the
# reversed mean over the remaining tail. NOTE(review): interpretation
# inferred from the algebra above; confirm against the intended reference.
Xt = (1:T) * (xlogx(Rbar) + xlogx(1-Rbar)) +
rev((1:T) * (xlogx(Rbar_) + xlogx(1-Rbar_)))
plot(Xt-Xt[T], type='l') # curve shown relative to the no-change endpoint
# Commented-out exploratory code below: MLE of the change point and the two
# rates, plus a lattice-path computation over a K x K grid (left as-is).
#t = which.max(Xt)
#theta0_hat = Rbar[t]
#theta1_hat = Rbar_[T-t]
#tmp = sort(c(Rbar[t], Rbar_[T-t]))
#theta0_hat = tmp[2]
#theta1_hat = tmp[1]
#phi_hat = log((1-theta0_hat)/(1-theta1_hat)) / log((theta1_hat)/(theta0_hat))
#K = 1000
#p00 = exp(-sum(pbinom(q=floor((1:K)*phi_hat/(1+phi_hat)), size=1:K, prob=theta0_hat, lower.tail=TRUE)/(1:K)))
#print(phi_hat)
#print(p00)
#Q = matrix(rep(0, K^2), ncol=K)
#Q[1,2:K] = p00*(1-theta0_hat)^(1:(K-1))
#Q[2,1] = p00*theta0_hat
#for (m in 2:K) {
# for (l in 2:K) {
# if ((l-1) > phi_hat*(m-1) + 1) next
# if ((l-1) < phi_hat*(m-1) - phi_hat) {
# Q[l,m] = theta0_hat*Q[l-1,m] + (1-theta0_hat)*Q[l,m-1]
# } else {
# Q[l,m] = theta0_hat*Q[l-1,m]
# }
# }
#}
#print(sum(Q))
# 6.906944
| /Hinkley.R | no_license | gui11aume/misc | R | false | false | 1,431 | r | # Params.
T = 1000
tau = 1000
theta0 = 0.4
theta1 = 0.2
phi = (log(1-theta1)-log(1-theta0)) / (log(theta0)-log(theta1))
# Random series.
#set.seed(123)
R = 0 + c(runif(tau) < theta0, runif(T-tau) < theta1)
R_ = rev(R)
Rbar = cumsum(R) / (1:T)
Rbar_ = cumsum(R_) / (1:T)
xlogx = function(x) {
x[x == 0] = 1
return(x*log(x))
}
# Likelihood
#Lt = diffinv(R*log(theta0)+(1-R)*log(1-theta0)) +
# rev(diffinv(R_*log(theta1)+(1-R_)*log(1-theta1)))
#plot(Lt, type='l')
#Xt = diffinv(R*log(theta0/theta1) + (1-R)*log((1-theta0)/(1-theta1)))
Xt = (1:T) * (xlogx(Rbar) + xlogx(1-Rbar)) +
rev((1:T) * (xlogx(Rbar_) + xlogx(1-Rbar_)))
plot(Xt-Xt[T], type='l')
#t = which.max(Xt)
#theta0_hat = Rbar[t]
#theta1_hat = Rbar_[T-t]
#tmp = sort(c(Rbar[t], Rbar_[T-t]))
#theta0_hat = tmp[2]
#theta1_hat = tmp[1]
#phi_hat = log((1-theta0_hat)/(1-theta1_hat)) / log((theta1_hat)/(theta0_hat))
#K = 1000
#p00 = exp(-sum(pbinom(q=floor((1:K)*phi_hat/(1+phi_hat)), size=1:K, prob=theta0_hat, lower.tail=TRUE)/(1:K)))
#print(phi_hat)
#print(p00)
#Q = matrix(rep(0, K^2), ncol=K)
#Q[1,2:K] = p00*(1-theta0_hat)^(1:(K-1))
#Q[2,1] = p00*theta0_hat
#for (m in 2:K) {
# for (l in 2:K) {
# if ((l-1) > phi_hat*(m-1) + 1) next
# if ((l-1) < phi_hat*(m-1) - phi_hat) {
# Q[l,m] = theta0_hat*Q[l-1,m] + (1-theta0_hat)*Q[l,m-1]
# } else {
# Q[l,m] = theta0_hat*Q[l-1,m]
# }
# }
#}
#print(sum(Q))
# 6.906944
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isd_read.R
\name{isd_read}
\alias{isd_read}
\title{Read NOAA ISD/ISH local file}
\usage{
isd_read(path, additional = TRUE, parallel = FALSE,
cores = getOption("cl.cores", 2), progress = FALSE)
}
\arguments{
\item{path}{(character) path to the file. required.}
\item{additional}{(logical) include additional and remarks data sections
in output. Default: \code{TRUE}. Passed on to
\code{\link[isdparser]{isd_parse}}}
\item{parallel}{(logical) do processing in parallel. Default: \code{FALSE}}
\item{cores}{(integer) number of cores to use: Default: 2. We look in
your option "cl.cores", but use default value if not found.}
\item{progress}{(logical) print progress - ignored if \code{parallel=TRUE}.
The default is \code{FALSE} because printing progress adds a small bit of
time, so if processing time is important, then keep as \code{FALSE}}
}
\value{
A tibble (data.frame)
}
\description{
Read NOAA ISD/ISH local file
}
\details{
\code{isd_read} - read a \code{.gz} file as downloaded
from NOAA's website
}
\examples{
\dontrun{
file <- system.file("examples", "011490-99999-1986.gz", package = "rnoaa")
isd_read(file)
isd_read(file, additional = FALSE)
}
}
\references{
ftp://ftp.ncdc.noaa.gov/pub/data/noaa/
}
\seealso{
\code{\link{isd}}, \code{\link{isd_stations}},
\code{\link{isd_stations_search}}
Other isd: \code{\link{isd_stations_search}},
\code{\link{isd_stations}}, \code{\link{isd}}
}
| /man/isd_read.Rd | permissive | martgnz/rnoaa | R | false | true | 1,483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isd_read.R
\name{isd_read}
\alias{isd_read}
\title{Read NOAA ISD/ISH local file}
\usage{
isd_read(path, additional = TRUE, parallel = FALSE,
cores = getOption("cl.cores", 2), progress = FALSE)
}
\arguments{
\item{path}{(character) path to the file. required.}
\item{additional}{(logical) include additional and remarks data sections
in output. Default: \code{TRUE}. Passed on to
\code{\link[isdparser]{isd_parse}}}
\item{parallel}{(logical) do processing in parallel. Default: \code{FALSE}}
\item{cores}{(integer) number of cores to use: Default: 2. We look in
your option "cl.cores", but use default value if not found.}
\item{progress}{(logical) print progress - ignored if \code{parallel=TRUE}.
The default is \code{FALSE} because printing progress adds a small bit of
time, so if processing time is important, then keep as \code{FALSE}}
}
\value{
A tibble (data.frame)
}
\description{
Read NOAA ISD/ISH local file
}
\details{
\code{isd_read} - read a \code{.gz} file as downloaded
from NOAA's website
}
\examples{
\dontrun{
file <- system.file("examples", "011490-99999-1986.gz", package = "rnoaa")
isd_read(file)
isd_read(file, additional = FALSE)
}
}
\references{
ftp://ftp.ncdc.noaa.gov/pub/data/noaa/
}
\seealso{
\code{\link{isd}}, \code{\link{isd_stations}},
\code{\link{isd_stations_search}}
Other isd: \code{\link{isd_stations_search}},
\code{\link{isd_stations}}, \code{\link{isd}}
}
|
# Read one differential-expression table and append it to an accumulator.
#
# @param df Accumulator data frame (tibble); rows read from `path` are
#   appended after the existing rows.
# @param path Path to a TSV file containing (at least) a `log10_padj` column.
# @param category Label stored in a new `category` column for every row read.
# @return `df` with the filtered, labelled rows from `path` appended.
import <- function(df, path, category) {
  new_rows <- read_tsv(path) %>%
    # Drop rows without an adjusted p-value (filtered out upstream).
    filter(!is.na(log10_padj)) %>%
    mutate(category = category)
  # Appending after `df` preserves the caller's accumulation order; the
  # previous `%>% return()` tail was a no-op anti-pattern and is removed.
  bind_rows(df, new_rows)
}
# Build the TSS-seq expression violin/box plot and save it as a PDF.
#
# Args:
#   theme_spec: path to an R file sourced for `theme_default`.
#   tss_genic, tss_intragenic, tss_antisense, tss_intergenic: paths to the
#     per-category TSS-seq differential-expression TSV files.
#   fig_width, fig_height: output figure size in inches.
#   pdf_out: path of the PDF file to write.
main = function(theme_spec,
tss_genic, tss_intragenic, tss_antisense, tss_intergenic,
# tfiib_genic, tfiib_intragenic, tfiib_intergenic,
fig_width, fig_height,
pdf_out){
source(theme_spec)
# Stack the four category tables, fix the category display order, and
# reshape to long format with one row per (peak, condition).
tss_df = tibble() %>%
import(tss_genic, "genic") %>%
import(tss_intragenic, "intragenic") %>%
import(tss_antisense, "antisense") %>%
import(tss_intergenic, "intergenic") %>%
mutate(category=fct_inorder(category, ordered=TRUE)) %>%
select(name, condition_expr, control_expr, category) %>%
gather(key=condition, value=expression, -c(name, category)) %>%
mutate(condition = ordered(condition,
levels = c("control_expr", "condition_expr"),
labels = c("non-depleted", "depleted")))
# Violin + narrow boxplot per category, dodged by depletion condition;
# `expression + 1` keeps zero counts on the log10 axis below.
tss_plot = ggplot(data = tss_df,
aes(x=category, y=expression+1,
group=interaction(condition, category))) +
geom_violin(aes(fill=condition,
color=condition),
bw = .08,
width=1.2,
position=position_dodge(width=0.75),
size=0.2,
alpha=0.9) +
geom_boxplot(position=position_dodge(width=0.75),
fill="white",
width=0.12,
notch=TRUE,
outlier.size=0,
outlier.stroke=0,
size=0.2,
alpha=0.2) +
scale_x_discrete(expand = c(0,0)) +
# limits = c("genic", "intragenic", "antisense", "intergenic", "")) +
scale_y_log10(name = "normalized counts",
breaks = c(10, 1000), labels = c(bquote(10^1), bquote(10^3))) +
scale_fill_few(guide=guide_legend(direction="vertical",
label.position="right",
# label.hjust=1,
keyheight=unit(6, "pt"))) +
scale_color_few(guide=guide_legend(direction="vertical",
label.position="right",
# label.hjust=1,
keyheight=unit(6, "pt"))) +
# ggtitle("expression level of TSS-seq peaks") +
theme_default +
theme(axis.title.x = element_blank(),
axis.text.x = element_text(size=10),
panel.grid.major.x = element_blank(),
legend.position = "top",
legend.text = element_text(margin = margin(0,0,0,0,"pt")),
legend.justification = c(0.5, 0.5),
legend.box.margin = margin(0, 0, -10, 0, "pt"),
legend.spacing.x = unit(2, "pt"),
plot.margin = margin(0,0,0,0,"pt"))
# Write the figure; cairo_pdf is used as the PDF device.
ggsave(pdf_out,
plot=tss_plot,
width=fig_width,
height=fig_height,
units="in",
device=cairo_pdf)
}
# Script entry point: all input paths, figure parameters and the output path
# come from the `snakemake` S4 object, so this file is only runnable through
# its Snakemake rule.
main(theme_spec = snakemake@input[["theme"]],
tss_genic = snakemake@input[["tss_genic"]],
tss_intragenic = snakemake@input[["tss_intragenic"]],
tss_antisense = snakemake@input[["tss_antisense"]],
tss_intergenic = snakemake@input[["tss_intergenic"]],
fig_width = snakemake@params[["width"]],
fig_height = snakemake@params[["height"]],
pdf_out = snakemake@output[["pdf"]])
| /scripts/five_tss_expression_levels.R | no_license | james-chuang/dissertation | R | false | false | 3,705 | r |
import = function(df, path, category){
df = read_tsv(path) %>%
filter(! is.na(log10_padj)) %>%
mutate(category=category) %>%
bind_rows(df, .) %>%
return()
}
main = function(theme_spec,
tss_genic, tss_intragenic, tss_antisense, tss_intergenic,
# tfiib_genic, tfiib_intragenic, tfiib_intergenic,
fig_width, fig_height,
pdf_out){
source(theme_spec)
tss_df = tibble() %>%
import(tss_genic, "genic") %>%
import(tss_intragenic, "intragenic") %>%
import(tss_antisense, "antisense") %>%
import(tss_intergenic, "intergenic") %>%
mutate(category=fct_inorder(category, ordered=TRUE)) %>%
select(name, condition_expr, control_expr, category) %>%
gather(key=condition, value=expression, -c(name, category)) %>%
mutate(condition = ordered(condition,
levels = c("control_expr", "condition_expr"),
labels = c("non-depleted", "depleted")))
tss_plot = ggplot(data = tss_df,
aes(x=category, y=expression+1,
group=interaction(condition, category))) +
geom_violin(aes(fill=condition,
color=condition),
bw = .08,
width=1.2,
position=position_dodge(width=0.75),
size=0.2,
alpha=0.9) +
geom_boxplot(position=position_dodge(width=0.75),
fill="white",
width=0.12,
notch=TRUE,
outlier.size=0,
outlier.stroke=0,
size=0.2,
alpha=0.2) +
scale_x_discrete(expand = c(0,0)) +
# limits = c("genic", "intragenic", "antisense", "intergenic", "")) +
scale_y_log10(name = "normalized counts",
breaks = c(10, 1000), labels = c(bquote(10^1), bquote(10^3))) +
scale_fill_few(guide=guide_legend(direction="vertical",
label.position="right",
# label.hjust=1,
keyheight=unit(6, "pt"))) +
scale_color_few(guide=guide_legend(direction="vertical",
label.position="right",
# label.hjust=1,
keyheight=unit(6, "pt"))) +
# ggtitle("expression level of TSS-seq peaks") +
theme_default +
theme(axis.title.x = element_blank(),
axis.text.x = element_text(size=10),
panel.grid.major.x = element_blank(),
legend.position = "top",
legend.text = element_text(margin = margin(0,0,0,0,"pt")),
legend.justification = c(0.5, 0.5),
legend.box.margin = margin(0, 0, -10, 0, "pt"),
legend.spacing.x = unit(2, "pt"),
plot.margin = margin(0,0,0,0,"pt"))
ggsave(pdf_out,
plot=tss_plot,
width=fig_width,
height=fig_height,
units="in",
device=cairo_pdf)
}
main(theme_spec = snakemake@input[["theme"]],
tss_genic = snakemake@input[["tss_genic"]],
tss_intragenic = snakemake@input[["tss_intragenic"]],
tss_antisense = snakemake@input[["tss_antisense"]],
tss_intergenic = snakemake@input[["tss_intergenic"]],
fig_width = snakemake@params[["width"]],
fig_height = snakemake@params[["height"]],
pdf_out = snakemake@output[["pdf"]])
|
library(plyr)
library(tidyverse)
wd <- "/home/ginnyweasley/Dokumente/01_Promotion/06_Daten/02_SISAL/SISAL_Export_1b/"
prefix <- '' # ab v1c heißen die csv files anderes
# Load the SISAL database export tables and assemble the joined working tables.
#
# @param prefix Filename prefix of the csv files ('' for this export version).
# @param wd Directory containing the SISAL csv export (must end with '/').
# @return A list of four data frames:
#   [[1]] site_tb     - site / entity / reference / notes joined table
#   [[2]] dating_tb   - dating table with a `laminar_dated` flag
#   [[3]] dating_tb_2 - dating joined to entity, "current" entities only
#   [[4]] sample_tb   - samples joined to chronologies and isotope tables
load_data <- function(prefix, wd) {
  # Local helper: read one export table as plain characters (no factors).
  # Replaces the previous sixteen copy-pasted read.csv(paste(...)) calls.
  read_tbl <- function(name) {
    read.csv(paste(wd, prefix, name, sep = ""), header = TRUE,
             stringsAsFactors = FALSE)
  }
  # Read for completeness with the original export; not used in the joins below.
  composite_link_entity <- read_tbl("composite_link_entity.csv")
  d13C <- read_tbl("d13C.csv")
  # Disambiguate the isotope-standard columns before joining d13C and d18O.
  d13C <- rename(d13C, iso_std_d13C = iso_std)
  d18O <- read_tbl("d18O.csv")
  d18O <- rename(d18O, iso_std_d18O = iso_std)
  dating_lamina <- read_tbl("dating_lamina.csv")
  dating <- read_tbl("dating.csv")
  entity_link_reference <- read_tbl("entity_link_reference.csv")
  entity <- read_tbl("entity.csv")
  gap <- read_tbl("gap.csv")
  hiatus <- read_tbl("hiatus.csv")
  notes <- read_tbl("notes.csv")
  original_chronology <- read_tbl("original_chronology.csv")
  reference <- read_tbl("reference.csv")
  sample <- read_tbl("sample.csv")
  sisal_chronology <- read_tbl("sisal_chronology.csv")
  site <- read_tbl("site.csv")
  # Site-level table: site -> entity -> references -> notes.
  site_tb <- left_join(site, entity, by = "site_id") %>%
    left_join(., entity_link_reference, by = "entity_id") %>%
    left_join(., reference, by = "ref_id") %>%
    left_join(., notes, by = "site_id") %>%
    mutate_at(vars(site_id, entity_id), as.numeric)
  # Dating table with a flag for entities that also have lamina dating.
  dating_tb <- dating %>% group_by(entity_id) %>%
    mutate(laminar_dated = if_else((entity_id %in% dating_lamina$entity_id), "yes", "no")) %>%
    mutate_at(vars(dating_id, depth_dating, dating_thickness, X14C_correction,
                   corr_age, corr_age_uncert_pos, corr_age_uncert_neg), as.numeric) %>%
    ungroup()
  # Same dating information restricted to entities whose status is "current".
  dating_tb_2 <- dating %>% left_join(., entity, by = "entity_id") %>%
    filter(entity_status == "current") %>%
    mutate_at(vars(dating_id, depth_dating, dating_thickness, X14C_correction,
                   corr_age, corr_age_uncert_pos, corr_age_uncert_neg), as.numeric)
  # Per-sample table joined to chronologies and both isotope tables.
  sample_tb <- join_all(list(sample, hiatus, gap, original_chronology,
                             sisal_chronology, d13C, d18O),
                        by = "sample_id", type = "left", match = "all") %>%
    mutate_at(vars(entity_id, sample_id, sample_thickness, depth_sample,
                   interp_age, interp_age_uncert_pos, interp_age_uncert_neg,
                   COPRA_age, COPRA_age_uncert_pos, COPRA_age_uncert_neg,
                   linear_age, linear_age_uncert_pos, linear_age_uncert_neg,
                   d13C_measurement, d13C_precision, d18O_measurement,
                   d18O_precision), as.numeric)
  list(site_tb, dating_tb, dating_tb_2, sample_tb)
}
# Unpack the four tables returned by load_data().
# NOTE(review): `data[1]` is a one-element list; `data[[1]]` would extract
# the data frame directly without the as.data.frame() round trip -- confirm
# before changing, since column naming can differ between the two forms.
data <- load_data(prefix, wd)
site_tb <- as.data.frame(data[1])
dating_tb <- as.data.frame(data[2])
dating_tb_2 <- as.data.frame(data[3]) # dating_tb_2 is the table Carla prepared additionally (see the function above); entity_status is included here
sample_tb <- as.data.frame(data[4])
# Build a table with four columns (site_id, entity_id, interp_age,
# d18O_measurement) for records with more than 50 d18O measurements in the
# last ~1150 years covering a sufficiently long period.
# 1) Keep only U/Th-dated records: drop C14 dates and lamination events.
#    %in% is used for set membership here; the previous
#    `date_type != c(...)` silently recycled the right-hand side instead of
#    testing "not one of".
dating_tb_3 <- dating_tb_2 %>%
  filter(date_used == "yes" &
           !(date_type %in% c("C14", "Event; end of laminations",
                              "Event; start of laminations")))
# 2) From the sample table keep only the entities selected above, restrict
#    to interpolated ages younger than 1100 y, and keep entities with more
#    than 50 such measurements.
sample_min50 <- sample_tb %>%
  filter(entity_id %in% dating_tb_3$entity_id) %>%
  filter(interp_age < 1100) %>%
  group_by(entity_id) %>%
  count() %>%
  filter(n > 50)
# 3) All measurements (< 1100 y) belonging to the entities selected above.
sample_min50_data <- sample_tb %>%
  filter(entity_id %in% sample_min50$entity_id) %>%
  select(entity_id, interp_age, d18O_measurement) %>%
  filter(interp_age < 1100)
# 4) Prepend a site_id column to the measurement table.
site_min50 <- site_tb %>%
  filter(entity_id %in% sample_min50$entity_id) %>%
  select(site_id, entity_id) %>%
  distinct(site_id, entity_id) %>%
  right_join(., sample_min50_data, by = "entity_id")
# Youngest/oldest measurement per entity; keep records that reach both the
# modern end (< 0 y BP) and the old end (> 1000 y BP) and span > 700 y.
# Four entities are excluded by id; `%in%` replaces the former `&&` chain,
# which is scalar-only (and an error on vectors since R 4.3).
site_period <- site_min50 %>%
  group_by(entity_id) %>%
  filter(!(entity_id %in% c(51, 85, 123, 435))) %>%
  summarise(min_corr_age = round(min(interp_age, na.rm = TRUE), digits = 2),
            max_corr_age = round(max(interp_age, na.rm = TRUE), digits = 2)) %>%
  filter(min_corr_age < 0 & max_corr_age > 1000) %>%
  mutate(period = max_corr_age - min_corr_age) %>%
  filter(period > 700)
entities_used <- site_period[, 1]$entity_id
# 5) Final table: keep only the entities whose record is long enough.
sample_final <- site_min50 %>%
  filter(entity_id %in% site_period$entity_id) %>%
  filter(interp_age < 1100)
sites_used <- sample_final %>% group_by(site_id) %>% count(site_id) %>% select(site_id)
sites_used <- sites_used[, 1]$site_id
sites_entities_used <- sample_final %>% count(site_id, entity_id) %>% select(site_id, entity_id)
# Drop intermediates and move to the project working directory (kept for
# compatibility with the downstream scripts that rely on it).
remove(dating_tb_2, dating_tb_3, sample_min50, sample_min50_data, site_period)
setwd("/home/ginnyweasley/Dokumente/01_Promotion/07_R_Code/201908_REKLIM_prepare/")
| /4_Read_in_SISAL_002.R | no_license | ginnyweasleyIUP/201908_REKLIM_prepare | R | false | false | 6,223 | r | library(plyr)
library(tidyverse)
wd <- "/home/ginnyweasley/Dokumente/01_Promotion/06_Daten/02_SISAL/SISAL_Export_1b/"
prefix <- '' # ab v1c heißen die csv files anderes
# Load the SISAL database export tables and assemble the joined working tables.
#
# @param prefix Filename prefix of the csv files ('' for this export version).
# @param wd Directory containing the SISAL csv export (must end with '/').
# @return A list of four data frames:
#   [[1]] site_tb     - site / entity / reference / notes joined table
#   [[2]] dating_tb   - dating table with a `laminar_dated` flag
#   [[3]] dating_tb_2 - dating joined to entity, "current" entities only
#   [[4]] sample_tb   - samples joined to chronologies and isotope tables
load_data <- function(prefix, wd) {
  # Local helper: read one export table as plain characters (no factors).
  # Replaces the previous sixteen copy-pasted read.csv(paste(...)) calls.
  read_tbl <- function(name) {
    read.csv(paste(wd, prefix, name, sep = ""), header = TRUE,
             stringsAsFactors = FALSE)
  }
  # Read for completeness with the original export; not used in the joins below.
  composite_link_entity <- read_tbl("composite_link_entity.csv")
  d13C <- read_tbl("d13C.csv")
  # Disambiguate the isotope-standard columns before joining d13C and d18O.
  d13C <- rename(d13C, iso_std_d13C = iso_std)
  d18O <- read_tbl("d18O.csv")
  d18O <- rename(d18O, iso_std_d18O = iso_std)
  dating_lamina <- read_tbl("dating_lamina.csv")
  dating <- read_tbl("dating.csv")
  entity_link_reference <- read_tbl("entity_link_reference.csv")
  entity <- read_tbl("entity.csv")
  gap <- read_tbl("gap.csv")
  hiatus <- read_tbl("hiatus.csv")
  notes <- read_tbl("notes.csv")
  original_chronology <- read_tbl("original_chronology.csv")
  reference <- read_tbl("reference.csv")
  sample <- read_tbl("sample.csv")
  sisal_chronology <- read_tbl("sisal_chronology.csv")
  site <- read_tbl("site.csv")
  # Site-level table: site -> entity -> references -> notes.
  site_tb <- left_join(site, entity, by = "site_id") %>%
    left_join(., entity_link_reference, by = "entity_id") %>%
    left_join(., reference, by = "ref_id") %>%
    left_join(., notes, by = "site_id") %>%
    mutate_at(vars(site_id, entity_id), as.numeric)
  # Dating table with a flag for entities that also have lamina dating.
  dating_tb <- dating %>% group_by(entity_id) %>%
    mutate(laminar_dated = if_else((entity_id %in% dating_lamina$entity_id), "yes", "no")) %>%
    mutate_at(vars(dating_id, depth_dating, dating_thickness, X14C_correction,
                   corr_age, corr_age_uncert_pos, corr_age_uncert_neg), as.numeric) %>%
    ungroup()
  # Same dating information restricted to entities whose status is "current".
  dating_tb_2 <- dating %>% left_join(., entity, by = "entity_id") %>%
    filter(entity_status == "current") %>%
    mutate_at(vars(dating_id, depth_dating, dating_thickness, X14C_correction,
                   corr_age, corr_age_uncert_pos, corr_age_uncert_neg), as.numeric)
  # Per-sample table joined to chronologies and both isotope tables.
  sample_tb <- join_all(list(sample, hiatus, gap, original_chronology,
                             sisal_chronology, d13C, d18O),
                        by = "sample_id", type = "left", match = "all") %>%
    mutate_at(vars(entity_id, sample_id, sample_thickness, depth_sample,
                   interp_age, interp_age_uncert_pos, interp_age_uncert_neg,
                   COPRA_age, COPRA_age_uncert_pos, COPRA_age_uncert_neg,
                   linear_age, linear_age_uncert_pos, linear_age_uncert_neg,
                   d13C_measurement, d13C_precision, d18O_measurement,
                   d18O_precision), as.numeric)
  list(site_tb, dating_tb, dating_tb_2, sample_tb)
}
# Unpack the four tables returned by load_data().
# NOTE(review): `data[1]` is a one-element list; `data[[1]]` would extract
# the data frame directly without the as.data.frame() round trip -- confirm
# before changing, since column naming can differ between the two forms.
data <- load_data(prefix, wd)
site_tb <- as.data.frame(data[1])
dating_tb <- as.data.frame(data[2])
dating_tb_2 <- as.data.frame(data[3]) # dating_tb_2 is the table Carla prepared additionally (see the function above); entity_status is included here
sample_tb <- as.data.frame(data[4])
# Build a table with four columns (site_id, entity_id, interp_age,
# d18O_measurement) for records with more than 50 d18O measurements in the
# last ~1150 years covering a sufficiently long period.
# 1) Keep only U/Th-dated records: drop C14 dates and lamination events.
#    %in% is used for set membership here; the previous
#    `date_type != c(...)` silently recycled the right-hand side instead of
#    testing "not one of".
dating_tb_3 <- dating_tb_2 %>%
  filter(date_used == "yes" &
           !(date_type %in% c("C14", "Event; end of laminations",
                              "Event; start of laminations")))
# 2) From the sample table keep only the entities selected above, restrict
#    to interpolated ages younger than 1100 y, and keep entities with more
#    than 50 such measurements.
sample_min50 <- sample_tb %>%
  filter(entity_id %in% dating_tb_3$entity_id) %>%
  filter(interp_age < 1100) %>%
  group_by(entity_id) %>%
  count() %>%
  filter(n > 50)
# 3) All measurements (< 1100 y) belonging to the entities selected above.
sample_min50_data <- sample_tb %>%
  filter(entity_id %in% sample_min50$entity_id) %>%
  select(entity_id, interp_age, d18O_measurement) %>%
  filter(interp_age < 1100)
# 4) Prepend a site_id column to the measurement table.
site_min50 <- site_tb %>%
  filter(entity_id %in% sample_min50$entity_id) %>%
  select(site_id, entity_id) %>%
  distinct(site_id, entity_id) %>%
  right_join(., sample_min50_data, by = "entity_id")
# Youngest/oldest measurement per entity; keep records that reach both the
# modern end (< 0 y BP) and the old end (> 1000 y BP) and span > 700 y.
# Four entities are excluded by id; `%in%` replaces the former `&&` chain,
# which is scalar-only (and an error on vectors since R 4.3).
site_period <- site_min50 %>%
  group_by(entity_id) %>%
  filter(!(entity_id %in% c(51, 85, 123, 435))) %>%
  summarise(min_corr_age = round(min(interp_age, na.rm = TRUE), digits = 2),
            max_corr_age = round(max(interp_age, na.rm = TRUE), digits = 2)) %>%
  filter(min_corr_age < 0 & max_corr_age > 1000) %>%
  mutate(period = max_corr_age - min_corr_age) %>%
  filter(period > 700)
entities_used <- site_period[, 1]$entity_id
# 5) Final table: keep only the entities whose record is long enough.
sample_final <- site_min50 %>%
  filter(entity_id %in% site_period$entity_id) %>%
  filter(interp_age < 1100)
sites_used <- sample_final %>% group_by(site_id) %>% count(site_id) %>% select(site_id)
sites_used <- sites_used[, 1]$site_id
sites_entities_used <- sample_final %>% count(site_id, entity_id) %>% select(site_id, entity_id)
# Drop intermediates and move to the project working directory (kept for
# compatibility with the downstream scripts that rely on it).
remove(dating_tb_2, dating_tb_3, sample_min50, sample_min50_data, site_period)
setwd("/home/ginnyweasley/Dokumente/01_Promotion/07_R_Code/201908_REKLIM_prepare/")
|
# Auto-extracted example script for the `roseflowers` dataset (generated by
# genthat from the 'dad' package documentation).
library(dad)
### Name: roseflowers
### Title: Rose flowers
### Aliases: roseflowers
### Keywords: datasets
### ** Examples
# Load the example data and summarise its two components.
data(roseflowers)
summary(roseflowers$variety)
summary(roseflowers$flower)
| /data/genthat_extracted_code/dad/examples/roseflowers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 205 | r | library(dad)
### Name: roseflowers
### Title: Rose flowers
### Aliases: roseflowers
### Keywords: datasets
### ** Examples
# Load the example data and summarise its two components.
data(roseflowers)
summary(roseflowers$variety)
summary(roseflowers$flower)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/books_objects.R
\name{Category}
\alias{Category}
\title{Category Object}
\usage{
Category(Category.items = NULL, items = NULL)
}
\arguments{
\item{Category.items}{The \link{Category.items} object or list of objects}
\item{items}{A list of onboarding categories}
}
\value{
Category object
}
\description{
Category Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Category functions: \code{\link{Category.items}}
}
| /googlebooksv1.auto/man/Category.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 564 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/books_objects.R
\name{Category}
\alias{Category}
\title{Category Object}
\usage{
Category(Category.items = NULL, items = NULL)
}
\arguments{
\item{Category.items}{The \link{Category.items} object or list of objects}
\item{items}{A list of onboarding categories}
}
\value{
Category object
}
\description{
Category Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Category functions: \code{\link{Category.items}}
}
|
#### -- Packrat Autoloader (version 0.4.9-3) -- ####
source("packrat/init.R")
# NOTE(review): these library() calls sit between the packrat autoloader
# markers; packrat may rewrite this section, so consider moving them below
# the end marker.
library(paleotree)
library(ape)
#### -- End Packrat Autoloader -- ####
| /.Rprofile | no_license | maurotcs/MyFirstPckg | R | false | false | 149 | rprofile | #### -- Packrat Autoloader (version 0.4.9-3) -- ####
source("packrat/init.R")
# NOTE(review): these library() calls sit between the packrat autoloader
# markers; packrat may rewrite this section, so consider moving them below
# the end marker.
library(paleotree)
library(ape)
#### -- End Packrat Autoloader -- ####
|
#####################################################################
# ~~~~~~~~~~~~~~~~~~
# Tumor subtype and cell type independent DNA methylation alterations
# associated with stage progression in invasive breast carcinoma
# ~~~~~~~~~~~~~~~~~~
# Way, G., Johnson, K., Christensen, B. 2015
#
# This script will store all of the custom functions that are used in
# the doRefFree analyses
#####################################################################
################################
# SubsetStage Function
################################
# This function will subset stage for the input dataframe of covariates
# Subset a covariate data frame to primary tumors of the given stages plus
# all solid tissue normal samples.
#
# @param data Data frame of sample covariates; must contain the columns
#   `pathologic_stage` and `sample.type`.
# @param stage Character vector of pathologic stage labels to keep.
# @return Data frame with the primary-tumor rows of each requested stage
#   (in the order the stages were given), followed by every
#   "Solid Tissue Normal" row of `data`.
subsetStage <- function(data, stage) {
  # Primary-tumor rows for each requested stage, in the given stage order.
  # seq_along() is safe for a zero-length `stage`, and collecting into a
  # list and rbind-ing once avoids the quadratic grow-by-rbind pattern.
  by_stage <- lapply(seq_along(stage), function(i) {
    tmp <- data[data$pathologic_stage == stage[i], ]
    # Accept only primary tumors.
    tmp[tmp$sample.type == "Primary Tumor", ]
  })
  # All solid tissue normal samples are always appended at the end.
  normals <- data[data$sample.type == "Solid Tissue Normal", ]
  do.call(rbind, c(by_stage, list(normals)))
}
################################
# customRefFree Function
################################
# This function will make the design matrices, run RefFreeEWAS, and save output files
# k is the number of dimensions; it can be prespecified if you do not wish to run EstDimRMT
# Fit a RefFreeEWAS model (reference-free adjustment for cell-type
# heterogeneity) for a tumor-vs-normal methylation comparison and bootstrap
# the adjusted and unadjusted coefficients.
#
# Args:
#   covariates: data frame of sample covariates (rownames = sample IDs in
#     `betas`); must contain `sample.type` and, when age = TRUE, `age.Dx`.
#   betas: beta-value matrix (CpGs x samples).
#   age: adjust the design matrix for age as a continuous covariate.
#   findRMT: estimate the latent dimension k via isva::EstDimRMT; when
#     FALSE the caller-supplied `k` is used as-is.
#   bootstraps: number of bootstrap iterations for BootRefFreeEwasModel.
#   save: NOTE(review) -- declared but never used inside the function.
#   iPOI: design-matrix column of the parameter of interest (2 is the
#     sample.type coefficient in both model variants).
#   k: latent dimension; only used when findRMT = FALSE.
#
# Returns: list of (1) list(bootstrap array, fitted model), (2) a results
#   matrix with columns coefAdj, coefUnadj, pvAdj, pvUnadj, Delta, pvDelta,
#   and (3) the dimension k that was used.
customRefFree <- function (covariates, betas, age = T, findRMT = T, bootstraps = 5,
save = F, iPOI = 2, k) {
# subset betas
# Keep only (and order) the beta columns matching the covariate samples.
newBeta <- betas[ ,rownames(covariates)]
# Null Model
mod0 <- model.matrix(~1, data = covariates)
if (age == T) {
# Model adjusting for age as a continuous variable
mod <- model.matrix(~ covariates$sample.type + covariates$age.Dx)
} else {
mod <- model.matrix(~ covariates$sample.type)
}
# Obtain Bstar
# Ordinary least-squares coefficients: Bstar = Y X (X'X)^-1.
tmpBstar <- as.matrix(newBeta[ , ]) %*% mod %*% solve(t(mod)%*%mod)
# Get Residual Matrix
tmpResid <- as.matrix(newBeta[ , ])-tmpBstar%*%t(mod)
# Use the function EstDimRMT in the package 'isva'
# (random-matrix-theory estimate of the number of latent components)
if (findRMT == T) {
require("isva")
RMT <- EstDimRMT(as.matrix(tmpResid), plot = F)
k <- RMT$dim
} else {
k = k
}
# Perform RefFreeEWAS
# Generate reffree model
rf0 <- RefFreeEwasModel(as.matrix(newBeta[ , ]), mod, k)
# Use model to bootstrap
rfb <- BootRefFreeEwasModel(rf0, bootstraps)
# Store in list
reffreeList <- list(rfb, rf0)
################################
# Extract RefFreeEWAS results
################################
# Summarize bootstrap results
rfbSummary <- summary(rfb)
# Extract standard error for all cpgs
SE <- rfbSummary[ , , , 2]
# Get standard errors for Beta and Bstar
seBeta <- apply(rfb[ , ,"B", ], 1:2, sd)
seBstar <- apply(rfb[ , ,"B*", ], 1:2, sd)
# Degrees of freedom
denDegFree <- dim(mod)[1] - dim(mod)[2]
residDf <-denDegFree
# Extract deltas
# NOTE(review): Delta is hard-coded to design column 2; if iPOI != 2 it
# would not correspond to the parameter of interest used below.
Delta <- rf0$Bstar[ ,2] - rf0$Beta[ ,2]
seDelta <- apply(rfb[ , , "B*", ]-rfb[ , , "B", ], 1:2, sd)
pvDelta <- 2 * pt(-abs(Delta)/seDelta[ ,2], denDegFree)
# Get Adjusted and Unadjusted P-values
# (two-sided t-tests of coefficient over bootstrap standard error)
pvAdj <- 2 * pt( -abs(rf0$Beta[ ,iPOI]/SE[ ,iPOI, 1]) , residDf)
pvUnadj <- 2 * pt( -abs(rf0$Bstar[ ,iPOI]/SE[ ,iPOI, 2]) , residDf)
# Get adjusted and unadjusted model coefficients
coefAdj<-rf0$Beta[ ,iPOI]
coefUnadj<-rf0$Bstar[ ,iPOI]
# Create a matrix and write to file
results <- cbind(coefAdj, coefUnadj, pvAdj, pvUnadj, Delta, pvDelta)
# Get the list ready to return
returnlist <- list(reffreeList, results, k)
return(returnlist)
}
################################
# findGoodQ Function
################################
# This function will return a set of genes, regions, gene regions, or cgs according to a given q value cutoff, stage, or subtype
# Find identifiers significant (q < qcut) in EVERY subtype for one stage.
#
# Args:
#   qcut: q-value cutoff below which a CpG counts as significant.
#   AnnotationList: named list of annotated CpG result data frames; the
#     names are matched against `stage` and each subtype via grepl().
#   stage: stage label used to subset AnnotationList by name.
#   subtypes: subtype labels; a hit must pass the cutoff in all of them.
#   consider: column whose unique values are intersected across subtypes
#     ("UCSC_RefGene_Name", "TargetID", "UCSC_RefGene_Group", or the
#     default "GeneRegion").
#   returning: anything but "All" returns the common identifiers as a
#     character vector; "All" returns a matrix of per-subtype median
#     q-values plus an overall median, sorted by the overall median.
findGoodQ <- function (qcut, AnnotationList, stage, subtypes, consider = "GeneRegion",
returning = "Values") {
# Subset the annotation list to only accept the given stages
AnnotationList <- AnnotationList[grepl(stage, names(AnnotationList))]
# Loop over the given subtypes and reassign the Annotation List
# (one combined data frame per subtype, named "<subtype>_<stage>")
WhatList <- list()
for (i in 1:length(subtypes)) {
tmp <- AnnotationList[grepl(subtypes[i], names(AnnotationList), fixed = T)]
WhatList[[i]] <- as.data.frame(tmp)
colnames(WhatList[[i]]) <- colnames(tmp[[1]])
names(WhatList)[i] <- paste(subtypes[i], "_", stage, sep = "")
}
# Reassign annotation list
AnnotationList <- WhatList
# Ask if we are returning values only, or the entire dataframe; if all, then initialize a list
if (returning == "All") {
AllFrames <- list()
}
# Get all the significant Q values
sigQ <- list()
for (i in 1:length(AnnotationList)) {
tmp <- AnnotationList[[i]]
# Remove cgs that were previously filtered
tmp <- tmp[!is.na(tmp$qvalues), ]
# only accept cgs that have met the q value cutoff
tmp <- tmp[tmp$qvalues < qcut, ]
# assign the unique gene regions, or Genes, or CpGs, to the internal list
if (consider == "UCSC_RefGene_Name") {
sigQ[[i]] <- unique(tmp$UCSC_RefGene_Name)
} else if(consider == "TargetID") {
sigQ[[i]] <- unique(tmp$TargetID)
} else if(consider == "UCSC_RefGene_Group") {
sigQ[[i]] <- unique(tmp$UCSC_RefGene_Group)
} else {
sigQ[[i]] <- unique(tmp$GeneRegion)
}
# store subtype specific information in the AllFrames list
if (returning == "All") {
AllFrames[[i]] <- tmp
}
}
# Get all the intersects of the significant hits according to the q val cut
for (j in 1:length(sigQ)) {
if (j == 1) {
compare <- sigQ[[j]]
} else {
compare <- intersect(compare, sigQ[[j]])
}
}
# Remove "NA NA" which are CpGs that are unmapped to gene:regions
compare <- compare[compare != "NA NA"]
if (returning != "All") {
# Return which were in common
return(as.character(compare))
} else {
# Build a (common identifier) x (subtype) matrix of median q-values.
# NOTE(review): the local variable `subset` below masks base::subset
# inside this loop.
FilteredFrame <- data.frame()
for (i in 1:length(AllFrames)) {
compareMedians <- c()
for (j in 1:length(compare)) {
subset <- AllFrames[[i]][AllFrames[[i]][ ,consider] %in% compare[j], ]
compareMedians <- c(compareMedians, median(subset$qvalues))
}
if (i == 1) {
FilteredFrame <- compareMedians
} else {
FilteredFrame <- cbind(FilteredFrame, compareMedians)
}
}
colnames(FilteredFrame) <- paste(subtypes, "medianQ", sep = "-")
rownames(FilteredFrame) <- compare
TotalMedian <- apply(FilteredFrame, 1, median)
FilteredFrame <- cbind(FilteredFrame, TotalMedian)
# NOTE(review): this branch ends in an assignment, so the sorted matrix
# is returned invisibly.
FilteredFrame <- FilteredFrame[order(FilteredFrame[ ,ncol(FilteredFrame)], decreasing = F), ]
}
}
| /II.RefFreeEWAS/Scripts/Functions/doRefFree_functions.R | permissive | Christensen-Lab-Dartmouth/brca_lowstage_DMGRs | R | false | false | 6,992 | r | #####################################################################
# ~~~~~~~~~~~~~~~~~~
# Tumor subtype and cell type independent DNA methylation alterations
# associated with stage progression in invasive breast carcinoma
# ~~~~~~~~~~~~~~~~~~
# Way, G., Johnson, K., Christensen, B. 2015
#
# This script will store all of the custom functions that are used in
# the doRefFree analyses
#####################################################################
################################
# SubsetStage Function
################################
# This function will subset stage for the input dataframe of covariates
# Subset a covariate data frame to primary tumors of the given stages plus
# all solid tissue normal samples.
#
# @param data Data frame of sample covariates; must contain the columns
#   `pathologic_stage` and `sample.type`.
# @param stage Character vector of pathologic stage labels to keep.
# @return Data frame with the primary-tumor rows of each requested stage
#   (in the order the stages were given), followed by every
#   "Solid Tissue Normal" row of `data`.
subsetStage <- function(data, stage) {
  # Primary-tumor rows for each requested stage, in the given stage order.
  # seq_along() is safe for a zero-length `stage`, and collecting into a
  # list and rbind-ing once avoids the quadratic grow-by-rbind pattern.
  by_stage <- lapply(seq_along(stage), function(i) {
    tmp <- data[data$pathologic_stage == stage[i], ]
    # Accept only primary tumors.
    tmp[tmp$sample.type == "Primary Tumor", ]
  })
  # All solid tissue normal samples are always appended at the end.
  normals <- data[data$sample.type == "Solid Tissue Normal", ]
  do.call(rbind, c(by_stage, list(normals)))
}
################################
# customRefFree Function
################################
# This function will make the design matrices, run RefFreeEWAS, and save output files
# k is the number of dimensions; it can be prespecified if you do not wish to run EstDimRMT
# Fit a RefFreeEWAS model (reference-free adjustment for cell-type
# heterogeneity) for a tumor-vs-normal methylation comparison and bootstrap
# the adjusted and unadjusted coefficients.
#
# Args:
#   covariates: data frame of sample covariates (rownames = sample IDs in
#     `betas`); must contain `sample.type` and, when age = TRUE, `age.Dx`.
#   betas: beta-value matrix (CpGs x samples).
#   age: adjust the design matrix for age as a continuous covariate.
#   findRMT: estimate the latent dimension k via isva::EstDimRMT; when
#     FALSE the caller-supplied `k` is used as-is.
#   bootstraps: number of bootstrap iterations for BootRefFreeEwasModel.
#   save: NOTE(review) -- declared but never used inside the function.
#   iPOI: design-matrix column of the parameter of interest (2 is the
#     sample.type coefficient in both model variants).
#   k: latent dimension; only used when findRMT = FALSE.
#
# Returns: list of (1) list(bootstrap array, fitted model), (2) a results
#   matrix with columns coefAdj, coefUnadj, pvAdj, pvUnadj, Delta, pvDelta,
#   and (3) the dimension k that was used.
customRefFree <- function (covariates, betas, age = T, findRMT = T, bootstraps = 5,
save = F, iPOI = 2, k) {
# subset betas
# Keep only (and order) the beta columns matching the covariate samples.
newBeta <- betas[ ,rownames(covariates)]
# Null Model
mod0 <- model.matrix(~1, data = covariates)
if (age == T) {
# Model adjusting for age as a continuous variable
mod <- model.matrix(~ covariates$sample.type + covariates$age.Dx)
} else {
mod <- model.matrix(~ covariates$sample.type)
}
# Obtain Bstar
# Ordinary least-squares coefficients: Bstar = Y X (X'X)^-1.
tmpBstar <- as.matrix(newBeta[ , ]) %*% mod %*% solve(t(mod)%*%mod)
# Get Residual Matrix
tmpResid <- as.matrix(newBeta[ , ])-tmpBstar%*%t(mod)
# Use the function EstDimRMT in the package 'isva'
# (random-matrix-theory estimate of the number of latent components)
if (findRMT == T) {
require("isva")
RMT <- EstDimRMT(as.matrix(tmpResid), plot = F)
k <- RMT$dim
} else {
k = k
}
# Perform RefFreeEWAS
# Generate reffree model
rf0 <- RefFreeEwasModel(as.matrix(newBeta[ , ]), mod, k)
# Use model to bootstrap
rfb <- BootRefFreeEwasModel(rf0, bootstraps)
# Store in list
reffreeList <- list(rfb, rf0)
################################
# Extract RefFreeEWAS results
################################
# Summarize bootstrap results
rfbSummary <- summary(rfb)
# Extract standard error for all cpgs
SE <- rfbSummary[ , , , 2]
# Get standard errors for Beta and Bstar
seBeta <- apply(rfb[ , ,"B", ], 1:2, sd)
seBstar <- apply(rfb[ , ,"B*", ], 1:2, sd)
# Degrees of freedom
denDegFree <- dim(mod)[1] - dim(mod)[2]
residDf <-denDegFree
# Extract deltas
# NOTE(review): Delta is hard-coded to design column 2; if iPOI != 2 it
# would not correspond to the parameter of interest used below.
Delta <- rf0$Bstar[ ,2] - rf0$Beta[ ,2]
seDelta <- apply(rfb[ , , "B*", ]-rfb[ , , "B", ], 1:2, sd)
pvDelta <- 2 * pt(-abs(Delta)/seDelta[ ,2], denDegFree)
# Get Adjusted and Unadjusted P-values
# (two-sided t-tests of coefficient over bootstrap standard error)
pvAdj <- 2 * pt( -abs(rf0$Beta[ ,iPOI]/SE[ ,iPOI, 1]) , residDf)
pvUnadj <- 2 * pt( -abs(rf0$Bstar[ ,iPOI]/SE[ ,iPOI, 2]) , residDf)
# Get adjusted and unadjusted model coefficients
coefAdj<-rf0$Beta[ ,iPOI]
coefUnadj<-rf0$Bstar[ ,iPOI]
# Create a matrix and write to file
results <- cbind(coefAdj, coefUnadj, pvAdj, pvUnadj, Delta, pvDelta)
# Get the list ready to return
returnlist <- list(reffreeList, results, k)
return(returnlist)
}
################################
# findGoodQ Function
################################
# This function will return a set of genes, regions, gene regions, or cgs according to a given q value cutoff, stage, or subtype
# findGoodQ: intersect significant hits (genes, regions, gene-regions, or
# CpGs) across subtypes for a given stage, at a q-value cutoff.
#
# Args:
#   qcut:           q-value cutoff; only rows with qvalues < qcut are kept.
#   AnnotationList: named list of annotation data.frames; element names must
#                   encode stage and subtype (matched with grepl).
#   stage:          stage label used to subset the list by name.
#   subtypes:       character vector of subtype labels to intersect over.
#   consider:       identifier column: "UCSC_RefGene_Name", "TargetID",
#                   "UCSC_RefGene_Group", or (default) "GeneRegion".
#   returning:      "Values" returns the common identifiers as a character
#                   vector; "All" returns an (identifier x subtype) matrix of
#                   median q-values plus a TotalMedian column, sorted ascending.
findGoodQ <- function (qcut, AnnotationList, stage, subtypes, consider = "GeneRegion",
                       returning = "Values") {
  # Keep only the list elements whose names match the requested stage
  AnnotationList <- AnnotationList[grepl(stage, names(AnnotationList))]
  # Collapse each subtype's element(s) into a single data.frame
  WhatList <- list()
  for (i in seq_along(subtypes)) {
    tmp <- AnnotationList[grepl(subtypes[i], names(AnnotationList), fixed = TRUE)]
    WhatList[[i]] <- as.data.frame(tmp)
    # as.data.frame() prefixes column names with the list name; restore them
    colnames(WhatList[[i]]) <- colnames(tmp[[1]])
    names(WhatList)[i] <- paste(subtypes[i], "_", stage, sep = "")
  }
  AnnotationList <- WhatList
  # When returning everything, keep each subtype's filtered frame around
  if (returning == "All") {
    AllFrames <- list()
  }
  # Collect the identifiers passing the q-value cutoff, per subtype
  sigQ <- list()
  for (i in seq_along(AnnotationList)) {
    tmp <- AnnotationList[[i]]
    # Drop CpGs that were previously filtered (q-value is NA)
    tmp <- tmp[!is.na(tmp$qvalues), ]
    # Only accept CpGs that meet the q-value cutoff
    tmp <- tmp[tmp$qvalues < qcut, ]
    # Pick the identifier column; anything unrecognized falls back to
    # "GeneRegion", matching the original if/else chain
    keyCol <- if (consider %in% c("UCSC_RefGene_Name", "TargetID",
                                  "UCSC_RefGene_Group")) consider else "GeneRegion"
    sigQ[[i]] <- unique(tmp[[keyCol]])
    if (returning == "All") {
      AllFrames[[i]] <- tmp
    }
  }
  # Intersect the significant identifiers across all subtypes, then drop
  # "NA NA", i.e. CpGs unmapped to a gene:region
  compare <- Reduce(intersect, sigQ)
  compare <- compare[compare != "NA NA"]
  if (returning != "All") {
    # Return only the identifiers common to every subtype
    return(as.character(compare))
  }
  # Build an (identifier x subtype) matrix of median q-values; preallocated
  # so a single subtype or a single identifier still yields a matrix
  FilteredFrame <- matrix(NA_real_, nrow = length(compare), ncol = length(AllFrames))
  for (i in seq_along(AllFrames)) {
    frame <- AllFrames[[i]]
    for (j in seq_along(compare)) {
      hits <- frame[frame[, consider] %in% compare[j], ]
      FilteredFrame[j, i] <- median(hits$qvalues)
    }
  }
  colnames(FilteredFrame) <- paste(subtypes, "medianQ", sep = "-")
  rownames(FilteredFrame) <- compare
  # Append the per-identifier median across subtypes and sort by it
  TotalMedian <- apply(FilteredFrame, 1, median)
  FilteredFrame <- cbind(FilteredFrame, TotalMedian)
  FilteredFrame <- FilteredFrame[order(FilteredFrame[, ncol(FilteredFrame)],
                                       decreasing = FALSE), , drop = FALSE]
  return(FilteredFrame)
}
|
# Example from statnet.common: 'sort' and 'order' methods for data.frame
# and matrix objects, which order rows lexicographically.
library(statnet.common)
### Name: order
### Title: Implement the 'sort' and 'order' methods for 'data.frame' and
### 'matrix', sorting it in lexicographic order.
### Aliases: order order.default order.data.frame order.matrix
### sort.data.frame
### Keywords: manip
### ** Examples
data(iris)
head(iris)
# order(iris) returns the row permutation that sorts iris lexicographically
head(order(iris))
# sort(iris) returns the rows themselves in that order
head(sort(iris))
# Sanity check: sorting equals indexing with the ordering permutation
stopifnot(identical(sort(iris),iris[order(iris),]))
| /data/genthat_extracted_code/statnet.common/examples/sort.data.frame.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 406 | r | library(statnet.common)
# Example from statnet.common (duplicate extract; the original file's
# 'library(statnet.common)' call precedes this block): 'sort'/'order'
# methods for data.frame and matrix, ordering rows lexicographically.
### Name: order
### Title: Implement the 'sort' and 'order' methods for 'data.frame' and
### 'matrix', sorting it in lexicographic order.
### Aliases: order order.default order.data.frame order.matrix
### sort.data.frame
### Keywords: manip
### ** Examples
data(iris)
head(iris)
# order(iris) returns the row permutation that sorts iris lexicographically
head(order(iris))
# sort(iris) returns the rows themselves in that order
head(sort(iris))
# Sanity check: sorting equals indexing with the ordering permutation
stopifnot(identical(sort(iris),iris[order(iris),]))
|
# Hierarchical Bayesian ANOVA of mean reaction times (mRT) across four
# experimental conditions with subject-level random effects, fitted with
# JAGS via 'runjags'. Produces posterior plots for condition cell means,
# their contrast, sigma, and a standardised effect size.
# NOTE(review): depends on a hard-coded local path and on Kruschke's
# DBDA2E helper scripts (diagMCMC, plotPost, openGraph, gammaShRaFromModeSD);
# not runnable outside the original author's machine.
# set path and require and source necessary functions and packages
path = "/Users/jossando/trabajo/E299/05_Code/02_Analysis/rcode/bielefeld_examples"
setwd(path)
source("openGraphSaveGraph.R") # utilities by Kruschke from http://doingbayesiandataanalysis.blogspot.de/
source("HDIofMCMC.R")
source("DBDA2E-utilities.R")
require(runjags)
require(ggplot2)
# Get the data (assumes the .Rdata file provides the data.frame 'dFc' -- TODO confirm)
load(paste(path,"/dataE299_bielefeld.Rdata",sep=''))
# this time we take all four conditions
dFc$cond = factor(dFc$cond )
cond = as.numeric(dFc$cond)
subj = as.numeric(as.factor(dFc$subjIndx))
# another column to make condition 2-3-4 as only one condition
cond2 = cond
cond2[cond>2] = 2
# data for RJAGS as a list; priors are centred on the observed grand mean
# with deliberately wide spread (sd x10; uniform sigma bounds sd/100..sd*100)
dataList = list(y = dFc$mRT,
                cond = cond,
                cond2 = cond2,
                subj = subj,
                nCond = length(unique(cond)),
                nTotal = length(dFc$mRT),
                nSubj = length(unique(subj)),
                MPriormean = mean(dFc$mRT),
                MPriorstd = sd(dFc$mRT)*10,
                SPriorL = sd(dFc$mRT)/100,
                SPriorH = sd(dFc$mRT)*100,
                aGammaShRa = gammaShRaFromModeSD(mode = sd(dFc$mRT)/2,sd = sd(dFc$mRT)*2))
# model specification as a string (JAGS code, written to model4.txt below)
modelString = "
model {
for ( i in 1:nTotal ) {
y[i] ~ dnorm( mu[i] , 1/sigma^2)
mu[i] <- a0 + a1[cond[i]] + aS[subj[i]]
# folowwing lines are for model comparison
#mucond[i] <- a0 + a1[cond[i]] + aS[subj[i]]
#mucond2[i] <- a0 + a1[cond2[i]] + aS[subj[i]]
#mu[i] <- equals(mC,1)*mucond[i] + equals(mC,2)*mucond2[i]
}
sigma ~ dunif(SPriorL, SPriorH )
a0 ~ dnorm(MPriormean,1/MPriorstd^2)
for ( j in 1:nCond ) {
a1[j] ~ dnorm(0, 1/(aSigma)^2)
}
aSigma ~ dgamma( aGammaShRa[1] , aGammaShRa[2] )
for ( j in 1:nSubj ) { aS[j] ~ dnorm( 0.0 , 1/(sSigma)^2 ) }
sSigma ~ dgamma( aGammaShRa[1] , aGammaShRa[2] )
# Convert a0,a[] to sum-to-zero b0,b[] :
for ( j in 1:nCond ) { for (s in 1:nSubj){
m[j,s] <- a0 + a1[j] + aS[s] }} # cell means
b0 <- mean( m[1:nCond,1:nSubj] )
for ( jS in 1:nSubj ) { bS[jS] <- mean( m[1:nCond, jS] ) - b0}
for ( jC in 1:nCond ) { b1[jC] <- mean( m[jC,1:nSubj] ) - b0}
# this is for model comparison
mC ~ dcat( mPriorProb[] )
mPriorProb[1] <- .5
mPriorProb[2] <- .5
}"
writeLines(modelString, con="model4.txt" )
# run the model: 4 chains, 1000 adaptation + 1000 burn-in, 10000 samples
parameters = c("b0","b1","sigma","aSigma","bS","mC")
runjagsModel = run.jags("model4.txt",
                        data = dataList,
                        adapt = 1000,
                        monitor = parameters,
                        n.chains = 4,
                        burnin = 1000,
                        sample = 10000)
# check convergence
codaSamples = as.mcmc.list(runjagsModel)
mcmcChain = as.matrix(as.mcmc(runjagsModel))
# for ( parName in colnames(codaSamples[[1]]) ) {
#   diagMCMC( codaObject=codaSamples, parName=parName)
#   saveGraph(file=paste(path,"/figures/","convergence10000",parName,sep=""), type="png")
# }
# Posterior of the cell means for conditions 1 and 2 (b0 + b1[j])
openGraph(4,3)
plotPost( mcmcChain[,"b0"]+mcmcChain[,"b1[1]"] , main="b0+b1[1]" ,
          cenTend="mean", credMass=0.95)
openGraph(4,3)
plotPost( mcmcChain[,"b0"]+mcmcChain[,"b1[2]"] , main="b0+b1[2]" ,
          cenTend="mean", credMass=0.95)
# Posterior of the condition 2 - condition 1 contrast, with ROPE around 0
openGraph(4,3)
plotPost( mcmcChain[,"b1[2]"]-mcmcChain[,"b1[1]"] , main="b1[2]-b1[1]" ,
          cenTend="mean", credMass=0.95, ROPE = c(-.01,.01), compVal=0 )
openGraph(4,3)
plotPost( mcmcChain[,"sigma"] , main="sigma" ,
          cenTend="mean", credMass=0.95)
# Standardised effect size: contrast divided by the residual sd
effect_size = (mcmcChain[,"b1[2]"]-mcmcChain[,"b1[1]"])/mcmcChain[,"sigma"]
openGraph(4,3)
plotPost( effect_size, main="ES" ,
          cenTend="mean", credMass=0.95 , ROPE = c(-.1,.1), compVal=0 )
# | /05_Code/02_Analysis/rcode/bielefeld_examples/bielefeld_Bayes_exercises/continuous4.R | no_license | KSuljic/E299 | R | false | false | 3,815 | r | # set path and require and source necessary functions and packages
# Hierarchical Bayesian ANOVA of mean reaction times (mRT) across four
# experimental conditions with subject-level random effects, fitted with
# JAGS via 'runjags'. Produces posterior plots for condition cell means,
# their contrast, sigma, and a standardised effect size.
# NOTE(review): depends on a hard-coded local path and on Kruschke's
# DBDA2E helper scripts (diagMCMC, plotPost, openGraph, gammaShRaFromModeSD);
# not runnable outside the original author's machine.
path = "/Users/jossando/trabajo/E299/05_Code/02_Analysis/rcode/bielefeld_examples"
setwd(path)
source("openGraphSaveGraph.R") # utilities by Kruschke from http://doingbayesiandataanalysis.blogspot.de/
source("HDIofMCMC.R")
source("DBDA2E-utilities.R")
require(runjags)
require(ggplot2)
# Get the data (assumes the .Rdata file provides the data.frame 'dFc' -- TODO confirm)
load(paste(path,"/dataE299_bielefeld.Rdata",sep=''))
# this time we take all four conditions
dFc$cond = factor(dFc$cond )
cond = as.numeric(dFc$cond)
subj = as.numeric(as.factor(dFc$subjIndx))
# another column to make condition 2-3-4 as only one condition
cond2 = cond
cond2[cond>2] = 2
# data for RJAGS as a list; priors are centred on the observed grand mean
# with deliberately wide spread (sd x10; uniform sigma bounds sd/100..sd*100)
dataList = list(y = dFc$mRT,
                cond = cond,
                cond2 = cond2,
                subj = subj,
                nCond = length(unique(cond)),
                nTotal = length(dFc$mRT),
                nSubj = length(unique(subj)),
                MPriormean = mean(dFc$mRT),
                MPriorstd = sd(dFc$mRT)*10,
                SPriorL = sd(dFc$mRT)/100,
                SPriorH = sd(dFc$mRT)*100,
                aGammaShRa = gammaShRaFromModeSD(mode = sd(dFc$mRT)/2,sd = sd(dFc$mRT)*2))
# model specification as a string (JAGS code, written to model4.txt below)
modelString = "
model {
for ( i in 1:nTotal ) {
y[i] ~ dnorm( mu[i] , 1/sigma^2)
mu[i] <- a0 + a1[cond[i]] + aS[subj[i]]
# folowwing lines are for model comparison
#mucond[i] <- a0 + a1[cond[i]] + aS[subj[i]]
#mucond2[i] <- a0 + a1[cond2[i]] + aS[subj[i]]
#mu[i] <- equals(mC,1)*mucond[i] + equals(mC,2)*mucond2[i]
}
sigma ~ dunif(SPriorL, SPriorH )
a0 ~ dnorm(MPriormean,1/MPriorstd^2)
for ( j in 1:nCond ) {
a1[j] ~ dnorm(0, 1/(aSigma)^2)
}
aSigma ~ dgamma( aGammaShRa[1] , aGammaShRa[2] )
for ( j in 1:nSubj ) { aS[j] ~ dnorm( 0.0 , 1/(sSigma)^2 ) }
sSigma ~ dgamma( aGammaShRa[1] , aGammaShRa[2] )
# Convert a0,a[] to sum-to-zero b0,b[] :
for ( j in 1:nCond ) { for (s in 1:nSubj){
m[j,s] <- a0 + a1[j] + aS[s] }} # cell means
b0 <- mean( m[1:nCond,1:nSubj] )
for ( jS in 1:nSubj ) { bS[jS] <- mean( m[1:nCond, jS] ) - b0}
for ( jC in 1:nCond ) { b1[jC] <- mean( m[jC,1:nSubj] ) - b0}
# this is for model comparison
mC ~ dcat( mPriorProb[] )
mPriorProb[1] <- .5
mPriorProb[2] <- .5
}"
writeLines(modelString, con="model4.txt" )
# run the model: 4 chains, 1000 adaptation + 1000 burn-in, 10000 samples
parameters = c("b0","b1","sigma","aSigma","bS","mC")
runjagsModel = run.jags("model4.txt",
                        data = dataList,
                        adapt = 1000,
                        monitor = parameters,
                        n.chains = 4,
                        burnin = 1000,
                        sample = 10000)
# check convergence
codaSamples = as.mcmc.list(runjagsModel)
mcmcChain = as.matrix(as.mcmc(runjagsModel))
# for ( parName in colnames(codaSamples[[1]]) ) {
#   diagMCMC( codaObject=codaSamples, parName=parName)
#   saveGraph(file=paste(path,"/figures/","convergence10000",parName,sep=""), type="png")
# }
# Posterior of the cell means for conditions 1 and 2 (b0 + b1[j])
openGraph(4,3)
plotPost( mcmcChain[,"b0"]+mcmcChain[,"b1[1]"] , main="b0+b1[1]" ,
          cenTend="mean", credMass=0.95)
openGraph(4,3)
plotPost( mcmcChain[,"b0"]+mcmcChain[,"b1[2]"] , main="b0+b1[2]" ,
          cenTend="mean", credMass=0.95)
# Posterior of the condition 2 - condition 1 contrast, with ROPE around 0
openGraph(4,3)
plotPost( mcmcChain[,"b1[2]"]-mcmcChain[,"b1[1]"] , main="b1[2]-b1[1]" ,
          cenTend="mean", credMass=0.95, ROPE = c(-.01,.01), compVal=0 )
openGraph(4,3)
plotPost( mcmcChain[,"sigma"] , main="sigma" ,
          cenTend="mean", credMass=0.95)
# Standardised effect size: contrast divided by the residual sd
effect_size = (mcmcChain[,"b1[2]"]-mcmcChain[,"b1[1]"])/mcmcChain[,"sigma"]
openGraph(4,3)
plotPost( effect_size, main="ES" ,
          cenTend="mean", credMass=0.95 , ROPE = c(-.1,.1), compVal=0 )
# |
# Simulation: compare standardised (Pearson's r) vs unstandardised
# (regression slope) effect sizes under random sampling vs extreme-group
# sampling from a bivariate normal population.
# Load packages
# Use 'install.packages("MASS")' if not available
library(MASS)
library(plyr)
# The function below will randomly generate datasets
# from a bivariate normal distribution.
# First define this distribution's variance-covariance matrix.
Sigma <- matrix(c(6, 1.3, 1.3, 2), 2, 2)
# Now define a simulation function that
# draws a random sample with 2x100 observations
# from a bivariate normal distribution
# and computes standardised and raw effect sizes
# for the complete dataset and
# for the dataset with the middle half removed.
simulate.fnc <- function() {
  # Generate 100 pairs of datapoints from a bivariate normal distribution;
  # output them as a dataframe, sorted on X1 from low to high.
  dat <- arrange(data.frame(mvrnorm(n = 100, rep(3, 2), Sigma)),
                 X1)
  # Compute effect sizes
  # Pearson correlation for the whole dataset
  r.full <- with(dat, cor(X1, X2))
  # Pearson correlation when the middle part of the dataset isn't considered.
  r.reduced <- with(dat[c(1:20, 81:100),], cor(X1, X2))
  # Regression coefficient for the whole dataset
  b.full <- coef(with(dat, lm(X2 ~ X1)))[2]
  # Regression coefficient when the middle part of the dataset isn't considered.
  b.reduced <- coef(with(dat[c(1:20, 81:100),], lm(X2 ~ X1)))[2]
  # Return these four effect sizes
  return(list(r.full, r.reduced, b.full, b.reduced))
}
# Run the above function 10000 times and save the results
# (sims is a 4 x 10000 list-matrix: row = effect-size type, column = run)
sims <- replicate(10000, simulate.fnc())
# Plot effect sizes
# Two plots side by side
par(mfrow = c(1, 2), las = 1, bty = "l", lty = 1)
# Standardised effect sizes: extreme-group vs full-sample correlations
plot(unlist(sims[1,]), xlim = range(c(unlist(sims[1,]), unlist(sims[2,]))),
     unlist(sims[2,]), ylim = range(c(unlist(sims[1,]), unlist(sims[2,]))),
     xlab = "Pearson's r - random sampling",
     ylab = "Pearson's r - extreme groups",
     pch = ".",
     main = "Standardised effect sizes\n(correlation coefficients)")
# Identity line: points above it indicate inflated effect sizes
abline(a = 0, b = 1, lty = 2)
# Unstandardised effect sizes: extreme-group vs full-sample slopes
plot(unlist(sims[3,]), xlim = range(c(unlist(sims[3,]), unlist(sims[4,]))),
     unlist(sims[4,]), ylim = range(c(unlist(sims[3,]), unlist(sims[4,]))),
     xlab = "Beta coefficient - random sampling",
     ylab = "Beta coefficient - extreme groups",
     pch = ".",
     main = "Unstandardised effect sizes\n(regression coefficients)")
abline(a = 0, b = 1, lty = 2)
## For the second simulation, just replace 'dat[c(1:20, 81:100),]' in the above by 'dat[c(71:100),]' | /downloads/SimulationEffectSizes.R | permissive | janhove/janhove.github.io | R | false | false | 2,495 | r | # Load packages
# Simulation (duplicate extract): compare standardised (Pearson's r) vs
# unstandardised (regression slope) effect sizes under random sampling vs
# extreme-group sampling from a bivariate normal population.
# Use 'install.packages("MASS")' if not available
library(MASS)
library(plyr)
# The function below will randomly generate datasets
# from a bivariate normal distribution.
# First define this distribution's variance-covariance matrix.
Sigma <- matrix(c(6, 1.3, 1.3, 2), 2, 2)
# Now define a simulation function that
# draws a random sample with 2x100 observations
# from a bivariate normal distribution
# and computes standardised and raw effect sizes
# for the complete dataset and
# for the dataset with the middle half removed.
simulate.fnc <- function() {
  # Generate 100 pairs of datapoints from a bivariate normal distribution;
  # output them as a dataframe, sorted on X1 from low to high.
  dat <- arrange(data.frame(mvrnorm(n = 100, rep(3, 2), Sigma)),
                 X1)
  # Compute effect sizes
  # Pearson correlation for the whole dataset
  r.full <- with(dat, cor(X1, X2))
  # Pearson correlation when the middle part of the dataset isn't considered.
  r.reduced <- with(dat[c(1:20, 81:100),], cor(X1, X2))
  # Regression coefficient for the whole dataset
  b.full <- coef(with(dat, lm(X2 ~ X1)))[2]
  # Regression coefficient when the middle part of the dataset isn't considered.
  b.reduced <- coef(with(dat[c(1:20, 81:100),], lm(X2 ~ X1)))[2]
  # Return these four effect sizes
  return(list(r.full, r.reduced, b.full, b.reduced))
}
# Run the above function 10000 times and save the results
# (sims is a 4 x 10000 list-matrix: row = effect-size type, column = run)
sims <- replicate(10000, simulate.fnc())
# Plot effect sizes
# Two plots side by side
par(mfrow = c(1, 2), las = 1, bty = "l", lty = 1)
# Standardised effect sizes: extreme-group vs full-sample correlations
plot(unlist(sims[1,]), xlim = range(c(unlist(sims[1,]), unlist(sims[2,]))),
     unlist(sims[2,]), ylim = range(c(unlist(sims[1,]), unlist(sims[2,]))),
     xlab = "Pearson's r - random sampling",
     ylab = "Pearson's r - extreme groups",
     pch = ".",
     main = "Standardised effect sizes\n(correlation coefficients)")
# Identity line: points above it indicate inflated effect sizes
abline(a = 0, b = 1, lty = 2)
# Unstandardised effect sizes: extreme-group vs full-sample slopes
plot(unlist(sims[3,]), xlim = range(c(unlist(sims[3,]), unlist(sims[4,]))),
     unlist(sims[4,]), ylim = range(c(unlist(sims[3,]), unlist(sims[4,]))),
     xlab = "Beta coefficient - random sampling",
     ylab = "Beta coefficient - extreme groups",
     pch = ".",
     main = "Unstandardised effect sizes\n(regression coefficients)")
abline(a = 0, b = 1, lty = 2)
## For the second simulation, just replace 'dat[c(1:20, 81:100),]' in the above by 'dat[c(71:100),]' |
# COVID-19 Canada Open Data Working Group Push Updated Data to GitHub #
# Author: Jean-Paul R. Soucy #
# Push updated files to the GitHub repository using git from the command line
# GitHub repository: https://github.com/ccodwg/Covid19Canada
# Note: This script assumes the working directory is set to the root directory of the project
# This is most easily achieved by using the provided Covid19Canada.Rproj in RStudio
# Authentication: You must authenticate your Google account before running the rest of the script.
# This is performed in the conductor.R script. See details there.
# GitHub: This script assumes a stored SSH key for GitHub.
# list files in Google Drive data folder (requires googledrive authentication)
files <- drive_ls("ccodwg/data")
# download and format data notes, append current date as header
# Remove any stale local copy first. Guarded with file.exists(): an
# unconditional file.remove() emits a warning when the file is absent.
if (file.exists("data_notes.txt")) {
  file.remove("data_notes.txt")
}
# Retry the download until the file actually lands on disk.
# NOTE(review): this loops forever if the download keeps failing; consider
# adding a retry cap if that ever becomes a problem.
while(!file.exists("data_notes.txt")) {
  drive_download(
    files[files$name == "data_notes_covid19", ],
    path = "data_notes.txt",
    overwrite = TRUE
  )
}
data_notes <- suppressWarnings(readLines("data_notes.txt"))
# Header line carries today's date in the America/Toronto time zone
header <- paste0("New data: ", as.character(date(with_tz(Sys.time(), tzone = "America/Toronto"))), ". See data notes.\n\n")
data_notes <- paste0(header, paste(data_notes, collapse = "\n"), "\n")
# write data notes
data_notes <- gsub("\n\n\n", "\n\n", data_notes) # get rid of extra line breaks
cat(data_notes, file = "data_notes.txt")
# stage data update
system2(
  command = "git",
  args = c("add",
           "official_datasets/",
           "timeseries_canada/",
           "timeseries_hr/",
           "timeseries_prov",
           "data_notes.txt",
           "update_time.txt"
  )
)
# commit data update (the full data notes text is the commit message)
system2(
  command = "git",
  args = c("commit",
           "-m",
           paste0('"', data_notes, '"')
  )
)
# push data update
system2(
  command = "git",
  args = c("push")
)
| /scripts/github_update.R | permissive | ccodwg/Covid19Canada | R | false | false | 1,856 | r | # COVID-19 Canada Open Data Working Group Push Updated Data to GitHub #
# COVID-19 Canada Open Data Working Group Push Updated Data to GitHub #
# Author: Jean-Paul R. Soucy #
# Push updated files to the GitHub repository using git from the command line
# GitHub repository: https://github.com/ccodwg/Covid19Canada
# Note: This script assumes the working directory is set to the root directory of the project
# This is most easily achieved by using the provided Covid19Canada.Rproj in RStudio
# Authentication: You must authenticate your Google account before running the rest of the script.
# This is performed in the conductor.R script. See details there.
# GitHub: This script assumes a stored SSH key for GitHub.
# list files in Google Drive data folder (requires googledrive authentication)
files <- drive_ls("ccodwg/data")
# download and format data notes, append current date as header
# NOTE(review): file.remove() warns when the file does not exist; the while
# loop below retries the download until the file is present (no retry cap)
file.remove("data_notes.txt")
while(!file.exists("data_notes.txt")) {
  drive_download(
    files[files$name == "data_notes_covid19", ],
    path = "data_notes.txt",
    overwrite = TRUE
  )
}
data_notes <- suppressWarnings(readLines("data_notes.txt"))
# Header line carries today's date in the America/Toronto time zone
header <- paste0("New data: ", as.character(date(with_tz(Sys.time(), tzone = "America/Toronto"))), ". See data notes.\n\n")
data_notes <- paste0(header, paste(data_notes, collapse = "\n"), "\n")
# write data notes
data_notes <- gsub("\n\n\n", "\n\n", data_notes) # get rid of extra line breaks
cat(data_notes, file = "data_notes.txt")
# stage data update
system2(
  command = "git",
  args = c("add",
           "official_datasets/",
           "timeseries_canada/",
           "timeseries_hr/",
           "timeseries_prov",
           "data_notes.txt",
           "update_time.txt"
  )
)
# commit data update (the full data notes text is the commit message)
system2(
  command = "git",
  args = c("commit",
           "-m",
           paste0('"', data_notes, '"')
  )
)
# push data update
system2(
  command = "git",
  args = c("push")
)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 30086
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30085
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30085
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b21_PR_5_75.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 10408
c no.of clauses 30086
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 30085
c
c QBFLIB/Sauer-Reimer/ITC99/b21_PR_5_75.qdimacs 10408 30086 E1 [1] 0 230 10137 30085 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b21_PR_5_75/b21_PR_5_75.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 719 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 30086
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30085
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30085
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b21_PR_5_75.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 10408
c no.of clauses 30086
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 30085
c
c QBFLIB/Sauer-Reimer/ITC99/b21_PR_5_75.qdimacs 10408 30086 E1 [1] 0 230 10137 30085 RED
|
## UI ##
# Shiny UI definition: maps and clusters SRSW pottery sherds by
# morphological features. Three tabs: an interactive network view with
# per-sherd detail plots and per-cluster summaries, a memo/credits page,
# and a distance-matrix heatmap.
# NOTE(review): depends on objects defined elsewhere in the app (e.g.
# 'listcolors') and on shinythemes/visNetwork/plotly/DT/highcharter being
# attached by the app's setup code.
ui = tags$div(fluidPage(theme = shinytheme("journal"),
  headerPanel("Map & Cluster SRSW based on morphological features"),
  tabsetPanel(
    # Tab 1: interactive sherd network plus per-sherd summary plots
    tabPanel("Network",
      fluidRow(
        column(width = 8, align = "center",
          # 'listcolors' must be defined in the app's global scope
          selectInput("nodecolor", h5("Select node color"), choices = listcolors))
      ),
      fluidRow(
        column(width = 8,
          visNetworkOutput("network_graph", height = "1000px")
        ),
        column(width = 4,
          # Morphological feature plots for the currently selected sherd
          plotOutput("plotSherdHeightWidthRatio", height = "100px"),
          br(),
          plotOutput("plotSherdIncl", height = "100px"),
          br(),
          plotOutput("plotSherdRimDiam", height = "100px"),
          br(),
          plotOutput("plotSherdWTat2.3", height = "100px"),
          br(),
          plotOutput("plotSherdHeight", height = "100px"),
          br(),
          imageOutput("selectedSherdImage"),
          br(),
          column(width = 2,
            tableOutput("sherdInfo")
          ),br(),
          column(width = 2,
            # tableOutput("sherdClusterInfo")
            plotOutput("plotClusterInfo", width = "400px", height = "200px"))
        )
      ),
      fluidRow(
        column(width = 12,
          # Buttons switching between the two clustering solutions
          actionButton("cluster_1", "4 Clusters", icon("glasses"), style = "color: #000000; background-color: #ffffff; border-color: #000000"),
          actionButton("cluster_2", "6 Clusters", icon("glasses"), style = "color: #000000; background-color: #ffffff; border-color: #000000")
        )
      ),
      fluidRow(
        column(width = 12,
          # Per-cluster FIS plots, laid out two per row
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_1", height = "200px"), plotOutput("FISplot_by_cluster_2", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_3", height = "200px"), plotOutput("FISplot_by_cluster_4.ext", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_4.int", height = "200px"), plotOutput("FISplot_by_cluster_5", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_6", height = "200px"), plotOutput("FISplot_by_cluster_7", height = "200px"))
        )
      ),
      fluidRow(
        column(width = 8,
          # Beeswarm plot with selection feedback printed below it
          plotlyOutput("beeplot", height = "1200px"),
          verbatimTextOutput("beeselected")
        ),
        column(width = 4,
          tableOutput('tableContexts'))
      ),
      br(),br(),
      fluidRow(
        column(12, align = "center",
          # br(),
          DTOutput('FIStable')
        ), br(),
        column(3, align = "left",
          imageOutput("imageColorLegend") # , width = "100%"
        )
      ),
      br(),br()
    ),
    # Tab 2: project description and credits
    tabPanel("Memo",
      fluidRow(
        h2("Fuzzy type definition and description", align = "center"),
        br(),br(),br(),br(),br(),
        h4("This works examines whether there are morphologically different groups within the SRSW type 1B150.", align = "center"),
        br(),br(),br(),br(),br(),br(),br(),br(),br(),
        h5("This work is part of the FWO G088319N Back To Basics, With A Twist.",
           align = "center", style = "font-family: 'Times'; color: #808080; font-style: italic;"),
        h5("Implementation: Danai Kafetzaki. Date: 27 May 2020 - 19 October 2021",
           align = "center", style = "font-family: 'Times'; color: #808080; font-style: italic;"),
        br(),br(),
        fluidRow(
          column(12,
            column(6, align = "right",
              imageOutput("imageLogo1" , width = "100%", height = "100%")),
            column(6, align = "left",
              imageOutput("imageLogo2" , width = "100%", height = "100%"))
          ),
          br(),br()
        )
      )
    ),
    # Tab 3: pairwise distance matrix heatmap
    tabPanel("Distance Matrix",
      fluidRow(
        column(width = 8, align = "center",
          highchartOutput('distM_plot', height = "1200px", width = "1200px")
        ),
        column(width = 2, align = "center",
          plotOutput("plotBoth", height = "200px")
        )
      )
    )
  )
)
)
| /R/ui.R | permissive | kafetzakid/morphotypeShiny | R | false | false | 7,202 | r | ## UI ##
# Shiny UI definition (duplicate extract): maps and clusters SRSW pottery
# sherds by morphological features. Three tabs: an interactive network view
# with per-sherd detail plots and per-cluster summaries, a memo/credits
# page, and a distance-matrix heatmap.
# NOTE(review): depends on objects defined elsewhere in the app (e.g.
# 'listcolors') and on shinythemes/visNetwork/plotly/DT/highcharter being
# attached by the app's setup code.
ui = tags$div(fluidPage(theme = shinytheme("journal"),
  headerPanel("Map & Cluster SRSW based on morphological features"),
  tabsetPanel(
    # Tab 1: interactive sherd network plus per-sherd summary plots
    tabPanel("Network",
      fluidRow(
        column(width = 8, align = "center",
          # 'listcolors' must be defined in the app's global scope
          selectInput("nodecolor", h5("Select node color"), choices = listcolors))
      ),
      fluidRow(
        column(width = 8,
          visNetworkOutput("network_graph", height = "1000px")
        ),
        column(width = 4,
          # Morphological feature plots for the currently selected sherd
          plotOutput("plotSherdHeightWidthRatio", height = "100px"),
          br(),
          plotOutput("plotSherdIncl", height = "100px"),
          br(),
          plotOutput("plotSherdRimDiam", height = "100px"),
          br(),
          plotOutput("plotSherdWTat2.3", height = "100px"),
          br(),
          plotOutput("plotSherdHeight", height = "100px"),
          br(),
          imageOutput("selectedSherdImage"),
          br(),
          column(width = 2,
            tableOutput("sherdInfo")
          ),br(),
          column(width = 2,
            # tableOutput("sherdClusterInfo")
            plotOutput("plotClusterInfo", width = "400px", height = "200px"))
        )
      ),
      fluidRow(
        column(width = 12,
          # Buttons switching between the two clustering solutions
          actionButton("cluster_1", "4 Clusters", icon("glasses"), style = "color: #000000; background-color: #ffffff; border-color: #000000"),
          actionButton("cluster_2", "6 Clusters", icon("glasses"), style = "color: #000000; background-color: #ffffff; border-color: #000000")
        )
      ),
      fluidRow(
        column(width = 12,
          # Per-cluster FIS plots, laid out two per row
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_1", height = "200px"), plotOutput("FISplot_by_cluster_2", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_3", height = "200px"), plotOutput("FISplot_by_cluster_4.ext", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_4.int", height = "200px"), plotOutput("FISplot_by_cluster_5", height = "200px")),
          splitLayout(cellWidths = c("50%", "50%"), plotOutput("FISplot_by_cluster_6", height = "200px"), plotOutput("FISplot_by_cluster_7", height = "200px"))
        )
      ),
      fluidRow(
        column(width = 8,
          # Beeswarm plot with selection feedback printed below it
          plotlyOutput("beeplot", height = "1200px"),
          verbatimTextOutput("beeselected")
        ),
        column(width = 4,
          tableOutput('tableContexts'))
      ),
      br(),br(),
      fluidRow(
        column(12, align = "center",
          # br(),
          DTOutput('FIStable')
        ), br(),
        column(3, align = "left",
          imageOutput("imageColorLegend") # , width = "100%"
        )
      ),
      br(),br()
    ),
    # Tab 2: project description and credits
    tabPanel("Memo",
      fluidRow(
        h2("Fuzzy type definition and description", align = "center"),
        br(),br(),br(),br(),br(),
        h4("This works examines whether there are morphologically different groups within the SRSW type 1B150.", align = "center"),
        br(),br(),br(),br(),br(),br(),br(),br(),br(),
        h5("This work is part of the FWO G088319N Back To Basics, With A Twist.",
           align = "center", style = "font-family: 'Times'; color: #808080; font-style: italic;"),
        h5("Implementation: Danai Kafetzaki. Date: 27 May 2020 - 19 October 2021",
           align = "center", style = "font-family: 'Times'; color: #808080; font-style: italic;"),
        br(),br(),
        fluidRow(
          column(12,
            column(6, align = "right",
              imageOutput("imageLogo1" , width = "100%", height = "100%")),
            column(6, align = "left",
              imageOutput("imageLogo2" , width = "100%", height = "100%"))
          ),
          br(),br()
        )
      )
    ),
    # Tab 3: pairwise distance matrix heatmap
    tabPanel("Distance Matrix",
      fluidRow(
        column(width = 8, align = "center",
          highchartOutput('distM_plot', height = "1200px", width = "1200px")
        ),
        column(width = 2, align = "center",
          plotOutput("plotBoth", height = "200px")
        )
      )
    )
  )
)
)
## Plot SMC++ effective-population-size (Ne) trajectories for four
## Selasphorus populations, faceted one panel per population, and save
## the figure as a PDF for the manuscript.
## NOTE(review): setwd() plus absolute ~/Dropbox paths tie this script to
## one machine; acceptable for a personal analysis script.
library(magrittr);library(plyr);library(pbapply);library(ggplot2);library(RColorBrewer)
setwd("~/Dropbox/selasphorus/smcpp/")
# plotdata.csv must provide columns x (generations), y (Ne) and label
# (population) -- these are the columns used by aes()/facet_grid() below.
smc <- read.csv("plotdata.csv",stringsAsFactors = F)
# Rewrite the sasin labels as two-line "species\nsubspecies" facet strips.
smc$label[smc$label=="sasin"] <- "sasin\nsasin"
smc$label[smc$label=="sedentarius"] <- "sasin\nsedentarius"
# Fix the top-to-bottom facet order.
smc$label <- factor(smc$label,levels=c("rufus","sasin\nsasin","sasin\nsedentarius","calliope"))
pdf("~/Dropbox/selasphorus/ms/fig/smcpp_fig.pdf",width=3.25,height=4)
ggplot(data=smc,aes(x=x,y=y))+facet_grid(label~.)+
theme_classic()+
theme(axis.title=element_text(size=8),
strip.text=element_text(size=8),
axis.text=element_text(size=8),
panel.grid.major=element_line(color="grey",size=0.25),
strip.background = element_blank())+
xlab("Generations")+ylab("Ne")+
ylim(0,1.25e5)+xlim(2e3,1e5)+
# scale_color_manual(values=c(rep(brewer.pal(4,"RdYlBu")[2],11),
# rep(brewer.pal(4,"RdYlBu")[3],11),
# rep(brewer.pal(4,"RdYlBu")[4],11),
# rep("violet",11)),guide=F)+
# Shaded band at 18000-26500 generations; presumably marks the Last
# Glacial Maximum -- TODO confirm against the manuscript.
geom_rect(aes(xmin=18000,xmax=26500,ymin=0,ymax=1.25e5),fill="grey",alpha=0.5)+
geom_path(lwd=.35,alpha=0.8)
dev.off()
# model <- readLines("models/rufus/model.final.json")
# rho <- model[grep("rho",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# theta <- model[grep("theta",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# alpha <- model[grep("alpha",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# N0 <- model[grep("N0",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# knots <- model[(grep("knots",model)+1):(grep("knots",model)+10)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
# hidden_states <- model[(grep("hidden_states",model)+2):(grep("model",model)-3)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
# y <- model[(grep("y",model)+1):(grep("rho",model)-3)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
#
# df <- data.frame(knots,y)
# df$times <- df$knots*(2*N0)*2
# df$endtimes <- c(df$times[2:length(df$times)],df$times[length(df$times)])
# df$pops <- df$y*N0
#
# ggplot(data=df,aes(x=times,y=pops,xend=endtimes,yend=pops))+
# scale_x_log10()+
# ylim(-1e4,1.5e5)+
# geom_segment() | /scripts/smcpp_plots.R | no_license | cjbattey/selasphorus_evolution | R | false | false | 2,417 | r | #plot smcpp results
library(magrittr);library(plyr);library(pbapply);library(ggplot2);library(RColorBrewer)
setwd("~/Dropbox/selasphorus/smcpp/")
smc <- read.csv("plotdata.csv",stringsAsFactors = F)
smc$label[smc$label=="sasin"] <- "sasin\nsasin"
smc$label[smc$label=="sedentarius"] <- "sasin\nsedentarius"
smc$label <- factor(smc$label,levels=c("rufus","sasin\nsasin","sasin\nsedentarius","calliope"))
pdf("~/Dropbox/selasphorus/ms/fig/smcpp_fig.pdf",width=3.25,height=4)
ggplot(data=smc,aes(x=x,y=y))+facet_grid(label~.)+
theme_classic()+
theme(axis.title=element_text(size=8),
strip.text=element_text(size=8),
axis.text=element_text(size=8),
panel.grid.major=element_line(color="grey",size=0.25),
strip.background = element_blank())+
xlab("Generations")+ylab("Ne")+
ylim(0,1.25e5)+xlim(2e3,1e5)+
# scale_color_manual(values=c(rep(brewer.pal(4,"RdYlBu")[2],11),
# rep(brewer.pal(4,"RdYlBu")[3],11),
# rep(brewer.pal(4,"RdYlBu")[4],11),
# rep("violet",11)),guide=F)+
geom_rect(aes(xmin=18000,xmax=26500,ymin=0,ymax=1.25e5),fill="grey",alpha=0.5)+
geom_path(lwd=.35,alpha=0.8)
dev.off()
# model <- readLines("models/rufus/model.final.json")
# rho <- model[grep("rho",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# theta <- model[grep("theta",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# alpha <- model[grep("alpha",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# N0 <- model[grep("N0",model)] %>% strsplit(": |,") %>% unlist() %>% .[2] %>% as.numeric()
# knots <- model[(grep("knots",model)+1):(grep("knots",model)+10)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
# hidden_states <- model[(grep("hidden_states",model)+2):(grep("model",model)-3)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
# y <- model[(grep("y",model)+1):(grep("rho",model)-3)] %>%
# sapply(function(e) gsub(" |,","",e)) %>%
# as.numeric()
#
# df <- data.frame(knots,y)
# df$times <- df$knots*(2*N0)*2
# df$endtimes <- c(df$times[2:length(df$times)],df$times[length(df$times)])
# df$pops <- df$y*N0
#
# ggplot(data=df,aes(x=times,y=pops,xend=endtimes,yend=pops))+
# scale_x_log10()+
# ylim(-1e4,1.5e5)+
# geom_segment() |
## Pull the PTM specimen sheet from Google Sheets, select every record
## determined (final or field determination) as Pyropia or Calliarthron
## that has photos on the server, copy the matching photo files from the
## lab share into ./results, and write the matching metadata to CSV.
library(tidyverse)
library(ptm)
library(googlesheets4)
library(here)
# more general script for finding everything of a certain genus
pml <- "https://docs.google.com/spreadsheets/d/1vzVIT5gjQ0yCGwbAyqB4f8pltOkLfWz27TIFTxnm9hk/edit#gid=0"
# skip = 1 drops an extra header row; col_types pins every column's type.
ptm <- googlesheets4::read_sheet(pml, sheet = "Copy of All PTM data", skip = 1,
col_types = "iccccccDcccccccccccccccccccccccccc-iiccc")
# Pyropia records (matched in either determination column) with photos.
pyr <- ptm %>%
filter(str_detect(`Final determination`, "Pyropia") |
str_detect(`Determination in the field`, "Pyropia"),
!is.na(`Photos on server`))
# Same selection for Calliarthron.
cal <- ptm %>%
filter(str_detect(`Final determination`, "Calliarthron") |
str_detect(`Determination in the field`, "Calliarthron"),
!is.na(`Photos on server`))
# Build the "PTM<number>" tokens used to match photo file names.
find_photo <- rbind(pyr, cal) %>%
select(`PTM#`) %>%
mutate(ptm = paste0("PTM",`PTM#`))
path <- "/Volumes/martonelab/Photos/1501-2000"
# NOTE(review): list.files(pattern=) is a regex matched anywhere in the
# file name, so "PTM150" also matches "PTM1501" etc. -- confirm that PTM
# numbers cannot be prefixes of each other in this folder.
my_files <- find_photo$ptm %>%
map(~list.files(path = path,
pattern = .,
all.files = T,
full.names = T))
# identify the folders
new_folder <- paste0(here(),"/results")
# copy the files to the new folder
my_files <- my_files %>%
map(~file.copy(., new_folder))
photo_meta <- rbind(pyr, cal)
write_csv(photo_meta, "results/found_pyropia_calliarthron.csv",
na = "")
| /find_pyropia_calliarthron.R | no_license | laijasmine/finding_photos | R | false | false | 1,345 | r | library(tidyverse)
library(ptm)
library(googlesheets4)
library(here)
# more general script for finding everything of a certain genus
pml <- "https://docs.google.com/spreadsheets/d/1vzVIT5gjQ0yCGwbAyqB4f8pltOkLfWz27TIFTxnm9hk/edit#gid=0"
ptm <- googlesheets4::read_sheet(pml, sheet = "Copy of All PTM data", skip = 1,
col_types = "iccccccDcccccccccccccccccccccccccc-iiccc")
pyr <- ptm %>%
filter(str_detect(`Final determination`, "Pyropia") |
str_detect(`Determination in the field`, "Pyropia"),
!is.na(`Photos on server`))
cal <- ptm %>%
filter(str_detect(`Final determination`, "Calliarthron") |
str_detect(`Determination in the field`, "Calliarthron"),
!is.na(`Photos on server`))
find_photo <- rbind(pyr, cal) %>%
select(`PTM#`) %>%
mutate(ptm = paste0("PTM",`PTM#`))
path <- "/Volumes/martonelab/Photos/1501-2000"
my_files <- find_photo$ptm %>%
map(~list.files(path = path,
pattern = .,
all.files = T,
full.names = T))
# identify the folders
new_folder <- paste0(here(),"/results")
# copy the files to the new folder
my_files <- my_files %>%
map(~file.copy(., new_folder))
photo_meta <- rbind(pyr, cal)
write_csv(photo_meta, "results/found_pyropia_calliarthron.csv",
na = "")
|
#!/usr/local/bin/Rscript
### source: https://stackoverflow.com/questions/35194048/using-r-how-to-calculate-the-distance-from-one-point-to-a-line
# Distance in 3D from point x0 to the infinite line through x1 and x2.
# Identity used: d = |(x1 - x2) x (x0 - x1)| / |x1 - x2|.
# The original computed the triangle area |v3| / 2 and then immediately
# multiplied by 2 again; the factor of 2 cancels, so it is dropped here.
#
# x0, x1, x2: numeric vectors of length 3. Returns a single number.
# NOTE: x1 == x2 (a zero-length "line") yields NaN, as in the original.
dist3d <- function(x0, x1, x2) {
  v1 <- x1 - x2          # direction of the line
  v2 <- x0 - x1          # from a point on the line to x0
  v3 <- cross3d_prod(v1, v2)
  sqrt(sum(v3 * v3)) / sqrt(sum(v1 * v1))
}

# Cross product of two length-3 numeric vectors.
# Builds the result with a single c() call instead of growing an empty,
# untyped vector() element by element as the original did.
cross3d_prod <- function(v1, v2) {
  c(v1[2] * v2[3] - v1[3] * v2[2],
    v1[3] * v2[1] - v1[1] * v2[3],
    v1[1] * v2[2] - v1[2] * v2[1])
}
| /PointLine3D.R | no_license | marketavlkova/2021_PromoterEvolution | R | false | false | 515 | r | #!/usr/local/bin/Rscript
### source: https://stackoverflow.com/questions/35194048/using-r-how-to-calculate-the-distance-from-one-point-to-a-line
dist3d <- function(x0, x1, x2) {
v1 <- x1 - x2
v2 <- x0 - x1
v3 <- cross3d_prod(v1, v2)
area <- sqrt(sum(v3 * v3)) / 2
d <- 2 * area / sqrt(sum(v1 * v1))
return(d)
}
cross3d_prod <- function(v1, v2) {
v3 <- vector()
v3[1] <- v1[2] * v2[3] - v1[3] * v2[2]
v3[2] <- v1[3] * v2[1] - v1[1] * v2[3]
v3[3] <- v1[1] * v2[2] - v1[2] * v2[1]
return(v3)
}
|
## Assemble the full transformed parameter vector (mu, gamma_0, gamma_1)
## from the optimized and fixed pieces, reject parameters outside the
## (0, 1) transformed scale with -Inf, and otherwise evaluate the
## DAMOCLES log-likelihood on the natural scale p / (1 - p).
DAMOCLES_loglik_choosepar <- function(trparsopt, trparsfix, idparsopt, idparsfix, phy, pa1, pa2, pchoice, edgeTList) {
  presabs <- cbind(pa1, pa2)
  trpars <- rep(0, 3)
  trpars[idparsopt] <- trparsopt
  if (length(idparsfix) > 0) {
    trpars[idparsfix] <- trparsfix
  }
  # Transformed parameters must stay inside [0, 1]; anything outside is
  # an invalid proposal from the optimizer and is rejected outright.
  if (any(trpars > 1) || any(trpars < 0)) {
    return(-Inf)
  }
  # Back-transform to the natural scale and evaluate the likelihood.
  DAMOCLES_loglik(phy, presabs, trpars / (1 - trpars), pchoice, edgeTList)
}
## Nelder-Mead (downhill simplex) minimization of the NEGATIVE DAMOCLES
## log-likelihood, working on the transformed parameter scale where each
## parameter lies in (0, 1) (natural value = p / (1 - p)).
##
##   trparsopt / trparsfix   - transformed optimized / fixed parameters
##   idparsopt / idparsfix   - their indices within (mu, gamma_0, gamma_1)
##   phy, pa1, pa2, edgeTList - data passed through to the likelihood
##   pars2 - controls: c(reltolx, reltolf, abstolx, maxiter, pchoice)
##
## Prints one progress line per iteration (iteration count, best vertex
## back-transformed to the natural scale, its log-likelihood, last move)
## and returns invisibly a list with par (best transformed vertex),
## fvalues (best log-likelihood) and conv (0 = converged, 1 = maxiter hit).
DAMOCLES_simplex = function(trparsopt,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pars2,edgeTList)
{
pchoice = pars2[5]
numpar = length(trparsopt)
## Setting up initial simplex
## Columns of v are the numpar + 1 vertices: each non-first vertex is the
## starting point with one coordinate perturbed by ~5% on the natural
## scale (or set to a small constant when that coordinate is exactly 0).
v = t(matrix(rep(trparsopt,each = numpar + 1),nrow = numpar + 1))
for(i in 1:numpar)
{
parsoptff = 1.05 * trparsopt[i]/(1 - trparsopt[i])
trparsoptff = parsoptff/(1 + parsoptff)
fac = trparsoptff/trparsopt[i]
if(v[i,i + 1] == 0)
{
v[i,i + 1] = 0.00025
} else {
v[i,i + 1] = v[i,i + 1] * min(1.05,fac)
}
}
# Objective value at every vertex (negative log-likelihood -> minimize).
fv = rep(0,numpar + 1)
for(i in 1:(numpar + 1))
{
fv[i] = -DAMOCLES_loglik_choosepar(v[,i],trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
}
how = "initial"
itercount = 1
# Progress line: iteration, best vertex back-transformed to the natural
# scale, its log-likelihood and the last simplex move.
string = itercount
for(i in 1:numpar)
{
string = paste(string, v[i,1]/(1 - v[i,1]), sep = " ")
}
string = paste(string, -fv[1], how, "\n", sep = " ")
cat(string)
flush.console()
# Sort vertices so column 1 is always the current best.
tmp = order(fv)
if(numpar == 1)
{
v = matrix(v[tmp],nrow = 1,ncol = 2)
} else {
v = v[,tmp]
}
fv = fv[tmp]
## Iterate until stopping criterion is reached
reltolx = pars2[1]
reltolf = pars2[2]
abstolx = pars2[3]
maxiter = pars2[4]
# Standard Nelder-Mead coefficients: reflection (rho), expansion (chi),
# contraction (psi) and shrink (sigma).
rh = 1
ch = 2
ps = 0.5
si = 0.5
# v2 holds numpar + 1 copies of the best vertex; it is the reference
# point for the simplex-size part of the convergence test.
# NOTE(review): the loop condition combines the f-tolerance and
# x-tolerance clauses with `&` on a logical SUM, relying on nonzero
# numbers being truthy -- it works, but is easy to misread.
v2 = t(matrix(rep(v[,1],each = numpar + 1),nrow = numpar + 1))
while(itercount <= maxiter & ( ( is.nan(max(abs(fv - fv[1]))) | (max(abs(fv - fv[1])) - reltolf * abs(fv[1]) > 0) ) + ( (max(abs(v - v2) - reltolx * abs(v2)) > 0) | (max(abs(v - v2)) - abstolx > 0) ) ) )
{
## Calculate reflection point
if(numpar == 1)
{
xbar = v[1]
} else {
xbar = rowSums(v[,1:numpar])/numpar
}
xr = (1 + rh) * xbar - rh * v[,numpar + 1]
fxr = -DAMOCLES_loglik_choosepar(xr,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxr < fv[1])
{
## Calculate expansion point
xe = (1 + rh * ch) * xbar - rh * ch * v[,numpar + 1]
fxe = -DAMOCLES_loglik_choosepar(xe,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxe < fxr)
{
v[,numpar + 1] = xe
fv[numpar + 1] = fxe
how = "expand"
} else {
v[,numpar + 1] = xr
fv[numpar + 1] = fxr
how = "reflect"
}
} else {
if(fxr < fv[numpar])
{
v[,numpar + 1] = xr
fv[numpar + 1] = fxr
how = "reflect"
} else {
if(fxr < fv[numpar + 1])
{
## Calculate outside contraction point
xco = (1 + ps * rh) * xbar - ps * rh * v[,numpar + 1]
fxco = -DAMOCLES_loglik_choosepar(xco,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxco <= fxr)
{
v[,numpar + 1] = xco
fv[numpar + 1] = fxco
how = "contract outside"
} else {
how = "shrink"
}
} else {
## Calculate inside contraction point
xci = (1 - ps) * xbar + ps * v[,numpar + 1]
fxci = -DAMOCLES_loglik_choosepar(xci,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxci < fv[numpar + 1])
{
v[,numpar + 1] = xci
fv[numpar + 1] = fxci
how = "contract inside"
} else {
how = "shrink"
}
}
# Neither contraction helped: shrink all non-best vertices toward the
# best one and re-evaluate them.
if(how == "shrink")
{
for(j in 2:(numpar + 1))
{
v[,j] = v[,1] + si * (v[,j] - v[,1])
fv[j] = -DAMOCLES_loglik_choosepar(v[,j],trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
}
}
}
}
# Re-sort the vertices (best first) and print this iteration's progress.
tmp = order(fv)
if(numpar == 1)
{
v = matrix(v[tmp],nrow = 1,ncol = 2)
} else {
v = v[,tmp]
}
fv = fv[tmp]
itercount = itercount + 1
string = itercount;
for(i in 1:numpar)
{
string = paste(string, v[i,1]/(1 - v[i,1]), sep = " ")
}
string = paste(string, -fv[1], how, "\n", sep = " ")
cat(string)
flush.console()
v2 = t(matrix(rep(v[,1],each = numpar + 1),nrow = numpar + 1))
}
if(itercount < maxiter)
{
cat("Optimization has terminated successfully.","\n")
} else {
cat("Maximum number of iterations has been exceeded.","\n")
}
# Report the best vertex and flip the sign back to a log-likelihood.
out = list(par = v[,1], fvalues = -fv[1], conv = as.numeric(itercount > maxiter))
invisible(out)
}
## Maximum-likelihood estimation of the DAMOCLES model parameters
## (mu, gamma_0, gamma_1) given a phylogeny and presence/absence data.
##
##   phy         - phylo object for the species pool.
##   pa          - matrix with tip labels in column 1 and presence/absence
##                 in column 2; a bare vector is coerced below.
##   initparsopt - initial values for the parameters being optimized.
##   idparsopt   - indices (within 1:3) of the optimized parameters.
##   parsfix     - values of the fixed parameters.
##   idparsfix   - indices (within 1:3) of the fixed parameters.
##   pars2       - optimization controls c(reltolx, reltolf, abstolx, maxiter);
##                 element 5 is filled with pchoice before the simplex call.
##   pchoice     - root-state choice passed through to the likelihood.
##
## Returns invisibly a one-row data.frame (mu, gamma_0, gamma_1, loglik,
## df, conv), or nothing when the input is incoherent or the optimization
## did not converge.
DAMOCLES_ML = function(
phy = rcoal(10),
pa = matrix(c(phy$tip.label,sample(c(0,1),Ntip(phy),replace = TRUE)),nrow = Ntip(phy),ncol = 2),
initparsopt = c(0.1,0.1),
idparsopt = 1:length(initparsopt),
parsfix = 0,
idparsfix = (1:3)[-idparsopt],
pars2 = c(1E-3,1E-4,1E-5,1000),
pchoice = 0)
{
  # A bare presence/absence vector is coerced into the (tip label, p/a)
  # two-column matrix layout the likelihood code expects.
  if(!is.matrix(pa)) { pa = matrix(c(phy$tip.label, pa), nrow = length(pa), ncol = 2) }
  # Suppress warnings for the duration of this call only.  The original
  # set options(warn = -1) and never restored it, leaking the setting
  # into the caller's session.
  oldopts = options(warn = -1)
  on.exit(options(oldopts), add = TRUE)
  idpars = sort(c(idparsopt, idparsfix))
  # Together the optimized and fixed indices must cover 1:3 exactly once.
  # The explicit length check avoids the silent recycling of the original
  # sum(idpars == (1:3)) comparison when the lengths differ.
  if(length(idpars) != 3 || any(idpars != 1:3))
  {
    cat("The parameters to be optimized and fixed are incoherent.")
  } else {
    namepars = c("mu","gamma_0","gamma_1")
    if(length(namepars[idparsopt]) == 0) { optstr = "nothing" } else { optstr = namepars[idparsopt] }
    cat("You are optimizing",optstr,"\n")
    if(length(namepars[idparsfix]) == 0) { fixstr = "nothing" } else { fixstr = namepars[idparsfix] }
    cat("You are fixing",fixstr,"\n")
    # Optimize on the transformed scale p/(1 + p), which maps [0, Inf)
    # onto [0, 1); Inf maps to exactly 1.
    trparsopt = initparsopt/(1 + initparsopt)
    trparsfix = parsfix/(1 + parsfix)
    trparsfix[parsfix == Inf] = 1
    flush.console()
    pars2[5] = pchoice
    edgeTList = compute_edgeTList(phy)
    out = DAMOCLES_simplex(trparsopt = trparsopt,trparsfix = trparsfix,idparsopt = idparsopt,idparsfix = idparsfix,phy = phy,pa1 = pa[,1],pa2 = pa[,2],pars2,edgeTList)
    if(out$conv > 0)
    {
      cat("Optimization has not converged. Try again with different starting values.\n")
    } else {
      # Back-transform the ML estimates to the natural parameter scale
      # and assemble the full three-parameter vector.
      MLtrpars = unlist(out$par)
      MLpars = MLtrpars/(1 - MLtrpars)
      out$par = list(MLpars)
      MLpars1 = rep(0,3)
      MLpars1[idparsopt] = MLpars
      if(length(idparsfix) != 0) { MLpars1[idparsfix] = parsfix }
      ML = as.numeric(unlist(out$fvalues))
      out2 = data.frame(mu = MLpars1[1], gamma_0 = MLpars1[2], gamma_1 = MLpars1[3], loglik = ML, df = length(initparsopt), conv = unlist(out$conv))
      s1 = sprintf('Maximum likelihood parameter estimates: mu: %f, gamma_0: %f, gamma_1: %f',MLpars1[1],MLpars1[2],MLpars1[3])
      s2 = sprintf('Maximum loglikelihood: %f',out$fvalues)
      cat("\n",s1,"\n",s2,"\n")
      invisible(out2)
    }
  }
}
| /DAMOCLES/R/DAMOCLES_ML.R | no_license | ingted/R-Examples | R | false | false | 7,454 | r | DAMOCLES_loglik_choosepar = function(trparsopt,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
{
pa = cbind(pa1,pa2)
trpars1 = rep(0,3)
trpars1[idparsopt] = trparsopt
if(length(idparsfix) != 0) { trpars1[idparsfix] = trparsfix }
if(max(trpars1) > 1 || min(trpars1) < 0)
{
loglik = -Inf
} else {
pars1 = trpars1/(1 - trpars1)
loglik = DAMOCLES_loglik(phy,pa,pars1,pchoice,edgeTList)
}
return(loglik)
}
DAMOCLES_simplex = function(trparsopt,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pars2,edgeTList)
{
pchoice = pars2[5]
numpar = length(trparsopt)
## Setting up initial simplex
v = t(matrix(rep(trparsopt,each = numpar + 1),nrow = numpar + 1))
for(i in 1:numpar)
{
parsoptff = 1.05 * trparsopt[i]/(1 - trparsopt[i])
trparsoptff = parsoptff/(1 + parsoptff)
fac = trparsoptff/trparsopt[i]
if(v[i,i + 1] == 0)
{
v[i,i + 1] = 0.00025
} else {
v[i,i + 1] = v[i,i + 1] * min(1.05,fac)
}
}
fv = rep(0,numpar + 1)
for(i in 1:(numpar + 1))
{
fv[i] = -DAMOCLES_loglik_choosepar(v[,i],trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
}
how = "initial"
itercount = 1
string = itercount
for(i in 1:numpar)
{
string = paste(string, v[i,1]/(1 - v[i,1]), sep = " ")
}
string = paste(string, -fv[1], how, "\n", sep = " ")
cat(string)
flush.console()
tmp = order(fv)
if(numpar == 1)
{
v = matrix(v[tmp],nrow = 1,ncol = 2)
} else {
v = v[,tmp]
}
fv = fv[tmp]
## Iterate until stopping criterion is reached
reltolx = pars2[1]
reltolf = pars2[2]
abstolx = pars2[3]
maxiter = pars2[4]
rh = 1
ch = 2
ps = 0.5
si = 0.5
v2 = t(matrix(rep(v[,1],each = numpar + 1),nrow = numpar + 1))
while(itercount <= maxiter & ( ( is.nan(max(abs(fv - fv[1]))) | (max(abs(fv - fv[1])) - reltolf * abs(fv[1]) > 0) ) + ( (max(abs(v - v2) - reltolx * abs(v2)) > 0) | (max(abs(v - v2)) - abstolx > 0) ) ) )
{
## Calculate reflection point
if(numpar == 1)
{
xbar = v[1]
} else {
xbar = rowSums(v[,1:numpar])/numpar
}
xr = (1 + rh) * xbar - rh * v[,numpar + 1]
fxr = -DAMOCLES_loglik_choosepar(xr,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxr < fv[1])
{
## Calculate expansion point
xe = (1 + rh * ch) * xbar - rh * ch * v[,numpar + 1]
fxe = -DAMOCLES_loglik_choosepar(xe,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxe < fxr)
{
v[,numpar + 1] = xe
fv[numpar + 1] = fxe
how = "expand"
} else {
v[,numpar + 1] = xr
fv[numpar + 1] = fxr
how = "reflect"
}
} else {
if(fxr < fv[numpar])
{
v[,numpar + 1] = xr
fv[numpar + 1] = fxr
how = "reflect"
} else {
if(fxr < fv[numpar + 1])
{
## Calculate outside contraction point
xco = (1 + ps * rh) * xbar - ps * rh * v[,numpar + 1]
fxco = -DAMOCLES_loglik_choosepar(xco,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxco <= fxr)
{
v[,numpar + 1] = xco
fv[numpar + 1] = fxco
how = "contract outside"
} else {
how = "shrink"
}
} else {
## Calculate inside contraction point
xci = (1 - ps) * xbar + ps * v[,numpar + 1]
fxci = -DAMOCLES_loglik_choosepar(xci,trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
if(fxci < fv[numpar + 1])
{
v[,numpar + 1] = xci
fv[numpar + 1] = fxci
how = "contract inside"
} else {
how = "shrink"
}
}
if(how == "shrink")
{
for(j in 2:(numpar + 1))
{
v[,j] = v[,1] + si * (v[,j] - v[,1])
fv[j] = -DAMOCLES_loglik_choosepar(v[,j],trparsfix,idparsopt,idparsfix,phy,pa1,pa2,pchoice,edgeTList)
}
}
}
}
tmp = order(fv)
if(numpar == 1)
{
v = matrix(v[tmp],nrow = 1,ncol = 2)
} else {
v = v[,tmp]
}
fv = fv[tmp]
itercount = itercount + 1
string = itercount;
for(i in 1:numpar)
{
string = paste(string, v[i,1]/(1 - v[i,1]), sep = " ")
}
string = paste(string, -fv[1], how, "\n", sep = " ")
cat(string)
flush.console()
v2 = t(matrix(rep(v[,1],each = numpar + 1),nrow = numpar + 1))
}
if(itercount < maxiter)
{
cat("Optimization has terminated successfully.","\n")
} else {
cat("Maximum number of iterations has been exceeded.","\n")
}
out = list(par = v[,1], fvalues = -fv[1], conv = as.numeric(itercount > maxiter))
invisible(out)
}
DAMOCLES_ML = function(
phy = rcoal(10),
pa = matrix(c(phy$tip.label,sample(c(0,1),Ntip(phy),replace = T)),nrow = Ntip(phy),ncol = 2),
initparsopt = c(0.1,0.1),
idparsopt = 1:length(initparsopt),
parsfix = 0,
idparsfix = (1:3)[-idparsopt],
pars2 = c(1E-3,1E-4,1E-5,1000),
pchoice = 0)
{
if(is.matrix(pa) == 0){pa = matrix(c(phy$tip.label,pa),nrow = length(pa),ncol = 2)}
options(warn=-1)
idpars = sort(c(idparsopt,idparsfix))
if(sum(idpars == (1:3)) != 3)
{
cat("The parameters to be optimized and fixed are incoherent.")
} else {
namepars = c("mu","gamma_0","gamma_1")
if(length(namepars[idparsopt]) == 0) { optstr = "nothing" } else { optstr = namepars[idparsopt] }
cat("You are optimizing",optstr,"\n")
if(length(namepars[idparsfix]) == 0) { fixstr = "nothing" } else { fixstr = namepars[idparsfix] }
cat("You are fixing",fixstr,"\n")
trparsopt = initparsopt/(1 + initparsopt)
trparsfix = parsfix/(1 + parsfix)
trparsfix[parsfix == Inf] = 1
flush.console()
pars2[5] = pchoice
edgeTList = compute_edgeTList(phy)
out = DAMOCLES_simplex(trparsopt = trparsopt,trparsfix = trparsfix,idparsopt = idparsopt,idparsfix = idparsfix,phy = phy,pa1 = pa[,1],pa2 = pa[,2],pars2,edgeTList)
if(out$conv > 0)
{
cat("Optimization has not converged. Try again with different starting values.\n")
} else {
MLtrpars = unlist(out$par)
MLpars = MLtrpars/(1 - MLtrpars)
out$par = list(MLpars)
MLpars1 = rep(0,3)
MLpars1[idparsopt] = MLpars
if(length(idparsfix) != 0) { MLpars1[idparsfix] = parsfix }
ML = as.numeric(unlist(out$fvalues))
out2 = data.frame(mu = MLpars1[1], gamma_0 = MLpars1[2], gamma_1 = MLpars1[3], loglik = ML, df = length(initparsopt), conv = unlist(out$conv))
s1 = sprintf('Maximum likelihood parameter estimates: mu: %f, gamma_0: %f, gamma_1: %f',MLpars1[1],MLpars1[2],MLpars1[3])
s2 = sprintf('Maximum loglikelihood: %f',out$fvalues)
cat("\n",s1,"\n",s2,"\n")
invisible(out2)
}
}
}
|
## Scrape sun-path SVG data from suncalc.net with RSelenium: for every
## minute between 10:00 and 13:59 on four October 2015 dates, load the
## page for a fixed Manhattan lat/lon and pull the "d" attribute of the
## 8th <path> element on the page.
library(pipeR)
library(htmltools)
library(rvest)
library(XML)
library(RSelenium)
library(RCurl)
library(lubridate)
library(data.table)
# NOTE(review): RCurl is loaded twice (already attached above).
library(RCurl)
# Start a local Selenium server and open a browser session.
RSelenium::checkForServer()
RSelenium::startServer()
remDr <- remoteDriver()
remDr$open()
# Every minute from 10:00 to 13:59 (only the time of day is kept).
times <- format(seq(as.POSIXct("2015-01-01 10:00:00"),as.POSIXct("2015-01-01 13:59:59"),by="mins"),format='%H:%M')
dates <- seq(as.Date("2015-10-13"),as.Date("2015-10-16"),by="days")
dates <- format(dates,"%Y.%m.%d")
# Cartesian product of dates x times: one page load per combination.
exact <- merge(dates,times)
names(exact) <- c("date","time")
#exact <- as.data.table(exact)
#setkey(exact,date)
#sub <- exact[1:20,]
system.time(
#exact[, path := apply()]
path <- apply(exact, 1, function(x) {
url <- paste0("http://suncalc.net/#/40.7389,-74.004,19/",x["date"],"/",x["time"])
print(url)
remDr$navigate(url)
# The 8th path element is assumed to be the sun-path curve -- confirm
# against the page's current DOM before re-running.
elem <- remDr$findElements(using = 'css selector', "path")[[8]]
elem$getElementAttribute("d")[[1]]
})
)
# NOTE(review): the sub() and strsplit(x, "L") results below are computed
# and discarded; only strsplit(x, ",") is returned, so the "M"/"L" path
# markers remain embedded in the output -- almost certainly unintended.
# Also, `split` masks base::split() from here on.
split <- lapply(path, function(x) {
sub("M","",x)
strsplit(x,c("L"))
strsplit(x,",")
})
| /suncalc.R | no_license | anbnyc/creative | R | false | false | 1,008 | r | library(pipeR)
library(htmltools)
library(rvest)
library(XML)
library(RSelenium)
library(RCurl)
library(lubridate)
library(data.table)
library(RCurl)
RSelenium::checkForServer()
RSelenium::startServer()
remDr <- remoteDriver()
remDr$open()
times <- format(seq(as.POSIXct("2015-01-01 10:00:00"),as.POSIXct("2015-01-01 13:59:59"),by="mins"),format='%H:%M')
dates <- seq(as.Date("2015-10-13"),as.Date("2015-10-16"),by="days")
dates <- format(dates,"%Y.%m.%d")
exact <- merge(dates,times)
names(exact) <- c("date","time")
#exact <- as.data.table(exact)
#setkey(exact,date)
#sub <- exact[1:20,]
system.time(
#exact[, path := apply()]
path <- apply(exact, 1, function(x) {
url <- paste0("http://suncalc.net/#/40.7389,-74.004,19/",x["date"],"/",x["time"])
print(url)
remDr$navigate(url)
elem <- remDr$findElements(using = 'css selector', "path")[[8]]
elem$getElementAttribute("d")[[1]]
})
)
split <- lapply(path, function(x) {
sub("M","",x)
strsplit(x,c("L"))
strsplit(x,",")
})
|
## S4 class for an Illumina methylation array manifest: 'data' is an
## environment holding the probe tables (TypeI, TypeII, TypeControl,
## TypeSnpI, TypeSnpII -- see the validity method), and 'annotation'
## is a character identifier for the array.
setClass("IlluminaMethylationManifest",
representation(data = "environment",
annotation = "character"))
## Validity: the @data environment must contain all five probe tables as
## DataFrames, each with the expected columns of the expected classes.
## Failure messages are accumulated into 'msg'; the method returns TRUE
## when everything checks out, otherwise the message string.
setValidity("IlluminaMethylationManifest", function(object) {
msg <- NULL
# Presence and class of each probe table.
if(! "TypeI" %in% ls(object@data) || !is(object@data[["TypeI"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with TypeI probes", sep = "\n")
if(! "TypeII" %in% ls(object@data) || !is(object@data[["TypeII"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with TypeII probes", sep = "\n")
if(! "TypeControl" %in% ls(object@data) || !is(object@data[["TypeControl"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Control probes", sep = "\n")
if(! "TypeSnpI" %in% ls(object@data) || !is(object@data[["TypeSnpI"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Snp I probes", sep = "\n")
if(! "TypeSnpII" %in% ls(object@data) || !is(object@data[["TypeSnpII"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Snp II probes", sep = "\n")
## Check Names
# Per-table column names and column classes.  Type I probes carry two
# addresses plus a Color channel; Type II probes a single address.
if(! all(c("Name", "AddressA", "AddressB", "Color", "nCpG") %in% colnames(object@data[["TypeI"]])))
msg <- paste(msg, "'TypeI' has to have column names 'Name', 'AddressA', 'AddressB', 'Color', 'nCpG'")
if(!is.character(object@data[["TypeI"]]$Name) ||
!is.character(object@data[["TypeI"]]$AddressA) ||
!is.character(object@data[["TypeI"]]$AddressB) ||
!is.character(object@data[["TypeI"]]$Color) ||
!is.integer(object@data[["TypeI"]]$nCpG))
msg <- paste(msg, "'TypeI' columns has wrong classes")
if(! all(c("Name", "AddressA", "nCpG") %in% colnames(object@data[["TypeII"]])))
msg <- paste(msg, "'TypeII' has to have column names 'Name', 'AddressA', 'nCpG'")
if(!is.character(object@data[["TypeII"]]$Name) ||
!is.character(object@data[["TypeII"]]$AddressA) ||
!is.integer(object@data[["TypeII"]]$nCpG))
msg <- paste(msg, "'TypeII' columns has wrong classes")
if(! all(c("Type", "Address") %in% colnames(object@data[["TypeControl"]])))
msg <- paste(msg, "'TypeControl' has to have column names 'Type', 'Address'")
if(!is.character(object@data[["TypeControl"]]$Type) ||
!is.character(object@data[["TypeControl"]]$Address))
msg <- paste(msg, "'TypeControl' columns has wrong classes")
if(! all(c("Name", "AddressA", "AddressB", "Color") %in% colnames(object@data[["TypeSnpI"]])))
msg <- paste(msg, "'TypeSnpI' has to have column names 'Name', 'AddressA', 'AddressB', 'Color'")
if(!is.character(object@data[["TypeSnpI"]]$Name) ||
!is.character(object@data[["TypeSnpI"]]$AddressA) ||
!is.character(object@data[["TypeSnpI"]]$AddressB) ||
!is.character(object@data[["TypeSnpI"]]$Color))
msg <- paste(msg, "'TypeSnpI' columns has wrong classes")
if(! all(c("Name", "AddressA") %in% colnames(object@data[["TypeSnpII"]])))
msg <- paste(msg, "'TypeSnpII' has to have column names 'Name', 'AddressA'")
if(!is.character(object@data[["TypeSnpII"]]$Name) ||
!is.character(object@data[["TypeSnpII"]]$AddressA))
msg <- paste(msg, "'TypeSnpII' columns has wrong classes")
# TRUE signals a valid object; otherwise return the accumulated messages.
if (is.null(msg)) TRUE else msg
})
## Compact console display: header, annotation details, and the row count
## of each of the five probe tables.
setMethod("show", "IlluminaMethylationManifest", function(object) {
cat("IlluminaMethylationManifest object\n")
# .show.annotation is presumably a package-internal helper that prints
# the annotation string(s) -- defined elsewhere in the package.
.show.annotation(object@annotation)
cat("Number of type I probes:", nrow(object@data[["TypeI"]]), "\n")
cat("Number of type II probes:", nrow(object@data[["TypeII"]]), "\n")
cat("Number of control probes:", nrow(object@data[["TypeControl"]]), "\n")
cat("Number of SNP type I probes:", nrow(object@data[["TypeSnpI"]]), "\n")
cat("Number of SNP type II probes:", nrow(object@data[["TypeSnpII"]]), "\n")
})
## Construct an IlluminaMethylationManifest from the five probe tables.
## The tables are stored in an environment with an empty parent, which is
## then locked (environment and bindings) so the manifest is immutable
## after construction.
IlluminaMethylationManifest <- function(TypeI = new("DataFrame"),
                                        TypeII = new("DataFrame"),
                                        TypeControl = new("DataFrame"),
                                        TypeSnpI = new("DataFrame"),
                                        TypeSnpII = new("DataFrame"),
                                        annotation = "") {
    probes <- new.env(parent = emptyenv())
    assign("TypeI", TypeI, envir = probes)
    assign("TypeII", TypeII, envir = probes)
    assign("TypeControl", TypeControl, envir = probes)
    assign("TypeSnpI", TypeSnpI, envir = probes)
    assign("TypeSnpII", TypeSnpII, envir = probes)
    # Freeze the environment and every binding inside it.
    lockEnvironment(probes, bindings = TRUE)
    new("IlluminaMethylationManifest", annotation = annotation, data = probes)
}
## getManifest on a manifest object is the identity.
setMethod("getManifest", signature(object = "IlluminaMethylationManifest"),
function(object) {
object
})
## getManifest on a character array name resolves the name to a manifest
## package via .getManifestString (package-internal helper), attaches
## that package and returns the manifest object it provides.
setMethod("getManifest", signature(object = "character"),
function(object) {
maniString <- .getManifestString(object)
# NOTE(review): require() attaches the whole package to the search
# path; requireNamespace() would be the lighter check -- confirm
# attaching is intended before changing it.
if(!require(maniString, character.only = TRUE))
stop(sprintf("cannot load manifest package %s", maniString))
get(maniString)
})
## Return the requested probe table from an object's manifest.
## "I-Green" / "I-Red" subset the Type I probes by their Color column;
## every other type maps directly onto a "Type<type>" table in the
## manifest's data slot.
getProbeInfo <- function(object, type = c("I", "II", "Control", "I-Green", "I-Red", "SnpI", "SnpII")) {
    type <- match.arg(type)
    manifest <- getManifest(object)
    if (type %in% c("I-Green", "I-Red")) {
        typeI <- manifest@data[["TypeI"]]
        wanted <- if (type == "I-Green") "Grn" else "Red"
        typeI[typeI$Color == wanted, ]
    } else {
        manifest@data[[paste0("Type", type)]]
    }
}
## Summary information about a manifest: either the total number of loci
## (Type I plus Type II probes) or the concatenated locus names, in that
## same Type I / Type II order.
getManifestInfo <- function(object, type = c("nLoci", "locusNames")) {
    type <- match.arg(type)
    typeI <- getProbeInfo(object, type = "I")
    typeII <- getProbeInfo(object, type = "II")
    if (type == "nLoci") {
        nrow(typeI) + nrow(typeII)
    } else {
        c(typeI$Name, typeII$Name)
    }
}
## Addresses of control probes of the requested type(s).
## With asList = TRUE the addresses come back as a list split by control
## type (only the requested types kept); otherwise as a single vector.
## Note: controlType is NOT run through match.arg, so any type string
## present in the control table may be requested (as in the original).
getControlAddress <- function(object, controlType = c("NORM_A", "NORM_C", "NORM_G", "NORM_T"),
                              asList = FALSE) {
    ctrls <- getProbeInfo(object, type = "Control")
    if (asList) {
        byType <- split(ctrls$Address, ctrls$Type)
        byType[names(byType) %in% controlType]
    } else {
        ctrls[ctrls$Type %in% controlType, "Address"]
    }
}
## Annotate a GRanges of probe loci with the genomic footprint of the
## probe (probeStart/probeEnd) and the single-base-extension position
## (SBE).  The offsets depend on probe Type (I vs II) and Strand.
## NOTE(review): the specific +/-1/49/50 offsets encode Illumina probe
## design geometry and are taken at face value from the original --
## confirm against the array design documentation before changing.
getProbePositionsDetailed <- function(map) {
## map is GR with metadata columns strand and type
stopifnot(is(map, "GRanges"))
stopifnot(c("Strand", "Type") %in% names(mcols(map)))
# Index the four Type x Strand combinations once; reused for all three
# annotations below (F = "+" strand, R = "-" strand).
probeStart <- rep(NA, length(map))
wh.II.F <- which(map$Type=="II" & map$Strand=="+")
wh.II.R <- which(map$Type=="II" & map$Strand=="-")
wh.I.F <- which(map$Type=="I" & map$Strand=="+")
wh.I.R <- which(map$Type=="I" & map$Strand=="-")
# Start of the probe's genomic footprint.
probeStart[wh.II.F] <- start(map)[wh.II.F]
probeStart[wh.II.R] <- start(map)[wh.II.R] - 50
probeStart[wh.I.F] <- start(map)[wh.I.F] - 1
probeStart[wh.I.R] <- start(map)[wh.I.R] - 49
map$probeStart <- probeStart
# End of the probe's genomic footprint (footprint spans 50 bases plus
# type-dependent shifts).
probeEnd <- rep(NA, length(map))
probeEnd[wh.II.F] <- start(map)[wh.II.F] + 50
probeEnd[wh.II.R] <- start(map)[wh.II.R]
probeEnd[wh.I.F] <- start(map)[wh.I.F] + 49
probeEnd[wh.I.R] <- start(map)[wh.I.R] + 1
map$probeEnd <- probeEnd
# Single-base-extension position.
sbe <- rep(NA, length(map))
sbe[wh.II.F] <- start(map)[wh.II.F]
sbe[wh.II.R] <- start(map)[wh.II.R] + 1
sbe[wh.I.F] <- start(map)[wh.I.F] - 1
sbe[wh.I.R] <- start(map)[wh.I.R] + 2
map$SBE <- sbe
map
}
| /R/manifest.R | no_license | Tomnl/minfi | R | false | false | 7,676 | r | setClass("IlluminaMethylationManifest",
representation(data = "environment",
annotation = "character"))
setValidity("IlluminaMethylationManifest", function(object) {
msg <- NULL
if(! "TypeI" %in% ls(object@data) || !is(object@data[["TypeI"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with TypeI probes", sep = "\n")
if(! "TypeII" %in% ls(object@data) || !is(object@data[["TypeII"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with TypeII probes", sep = "\n")
if(! "TypeControl" %in% ls(object@data) || !is(object@data[["TypeControl"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Control probes", sep = "\n")
if(! "TypeSnpI" %in% ls(object@data) || !is(object@data[["TypeSnpI"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Snp I probes", sep = "\n")
if(! "TypeSnpII" %in% ls(object@data) || !is(object@data[["TypeSnpII"]], "DataFrame"))
msg <- paste(msg, "'data' slot must contain a DataFrame with Snp II probes", sep = "\n")
## Check Names
if(! all(c("Name", "AddressA", "AddressB", "Color", "nCpG") %in% colnames(object@data[["TypeI"]])))
msg <- paste(msg, "'TypeI' has to have column names 'Name', 'AddressA', 'AddressB', 'Color', 'nCpG'")
if(!is.character(object@data[["TypeI"]]$Name) ||
!is.character(object@data[["TypeI"]]$AddressA) ||
!is.character(object@data[["TypeI"]]$AddressB) ||
!is.character(object@data[["TypeI"]]$Color) ||
!is.integer(object@data[["TypeI"]]$nCpG))
msg <- paste(msg, "'TypeI' columns has wrong classes")
if(! all(c("Name", "AddressA", "nCpG") %in% colnames(object@data[["TypeII"]])))
msg <- paste(msg, "'TypeII' has to have column names 'Name', 'AddressA', 'nCpG'")
if(!is.character(object@data[["TypeII"]]$Name) ||
!is.character(object@data[["TypeII"]]$AddressA) ||
!is.integer(object@data[["TypeII"]]$nCpG))
msg <- paste(msg, "'TypeII' columns has wrong classes")
if(! all(c("Type", "Address") %in% colnames(object@data[["TypeControl"]])))
msg <- paste(msg, "'TypeControl' has to have column names 'Type', 'Address'")
if(!is.character(object@data[["TypeControl"]]$Type) ||
!is.character(object@data[["TypeControl"]]$Address))
msg <- paste(msg, "'TypeControl' columns has wrong classes")
if(! all(c("Name", "AddressA", "AddressB", "Color") %in% colnames(object@data[["TypeSnpI"]])))
msg <- paste(msg, "'TypeSnpI' has to have column names 'Name', 'AddressA', 'AddressB', 'Color'")
if(!is.character(object@data[["TypeSnpI"]]$Name) ||
!is.character(object@data[["TypeSnpI"]]$AddressA) ||
!is.character(object@data[["TypeSnpI"]]$AddressB) ||
!is.character(object@data[["TypeSnpI"]]$Color))
msg <- paste(msg, "'TypeSnpI' columns has wrong classes")
if(! all(c("Name", "AddressA") %in% colnames(object@data[["TypeSnpII"]])))
msg <- paste(msg, "'TypeSnpII' has to have column names 'Name', 'AddressA'")
if(!is.character(object@data[["TypeSnpII"]]$Name) ||
!is.character(object@data[["TypeSnpII"]]$AddressA))
msg <- paste(msg, "'TypeSnpII' columns has wrong classes")
if (is.null(msg)) TRUE else msg
})
setMethod("show", "IlluminaMethylationManifest", function(object) {
cat("IlluminaMethylationManifest object\n")
.show.annotation(object@annotation)
cat("Number of type I probes:", nrow(object@data[["TypeI"]]), "\n")
cat("Number of type II probes:", nrow(object@data[["TypeII"]]), "\n")
cat("Number of control probes:", nrow(object@data[["TypeControl"]]), "\n")
cat("Number of SNP type I probes:", nrow(object@data[["TypeSnpI"]]), "\n")
cat("Number of SNP type II probes:", nrow(object@data[["TypeSnpII"]]), "\n")
})
IlluminaMethylationManifest <- function(TypeI = new("DataFrame"),
TypeII = new("DataFrame"),
TypeControl = new("DataFrame"),
TypeSnpI = new("DataFrame"),
TypeSnpII = new("DataFrame"),
annotation = "") {
data <- new.env(parent = emptyenv())
data[["TypeI"]] <- TypeI
data[["TypeII"]] <- TypeII
data[["TypeControl"]] <- TypeControl
data[["TypeSnpI"]] <- TypeSnpI
data[["TypeSnpII"]] <- TypeSnpII
lockEnvironment(data, bindings = TRUE)
manifest <- new("IlluminaMethylationManifest", annotation = annotation, data = data)
manifest
}
setMethod("getManifest", signature(object = "IlluminaMethylationManifest"),
function(object) {
object
})
setMethod("getManifest", signature(object = "character"),
function(object) {
maniString <- .getManifestString(object)
if(!require(maniString, character.only = TRUE))
stop(sprintf("cannot load manifest package %s", maniString))
get(maniString)
})
getProbeInfo <- function(object, type = c("I", "II", "Control", "I-Green", "I-Red", "SnpI", "SnpII")) {
    ## Return the probe table of the requested type from the object's
    ## manifest. "I-Green"/"I-Red" are the type I table filtered by the
    ## channel recorded in its Color column.
    type <- match.arg(type)
    manifest <- getManifest(object)
    if (type %in% c("I-Green", "I-Red")) {
        typeI <- manifest@data[["TypeI"]]
        channel <- if (type == "I-Green") "Grn" else "Red"
        return(typeI[typeI$Color == channel, ])
    }
    manifest@data[[paste("Type", type, sep = "")]]
}
getManifestInfo <- function(object, type = c("nLoci", "locusNames")) {
    ## Summarise the manifest across type I and type II probes:
    ## either the total locus count or the combined locus names.
    type <- match.arg(type)
    typeI <- getProbeInfo(object, type = "I")
    typeII <- getProbeInfo(object, type = "II")
    if (type == "nLoci") {
        nrow(typeI) + nrow(typeII)
    } else {
        c(typeI$Name, typeII$Name)
    }
}
getControlAddress <- function(object, controlType = c("NORM_A", "NORM_C", "NORM_G", "NORM_T"),
                              asList = FALSE) {
    ## Return the addresses of control probes matching `controlType`.
    ## asList = TRUE: a named list of address vectors, one per control type;
    ## asList = FALSE: a flat vector of addresses.
    ## The control table lookup was duplicated in both branches; hoisted here.
    ctrls <- getProbeInfo(object, type = "Control")
    if(asList) {
        out <- split(ctrls$Address, ctrls$Type)
        out <- out[names(out) %in% controlType]
    } else {
        out <- ctrls[ctrls$Type %in% controlType, "Address"]
    }
    out
}
getProbePositionsDetailed <- function(map) {
## Annotate a GRanges of probe coordinates with the full probe span
## (probeStart/probeEnd) and the single-base-extension (SBE) position,
## with offsets chosen per probe Type (I/II) and Strand (+/-).
## map is GR with metadata columns strand and type
stopifnot(is(map, "GRanges"))
## stopifnot() requires every element of this length-2 logical to be TRUE.
stopifnot(c("Strand", "Type") %in% names(mcols(map)))
probeStart <- rep(NA, length(map))
## Index sets for each Type x Strand combination, computed once and
## reused for probeStart, probeEnd and SBE below.
wh.II.F <- which(map$Type=="II" & map$Strand=="+")
wh.II.R <- which(map$Type=="II" & map$Strand=="-")
wh.I.F <- which(map$Type=="I" & map$Strand=="+")
wh.I.R <- which(map$Type=="I" & map$Strand=="-")
## NOTE(review): the +/-49 and +/-50 offsets appear to encode a 50bp probe
## body with type-specific 1-2bp shifts for the extension chemistry --
## TODO confirm against the platform documentation.
probeStart[wh.II.F] <- start(map)[wh.II.F]
probeStart[wh.II.R] <- start(map)[wh.II.R] - 50
probeStart[wh.I.F] <- start(map)[wh.I.F] - 1
probeStart[wh.I.R] <- start(map)[wh.I.R] - 49
map$probeStart <- probeStart
probeEnd <- rep(NA, length(map))
probeEnd[wh.II.F] <- start(map)[wh.II.F] + 50
probeEnd[wh.II.R] <- start(map)[wh.II.R]
probeEnd[wh.I.F] <- start(map)[wh.I.F] + 49
probeEnd[wh.I.R] <- start(map)[wh.I.R] + 1
map$probeEnd <- probeEnd
## Single-base-extension position per probe.
sbe <- rep(NA, length(map))
sbe[wh.II.F] <- start(map)[wh.II.F]
sbe[wh.II.R] <- start(map)[wh.II.R] + 1
sbe[wh.I.F] <- start(map)[wh.I.F] - 1
sbe[wh.I.R] <- start(map)[wh.I.R] + 2
map$SBE <- sbe
## Return the input GRanges with the three new metadata columns attached.
map
}
|
/cria_graficos7_pca1023.R | no_license | Andreatrindade/wifilivresp2 | R | false | false | 3,551 | r | ||
#=================================================================================
# steven wong, february 2016
#=================================================================================
# setup: fresh session, project helpers, and the prepared Titanic data
rm(list = ls())
source('~/Desktop/Analytics/Titanic/code/starter/project_library.R')
load('data/working/Titanic.RData')
#=================================================================================
# data setup
# modify / select features so that they are appropriate for boosting
d_all_temp <-
  d_all %>%
  mutate(Sex = ifelse(Sex == 'male', 1, 0)) %>%
  # remove categorical variables with too many categories
  select(-c(Embarked, Cabin, Title, Ticket_A))
# one-hot encode those categorical variables instead (fullRank drops one
# reference level per variable)
dummy <- dummyVars(~ Embarked + Cabin + Title + Ticket_A, data = d_all, fullRank = TRUE)
data_dummy <- tbl_df(data.frame(predict(dummy, d_all)))
d_all_mod <- bind_cols(d_all_temp, data_dummy)
#=================================================================================
# boosting
dtrain <-
  d_all_mod %>%
  filter(dataset == 'train') %>%
  select(-dataset, -PassengerId)
dtest <-
  d_all_mod %>%
  filter(dataset == 'test') %>%
  select(-dataset)
#---------------------------------------------------------------------------------
# cross-validation
n_cv <- 10
set.seed(94305)
# assign each training row to one of n_cv folds
# (TRUE spelled out: T is an ordinary, reassignable binding, not a keyword)
cv <- sample(1:n_cv, nrow(dtrain), replace = TRUE)
results <- tibble()  # data_frame() is deprecated; tibble() is its replacement
for (i in 1:n_cv) {
  train <- xgb.DMatrix(
    data = as.matrix(select(dtrain[cv != i, ], -Survived)),
    label = as.matrix(select(dtrain[cv != i, ], Survived))
  )
  test <- xgb.DMatrix(
    data = as.matrix(select(dtrain[cv == i, ], -Survived))
  )
  # boosting
  model <- xgb.train(data = train, objective = 'binary:logistic', booster = 'gbtree',
                     nrounds = 2500, max_depth = 10)
  # # random forest
  # model <- xgb.train(data = train, objective = 'binary:logistic',
  #                    nrounds = 1, num_parallel_tree = 5000, colsample_bytree = 0.25)
  # classify at the 0.5 probability threshold
  pred <- as.numeric(predict(model, test) > 0.5)
  results <- bind_rows(
    results,
    tibble(Survived = dtrain[cv == i, ]$Survived, pred = pred, cv = i)
  )
}
# per-fold accuracy, then its mean and standard deviation across folds
results %>%
  group_by(cv) %>%
  summarise(correct = sum(Survived == pred)/n()) %>%
  summarise(mean(correct), sd(correct))
# #---------------------------------------------------------------------------------
# # result
# train <- xgb.DMatrix(
#   data = as.matrix(select(dtrain, -Survived)),
#   label = as.matrix(select(dtrain, Survived))
# )
# test <- xgb.DMatrix(
#   data = as.matrix(select(dtest, -Survived, -PassengerId))
# )
# model <- xgb.train(data = train, objective = 'binary:logistic', booster = 'gbtree',
#                    nrounds = 2500, max_depth = 10)
# pred <- as.numeric(predict(model, test) > 0.5)
# output <- bind_cols(select(dtest, PassengerId), data_frame(Survived = pred))
# write_csv(output, 'data/output/m_boosting_xgb.csv')
# model <- xgb.train(data = train, objective = 'binary:logistic',
#                    nrounds = 1, num_parallel_tree = 1500, colsample_bytree = 0.25)
# pred <- as.numeric(predict(model, test) > 0.5)
# output <- bind_cols(select(dtest, PassengerId), data_frame(Survived = pred))
# write_csv(output, 'data/output/m_random_forest_xgb.csv')
| /Kaggle_Titanic/code/m_boosting.R | no_license | stevenwong15/Projects | R | false | false | 3,218 | r | #=================================================================================
# steven wong, february 2016
#=================================================================================
# setup: fresh session, project helpers, and the prepared Titanic data
rm(list = ls())
source('~/Desktop/Analytics/Titanic/code/starter/project_library.R')
load('data/working/Titanic.RData')
#=================================================================================
# data setup
# modify / select features so that they are appropriate for boosting
d_all_temp <-
  d_all %>%
  mutate(Sex = ifelse(Sex == 'male', 1, 0)) %>%
  # remove categorical variables with too many categories
  select(-c(Embarked, Cabin, Title, Ticket_A))
# one-hot encode those categorical variables instead (fullRank drops one
# reference level per variable)
dummy <- dummyVars(~ Embarked + Cabin + Title + Ticket_A, data = d_all, fullRank = TRUE)
data_dummy <- tbl_df(data.frame(predict(dummy, d_all)))
d_all_mod <- bind_cols(d_all_temp, data_dummy)
#=================================================================================
# boosting
dtrain <-
  d_all_mod %>%
  filter(dataset == 'train') %>%
  select(-dataset, -PassengerId)
dtest <-
  d_all_mod %>%
  filter(dataset == 'test') %>%
  select(-dataset)
#---------------------------------------------------------------------------------
# cross-validation
n_cv <- 10
set.seed(94305)
# assign each training row to one of n_cv folds
# (TRUE spelled out: T is an ordinary, reassignable binding, not a keyword)
cv <- sample(1:n_cv, nrow(dtrain), replace = TRUE)
results <- tibble()  # data_frame() is deprecated; tibble() is its replacement
for (i in 1:n_cv) {
  train <- xgb.DMatrix(
    data = as.matrix(select(dtrain[cv != i, ], -Survived)),
    label = as.matrix(select(dtrain[cv != i, ], Survived))
  )
  test <- xgb.DMatrix(
    data = as.matrix(select(dtrain[cv == i, ], -Survived))
  )
  # boosting
  model <- xgb.train(data = train, objective = 'binary:logistic', booster = 'gbtree',
                     nrounds = 2500, max_depth = 10)
  # # random forest
  # model <- xgb.train(data = train, objective = 'binary:logistic',
  #                    nrounds = 1, num_parallel_tree = 5000, colsample_bytree = 0.25)
  # classify at the 0.5 probability threshold
  pred <- as.numeric(predict(model, test) > 0.5)
  results <- bind_rows(
    results,
    tibble(Survived = dtrain[cv == i, ]$Survived, pred = pred, cv = i)
  )
}
# per-fold accuracy, then its mean and standard deviation across folds
results %>%
  group_by(cv) %>%
  summarise(correct = sum(Survived == pred)/n()) %>%
  summarise(mean(correct), sd(correct))
# #---------------------------------------------------------------------------------
# # result
# train <- xgb.DMatrix(
#   data = as.matrix(select(dtrain, -Survived)),
#   label = as.matrix(select(dtrain, Survived))
# )
# test <- xgb.DMatrix(
#   data = as.matrix(select(dtest, -Survived, -PassengerId))
# )
# model <- xgb.train(data = train, objective = 'binary:logistic', booster = 'gbtree',
#                    nrounds = 2500, max_depth = 10)
# pred <- as.numeric(predict(model, test) > 0.5)
# output <- bind_cols(select(dtest, PassengerId), data_frame(Survived = pred))
# write_csv(output, 'data/output/m_boosting_xgb.csv')
# model <- xgb.train(data = train, objective = 'binary:logistic',
#                    nrounds = 1, num_parallel_tree = 1500, colsample_bytree = 0.25)
# pred <- as.numeric(predict(model, test) > 0.5)
# output <- bind_cols(select(dtest, PassengerId), data_frame(Survived = pred))
# write_csv(output, 'data/output/m_random_forest_xgb.csv')
|
# Extract monthly rental-price series for the training (treatment) and
# control ZIP code areas, and write each to its own CSV.
# NOTE(review): hard-coded working directory; breaks on other machines.
setwd('~/Downloads/550project')
rental_data <- read.csv("rental_price.csv")
library(dplyr)
library(stringr)
# ZIP codes defining the training (treatment) area
training_zipcode <- c(90012,90031,90032,90022,91754,91755,90640,91107,91775,91108,91006,91024,91016,91706,91010,91702,91722,91741,91740)
# NOTE(review): str_detect with a "|" pattern does substring matching, so a
# 5-digit ZIP could in principle match inside a longer RegionName token --
# TODO confirm RegionName holds bare ZIP codes.
training_area <- filter(rental_data, str_detect(RegionName, paste(training_zipcode, collapse="|")))
# keep RegionName (col 2) plus the monthly price columns 28:116 ...
training_area <- training_area[c(2,28:116)]
# ... then trim to the name column plus the first 84 months
training_area <- training_area[c(1:85)]
# move RegionName into row names and drop it as a column
rowName <- training_area[,c(1)]
row.names(training_area) <- rowName
training_area <- training_area[-c(1)]
# convert column names of the form "X2011.01" into "2011-01-01" dates
colNames <- names(training_area)
colNames <- substr(colNames, 2, 8)
colNames <- str_replace(colNames, '\\.', "-")
colNames <- paste0(colNames, "-01")
names(training_area) <- colNames
write.csv(training_area, "rental_price_training.csv")
# same extraction for the control area
control_zipcode <- c(91773,91750,91711,91767,91762)
control_area <- filter(rental_data, str_detect(RegionName, paste(control_zipcode, collapse="|")))
control_area <- control_area[c(2,28:116)]
control_area <- control_area[c(1:85)]
c.rowName <- control_area[,c(1)]
row.names(control_area) <- c.rowName
control_area <- control_area[-c(1)]
colNames <- names(control_area)
colNames <- substr(colNames, 2, 8)
colNames <- str_replace(colNames, '\\.', "-")
colNames <- paste0(colNames, "-01")
names(control_area) <- colNames
write.csv(control_area, "Rental_Price_Test.csv")
| /Final Project/code/rental_data_extract.R | no_license | phuongqn/DataScience-at-Scale | R | false | false | 1,340 | r | setwd('~/Downloads/550project')
# Extract monthly rental-price series for the training (treatment) and
# control ZIP code areas, and write each to its own CSV.
rental_data <- read.csv("rental_price.csv")
library(dplyr)
library(stringr)
# ZIP codes defining the training (treatment) area
training_zipcode <- c(90012,90031,90032,90022,91754,91755,90640,91107,91775,91108,91006,91024,91016,91706,91010,91702,91722,91741,91740)
# NOTE(review): str_detect with a "|" pattern does substring matching, so a
# 5-digit ZIP could in principle match inside a longer RegionName token --
# TODO confirm RegionName holds bare ZIP codes.
training_area <- filter(rental_data, str_detect(RegionName, paste(training_zipcode, collapse="|")))
# keep RegionName (col 2) plus the monthly price columns 28:116 ...
training_area <- training_area[c(2,28:116)]
# ... then trim to the name column plus the first 84 months
training_area <- training_area[c(1:85)]
# move RegionName into row names and drop it as a column
rowName <- training_area[,c(1)]
row.names(training_area) <- rowName
training_area <- training_area[-c(1)]
# convert column names of the form "X2011.01" into "2011-01-01" dates
colNames <- names(training_area)
colNames <- substr(colNames, 2, 8)
colNames <- str_replace(colNames, '\\.', "-")
colNames <- paste0(colNames, "-01")
names(training_area) <- colNames
write.csv(training_area, "rental_price_training.csv")
# same extraction for the control area
control_zipcode <- c(91773,91750,91711,91767,91762)
control_area <- filter(rental_data, str_detect(RegionName, paste(control_zipcode, collapse="|")))
control_area <- control_area[c(2,28:116)]
control_area <- control_area[c(1:85)]
c.rowName <- control_area[,c(1)]
row.names(control_area) <- c.rowName
control_area <- control_area[-c(1)]
colNames <- names(control_area)
colNames <- substr(colNames, 2, 8)
colNames <- str_replace(colNames, '\\.', "-")
colNames <- paste0(colNames, "-01")
names(control_area) <- colNames
write.csv(control_area, "Rental_Price_Test.csv")
|
\name{GenerateRegularTS}
\alias{GenerateRegularTS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
GenerateRegularTS
}
\description{
GenerateRegularTS
}
\usage{
GenerateRegularTS(Datavec,TimeChar,TimeResolutionBegin='secs',
TimeResolutionEnd='mins',Minutes=1,PlotIt=FALSE,
formating="(\%y-\%m-\%d \%H:\%M:\%S)",
tz='UTC')
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Datavec}{[1:n] numerical data vector
}
\item{TimeChar}{[1:n]
Time, a chron object, a POSIXlt object, or a character vector holding the
device-specific timestamps; if it is not a chron object, please adjust the
format as necessary.
It has to be convertible to character.
}
\item{TimeResolutionBegin}{to be filled...
}
\item{TimeResolutionEnd}{to be filled...
}
\item{Minutes}{to be filled...
}
\item{PlotIt}{TRUE: plotting, FALSE: no plots
}
\item{formating}{default="(\%y-\%m-\%d \%H:\%M:\%S)", else check \code{strptime} function
}
\item{tz}{
sometimes durations and time difference cannot be calculated if timezone is not chosen,
default: 'UTC'
}
}
\details{
...
}
\value{
Regular TS of equidistance as a zoo object
}
\references{
...
}
\author{
Michael Thrun
}
\keyword{irregular ts}% use one of RShowDoc("KEYWORDS")
\keyword{regular ts}% __ONLY ONE__ keyword per line
| /man/GenerateRegularTS.Rd | no_license | Quirinms/TimeSeries | R | false | false | 1,315 | rd | \name{GenerateRegularTS}
\alias{GenerateRegularTS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
GenerateRegularTS
}
\description{
GenerateRegularTS
}
\usage{
GenerateRegularTS(Datavec,TimeChar,TimeResolutionBegin='secs',
TimeResolutionEnd='mins',Minutes=1,PlotIt=FALSE,
formating="(\%y-\%m-\%d \%H:\%M:\%S)",
tz='UTC')
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Datavec}{[1:n] numerical data vector
}
\item{TimeChar}{[1:n]
Time, a chron object, a POSIXlt object, or a character vector holding the
device-specific timestamps; if it is not a chron object, please adjust the
format as necessary.
It has to be convertible to character.
}
\item{TimeResolutionBegin}{to be filled...
}
\item{TimeResolutionEnd}{to be filled...
}
\item{Minutes}{to be filled...
}
\item{PlotIt}{TRUE: plotting, FALSE: no plots
}
\item{formating}{default="(\%y-\%m-\%d \%H:\%M:\%S)", else check \code{strptime} function
}
\item{tz}{
sometimes durations and time difference cannot be calculated if timezone is not chosen,
default: 'UTC'
}
}
\details{
...
}
\value{
Regular TS of equidistance as a zoo object
}
\references{
...
}
\author{
Michael Thrun
}
\keyword{irregular ts}% use one of RShowDoc("KEYWORDS")
\keyword{regular ts}% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plexsans.R
\docType{data}
\name{font_ps}
\alias{font_ps}
\alias{font_ps_light}
\title{PlexSans font name R variable aliases}
\format{
length 1 character vector
An object of class \code{character} of length 1.
}
\usage{
font_ps
font_ps_light
}
\description{
\code{font_ps} == "\code{IBMPlexSans}"
\code{font_ps_light} == "\code{IBMPlexSans-Light}"
}
\note{
\code{font_ps_light} (a.k.a. "\code{IBMPlexSans-Light}") is not available on
Windows and will throw a warning if used in plots.
}
\keyword{datasets}
| /man/PlexSans.Rd | permissive | gkampolis/hrbrthemes | R | false | true | 586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plexsans.R
\docType{data}
\name{font_ps}
\alias{font_ps}
\alias{font_ps_light}
\title{PlexSans font name R variable aliases}
\format{
length 1 character vector
An object of class \code{character} of length 1.
}
\usage{
font_ps
font_ps_light
}
\description{
\code{font_ps} == "\code{IBMPlexSans}"
\code{font_ps_light} == "\code{IBMPlexSans-Light}"
}
\note{
\code{font_ps_light} (a.k.a. "\code{IBMPlexSans-Light}") is not available on
Windows and will throw a warning if used in plots.
}
\keyword{datasets}
|
library(data.table)
#0 - Set directory
setwd("C:/Users/admin/Desktop/Sociology/PSID Data")
#1 - Load data
I <- readRDS("5 - Merged_Data.rds")
#2 - Inflate numbers to 2015
# read CPI
# (a stale duplicate read.csv() pointing at another user's profile was
# removed; it would have errored before this line could run)
CPI <- read.csv("C:/Users/admin/Desktop/Sociology/CPI info/CPI.csv")
# determine year to inflate to; adjust other numbers accordingly
cpiindex <- CPI$Avg[CPI$Year == max(I$year, na.rm = TRUE)]
CPI$Avg <- cpiindex/CPI$Avg
# create a vector of these numbers equal to nrow I, indexed by year
temp <- data.frame(Year = I$year, index = 1:nrow(I))
temp <- merge(temp, CPI, all.x = TRUE)
inflatenums <- temp[order(temp$index), ]
# pick variables to inflate
# scan variables for numbers that go higher than 1000
# (diagnostic only; varmax is not used below)
varmax <- vector()
for(i in 1:length(I)){
  try({
    # inherits() handles columns with more than one class (e.g. ordered
    # factors), where class(x) != "factor" is not a single TRUE/FALSE
    if(!inherits(I[, i], "factor"))
      varmax[i] <- max(I[, i], na.rm = TRUE)
  })
}
# determine which vars to inflate: equity/income/debt levels,
# excluding indicator variables (names ending in "_d")
toinflate <- grep("^eq|^inc|^debt", names(I), value = TRUE)
noinflate <- grep("\\_d$", names(I), value = TRUE)
toinflate <- setdiff(toinflate, noinflate)
toinflate <- match(toinflate, names(I))
# inflate those variables to the reference-year dollars
for(i in 1:length(toinflate)){
  I[, toinflate[i]] <- inflatenums$Avg*I[, toinflate[i]]
}
#3 - save
saveRDS(I, "6 - Merged_Data.rds")
| /Code/06 - Inflate financial variables.R | no_license | BrianAronson/Health-Wealth-PSID | R | false | false | 1,627 | r | library(data.table)
#0 - Set directory
setwd("C:/Users/admin/Desktop/Sociology/PSID Data")
#1 - Load data
I <- readRDS("5 - Merged_Data.rds")
#2 - Inflate numbers to 2015
# read CPI
# (a stale duplicate read.csv() pointing at another user's profile was
# removed; it would have errored before this line could run)
CPI <- read.csv("C:/Users/admin/Desktop/Sociology/CPI info/CPI.csv")
# determine year to inflate to; adjust other numbers accordingly
cpiindex <- CPI$Avg[CPI$Year == max(I$year, na.rm = TRUE)]
CPI$Avg <- cpiindex/CPI$Avg
# create a vector of these numbers equal to nrow I, indexed by year
temp <- data.frame(Year = I$year, index = 1:nrow(I))
temp <- merge(temp, CPI, all.x = TRUE)
inflatenums <- temp[order(temp$index), ]
# pick variables to inflate
# scan variables for numbers that go higher than 1000
# (diagnostic only; varmax is not used below)
varmax <- vector()
for(i in 1:length(I)){
  try({
    # inherits() handles columns with more than one class (e.g. ordered
    # factors), where class(x) != "factor" is not a single TRUE/FALSE
    if(!inherits(I[, i], "factor"))
      varmax[i] <- max(I[, i], na.rm = TRUE)
  })
}
# determine which vars to inflate: equity/income/debt levels,
# excluding indicator variables (names ending in "_d")
toinflate <- grep("^eq|^inc|^debt", names(I), value = TRUE)
noinflate <- grep("\\_d$", names(I), value = TRUE)
toinflate <- setdiff(toinflate, noinflate)
toinflate <- match(toinflate, names(I))
# inflate those variables to the reference-year dollars
for(i in 1:length(toinflate)){
  I[, toinflate[i]] <- inflatenums$Avg*I[, toinflate[i]]
}
#3 - save
saveRDS(I, "6 - Merged_Data.rds")
|
#' Count unique values in a vector
#'
#' Count the number of unique values in a vector. `vec_count()` has two
#' important differences to `table()`: it returns a data frame, and when
#' given multiple inputs (as a data frame), it only counts combinations that
#' appear in the input.
#'
#' @param x A vector (including a data frame).
#' @param sort One of "count", "key", "location", or "none".
#' * "count", the default, puts most frequent values at top
#' * "key", orders by the output key column (i.e. unique values of `x`)
#' * "location", orders by location where key first seen. This is useful
#' if you want to match the counts up to other unique/duplicated functions.
#' * "none", leaves unordered.
#' @return A data frame with columns `key` (same type as `x`) and
#' `count` (an integer vector).
#' @export
#' @examples
#' vec_count(mtcars$vs)
#' vec_count(iris$Species)
#'
#' # If you count a data frame you'll get a data frame
#' # column in the output
#' str(vec_count(mtcars[c("vs", "am")]))
#'
#' # Sorting ---------------------------------------
#'
#' x <- letters[rpois(100, 6)]
#' # default is to sort by frequency
#' vec_count(x)
#'
#' # by can sort by key
#' vec_count(x, sort = "key")
#'
#' # or location of first value
#' vec_count(x, sort = "location")
#' head(x)
#'
#' # or not at all
#' vec_count(x, sort = "none")
vec_count <- function(x, sort = c("count", "key", "location", "none")) {
    ## Count unique values of `x`; `sort` controls the row order of the result.
    sort <- match.arg(sort)
    ## The C routine returns the first-occurrence location of each value
    ## (`key`) alongside its frequency (`val`).
    kv <- .Call(vctrs_count, vec_proxy(x))
    ## Build the result with a placeholder key column (rep_along keeps
    ## zero-length inputs working), then fill it by slicing `x` so the key
    ## column keeps `x`'s type -- including data-frame columns.
    out <- data_frame(key = rep_along(kv$val, NA), count = kv$val)
    out$key <- vec_slice(x, kv$key)
    if (sort == "none") {
        return(out)
    }
    ord <- switch(sort,
        location = order(kv$key),
        key = vec_order(out$key),
        count = order(-kv$val)
    )
    reset_rownames(vec_slice(out, ord))
}
reset_rownames <- function(x) {
    ## Drop row names from `x` and, recursively, from any data-frame columns
    ## (data-frame columns can carry their own stale row names after slicing).
    rownames(x) <- NULL
    df_cols <- vapply(x, is.data.frame, logical(1))
    x[df_cols] <- lapply(x[df_cols], reset_rownames)
    x
}
# Duplicates --------------------------------------------------------------
#' Find duplicated values
#'
#' * `vec_duplicate_any()`: detects the presence of duplicated values,
#' similar to [anyDuplicated()].
#' * `vec_duplicate_all()`: detects if all values are equivalent.
#' * `vec_duplicate_detect()`: returns a logical vector describing if each
#' element of the vector is duplicated elsewhere. Unlike [duplicated()], it
#' reports all duplicated values, not just the second and subsequent
#' repetitions.
#' * `vec_duplicate_id()`: returns an integer vector giving the location of
#' the first occurrence of the value.
#'
#' @section Missing values:
#' In most cases, missing values are not considered to be equal, i.e.
#' `NA == NA` is not `TRUE`. This behaviour would be unappealing here,
#' so these functions consider all `NAs` to be equal. (Similarly,
#' all `NaN` are also considered to be equal.)
#'
#' @param x A vector (including a data frame).
#' @return
#' * `vec_duplicate_any()`: a logical vector of length 1.
#' * `vec_duplicate_all()`: a logical vector of length 1.
#' * `vec_duplicate_detect()`: a logical vector the same length as `x`.
#' * `vec_duplicate_id()`: an integer vector the same length as `x`.
#' @seealso [vec_unique()] for functions that work with the dual of duplicated
#' values: unique values.
#' @name vec_duplicate
#' @examples
#' vec_duplicate_any(1:10)
#' vec_duplicate_any(c(1, 1:10))
#'
#' vec_duplicate_all(c(1, 1))
#' vec_duplicate_all(c(1, 2))
#' vec_duplicate_all(c(NA, NA))
#'
#' x <- c(10, 10, 20, 30, 30, 40)
#' vec_duplicate_detect(x)
#' # Note that `duplicated()` doesn't consider the first instance to
#' # be a duplicate
#' duplicated(x)
#'
#' # Identify elements of a vector by the location of the first element that
#' # they're equal to:
#' vec_duplicate_id(x)
#' # Location of the unique values:
#' vec_unique_loc(x)
#' # Equivalent to `duplicated()`:
#' vec_duplicate_id(x) == seq_along(x)
NULL
#' @rdname vec_duplicate
#' @export
vec_duplicate_any <- function(x) {
## Thin wrapper over the C implementation; returns a length-1 logical.
.Call(vctrs_duplicated_any, x)
}
#' @rdname vec_duplicate
#' @export
vec_duplicate_detect <- function(x) {
## One logical per element; unlike duplicated(), this flags every
## occurrence of a repeated value, including the first.
.Call(vctrs_duplicated, x)
}
#' @rdname vec_duplicate
#' @export
vec_duplicate_id <- function(x) {
## Integer giving, for each element, the location of the first occurrence
## of its value.
.Call(vctrs_id, x)
}
# Unique values -----------------------------------------------------------
#' Find and count unique values
#'
#' * `vec_unique()`: the unique values. Equivalent to [unique()].
#' * `vec_unique_loc()`: the locations of the unique values.
#' * `vec_unique_count()`: the number of unique values.
#'
#' @inherit vec_duplicate sections
#' @param x A vector (including a data frame).
#' @return
#' * `vec_unique()`: a vector the same type as `x` containing only unique
#' values.
#' * `vec_unique_loc()`: an integer vector, giving locations of unique values.
#' * `vec_unique_count()`: an integer vector of length 1, giving the
#' number of unique values.
#' @seealso [vec_duplicate] for functions that work with the dual of
#' unique values: duplicated values.
#' @export
#' @examples
#' x <- rpois(100, 8)
#' vec_unique(x)
#' vec_unique_loc(x)
#' vec_unique_count(x)
#'
#' # `vec_unique()` returns values in the order that encounters them
#' # use sort = "location" to match to the result of `vec_count()`
#' head(vec_unique(x))
#' head(vec_count(x, sort = "location"))
#'
#' # Normally missing values are not considered to be equal
#' NA == NA
#'
#' # But they are for the purposes of considering uniqueness
#' vec_unique(c(NA, NA, NA, NA, 1, 2, 1))
vec_unique <- function(x) {
## Unique values in first-seen order: slice `x` at the unique locations so
## the result keeps `x`'s type (including data frames).
vec_slice(x, vec_unique_loc(x))
}
#' @rdname vec_unique
#' @export
vec_unique_loc <- function(x) {
## Integer locations of the first occurrence of each distinct value.
.Call(vctrs_unique_loc, x)
}
#' @rdname vec_unique
#' @export
vec_unique_count <- function(x) {
## Number of distinct values, as a length-1 integer.
.Call(vctrs_n_distinct, x)
}
# Matching ----------------------------------------------------------------
#' Find matching observations across vectors
#'
#' `vec_in()` returns a logical vector based on whether `needle` is found in
#' haystack. `vec_match()` returns an integer vector giving location of
#' `needle` in `haystack`, or `NA` if it's not found.
#'
#' `vec_in()` is equivalent to [%in%]; `vec_match()` is equivalent to `match()`.
#'
#' @inherit vec_duplicate sections
#' @param needles,haystack Vector of `needles` to search for in vector haystack.
#' `haystack` should usually be unique; if not `vec_match()` will only
#' return the location of the first match.
#'
#' `needles` and `haystack` are coerced to the same type prior to
#' comparison.
#' @return A vector the same length as `needles`. `vec_in()` returns a
#' logical vector; `vec_match()` returns an integer vector.
#' @export
#' @examples
#' hadley <- strsplit("hadley", "")[[1]]
#' vec_match(hadley, letters)
#'
#' vowels <- c("a", "e", "i", "o", "u")
#' vec_match(hadley, vowels)
#' vec_in(hadley, vowels)
#'
#' # Only the first index of duplicates is returned
#' vec_match(c("a", "b"), c("a", "b", "a", "b"))
vec_match <- function(needles, haystack) {
## Integer location of each needle in haystack (NA when absent);
## equivalent to match(), but with NA/NaN treated as equal to themselves.
.Call(vctrs_match, needles, haystack)
}
#' @export
#' @rdname vec_match
vec_in <- function(needles, haystack) {
## Logical membership test per needle; the %in% analogue with the same
## NA-equality semantics as vec_match().
.Call(vctrs_in, needles, haystack)
}
| /R/dictionary.R | no_license | batpigandme/vctrs | R | false | false | 7,258 | r | #' Count unique values in a vector
#'
#' Count the number of unique values in a vector. `vec_count()` has two
#' important differences to `table()`: it returns a data frame, and when
#' given multiple inputs (as a data frame), it only counts combinations that
#' appear in the input.
#'
#' @param x A vector (including a data frame).
#' @param sort One of "count", "key", "location", or "none".
#' * "count", the default, puts most frequent values at top
#' * "key", orders by the output key column (i.e. unique values of `x`)
#' * "location", orders by location where key first seen. This is useful
#' if you want to match the counts up to other unique/duplicated functions.
#' * "none", leaves unordered.
#' @return A data frame with columns `key` (same type as `x`) and
#' `count` (an integer vector).
#' @export
#' @examples
#' vec_count(mtcars$vs)
#' vec_count(iris$Species)
#'
#' # If you count a data frame you'll get a data frame
#' # column in the output
#' str(vec_count(mtcars[c("vs", "am")]))
#'
#' # Sorting ---------------------------------------
#'
#' x <- letters[rpois(100, 6)]
#' # default is to sort by frequency
#' vec_count(x)
#'
#' # by can sort by key
#' vec_count(x, sort = "key")
#'
#' # or location of first value
#' vec_count(x, sort = "location")
#' head(x)
#'
#' # or not at all
#' vec_count(x, sort = "none")
vec_count <- function(x, sort = c("count", "key", "location", "none")) {
sort <- match.arg(sort)
# Returns key-value pair giving index of first occurrence value and count
kv <- .Call(vctrs_count, vec_proxy(x))
# rep_along() to support zero-length vectors!
df <- data_frame(key = rep_along(kv$val, NA), count = kv$val)
df$key <- vec_slice(x, kv$key) # might be a dataframe
if (sort == "none") {
return(df)
}
idx <- switch(sort,
location = order(kv$key),
key = vec_order(df$key),
count = order(-kv$val)
)
df <- vec_slice(df, idx)
reset_rownames(df)
}
reset_rownames <- function(x) {
rownames(x) <- NULL
is_df <- map_lgl(x, is.data.frame)
x[is_df] <- lapply(x[is_df], reset_rownames)
x
}
# Duplicates --------------------------------------------------------------
#' Find duplicated values
#'
#' * `vec_duplicate_any()`: detects the presence of duplicated values,
#' similar to [anyDuplicated()].
#' * `vec_duplicate_all()`: detects if all values are equivalent.
#' * `vec_duplicate_detect()`: returns a logical vector describing if each
#' element of the vector is duplicated elsewhere. Unlike [duplicated()], it
#' reports all duplicated values, not just the second and subsequent
#' repetitions.
#' * `vec_duplicate_id()`: returns an integer vector giving the location of
#' the first occurrence of the value.
#'
#' @section Missing values:
#' In most cases, missing values are not considered to be equal, i.e.
#' `NA == NA` is not `TRUE`. This behaviour would be unappealing here,
#' so these functions consider all `NAs` to be equal. (Similarly,
#' all `NaN` are also considered to be equal.)
#'
#' @param x A vector (including a data frame).
#' @return
#' * `vec_duplicate_any()`: a logical vector of length 1.
#' * `vec_duplicate_all()`: a logical vector of length 1.
#' * `vec_duplicate_detect()`: a logical vector the same length as `x`.
#' * `vec_duplicate_id()`: an integer vector the same length as `x`.
#' @seealso [vec_unique()] for functions that work with the dual of duplicated
#' values: unique values.
#' @name vec_duplicate
#' @examples
#' vec_duplicate_any(1:10)
#' vec_duplicate_any(c(1, 1:10))
#'
#' vec_duplicate_all(c(1, 1))
#' vec_duplicate_all(c(1, 2))
#' vec_duplicate_all(c(NA, NA))
#'
#' x <- c(10, 10, 20, 30, 30, 40)
#' vec_duplicate_detect(x)
#' # Note that `duplicated()` doesn't consider the first instance to
#' # be a duplicate
#' duplicated(x)
#'
#' # Identify elements of a vector by the location of the first element that
#' # they're equal to:
#' vec_duplicate_id(x)
#' # Location of the unique values:
#' vec_unique_loc(x)
#' # Equivalent to `duplicated()`:
#' vec_duplicate_id(x) == seq_along(x)
NULL
#' @rdname vec_duplicate
#' @export
vec_duplicate_any <- function(x) {
.Call(vctrs_duplicated_any, x)
}
#' @rdname vec_duplicate
#' @export
vec_duplicate_detect <- function(x) {
.Call(vctrs_duplicated, x)
}
#' @rdname vec_duplicate
#' @export
vec_duplicate_id <- function(x) {
.Call(vctrs_id, x)
}
# Unique values -----------------------------------------------------------
#' Find and count unique values
#'
#' * `vec_unique()`: the unique values. Equivalent to [unique()].
#' * `vec_unique_loc()`: the locations of the unique values.
#' * `vec_unique_count()`: the number of unique values.
#'
#' @inherit vec_duplicate sections
#' @param x A vector (including a data frame).
#' @return
#' * `vec_unique()`: a vector the same type as `x` containing only unique
#' values.
#' * `vec_unique_loc()`: an integer vector, giving locations of unique values.
#' * `vec_unique_count()`: an integer vector of length 1, giving the
#' number of unique values.
#' @seealso [vec_duplicate] for functions that work with the dual of
#' unique values: duplicated values.
#' @export
#' @examples
#' x <- rpois(100, 8)
#' vec_unique(x)
#' vec_unique_loc(x)
#' vec_unique_count(x)
#'
#' # `vec_unique()` returns values in the order that encounters them
#' # use sort = "location" to match to the result of `vec_count()`
#' head(vec_unique(x))
#' head(vec_count(x, sort = "location"))
#'
#' # Normally missing values are not considered to be equal
#' NA == NA
#'
#' # But they are for the purposes of considering uniqueness
#' vec_unique(c(NA, NA, NA, NA, 1, 2, 1))
vec_unique <- function(x) {
vec_slice(x, vec_unique_loc(x))
}
#' @rdname vec_unique
#' @export
vec_unique_loc <- function(x) {
.Call(vctrs_unique_loc, x)
}
#' @rdname vec_unique
#' @export
vec_unique_count <- function(x) {
.Call(vctrs_n_distinct, x)
}
# Matching ----------------------------------------------------------------
#' Find matching observations across vectors
#'
#' `vec_in()` returns a logical vector based on whether `needle` is found in
#' haystack. `vec_match()` returns an integer vector giving location of
#' `needle` in `haystack`, or `NA` if it's not found.
#'
#' `vec_in()` is equivalent to [%in%]; `vec_match()` is equivalent to `match()`.
#'
#' @inherit vec_duplicate sections
#' @param needles,haystack Vector of `needles` to search for in vector haystack.
#' `haystack` should usually be unique; if not `vec_match()` will only
#' return the location of the first match.
#'
#' `needles` and `haystack` are coerced to the same type prior to
#' comparison.
#' @return A vector the same length as `needles`. `vec_in()` returns a
#' logical vector; `vec_match()` returns an integer vector.
#' @export
#' @examples
#' hadley <- strsplit("hadley", "")[[1]]
#' vec_match(hadley, letters)
#'
#' vowels <- c("a", "e", "i", "o", "u")
#' vec_match(hadley, vowels)
#' vec_in(hadley, vowels)
#'
#' # Only the first index of duplicates is returned
#' vec_match(c("a", "b"), c("a", "b", "a", "b"))
vec_match <- function(needles, haystack) {
.Call(vctrs_match, needles, haystack)
}
#' @export
#' @rdname vec_match
vec_in <- function(needles, haystack) {
.Call(vctrs_in, needles, haystack)
}
|
/truco.R | no_license | josebordon94/truco-cardgame-simulator | R | false | false | 9,527 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pour.R
\name{pour}
\alias{pour}
\title{Pouring a Drink}
\usage{
pour(drink, area_pcts)
}
\arguments{
\item{drink}{= a dataframe containing two columns, drink and density, maximumn rows = 4}
\item{area_pct}{a vector of fractional areas, must sum to 1, maximum length = 4}
}
\value{
a plot of the layered beverage
}
\description{
Plots layered drinks based on specific gravity
}
\examples{
drinks <- data.frame(drink = c("soda", "cranberry", "milk"),
density = c(1, 2, 3),
stringsAsFactors = FALSE)
area_pcts <- c(0.25, 0.25, 0.5)
pour(drinks, area_pcts = area_pcts)
}
\author{
Greg Pilgrim \email{gpilgrim2670@gmail.com}
}
| /man/pour.Rd | permissive | gpilgrim2670/mixed | R | false | true | 745 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pour.R
\name{pour}
\alias{pour}
\title{Pouring a Drink}
\usage{
pour(drink, area_pcts)
}
\arguments{
\item{drink}{= a dataframe containing two columns, drink and density, maximumn rows = 4}
\item{area_pct}{a vector of fractional areas, must sum to 1, maximum length = 4}
}
\value{
a plot of the layered beverage
}
\description{
Plots layered drinks based on specific gravity
}
\examples{
drinks <- data.frame(drink = c("soda", "cranberry", "milk"),
density = c(1, 2, 3),
stringsAsFactors = FALSE)
area_pcts <- c(0.25, 0.25, 0.5)
pour(drinks, area_pcts = area_pcts)
}
\author{
Greg Pilgrim \email{gpilgrim2670@gmail.com}
}
|
/MacOSX10.4u.sdk/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/FindByContent.framework/Versions/A/Headers/FindByContent.r | no_license | alexey-lysiuk/macos-sdk | R | false | false | 576 | r | ||
# Build basic ngram models for exploratory analysis
load.project()
# Too slow
#cache("first_word_tree", CODE={
#
#  tree <- list()
#
#  for (word in words_to_keep) {
#    next_words <- bi_freq[grep(paste0("^", word, "\\b"), bi_freq)]
#    tree[[word]] <- next_words
#
#  }
#
#
#})
# Get a list of all bigrams beginning with "happy"
# (fixed: the subscript referenced the undefined name `bifreq`)
bigram_happy <- bi_freq[grep("^happy\\b", bi_freq)]
tree <- list()
for (b in bigram_happy) {
  # Trigrams that start with this bigram.
  # (fixed: the original grep() call had a misplaced parenthesis that made
  # the whole file unparseable, and omitted the vector to search)
  trigram_happy <- tri_freq[grep(paste0("^(", b, ")\\b"), tri_freq)]
  # TODO(review): this call was left unfinished in the original (trailing
  # comma) -- the second argument to prob_nextword() still needs supplying,
  # presumably trigram_happy computed above.
  tree[[b]] <- prob_nextword(tri_freq, )
}
| /capstone/src/exploratory.R | no_license | connectedblue/capstone | R | false | false | 647 | r | # Build basic ngram models for exploratory analysis
load.project()
# Too slow
#cache("first_word_tree", CODE={
#
# tree <- list()
#
# for (word in words_to_keep) {
# next_words <- bi_freq[grep(paste0("^", word, "\\b"), bi_freq)]
# tree[[word]] <- next_words
#
# }
#
#
#})
# Get a list of all bigrams begining with happy
bigram_happy <- bi_freq[grep("^happy\\b", bifreq)]
tree <- list()
for (b in bigram_happy) {
trigram_happy <- tri_freq[grep(paste0("^(", b, ")\\b")), tri_freq)]
tree[[b]]<-prob_nextword(tri_freq, )
}
|
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
# 95% Wald confidence intervals for the coefficient in column 2 of each of
# the first two outcome equations of a fitted nnet::multinom model.
# Returns a 2 x 2 matrix with rows "ci.1" and "ci.2" (lower, upper bound).
CalcCImultinom <- function(fit)
{
  smry <- summary(fit)
  est <- smry$coefficients
  se <- smry$standard.errors
  half.width.1 <- 1.96 * se[1, 2]
  half.width.2 <- 1.96 * se[2, 2]
  ci.1 <- est[1, 2] + c(-1, 1) * half.width.1
  ci.2 <- est[2, 2] + c(-1, 1) * half.width.2
  return(rbind(ci.1, ci.2))
}
#key
# A, B,C,D,E,F,G - betaE[2] = 1, 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "GC"

# Simulation parameters: logit intercept, exposure effect, confounder effect.
beta0 <- -5
betaE <- log(2.5)
betaU <- log(4)
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000

# Per-iteration results: outcome counts, risk difference, odds ratio, CI.
AllY <- vector(length = n.sim)
ace.diff1 <- ace.or1 <- vector(length = n.sim)
# FIX: spelled out nrow/ncol -- the original `nr =`/`nc =` relied on partial
# argument matching, which is fragile and lint-flagged.
ci <- matrix(nrow = n.sim, ncol = 2)

for (j in 1:n.sim)
{
  CatIndex(j)  # progress indicator (Daniel package)

  # Simulate the normal unmeasured confounder U.
  U <- rnorm(n.sample, 0, sd = sigmaU)

  #### Calculate outcome probabilities with and without the exposure ####
  E0 <- exp(beta0[1] + betaU * U)
  E1 <- exp(beta0[1] + betaE + betaU * U)
  prE0 <- E0 / (1 + E0)
  prE1 <- E1 / (1 + E1)

  # Simulate potential outcomes and a randomized exposure X #
  Yctrl <- Ytrt <- vector(length = n.sample)
  X <- rbinom(n = n.sample, 1, 0.5)
  Yctrl <- rbinom(n.sample, 1, prE0)
  Ytrt <- rbinom(n.sample, 1, prE1)
  Y <- (1 - X) * Yctrl + X * Ytrt
  AllY[j] <- sum(Y)

  # FIX: the original `mean((Ytrt[X==1]) - mean(Yctrl[X==0]))` is numerically
  # identical (mean is linear) but reads as a mistake; rewritten as the
  # intended difference of group means.
  ace.diff1[j] <- mean(Ytrt[X == 1]) - mean(Yctrl[X == 0])

  # Marginal odds ratio and its 95% profile CI from logistic regression.
  fit <- glm(Y ~ X, family = "binomial")
  ace.or1[j] <- exp(fit$coefficients)[2]
  ci[j, ] <- confint(fit)[2, ]
}

save.image(paste0("CMPEn50krareSingle", patt, ".RData"))
| /Simulations/Scripts/R/Rare/Single/CMPEn50KrareSingleGC.R | no_license | yadevi/CausalMPE | R | false | false | 1,341 | r | rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F,G - betaE[2] = 1, 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "GC"
beta0 <- -5
betaE <- log(2.5)
betaU <- log(4)
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- vector(length = n.sim)
ace.diff1 <- ace.or1 <- vector(length = n.sim)
ci <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
E0 <- exp(beta0[1] + betaU*U)
E1 <- exp(beta0[1] + betaE + betaU*U)
prE0 <- E0/(1 + E0)
prE1 <- E1/(1 + E1)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
Yctrl <- rbinom(n.sample, 1, prE0)
Ytrt <- rbinom(n.sample, 1, prE1)
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j] <- sum(Y)
ace.diff1[j] <- mean((Ytrt[X==1]) - mean(Yctrl[X==0]))
fit <- glm(Y~ X, family = "binomial")
ace.or1[j] <- exp(fit$coefficients)[2]
ci[j, ] <- confint(fit)[2,]
}
save.image(paste0("CMPEn50krareSingle",patt,".RData"))
|
##
## rookutils.R
##
## 3/3/15
##
# Presently unused attempt at a termination function so as to streamline code for warnings
# Writes a JSON-encoded warning to the Rook response, finalizes the response,
# and then stop()s to abort the calling handler.
# NOTE(review): stop() fires *after* response$finish(), so the error itself is
# never sent to the client -- it only unwinds the server-side call stack.
terminate<-function(response,warning){
jsonWarning <- toJSON(list(warning=warning))
print(jsonWarning) # echo to the server log as well
response$write(jsonWarning)
response$finish()
stop()
}
# VJD: this reads a tab-delimited file. in the future, this should read and load the file based on the type of file defined at ingest. this way R can make use of important metadata such as whether a variable is a factor.
# Reads the per-session tab-delimited data file from /tmp.
# On read failure the globals `warning` and `result` are set via `<<-` for
# the calling Rook app to inspect, and NULL is returned.
# NOTE(review): the `logfile` argument is accepted but never used here.
readData <- function(sessionid,logfile){
tryCatch({
mydata<-NULL
mydata<-read.delim(file=paste("/tmp/data_",sessionid,".tab",sep=""))
}, error=function(err){
warning <<- TRUE ## assign up the scope bc inside function
result <<- list(warning=paste("R data loading error: ", err))
}) # if data is not readable, returns a warning and an error
return(mydata) #note mydata might be NULL
}
# Fetch a tabular data file from a Dataverse instance via its access API.
# Returns a data.frame, or NULL when the URL cannot be read as a
# tab-delimited table.
getDataFromDataverse <- function(hostname, fileid) {
  api.url <- paste0("http://", hostname, "/api/access/datafile/", fileid)
  result <- tryCatch(expr = read.delim(file = api.url), error = function(e) NULL)
  return(result)
}
# quick way to reassign factor levels in the data, necessary for setx() after data have been subset
# Re-derives each factor column so that levels absent from the (possibly
# subset) data are dropped; non-factor columns pass through untouched.
# FIX: the loop used 1:ncol(data), which evaluates to c(1, 0) for a
# zero-column data frame and would then index a nonexistent column;
# seq_len() handles that edge case safely.
refactor <- function(data) {
  for (i in seq_len(ncol(data))) {
    if (is.factor(data[, i])) {
      data[, i] <- factor(data[, i])
    }
  }
  return(data)
}
# This utility function was necessary when using rjson, rather than jsonlite, to transform list-of-lists to matrix
#
#edgeReformat<-function(edges){
# k<-length(edges)
# new<-matrix(NA,nrow=k,ncol=2)
# for(i in 1:k){
# new[i,1]<-edges[[i]][1]
# new[i,2]<-edges[[i]][2]
# }
# return(new)
#}
# Build the setx() call strings for Zelig from a user-supplied covariate
# matrix. `setx` is a k x 2 character matrix (one row per entry of
# `varnames`): column 1 holds the value for x.out, column 2 for x.alt, and ""
# means "not set by the user".
# Returns a character vector of 1-2 R statements for the caller to eval,
# or NULL when the user set nothing at all.
buildSetx <- function(setx, varnames) {
outeq <- NULL # accumulates "name = as.numeric(value)" terms for x.out
alteq <- NULL # accumulates terms for x.alt
call <- NULL
j<-1
k<-1
for(i in 1:length(varnames)){
t <- setx[i,] # under rjson was: unlist(setx[i])
if(t[1]=="" & t[2]=="") {next} # variable untouched in both columns
if(t[1]!="") {
outeq[j] <- paste(varnames[i],"=as.numeric(",t[1],")")
j<-j+1
}
if(t[2]!="") {
alteq[k] <- paste(varnames[i],"=as.numeric(",t[2],")")
k<-k+1
}
}
if(!is.null(outeq)) { # x has been set by user
outeq <- paste(outeq, collapse=",")
call[1] <- paste("x.out <- setx(z.out,",outeq,")")
} else { # x has not been set by user, use defaults
call[1] <- paste("x.out <- setx(z.out)")
}
if(!is.null(alteq)) { # x1 has been set by user
alteq <- paste(alteq, collapse=",")
call[2] <- paste("x.alt <- setx(z.out,",alteq,")")
} else if(!is.null(outeq)) { # x1 has not been set by user, but x has been set, so use defaults
call[2] <- paste("x.alt <- setx(z.out)")
}
# else user has not set any covariates, so x is default (above) and x1 is undefined
return(call)
}
# Build a model formula for `dv` from a user-drawn graph of variable
# linkages. `linkagelist` is a 2-column matrix of directed edges (cause in
# column 1, effect in column 2). The adjacency matrix is raised to successive
# powers to collect every variable with a directed path into `dv`; those
# become the RHS terms, with nominal variables (`nomvars`) wrapped in
# factor(). The print() calls are server-log diagnostics.
buildFormula<-function(dv, linkagelist, varnames=NULL, nomvars){
if(is.null(varnames)){
varnames<-unique(c(dv,linkagelist))
}
print(varnames)
k<-length(varnames)
relmat<-matrix(0,nrow=k,ncol=k)
# define relationship matrix
# relmat[i,j]==1 => "i caused by j"
print(linkagelist)
print(nrow(linkagelist))
for(i in 1:nrow(linkagelist)){
row.position<-min( (1:k)[varnames %in% linkagelist[i,2] ] ) # min() solves ties with shared variable names
col.position<-min( (1:k)[varnames %in% linkagelist[i,1] ] )
relmat[row.position,col.position]<-1
}
print(relmat)
# store matrix contains all backwards linked variables
store<-relmat.n<-relmat
continue<-TRUE
# Transitive closure by repeated matrix multiplication: each pass extends the
# traced paths by one edge until no new reachable pairs appear.
while(continue){
relmat.n<-relmat.n %*% relmat
relmat.n[store==1]<-0 # stops following previously traced path
relmat.n[relmat.n>1]<-1 # converts to boolean indicator matrix
store<-store + relmat.n # trace all long run paths
store[store>1]<-1 # converts to boolean indicator matrix
continue<-(sum(relmat.n)>0) # no new paths to trace
}
j<-min( (1:k)[varnames %in% dv ] )
rhsIndicator<-store[j,] # these are the variables that have a path to dv
rhsIndicator[j]<-0 # do not want dv as its own rhs variable
flag<-rhsIndicator==1
rhs.names<-varnames[flag]
rhs.names[which(rhs.names %in% nomvars)] <- paste("factor(", rhs.names[which(rhs.names %in% nomvars)], ")", sep="") # nominal variables are entered into the formula as factors
formula<-as.formula(paste(dv," ~ ", paste(rhs.names,collapse=" + ")))
print(formula)
return(formula)
}
# Runs preprocess() (project-defined) on the current data, writes the
# resulting JSON metadata to a session-named file, and returns the URL at
# which the client can fetch it. In production the file lives under the web
# server's document root; otherwise it is written relative to the app.
pCall <- function(data,production,sessionid, types) {
pjson<-preprocess(testdata=data, types=types)
print("new preprocess metadata: ")
print(pjson)
if(production){
subsetfile <- paste("/var/www/html/custom/preprocess_dir/preprocessSubset_",sessionid,".txt",sep="")
write(pjson,file=subsetfile)
url <- paste("https://beta.dataverse.org/custom/preprocess_dir/preprocessSubset_",sessionid,".txt",sep="")
}else{
url <- paste("data/preprocessSubset_",sessionid,".txt",sep="")
write(pjson,file=paste("../",url, sep=""))
}
return(url)
}
## called by executeHistory(), subset.app, and zelig.app.
## this function parses everything$zsubset to a list of subset values. the names of the elements in the list are 1, 2, 3, etc, and corresponds to the indices in the zvars and plot arrays
# Normalize a subset specification to a list: each matrix row becomes one
# list element; non-matrix input is returned unchanged.
parseSubset <- function(sub) {
  # FIX: was `class(sub) == "matrix"`. Since R 4.0 a matrix has class
  # c("matrix", "array"), so the comparison yields a length-2 logical and
  # `if()` on it is an error from R 4.2 onward. is.matrix() is the robust
  # test of the same condition.
  if (is.matrix(sub)) {
    mysubset <- list()
    for (i in seq_len(nrow(sub))) {
      mysubset[[i]] <- sub[i, ]
    }
  } else {
    mysubset <- sub
  }
  return(mysubset)
}
## called by executeHistory, subset.app, and zelig.app.
## sub is a list of subset values, from parseSubset(). varnames and plot are vectors. if plot[i] is "bar", it subsets on all values in sub[[i]]. if plot[i] is "continuous", it subsets on the range specified by the two values in sub[[i]]
# Applies each per-variable subset in turn; rows are flagged and dropped via
# eval(parse(...)) so that variable names can be spliced into the expression.
# NOTE(review): building code by pasting `varnames[i]` is fragile -- a column
# name containing a double quote would break (or inject into) the expression.
# A subset step that would remove every remaining row is skipped and the
# variable recorded in `skip` (currently unreported to the user).
subsetData <- function(data, sub, varnames, plot){
fdata<-data # not sure if this is necessary, but just to be sure that the subsetData function doesn't overwrite global mydata
fdata$flag <- 0
skip <- ""
for(i in 1:length(varnames)){
t <- sub[[i]] # under rjson was: unlist(sub[i])
p <- plot[i]
if(t[1]=="" | length(t)==0) {next} #no subset region
else {
if(p=="continuous") {
# Flag rows OUTSIDE the selected [t[1], t[2]] range, then keep the unflagged.
myexpr <- paste("fdata$flag[which(fdata$\"",varnames[i],"\" < ",t[1]," | fdata$\"",varnames[i],"\" > ",t[2],")] <- 1", sep="")
print(myexpr)
print(colnames(fdata))
eval(parse(text=myexpr))
if(sum(fdata$flag)==nrow(fdata)) { # if this will remove all the data, skip this subset and warn the user
fdata$flag <- 0
skip <- c(skip, varnames[i]) ## eventually warn the user that skip[2:length(skip)] are variables that they have chosen to subset but have been skipped because if they were subsetted we would have no data left
}
else {
fdata <- fdata[which(fdata$flag==0),] # subsets are the overlap of all remaining selected regions.
}
} else if(p=="bar") {
# Flag rows whose value is IN the selected category set `t`, then keep them.
myexpr <- paste("fdata$flag[which(as.character(fdata$\"",varnames[i],"\")%in% t)] <- 1", sep="")
eval(parse(text=myexpr))
if(sum(fdata$flag)==nrow(fdata)) {
fdata$flag <- 1
skip <- c(skip, varnames[i])
}
else {
fdata <- fdata[which(fdata$flag==1),] # notice we keep 1s, above we keep 0s
}
}
}
}
fdata$flag<-NULL
return(fdata)
}
# data is a data.frame of columns used for the transformation; func is a
# string such as "log(_transvar0)" or "_transvar0^2", where placeholder
# _transvar<i-1> stands for column i of `data`. The result of evaluating
# `func` is stored back into column 1 and the whole data.frame is returned.
# NOTE: this function masks base::transform within this file.
transform <- function(data, func) {
  # Substitute column references for the client-side placeholders.
  expr.txt <- gsub("_transvar0", "data[,1]", func)
  if (ncol(data) > 1) {
    for (col in 2:ncol(data)) {
      placeholder <- paste("_transvar", col - 1, sep = "")
      replacement <- paste("data[,", col, "]")
      expr.txt <- gsub(placeholder, replacement, expr.txt)
    }
  }
  # '+' travels as '_plus_' from the client (jsonlite quirk).
  expr.txt <- gsub("_plus_", "+", expr.txt)
  expr.txt <- paste("data[,1] <- ", expr.txt)
  print(expr.txt)
  # For log transforms, shift column 1 away from negatives/zero first so the
  # logarithm is defined.
  if (substr(func, 1, 3) == "log") {
    if (any(data[, 1] < 0, na.rm = TRUE)) {
      data[, 1] <- data[, 1] + -1 * min(data[, 1])
    }
    if (any(data[, 1] == 0, na.rm = TRUE)) {
      data[, 1] <- data[, 1] + .0001
    }
  }
  eval(parse(text = expr.txt))
  return(data)
}
## called by executeHistory() and transform.app
# Applies the client-specified transformation `func` to the columns named in
# `vars` (via the file-local transform(), which masks base::transform) and
# returns a ONE-column data.frame holding the transformed values. The column
# name encodes the transformation: placeholders replaced by variable names,
# punctuation mapped to '_', and a 't_' prefix added so the name is a valid
# R formula term.
parseTransform <- function(data, func, vars) {
call <- "no transformation"
t <- which(colnames(data) %in% vars)
tdata <- as.data.frame(data[,t])
colnames(tdata) <- colnames(data)[t]
tdata <- transform(data=tdata, func=func)
tdata <- as.data.frame(tdata[,1])
call <- gsub("_plus_", "+", func) # + operator disappears, probably a jsonlite parsing bug, so + operator is mapped to '_plus_' in the javascript, and remapped to + operator here
call <- gsub("_transvar0", vars[1], call)
if(length(vars)>1) {
for(i in 2:length(vars)) {
sub1 <- paste("_transvar", i-1, sep="")
sub2 <- vars[i]
call <- gsub(sub1, sub2, call)
}
}
# replace non-alphanumerics with '_' so that these variables may be used in R formulas.
call <- gsub("[[:punct:]]", "_", call)
call <- paste("t_", call, sep="")
colnames(tdata) <- call
return(tdata)
}
## called by zelig.app, subset.app, transform.app
# history is everything$callHistory, data is mydata, as initially read
# if empty, i.e. the first call from space i, history is an empty list
# else, history is a data.frame where each row contains the data to reconstruct the call
# Replays the recorded transform/subset steps in order against the freshly
# read data so the current request sees the same derived dataset the client
# built up. Transform steps append a new column; subset steps drop rows.
executeHistory <- function (history, data) {
n <- nrow(history)
if(is.null(n)) {return(data)} # empty list (no history) has NULL nrow
print(n)
for(i in 1:n) {
if(history[i,"func"]=="transform") {
v <- history[i,"zvars"]
if(class(v)=="list") {v <- v[[1]]} # v must be a vector of variable names
f <- history[i,"transform"]
tdata <- parseTransform(data=data, func=f, vars=v)
data <- cbind(data, tdata)
} else if(history[i,"func"]=="subset") {
v <- history[i,"zvars"][[1]] # cell is a list so take the first element
p <- history[i,"zplot"][[1]]
sub <- parseSubset(history[i, "zsubset"][[1]])
data <- subsetData(data=data, sub=sub, varnames=v, plot=p)
}
}
return(data)
}
# Code mostly from Zelig's plots.R function plot.qi(). Eventually, Zelig will implement a more general solution where each plot is stored in the Zelig object.
# Renders each available Zelig quantity-of-interest simulation to a PNG and
# returns a list of image URLs for the client.
# `path` is an R expression supplied AS TEXT and eval'd before each plot --
# presumably it opens the PNG graphics device using `qicount` in the file
# name (TODO confirm against the caller); each `execMe` string is likewise
# eval'd to draw one plot.
zplots <- function(obj, path, mymodelcount, mysessionid, production){
# Helper: open device (via `path`), draw (via `exec`), close, and record the
# resulting image URL. Uses `<<-` to update qicount/imageVector in zplots'
# environment.
writeplot <- function(exec, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production) {
# NOTE(review): the enclosing counter is bumped via <<- and the local copy
# (the `qicount` parameter) is bumped separately; both end up equal because
# the parameter was passed the enclosing value.
qicount <<- qicount+1
qicount<-qicount+1
eval(parse(text=path))
eval(parse(text=exec))
dev.off()
if(production){
imageVector[[qicount]]<<-paste("https://beta.dataverse.org/custom/pic_dir/", mysessionid,"_",mymodelcount,qicount,".png", sep = "")
}else{
imageVector[[qicount]]<<-paste(R.server$full_url("pic_dir"), "/output",mymodelcount,qicount,".png", sep = "")
}
}
qicount<-0
imageVector<-list()
# Determine whether two "Expected Values" qi's exist
both.ev.exist <- (length(obj$sim.out$x$ev)>0) & (length(obj$sim.out$x1$ev)>0)
# Determine whether two "Predicted Values" qi's exist
both.pv.exist <- (length(obj$sim.out$x$pv)>0) & (length(obj$sim.out$x1$pv)>0)
color.x <- rgb(242, 122, 94, maxColorValue=255)
color.x1 <- rgb(100, 149, 237, maxColorValue=255)
# Interpolation of the above colors in rgb color space:
color.mixed <- rgb(t(round((col2rgb(color.x) + col2rgb(color.x1))/2)), maxColorValue=255)
titles <- obj$setx.labels
# Plot each simulation
if(length(obj$sim.out$x$pv)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x$pv[[1]], main = titles$pv, col = color.x, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$pv)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$pv[[1]], main = titles$pv1, col = color.x1, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x$ev)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x$ev[[1]], main = titles$ev, col = color.x, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$ev)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$ev[[1]], main = titles$ev1, col = color.x1, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$fd)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$fd[[1]], main = titles$fd, col = color.mixed, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(both.pv.exist) {
execMe <- "Zelig::simulations.plot(y=obj$sim.out$x$pv[[1]], y1=obj$sim.out$x1$pv[[1]], main = \"Comparison of Y|X and Y|X1\", col = paste(c(color.x, color.x1), \"80\", sep=\"\"), line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(both.ev.exist) {
execMe <- "Zelig::simulations.plot(y=obj$sim.out$x$ev[[1]], y1=obj$sim.out$x1$ev[[1]], main = \"Comparison of E(Y|X) and E(Y|X1)\", col = paste(c(color.x, color.x1), \"80\", sep=\"\"), line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
return(imageVector)
}
# Returns the path of the per-session log file: an absolute path under the
# web server's document root in production, a relative path otherwise.
logFile <- function(sessionid, production){
  prefix <- if (production) "/var/www/html/custom/log_dir/" else ""
  paste(prefix, "log_", sessionid, ".txt", sep = "")
}
# Appends to the session log file: the Dataverse data citation, the output of
# sessionInfo() (captured via sink), and boilerplate replication code for the
# TwoRavens session.
logSessionInfo <- function(logfile, sessionid, cite){
write(paste("\nData file citation from Dataverse:\n\n",cite,"\n\nR session information:",sep=""),logfile,append=TRUE)
# sink/print/sink captures sessionInfo()'s printed form into the log file.
sink(file = logfile, append=TRUE, type = "output")
print(sessionInfo())
sink()
write(paste("\n\nReplication code for TwoRavens session ",sessionid,". Note that unless your session information is identical to that described above, it is not guaranteed the results will be identical. Please download rookutils.R from https://github.com/IQSS/TwoRavens/tree/master/rook and ensure that you have rookutils.R in your working directory.\n\nlibrary(Rook)\nlibrary(rjson)\nlibrary(jsonlite)\nlibrary(devtools)\ninstall_github(\"IQSS/Zelig\")\nlibrary(Zelig)\nsource(rookutils.R)\n\n",sep=""),logfile,append=TRUE)
}
| /budget_tool/rook/rookutils.R | permissive | fbchow/budget_tool | R | false | false | 15,958 | r | ##
## rookutils.R
##
## 3/3/15
##
# Presently unused attempt at a termination function so as to streamline code for warnings
terminate<-function(response,warning){
jsonWarning <- toJSON(list(warning=warning))
print(jsonWarning)
response$write(jsonWarning)
response$finish()
stop()
}
# VJD: this reads a tab-delimited file. in the future, this should read and load the file based on the type of file defined at ingest. this way R can make use of important metadata such as whether a variable is a factor.
readData <- function(sessionid,logfile){
tryCatch({
mydata<-NULL
mydata<-read.delim(file=paste("/tmp/data_",sessionid,".tab",sep=""))
}, error=function(err){
warning <<- TRUE ## assign up the scope bc inside function
result <<- list(warning=paste("R data loading error: ", err))
}) # if data is not readable, returns a warning and an error
return(mydata) #note mydata might be NULL
}
getDataFromDataverse<-function(hostname, fileid){
path<-paste("http://",hostname,"/api/access/datafile/",fileid,sep="")
mydata<-tryCatch(expr=read.delim(file=path), error=function(e) NULL) # if data is not readable, NULL
return(mydata)
}
# quick way to reassign factor levels in the data, necessary for setx() after data have been subset
refactor <- function(data) {
for(i in 1:ncol(data)) {
if(is.factor(data[,i])) {
data[,i] <- factor(data[,i])
}
}
return(data)
}
# This utility function was necessary when using rjson, rather than jsonlite, to transform list-of-lists to matrix
#
#edgeReformat<-function(edges){
# k<-length(edges)
# new<-matrix(NA,nrow=k,ncol=2)
# for(i in 1:k){
# new[i,1]<-edges[[i]][1]
# new[i,2]<-edges[[i]][2]
# }
# return(new)
#}
buildSetx <- function(setx, varnames) {
outeq <- NULL
alteq <- NULL
call <- NULL
j<-1
k<-1
for(i in 1:length(varnames)){
t <- setx[i,] # under rjson was: unlist(setx[i])
if(t[1]=="" & t[2]=="") {next}
if(t[1]!="") {
outeq[j] <- paste(varnames[i],"=as.numeric(",t[1],")")
j<-j+1
}
if(t[2]!="") {
alteq[k] <- paste(varnames[i],"=as.numeric(",t[2],")")
k<-k+1
}
}
if(!is.null(outeq)) { # x has been set by user
outeq <- paste(outeq, collapse=",")
call[1] <- paste("x.out <- setx(z.out,",outeq,")")
} else { # x has not been set by user, use defaults
call[1] <- paste("x.out <- setx(z.out)")
}
if(!is.null(alteq)) { # x1 has been set by user
alteq <- paste(alteq, collapse=",")
call[2] <- paste("x.alt <- setx(z.out,",alteq,")")
} else if(!is.null(outeq)) { # x1 has not been set by user, but x has been set, so use defaults
call[2] <- paste("x.alt <- setx(z.out)")
}
# else user has not set any covariates, so x is default (above) and x1 is undefined
return(call)
}
buildFormula<-function(dv, linkagelist, varnames=NULL, nomvars){
if(is.null(varnames)){
varnames<-unique(c(dv,linkagelist))
}
print(varnames)
k<-length(varnames)
relmat<-matrix(0,nrow=k,ncol=k)
# define relationship matrix
# relmat[i,j]==1 => "i caused by j"
print(linkagelist)
print(nrow(linkagelist))
for(i in 1:nrow(linkagelist)){
row.position<-min( (1:k)[varnames %in% linkagelist[i,2] ] ) # min() solves ties with shared variable names
col.position<-min( (1:k)[varnames %in% linkagelist[i,1] ] )
relmat[row.position,col.position]<-1
}
print(relmat)
# store matrix contains all backwards linked variables
store<-relmat.n<-relmat
continue<-TRUE
while(continue){
relmat.n<-relmat.n %*% relmat
relmat.n[store==1]<-0 # stops following previously traced path
relmat.n[relmat.n>1]<-1 # converts to boolean indicator matrix
store<-store + relmat.n # trace all long run paths
store[store>1]<-1 # converts to boolean indicator matrix
continue<-(sum(relmat.n)>0) # no new paths to trace
}
j<-min( (1:k)[varnames %in% dv ] )
rhsIndicator<-store[j,] # these are the variables that have a path to dv
rhsIndicator[j]<-0 # do not want dv as its own rhs variable
flag<-rhsIndicator==1
rhs.names<-varnames[flag]
rhs.names[which(rhs.names %in% nomvars)] <- paste("factor(", rhs.names[which(rhs.names %in% nomvars)], ")", sep="") # nominal variables are entered into the formula as factors
formula<-as.formula(paste(dv," ~ ", paste(rhs.names,collapse=" + ")))
print(formula)
return(formula)
}
pCall <- function(data,production,sessionid, types) {
pjson<-preprocess(testdata=data, types=types)
print("new preprocess metadata: ")
print(pjson)
if(production){
subsetfile <- paste("/var/www/html/custom/preprocess_dir/preprocessSubset_",sessionid,".txt",sep="")
write(pjson,file=subsetfile)
url <- paste("https://beta.dataverse.org/custom/preprocess_dir/preprocessSubset_",sessionid,".txt",sep="")
}else{
url <- paste("data/preprocessSubset_",sessionid,".txt",sep="")
write(pjson,file=paste("../",url, sep=""))
}
return(url)
}
## called by executeHistory(), subset.app, and zelig.app.
## this function parses everything$zsubset to a list of subset values. the names of the elements in the list are 1, 2, 3, etc, and corresponds to the indices in the zvars and plot arrays
parseSubset <- function(sub) {
if(class(sub)=="matrix") {
mysubset <- list()
t <- sub
for(i in 1:nrow(t)) {
mysubset[[i]]<-t[i,]
}
} else {
mysubset <- sub
}
return(mysubset)
}
## called by executeHistory, subset.app, and zelig.app.
## sub is a list of subset values, from parseSubset(). varnames and plot are vectors. if plot[i] is "bar", it subsets on all values in sub[[i]]. if plot[i] is "continuous", it subsets on the range specified by the two values in sub[[i]]
subsetData <- function(data, sub, varnames, plot){
fdata<-data # not sure if this is necessary, but just to be sure that the subsetData function doesn't overwrite global mydata
fdata$flag <- 0
skip <- ""
for(i in 1:length(varnames)){
t <- sub[[i]] # under rjson was: unlist(sub[i])
p <- plot[i]
if(t[1]=="" | length(t)==0) {next} #no subset region
else {
if(p=="continuous") {
myexpr <- paste("fdata$flag[which(fdata$\"",varnames[i],"\" < ",t[1]," | fdata$\"",varnames[i],"\" > ",t[2],")] <- 1", sep="")
print(myexpr)
print(colnames(fdata))
eval(parse(text=myexpr))
if(sum(fdata$flag)==nrow(fdata)) { # if this will remove all the data, skip this subset and warn the user
fdata$flag <- 0
skip <- c(skip, varnames[i]) ## eventually warn the user that skip[2:length(skip)] are variables that they have chosen to subset but have been skipped because if they were subsetted we would have no data left
}
else {
fdata <- fdata[which(fdata$flag==0),] # subsets are the overlap of all remaining selected regions.
}
} else if(p=="bar") {
myexpr <- paste("fdata$flag[which(as.character(fdata$\"",varnames[i],"\")%in% t)] <- 1", sep="")
eval(parse(text=myexpr))
if(sum(fdata$flag)==nrow(fdata)) {
fdata$flag <- 1
skip <- c(skip, varnames[i])
}
else {
fdata <- fdata[which(fdata$flag==1),] # notice we keep 1s, above we keep 0s
}
}
}
}
fdata$flag<-NULL
return(fdata)
}
# data is a data.frame of columns of data used for transformation
# func is a string of the form "log(_transvar0)" or "_transvar0^2"
transform <- function(data, func) {
x <- gsub("_transvar0", "data[,1]", func)
if(ncol(data)>1) {
for(i in 2:ncol(data)) {
sub1 <- paste("_transvar", i-1, sep="")
sub2 <- paste("data[,", i, "]")
x <- gsub(sub1, sub2, x)
}
}
x <- gsub("_plus_", "+", x)
x <- paste("data[,1] <- ", x)
print(x)
if(substr(func,1,3)=="log") {
if(any(data[,1]<0, na.rm=TRUE)) {
data[,1] <- data[,1] + -1*min(data[,1])
}
if(any(data[,1]==0, na.rm=TRUE)) {
data[,1] <- data[,1] + .0001
}
}
eval(parse(text=x))
return(data)
}
## called by executeHistory() and transform.app
parseTransform <- function(data, func, vars) {
call <- "no transformation"
t <- which(colnames(data) %in% vars)
tdata <- as.data.frame(data[,t])
colnames(tdata) <- colnames(data)[t]
tdata <- transform(data=tdata, func=func)
tdata <- as.data.frame(tdata[,1])
call <- gsub("_plus_", "+", func) # + operator disappears, probably a jsonlite parsing bug, so + operator is mapped to '_plus_' in the javascript, and remapped to + operator here
call <- gsub("_transvar0", vars[1], call)
if(length(vars)>1) {
for(i in 2:length(vars)) {
sub1 <- paste("_transvar", i-1, sep="")
sub2 <- vars[i]
call <- gsub(sub1, sub2, call)
}
}
# replace non-alphanumerics with '_' so that these variables may be used in R formulas.
call <- gsub("[[:punct:]]", "_", call)
call <- paste("t_", call, sep="")
colnames(tdata) <- call
return(tdata)
}
## called by zelig.app, subset.app, transform.app
# history is everything$callHistory, data is mydata, as initially read
# if empty, i.e. the first call from space i, history is an empty list
# else, history is a data.frame where each row contains the data to reconstruct the call
executeHistory <- function (history, data) {
n <- nrow(history)
if(is.null(n)) {return(data)}
print(n)
for(i in 1:n) {
if(history[i,"func"]=="transform") {
v <- history[i,"zvars"]
if(class(v)=="list") {v <- v[[1]]} # v must be a vector of variable names
f <- history[i,"transform"]
tdata <- parseTransform(data=data, func=f, vars=v)
data <- cbind(data, tdata)
} else if(history[i,"func"]=="subset") {
v <- history[i,"zvars"][[1]] # cell is a list so take the first element
p <- history[i,"zplot"][[1]]
sub <- parseSubset(history[i, "zsubset"][[1]])
data <- subsetData(data=data, sub=sub, varnames=v, plot=p)
}
}
return(data)
}
# Code mostly from Zelig's plots.R function plot.qi(). Eventually, Zelig will implement a more general solution where each plot is stored in the Zelig object.
zplots <- function(obj, path, mymodelcount, mysessionid, production){
writeplot <- function(exec, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production) {
qicount <<- qicount+1
qicount<-qicount+1
eval(parse(text=path))
eval(parse(text=exec))
dev.off()
if(production){
imageVector[[qicount]]<<-paste("https://beta.dataverse.org/custom/pic_dir/", mysessionid,"_",mymodelcount,qicount,".png", sep = "")
}else{
imageVector[[qicount]]<<-paste(R.server$full_url("pic_dir"), "/output",mymodelcount,qicount,".png", sep = "")
}
}
qicount<-0
imageVector<-list()
# Determine whether two "Expected Values" qi's exist
both.ev.exist <- (length(obj$sim.out$x$ev)>0) & (length(obj$sim.out$x1$ev)>0)
# Determine whether two "Predicted Values" qi's exist
both.pv.exist <- (length(obj$sim.out$x$pv)>0) & (length(obj$sim.out$x1$pv)>0)
color.x <- rgb(242, 122, 94, maxColorValue=255)
color.x1 <- rgb(100, 149, 237, maxColorValue=255)
# Interpolation of the above colors in rgb color space:
color.mixed <- rgb(t(round((col2rgb(color.x) + col2rgb(color.x1))/2)), maxColorValue=255)
titles <- obj$setx.labels
# Plot each simulation
if(length(obj$sim.out$x$pv)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x$pv[[1]], main = titles$pv, col = color.x, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$pv)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$pv[[1]], main = titles$pv1, col = color.x1, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x$ev)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x$ev[[1]], main = titles$ev, col = color.x, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$ev)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$ev[[1]], main = titles$ev1, col = color.x1, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(length(obj$sim.out$x1$fd)>0) {
execMe <- "Zelig::simulations.plot(obj$sim.out$x1$fd[[1]], main = titles$fd, col = color.mixed, line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(both.pv.exist) {
execMe <- "Zelig::simulations.plot(y=obj$sim.out$x$pv[[1]], y1=obj$sim.out$x1$pv[[1]], main = \"Comparison of Y|X and Y|X1\", col = paste(c(color.x, color.x1), \"80\", sep=\"\"), line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
if(both.ev.exist) {
execMe <- "Zelig::simulations.plot(y=obj$sim.out$x$ev[[1]], y1=obj$sim.out$x1$ev[[1]], main = \"Comparison of E(Y|X) and E(Y|X1)\", col = paste(c(color.x, color.x1), \"80\", sep=\"\"), line.col = \"black\")"
writeplot(execMe, path, mymodelcount, mysessionid, qicount, color.x, color.x1, color.mixed, titles, production)
}
return(imageVector)
}
logFile <- function(sessionid, production){
if(production){
outfile<-paste("/var/www/html/custom/log_dir/log_",sessionid,".txt",sep="")
} else {
outfile<-paste("log_",sessionid,".txt",sep="")
}
return(outfile)
}
logSessionInfo <- function(logfile, sessionid, cite){
write(paste("\nData file citation from Dataverse:\n\n",cite,"\n\nR session information:",sep=""),logfile,append=TRUE)
sink(file = logfile, append=TRUE, type = "output")
print(sessionInfo())
sink()
write(paste("\n\nReplication code for TwoRavens session ",sessionid,". Note that unless your session information is identical to that described above, it is not guaranteed the results will be identical. Please download rookutils.R from https://github.com/IQSS/TwoRavens/tree/master/rook and ensure that you have rookutils.R in your working directory.\n\nlibrary(Rook)\nlibrary(rjson)\nlibrary(jsonlite)\nlibrary(devtools)\ninstall_github(\"IQSS/Zelig\")\nlibrary(Zelig)\nsource(rookutils.R)\n\n",sep=""),logfile,append=TRUE)
}
|
#' @title rsComposite
#'
#' @description Compositing of remote sensing data based on GPS tracking dates.
#' @param img Object of class \emph{RasterStack} or \emph{RasterBrick}.
#' @param rd Object of class \emph{Date} with \emph{img} observation dates.
#' @param ot Object of class \emph{Date} with reference dates.
#' @param cm Number of deviations from the target date. Default is 1.
#' @param type One of "norm" or "pheno".
#' @param d.buffer Search buffer (expressed in days).
#' @import raster rgdal
#' @importFrom stats lm
#' @seealso \code{\link{imgInt}} \code{\link{dataQuery}}
#' @return A \emph{list}.
#' @details {The function uses a multi-layer raster object to build a composite.
#' It looks at a ginve set of dates (e.g. GPS tracking dates) and estimates a
#' reference date to build the composite for defined by the median of \emph{ot}.
#' The median is then used to estimate Median Absolute Deviation (MAD) which
#' specifies the size of the buffer set aroung the target date within which
#' bands will be considered. Here, \emph{cm} is used as a multiplier to enlarge
#' the temporal buffer. Alternatively, a user define temporal buffer is allowed
#' by using the keyword \emph{d.buffer}. If \emph{ot} countains only one element,
#' the function will use it as a reference date. In this case, if \emph{d.buffer}
#' is NULL the function will set it to 30 by default. The way how the function handles
#' temporal information depends on the \emph{type} keyword. If set to \emph{norm},
#' the function will search for the nearest possible dates within the temporal
#' buffer. However, if \emph{pheno} is set, then the day of the year will be given
#' priority. Thus, if multi-year raster data is provided, older data with a DOY
#' closer to the target that will be used when possible. The output provides:
#' \itemize{
#' \item{\emph{value} - composite of target images}
#' \item{\emph{dates} - per pixel date code}
#' \item{\emph{count} - pixel count of \emph{dates}}
#' \item{\emph{na.count} - count of NA values}
#' \item{\emph{target} - target date}
#' \item{\emph{mad} - temporal buffer}
#' }}
#' @examples \dontrun{
#'
#' require(raster)
#'
#' # read raster data
#' file <- list.files(system.file('extdata', '', package="rsMove"), 'tc.*tif', full.names=TRUE)
#' rsStk <- stack(file)
#' rsStk <- stack(rsStk, rsStk, rsStk) # dummy files for the example
#'
#' # raster dates
#' rd = seq.Date(as.Date("2013-01-01"), as.Date("2013-12-31"), 45)
#'
#' # target date
#' ot = as.Date("2013-06-01")
#'
#' # build composite
#' r.comp <- rsComposite(rsStk, rd, ot, d.buffer=90)
#'
#' }
#' @export
#-------------------------------------------------------------------------------------------------------------------------------#
rsComposite <- function(img, rd, ot, cm=1, type='norm', d.buffer=NULL) {

#-------------------------------------------------------------------------------------------------------------------------------#
# 1. check variables
#-------------------------------------------------------------------------------------------------------------------------------#

  # raster input.  missing() is used instead of exists('img'): inside the
  # function the argument binding always exists, so the original exists()
  # check could never trigger.
  if (missing(img)) {stop('error: "img" is missing')}
  if (!inherits(img, c('RasterStack', 'RasterBrick'))) {stop('error: "img" is not of a valid class')}

  # raster observation dates (one per layer)
  if (missing(rd)) {stop('error: "rd" is missing')}
  if (!inherits(rd, 'Date')) {stop('error: "rd" is not of a valid class')}
  if (length(rd)!=nlayers(img)) {stop('error: "img" and "rd" have different lengths')}

  # reference (e.g. GPS tracking) dates
  if (missing(ot)) {stop('"ot" is missing')}
  if (!inherits(ot, 'Date')) {stop('error: "ot" is not of a valid class')}

  # auxiliary variables
  if (!is.numeric(cm)) {stop('"cm" is not numeric')}
  if (!is.null(d.buffer)) {if (!is.numeric(d.buffer)) {stop('"d.buffer" is not numeric')}}
  if (!type%in%c('norm', 'pheno')) {stop('"type" is not a valid keyword')}

#-------------------------------------------------------------------------------------------------------------------------------#
# 2. handle time information
#-------------------------------------------------------------------------------------------------------------------------------#

  # "pheno" compares days of the year (so layers from several years can
  # contribute); "norm" compares calendar dates directly
  if (type=='pheno') {
    bd <- as.Date(paste0(as.character(format(ot,'%Y')), '-01-01'))
    ot0 <- as.numeric((ot-bd) + 1)
    bd <- as.Date(paste0(as.character(format(rd,'%Y')), '-01-01'))
    rd0 <- as.numeric((rd-bd) + 1)
  } else {
    ot0 <- ot
    rd0 <- rd
  }

  # target date = median of the reference dates; temporal buffer = MAD of the
  # reference dates scaled by cm, unless supplied by the user.  A single
  # reference date is used as-is with a default 30-day buffer.
  if (length(ot)>1) {
    t.date <- median(ot0)
    if (is.null(d.buffer)) {d.buffer <- median(abs(ot0-t.date))*cm}
  } else {
    t.date <- ot0
    if (is.null(d.buffer)) {d.buffer <- 30}
  }

#-------------------------------------------------------------------------------------------------------------------------------#
# 3. define functions
#-------------------------------------------------------------------------------------------------------------------------------#

  # per-pixel compositing: among the non-NA layers whose date falls inside the
  # temporal buffer, pick the value of the layer closest to the target date
  f1 <- function(x) {
    ind <- which(!is.na(x))
    if (length(ind)==0) {return(NA)}
    v <- x[ind]
    d <- rd0[ind]
    diff <- abs(d-t.date)
    ind <- which(diff <= d.buffer)
    if (length(ind)==0) {return(NA)}
    v[ind[which(diff[ind]==min(diff[ind]))]]
  }
  # same selection rule, but returns the (numeric) date code instead of the value
  f2 <- function(x) {
    ind <- which(!is.na(x))
    if (length(ind)==0) {return(NA)}
    d <- rd0[ind]
    diff <- abs(d-t.date)
    ind <- which(diff <= d.buffer)
    if (length(ind)==0) {return(NA)}
    as.numeric(d[ind[which(diff[ind]==min(diff[ind]))]])
  }

#-------------------------------------------------------------------------------------------------------------------------------#
# 4. build composites
#-------------------------------------------------------------------------------------------------------------------------------#

  r.value <- calc(img, f1)
  r.date <- calc(img, f2)

#-------------------------------------------------------------------------------------------------------------------------------#
# 5. summarise per-date pixel usage (this section was mislabelled "4." before)
#-------------------------------------------------------------------------------------------------------------------------------#

  # date codes that were actually used in the composite
  ud <- unique(r.date)
  used <- rd0[as.numeric(rd0)%in%ud]
  # pixel count for each used date.  Fix: the original overwrote ud with
  # as.numeric(rd0) (all input dates), which made `count` a different length
  # than `used` and broke the data.frame whenever some dates were unused.
  count <- sapply(as.numeric(used), function(x) {cellStats(r.date==x, sum)})
  df <- data.frame(date=used, value=as.numeric(used), count=count)
  # number of pixels with no usable observation
  mv <- cellStats(is.na(r.date), sum)
  # return composite, per-pixel dates, usage table and settings
  return(list(value=r.value, date=r.date, count=df, na.count=mv, target=t.date, buffer=d.buffer))
}
#' @title rsComposite
#'
#' @description Compositing of remote sensing data based on GPS tracking dates.
#' @param img Object of class \emph{RasterStack} or \emph{RasterBrick}.
#' @param rd Object of class \emph{Date} with \emph{img} observation dates.
#' @param ot Object of class \emph{Date} with reference dates.
#' @param cm Number of deviations from the target date. Default is 1.
#' @param type One of "norm" or "pheno".
#' @param d.buffer Search buffer (expressed in days).
#' @import raster rgdal
#' @importFrom stats lm
#' @seealso \code{\link{imgInt}} \code{\link{dataQuery}}
#' @return A \emph{list}.
#' @details {The function uses a multi-layer raster object to build a composite.
#' It looks at a given set of dates (e.g. GPS tracking dates) and estimates a
#' reference date to build the composite for defined by the median of \emph{ot}.
#' The median is then used to estimate Median Absolute Deviation (MAD) which
#' specifies the size of the buffer set around the target date within which
#' bands will be considered. Here, \emph{cm} is used as a multiplier to enlarge
#' the temporal buffer. Alternatively, a user-defined temporal buffer is allowed
#' by using the keyword \emph{d.buffer}. If \emph{ot} contains only one element,
#' the function will use it as a reference date. In this case, if \emph{d.buffer}
#' is NULL the function will set it to 30 by default. The way how the function handles
#' temporal information depends on the \emph{type} keyword. If set to \emph{norm},
#' the function will search for the nearest possible dates within the temporal
#' buffer. However, if \emph{pheno} is set, then the day of the year will be given
#' priority. Thus, if multi-year raster data is provided, older data with a DOY
#' closer to the target that will be used when possible. The output provides:
#' \itemize{
#' \item{\emph{value} - composite of target images}
#' \item{\emph{dates} - per pixel date code}
#' \item{\emph{count} - pixel count of \emph{dates}}
#' \item{\emph{na.count} - count of NA values}
#' \item{\emph{target} - target date}
#' \item{\emph{mad} - temporal buffer}
#' }}
#' @examples \dontrun{
#'
#' require(raster)
#'
#' # read raster data
#' file <- list.files(system.file('extdata', '', package="rsMove"), 'tc.*tif', full.names=TRUE)
#' rsStk <- stack(file)
#' rsStk <- stack(rsStk, rsStk, rsStk) # dummy files for the example
#'
#' # raster dates
#' rd = seq.Date(as.Date("2013-01-01"), as.Date("2013-12-31"), 45)
#'
#' # target date
#' ot = as.Date("2013-06-01")
#'
#' # build composite
#' r.comp <- rsComposite(rsStk, rd, ot, d.buffer=90)
#'
#' }
#' @export
#-------------------------------------------------------------------------------------------------------------------------------#
rsComposite <- function(img, rd, ot, cm=1, type='norm', d.buffer=NULL) {

#-------------------------------------------------------------------------------------------------------------------------------#
# 1. check variables
#-------------------------------------------------------------------------------------------------------------------------------#

  # raster input.  missing() is used instead of exists('img'): inside the
  # function the argument binding always exists, so the original exists()
  # check could never trigger.
  if (missing(img)) {stop('error: "img" is missing')}
  if (!inherits(img, c('RasterStack', 'RasterBrick'))) {stop('error: "img" is not of a valid class')}

  # raster observation dates (one per layer)
  if (missing(rd)) {stop('error: "rd" is missing')}
  if (!inherits(rd, 'Date')) {stop('error: "rd" is not of a valid class')}
  if (length(rd)!=nlayers(img)) {stop('error: "img" and "rd" have different lengths')}

  # reference (e.g. GPS tracking) dates
  if (missing(ot)) {stop('"ot" is missing')}
  if (!inherits(ot, 'Date')) {stop('error: "ot" is not of a valid class')}

  # auxiliary variables
  if (!is.numeric(cm)) {stop('"cm" is not numeric')}
  if (!is.null(d.buffer)) {if (!is.numeric(d.buffer)) {stop('"d.buffer" is not numeric')}}
  if (!type%in%c('norm', 'pheno')) {stop('"type" is not a valid keyword')}

#-------------------------------------------------------------------------------------------------------------------------------#
# 2. handle time information
#-------------------------------------------------------------------------------------------------------------------------------#

  # "pheno" compares days of the year (so layers from several years can
  # contribute); "norm" compares calendar dates directly
  if (type=='pheno') {
    bd <- as.Date(paste0(as.character(format(ot,'%Y')), '-01-01'))
    ot0 <- as.numeric((ot-bd) + 1)
    bd <- as.Date(paste0(as.character(format(rd,'%Y')), '-01-01'))
    rd0 <- as.numeric((rd-bd) + 1)
  } else {
    ot0 <- ot
    rd0 <- rd
  }

  # target date = median of the reference dates; temporal buffer = MAD of the
  # reference dates scaled by cm, unless supplied by the user.  A single
  # reference date is used as-is with a default 30-day buffer.
  if (length(ot)>1) {
    t.date <- median(ot0)
    if (is.null(d.buffer)) {d.buffer <- median(abs(ot0-t.date))*cm}
  } else {
    t.date <- ot0
    if (is.null(d.buffer)) {d.buffer <- 30}
  }

#-------------------------------------------------------------------------------------------------------------------------------#
# 3. define functions
#-------------------------------------------------------------------------------------------------------------------------------#

  # per-pixel compositing: among the non-NA layers whose date falls inside the
  # temporal buffer, pick the value of the layer closest to the target date
  f1 <- function(x) {
    ind <- which(!is.na(x))
    if (length(ind)==0) {return(NA)}
    v <- x[ind]
    d <- rd0[ind]
    diff <- abs(d-t.date)
    ind <- which(diff <= d.buffer)
    if (length(ind)==0) {return(NA)}
    v[ind[which(diff[ind]==min(diff[ind]))]]
  }
  # same selection rule, but returns the (numeric) date code instead of the value
  f2 <- function(x) {
    ind <- which(!is.na(x))
    if (length(ind)==0) {return(NA)}
    d <- rd0[ind]
    diff <- abs(d-t.date)
    ind <- which(diff <= d.buffer)
    if (length(ind)==0) {return(NA)}
    as.numeric(d[ind[which(diff[ind]==min(diff[ind]))]])
  }

#-------------------------------------------------------------------------------------------------------------------------------#
# 4. build composites
#-------------------------------------------------------------------------------------------------------------------------------#

  r.value <- calc(img, f1)
  r.date <- calc(img, f2)

#-------------------------------------------------------------------------------------------------------------------------------#
# 5. summarise per-date pixel usage (this section was mislabelled "4." before)
#-------------------------------------------------------------------------------------------------------------------------------#

  # date codes that were actually used in the composite
  ud <- unique(r.date)
  used <- rd0[as.numeric(rd0)%in%ud]
  # pixel count for each used date.  Fix: the original overwrote ud with
  # as.numeric(rd0) (all input dates), which made `count` a different length
  # than `used` and broke the data.frame whenever some dates were unused.
  count <- sapply(as.numeric(used), function(x) {cellStats(r.date==x, sum)})
  df <- data.frame(date=used, value=as.numeric(used), count=count)
  # number of pixels with no usable observation
  mv <- cellStats(is.na(r.date), sum)
  # return composite, per-pixel dates, usage table and settings
  return(list(value=r.value, date=r.date, count=df, na.count=mv, target=t.date, buffer=d.buffer))
}
# Quarterly share of unemployed people in Mexico with upper-secondary or
# higher education, 2011-Q1 through 2014-Q2 (% of all unemployed).
prcnt <-
c(
34.6594306764,
35.2754003647,
35.40613204,
35.1855137062,
36.6282823891,
37.3816513577,
37.5314871844,
36.3784124999,
37.8949982178,
37.9752539821,
37.5238097329,
38.8349502588,
39.5894958061,
40.4058337918
)
# Year and quarter labels for each observation in `prcnt`.
anio <-
c(
rep(2011,4),
rep(2012,4),
rep(2013,4),
rep(2014,2)
)
trim <-
c(
rep(c("I","II","III","IV"),3),
c("I","II")
)
desempl.educ.sup <- data.frame(anio,trim,prcnt)
rownames(desempl.educ.sup) <- paste0(anio,".", trim)
# d1 = excess over the integer part of the first observation; highlights the
# cumulative increase since 2011-Q1.
tinf <- trunc(desempl.educ.sup$prcnt[1])
desempl.educ.sup$d1 <- with(desempl.educ.sup, prcnt-tinf)
desempl.educ.sup
# Horizontal bar chart of the raw percentages.
barplot(desempl.educ.sup$prcnt,
main="Desempleo- Educ. Media Sup o Superior\n en Mexico",
xlab="% del total de desempleados",
names.arg=rownames(desempl.educ.sup),
col=cm.colors(14),
horiz=TRUE,   # TRUE instead of T: T/F can be reassigned and shadowed
las=1)
# Horizontal bar chart of the increase, with a shifted axis drawn manually.
barplot(desempl.educ.sup$d1, main="Incremento en \nDesempleo- Educ. Media Sup o Superior",
xlab="% del total de desempleados",
xaxt="n",
names.arg=rownames(desempl.educ.sup),
col=heat.colors(14)[14:1],
horiz=TRUE,
las=1)
axis(1, at=0:6, labels=tinf:(tinf+6))  # spell out `labels=` (was partial-matched `lab=`)
| /DesemplEducSup.R | no_license | juliosergio/Libro | R | false | false | 1,299 | r | prcnt <-
c(34.6594306764, 35.2754003647, 35.40613204, 35.1855137062,
  36.6282823891, 37.3816513577, 37.5314871844, 36.3784124999,
  37.8949982178, 37.9752539821, 37.5238097329, 38.8349502588,
  39.5894958061, 40.4058337918)
# Year and quarter labels for the 14 quarterly observations.
anio <- rep(c(2011, 2012, 2013, 2014), times = c(4, 4, 4, 2))
trim <- rep(c("I", "II", "III", "IV"), length.out = 14)
desempl.educ.sup <- data.frame(anio, trim, prcnt)
rownames(desempl.educ.sup) <- paste0(anio, ".", trim)
# d1 = percentage above the integer part of the first quarter's value.
tinf <- trunc(desempl.educ.sup$prcnt[1])
desempl.educ.sup$d1 <- desempl.educ.sup$prcnt - tinf
desempl.educ.sup
# Raw percentages as a horizontal bar chart.
barplot(desempl.educ.sup$prcnt,
        main="Desempleo- Educ. Media Sup o Superior\n en Mexico",
        xlab="% del total de desempleados",
        names.arg=rownames(desempl.educ.sup),
        col=cm.colors(14),
        horiz=TRUE,
        las=1)
# Increase since the first quarter, with a manually shifted x axis.
barplot(desempl.educ.sup$d1, main="Incremento en \nDesempleo- Educ. Media Sup o Superior",
        xlab="% del total de desempleados",
        xaxt="n",
        names.arg=rownames(desempl.educ.sup),
        col=heat.colors(14)[14:1],
        horiz=TRUE,
        las=1)
axis(1, at = 0:6, lab = tinf:(tinf + 6))
\name{extract_comb}
\alias{extract_comb}
\title{
Extract Elements in a Combination set
}
\description{
Extract Elements in a Combination set
}
\usage{
extract_comb(m, comb_name)
}
\arguments{
\item{m}{A combination matrix returned by \code{\link{make_comb_mat}}.}
\item{comb_name}{The name of the combination set to extract. Valid names can be obtained with \code{\link{comb_name}}.}
}
\details{
It returns the elements that belong to the specified combination set.
}
\examples{
set.seed(123)
lt = list(a = sample(letters, 10),
b = sample(letters, 15),
c = sample(letters, 20))
m = make_comb_mat(lt)
extract_comb(m, "110")
}
| /man/extract_comb.Rd | permissive | jokergoo/ComplexHeatmap | R | false | false | 582 | rd | \name{extract_comb}
\alias{extract_comb}
\title{
Extract Elements in a Combination set
}
\description{
Extract Elements in a Combination set
}
\usage{
extract_comb(m, comb_name)
}
\arguments{
\item{m}{A combination matrix returned by \code{\link{make_comb_mat}}.}
\item{comb_name}{The name of the combination set to extract. Valid names can be obtained with \code{\link{comb_name}}.}
}
\details{
It returns the elements that belong to the specified combination set.
}
\examples{
set.seed(123)
lt = list(a = sample(letters, 10),
b = sample(letters, 15),
c = sample(letters, 20))
m = make_comb_mat(lt)
extract_comb(m, "110")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lobo.R
\docType{methods}
\name{subset,lobo-method}
\alias{subset,lobo-method}
\title{Subset a LOBO Object}
\usage{
\S4method{subset}{lobo}(x, subset, ...)
}
\arguments{
\item{x}{a \code{lobo} object.}
\item{subset}{a condition to be applied to the \code{data} portion of
\code{x}. See \sQuote{Details}.}
\item{\dots}{ignored.}
}
\value{
A new \code{lobo} object.
}
\description{
Subset a lobo object in a way that is somewhat
analogous to \code{\link{subset.data.frame}}.
}
\seealso{
Other things related to \code{lobo} data: \code{\link{[[,lobo-method}},
\code{\link{[[<-,lobo-method}}, \code{\link{as.lobo}},
\code{\link{lobo-class}}, \code{\link{lobo}},
\code{\link{plot,lobo-method}},
\code{\link{summary,lobo-method}}
}
\author{
Dan Kelley
}
| /pkgs/oce/man/subset-lobo-method.Rd | no_license | vaguiar/EDAV_Project_2017 | R | false | true | 838 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lobo.R
\docType{methods}
\name{subset,lobo-method}
\alias{subset,lobo-method}
\title{Subset a LOBO Object}
\usage{
\S4method{subset}{lobo}(x, subset, ...)
}
\arguments{
\item{x}{a \code{lobo} object.}
\item{subset}{a condition to be applied to the \code{data} portion of
\code{x}. See \sQuote{Details}.}
\item{\dots}{ignored.}
}
\value{
A new \code{lobo} object.
}
\description{
Subset a lobo object in a way that is somewhat
analogous to \code{\link{subset.data.frame}}.
}
\seealso{
Other things related to \code{lobo} data: \code{\link{[[,lobo-method}},
\code{\link{[[<-,lobo-method}}, \code{\link{as.lobo}},
\code{\link{lobo-class}}, \code{\link{lobo}},
\code{\link{plot,lobo-method}},
\code{\link{summary,lobo-method}}
}
\author{
Dan Kelley
}
|
# This gets the data required for the week 1 assignment.
# Here are the steps:
# 1. download the data and unzip the file if it does not already exist locally
# 2. parsing the data
# 3. getting only 2007-02-01 and 2007-02-02 data as requested in the assignment
library(readr)
getHousehold2007Data <- function() {
  # Download (if not already cached locally), read, and filter the UCI
  # household power consumption data set, keeping only the observations for
  # 2007-02-01 and 2007-02-02 and adding a parsed DateTime column.
  #
  # Returns: a data frame (tibble) with the two target days of measurements.
  houseHoldDataFileName <- "household_power_consumption.txt"
  if (!file.exists(houseHoldDataFileName)) {
    # download and unzip the archive only when the text file is absent
    urlpath <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    destFile <- "household_power_consumption.zip"
    # http://stackoverflow.com/questions/3053833/using-r-to-download-zipped-data-file-extract-and-import-data
    download.file(urlpath, destFile)
    unzip(destFile, houseHoldDataFileName)
  }
  # semicolon-delimited; two character date/time columns then seven numerics
  # (reuse the file-name variable instead of repeating the literal)
  householdPower <- read_delim(houseHoldDataFileName, delim = ";",
                               col_types = "ccddddddd", na = c("NA"))
  # dates are stored as d/m/Y strings; %in% selects both target days and,
  # unlike the original `==` chain, never keeps NA-date rows
  household2007 <- householdPower[householdPower$Date %in% c("1/2/2007", "2/2/2007"), ]
  household2007$DateTime <- paste(household2007$Date, household2007$Time, sep = " ")
  household2007$DateTime <- strptime(household2007$DateTime, "%d/%m/%Y %H:%M:%S")
  household2007
}
| /downloadFile.R | no_license | lizicuenca/ExData_Plotting1 | R | false | false | 1,247 | r | # This will getting data that require for week 1 assignment.
# Here are the steps:
# 1. download the data and unzip the file if it does not already exist locally
# 2. parsing the data
# 3. getting only 2007-02-01 and 2007-02-02 data as requested in the assignment
library(readr)
getHousehold2007Data <- function(){
houseHoldDataFileName = "household_power_consumption.txt"
if(!file.exists(houseHoldDataFileName)){
# download and unzip file
urlpath = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
destFile="household_power_consumption.zip"
# http://stackoverflow.com/questions/3053833/using-r-to-download-zipped-data-file-extract-and-import-data
download.file(urlpath,destFile)
unzip(destFile, houseHoldDataFileName)
}
householdPower <- read_delim("household_power_consumption.txt",delim = ";",
col_types = "ccddddddd",na = c("NA"))
household2007 <- householdPower[householdPower$Date=="1/2/2007" |householdPower$Date=="2/2/2007" ,]
household2007$DateTime = paste(household2007$Date, household2007$Time, sep=" ")
household2007$DateTime <- strptime(household2007$DateTime, "%d/%m/%Y %H:%M:%S")
household2007
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{regionQuantileByTargetVar}
\alias{regionQuantileByTargetVar}
\title{Visualize how individual regions are associated with target variable}
\usage{
regionQuantileByTargetVar(
signal,
signalCoord,
regionSet,
rsName = "",
signalCol = paste0("PC", 1:5),
maxRegionsToPlot = 8000,
cluster_rows = TRUE,
row_title = "Region",
column_title = rsName,
column_title_side = "top",
cluster_columns = FALSE,
name = "Percentile of Loading Scores in PC",
col = c("skyblue", "yellow"),
absVal = TRUE,
...
)
}
\arguments{
\item{signal}{Matrix of feature contribution scores (the contribution of
each epigenetic feature to each target variable). One named column for each
target variable.
One row for each original epigenetic feature (should be same order
as original data/signalCoord). For (an unsupervised) example, if PCA was
done on epigenetic data and the
goal was to find region sets associated with the principal components, you
could use the x$rotation output of prcomp(epigenetic data) as the
feature contribution scores/`signal` parameter.}
\item{signalCoord}{A GRanges object or data frame with coordinates
for the genomic signal/original epigenetic data.
Coordinates should be in the
same order as the original data and the feature contribution scores
(each item/row in signalCoord
corresponds to a row in signal). If a data.frame,
must have chr and start columns (optionally can have end column,
depending on the epigenetic data type).}
\item{regionSet}{A genomic ranges (GRanges) object with regions corresponding
to the same biological annotation.
Must be from the same reference genome as the coordinates for the actual data/samples (signalCoord).
The regions that will be visualized.}
\item{rsName}{Character. Name of the region set.
For use as a title for the heatmap.}
\item{signalCol}{A character vector with the names of the sample variables
of interest/target variables (e.g. PCs or sample phenotypes).}
\item{maxRegionsToPlot}{How many top regions from region set to include
in heatmap. Including too many may slow down computation and increase memory
use. If regionSet has more regions than maxRegionsToPlot, a number of regions
equal to maxRegionsToPlot will be randomly sampled from the region set and
these regions will be plotted. Clustering rows is a major limiting factor
on how long it takes to plot the regions so if you want to plot many regions,
you can also set cluster_rows to FALSE.}
\item{cluster_rows}{Logical object, whether to cluster rows or not (may
increase computation time significantly for large number of rows)}
\item{row_title}{Character object, row title}
\item{column_title}{Character object, column title}
\item{column_title_side}{Character object, where to put the column title:
"top" or "bottom"}
\item{cluster_columns}{Logical object, whether to cluster columns.
It is recommended
to keep this as FALSE so it will be easier to compare target
variables that have a certain order such as PCs
(with cluster_columns = FALSE, they will be in the same specified
order in different heatmaps)}
\item{name}{Character object, legend title}
\item{col}{A vector of colors or a color mapping function which
will be passed to the ComplexHeatmap::Heatmap() function. See ?Heatmap
(the "col" parameter) for more details.}
\item{absVal}{Logical. If TRUE, take the absolute value of values in
signal. Choose TRUE if you think there may be some
genomic loci in a region set that will increase and others
will decrease (if there may be anticorrelation between
regions in a region set). Choose FALSE if you expect regions in a
given region set to all change in the same direction (all be positively
correlated with each other).}
\item{...}{Optional parameters for ComplexHeatmap::Heatmap()}
}
\value{
A heatmap. Columns are signalCol's, rows are regions.
This heatmap allows you to see if some regions are
associated with certain target variables but not others.
Also, you can see if a subset of
regions in the region set is associated with
target variables while another subset
is not associated with any target variables.
To color each region, first the (absolute) signal
values within that region are
averaged. Then this average is compared to the distribution of all (absolute)
individual signal values for the given target variable to get
a quantile/percentile
for that region. Colors are based on this quantile/percentile.
The output is a Heatmap object (ComplexHeatmap package).
}
\description{
Visualize how much each region in a region set
is associated with each target variable.
For each target variable (`signalCol`), the average (absolute)
signal value is calculated for
each region in the region set. Then for a given target variable,
the average signal is converted to a percentile/quantile based
on the distribution of all signal values
for that target variable. These values are
plotted in a heatmap.
}
\examples{
data("brcaATACCoord1")
data("brcaATACData1")
data("esr1_chr1")
featureContributionScores <- prcomp(t(brcaATACData1))$rotation
regionByPCHM <- regionQuantileByTargetVar(signal = featureContributionScores,
signalCoord = brcaATACCoord1,
regionSet = esr1_chr1,
rsName = "Estrogen Receptor Chr1",
signalCol=paste0("PC", 1:2),
maxRegionsToPlot = 8000,
cluster_rows = TRUE,
cluster_columns = FALSE,
column_title = rsName,
name = "Percentile of Loading Scores in PC")
}
| /man/regionQuantileByTargetVar.Rd | permissive | databio/COCOA | R | false | true | 5,846 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{regionQuantileByTargetVar}
\alias{regionQuantileByTargetVar}
\title{Visualize how individual regions are associated with target variable}
\usage{
regionQuantileByTargetVar(
signal,
signalCoord,
regionSet,
rsName = "",
signalCol = paste0("PC", 1:5),
maxRegionsToPlot = 8000,
cluster_rows = TRUE,
row_title = "Region",
column_title = rsName,
column_title_side = "top",
cluster_columns = FALSE,
name = "Percentile of Loading Scores in PC",
col = c("skyblue", "yellow"),
absVal = TRUE,
...
)
}
\arguments{
\item{signal}{Matrix of feature contribution scores (the contribution of
each epigenetic feature to each target variable). One named column for each
target variable.
One row for each original epigenetic feature (should be same order
as original data/signalCoord). For (an unsupervised) example, if PCA was
done on epigenetic data and the
goal was to find region sets associated with the principal components, you
could use the x$rotation output of prcomp(epigenetic data) as the
feature contribution scores/`signal` parameter.}
\item{signalCoord}{A GRanges object or data frame with coordinates
for the genomic signal/original epigenetic data.
Coordinates should be in the
same order as the original data and the feature contribution scores
(each item/row in signalCoord
corresponds to a row in signal). If a data.frame,
must have chr and start columns (optionally can have end column,
depending on the epigenetic data type).}
\item{regionSet}{A genomic ranges (GRanges) object with regions corresponding
to the same biological annotation.
Must be from the same reference genome as the coordinates for the actual data/samples (signalCoord).
The regions that will be visualized.}
\item{rsName}{Character. Name of the region set.
For use as a title for the heatmap.}
\item{signalCol}{A character vector with the names of the sample variables
of interest/target variables (e.g. PCs or sample phenotypes).}
\item{maxRegionsToPlot}{How many top regions from region set to include
in heatmap. Including too many may slow down computation and increase memory
use. If regionSet has more regions than maxRegionsToPlot, a number of regions
equal to maxRegionsToPlot will be randomly sampled from the region set and
these regions will be plotted. Clustering rows is a major limiting factor
on how long it takes to plot the regions so if you want to plot many regions,
you can also set cluster_rows to FALSE.}
\item{cluster_rows}{Logical object, whether to cluster rows or not (may
increase computation time significantly for large number of rows)}
\item{row_title}{Character object, row title}
\item{column_title}{Character object, column title}
\item{column_title_side}{Character object, where to put the column title:
"top" or "bottom"}
\item{cluster_columns}{Logical object, whether to cluster columns.
It is recommended
to keep this as FALSE so it will be easier to compare target
variables that have a certain order such as PCs
(with cluster_columns = FALSE, they will be in the same specified
order in different heatmaps)}
\item{name}{Character object, legend title}
\item{col}{A vector of colors or a color mapping function which
will be passed to the ComplexHeatmap::Heatmap() function. See ?Heatmap
(the "col" parameter) for more details.}
\item{absVal}{Logical. If TRUE, take the absolute value of values in
signal. Choose TRUE if you think there may be some
genomic loci in a region set that will increase and others
will decrease (if there may be anticorrelation between
regions in a region set). Choose FALSE if you expect regions in a
given region set to all change in the same direction (all be positively
correlated with each other).}
\item{...}{Optional parameters for ComplexHeatmap::Heatmap()}
}
\value{
A heatmap. Columns are signalCol's, rows are regions.
This heatmap allows you to see if some regions are
associated with certain target variables but not others.
Also, you can see if a subset of
regions in the region set is associated with
target variables while another subset
is not associated with any target variables.
To color each region, first the (absolute) signal
values within that region are
averaged. Then this average is compared to the distribution of all (absolute)
individual signal values for the given target variable to get
a quantile/percentile
for that region. Colors are based on this quantile/percentile.
The output is a Heatmap object (ComplexHeatmap package).
}
\description{
Visualize how much each region in a region set
is associated with each target variable.
For each target variable (`signalCol`), the average (absolute)
signal value is calculated for
each region in the region set. Then for a given target variable,
the average signal is converted to a percentile/quantile based
on the distribution of all signal values
for that target variable. These values are
plotted in a heatmap.
}
\examples{
data("brcaATACCoord1")
data("brcaATACData1")
data("esr1_chr1")
featureContributionScores <- prcomp(t(brcaATACData1))$rotation
regionByPCHM <- regionQuantileByTargetVar(signal = featureContributionScores,
signalCoord = brcaATACCoord1,
regionSet = esr1_chr1,
rsName = "Estrogen Receptor Chr1",
signalCol=paste0("PC", 1:2),
maxRegionsToPlot = 8000,
cluster_rows = TRUE,
cluster_columns = FALSE,
column_title = rsName,
name = "Percentile of Loading Scores in PC")
}
|
library(precrec)
context("AP 1: Autoplot for pipeline functions")
# Test autoplot(object, ...)
# TRUE when the suggested plotting dependency (ggplot2) can be loaded,
# FALSE otherwise; used to skip the autoplot tests when it is unavailable.
# requireNamespace() already returns a logical, so the original
# `if (...) TRUE else FALSE` wrapper was redundant.
ap1_check_libs <- function() {
  requireNamespace("ggplot2", quietly = TRUE)
}
# Each test renders the autoplot of one intermediate pipeline object to a
# null PDF device and checks that a ggplot object is produced without error.

test_that("autoplot fmdat", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  fm <- reformat_data(B500$good_er_scores, B500$labels)
  p <- ggplot2::autoplot(fm)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})

test_that("autoplot cmat", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  confmats <- create_confmats(scores = B500$good_er_scores, labels = B500$labels)
  p <- ggplot2::autoplot(confmats)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})

test_that("autoplot pevals", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  pevals <- calc_measures(scores = B500$good_er_scores, labels = B500$labels)
  p <- ggplot2::autoplot(pevals)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})
| /data/genthat_extracted_code/precrec/tests/test_g_autoplot1_pl.R | no_license | surayaaramli/typeRrh | R | false | false | 1,262 | r | library(precrec)
context("AP 1: Autoplot for pipeline functions")
# Test autoplot(object, ...)
# TRUE when the suggested plotting dependency (ggplot2) can be loaded,
# FALSE otherwise; used to skip the autoplot tests when it is unavailable.
# requireNamespace() already returns a logical, so the original
# `if (...) TRUE else FALSE` wrapper was redundant.
ap1_check_libs <- function() {
  requireNamespace("ggplot2", quietly = TRUE)
}
# Each test renders the autoplot of one intermediate pipeline object to a
# null PDF device and checks that a ggplot object is produced without error.

test_that("autoplot fmdat", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  fm <- reformat_data(B500$good_er_scores, B500$labels)
  p <- ggplot2::autoplot(fm)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})

test_that("autoplot cmat", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  confmats <- create_confmats(scores = B500$good_er_scores, labels = B500$labels)
  p <- ggplot2::autoplot(confmats)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})

test_that("autoplot pevals", {
  if (!ap1_check_libs()) skip("Libraries cannot be loaded")
  pdf(NULL)
  on.exit(dev.off())
  data(B500)
  pevals <- calc_measures(scores = B500$good_er_scores, labels = B500$labels)
  p <- ggplot2::autoplot(pevals)
  expect_true(all(class(p) == c("gg", "ggplot")))
  expect_error(p, NA)
})
|
kable(head(transaction_cleaned)) %>% kable_styling()
library(readr) # Read Rectangular Text Data
library(dplyr) # A Grammar of Data Manipulation
library(purrr) # Functional Programming Tools
library(stringr) # Simple, Consistent Wrappers for Common String Operations
library(tidyr) # Tidy Messy Data
library(lubridate) # Make Dealing with Dates a Little Easier
library(kableExtra) # Construct Complex Table with 'kable' and Pipe Syntax
library(factoextra) # Extract and Visualize the Results of Multivariate Data Analyses
library(FactoMineR) # Multivariate Exploratory Data Analysis and Data Mining
library(gridExtra) # Miscellaneous Functions for "Grid" Graphics
library(float) # 32-Bit Floats
library(recommenderlab) # Lab for Developing and Testing Recommender Algorithms
# 【1】Read Raw Data
app <- read_csv("app_dat.csv")
category_ref <- read_csv("category_ref.csv")
device_ref <- read_csv("device_ref.csv")
in_app <- read_csv("in-app_dat.csv")
transaction <- read_csv("transaction_dat.csv")
# NOTE(review): account uses base read.csv() while everything else uses
# readr::read_csv(), so account is a base data.frame rather than a tibble --
# confirm this mix is intentional.
account <- read.csv("account_dat.csv")
# 【2】Data Cleaning
# (1)Transaction Table: Remove duplicates in the transaction tables
transaction_cleaned <- transaction %>%
  distinct() %>%
  rename("transaction_date"=create_dt) %>%
  mutate(device_id=as.character(device_id))
remove(transaction)
# (2) App Table & In App Table: Remove wrongly labelled app_name ("#NAME?")
# Create parent_app_content_id and type columns so it can be combined with
# the in_app table.
app_cleaned <- app %>%
  filter(app_name!="#NAME?") %>%
  mutate(type=NA,
         parent_app_content_id=content_id,
         device_id=as.character(device_id))
# Join in-app rows to their parent app, keep the app-table column layout,
# then stack the app rows underneath.
app_and_in_app_cleaned <- in_app %>%
  left_join(app_cleaned,by=c("parent_app_content_id"="content_id")) %>%
  rename("type"=type.x) %>%
  select(names(app_cleaned)) %>%
  rbind.data.frame(app_cleaned)
remove(app)
remove(app_cleaned)
remove(in_app)
# (3) Account Table: Fix the issue of mixed date formats.
# helper = first "/"-delimited token of create_dt; a 4-character token means
# the string starts with a year (yyyy-style), otherwise it is parsed as m/d/y.
account_cleaned <- account %>%
  mutate(helper=as.character(map(create_dt,~str_split(.x,"/")[[1]][1]))) %>%
  mutate(account_creation_date=if_else(
    str_length(helper)==4, ymd(create_dt), mdy(create_dt)
  )) %>%
  select(-create_dt,-helper)
remove(account)
# (4) Device Table
device_ref_cleaned <- device_ref %>%
  mutate(device_id=as.character(device_id))
remove(device_ref)
#【3】Join the fact tables with dimension tables to create a transaction master table for downstream analysis
# Filter out transactions without account information
# Filter out transactions without app information
# Filter out transactions where account_creation_date > transaction_date
transaction_master_table <- transaction_cleaned %>%
  left_join(account_cleaned,by=c("acct_id"="acct_id")) %>%
  left_join(app_and_in_app_cleaned,by=c("content_id"="content_id")) %>%
  rename("device_id_from_transaction"=device_id.x,"device_id_from_app"=device_id.y) %>%
  left_join(device_ref_cleaned,by=c("device_id_from_transaction"="device_id")) %>%
  left_join(category_ref,by=c("category_id"="category_id")) %>%
  filter(!is.na(account_creation_date) ) %>%
  filter(!is.na(app_name)) %>%
  filter(account_creation_date<=transaction_date)
remove(account_cleaned)
remove(app_and_in_app_cleaned)
remove(category_ref)
remove(device_ref_cleaned)
remove(transaction_cleaned)
# write_csv(transaction_master_table,"transaction_master_table.csv")
#【4】Add a column called "revenue_model" to the transaction_master_table (transaction level)
# (1) If an app never charges users, it's a free app
# (2) If an app charges users only when they download it, it's a paid app
# (3) If an app charges users in-app by consumable, it's a freemium consumable app
# (4) If an app charges users in-app by subscription, it's a freemium subscription app
# Classify each app into one revenue model based on its in-app type and total
# sales, then attach the label to every transaction row.
app_with_revenue_model <- transaction_master_table %>%
  group_by(app_name, category_name, type) %>%
  summarise(total_sales = sum(price)) %>%
  mutate(revenue_model =
    case_when(
      is.na(type) & total_sales == 0 ~ "free app",
      is.na(type) & total_sales > 0 ~ "paid app",
      type == "consumable" ~ "freemium consumable app",
      # Fixed: the label previously had a trailing space
      # ("freemium subscription app ") which would silently create a
      # distinct category in any downstream grouping or comparison.
      type == "subscription" ~ "freemium subscription app"
    )
  ) %>%
  # Keep one row per app (the first after ordering) and drop the grouping
  # before selecting the lookup columns.
  arrange(app_name, category_name, type, desc(total_sales)) %>%
  filter(row_number() == 1) %>%
  ungroup() %>%
  select(app_name, revenue_model)
transaction_master_table <- transaction_master_table %>%
  left_join(app_with_revenue_model, by = c("app_name" = "app_name"))
remove(app_with_revenue_model)
# 【5】 2016-08-22, 2016-08-23, 2016-08-24 were missing.
# Diagnostic only: flag dates whose gap to the previous observed date is not
# exactly one day (i.e. gaps in the daily series); the result is discarded.
transaction_date_diagnosis <- transaction_master_table %>%
  group_by(transaction_date) %>%
  summarise(total_price=sum(price)) %>%
  arrange(transaction_date) %>%
  mutate(helper=lag(transaction_date)) %>%
  filter(transaction_date-helper!=1)
remove(transaction_date_diagnosis)
#【6】Summary: in the downstream analysis
# Count distinct values per column of the master table.
sapply(transaction_master_table, function(x){length(unique(x))})
# (1) There are 5 categories of app.
# (2) There are 2 devices.
# (3) There are 996 apps categorized into 4 types.
# (4) There are 28456 accounts
# (5) There are 2 payment types.
# (6) There are 110 days of data (2016-06-01 to 2016-09-21)
# (7) There are 3454546 transactions (purchase + download)
# 【7】split transaction_master_table into downloads and purchases
# price == 0 means a free download; price > 0 means a purchase.
download_master_table <- transaction_master_table %>%
  filter(price==0)
purchase_master_table <- transaction_master_table %>%
  filter(price>0)
remove(transaction_master_table)
| /1.0 Data Processing.R | no_license | Joshuacourse/Joshuacourse.github.io | R | false | false | 5,552 | r |
kable(head(transaction_cleaned)) %>% kable_styling()
library(readr) # Read Rectangular Text Data
library(dplyr) # A Grammar of Data Manipulation
library(purrr) # Functional Programming Tools
library(stringr) # Simple, Consistent Wrappers for Common String Operations
library(tidyr) # Tidy Messy Data
library(lubridate) # Make Dealing with Dates a Little Easier
library(kableExtra) # Construct Complex Table with 'kable' and Pipe Syntax
library(factoextra) # Extract and Visualize the Results of Multivariate Data Analyses
library(FactoMineR) # Multivariate Exploratory Data Analysis and Data Mining
library(gridExtra) # Miscellaneous Functions for "Grid" Graphics
library(float) # 32-Bit Floats
library(recommenderlab) # Lab for Developing and Testing Recommender Algorithms
# 【1】Read Raw Data
app <- read_csv("app_dat.csv")
category_ref <- read_csv("category_ref.csv")
device_ref <- read_csv("device_ref.csv")
in_app <- read_csv("in-app_dat.csv")
transaction <- read_csv("transaction_dat.csv")
account <- read.csv("account_dat.csv")
# 【2】Data Cleaning
# (1)Transaction Table: Remove duplicates in the transaction tables
transaction_cleaned <- transaction %>%
distinct() %>%
rename("transaction_date"=create_dt) %>%
mutate(device_id=as.character(device_id))
remove(transaction)
# (2) App Table & In App Table: Remove wrongly label app_name ("#NAME?")
# Create parent_app_content_id and type columns so it could be combined with in_app table
app_cleaned <- app %>%
filter(app_name!="#NAME?") %>%
mutate(type=NA,
parent_app_content_id=content_id,
device_id=as.character(device_id))
app_and_in_app_cleaned <- in_app %>%
left_join(app_cleaned,by=c("parent_app_content_id"="content_id")) %>%
rename("type"=type.x) %>%
select(names(app_cleaned)) %>%
rbind.data.frame(app_cleaned)
remove(app)
remove(app_cleaned)
remove(in_app)
# (3) Account Table: Fix the issue of mixed date format
account_cleaned <- account %>%
mutate(helper=as.character(map(create_dt,~str_split(.x,"/")[[1]][1]))) %>%
mutate(account_creation_date=if_else(
str_length(helper)==4, ymd(create_dt), mdy(create_dt)
)) %>%
select(-create_dt,-helper)
remove(account)
# (4) Device Table
device_ref_cleaned <- device_ref %>%
mutate(device_id=as.character(device_id))
remove(device_ref)
#【3】Join the fact tables with dimension tables to create a transaction master table for downstream analysis
# Filter out transactions without account information
# Filter out transactions without app information
# Filter out transactions where account_creation_date > transaction_date
transaction_master_table <- transaction_cleaned %>%
left_join(account_cleaned,by=c("acct_id"="acct_id")) %>%
left_join(app_and_in_app_cleaned,by=c("content_id"="content_id")) %>%
rename("device_id_from_transaction"=device_id.x,"device_id_from_app"=device_id.y) %>%
left_join(device_ref_cleaned,by=c("device_id_from_transaction"="device_id")) %>%
left_join(category_ref,by=c("category_id"="category_id")) %>%
filter(!is.na(account_creation_date) ) %>%
filter(!is.na(app_name)) %>%
filter(account_creation_date<=transaction_date)
remove(account_cleaned)
remove(app_and_in_app_cleaned)
remove(category_ref)
remove(device_ref_cleaned)
remove(transaction_cleaned)
# write_csv(transaction_master_table,"transaction_master_table.csv")
#【4】Add a column called "revenue_model" to the transaction_master_table (transaction level)
# (1) If an app never charge users, it's a free app
# (2) If an app charge users only when user download it, its a paid app
# (3) If an app charge users in-app by consumable, its a freemium consumable app
# (4) If an app charge users in-app by subscription, its a freemium subscription app
# Classify each app into one revenue model based on its in-app type and total
# sales, then attach the label to every transaction row.
app_with_revenue_model <- transaction_master_table %>%
  group_by(app_name, category_name, type) %>%
  summarise(total_sales = sum(price)) %>%
  mutate(revenue_model =
    case_when(
      is.na(type) & total_sales == 0 ~ "free app",
      is.na(type) & total_sales > 0 ~ "paid app",
      type == "consumable" ~ "freemium consumable app",
      # Fixed: the label previously had a trailing space
      # ("freemium subscription app ") which would silently create a
      # distinct category in any downstream grouping or comparison.
      type == "subscription" ~ "freemium subscription app"
    )
  ) %>%
  # Keep one row per app (the first after ordering) and drop the grouping
  # before selecting the lookup columns.
  arrange(app_name, category_name, type, desc(total_sales)) %>%
  filter(row_number() == 1) %>%
  ungroup() %>%
  select(app_name, revenue_model)
transaction_master_table <- transaction_master_table %>%
  left_join(app_with_revenue_model, by = c("app_name" = "app_name"))
remove(app_with_revenue_model)
# 【5】 2016-08-22, 2016-08-23, 2016-08-24 were missing.
# Diagnostic only: flag dates whose gap to the previous observed date is not
# exactly one day (i.e. gaps in the daily series); the result is discarded.
transaction_date_diagnosis <- transaction_master_table %>%
  group_by(transaction_date) %>%
  summarise(total_price=sum(price)) %>%
  arrange(transaction_date) %>%
  mutate(helper=lag(transaction_date)) %>%
  filter(transaction_date-helper!=1)
remove(transaction_date_diagnosis)
#【6】Summary: in the downstream analysis
# Count distinct values per column of the master table.
sapply(transaction_master_table, function(x){length(unique(x))})
# (1) There are 5 categories of app.
# (2) There are 2 devices.
# (3) There are 996 apps categorized into 4 types.
# (4) There are 28456 accounts
# (5) There are 2 payment types.
# (6) There are 110 days of data (2016-06-01 to 2016-09-21)
# (7) There are 3454546 transactions (purchase + download)
# 【7】split transaction_master_table into downloads and purchases
# price == 0 means a free download; price > 0 means a purchase.
download_master_table <- transaction_master_table %>%
  filter(price==0)
purchase_master_table <- transaction_master_table %>%
  filter(price>0)
remove(transaction_master_table)
|
## DATA SCIENCE - COURSERA - T DELALOY
## we will be using the "Individual household electric power consumption Data Set"
## first time
library(data.table)
# NOTE(review): hardcoded, machine-specific working directory -- this breaks
# on any other machine; prefer relative paths or a project root.
setwd("C:/Users/I051921/Desktop/Prediction (KXEN)/Coursera")
## Open File
file <- "EXPLORING/household_power_consumption.txt"
data <- fread(file)
## transform data: parse the day/month/year Date column
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## select data between 2 dates (keep only 2007-02-01 and 2007-02-02)
data <- data[data$Date=="2007-02-01" | data$Date=="2007-02-02"]
## Convert data to a data frame
class(data)  # NOTE(review): bare class() call, result unused (no-op)
data <- data.frame(data)
## Convert columns 3 to 9 to numeric
for(i in c(3:9)) {
  class(data[,i])  # NOTE(review): no-op inside the loop; result discarded
  data[,i] <- as.numeric(as.character(data[,i]))
}
## Create Date_Time variable by pasting date and time strings
data$DateTime <- paste(data$Date, data$Time)
## Convert Date_Time variable to proper format (POSIXlt)
data$DateTime <- strptime(data$DateTime, format="%Y-%m-%d %H:%M:%S")
## create Plot 1: histogram of global active power, written to plot1.png
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power(kilowatts)")
dev.off()
| /plot1.R | no_license | TDELALOY/ExData_Plotting1 | R | false | false | 1,135 | r | ## DATA SCIENCE - COURSERA - T DELALOY
## we will be using the "Individual household electric power consumption Data Set"
## first time
library(data.table)
# NOTE(review): hardcoded, machine-specific working directory -- this breaks
# on any other machine; prefer relative paths or a project root.
setwd("C:/Users/I051921/Desktop/Prediction (KXEN)/Coursera")
## Open File
file <- "EXPLORING/household_power_consumption.txt"
data <- fread(file)
## transform data: parse the day/month/year Date column
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## select data between 2 dates (keep only 2007-02-01 and 2007-02-02)
data <- data[data$Date=="2007-02-01" | data$Date=="2007-02-02"]
## Convert data to a data frame
class(data)  # NOTE(review): bare class() call, result unused (no-op)
data <- data.frame(data)
## Convert columns 3 to 9 to numeric
for(i in c(3:9)) {
  class(data[,i])  # NOTE(review): no-op inside the loop; result discarded
  data[,i] <- as.numeric(as.character(data[,i]))
}
## Create Date_Time variable by pasting date and time strings
data$DateTime <- paste(data$Date, data$Time)
## Convert Date_Time variable to proper format (POSIXlt)
data$DateTime <- strptime(data$DateTime, format="%Y-%m-%d %H:%M:%S")
## create Plot 1: histogram of global active power, written to plot1.png
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power(kilowatts)")
dev.off()
|
# Curso de R
# Hunter, Maria O
# Departamento de Agricultura Tropical
# Universidade Federal de Mato Grosso - Cuiaba
# hunter.maria (at) gmail.com
# Baseado no curso de:
# Gorgens, Eric Bastos
# Departamento de Engenharia Florestal
# Universidade Federal dos Vales do Jequitinhonha e Mucuri - UFVJM
# Diamantina, Brazil
# eric.gorgens (at) ufvjm.edu.br
#
#------------------------------------------------------------------------------------------
#### DATA EXPLORATION: R'S NATIVE GRAPHICS PACKAGE ####
# Example source: http://ecologia.ib.usp.br/bie5782/doku.php?id=bie5782:03_apostila:05a-graficos)
# Create a data frame with the example data (riqueza = richness, area,
# categoria = size category)
df1 = data.frame(riqueza = c(15,18,22,24,25,30,31,34,37,39,41,45),
                 area = c(2,4.5,6,10,30,34,50,56,60,77.5,80,85),
                 categoria = rep(c("pequeno", "grande"), each=6))
head(df1) # show the header and first rows
# Scatter plot
?plot
plot(riqueza~area, data = df1)
plot(df1$area, df1$riqueza) # alternative syntax
plot(riqueza~area, data=df1)
# example splitting the data into two colours by category
plot(riqueza~area, data=df1[df1$categoria=='pequeno',],
     col='red',xlim=c(0,90), ylim=c(10,50))
points(riqueza~area, data=df1[df1$categoria=='grande',],
       col='black')
legend('bottomright',c('pequeno','grande'),col=c('red','black'),pch=c(1,1),ncol=2)
# Specifying other parameters: x-axis label (xlab), y-axis label (ylab),
# axis limits (xlim/ylim), main title (main)
plot(riqueza~area, data = df1, xlab="Area (m²)", ylab="Riqueza (# espécies)",
     xlim=c(0,90), ylim=c(0,90), main="Riqueza = f(area)")
# Add a linear trend line
abline(lm(riqueza~area, data = df1))
# a linear model (lm) between the two variables, drawn with abline()
text(60,60, 'R-2= 0.95', pos=4)
# Create a boxplot
# here 'categoria' acts as a factor
boxplot(riqueza~categoria, data = df1)
# Create a bar plot
barplot(df1$riqueza) # note there are 12 bars because there are 12 points, in order
# Create a histogram
hist(df1$riqueza)
#### DATA EXPLORATION: THE GGPLOT PACKAGE ####
library(ggplot2)
# Build a fictitious data frame for this example
set.seed(22136) # fix the RNG so everyone gets the same results
df <- rbind(data.frame(group='A', tempo=rnorm(n=200, mean=10, sd=3), peso=rnorm(n=200, mean=100, sd=20)),
            data.frame(group='B', tempo=rnorm(n=200, mean=10, sd=3), peso=rnorm(n=200, mean=105, sd=20)))
# rbind combines by rows: here it stacks the group-A frame on the group-B frame
# rnorm draws random values around a given mean with a given sd
head(df) # show the first rows of the data frame
# Scatter plot of tempo (time) as a function of peso (weight)
ggplot(df, aes(tempo, peso)) + geom_point()
# the first part defines the data set (df) and the axes to use
# (given via aes -- "aesthetic")
# then the points are added ("geometry - points" = geom_point)
# Scatter plot of peso as a function of tempo
ggplot(df, aes(peso, tempo)) + geom_point()
# identical except the axis order is swapped
# create a boxplot
ggplot(df, aes(group, peso)) + geom_boxplot()
# here 'group' is a factor
# create a histogram
ggplot(df, aes(peso)) + geom_histogram(binwidth=15)
# one histogram per group, arranged in the same window
ggplot(df, aes(peso)) + geom_histogram(binwidth=10) + facet_wrap(~group, nrow=2, ncol=1)
# one histogram per group in a single plot, with different colours
ggplot(df, aes(peso, color = group)) + geom_histogram(binwidth=10)
#### BACK TO THE BASE GRAPHICS PACKAGE ####
# to show several plots in one panel with base graphics,
# use par(mfrow=c(rows, cols))
par(mfrow=c(2,1))
hist(df[df$group=='A','peso'])
hist(df[df$group=='B','peso'])
hist(df[df$group=='A','peso'], main=NA, xlab = 'Group A')
hist(df[df$group=='B','peso'], main=NA, xlab= 'Group B')
# remember to restore the original plotting parameters when done
par(mfrow=c(1,1))
#### CORRELATION AND COVARIANCE ####
data(mtcars)
names(mtcars)
dim(mtcars)
head(mtcars)
tail(mtcars)
# Correlations and covariance
cor(mtcars, method="spearman")
cor(mtcars, method="pearson")
cov(mtcars)
# Correlogram
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually left commented out, as on the line below.
install.packages('corrgram')
library(corrgram) # load the package
# install.packages("corrgram") # command to install the package
corrgram(mtcars, order=TRUE, lower.panel=panel.shade,
         upper.panel=panel.pts, text.panel=panel.txt,
         main="Correlograma")
# other panel options include panel.conf (confidence), panel.pts (scatter),
# panel.density, panel.bar, panel.ellipse, panel.minmax, col.regions, panel.pie
corrgram(mtcars, order=T, lower.panel = panel.shade,
         upper.panel = panel.conf, diag.panel = panel.density)
#### DESCRIPTIVE STATISTICS ####
data(mtcars)
# Basic statistics
mean(mtcars$mpg) # mean
var(mtcars$mpg) # variance
sd(mtcars$mpg) # standard deviation
quantile(mtcars$mpg, c(0.25, 0.50, 0.75)) # quantiles
max(mtcars$mpg) # maximum
min(mtcars$mpg) # minimum
median(mtcars$mpg) # median
IQR(mtcars$mpg) # interquartile range
# Now see the summary function
summary(mtcars)
#### USING THE SIMPLIFIED APPLY: SAPPLY ####
sapply(mtcars, mean)
sapply(mtcars, min)
sapply(mtcars, max)
sapply(mtcars, var)
sapply(mtcars, sd)
#### TESTS OF STATISTICAL ASSUMPTIONS ####
# Import data
#df = read.csv("lidar.csv", header=TRUE, sep="")
df <- read.csv("flowering_alien_vs_indigen.csv", sep = ";") # the separator may differ between machines
names(df)
#-------------------------------------------------------------
# Normality
# graphical method
qqnorm(df$Flowering) # compare the data against normal-distribution quantiles
qqline(df$Flowering) # add a line representing perfect normality
# Shapiro test -- p < 0.1 means the data are not normal
shapiro.test(df$Flowering)
# Lilliefors test
library(nortest)
lillie.test(df$Flowering) # compares the data with a normal distribution via a K-S test
#-------------------------------------------------------------
# Homogeneity of variance
# Breusch-Pagan-Godfrey test
library(lmtest)
bptest(mpg ~ cyl, data=mtcars) # p-value > 0.05: heteroskedasticity not significant
#### HYPOTHESIS TESTS ####
#### CHI-SQUARED TEST ####
# comparing flowering time of native and alien species
df <- read.csv("flowering_alien_vs_indigen.csv", sep = ";") # the separator may differ between machines
head(df)
names(df)
# explore the distribution graphically.
library(ggplot2)
ggplot(df, aes(Flowering)) + geom_histogram() + facet_wrap(~Status, nrow = 2, ncol = 1)
# and via the chi-squared test.
m <- table(df$Status, df$Flowering)
Xsq <- chisq.test(m) # test summary
Xsq$observed # observed values (same as matrix m)
Xsq$expected # expected values under the null hypothesis
Xsq$residuals # Pearson residuals
Xsq$stdres # standardized residuals
matplot(Xsq$observed, Xsq$expected)
abline(a=0,b=1)
#### KOLMOGOROV-SMIRNOV TEST ####
# explore the distribution graphically.
require(ggplot2)  # NOTE(review): library() is preferred over require() for loading
ggplot(df, aes(Flowering)) + geom_histogram() + facet_wrap(~Status, nrow = 2, ncol = 1)
# using the KS test,
ks.test(df$Flowering[df$Status == "indigen"],
        df$Flowering[df$Status == "Neophyt"])
#### F TEST ####
# Use randomly generated data
x <- rnorm(50, mean = 0, sd = 2)
y <- rnorm(30, mean = 1, sd = 1)
var.test(x, y) # do they have the same variance?
#### T TEST ####
#data(sleep)
#summary(sleep)
t.test(Flowering ~ Status, data = df)
| /Hunter_Aula2v2.R | no_license | mohunter/RCourse | R | false | false | 8,015 | r | # Curso de R
# Hunter, Maria O
# Departamento de Agricultura Tropical
# Universidade Federal de Mato Grosso - Cuiaba
# hunter.maria (at) gmail.com
# Baseado no curso de:
# Gorgens, Eric Bastos
# Departamento de Engenharia Florestal
# Universidade Federal dos Vales do Jequitinhonha e Mucuri - UFVJM
# Diamantina, Brazil
# eric.gorgens (at) ufvjm.edu.br
#
#------------------------------------------------------------------------------------------
#### DATA EXPLORATION: R'S NATIVE GRAPHICS PACKAGE ####
# Example source: http://ecologia.ib.usp.br/bie5782/doku.php?id=bie5782:03_apostila:05a-graficos)
# Create a data frame with the example data (riqueza = richness, area,
# categoria = size category)
df1 = data.frame(riqueza = c(15,18,22,24,25,30,31,34,37,39,41,45),
                 area = c(2,4.5,6,10,30,34,50,56,60,77.5,80,85),
                 categoria = rep(c("pequeno", "grande"), each=6))
head(df1) # show the header and first rows
# Scatter plot
?plot
plot(riqueza~area, data = df1)
plot(df1$area, df1$riqueza) # alternative syntax
plot(riqueza~area, data=df1)
# example splitting the data into two colours by category
plot(riqueza~area, data=df1[df1$categoria=='pequeno',],
     col='red',xlim=c(0,90), ylim=c(10,50))
points(riqueza~area, data=df1[df1$categoria=='grande',],
       col='black')
legend('bottomright',c('pequeno','grande'),col=c('red','black'),pch=c(1,1),ncol=2)
# Specifying other parameters: x-axis label (xlab), y-axis label (ylab),
# axis limits (xlim/ylim), main title (main)
plot(riqueza~area, data = df1, xlab="Area (m²)", ylab="Riqueza (# espécies)",
     xlim=c(0,90), ylim=c(0,90), main="Riqueza = f(area)")
# Add a linear trend line
abline(lm(riqueza~area, data = df1))
# a linear model (lm) between the two variables, drawn with abline()
text(60,60, 'R-2= 0.95', pos=4)
# Create a boxplot
# here 'categoria' acts as a factor
boxplot(riqueza~categoria, data = df1)
# Create a bar plot
barplot(df1$riqueza) # note there are 12 bars because there are 12 points, in order
# Create a histogram
hist(df1$riqueza)
#### DATA EXPLORATION: THE GGPLOT PACKAGE ####
library(ggplot2)
# Build a fictitious data frame for this example
set.seed(22136) # fix the RNG so everyone gets the same results
df <- rbind(data.frame(group='A', tempo=rnorm(n=200, mean=10, sd=3), peso=rnorm(n=200, mean=100, sd=20)),
            data.frame(group='B', tempo=rnorm(n=200, mean=10, sd=3), peso=rnorm(n=200, mean=105, sd=20)))
# rbind combines by rows: here it stacks the group-A frame on the group-B frame
# rnorm draws random values around a given mean with a given sd
head(df) # show the first rows of the data frame
# Scatter plot of tempo (time) as a function of peso (weight)
ggplot(df, aes(tempo, peso)) + geom_point()
# the first part defines the data set (df) and the axes to use
# (given via aes -- "aesthetic")
# then the points are added ("geometry - points" = geom_point)
# Scatter plot of peso as a function of tempo
ggplot(df, aes(peso, tempo)) + geom_point()
# identical except the axis order is swapped
# create a boxplot
ggplot(df, aes(group, peso)) + geom_boxplot()
# here 'group' is a factor
# create a histogram
ggplot(df, aes(peso)) + geom_histogram(binwidth=15)
# one histogram per group, arranged in the same window
ggplot(df, aes(peso)) + geom_histogram(binwidth=10) + facet_wrap(~group, nrow=2, ncol=1)
# one histogram per group in a single plot, with different colours
ggplot(df, aes(peso, color = group)) + geom_histogram(binwidth=10)
#### BACK TO THE BASE GRAPHICS PACKAGE ####
# to show several plots in one panel with base graphics,
# use par(mfrow=c(rows, cols))
par(mfrow=c(2,1))
hist(df[df$group=='A','peso'])
hist(df[df$group=='B','peso'])
hist(df[df$group=='A','peso'], main=NA, xlab = 'Group A')
hist(df[df$group=='B','peso'], main=NA, xlab= 'Group B')
# remember to restore the original plotting parameters when done
par(mfrow=c(1,1))
#### CORRELATION AND COVARIANCE ####
data(mtcars)
names(mtcars)
dim(mtcars)
head(mtcars)
tail(mtcars)
# Correlations and covariance
cor(mtcars, method="spearman")
cor(mtcars, method="pearson")
cov(mtcars)
# Correlogram
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually left commented out, as on the line below.
install.packages('corrgram')
library(corrgram) # load the package
# install.packages("corrgram") # command to install the package
corrgram(mtcars, order=TRUE, lower.panel=panel.shade,
         upper.panel=panel.pts, text.panel=panel.txt,
         main="Correlograma")
# other panel options include panel.conf (confidence), panel.pts (scatter),
# panel.density, panel.bar, panel.ellipse, panel.minmax, col.regions, panel.pie
corrgram(mtcars, order=T, lower.panel = panel.shade,
         upper.panel = panel.conf, diag.panel = panel.density)
#### DESCRIPTIVE STATISTICS ####
data(mtcars)
# Basic statistics
mean(mtcars$mpg) # mean
var(mtcars$mpg) # variance
sd(mtcars$mpg) # standard deviation
quantile(mtcars$mpg, c(0.25, 0.50, 0.75)) # quantiles
max(mtcars$mpg) # maximum
min(mtcars$mpg) # minimum
median(mtcars$mpg) # median
IQR(mtcars$mpg) # interquartile range
# Now see the summary function
summary(mtcars)
#### USING THE SIMPLIFIED APPLY: SAPPLY ####
sapply(mtcars, mean)
sapply(mtcars, min)
sapply(mtcars, max)
sapply(mtcars, var)
sapply(mtcars, sd)
#### TESTS OF STATISTICAL ASSUMPTIONS ####
# Import data
#df = read.csv("lidar.csv", header=TRUE, sep="")
df <- read.csv("flowering_alien_vs_indigen.csv", sep = ";") # the separator may differ between machines
names(df)
#-------------------------------------------------------------
# Normality
# graphical method
qqnorm(df$Flowering) # compare the data against normal-distribution quantiles
qqline(df$Flowering) # add a line representing perfect normality
# Shapiro test -- p < 0.1 means the data are not normal
shapiro.test(df$Flowering)
# Lilliefors test
library(nortest)
lillie.test(df$Flowering) # compares the data with a normal distribution via a K-S test
#-------------------------------------------------------------
# Homogeneity of variance
# Breusch-Pagan-Godfrey test
library(lmtest)
bptest(mpg ~ cyl, data=mtcars) # p-value > 0.05: heteroskedasticity not significant
#### HYPOTHESIS TESTS ####
#### CHI-SQUARED TEST ####
# comparing flowering time of native and alien species
df <- read.csv("flowering_alien_vs_indigen.csv", sep = ";") # the separator may differ between machines
head(df)
names(df)
# explore the distribution graphically.
library(ggplot2)
ggplot(df, aes(Flowering)) + geom_histogram() + facet_wrap(~Status, nrow = 2, ncol = 1)
# and via the chi-squared test.
m <- table(df$Status, df$Flowering)
Xsq <- chisq.test(m) # test summary
Xsq$observed # observed values (same as matrix m)
Xsq$expected # expected values under the null hypothesis
Xsq$residuals # Pearson residuals
Xsq$stdres # standardized residuals
matplot(Xsq$observed, Xsq$expected)
abline(a=0,b=1)
#### KOLMOGOROV-SMIRNOV TEST ####
# explore the distribution graphically.
require(ggplot2)  # NOTE(review): library() is preferred over require() for loading
ggplot(df, aes(Flowering)) + geom_histogram() + facet_wrap(~Status, nrow = 2, ncol = 1)
# using the KS test,
ks.test(df$Flowering[df$Status == "indigen"],
        df$Flowering[df$Status == "Neophyt"])
#### F TEST ####
# Use randomly generated data
x <- rnorm(50, mean = 0, sd = 2)
y <- rnorm(30, mean = 1, sd = 1)
var.test(x, y) # do they have the same variance?
#### T TEST ####
#data(sleep)
#summary(sleep)
t.test(Flowering ~ Status, data = df)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ravcutils.R
\name{vol_dist}
\alias{vol_dist}
\title{Computes "volume" of distribution}
\usage{
vol_dist(cov.)
}
\arguments{
\item{cov.}{Covariance matrix of the distribution.}
}
\value{
"Volume" of the ellipsoid.
}
\description{
\code{vol_dist} gives an idea of the spread of a distribution by computing its
"volume". The distribution is assumed to be Gaussian with covariance matrix
\code{cov.}. The volume of the n-dimensional ellipsoid defined by this
covariance matrix is then computed. This function may be useful in clustering
to assess the size of a cluster.
}
| /man/vol_dist.Rd | no_license | mobius-eng/ravcutils | R | false | true | 644 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ravcutils.R
\name{vol_dist}
\alias{vol_dist}
\title{Computes "volume" of distribution}
\usage{
vol_dist(cov.)
}
\arguments{
\item{cov.}{Covariance matrix of the distribution.}
}
\value{
"Volume" of the ellipsoid.
}
\description{
\code{vol_dist} gives an idea of the spread of a distribution by computing its
"volume". The distribution is assumed to be Gaussian with covariance matrix
\code{cov.}. The volume of the n-dimensional ellipsoid defined by this
covariance matrix is then computed. This function may be useful in clustering
to assess the size of a cluster.
}
|
library(ggplot2)
library(reshape2)
# Toy 24-hour series with three random columns.
# NOTE(review): no set.seed(), so values differ on every run -- add one if
# reproducibility matters.
data <- data.frame(time = seq(0, 23), noob = rnorm(24), plus = runif(24), extra = rpois(24, lambda = 1))
# Reshape wide -> long so each original column becomes a "variable" level.
Molten <- melt(data, id.vars = "time")
# Draw one coloured line per original column.
ggplot(Molten, aes(x = time, y = value, colour = variable)) + geom_line()
| /Plot_Multiple_Line_ggPlot2.R | no_license | codegauravg/Learning-R | R | false | false | 253 | r | library(ggplot2)
library(reshape2)
# Toy 24-hour series with three random columns.
# NOTE(review): no set.seed(), so values differ on every run -- add one if
# reproducibility matters.
data <- data.frame(time = seq(0, 23), noob = rnorm(24), plus = runif(24), extra = rpois(24, lambda = 1))
# Reshape wide -> long, then draw one coloured line per original column.
Molten <- melt(data, id.vars = "time")
ggplot(Molten, aes(x = time, y = value, colour = variable)) + geom_line()
|
#Hypothesis Testing
#_______________________________________________________________________________________#
#A. Package MASS
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually left commented out.
install.packages("MASS")
library(MASS)
#_______________________________________________________________________________________#
#B. Chem dataset One Sample t-test: H0 mu = 1, one-sided (greater)
chem
chem <- data.frame(chem)
t.test(chem, mu = 1, alternative = "greater")
#_______________________________________________________________________________________#
#C. Cats dataset Two Sample t-test: body weight by sex
cats
male_cats <- subset(cats, Sex == "M")
female_cats <- subset(cats, Sex == "F")
t.test(male_cats$Bwt, female_cats$Bwt, alternative = "two.sided")
cats
#_______________________________________________________________________________________#
#D. Shoes Dataset Paired t-test
shoes
shoes <- data.frame(shoes)
# NOTE(review): prefer TRUE over the reassignable shortcut T.
t.test(shoes$A, shoes$B, alternative = "greater", paired = T)
#_______________________________________________________________________________________#
#E. Bacteria data set test of equal or given proportions
a <- table(bacteria$hilo, bacteria$ap)
# NOTE(review): a is a contingency table; passing length(a) as the second
# (n) argument of prop.test() looks unintended -- verify against
# ?prop.test, which for a matrix x expects n to be omitted.
prop.test(a, length(a), alternative = "two.sided", conf.level = 0.95, correct = TRUE)
#_______________________________________________________________________________________#
#F Cats data set f-test: equality of variances between sexes
var.test(male_cats$Bwt, female_cats$Bwt)
#Inferential Statistics Assignment
#_______________________________________________________________________________________#
forestfires <- read.csv(file.choose())  # interactive file picker
View(forestfires)
write.csv(forestfires, file = "forestfire.csv")
#_______________________________________________________________________________________#
#One sample t-test for temperature greater than 18 degrees celsius
t.test(forestfires$temp, mu =18, alternative = "greater")
#_______________________________________________________________________________________#
#Two sample t-test: difference in wind speed (Aug vs Sep) equal to 4 km/h
Month_Aug <- forestfires[which(forestfires$month=='aug'),]
Month_Sep <- forestfires[which(forestfires$month=='sep'),]
t.test(Month_Aug$wind, Month_Sep$wind, mu=4, alternative = "two.sided")
#_______________________________________________________________________________________#
#Paired t-test: difference in temperature (Apr vs Dec) equal to 9 degrees celsius
# NOTE(review): paired t-tests require equal-length, matched samples --
# confirm Apr and Dec have the same number of rows.
Month_Apr <- forestfires[which(forestfires$month=='apr'),]
Month_Dec <- forestfires[which(forestfires$month=='dec'),]
t.test(Month_Apr$temp, Month_Dec$temp, mu=9, alternative = "two.sided", paired = T)
#_______________________________________________________________________________________#
#Test of equal or given proportions: rain (y/n) in August
forestfires$rainfall <- ifelse(forestfires$rain == 0, 'n', 'y')
temp_rainfall <- table(Month_Aug$month, Month_Aug$rainfall)
temp_rainfall <- as.matrix(temp_rainfall[c('aug'),])
colnames(temp_rainfall) <- c('aug')
temp_rainfall <- t(temp_rainfall)
# NOTE(review): as above, length(temp_rainfall) as n looks unintended; verify.
prop.test(temp_rainfall, length(temp_rainfall), alternative = "two.sided", conf.level = 0.95, correct = TRUE)
#_______________________________________________________________________________________#
#f-test: equality of temperature variances (Aug vs Sep)
var.test(Month_Aug$temp, Month_Sep$temp)
| /Hypothesis_Testing.R | permissive | rkhatu97/Projects_R | R | false | false | 3,115 | r | #Hypothesis Testing
#_______________________________________________________________________________________#
#A. Package MASS
install.packages("MASS")
library(MASS)
#_______________________________________________________________________________________#
#B. Chem dataset One Sample t-test
chem
chem <- data.frame(chem)
t.test(chem, mu = 1, alternative = "greater")
#_______________________________________________________________________________________#
#C. Cats dataset Two Sample t-test
cats
male_cats <- subset(cats, Sex == "M")
female_cats <- subset(cats, Sex == "F")
t.test(male_cats$Bwt, female_cats$Bwt, alternative = "two.sided")
cats
#_______________________________________________________________________________________#
#D. Shoes Dataset Paired t-test
shoes
shoes <- data.frame(shoes)
t.test(shoes$A, shoes$B, alternative = "greater", paired = T)
#_______________________________________________________________________________________#
#E. Bacteria data set test of equal or given proportions
a <- table(bacteria$hilo, bacteria$ap)
prop.test(a, length(a), alternative = "two.sided", conf.level = 0.95, correct = TRUE)
#_______________________________________________________________________________________#
#F Cats data set f-test
var.test(male_cats$Bwt, female_cats$Bwt)
#Inferential Statistics Assignment
#_______________________________________________________________________________________#
forestfires <- read.csv(file.choose())
View(forestfires)
write.csv(forestfires, file = "forestfire.csv")
#_______________________________________________________________________________________#
#One sample t-test for temperature greater than 18 degree celsius
t.test(forestfires$temp, mu =18, alternative = "greater")
#_______________________________________________________________________________________#
#Two sample t-test wind speed equal to 4km/h
Month_Aug <- forestfires[which(forestfires$month=='aug'),]
Month_Sep <- forestfires[which(forestfires$month=='sep'),]
t.test(Month_Aug$wind, Month_Sep$wind, mu=4, alternative = "two.sided")
#_______________________________________________________________________________________#
#Paired t-test for temperature equal to 9 degree celsius
Month_Apr <- forestfires[which(forestfires$month=='apr'),]
Month_Dec <- forestfires[which(forestfires$month=='dec'),]
t.test(Month_Apr$temp, Month_Dec$temp, mu=9, alternative = "two.sided", paired = T)
#_______________________________________________________________________________________#
#Test of equal or given proportions
forestfires$rainfall <- ifelse(forestfires$rain == 0, 'n', 'y')
temp_rainfall <- table(Month_Aug$month, Month_Aug$rainfall)
temp_rainfall <- as.matrix(temp_rainfall[c('aug'),])
colnames(temp_rainfall) <- c('aug')
temp_rainfall <- t(temp_rainfall)
prop.test(temp_rainfall, length(temp_rainfall), alternative = "two.sided", conf.level = 0.95, correct = TRUE)
#_______________________________________________________________________________________#
#f-test
var.test(Month_Aug$temp, Month_Sep$temp)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{append_otargets_pheno_link}
\alias{append_otargets_pheno_link}
\title{Function that adds HTML links to different genetic variant identifiers}
\usage{
append_otargets_pheno_link(
var_df,
pcgr_data = NULL,
oncotree = NULL,
linktype = "dbsource"
)
}
\arguments{
\item{var_df}{data frame with variants}
\item{pcgr_data}{PCGR data structure}
\item{oncotree}{Oncotree data frame}
\item{linktype}{type of link}
}
\description{
Function that adds HTML links to different genetic variant identifiers
}
| /pcgrr/man/append_otargets_pheno_link.Rd | permissive | sigven/pcgr | R | false | true | 596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{append_otargets_pheno_link}
\alias{append_otargets_pheno_link}
\title{Function that adds HTML links to different genetic variant identifiers}
\usage{
append_otargets_pheno_link(
var_df,
pcgr_data = NULL,
oncotree = NULL,
linktype = "dbsource"
)
}
\arguments{
\item{var_df}{data frame with variants}
\item{pcgr_data}{PCGR data structure}
\item{oncotree}{Oncotree data frame}
\item{linktype}{type of link}
}
\description{
Function that adds HTML links to different genetic variant identifiers
}
|
startTime <- Sys.time()

# 16_cross_comp: cross-dataset comparison of TAD empirical p-value ranks.
# For each p-value type produced by pipeline steps 9-11, ranks the TADs within
# every dataset of `comp_folders`, plots the size of the cross-dataset
# intersection of top-ranked TADs against the rank cut-off, and writes out the
# genes of the TADs shared by all datasets at the largest cut-off whose
# intersection contains at most 10 TADs.
SSHFS <- FALSE
setDir <- if (SSHFS) "/media/electron" else ""
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline")
# Upstream pipeline steps whose p-value files are compared here.
script9_name <- "9_runEmpPvalMeanTADLogFC"
script10_name <- "10_runEmpPvalMeanTADCorr"
script11_name <- "11_runEmpPvalCombined"
script_name <- "16_cross_comp"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
#source("run_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
registerDoMC(if (SSHFS) 2 else nCpu) # nCpu loaded from main_settings.R
# create the output directory and start a fresh log file
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
#*********************************************************
txt <- paste0(toupper(script_name), "> Cross-comparison for following datasets: ", paste0(comp_folders, collapse = ", "), "\n")
printAndLog(txt, pipLogFile)
#*********************************************************
# set the name of the Rdata associated with script name
script_Rdata <- setNames(c("emp_pval_meanLogFC.Rdata", "emp_pval_meanCorr.Rdata", "emp_pval_combined.Rdata"), c(script9_name, script10_name, script11_name))
#*********************************************************
# gene -> TAD mapping and entrez -> symbol mapping (entrez ids as character)
gene2tadDT <- read.delim(gene2tadDT_file, header = FALSE, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = FALSE)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
symbDT <- read.delim(paste0(symbolDT_file), header = TRUE, stringsAsFactors = FALSE)
symbDT$entrezID <- as.character(symbDT$entrezID)
plotType <- "svg"
myHeight <- if (plotType == "png") 480 else 7
myWidth <- if (plotType == "png") 600 else 10
########################################### INTERSECT FOR THE MEAN TAD CORR
for (i_script in seq_along(script_Rdata)) {
  # Rank the TADs of each dataset by the current p-value type
  # (rank 1 = most significant; ties share the minimum rank).
  all_DS_rank <- foreach(ds = comp_folders) %dopar% {
    pval_file <- paste0(ds, "/", names(script_Rdata)[i_script], "/", script_Rdata[i_script])
    cat(paste0("... pval_file: ", pval_file, "\n"))
    # load() returns the name of the restored object; get() retrieves it
    # (cleaner than the eval(parse(text = ...)) idiom).
    pval_data <- get(load(pval_file))
    rank(pval_data, ties.method = "min")
  }
  names(all_DS_rank) <- comp_folders
  interSize <- foreach(curr_rank = seq_len(upToRank), .combine = 'c') %dopar% {
    # for all callers, take all the regions that are up to current rank
    curr_rank_DS <- lapply(all_DS_rank, function(x) names(x[x <= curr_rank]))
    length(Reduce(intersect, curr_rank_DS))
  }
  # draw intersect size ~ rank
  outFile <- paste0(curr_outFold, "/", sub(".Rdata", "", as.character(script_Rdata[i_script])), "_datasets_intersect_vs_rank_upto", upToRank, ".", plotType)
  do.call(plotType, list(outFile, height = myHeight, width = myWidth))
  plot(interSize ~ c(1:upToRank),
       bty = "l", xlab = "up to rank", ylab = "intersect size",
       pch = 16, cex = 0.7, type = 'o',
       main = paste0(gsub("_", " ", sub(".Rdata", "", as.character(script_Rdata[i_script]))), " - datasets intersect"))
  legend("topleft", legend = comp_folders, cex = 0.7, bty = "n")
  foo <- dev.off()
  cat(paste0("... written: ", outFile, "\n"))
  if (all(interSize < 1)) next
  # largest rank cut-off whose cross-dataset intersection holds at most 10 TADs;
  # guard against an empty which() (intersection already > 10 at rank 1),
  # which would otherwise make max() return -Inf.
  ranks_le10 <- which(interSize <= 10)
  if (length(ranks_le10) == 0) next
  rankToPlot <- max(ranks_le10)
  # retrieve the TADs at the intersect at this rank
  plot_rank_DS <- lapply(all_DS_rank, function(x) names(x[x <= rankToPlot]))
  TADs_to_plot <- Reduce(intersect, plot_rank_DS)
  stopifnot(length(TADs_to_plot) <= 10)
  rank_tad_DT <- foreach(i = TADs_to_plot, .combine = 'rbind') %dopar% {
    # retrieve the genes of this TAD and their first matching symbol
    # (vapply: type-stable replacement for unlist(sapply(...)))
    entrez_genes <- gene2tadDT$entrezID[gene2tadDT$region == i]
    symbol_genes <- vapply(entrez_genes, function(x) symbDT$symbol[symbDT$entrezID == x][1], character(1))
    data.frame(TAD = i, entrezID = entrez_genes, symbol = symbol_genes)
  }
  outFile <- paste0(curr_outFold, "/", sub(".Rdata", "", as.character(script_Rdata[i_script])), "_datasets_intersect_upto_10_intersect_TAD.txt")
  write.table(rank_tad_DT, file = outFile, sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE)
  cat(paste0("... written: ", outFile, "\n"))
} # end iterating over the different p-values
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
| /NOT_USED_SCRIPTS/16_cross_comp.R | no_license | marzuf/TAD_DE_pipeline_v2 | R | false | false | 4,674 | r | startTime <- Sys.time()
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline")
script9_name <- "9_runEmpPvalMeanTADLogFC"
script10_name <- "10_runEmpPvalMeanTADCorr"
script11_name <- "11_runEmpPvalCombined"
script_name <- "16_cross_comp"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
#source("run_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
registerDoMC(ifelse(SSHFS,2, nCpu)) # loaded from main_settings.R
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
#*********************************************************
txt <- paste0(toupper(script_name), "> Cross-comparison for following datasets: ", paste0(comp_folders, collapse = ", "), "\n")
printAndLog(txt, pipLogFile)
#*********************************************************
# set the name of the Rdata associated with script name
script_Rdata <- setNames(c("emp_pval_meanLogFC.Rdata","emp_pval_meanCorr.Rdata", "emp_pval_combined.Rdata" ), c(script9_name, script10_name, script11_name))
#*********************************************************
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
symbDT <- read.delim(paste0(symbolDT_file), header=TRUE, stringsAsFactors = F)
symbDT$entrezID <- as.character(symbDT$entrezID)
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 480 , 7)
myWidth <- ifelse(plotType == "png", 600, 10)
########################################### INTERSECT FOR THE MEAN TAD CORR
for(i_script in 1:length(script_Rdata)) {
all_DS_rank <- foreach(ds = comp_folders) %dopar% {
pval_file <- paste0(ds, "/", names(script_Rdata)[i_script], "/", script_Rdata[i_script])
cat(paste0("... pval_file: ", pval_file, "\n"))
pval_data <- eval(parse(text = load(pval_file)))
rank(pval_data, ties="min")
}
names(all_DS_rank) <- comp_folders
interSize <- foreach(curr_rank = 1:upToRank, .combine='c') %dopar% {
# for all callers, take all the regions that are up to current rank
curr_rank_DS <- lapply(all_DS_rank, function(x) names(x[x <= curr_rank]))
length(Reduce(intersect, curr_rank_DS))
}
# draw intersect size ~ rank
outFile <- paste0(curr_outFold, "/", sub(".Rdata", "", as.character(script_Rdata[i_script])), "_datasets_intersect_vs_rank_upto", upToRank, ".", plotType)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
plot(interSize ~ c(1:upToRank),
bty="l", xlab = "up to rank", ylab ="intersect size",
pch=16, cex=0.7, type='o',
main=paste0(gsub("_", " " , sub(".Rdata", "", as.character(script_Rdata[i_script]))), " - datasets intersect"))
legend("topleft", legend = comp_folders, cex=0.7, bty="n")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
if(all(interSize < 1)) next
# take max 10 at the intersect and draw
rankToPlot <- max(which(interSize <= 10))
# retrieve the TADs at the intersect at this rank
plot_rank_DS <- lapply(all_DS_rank, function(x) names(x[x <= rankToPlot]))
TADs_to_plot <- Reduce(intersect, plot_rank_DS)
stopifnot(length(TADs_to_plot) <= 10)
rank_tad_DT <- foreach(i = TADs_to_plot, .combine = 'rbind') %dopar% {
# retrieve the genes
entrez_genes <- gene2tadDT$entrezID[gene2tadDT$region == i]
symbol_genes <- unlist(sapply(entrez_genes, function(x) symbDT$symbol[symbDT$entrezID == x][1]))
data.frame(TAD = i, entrezID = entrez_genes, symbol = symbol_genes)
}
outFile <- paste0(curr_outFold, "/", sub(".Rdata", "", as.character(script_Rdata[i_script])), "_datasets_intersect_upto_10_intersect_TAD.txt")
write.table(rank_tad_DT, file = outFile, sep="\t", quote=F, col.names = T, row.names = F)
cat(paste0("... written: ", outFile, "\n"))
} # end iterating over the different p-values
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
|
# Example script extracted from the ape package help page for
# identify.phylo() (graphical identification of nodes and tips on a plotted
# phylogeny). The example body is kept inside "Not run" (##D) comment lines
# because it requires interactive mouse clicks on an open plot device.
library(ape)
### Name: identify.phylo
### Title: Graphical Identification of Nodes and Tips
### Aliases: identify.phylo
### Keywords: aplot
### ** Examples
## Not run: 
##D tr <- rtree(20)
##D f <- function(col) {
##D     o <- identify(tr)
##D     nodelabels(node=o$nodes, pch = 19, col = col)
##D }
##D plot(tr)
##D f("red") # click close to a node
##D f("green")
## End(Not run)
| /data/genthat_extracted_code/ape/examples/identify.phylo.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 388 | r | library(ape)
### Name: identify.phylo
### Title: Graphical Identification of Nodes and Tips
### Aliases: identify.phylo
### Keywords: aplot
### ** Examples
## Not run:
##D tr <- rtree(20)
##D f <- function(col) {
##D o <- identify(tr)
##D nodelabels(node=o$nodes, pch = 19, col = col)
##D }
##D plot(tr)
##D f("red") # click close to a node
##D f("green")
## End(Not run)
|
# Plot3.R: total PM2.5 emissions in Baltimore City (fips == "24510") by source
# type (non-road / nonpoint / on-road / point) for 1999-2008, exported to
# Plot3.png.
# Setting working directory and Loading libraries and Data into the Project Directory
library("ggplot2")
# NOTE(review): hard-coded absolute path -- adjust for your machine; the
# readRDS() and dev.copy() calls below rely on this working directory.
setwd("/home/jagoul/Coursera/Data-Science-Specialization/Exploratory Data Analysis/Week 3&4/Project/")
# reading PM2.5-PRI Emissions Dataset
NEI <- readRDS("summarySCC_PM25.rds")
# reading Summary file of pollution sources dataset
SCC <- readRDS("Source_Classification_Code.rds")
# To plot all emissions sources in Baltimore City from 1999 until 2008, subset
# NEI to Baltimore City, then sum all Emissions entries segregated by source
# type for each given year.
datasetBaltimore <- subset(NEI, fips == "24510")
datasetBaltimoreAggregatedbyType <- with(datasetBaltimore, aggregate(x = Emissions, by = list(year, type), FUN = sum, na.rm = TRUE))
names(datasetBaltimoreAggregatedbyType) <- c("Year", "Type", "Emissions")
# Emissions are aggregated by type for Baltimore city and distributed over the years 1999-2008
# Plotting the emissions by source type
par(mar = c(5, 4, 2, 1))  # note: par() settings are not restored afterwards
g <- ggplot(datasetBaltimoreAggregatedbyType, aes(factor(Year), Emissions, fill = Type))
g + geom_bar(stat = "identity") +
  # pass the faceting formula positionally: the old `facets =` argument of
  # facet_grid() is deprecated in current ggplot2
  facet_grid(. ~ Type, space = "fixed") +
  labs(x = "Year", y = expression("PM"["2.5"]* " Emissions (in Tons)"), title = expression("PM"["2.5"]* " Emissions By Source Type in Baltimore City"))
# Exporting plot into png file
dev.copy(png, filename = "Plot3.png", width = 680, height = 480)
dev.off()
| /Plot 3.R | no_license | Jagoul/Exploratory-Data-Analysis-Project-Two | R | false | false | 1,427 | r | # Setting working directory and Loading libraries and Data into the Project Directory
library("ggplot2")
setwd("/home/jagoul/Coursera/Data-Science-Specialization/Exploratory Data Analysis/Week 3&4/Project/")
# reading PM2.5-PRI Emissions Dataset
NEI <- readRDS("summarySCC_PM25.rds")
# reading Summary file of pollution sources dataset
SCC <- readRDS("Source_Classification_Code.rds")
# To plot All emissions sources in Baltimore City from 1999 until 2008, we need to subset NEI dataset of Baltimore city then
# sum-up all Emissions entries and seggregate it by source type under each given year.
datasetBaltimore <- subset(NEI,fips == "24510")
datasetBaltimoreAggregatedbyType <- with(datasetBaltimore,aggregate(x= Emissions ,by = list(year,type), FUN= sum, na.rm=TRUE))
names(datasetBaltimoreAggregatedbyType) <- c("Year", "Type","Emissions")
# Emissions are aggregated by type for Baltimore city and distributed over the year between 1999-2008
# Plotting the schema of all sources
par(mar=c(5,4,2,1))
g <- ggplot(datasetBaltimoreAggregatedbyType,aes(factor(Year), Emissions, fill=Type))
g+ geom_bar(stat ="identity")+
facet_grid(facets = .~Type,space = "fixed")+
labs(x="Year", y= expression("PM"["2.5"]* " Emissions (in Tons)"), title= expression("PM"["2.5"]* " Emissions By Source Type in Baltimore City"))
# Exportng plot into png file
dev.copy(png, filename = "Plot3.png", width=680, height=480)
dev.off()
|
testlist <- list(Rs = numeric(0), atmp = c(-4.50566521220552e-36, 6.80668723574097e+264, 5.98550530667504e-58, -7.28473308948055e+34, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142, 2.88358101657793e-242, 2.898978379072e+128, 1.40737955412509e+160, -2.13738641116696e+192, 3.45295731548885e-272, -3.46023726898595e-88, -2.02162980664513e-181, -2.29508290835037e-270, 3.10211826298138e+173, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(-1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 2.77477784961188e-131, 2.36571851792443e+100, -2.39778800228083e+40, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224469374587e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.19961311631504e-152, -5.44923785276135e+73, -1.03042857109918e-123, -1334516499709826304, -3.04001955957975e-65, 1.42002144715861e+59, -2.07434119258585e-169, 3.0657477657616e-67, 4.06737580382438e+93, 7.4712532464615e-284, 1.76868497277177e+90, -1.0272283117137e+145, 3.15594844519519e+59, -2.5314964135126e+151, 6.3559915616272e-20, 8.32214200240855e-171, -1.66455733512201e-155, 3.51899746099255e-06, 8.46092595981126e-191, 0.000487039587081904, 5.8159156678811e+88, -4.17716818760037e+116, 1.36527621059248e+188, -5.85967094107836e+127, -7.50883190250449e-230), temp = c(1.36656528938164e-311, -1.65791256519293e+82, -1.06085080136347e+37, -4.6797892590747e+143, -1.62867813593242e+90, -1.13148154057486e-43, -1.1713955333734e-31, 1.27375143867828e-212, -4.42041589312145e+92, -5.07252331912468e+207, 8.60148313830859e-150, 2.90190951781172e+48, -2.72819470543294e+134, -2.23411857963441e+97, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615844834-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,046 | r | testlist <- list(Rs = numeric(0), atmp = c(-4.50566521220552e-36, 6.80668723574097e+264, 5.98550530667504e-58, -7.28473308948055e+34, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142, 2.88358101657793e-242, 2.898978379072e+128, 1.40737955412509e+160, -2.13738641116696e+192, 3.45295731548885e-272, -3.46023726898595e-88, -2.02162980664513e-181, -2.29508290835037e-270, 3.10211826298138e+173, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(-1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 2.77477784961188e-131, 2.36571851792443e+100, -2.39778800228083e+40, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224469374587e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.19961311631504e-152, -5.44923785276135e+73, -1.03042857109918e-123, -1334516499709826304, -3.04001955957975e-65, 1.42002144715861e+59, -2.07434119258585e-169, 3.0657477657616e-67, 4.06737580382438e+93, 7.4712532464615e-284, 1.76868497277177e+90, -1.0272283117137e+145, 3.15594844519519e+59, -2.5314964135126e+151, 6.3559915616272e-20, 8.32214200240855e-171, -1.66455733512201e-155, 3.51899746099255e-06, 8.46092595981126e-191, 0.000487039587081904, 5.8159156678811e+88, -4.17716818760037e+116, 1.36527621059248e+188, -5.85967094107836e+127, -7.50883190250449e-230), temp = c(1.36656528938164e-311, -1.65791256519293e+82, -1.06085080136347e+37, -4.6797892590747e+143, -1.62867813593242e+90, -1.13148154057486e-43, -1.1713955333734e-31, 1.27375143867828e-212, -4.42041589312145e+92, -5.07252331912468e+207, 8.60148313830859e-150, 2.90190951781172e+48, -2.72819470543294e+134, -2.23411857963441e+97, 
1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.