content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
##
# Author: Yuchen Liang
# Date: Apr 12, 2019
##
# Script: predict race with wru from InfoUSA records and compare against the
# InfoUSA ethnicity codes.
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- acceptable for
# a standalone batch script, but avoid sourcing this file interactively.
rm(list = ls())
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast and is preferred for hard dependencies.
require(wru)       # predict_race()
require(dplyr)
require(stringr)
# function author: Ignacio Sarmiento-Barbieri
# Map InfoUSA `Ethnicity_Code_1` country/origin codes to coarse ethnicity and
# race categories.
#
# Args:
#   dta: data.frame (or tibble) with a character column `Ethnicity_Code_1`.
#
# Returns:
#   `dta` with two character columns appended:
#     ethnicity -- "Black", "Hispanic", "Western_European", "Mediterranean",
#                  "Eastern_European", "Scandinavian", "Unknown", or "Other"
#     race      -- "Black", "Hispanic", "White", "Asian", or "Other"
#
# function author: Ignacio Sarmiento-Barbieri
gen_demog_infousa <- function(dta) {
  # Generate Demog. variables -----------------------------------------------
  # Country-of-origin code groups.  Note "NA" below is the literal code for
  # Namibia, not a missing value.
  Black_codes <- c("B5","Q6","Q7","Q8","Q9","AO","A8","BJ","BW","BF","BI","CM","C3","CF","TD","KM","CG",
                   "DJ","GQ","ET","GA","GM","GH","GW","GN","H2","CI","KE","LS","LR","MG","MW","ML","MR","MZ","NA",
                   "NE","NG","RW","SN","SC","SL","SO","ZA","SD","S9","SZ","TZ","TG","UG","X5","CD","ZM","ZW","Z8")
  # The groups below are defined for reference but do not feed into the
  # ethnicity/race assignment (kept to match the original classification notes).
  Native_American_codes  <- c("A4", "N3")
  Pacific_Islander_codes <- c("FJ", "PH", "H3", "NR", "PG", "P5", "TO", "VU", "WS")
  Middle_Eastern_codes   <- c("DZ", "A7", "BH", "EG", "IQ", "JO", "KW", "LB", "LY", "MA", "OM", "P4", "QA", "SA",
                              "SY", "TN", "AE", "YE")
  Jewish_codes           <- c("J4")
  Hispanic_codes         <- c("B3", "H5", "PT")
  Far_Eastern_codes             <- c("CN", "ID", "JP", "KH", "K5", "LA", "MY", "MN", "MM", "TH", "T5", "VN")
  Central_Southwest_Asian_codes <- c("AM", "AZ", "C5", "GE", "KZ", "KG", "TJ", "TM", "UZ")
  South_Asian_codes             <- c("AF", "BD", "BT", "IN", "NP", "O8", "PK", "LK", "T4")
  Western_European_codes <- c("AT", "BE", "NL", "E5", "FR", "DE", "IE", "K8", "LI", "LU", "IM", "S3", "CH", "TR", "W4")
  Mediterranean_codes    <- c("CY", "GR", "IT", "MT")
  Eastern_European_codes <- c("AL", "BA", "BG", "BY", "HR", "CZ", "EE", "HU", "LV", "LT", "MK", "MD", "PL", "RO", "RU", "CS", "SK", "SI", "UA")
  Scandinavian_codes     <- c("NO", "IS", "FI", "DK", "SE")
  Other_codes            <- c("AU", "GY", "MV", "NZ", "SR", "ZZ")
  Asian_codes <- c(Far_Eastern_codes, Central_Southwest_Asian_codes, South_Asian_codes)
  White_codes <- c(Western_European_codes, Mediterranean_codes, Eastern_European_codes, Scandinavian_codes)

  # Vectorized first-match classifier: earlier groups take precedence, and a
  # code in no group (including NA) gets `default`.  Base R only, so this
  # function no longer requires dplyr to be attached.
  classify <- function(code, groups, labels, default) {
    out <- rep(default, length(code))
    for (i in rev(seq_along(groups))) {
      out[code %in% groups[[i]]] <- labels[i]
    }
    out
  }

  code <- dta$Ethnicity_Code_1
  # BUGFIX: the original tested Ethnicity_Code_1 %in% c(00); c(00) is numeric 0,
  # which never matches the character code "00", so "Unknown" was unreachable.
  dta$ethnicity <- classify(code,
                            list(Black_codes, Hispanic_codes, Western_European_codes,
                                 Mediterranean_codes, Eastern_European_codes,
                                 Scandinavian_codes, "00"),
                            c("Black", "Hispanic", "Western_European", "Mediterranean",
                              "Eastern_European", "Scandinavian", "Unknown"),
                            "Other")
  dta$race <- classify(code,
                       list(Black_codes, Hispanic_codes, White_codes, Asian_codes),
                       c("Black", "Hispanic", "White", "Asian"),
                       "Other")
  return(dta)
}
# Parameters
# NOTE(review): absolute path -- this only runs on the bdeep share.
dat <- readRDS("/home/bdeep/share/projects/InfoUSA/shp_merging/p2/p2result_2017.rds")
# STATES <- c("AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL",
# "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME",
# "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH",
# "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI",
# "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI",
# "WY")
# Normalize column names so the select() below is case-insensitive.
colnames(dat) <- str_to_upper(colnames(dat))
# Columns in the shape wru::predict_race() expects:
# id, surname, state, county, tract, block (+ year added below).
to_pred <- dat %>% select(id = FAMILYID,
surname = LAST_NAME_1,
state = STATE,
county = CENSUS2010COUNTYCODE,
tract = CENSUS2010TRACT,
block = CENSUS2010BLOCK)
# year = YEAR)
# States actually present in the data (replaces the hard-coded list above).
STATES <- unique(to_pred %>% pull(state))
dat <- dat %>% rename(Ethnicity_Code_1=ETHNICITY_CODE_1)
## Change this if not in the same state or year
# to_pred$state <- ST
to_pred$year <- 2017
# Prepare data.frame to predict
# Zero-pad FIPS components to the fixed widths the census files use.
## county is three chars long
to_pred$county <- str_pad(to_pred$county, 3, side="left", pad="0")
## tract is six characters
to_pred$tract <- str_pad(to_pred$tract, 6, side="left", pad="0")
## block is four characters
to_pred$block <- str_pad(to_pred$block, 4, side="left", pad="0")
# Title-case surnames to match wru's surname dictionary formatting.
to_pred$surname <- str_to_title(to_pred$surname)
# Accumulator for the prediction/ground-truth comparison built below.
cong_all <- NULL
# One-shot, given that the census data is available locally
{
# Load the cached per-state census tables keyed by state abbreviation.
census_data <- list()
i <- 1
for (st in STATES){
census_data[[i]] <- readRDS(paste0("./census_data_tract/", st, "_census.rds"))[[1]]
names(census_data)[i] <- st
i <- i + 1
}
# Predict at two geographic levels.  census.key = "xxx" is a placeholder;
# it is unused when census.data is supplied locally.
result_c <- predict_race(to_pred,
census.geo = "county",
census.key = "xxx",
census.data = census_data)
result_t <- predict_race(to_pred,
census.geo = "tract",
census.key = "xxx",
census.data = census_data)
# Combine levels
# Tract-level predictions are preferred; rows where the tract prediction is
# missing (NA pred.whi) fall back to the county-level prediction.
naid <- result_t %>% filter(is.na(pred.whi)) %>% select(id, year)
result_all <- result_t %>% filter(!is.na(pred.whi)) %>% bind_rows(naid %>% left_join(result_c, by=c("id", "year")))
# Test accuracy
# Ground truth: InfoUSA's own ethnicity codes mapped to races.
y <- gen_demog_infousa(dat) %>% select(id=FAMILYID,
# year=YEAR,
first_name=FIRST_NAME_1,
last_name=LAST_NAME_1,
eth_code_infousa=Ethnicity_Code_1,
race_infousa=race)
y$year <- 2017
# Keep only the first letter (A/B/H/O/W) for comparison with wru's labels.
y$race_infousa <- str_sub(y$race_infousa, 1, 1)
get_names <- result_all %>% select(starts_with("pred."))
new_names <- get_names %>% rename(wru_whi=pred.whi,
wru_bla=pred.bla,
wru_his=pred.his,
wru_asi=pred.asi,
wru_oth=pred.oth)
# Pick the most likely race per row: column name of the max probability
# (first wins on ties) plus the max probability itself.
result_all <- cbind(result_all,
data.frame(race = colnames(get_names)[max.col(get_names,ties.method="first")],
prob = apply(get_names, 1, max)),
new_names)
# "pred.whi" -> "W", "pred.bla" -> "B", etc. (6th character, title-cased).
result_all$race_abbr <- str_to_title(str_sub(result_all$race, 6, 6))
# Merge
cong_all <- result_all %>% select(id, year, race_wru=race_abbr, prob_wru=prob, starts_with("wru_")) %>% full_join(y, by=c("id", "year")) %>% mutate(same=(race_wru==race_infousa))
}
# Per-state fallback (used when statewide census data is not already cached).
# NOTE(review): the original delimited this block with Python-style ''' quotes,
# which R only tolerates by accident (it parses as an empty string plus one
# giant discarded string literal; any single quote inside the block would have
# broken the whole file).  Wrapped in `if (FALSE)` instead: same net effect
# (never executed), but the code is still parse-checked.
if (FALSE) {
# divided by states, if census data not available
for (st in STATES){
print(paste0("Processing state ", st, "..."))
# Download census data, or get from local if available
census_data <- readRDS(paste0("./census_data_tract/", st, "_census.rds"))
# census_data <- get_census_data(key = "01d35539fab30488330596ef4cb9ecf28968827d",
# states = st,
# census.geo = "tract",
# retry = 5)
# Optional: Save to local for reference
# saveRDS(census_data, file=paste0("./census_data_tract/", st, "_census.rds"))
# Get at county level
result_c <- predict_race(to_pred %>% filter(state==st),
census.geo = "county",
census.key = "xxx",
census.data = census_data)
result_t <- predict_race(to_pred %>% filter(state==st),
census.geo = "tract",
census.key = "xxx",
census.data = census_data)
# Combine levels
naid <- result_t %>% filter(is.na(pred.whi)) %>% select(id, year)
result_all <- result_t %>% filter(!is.na(pred.whi)) %>% bind_rows(naid %>% left_join(result_c, by=c("id", "year")))
# Test accuracy
y <- gen_demog_infousa(dat %>% filter(STATE==st)) %>% select(id=FAMILYID,
# year=YEAR,
first_name=FIRST_NAME_1,
last_name=LAST_NAME_1,
eth_code_infousa=Ethnicity_Code_1,
race_infousa=race)
y$year <- 2017
y$race_infousa <- str_sub(y$race_infousa, 1, 1)
get_names <- result_all %>% select(starts_with("pred."))
new_names <- get_names %>% rename(wru_whi=pred.whi,
wru_bla=pred.bla,
wru_his=pred.his,
wru_asi=pred.asi,
wru_oth=pred.oth)
result_all <- cbind(result_all,
data.frame(race = colnames(get_names)[max.col(get_names,ties.method="first")],
prob = apply(get_names, 1, max)),
new_names)
result_all$race_abbr <- str_to_title(str_sub(result_all$race, 6, 6))
# Merge
cong <- result_all %>% select(id, year, race_wru=race_abbr, prob_wru=prob, starts_with("wru_")) %>% full_join(y, by=c("id", "year"))
cong_all <- rbind(cong_all, cong %>% mutate(same=(race_wru==race_infousa)))
}
}
# saveRDS(cong_all, file="./info2017pred.rds")
# NOTE(review): `cong` is only created inside the disabled per-state block
# above, so this rm() will emit a "object not found" warning for it.
rm(census_data, get_names, naid, cong, result_c, result_t, new_names, result_all, y)
# Evaluation
cong <- cong_all
rm(cong_all)
## overall accuracy
print("Overall Congruence:")
# NOTE(review): `same` is NA for ids present on only one side of the
# full_join, so this may return NA; mean(..., na.rm = TRUE) may be intended.
mean(cong$same)
## accuracy by group
print("Congrouence and Avg Prob by group:")
cong %>% group_by(race_infousa) %>% summarise(cong=mean(same))
## sensitivity by group
# For each predicted race, the share of rows whose InfoUSA race is A/B/H/O/W.
cong %>% group_by(race_wru) %>% summarise(tr_A=sum(race_infousa=='A')/n(),
tr_B=sum(race_infousa=='B')/n(),
tr_H=sum(race_infousa=='H')/n(),
tr_O=sum(race_infousa=='O')/n(),
tr_W=sum(race_infousa=='W')/n())
## confusion matrix
print("Confusion Matrix:")
ref <- matrix(cong$race_infousa, ncol=1)
pred <- matrix(cong$race_wru, ncol=1)
table(ref, pred)
rm(ref, pred)
## plot
x <- cong
# NOTE(review): ggplot2 is never attached in this script (only wru, dplyr,
# stringr, gridExtra) -- these calls fail unless ggplot2 was loaded elsewhere.
ggplot(data=x %>% filter(race_infousa=="W")) + theme_bw() + stat_bin(aes(x=prob_wru, y=..count../sum(..count..), fill=race_wru), geom="bar", binwidth=0.05) + scale_fill_brewer(palette = "Set2") + labs(y="fraction",title="InfoUSA White")
ggplot(data=x %>% filter(race_infousa=="W")) + theme_bw() + stat_bin(aes(x=wru_whi, y=..count../sum(..count..), fill=race_wru), geom="bar", binwidth=0.05) + scale_fill_brewer(palette = "Set2") + labs(y="fraction",title="WRU White")
require(gridExtra)
# NOTE(review): `name` and `race_research` are not created anywhere in this
# script -- these plots look carried over from a separate names-analysis
# script and will error against `cong`; confirm before running.
s <- ggplot(data=x, aes(x=name))
grid.arrange(s+geom_bar(mapping = aes(fill=race_wru), position="fill"), s+geom_bar(mapping = aes(fill=race_infousa), position="fill"), ncol=1)
grid.arrange(ggplot(data=x %>% filter(race_research=='W'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="white names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#F28E2B","#E15759"), name="Race (WRU)"),
ggplot(data=x %>% filter(race_research=='B'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="black names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#E15759"), name="Race (WRU)"),
ggplot(data=x %>% filter(race_research=='H'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="hispanic names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#F28E2B","#E15759"), name="Race (WRU)"),
ncol = 1)
grid.arrange(ggplot(data=x %>% filter(race_research=='W'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="white names",y="percentage")+scale_fill_manual(values = c("#F778A1", "#79A1F7"), name = "infousa races"),
ggplot(data=x %>% filter(race_research=='B'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="black names",y="percentage")+scale_fill_manual(values = c("#F778A1", "#79A1F7"), name = "infousa races"),
ggplot(data=x %>% filter(race_research=='H'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="hispanic names",y="percentage")+scale_fill_manual(values = c("#A1F779"), name = "infousa races"),
ncol = 1)
# Average wru probabilities per name, joined back to the name's research race.
avg <- x %>% group_by(name) %>% summarise(avg_whi=sum(wru_whi)/n(), avg_bla=sum(wru_bla)/n(), avg_his=sum(wru_his)/n()) %>% left_join(x %>% select(name, race_research) %>% distinct(), by="name")
| /Race Prediction/predict_race.R | no_license | uiuc-bdeep/InfoUSA_Database | R | false | false | 12,619 | r | ##
# NOTE(review): everything from here down to the next metadata row is a
# duplicated copy of the script above (dataset export artifact: the 'text'
# column repeats 'content').  Kept verbatim.
# Author: Yuchen Liang
# Date: Apr 12, 2019
##
rm(list = ls())
require(wru)
require(dplyr)
require(stringr)
# function author: Ignacio Sarmiento-Barbieri
# Map InfoUSA `Ethnicity_Code_1` country/origin codes to coarse ethnicity and
# race categories.  (Duplicated copy of the function above -- dataset export
# artifact; same fix applied.)
#
# Args:
#   dta: data.frame (or tibble) with a character column `Ethnicity_Code_1`.
#
# Returns:
#   `dta` with two character columns appended:
#     ethnicity -- "Black", "Hispanic", "Western_European", "Mediterranean",
#                  "Eastern_European", "Scandinavian", "Unknown", or "Other"
#     race      -- "Black", "Hispanic", "White", "Asian", or "Other"
#
# function author: Ignacio Sarmiento-Barbieri
gen_demog_infousa <- function(dta) {
  # Generate Demog. variables -----------------------------------------------
  # Country-of-origin code groups.  Note "NA" below is the literal code for
  # Namibia, not a missing value.
  Black_codes <- c("B5","Q6","Q7","Q8","Q9","AO","A8","BJ","BW","BF","BI","CM","C3","CF","TD","KM","CG",
                   "DJ","GQ","ET","GA","GM","GH","GW","GN","H2","CI","KE","LS","LR","MG","MW","ML","MR","MZ","NA",
                   "NE","NG","RW","SN","SC","SL","SO","ZA","SD","S9","SZ","TZ","TG","UG","X5","CD","ZM","ZW","Z8")
  # The groups below are defined for reference but do not feed into the
  # ethnicity/race assignment (kept to match the original classification notes).
  Native_American_codes  <- c("A4", "N3")
  Pacific_Islander_codes <- c("FJ", "PH", "H3", "NR", "PG", "P5", "TO", "VU", "WS")
  Middle_Eastern_codes   <- c("DZ", "A7", "BH", "EG", "IQ", "JO", "KW", "LB", "LY", "MA", "OM", "P4", "QA", "SA",
                              "SY", "TN", "AE", "YE")
  Jewish_codes           <- c("J4")
  Hispanic_codes         <- c("B3", "H5", "PT")
  Far_Eastern_codes             <- c("CN", "ID", "JP", "KH", "K5", "LA", "MY", "MN", "MM", "TH", "T5", "VN")
  Central_Southwest_Asian_codes <- c("AM", "AZ", "C5", "GE", "KZ", "KG", "TJ", "TM", "UZ")
  South_Asian_codes             <- c("AF", "BD", "BT", "IN", "NP", "O8", "PK", "LK", "T4")
  Western_European_codes <- c("AT", "BE", "NL", "E5", "FR", "DE", "IE", "K8", "LI", "LU", "IM", "S3", "CH", "TR", "W4")
  Mediterranean_codes    <- c("CY", "GR", "IT", "MT")
  Eastern_European_codes <- c("AL", "BA", "BG", "BY", "HR", "CZ", "EE", "HU", "LV", "LT", "MK", "MD", "PL", "RO", "RU", "CS", "SK", "SI", "UA")
  Scandinavian_codes     <- c("NO", "IS", "FI", "DK", "SE")
  Other_codes            <- c("AU", "GY", "MV", "NZ", "SR", "ZZ")
  Asian_codes <- c(Far_Eastern_codes, Central_Southwest_Asian_codes, South_Asian_codes)
  White_codes <- c(Western_European_codes, Mediterranean_codes, Eastern_European_codes, Scandinavian_codes)

  # Vectorized first-match classifier: earlier groups take precedence, and a
  # code in no group (including NA) gets `default`.  Base R only, so this
  # function no longer requires dplyr to be attached.
  classify <- function(code, groups, labels, default) {
    out <- rep(default, length(code))
    for (i in rev(seq_along(groups))) {
      out[code %in% groups[[i]]] <- labels[i]
    }
    out
  }

  code <- dta$Ethnicity_Code_1
  # BUGFIX: the original tested Ethnicity_Code_1 %in% c(00); c(00) is numeric 0,
  # which never matches the character code "00", so "Unknown" was unreachable.
  dta$ethnicity <- classify(code,
                            list(Black_codes, Hispanic_codes, Western_European_codes,
                                 Mediterranean_codes, Eastern_European_codes,
                                 Scandinavian_codes, "00"),
                            c("Black", "Hispanic", "Western_European", "Mediterranean",
                              "Eastern_European", "Scandinavian", "Unknown"),
                            "Other")
  dta$race <- classify(code,
                       list(Black_codes, Hispanic_codes, White_codes, Asian_codes),
                       c("Black", "Hispanic", "White", "Asian"),
                       "Other")
  return(dta)
}
# Parameters
# NOTE(review): duplicated copy of the data-prep section above (dataset export
# artifact); kept verbatim.
dat <- readRDS("/home/bdeep/share/projects/InfoUSA/shp_merging/p2/p2result_2017.rds")
# STATES <- c("AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL",
# "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME",
# "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH",
# "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI",
# "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI",
# "WY")
colnames(dat) <- str_to_upper(colnames(dat))
to_pred <- dat %>% select(id = FAMILYID,
surname = LAST_NAME_1,
state = STATE,
county = CENSUS2010COUNTYCODE,
tract = CENSUS2010TRACT,
block = CENSUS2010BLOCK)
# year = YEAR)
STATES <- unique(to_pred %>% pull(state))
dat <- dat %>% rename(Ethnicity_Code_1=ETHNICITY_CODE_1)
## Change this if not in the same state or year
# to_pred$state <- ST
to_pred$year <- 2017
# Prepare data.frame to predict
## county is three chars long
to_pred$county <- str_pad(to_pred$county, 3, side="left", pad="0")
## tract is six characters
to_pred$tract <- str_pad(to_pred$tract, 6, side="left", pad="0")
## block is four characters
to_pred$block <- str_pad(to_pred$block, 4, side="left", pad="0")
to_pred$surname <- str_to_title(to_pred$surname)
cong_all <- NULL
# One-shot, given that the census data is available locally
# NOTE(review): duplicated copy of the one-shot prediction section above
# (dataset export artifact); kept verbatim.
{
census_data <- list()
i <- 1
for (st in STATES){
census_data[[i]] <- readRDS(paste0("./census_data_tract/", st, "_census.rds"))[[1]]
names(census_data)[i] <- st
i <- i + 1
}
result_c <- predict_race(to_pred,
census.geo = "county",
census.key = "xxx",
census.data = census_data)
result_t <- predict_race(to_pred,
census.geo = "tract",
census.key = "xxx",
census.data = census_data)
# Combine levels
naid <- result_t %>% filter(is.na(pred.whi)) %>% select(id, year)
result_all <- result_t %>% filter(!is.na(pred.whi)) %>% bind_rows(naid %>% left_join(result_c, by=c("id", "year")))
# Test accuracy
y <- gen_demog_infousa(dat) %>% select(id=FAMILYID,
# year=YEAR,
first_name=FIRST_NAME_1,
last_name=LAST_NAME_1,
eth_code_infousa=Ethnicity_Code_1,
race_infousa=race)
y$year <- 2017
y$race_infousa <- str_sub(y$race_infousa, 1, 1)
get_names <- result_all %>% select(starts_with("pred."))
new_names <- get_names %>% rename(wru_whi=pred.whi,
wru_bla=pred.bla,
wru_his=pred.his,
wru_asi=pred.asi,
wru_oth=pred.oth)
result_all <- cbind(result_all,
data.frame(race = colnames(get_names)[max.col(get_names,ties.method="first")],
prob = apply(get_names, 1, max)),
new_names)
result_all$race_abbr <- str_to_title(str_sub(result_all$race, 6, 6))
# Merge
cong_all <- result_all %>% select(id, year, race_wru=race_abbr, prob_wru=prob, starts_with("wru_")) %>% full_join(y, by=c("id", "year")) %>% mutate(same=(race_wru==race_infousa))
}
# Per-state fallback (duplicated copy of the block above -- dataset export
# artifact; same fix applied).
# NOTE(review): the original delimited this block with Python-style ''' quotes,
# which R only tolerates by accident (one giant discarded string literal).
# Wrapped in `if (FALSE)` instead: same net effect, but parse-checked.
if (FALSE) {
# divided by states, if census data not available
for (st in STATES){
print(paste0("Processing state ", st, "..."))
# Download census data, or get from local if available
census_data <- readRDS(paste0("./census_data_tract/", st, "_census.rds"))
# census_data <- get_census_data(key = "01d35539fab30488330596ef4cb9ecf28968827d",
# states = st,
# census.geo = "tract",
# retry = 5)
# Optional: Save to local for reference
# saveRDS(census_data, file=paste0("./census_data_tract/", st, "_census.rds"))
# Get at county level
result_c <- predict_race(to_pred %>% filter(state==st),
census.geo = "county",
census.key = "xxx",
census.data = census_data)
result_t <- predict_race(to_pred %>% filter(state==st),
census.geo = "tract",
census.key = "xxx",
census.data = census_data)
# Combine levels
naid <- result_t %>% filter(is.na(pred.whi)) %>% select(id, year)
result_all <- result_t %>% filter(!is.na(pred.whi)) %>% bind_rows(naid %>% left_join(result_c, by=c("id", "year")))
# Test accuracy
y <- gen_demog_infousa(dat %>% filter(STATE==st)) %>% select(id=FAMILYID,
# year=YEAR,
first_name=FIRST_NAME_1,
last_name=LAST_NAME_1,
eth_code_infousa=Ethnicity_Code_1,
race_infousa=race)
y$year <- 2017
y$race_infousa <- str_sub(y$race_infousa, 1, 1)
get_names <- result_all %>% select(starts_with("pred."))
new_names <- get_names %>% rename(wru_whi=pred.whi,
wru_bla=pred.bla,
wru_his=pred.his,
wru_asi=pred.asi,
wru_oth=pred.oth)
result_all <- cbind(result_all,
data.frame(race = colnames(get_names)[max.col(get_names,ties.method="first")],
prob = apply(get_names, 1, max)),
new_names)
result_all$race_abbr <- str_to_title(str_sub(result_all$race, 6, 6))
# Merge
cong <- result_all %>% select(id, year, race_wru=race_abbr, prob_wru=prob, starts_with("wru_")) %>% full_join(y, by=c("id", "year"))
cong_all <- rbind(cong_all, cong %>% mutate(same=(race_wru==race_infousa)))
}
}
# saveRDS(cong_all, file="./info2017pred.rds")
# NOTE(review): duplicated copy of the evaluation/plot section above (dataset
# export artifact); kept verbatim.  Same caveats apply: `cong` may not exist
# for rm(); ggplot2 is never attached; `name`/`race_research` are undefined.
rm(census_data, get_names, naid, cong, result_c, result_t, new_names, result_all, y)
# Evaluation
cong <- cong_all
rm(cong_all)
## overall accuracy
print("Overall Congruence:")
mean(cong$same)
## accuracy by group
print("Congrouence and Avg Prob by group:")
cong %>% group_by(race_infousa) %>% summarise(cong=mean(same))
## sensitivity by group
cong %>% group_by(race_wru) %>% summarise(tr_A=sum(race_infousa=='A')/n(),
tr_B=sum(race_infousa=='B')/n(),
tr_H=sum(race_infousa=='H')/n(),
tr_O=sum(race_infousa=='O')/n(),
tr_W=sum(race_infousa=='W')/n())
## confusion matrix
print("Confusion Matrix:")
ref <- matrix(cong$race_infousa, ncol=1)
pred <- matrix(cong$race_wru, ncol=1)
table(ref, pred)
rm(ref, pred)
## plot
x <- cong
ggplot(data=x %>% filter(race_infousa=="W")) + theme_bw() + stat_bin(aes(x=prob_wru, y=..count../sum(..count..), fill=race_wru), geom="bar", binwidth=0.05) + scale_fill_brewer(palette = "Set2") + labs(y="fraction",title="InfoUSA White")
ggplot(data=x %>% filter(race_infousa=="W")) + theme_bw() + stat_bin(aes(x=wru_whi, y=..count../sum(..count..), fill=race_wru), geom="bar", binwidth=0.05) + scale_fill_brewer(palette = "Set2") + labs(y="fraction",title="WRU White")
require(gridExtra)
s <- ggplot(data=x, aes(x=name))
grid.arrange(s+geom_bar(mapping = aes(fill=race_wru), position="fill"), s+geom_bar(mapping = aes(fill=race_infousa), position="fill"), ncol=1)
grid.arrange(ggplot(data=x %>% filter(race_research=='W'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="white names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#F28E2B","#E15759"), name="Race (WRU)"),
ggplot(data=x %>% filter(race_research=='B'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="black names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#E15759"), name="Race (WRU)"),
ggplot(data=x %>% filter(race_research=='H'), aes(x=name))+geom_bar(mapping = aes(fill=race_wru), position="fill")+labs(x="hispanic names",y="percentage")+scale_fill_manual(values=c("#4E79A7","#F28E2B","#E15759"), name="Race (WRU)"),
ncol = 1)
grid.arrange(ggplot(data=x %>% filter(race_research=='W'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="white names",y="percentage")+scale_fill_manual(values = c("#F778A1", "#79A1F7"), name = "infousa races"),
ggplot(data=x %>% filter(race_research=='B'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="black names",y="percentage")+scale_fill_manual(values = c("#F778A1", "#79A1F7"), name = "infousa races"),
ggplot(data=x %>% filter(race_research=='H'), aes(x=name))+geom_bar(mapping = aes(fill=race_infousa), position="fill")+labs(x="hispanic names",y="percentage")+scale_fill_manual(values = c("#A1F779"), name = "infousa races"),
ncol = 1)
avg <- x %>% group_by(name) %>% summarise(avg_whi=sum(wru_whi)/n(), avg_bla=sum(wru_bla)/n(), avg_his=sum(wru_his)/n()) %>% left_join(x %>% select(name, race_research) %>% distinct(), by="name")
|
# Calculate solar terrain corrections for WRF data
#paramfile <- '/Users/abbylute/Downloads/chunk_140coarse.mat'
# to run this script from command line:
# Rscript --vanilla pathtothisRscript.R pathtoparamfile.mat
# to run this script from matlab:
#pathtoR = '/Library/Frameworks/R.framework/Resources/bin/Rscript';
#pathtoRscript = '/Volumes/WDPassport/DATA/WRF/Downscaled/Code/get_solar_terrain_corrections.R';
#pathtoparamfile = '/Users/abbylute/Downloads/chunk_140coarse.mat';
#[status] = system([pathtoR,' --vanilla ',pathtoRscript,' ',pathtoparamfile]);
args = commandArgs(trailingOnly=TRUE)
# test if there is at least one argument: if not, return an error
if (length(args)==0) {
stop("At least one argument must be supplied (paramfile.mat)", call.=FALSE)
} else if (length(args)==1) {
# first (and only) argument is the path to the parameter .mat file
# NOTE(review): extra arguments beyond the first are silently ignored.
paramfile = args[1]
}
# so that rgdal will load:
# NOTE(review): hard-coded cluster-specific GDAL path; fails elsewhere.
dyn.load("/opt/modules/climatology/gdal/3.0.2/lib/libgdal.so") # on thunder
require(raster)
require(insol)
require(lutz) # for timezones
require(R.matlab)
require(Rcpp)
require(rgdal)
# to install additional package to use in R from Matlab on thunder, log onto
# matlabuser on thunder, module load R, R, install.packages("package")
# read in parameter file
params <- readMat(paramfile)
# Inputs:
ch = params$ch[1,1]
demres = params$outSR[1,1] # resolution of fine res dem (m)
demfn = params$outDEM[1,1] # fine resolution dem
outlon = params$outlon # lons to save
outlat = params$outlat # lats to save
deltat = params$outTR[1,1] # temporal resolution (hrs)
outfn = params$outfile[1,1] # output filename
outgmttz = params$finalGMTtz[1,1] # time zone GMT of output (of WRF data)
reggmttz = params$regionGMTtz[1,1] # time zone GMT of full modeling domain
# NOTE(review): the regionGMTtz read from the param file above is
# unconditionally overwritten by this longitude heuristic (west of -115 ->
# UTC-8, otherwise UTC-7); confirm that discarding the file value is intended.
if (mean(outlon) < -115){
reggmttz <- -8
} else {
reggmttz <- -7
}
tmz = reggmttz
pts_to_model = matrix(c(outlon, outlat), ncol = 2); # lon and lat to save corrections at
# Output:
# a terrain correction value for each prediction point, each mid-month, each hour
nsites = dim(pts_to_model)[1]
solar_tc <- array(rep(NaN, nsites*12*(24/deltat)), c(nsites, 12, (24/deltat))); # site, month, hour
# 1. prepare mountainous DEM:
#-------------------------
demorig <- raster(demfn)
# Crop to the prediction points plus a 0.5-degree buffer before reprojecting.
buf <- .5
demorig <- crop(demorig,c(min(pts_to_model[,1])-buf, max(pts_to_model[,1])+buf, min(pts_to_model[,2])-buf, max(pts_to_model[,2])+buf))
demfine <- projectRaster(demorig,res=demres,crs="+proj=utm +zone=11 +ellps=WGS84 +units=m +no_defs") # zone 11 is middle of WUS
#writeRaster(dem,paste0(demdir,demres,'m/WUS_',demres,'m_utm.tif'),overwrite=T)
#rm(dem);gc()
#demfine <- raster(paste0(demdir,demres,'m/WUS_',demres,'m_utm.tif')) # takes up slightly less space if not stored in memory
# NOTE(review): `height` is computed but never used later in this script.
height = mean(values(demfine),na.rm=T)
# 2. prepare 'flat' DEM:
#-------------------------
# It is difficult (impossible?) to import WRF DEM into R because it is on an irregular grid.
# Instead, aggregate fine DEM to 4km to use as the flat DEM
demflat <- aggregate(demfine,
round(4000/demres),
fun=mean,
expand=TRUE, na.rm=TRUE)#,
#filename=paste0(demdir,demres,'m/WUS_',demres,'m_utm_agg_to_4km.tif'))
#rm(demflat);gc()
#demflat <- raster(paste0(demdir,demres,'m/WUS_',demres,'m_utm_agg_to_4km.tif'))
# then interpolate the flat dem to fine resolution to avoid spatial chunkiness in the terrain corrections
#demflat <- projectRaster(demflat, demfine, res = res(demfine))
#demflat <- disaggregate(demflat, fact=round(4000/demres), method='bilinear')
# 3. Set Parameters:
#-------------------------
# Atmospheric inputs passed to insol::insolation().
visibility=30 # km
RH=50
tempK=273.15
year=2007 # mean year of WRF time period
day=15
#timeh=12
#buf_m = 10000 # buffer in meters around each site to consider in terrain correction
# Domain-center coordinates used for sun position (single point for the chunk).
mlon = mean(pts_to_model[,1])
mlat = mean(pts_to_model[,2])
#ts = seq(0,23,deltat) # time steps each day in WRF time
#ts = seq(0, 23.99, deltat)
# Mid-interval timestamps shifted back one interval because WRF solar is an
# accumulation over the PREVIOUS timestep.
ts = seq((deltat/2), 23.99, deltat)-deltat # WRF solar is accumulated, over previous timestep
# translate wrf times to local times:
tslocal = ts + reggmttz
# Wrap negative hours into the previous local day (0-24 range).
tslocal[tslocal<0] <- tslocal[tslocal<0] + 24
ts = tslocal
# so output will be for WRF hours of interest starting at 0:00am GMT0
# 4. Run 'insol' solar monthly on mtn and flat dems:
#-------------------------
#print('running solar radiation algorithm over DEMs')
# NOTE(review): raster:::as.matrix reaches into a non-exported namespace
# symbol; raster::as.matrix() is the supported entry point.
demm <- raster:::as.matrix(demfine)
demf <- raster:::as.matrix(demflat)
# Cell sizes (m) for the fine and flat grids, used by doshade().
dl <- res(demfine)[1]
dl2 <- res(demflat)[1]
cgr <- cgrad(demfine) # compute unit vector normal to every grid cell
cgrf <- cgrad(demflat)
# For each mid-month day and each sub-daily timestep, compute the ratio of
# terrain-corrected irradiation (fine DEM) to flat irradiation (aggregated
# DEM) at every prediction point.  Night-time steps get a ratio of 1.
for (mm in 1:12){
#tz <- tz_lookup_coords(mlat, mlon, method = "fast", warn = F)
#tz <- tz_offset(paste0("2007-",mm,"-15"), tz = tz)
#tmz <- tz$utc_offset_h
jd=JDymd(year,mm,day,hour=12) # compute Julian Day from a date
day1=insol::daylength(mlat,mlon,jd,tmz) # length of daylight
# NOTE(review): 1:length(ts) is fine here since ts is never empty, but
# seq_along(ts) is the safer idiom.
for (tt in 1:length(ts)){ # for each time interval b/n sunrise and sunset:
srs = ts[tt]
if (srs<=day1[1] | srs>=day1[2]){ # if hr is during dark, no terrain correction
solar_tc[, mm, tt] <- 1
} else if (srs>day1[1] & srs<day1[2]) { # if hr is during daylight then run solar routine
# Split the decimal hour into integer hour and minutes; the fractional
# digits are read as hundredths of an hour (e.g. ".5" -> "50" -> 30 min).
hr_min = strsplit(as.character(srs),"\\.")
hr = as.numeric(hr_min[[1]][1])
minu = hr_min[[1]][2];
minu[is.na(minu)] <- 0
if (nchar(minu)<2) {
minu = paste0(minu,'0')
}
minu <- as.numeric(minu)
minu[is.na(minu)] <- 0;
minu = minu/100*60
jd=JDymd(year,mm,day,hour=hr,minute=minu)
sv=sunvector(jd,mlat,mlon,tmz)
zenith=sunpos(sv)[2]
# for mtn dem:
# insolation() returns direct and diffuse components side by side; fold
# them into a 3-d array (rows, cols, 2).
Idirdif=insolation(zenith,jd,demm,visibility,RH,tempK,0.002,0.15)
Idirdif = array(Idirdif,c(dim(Idirdif)[1],dim(Idirdif)[2]/2,2))
# plot(raster(Idirdif[,,1]))
hsh=hillshading(cgr,sv) # seems to account for surface slope
sh=doshade(demm,sv,dl) # terrain shading, 0=shade, 1=sun
## direct radiation modified by terrain + diffuse irradiation
## values in J/m2
Irr=(Idirdif[,,1] * hsh *sh + Idirdif[,,2]) * 3600 * deltat
# plot(raster(Irr))
# for flat dem:
Idirdif=insolation(zenith,jd,demf,visibility,RH,tempK,0.002,0.15)
Idirdif = array(Idirdif,c(dim(Idirdif)[1],dim(Idirdif)[2]/2,2))
hsh=hillshading(cgrf,sv) # seems to account for surface slope
sh=doshade(demf,sv,dl2) # terrain shading, 0=shade, 1=sun
## direct radiation modified by terrain + diffuse irradiation
## values in J/m2
Irrflat=(Idirdif[,,1] * hsh *sh + Idirdif[,,2]) * 3600 * deltat
# plot(raster(Irrflat))
# rasterize outputs and make comparable
Irr <- raster(Irr)
crs(Irr) <- crs(demfine)
extent(Irr) <- extent(demfine)
Irrflat <- raster(Irrflat)
crs(Irrflat) <- crs(demflat)
extent(Irrflat) <- extent(demflat)
#Irrflat <- crop(Irrflat, extent(Irr))
# Resample the flat irradiation to fine resolution and smooth with a 25x25
# moving mean to avoid blockiness in the ratio.
Ifnew <- disaggregate(Irrflat, 4000/demres, method = 'bilinear')
Ifnew <- focal(Ifnew, matrix(1,25,25), mean)#, pad = T, padValue )
Ifnew[Ifnew<0] <- 0
Ifnew <- crop(Ifnew, extent(Irr))
# calculate ratio and assign it to output
# NOTE(review): Ifnew can be 0 where the smoothed flat irradiation
# vanishes, producing Inf/NaN ratios near the terminator -- confirm
# downstream handling.
rat <- Irr/Ifnew
# transform back to lat lon
rat <- projectRaster(rat, demorig)
# extract points to model at
rat <- raster::extract(rat, pts_to_model)
# output
solar_tc[, mm, tt] <- rat
} # end if during daylight
} # end timesteps
} # end month
# save terrain corrections
writeMat(outfn, solar_tc = solar_tc)
| /get_solar_terrain_corrections.R | no_license | abbylute/WRF_downscaling | R | false | false | 7,604 | r | # Calculate solar terrain corrections for WRF data
# NOTE(review): lines below are a duplicated copy of the solar script above
# (dataset export artifact: the 'text' column repeats 'content'); the copy is
# truncated mid-loop at the end of this file.  Kept verbatim.
#paramfile <- '/Users/abbylute/Downloads/chunk_140coarse.mat'
# to run this script from command line:
# Rscript --vanilla pathtothisRscript.R pathtoparamfile.mat
# to run this script from matlab:
#pathtoR = '/Library/Frameworks/R.framework/Resources/bin/Rscript';
#pathtoRscript = '/Volumes/WDPassport/DATA/WRF/Downscaled/Code/get_solar_terrain_corrections.R';
#pathtoparamfile = '/Users/abbylute/Downloads/chunk_140coarse.mat';
#[status] = system([pathtoR,' --vanilla ',pathtoRscript,' ',pathtoparamfile]);
args = commandArgs(trailingOnly=TRUE)
# test if there is at least one argument: if not, return an error
if (length(args)==0) {
stop("At least one argument must be supplied (paramfile.mat)", call.=FALSE)
} else if (length(args)==1) {
# first (and only) argument is the path to the parameter .mat file
paramfile = args[1]
}
# so that rgdal will load:
dyn.load("/opt/modules/climatology/gdal/3.0.2/lib/libgdal.so") # on thunder
require(raster)
require(insol)
require(lutz) # for timezones
require(R.matlab)
require(Rcpp)
require(rgdal)
# to install additional package to use in R from Matlab on thunder, log onto
# matlabuser on thunder, module load R, R, install.packages("package")
# read in parameter file
params <- readMat(paramfile)
# Inputs:
ch = params$ch[1,1]
demres = params$outSR[1,1] # resolution of fine res dem (m)
demfn = params$outDEM[1,1] # fine resolution dem
outlon = params$outlon # lons to save
outlat = params$outlat # lats to save
deltat = params$outTR[1,1] # temporal resolution (hrs)
outfn = params$outfile[1,1] # output filename
outgmttz = params$finalGMTtz[1,1] # time zone GMT of output (of WRF data)
reggmttz = params$regionGMTtz[1,1] # time zone GMT of full modeling domain
# NOTE(review): as in the copy above, the file-supplied regionGMTtz is
# overwritten by this longitude heuristic.
if (mean(outlon) < -115){
reggmttz <- -8
} else {
reggmttz <- -7
}
tmz = reggmttz
pts_to_model = matrix(c(outlon, outlat), ncol = 2); # lon and lat to save corrections at
# Output:
# a terrain correction value for each prediction point, each mid-month, each hour
nsites = dim(pts_to_model)[1]
solar_tc <- array(rep(NaN, nsites*12*(24/deltat)), c(nsites, 12, (24/deltat))); # site, month, hour
# 1. prepare mountainous DEM:
#-------------------------
# (Duplicated copy of the DEM-prep section above; kept verbatim.)
demorig <- raster(demfn)
buf <- .5
demorig <- crop(demorig,c(min(pts_to_model[,1])-buf, max(pts_to_model[,1])+buf, min(pts_to_model[,2])-buf, max(pts_to_model[,2])+buf))
demfine <- projectRaster(demorig,res=demres,crs="+proj=utm +zone=11 +ellps=WGS84 +units=m +no_defs") # zone 11 is middle of WUS
#writeRaster(dem,paste0(demdir,demres,'m/WUS_',demres,'m_utm.tif'),overwrite=T)
#rm(dem);gc()
#demfine <- raster(paste0(demdir,demres,'m/WUS_',demres,'m_utm.tif')) # takes up slightly less space if not stored in memory
height = mean(values(demfine),na.rm=T)
# 2. prepare 'flat' DEM:
#-------------------------
# It is difficult (impossible?) to import WRF DEM into R because it is on an irregular grid.
# Instead, aggregate fine DEM to 4km to use as the flat DEM
demflat <- aggregate(demfine,
round(4000/demres),
fun=mean,
expand=TRUE, na.rm=TRUE)#,
#filename=paste0(demdir,demres,'m/WUS_',demres,'m_utm_agg_to_4km.tif'))
#rm(demflat);gc()
#demflat <- raster(paste0(demdir,demres,'m/WUS_',demres,'m_utm_agg_to_4km.tif'))
# then interpolate the flat dem to fine resolution to avoid spatial chunkiness in the terrain corrections
#demflat <- projectRaster(demflat, demfine, res = res(demfine))
#demflat <- disaggregate(demflat, fact=round(4000/demres), method='bilinear')
# 3. Set Parameters:
#-------------------------
# (Duplicated copy of the parameter/timestep section above; kept verbatim.)
visibility=30 # km
RH=50
tempK=273.15
year=2007 # mean year of WRF time period
day=15
#timeh=12
#buf_m = 10000 # buffer in meters around each site to consider in terrain correction
mlon = mean(pts_to_model[,1])
mlat = mean(pts_to_model[,2])
#ts = seq(0,23,deltat) # time steps each day in WRF time
#ts = seq(0, 23.99, deltat)
ts = seq((deltat/2), 23.99, deltat)-deltat # WRF solar is accumulated, over previous timestep
# translate wrf times to local times:
tslocal = ts + reggmttz
tslocal[tslocal<0] <- tslocal[tslocal<0] + 24
ts = tslocal
# so output will be for WRF hours of interest starting at 0:00am GMT0
# 4. Run 'insol' solar monthly on mtn and flat dems:
#-------------------------
#print('running solar radiation algorithm over DEMs')
demm <- raster:::as.matrix(demfine)
demf <- raster:::as.matrix(demflat)
dl <- res(demfine)[1]
dl2 <- res(demflat)[1]
cgr <- cgrad(demfine) # compute unit vector normal to every grid cell
cgrf <- cgrad(demflat)
# Loop over months (mid-month representative day) and daily timesteps, filling
# solar_tc[point, month, timestep] with the ratio of terrain-corrected to
# flat-terrain irradiance (1 during darkness).
# NOTE(review): solar_tc (preallocated 3-D array), tmz (UTC offset -- its
# computation below is commented out) and outfn (output path) must all be
# defined upstream of this chunk; confirm before running standalone.
for (mm in 1:12){
#tz <- tz_lookup_coords(mlat, mlon, method = "fast", warn = F)
#tz <- tz_offset(paste0("2007-",mm,"-15"), tz = tz)
#tmz <- tz$utc_offset_h
jd=JDymd(year,mm,day,hour=12) # compute Julian Day from a date
day1=insol::daylength(mlat,mlon,jd,tmz) # length of daylight
for (tt in 1:length(ts)){ # for each time interval b/n sunrise and sunset:
srs = ts[tt] # local decimal hour of this timestep
if (srs<=day1[1] | srs>=day1[2]){ # if hr is during dark, no terrain correction
solar_tc[, mm, tt] <- 1
} else if (srs>day1[1] & srs<day1[2]) { # if hr is during daylight then run solar routine
# Split the decimal hour into whole hours and a two-digit fractional part.
hr_min = strsplit(as.character(srs),"\\.")
hr = as.numeric(hr_min[[1]][1])
minu = hr_min[[1]][2];
minu[is.na(minu)] <- 0 # whole hours have no fractional part
if (nchar(minu)<2) {
minu = paste0(minu,'0') # right-pad so "5" reads as .50, not .05
}
minu <- as.numeric(minu)
minu[is.na(minu)] <- 0;
minu = minu/100*60 # fractional hour (hundredths) -> minutes
jd=JDymd(year,mm,day,hour=hr,minute=minu)
sv=sunvector(jd,mlat,mlon,tmz) # sun-direction vector at this instant
zenith=sunpos(sv)[2]
# for mtn dem:
# insolation() returns direct and diffuse components side by side;
# reshape into two bands: [,,1] = direct, [,,2] = diffuse.
Idirdif=insolation(zenith,jd,demm,visibility,RH,tempK,0.002,0.15)
Idirdif = array(Idirdif,c(dim(Idirdif)[1],dim(Idirdif)[2]/2,2))
# plot(raster(Idirdif[,,1]))
hsh=hillshading(cgr,sv) # seems to account for surface slope
sh=doshade(demm,sv,dl) # terrain shading, 0=shade, 1=sun
## direct radiation modified by terrain + diffuse irradiation
## values in J/m2
Irr=(Idirdif[,,1] * hsh *sh + Idirdif[,,2]) * 3600 * deltat
# plot(raster(Irr))
# for flat dem:
Idirdif=insolation(zenith,jd,demf,visibility,RH,tempK,0.002,0.15)
Idirdif = array(Idirdif,c(dim(Idirdif)[1],dim(Idirdif)[2]/2,2))
hsh=hillshading(cgrf,sv) # seems to account for surface slope
sh=doshade(demf,sv,dl2) # terrain shading, 0=shade, 1=sun
## direct radiation modified by terrain + diffuse irradiation
## values in J/m2
Irrflat=(Idirdif[,,1] * hsh *sh + Idirdif[,,2]) * 3600 * deltat
# plot(raster(Irrflat))
# rasterize outputs and make comparable
Irr <- raster(Irr)
crs(Irr) <- crs(demfine)
extent(Irr) <- extent(demfine)
Irrflat <- raster(Irrflat)
crs(Irrflat) <- crs(demflat)
extent(Irrflat) <- extent(demflat)
#Irrflat <- crop(Irrflat, extent(Irr))
# Interpolate the flat-field irradiance back to the fine grid and smooth it.
Ifnew <- disaggregate(Irrflat, 4000/demres, method = 'bilinear')
Ifnew <- focal(Ifnew, matrix(1,25,25), mean)#, pad = T, padValue )
Ifnew[Ifnew<0] <- 0 # clamp negatives (presumably interpolation artifacts)
Ifnew <- crop(Ifnew, extent(Irr))
# calculate ratio and assign it to output
rat <- Irr/Ifnew
# transform back to lat lon
rat <- projectRaster(rat, demorig)
# extract points to model at
rat <- raster::extract(rat, pts_to_model)
# output
solar_tc[, mm, tt] <- rat
} # end if during daylight
} # end timesteps
} # end month
# save terrain corrections
writeMat(outfn, solar_tc = solar_tc) # .mat output -- writeMat from R.matlab, confirm upstream library() call
|
# Libraries #########################################################################
library(dplyr)
library(lubridate)
library(tibble)
library(magrittr)
# Inputs ############################################################################
# filepath: path to the SBET .out binary file you want to examine
filepath <- "C:/Users/h.malik/Desktop/sbet_lv201910110850.out" # CHANGE_NECESSARY
# myerro: the whole-second time value/s whose coordinates you want to see
myerro <- c(449128) #CHANGE_NECESSARY
# Getting Binary ####################################################################
# The files are flat binary: records of 17 eight-byte doubles. Supplying the
# exact element count to `n` lets readBin allocate its buffer once instead of
# growing it (the original passed the byte count, an 8x over-request).
fp <- file(filepath, "rb")
fp_info <- file.info(filepath)
n_doubles <- fp_info$size %/% 8
icount <- readBin(fp, double(), size = 8, n = n_doubles)
close(fp) # BUG FIX: the connection was previously never closed (leak)
# Each 17-double record holds 1) time, 2) x coordinate, 3) y coordinate in its
# first three slots; build index sequences for those fields in the flat vector.
time_indices <- seq(1, length(icount), by = 17)
x_indices    <- seq(2, length(icount), by = 17)
y_indices    <- seq(3, length(icount), by = 17)
# Subsetting a numeric vector by an index vector is already vectorised, so the
# three columns can be assembled directly into the result data frame -- no
# preallocation or intermediate "DataFrame"-named vectors needed.
storage <- data.frame(
  time = icount[time_indices],
  x    = icount[x_indices],
  y    = icount[y_indices]
)
# Filtering Results ##############################################################
# errors: geospatial coordinates recorded at the specified whole-second time/s
# (times are truncated toward zero before matching, as in the original).
errors <- storage[trunc(storage$time) %in% myerro, ]
# TODO: STILL TO DO IS SELECT DISTINCT TIME,X,Y
| /sbet_binary_reader_with_comments.R | no_license | Fehiroh/sbet_binary_reader | R | false | false | 2,358 | r |
# Libraries #########################################################################
library(dplyr)
library(lubridate)
library(tibble)
library(magrittr)
# Inputs ############################################################################
# filepath is the filepath to the .out file you want to examine
filepath <- "C:/Users/h.malik/Desktop/sbet_lv201910110850.out" # CHANGE_NECESSARY
# myerro is the time value/s that you want to see
myerro <- c(449128) #CHANGE_NECESSARY
# Getting Binary ####################################################################
# We know the files are stored as binary files with 8 byte doubles.
# Reading speed is improved exponentially by feeding the exact size into the n
# arg of readBin
fp <- file(filepath, "rb")
fp_info <- file.info(filepath)
icount <- readBin(fp, double(), size=8, n=fp_info$size)
# It has 17 columns. The columns we want are 1, 2, and 3, which represent:
# 1) time, 2) x coordinates, and 3) y corrdinates, the relevant indices for these values within
# the icount vector are obtained using seq()
time_indices <- seq(1, by = 17, length(icount))
x_indices <- seq(2, by = 17, length(icount))
y_indices <- seq(3, by = 17, length(icount))
# Generating a df called storage to house the results. Matched nrow to the
# length of the data it will store to facilitate quick data transfer using base
# R
storage <- as.data.frame(matrix(ncol = 3, nrow = length(time_indices)))
storage_names <- c("time", "x", "y")
names(storage) <- storage_names
# Using the indices to subset icount, and then store the vectors in the
# preprepared df, storage.
temptimeDataFrame <- icount[c(time_indices)]
names(temptimeDataFrame) <- storage_names[1]
tempxDataFrame <- icount[c(x_indices)]
names(tempxDataFrame) <- storage_names[2]
tempyDataFrame <-icount[c(y_indices)]
names(tempyDataFrame) <- c("y")
storage$time <- temptimeDataFrame
storage$x <- tempxDataFrame
storage$y <- tempyDataFrame
storage <- as.data.frame(storage)
# Filtering Results ##############################################################
# returns a df names errors containing the geospatial coordinates at the
# specified time/s
errors <- storage[as.integer(storage$time, 0) %in% myerro,]
# TODO: STILL TO DO IS SELECT DISTINCT TIME,X,Y
|
#! /usr/bin/Rscript
# Bar charts of bill counts per assembly, rendered to PNG.
library(ggplot2)
df <- read.csv('data.csv')
options(device="png")
# Total number of bills per assembly.
# print() makes rendering explicit: top-level autoprint does not fire inside
# source(), so without it the plots were silently skipped when sourced.
# Mapping label inside aes() (instead of label=df$nbills) keeps the labels tied
# to the plotted rows even if the data are later filtered or reordered.
print(
  ggplot(df, aes(x=assembly_id, y=nbills, fill=nbills)) +
    geom_bar(stat="identity") +
    geom_text(aes(label=nbills), vjust=-0.4, size=4)
)
# Bills per year of each assembly's term.
print(
  ggplot(df, aes(x=assembly_id, y=bills_per_year, fill=bills_per_year)) +
    geom_bar(stat="identity") +
    geom_text(aes(label=round(bills_per_year)), vjust=-0.4, size=4)
)
| /2014/nbills/draw.r | permissive | teampopong/infographics | R | false | false | 391 | r | #! /usr/bin/Rscript
library(ggplot2)
df <- read.csv('data.csv')
options(device="png")
ggplot(df, aes(x=assembly_id, y=nbills, fill=nbills)) +
geom_bar(stat="identity") +
geom_text(label=df$nbills, vjust=-0.4, size=4)
ggplot(df, aes(x=assembly_id, y=bills_per_year, fill=bills_per_year)) +
geom_bar(stat="identity") +
geom_text(label=round(df$bills_per_year), vjust=-0.4, size=4)
|
# Standard package test entry point (run by R CMD check): loads the package
# and executes every testthat test under tests/testthat/.
library(testthat)
library(ShinytestMinimalProject)
test_check("ShinytestMinimalProject")
| /tests/testthat.R | no_license | DavidFingerKB/Shinytest | R | false | false | 90 | r | library(testthat)
library(ShinytestMinimalProject)
test_check("ShinytestMinimalProject")
|
# Auto-generated fuzz/regression test (RcppDeepState/valgrind style): call the
# internal routine multivariance:::match_rows with a fixed 5x7 numeric matrix A
# containing extreme double values and a 1x1 zero matrix B, then print the
# result's structure so output can be compared across runs.
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810535917845e+146, 4.08909449520863e+304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
##-------------------------------------------------------------------------------------------------#
##' Writes a LINKAGES config file.
##'
##' Requires a pft xml object, a list of trait values for a single model run,
##' and the name of the file to create
##'
##' @name write.config.LINKAGES
##' @title Write LINKAGES configuration files
##' @param defaults list of defaults to process
##' @param trait.samples vector of samples for a given trait
##' @param settings list of settings from pecan settings file
##' @param run.id id of run
##' @return configuration file for LINKAGES for given run
##' @export
##' @author Ann Raiho
##-------------------------------------------------------------------------------------------------#
write.config.LINKAGES <- function(defaults=NULL, trait.values=NULL, settings, run.id){
  ## ----- run/output directories on the host --------------------------------
  rundir <- file.path(settings$run$host$rundir, run.id)
  if(!file.exists(rundir)) dir.create(rundir)
  outdir <- file.path(settings$run$host$outdir, run.id)
  if(!file.exists(outdir)) dir.create(outdir)
  #-----------------------------------------------------------------------
  ## ----- LINKAGES settings file (settings.txt) -----------------------------
  start.year = as.numeric(strftime(settings$run$start.date,"%Y"))
  end.year = as.numeric(strftime(settings$run$end.date,"%Y"))
  year = seq(start.year,end.year,1)
  kprnt = 2 #year interval for output
  klast = 90 #number of plots
  nyear = length(year) #number of years to simulate
  ipolat_nums = seq(2,nyear,25) #years for climate interpolation #need to make break points generalizable someday
  ipolat = length(ipolat_nums)-1 #number of years for climate interpolation
  plat = abs(settings$run$site$lat) #latitude
  plong = abs(settings$run$site$lon) #longitude
  bgs = 127 #DOY to begin growing season
  egs = 275 #DOY to end growing season
  ## Soil water-holding limits derived from the site's sand/clay fractions,
  ## which are pulled from the BETY database.
  texture = read.csv(system.file("texture.csv",package = "PEcAn.LINKAGES"))
  dbcon <- db.open(settings$database$bety)
  soils <- db.query(paste("SELECT soil,som,sand_pct,clay_pct,soilnotes FROM sites WHERE id =", settings$run$site$id), con=dbcon)
  db.close(dbcon)
  sand = as.numeric(soils[3])/100
  clay = as.numeric(soils[4])/100
  # Find the texture-table row closest (within +/-0.1) to the site's
  # sand/clay/silt fractions; returns columns 8 and 14 of that row * 100
  # (used below as `dry` and `fc` respectively).
  soil.texture <- function(sand,clay){
    silt = 1 - sand - clay
    sand.keep = which(texture$xsand < sand + .1 & texture$xsand > sand - .1)
    clay.keep = which(texture$xclay[sand.keep] < clay + .1 & texture$xclay[sand.keep] > clay - .1)
    silt.keep = which(texture$xsilt[sand.keep[clay.keep]] < silt + .1 & texture$xsilt[sand.keep[clay.keep]] > silt - .1)
    row.keep = sand.keep[clay.keep[silt.keep]]
    return(texture[round(mean(row.keep)),c(8,14)]*100) # might need to divide by 3 or something because linkages wants cm water/30cm soil...
  }
  # Compute the texture lookup once (the original called it twice with
  # identical arguments).
  wp_fc <- soil.texture(sand = sand, clay = clay)
  fc = round(as.numeric(unlist(wp_fc[2])),digits = 2)
  dry = round(as.numeric(unlist(wp_fc[1])),digits = 2)
  sink(file.path(rundir,"settings.txt"))
  cat(kprnt,klast,nyear,sep=",")
  cat("\n")
  cat(ipolat)
  cat("\n")
  cat(ipolat_nums,sep=",")
  cat("\n")
  cat(plong,plat,bgs,egs,fc,dry,sep=",")
  sink()
  # NOTE(review): this removes a file named "settings" in the *working
  # directory*; the file written above is rundir/settings.txt, so this looks
  # like a leftover no-op -- confirm before deleting.
  unlink("settings")
  ## as initial hack, copy parameter file from inst to rundir
  ##param.file=system.file("SPP.DAT", package = "PEcAn.LINKAGES")
  ##file.copy(from = param.file,rundir)
  ## Parameters from specific spp. for Acer,betula,carya,castanea dentata,
  ##fagus grandifolia,picea,pinus,tsuga canadensis,quercus (in that order)
  #####
  ##### Write species data table (spp.txt) #####
  #####
  nspec = 9
  bmspec = nspec
  all_spp_params = read.csv(system.file("spp_matrix.csv",package = "PEcAn.LINKAGES"))
  pick_spp = c(38,72,58,8,2,1,6,7,11)  # hard-coded rows for the 9 taxa listed above
  spp_params = all_spp_params[pick_spp,3:ncol(all_spp_params)]
  spec_nums = all_spp_params[pick_spp,2]
  spp_params[is.na(spp_params)] <- 0
  spp_params <- as.data.frame(spp_params)
  sink(file.path(rundir,"spp.txt"))
  cat(nspec,bmspec,sep=",")
  cat("\n")
  cat(spec_nums)
  cat("\n")
  write.table(spp_params,sep=",",col.names=FALSE,row.names=FALSE)
  sink()
  #####
  ##### Write switch text file (switch.txt), one line per chosen species #####
  #####
  switch_chars_list = read.csv(system.file("switch.csv",package = "PEcAn.LINKAGES"))
  switch_chars = as.character(switch_chars_list[spec_nums,3])
  sink(file.path(rundir,"/switch.txt"))
  cat(switch_chars,sep="\n")
  sink()
  #####
  ##### Write underground parameters file (dirt.txt): litter decay table #####
  #####
  NLVAR = 10
  NLT = 17
  NCOHRT = 1
  init_litter_wt = c(rep(0,17)) #The weight of an incoming cohort of litter (initialized to zero)
  init_perc_N = c(.0068,.0076,.0109,.0106,.0079,.0081,.0085,.0057,
                  .0090,.0056,.0063,.0046,.0096,.0038,.0038,.0038,.0050) #Initial percent of nitrogen
  g_N_per_g_wt_loss = c(.0251,.0315,.0574,.0377,.0256,.0286,.0336,
                        .0477,.0341,.0326,.0220,.0163,.0284,.0195,
                        .0195,.0195,.0364) #Grams of nitrogen immobilized per gram weight loss;
  crit_perc_N = c(.0183,.0239,.0465,.0271,.0177,.0205,.0251,
                  .0420,.0251,.0270,.0157,.0117,.0188,.0157,
                  .0157,.0157,.0314) #Critical percent of nitrogen
  litter_type = seq(1,17,1) #Litter type: 1 through 12 are the 12 leaf-litter types in order of decreasing decay rate and increasing nitrogen-immobilization rate and correspond to species parameter TL. Thirteen is root litter. Fourteen and fifteen are fresh wood from trees less than or greater than 10 cm dbh, respectively. Sixteen is twig litter. Seventeen is well-decayed wood not yet humus;
  dest = c(1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,1,1) #Destination when cohort reaches critical percent to nitrogen (1 = humus; 2 = well-decayed wood);
  init_perc_lignin = c(.039,.121,.193,.158,.187,.206,.214,
                       .241,.248,.280,.216,.283,.253,.173,
                       .173,.173,.423) #Initial percent of lignin;
  lignin_decay_param1 = c(.5217,.5219,.7787,.6693,.5194,.6839,.7059,
                          1.1967,.6105,.5926,.9052,.5646,.7000,.4831,
                          .4831,.4831,.7222) #Lignin decay parameters [see Eq. B-8, Appendix 2, in Pastor and Post (1985)]
  lignin_decay_param2 = c(.336,.400,.508,.435,.315,.475,.460,.790,.359,
                          .383,.594,.327,.456,.299,.299,.299,.299)
  ash_corr_factor = c(.90,.90,.92,.92,.93,.96,.94,.91,.95,.97,.97,.96,.98,.99,.99,.96,.99)
  dirt_params = cbind(init_litter_wt,init_perc_N,g_N_per_g_wt_loss,crit_perc_N,
                      litter_type,dest,init_perc_lignin,lignin_decay_param1,lignin_decay_param2,
                      ash_corr_factor)
  basesc = 74. #starting humus weight
  basesn = 1.640 #starting N content
  sink(file.path(rundir,"dirt.txt"))
  cat(NLVAR,NLT,NCOHRT,sep=" ")
  cat("\n")
  write.table(dirt_params,sep=",",col.names=FALSE,row.names=FALSE)
  cat("\n")
  cat(basesc,basesn,sep=" ")
  sink()
  #-----------------------------------------------------------------------
  ## ----- launch script (job.sh) from template, with placeholders filled ----
  if (!is.null(settings$run$jobtemplate) && file.exists(settings$run$jobtemplate)) {
    jobsh <- readLines(con=settings$run$jobtemplate, n=-1)
  } else {
    jobsh <- readLines(con=system.file("template.job", package = "PEcAn.LINKAGES"), n=-1)
  }
  jobsh <- gsub('@SITE_LAT@', settings$run$site$lat, jobsh)
  jobsh <- gsub('@SITE_LON@', settings$run$site$lon, jobsh)
  jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh)
  jobsh <- gsub('@START_DATE@', settings$run$start.date, jobsh)
  jobsh <- gsub('@END_DATE@', settings$run$end.date, jobsh)
  jobsh <- gsub('@OUTDIR@', outdir, jobsh)
  jobsh <- gsub('@RUNDIR@', rundir, jobsh)
  jobsh <- gsub('@BINARY@', settings$model$binary, jobsh)
  # NOTE(review): job.sh is written under settings$rundir (local) while the
  # data files above go under settings$run$host$rundir -- presumably local vs
  # remote paths; confirm.
  writeLines(jobsh, con=file.path(settings$rundir, run.id, "job.sh"))
  Sys.chmod(file.path(settings$rundir, run.id, "job.sh"))
}
| /models/linkages/R/write.config.LINKAGES.R | permissive | davidjpmoore/pecan | R | false | false | 8,259 | r | #-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
##-------------------------------------------------------------------------------------------------#
##' Writes a LINKAGES config file.
##'
##' Requires a pft xml object, a list of trait values for a single model run,
##' and the name of the file to create
##'
##' @name write.config.LINKAGES
##' @title Write LINKAGES configuration files
##' @param defaults list of defaults to process
##' @param trait.samples vector of samples for a given trait
##' @param settings list of settings from pecan settings file
##' @param run.id id of run
##' @return configuration file for LINKAGES for given run
##' @export
##' @author Ann Raiho
##-------------------------------------------------------------------------------------------------#
write.config.LINKAGES <- function(defaults=NULL, trait.values=NULL, settings, run.id){
# find out where to write run/ouput
rundir <- file.path(settings$run$host$rundir, run.id)
if(!file.exists(rundir)) dir.create(rundir)
outdir <- file.path(settings$run$host$outdir, run.id)
if(!file.exists(outdir)) dir.create(outdir)
#-----------------------------------------------------------------------
# write LINKAGES settings file
start.year = as.numeric(strftime(settings$run$start.date,"%Y"))
end.year = as.numeric(strftime(settings$run$end.date,"%Y"))
year = seq(start.year,end.year,1)
kprnt = 2 #year interval for output
klast = 90 #number of plots
nyear = length(year) #number of years to simulate
ipolat_nums = seq(2,nyear,25) #years for climate interpolation #need to make break points generalizable someday
ipolat = length(ipolat_nums)-1 #number of years for climate interpolation
plat = abs(settings$run$site$lat) #latitude
plong = abs(settings$run$site$lon) #longitude
bgs = 127 #DOY to begin growing season
egs = 275 #DOY to end growing season
texture = read.csv(system.file("texture.csv",package = "PEcAn.LINKAGES"))
dbcon <- db.open(settings$database$bety)
soils <- db.query(paste("SELECT soil,som,sand_pct,clay_pct,soilnotes FROM sites WHERE id =", settings$run$site$id), con=dbcon)
db.close(dbcon)
sand = as.numeric(soils[3])/100
clay = as.numeric(soils[4])/100
soil.texture <- function(sand,clay){
silt = 1 - sand - clay
sand.keep = which(texture$xsand < sand + .1 & texture$xsand > sand - .1)
clay.keep = which(texture$xclay[sand.keep] < clay + .1 & texture$xclay[sand.keep] > clay - .1)
silt.keep = which(texture$xsilt[sand.keep[clay.keep]] < silt + .1 & texture$xsilt[sand.keep[clay.keep]] > silt - .1)
row.keep = sand.keep[clay.keep[silt.keep]]
return(texture[round(mean(row.keep)),c(8,14)]*100) # might need to divide by 3 or something because linkages wants cm water/30cm soil...
}
fc = round(as.numeric(unlist(soil.texture(sand = sand, clay = clay)[2])),digits = 2)
dry = round(as.numeric(unlist(soil.texture(sand = sand, clay = clay)[1])),digits = 2)
sink(file.path(rundir,"settings.txt"))
cat(kprnt,klast,nyear,sep=",")
cat("\n")
cat(ipolat)
cat("\n")
cat(ipolat_nums,sep=",")
cat("\n")
cat(plong,plat,bgs,egs,fc,dry,sep=",")
sink()
unlink("settings")
## as initial hack, copy parameter file from inst to rundir
##param.file=system.file("SPP.DAT", package = "PEcAn.LINKAGES")
##file.copy(from = param.file,rundir)
## Parameters from specific spp. for Acer,betula,carya,castanea dentata,
##fagus grandifolia,picea,pinus,tsuga canadensis,quercus (in that order)
#####
##### Write species data table #####
#####
nspec = 9
bmspec = nspec
all_spp_params = read.csv(system.file("spp_matrix.csv",package = "PEcAn.LINKAGES"))
pick_spp = c(38,72,58,8,2,1,6,7,11)
spp_params = all_spp_params[pick_spp,3:ncol(all_spp_params)]
spec_nums = all_spp_params[pick_spp,2]
spp_params[is.na(spp_params)] <- 0
spp_params <- as.data.frame(spp_params)
sink(file.path(rundir,"spp.txt"))
cat(nspec,bmspec,sep=",")
cat("\n")
cat(spec_nums)
cat("\n")
write.table(spp_params,sep=",",col.names=FALSE,row.names=FALSE)
sink()
#####
##### Write switch text file #####
#####
switch_chars_list = read.csv(system.file("switch.csv",package = "PEcAn.LINKAGES"))
switch_chars = as.character(switch_chars_list[spec_nums,3])
sink(file.path(rundir,"/switch.txt"))
cat(switch_chars,sep="\n")
sink()
#####
##### Write underground parameters file #####
#####
NLVAR = 10
NLT = 17
NCOHRT = 1
init_litter_wt = c(rep(0,17)) #The weight of an incoming cohort of litter (initialized to zero)
init_perc_N = c(.0068,.0076,.0109,.0106,.0079,.0081,.0085,.0057,
.0090,.0056,.0063,.0046,.0096,.0038,.0038,.0038,.0050) #Initial percent of nitrogen
g_N_per_g_wt_loss = c(.0251,.0315,.0574,.0377,.0256,.0286,.0336,
.0477,.0341,.0326,.0220,.0163,.0284,.0195,
.0195,.0195,.0364) #Grams of nitrogen immobilized per gram weight loss;
crit_perc_N = c(.0183,.0239,.0465,.0271,.0177,.0205,.0251,
.0420,.0251,.0270,.0157,.0117,.0188,.0157,
.0157,.0157,.0314) #Critical percent of nitrogen
litter_type = seq(1,17,1) #Litter type: 1 through 12 are the 12 leaf-litter types in order of decreasing decay rate and increasing nitrogen-immobilization rate and correspond to species parameter TL. Thirteen is root litter. Fourteen and fifteen are fresh wood from trees less than or greater than 10 cm dbh, respectively. Sixteen is twig litter. Seventeen is well-decayed wood not yet humus;
dest = c(1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,1,1) #Destination when cohort reaches critical percent to nitrogen (1 = humus; 2 = well-decayed wood);
init_perc_lignin = c(.039,.121,.193,.158,.187,.206,.214,
.241,.248,.280,.216,.283,.253,.173,
.173,.173,.423) #Initial percent of lignin;
lignin_decay_param1 = c(.5217,.5219,.7787,.6693,.5194,.6839,.7059,
1.1967,.6105,.5926,.9052,.5646,.7000,.4831,
.4831,.4831,.7222) #Lignin decay parameters [see Eq. B-8, Appendix 2, in Pastor and Post (1985)]
lignin_decay_param2 = c(.336,.400,.508,.435,.315,.475,.460,.790,.359,
.383,.594,.327,.456,.299,.299,.299,.299)
ash_corr_factor = c(.90,.90,.92,.92,.93,.96,.94,.91,.95,.97,.97,.96,.98,.99,.99,.96,.99)
dirt_params = cbind(init_litter_wt,init_perc_N,g_N_per_g_wt_loss,crit_perc_N,
litter_type,dest,init_perc_lignin,lignin_decay_param1,lignin_decay_param2,
ash_corr_factor)
basesc = 74. #starting humus weight
basesn = 1.640 #starting N content
sink(file.path(rundir,"dirt.txt"))
cat(NLVAR,NLT,NCOHRT,sep=" ")
cat("\n")
write.table(dirt_params,sep=",",col.names=FALSE,row.names=FALSE)
cat("\n")
cat(basesc,basesn,sep=" ")
sink()
#-----------------------------------------------------------------------
# create launch script (which will create symlink)
if (!is.null(settings$run$jobtemplate) && file.exists(settings$run$jobtemplate)) {
jobsh <- readLines(con=settings$run$jobtemplate, n=-1)
} else {
jobsh <- readLines(con=system.file("template.job", package = "PEcAn.LINKAGES"), n=-1)
}
jobsh <- gsub('@SITE_LAT@', settings$run$site$lat, jobsh)
jobsh <- gsub('@SITE_LON@', settings$run$site$lon, jobsh)
jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh)
jobsh <- gsub('@START_DATE@', settings$run$start.date, jobsh)
jobsh <- gsub('@END_DATE@', settings$run$end.date, jobsh)
jobsh <- gsub('@OUTDIR@', outdir, jobsh)
jobsh <- gsub('@RUNDIR@', rundir, jobsh)
jobsh <- gsub('@BINARY@', settings$model$binary, jobsh)
writeLines(jobsh, con=file.path(settings$rundir, run.id, "job.sh"))
Sys.chmod(file.path(settings$rundir, run.id, "job.sh"))
}
|
# Type-2 (metacognitive) signal-detection analysis for a tilt-discrimination
# task: builds a confidence-rating ROC over correct vs. incorrect type-1
# responses and computes its area (aroc2) and bias (broc) via Kornbrot's
# area formulas (reference on p. 398, cited below).
# NOTE(review): rm(list=ls()) / setwd() are script-level side effects;
# consider removing for reproducibility.
rm(list=ls())
setwd('~/Downloads/')
#library(readxl) # THIS LIBRARY IS TO BE LOADED FOR OPENING EXCEL files only
#datos<-read_excel('Sub13.xlsx') #READING EXCEL files only
datos<-read.csv('PT6-4.csv')
# Columns used below:
# tgtfield
# TiltResponse.RESP
# TiltResponse.CRESP
# ConfResponse.RESP
# Recode target orientation left/right -> 1/2.
datos$tgtori <-as.character(datos$tgtori)
datos$tgtori[datos$tgtori=='left'] <- 1
datos$tgtori[datos$tgtori=='right'] <- 2
datos$tgtori <-as.numeric(datos$tgtori)
# Recode response keys 'n'/'m' -> 1/2 for the given and correct responses.
datos$TiltResponse.RESP <-as.character(datos$TiltResponse.RESP)
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='n'] <- 1
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='m'] <- 2
datos$TiltResponse.RESP <-as.numeric(datos$TiltResponse.RESP)
datos$TiltResponse.CRESP <-as.character(datos$TiltResponse.CRESP)
datos$TiltResponse.CRESP[datos$TiltResponse.CRESP=='n'] <- 1
datos$TiltResponse.CRESP[datos$TiltResponse.CRESP=='m'] <- 2
datos$TiltResponse.CRESP <-as.numeric(datos$TiltResponse.CRESP)
# NOTE(review): the next four lines repeat the TiltResponse.RESP recoding
# above verbatim; they are a harmless no-op (values are already 1/2).
datos$TiltResponse.RESP <-as.character(datos$TiltResponse.RESP)
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='n'] <- 1
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='m'] <- 2
datos$TiltResponse.RESP <-as.numeric(datos$TiltResponse.RESP)
#SELECT YOUR CONDITION TO COMPUTE SENSITIVITY
# this can be tgtfield=="left" or tgtfield=="right" for the patients study
# which is changed below where it says datos$tgtfield=="left"
# E.g. if you changed datos$tgtfield to =="righ" then the code will print sensitivity
# for the condition in which targets where presented on the right
datos<-subset(datos, datos$tgtfield=="right", select=c(tgtori, TiltResponse.RESP, TiltResponse.CRESP, ConfResponse.RESP))
# if this is a reward experiment then put a # above just before datos and work
# with the next line (but then remove the # below first)
# change datos$reward== to "low" "medium" or "high" depending on the condition you like to assess
#datos<-subset(datos, datos$reward=="high", select=c(tgtori, TiltResponse.RESP, TiltResponse.CRESP, ConfResponse.RESP))
# typ1: type-1 outcome code. 1 and 4 = correct (RESP matches CRESP);
# 2 and 3 = errors; 0 if neither response is coded.
datos$typ1<-ifelse(datos$TiltResponse.CRESP=="1" & datos$TiltResponse.RESP=="1", 1,ifelse(datos$TiltResponse.CRESP=="2" & datos$TiltResponse.RESP=="1", 2, ifelse(datos$TiltResponse.CRESP=="1" & datos$TiltResponse.RESP=="2", 3, ifelse(datos$TiltResponse.CRESP=="2" & datos$TiltResponse.RESP=="2", 4, 0))))
# typ2: confidence rating as numeric 1-6 (0 for anything else).
datos$typ2<-ifelse(datos$ConfResponse.RESP=="1", 1,ifelse(datos$ConfResponse.RESP=="2", 2, ifelse(datos$ConfResponse.RESP=="3", 3,ifelse(datos$ConfResponse.RESP=="4", 4,ifelse(datos$ConfResponse.RESP=="5", 5, ifelse(datos$ConfResponse.RESP=="6", 6, 0))))))
aroc2 <- matrix(NA, ncol=1, nrow=1)
# NOTE(review): the trailing comment says there are 6 confidence ratings but
# nrats is 4, so ratings 5-6 never enter h2/fa2 below -- confirm which scale
# this dataset actually used.
nrats=4 #there are 6 conf ratings Ps should use the full scale
k = nrats+1
h2=0
fa2=0
m2=0
cr2=0
ka=0
kb=0
# getting vectors
# Per confidence level, count type-2 hits (that confidence & correct) and
# type-2 false alarms (that confidence & incorrect). Counts are stored in
# reverse order (index 1 = highest confidence) so the cumulative sums below
# trace the ROC from the strictest criterion outward.
for(con in 1:nrats) {
h2[k-1]=table(datos$typ2==con & (datos$typ1==1 | datos$typ1==4))["TRUE"] # TRUE only selects cells meeting the RULE
fa2[k-1]=table(datos$typ2==con & (datos$typ1==2 | datos$typ1==3))["TRUE"]
h2[is.na(h2)] <- 0
fa2[is.na(fa2)] <- 0
k=k-1
}
h2=h2+0.5 # add 0.5 BECAUSE?????????? -- presumably a log-linear correction so empty cells cannot zero out a rate; confirm
fa2=fa2+0.5
# getting cumul probs
h2=(h2/sum(h2))
fa2=(fa2/sum(fa2))
# add 0 to vector
h2=c(0, h2)
fa2=c(0, fa2)
cumh2=cumsum(h2) # cumulative hit / false-alarm rates tracing the type-2 ROC
cumfa2=cumsum(fa2)
# Aroc and Bk from area under curve - see Kornbrot pg 398
fin=floor(nrats/2)#round to lowest
ka<-rep()#pre-allocate vble
kb<-rep()#pre-allocate vble
j=1
# Area terms for the lower half of the rating scale...
for (n in 1:fin) {
ka[j] <- (cumh2[n+1] - cumfa2[n])^2 - (cumh2[n] - cumfa2[n+1])^2
j=j+1
}
j=1
fin=fin+1
# ...and for the upper half.
for (n in fin:nrats) {
kb[j] <- (cumh2[n+1] - cumfa2[n])^2 - (cumh2[n] - cumfa2[n+1])^2
j=j+1
}
# Area under the type-2 ROC and the log-ratio bias of its two halves.
aroc2 = 0.5 + (0.25*sum(ka)) + (0.25*sum(kb))
broc = log((0.25*sum(ka))/(0.25*sum(kb)));
aroc2<-as.numeric(aroc2)
print(aroc2)
print(broc)
| /type2.R | no_license | sotod/Metacognition | R | false | false | 3,718 | r | rm(list=ls())
setwd('~/Downloads/')
#library(readxl) # THIS LIBRARY IS TO BE LOADED FOR OPENING EXCEL files only
#datos<-read_excel('Sub13.xlsx') #READING EXCEL files only
datos<-read.csv('PT6-4.csv')
# tgtfield
# TiltResponse.RESP
# TiltResponse.CRESP
# ConfResponse.RESP
datos$tgtori <-as.character(datos$tgtori)
datos$tgtori[datos$tgtori=='left'] <- 1
datos$tgtori[datos$tgtori=='right'] <- 2
datos$tgtori <-as.numeric(datos$tgtori)
datos$TiltResponse.RESP <-as.character(datos$TiltResponse.RESP)
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='n'] <- 1
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='m'] <- 2
datos$TiltResponse.RESP <-as.numeric(datos$TiltResponse.RESP)
datos$TiltResponse.CRESP <-as.character(datos$TiltResponse.CRESP)
datos$TiltResponse.CRESP[datos$TiltResponse.CRESP=='n'] <- 1
datos$TiltResponse.CRESP[datos$TiltResponse.CRESP=='m'] <- 2
datos$TiltResponse.CRESP <-as.numeric(datos$TiltResponse.CRESP)
datos$TiltResponse.RESP <-as.character(datos$TiltResponse.RESP)
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='n'] <- 1
datos$TiltResponse.RESP[datos$TiltResponse.RESP=='m'] <- 2
datos$TiltResponse.RESP <-as.numeric(datos$TiltResponse.RESP)
#SELECT YOUR CONDITION TO COMPUTE SENSITIVITY
# this can be tgtfield=="left" or tgtfield=="right" for the patients study
# which is changed below where it says datos$tgtfield=="left"
# E.g. if you changed datos$tgtfield to =="righ" then the code will print sensitivity
# for the condition in which targets where presented on the right
datos<-subset(datos, datos$tgtfield=="right", select=c(tgtori, TiltResponse.RESP, TiltResponse.CRESP, ConfResponse.RESP))
# if this is a reward experiment then put a # above just before datos and work
# with the next line (but then remove the # below first)
# change datos$reward== to "low" "medium" or "high" depending on the condition you like to assess
#datos<-subset(datos, datos$reward=="high", select=c(tgtori, TiltResponse.RESP, TiltResponse.CRESP, ConfResponse.RESP))
datos$typ1<-ifelse(datos$TiltResponse.CRESP=="1" & datos$TiltResponse.RESP=="1", 1,ifelse(datos$TiltResponse.CRESP=="2" & datos$TiltResponse.RESP=="1", 2, ifelse(datos$TiltResponse.CRESP=="1" & datos$TiltResponse.RESP=="2", 3, ifelse(datos$TiltResponse.CRESP=="2" & datos$TiltResponse.RESP=="2", 4, 0))))
datos$typ2<-ifelse(datos$ConfResponse.RESP=="1", 1,ifelse(datos$ConfResponse.RESP=="2", 2, ifelse(datos$ConfResponse.RESP=="3", 3,ifelse(datos$ConfResponse.RESP=="4", 4,ifelse(datos$ConfResponse.RESP=="5", 5, ifelse(datos$ConfResponse.RESP=="6", 6, 0))))))
aroc2 <- matrix(NA, ncol=1, nrow=1)
nrats=4 #there are 6 conf ratings Ps should use the full scale
k = nrats+1
h2=0
fa2=0
m2=0
cr2=0
ka=0
kb=0
# getting vectors
for(con in 1:nrats) {
h2[k-1]=table(datos$typ2==con & (datos$typ1==1 | datos$typ1==4))["TRUE"] # TRUE only selects cells meeting the RULE
fa2[k-1]=table(datos$typ2==con & (datos$typ1==2 | datos$typ1==3))["TRUE"]
h2[is.na(h2)] <- 0
fa2[is.na(fa2)] <- 0
k=k-1
}
h2=h2+0.5 # add 0.5 BECAUSE??????????
fa2=fa2+0.5
# getting cumul probs
h2=(h2/sum(h2))
fa2=(fa2/sum(fa2))
# add 0 to vector
h2=c(0, h2)
fa2=c(0, fa2)
cumh2=cumsum(h2)
cumfa2=cumsum(fa2)
# Aroc and Bk from area under curve - see Kornbrot pg 398
fin=floor(nrats/2)#round to lowest
ka<-rep()#pre-allocate vble
kb<-rep()#pre-allocate vble
j=1
for (n in 1:fin) {
ka[j] <- (cumh2[n+1] - cumfa2[n])^2 - (cumh2[n] - cumfa2[n+1])^2
j=j+1
}
j=1
fin=fin+1
for (n in fin:nrats) {
kb[j] <- (cumh2[n+1] - cumfa2[n])^2 - (cumh2[n] - cumfa2[n+1])^2
j=j+1
}
aroc2 = 0.5 + (0.25*sum(ka)) + (0.25*sum(kb))
broc = log((0.25*sum(ka))/(0.25*sum(kb)));
aroc2<-as.numeric(aroc2)
print(aroc2)
print(broc)
|
# ---- Package setup ----
#install.packages("ggmap")
#install.packages("maptools")
#install.packages("ggthemes")
#install.packages("ggplot2")
#install.packages("sp")
library(maptools)
library(ggmap)
library(ggthemes)
library(ggplot2)
library(sp)
# Load the district-wise IPC crime data for 2001-2012.
data1 <- read.csv("01_District_wise_crimes_committed_IPC_2001_2012.csv")
#---------------Recomputing total ipc crimes without the 'other IPCs' field-------------
# Vectorized rowSums() replaces the original row-by-row loop
# (`for (i in 1:nrow(data1)) data1$TOTAL.IPC.CRIMES[i] <- sum(data1[i,4:31])`):
# one call instead of nrow(data1) single-row subsets, and no 1:nrow() footgun
# on an empty data frame.
data1$TOTAL.IPC.CRIMES <- rowSums(data1[, 4:31])
############################### CRIMES PER YEAR ACROSS INDIA ################
# Split the data into one data frame per year (order follows unique(data1$YEAR),
# assumed to be 2001..2012 as in the source file).
b <- lapply(unique(data1$YEAR), function(x) data1[data1$YEAR == x, ])
# Drop the state-total rows so each district is counted exactly once per year.
# BUG FIX: the original `for (i in b) i <- subset(...)` only rebound the loop
# variable and never modified `b`, so the TOTAL rows were double-counted in
# the yearly sums below.
b <- lapply(b, function(yr) subset(yr, DISTRICT != "TOTAL" & DISTRICT != "DELHI UT TOTAL"))
# Total IPC crimes committed per year.
totcrime <- vapply(b, function(yr) sum(yr$TOTAL.IPC.CRIMES), numeric(1))
# Year-wise crime counts, visualized as a bar plot.
peryear <- data.frame(YEAR = c(2001:2012), crimecount = totcrime)
ggplot(peryear)+geom_bar(aes(YEAR,crimecount,fill=YEAR),stat="identity")+xlab("Year") +ylab("Total Crimes") + ggtitle("Crimes per Year (India 2001-2012)") + guides(fill=FALSE) + theme_economist_white()
####################### CRIMES PER STATE OVER THE YEARS ############################
# Retrieving only totals per state
newdata<-subset(data1,data1$DISTRICT=="TOTAL"| data1$DISTRICT=="DELHI UT TOTAL")
# Segregating data based on states/Union territories
a<-lapply(unique(data1$STATE.UT), function(x) data1[data1$STATE.UT == x,])
totperstate<-c()
states_and_ut<-newdata$STATE.UT
# Calculate total crime count per state throughout the years 2001-2012
# (only the per-state TOTAL rows are summed, so districts are not double-counted)
for (i in a){
j<-subset(i,i$DISTRICT=="TOTAL"| i$DISTRICT=="DELHI UT TOTAL")
totperstate<-append(totperstate,sum(j$TOTAL.IPC.CRIMES))
}
# Creating a dataframe for state/UT-wise crime count throughout the years.
# NOTE(review): totperstate follows the order of unique(data1$STATE.UT) while
# States_and_UT follows unique(newdata$STATE.UT); the pairing is only correct
# when both appear in the same order (i.e. the file is grouped by state) -- verify.
top_states<-data.frame(States_and_UT=unique(states_and_ut),Totalcrimes=totperstate)
# Sorting the data in descending order of crime count and retrieving Top 5 states with highest crime count over the years
top_states <- top_states[order(-top_states$Totalcrimes),]
top_five<-head(top_states,5)
#-------------------------Top 5 States with high crime rate------------------------
#MAHARASHTRA
#ANDHRA PRADESH
#UTTAR PRADESH
#MADHYA PRADESH
#RAJASTHAN
# Retrieving data regarding the Top 5 states with highest crime and plotting line graphs of various crimes per state
top_five_data<-subset(newdata,newdata$STATE.UT %in% top_five$States_and_UT)
# One column per plotted crime type; DRBT aggregates dacoity/robbery/burglary/theft.
states_data<-data.frame(STATE.UT=top_five_data$STATE.UT,YEAR=top_five_data$YEAR,MURDER=top_five_data$MURDER,RAPE=top_five_data$RAPE,Kidnapping_and_Abduction=top_five_data$KIDNAPPING.and.ABDUCTION,DRBT=(top_five_data$DACOITY+top_five_data$ROBBERY+top_five_data$BURGLARY+top_five_data$THEFT),Cheating=top_five_data$CHEATING,Arson=top_five_data$ARSON)
year_wise_five<-lapply(unique(states_data$STATE.UT), function(x) states_data[states_data$STATE.UT == x,])
# One base-graphics plot per state: columns 3-8 (murder ... arson) against YEAR
# (column 2); the legend labels and colours follow the same column order.
for (i in year_wise_five){
plot(i[,c(2,3)],type='o',ylim=c(min(i[,c(3,4,5,6,7,8)]),max(i[,c(3,4,5,6,7,8)])+20000),xlim=c(2000,2013),col='red',ylab='Crime count',xlab='YEAR',main=unique(i$STATE.UT))
lines(i[,c(2,4)],type='o',col='blue')
lines(i[,c(2,5)],type='o',col='green')
lines(i[,c(2,6)],type='o',col='purple')
lines(i[,c(2,7)],type='o',col='cyan')
lines(i[,c(2,8)],type='o',col='black')
legend("topright",pch=1, c('Murder','Rape','Kidnapping and abduction','DRBT (Dacoity, Robbery, Burglary, Theft)','Cheating','Arson'), lty=c(1,1), lwd=c(2.5,2.5),col=c('red','blue','green','purple','cyan','black'))
}
############################ CRIME COUNT PER DISTRICT #################################
# Retrieve district wise data but exclude the redundant 'TOTAL' fields
newdata<-subset(data1,data1$DISTRICT!="TOTAL" & data1$DISTRICT!="DELHI UT TOTAL")
totperdistrict<-c()
districts<-newdata$DISTRICT
# Segregating data district-wise
d<-lapply(unique(newdata$DISTRICT), function(x) newdata[newdata$DISTRICT == x,])
# Calculating total crimes committed per district over the years
for (i in d){
totperdistrict<-append(totperdistrict,sum(i$TOTAL.IPC.CRIMES))
}
# Creating a dataframe for District-wise crime count while ignoring certain vague fields
# (directional names like NORTH/SOUTH are ambiguous and would geocode wrongly)
crimeperdistrict<-data.frame(DISTRICT=unique(districts),Totalcrimes=totperdistrict)
crimeperdistrict<-subset(crimeperdistrict,crimeperdistrict$DISTRICT!="NORTH" & crimeperdistrict$DISTRICT!="SOUTH" & crimeperdistrict$DISTRICT!="EAST" & crimeperdistrict$DISTRICT!="WEST" & crimeperdistrict$DISTRICT!="CENTRAL" & crimeperdistrict$DISTRICT!="NORTH EAST" & crimeperdistrict$DISTRICT!="NORTH WEST" & crimeperdistrict$DISTRICT!="SOUTH WEST" & crimeperdistrict$DISTRICT!="SOUTH EAST" & crimeperdistrict$DISTRICT!="RURAL" & crimeperdistrict$DISTRICT!="URBAN")
# Retrieving latitude and longitude (geolocation) values for each district
# NOTE(review): geocode() makes one network request per district; recent ggmap
# versions also require a registered API key -- confirm before running.
for (i in 1:nrow(crimeperdistrict)) {
latlon = geocode(as.character(crimeperdistrict[i,1]))
crimeperdistrict$lon[i] = as.numeric(latlon[1])
crimeperdistrict$lat[i] = as.numeric(latlon[2])
}
# Ignoring other erroneous rows where the District name might not make complete sense and hence provide wrong geolocation value
# (longitudes 60-100 E roughly bound India, so anything outside was mis-geocoded)
crimeperdistrict<-subset(crimeperdistrict,crimeperdistrict$lon < 100.0 & crimeperdistrict$lon > 60.0)
# Read a district-level outline map of India
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
ind2 <-readRDS("C:/Users/harsh/OneDrive/Documents/R/Project/IND_adm2.rds")
# Retrieve geocode for India and plot it's google map
india_center<-as.numeric(geocode("India"))
india<-ggmap(get_googlemap(center=india_center, scale=1, zoom=4), extent="normal")
# PS: Seen better in a larger window
# NOTE(review): col='red' sits inside aes(), so it is mapped as a legend entry
# labelled "red" rather than setting the point colour; also ylab/xlab are
# swapped relative to x=lon, y=lat.
india + geom_point(data=crimeperdistrict,aes(x=lon, y=lat, col='red',size=Totalcrimes))+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District")
# Order Districts based on their crime count over the years and plot highest five
crimeperdistrict<-crimeperdistrict[order(-crimeperdistrict$Totalcrimes),]
headcrimeperdistrict<-head(crimeperdistrict,5)
ggplot(ind2) + geom_path(aes(x=long, y=lat,group=group), color='gray') + coord_equal()+ geom_point(data=headcrimeperdistrict,aes(x=lon, y=lat, col="red",size=Totalcrimes)) + geom_text(data=headcrimeperdistrict,aes(x = lon, y = lat, label = headcrimeperdistrict$DISTRICT), size = 2)+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District (TOP 5)")
# Order Districts based on their crime count over the years and plot lowest ten
tailcrimeperdistrict<-tail(crimeperdistrict,10)
ggplot(ind2) + geom_path(aes(x=long, y=lat,group=group), color='gray') + coord_equal()+ geom_point(data=tailcrimeperdistrict,aes(x=lon, y=lat, col="red",size=Totalcrimes)) + geom_text(data=tailcrimeperdistrict,aes(x = lon, y = lat, label = tailcrimeperdistrict$DISTRICT), size = 2)+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District (BOTTOM 10)") | /visualization.R | no_license | HarshitaSingh97/Analysis-of-crime-patterns-in-India | R | false | false | 6,982 | r | #install.packages("ggmap")
#install.packages("maptools")
#install.packages("ggthemes")
#install.packages("ggplot2")
#install.packages("sp")
library(maptools)
library(ggmap)
library(ggthemes)
library(ggplot2)
library(sp)
# Load the district-wise IPC crime data for 2001-2012.
data1 <- read.csv("01_District_wise_crimes_committed_IPC_2001_2012.csv")
#---------------Recomputing total ipc crimes without the 'other IPCs' field-------------
# Vectorized rowSums() replaces the original row-by-row loop
# (`for (i in 1:nrow(data1)) data1$TOTAL.IPC.CRIMES[i] <- sum(data1[i,4:31])`):
# one call instead of nrow(data1) single-row subsets, and no 1:nrow() footgun
# on an empty data frame.
data1$TOTAL.IPC.CRIMES <- rowSums(data1[, 4:31])
############################### CRIMES PER YEAR ACROSS INDIA ################
# Split the data into one data frame per year (order follows unique(data1$YEAR),
# assumed to be 2001..2012 as in the source file).
b <- lapply(unique(data1$YEAR), function(x) data1[data1$YEAR == x, ])
# Drop the state-total rows so each district is counted exactly once per year.
# BUG FIX: the original `for (i in b) i <- subset(...)` only rebound the loop
# variable and never modified `b`, so the TOTAL rows were double-counted in
# the yearly sums below.
b <- lapply(b, function(yr) subset(yr, DISTRICT != "TOTAL" & DISTRICT != "DELHI UT TOTAL"))
# Total IPC crimes committed per year.
totcrime <- vapply(b, function(yr) sum(yr$TOTAL.IPC.CRIMES), numeric(1))
# Year-wise crime counts, visualized as a bar plot.
peryear <- data.frame(YEAR = c(2001:2012), crimecount = totcrime)
ggplot(peryear)+geom_bar(aes(YEAR,crimecount,fill=YEAR),stat="identity")+xlab("Year") +ylab("Total Crimes") + ggtitle("Crimes per Year (India 2001-2012)") + guides(fill=FALSE) + theme_economist_white()
####################### CRIMES PER STATE OVER THE YEARS ############################
# Retrieving only totals per state
newdata<-subset(data1,data1$DISTRICT=="TOTAL"| data1$DISTRICT=="DELHI UT TOTAL")
# Segregating data based on states/Union territories
a<-lapply(unique(data1$STATE.UT), function(x) data1[data1$STATE.UT == x,])
totperstate<-c()
states_and_ut<-newdata$STATE.UT
# Calculate total crime count per state throughout the years 2001-2012
# (only the per-state TOTAL rows are summed, so districts are not double-counted)
for (i in a){
j<-subset(i,i$DISTRICT=="TOTAL"| i$DISTRICT=="DELHI UT TOTAL")
totperstate<-append(totperstate,sum(j$TOTAL.IPC.CRIMES))
}
# Creating a dataframe for state/UT-wise crime count throughout the years.
# NOTE(review): totperstate follows the order of unique(data1$STATE.UT) while
# States_and_UT follows unique(newdata$STATE.UT); the pairing is only correct
# when both appear in the same order (i.e. the file is grouped by state) -- verify.
top_states<-data.frame(States_and_UT=unique(states_and_ut),Totalcrimes=totperstate)
# Sorting the data in descending order of crime count and retrieving Top 5 states with highest crime count over the years
top_states <- top_states[order(-top_states$Totalcrimes),]
top_five<-head(top_states,5)
#-------------------------Top 5 States with high crime rate------------------------
#MAHARASHTRA
#ANDHRA PRADESH
#UTTAR PRADESH
#MADHYA PRADESH
#RAJASTHAN
# Retrieving data regarding the Top 5 states with highest crime and plotting line graphs of various crimes per state
top_five_data<-subset(newdata,newdata$STATE.UT %in% top_five$States_and_UT)
# One column per plotted crime type; DRBT aggregates dacoity/robbery/burglary/theft.
states_data<-data.frame(STATE.UT=top_five_data$STATE.UT,YEAR=top_five_data$YEAR,MURDER=top_five_data$MURDER,RAPE=top_five_data$RAPE,Kidnapping_and_Abduction=top_five_data$KIDNAPPING.and.ABDUCTION,DRBT=(top_five_data$DACOITY+top_five_data$ROBBERY+top_five_data$BURGLARY+top_five_data$THEFT),Cheating=top_five_data$CHEATING,Arson=top_five_data$ARSON)
year_wise_five<-lapply(unique(states_data$STATE.UT), function(x) states_data[states_data$STATE.UT == x,])
# One base-graphics plot per state: columns 3-8 (murder ... arson) against YEAR
# (column 2); the legend labels and colours follow the same column order.
for (i in year_wise_five){
plot(i[,c(2,3)],type='o',ylim=c(min(i[,c(3,4,5,6,7,8)]),max(i[,c(3,4,5,6,7,8)])+20000),xlim=c(2000,2013),col='red',ylab='Crime count',xlab='YEAR',main=unique(i$STATE.UT))
lines(i[,c(2,4)],type='o',col='blue')
lines(i[,c(2,5)],type='o',col='green')
lines(i[,c(2,6)],type='o',col='purple')
lines(i[,c(2,7)],type='o',col='cyan')
lines(i[,c(2,8)],type='o',col='black')
legend("topright",pch=1, c('Murder','Rape','Kidnapping and abduction','DRBT (Dacoity, Robbery, Burglary, Theft)','Cheating','Arson'), lty=c(1,1), lwd=c(2.5,2.5),col=c('red','blue','green','purple','cyan','black'))
}
############################ CRIME COUNT PER DISTRICT #################################
# Retrieve district wise data but exclude the redundant 'TOTAL' fields
newdata<-subset(data1,data1$DISTRICT!="TOTAL" & data1$DISTRICT!="DELHI UT TOTAL")
totperdistrict<-c()
districts<-newdata$DISTRICT
# Segregating data district-wise
d<-lapply(unique(newdata$DISTRICT), function(x) newdata[newdata$DISTRICT == x,])
# Calculating total crimes committed per district over the years
for (i in d){
totperdistrict<-append(totperdistrict,sum(i$TOTAL.IPC.CRIMES))
}
# Creating a dataframe for District-wise crime count while ignoring certain vague fields
# (directional names like NORTH/SOUTH are ambiguous and would geocode wrongly)
crimeperdistrict<-data.frame(DISTRICT=unique(districts),Totalcrimes=totperdistrict)
crimeperdistrict<-subset(crimeperdistrict,crimeperdistrict$DISTRICT!="NORTH" & crimeperdistrict$DISTRICT!="SOUTH" & crimeperdistrict$DISTRICT!="EAST" & crimeperdistrict$DISTRICT!="WEST" & crimeperdistrict$DISTRICT!="CENTRAL" & crimeperdistrict$DISTRICT!="NORTH EAST" & crimeperdistrict$DISTRICT!="NORTH WEST" & crimeperdistrict$DISTRICT!="SOUTH WEST" & crimeperdistrict$DISTRICT!="SOUTH EAST" & crimeperdistrict$DISTRICT!="RURAL" & crimeperdistrict$DISTRICT!="URBAN")
# Retrieving latitude and longitude (geolocation) values for each district
# NOTE(review): geocode() makes one network request per district; recent ggmap
# versions also require a registered API key -- confirm before running.
for (i in 1:nrow(crimeperdistrict)) {
latlon = geocode(as.character(crimeperdistrict[i,1]))
crimeperdistrict$lon[i] = as.numeric(latlon[1])
crimeperdistrict$lat[i] = as.numeric(latlon[2])
}
# Ignoring other erroneous rows where the District name might not make complete sense and hence provide wrong geolocation value
# (longitudes 60-100 E roughly bound India, so anything outside was mis-geocoded)
crimeperdistrict<-subset(crimeperdistrict,crimeperdistrict$lon < 100.0 & crimeperdistrict$lon > 60.0)
# Read a district-level outline map of India
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
ind2 <-readRDS("C:/Users/harsh/OneDrive/Documents/R/Project/IND_adm2.rds")
# Retrieve geocode for India and plot it's google map
india_center<-as.numeric(geocode("India"))
india<-ggmap(get_googlemap(center=india_center, scale=1, zoom=4), extent="normal")
# PS: Seen better in a larger window
# NOTE(review): col='red' sits inside aes(), so it is mapped as a legend entry
# labelled "red" rather than setting the point colour; also ylab/xlab are
# swapped relative to x=lon, y=lat.
india + geom_point(data=crimeperdistrict,aes(x=lon, y=lat, col='red',size=Totalcrimes))+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District")
# Order Districts based on their crime count over the years and plot highest five
crimeperdistrict<-crimeperdistrict[order(-crimeperdistrict$Totalcrimes),]
headcrimeperdistrict<-head(crimeperdistrict,5)
ggplot(ind2) + geom_path(aes(x=long, y=lat,group=group), color='gray') + coord_equal()+ geom_point(data=headcrimeperdistrict,aes(x=lon, y=lat, col="red",size=Totalcrimes)) + geom_text(data=headcrimeperdistrict,aes(x = lon, y = lat, label = headcrimeperdistrict$DISTRICT), size = 2)+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District (TOP 5)")
# Order Districts based on their crime count over the years and plot lowest ten
tailcrimeperdistrict<-tail(crimeperdistrict,10)
ggplot(ind2) + geom_path(aes(x=long, y=lat,group=group), color='gray') + coord_equal()+ geom_point(data=tailcrimeperdistrict,aes(x=lon, y=lat, col="red",size=Totalcrimes)) + geom_text(data=tailcrimeperdistrict,aes(x = lon, y = lat, label = tailcrimeperdistrict$DISTRICT), size = 2)+ylab('Longitude')+xlab('Latitude') + ggtitle("Crimes per District (BOTTOM 10)") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SDMXDataFlows.R,
% R/SDMXDataFlows-methods.R
\docType{class}
\name{SDMXDataFlows}
\alias{SDMXDataFlows}
\alias{SDMXDataFlows-class}
\alias{SDMXDataFlows,SDMXDataFlows-method}
\title{Class "SDMXDataFlows"}
\usage{
SDMXDataFlows(xmlObj, namespaces)
}
\arguments{
\item{xmlObj}{object of class "XMLInternalDocument" derived from XML package}
\item{namespaces}{object of class "data.frame" giving the list of namespace URIs}
}
\value{
an object of class "SDMXDataFlows"
}
\description{
A basic class to handle SDMX DataFlows
}
\section{Slots}{
\describe{
\item{\code{dataflows}}{Object of class "list" giving the list of DataFlows (datasets),
(see \link{SDMXDataFlow})}
}}
\section{Warning}{
This class is not useful in itself, but all SDMX non-abstract classes will
encapsulate it as slot, when parsing an SDMX-ML document (Concepts, or
DataStructureDefinition)
}
\seealso{
\link{readSDMX}
}
\author{
Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}
}
| /man/SDMXDataFlows.Rd | no_license | cran/rsdmx | R | false | true | 1,091 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SDMXDataFlows.R,
% R/SDMXDataFlows-methods.R
\docType{class}
\name{SDMXDataFlows}
\alias{SDMXDataFlows}
\alias{SDMXDataFlows-class}
\alias{SDMXDataFlows,SDMXDataFlows-method}
\title{Class "SDMXDataFlows"}
\usage{
SDMXDataFlows(xmlObj, namespaces)
}
\arguments{
\item{xmlObj}{object of class "XMLInternalDocument" derived from XML package}
\item{namespaces}{object of class "data.frame" giving the list of namespace URIs}
}
\value{
an object of class "SDMXDataFlows"
}
\description{
A basic class to handle SDMX DataFlows
}
\section{Slots}{
\describe{
\item{\code{dataflows}}{Object of class "list" giving the list of DataFlows (datasets),
(see \link{SDMXDataFlow})}
}}
\section{Warning}{
This class is not useful in itself, but all SDMX non-abstract classes will
encapsulate it as slot, when parsing an SDMX-ML document (Concepts, or
DataStructureDefinition)
}
\seealso{
\link{readSDMX}
}
\author{
Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}
}
|
#fetch_overview
#This function takes 1 parameter that's the directory of file
#it will return the overview table (a cleaned data frame, or the string
#"Workplan not entered" when the workplan sheet is empty)
fetch_overview <- function (path = NA){
#import table from notebook (named region "table", via readNamedRegionFromFile)
pipe <- readNamedRegionFromFile(path, name= 'table', header=TRUE)
##--ID VERSION OF NOTEBOOK (version 1 w/o revised contract date)
#version 2 notebooks carry exactly one column whose name contains "Revised"
test <- grepl("Revised",colnames(pipe))
if(sum(test) == 1){
version = 2
} else if (sum(test) == 0){
version = 1
} else {
stop("Error in colnames. Can't ID version.")
}
##--GET VERSION 1 TABLE
if(version == 1){
#id relevant columns by (case-insensitive) name fragments
status = grep("status", colnames(pipe),
ignore.case = TRUE)
contract = grep("due", colnames(pipe),
ignore.case = TRUE)
actual = grep("actual", colnames(pipe),
ignore.case = TRUE)
deliverable = grep("deliverable", colnames(pipe),
ignore.case = TRUE)
pipe.t <- pipe[,c(status, contract, actual, deliverable)]
#clean table up: normalize names, strip the " 00:00:00" time suffix from dates
colnames(pipe.t) <- c("Status", "Contract.due.date", "Actual.date", "Task/Deliverable")
pipe.t$Contract.due.date <-gsub("\\ 00:00:00","\\",pipe.t$Contract.due.date)
pipe.t$Actual.date <-gsub("\\ 00:00:00","\\",pipe.t$Actual.date)
#replace na's with a single blank so empty cells render cleanly
pipe.t$Status[is.na(pipe.t$Status)] <- " "
pipe.t$Actual.date <- as.character(pipe.t$Actual.date)
pipe.t$Actual.date[is.na(pipe.t$Actual.date)] <- " "
pipe.t$Contract.due.date <- as.character(pipe.t$Contract.due.date)
pipe.t$Contract.due.date[is.na(pipe.t$Contract.due.date)] <- " "
}
##--GET VERSION 2 TABLE
if (version ==2) {
#--Find revised contract column
id <- grep("revised", colnames(pipe),
ignore.case = TRUE)
#test.revised is TRUE when the revised column is entirely NA (blank)
test.revised <- nrow(pipe) == sum(is.na(pipe[,id]))
#--If revised column isn't blank, use "Revised Due Date".
if(!test.revised){
contract = grep("revised", colnames(pipe),
ignore.case = TRUE)
cols <- c("Status", "Revised.due.date", "Actual.date", "Task/Deliverable")
#--If revised column is blank, fall back to the original contract due date.
#NOTE(review): this grep("due") may match both the contract and the revised
#due-date columns if both names contain "due" -- confirm v2 column naming.
} else if (test.revised){
contract = grep("due", colnames(pipe),
ignore.case = TRUE)
cols <- c("Status", "Contract.due.date", "Actual.date", "Task/Deliverable")
} else {
stop("Error: Can't determine if Revised column contains info.")
}
#--Get other columns and consolidate in 1 frame
status = grep("status", colnames(pipe),
ignore.case = TRUE)
actual = grep("actual", colnames(pipe),
ignore.case = TRUE)
deliverable = grep("deliverable", colnames(pipe),
ignore.case = TRUE)
pipe.t <- pipe[,c(status, contract, actual, deliverable)]
colnames(pipe.t) <- cols
#--Clean up date columns: strip time suffix, coerce to character, blank NAs
dates <- grep("date", colnames(pipe.t),
ignore.case = TRUE)
for(i in dates){
pipe.t[,i] <- gsub("\\ 00:00:00","\\",pipe.t[,i])
pipe.t[,i] <- as.character(pipe.t[,i])
pipe.t[,i][is.na(pipe.t[,i])] <- " "
}
#--Clean up status
pipe.t$Status[is.na(pipe.t$Status)] <- " "
}
#get rid of blank rows for deliverables (column 4 is Task/Deliverable)
pipe.t <- pipe.t[!(is.na(pipe.t[,4])),]
#if the first row is entirely NA, the workplan was never filled in
if(sum(is.na(pipe.t[1,])) == ncol(pipe.t)){
pipe.t <- print("Workplan not entered")
}
#return table
pipe.t
} | /R/fetch_overview.R | no_license | michaelrahija/monitorFrame | R | false | false | 3,564 | r | #fetch_overview
#fetch_overview: builds the overview table from a workplan notebook.
#path: path of the notebook file. Returns the cleaned overview table,
#or the string "Workplan not entered" when the workplan sheet is empty.
fetch_overview <- function (path = NA){
# Read the named region "table" from the notebook.
wb <- readNamedRegionFromFile(path, name = 'table', header = TRUE)
# Identify the notebook version: v2 notebooks carry a "Revised" column.
revised.hits <- sum(grepl("Revised", colnames(wb)))
if (revised.hits == 1) {
  version <- 2
} else if (revised.hits == 0) {
  version <- 1
} else {
  stop("Error in colnames. Can't ID version.")
}
# Locate a column index by a case-insensitive name fragment.
find.col <- function(pattern) grep(pattern, colnames(wb), ignore.case = TRUE)
if (version == 1) {
  # Version 1: status / contract due / actual / deliverable columns.
  overview <- wb[, c(find.col("status"), find.col("due"),
                     find.col("actual"), find.col("deliverable"))]
  colnames(overview) <- c("Status", "Contract.due.date", "Actual.date", "Task/Deliverable")
  # Strip the time suffix from the dates and blank out missing cells.
  overview$Contract.due.date <- gsub("\\ 00:00:00", "\\", overview$Contract.due.date)
  overview$Actual.date <- gsub("\\ 00:00:00", "\\", overview$Actual.date)
  overview$Status[is.na(overview$Status)] <- " "
  overview$Actual.date <- as.character(overview$Actual.date)
  overview$Actual.date[is.na(overview$Actual.date)] <- " "
  overview$Contract.due.date <- as.character(overview$Contract.due.date)
  overview$Contract.due.date[is.na(overview$Contract.due.date)] <- " "
}
if (version == 2) {
  # Version 2: prefer the revised due date when that column holds any data.
  revised.col <- find.col("revised")
  revised.blank <- nrow(wb) == sum(is.na(wb[, revised.col]))
  if (!revised.blank) {
    contract <- find.col("revised")
    cols <- c("Status", "Revised.due.date", "Actual.date", "Task/Deliverable")
  } else if (revised.blank) {
    contract <- find.col("due")
    cols <- c("Status", "Contract.due.date", "Actual.date", "Task/Deliverable")
  } else {
    stop("Error: Can't determine if Revised column contains info.")
  }
  overview <- wb[, c(find.col("status"), contract,
                     find.col("actual"), find.col("deliverable"))]
  colnames(overview) <- cols
  # Strip time suffixes, coerce dates to character, blank out missing cells.
  for (i in grep("date", colnames(overview), ignore.case = TRUE)) {
    overview[, i] <- gsub("\\ 00:00:00", "\\", overview[, i])
    overview[, i] <- as.character(overview[, i])
    overview[, i][is.na(overview[, i])] <- " "
  }
  overview$Status[is.na(overview$Status)] <- " "
}
# Drop rows with no Task/Deliverable entry (column 4).
overview <- overview[!(is.na(overview[, 4])), ]
# An all-NA first row means the workplan was never filled in.
if (sum(is.na(overview[1, ])) == ncol(overview)) {
  overview <- print("Workplan not entered")
}
overview
}
#' Run partial proportional odds models for ordinal outcomes
#'
#' This function runs partial proportional odds models for ordinal outcomes.
#'
#' @param y.name A character vector specifying the name of the variable to be modeled.
#' @param in.data The input data object of type data frame or matrix.
#' @param prop.odds.formula An optional formula specifying the predictor variables assumed to have proportional odds across levels of y. At least one of prop.odds.formula and non.prop.odds.formula must be specified.
#' @param beta.prop.odds.start A vector of starting values for proportional odds betas. This should only be specified in conjunction with prop.odds.formula.
#' @param non.prop.odds.formula An optional formula specifying the predictor variables assumed not to have proportional odds across levels of y. At least one of prop.odds.formula and non.prop.odds.formula must be specified.
#' @param beta.non.prop.odds.start A matrix of starting values for non proportional odds betas. This should only be specified in conjunction with non.prop.odds.formula. Columns correspond to the j-1 bottom levels of the outcome variable y, rows correspond to variables.
#' @param method A character specifying the optimization method to be used by package optimx in maximizing the log likelihood. Defaults to BFGS.
#' @param int.vec.scale A tuning parameter used to adjust the starting values for the intercepts. Defaults to 5.
#' @param itnmax An optional scalar specifying the iteration limit used in maximizing the log likelihood. Defaults to the default optimx value for the given method.
#' @param seed A vector of length 2 specifying the seed used to generate starting values for model coefficients, if not user specified. Defaults to c(14, 15).
#'
#' @return A list of class partial.prop.odds
#' \item{y.name}{A character vector specifying the model outcome.}
#' \item{y.levels}{The ordered levels of the model outcome.}
#' \item{prop.odds.formula}{The formula used for the proportional odds betas.}
#' \item{non.prop.odds.formula}{The formula used for the non-proportional odds betas.}
#' \item{log.lik}{The log-likelihood of the fitted model.}
#' \item{conv.code}{The convergence code from optimx.}
#' \item{intercepts}{The fitted model intercepts}
#' \item{beta.hat.prop.odds}{A vector of the estimated proportional odds coefficients, if specified.}
#' \item{beta.hat.non.prop.odds}{A matrix of the estimated non-proportional odds coefficients, where the j-1 columns correspond to the j-1 bottom levels of y, and the rows are betas.}
#' \item{est.probs}{The fitted probabilities of each level of y for each subject. Rows are subjects, columns are levels of y.}
#'
#' @examples
#'
#' data(red_train)
#' starts <- coef(lm(quality ~ alcohol+ pH + volatile.acidity, data = red_train))
#' training.result <- partial.prop.odds.mod(y ="quality", in.data = red_train,
#'                                          prop.odds.formula = ~ alcohol + pH,
#'                                          beta.prop.odds.start = starts[2:3],
#'                                          non.prop.odds.formula = ~ volatile.acidity,
#'                                          beta.non.prop.odds.start = matrix(rep(starts[4], 5), nrow = 1),
#'                                          method = "BFGS",
#'                                          seed = c(14, 15), itnmax = 1000)
#'
#' @export
partial.prop.odds.mod <- function(y.name, in.data, prop.odds.formula = NULL, beta.prop.odds.start = NULL,
                                  non.prop.odds.formula = NULL, beta.non.prop.odds.start = NULL,
                                  method = "BFGS", int.vec.scale = 5, itnmax = NULL, seed = c(14, 15)){
  # At least one of the two predictor formulas must be supplied.
  if (is.null(prop.odds.formula) && is.null(non.prop.odds.formula)){
    stop("At least one of prop.odds.formula or non.prop.odds.formula must be specified.")
  }
  # Outcome vector and its ordered levels.
  y <- in.data[ , y.name]
  y.levels <- sort(unique(y))
  n.ylevels <- length(y.levels)
  # Design matrix and starting values for the proportional-odds predictors.
  if (!is.null(prop.odds.formula)){
    x.prop.odds <- model.matrix(prop.odds.formula, in.data)
    # Drop the intercept column; intercepts are handled separately (one per cut point).
    x.prop.odds <- x.prop.odds[ , ! colnames(x.prop.odds) == "(Intercept)", drop = FALSE]
    if (! is.null(beta.prop.odds.start)){
      beta.prop.odds <- beta.prop.odds.start
    } else {
      # No user-specified starts: reproducible random uniform draws.
      set.seed(seed[1])
      beta.prop.odds <- runif(ncol(x.prop.odds))
    }
  } else {
    x.prop.odds <- NULL
    beta.prop.odds <- NULL
  }
  # Design matrix and starting values for the non-proportional-odds predictors.
  if (!is.null(non.prop.odds.formula)){
    x.non.prop.odds <- model.matrix(non.prop.odds.formula, in.data)
    x.non.prop.odds <- x.non.prop.odds[ , ! colnames(x.non.prop.odds) == "(Intercept)", drop = FALSE]
    n.non.prop.preds <- ncol(x.non.prop.odds)
    if (! is.null(beta.non.prop.odds.start)){
      beta.non.prop.odds <- beta.non.prop.odds.start
    } else {
      # One column of betas per cut point (n.ylevels - 1 columns).
      set.seed(seed[2])
      beta.non.prop.odds <- matrix(runif(n.non.prop.preds*(n.ylevels - 1)),
                                   byrow = FALSE, nrow = n.non.prop.preds)
    }
  } else {
    x.non.prop.odds <- NULL
    beta.non.prop.odds <- NULL
  }
  # Starting values for the intercepts: proportional to the (scaled) cumulative
  # category proportions of the outcome, so they increase with the level of y.
  # BUG FIX: the original hard-coded in.data$quality here, which broke the
  # function for any other outcome; table(y) counts the categories in the same
  # sorted order as y.levels.
  cat.probs <- as.vector(table(y)) / nrow(in.data)
  int.vector <- log(int.vec.scale*cumsum(cat.probs[1:(n.ylevels - 1)]))
  # Maximize the log-likelihood over intercepts and betas.
  optim.result <- maximize.partial.prop.odds.ll(y = y, y.levels = y.levels, in.data = in.data, int.vector = int.vector, method = method,
                                                x.prop.odds = x.prop.odds, x.non.prop.odds = x.non.prop.odds, beta.prop.odds = beta.prop.odds,
                                                beta.non.prop.odds = beta.non.prop.odds, itnmax = itnmax)
  # Pick out the appropriate pieces of the optimx output.
  intercepts <- unlist(optim.result[1:length(int.vector)])
  ll <- optim.result$value
  conv.code <- optim.result$convcode
  if(conv.code != 0){
    warning("log-likelihood maximization did not converge")
  }
  results.list <- list(y.name = y.name, y.levels = y.levels, prop.odds.formula = prop.odds.formula,
                       non.prop.odds.formula = non.prop.odds.formula,
                       log.lik = ll, conv.code = conv.code, intercepts = intercepts)
  # Estimated proportional-odds betas (a single vector shared by all cut points).
  if (! is.null(beta.prop.odds)){
    beta.hat.prop.odds <- unlist(optim.result[(length(int.vector) + 1): (length(int.vector) + length(beta.prop.odds))])
    results.list$beta.hat.prop.odds <- beta.hat.prop.odds
    # Linear-predictor contribution, used below for the fitted probabilities.
    xb.prop.odds <- x.prop.odds %*% beta.hat.prop.odds
  }
  # Estimated non-proportional-odds betas (one column per cut point).
  if (! is.null(beta.non.prop.odds)){
    beta.hat.non.prop.odds <- unlist(optim.result[(length(int.vector) + length(beta.prop.odds)+1):
                                                    (length(int.vector) + length(beta.prop.odds) + length(beta.non.prop.odds))])
    beta.hat.non.prop.odds.mat <- matrix(beta.hat.non.prop.odds, nrow = n.non.prop.preds, byrow = FALSE)
    results.list$beta.hat.non.prop.odds <- beta.hat.non.prop.odds.mat
    xb.non.prop.odds <- x.non.prop.odds %*% beta.hat.non.prop.odds.mat
  }
  # Fitted cumulative-logit linear predictors: one column per cut point.
  # This single matrix formulation replaces the original three near-duplicate
  # branches and also handles a 2-level outcome, where the old
  # sapply(2:top.minus1.level, ...) middle-level code produced a descending
  # (2:1) index sequence.
  n.cuts <- n.ylevels - 1
  eta <- matrix(intercepts, nrow = nrow(in.data), ncol = n.cuts, byrow = TRUE)
  if (! is.null(beta.prop.odds)){
    eta <- eta + as.vector(xb.prop.odds)
  }
  if (! is.null(beta.non.prop.odds)){
    eta <- eta + xb.non.prop.odds
  }
  # Category probabilities are successive differences of the cumulative
  # probabilities: P(y = j) = F(eta_j) - F(eta_{j-1}), with F(eta_0) = 0 and
  # F(eta_J) = 1.
  cum.probs <- plogis(eta)
  probs <- cbind(cum.probs[, 1],
                 if (n.cuts >= 2) cum.probs[, 2:n.cuts, drop = FALSE] - cum.probs[, 1:(n.cuts - 1), drop = FALSE],
                 1 - cum.probs[, n.cuts])
  colnames(probs) <- y.levels
  # Non-positive probabilities indicate crossing cumulative logits, i.e. a
  # degenerate fit.
  if (any(probs <= 0)){
    stop("Model did not converge and has estimated negative or zero probabilities")
  }
  results.list$est.probs <- probs
  class(results.list) <- "partial.prop.odds"
  return(results.list)
}
| /R/partial.prop.odds.mod.R | no_license | group-wine/sommelieR | R | false | false | 10,768 | r | #' Run partial proportional odds models for ordinal outcomes
#'
#' This function runs partial proportional odds models for ordinal outcomes.
#'
#' @param y.name A character vector specifying the name of the variable to be modeled.
#' @param in.data The input data object of type data frame or matrix.
#' @param prop.odds.formula An optional formula specifying the predictor variables assumed to have proportional odds across levels of y. At least one of prop.odds.formula and non.prop.odds.formula must be specified.
#' @param beta.prop.odds.start A vector of starting values for proportional odds betas. This should only be specified in conjunction with prop.odds.formula.
#' @param non.prop.odds.formula An optional formula specifying the predictor variables assumed not to have proportional odds across levels of y. At least one of prop.odds.formula and non.prop.odds.formula must be specified.
#' @param beta.non.prop.odds.start A matrix of starting values for non proportional odds betas. This should only be specified in conjunction with non.prop.odds.formula. Columns correspond to the j-1 bottom levels of the outcome variable y, rows correspond to variables.
#' @param method A character specifying the optimization method to be used by package optimx in maximizing the log likelihood. Defaults to BFGS.
#' @param int.vec.scale A tuning parameter used to adjust the starting values for the intercepts. Defaults to 5.
#' @param itnmax An optional scalar specifying the iteration limit used in maximizing the log likelihood. Defaults to the default optimx value for the given method.
#' @param seed A vector of length 2 specifying the seed used to generate starting values for model coefficients, if not user specified. Defaults to c(14, 15).
#'
#' @return A list of class partial.prop.odds
#' \item{y.name}{A character vector specifying the model outcome.}
#' \item{y.levels}{The ordered levels of the model outcome.}
#' \item{prop.odds.formula}{The formula used for the proportional odds betas.}
#' \item{non.prop.odds.formula}{The formula used for the non-proportional odds betas.}
#' \item{log.lik}{The log-likelihood of the fitted model.}
#' \item{conv.code}{The convergence code from optimx.}
#' \item{intercepts}{The fitted model intercepts}
#' \item{beta.hat.prop.odds}{A vector of the estimated proportional odds coefficients, if specified.}
#' \item{beta.hat.non.prop.odds}{A matrix of the estimated non-proportional odds coefficients, where the j-1 columns correspond to the j-1 bottom levels of y, and the rows are betas.}
#' \item{est.probs}{The fitted probabilities of each level of y for each subject. Rows are subjects, columns are levels of y.}
#'
#' @examples
#'
#' data(red_train)
#' starts <- coef(lm(quality ~ alcohol+ pH + volatile.acidity, data = red_train))
#' training.result <- partial.prop.odds.mod(y.name = "quality", in.data = red_train,
#' prop.odds.formula = ~ alcohol + pH,
#' beta.prop.odds.start = starts[2:3],
#' non.prop.odds.formula = ~ volatile.acidity,
#' beta.non.prop.odds.start = matrix(rep(starts[4], 5), nrow = 1),
#' method = "BFGS",
#' seed = c(14, 15), itnmax = 1000)
#'
#' @export
partial.prop.odds.mod <- function(y.name, in.data, prop.odds.formula = NULL, beta.prop.odds.start = NULL,
                                  non.prop.odds.formula = NULL, beta.non.prop.odds.start = NULL,
                                  method = "BFGS", int.vec.scale = 5, itnmax = NULL, seed = c(14, 15)){
  #################################################################################
  #Fit a partial proportional odds model for an ordinal outcome.
  #
  # y.name: a character specifying the name of the y variable to be modeled.
  # in.data: the input data object (data.frame or matrix).
  # prop.odds.formula: an optional formula specifying the predictor variables assumed to have
  #                    proportional odds across levels of y. At least one of prop.odds.formula
  #                    and non.prop.odds.formula must be specified.
  # beta.prop.odds.start: an optional vector of starting values for the proportional odds betas
  # non.prop.odds.formula: an optional formula specifying the predictor variables assumed to have
  #                        non-proportional odds across levels of y.
  # beta.non.prop.odds.start: an optional matrix of starting values for non-proportional odds
  #                           betas (rows = predictors, columns = the bottom j-1 levels of y)
  # method: the method used to maximize the log-likelihood (in package optimx). Defaults to "BFGS"
  # int.vec.scale: tuning parameter used to adjust the starting values for the intercepts
  # itnmax: optional iteration limit passed through to the optimizer
  # seed: length-2 seed used to generate random starting values when none are supplied
  #
  # Returns a list of class "partial.prop.odds"; see the roxygen block above for details.
  #################################################################################
  #make sure we have at least one of prop.odds.formula and non.prop.odds.formula specified
  if (is.null(prop.odds.formula) && is.null(non.prop.odds.formula)){
    stop("At least one of prop.odds.formula or non.prop.odds.formula must be specified.")
  }
  #outcome vector and its ordered levels
  y <- in.data[ , y.name]
  y.levels <- sort(unique(y))
  n.ylevels <- length(y.levels)
  #design matrix for proportional odds predictors
  if (!is.null(prop.odds.formula)){
    x.prop.odds <- model.matrix(prop.odds.formula, in.data)
    #drop the intercept column; level-specific intercepts are parameterized separately
    x.prop.odds <- x.prop.odds[ , ! colnames(x.prop.odds) == "(Intercept)", drop = F]
    #starting values: user supplied, otherwise random uniform draws
    if (! is.null(beta.prop.odds.start)){
      beta.prop.odds <- beta.prop.odds.start
    } else {
      n.prop.betas <- ncol(x.prop.odds)
      set.seed(seed[1])
      beta.prop.odds <- runif(n.prop.betas)
    }
  } else{
    x.prop.odds <- NULL
    beta.prop.odds <- NULL
  }
  #design matrix for non-proportional odds predictors
  if (!is.null(non.prop.odds.formula)){
    x.non.prop.odds <- model.matrix(non.prop.odds.formula, in.data)
    #drop the intercept column; level-specific intercepts are parameterized separately
    x.non.prop.odds <- x.non.prop.odds[ , ! colnames(x.non.prop.odds) == "(Intercept)", drop = F]
    n.non.prop.preds <- ncol(x.non.prop.odds)
    #starting values: user supplied, otherwise random uniform draws
    if (! is.null(beta.non.prop.odds.start)){
      beta.non.prop.odds <- beta.non.prop.odds.start
    } else {
      #one beta per predictor per non-top level of y
      non.prop.betas <- n.non.prop.preds*(n.ylevels - 1)
      set.seed(seed[2])
      beta.non.prop.odds <- matrix(runif(non.prop.betas), byrow = F, nrow = n.non.prop.preds)
    }
  } else{
    x.non.prop.odds <- NULL
    beta.non.prop.odds <- NULL
  }
  #starting values for the intercepts: increase with the level of y via the
  #(scaled, logged) cumulative share of observations at each outcome level.
  #BUG FIX: the original hard-coded in.data$quality here; use the outcome selected
  #by y.name so the function works for any outcome variable.
  cat.probs <- as.vector(table(y)) / length(y)
  int.vector <- log(int.vec.scale*cumsum(cat.probs[1:(n.ylevels - 1)]))
  #maximize the log-likelihood over intercepts and betas
  optim.result <- maximize.partial.prop.odds.ll(y = y, y.levels = y.levels, in.data = in.data, int.vector = int.vector, method = method,
                                                x.prop.odds = x.prop.odds, x.non.prop.odds = x.non.prop.odds, beta.prop.odds = beta.prop.odds,
                                                beta.non.prop.odds = beta.non.prop.odds, itnmax = itnmax)
  #pick out the appropriate pieces of the optimizer output
  intercepts <- unlist(optim.result[1:length(int.vector)])
  ll <- optim.result$value
  conv.code <- optim.result$convcode
  if(conv.code != 0){
    warning("log-likelihood maximization did not converge")
  }
  #start putting together results
  results.list <- list(y.name = y.name, y.levels = y.levels, prop.odds.formula = prop.odds.formula,
                       non.prop.odds.formula = non.prop.odds.formula,
                       log.lik = ll, conv.code = conv.code, intercepts = intercepts)
  #betas for proportional odds predictors
  if (! is.null(beta.prop.odds)){
    beta.hat.prop.odds <- unlist(optim.result[(length(int.vector) + 1): (length(int.vector) + length(beta.prop.odds))])
    results.list$beta.hat.prop.odds <- beta.hat.prop.odds
    #linear predictor contribution, reused when computing category probabilities
    xb.prop.odds <- x.prop.odds %*% beta.hat.prop.odds
  }
  #betas for non-proportional odds predictors
  if (! is.null(beta.non.prop.odds)){
    beta.hat.non.prop.odds <- unlist(optim.result[(length(int.vector) + length(beta.prop.odds)+1):
                                                    (length(int.vector) + length(beta.prop.odds) + length(beta.non.prop.odds))])
    beta.hat.non.prop.odds.mat <- matrix(beta.hat.non.prop.odds, nrow = n.non.prop.preds, byrow = F)
    results.list$beta.hat.non.prop.odds <- beta.hat.non.prop.odds.mat
    #per-level linear predictor contributions (one column per non-top level of y)
    xb.non.prop.odds <- x.non.prop.odds %*% beta.hat.non.prop.odds.mat
  }
  #estimated probabilities for each category of the outcome.
  #lin.pred(level) returns the linear predictor of the cumulative logit at `level`,
  #summing whichever of the proportional / non-proportional pieces were fit; this
  #replaces the three near-identical branches of the original code.
  top.minus1.level <- n.ylevels - 1
  lin.pred <- function(level){
    lp <- intercepts[level]
    if (! is.null(beta.prop.odds)){
      lp <- lp + xb.prop.odds
    }
    if (! is.null(beta.non.prop.odds)){
      lp <- lp + xb.non.prop.odds[ , level]
    }
    lp
  }
  bottom.level.prob <- plogis(lin.pred(1))
  top.level.prob <- 1 - plogis(lin.pred(top.minus1.level))
  #middle categories only exist when y has more than two levels; the guard avoids
  #2:1 counting backwards for a binary outcome
  if (top.minus1.level >= 2){
    middle.levels <- sapply(2:top.minus1.level, function(mid.level){
      plogis(lin.pred(mid.level)) - plogis(lin.pred(mid.level - 1))
    })
  } else {
    middle.levels <- NULL
  }
  probs <- cbind(bottom.level.prob, middle.levels, top.level.prob)
  colnames(probs) <- y.levels
  #negative or zero probabilities indicate the optimizer did not converge sensibly
  if (any(probs <= 0)){
    stop("Model did not converge and has estimated negative or zero probabilities")
  }
  results.list$est.probs <- probs
  class(results.list) <- "partial.prop.odds"
  return(results.list)
}
|
\name{cubehelix}
\alias{cubehelix}
\title{
Generate "cubehelix" palette.
}
\description{
\code{cubehelix} returns a set of RGB colours for the screen display of intensity images.
}
\usage{
cubehelix(n, value = numeric(), weak = NA, rich = NA, rotate = NA, hue = NA, gamma = 1,
dark = NA, light = NA, inv = NA, verbose = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Positive integer. Length of returned color vector. If \code{n} is \link[base:missing]{missing} and length of \code{value} is positive, then length of \code{value}. If missing \code{n} and empty \code{value}, then \code{n=256}.
}
\item{value}{
Numeric vector of values associated with the palette. If both positive and negative values are in this vector, then a divergent color palette is returned. Default is a numeric of length zero (unspecified).
}
\item{weak}{
Numeric. The angle (in degrees) of the helix for color with \code{light} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rich}{
Numeric. The angle (in degrees) of the helix for color with \code{dark} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rotate}{
Numeric. The angle of rotation (in degrees) of the helix over the scale; can be negative. If \code{rotate} and \code{weak} are specified, then \code{rich} is defined as sum of \code{weak} and \code{rotate}. If \code{rotate} and \code{rich} are specified, then \code{weak} is defined as difference between \code{rich} and \code{rotate}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{hue}{
Non-negative numeric. Saturation of color. \code{hue=0} gives pure greyscale. If unspecified, then random value in interval [0.9, 1.5] is used. Default is \code{NA} (unspecified).
}
\item{gamma}{
Numeric. Power of intensity. Intensity is between \code{dark} and \code{light}, which are normalized to interval [0, 1]. \code{gamma} changes normalized intensity to intensity\code{^gamma}. Default is 1.
}
\item{dark}{
Positive numeric in interval between 0 and 255. The intensity of the darkest color in the palette. For light backgrounds default is 63. For dark backgrounds default is 14 (inverse order with \code{light}).
}
\item{light}{
Positive numeric in interval between 0 and 255. The intensity of the lightest color in the palette. For light backgrounds default is 241, for dark backgrounds default is 192 (inverse order with \code{dark}).
}
\item{inv}{
Logical. Inversion of color intensity. If \code{TRUE} then color vector is \link[base:rev]{reversed} before return. Default is \code{FALSE}.
}
\item{verbose}{
Logical. Value \code{TRUE} provides information about the cube helix on the console. Default is \code{NA}, which is interpreted as \code{FALSE}.
}
}
\details{
This is modified source code of function \code{cubeHelix} from package \pkg{rje} under GPL>=2 license.
The palette design is oriented that figures can be printed on white paper. Under this assumption, light color is for small values, and dark color is for big values. In some computer vision and GIS software black background is used, and in this case light color for big values, and dark color of small values looks more naturally. For some thematic maps big values are light, and small values are small (for example, sea ice concentration: open water is blue, close ice is white). RGB and Grayscale remote sensing and photo imagery use light colors for strong signal, and dark colors for weak signal.
Light background is default for figure (specified by argument \code{background} in function \code{\link[ursa:compose_open]{compose_open}}).
%%~ and for image panels (specified by argument \code{fill} in function \code{\link[ursa:panel_new]{panel_new}}).
The palette divergency can be defined only if \code{value} is specified. If all values are positive, or all values are negative, then the returned palette is not divergent. For divergent palettes the helix sequence is continuous.
If \code{dark} and \code{light} are unspecified, the color contrast between \code{dark} and \code{light} drops as the number of colors in the returned vector decreases.
}
\value{
Vector of RGB color specification.
}
\references{
\href{http://www.mrao.cam.ac.uk/~dag/CUBEHELIX/}{Dave Green's `cubehelix' colour scheme.}
Green, D. A., 2011, `A colour scheme for the display of astronomical intensity images', Bulletin of the Astronomical Society of India, 39, 289. http://astron-soc.in/bulletin/11June/289392011.pdf \href{https://arxiv.org/pdf/1108.5083.pdf}{(pre-print at 'arxiv.org')}
\pkg{rje} at CRAN: \url{https://CRAN.R-project.org/package=rje}
}
\author{
Dave Green
Robin Evans
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
\section{Acknowledgements}{
Dave Green, Robin Evans
}
\seealso{
Original source code \code{\link[rje:cubeHelix]{rje::cubeHelix}} (clickable correctly if package \pkg{rje} is installed), or see CRAN reference.
}
\examples{
session_grid(NULL)
set.seed(352)
session_grid(regrid(mul=1/16))
a <- ursa_dummy(3,min=0,max=255)
b4 <- b3 <- b2 <- b1 <- vector("list",length(a))
for (i in seq_along(b1)) {
b1[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=+270),ncolor=11)
b2[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=-270),ncolor=11)
b3[[i]] <- colorize(a[i]-127,pal=cubehelix)
hue <- sample(seq(2)-1,1)
s <- ifelse(hue==0,NA,runif(1,min=91,max=223))
b4[[i]] <- colorize(a[i]-127,pal=cubehelix,pal.hue=hue,pal.dark=s,pal.light=s)
}
display(c(b1,b2),layout=c(2,NA),decor=FALSE)
display(c(b3,b4),layout=c(2,NA),decor=FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{color}
| /man/cubehelix.Rd | no_license | yangxhcaf/ursa | R | false | false | 6,203 | rd | \name{cubehelix}
\alias{cubehelix}
\title{
Generate "cubehelix" palette.
}
\description{
\code{cubehelix} returns a set of RGB colours for the screen display of intensity images.
}
\usage{
cubehelix(n, value = numeric(), weak = NA, rich = NA, rotate = NA, hue = NA, gamma = 1,
dark = NA, light = NA, inv = NA, verbose = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Positive integer. Length of returned color vector. If \code{n} is \link[base:missing]{missing} and length of \code{value} is positive, then length of \code{value}. If missing \code{n} and empty \code{value}, then \code{n=256}.
}
\item{value}{
Numeric vector of values associated with the palette. If both positive and negative values are in this vector, then a divergent color palette is returned. Default is a numeric of length zero (unspecified).
}
\item{weak}{
Numeric. The angle (in degrees) of the helix for color with \code{light} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rich}{
Numeric. The angle (in degrees) of the helix for color with \code{dark} intensity. If both \code{rich} and \code{weak} are specified, the \code{rotate} is defined as difference between \code{rich} and \code{weak}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{rotate}{
Numeric. The angle of rotation (in degrees) of the helix over the scale; can be negative. If \code{rotate} and \code{weak} are specified, then \code{rich} is defined as sum of \code{weak} and \code{rotate}. If \code{rotate} and \code{rich} are specified, then \code{weak} is defined as difference between \code{rich} and \code{rotate}. If all \code{weak}, \code{rich} and \code{rotate} are unspecified, then random values are used. Default is \code{NA} (unspecified).
}
\item{hue}{
Non-negative numeric. Saturation of color. \code{hue=0} gives pure greyscale. If unspecified, then random value in interval [0.9, 1.5] is used. Default is \code{NA} (unspecified).
}
\item{gamma}{
Numeric. Power of intensity. Intensity is between \code{dark} and \code{light}, which are normalized to interval [0, 1]. \code{gamma} changes normalized intensity to intensity\code{^gamma}. Default is 1.
}
\item{dark}{
Positive numeric in interval between 0 and 255. The intensity of the darkest color in the palette. For light backgrounds default is 63. For dark backgrounds default is 14 (inverse order with \code{light}).
}
\item{light}{
Positive numeric in interval between 0 and 255. The intensity of the lightest color in the palette. For light backgrounds default is 241, for dark backgrounds default is 192 (inverse order with \code{dark}).
}
\item{inv}{
Logical. Inversion of color intensity. If \code{TRUE} then color vector is \link[base:rev]{reversed} before return. Default is \code{FALSE}.
}
\item{verbose}{
Logical. Value \code{TRUE} provides information about the cube helix on the console. Default is \code{NA}, which is interpreted as \code{FALSE}.
}
}
\details{
This is modified source code of function \code{cubeHelix} from package \pkg{rje} under GPL>=2 license.
The palette design is oriented that figures can be printed on white paper. Under this assumption, light color is for small values, and dark color is for big values. In some computer vision and GIS software black background is used, and in this case light color for big values, and dark color of small values looks more naturally. For some thematic maps big values are light, and small values are small (for example, sea ice concentration: open water is blue, close ice is white). RGB and Grayscale remote sensing and photo imagery use light colors for strong signal, and dark colors for weak signal.
Light background is default for figure (specified by argument \code{background} in function \code{\link[ursa:compose_open]{compose_open}}).
%%~ and for image panels (specified by argument \code{fill} in function \code{\link[ursa:panel_new]{panel_new}}).
The palette divergency can be defined only if \code{value} is specified. If all values are positive, or all values are negative, then the returned palette is not divergent. For divergent palettes the helix sequence is continuous.
If \code{dark} and \code{light} are unspecified, the color contrast between \code{dark} and \code{light} drops as the number of colors in the returned vector decreases.
}
\value{
Vector of RGB color specification.
}
\references{
\href{http://www.mrao.cam.ac.uk/~dag/CUBEHELIX/}{Dave Green's `cubehelix' colour scheme.}
Green, D. A., 2011, `A colour scheme for the display of astronomical intensity images', Bulletin of the Astronomical Society of India, 39, 289. http://astron-soc.in/bulletin/11June/289392011.pdf \href{https://arxiv.org/pdf/1108.5083.pdf}{(pre-print at 'arxiv.org')}
\pkg{rje} at CRAN: \url{https://CRAN.R-project.org/package=rje}
}
\author{
Dave Green
Robin Evans
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
\section{Acknowledgements}{
Dave Green, Robin Evans
}
\seealso{
Original source code \code{\link[rje:cubeHelix]{rje::cubeHelix}} (clickable correctly if package \pkg{rje} is installed), or see CRAN reference.
}
\examples{
session_grid(NULL)
set.seed(352)
session_grid(regrid(mul=1/16))
a <- ursa_dummy(3,min=0,max=255)
b4 <- b3 <- b2 <- b1 <- vector("list",length(a))
for (i in seq_along(b1)) {
b1[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=+270),ncolor=11)
b2[[i]] <- colorize(a[i],pal=cubehelix(11,weak=45*i,rotate=-270),ncolor=11)
b3[[i]] <- colorize(a[i]-127,pal=cubehelix)
hue <- sample(seq(2)-1,1)
s <- ifelse(hue==0,NA,runif(1,min=91,max=223))
b4[[i]] <- colorize(a[i]-127,pal=cubehelix,pal.hue=hue,pal.dark=s,pal.light=s)
}
display(c(b1,b2),layout=c(2,NA),decor=FALSE)
display(c(b3,b4),layout=c(2,NA),decor=FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{color}
|
## Get hierarchy and sites location by OU
## G. Sarfaty & A. Chafetz
## Date: 2018.05.03
## Purpose: pull hierarchy for an org unit + site location
## Dependencies
library(devtools)
#NOTE(review): this reinstalls the package from GitHub on every run; consider
#guarding with requireNamespace("datimvalidation") so repeat runs skip the install
install_github("jason-p-pickering/datim-validation")
library(datimvalidation)
library(tidyverse)
#initialize loadSecrets & login to API; enter username & password; then URL when prompted
loadSecrets(secrets = NA)
##use pickering's code to pull hierarchy, referencing the OU of interest by its UID; Ex is SA
df <- getOrganisationUnitMap("cDGPF739ZZr")
#create function to pull site location;
#Return a data frame of org units (id, code, name, coordinates) that sit under
#`organisationUnit` at the requested hierarchy `level`, caching the parsed API
#response keyed by the MD5 of the request URL.
#
#organisationUnit: DATIM org unit UID; falls back to getOption("organisationUnit").
#level: org hierarchy level of the sites to return (e.g. 7 for facility level).
getSites <-function(organisationUnit=NA,level=NA) {
  if ( is.na(organisationUnit) ) { organisationUnit<-getOption("organisationUnit") }
  url<-URLencode(paste0(getOption("baseurl"),"api/organisationUnits.json?&filter=path:like:",organisationUnit,"&fields=id,code,name,coordinates&paging=false&filter=level:eq:",level))
  #cache key is the MD5 of the request URL
  sig<-digest::digest(url,algo='md5', serialize = FALSE)
  sites<-getCachedObject(sig)
  if (is.null(sites)){
    r<-httr::GET(url,httr::timeout(600))
    #BUG FIX: httr stores the HTTP code in `status_code`; the original `r$status`
    #only worked through `$` partial matching
    if (r$status_code == 200L ){
      r<- httr::content(r, "text")
      sites<-jsonlite::fromJSON(r,flatten=TRUE)[[1]]
      saveCachedObject(sites,sig)
    } else {
      #signal the failure through the error condition itself rather than
      #print() followed by a bare stop(); also fixes the "retreive" typo
      stop(paste("Could not retrieve site listing",httr::content(r,"text")))
    }
  }
  return( sites )
}
##Call new GetSites function to get site locations as df; must reference OU of interest by UID and site level by #
#(level 7 is presumably the facility/site level in the DATIM hierarchy -- TODO confirm)
df2<-getSites("nBo9Y4yZubB",7)
| /R/getOUandSites.R | no_license | USAID-OHA-SI/misc_scripts | R | false | false | 1,536 | r |
## Get hierarchy and sites location by OU
## G. Sarfaty & A. Chafetz
## Date: 2018.05.03
## Purpose: pull hierarchy for an org unit + site location
## Dependencies
library(devtools)
install_github("jason-p-pickering/datim-validation")
library(datimvalidation)
library(tidyverse)
#initialize loadSecrets & login to API; enter username & password; then URL when prompted
loadSecrets(secrets = NA)
##use pickering's code to pull hierarchy, referencing the OU of interest by its UID; Ex is SA
df <- getOrganisationUnitMap("cDGPF739ZZr")
#create function to pull site location;
getSites <-function(organisationUnit=NA,level=NA) {
if ( is.na(organisationUnit) ) { organisationUnit<-getOption("organisationUnit") }
url<-URLencode(paste0(getOption("baseurl"),"api/organisationUnits.json?&filter=path:like:",organisationUnit,"&fields=id,code,name,coordinates&paging=false&filter=level:eq:",level))
sig<-digest::digest(url,algo='md5', serialize = FALSE)
sites<-getCachedObject(sig)
if (is.null(sites)){
r<-httr::GET(url,httr::timeout(600))
if (r$status == 200L ){
r<- httr::content(r, "text")
sites<-jsonlite::fromJSON(r,flatten=TRUE)[[1]]
saveCachedObject(sites,sig)
} else {
print(paste("Could not retreive site listing",httr::content(r,"text")))
stop()
}
}
return( sites )
}
##Call new GetSites function to get site locations as df; must reference OU of interest by UID and site level by #
df2<-getSites("nBo9Y4yZubB",7)
|
#This R script was generated on 07152020 for the microbiome tutorial series by Dr. Michael Jochum at Baylor College of Medicine
#and was originally designed to be used at the following binder repo:
#https://mybinder.org/v2/gh/MADscientist314/microbiome_binder_repo/master
#NOTE: machine-specific path; edit before running elsewhere
setwd("D:/github/microbiome_binder_repo")
#Import the libraries (duplicate library(microbiome) call removed)
library(phyloseq)
library(microbiome)
library(DirichletMultinomial)
library(reshape2)
library(magrittr)
library(dplyr)
library(DESeq2)
library(metacoder)
library(knitr)
library(tibble)
# Note that this particular approach will be super slow.
# And take just as long every time you edit your code
library(holepunch)
write_install() # Writes install.R with all your dependencies
write_runtime() # Writes the date your code was last modified. Can be overridden.
generate_badge() # Generates a badge you can add to your README. Clicking badge will launch the Binder.
# ----------------------------------------------
# At this time ???? push the code to GitHub ????
# ----------------------------------------------
# Then click the badge on your README or run
build_binder() # to kick off the build process
# ????????
#load the example atlas1006 data set and export its components as flat files
data(atlas1006)
print(atlas1006)
write_phyloseq(atlas1006, type = "OTU", path = getwd())
write_phyloseq(atlas1006, type = "TAXONOMY", path = getwd())
write_phyloseq(atlas1006, type = "METADATA", path = getwd())
#test build_1
| /07152020_commands.R | no_license | MADscientist314/microbiome_binder_repo | R | false | false | 1,400 | r | #This Rscript was genereated on 07152020 for the microbiome tutorial series by Dr. Michael Jochum at Baylor College of Medicine
#and was originally designed to be used at the following binder repo:
#https://mybinder.org/v2/gh/MADscientist314/microbiome_binder_repo/master
#NOTE: machine-specific path; edit before running elsewhere
setwd("D:/github/microbiome_binder_repo")
#Import the libraries (duplicate library(microbiome) call removed)
library(phyloseq)
library(microbiome)
library(DirichletMultinomial)
library(reshape2)
library(magrittr)
library(dplyr)
library(DESeq2)
library(metacoder)
library(knitr)
library(tibble)
# Note that this particular approach will be super slow.
# And take just as long every time you edit your code
library(holepunch)
write_install() # Writes install.R with all your dependencies
write_runtime() # Writes the date your code was last modified. Can be overridden.
generate_badge() # Generates a badge you can add to your README. Clicking badge will launch the Binder.
# ----------------------------------------------
# At this time ???? push the code to GitHub ????
# ----------------------------------------------
# Then click the badge on your README or run
build_binder() # to kick off the build process
# ????????
#load the example atlas1006 data set and export its components as flat files
data(atlas1006)
print(atlas1006)
write_phyloseq(atlas1006, type = "OTU", path = getwd())
write_phyloseq(atlas1006, type = "TAXONOMY", path = getwd())
write_phyloseq(atlas1006, type = "METADATA", path = getwd())
#test build_1
|
/Primer_sesion.R | no_license | GerarLDz/RInicio | R | false | false | 13,531 | r | ||
# Read the full power-consumption dataset; '?' marks missing values in the raw file.
to_plot <- read.csv('/Users/brodyvogel/Desktop/household_power_consumption.txt',
                    header = TRUE, sep = ';', na.strings = "?")

# Keep only the two target days (dates are stored as d/m/Y strings).
to_plot_1 <- subset(to_plot, Date %in% c("1/2/2007", "2/2/2007"))

# Build a proper date-time column by combining the Date and Time fields.
to_plot_1$Date <- as.Date(to_plot_1$Date, format = "%d/%m/%Y")
new_date <- paste(as.Date(to_plot_1$Date), to_plot_1$Time)
to_plot_1$New_Date <- as.POSIXct(new_date)

# 2x2 panel layout for the four plots.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))

# Panel 1: global active power over time.
plot(Global_active_power ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Global Active Power (kilowatts)", xlab = "")

# Panel 2: voltage over time.
plot(Voltage ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Voltage (volt)", xlab = "")

# Panel 3: the three sub-metering series.
# BUG FIX: the original drew Sub_metering_2 twice (red and blue) and never
# plotted Sub_metering_3, even though the legend below lists it; the y-axis
# label was also a copy-paste of "Global Active Power".
plot(Sub_metering_1 ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Energy sub metering", xlab = "")
lines(Sub_metering_2 ~ New_Date, col = 'Red', data = to_plot_1)
lines(Sub_metering_3 ~ New_Date, col = 'Blue', data = to_plot_1)
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2, bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = .4)
plot(Global_reactive_power ~ New_Date, type="l", data = to_plot_1,
ylab="Global Rective Power (kilowatts)", xlab="") | /plot4.R | no_license | BrodyVogel/Exploring-Data-Project-1 | R | false | false | 1,215 | r | # read in the data
# Read the full power-consumption dataset; '?' marks missing values in the raw file.
to_plot <- read.csv('/Users/brodyvogel/Desktop/household_power_consumption.txt',
                    header = TRUE, sep = ';', na.strings = "?")

# Keep only the two target days (dates are stored as d/m/Y strings).
to_plot_1 <- subset(to_plot, Date %in% c("1/2/2007", "2/2/2007"))

# Build a proper date-time column by combining the Date and Time fields.
to_plot_1$Date <- as.Date(to_plot_1$Date, format = "%d/%m/%Y")
new_date <- paste(as.Date(to_plot_1$Date), to_plot_1$Time)
to_plot_1$New_Date <- as.POSIXct(new_date)

# 2x2 panel layout for the four plots.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))

# Panel 1: global active power over time.
plot(Global_active_power ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Global Active Power (kilowatts)", xlab = "")

# Panel 2: voltage over time.
plot(Voltage ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Voltage (volt)", xlab = "")

# Panel 3: the three sub-metering series.
# BUG FIX: the original drew Sub_metering_2 twice (red and blue) and never
# plotted Sub_metering_3, even though the legend below lists it; the y-axis
# label was also a copy-paste of "Global Active Power".
plot(Sub_metering_1 ~ New_Date, type = "l", data = to_plot_1,
     ylab = "Energy sub metering", xlab = "")
lines(Sub_metering_2 ~ New_Date, col = 'Red', data = to_plot_1)
lines(Sub_metering_3 ~ New_Date, col = 'Blue', data = to_plot_1)
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2, bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = .4)
ylab="Global Rective Power (kilowatts)", xlab="") |
\name{speedglm-package}
\alias{speedglm-package}
\docType{package}
\title{
Fitting Linear and Generalized Linear Models to Large Data Sets
}
\description{
Fits Linear and Generalized Linear Models to large data sets. For data loaded in R memory the fitting is usually
fast, especially if R is linked against an optimized BLAS. For data sets of size larger than
R memory, the fitting is made by an updating algorithm.}
\details{
\tabular{ll}{
Package: \tab speedglm\cr
Type: \tab Package\cr
Version: \tab 0.3-5\cr
Date: \tab 2023-04-20\cr
Depends: \tab Matrix, stats, MASS\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
}
\author{
Marco Enea <marco.enea@unipa.it>, with contributions from Ronen Meiri and Tomer Kalimi (on behalf of DMWay Analytics LTD).
Maintainer: Marco Enea <marco.enea@unipa.it>
}
\keyword{ models}
| /man/speedglm-package.rd | no_license | cran/speedglm | R | false | false | 866 | rd | \name{speedglm-package}
\alias{speedglm-package}
\docType{package}
\title{
Fitting Linear and Generalized Linear Models to Large Data Sets
}
\description{
Fits Linear and Generalized Linear Models to large data sets. For data loaded in R memory the fitting is usually
fast, especially if R is linked against an optimized BLAS. For data sets of size larger than
R memory, the fitting is made by an updating algorithm.}
\details{
\tabular{ll}{
Package: \tab speedglm\cr
Type: \tab Package\cr
Version: \tab 0.3-5\cr
Date: \tab 2023-04-20\cr
Depends: \tab Matrix, stats, MASS\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
}
\author{
Marco Enea <marco.enea@unipa.it>, with contributions from Ronen Meiri and Tomer Kalimi (on behalf of DMWay Analytics LTD).
Maintainer: Marco Enea <marco.enea@unipa.it>
}
\keyword{ models}
|
# Extracted example code for the seasonal package's holiday datasets.
library(seasonal)
### Name: easter
### Title: Dates of Chinese New Year, Indian Diwali and Easter
### Aliases: easter cny diwali
### Keywords: datasets
### ** Examples
# Load the bundled holiday data and print each date series.
data(holiday)
cny
diwali
easter
| /data/genthat_extracted_code/seasonal/examples/easter.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 208 | r | library(seasonal)
### Name: easter
### Title: Dates of Chinese New Year, Indian Diwali and Easter
### Aliases: easter cny diwali
### Keywords: datasets
### ** Examples
data(holiday)
cny
diwali
easter
|
###############################################################################
## MBG launch script
##
## Testing Damaris' model
##
###############################################################################
## LOAD LBD LIBRARY
# Pick the personal package library to match the running Singularity image:
# 'health_fin' images use the plain 3.5 library, everything else the geo build.
personal_lib <- ifelse(grepl('health_fin', Sys.getenv("SINGULARITY_NAME")) ,
                       "~/R/x86_64-pc-linux-gnu-library/3.5/",
                       "~/R/x86_64-pc-linux-gnu-library/3.5geo/")
if(!dir.exists(personal_lib)) dir.create(personal_lib, recursive = TRUE)
Sys.setenv(R_LIBS_USER = personal_lib)
# Put the personal library first on the search path.
.libPaths(c(Sys.getenv("R_LIBS_USER"), .libPaths()) )
# devtools::install("/share/code/geospatial/sadatnfs/lbd_core/mbg_central/LBDCore", dependencies=F, upgrade=F)
## Load LBDCore
library(LBDCore)
## Setup -------------------------------------------------------------------------
## clear environment
# NOTE(review): rm(list=ls()) wipes everything defined above; personal_lib has
# already taken effect so this is harmless here, but fragile if lines are reordered.
rm(list=ls())
## Set repo location, indicator group, and some arguments
user <- Sys.info()['user']
# indicator_group <- 'child_growth_failure'
# indicator <- 'cgf_stateREs_test'
# # core_repo <- paste0('/share/code/geospatial/', user, '/lbd_core/')
# # indicator_repo <- paste0('/share/code/geospatial/', user, '/', 'cgf', '/')
# config_par <- 'config_stunting_mod_b'
# cov_par <- 'covs_stunting_mod_b'
# Regions <- 'PER'
core_repo <- paste0('/share/code/geospatial/', user, '/lbd_core/')
repo <- paste0('/share/code/geospatial/', user, '/', 'cgf', '/')
indicator_group <- 'child_growth_failure'
indicator <- 'stunting_mod_b'
Regions <- 'south_asia' ## could be put in config
reg <- 'south_asia'
config_par <- 'config_stunting_mod_MULTIREs'
cov_par <- 'covs_stunting_mod_b'
message(indicator)
## Create run date in correct format unless it's in the config as 'NULL'
# A fixed run_date is reused here to resume/inspect an existing run; the
# commented expression shows how a fresh timestamped one would be built.
run_date <- 'sadatnfs_testStateREs_2019_02_05_14_00_56' # paste0('sadatnfs_testStateREs_', make_time_stamp(time_stamp))

# Load the model configuration. set_up_config() presumably also places the
# config parameters (use_geos_nodes, year_list, makeholdouts, ...) into the
# global environment -- confirm against LBDCore.
config <- set_up_config(repo = repo,
                        core_repo = core_repo,
                        indicator_group = indicator_group,
                        indicator = indicator,
                        config_name = config_par,
                        covs_name = cov_par)

# (A stale commented-out load_config()/check_config() block was removed here.)

####### NOTE: `use_subnat_res` and `subnat_country_to_get` are needed for adding state REs for that country

## Set project: GEOS nodes use a dedicated cluster project.
proj <- ifelse(as.logical(use_geos_nodes), 'proj_geo_nodes', 'proj_geospatial')

## Create output folder with the run_date
outputdir <- paste0('/share/geospatial/mbg/', indicator_group, '/', indicator, '/output/', run_date, '/')
dir.create(outputdir, recursive = TRUE)

## Make sure year object is in the correct format.
## FIX: use is.character() instead of comparing class() with `==`; class() can
## return a vector of length > 1, which makes the comparison unreliable in if().
if (is.character(year_list)) year_list <- eval(parse(text=year_list))

## If running a single country (3-letter ISO code) turn all country fixed and
## random effects off, since there is no between-country variation to model.
if (nchar(Regions[1]) == 3) individual_countries <- TRUE
if (individual_countries) {
  use_child_country_fes <- FALSE
  use_inla_country_fes <- FALSE
  use_country_res <- FALSE
}
## Make holdouts -------------------------------------------------------------------------
# makeholdouts and the other bare names below come from the loaded config
# (set_up_config) -- confirm against LBDCore.
if(makeholdouts){
  message('Making holdouts')
  # load the full input data
  df <- load_input_data(indicator = indicator,
                        simple = NULL,
                        removeyemen = TRUE,
                        years = yearload,
                        withtag = as.logical(withtag),
                        datatag = datatag,
                        use_share = as.logical(use_share))
  # add in location information
  df <- merge_with_ihme_loc(df)
  # make a list of dfs for each region, with 5 qt folds identified in each
  # ('qt' presumably selects quadtree spatial stratification -- confirm)
  stratum_ho <- make_folds(data = df,
                           n_folds = as.numeric(n_ho_folds),
                           spat_strat = 'qt',
                           temp_strat = 'prop',
                           strat_cols = 'region',
                           ts = as.numeric(ho_ts),
                           mb = as.numeric(ho_mb))
}
## Launch parallel script -------------------------------------------------------------------------
## Make loopvars aka strata grid (format = regions, ages, holdouts)
if(makeholdouts) loopvars <- expand.grid(Regions, 0, 0:n_ho_folds) else loopvars <- expand.grid(Regions, 0, 0)

## Loop over strata, save images and submit qsubs.
## FIX: seq_len() instead of 1:nrow() (safe if the grid were ever empty), and
## TRUE/FALSE instead of the reassignable shorthand T/F.
for(i in seq_len(nrow(loopvars))){
  message(paste(loopvars[i,2],as.character(loopvars[i,1]),loopvars[i,3]))
  ### SAVE IMAGE AND ###
  # build the qsub command for this region/age/holdout combination
  qsub <- make_qsub_share(age = loopvars[i,2],
                          reg = as.character(loopvars[i,1]),
                          holdout = loopvars[i,3],
                          test = FALSE,
                          indic = indicator,
                          saveimage = TRUE,
                          memory = ifelse(individual_countries, 10, 50),
                          cores = ifelse(individual_countries, 5, 10),
                          proj = proj,
                          geo_nodes = as.logical(use_geos_nodes),
                          corerepo = repo,
                          code = NULL,
                          addl_job_name = paste0('stateREs_', user),
                          singularity = 'default')
  # submit job (the string is printed; submission itself presumably happens
  # inside make_qsub_share -- confirm)
  print(qsub)
}
| /mbg/mbg_core_code/mbg_central/LBDCore/testing_models/state_REs/01_launch_script.R | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | R | false | false | 6,170 | r | ###############################################################################
## MBG launch script
##
## Testing Damaris' model
##
###############################################################################
## LOAD LBD LIBRARY
personal_lib <- ifelse(grepl('health_fin', Sys.getenv("SINGULARITY_NAME")) ,
"~/R/x86_64-pc-linux-gnu-library/3.5/",
"~/R/x86_64-pc-linux-gnu-library/3.5geo/")
if(!dir.exists(personal_lib)) dir.create(personal_lib, recursive = TRUE)
Sys.setenv(R_LIBS_USER = personal_lib)
.libPaths(c(Sys.getenv("R_LIBS_USER"), .libPaths()) )
# devtools::install("/share/code/geospatial/sadatnfs/lbd_core/mbg_central/LBDCore", dependencies=F, upgrade=F)
## Load LBDCore
library(LBDCore)
## Setup -------------------------------------------------------------------------
## clear environment
rm(list=ls())
## Set repo location, indicator group, and some arguments
user <- Sys.info()['user']
# indicator_group <- 'child_growth_failure'
# indicator <- 'cgf_stateREs_test'
# # core_repo <- paste0('/share/code/geospatial/', user, '/lbd_core/')
# # indicator_repo <- paste0('/share/code/geospatial/', user, '/', 'cgf', '/')
# config_par <- 'config_stunting_mod_b'
# cov_par <- 'covs_stunting_mod_b'
# Regions <- 'PER'
core_repo <- paste0('/share/code/geospatial/', user, '/lbd_core/')
repo <- paste0('/share/code/geospatial/', user, '/', 'cgf', '/')
indicator_group <- 'child_growth_failure'
indicator <- 'stunting_mod_b'
Regions <- 'south_asia' ## could be put in config
reg <- 'south_asia'
config_par <- 'config_stunting_mod_MULTIREs'
cov_par <- 'covs_stunting_mod_b'
message(indicator)
## Create run date in correct format unless it's in the config as 'NULL'
run_date <- 'sadatnfs_testStateREs_2019_02_05_14_00_56' # paste0('sadatnfs_testStateREs_', make_time_stamp(time_stamp))
config <- set_up_config(repo = repo,
core_repo = core_repo,
indicator_group = indicator_group,
indicator = indicator,
config_name = config_par,
covs_name = cov_par)
# ## Read config file and save all parameters in memory
# config <- load_config(repo = repo,
# indicator_group = indicator_group,
# indicator = indicator,
# config_name = config_par,
# covs_name = cov_par)
#
# ## Create run date in correct format unless it's in the config as 'NULL'
# # skiptoinla <- TRUE
# # skiptoinla_from_rundate <- "sadatnfs_testStateREs_2019_01_26"
#
# ## Ensure you have defined all necessary settings in your config
# check_config(cr = repo)
####### NOTE: `use_subnat_res` and `subnat_country_to_get` are needed for adding state REs for that country
## Set project
proj <- ifelse(as.logical(use_geos_nodes), 'proj_geo_nodes', 'proj_geospatial')
## Create run date (really a comment) to store model outputs in
# run_date <- paste0('training_', user, '_packageTest')
# run_date <- paste0(user, '_test_stateREs_2019_01_26')
## Create output folder with the run_date
outputdir <- paste0('/share/geospatial/mbg/', indicator_group, '/', indicator, '/output/', run_date, '/')
dir.create(outputdir, recursive = TRUE)
## Make sure year object is in the correct format
if (class(year_list) == 'character') year_list <- eval(parse(text=year_list))
## If running individual countries make sure all country FEs and REs off
if (nchar(Regions[1]) == 3) individual_countries <- TRUE
if (individual_countries) {
use_child_country_fes <- FALSE
use_inla_country_fes <- FALSE
use_country_res <- FALSE
}
## Make holdouts -------------------------------------------------------------------------
if(makeholdouts){
message('Making holdouts')
# load the full input data
df <- load_input_data(indicator = indicator,
simple = NULL,
removeyemen = TRUE,
years = yearload,
withtag = as.logical(withtag),
datatag = datatag,
use_share = as.logical(use_share))
# add in location information
df <- merge_with_ihme_loc(df)
# make a list of dfs for each region, with 5 qt folds identified in each
stratum_ho <- make_folds(data = df,
n_folds = as.numeric(n_ho_folds),
spat_strat = 'qt',
temp_strat = 'prop',
strat_cols = 'region',
ts = as.numeric(ho_ts),
mb = as.numeric(ho_mb))
}
## Launch parallel script -------------------------------------------------------------------------
## Make loopvars aka strata grid (format = regions, ages, holdouts)
if(makeholdouts) loopvars <- expand.grid(Regions, 0, 0:n_ho_folds) else loopvars <- expand.grid(Regions, 0, 0)

## Loop over strata, save images and submit qsubs.
## FIX: seq_len() instead of 1:nrow() (safe if the grid were ever empty), and
## TRUE/FALSE instead of the reassignable shorthand T/F.
for(i in seq_len(nrow(loopvars))){
  message(paste(loopvars[i,2],as.character(loopvars[i,1]),loopvars[i,3]))
  ### SAVE IMAGE AND ###
  # build the qsub command for this region/age/holdout combination
  qsub <- make_qsub_share(age = loopvars[i,2],
                          reg = as.character(loopvars[i,1]),
                          holdout = loopvars[i,3],
                          test = FALSE,
                          indic = indicator,
                          saveimage = TRUE,
                          memory = ifelse(individual_countries, 10, 50),
                          cores = ifelse(individual_countries, 5, 10),
                          proj = proj,
                          geo_nodes = as.logical(use_geos_nodes),
                          corerepo = repo,
                          code = NULL,
                          addl_job_name = paste0('stateREs_', user),
                          singularity = 'default')
  # submit job (the string is printed; submission itself presumably happens
  # inside make_qsub_share -- confirm)
  print(qsub)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phytoFilterClass.R
\name{DebrisFilter}
\alias{DebrisFilter}
\title{the Debris class}
\usage{
DebrisFilter(
fullflowframe,
reducedflowframe,
deb_pos,
syn_all_pos,
deb_cut,
ch_chlorophyll,
ch_p2
)
DebrisFilter(
fullflowframe,
reducedflowframe,
deb_pos,
syn_all_pos,
deb_cut,
ch_chlorophyll,
ch_p2
)
}
\arguments{
\item{fullflowframe}{same as the input flowFrame}
\item{reducedflowframe}{a partial flowframe containing non-margin events}
\item{deb_pos}{number of margin particles measured}
\item{syn_all_pos}{number of non-margine particles}
\item{deb_cut}{estimated inflection point between debris and good cells}
\item{ch_chlorophyll}{channel estimating chlorophyll level}
\item{ch_p2}{plotting channel}
}
\value{
object of class DebrisFilter
}
\description{
the Debris class
constructor for the DebrisFilter class
}
\section{Slots}{
\describe{
\item{\code{fullflowframe}}{object of class "flowFrame" same as the input flowFrame}
\item{\code{reducedflowframe}}{object of class "flowFrame" a partial flowframe
containing a proportion of the measured particles}
\item{\code{deb_pos}}{object of class "numeric" representing the
proportion of particles in each cluster}
\item{\code{syn_all_pos}}{object of class "numeric" representing the
number of particles in each cluster}
\item{\code{deb_cut}}{object of class "numeric" representing the inflection point
between debris and good cells.}
\item{\code{ch_chlorophyll}}{objet of class "character" representing the chlorophyll
channel.}
\item{\code{ch_p2}}{object of class character to plot}
}}
| /man/DebrisFilter.Rd | no_license | fomotis/cyanoFilter | R | false | true | 1,663 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phytoFilterClass.R
\name{DebrisFilter}
\alias{DebrisFilter}
\title{the Debris class}
\usage{
DebrisFilter(
fullflowframe,
reducedflowframe,
deb_pos,
syn_all_pos,
deb_cut,
ch_chlorophyll,
ch_p2
)
DebrisFilter(
fullflowframe,
reducedflowframe,
deb_pos,
syn_all_pos,
deb_cut,
ch_chlorophyll,
ch_p2
)
}
\arguments{
\item{fullflowframe}{same as the input flowFrame}
\item{reducedflowframe}{a partial flowframe containing non-margin events}
\item{deb_pos}{number of margin particles measured}
\item{syn_all_pos}{number of non-margine particles}
\item{deb_cut}{estimated inflection point between debris and good cells}
\item{ch_chlorophyll}{channel estimating chlorophyll level}
\item{ch_p2}{plotting channel}
}
\value{
object of class DebrisFilter
}
\description{
the Debris class
constructor for the DebrisFilter class
}
\section{Slots}{
\describe{
\item{\code{fullflowframe}}{object of class "flowFrame" same as the input flowFrame}
\item{\code{reducedflowframe}}{object of class "flowFrame" a partial flowframe
containing a proportion of the measured particles}
\item{\code{deb_pos}}{object of class "numeric" representing the
proportion of particles in each cluster}
\item{\code{syn_all_pos}}{object of class "numeric" representing the
number of particles in each cluster}
\item{\code{deb_cut}}{object of class "numeric" representing the inflection point
between debris and good cells.}
\item{\code{ch_chlorophyll}}{objet of class "character" representing the chlorophyll
channel.}
\item{\code{ch_p2}}{object of class character to plot}
}}
|
args <- commandArgs(trailing=T)
errorCode <- args[1]
quit(status=errorCode) | /hmf-common/src/test/resources/r/dummyR.R | permissive | j-hudecek/hmftools | R | false | false | 76 | r | args <- commandArgs(trailing=T)
errorCode <- args[1]
quit(status=errorCode) |
#' Get the list of packages
#'
#' @param vec A vector of character values. Refer the relavant information about a package you are looking for.
#'
#' @return tibble. A tibble with a list of packages.
#' @author Jiaxiang Li \email{alex.lijiaxiang@foxmail.com}
#'
#' @import packagefinder
#' @import tibble
#'
#' @seealso download_log
#' @export
pkg_list <-
function(vec){
packagefinder::findPackage(vec) %>%
tibble::as_tibble()
}
| /R/pkg_list.R | permissive | JiaxiangBU/add2prep | R | false | false | 441 | r | #' Get the list of packages
#'
#' @param vec A vector of character values. Refer the relavant information about a package you are looking for.
#'
#' @return tibble. A tibble with a list of packages.
#' @author Jiaxiang Li \email{alex.lijiaxiang@foxmail.com}
#'
#' @import packagefinder
#' @import tibble
#'
#' @seealso download_log
#' @export
pkg_list <-
function(vec){
packagefinder::findPackage(vec) %>%
tibble::as_tibble()
}
|
testlist <- list(data = structure(c(5.59504565543767e+141, 5.59504565543767e+141, 5.59504750246307e+141, 1.32687698517662e-305, 5.59504565543767e+141, 5.59504565543767e+141, 2.85202113981599e-307, 5.59504565543767e+141, 5.59504739985061e+141, 3.88209828655406e-265, 3.88209828655406e-265, 3.8766467820802e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.8820982865542e-265, 1.08227766311685e-304, 2.58981145385929e-307, 1.39067116124321e-309, 3.23790861658519e-319, 3.52939189424299e+30, 3.52939363896296e+30, 4.88856325807381e+131, 4.24482048648698e-313, 3.23790861658519e-319, 0, 1.49122214107336e-312, 6.13486869518866e-92, 2.41737008055398e+35, 7.2911220195564e-304, 3.45845952088873e-322, 3.49239876416278e+30, 3.49284541247374e+30, 2.57491569628111e+29, 1.08667404455327e-310, 8.28904605845809e-317, 0, 3.81752868025887e-310, 2.41737166511059e+35, 3.52953696536801e+30, 2.6735651456626e+29, 2.47812147378838e-307, 3.80267470813202e-310, 1.65436123098017e-24, 2.67394934559373e+29, 3.5295369695637e+30, 4.67057397931826e+38, 2.92228926273377e+48, 1.70549711754706e+34, 6.1545541536469e-313, 6.22683370448953e+38, 2.73863580539485e-315, 5.3664929708415e-255, 2.64619557336756e-260, 3.81752867881136e-310, 3.52953696534134e+30, 3.52953696534134e+30, 3.52953696617977e+30, 4.4606551385273e+43, 9.32399348468807e-256, 2.27344862135178e-313, 1.0654731740058e-255, 6.38863206256519e-304, 2.41737052174617e+35, 4.66003234691539e-10, 2.75435933715521e+37, 6.95335581017114e-310, 6.95335580945396e-310, 3.56011817361152e-305, 3.87214414605933e-310, 2.84809454423549e-306, 3.52953806518976e+30, 3.52952113075679e+30, 3.94108716089668e-312, 1.49122214015934e-312, 3.52953696534134e+30, 1.03242897886929e-255, 4.24629954240216e-314, 2.4172575765633e+35, 7.29112201577501e-304), .Dim = c(9L, 9L)), q = 2.71615469085365e-312)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554816-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,968 | r | testlist <- list(data = structure(c(5.59504565543767e+141, 5.59504565543767e+141, 5.59504750246307e+141, 1.32687698517662e-305, 5.59504565543767e+141, 5.59504565543767e+141, 2.85202113981599e-307, 5.59504565543767e+141, 5.59504739985061e+141, 3.88209828655406e-265, 3.88209828655406e-265, 3.8766467820802e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.88209828655406e-265, 3.8820982865542e-265, 1.08227766311685e-304, 2.58981145385929e-307, 1.39067116124321e-309, 3.23790861658519e-319, 3.52939189424299e+30, 3.52939363896296e+30, 4.88856325807381e+131, 4.24482048648698e-313, 3.23790861658519e-319, 0, 1.49122214107336e-312, 6.13486869518866e-92, 2.41737008055398e+35, 7.2911220195564e-304, 3.45845952088873e-322, 3.49239876416278e+30, 3.49284541247374e+30, 2.57491569628111e+29, 1.08667404455327e-310, 8.28904605845809e-317, 0, 3.81752868025887e-310, 2.41737166511059e+35, 3.52953696536801e+30, 2.6735651456626e+29, 2.47812147378838e-307, 3.80267470813202e-310, 1.65436123098017e-24, 2.67394934559373e+29, 3.5295369695637e+30, 4.67057397931826e+38, 2.92228926273377e+48, 1.70549711754706e+34, 6.1545541536469e-313, 6.22683370448953e+38, 2.73863580539485e-315, 5.3664929708415e-255, 2.64619557336756e-260, 3.81752867881136e-310, 3.52953696534134e+30, 3.52953696534134e+30, 3.52953696617977e+30, 4.4606551385273e+43, 9.32399348468807e-256, 2.27344862135178e-313, 1.0654731740058e-255, 6.38863206256519e-304, 2.41737052174617e+35, 4.66003234691539e-10, 2.75435933715521e+37, 6.95335581017114e-310, 6.95335580945396e-310, 3.56011817361152e-305, 3.87214414605933e-310, 2.84809454423549e-306, 3.52953806518976e+30, 3.52952113075679e+30, 3.94108716089668e-312, 1.49122214015934e-312, 3.52953696534134e+30, 1.03242897886929e-255, 4.24629954240216e-314, 
2.4172575765633e+35, 7.29112201577501e-304), .Dim = c(9L, 9L)), q = 2.71615469085365e-312)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
# load dhs survey
# Relative paths to the DHS survey data files and shared helper code.
dhs_surveys = "../DHS/_Surveys/"
dhs_code = "../DHS/_Code/"
library(survey)
# Handle strata containing a single PSU by centering at the grand mean
# instead of erroring (see ?survey.lonely.psu).
options(survey.lonely.psu="adjust")
options(survey.adjust.domain.lonely=TRUE)
library(ggplot2)
library(scales)
library(grid)
library(lubridate)
library(dplyr)
# functions for loading data files ####
# Source the GIS loader only if it is not already defined in this session.
if (!existsFunction( "survey_GIS_data" ) ) source( paste0( dhs_code, "getSurveyGIS.R") )
#' Open a single DHS survey recode file.
#'
#' Looks first for a legacy .rda file under the global `dhs_surveys` directory
#' ("<country>/<survey> <year>/<tab>.rda"); if absent, falls back to the DHS
#' standard file-naming scheme and reads the matching .rds file.
#'
#' @param country country name ("DRC" is translated to its DHS folder name)
#' @param survey  survey type, e.g. "DHS" or "MIS"
#' @param year    survey year
#' @param tab     recode table name, e.g. "Children's Recode"
#' @return the loaded data object, or NULL if no matching file was found
openSurveyFile <- function(
  country = NA ,
  # survey_year = NA,
  survey = NA,
  year = NA,
  tab = NA
)
{
  x <- NULL
  # Legacy layout: an .rda that stores its data under the name 'x'.
  file <- paste0( dhs_surveys ,
                  ifelse( country %in% "DRC",
                          "Congo Democratic Republic",
                          country),
                  "/", survey, " ", year, "/", tab, ".rda")
  if ( file.exists( file ) ){
    load( file ) # file will be loaded as 'x'
  } else {
    # Map recode table names to the 2-letter DHS file-type codes.
    # (data.frame replaces the deprecated dplyr::data_frame; behavior is
    # identical here since we only match() against the character column.)
    filecodes <- data.frame( abrev = c("kr", "br", "ir", "pr", "hr", "hr" ),
                             full = c("Children's Recode","Supplemental Births Recode","Individual Recode",
                                      "Household Member Recode", "Household Recode", "Supplemental Household Recode"),
                             stringsAsFactors = FALSE)
    dir <- paste0( country, "/", year, "_", survey, "/" )
    files <- list.files(dir)
    # Target file prefix = lowercase ISO2 country code + recode code.
    # NOTE(review): countrycode() is used but the countrycode package is never
    # loaded in this file -- confirm it is attached elsewhere.
    prefix <- paste0( tolower( countrycode( country, "country.name", "iso2c") ),
                      filecodes$abrev[ match(tab, filecodes$full) ] )
    # Characters 5-8 of a sibling file supply the shared middle segment of the
    # name. NOTE(review): the negative grep() samples a file *other* than the
    # target recode -- verify this is intended and not a stray minus sign.
    middle <- substr( files[-grep(prefix, files, fixed = TRUE)][1], 5, 8 )
    suffix <- ".rds"
    file <- paste0( dir, prefix, middle, suffix)
    if ( file.exists( file ) ){
      x <- readRDS( file ) # modern layout stores the table as a plain .rds
    }
  }
  return(x)
}
# TODO: get printout working. R does not normally print from inside a function.
load_survey_object = function(
.country = "Angola",
# .survey_year = "MIS 2011",
.year = 2011 ,
.survey = "DHS",
design = FALSE, # return survey design object
dataset = TRUE, # returns dataset (x)
geo = FALSE,
printout = FALSE,
vars = NULL # if not specified, will select variables from vars() [dhs_variable_selection.R]
){
# no vars given, get basic list of variables
if ( is.null(vars) ){
source( paste0( dhs_code, "dhs_variable_selection.R") )
vars = some_variables()
}
linking_vars = c("hv001", "v001", "hv002", "v002", "hvidx", "b16", "hv003" , "v003", "hv021", "v021" )
weight_vars = c("v005", "hv005", 'weight.c', 'weight.hm', 'weight.w', 'weight.h')
vars = unique( c( vars, linking_vars, weight_vars ) ) %>% tolower
vars = vars[order(vars)]
if (printout){ cat(vars) }
c = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Children's Recode")
)
s = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Supplemental Births Recode")
)
w = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Individual Recode")
)
hm = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Household Member Recode")
)
h = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Household Recode")
)
if ( class(h) == "try-error" | class(h) == "logical" | is.null(h) ){
h = try(
openSurveyFile(country = .country, survey = .survey, year = .year,
tab = "Supplemental Household Recode")
)
}
if (geo){
g = try(
survey_GIS_data( country = .country, survey = .survey, year = .year)
)
} else { g = NULL }
if (printout){
cat(paste(
"the household file has", nrow(h), "rows and ", ncol(h), "columns", "\n",
"the household member file has", nrow(hm), "rows and ", ncol(hm), "columns", "\n",
"the women's file has", nrow(w), "rows and ", ncol(w), "columns", "\n",
"the childrens file has", nrow(c), "rows and ", ncol(c), "columns", "\n",
"the GIS file has", nrow(g), "rows and ", ncol(g), "columns")
)
}
# to avoid confusion/conflict, create file specific weight variables
c = c %>% rename( weight.c = v005 )
w = w %>% rename( weight.w = v005 )
hm = hm %>% rename( weight.hm = hv005 )
h = h %>% rename( weight.h = hv005 )
vars_c = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(c))) )
vars_w = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(w))) )
vars_hm = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(hm))) )
vars_h = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(h))) )
# start with household member file: hm
# merge 1.:
## household member with children, excluding variables already in hm
if ( !class(c) == "try-error" && !is.null(c) &&
sapply( "b16", function(x) any(grepl(paste0("\\b", x, "\\b"), names(c))) ) == TRUE
)
{
c_vars_not_in_hm = setdiff( names(vars_c[vars_c == TRUE]), names(vars_hm[vars_hm == TRUE] ) )
# full join to get both children of inteviewed women and children of women not interviewed but in house
hmc = hm[, names(vars_hm[vars_hm == TRUE]) ] %>%
full_join( c[, c_vars_not_in_hm ],
by = c("hv001"="v001", "hv002"="v002", "hvidx" = "b16") )
} else
{ hmc = hm }
# rm( hm); rm(c)
if (printout){
cat(paste( "the merged childrens-womens file has", nrow(hmc), "rows and ", ncol(hmc), "columns")
)
}
vars_hmc = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(hmc))) )
# merge 2.:
## household member-children with W, excluding variables already in hmc
if ( !class(w) == "try-error" && !is.na(w) )
{
vars_w = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(w))) )
w_vars_not_in_hmc = setdiff( names(vars_w[vars_w == TRUE]),
names(vars_hmc[vars_hmc == TRUE] ) )
# join all interviewed women in household
hmcw = hmc %>%
left_join( w[, c(w_vars_not_in_hmc , "v003" )],
by = c("hv001"="v001", "hv002"="v002", "hv003" = "v003") )
} else
{ hmcw = hmc }
rm( hmc )
if (printout){
cat(paste( "the merged household member-children-womens file has", nrow(hmcw), "rows and ", ncol(hmcw), "columns"))
}
vars_hmcw = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(hmcw))) )
# merge 3.:
## merged file with household (if needed)
#Are there any variable to add?
if ( !class(h) == "try-error" && !is.na(h) )
{
vars_h = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(h))) )
h_vars_not_in_hmcw = setdiff(names(vars_h[vars_h == TRUE]), names(vars_hmcw[vars_hmcw == TRUE]) )
if (length(h_vars_not_in_hmcw)>0 )
{
hmcwh = hmcw %>%
left_join( h[, c(h_vars_not_in_hmcw, "hv001", "hv002")],
by=c("hv001"="hv001", "hv002"="hv002"))
} else
{ hmcwh = hmcw }
} else
{ hmcwh = hmcw }
rm( hmcw )
vars_hmcwh = sapply( vars, function(x) any(grepl(paste0("\\b", x, "\\b"), names(hmcwh))) )
## join geo file
# geo file variable DHSID links with Household Member Recode variable hv001 (cluster number)
if ( geo && is.data.frame(hmcwh) && !class(g) == "try-error" && !is.null(g) )
{
hmcwhg = hmcwh %>% left_join(g, by=c("hv001"="dhsid") )
} else {
hmcwhg = hmcwh
}
rm( hmcwh )
vars_hmcwhg = sapply( c(vars, "v005w"), function(x) any(grepl(paste0("\\b", x, "\\b"), names(hmcwhg))) )
if (printout){
cat(paste( "the merged household member-children-womens-houshold file has",
nrow(hmcwhg), "rows and ", ncol(hmcwhg), "columns")
)
}
# remove hv005 and v005 (the original weight variables), if they are still present
if ("v005" %in% names(hmcwhg)){ hmcwhg = hmcwhg %>% select( -v005 )}
if ("hv005" %in% names(hmcwhg)){ hmcwhg = hmcwhg %>% select( -hv005 )}
hmcwhg = hmcwhg %>% mutate(
# hml32 = if( exists('hml32', where = hmcwh)) { ifelse(hml32>1 , NA, hml32) } else { NA},
# hml35 = if( exists('hml35', where = hmcwh)) { ifelse(hml35>1 , NA, hml35) } else { NA},
# hml1.n = if( exists('hml1', where = hmcwh)) { ifelse(hml1 %in% 0:97 , hml1, NA) } else { NA},
# hml1.own = if( exists('hml1', where = hmcwh)) { ifelse(hml1 > 0 , 1, 0) } else { NA},
# hml12.anynet = if( exists('hml12', where = hmcwh)) { ifelse( hml12 %in% 1:3, 1, 0)} else { NA},
# hml12.itn = if( exists('hml12', where = hmcwh)) { ifelse( hml12 %in% 1:2, 1, 0)} else { NA},
hv105.grp = if( exists('hv105', where = hmcwhg)) { cut( hv105, breaks = c(0, 1, 5,15,25,55,Inf), include.lowest = TRUE )} else { NA}
# hv105.grp.c = if( exists('hv105', where = hmcwh)) { cut( hv105, breaks = c(0, 1, 2, 3, 4, 5 ), include.lowest = TRUE )} else { NA},
# b78 = if( exists('b8', where = hmcwh)) { ifelse( is.na(b8), b7, b8)} else { NA}, # age(ys) of live and dead
# b78 = if( exists('b78', where = hmcwh)) { ifelse( b78>4, NA, b78)} else { NA},
# bednet usage
# children under 5 slept under net last night: v460
## if have bednet
# v460a = if( exists('v460', where = hmcwh)) { ifelse(v460 == 2 , 1, v460) } else { NA},
# v460a = if( exists('v460', where = hmcwh)) { ifelse(v460a > 1 , NA, v460a) } else { NA},
# ## all children
# v460b = if( exists('v460', where = hmcwh)) { ifelse(v460 == 2 , 1, v460) } else { NA},
# v460b = if( exists('v460', where = hmcwh)) { ifelse(v460b > 1 , 0, v460b) } else { NA},
# holes in the net: sh133b. Remove DK = 8
# sh133b = if( exists('sh133b', where = hmcwh)) { ifelse(sh133b == 8 , NA, sh133b) } else { NA},
# one = 1
)
x = hmcwhg; rm(hmcwhg)
if (printout){
cat(paste( "the completed merged file has", nrow(x), "rows and ", ncol(x), "columns"))
}
if (design){
# test if strata exists; some surveys have no strata (e.g. madagascar mis 2011)
has.strata.022h = nrow( as.data.frame.table( table(x$hv022) ) ) > 1
has.strata.023h = nrow( as.data.frame.table( table(x$hv023) ) ) > 1
has.strata.025h = nrow( as.data.frame.table( table(x$hv025) ) ) > 1
has.strata.022c = nrow( as.data.frame.table( table(x$v022) ) ) > 1
has.strata.023c = nrow( as.data.frame.table( table(x$v023) ) ) > 1
has.strata.025c = nrow( as.data.frame.table( table(x$v025) ) ) > 1
if (has.strata.022h) { # urban/rural
strataformula.h = as.formula("~hv022 ")
strataformula.hm = as.formula("~hv022 ")
} else {
strataformula.h = NULL
strataformula.hm = NULL
}
if (has.strata.022c) { # urban/rural
strataformula.c = as.formula("~v022 ")
strataformula.w = as.formula("~v022 ")
} else {
strataformula.c = NULL
strataformula.w = NULL
}
# see Vanderelst/Speybroeck (different from Damico); to include household?
x.h = x %>% filter( !is.na(weight.h), !is.na(hv021) )
# household
if ( nrow( x.h) > 0 )
{
svy.h <-
svydesign(
~hv021 , # psu
strata = strataformula.h ,
data = x.h ,
weights = ~ weight.h
)
} else {
svy.h = NULL
}
# svy.h <- update( one = 1 , svy.h ) # deprecated because 'one' previously defined in dataset x
# svytotal( ~one , svy.h ) # ????
# svyby( ~one , ~one , svy.h , unwtd.count )
# svyby( ~one , ~hv025 , svy.h , unwtd.count )
# childrens...
x.c = x %>% filter( !is.na(weight.c), !is.na(v021) )
svy.c <-
svydesign(
~v021 , # psu
strata = strataformula.c ,
data = x.c ,
weights = ~ weight.c
)
# womens
x.w = x %>% filter( !is.na(weight.w) , !is.na(v021))
svy.w <-
svydesign(
~v021 , # psu
strata = strataformula.w ,
data = x.w ,
weights = ~ weight.w
)
# household member
x.hm = x %>% filter( !is.na(weight.hm), !is.na(hv021) )
svy.hm <-
svydesign(
~hv021 , # psu
strata = strataformula.hm ,
data = x.hm ,
weights = ~ weight.hm
)
} else {
svy.h = NULL
svy.c = NULL
svy.w = NULL
svy.hm = NULL
} # end if (design)
vars_x = sapply( names(x), function(x) any(grepl(paste0("\\b", x, "\\b"), x)) )
if (dataset){
return( list(svy.h, svy.c, svy.w, svy.hm, vars_x, x))
} else {
return( list(svy.h, svy.c, svy.w, svy.hm, vars_x))
}
}
# test / not run
# svy = load_survey_object(printout = T)
# svy.h = svy[[1]]
# svy.c = svy[[2]]
# varlist = svy[[3]]
# x = svy[[4]]
# svytotal( ~one , svy.c ) # ????
# svyby( ~one , ~one , svy.c , unwtd.count )
# svy = load_survey_object()
# svy.h = svy[[1]]
# svymean( ~ hml32 , svy.h , na.rm = TRUE )
# svyby( ~ hml32 , ~ hv105.grp.c, svy.h, svymean, na.rm = TRUE )
# svytotal( ~one , svy.h ) # ????
# svyby( ~one , ~one , svy.h , unwtd.count )
# Mortality (b5 is actually survived, mortality is 1- result below)
# svyby( ~ one , ~ b5 , svy.c , unwtd.count )
# svymean(~ b5, svy.c, na.rm = TRUE)
# svytable(~ b5 + v025, svy.c, round=TRUE)
# svyby( ~ b5 , ~ v025, svy.c , svymean , na.rm = TRUE )
# svyby( ~ b5 , ~ b78, svy.c , svymean , na.rm = TRUE )
| /_Code/dhs_load_survey.R | no_license | jpainter/dhs | R | false | false | 13,789 | r | # load dhs survey
dhs_surveys = "../DHS/_Surveys/"
dhs_code = "../DHS/_Code/"
library(survey)
options(survey.lonely.psu="adjust")
options(survey.adjust.domain.lonely=TRUE)
library(ggplot2)
library(scales)
library(grid)
library(lubridate)
library(dplyr)
# functions for loading data files ####
if (!existsFunction( "survey_GIS_data" ) ) source( paste0( dhs_code, "getSurveyGIS.R") )
# Open a single DHS recode file for one country / survey / year.
#
# Looks first for a pre-saved .rda file under `dhs_surveys` (the .rda is
# assumed to contain a single object named 'x'); when that is absent, falls
# back on locating a raw .rds export whose name is built from the country's
# ISO2 code plus the two-letter recode prefix (e.g. "ao" + "kr").
# Returns the loaded data, or NULL when no matching file is found.
#
# Args:
#   country: country name, e.g. "Angola". "DRC" is mapped to
#            "Congo Democratic Republic" for the folder name.
#   survey:  survey type, e.g. "DHS" or "MIS".
#   year:    survey year, e.g. 2011.
#   tab:     recode table name, e.g. "Children's Recode".
openSurveyFile = function(
  country = NA ,
  # survey_year = NA,
  survey = NA,
  year = NA,
  tab = NA
)
{
  x = NULL
  file = paste0( dhs_surveys ,
                 ifelse( country %in% "DRC",
                         "Congo Democratic Republic",
                         country),
                 "/", survey, " ", year, "/", tab, ".rda")
  if ( file.exists( file ) ){
    load( file ) # the .rda is expected to contain an object named 'x'
  } else {
    # map recode table names to the DHS two-letter file-name codes
    # (dplyr::data_frame() is deprecated; use a plain data frame)
    filecodes = data.frame(
      abrev = c("kr", "br", "ir", "pr", "hr", "hr" ),
      full  = c("Children's Recode", "Supplemental Births Recode", "Individual Recode",
                "Household Member Recode", "Household Recode", "Supplemental Household Recode"),
      stringsAsFactors = FALSE )
    dir =
      paste0( country, "/", year, "_", survey, "/" )
    files = list.files(dir)
    prefix = paste0( tolower( countrycode( country, "country.name", "iso2c") ),
                     filecodes$abrev[ match(tab, filecodes$full) ] )
    # 4-character phase/version code taken from a sibling file that does not
    # match the prefix; all exports in a survey folder share this middle code
    middle = substr( files[-grep(prefix, files, fixed = TRUE)][1], 5, 8 )
    suffix = ".rds"
    file = paste0( dir, prefix, middle, suffix)
    if ( file.exists( file ) ){
      x = readRDS( file ) # file will be loaded as 'x'
    }
  }
  return(x)
}
# TODO: get printout working. R does not normally print from inside a function.
# Build a merged DHS analysis dataset for one country / survey / year and,
# optionally, survey-design objects for each recode level.
#
# The household member (hm), children (c), women (w), household (h) and
# optional GIS (g) recode files are loaded with openSurveyFile() and merged
# into one data frame restricted to the requested variables.
#
# Returns a list:
#   [[1]] svy.h   household survey design (NULL unless design = TRUE)
#   [[2]] svy.c   children survey design  (NULL unless design = TRUE)
#   [[3]] svy.w   women survey design     (NULL unless design = TRUE)
#   [[4]] svy.hm  household-member survey design (NULL unless design = TRUE)
#   [[5]] vars_x  logical vector flagging which requested variables are
#                 present in the merged dataset
#   [[6]] x       the merged dataset (only when dataset = TRUE)
# TODO: get printout working. R does not normally print from inside a function.
load_survey_object = function(
  .country = "Angola",
  # .survey_year = "MIS 2011",
  .year = 2011 ,
  .survey = "DHS",
  design = FALSE, # return survey design object
  dataset = TRUE, # returns dataset (x)
  geo = FALSE,
  printout = FALSE,
  vars = NULL # if not specified, will select variables from vars() [dhs_variable_selection.R]
){
  # no vars given, get basic list of variables
  if ( is.null(vars) ){
    source( paste0( dhs_code, "dhs_variable_selection.R") )
    vars = some_variables()
  }
  # always keep the variables needed to link files, plus the design weights
  linking_vars = c("hv001", "v001", "hv002", "v002", "hvidx", "b16", "hv003" , "v003", "hv021", "v021" )
  weight_vars = c("v005", "hv005", 'weight.c', 'weight.hm', 'weight.w', 'weight.h')
  vars = unique( c( vars, linking_vars, weight_vars ) ) %>% tolower
  vars = vars[order(vars)]
  if (printout){ cat(vars) }
  # load each recode file; a missing file leaves a try-error / NULL behind,
  # which is tolerated downstream
  c = try(
    openSurveyFile(country = .country, survey = .survey, year = .year,
                   tab = "Children's Recode")
  )
  s = try(
    openSurveyFile(country = .country, survey = .survey, year = .year,
                   tab = "Supplemental Births Recode")
  )
  w = try(
    openSurveyFile(country = .country, survey = .survey, year = .year,
                   tab = "Individual Recode")
  )
  hm = try(
    openSurveyFile(country = .country, survey = .survey, year = .year,
                   tab = "Household Member Recode")
  )
  h = try(
    openSurveyFile(country = .country, survey = .survey, year = .year,
                   tab = "Household Recode")
  )
  # fall back on the supplemental household file when the main one is absent.
  # Use inherits()/is.logical(): class() can return more than one class
  # (e.g. tibbles), which makes `class(h) == "try-error"` non-scalar.
  if ( inherits(h, "try-error") || is.logical(h) || is.null(h) ){
    h = try(
      openSurveyFile(country = .country, survey = .survey, year = .year,
                     tab = "Supplemental Household Recode")
    )
  }
  if (geo){
    g = try(
      survey_GIS_data( country = .country, survey = .survey, year = .year)
    )
  } else { g = NULL }
  if (printout){
    cat(paste(
      "the household file has", nrow(h), "rows and ", ncol(h), "columns", "\n",
      "the household member file has", nrow(hm), "rows and ", ncol(hm), "columns", "\n",
      "the women's file has", nrow(w), "rows and ", ncol(w), "columns", "\n",
      "the childrens file has", nrow(c), "rows and ", ncol(c), "columns", "\n",
      "the GIS file has", nrow(g), "rows and ", ncol(g), "columns")
    )
  }
  # to avoid confusion/conflict, create file specific weight variables
  # (guarded: calling rename() on a try-error would abort the whole function)
  if ( is.data.frame(c) ) { c = c %>% rename( weight.c = v005 ) }
  if ( is.data.frame(w) ) { w = w %>% rename( weight.w = v005 ) }
  if ( is.data.frame(hm) ){ hm = hm %>% rename( weight.hm = hv005 ) }
  if ( is.data.frame(h) ) { h = h %>% rename( weight.h = hv005 ) }
  # helper: flag which requested variables occur (as whole words) among nms
  vars_present = function( v, nms ) {
    sapply( v, function(p) any(grepl(paste0("\\b", p, "\\b"), nms)) )
  }
  vars_c = vars_present( vars, names(c) )
  vars_w = vars_present( vars, names(w) )
  vars_hm = vars_present( vars, names(hm) )
  vars_h = vars_present( vars, names(h) )
  # start with household member file: hm
  # merge 1.:
  ## household member with children, excluding variables already in hm
  ## (b16 = child's line number in household, required as a join key)
  if ( !inherits(c, "try-error") && !is.null(c) &&
       any(grepl("\\bb16\\b", names(c)))
  )
  {
    c_vars_not_in_hm = setdiff( names(vars_c[vars_c]), names(vars_hm[vars_hm]) )
    # full join to get both children of interviewed women and children of women not interviewed but in house
    hmc = hm[, names(vars_hm[vars_hm]) ] %>%
      full_join( c[, c_vars_not_in_hm ],
                 by = c("hv001"="v001", "hv002"="v002", "hvidx" = "b16") )
  } else
  { hmc = hm }
  # rm( hm); rm(c)
  if (printout){
    cat(paste( "the merged childrens-womens file has", nrow(hmc), "rows and ", ncol(hmc), "columns")
    )
  }
  vars_hmc = vars_present( vars, names(hmc) )
  # merge 2.:
  ## household member-children with W, excluding variables already in hmc
  ## (was `!is.na(w)`: is.na() on a data frame returns a matrix and breaks &&)
  if ( !inherits(w, "try-error") && !is.null(w) )
  {
    w_vars_not_in_hmc = setdiff( names(vars_w[vars_w]),
                                 names(vars_hmc[vars_hmc]) )
    # join all interviewed women in household
    hmcw = hmc %>%
      left_join( w[, c(w_vars_not_in_hmc , "v003" )],
                 by = c("hv001"="v001", "hv002"="v002", "hv003" = "v003") )
  } else
  { hmcw = hmc }
  rm( hmc )
  if (printout){
    cat(paste( "the merged household member-children-womens file has", nrow(hmcw), "rows and ", ncol(hmcw), "columns"))
  }
  vars_hmcw = vars_present( vars, names(hmcw) )
  # merge 3.:
  ## merged file with household (if needed)
  # Are there any variables to add?
  if ( !inherits(h, "try-error") && !is.null(h) )
  {
    h_vars_not_in_hmcw = setdiff(names(vars_h[vars_h]), names(vars_hmcw[vars_hmcw]) )
    if (length(h_vars_not_in_hmcw)>0 )
    {
      hmcwh = hmcw %>%
        left_join( h[, c(h_vars_not_in_hmcw, "hv001", "hv002")],
                   by=c("hv001"="hv001", "hv002"="hv002"))
    } else
    { hmcwh = hmcw }
  } else
  { hmcwh = hmcw }
  rm( hmcw )
  vars_hmcwh = vars_present( vars, names(hmcwh) )
  ## join geo file
  # geo file variable DHSID links with Household Member Recode variable hv001 (cluster number)
  if ( geo && is.data.frame(hmcwh) && !inherits(g, "try-error") && !is.null(g) )
  {
    hmcwhg = hmcwh %>% left_join(g, by=c("hv001"="dhsid") )
  } else {
    hmcwhg = hmcwh
  }
  rm( hmcwh )
  vars_hmcwhg = vars_present( c(vars, "v005w"), names(hmcwhg) )
  if (printout){
    cat(paste( "the merged household member-children-womens-houshold file has",
               nrow(hmcwhg), "rows and ", ncol(hmcwhg), "columns")
    )
  }
  # remove hv005 and v005 (the original weight variables), if they are still present
  if ("v005" %in% names(hmcwhg)){ hmcwhg = hmcwhg %>% select( -v005 )}
  if ("hv005" %in% names(hmcwhg)){ hmcwhg = hmcwhg %>% select( -hv005 )}
  # derive the age-group variable; earlier exploratory derivations (hml32,
  # hml35, bednet usage v460*, sh133b, b78, ...) were removed here — recover
  # them from version control if needed
  hmcwhg = hmcwhg %>% mutate(
    hv105.grp = if( exists('hv105', where = hmcwhg)) { cut( hv105, breaks = c(0, 1, 5,15,25,55,Inf), include.lowest = TRUE )} else { NA}
  )
  x = hmcwhg; rm(hmcwhg)
  if (printout){
    cat(paste( "the completed merged file has", nrow(x), "rows and ", ncol(x), "columns"))
  }
  if (design){
    # test if strata exists; some surveys have no strata (e.g. madagascar mis 2011)
    # NOTE(review): only the hv022/v022 flags are used below; the 023/025
    # flags are computed but unused — kept for parity with the original
    has.strata.022h = nrow( as.data.frame.table( table(x$hv022) ) ) > 1
    has.strata.023h = nrow( as.data.frame.table( table(x$hv023) ) ) > 1
    has.strata.025h = nrow( as.data.frame.table( table(x$hv025) ) ) > 1
    has.strata.022c = nrow( as.data.frame.table( table(x$v022) ) ) > 1
    has.strata.023c = nrow( as.data.frame.table( table(x$v023) ) ) > 1
    has.strata.025c = nrow( as.data.frame.table( table(x$v025) ) ) > 1
    if (has.strata.022h) { # urban/rural
      strataformula.h = as.formula("~hv022 ")
      strataformula.hm = as.formula("~hv022 ")
    } else {
      strataformula.h = NULL
      strataformula.hm = NULL
    }
    if (has.strata.022c) { # urban/rural
      strataformula.c = as.formula("~v022 ")
      strataformula.w = as.formula("~v022 ")
    } else {
      strataformula.c = NULL
      strataformula.w = NULL
    }
    # see Vanderelst/Speybroeck (different from Damico); to include household?
    x.h = x %>% filter( !is.na(weight.h), !is.na(hv021) )
    # household
    if ( nrow( x.h) > 0 )
    {
      svy.h <-
        svydesign(
          ~hv021 , # psu
          strata = strataformula.h ,
          data = x.h ,
          weights = ~ weight.h
        )
    } else {
      svy.h = NULL
    }
    # childrens...
    x.c = x %>% filter( !is.na(weight.c), !is.na(v021) )
    svy.c <-
      svydesign(
        ~v021 , # psu
        strata = strataformula.c ,
        data = x.c ,
        weights = ~ weight.c
      )
    # womens
    x.w = x %>% filter( !is.na(weight.w) , !is.na(v021))
    svy.w <-
      svydesign(
        ~v021 , # psu
        strata = strataformula.w ,
        data = x.w ,
        weights = ~ weight.w
      )
    # household member
    x.hm = x %>% filter( !is.na(weight.hm), !is.na(hv021) )
    svy.hm <-
      svydesign(
        ~hv021 , # psu
        strata = strataformula.hm ,
        data = x.hm ,
        weights = ~ weight.hm
      )
  } else {
    svy.h = NULL
    svy.c = NULL
    svy.w = NULL
    svy.hm = NULL
  } # end if (design)
  # flag which requested variables made it into the final dataset
  # (the original grepped each column name against itself, always TRUE)
  vars_x = vars_present( vars, names(x) )
  if (dataset){
    return( list(svy.h, svy.c, svy.w, svy.hm, vars_x, x))
  } else {
    return( list(svy.h, svy.c, svy.w, svy.hm, vars_x))
  }
}
# test / not run
# svy = load_survey_object(printout = T)
# svy.h = svy[[1]]
# svy.c = svy[[2]]
# varlist = svy[[3]]
# x = svy[[4]]
# svytotal( ~one , svy.c ) # ????
# svyby( ~one , ~one , svy.c , unwtd.count )
# svy = load_survey_object()
# svy.h = svy[[1]]
# svymean( ~ hml32 , svy.h , na.rm = TRUE )
# svyby( ~ hml32 , ~ hv105.grp.c, svy.h, svymean, na.rm = TRUE )
# svytotal( ~one , svy.h ) # ????
# svyby( ~one , ~one , svy.h , unwtd.count )
# Mortality (b5 is actually survived, mortality is 1- result below)
# svyby( ~ one , ~ b5 , svy.c , unwtd.count )
# svymean(~ b5, svy.c, na.rm = TRUE)
# svytable(~ b5 + v025, svy.c, round=TRUE)
# svyby( ~ b5 , ~ v025, svy.c , svymean , na.rm = TRUE )
# svyby( ~ b5 , ~ b78, svy.c , svymean , na.rm = TRUE )
|
#' Wrapper function to run edgeR analysis of summarized reads from RNA-seq experiment
#'
#' This function allows you to run a statistical analysis of an RNA-seq experiment
#' using Bioconductor package
#' \href{https://bioconductor.org/packages/release/bioc/html/edgeR.html}{edgeR}.
#' Inputs are tables of summarized reads generated using
#' \href{http://bioinf.wehi.edu.au/featureCounts/}{featureCounts} (part of the lab's
#' RNA-seq analysis pipeline running on NYU's HPC) for each sample in the experiment.
#' The presence or absence of biological replicates is automatically inferred from the
#' \code{conditionNames} argument: conditions aggregate samples as replicates. If
#' you provide as many different \code{conditionNames} as there are \code{sampleNames}
#' each sample is taken as a single condition (no replicates).
#' The output includes tables of CPM, TPM and, if included, differential expression
#' (DE) analysis for selected pairs of samples (see "Value" section below for more
#' details). \cr \cr
#' \strong{Running without replicates:} This function allows you to run the analysis on
#' experiments without biological replicate libraries. While the calculation of CPM and
#' TPM has no requirement for replicates, you should obviously avoid doing DE analysis
#' in the absence of replicates. The statistical model used by
#' \href{https://bioconductor.org/packages/release/bioc/html/edgeR.html}{edgeR}
#' to perform DE analysis relies on biological replicates to estimate biological
#' variability. While there is no satisfactory alternative to actual biological replication,
#' you can still run a preliminary DE analysis without replicates. The approach
#' taken here relies on providing an estimate of biological variation that is reasonable
#' for RNA-seq experiments using \emph{S. cerevisiae}. This is a better alternative
#' to simply assuming that biological variability is absent. Typical values for
#' the common BCV (square root-dispersion) for datasets arising from well-controlled
#' experiments are 0.1 for data on genetically identical model organisms, so that
#' value is used here.
#' @param pathToFiles A list of strings corresponding to the full path to the featureCounts
#' output files for all samples in the experiment. No default.
#' @param sampleNames A list of strings corresponding to the names of all samples
#' in the experiment in the same order as the files in \code{pathToFiles}. No default.
#' @param conditionNames A list of strings corresponding to the experimental groups/conditions
#' for each sample/library. Will be the input to edgeR's \code{\link[edgeR]{DGEList}}
#' constructor function and will define replicate groups, if any. No default.
#' @param batchNames Optional list of strings corresponding to the experimental batch for each
#' sample/library. Will be part of the design matrix for differential expression testing and
#' will define batch groups, if any. Defaults to \code{NULL} (all samples come from the same batch).
#' @param pairwiseDE Logical indicating whether to test differential expression (DE).
#' Defaults to \code{FALSE}.
#' @param outputFilePrefix Optional string to be added as prefix to output file names.
#' No default.
#' @return The output includes several tables saved as .csv files in a directory
#' named "RNA-seq_analysis" written to the working directory:
#' \enumerate{
#' \item \strong{CPM:} Compositional bias-normalized \strong{C}ounts \strong{P}er
#' \strong{M}illion (output of edgeR's \code{\link[edgeR]{cpm}} function). Useful
#' for intersample feature expression comparison (not feature length-normalized).
#' \item \strong{TPM:} Compositional bias and feature length-normalized \strong{T}ranscripts
#' \strong{P}er \strong{M}illion. Useful for within-sample feature expression comparison.
#' \item \strong{DE:} A \strong{D}ifferential \strong{E}xpression table comparing the first sample
#' to all others. Includes log2 fold change (\code{logFC}), average log2 counts per million
#' (\code{logCPM}), two-sided p-value (\code{PValue}) and false discovery rate (\code{FDR}).\cr
#' \cr\strong{Note:} Fold change differences (\code{logFC}) between samples not directly compared
#' in the table can be obtained by subtracting their reported \code{logFC} (to the first sample).
#' For example, if the first sample is \code{sample1} and we want the \code{logFC} between
#' \code{sample2} and \code{sample3}, simply calculate the difference:\cr
#' \cr\code{logFC.sample3_vs_sample2} = \code{logFC.sample3} - \code{logFC.sample2}
#' }
#' A \strong{M}ulti-\strong{D}imensional \strong{S}caling plot of all samples is also
#' saved in the output directory as a .pdf file.
#' @examples
#' \dontrun{
#' rna_seq_analysis(pathToFiles = list('AH119-2h_featureCounts.txt', 'AH119-3h_featureCounts.txt',
#' 'AH8104-2h_featureCounts.txt', 'AH8104-3h_featureCounts.txt'),
#' sampleNames = list('AH119_2h', 'AH119_3h', 'AH8104_2h', 'AH8104_3h'),
#' conditionNames = list('WT_2h', 'WT_3h', 'dot1_2h', 'dot1_3h'),
#' outputFilePrefix = 'dot1_noReplicates')
#'
#' rna_seq_analysis(pathToFiles = list('AH119-2h_featureCounts.txt', 'AH119-3h_featureCounts.txt',
#' 'AH8104-2h_featureCounts.txt', 'AH8104-3h_featureCounts.txt'),
#' sampleNames = list('AH119_2h', 'AH119_3h', 'AH8104_2h', 'AH8104_3h'),
#' conditionNames = list('WT_2h', 'WT_3h', 'dot1_2h', 'dot1_3h'),
#' pairwiseDE = TRUE, outputFilePrefix = 'dot1_noReplicates')
#'
#' rna_seq_analysis(pathToFiles = list('AH119-A_featureCounts.txt', 'AH119-B_featureCounts.txt',
#' 'AH8104-A_featureCounts.txt', 'AH8104-B_featureCounts.txt'),
#' sampleNames = list('AH119_A', 'AH119_B', 'AH8104_A', 'AH8104_B'),
#' conditionNames = list('WT', 'WT', 'dot1', 'dot1'),
#'                  batchNames = list('batch1', 'batch2', 'batch1', 'batch2'),
#' pairwiseDE = TRUE)
#' }
#' @export
rna_seq_analysis <- function(pathToFiles, sampleNames, conditionNames, batchNames = NULL,
                             pairwiseDE = FALSE, outputFilePrefix){
  ptm <- proc.time()
  # missing() only works on the promise itself, so capture the flag up front
  # for use inside the path-building helper below
  hasPrefix <- !missing(outputFilePrefix)
  # Helper: build an output file path inside "RNA-seq_analysis/", adding the
  # optional user-supplied prefix (replaces four copy-pasted if/else blocks)
  outPath <- function(fileName) {
    if (hasPrefix) {
      paste0("RNA-seq_analysis/", outputFilePrefix, "_", fileName)
    } else {
      paste0("RNA-seq_analysis/", fileName)
    }
  }
  #----------------------------------------------------------------------------#
  #-------------------------- Preliminary checks ------------------------------#
  if (file.exists('RNA-seq_analysis')) {
    stop('ERROR: A folder called "RNA-seq_analysis" already exists in the current working directory.\n',
         'Please remove it and repeat function call.', call. = FALSE)
  }
  if (!all(unlist(lapply(pathToFiles, file.exists)), na.rm = FALSE)) {
    stop('ERROR: Could not open one or more featureCount files.',
         'Please check the provided paths to the files.', call. = FALSE)
  }
  # Check for dplyr and edgeR (optional deps checked with requireNamespace)
  if (!requireNamespace("dplyr", quietly = TRUE)) {
    stop("R package 'dplyr' is required. Please install it.\n",
         "install.packages('dplyr')", call. = FALSE)
  }
  if (!requireNamespace("edgeR", quietly = TRUE)) {
    stop("Bioconductor package 'edgeR' is required. Please install it:\n",
         "## try http:// if https:// URLs are not supported",
         "source('https://bioconductor.org/biocLite.R')",
         "biocLite('edgeR')", call. = FALSE)
  }
  # Create directory in current working directory to save output
  dir.create('RNA-seq_analysis')
  message('Created output directory "RNA-seq_analysis"')
  #----------------------------------------------------------------------------#
  #----------------------- Generate DGEList object ----------------------------#
  # Load count data and create table of all samples
  counts <- lapply(pathToFiles, read.table, header = TRUE)
  # Rename the feature-counts column (column 7 of featureCounts output)
  # with the supplied sample names
  for(i in seq_along(counts)){
    colnames(counts[[i]])[7] <- sampleNames[i]
  }
  # Reduce list to data frame
  counts <- Reduce(dplyr::inner_join, counts)
  # Create DGEList object
  y <- edgeR::DGEList(counts = counts[, unlist(sampleNames)],
                      group = unlist(conditionNames),
                      genes = data.frame(Gene = counts$Geneid,
                                         Length = counts$Length))
  message('Created DGEList object:')
  print(y$samples[, 1:2])
  #----------------------------------------------------------------------------#
  #----------------------------- CPM and TPM ----------------------------------#
  # Filter out genes with low counts (low or no expression) based on CPMs,
  # in order to account for differences in library depth
  # Use a low threshold (cpm of 0.5)
  message('Filtering out features with counts below threshold (cpm < 0.5):')
  # Keep genes expressed in at least one sample when there are no replicates,
  # or in at least two samples when there are
  min_samples <- if (length(unique(conditionNames)) == length(unique(sampleNames))) 1 else 2
  keep <- rowSums(edgeR::cpm(y) > 0.5) >= min_samples
  # The row-filtering idiom in the edgeR manual no longer always works;
  # try it and fall back on manual subsetting if it fails.
  # (Original used exists('y_filt'), which wrongly matches a global y_filt.)
  y_filt <- try(y[keep, , keep.lib.sizes = FALSE], silent = TRUE)
  if (inherits(y_filt, "try-error")) {
    y_filt <- y
    y_filt$counts <- y$counts[keep, ]
    y_filt$genes <- y$genes[keep, ]
  }
  # Calculate updated lib sizes (differences should be minimal):
  y_filt$samples$lib.size <- colSums(y_filt$counts)
  message(nrow(y_filt$counts), ' features kept of an original total of ',
          nrow(y$counts))
  dropped <- round((nrow(y$counts) - nrow(y_filt$counts)) / nrow(y$counts) * 100, 1)
  message(' (', dropped, '% filtered out)')
  # Calculate normalization factors to scale the raw library sizes
  y_filt <- edgeR::calcNormFactors(y_filt)
  message('Calculated normalization factors using trimmed mean of M-values (TMM) method.')
  # Save MDS plot
  pdf(outPath("MDS_plot.pdf"))
  limma::plotMDS(y_filt)
  dev.off()
  message('Plotted multi-dimensional scaling (MDS) and saved to .pdf file.')
  # Calculate CPM (compositional-bias normalized) and write to file
  cpm_edgeR <- edgeR::cpm(y_filt)
  rownames(cpm_edgeR) <- y_filt$genes$Gene
  write.csv(cpm_edgeR, outPath("edgeR_cpm.csv"), row.names = TRUE)
  message('Calculated CPM and saved to file.')
  # Calculate TPM (adds feature-length normalization on top of CPM)
  calcTPM <- function(inputDGEList, gene.length) {
    x <- as.matrix(inputDGEList)
    len.norm.lib.size <- colSums(x / gene.length)
    tpm_pre_len_norm <- t(t(x) / len.norm.lib.size) * 1e06
    rownames(tpm_pre_len_norm) <- inputDGEList$genes$Gene
    return(tpm_pre_len_norm / gene.length)
  }
  tpm <- calcTPM(y_filt, gene.length = y_filt$genes$Length)
  ### Write TPMs to file
  rownames(tpm) <- y_filt$genes$Gene
  write.csv(tpm, outPath("tpm.csv"), row.names = TRUE)
  message('Calculated TPM and saved to file.')
  #----------------------------------------------------------------------------#
  #----------------------------- DE analysis ----------------------------------#
  # Run only when requested
  if(pairwiseDE){
    de <- de_analysis(DGEListObject = y_filt, sampleNames, conditionNames,
                      batchNames, outputFilePrefix)
    write.csv(de$table, outPath("de.csv"), row.names = TRUE)
    message('Calculated DE and saved to file.')
  }
  message("... ... ...")
  message("(Output files are in directory \"RNA-seq_analysis\").")
  message("Completed in ", round((proc.time()[3] - ptm[3]), 1), "sec.")
}
### Helper function to perform differential expression analyses
# Note: The experimental design is parametrized with a one-way layout.
# Must not be used if this is not appropriate for the analyzed experiment
# Helper: perform the differential-expression analysis for rna_seq_analysis().
# Note: The experimental design is parametrized with a one-way layout
# (optionally with an additive batch effect). Must not be used if this is
# not appropriate for the analyzed experiment.
#
# Args:
#   DGEListObject:  filtered, TMM-normalized edgeR::DGEList
#   sampleNames:    list of sample names (used only to detect replicates)
#   conditionNames: list of condition labels, one per sample
#   batchNames:     optional list of batch labels, one per sample, or NULL
#   outputFilePrefix: unused here; kept for interface compatibility with
#                     the caller
# Returns: an edgeR::TopTags table covering all genes (n = Inf).
de_analysis <- function(DGEListObject, sampleNames, conditionNames,
                        batchNames, outputFilePrefix){
  message('Running DE analyses:')
  # Design matrix
  group <- factor(unlist(conditionNames))
  if(!is.null(batchNames)){
    batch <- factor(unlist(batchNames))
    design <- model.matrix(~batch+group)
    colnames(design)[2:ncol(design)] <- c(tail(levels(batch), -1),
                                          tail(levels(group), -1))
    # first condition coefficient comes after the batch coefficients
    # (set here, where `batch` is guaranteed to exist — the original used a
    # scalar ifelse() that referenced `batch` even when batchNames was NULL)
    startCondition <- length(levels(batch)) + 1
  } else {
    design <- model.matrix(~group)
    colnames(design)[2:ncol(design)] <- tail(levels(group), -1)
    startCondition <- 2
  }
  ### Test DE:
  # While the likelihood ratio test is a more obvious choice for inferences with
  # GLMs, the QL F-test is preferred as it provides more robust and reliable
  # error rate control when the number of replicates is small.
  # glmQLFit() can only be used when there are replicates, however.
  # In the absence of replicates, use glmFit() followed by glmLRT() (using the
  # typical value for BCV - square root-dispersion).
  if(length(unique(conditionNames)) == length(unique(sampleNames))){
    message('Performing DE analysis (without replicates!)')
    bcv <- 0.1  # typical BCV for genetically identical model organisms
    fit <- edgeR::glmFit(DGEListObject, design, dispersion = bcv^2)
    de_test <- edgeR::glmLRT(fit, coef = startCondition:ncol(design))
  } else {
    message('\nPerforming DE analysis with replicates:\n')
    y_filt <- edgeR::estimateDisp(DGEListObject, design, robust=TRUE)
    message('Estimated common dispersion: ', y_filt$common.dispersion)
    fit <- edgeR::glmQLFit(y_filt, design)
    de_test <- edgeR::glmQLFTest(fit, coef = startCondition:ncol(design))
  }
  # Get all genes (n=Inf)
  de_test <- edgeR::topTags(de_test, n=Inf)
  return(de_test)
}
| /R/rna_seq_analysis.R | permissive | GeT-TRiX/hwglabr | R | false | false | 14,626 | r | #' Wrapper function to run edgeR analysis of summarized reads from RNA-seq experiment
#'
#' This function allows you to run a statistical analysis of an RNA-seq experiment
#' using Bioconductor package
#' \href{https://bioconductor.org/packages/release/bioc/html/edgeR.html}{edgeR}.
#' Inputs are tables of summarized reads generated using
#' \href{http://bioinf.wehi.edu.au/featureCounts/}{featureCounts} (part of the lab's
#' RNA-seq analysis pipeline running on NYU's HPC) for each sample in the experiment.
#' The presence or absence of biological replicates is automatically inferred from the
#' \code{conditionNames} argument: conditions aggregate samples as replicates. If
#' you provide as many different \code{conditionNames} as there are \code{sampleNames}
#' each sample is taken as a single condition (no replicates).
#' The output includes tables of CPM, TPM and, if included, differential expression
#' (DE) analysis for selected pairs of samples (see "Value" section below for more
#' details). \cr \cr
#' \strong{Running without replicates:} This function allows you to run the analysis on
#' experiments without biological replicate libraries. While the calculation of CPM and
#' TPM has no requirement for replicates, you should obviously avoid doing DE analysis
#' in the absence of replicates. The statistical model used by
#' \href{https://bioconductor.org/packages/release/bioc/html/edgeR.html}{edgeR}
#' to perform DE analysis relies on biological replicates to estimate biological
#' variability. While there is no satisfactory alternative to actual biological replication,
#' you can still run a preliminary DE analysis without replicates. The approach
#' taken here relies on providing an estimate of biological variation that is reasonable
#' for RNA-seq experiments using \emph{S. cerevisiae}. This is a better alternative
#' to simply assuming that biological variability is absent. Typical values for
#' the common BCV (square root-dispersion) for datasets arising from well-controlled
#' experiments are 0.1 for data on genetically identical model organisms, so that
#' value is used here.
#' @param pathToFiles A list of strings corresponding to the full path to the featureCounts
#' output files for all samples in the experiment. No default.
#' @param sampleNames A list of strings corresponding to the names of all samples
#' in the experiment in the same order as the files in \code{pathToFiles}. No default.
#' @param conditionNames A list of strings corresponding to the experimental groups/conditions
#' for each sample/library. Will be the input to edgeR's \code{\link[edgeR]{DGEList}}
#' constructor function and will define replicate groups, if any. No default.
#' @param batchNames Optional list of strings corresponding to the experimental batch for each
#' sample/library. Will be part of the design matrix for differential expression testing and
#' will define batch groups, if any. Defaults to \code{NULL} (all samples come from the same batch).
#' @param pairwiseDE Logical indicating whether to test differential expression (DE).
#' Defaults to \code{FALSE}.
#' @param outputFilePrefix Optional string to be added as prefix to output file names.
#' No default.
#' @return The output includes several tables saved as .csv files in a directory
#' named "RNA-seq_analysis" written to the working directory:
#' \enumerate{
#' \item \strong{CPM:} Compositional bias-normalized \strong{C}ounts \strong{P}er
#' \strong{M}illion (output of edgeR's \code{\link[edgeR]{cpm}} function). Useful
#' for intersample feature expression comparison (not feature length-normalized).
#' \item \strong{TPM:} Compositional bias and feature length-normalized \strong{T}ranscripts
#' \strong{P}er \strong{M}illion. Useful for within-sample feature expression comparison.
#' \item \strong{DE:} A \strong{D}ifferential \strong{E}xpression table comparing the first sample
#' to all others. Includes log2 fold change (\code{logFC}), average log2 counts per million
#' (\code{logCPM}), two-sided p-value (\code{PValue}) and false discovery rate (\code{FDR}).\cr
#' \cr\strong{Note:} Fold change differences (\code{logFC}) between samples not directly compared
#' in the table can be obtained by subtracting their reported \code{logFC} (to the first sample).
#' For example, if the first sample is \code{sample1} and we want the \code{logFC} between
#' \code{sample2} and \code{sample3}, simply calculate the difference:\cr
#' \cr\code{logFC.sample3_vs_sample2} = \code{logFC.sample3} - \code{logFC.sample2}
#' }
#' A \strong{M}ulti-\strong{D}imensional \strong{S}caling plot of all samples is also
#' saved in the output directory as a .pdf file.
#' @examples
#' \dontrun{
#' rna_seq_analysis(pathToFiles = list('AH119-2h_featureCounts.txt', 'AH119-3h_featureCounts.txt',
#' 'AH8104-2h_featureCounts.txt', 'AH8104-3h_featureCounts.txt'),
#' sampleNames = list('AH119_2h', 'AH119_3h', 'AH8104_2h', 'AH8104_3h'),
#' conditionNames = list('WT_2h', 'WT_3h', 'dot1_2h', 'dot1_3h'),
#' outputFilePrefix = 'dot1_noReplicates')
#'
#' rna_seq_analysis(pathToFiles = list('AH119-2h_featureCounts.txt', 'AH119-3h_featureCounts.txt',
#' 'AH8104-2h_featureCounts.txt', 'AH8104-3h_featureCounts.txt'),
#' sampleNames = list('AH119_2h', 'AH119_3h', 'AH8104_2h', 'AH8104_3h'),
#' conditionNames = list('WT_2h', 'WT_3h', 'dot1_2h', 'dot1_3h'),
#' pairwiseDE = TRUE, outputFilePrefix = 'dot1_noReplicates')
#'
#' rna_seq_analysis(pathToFiles = list('AH119-A_featureCounts.txt', 'AH119-B_featureCounts.txt',
#' 'AH8104-A_featureCounts.txt', 'AH8104-B_featureCounts.txt'),
#' sampleNames = list('AH119_A', 'AH119_B', 'AH8104_A', 'AH8104_B'),
#' conditionNames = list('WT', 'WT', 'dot1', 'dot1'),
#' batchNames = list('batch1', 'batch2', 'batch1', 'batch2')
#' pairwiseDE = TRUE)
#' }
#' @export
rna_seq_analysis <- function(pathToFiles, sampleNames, conditionNames, batchNames = NULL,
                             pairwiseDE = FALSE, outputFilePrefix){
  ptm <- proc.time()
  #----------------------------------------------------------------------------#
  #-------------------------- Preliminary checks ------------------------------#
  if (file.exists('RNA-seq_analysis')) {
    stop('ERROR: A folder called "RNA-seq_analysis" already exists in the current working directory.\n',
         'Please remove it and repeat function call.', call. = FALSE)
  }
  if (!all(unlist(lapply(pathToFiles, file.exists)), na.rm = FALSE)) {
    stop('ERROR: Could not open one or more featureCount files.',
         'Please check the provided paths to the files.', call. = FALSE)
  }
  # Check for dplyr and edgeR (and load edgeR)
  if (!requireNamespace("dplyr", quietly = TRUE)) {
    stop("R package 'dplyr' is required. Please install it.\n",
         "install.packages('dplyr')", call. = FALSE)
  }
  if (!requireNamespace("edgeR", quietly = TRUE)) {
    stop("Bioconductor package 'edgeR' is required. Please install it:\n",
         "## try http:// if https:// URLs are not supported",
         "source('https://bioconductor.org/biocLite.R')",
         "biocLite('edgeR')", call. = FALSE)
  }
  # Create directory in current working directory to save output
  dir.create('RNA-seq_analysis')
  message('Created output directory "RNA-seq_analysis"')
  #----------------------------------------------------------------------------#
  #----------------------- Generate DGEList object ----------------------------#
  # Load count data and create table of all samples
  # Load files (featureCounts output has a header line)
  counts <- lapply(pathToFiles, read.table, header = TRUE)
  # Rename feature counts column (column 7 in featureCounts output) with
  # supplied sample names
  for(i in seq_along(counts)){
    colnames(counts[[i]])[7] <- sampleNames[i]
  }
  # Reduce list to a single data frame, joining on the shared annotation columns
  counts <- Reduce(dplyr::inner_join, counts)
  # Create DGEList object
  y <- edgeR::DGEList(counts = counts[, unlist(sampleNames)],
                      group = unlist(conditionNames),
                      genes = data.frame(Gene = counts$Geneid,
                                         Length = counts$Length))
  message('Created DGEList object:')
  print(y$samples[, 1:2])
  #----------------------------------------------------------------------------#
  #----------------------------- CPM and TPM ----------------------------------#
  # Filter out genes with low counts (low or no expression) based on CPMs,
  # in order to account for differences in library depth
  # Use a low threshold (cpm of 0.5)
  message('Filtering out features with counts below threshold (cpm < 0.5):')
  # If there are no replicates, keep all genes expressed in at least one sample
  # If there are replicates, keep all genes expressed in at least two samples
  # The code to filter rows in the edgeR manual no longer seems to work;
  # try it and fallback on an alternative in case it doesn't work
  try(
    if(length(unique(conditionNames)) == length(unique(sampleNames))){
      keep <- rowSums(edgeR::cpm(y) > 0.5) >= 1
      y_filt <- y[keep, , keep.lib.sizes = FALSE]
    } else {
      keep <- rowSums(edgeR::cpm(y) > 0.5) >= 2
      y_filt <- y[keep, , keep.lib.sizes = FALSE]
    },
    silent = TRUE)
  # Fallback: subset counts and gene annotation manually.
  # NOTE(review): exists('y_filt') also searches enclosing environments, so a
  # global y_filt could mask a failed try() above -- confirm acceptable.
  if(!exists('y_filt')){
    y_filt <- y
    y_filt$counts <- y$counts[keep, ]
    y_filt$genes <- y$genes[keep, ]
  }
  # Calculate updated lib sizes (differences should be minimal):
  y_filt$samples$lib.size <- colSums(y_filt$counts)
  message(nrow(y_filt$counts), ' features kept of an original total of ',
          nrow(y$counts))
  dropped <- round((nrow(y$counts) - nrow(y_filt$counts)) / nrow(y$counts) * 100, 1)
  message(' (', dropped, '% filtered out)')
  # Calculate normalization factors to scale the raw library sizes
  y_filt <- edgeR::calcNormFactors(y_filt)
  message('Calculated normalization factors using trimmed mean of M-values (TMM) method.')
  y_filt$samples
  # Save MDS plot
  if(missing(outputFilePrefix)){
    pdf(paste0("RNA-seq_analysis/", "MDS_plot.pdf"))
  } else{
    pdf(paste0("RNA-seq_analysis/", outputFilePrefix, "_MDS_plot.pdf"))
  }
  limma::plotMDS(y_filt)
  dev.off()
  message('Plotted multi-dimensional scaling (MDS) and saved to .pdf file.')
  # Calculate CPM and write to file
  cpm_edgeR <- edgeR::cpm(y_filt)
  rownames(cpm_edgeR) <- y_filt$genes$Gene
  # Write to file
  if(missing(outputFilePrefix)){
    write.csv(cpm_edgeR, paste0("RNA-seq_analysis/", "edgeR_cpm.csv"), row.names = TRUE)
  } else {
    write.csv(cpm_edgeR, paste0("RNA-seq_analysis/", outputFilePrefix, "_edgeR_cpm.csv"),
              row.names = TRUE)
  }
  message('Calculated CPM and saved to file.')
  # Calculate TPM
  # Helper: TPM = counts scaled by feature length, then by the length-normalized
  # library size (per-sample), times 1e6
  calcTPM <- function(inputDGEList, gene.length) {
    x <- as.matrix(inputDGEList)
    len.norm.lib.size <- colSums(x / gene.length)
    tpm_pre_len_norm <- t(t(x) / len.norm.lib.size) * 1e06
    rownames(tpm_pre_len_norm) <- inputDGEList$genes$Gene
    return(tpm_pre_len_norm / gene.length)
  }
  tpm <- calcTPM(y_filt, gene.length = y_filt$genes$Length)
  ### Write TPMs to file
  # Prep table
  rownames(tpm) <- y_filt$genes$Gene
  # Write to file
  if(missing(outputFilePrefix)){
    write.csv(tpm, paste0("RNA-seq_analysis/", "tpm.csv"), row.names = TRUE)
  } else {
    write.csv(tpm, paste0("RNA-seq_analysis/", outputFilePrefix, "_tpm.csv"), row.names = TRUE)
  }
  message('Calculated TPM and saved to file.')
  #----------------------------------------------------------------------------#
  #----------------------------- DE analysis ----------------------------------#
  # Run only when requested
  if(pairwiseDE){
    de <- de_analysis(DGEListObject=y_filt, sampleNames, conditionNames,
                      batchNames, outputFilePrefix)
    if(missing(outputFilePrefix)){
      write.csv(de$table, paste0("RNA-seq_analysis/", "de.csv"), row.names = TRUE)
    } else {
      write.csv(de$table, paste0("RNA-seq_analysis/", outputFilePrefix, "_de.csv"),
                row.names = TRUE)
    }
    message('Calculated DE and saved to file.')
  }
  message("... ... ...")
  message("(Output files are in directory \"RNA-seq_analysis\").")
  message("Completed in ", round((proc.time()[3] - ptm[3]), 1), "sec.")
}
### Helper function to perform differential expression analyses
# Note: The experimental design is parametrized with a one-way layout.
# Must not be used if this is not appropriate for the analyzed experiment
de_analysis <- function(DGEListObject, sampleNames, conditionNames,
                        batchNames, outputFilePrefix){
  # Arguments:
  #   DGEListObject: filtered, TMM-normalized edgeR DGEList
  #   sampleNames / conditionNames: per-library names and conditions; the
  #     presence of replicates is inferred when conditions repeat across samples
  #   batchNames: optional per-library batch labels, or NULL
  #   outputFilePrefix: accepted for interface compatibility; unused here
  # Returns: an edgeR TopTags object with DE statistics for all genes.
  message('Running DE analyses:')
  # Design matrix: intercept + (optional) batch coefficients + condition
  # coefficients, i.e. a one-way layout with an additive batch term.
  # The index of the first condition coefficient is computed once here
  # (previously duplicated in both test branches via scalar ifelse()).
  group <- factor(unlist(conditionNames))
  if(!is.null(batchNames)){
    batch <- factor(unlist(batchNames))
    design <- model.matrix(~batch+group)
    colnames(design)[2:ncol(design)] <- c(tail(levels(batch), -1),
                                          tail(levels(group), -1))
    # First condition coefficient follows the intercept plus the
    # (nlevels(batch) - 1) batch coefficients.
    startCondition <- length(levels(batch)) + 1
  } else {
    design <- model.matrix(~group)
    colnames(design)[2:ncol(design)] <- tail(levels(group), -1)
    startCondition <- 2
  }
  ### Test DE:
  # While the likelihood ratio test is a more obvious choice for inferences with
  # GLMs, the QL F-test is preferred as it provides more robust and reliable
  # error rate control when the number of replicates is small.
  # glmQLFit() can only be used when there are replicates, however.
  # In the absence of replicates, use glmFit() followed by glmLRT() (using the
  # typical value for BCV - square root-dispersion).
  # Estimate dispersion and fit model
  if(length(unique(conditionNames)) == length(unique(sampleNames))){
    # One condition per sample => no replicates: fix BCV at 0.1, a typical
    # value for data on genetically identical model organisms.
    message('Performing DE analysis (without replicates!)')
    bcv <- 0.1
    fit <- edgeR::glmFit(DGEListObject, design, dispersion = bcv^2)
    de_test <- edgeR::glmLRT(fit, coef=startCondition:ncol(design))
  } else {
    message('\nPerforming DE analysis with replicates:\n')
    y_filt <- edgeR::estimateDisp(DGEListObject, design, robust=TRUE)
    message('Estimated common dispersion: ', y_filt$common.dispersion)
    fit <- edgeR::glmQLFit(y_filt, design)
    de_test <- edgeR::glmQLFTest(fit, coef=startCondition:ncol(design))
  }
  # Get all genes (n=Inf)
  de_test <- edgeR::topTags(de_test, n=Inf)
  return(de_test)
}
|
## Function that uses scoping lexical to maintain global state in x and m for matrix generated
# Build a matrix wrapper whose inverse can be cached by cacheSolve().
# Returns a list of four closures sharing state via lexical scoping:
#   set(y)       -- replace the stored matrix and invalidate the cache
#   get()        -- return the stored matrix
#   setmatrix(v) -- store a computed inverse in the cache
#   getmatrix()  -- return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setmatrix() stores a value.
  m <- NULL

  # Replace the wrapped matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }

  get <- function() {
    x
  }

  # Argument name kept as `solve` for interface compatibility (it shadows
  # base::solve inside this closure only).
  setmatrix <- function(solve) {
    m <<- solve
  }

  getmatrix <- function() {
    m
  }

  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## Function that computes inverse of the matrix if the operation was not executed before over the input matrix
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object),
# computing it at most once: subsequent calls reuse the cached result.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x = matrix(), ...) {
  # Fast path: an inverse was already computed and cached.
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
}
| /cachematrix.R | no_license | hadgithub/Coursera | R | false | false | 1,210 | r | ## Function that uses scoping lexical to maintain global state in x and m for matrix generated
# Duplicate copy of makeCacheMatrix() defined earlier in this file.
# Wraps a matrix together with a cache slot for its inverse, exposed
# through four closures that share state via lexical scoping.
makeCacheMatrix <- function(x = matrix()) {
  m<-NULL
  set<-function(y){
    x<<-y ## Maintains the state of x in the enclosing environment
    m<<-NULL ## Invalidates the cached inverse in the enclosing environment
  }
  get<-function() x
  setmatrix<-function(solve) m <<- solve ## Stores a computed inverse in the cache
  getmatrix<-function() m ## Returns the cached inverse (NULL if none)
  ## Returns a list of operations
  list(set=set, get=get,
       setmatrix=setmatrix,
       getmatrix=getmatrix)
}
## Function that computes inverse of the matrix if the operation was not executed before over the input matrix
# Duplicate copy of cacheSolve() defined earlier in this file.
# Computes the inverse of the matrix wrapped by `x` only if it was not
# computed before; otherwise returns the cached result.
cacheSolve <- function(x=matrix(), ...) {
  m <- x$getmatrix() ## Reads the cached inverse from the wrapper's environment
  ## If the inverse was computed before, return the cached result
  if(!is.null(m)){
    message("getting cached data")
    return(m)
  }
  ## Otherwise compute the inverse of the wrapped matrix and cache it
  data1 <- x$get()
  m <- solve(data1, ...)
  x$setmatrix(m)
  m
}
|
#'
#' Data Import
#'
#' Importing data is a _lazy_ parse of the data. It adds an extra step so that a user may specify a variety of options
#' including a header file, separator type, and in the future column type. Additionally, the import phase provides
#' feedback on whether or not a folder or group of files may be imported together.
#' API ENDPOINT
# Relative REST endpoint used by the import functions below.
.h2o.__IMPORT <- "ImportFiles.json" # e.g. ImportFiles.json?path=/path/to/data
#'
#' Import a Folder of Files
#'
#' Import an entire directory of files. If the given path is relative, then it will be relative to the start location
#' of the H2O instance. The default behavior is to pass-through to the parse phase automatically.
# Import every file under `path` (optionally filtered by `pattern`) into H2O.
# With parse = TRUE the imported files are parsed together into one dataset;
# otherwise a single H2ORawData handle (one file) or a list of handles
# is returned.
h2o.importFolder <- function(object, path, pattern = "", key = "", parse = TRUE, header, sep = "", col.names) {
  # --- argument validation (note: class(.) != "cls" comparison is kept;
  # inherits() would be the more robust idiom) ---
  if(class(object) != "H2OClient") stop("object must be of class H2OClient")
  if(!is.character(path)) stop("path must be of class character")
  if(nchar(path) == 0) stop("path must be a non-empty string")
  if(!is.character(pattern)) stop("pattern must be of class character")
  if(!is.character(key)) stop("key must be of class character")
  if(nchar(key) > 0 && regexpr("^[a-zA-Z_][a-zA-Z0-9_.]*$", key)[1] == -1)
    stop("key must match the regular expression '^[a-zA-Z_][a-zA-Z0-9_.]*$'")
  if(!is.logical(parse)) stop("parse must be of class logical")
  # Ask the H2O server to import the path; res lists successes and failures.
  res <- .h2o.__remoteSend(object, 'ImportFiles.json', path=path)
  if(length(res$fails) > 0) {
    for(i in 1:length(res$fails))
      cat(res$fails[[i]], "failed to import")
  }
  ret <- NULL
  # Return only the files that successfully imported
  if(length(res$files) > 0) {
    if(parse) {
      # Strip a trailing separator, then build a wildcard source key that
      # matches all imported files when more than one key came back.
      if(substr(path, nchar(path), nchar(path)) == .Platform$file.sep)
        path <- substr(path, 1, nchar(path)-1)
      regPath <- paste(path, pattern, sep=.Platform$file.sep)
      srcKey <- ifelse(length(res$keys) == 1, res$keys[[1]], paste("*", regPath, "*", sep=""))
      rawData <- new("H2ORawData", h2o=object, key=srcKey)
      # NOTE(review): writes `dd` into the global environment -- looks like a
      # leftover debugging aid; confirm before relying on (or removing) it.
      assign("dd", rawData, globalenv())
      ret <- h2o.parseRaw(data=rawData, key=key, header=header, sep=sep, col.names=col.names)
    } else {
      # No parsing requested: hand back the raw data handle(s).
      myData = lapply(res$keys, function(x) { new("H2ORawData", h2o=object, key=x) })
      if(length(res$keys) == 1) ret <- myData[[1]] else ret <- myData
    }
  } else stop("All files failed to import!")
  # Remove the server-side NFS bookkeeping keys created by the import.
  # (%p0% is defined elsewhere in the package; presumably paste0-style
  # concatenation -- confirm against its definition.)
  path <- gsub("//", "/", path)
  h2o.rm(object, "nfs:/" %p0% path)
  h2o.rm(object, "nfs://private" %p0% path)
  ret
}
#'
#' Import A File
#'
#' Import a single file. If the given path is relative, then it will be relative to the start location
#' of the H2O instance. The default behavior is to pass-through to the parse phase automatically.
# Import a single file: thin wrapper delegating to h2o.importFolder with an
# empty pattern. All other arguments are forwarded unchanged.
h2o.importFile <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names) {
  h2o.importFolder(object = object, path = path, pattern = "", key = key,
                   parse = parse, header = header, sep = sep,
                   col.names = col.names)
}
#'
#' Import A URL
#'
#' Import a data source from a URL.
# Deprecated: import a data source from a URL. Prints a deprecation notice,
# then delegates to h2o.importFile.
h2o.importURL <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names) {
  print("This function has been deprecated in FluidVecs. In the future, please use h2o.importFile with a http:// prefix instead.")
  h2o.importFile(object = object, path = path, key = key, parse = parse,
                 header = header, sep = sep, col.names = col.names)
}
#'
#' Import HDFS
#'
#' Import from an HDFS location.
# Deprecated: import from an HDFS location. Prints a deprecation notice,
# then delegates to h2o.importFolder.
h2o.importHDFS <- function(object, path, pattern = "", key = "", parse = TRUE, header, sep = "", col.names) {
  print("This function has been deprecated in FluidVecs. In the future, please use h2o.importFolder with a hdfs:// prefix instead.")
  h2o.importFolder(object = object, path = path, pattern = pattern, key = key,
                   parse = parse, header = header, sep = sep,
                   col.names = col.names)
}
#'
#' Upload Data
#'
#' Upload local files to the H2O instance.
# Upload a local file to the H2O instance via HTTP POST, returning either the
# parsed dataset (parse = TRUE) or the raw H2ORawData handle.
# Fixes: inconsistent `=` assignments, and dead assignments (`temp`,
# `parsedData`) -- the latter made the parsed result return invisibly,
# unlike the raw-data branch.
h2o.uploadFile <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names, silent = TRUE) {
  # --- argument validation -------------------------------------------------
  if(class(object) != "H2OClient") stop("object must be of class H2OClient")
  if(!is.character(path)) stop("path must be of class character")
  if(nchar(path) == 0) stop("path must be a non-empty string")
  if(!is.character(key)) stop("key must be of class character")
  if(nchar(key) > 0 && regexpr("^[a-zA-Z_][a-zA-Z0-9_.]*$", key)[1] == -1)
    stop("key must match the regular expression '^[a-zA-Z_][a-zA-Z0-9_.]*$'")
  if(!is.logical(parse)) stop("parse must be of class logical")
  if(!is.logical(silent)) stop("silent must be of class logical")
  # Build the PostFile endpoint URL and log the request if logging is active.
  url <- paste("http://", object@ip, ":", object@port, "/2/PostFile.json", sep="")
  url <- paste(url, "?key=", URLencode(path), sep="")
  if(file.exists(h2o.getLogPath("Command"))) .h2o.__logIt(url, NULL, "Command")
  # POST the file contents; the HTTP response body itself is not used.
  if(silent)
    postForm(url, .params = list(fileData = fileUpload(normalizePath(path))))
  else
    postForm(url, .params = list(fileData = fileUpload(normalizePath(path))), .opts = list(verbose = TRUE))
  rawData <- new("H2ORawData", h2o=object, key=path)
  if(parse) {
    h2o.parseRaw(data=rawData, key=key, header=header, sep=sep, col.names=col.names)
  } else {
    rawData
  }
}
| /h2o-r/h2o-package/R/import.R | permissive | krishnatray/h2o-dev | R | false | false | 4,932 | r | #'
#' Data Import
#'
#' Importing data is a _lazy_ parse of the data. It adds an extra step so that a user may specify a variety of options
#' including a header file, separator type, and in the future column type. Additionally, the import phase provides
#' feedback on whether or not a folder or group of files may be imported together.
#' API ENDPOINT
# Relative REST endpoint used by the import functions below (duplicate copy).
.h2o.__IMPORT <- "ImportFiles.json" # e.g. ImportFiles.json?path=/path/to/data
#'
#' Import a Folder of Files
#'
#' Import an entire directory of files. If the given path is relative, then it will be relative to the start location
#' of the H2O instance. The default behavior is to pass-through to the parse phase automatically.
# Duplicate copy of h2o.importFolder() defined earlier in this file.
# Imports every file under `path` into H2O; parse = TRUE parses them into one
# dataset, otherwise raw H2ORawData handle(s) are returned.
h2o.importFolder <- function(object, path, pattern = "", key = "", parse = TRUE, header, sep = "", col.names) {
  # Argument validation.
  if(class(object) != "H2OClient") stop("object must be of class H2OClient")
  if(!is.character(path)) stop("path must be of class character")
  if(nchar(path) == 0) stop("path must be a non-empty string")
  if(!is.character(pattern)) stop("pattern must be of class character")
  if(!is.character(key)) stop("key must be of class character")
  if(nchar(key) > 0 && regexpr("^[a-zA-Z_][a-zA-Z0-9_.]*$", key)[1] == -1)
    stop("key must match the regular expression '^[a-zA-Z_][a-zA-Z0-9_.]*$'")
  if(!is.logical(parse)) stop("parse must be of class logical")
  # Ask the H2O server to import the path; report any failures.
  res <- .h2o.__remoteSend(object, 'ImportFiles.json', path=path)
  if(length(res$fails) > 0) {
    for(i in 1:length(res$fails))
      cat(res$fails[[i]], "failed to import")
  }
  ret <- NULL
  # Return only the files that successfully imported
  if(length(res$files) > 0) {
    if(parse) {
      # Build a wildcard source key covering all imported files.
      if(substr(path, nchar(path), nchar(path)) == .Platform$file.sep)
        path <- substr(path, 1, nchar(path)-1)
      regPath <- paste(path, pattern, sep=.Platform$file.sep)
      srcKey <- ifelse(length(res$keys) == 1, res$keys[[1]], paste("*", regPath, "*", sep=""))
      rawData <- new("H2ORawData", h2o=object, key=srcKey)
      # NOTE(review): global-environment write -- likely a debugging leftover.
      assign("dd", rawData, globalenv())
      ret <- h2o.parseRaw(data=rawData, key=key, header=header, sep=sep, col.names=col.names)
    } else {
      myData = lapply(res$keys, function(x) { new("H2ORawData", h2o=object, key=x) })
      if(length(res$keys) == 1) ret <- myData[[1]] else ret <- myData
    }
  } else stop("All files failed to import!")
  # Remove server-side NFS bookkeeping keys created by the import.
  path <- gsub("//", "/", path)
  h2o.rm(object, "nfs:/" %p0% path)
  h2o.rm(object, "nfs://private" %p0% path)
  ret
}
#'
#' Import A File
#'
#' Import a single file. If the given path is relative, then it will be relative to the start location
#' of the H2O instance. The default behavior is to pass-through to the parse phase automatically.
# Duplicate copy: import a single file by delegating to h2o.importFolder
# with an empty pattern.
h2o.importFile <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names) {
  h2o.importFolder(object, path, pattern = "", key, parse, header, sep, col.names)
}
#'
#' Import A URL
#'
#' Import a data source from a URL.
# Duplicate copy: deprecated URL import; prints a notice, then delegates to
# h2o.importFile.
h2o.importURL <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names) {
  print("This function has been deprecated in FluidVecs. In the future, please use h2o.importFile with a http:// prefix instead.")
  h2o.importFile(object, path, key, parse, header, sep, col.names)
}
#'
#' Import HDFS
#'
#' Import from an HDFS location.
# Duplicate copy: deprecated HDFS import; prints a notice, then delegates to
# h2o.importFolder.
h2o.importHDFS <- function(object, path, pattern = "", key = "", parse = TRUE, header, sep = "", col.names) {
  print("This function has been deprecated in FluidVecs. In the future, please use h2o.importFolder with a hdfs:// prefix instead.")
  h2o.importFolder(object, path, pattern, key, parse, header, sep, col.names)
}
#'
#' Upload Data
#'
#' Upload local files to the H2O instance.
# Duplicate copy of h2o.uploadFile() defined earlier in this file.
# Uploads a local file via HTTP POST; returns the parsed dataset when
# parse = TRUE, otherwise the raw H2ORawData handle.
h2o.uploadFile <- function(object, path, key = "", parse = TRUE, header, sep = "", col.names, silent = TRUE) {
  # Argument validation.
  if(class(object) != "H2OClient") stop("object must be of class H2OClient")
  if(!is.character(path)) stop("path must be of class character")
  if(nchar(path) == 0) stop("path must be a non-empty string")
  if(!is.character(key)) stop("key must be of class character")
  if(nchar(key) > 0 && regexpr("^[a-zA-Z_][a-zA-Z0-9_.]*$", key)[1] == -1)
    stop("key must match the regular expression '^[a-zA-Z_][a-zA-Z0-9_.]*$'")
  if(!is.logical(parse)) stop("parse must be of class logical")
  if(!is.logical(silent)) stop("silent must be of class logical")
  # Build the PostFile endpoint URL and log the request if logging is active.
  url = paste("http://", object@ip, ":", object@port, "/2/PostFile.json", sep="")
  url = paste(url, "?key=", URLencode(path), sep="")
  if(file.exists(h2o.getLogPath("Command"))) .h2o.__logIt(url, NULL, "Command")
  # POST the file; `temp` (the HTTP response) is never used afterwards.
  if(silent)
    temp = postForm(url, .params = list(fileData = fileUpload(normalizePath(path))))
  else
    temp = postForm(url, .params = list(fileData = fileUpload(normalizePath(path))), .opts = list(verbose = TRUE))
  rawData = new("H2ORawData", h2o=object, key=path)
  # NOTE(review): the assignment to parsedData makes the parse = TRUE result
  # return invisibly, unlike the rawData branch.
  if(parse) parsedData = h2o.parseRaw(data=rawData, key=key, header=header, sep=sep, col.names=col.names) else rawData
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qra_fit.R
\name{wis_loss}
\alias{wis_loss}
\title{Calculate wis_loss as a function of parameters used to construct a qra model}
\usage{
wis_loss(par, model_constructor, qfm_train, y_train, ...)
}
\arguments{
\item{par}{real-valued vector of parameters}
\item{model_constructor}{a function that accepts a real-valued vector of
parameters and returns a model of class qra_fit}
\item{qfm_train}{QuantileForecastMatrix with training set predictions from
component models}
\item{y_train}{numeric vector of responses for training set}
\item{...}{arguments passed on to the model_constructor}
}
\value{
scalar wis loss for given parameter values
}
\description{
Calculate wis_loss as a function of parameters used to construct a qra model
}
| /man/wis_loss.Rd | no_license | reichlab/covidEnsembles | R | false | true | 816 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qra_fit.R
\name{wis_loss}
\alias{wis_loss}
\title{Calculate wis_loss as a function of parameters used to construct a qra model}
\usage{
wis_loss(par, model_constructor, qfm_train, y_train, ...)
}
\arguments{
\item{par}{real-valued vector of parameters}
\item{model_constructor}{a function that accepts a real-valued vector of
parameters and returns a model of class qra_fit}
\item{qfm_train}{QuantileForecastMatrix with training set predictions from
component models}
\item{y_train}{numeric vector of responses for training set}
\item{...}{arguments passed on to the model_constructor}
}
\value{
scalar wis loss for given parameter values
}
\description{
Calculate wis_loss as a function of parameters used to construct a qra model
}
|
library(pcalg)
library(sparsebn)
library(CAM)
#library(CompareCausalNetworks)
library(MASS)
library(methods)
# Create directory `fold` unless it already exists (no-op otherwise).
mkdir <- function(fold) {
  already_there <- dir.exists(fold)
  if (!already_there) {
    dir.create(fold)
  }
}
#ccdr
#' @sparsebn is an R package for learning sparse Bayesian networks and other graphical models
#' from high-dimensional data via sparse regularization.
#' @estimate.dag for directed acyclic graphs (Bayesian networks).
#' @estimate.precision for undirected graphs (Markov random fields).
#' @estimate.covariance for covariance matrices.
# CCDr structure learning via sparsebn.
# `dat` has variables in rows and samples in columns (it is transposed for
# sparsebnData). Returns a plain adjacency matrix; when no usable
# regularization level is selected, an all-zero matrix is returned.
# Fix: the empty-graph fallback was hard-coded as 10 x 10; it is now sized
# to the input (identical for the original 10-variable use case).
ccdr <- function(dat){
  # Capture the number of variables before `dat` is shadowed below.
  p <- nrow(dat)
  dat <- sparsebnData(t(dat), type = "continuous")
  dags <- estimate.dag(dat)
  lambda <- select.parameter(dags, dat)
  if (!is.infinite(lambda)) {
    am <- get.adjacency.matrix(dags[[lambda]])
  } else {
    # No finite lambda selected: return an empty graph.
    am <- matrix(0, p, p)
  }
  am <- as(am, "matrix")
  return(am)
}
#GES
# Greedy Equivalence Search (pcalg) on data with variables in rows.
# Returns the estimated essential graph as a 0/1 matrix.
ges <- function(dat){
  obs_score <- new("GaussL0penObsScore", data = t(dat))
  fit <- pcalg::ges(obs_score)
  adj <- as(fit$essgraph, "matrix")
  # Coerce TRUE edge entries to numeric 1 (assumes a logical matrix from
  # pcalg -- confirm against the installed pcalg version).
  adj[adj] <- 1
  return(adj)
}
#GIES
# Greedy Interventional Equivalence Search (pcalg) on data with variables
# in rows. Returns the estimated essential graph as a 0/1 matrix.
gies <- function(dat){
  obs_score <- new("GaussL0penObsScore", data = t(dat))
  fit <- pcalg::gies(obs_score)
  adj <- as(fit$essgraph, "matrix")
  # Coerce TRUE edge entries to numeric 1 (assumes a logical matrix from
  # pcalg -- same convention as ges() above).
  adj[adj] <- 1
  return(adj)
}
#LiNGAM
# LiNGAM causal discovery (pcalg) on data with variables in rows.
# Returns the pruned coefficient matrix, or an all-zero p x p matrix when
# estimation fails.
# Fixes: `silent = T` -> TRUE; error detection now uses
# inherits(x, "try-error") instead of fragile typeof() string checks, which
# could silently return NULL when the result was neither "list" nor
# "character".
lingam_new <- function(dat){
  estDAG <- try(lingam(t(dat)), silent = TRUE)
  if (inherits(estDAG, "try-error")) {
    # Estimation failed: fall back to an empty graph sized to the input.
    return(matrix(0, nrow(dat), nrow(dat)))
  }
  estDAG$Bpruned
}
#CAM
#cam <- function(dat){
# estDAG <- try(CAM(t(dat), scoreName = "SEMGAM", numCores = 4, output = FALSE,
# variableSel = TRUE, variableSelMethod = selGamBoost, pruning = TRUE,
# pruneMethod = selGam, pruneMethodPars = list(cutOffPVal = str(0.001))), silent = T)
# if(typeof(estDAG) == "character"){
# return(matrix, nrow(dat), nrow(dat))
# }else if(){
# }
#}
#FCI
# FCI constraint-based search (pcalg) using a Gaussian independence test on
# the sample correlation matrix. Returns the adjacency matrix together with
# the matrix of maximal p-values.
fci_new <- function(dat, numCores = 5){
  sigma <- cov(t(dat))
  # Huge nominal sample size so gaussCItest treats the correlations as
  # essentially exact.
  suff_stat <- list(C = cov2cor(sigma), n = 10^9)
  res <- fci(suff_stat,
             indepTest = gaussCItest,
             alpha = 0.9999,
             p = nrow(sigma),
             doPdsep = FALSE,
             numCores = numCores)
  list(am = res@amat, p.mat = res@pMax)
}
#IDA
# Placeholder for an IDA (intervention calculus) analysis; not implemented.
# NOTE(review): currently returns NULL for any input.
ida <- function(dat){
}
# Label an adjacency matrix with species names "sp0", "sp1", ...
# Generalized: labels now follow the matrix dimensions instead of the
# original hard-coded 10 names (which errored on any non-10-row matrix);
# a 10 x 10 input behaves exactly as before.
decorate <- function(m){
  row.names(m) <- paste("sp", seq_len(nrow(m)) - 1, sep = "")
  colnames(m) <- paste("sp", seq_len(ncol(m)) - 1, sep = "")
  return(m)
}
# Driver script: for each dataset family (i), size (j) and replicate (k),
# read absolute (AA) and relative (RA) abundance tables and run each causal
# discovery method, writing decorated adjacency matrices to per-method
# subfolders.
# NOTE(review): setwd() to an absolute Windows path makes this script
# machine-specific and permanently changes the session's working directory.
for(i in c("R")){#"hubbell", "soi"
  if(i == "R"){
    #s = c("100", "200", "400", "800")
    s = c("S20")#"S30", , "S20", "S40", "S50", "S60"
  }
  for(j in s){
    setwd(paste("D:\\Fangsa\\causal_compare\\Data\\TS_", i, "_", j, sep = ""))
    for(k in seq(100)){
      print(k)
      aa <- read.table(paste("AA_", as.character(k), ".txt", sep = ""))
      ra <- read.table(paste("RA_", as.character(k), ".txt", sep = ""))
      #Causal Additive Model
      #CCDr
      mkdir("CCDr")
      am_aa <- ccdr(aa)
      am_ra <- ccdr(ra)
      am_aa <- decorate(am_aa)
      am_ra <- decorate(am_ra)
      write.csv(am_aa, paste("CCDr\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), quote = F)
      write.csv(am_ra, paste("CCDr\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), quote = F)
      #GES
      # NOTE(review): am_aa here is the (decorated) CCDr result; ges(aa) only
      # runs when the CCDr graph is non-empty, otherwise the CCDr matrix is
      # re-used and re-decorated below. Confirm this is intentional.
      if(sum(am_aa) == 0){
        print(paste(i, j, as.character(k), sep = "\t"))
      }else{
        am_aa <- ges(aa)
      }
      mkdir("GES")
      am_ra <- ges(ra)
      am_aa <- decorate(am_aa)
      am_ra <- decorate(am_ra)
      write.csv(am_aa, paste("GES\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_ra, paste("GES\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      #GIES
      # NOTE(review): the folder is created as "GIES" but files are written to
      # "GiES\\" -- this only works on case-insensitive filesystems (Windows).
      mkdir("GIES")
      if(sum(am_aa) == 0){
        print(paste(i, j, as.character(k), sep = "\t"))
      }else{
        am_aa <- gies(aa)
      }
      am_ra <- gies(ra)
      am_aa <- decorate(am_aa)
      am_ra <- decorate(am_ra)
      write.csv(am_aa, paste("GiES\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_ra, paste("GiES\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      #LiNGAM
      mkdir("LiNGAM")
      am_aa <- lingam_new(aa)
      am_ra <- lingam_new(ra)
      am_aa <- decorate(am_aa)
      am_ra <- decorate(am_ra)
      write.csv(am_aa, paste("LiNGAM\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_ra, paste("LiNGAM\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      #FCI
      mkdir("FCI")
      am_aa <- fci_new(aa)
      am_ra <- fci_new(ra)
      am_aa$am <- decorate(am_aa$am)
      am_ra$am <- decorate(am_ra$am)
      # NOTE(review): `$p` partially matches the `p.mat` element on READ, but
      # the assignment creates a NEW element named `p`; the `p.mat` written
      # below therefore stays un-decorated. Probably meant `$p.mat`.
      am_aa$p <- decorate(am_aa$p)
      am_ra$p <- decorate(am_ra$p)
      write.csv(am_aa$am, paste("FCI\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_aa$p.mat, paste("FCI\\AA_p_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_ra$am, paste("FCI\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
      write.csv(am_ra$p.mat, paste("FCI\\RA_p_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
    }
  }
}
| /code/3_inference/cf_pcalg.R | no_license | Fangsa/CausalDiscovery | R | false | false | 5,415 | r | library(pcalg)
library(sparsebn)
library(CAM)
#library(CompareCausalNetworks)
library(MASS)
library(methods)
# Duplicate copy of mkdir(): create `fold` unless it already exists.
mkdir <- function(fold){
  if(!dir.exists(fold)){
    dir.create(fold)
  }
}
#ccdr
#' @sparsebn is an R package for learning sparse Bayesian networks and other graphical models
#' from high-dimensional data via sparse regularization.
#' @estimate.dag for directed acyclic graphs (Bayesian networks).
#' @estimate.precision for undirected graphs (Markov random fields).
#' @estimate.covariance for covariance matrices.
# Duplicate copy of ccdr(): CCDr structure learning via sparsebn on data
# with variables in rows.
ccdr <- function(dat){
  # `dat` is shadowed: from here on it is the sparsebnData object.
  dat <- sparsebnData(t(dat), type = "continuous")
  dags <- estimate.dag(dat)
  lambda <- select.parameter(dags,dat)
  if(!is.infinite(lambda)){
    am <- get.adjacency.matrix(dags[[lambda]])
  }else{
    # NOTE(review): fallback hard-codes a 10 x 10 zero matrix regardless of
    # the actual number of variables.
    am <- matrix(0, 10, 10)
  }
  am <- as(am, "matrix")
  return(am)
}
#GES
# Duplicate copy of ges(): Greedy Equivalence Search (pcalg); returns the
# essential graph with TRUE edges coerced to numeric 1.
ges <- function(dat){
  score <- new("GaussL0penObsScore", data = t(dat))
  result <- pcalg::ges(score)
  gesmat <- as(result$essgraph, "matrix")
  gesmat[gesmat] <- 1
  return(gesmat)
}
#GIES
# Greedy Interventional Equivalence Search (pcalg::gies) on a
# variables-x-samples matrix. Returns the essential graph's adjacency
# matrix with edges recoded to numeric 1 (TRUE positions) and 0 elsewhere.
gies <- function(dat){
  obs_score <- new("GaussL0penObsScore", data = t(dat))
  fit <- pcalg::gies(obs_score)
  adj <- as(fit$essgraph, "matrix")
  # Assigning 1 at the TRUE positions coerces the logical matrix to numeric.
  adj[adj] <- 1
  return(adj)
}
#LiNGAM
# LiNGAM causal discovery on a variables-x-samples matrix.
# Wraps pcalg's lingam(); on failure returns an all-zero square matrix of
# size nrow(dat) instead of erroring, so the batch driver can continue.
lingam_new <- function(dat){
  estDAG <- try(lingam(t(dat)), silent = TRUE)
  # inherits() is the robust way to detect a try() failure; the previous
  # typeof() comparisons relied on internal storage types and silently
  # returned NULL for any unanticipated result.
  if(inherits(estDAG, "try-error")){
    return(matrix(0, nrow(dat), nrow(dat)))
  }
  return(estDAG$Bpruned)
}
#CAM
#cam <- function(dat){
# estDAG <- try(CAM(t(dat), scoreName = "SEMGAM", numCores = 4, output = FALSE,
# variableSel = TRUE, variableSelMethod = selGamBoost, pruning = TRUE,
# pruneMethod = selGam, pruneMethodPars = list(cutOffPVal = str(0.001))), silent = T)
# if(typeof(estDAG) == "character"){
# return(matrix, nrow(dat), nrow(dat))
# }else if(){
# }
#}
#FCI
# Run the FCI algorithm (pcalg::fci) on a variables-x-samples matrix using
# the Gaussian conditional-independence test on the correlation matrix.
# NOTE(review): n = 10^9 is a fake, huge sample size and alpha = 0.9999 an
# extremely permissive cutoff -- presumably chosen so almost no edge is
# rejected by the CI test; confirm this is intentional. doPdsep = FALSE
# skips the Possible-D-SEP step (faster, potentially less accurate).
# Returns list(am = adjacency matrix, p.mat = matrix of maximal p-values).
fci_new <- function(dat, numCores = 5){
  cov.mat <- cov(t(dat))
  suffStat <- list(C = cov2cor(cov.mat), n = 10^9)
  res <- fci(suffStat, indepTest=gaussCItest, alpha = 0.9999, p = dim(cov.mat)[1], doPdsep = FALSE, numCores = numCores)
  return(list(am = res@amat, p.mat = res@pMax))
}
#IDA
# Stub for IDA causal-effect estimation; never implemented (returns NULL).
ida <- function(dat){
}
# Label the rows and columns of matrix `m` with species ids "sp0", "sp1", ...
# Generalized from the original hard-coded seq(0, 9): labels now follow the
# matrix's actual dimensions, so the 10x10 case used in this script is
# unchanged ("sp0".."sp9") while other sizes no longer error on a
# length-mismatched names vector.
decorate <- function(m){
  rownames(m) <- paste0("sp", seq_len(nrow(m)) - 1L)
  colnames(m) <- paste0("sp", seq_len(ncol(m)) - 1L)
  return(m)
}
# Batch driver: for each simulated dataset (absolute-abundance AA_k.txt and
# relative-abundance RA_k.txt, k = 1..100) run each causal-discovery method
# and write the estimated adjacency matrices to per-method subfolders.
for(i in c("R")){#"hubbell", "soi"
if(i == "R"){
#s = c("100", "200", "400", "800")
s = c("S20")#"S30", , "S20", "S40", "S50", "S60"
}
for(j in s){
# Hard-coded Windows path: all reads/writes below are relative to this dir.
setwd(paste("D:\\Fangsa\\causal_compare\\Data\\TS_", i, "_", j, sep = ""))
for(k in seq(100)){
print(k)
aa <- read.table(paste("AA_", as.character(k), ".txt", sep = ""))
ra <- read.table(paste("RA_", as.character(k), ".txt", sep = ""))
#Causal Additive Model
#CCDr
mkdir("CCDr")
am_aa <- ccdr(aa)
am_ra <- ccdr(ra)
am_aa <- decorate(am_aa)
am_ra <- decorate(am_ra)
write.csv(am_aa, paste("CCDr\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), quote = F)
write.csv(am_ra, paste("CCDr\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), quote = F)
#GES
# NOTE(review): this guard tests the CCDr result still held in am_aa.
# If that matrix was all-zero, the GES fit for AA is skipped and the
# decorated CCDr estimate is written to the GES folder below -- confirm
# this fallback is intentional rather than a copy-paste artifact.
if(sum(am_aa) == 0){
print(paste(i, j, as.character(k), sep = "\t"))
}else{
am_aa <- ges(aa)
}
mkdir("GES")
am_ra <- ges(ra)
am_aa <- decorate(am_aa)
am_ra <- decorate(am_ra)
write.csv(am_aa, paste("GES\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_ra, paste("GES\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
#GIES
mkdir("GIES")
if(sum(am_aa) == 0){
print(paste(i, j, as.character(k), sep = "\t"))
}else{
am_aa <- gies(aa)
}
am_ra <- gies(ra)
am_aa <- decorate(am_aa)
am_ra <- decorate(am_ra)
# NOTE(review): folder created above as "GIES" but written to as "GiES";
# this only works on case-insensitive filesystems (Windows).
write.csv(am_aa, paste("GiES\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_ra, paste("GiES\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
#LiNGAM
mkdir("LiNGAM")
am_aa <- lingam_new(aa)
am_ra <- lingam_new(ra)
am_aa <- decorate(am_aa)
am_ra <- decorate(am_ra)
write.csv(am_aa, paste("LiNGAM\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_ra, paste("LiNGAM\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
#FCI
mkdir("FCI")
am_aa <- fci_new(aa)
am_ra <- fci_new(ra)
am_aa$am <- decorate(am_aa$am)
am_ra$am <- decorate(am_ra$am)
# NOTE(review): fci_new() returns list(am, p.mat). Reading `am_aa$p`
# fetches p.mat via `$` partial matching, but ASSIGNING `am_aa$p` creates
# a new element named "p" -- p.mat itself stays undecorated, so the p.mat
# CSVs below are written without row/column names. Probably intended:
# am_aa$p.mat <- decorate(am_aa$p.mat) (and likewise for am_ra).
am_aa$p <- decorate(am_aa$p)
am_ra$p <- decorate(am_ra$p)
write.csv(am_aa$am, paste("FCI\\AA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_aa$p.mat, paste("FCI\\AA_p_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_ra$am, paste("FCI\\RA_adjacent_matrix_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
write.csv(am_ra$p.mat, paste("FCI\\RA_p_",as.character(k),".csv", sep = ""), row.names = T, quote = F)
}
}
}
|
library(MODISSnow)
### Name: download_data
### Title: Download MODIS snow cover data (version 6) from the National
### Snow and Ice Data Center.
### Aliases: download_data get_tile
### ** Examples
## Not run:
##D # Download MODIS snow data for a central europe h = 18 and v = 5 for the 1 of January 2016
##D dat <- download_data(lubridate::ymd("2016-01-01"), h = 18, v = 5)
##D class(dat)
##D raster::plot(dat)
## End(Not run)
| /data/genthat_extracted_code/MODISSnow/examples/MODISSnow.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 437 | r | library(MODISSnow)
### Name: download_data
### Title: Download MODIS snow cover data (version 6) from the National
### Snow and Ice Data Center.
### Aliases: download_data get_tile
### ** Examples
## Not run:
##D # Download MODIS snow data for a central europe h = 18 and v = 5 for the 1 of January 2016
##D dat <- download_data(lubridate::ymd("2016-01-01"), h = 18, v = 5)
##D class(dat)
##D raster::plot(dat)
## End(Not run)
|
library("R.utils")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# First, clean up if called more than once
setHook("myFunction.onEnter", NULL, action="replace")
setHook("myFunction.onExit", NULL, action="replace")
runConference <- function(...) {
callHooks("myFunction.onEnter")
cat("Speaker A: Hello there...\n")
callHooks("myFunction.onExit")
}
setHook("myFunction.onEnter", function(...) {
cat("Chair: Welcome to our conference.\n")
})
setHook("myFunction.onEnter", function(...) {
cat("Chair: Please welcome Speaker A!\n")
})
setHook("myFunction.onExit", function(...) {
cat("Chair: Please thanks Speaker A!\n")
})
runConference()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 2
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setHook("randomNumber", NULL, action="replace")
setHook("randomNumber", rnorm) # By function
setHook("randomNumber", "rexp") # By name
setHook("randomNumber", "runiff") # Non-existing name
setHook("randomNumber", .GlobalEnv) # Not a function
res <- callHooks("randomNumber", n=1, removeCalledHooks=TRUE)
str(res)
cat("Number of hooks: ", length(res), "\n")
isErroneous <- unlist(lapply(res, FUN=function(x) !is.null(x$exception)))
cat("Erroneous hooks: ", sum(isErroneous), "\n")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Exception handling
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
res <- try(callHooks(character(0L)), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(callHooks(c("a", "b")), silent=TRUE)
stopifnot(inherits(res, "try-error"))
| /tests/callHooks.R | no_license | HenrikBengtsson/R.utils | R | false | false | 1,698 | r | library("R.utils")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# First, clean up if called more than once
setHook("myFunction.onEnter", NULL, action="replace")
setHook("myFunction.onExit", NULL, action="replace")
runConference <- function(...) {
callHooks("myFunction.onEnter")
cat("Speaker A: Hello there...\n")
callHooks("myFunction.onExit")
}
setHook("myFunction.onEnter", function(...) {
cat("Chair: Welcome to our conference.\n")
})
setHook("myFunction.onEnter", function(...) {
cat("Chair: Please welcome Speaker A!\n")
})
setHook("myFunction.onExit", function(...) {
cat("Chair: Please thanks Speaker A!\n")
})
runConference()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 2
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setHook("randomNumber", NULL, action="replace")
setHook("randomNumber", rnorm) # By function
setHook("randomNumber", "rexp") # By name
setHook("randomNumber", "runiff") # Non-existing name
setHook("randomNumber", .GlobalEnv) # Not a function
res <- callHooks("randomNumber", n=1, removeCalledHooks=TRUE)
str(res)
cat("Number of hooks: ", length(res), "\n")
isErroneous <- unlist(lapply(res, FUN=function(x) !is.null(x$exception)))
cat("Erroneous hooks: ", sum(isErroneous), "\n")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Exception handling
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
res <- try(callHooks(character(0L)), silent=TRUE)
stopifnot(inherits(res, "try-error"))
res <- try(callHooks(c("a", "b")), silent=TRUE)
stopifnot(inherits(res, "try-error"))
|
# Fetch/unpack the power-consumption data set if not already present.
source("download.R")
## Read data file
fileName <- "household_power_consumption.txt"
# "?" encodes missing readings in this data set.
powerDf <- read.csv(fileName, sep = ";", na.strings="?")
# Combine the separate Date and Time text columns into one POSIXlt timestamp.
powerDf$DateTime <- strptime(paste(powerDf$Date, powerDf$Time), "%d/%m/%Y %H:%M")
## Filter Data Frame to data between 2/1/2007 and 2/2/2007
# Half-open interval [Feb 1, Feb 3) covers exactly the two target days.
startDate <- as.POSIXlt("2007-02-01")
endDate <- as.POSIXlt("2007-02-03")
filteredPowerDf <- powerDf[powerDf$DateTime >= startDate & powerDf$DateTime < endDate,]
## Open the png Graphics device
png(filename = "plot2.png", width=480, height=480)
## Draw the line plot of the Global Active Power by Date and Time
plot(x=filteredPowerDf$DateTime,
     y=filteredPowerDf$Global_active_power,
     type="l",
     xlab="",
     ylab="Global Active Power (kilowatts)",
     xaxs="r")
dev.off()
| /plot2.R | no_license | cmchenry/ExData_Plotting1 | R | false | false | 781 | r | source("download.R")
## Read data file
fileName <- "household_power_consumption.txt"
powerDf <- read.csv(fileName, sep = ";", na.strings="?")
powerDf$DateTime <- strptime(paste(powerDf$Date, powerDf$Time), "%d/%m/%Y %H:%M")
## Filter Data Frame to data between 2/1/2007 and 2/2/2007
startDate <- as.POSIXlt("2007-02-01")
endDate <- as.POSIXlt("2007-02-03")
filteredPowerDf <- powerDf[powerDf$DateTime >= startDate & powerDf$DateTime < endDate,]
## Open the png Graphics device
png(filename = "plot2.png", width=480, height=480)
## Draw the line plot of the Global Active Power by Date and Time
plot(x=filteredPowerDf$DateTime,
y=filteredPowerDf$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power (kilowatts)",
xaxs="r")
dev.off()
|
library(fisheyeR)
### Name: plotPOI
### Title: Functions for POI plotting
### Aliases: plotPOI plotPOIGraph
### ** Examples
## Not run:
##D
##D ## rgb colors
##D rgbPOI = POICreate(type = 'POI', wordsInQuery = c('red','green','blue'),
##D colores = colors(), itemsCol = colors(),
##D docs = cbind(colors(), 1:length(colors())),
##D cos.query.docs = rep(1,length(colors())),
##D matrizSim = t(col2rgb(colors())) / max(t(col2rgb(colors())))
##D )
##D POIcoords(rgbPOI) <- POICalc(rgbPOI ,length(rgbPOI@wordsInQuery))
##D try(rm('POI.env'), silent = T)
##D plotPOI(rgbPOI)
##D
##D ## graph example
##D # igraph package -- graph.tree example looks great!
##D if (require(igraph)) {
##D GRAPH <- graph.tree(500, children = 10, mode = 'in')
##D fCompress <- 350 # compress factor
##D graphPOI <- POICreate(type = 'POIGraph')
##D graphPOI@objeto <- layout.fruchterman.reingold(GRAPH,dim = 2) / fCompress
##D graphPOI@EDGES <- cbind(GRAPH[[3]],GRAPH[[4]]) + 1
##D graphPOI@docs <- matrix(c(seq(1:nrow(graphPOI@objeto)), seq(1:nrow(graphPOI@objeto))), ncol = 2)
##D try(rm('POI.env'), silent = T)
##D plotPOIGraph(graphPOI)
##D }
##D # manually made -- but igraph example looks great!!
##D graphPOI <- POICreate(type = 'POIGraph')
##D graphPOI@objeto <- graphPOI@objeto <- rbind(c(0,.05), c(.05,0), c(0,-.05), c(-.05,0) ,round(circulo(0,0,.3,PLOT = FALSE),2))
##D graphPOI@EDGES <- matrix(c(rep(1,25), rep(2,25), rep(3,25), rep(4,25), seq(1,100)), ncol = 2)
##D graphPOI@docs <- matrix(c(seq(1:nrow(graphPOI@objeto)), seq(1:nrow(graphPOI@objeto))), ncol = 2)
##D graphPOI@colores <- c(rep(2,25), rep(3,25), rep(4,25), rep(5,25))
##D try(rm('POI.env'), silent = T)
##D plotPOIGraph(graphPOI)
##D
##D ## IRIS Example
##D data(iris)
##D # distance of each element to each dimension max and min
##D matrizSim = cbind(
##D 1 - (max(iris[,1]) - iris[,1]) / (max(max(iris[,1]) - iris[,1])),
##D 1 - (max(iris[,2]) - iris[,2]) / (max(max(iris[,2]) - iris[,2])),
##D 1 - (max(iris[,3]) - iris[,3]) / (max(max(iris[,3]) - iris[,3])),
##D 1 - (max(iris[,4]) - iris[,4]) / (max(max(iris[,4]) - iris[,4])),
##D 1 - (min(iris[,1]) - iris[,1]) / (min(min(iris[,1]) - iris[,1])),
##D 1 - (min(iris[,2]) - iris[,2]) / (min(min(iris[,2]) - iris[,2])),
##D 1 - (min(iris[,3]) - iris[,3]) / (min(min(iris[,3]) - iris[,3])),
##D 1 - (min(iris[,4]) - iris[,4]) / (min(min(iris[,4]) - iris[,4])))
##D
##D matrizSim = matrizSim^3
##D irisPOI = POICreate('POI')
##D irisPOI@matrizSim <- matrizSim
##D irisPOI@wordsInQuery <- c('high.Sepal.Length', 'high.Sepal.Width',
##D 'high.Petal.Length', 'high.Petal.Width',
##D 'low.Sepal.Length', 'low.Sepal.Width',
##D 'low.Petal.Length', 'low.Petal.Width')
##D POIcoords(irisPOI) <- POICalc(irisPOI ,length(irisPOI@wordsInQuery))
##D irisPOI@docs <- cbind(matrix(seq(1:nrow(irisPOI@objeto))),matrix(seq(1:nrow(irisPOI@objeto))))
##D irisPOI@colores <- c(rep(2,50),rep(3,50),rep(4,50))
##D try(rm('POI.env'), silent = T)
##D plotPOI(irisPOI)
##D
##D ## USArrest Example
##D # POIS = (high - low) murder, assault and rape rates
##D # colors = Population
##D data(USArrests)
##D matrizSim = cbind(
##D 1 - (max(USArrests[,1]) - USArrests[,1]) / (max(max(USArrests[,1]) - USArrests[,1])),
##D 1 - (max(USArrests[,2]) - USArrests[,2]) / (max(max(USArrests[,2]) - USArrests[,2])),
##D 1 - (max(USArrests[,4]) - USArrests[,4]) / (max(max(USArrests[,4]) - USArrests[,4])),
##D 1 - (min(USArrests[,1]) - USArrests[,1]) / (min(min(USArrests[,1]) - USArrests[,1])),
##D 1 - (min(USArrests[,2]) - USArrests[,2]) / (min(min(USArrests[,2]) - USArrests[,2])),
##D 1 - (min(USArrests[,4]) - USArrests[,4]) / (min(min(USArrests[,4]) - USArrests[,4])))
##D
##D usaPOI = POICreate('POI')
##D usaPOI@matrizSim <- matrizSim
##D usaPOI@wordsInQuery <- c(paste('High', names(USArrests[,c(1,2,4)])), paste('Low', names(USArrests[,c(1,2,4)])))
##D POIcoords(usaPOI) <- POICalc(usaPOI ,length(usaPOI@wordsInQuery))
##D usaPOI@docs <- cbind(matrix(rownames(USArrests)),matrix(seq(1:nrow(usaPOI@objeto))))
##D usaPOI@cos.query.docs <- USArrests[,3] / max(USArrests[,3])
##D POIcolors(usaPOI)<- query2Cols(usaPOI, 'terrain')
##D try(rm('POI.env'), silent = T)
##D plotPOI(usaPOI)
##D
##D ## clusters EXAMPLE
##D x <- matrix(rnorm(1500, mean = 0, sd = .5), ncol = 5)
##D atipV1 = sample(nrow(x), as.integer(nrow(x)/3)) # outliers in V1
##D atipV2 = sample(nrow(x), as.integer(nrow(x)/3)) # outliers in V2
##D x[atipV1, 1] <- rnorm(100, mean = 2, sd = .5)
##D x[atipV2, 2] <- rnorm(100, mean = 2, sd = .5)
##D cl <- kmeans(x, 3, iter.max = 100 ,nstart = 25)
##D matrizSim = sqrt(round((x - colMeans(x))^2,1 )/nrow(x)) # simmilarity within outliers
##D # OR (uncomment one)
##D # matrizSim = 1 - sqrt(round((x - colMeans(x))^2,1 )/nrow(x)) # simmilarity within mean
##D varPOI = POICreate('POI')
##D varPOI@matrizSim <- matrizSim
##D varPOI@wordsInQuery <- 1:ncol(matrizSim)
##D POIcoords(varPOI) <- POICalc(varPOI ,length(varPOI@wordsInQuery))
##D # if elements labels bother
##D varPOI@docs <- cbind(rep(' ',nrow(varPOI@objeto)),matrix(seq(1:nrow(varPOI@objeto))))
##D varPOI@cos.query.docs <- rep(1,nrow(matrizSim))
##D varPOI@colores <- cl$cluster + 1
##D try(rm('POI.env'), silent = T)
##D plotPOI(varPOI)
##D
## End(Not run)
| /data/genthat_extracted_code/fisheyeR/examples/plotPOI.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 5,843 | r | library(fisheyeR)
### Name: plotPOI
### Title: Functions for POI plotting
### Aliases: plotPOI plotPOIGraph
### ** Examples
## Not run:
##D
##D ## rgb colors
##D rgbPOI = POICreate(type = 'POI', wordsInQuery = c('red','green','blue'),
##D colores = colors(), itemsCol = colors(),
##D docs = cbind(colors(), 1:length(colors())),
##D cos.query.docs = rep(1,length(colors())),
##D matrizSim = t(col2rgb(colors())) / max(t(col2rgb(colors())))
##D )
##D POIcoords(rgbPOI) <- POICalc(rgbPOI ,length(rgbPOI@wordsInQuery))
##D try(rm('POI.env'), silent = T)
##D plotPOI(rgbPOI)
##D
##D ## graph example
##D # igraph package -- graph.tree example looks great!
##D if (require(igraph)) {
##D GRAPH <- graph.tree(500, children = 10, mode = 'in')
##D fCompress <- 350 # compress factor
##D graphPOI <- POICreate(type = 'POIGraph')
##D graphPOI@objeto <- layout.fruchterman.reingold(GRAPH,dim = 2) / fCompress
##D graphPOI@EDGES <- cbind(GRAPH[[3]],GRAPH[[4]]) + 1
##D graphPOI@docs <- matrix(c(seq(1:nrow(graphPOI@objeto)), seq(1:nrow(graphPOI@objeto))), ncol = 2)
##D try(rm('POI.env'), silent = T)
##D plotPOIGraph(graphPOI)
##D }
##D # manually made -- but igraph example looks great!!
##D graphPOI <- POICreate(type = 'POIGraph')
##D graphPOI@objeto <- graphPOI@objeto <- rbind(c(0,.05), c(.05,0), c(0,-.05), c(-.05,0) ,round(circulo(0,0,.3,PLOT = FALSE),2))
##D graphPOI@EDGES <- matrix(c(rep(1,25), rep(2,25), rep(3,25), rep(4,25), seq(1,100)), ncol = 2)
##D graphPOI@docs <- matrix(c(seq(1:nrow(graphPOI@objeto)), seq(1:nrow(graphPOI@objeto))), ncol = 2)
##D graphPOI@colores <- c(rep(2,25), rep(3,25), rep(4,25), rep(5,25))
##D try(rm('POI.env'), silent = T)
##D plotPOIGraph(graphPOI)
##D
##D ## IRIS Example
##D data(iris)
##D # distance of each element to each dimension max and min
##D matrizSim = cbind(
##D 1 - (max(iris[,1]) - iris[,1]) / (max(max(iris[,1]) - iris[,1])),
##D 1 - (max(iris[,2]) - iris[,2]) / (max(max(iris[,2]) - iris[,2])),
##D 1 - (max(iris[,3]) - iris[,3]) / (max(max(iris[,3]) - iris[,3])),
##D 1 - (max(iris[,4]) - iris[,4]) / (max(max(iris[,4]) - iris[,4])),
##D 1 - (min(iris[,1]) - iris[,1]) / (min(min(iris[,1]) - iris[,1])),
##D 1 - (min(iris[,2]) - iris[,2]) / (min(min(iris[,2]) - iris[,2])),
##D 1 - (min(iris[,3]) - iris[,3]) / (min(min(iris[,3]) - iris[,3])),
##D 1 - (min(iris[,4]) - iris[,4]) / (min(min(iris[,4]) - iris[,4])))
##D
##D matrizSim = matrizSim^3
##D irisPOI = POICreate('POI')
##D irisPOI@matrizSim <- matrizSim
##D irisPOI@wordsInQuery <- c('high.Sepal.Length', 'high.Sepal.Width',
##D 'high.Petal.Length', 'high.Petal.Width',
##D 'low.Sepal.Length', 'low.Sepal.Width',
##D 'low.Petal.Length', 'low.Petal.Width')
##D POIcoords(irisPOI) <- POICalc(irisPOI ,length(irisPOI@wordsInQuery))
##D irisPOI@docs <- cbind(matrix(seq(1:nrow(irisPOI@objeto))),matrix(seq(1:nrow(irisPOI@objeto))))
##D irisPOI@colores <- c(rep(2,50),rep(3,50),rep(4,50))
##D try(rm('POI.env'), silent = T)
##D plotPOI(irisPOI)
##D
##D ## USArrest Example
##D # POIS = (high - low) murder, assault and rape rates
##D # colors = Population
##D data(USArrests)
##D matrizSim = cbind(
##D 1 - (max(USArrests[,1]) - USArrests[,1]) / (max(max(USArrests[,1]) - USArrests[,1])),
##D 1 - (max(USArrests[,2]) - USArrests[,2]) / (max(max(USArrests[,2]) - USArrests[,2])),
##D 1 - (max(USArrests[,4]) - USArrests[,4]) / (max(max(USArrests[,4]) - USArrests[,4])),
##D 1 - (min(USArrests[,1]) - USArrests[,1]) / (min(min(USArrests[,1]) - USArrests[,1])),
##D 1 - (min(USArrests[,2]) - USArrests[,2]) / (min(min(USArrests[,2]) - USArrests[,2])),
##D 1 - (min(USArrests[,4]) - USArrests[,4]) / (min(min(USArrests[,4]) - USArrests[,4])))
##D
##D usaPOI = POICreate('POI')
##D usaPOI@matrizSim <- matrizSim
##D usaPOI@wordsInQuery <- c(paste('High', names(USArrests[,c(1,2,4)])), paste('Low', names(USArrests[,c(1,2,4)])))
##D POIcoords(usaPOI) <- POICalc(usaPOI ,length(usaPOI@wordsInQuery))
##D usaPOI@docs <- cbind(matrix(rownames(USArrests)),matrix(seq(1:nrow(usaPOI@objeto))))
##D usaPOI@cos.query.docs <- USArrests[,3] / max(USArrests[,3])
##D POIcolors(usaPOI)<- query2Cols(usaPOI, 'terrain')
##D try(rm('POI.env'), silent = T)
##D plotPOI(usaPOI)
##D
##D ## clusters EXAMPLE
##D x <- matrix(rnorm(1500, mean = 0, sd = .5), ncol = 5)
##D atipV1 = sample(nrow(x), as.integer(nrow(x)/3)) # outliers in V1
##D atipV2 = sample(nrow(x), as.integer(nrow(x)/3)) # outliers in V2
##D x[atipV1, 1] <- rnorm(100, mean = 2, sd = .5)
##D x[atipV2, 2] <- rnorm(100, mean = 2, sd = .5)
##D cl <- kmeans(x, 3, iter.max = 100 ,nstart = 25)
##D matrizSim = sqrt(round((x - colMeans(x))^2,1 )/nrow(x)) # simmilarity within outliers
##D # OR (uncomment one)
##D # matrizSim = 1 - sqrt(round((x - colMeans(x))^2,1 )/nrow(x)) # simmilarity within mean
##D varPOI = POICreate('POI')
##D varPOI@matrizSim <- matrizSim
##D varPOI@wordsInQuery <- 1:ncol(matrizSim)
##D POIcoords(varPOI) <- POICalc(varPOI ,length(varPOI@wordsInQuery))
##D # if elements labels bother
##D varPOI@docs <- cbind(rep(' ',nrow(varPOI@objeto)),matrix(seq(1:nrow(varPOI@objeto))))
##D varPOI@cos.query.docs <- rep(1,nrow(matrizSim))
##D varPOI@colores <- cl$cluster + 1
##D try(rm('POI.env'), silent = T)
##D plotPOI(varPOI)
##D
## End(Not run)
|
plot_colors <- c("#9E0142", "#D53E4F", "#F46D43", "#FDAE61",
"#FEE08B", "#FFFFBF", "#E6F598", "#ABDDA4",
"#66C2A5", "#3288BD", "#5E4FA2")
source("pops_to_files.R")
# One PNG per chromosome: overlay the loess-smoothed (span 0.1) Tajima's D
# track of every population, colored by the palette defined above.
for (i in 1:10){
  png(paste0("TajD_", i, ".png"))
  # par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
  for (x in seq_along(file_names)){
    population <- file_names[x]
    df <- read.table(paste0("tajd.", population,".Tajima.D"), header=TRUE)
    # print(head(df))
    plot_df <- df[df$CHROM == i,]
    # Drop NA bins with a direct logical filter. The previous
    #   plot_df[-which(is.na(plot_df$TajimaD)), ]
    # silently removed EVERY row whenever there were no NAs, because
    # negative indexing with integer(0) selects zero rows.
    plot_df <- plot_df[!is.na(plot_df$TajimaD),]
    loess_test <- loess(plot_df$TajimaD ~ plot_df$BIN_START, span=0.1)
    smoothed <- predict(loess_test)
    # smoothed[smoothed<0] <- 0
    if (x==1){
      plot(x=plot_df$BIN_START, y=smoothed, type="l", col=plot_colors[x],
          xlab="", ylab="", ylim=c(-1,1.5))
    } else {
      lines(x=plot_df$BIN_START, y=smoothed, type="l", col=plot_colors[x],
          xlab="", ylab="", ylim=c(-1,1.5))
    }
  }
  title(main=paste0("Chr",i),
      xlab="position (Mb)", ylab="Tajima's D")
  abline(h=0)
  # legend("topright", inset=c(-0.3,0), legend=legend_names, fill=plot_colors)
  dev.off()
}
| /plots/snp_metrics/plotTajD.R | permissive | jlboat/CP-NAM | R | false | false | 1,269 | r |
plot_colors <- c("#9E0142", "#D53E4F", "#F46D43", "#FDAE61",
"#FEE08B", "#FFFFBF", "#E6F598", "#ABDDA4",
"#66C2A5", "#3288BD", "#5E4FA2")
source("pops_to_files.R")
for (i in 1:10){
png(paste0("TajD_", i, ".png"))
# par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
for (x in 1:length(file_names)){
population <- file_names[x]
df <- read.table(paste0("tajd.", population,".Tajima.D"), header=TRUE)
# print(head(df))
plot_df <- df[df$CHROM == i,]
nan_coords <- which(is.na(plot_df$TajimaD))
plot_df <- plot_df[-nan_coords,]
loess_test <- loess(plot_df$TajimaD ~ plot_df$BIN_START, span=0.1)
smoothed <- predict(loess_test)
# smoothed[smoothed<0] <- 0
if (x==1){
plot(x=plot_df$BIN_START, y=smoothed, type="l", col=plot_colors[x],
xlab="", ylab="", ylim=c(-1,1.5))
} else {
lines(x=plot_df$BIN_START, y=smoothed, type="l", col=plot_colors[x],
xlab="", ylab="", ylim=c(-1,1.5))
}
}
title(main=paste0("Chr",i),
xlab="position (Mb)", ylab="Tajima's D")
abline(h=0)
# legend("topright", inset=c(-0.3,0), legend=legend_names, fill=plot_colors)
dev.off()
}
|
# Compute BIC across candidate subclone numbers K for Canopy MCMC output.
# sampchain[[ki]] holds `numchain` chains of sampled trees for K[ki]; each
# chain is burned in and thinned, the chains are pooled and filtered to the
# highest-likelihood trees, and the mean retained likelihood enters
# BIC = 2*lik - df*log(N). Optionally writes the BIC curve to
# <projectname>_BIC.pdf; returns the numeric BIC vector (one entry per K).
canopy.BIC = function(sampchain, projectname, K, numchain, burnin, thin, 
    pdf = NULL) {
    if (is.null(pdf)) {
        pdf = TRUE
    }
    lik.k = rep(NA, length(K))
    BIC = rep(NA, length(K))
    ki = 1
    for (k in K) {
        sampchaink = sampchain[[ki]]
        temp.tree = sampchaink[[1]][[1]]
        # Dimensions read off a sample tree: s = rows of VAF, n = columns of
        # VAF, t = columns of Q.
        s = nrow(temp.tree$VAF)
        n = ncol(temp.tree$VAF)
        t = ncol(temp.tree$Q)
        numchain = length(sampchaink)
        # burn-in
        samptreenew = sampchaink[[1]][(burnin + 1):length(sampchaink[[1]])]
        numpostburn = length(samptreenew)
        # thinning
        # NOTE(review): assumes numpostburn is a multiple of thin; otherwise
        # numpostburn/thin is fractional and 1:(...) truncates the range.
        temp <- thin * c(1:(numpostburn/thin))
        samptreethin = samptreenew[temp]
        length(samptreethin)
        # Burn-in + thin the remaining chains and pool everything.
        for (numi in 2:numchain) {
            samptreenew = sampchaink[[numi]][(burnin + 1):
                                                 length(sampchaink[[numi]])]
            numpostburn = length(samptreenew)
            temp <- thin * c(1:(numpostburn/thin))
            samptreethin = c(samptreethin, samptreenew[temp])
        }
        samptreelik = rep(NA, length(samptreethin))
        for (treei in 1:length(samptreethin)) {
            samptreelik[treei] = samptreethin[[treei]]$likelihood
        }
        # Keep only the top-likelihood trees: roughly the best 1/numchain
        # fraction of the pooled sample (rank on negated likelihood).
        samptreethin = samptreethin[which((rank(-1 * samptreelik, 
            ties.method = "first")) < (length(samptreethin)/numchain))]
        samptreelik = rep(NA, length(samptreethin))
        for (treei in 1:length(samptreethin)) {
            samptreelik[treei] = samptreethin[[treei]]$likelihood
        }
        lik.temp = mean(samptreelik)
        cat("k =", k, ": mean likelihood", lik.temp, ".\n")
        # Model degrees of freedom and effective data-point count for BIC.
        # NOTE(review): these formulas encode Canopy's parameter accounting
        # for k subclones -- confirm against the Canopy model description.
        K.data = 2 * (2 * k - 3) + 2 * t + s + (k - 1) * n
        N = s * n * 2 + t * n * 4 + s
        BIC.temp = 2 * lik.temp - K.data * log(N)
        lik.k[ki] = lik.temp
        BIC[ki] = BIC.temp
        ki = ki + 1
    }
    if (pdf) {
        pdf(file = paste(projectname, "_BIC.pdf", sep = ""), height = 5, 
            width = 5)
    }
    # BIC curve with the best K marked by a dashed vertical line.
    plot(K, BIC, xlab = "Number of subclones", ylab = "BIC", type = "b", 
        xaxt = "n")
    axis(1, at = K)
    abline(v = K[which.max(BIC)], lty = 2)
    title(paste("BIC for model selection", projectname))
    if (pdf) {
        dev.off()
    }
    return(BIC)
}
| /Canopy/R/canopy.BIC.R | no_license | ingted/R-Examples | R | false | false | 2,319 | r | canopy.BIC = function(sampchain, projectname, K, numchain, burnin, thin,
pdf = NULL) {
if (is.null(pdf)) {
pdf = TRUE
}
lik.k = rep(NA, length(K))
BIC = rep(NA, length(K))
ki = 1
for (k in K) {
sampchaink = sampchain[[ki]]
temp.tree = sampchaink[[1]][[1]]
s = nrow(temp.tree$VAF)
n = ncol(temp.tree$VAF)
t = ncol(temp.tree$Q)
numchain = length(sampchaink)
# burn-in
samptreenew = sampchaink[[1]][(burnin + 1):length(sampchaink[[1]])]
numpostburn = length(samptreenew)
# thinning
temp <- thin * c(1:(numpostburn/thin))
samptreethin = samptreenew[temp]
length(samptreethin)
for (numi in 2:numchain) {
samptreenew = sampchaink[[numi]][(burnin + 1):
length(sampchaink[[numi]])]
numpostburn = length(samptreenew)
temp <- thin * c(1:(numpostburn/thin))
samptreethin = c(samptreethin, samptreenew[temp])
}
samptreelik = rep(NA, length(samptreethin))
for (treei in 1:length(samptreethin)) {
samptreelik[treei] = samptreethin[[treei]]$likelihood
}
samptreethin = samptreethin[which((rank(-1 * samptreelik,
ties.method = "first")) < (length(samptreethin)/numchain))]
samptreelik = rep(NA, length(samptreethin))
for (treei in 1:length(samptreethin)) {
samptreelik[treei] = samptreethin[[treei]]$likelihood
}
lik.temp = mean(samptreelik)
cat("k =", k, ": mean likelihood", lik.temp, ".\n")
K.data = 2 * (2 * k - 3) + 2 * t + s + (k - 1) * n
N = s * n * 2 + t * n * 4 + s
BIC.temp = 2 * lik.temp - K.data * log(N)
lik.k[ki] = lik.temp
BIC[ki] = BIC.temp
ki = ki + 1
}
if (pdf) {
pdf(file = paste(projectname, "_BIC.pdf", sep = ""), height = 5,
width = 5)
}
plot(K, BIC, xlab = "Number of subclones", ylab = "BIC", type = "b",
xaxt = "n")
axis(1, at = K)
abline(v = K[which.max(BIC)], lty = 2)
title(paste("BIC for model selection", projectname))
if (pdf) {
dev.off()
}
return(BIC)
}
|
###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
#sourcing
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#-------------------->> RES TABLE
res <- matrix(nrow=9, ncol=45)
res <- data.frame(res)
colnames(res) <- c(
"year"
,"m1.R2","m1.PE","m1.R2.s","m1.R2.t","m1.PE.s" #full model
,"m1cv.R2","m1cv.I","m1cv.I.se","m1cv.S","m1cv.S.se","m1cv.PE","m1cv.R2.s","m1cv.R2.t","m1cv.PE.s" #mod1 CV
,"m1cv.loc.R2","m1cv.loc.I","m1cv.loc.I.se","m1cv.loc.S","m1cv.loc.S.se","m1cv.loc.PE","m1cv.loc.PE.s","m1cv.loc.R2.s","m1cv.loc.R2.t"#loc m1
,"mod2_R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"mod3_pm_mod3","mod3_int"
,"mod3_int_SE","mod3_Slope","mod3_Slope SE","mod3_RMSPE"
,"mod3_spatial","mod3_temporal","mod3_RMSPE_spatial",
"mod3LPM_pm_mod3LPM","mod3LPM_int","mod3LPM_int_SE","mod3LPM_Slope",
"mod3LPM_Slope SE","mod3LPM_RMSPE","mod3LPM_spatial","mod3LPM_temporal","mod3LPM_RMSPE_spatial"
)
res$year <- c(2004:2011);
### import data
m1.2004 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.rds")
#bad aod?
#m1.2004<-m1.2004[ aod < 0.3 ]
### subset to aqua and apply alexei cleaning methods
#MaskAdjacency == "000"
#m1.2004<-m1.2004[ UN > 0 & UN < 0.04]
################# clean BAD STN PM25 and check if improved model?
raWDaf <- ddply(m1.2004, c( "stn"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.05]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
ugrid <-m1.2004 %>%
group_by(stn) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
setkey(raWDaf,stn)
setkey(ugrid,stn)
raw2 <- merge(raWDaf, ugrid, by.x = "stn")
write.csv(raw2,"/home/zeltak/ZH_tmp/rawdf2004.csv")
#check aod
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
aodagg <-m2.2004 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE), meanaod=mean(aod,na.rm=TRUE))
write.csv(aodagg,"/home/zeltak/ZH_tmp/aodagg2004.csv")
################# clean BAD STN PM25 and check if improved model?
raWDaf <- ddply(m1.2004, c("stn","m"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.05]
bad[,badid := paste(stn,m,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,m,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
################# region
raWDaf <- ddply(m1.2004, c("metreg"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.05]
bad[,badid := paste(stn,season,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,season,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
# Standardize (z-score) each numeric predictor into a matching ".s" column.
# Replaces 21 copy-pasted `m1.2004[, x.s := scale(x)]` lines with one loop;
# data.table::set() adds each scaled column by reference, exactly as the
# individual `:=` assignments did (each new column is scale()'s n x 1
# matrix, matching the original behavior).
scale.vars <- c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
                "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
                "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
                "Raina", "NO2a")
for (v in scale.vars) {
  set(m1.2004, j = paste0(v, ".s"), value = scale(m1.2004[[v]]))
}
m1.formula <- as.formula(PM25~ aod
+tempa.s+WDa.s+WSa.s+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)+(1|stn)) #0.812
#full fit
m1.fit.2004 <- lmer(m1.formula,data=m1.2004,weights=normwt)
m1.2004$pred.m1 <- predict(m1.fit.2004)
print(summary(lm(PM25~pred.m1,data=m1.2004))$r.squared)
#RMSPE
print(rmse(residuals(m1.fit.2004)))
#spatial
###to check
spatial2004<-m1.2004 %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM25-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m1-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
saveRDS(m1.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
#---------------->>>> CV
# Ten random train/test splits. The original repeated the identical
# split/fit/predict block ten times verbatim (s1..s10); collapsed here into
# a single sequential loop, which preserves the order of the random draws.
# Each iteration: split the data, refit the mixed model on the train set,
# and predict PM2.5 on the held-out set (re.form=NULL keeps all random
# effects; allow.new.levels handles held-out grouping levels).
cv_folds <- vector("list", 10)
for (i in seq_len(10)) {
  splits_i <- splitdf(m1.2004)
  test_i <- splits_i$testset
  train_i <- splits_i$trainset
  out_train_i <- lmer(m1.formula, data = train_i, weights = normwt)
  test_i$pred.m1.cv <- predict(object = out_train_i, newdata = test_i,
                               allow.new.levels = TRUE, re.form = NULL)
  test_i$iter <- paste0("s", i)
  cv_folds[[i]] <- test_i
}
#BIND 1 dataset
m1.2004.cv <- data.table(do.call(rbind, cv_folds))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
# Calibration regression of observed PM2.5 on the held-out CV predictions.
m1.fit.2004.cv <- lm(PM25~pred.m1.cv,data=m1.2004.cv)
# Fit once and reuse the summary object; the original refit the identical
# lm() five more times just to pull single coefficients out of it.
sum.cv <- summary(m1.fit.2004.cv)
res[res$year=="2004", 'm1cv.R2'] <- print(sum.cv$r.squared)
res[res$year=="2004", 'm1cv.I'] <- print(sum.cv$coef[1,1])
res[res$year=="2004", 'm1cv.I.se'] <- print(sum.cv$coef[1,2])
res[res$year=="2004", 'm1cv.S'] <- print(sum.cv$coef[2,1])
res[res$year=="2004", 'm1cv.S.se'] <- print(sum.cv$coef[2,2])
#RMSPE
res[res$year=="2004", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
# BUG FIX: the original averaged `pred.m1` (the full-model, in-sample
# predictions) here, so the "CV" spatial diagnostics were not computed from
# the held-out predictions at all; use `pred.m1.cv` instead, consistent
# with the temporal block below.
spatial2004.cv<-m1.2004.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1.cv, na.rm=TRUE))
m1.fit.2004.cv.s <- lm(barpm ~ barpred, data=spatial2004.cv)
res[res$year=="2004", 'm1cv.R2.s'] <- print(summary(m1.fit.2004.cv.s)$r.squared)
res[res$year=="2004", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.s)))
#temporal
# Deviations from station means, held-out predicted vs observed.
tempo2004.cv<-left_join(m1.2004.cv,spatial2004.cv)
tempo2004.cv$delpm <-tempo2004.cv$PM25-tempo2004.cv$barpm
tempo2004.cv$delpred <-tempo2004.cv$pred.m1.cv-tempo2004.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2004.cv)
res[res$year=="2004", 'm1cv.R2.t'] <- print(summary(mod_temporal.cv)$r.squared)
#-------->>> loc stage
# Local (50 m land-use) second stage: model the stage-1 CV residual with
# smooth terms of local covariates, then add the correction back.
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2004.cv,stn)
setkey(luf,stn)
m1.2004.cv.loc <- merge(m1.2004.cv, luf, all.x = T)
m1.2004.cv.loc<-na.omit(m1.2004.cv.loc)
#create residual variable: what the stage-1 CV predictions failed to explain
m1.2004.cv.loc$res.m1<-m1.2004.cv.loc$PM25-m1.2004.cv.loc$pred.m1.cv
#The GAM model: smooth local land-use/terrain corrections to the residual
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2004.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg: combined prediction = stage-1 CV prediction + local GAM correction
m1.2004.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2004.cv.loc$pred.m1.both <- m1.2004.cv.loc$pred.m1.cv + m1.2004.cv.loc$pred.m1.loc
# Fit the calibration lm once and reuse it (the original refit it 5 times).
m1.fit.2004.cv.loc <- lm(PM25~pred.m1.both,data=m1.2004.cv.loc)
sum.loc <- summary(m1.fit.2004.cv.loc)
res[res$year=="2004", 'm1cv.loc.R2'] <- print(sum.loc$r.squared)
res[res$year=="2004", 'm1cv.loc.I'] <- print(sum.loc$coef[1,1])
res[res$year=="2004", 'm1cv.loc.I.se'] <- print(sum.loc$coef[1,2])
res[res$year=="2004", 'm1cv.loc.S'] <- print(sum.loc$coef[2,1])
res[res$year=="2004", 'm1cv.loc.S.se'] <- print(sum.loc$coef[2,2])
#RMSPE
# BUG FIX: the original reported rmse(residuals(m1.fit.2004.cv)) -- the
# model WITHOUT the local stage -- so 'm1cv.loc.PE' duplicated 'm1cv.PE'.
res[res$year=="2004", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2004.cv.loc)))
#spatial
# BUG FIX: station means must use the combined (stage-1 + local GAM)
# prediction `pred.m1.both`; the original averaged `pred.m1` (full-model,
# in-sample predictions), inconsistent with the temporal block below.
spatial2004.cv.loc<-m1.2004.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1.both, na.rm=TRUE))
m1.fit.2004.cv.loc.s <- lm(barpm ~ barpred, data=spatial2004.cv.loc)
res[res$year=="2004", 'm1cv.loc.R2.s'] <- print(summary(m1.fit.2004.cv.loc.s)$r.squared)
res[res$year=="2004", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.loc.s)))
#temporal
tempo2004.loc.cv<-left_join(m1.2004.cv.loc,spatial2004.cv.loc)
tempo2004.loc.cv$delpm <-tempo2004.loc.cv$PM25-tempo2004.loc.cv$barpm
tempo2004.loc.cv$delpred <-tempo2004.loc.cv$pred.m1.both-tempo2004.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2004.loc.cv)
res[res$year=="2004", 'm1cv.loc.R2.t'] <- print(summary(mod_temporal.loc.cv)$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res2004.m1.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.rds")
saveRDS(m1.2004.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.predCV.rds")
###############
#MOD2
###############
# Load the full prediction grid and standardize the same predictor set as
# in stage 1, in one pass instead of 21 near-identical scale() lines.
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
m2.scale.cols <- c("elev", "tden", "pden", "dist2A1", "dist2water",
                   "dist2rail", "Dist2road", "ndvi", "MeanPbl", "p_ind",
                   "p_for", "p_farm", "p_dos", "p_dev", "p_os", "tempa",
                   "WDa", "WSa", "RHa", "Raina", "NO2a")
for (v in m2.scale.cols) {
  m2.2004[, (paste0(v, ".s")) := scale(get(v))]
}
#generate predictions
# Apply the stage-1 mixed model to the full mod2 grid; re.form=NULL keeps
# all random effects, allow.new.levels handles grid cells/days unseen in fit.
m2.2004[, pred.m2 := predict(object=m1.fit.2004,newdata=m2.2004,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2004$pred.m2)
#delete impossible values (non-positive or implausibly large PM2.5)
m2.2004 <- m2.2004[pred.m2 > 0.00000000000001 , ]
m2.2004 <- m2.2004[pred.m2 < 500 , ]
saveRDS(m2.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
#map the predictions
#aggregate by guid
# Long-term mean prediction per AOD grid cell, carrying one lat/long per cell
m2_agg <- m2.2004[, list(LTPM.m2 = mean(pred.m2, na.rm = TRUE), lat_aod = lat_aod[1], long_aod = long_aod[1]), by = aodid]
#saveRDS(m2_agg, "/media/NAS/Uni/Projects/P046.Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/m2_agg_2004.rds")
#map the predictions
write.csv(m2_agg, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/m2.2004.LTPM.csv")
ggplot(m2_agg, aes(long_aod,lat_aod, color = LTPM.m2)) +
geom_point(size = 3, shape = 15) + xlab("longitude") + ylab("latitude") +
scale_colour_gradientn("long term PM2.5 prediction", colours = rainbow(5)) + theme_bw() + ggtitle("Long term predictions")
ggsave(file="/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM.2004.m2.png")
# keep() drops everything except res/rmse/splitdf before the next year's run
keep(res, rmse, splitdf, sure=TRUE)
gc()
| /Uni/Projects/code/P046.Israel_MAIAC/archive/testingpm35.r | no_license | zeltak/org | R | false | false | 15,351 | r | ###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
#sourcing
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/CV_splits.r")
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#-------------------->> RES TABLE
res <- matrix(nrow=9, ncol=45)
res <- data.frame(res)
colnames(res) <- c(
"year"
,"m1.R2","m1.PE","m1.R2.s","m1.R2.t","m1.PE.s" #full model
,"m1cv.R2","m1cv.I","m1cv.I.se","m1cv.S","m1cv.S.se","m1cv.PE","m1cv.R2.s","m1cv.R2.t","m1cv.PE.s" #mod1 CV
,"m1cv.loc.R2","m1cv.loc.I","m1cv.loc.I.se","m1cv.loc.S","m1cv.loc.S.se","m1cv.loc.PE","m1cv.loc.PE.s","m1cv.loc.R2.s","m1cv.loc.R2.t"#loc m1
,"mod2_R2" #mod2
,"m3.t31","m3.t33" #mod3 tests
,"mod3_pm_mod3","mod3_int"
,"mod3_int_SE","mod3_Slope","mod3_Slope SE","mod3_RMSPE"
,"mod3_spatial","mod3_temporal","mod3_RMSPE_spatial",
"mod3LPM_pm_mod3LPM","mod3LPM_int","mod3LPM_int_SE","mod3LPM_Slope",
"mod3LPM_Slope SE","mod3LPM_RMSPE","mod3LPM_spatial","mod3LPM_temporal","mod3LPM_RMSPE_spatial"
)
res$year <- c(2004:2011);
### import data
m1.2004 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.rds")
#bad aod?
#m1.2004<-m1.2004[ aod < 0.3 ]
### subset to aqua and apply alexei cleaning methods
#MaskAdjacency == "000"
#m1.2004<-m1.2004[ UN > 0 & UN < 0.04]
################# clean BAD STN PM25 and check if improved model?
raWDaf <- ddply(m1.2004, c( "stn"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.05]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
ugrid <-m1.2004 %>%
group_by(stn) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
setkey(raWDaf,stn)
setkey(ugrid,stn)
raw2 <- merge(raWDaf, ugrid, by.x = "stn")
write.csv(raw2,"/home/zeltak/ZH_tmp/rawdf2004.csv")
#check aod
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
aodagg <-m2.2004 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE), meanaod=mean(aod,na.rm=TRUE))
write.csv(aodagg,"/home/zeltak/ZH_tmp/aodagg2004.csv")
################# clean BAD STN PM25 and check if improved model?
raWDaf <- ddply(m1.2004, c("stn","m"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.05]
bad[,badid := paste(stn,m,sep="-")]
#################BAD STN
m1.2004[,badid := paste(stn,m,sep="-")]
####Take out bad stations
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
################# region
# Flag regions where the per-region PM2.5~AOD calibration is poor (R2 < 0.05).
raWDaf <- ddply(m1.2004, c("metreg"),
                function(x) {
                  mod1 <- lm(PM25 ~ aod, data = x)
                  data.frame(R2 = round(summary(mod1)$r.squared, 5),
                             nsamps = length(summary(mod1)$resid))
                })
raWDaf
raWDaf <- as.data.table(raWDaf)
bad <- raWDaf[R2 < 0.05]
# BUG FIX: this pass groups by `metreg`, but the original built `badid`
# from `stn` and `season` -- columns that do not exist in the per-region
# summary (copy-paste leftover from the station/season pass above), so the
# region filter could not match what it intended to. Key on the region id.
bad[, badid := as.character(metreg)]
#################BAD regions
m1.2004[, badid := as.character(metreg)]
####Take out bad regions
m1.2004 <- m1.2004[!(m1.2004$badid %in% bad$badid), ]
m1.2004[,elev.s:= scale(elev)]
m1.2004[,tden.s:= scale(tden)]
m1.2004[,pden.s:= scale(pden)]
m1.2004[,dist2A1.s:= scale(dist2A1)]
m1.2004[,dist2water.s:= scale(dist2water)]
m1.2004[,dist2rail.s:= scale(dist2rail)]
m1.2004[,Dist2road.s:= scale(Dist2road)]
m1.2004[,ndvi.s:= scale(ndvi)]
m1.2004[,MeanPbl.s:= scale(MeanPbl)]
m1.2004[,p_ind.s:= scale(p_ind)]
m1.2004[,p_for.s:= scale(p_for)]
m1.2004[,p_farm.s:= scale(p_farm)]
m1.2004[,p_dos.s:= scale(p_dos)]
m1.2004[,p_dev.s:= scale(p_dev)]
m1.2004[,p_os.s:= scale(p_os)]
m1.2004[,tempa.s:= scale(tempa)]
m1.2004[,WDa.s:= scale(WDa)]
m1.2004[,WSa.s:= scale(WSa)]
m1.2004[,RHa.s:= scale(RHa)]
m1.2004[,Raina.s:= scale(Raina)]
m1.2004[,NO2a.s:= scale(NO2a)]
m1.formula <- as.formula(PM25~ aod
+tempa.s+WDa.s+WSa.s+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s #+p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)+(1|stn)) #0.812
#full fit
m1.fit.2004 <- lmer(m1.formula,data=m1.2004,weights=normwt)
m1.2004$pred.m1 <- predict(m1.fit.2004)
print(summary(lm(PM25~pred.m1,data=m1.2004))$r.squared)
#RMSPE
print(rmse(residuals(m1.fit.2004)))
#spatial
###to check
spatial2004<-m1.2004 %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.spat<- lm(barpm ~ barpred, data=spatial2004)
res[res$year=="2004", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004))$r.squared)
res[res$year=="2004", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2004.spat)))
#temporal
tempo2004<-left_join(m1.2004,spatial2004)
tempo2004$delpm <-tempo2004$PM25-tempo2004$barpm
tempo2004$delpred <-tempo2004$pred.m1-tempo2004$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2004)
res[res$year=="2004", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004))$r.squared)
saveRDS(m1.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.pred.rds")
#---------------->>>> CV
#s1
splits_s1 <- splitdf(m1.2004)
test_s1 <- splits_s1$testset
train_s1 <- splits_s1$trainset
out_train_s1 <- lmer(m1.formula,data = train_s1,weights=normwt)
test_s1$pred.m1.cv <- predict(object=out_train_s1 ,newdata=test_s1,allow.new.levels=TRUE,re.form=NULL )
test_s1$iter<-"s1"
#s2
splits_s2 <- splitdf(m1.2004)
test_s2 <- splits_s2$testset
train_s2 <- splits_s2$trainset
out_train_s2 <- lmer(m1.formula,data = train_s2,weights=normwt)
test_s2$pred.m1.cv <- predict(object=out_train_s2 ,newdata=test_s2,allow.new.levels=TRUE,re.form=NULL )
test_s2$iter<-"s2"
#s3
splits_s3 <- splitdf(m1.2004)
test_s3 <- splits_s3$testset
train_s3 <- splits_s3$trainset
out_train_s3 <- lmer(m1.formula,data = train_s3,weights=normwt)
test_s3$pred.m1.cv <- predict(object=out_train_s3 ,newdata=test_s3,allow.new.levels=TRUE,re.form=NULL )
test_s3$iter<-"s3"
#s4
splits_s4 <- splitdf(m1.2004)
test_s4 <- splits_s4$testset
train_s4 <- splits_s4$trainset
out_train_s4 <- lmer(m1.formula,data = train_s4,weights=normwt)
test_s4$pred.m1.cv <- predict(object=out_train_s4 ,newdata=test_s4,allow.new.levels=TRUE,re.form=NULL )
test_s4$iter<-"s4"
#s5
splits_s5 <- splitdf(m1.2004)
test_s5 <- splits_s5$testset
train_s5 <- splits_s5$trainset
out_train_s5 <- lmer(m1.formula,data = train_s5,weights=normwt)
test_s5$pred.m1.cv <- predict(object=out_train_s5 ,newdata=test_s5,allow.new.levels=TRUE,re.form=NULL )
test_s5$iter<-"s5"
#s6
splits_s6 <- splitdf(m1.2004)
test_s6 <- splits_s6$testset
train_s6 <- splits_s6$trainset
out_train_s6 <- lmer(m1.formula,data = train_s6,weights=normwt)
test_s6$pred.m1.cv <- predict(object=out_train_s6 ,newdata=test_s6,allow.new.levels=TRUE,re.form=NULL )
test_s6$iter<-"s6"
#s7
splits_s7 <- splitdf(m1.2004)
test_s7 <- splits_s7$testset
train_s7 <- splits_s7$trainset
out_train_s7 <- lmer(m1.formula,data = train_s7,weights=normwt)
test_s7$pred.m1.cv <- predict(object=out_train_s7 ,newdata=test_s7,allow.new.levels=TRUE,re.form=NULL )
test_s7$iter<-"s7"
#s8
splits_s8 <- splitdf(m1.2004)
test_s8 <- splits_s8$testset
train_s8 <- splits_s8$trainset
out_train_s8 <- lmer(m1.formula,data = train_s8,weights=normwt)
test_s8$pred.m1.cv <- predict(object=out_train_s8 ,newdata=test_s8,allow.new.levels=TRUE,re.form=NULL )
test_s8$iter<-"s8"
#s9
splits_s9 <- splitdf(m1.2004)
test_s9 <- splits_s9$testset
train_s9 <- splits_s9$trainset
out_train_s9 <- lmer(m1.formula,data = train_s9,weights=normwt)
test_s9$pred.m1.cv <- predict(object=out_train_s9 ,newdata=test_s9,allow.new.levels=TRUE,re.form=NULL )
test_s9$iter<-"s9"
#s10
splits_s10 <- splitdf(m1.2004)
test_s10 <- splits_s10$testset
train_s10 <- splits_s10$trainset
out_train_s10 <- lmer(m1.formula,data = train_s10,weights=normwt)
test_s10$pred.m1.cv <- predict(object=out_train_s10 ,newdata=test_s10,allow.new.levels=TRUE,re.form=NULL )
test_s10$iter<-"s10"
#BIND 1 dataset
m1.2004.cv<- data.table(rbind(test_s1,test_s2,test_s3,test_s4,test_s5,test_s6,test_s7,test_s8,test_s9, test_s10))
# cleanup (remove from WS) objects from CV
rm(list = ls(pattern = "train_|test_"))
#table updates
m1.fit.2004.cv<-lm(PM25~pred.m1.cv,data=m1.2004.cv)
res[res$year=="2004", 'm1cv.R2'] <- print(summary(lm(PM25~pred.m1.cv,data=m1.2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.I'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2004.cv))$coef[1,1])
res[res$year=="2004", 'm1cv.I.se'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2004.cv))$coef[1,2])
res[res$year=="2004", 'm1cv.S'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2004.cv))$coef[2,1])
res[res$year=="2004", 'm1cv.S.se'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2004.cv))$coef[2,2])
#RMSPE
res[res$year=="2004", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
spatial2004.cv<-m1.2004.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.s <- lm(barpm ~ barpred, data=spatial2004.cv)
res[res$year=="2004", 'm1cv.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv))$r.squared)
res[res$year=="2004", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.s)))
#temporal
tempo2004.cv<-left_join(m1.2004.cv,spatial2004.cv)
tempo2004.cv$delpm <-tempo2004.cv$PM25-tempo2004.cv$barpm
tempo2004.cv$delpred <-tempo2004.cv$pred.m1.cv-tempo2004.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2004.cv)
res[res$year=="2004", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.cv))$r.squared)
#-------->>> loc stage
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2004.cv,stn)
setkey(luf,stn)
m1.2004.cv.loc <- merge(m1.2004.cv, luf, all.x = T)
m1.2004.cv.loc<-na.omit(m1.2004.cv.loc)
#create residual mp3 variable
m1.2004.cv.loc$res.m1<-m1.2004.cv.loc$PM25-m1.2004.cv.loc$pred.m1.cv
#The GAM model
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2004.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2004.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2004.cv.loc$pred.m1.both <- m1.2004.cv.loc$pred.m1.cv + m1.2004.cv.loc$pred.m1.loc
res[res$year=="2004", 'm1cv.loc.R2'] <- print(summary(lm(PM25~pred.m1.both,data=m1.2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.I'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2004.cv.loc))$coef[1,1])
res[res$year=="2004", 'm1cv.loc.I.se'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2004.cv.loc))$coef[1,2])
res[res$year=="2004", 'm1cv.loc.S'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2004.cv.loc))$coef[2,1])
res[res$year=="2004", 'm1cv.loc.S.se'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2004.cv.loc))$coef[2,2])
#RMSPE
res[res$year=="2004", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2004.cv)))
#spatial
spatial2004.cv.loc<-m1.2004.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2004.cv.loc.s <- lm(barpm ~ barpred, data=spatial2004.cv.loc)
res[res$year=="2004", 'm1cv.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2004.cv.loc))$r.squared)
res[res$year=="2004", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2004.cv.loc.s)))
#temporal
tempo2004.loc.cv<-left_join(m1.2004.cv.loc,spatial2004.cv.loc)
tempo2004.loc.cv$delpm <-tempo2004.loc.cv$PM25-tempo2004.loc.cv$barpm
tempo2004.loc.cv$delpred <-tempo2004.loc.cv$pred.m1.both-tempo2004.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2004.loc.cv)
res[res$year=="2004", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2004.loc.cv))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res2004.m1.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.rds")
saveRDS(m1.2004.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2004.predCV.rds")
###############
#MOD2
###############
m2.2004<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.rds")
m2.2004[,elev.s:= scale(elev)]
m2.2004[,tden.s:= scale(tden)]
m2.2004[,pden.s:= scale(pden)]
m2.2004[,dist2A1.s:= scale(dist2A1)]
m2.2004[,dist2water.s:= scale(dist2water)]
m2.2004[,dist2rail.s:= scale(dist2rail)]
m2.2004[,Dist2road.s:= scale(Dist2road)]
m2.2004[,ndvi.s:= scale(ndvi)]
m2.2004[,MeanPbl.s:= scale(MeanPbl)]
m2.2004[,p_ind.s:= scale(p_ind)]
m2.2004[,p_for.s:= scale(p_for)]
m2.2004[,p_farm.s:= scale(p_farm)]
m2.2004[,p_dos.s:= scale(p_dos)]
m2.2004[,p_dev.s:= scale(p_dev)]
m2.2004[,p_os.s:= scale(p_os)]
m2.2004[,tempa.s:= scale(tempa)]
m2.2004[,WDa.s:= scale(WDa)]
m2.2004[,WSa.s:= scale(WSa)]
m2.2004[,RHa.s:= scale(RHa)]
m2.2004[,Raina.s:= scale(Raina)]
m2.2004[,NO2a.s:= scale(NO2a)]
#generate predictions
m2.2004[, pred.m2 := predict(object=m1.fit.2004,newdata=m2.2004,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2004$pred.m2)
#delete impossible values
m2.2004 <- m2.2004[pred.m2 > 0.00000000000001 , ]
m2.2004 <- m2.2004[pred.m2 < 500 , ]
saveRDS(m2.2004,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2004.pred2.rds")
#map the predictions
#aggregate by guid
m2_agg <- m2.2004[, list(LTPM.m2 = mean(pred.m2, na.rm = TRUE), lat_aod = lat_aod[1], long_aod = long_aod[1]), by = aodid]
#saveRDS(m2_agg, "/media/NAS/Uni/Projects/P046.Israel_MAIAC/3.Work/2.Gather_data/FN008_model_prep/m2_agg_2004.rds")
#map the predictions
write.csv(m2_agg, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/m2.2004.LTPM.csv")
ggplot(m2_agg, aes(long_aod,lat_aod, color = LTPM.m2)) +
geom_point(size = 3, shape = 15) + xlab("longitude") + ylab("latitude") +
scale_colour_gradientn("long term PM2.5 prediction", colours = rainbow(5)) + theme_bw() + ggtitle("Long term predictions")
ggsave(file="/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM.2004.m2.png")
keep(res, rmse, splitdf, sure=TRUE)
gc()
|
#'Compute estimate of seasonal mean of Atlantic hurricane activity
#'
#'Compute one of G. Villarini's statistically downscaled measure of mean
#'Atlantic hurricane activity and its variance. The hurricane activity is
#'estimated using seasonal averages of sea surface temperature anomalies over
#'the tropical Atlantic (bounded by 10N-25N and 80W-20W) and the tropics at
#'large (bounded by 30N-30S). The anomalies are for the JJASON season.\cr
#'The estimated seasonal average is either 1) number of hurricanes, 2) number
#'of tropical cyclones with lifetime >=48h or 3) power dissipation index
#'(PDI; in 10^11 m^3 s^{-2}).\cr
#'The statistical models used in this function are described in\cr
#'
#'@param atlano Array of Atlantic sea surface temperature anomalies.
#' Must have the same dimension as tropano.
#'@param tropano Array of tropical sea surface temperature anomalies.
#' Must have the same dimension as atlano.
#'@param hrvar The seasonal average to be estimated. The options are either\cr
#' "HR" (hurricanes) \cr
#' "TC" (tropical cyclones with lifetime >=48h) \cr
#' "PDI" (power dissipation index) \cr
#'
#'@return A list composed of two matrices:\cr
#'\enumerate{
#' \item{
#' A matrix (mean) with the seasonal average values of the desired quantity.\cr
#' }
#' \item{
#' A matrix (var) of the variance of that quantity.\cr
#' }
#'}
#'The dimensions of the two matrices are the same as the dimensions of
#' atlano/tropano.
#'
#'@keywords datagen
#'@references
#'Villarini et al. (2010) Mon Wea Rev, 138, 2681-2705.\cr
#'Villarini et al. (2012) Mon Wea Rev, 140, 44-65.\cr
#'Villarini et al. (2012) J Clim, 25, 625-637.\cr
#'An example of how the function can be used in hurricane forecast studies
#' is given in\cr
#'Caron, L.-P. et al. (2014) Multi-year prediction skill of Atlantic hurricane
#' activity in CMIP5 decadal hindcasts. Climate Dynamics, 42, 2675-2690.
#' doi:10.1007/s00382-013-1773-1.
#'@author History:\cr
#'0.1 - 2015-11 (Louis-Philippe Caron) - Original code
#'@examples
#'# Let AtlAno represents 5 different 5-year forecasts of seasonally averaged
#'# Atlantic sea surface temperature anomalies.
#'AtlAno <- matrix(c(-0.31, -0.36, 0.26, -0.16, -0.16,
#' -0.06, -0.22, -0.31, -0.36, -0.39,
#' 0.20, -0.14, 0.12, 0.22, 0.02,
#' -0.28, 0.26, -0.10, 0.18, 0.33,
#' 0.45, 0.46, 0.04, 0.12, 0.21),
#' nrow = 5, ncol = 5)
#'# Let TropAno represents 5 corresponding 5-year forecasts of seasonally averaged
#'# tropical sea surface temperature anomalies.
#'TropAno <- matrix(c(-0.22, -.13, 0.07, -0.16, -0.15,
#' 0.00, -0.03, -0.22, -0.13, -0.10,
#' 0.07, -0.07, 0.17, 0.10, -0.15,
#' -0.01, 0.08, 0.07, 0.17, 0.13,
#' 0.16, 0.15, -0.09, 0.03, 0.27),
#' nrow = 5, ncol = 5)
#'# The seasonal average of hurricanes for each of the five forecasted years,
#'# for each forecast, would then be given by
#'hr_count <- StatSeasAtlHurr(atlano = AtlAno,
#' tropano = TropAno,
#' hrvar = 'HR')
#'print(hr_count$mean)
#'
#'@export
StatSeasAtlHurr <- function(atlano = NULL, tropano = NULL, hrvar = "HR") {
  # Verify that the requested measure is one of HR, TC or PDI.
  # ----------------------------------------------------------
  if (!hrvar %in% c("HR", "TC", "PDI")) {
    stop("Hurricane variable not recognized.")
  }
  # Verify that both Atl and Trop SSTA are present.
  # -----------------------------------------------
  if (is.null(atlano)) {
    stop("Atlantic SST missing.")
  }
  if (is.null(tropano)) {
    stop("Tropical SST missing.")
  }
  # Verify that Atl and Trop SSTA have identical shapes.
  # ----------------------------------------------------
  if (length(dim(atlano)) != length(dim(tropano))) {
    stop("Input arrays are of different dimensions.")
  }
  if (!all(dim(atlano) == dim(tropano))) {
    stop("Input arrays are of different sizes.")
  }
  # Regression coefficients for the chosen activity measure:
  # HR : Villarini et al. (2012) MWR 140, 44-65 (corrected hurricane data
  #      + ERSST, SBC criteria, table 2).
  # TC : Villarini et al. (2010) MWR 138, 2681-2705 (corrected TC data,
  #      lifetime >= 48 h, + ERSST, table 5).
  # PDI: Villarini et al. (2012) J Clim 25, 625-637 (ERSST, SBC penalty
  #      criterion, table 1).
  betas <- switch(hrvar,
                  HR  = c(1.85, 1.05, -1.17),
                  TC  = c(2.1, 1.02, -1.05),
                  PDI = c(0.76, 1.94, -1.78))
  beta0 <- betas[1]
  betaAtl <- betas[2]
  betaTrop <- betas[3]
  # Statistical relationship b/w SSTAs and mean hurricane activity:
  # log(mean) is linear in the two anomaly fields, so the mean is exp() of
  # the linear predictor. exp() is vectorized, so there is no need to
  # vapply() over every element as the original did.
  temp <- beta0 + betaAtl * atlano + betaTrop * tropano
  statval <- list(mean = array(NA, dim(atlano)),
                  var = array(NA, dim(atlano)))
  statval$mean[] <- exp(temp)
  # Variance of the distribution: TC and HR follow a Poisson distribution,
  # so the variance equals the mean. PDI follows a gamma distribution with
  # sigma = -0.57 (variance = sigma^2 * mean^2).
  # ----------------------------------------------------------------------
  if (hrvar %in% c("HR", "TC")) {
    statval$var <- statval$mean
  } else {
    sigma <- -0.57
    statval$var[] <- sigma^2 * statval$mean^2
  }
  # Output: list(mean, var), each shaped like atlano/tropano.
  statval
}
| /R/StatSeasAtlHurr.R | no_license | cran/s2dverification | R | false | false | 6,110 | r | #'Compute estimate of seasonal mean of Atlantic hurricane activity
#'
#'Compute one of G. Villarini's statistically downscaled measure of mean
#'Atlantic hurricane activity and its variance. The hurricane activity is
#'estimated using seasonal averages of sea surface temperature anomalies over
#'the tropical Atlantic (bounded by 10N-25N and 80W-20W) and the tropics at
#'large (bounded by 30N-30S). The anomalies are for the JJASON season.\cr
#'The estimated seasonal average is either 1) number of hurricanes, 2) number
#'of tropical cyclones with lifetime >=48h or 3) power dissipation index
#'(PDI; in 10^11 m^3 s^{-2}).\cr
#'The statistical models used in this function are described in\cr
#'
#'@param atlano Array of Atlantic sea surface temperature anomalies.
#' Must have the same dimension as tropano.
#'@param tropano Array of tropical sea surface temperature anomalies.
#' Must have the same dimension as atlano.
#'@param hrvar The seasonal average to be estimated. The options are either\cr
#' "HR" (hurricanes) \cr
#' "TC" (tropical cyclones with lifetime >=48h) \cr
#' "PDI" (power dissipation index) \cr
#'
#'@return A list composed of two matrices:\cr
#'\enumerate{
#' \item{
#' A matrix (mean) with the seasonal average values of the desired quantity.\cr
#' }
#' \item{
#' A matrix (var) of the variance of that quantity.\cr
#' }
#'}
#'The dimensions of the two matrices are the same as the dimensions of
#' atlano/tropano.
#'
#'@keywords datagen
#'@references
#'Villarini et al. (2010) Mon Wea Rev, 138, 2681-2705.\cr
#'Villarini et al. (2012) Mon Wea Rev, 140, 44-65.\cr
#'Villarini et al. (2012) J Clim, 25, 625-637.\cr
#'An example of how the function can be used in hurricane forecast studies
#' is given in\cr
#'Caron, L.-P. et al. (2014) Multi-year prediction skill of Atlantic hurricane
#' activity in CMIP5 decadal hindcasts. Climate Dynamics, 42, 2675-2690.
#' doi:10.1007/s00382-013-1773-1.
#'@author History:\cr
#'0.1 - 2015-11 (Louis-Philippe Caron) - Original code
#'@examples
#'# Let AtlAno represents 5 different 5-year forecasts of seasonally averaged
#'# Atlantic sea surface temperature anomalies.
#'AtlAno <- matrix(c(-0.31, -0.36, 0.26, -0.16, -0.16,
#' -0.06, -0.22, -0.31, -0.36, -0.39,
#' 0.20, -0.14, 0.12, 0.22, 0.02,
#' -0.28, 0.26, -0.10, 0.18, 0.33,
#' 0.45, 0.46, 0.04, 0.12, 0.21),
#' nrow = 5, ncol = 5)
#'# Let TropAno represents 5 corresponding 5-year forecasts of seasonally averaged
#'# tropical sea surface temperature anomalies.
#'TropAno <- matrix(c(-0.22, -.13, 0.07, -0.16, -0.15,
#' 0.00, -0.03, -0.22, -0.13, -0.10,
#' 0.07, -0.07, 0.17, 0.10, -0.15,
#' -0.01, 0.08, 0.07, 0.17, 0.13,
#' 0.16, 0.15, -0.09, 0.03, 0.27),
#' nrow = 5, ncol = 5)
#'# The seasonal average of hurricanes for each of the five forecasted years,
#'# for each forecast, would then be given by
#'hr_count <- StatSeasAtlHurr(atlano = AtlAno,
#' tropano = TropAno,
#' hrvar = 'HR')
#'print(hr_count$mean)
#'
#'@export
StatSeasAtlHurr <- function(atlano = NULL, tropano = NULL, hrvar = "HR") {
  # Verify that the requested measure is one of HR, TC or PDI.
  # ----------------------------------------------------------
  if (!hrvar %in% c("HR", "TC", "PDI")) {
    stop("Hurricane variable not recognized.")
  }
  # Verify that both Atl and Trop SSTA are present.
  # -----------------------------------------------
  if (is.null(atlano)) {
    stop("Atlantic SST missing.")
  }
  if (is.null(tropano)) {
    stop("Tropical SST missing.")
  }
  # Verify that Atl and Trop SSTA have identical shapes.
  # ----------------------------------------------------
  if (length(dim(atlano)) != length(dim(tropano))) {
    stop("Input arrays are of different dimensions.")
  }
  if (!all(dim(atlano) == dim(tropano))) {
    stop("Input arrays are of different sizes.")
  }
  # Regression coefficients for the chosen activity measure:
  # HR : Villarini et al. (2012) MWR 140, 44-65 (corrected hurricane data
  #      + ERSST, SBC criteria, table 2).
  # TC : Villarini et al. (2010) MWR 138, 2681-2705 (corrected TC data,
  #      lifetime >= 48 h, + ERSST, table 5).
  # PDI: Villarini et al. (2012) J Clim 25, 625-637 (ERSST, SBC penalty
  #      criterion, table 1).
  betas <- switch(hrvar,
                  HR  = c(1.85, 1.05, -1.17),
                  TC  = c(2.1, 1.02, -1.05),
                  PDI = c(0.76, 1.94, -1.78))
  beta0 <- betas[1]
  betaAtl <- betas[2]
  betaTrop <- betas[3]
  # Statistical relationship b/w SSTAs and mean hurricane activity:
  # log(mean) is linear in the two anomaly fields, so the mean is exp() of
  # the linear predictor. exp() is vectorized, so there is no need to
  # vapply() over every element as the original did.
  temp <- beta0 + betaAtl * atlano + betaTrop * tropano
  statval <- list(mean = array(NA, dim(atlano)),
                  var = array(NA, dim(atlano)))
  statval$mean[] <- exp(temp)
  # Variance of the distribution: TC and HR follow a Poisson distribution,
  # so the variance equals the mean. PDI follows a gamma distribution with
  # sigma = -0.57 (variance = sigma^2 * mean^2).
  # ----------------------------------------------------------------------
  if (hrvar %in% c("HR", "TC")) {
    statval$var <- statval$mean
  } else {
    sigma <- -0.57
    statval$var[] <- sigma^2 * statval$mean^2
  }
  # Output: list(mean, var), each shaped like atlano/tropano.
  statval
}
|
#example1
# Return the ten smallest values of a numeric vector.
# replicate() re-sorts the input five times purely as CPU busy-work so
# that the sequential-vs-parallel timing comparisons below have
# measurable cost; its result is discarded.
mysort <- function(x) {
  replicate(5, sort(x))
  ordered <- sort(x)
  ordered[1:10]
}
# 100 x 100,000 matrix of standard-normal draws (1e7 values): the
# workload for the timing comparison.
M = matrix(rnorm(10000000), 100, 100000)
print('sequence run:')
# Sequential baseline: apply mysort to every column and time it.
print(system.time(x<-apply(M, 2, mysort)))
#example2
#install.packages("snowfall")
library("snow")
library("snowfall")
library("parallel")
# Worker for the snowfall timing demo; same contract as example1's
# mysort: sort a vector and return its ten smallest values.
mysort <- function(x) {
  # Busy-work: repeat the sort five times, discarding the results,
  # so each parallel task carries measurable CPU cost.
  for (i in seq_len(5)) sort(x)
  sort(x)[1:10]
}
# Same 1e7-value workload as example1.
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
# Start a snowfall cluster with one worker per detected core.
sfInit(parallel=TRUE, cpus=cpus)
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
# Parallel counterpart of apply(): distribute columns across workers.
print(system.time(x<-sfApply(M, 2, mysort)))
# Shut the cluster down again.
sfStop()
#example3
# As example1's mysort, but also emits the global `msg` via message().
# Works when run sequentially because `msg` is visible in the global
# environment of the calling process.
mysort <- function(x) {
  replicate(5, sort(x))  # timing ballast only; result discarded
  message(msg)
  sort(x)[1:10]
}
M = matrix(rnorm(10000000), 100, 100000)
# Global that mysort's message(msg) call reads.
msg ="Hello parallel";
print('sequence run:')
# Sequential run: mysort can see the global `msg` directly.
print(system.time(x<-apply(M, 2, mysort)))
#example4
# Same worker as example3: sort plus a message(msg) side effect.
# On snowfall workers message(msg) needs `msg` to exist there —
# compare example5, which adds sfExport("msg").
mysort <- function(x) {
  smallest <- sort(x)
  replicate(5, sort(x))  # extra sorts: timing ballast only
  message(msg)
  smallest[1:10]
}
M = matrix(rnorm(10000000), 100, 100000)
# `msg` is assigned only in the master process here; unlike example5
# it is never sfExport()ed to the workers — presumably this example
# demonstrates that un-exported globals are not visible on workers.
msg ="Hello parallel";
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example5
# Worker for example5: identical to example4's version. The global
# `msg` is exported to the workers via sfExport("msg") in the driver.
mysort <- function(x) {
  replicate(5, sort(x))  # busy-work; result discarded
  message(msg)
  n_keep <- 10
  sort(x)[seq_len(n_keep)]
}
M = matrix(rnorm(10000000), 100, 100000)
msg ="Hello parallel";
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
# Copy the global `msg` to every worker so message(msg) succeeds there.
sfExport("msg");
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example6
library("taRifx")
# Worker for example6: as before, plus sorting a tiny data.frame with
# taRifx::sort.data.frame (result discarded; called only for timing).
mysort <- function(x) {
  replicate(5, sort(x))  # busy-work on the incoming column
  # NOTE(review): the parameter `x` is shadowed here, so the value
  # returned below is always sort(c(1, 2, 3))[1:5] = c(1, 2, 3, NA, NA),
  # regardless of the input column.
  x <- c(1, 2, 3)
  y <- c(2, 3, 1)
  fm <- data.frame(x, y)
  sort.data.frame(fm, formula = ~y)
  sort(x)[1:5]
}
# Much smaller 10 x 10 workload for this sequential taRifx example.
M = matrix(rnorm(100), 10, 10)
print('sequence run:')
print(system.time(x<-apply(M, 2, mysort)))
#example7
library("taRifx")
# Worker for example7: same as example6's version but returns ten
# (padded) values. sort.data.frame comes from taRifx; on snowfall
# workers it is only available if the package is loaded there —
# compare example8, which calls sfLibrary(taRifx).
mysort <- function(x) {
  replicate(5, sort(x))
  # NOTE(review): `x` is shadowed below; the return value is constant.
  x <- c(1, 2, 3)
  y <- c(2, 3, 1)
  fm <- data.frame(x, y)
  sort.data.frame(fm, formula = ~y)  # result discarded; timing only
  sort(x)[1:10]
}
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
# NOTE(review): taRifx is loaded only on the master here — unlike
# example8, no sfLibrary(taRifx) call precedes the parallel apply.
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example8
library("taRifx")
# Worker for example8: identical to example7's version. The driver
# loads taRifx on each worker with sfLibrary(taRifx) so that
# sort.data.frame resolves there.
mysort <- function(x) {
  replicate(5, sort(x))  # busy-work; result discarded
  # NOTE(review): `x` is shadowed below; the return value is constant.
  x <- c(1, 2, 3)
  y <- c(2, 3, 1)
  fm <- data.frame(x, y)
  sort.data.frame(fm, formula = ~y)
  sort(x)[1:10]
}
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
# Load taRifx on every worker so sort.data.frame is available there.
sfLibrary(taRifx);
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop() | /Code/parallel/example1_20161102103814.R | no_license | jasonyaopku/BigDataProgramming | R | false | false | 2,657 | r | #example1
# Duplicate copy of examples 1-2 (the source repeats the whole script).
# Example 1: sequential baseline — time a column-wise sort.
mysort <- function(x){
replicate(5, sort(x))
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
print('sequence run:')
print(system.time(x<-apply(M, 2, mysort)))
#example2
#install.packages("snowfall")
library("snow")
library("snowfall")
library("parallel")
# Example 2: the same workload run in parallel via snowfall's sfApply.
mysort <- function(x)
{
replicate(5, sort(x))
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example3
# Duplicate copy of examples 3-5 (the source repeats the whole script).
# Example 3: mysort additionally message()s the global `msg`; run
# sequentially, where the global is directly visible.
mysort <- function(x){
replicate(5, sort(x))
message(msg);
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
msg ="Hello parallel";
print('sequence run:')
print(system.time(x<-apply(M, 2, mysort)))
#example4
# Example 4: same worker run in parallel WITHOUT exporting `msg`
# (compare example5, which adds sfExport).
mysort <- function(x)
{
replicate(5, sort(x))
message(msg);
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
msg ="Hello parallel";
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example5
# Example 5: as example4, but sfExport("msg") copies the global to each
# worker before the parallel apply.
mysort <- function(x)
{
replicate(5, sort(x))
message(msg);
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
msg ="Hello parallel";
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
sfExport("msg");
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example6
library("taRifx")
# Duplicate copy of examples 6-8 (the source repeats the whole script).
# Example 6: mysort also builds and sorts a tiny data.frame with
# taRifx::sort.data.frame; run sequentially on a small 10 x 10 matrix.
# NOTE(review): `x` is shadowed inside mysort, so its return value is
# constant (sort(c(1, 2, 3))[1:5]).
mysort <- function(x){
replicate(5, sort(x))
x=c(1,2,3); y=c(2,3,1); fm = data.frame(x,y);
sort.data.frame(fm,formula=~y);
return(sort(x)[1:5])
}
M = matrix(rnorm(100), 10, 10)
print('sequence run:')
print(system.time(x<-apply(M, 2, mysort)))
#example7
library("taRifx")
# Example 7: same worker in parallel WITHOUT sfLibrary(taRifx)
# (compare example8, which loads the package on the workers).
mysort <- function(x){
replicate(5, sort(x))
x=c(1,2,3); y=c(2,3,1); fm = data.frame(x,y);
sort.data.frame(fm,formula=~y);
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop()
#example8
library("taRifx")
# Example 8: sfLibrary(taRifx) makes sort.data.frame available on each
# snowfall worker before the parallel apply.
mysort <- function(x){
replicate(5, sort(x))
x=c(1,2,3); y=c(2,3,1); fm = data.frame(x,y);
sort.data.frame(fm,formula=~y);
return(sort(x)[1:10])
}
M = matrix(rnorm(10000000), 100, 100000)
cpus= detectCores ();
sfInit(parallel=TRUE, cpus=cpus)
sfLibrary(taRifx);
print(sprintf('%s cpus to be used', sfCpus()))
print('parallel time cost:')
print(system.time(x<-sfApply(M, 2, mysort)))
sfStop() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CumSumSuite.R
\name{cumsumAtSamplingLocation}
\alias{cumsumAtSamplingLocation}
\alias{cumsum_at_point}
\title{Cumulative sum at coordinates}
\usage{
cumsumAtSamplingLocation(indivraster, Lat, Lon)
}
\arguments{
\item{indivraster}{RasterLayer representing normalized probability of origin surface}
\item{Lat}{Integer latitude}
\item{Lon}{Integer longitude}
}
\description{
Function estimates cumulative sum of all values in a surface below the value at a specified longitude and latitude.
}
\examples{
# Generate example probability surface.
myiso <- raster::rasterFromXYZ(isoscape)
myiso_sd <- raster::rasterFromXYZ(isoscape_sd)
exampleSurface <- isotopeAssignmentModel(
ID = "A",
isotopeValue = -100,
SD_indv = 5,
precip_raster = myiso,
precip_SD_raster = myiso_sd,
nClusters = FALSE
)
# Calculate the cumulative sum at a specific point.
set.seed(1)
x <- sample( which( !is.na(exampleSurface[]) ), size = 1)
pt <- raster::xyFromCell(exampleSurface, x)
cumsumAtSamplingLocation(indivraster = exampleSurface, Lat = pt[2], Lon = pt[1])
}
\seealso{
\code{\link{makecumsumSurface}}
}
| /man/cumsumAtSamplingLocation.Rd | no_license | cjcampbell/isocat | R | false | true | 1,206 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CumSumSuite.R
\name{cumsumAtSamplingLocation}
\alias{cumsumAtSamplingLocation}
\alias{cumsum_at_point}
\title{Cumulative sum at coordinates}
\usage{
cumsumAtSamplingLocation(indivraster, Lat, Lon)
}
\arguments{
\item{indivraster}{RasterLayer representing normalized probability of origin surface}
\item{Lat}{Integer latitude}
\item{Lon}{Integer longitude}
}
\description{
Function estimates cumulative sum of all values in a surface below the value at a specified longitude and latitude.
}
\examples{
# Generate example probability surface.
myiso <- raster::rasterFromXYZ(isoscape)
myiso_sd <- raster::rasterFromXYZ(isoscape_sd)
exampleSurface <- isotopeAssignmentModel(
ID = "A",
isotopeValue = -100,
SD_indv = 5,
precip_raster = myiso,
precip_SD_raster = myiso_sd,
nClusters = FALSE
)
# Calculate the cumulative sum at a specific point.
set.seed(1)
x <- sample( which( !is.na(exampleSurface[]) ), size = 1)
pt <- raster::xyFromCell(exampleSurface, x)
cumsumAtSamplingLocation(indivraster = exampleSurface, Lat = pt[2], Lon = pt[1])
}
\seealso{
\code{\link{makecumsumSurface}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poppr.R
\docType{package}
\name{poppr-package}
\alias{poppr-package}
\title{The \pkg{poppr} R package}
\description{
\pkg{Poppr} provides tools for population genetic analysis that
include genotypic diversity measures, genetic distances with bootstrap
support, native organization and handling of population hierarchies, and
clone correction.
To cite \pkg{poppr}, please use \code{citation("poppr")}. When referring to
\pkg{poppr} in your manuscript, please use lower case unless it occurs at the
beginning of a sentence.
}
\details{
This package relies on the \pkg{\link[adegenet:adegenet.package]{adegenet}} package.
It is built around the \linkS4class{genind} and
\linkS4class{genlight} object. Genind objects store genetic
information in a table of allele frequencies while genlight objects store
SNP data efficiently by packing binary allele calls into single bits.
\pkg{Poppr} has extended these object into new objects called
\linkS4class{genclone} and \linkS4class{snpclone},
respectively. These objects are designed for analysis of clonal organisms
as they add the \strong{@mlg} slot for keeping track of multilocus
genotypes and multilocus lineages.
\subsection{Documentation}{ Documentation is available for any function by
typing \code{?function_name} in the R console. Detailed topic explanations
live in the package vignettes:
\tabular{ll}{
\strong{Vignette} \tab \strong{command}\cr
Data import and manipulation \tab \code{vignette("poppr_manual", "poppr")}\cr
Algorithms and Equations \tab \code{vignette("algo", "poppr")}\cr
Multilocus Genotype Analysis \tab \code{vignette("mlg", "poppr")}
}
Essential functions for importing and manipulating data are detailed within
the \emph{Data import and manipulation} vignette, details on algorithms
used in \pkg{poppr} are within the \emph{Algorithms and equations}
vignette, and details for working with multilocus genotypes are in
\emph{Multilocus Genotype Analysis}.
Examples of analyses are available in a primer written by Niklaus J.
Grünwald, Zhian N. Kamvar, and Sydney E. Everhart at
\url{https://grunwaldlab.github.io/Population_Genetics_in_R/}.}
\subsection{Getting help}{ If you have a specific question or issue with
\pkg{poppr}, feel free to contribute to the google group at
\url{https://groups.google.com/d/forum/poppr}. If you find a bug and
are a github user, you can submit bug reports at
\url{https://github.com/grunwaldlab/poppr/issues}. Otherwise, leave a
message on the groups. Personal emails are highly discouraged as they do
not allow others to learn.}
}
\section{Functions in \pkg{poppr}}{
Below are descriptions and links to functions found in \pkg{poppr}. Be
aware that all functions in \pkg{\link[adegenet:adegenet.package]{adegenet}} are also
available. The functions are documented as:
\itemize{
\item \code{function_name()} (data type) - Description
}
Where \sQuote{data type} refers to the type of data that can be used:
\tabular{ll}{
\strong{m} \tab a genclone or genind object \cr
\strong{s} \tab a snpclone or genlight object \cr
\strong{x} \tab a different data type (e.g. a matrix from \code{\link[=mlg.table]{mlg.table()}})
}
}
\section{Data import/export}{
\itemize{
\item \code{\link[=getfile]{getfile()}} (x) - Provides a quick GUI to grab files for import
\item \code{\link[=read.genalex]{read.genalex()}} (x) - Reads GenAlEx formatted csv files to a genind object
\item \code{\link[=genind2genalex]{genind2genalex()}} (m) - Converts genind objects to GenAlEx formatted csv files
\item \code{\link[=genclone2genind]{genclone2genind()}} (m) - Removes the @mlg slot from genclone objects
\item \code{\link[=as.genambig]{as.genambig()}} (m) - Converts genind data to \pkg{polysat}'s \link[polysat:genambig-class]{genambig} data structure.
\item \code{\link[=bootgen2genind]{bootgen2genind()}} (x) - see \code{\link[=aboot]{aboot()}} for details)
}
}
\section{Data Structures}{
Data structures "genclone" (based off of adegenet's \link[adegenet:genind]{genind}) and
"snpclone" (based off of adegenet's \link[adegenet:genlight]{genlight} for large SNP data sets).
Both of these data structures are defined by the presence of an extra
MLG slot representing multilocus genotype assignments, which can be a numeric
vector or a MLG class object.
\itemize{
\item \linkS4class{genclone} - Handles microsatellite, presence/absence, and small SNP data sets
\item \linkS4class{snpclone} - Designed to handle larger binary SNP data sets.
\item \linkS4class{MLG} - An internal class holding a data frame of multilocus genotype
assignments that acts like a vector, allowing the user to easily switch
between different MLG definitions.
\item \linkS4class{bootgen} - An internal class used explicitly for \code{\link[=aboot]{aboot()}} that
inherits the \link[adegenet:virClasses]{gen-class} virtual object. It is
designed to allow for sampling loci with replacement.
\item \linkS4class{bruvomat} - An internal class designed to handle bootstrapping for
Bruvo's distance where blocks of integer loci can be shuffled.
}
}
\section{Data manipulation}{
\itemize{
\item \code{\link[=as.genclone]{as.genclone()}} (m) - Converts genind objects to genclone objects
\item \code{\link[=missingno]{missingno()}} (m) - Handles missing data
\item \code{\link[=clonecorrect]{clonecorrect()}} (m | s) - Clone-censors at a specified population hierarchy
\item \code{\link[=informloci]{informloci()}} (m) - Detects and removes phylogenetically uninformative loci
\item \code{\link[=popsub]{popsub()}} (m | s) - Subsets genind objects by population
\item \code{\link[=shufflepop]{shufflepop()}} (m) - Shuffles genotypes at each locus using four different shuffling algorithms
\item \code{\link[=recode_polyploids]{recode_polyploids()}} (m | x) - Recodes polyploid data sets with missing alleles imported as "0"
\item \code{\link[=make_haplotypes]{make_haplotypes()}} (m | s) - Splits data into pseudo-haplotypes. This is mainly used in AMOVA.
\item \code{\link[=test_replen]{test_replen()}} (m) - Tests for inconsistent repeat lengths in microsatellite data. For use in \code{\link[=bruvo.dist]{bruvo.dist()}} functions.
\item \code{\link[=fix_replen]{fix_replen()}} (m) - Fixes inconsistent repeat lengths. For use in \code{\link[=bruvo.dist]{bruvo.dist()}} functions.
}
}
\section{Genetic distances}{
\itemize{
\item \code{\link[=bruvo.dist]{bruvo.dist()}} (m) - Bruvo's distance (see also: \code{\link[=fix_replen]{fix_replen()}})
\item \code{\link[=diss.dist]{diss.dist()}} (m) - Absolute genetic distance (see \code{\link[=prevosti.dist]{prevosti.dist()}})
\item \code{\link[=nei.dist]{nei.dist()}} (m | x) - Nei's 1978 genetic distance
\item \code{\link[=rogers.dist]{rogers.dist()}} (m | x) - Rogers' euclidean distance
\item \code{\link[=reynolds.dist]{reynolds.dist()}} (m | x) - Reynolds' coancestry distance
\item \code{\link[=edwards.dist]{edwards.dist()}} (m | x) - Edwards' angular distance
\item \code{\link[=prevosti.dist]{prevosti.dist()}} (m | x) - Prevosti's absolute genetic distance
\item \code{\link[=bitwise.dist]{bitwise.dist()}} (s) - Calculates fast pairwise distances for genlight objects.
}
}
\section{Bootstrapping}{
\itemize{
\item \code{\link[=aboot]{aboot()}} (m | s | x) - Creates a bootstrapped dendrogram for any distance measure
\item \code{\link[=bruvo.boot]{bruvo.boot()}} (m) - Produces dendrograms with bootstrap support based on Bruvo's distance
\item \code{\link[=diversity_boot]{diversity_boot()}} (x) - Generates bootstrap distributions of diversity statistics for multilocus genotypes
\item \code{\link[=diversity_ci]{diversity_ci()}} (m | s | x) - Generates confidence intervals for multilocus genotype diversity.
\item \code{\link[=resample.ia]{resample.ia()}} (m) - Calculates the index of association over subsets of data.
}
}
\section{Multilocus Genotypes}{
\itemize{
\item \code{\link[=mlg]{mlg()}} (m | s) - Calculates the number of multilocus genotypes
\item \code{\link[=mll]{mll()}} (m | s) - Displays the current multilocus lineages (genotypes) defined.
\item \code{\link[=nmll]{nmll()}} (m | s) - Same as \code{\link[=mlg]{mlg()}}.
\item \code{\link[=mlg.crosspop]{mlg.crosspop()}} (m | s) - Finds all multilocus genotypes that cross populations
\item \code{\link[=mlg.table]{mlg.table()}} (m | s) - Returns a table of populations by multilocus genotypes
\item \code{\link[=mlg.vector]{mlg.vector()}} (m | s) - Returns a vector of a numeric multilocus genotype assignment for each individual
\item \code{\link[=mlg.id]{mlg.id()}} (m | s) - Finds all individuals associated with a single multilocus genotype
\item \code{\link[=mlg.filter]{mlg.filter()}} (m | s) - Collapses MLGs by genetic distance
\item \code{\link[=filter_stats]{filter_stats()}} (m | s) - Calculates mlg.filter for all algorithms and plots
\item \code{\link[=cutoff_predictor]{cutoff_predictor()}} (x) - Predicts cutoff threshold from mlg.filter.
\item \code{\link[=mll.custom]{mll.custom()}} (m | s) - Allows for the custom definition of multilocus lineages
\item \code{\link[=mll.levels]{mll.levels()}} (m | s) - Allows the user to change levels of custom MLLs.
\item \code{\link[=mll.reset]{mll.reset()}} (m | s) - Reset multilocus lineages.
\item \code{\link[=diversity_stats]{diversity_stats()}} (x) - Creates a table of diversity indices for multilocus genotypes.
}
}
\section{Index of Association Analysis}{
Analysis of multilocus linkage disequilibrium.
\itemize{
\item \code{\link[=ia]{ia()}} (m) - Calculates the index of association
\item \code{\link[=pair.ia]{pair.ia()}} (m) - Calculates the index of association for all loci pairs.
\item \code{\link[=win.ia]{win.ia()}} (s) - Index of association windows for genlight objects.
\item \code{\link[=samp.ia]{samp.ia()}} (s) - Index of association on random subsets of loci for genlight objects.
}
}
\section{Population Genetic Analysis}{
\itemize{
\item \code{\link[=poppr.amova]{poppr.amova()}} (m | s) - Analysis of Molecular Variance (as implemented in ade4)
\item \code{\link[=poppr]{poppr()}} (m | x) - Returns a diversity table by population
\item \code{\link[=poppr.all]{poppr.all()}} (m | x) - Returns a diversity table by population for all compatible files specified
\item \code{\link[=private_alleles]{private_alleles()}} (m) - Tabulates the occurrences of alleles that only occur in one population.
\item \code{\link[=locus_table]{locus_table()}} (m) - Creates a table of summary statistics per locus.
\item \code{\link[=rrmlg]{rrmlg()}} (m | x) - Round-robin multilocus genotype estimates.
\item \code{\link[=rraf]{rraf()}} (m) - Round-robin allele frequency estimates.
\item \code{\link[=pgen]{pgen()}} (m) - Probability of genotypes.
\item \code{\link[=psex]{psex()}} (m) - Probability of observing a genotype more than once.
\item \link[=rare_allele_correction]{rare_allele_correction} (m) - rules for correcting rare alleles for round-robin estimates.
\item \code{\link[=incomp]{incomp()}} (m) - Check data for incomparable samples.
}
}
\section{Visualization}{
\itemize{
\item \code{\link[=imsn]{imsn()}} (m | s) - Interactive construction and visualization of minimum spanning networks
\item \code{\link[=plot_poppr_msn]{plot_poppr_msn()}} (m | s | x) - Plots minimum spanning networks produced in poppr with scale bar and legend
\item \code{\link[=greycurve]{greycurve()}} (x) - Helper to determine the appropriate parameters for adjusting the grey level for msn functions
\item \code{\link[=bruvo.msn]{bruvo.msn()}} (m) - Produces minimum spanning networks based off Bruvo's distance colored by population
\item \code{\link[=poppr.msn]{poppr.msn()}} (m | s | x) - Produces a minimum spanning network for any pairwise distance matrix related to the data
\item \code{\link[=info_table]{info_table()}} (m) - Creates a heatmap representing missing data or observed ploidy
\item \code{\link[=genotype_curve]{genotype_curve()}} (m | x) - Creates a series of boxplots to demonstrate how many markers are needed to represent the diversity of your data.
}
}
\section{Datasets}{
\itemize{
\item \code{\link[=Aeut]{Aeut()}} - (AFLP) Oomycete root rot pathogen \emph{Aphanomyces euteiches} (Grünwald and Hoheisel, 2006)
\item \code{\link[=monpop]{monpop()}} - (SSR) Peach brown rot pathogen \emph{Monilinia fructicola} (Everhart and Scherm, 2015)
\item \code{\link[=partial_clone]{partial_clone()}} - (SSR) partially-clonal data simulated via simuPOP (Peng and Amos, 2008)
\item \code{\link[=Pinf]{Pinf()}} - (SSR) Potato late blight pathogen \emph{Phytophthora infestans} (Goss et. al., 2014)
\item \code{\link[=Pram]{Pram()}} - (SSR) Sudden Oak Death pathogen \emph{Phytophthora ramorum} (Kamvar et. al., 2015; Goss et. al., 2009)
}
}
\references{
--------- Papers announcing poppr ---------
Kamvar ZN, Tabima JF, Grünwald NJ. (2014) Poppr: an R package for genetic
analysis of populations with clonal, partially clonal, and/or sexual
reproduction. PeerJ 2:e281 \doi{10.7717/peerj.281}
Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
genome-wide population genetic data with emphasis on clonality. Front. Genet.
6:208. \doi{10.3389/fgene.2015.00208}
--------- Papers referencing data sets ---------
Grünwald, NJ and Hoheisel, G.A. 2006. Hierarchical Analysis of Diversity,
Selfing, and Genetic Differentiation in Populations of the Oomycete
\emph{Aphanomyces euteiches}. Phytopathology 96:1134-1141 doi:
\doi{10.1094/PHYTO-96-1134}
SE Everhart, H Scherm, (2015) Fine-scale genetic structure of \emph{Monilinia
fructicola} during brown rot epidemics within individual peach tree canopies.
Phytopathology 105:542-549 doi:
\doi{10.1094/PHYTO-03-14-0088-R}
Bo Peng and Christopher Amos (2008) Forward-time simulations of nonrandom
mating populations using simuPOP. \emph{bioinformatics}, 24 (11): 1408-1409.
Goss, Erica M., Javier F. Tabima, David EL Cooke, Silvia Restrepo, William E.
Fry, Gregory A. Forbes, Valerie J. Fieland, Martha Cardenas, and Niklaus J.
Grünwald. (2014) "The Irish potato famine pathogen \emph{Phytophthora
infestans} originated in central Mexico rather than the Andes." Proceedings
of the National Academy of Sciences 111:8791-8796. doi:
\doi{10.1073/pnas.1401884111}
Kamvar, Z. N., Larsen, M. M., Kanaskie, A. M., Hansen, E. M., & Grünwald, N.
J. (2015). Spatial and temporal analysis of populations of the sudden oak
death pathogen in Oregon forests. Phytopathology 105:982-989. doi:
\doi{10.1094/PHYTO-12-14-0350-FI}
Goss, E. M., Larsen, M., Chastagner, G. A., Givens, D. R., and Grünwald, N.
J. 2009. Population genetic analysis infers migration pathways of
\emph{Phytophthora ramorum} in US nurseries. PLoS Pathog. 5:e1000583. doi:
\doi{10.1371/journal.ppat.1000583}
}
\author{
Zhian N. Kamvar, Jonah C. Brooks, Sydney E. Everhart, Javier F.
Tabima, Stacy Krueger-Hadfield, Erik Sotka, Niklaus J. Grünwald
Maintainer: Zhian N. Kamvar
}
| /man/poppr-package.Rd | no_license | grunwaldlab/poppr | R | false | true | 14,904 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poppr.R
\docType{package}
\name{poppr-package}
\alias{poppr-package}
\title{The \pkg{poppr} R package}
\description{
\pkg{Poppr} provides tools for population genetic analysis that
include genotypic diversity measures, genetic distances with bootstrap
support, native organization and handling of population hierarchies, and
clone correction.
To cite \pkg{poppr}, please use \code{citation("poppr")}. When referring to
\pkg{poppr} in your manuscript, please use lower case unless it occurs at the
beginning of a sentence.
}
\details{
This package relies on the \pkg{\link[adegenet:adegenet.package]{adegenet}} package.
It is built around the \linkS4class{genind} and
\linkS4class{genlight} object. Genind objects store genetic
information in a table of allele frequencies while genlight objects store
SNP data efficiently by packing binary allele calls into single bits.
\pkg{Poppr} has extended these object into new objects called
\linkS4class{genclone} and \linkS4class{snpclone},
respectively. These objects are designed for analysis of clonal organisms
as they add the \strong{@mlg} slot for keeping track of multilocus
genotypes and multilocus lineages.
\subsection{Documentation}{ Documentation is available for any function by
typing \code{?function_name} in the R console. Detailed topic explanations
live in the package vignettes:
\tabular{ll}{
\strong{Vignette} \tab \strong{command}\cr
Data import and manipulation \tab \code{vignette("poppr_manual", "poppr")}\cr
Algorithms and Equations \tab \code{vignette("algo", "poppr")}\cr
Multilocus Genotype Analysis \tab \code{vignette("mlg", "poppr")}
}
Essential functions for importing and manipulating data are detailed within
the \emph{Data import and manipulation} vignette, details on algorithms
used in \pkg{poppr} are within the \emph{Algorithms and equations}
vignette, and details for working with multilocus genotypes are in
\emph{Multilocus Genotype Analysis}.
Examples of analyses are available in a primer written by Niklaus J.
Grünwald, Zhian N. Kamvar, and Sydney E. Everhart at
\url{https://grunwaldlab.github.io/Population_Genetics_in_R/}.}
\subsection{Getting help}{ If you have a specific question or issue with
\pkg{poppr}, feel free to contribute to the google group at
\url{https://groups.google.com/d/forum/poppr}. If you find a bug and
are a github user, you can submit bug reports at
\url{https://github.com/grunwaldlab/poppr/issues}. Otherwise, leave a
message on the groups. Personal emails are highly discouraged as they do
not allow others to learn.}
}
\section{Functions in \pkg{poppr}}{
Below are descriptions and links to functions found in \pkg{poppr}. Be
aware that all functions in \pkg{\link[adegenet:adegenet.package]{adegenet}} are also
available. The functions are documented as:
\itemize{
\item \code{function_name()} (data type) - Description
}
Where \sQuote{data type} refers to the type of data that can be used:
\tabular{ll}{
\strong{m} \tab a genclone or genind object \cr
\strong{s} \tab a snpclone or genlight object \cr
\strong{x} \tab a different data type (e.g. a matrix from \code{\link[=mlg.table]{mlg.table()}})
}
}
\section{Data import/export}{
\itemize{
\item \code{\link[=getfile]{getfile()}} (x) - Provides a quick GUI to grab files for import
\item \code{\link[=read.genalex]{read.genalex()}} (x) - Reads GenAlEx formatted csv files to a genind object
\item \code{\link[=genind2genalex]{genind2genalex()}} (m) - Converts genind objects to GenAlEx formatted csv files
\item \code{\link[=genclone2genind]{genclone2genind()}} (m) - Removes the @mlg slot from genclone objects
\item \code{\link[=as.genambig]{as.genambig()}} (m) - Converts genind data to \pkg{polysat}'s \link[polysat:genambig-class]{genambig} data structure.
\item \code{\link[=bootgen2genind]{bootgen2genind()}} (x) - see \code{\link[=aboot]{aboot()}} for details)
}
}
\section{Data Structures}{
Data structures "genclone" (based off of adegenet's \link[adegenet:genind]{genind}) and
"snpclone" (based off of adegenet's \link[adegenet:genlight]{genlight} for large SNP data sets).
Both of these data structures are defined by the presence of an extra
MLG slot representing multilocus genotype assignments, which can be a numeric
vector or a MLG class object.
\itemize{
\item \linkS4class{genclone} - Handles microsatellite, presence/absence, and small SNP data sets
\item \linkS4class{snpclone} - Designed to handle larger binary SNP data sets.
\item \linkS4class{MLG} - An internal class holding a data frame of multilocus genotype
assignments that acts like a vector, allowing the user to easily switch
between different MLG definitions.
\item \linkS4class{bootgen} - An internal class used explicitly for \code{\link[=aboot]{aboot()}} that
inherits the \link[adegenet:virClasses]{gen-class} virtual object. It is
designed to allow for sampling loci with replacement.
\item \linkS4class{bruvomat} - An internal class designed to handle bootstrapping for
Bruvo's distance where blocks of integer loci can be shuffled.
}
}
\section{Data manipulation}{
\itemize{
\item \code{\link[=as.genclone]{as.genclone()}} (m) - Converts genind objects to genclone objects
\item \code{\link[=missingno]{missingno()}} (m) - Handles missing data
\item \code{\link[=clonecorrect]{clonecorrect()}} (m | s) - Clone-censors at a specified population hierarchy
\item \code{\link[=informloci]{informloci()}} (m) - Detects and removes phylogenetically uninformative loci
\item \code{\link[=popsub]{popsub()}} (m | s) - Subsets genind objects by population
\item \code{\link[=shufflepop]{shufflepop()}} (m) - Shuffles genotypes at each locus using four different shuffling algorithms
\item \code{\link[=recode_polyploids]{recode_polyploids()}} (m | x) - Recodes polyploid data sets with missing alleles imported as "0"
\item \code{\link[=make_haplotypes]{make_haplotypes()}} (m | s) - Splits data into pseudo-haplotypes. This is mainly used in AMOVA.
\item \code{\link[=test_replen]{test_replen()}} (m) - Tests for inconsistent repeat lengths in microsatellite data. For use in \code{\link[=bruvo.dist]{bruvo.dist()}} functions.
\item \code{\link[=fix_replen]{fix_replen()}} (m) - Fixes inconsistent repeat lengths. For use in \code{\link[=bruvo.dist]{bruvo.dist()}} functions.
}
}
\section{Genetic distances}{
\itemize{
\item \code{\link[=bruvo.dist]{bruvo.dist()}} (m) - Bruvo's distance (see also: \code{\link[=fix_replen]{fix_replen()}})
\item \code{\link[=diss.dist]{diss.dist()}} (m) - Absolute genetic distance (see \code{\link[=prevosti.dist]{prevosti.dist()}})
\item \code{\link[=nei.dist]{nei.dist()}} (m | x) - Nei's 1978 genetic distance
\item \code{\link[=rogers.dist]{rogers.dist()}} (m | x) - Rogers' euclidean distance
\item \code{\link[=reynolds.dist]{reynolds.dist()}} (m | x) - Reynolds' coancestry distance
\item \code{\link[=edwards.dist]{edwards.dist()}} (m | x) - Edwards' angular distance
\item \code{\link[=prevosti.dist]{prevosti.dist()}} (m | x) - Prevosti's absolute genetic distance
\item \code{\link[=bitwise.dist]{bitwise.dist()}} (s) - Calculates fast pairwise distances for genlight objects.
}
}
\section{Bootstrapping}{
\itemize{
\item \code{\link[=aboot]{aboot()}} (m | s | x) - Creates a bootstrapped dendrogram for any distance measure
\item \code{\link[=bruvo.boot]{bruvo.boot()}} (m) - Produces dendrograms with bootstrap support based on Bruvo's distance
\item \code{\link[=diversity_boot]{diversity_boot()}} (x) - Generates bootstrap distributions of diversity statistics for multilocus genotypes
\item \code{\link[=diversity_ci]{diversity_ci()}} (m | s | x) - Generates confidence intervals for multilocus genotype diversity.
\item \code{\link[=resample.ia]{resample.ia()}} (m) - Calculates the index of association over subsets of data.
}
}
\section{Multilocus Genotypes}{
\itemize{
\item \code{\link[=mlg]{mlg()}} (m | s) - Calculates the number of multilocus genotypes
\item \code{\link[=mll]{mll()}} (m | s) - Displays the current multilocus lineages (genotypes) defined.
\item \code{\link[=nmll]{nmll()}} (m | s) - Same as \code{\link[=mlg]{mlg()}}.
\item \code{\link[=mlg.crosspop]{mlg.crosspop()}} (m | s) - Finds all multilocus genotypes that cross populations
\item \code{\link[=mlg.table]{mlg.table()}} (m | s) - Returns a table of populations by multilocus genotypes
\item \code{\link[=mlg.vector]{mlg.vector()}} (m | s) - Returns a vector of a numeric multilocus genotype assignment for each individual
\item \code{\link[=mlg.id]{mlg.id()}} (m | s) - Finds all individuals associated with a single multilocus genotype
\item \code{\link[=mlg.filter]{mlg.filter()}} (m | s) - Collapses MLGs by genetic distance
\item \code{\link[=filter_stats]{filter_stats()}} (m | s) - Calculates mlg.filter for all algorithms and plots
\item \code{\link[=cutoff_predictor]{cutoff_predictor()}} (x) - Predicts cutoff threshold from mlg.filter.
\item \code{\link[=mll.custom]{mll.custom()}} (m | s) - Allows for the custom definition of multilocus lineages
\item \code{\link[=mll.levels]{mll.levels()}} (m | s) - Allows the user to change levels of custom MLLs.
\item \code{\link[=mll.reset]{mll.reset()}} (m | s) - Reset multilocus lineages.
\item \code{\link[=diversity_stats]{diversity_stats()}} (x) - Creates a table of diversity indices for multilocus genotypes.
}
}
\section{Index of Association Analysis}{
Analysis of multilocus linkage disequilibrium.
\itemize{
\item \code{\link[=ia]{ia()}} (m) - Calculates the index of association
\item \code{\link[=pair.ia]{pair.ia()}} (m) - Calculates the index of association for all loci pairs.
\item \code{\link[=win.ia]{win.ia()}} (s) - Index of association windows for genlight objects.
\item \code{\link[=samp.ia]{samp.ia()}} (s) - Index of association on random subsets of loci for genlight objects.
}
}
\section{Population Genetic Analysis}{
\itemize{
\item \code{\link[=poppr.amova]{poppr.amova()}} (m | s) - Analysis of Molecular Variance (as implemented in ade4)
\item \code{\link[=poppr]{poppr()}} (m | x) - Returns a diversity table by population
\item \code{\link[=poppr.all]{poppr.all()}} (m | x) - Returns a diversity table by population for all compatible files specified
\item \code{\link[=private_alleles]{private_alleles()}} (m) - Tabulates the occurrences of alleles that only occur in one population.
\item \code{\link[=locus_table]{locus_table()}} (m) - Creates a table of summary statistics per locus.
\item \code{\link[=rrmlg]{rrmlg()}} (m | x) - Round-robin multilocus genotype estimates.
\item \code{\link[=rraf]{rraf()}} (m) - Round-robin allele frequency estimates.
\item \code{\link[=pgen]{pgen()}} (m) - Probability of genotypes.
\item \code{\link[=psex]{psex()}} (m) - Probability of observing a genotype more than once.
\item \link[=rare_allele_correction]{rare_allele_correction} (m) - rules for correcting rare alleles for round-robin estimates.
\item \code{\link[=incomp]{incomp()}} (m) - Check data for incomparable samples.
}
}
\section{Visualization}{
\itemize{
\item \code{\link[=imsn]{imsn()}} (m | s) - Interactive construction and visualization of minimum spanning networks
\item \code{\link[=plot_poppr_msn]{plot_poppr_msn()}} (m | s | x) - Plots minimum spanning networks produced in poppr with scale bar and legend
\item \code{\link[=greycurve]{greycurve()}} (x) - Helper to determine the appropriate parameters for adjusting the grey level for msn functions
\item \code{\link[=bruvo.msn]{bruvo.msn()}} (m) - Produces minimum spanning networks based off Bruvo's distance colored by population
\item \code{\link[=poppr.msn]{poppr.msn()}} (m | s | x) - Produces a minimum spanning network for any pairwise distance matrix related to the data
\item \code{\link[=info_table]{info_table()}} (m) - Creates a heatmap representing missing data or observed ploidy
\item \code{\link[=genotype_curve]{genotype_curve()}} (m | x) - Creates a series of boxplots to demonstrate how many markers are needed to represent the diversity of your data.
}
}
\section{Datasets}{
\itemize{
\item \code{\link[=Aeut]{Aeut()}} - (AFLP) Oomycete root rot pathogen \emph{Aphanomyces euteiches} (Grünwald and Hoheisel, 2006)
\item \code{\link[=monpop]{monpop()}} - (SSR) Peach brown rot pathogen \emph{Monilinia fructicola} (Everhart and Scherm, 2015)
\item \code{\link[=partial_clone]{partial_clone()}} - (SSR) partially-clonal data simulated via simuPOP (Peng and Amos, 2008)
\item \code{\link[=Pinf]{Pinf()}} - (SSR) Potato late blight pathogen \emph{Phytophthora infestans} (Goss et al., 2014)
\item \code{\link[=Pram]{Pram()}} - (SSR) Sudden Oak Death pathogen \emph{Phytophthora ramorum} (Kamvar et al., 2015; Goss et al., 2009)
}
}
\references{
--------- Papers announcing poppr ---------
Kamvar ZN, Tabima JF, Grünwald NJ. (2014) Poppr: an R package for genetic
analysis of populations with clonal, partially clonal, and/or sexual
reproduction. PeerJ 2:e281 \doi{10.7717/peerj.281}
Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
genome-wide population genetic data with emphasis on clonality. Front. Genet.
6:208. \doi{10.3389/fgene.2015.00208}
--------- Papers referencing data sets ---------
Grünwald, NJ and Hoheisel, G.A. 2006. Hierarchical Analysis of Diversity,
Selfing, and Genetic Differentiation in Populations of the Oomycete
\emph{Aphanomyces euteiches}. Phytopathology 96:1134-1141 doi:
\doi{10.1094/PHYTO-96-1134}
SE Everhart, H Scherm, (2015) Fine-scale genetic structure of \emph{Monilinia
fructicola} during brown rot epidemics within individual peach tree canopies.
Phytopathology 105:542-549 doi:
\doi{10.1094/PHYTO-03-14-0088-R}
Bo Peng and Christopher Amos (2008) Forward-time simulations of nonrandom
mating populations using simuPOP. \emph{bioinformatics}, 24 (11): 1408-1409.
Goss, Erica M., Javier F. Tabima, David EL Cooke, Silvia Restrepo, William E.
Fry, Gregory A. Forbes, Valerie J. Fieland, Martha Cardenas, and Niklaus J.
Grünwald. (2014) "The Irish potato famine pathogen \emph{Phytophthora
infestans} originated in central Mexico rather than the Andes." Proceedings
of the National Academy of Sciences 111:8791-8796. doi:
\doi{10.1073/pnas.1401884111}
Kamvar, Z. N., Larsen, M. M., Kanaskie, A. M., Hansen, E. M., & Grünwald, N.
J. (2015). Spatial and temporal analysis of populations of the sudden oak
death pathogen in Oregon forests. Phytopathology 105:982-989. doi:
\doi{10.1094/PHYTO-12-14-0350-FI}
Goss, E. M., Larsen, M., Chastagner, G. A., Givens, D. R., and Grünwald, N.
J. 2009. Population genetic analysis infers migration pathways of
\emph{Phytophthora ramorum} in US nurseries. PLoS Pathog. 5:e1000583. doi:
\doi{10.1371/journal.ppat.1000583}
}
\author{
Zhian N. Kamvar, Jonah C. Brooks, Sydney E. Everhart, Javier F.
Tabima, Stacy Krueger-Hadfield, Erik Sotka, Niklaus J. Grünwald
Maintainer: Zhian N. Kamvar
}
|
\name{Cement}
\alias{Cement}
\encoding{utf-8}
\docType{data}
\title{Cement hardening data}
\description{
Cement hardening data from Woods et al (1932).
}
\usage{
Cement
}
\format{
\code{Cement} is a data frame with 5 variables. \var{X1}-\var{X4} are four predictor
variables expressed as a percentage of weight.
\describe{
\item{y}{calories of heat evolved per gram of cement after 180 days of
hardening}
\item{X1}{calcium aluminate}
\item{X2}{tricalcium silicate}
\item{X3}{tetracalcium alumino ferrite}
\item{X4}{dicalcium silicate.}
}}
\source{
Woods H., Steinour H.H., Starke H.R. (1932) Effect of composition of Portland
cement on heat evolved during hardening. \emph{Industrial & Engineering
Chemistry} 24, 1207--1214.
}
\references{
Burnham, K. P. and Anderson, D. R. 2002 \emph{Model selection and multimodel
inference: a practical information-theoretic approach}. 2nd ed. New York,
Springer-Verlag.
}
\keyword{datasets}
| /man/data-Cement.Rd | no_license | cran/MuMIn | R | false | false | 954 | rd | \name{Cement}
\alias{Cement}
\encoding{utf-8}
\docType{data}
\title{Cement hardening data}
\description{
Cement hardening data from Woods et al (1932).
}
\usage{
Cement
}
\format{
\code{Cement} is a data frame with 5 variables. \var{X1}-\var{X4} are four predictor
variables expressed as a percentage of weight.
\describe{
\item{y}{calories of heat evolved per gram of cement after 180 days of
hardening}
\item{X1}{calcium aluminate}
\item{X2}{tricalcium silicate}
\item{X3}{tetracalcium alumino ferrite}
\item{X4}{dicalcium silicate.}
}}
\source{
Woods H., Steinour H.H., Starke H.R. (1932) Effect of composition of Portland
cement on heat evolved during hardening. \emph{Industrial & Engineering
Chemistry} 24, 1207--1214.
}
\references{
Burnham, K. P. and Anderson, D. R. 2002 \emph{Model selection and multimodel
inference: a practical information-theoretic approach}. 2nd ed. New York,
Springer-Verlag.
}
\keyword{datasets}
|
\name{SecondaryEffort}
\alias{SecondaryEffort}
\alias{AllSecondaryEffort}
\docType{data}
\title{Effort Data of Secondary observer for Gray Whale Southbound Migration Survey Data from 1967-2006}
\description{
Effort data for secondary observer (when more than one observer) during the southbound migration surveys of gray whales from 1987-2006.
These are only effort records with vis <4 and beaufort<=4 except that AllSecondaryEffort uses beaufort<=10 and vis<=10 because this latter
dataframe is used by \code{\link{create.match}} to include all sightings regardless of the condition.}
\usage{
data(SecondaryEffort)
data(AllSecondaryEffort)
}
\format{
A data frame with 1600 records (2124 for AllSecondaryEffort) on the following 15 variables.
\describe{
\item{\code{watch.key}}{yyyy-ddw key field, where yyyy is 4 digit Start.year, dd is days since 1 Dec and w is the watch}
\item{\code{Start.year}}{Numeric 4 digit year at the beginning of the survey}
\item{\code{key}}{Effort period key which links effort period to sightings}
\item{\code{begin}}{Begin time of effort period in decimal days from 1 Dec of survey \code{Start.year} 4 digit year}
\item{\code{end}}{End time of effort period in decimal days from 1 Dec of survey \code{Start.year} 4 digit year}
\item{\code{npods}}{Number of pods (estimate or observed) during the effort period}
\item{\code{nwhales}}{Number of whales (estimate or observed) during the effort period}
\item{\code{effort}}{Length of effort period in decimal days}
\item{\code{vis}}{Visibility code during watch: 1: Ex, 2: Very Good, 3: Good, 4: Fair, 5: Poor, 6: Unusable; for <1987 it is the average value}
\item{\code{beaufort}}{Beaufort code during watch; for <1987 it is the average value}
\item{\code{Observer}}{Observer code or initials; see \code{\link{Observer}} table}
\item{\code{time}}{Mid-point of effort period}
\item{\code{watch}}{watch period number: 1 to 3}
\item{\code{Use}}{True or False to indicate whether effort period is used in the analysis; it is NA where vis or beaufort is NA}
\item{\code{Date}}{Date yyyy-mm-dd character field}
}
}
\details{The gray whale survey data are organized into one to three watches depending on the year. One observer is on effort during the entire watch;
when there are double observers each observer has a watch with a separate Location (N/S). Prior to 1987, effort was only recorded as an entire
watch period but starting in 1987 the watch period was broken into segments with constant environmental data like visibility and wind force (beaufort).
These smaller segments are called effort periods. So for surveys before 1987 there is a single effort period for each watch and for
1987 and after there can be several effort periods per watch. Because the environmental data could vary within a watch period prior to 1987, the
value used here is the average of the vis/beaufort values from the sightings during that watch if there are any sightings. If there are no
sightings during the watch then the average of the 3 vis/beaufort measurements are used (see \code{\link{EarlyEffort}}). There is
no secondary effort prior to 1987 because there was only a single observer for 1967-1985 surveys.
}
\keyword{datasets}
| /ERAnalysis/man/SecondaryEffort.Rd | no_license | jlaake/ERAnalysis | R | false | false | 3,312 | rd | \name{SecondaryEffort}
\alias{SecondaryEffort}
\alias{AllSecondaryEffort}
\docType{data}
\title{Effort Data of Secondary observer for Gray Whale Southbound Migration Survey Data from 1967-2006}
\description{
Effort data for secondary observer (when more than one observer) during the southbound migration surveys of gray whales from 1987-2006.
These are only effort records with vis <4 and beaufort<=4 except that AllSecondaryEffort uses beaufort<=10 and vis<=10 because this latter
dataframe is used by \code{\link{create.match}} to include all sightings regardless of the condition.}
\usage{
data(SecondaryEffort)
data(AllSecondaryEffort)
}
\format{
A data frame with 1600 records (2124 for AllSecondaryEffort) on the following 15 variables.
\describe{
\item{\code{watch.key}}{yyyy-ddw key field, where yyyy is 4 digit Start.year, dd is days since 1 Dec and w is the watch}
\item{\code{Start.year}}{Numeric 4 digit year at the beginning of the survey}
\item{\code{key}}{Effort period key which links effort period to sightings}
\item{\code{begin}}{Begin time of effort period in decimal days from 1 Dec of survey \code{Start.year} 4 digit year}
\item{\code{end}}{End time of effort period in decimal days from 1 Dec of survey \code{Start.year} 4 digit year}
\item{\code{npods}}{Number of pods (estimate or observed) during the effort period}
\item{\code{nwhales}}{Number of whales (estimate or observed) during the effort period}
\item{\code{effort}}{Length of effort period in decimal days}
\item{\code{vis}}{Visibility code during watch: 1: Ex, 2: Very Good, 3: Good, 4: Fair, 5: Poor, 6: Unusable; for <1987 it is the average value}
\item{\code{beaufort}}{Beaufort code during watch; for <1987 it is the average value}
\item{\code{Observer}}{Observer code or initials; see \code{\link{Observer}} table}
\item{\code{time}}{Mid-point of effort period}
\item{\code{watch}}{watch period number: 1 to 3}
\item{\code{Use}}{True or False to indicate whether effort period is used in the analysis; it is NA where vis or beaufort is NA}
\item{\code{Date}}{Date yyyy-mm-dd character field}
}
}
\details{The gray whale survey data are organized into one to three watches depending on the year. One observer is on effort during the entire watch;
when there are double observers each observer has a watch with a separate Location (N/S). Prior to 1987, effort was only recorded as an entire
watch period but starting in 1987 the watch period was broken into segments with constant environmental data like visibility and wind force (beaufort).
These smaller segments are called effort periods. So for surveys before 1987 there is a single effort period for each watch and for
1987 and after there can be several effort periods per watch. Because the environmental data could vary within a watch period prior to 1987, the
value used here is the average of the vis/beaufort values from the sightings during that watch if there are any sightings. If there are no
sightings during the watch then the average of the 3 vis/beaufort measurements are used (see \code{\link{EarlyEffort}}). There is
no secondary effort prior to 1987 because there was only a single observer for 1967-1985 surveys.
}
\keyword{datasets}
|
# Author: Begum Topcuoglu
# Date: 2019-01-14
######################################################################
# Description:
# This function defines defines:
# 1. Tuning budget as a grid the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
# Dependencies and Outputs:
# Filename to put to function:
# "Random_Forest"
# Usage:
# Call as source when using the function. The function is:
# tuning_grid()
# Output:
# List of:
# 1. Tuning budget as a grid the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
#------------------------- DEFINE FUNCTION -------------------#
######################################################################
tuning_grid <- function(model){
  # Build the model-selection parameters for caret:
  #   1. a hyper-parameter tuning grid for the chosen classifier,
  #   2. the caret method name for that classifier,
  #   3. a shared cross-validation scheme.
  #
  # Args:
  #   model: classifier name; currently only "Random_Forest" is supported.
  # Returns:
  #   A list of (grid, method, cv). Elements are named but remain accessible
  #   positionally (params[[1]], params[[2]], params[[3]]) for existing callers.
  # Cross-validation scheme: 100 repeats of 5-fold CV, keeping class
  # probabilities and per-fold predictions so mean AUC can be reported for
  # every hyper-parameter setting via twoClassSummary.
  cv <- trainControl(method="repeatedcv",
                     repeats = 100, # repeat internally and give us meanAUC for each hyper-parameter
                     number=5, # 5fold cross-validation
                     returnResamp="final",
                     classProbs=TRUE,
                     summaryFunction=twoClassSummary,
                     indexFinal=NULL,
                     savePredictions = TRUE)
  # Grid and caret method defined for random forest classification model
  if(model=="Random_Forest"){
    grid <- expand.grid(mtry = c(80,500,1000,1500))
    method <- "rf"
  }
  else {
    # Fail fast with a clear message. Previously this branch only printed a
    # message and left `grid`/`method` undefined, so the list() call below
    # died with a cryptic "object 'method' not found" error.
    stop("Model not available: ", model, call. = FALSE)
  }
  params <- list(grid = grid, method = method, cv = cv)
  return(params)
}
| /code/learning/model_selection_RF.R | no_license | SchlossLab/rf_pipeline | R | false | false | 1,857 | r | # Author: Begum Topcuoglu
# Date: 2019-01-14
######################################################################
# Description:
# This function defines defines:
# 1. Tuning budget as a grid the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
# Dependencies and Outputs:
# Filename to put to function:
# "Random_Forest"
# Usage:
# Call as source when using the function. The function is:
# tuning_grid()
# Output:
# List of:
# 1. Tuning budget as a grid the classification methods chosen
# 2. Cross-validation method
# 3. Caret name for the classification method chosen
######################################################################
######################################################################
#------------------------- DEFINE FUNCTION -------------------#
######################################################################
tuning_grid <- function(model) {
  # Assemble caret model-selection settings: a hyper-parameter grid and the
  # caret method string for the requested classifier, plus a shared
  # cross-validation scheme.
  # Hyper-parameter budget and caret method name for the chosen classifier.
  if (model == "Random_Forest") {
    method <- "rf"
    grid <- expand.grid(mtry = c(80, 500, 1000, 1500))
  } else {
    print("Model not available")
  }
  # 100 repeats of 5-fold cross-validation; class probabilities and saved
  # per-fold predictions let caret report mean AUC for each hyper-parameter
  # (twoClassSummary).
  cv <- trainControl(method = "repeatedcv",
                     number = 5,
                     repeats = 100,
                     returnResamp = "final",
                     classProbs = TRUE,
                     summaryFunction = twoClassSummary,
                     indexFinal = NULL,
                     savePredictions = TRUE)
  # Positional result consumed by callers: (grid, method, cv).
  list(grid, method, cv)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.prepare_rules2.R
\name{lgb.prepare_rules2}
\alias{lgb.prepare_rules2}
\title{Data preparator for LightGBM datasets with rules (integer)}
\usage{
lgb.prepare_rules2(data, rules = NULL)
}
\arguments{
\item{data}{A data.frame or data.table to prepare.}
\item{rules}{A set of rules from the data preparator, if already used.}
}
\value{
A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). The data must be converted to a matrix format (\code{as.matrix}) for input in lgb.Dataset.
}
\description{
Attempts to prepare a clean dataset to put in a lgb.Dataset. Factors and characters are converted to numeric (specifically: integer). In addition, it keeps the rules created so you can convert other datasets using this converter. This is useful if you have a specific need for an integer dataset instead of a numeric dataset. Note that there are programs which do not support integer-only input. Consider this a half-memory technique which is dangerous, especially for LightGBM.
}
\examples{
\dontrun{
library(lightgbm)
data(iris)
str(iris)
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 ...
new_iris <- lgb.prepare_rules2(data = iris) # Autoconverter
str(new_iris$data)
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : int 1 1 1 1 1 1 1 1 1 1 ...
data(iris) # Erase iris dataset
iris$Species[1] <- "NEW FACTOR" # Introduce junk factor (NA)
# Warning message:
# In `[<-.factor`(`*tmp*`, 1, value = c(NA, 1L, 1L, 1L, 1L, 1L, 1L, :
# invalid factor level, NA generated
# Use conversion using known rules
# Unknown factors become 0, excellent for sparse datasets
newer_iris <- lgb.prepare_rules2(data = iris, rules = new_iris$rules)
# Unknown factor is now zero, perfect for sparse datasets
newer_iris$data[1, ] # Species became 0 as it is an unknown factor
# Sepal.Length Sepal.Width Petal.Length Petal.Width Species
# 1 5.1 3.5 1.4 0.2 0
newer_iris$data[1, 5] <- 1 # Put back real initial value
# Is the newly created dataset equal? YES!
all.equal(new_iris$data, newer_iris$data)
# [1] TRUE
# Can we test our own rules?
data(iris) # Erase iris dataset
# We remapped values differently
personal_rules <- list(Species = c("setosa" = 3L,
"versicolor" = 2L,
"virginica" = 1L))
newest_iris <- lgb.prepare_rules2(data = iris, rules = personal_rules)
str(newest_iris$data) # SUCCESS!
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : int 3 3 3 3 3 3 3 3 3 3 ...
}
}
| /R-package/man/lgb.prepare_rules2.Rd | permissive | johnduffynh/LightGBM | R | false | true | 3,458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgb.prepare_rules2.R
\name{lgb.prepare_rules2}
\alias{lgb.prepare_rules2}
\title{Data preparator for LightGBM datasets with rules (integer)}
\usage{
lgb.prepare_rules2(data, rules = NULL)
}
\arguments{
\item{data}{A data.frame or data.table to prepare.}
\item{rules}{A set of rules from the data preparator, if already used.}
}
\value{
A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). The data must be converted to a matrix format (\code{as.matrix}) for input in lgb.Dataset.
}
\description{
Attempts to prepare a clean dataset to put in a lgb.Dataset. Factors and characters are converted to numeric (specifically: integer). In addition, it keeps the rules created so you can convert other datasets using this converter. This is useful if you have a specific need for an integer dataset instead of a numeric dataset. Note that there are programs which do not support integer-only input. Consider this a half-memory technique which is dangerous, especially for LightGBM.
}
\examples{
\dontrun{
library(lightgbm)
data(iris)
str(iris)
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 ...
new_iris <- lgb.prepare_rules2(data = iris) # Autoconverter
str(new_iris$data)
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : int 1 1 1 1 1 1 1 1 1 1 ...
data(iris) # Erase iris dataset
iris$Species[1] <- "NEW FACTOR" # Introduce junk factor (NA)
# Warning message:
# In `[<-.factor`(`*tmp*`, 1, value = c(NA, 1L, 1L, 1L, 1L, 1L, 1L, :
# invalid factor level, NA generated
# Use conversion using known rules
# Unknown factors become 0, excellent for sparse datasets
newer_iris <- lgb.prepare_rules2(data = iris, rules = new_iris$rules)
# Unknown factor is now zero, perfect for sparse datasets
newer_iris$data[1, ] # Species became 0 as it is an unknown factor
# Sepal.Length Sepal.Width Petal.Length Petal.Width Species
# 1 5.1 3.5 1.4 0.2 0
newer_iris$data[1, 5] <- 1 # Put back real initial value
# Is the newly created dataset equal? YES!
all.equal(new_iris$data, newer_iris$data)
# [1] TRUE
# Can we test our own rules?
data(iris) # Erase iris dataset
# We remapped values differently
personal_rules <- list(Species = c("setosa" = 3L,
"versicolor" = 2L,
"virginica" = 1L))
newest_iris <- lgb.prepare_rules2(data = iris, rules = personal_rules)
str(newest_iris$data) # SUCCESS!
# 'data.frame': 150 obs. of 5 variables:
# $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
# $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
# $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
# $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
# $ Species : int 3 3 3 3 3 3 3 3 3 3 ...
}
}
|
# Auto-extracted example script for imguR::get_notifications().
# The example is wrapped in "Not run" markers because it appears to require
# an interactive Imgur login/token — NOTE(review): confirm before enabling.
library(imguR)

### Name: get_notifications
### Title: Get notifications
### Aliases: get_notifications

### ** Examples

## Not run:
##D tkn <- imgur_login()
##D get_notifications(only_new = TRUE, token = tkn)
## End(Not run)
| /data/genthat_extracted_code/imguR/examples/get_notifications.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 232 | r | library(imguR)
### Name: get_notifications
### Title: Get notifications
### Aliases: get_notifications
### ** Examples
## Not run:
##D tkn <- imgur_login()
##D get_notifications(only_new = TRUE, token = tkn)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/line_functions.R
\name{offset_line}
\alias{double_line}
\alias{offset_line}
\title{Create a double line or offset line}
\usage{
offset_line(shp, offset)
double_line(shp, width, sides = "both")
}
\arguments{
\item{shp}{SpatialLines(DataFrame)}
\item{offset}{offset from the original lines}
\item{width}{width between the left and righthand side}
\item{sides}{character value that specifies which sides are selected: \code{"both"}, \code{"left"}, or \code{"right"}. The default is \code{"both"}. For the other two options, see also the shortcut function \code{offset_line}.}
}
\description{
Create a double line or offset line. The double line can be useful for visualizing two-way tracks or emulating objects such as railway tracks. The offset line can be useful to prevent overlapping of spatial lines.
}
\examples{
\dontrun{
if (require(tmap)) {
### Demo to visualise the route of the Amstel Gold Race, a professional cycling race
tmpdir <- tempdir()
tmpfile <- tempfile()
download.file("http://www.gpstracks.nl/routes-fiets/f-limburg-amstel-gold-race-2014.zip",
tmpfile, mode="wb")
unzip(tmpfile, exdir=tmpdir)
# read GPX file
AGR <- read_GPX(file.path(tmpdir, "f-limburg-amstel-gold-race-2014.gpx"))
# read OSM of Zuid-Limburg
Limburg_OSM <- read_osm(AGR$tracks, ext=1.05)
# change route part names
levels(AGR$tracks$name) <- paste(c("First", "Second", "Third", "Final"), "loop")
AGR$tracks_offset2 <- offset_line(AGR$tracks, offset=c(.0005,0,-.0005,-.001))
tm_shape(Limburg_OSM) +
tm_raster(saturation=.25) +
tm_shape(AGR$tracks_offset2) +
tm_lines(col = "name", lwd = 4, title.col="Amstel Gold Race", palette="Dark2") +
tm_shape(AGR$waypoints) +
tm_bubbles(size=.1, col="gold", border.col = "black") +
tm_text("name", size = .75, bg.color="white", bg.alpha=.25, auto.placement = .25) +
tm_legend(position=c("right", "top"), frame=TRUE, bg.color = "gold") +
tm_view(basemaps = "Esri.WorldTopoMap")
# TIP: also run the plot in viewing mode, enabled with tmap_mode("view")
}
}
}
| /man/double_line.Rd | no_license | hieuqtran/tmaptools | R | false | true | 2,158 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/line_functions.R
\name{offset_line}
\alias{double_line}
\alias{offset_line}
\title{Create a double line or offset line}
\usage{
offset_line(shp, offset)
double_line(shp, width, sides = "both")
}
\arguments{
\item{shp}{SpatialLines(DataFrame)}
\item{offset}{offset from the original lines}
\item{width}{width between the left and righthand side}
\item{sides}{character value that specifies which sides are selected: \code{"both"}, \code{"left"}, or \code{"right"}. The default is \code{"both"}. For the other two options, see also the shortcut function \code{offset_line}.}
}
\description{
Create a double line or offset line. The double line can be useful for visualizing two-way tracks or emulating objects such as railway tracks. The offset line can be useful to prevent overlapping of spatial lines.
}
\examples{
\dontrun{
if (require(tmap)) {
### Demo to visualise the route of the Amstel Gold Race, a professional cycling race
tmpdir <- tempdir()
tmpfile <- tempfile()
download.file("http://www.gpstracks.nl/routes-fiets/f-limburg-amstel-gold-race-2014.zip",
tmpfile, mode="wb")
unzip(tmpfile, exdir=tmpdir)
# read GPX file
AGR <- read_GPX(file.path(tmpdir, "f-limburg-amstel-gold-race-2014.gpx"))
# read OSM of Zuid-Limburg
Limburg_OSM <- read_osm(AGR$tracks, ext=1.05)
# change route part names
levels(AGR$tracks$name) <- paste(c("First", "Second", "Third", "Final"), "loop")
AGR$tracks_offset2 <- offset_line(AGR$tracks, offset=c(.0005,0,-.0005,-.001))
tm_shape(Limburg_OSM) +
tm_raster(saturation=.25) +
tm_shape(AGR$tracks_offset2) +
tm_lines(col = "name", lwd = 4, title.col="Amstel Gold Race", palette="Dark2") +
tm_shape(AGR$waypoints) +
tm_bubbles(size=.1, col="gold", border.col = "black") +
tm_text("name", size = .75, bg.color="white", bg.alpha=.25, auto.placement = .25) +
tm_legend(position=c("right", "top"), frame=TRUE, bg.color = "gold") +
tm_view(basemaps = "Esri.WorldTopoMap")
# TIP: also run the plot in viewing mode, enabled with tmap_mode("view")
}
}
}
|
#' Report summary of Call Rate for loci or individuals
#'
#' SNP datasets generated by DArT have missing values primarily arising from failure to call a SNP because of a mutation
#' at one or both of the restriction enzyme recognition sites. This script reports the number of missing values for each
#' of several percentiles. The script gl.filter.callrate() will filter out the loci with call rates below a specified threshold.
#'
#' @param x -- name of the genlight or genind object containing the SNP data [required]
#' @param method specify the type of report by locus (method="loc") or individual (method="ind") [default method="loc"]
#' @param plot specify if a histogram of call rate is to be produced [default TRUE]
#' @param v -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2]
#' @return A data.frame (returned invisibly) tabulating, for each call rate
#'   cutoff, the number of loci (method="loc") or individuals (method="ind")
#'   that would be retained; summaries are also printed to the console.
#' @export
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' gl.report.callrate(testset.gl)

gl.report.callrate <- function(x, method="loc", plot=TRUE, v=2) {

# ERROR CHECKING

  # inherits() is more robust than comparing class(x) directly (it also
  # handles objects carrying additional classes).
  if(!inherits(x, "genlight")) {
    cat("Fatal Error: genlight object required for gl.report.callrate!\n"); stop()
  }

  # Coerce an unrecognized method to the default rather than failing.
  if (method != "ind" & method != "loc") {
    cat(" Warning: method must be either \"loc\" or \"ind\", set to \"loc\" \n")
    method <- "loc"
  }

  # Clamp verbosity into its documented 0..5 range.
  if (v < 0 | v > 5){
    cat(" Warning: verbosity must be an integer between 0 [silent] and 5 [full report], set to 2\n")
    v <- 2
  }

# FLAG SCRIPT START

  if (v >= 1) {
    cat("Starting gl.report.callrate\n")
  }
  if (v >= 3){cat("Note: Missing values most commonly arise from restriction site mutation.\n\n")}

# RECALCULATE THE CALL RATE, BRING IT UP TO DATE IN CASE gl.recalc.metrics HAS NOT BEEN RUN

  x <- utils.recalc.callrate(x, v=v)

# FOR METHOD BASED ON LOCUS

  if(method == "loc") {

    # Plot a histogram of Call Rate
    if (plot) {
      hist(x@other$loc.metrics$CallRate,
         main="Histogram Call Rate by Locus",
         xlab="Call Rate",
         border="blue",
         col="red",
         xlim=c(min(x@other$loc.metrics$CallRate),1)
      )
    }

    # Function to determine the loss of loci for a given filter cut-off.
    # Note: the `gl` argument is unused; the function closes over `x`.
    # Side effect: prints the count of retained loci for this percentile.
    s <- function(gl, percentile) {
      a <- sum(glNA(x,alleleAsUnit=FALSE)<=((1-percentile)*nInd(x)))
      if (percentile == 1) {
        cat(paste0(" Loci with no missing values = ",a," [",round((a*100/nLoc(x)),digits=1),"%]\n"))
      } else {
        cat(paste0(" < ",(1-percentile)*100,"% missing values = ",a," [",round((a*100/nLoc(x)),digits=1),"%]\n"))
      }
      return(a)
    }

    # Define vectors to hold the x and y axis values
    b <- vector()
    c <- vector()

    # Generate x and y values in 5% steps; stop early once (essentially)
    # all loci are retained.
    for (i in seq(0,100,by=5)) {
      c[i+1] <- s(x,((100-i)/100))
      b[i+1] <- i
      if (!is.na(c[i+1])) {
        if ((round(c[i+1]*100/nLoc(x))) == 100) {break}
      }
    }
    # Convert percentages to cutoff proportions and drop the unfilled slots
    # (only every fifth index of b/c was populated above).
    b <- 1-(b[!is.na(b)])/100
    c <- c[!is.na(c)]
    df <- data.frame(cbind(b,c))
    names(df) <- c("Cutoff","SNPs")
  }

# FOR METHOD BASED ON INDIVIDUAL

  if(method == "ind") {
    # Calculate the call rate by individual
    ind.call.rate <- 1 - rowSums(is.na(as.matrix(x)))/nLoc(x)

    # Plot a histogram of Call Rate
    if (plot) {
      hist(ind.call.rate,
         main="Histogram Call Rate by Individual",
         xlab="Call Rate",
         border="blue",
         col="red",
         xlim=c(min(ind.call.rate),1)
      )
    }

    # Function to determine the loss of individuals for a given filter cut-off.
    # Note: the `gl` argument is unused; the counts come from `i`.
    # Side effect: prints the count of retained individuals for this percentile.
    s2 <- function(gl, percentile, i=ind.call.rate) {
      a <- length(i) - length(i[i<=percentile])
      if (percentile == 1) {
        cat(paste0("Individuals no missing values = ",a," [",round((a*100/nInd(x)),digits=1),"%] across loci; all individuals would be filtered\n"))
      } else {
        cat(paste0(" with less than or equal to ",(1-percentile)*100,"% = ",a," [",round((a*100/nInd(x)),digits=1),"%]; ",nInd(x)-a," individuals would be filtered\n"))
      }
      return(a)
    }

    # Define vectors to hold the x and y axis values
    b <- vector()
    c <- vector()

    # Generate x and y values in 5% steps; stop early once (essentially)
    # all individuals are retained.
    for (i in seq(0,100,by=5)) {
      c[i+1] <- s2(x,percentile=((100-i)/100),i=ind.call.rate)
      b[i+1] <- i
      if (!is.na(c[i+1])) {
        if ((round(c[i+1]*100/nInd(x))) == 100) {break}
      }
    }
    # Convert percentages to cutoff proportions and drop the unfilled slots.
    b <- 1-(b[!is.na(b)])/100
    c <- nInd(x) - c[!is.na(c)]
    df <- data.frame(cbind(b,c))
    # NOTE(review): the second column counts individuals here, but the label
    # "SNPs" is kept for backward compatibility with downstream consumers.
    names(df) <- c("Cutoff","SNPs")
  }

  if (v >= 1) {
    cat("gl.report.callrate Completed\n")
  }

  # Return the cutoff summary invisibly. Previously `df` was computed in both
  # branches but discarded (the function ended with `a <- NULL; return()`,
  # yielding NULL), contradicting the documented return value.
  invisible(df)
}
#' Report summary of Call Rate for loci or individuals
#'
#' SNP datasets generated by DArT have missing values primarily arising from failure to call a SNP because of a mutation
#' at one or both of the the restriction enzyme recognition sites. This script reports the number of missing values for each
#' of several percentiles. The script gl.filter.callrate() will filter out the loci with call rates below a specified threshold.
#'
#' @param x -- name of the genlight or genind object containing the SNP data [required]
#' @param method specify the type of report by locus (method="loc") or individual (method="ind") [default method="loc"]
#' @param plot specify if a histogram of call rate is to be produced [default TRUE]
#' @param v -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2]
#' @return Mean call rate by locus (method="loc") or individual (method="ind")
#' @export
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' gl.report.callrate(testset.gl)
gl.report.callrate <- function(x, method="loc", plot=TRUE, v=2) {
  # ERROR CHECKING
  # Use inherits() rather than class(x) != "genlight" so that subclasses of
  # genlight are also accepted; stop() carries the message as a condition.
  if (!inherits(x, "genlight")) {
    stop("Fatal Error: genlight object required for gl.report.callrate!\n")
  }
  # Scalar checks: use the short-circuiting && / || rather than the
  # vectorised & / | operators.
  if (method != "ind" && method != "loc") {
    cat(" Warning: method must be either \"loc\" or \"ind\", set to \"loc\" \n")
    method <- "loc"
  }
  if (v < 0 || v > 5) {
    cat(" Warning: verbosity must be an integer between 0 [silent] and 5 [full report], set to 2\n")
    v <- 2
  }
  # FLAG SCRIPT START
  if (v >= 1) {
    cat("Starting gl.report.callrate\n")
  }
  if (v >= 3) {
    cat("Note: Missing values most commonly arise from restriction site mutation.\n\n")
  }
  # RECALCULATE THE CALL RATE, BRING IT UP TO DATE IN CASE gl.recalc.metrics HAS NOT BEEN RUN
  x <- utils.recalc.callrate(x, v=v)
  # FOR METHOD BASED ON LOCUS
  if (method == "loc") {
    # Plot a histogram of Call Rate
    if (plot) {
      hist(x@other$loc.metrics$CallRate,
           main="Histogram Call Rate by Locus",
           xlab="Call Rate",
           border="blue",
           col="red",
           xlim=c(min(x@other$loc.metrics$CallRate),1)
      )
    }
    # Report (and return) the number of loci retained for a given call-rate
    # cut-off. The gl argument is kept for backward compatibility; the
    # function deliberately closes over x.
    s <- function(gl, percentile) {
      a <- sum(glNA(x,alleleAsUnit=FALSE)<=((1-percentile)*nInd(x)))
      if (percentile == 1) {
        cat(paste0(" Loci with no missing values = ",a," [",round((a*100/nLoc(x)),digits=1),"%]\n"))
      } else {
        cat(paste0(" < ",(1-percentile)*100,"% missing values = ",a," [",round((a*100/nLoc(x)),digits=1),"%]\n"))
      }
      return(a)
    }
    # Vectors holding the x (cut-off) and y (locus count) axis values;
    # renamed from b/c so that base::c is not shadowed.
    cutoffs <- vector()
    counts <- vector()
    # Generate x and y values; stop early once 100% of loci are retained.
    # Indices between the by=5 steps stay NA and are dropped below.
    for (i in seq(0,100,by=5)) {
      counts[i+1] <- s(x,((100-i)/100))
      cutoffs[i+1] <- i
      if (!is.na(counts[i+1])) {
        if ((round(counts[i+1]*100/nLoc(x))) == 100) {break}
      }
    }
    cutoffs <- 1-(cutoffs[!is.na(cutoffs)])/100
    counts <- counts[!is.na(counts)]
    df <- data.frame(Cutoff=cutoffs, SNPs=counts)
  }
  # FOR METHOD BASED ON INDIVIDUAL
  if (method == "ind") {
    # Call rate per individual = fraction of non-missing genotype calls.
    ind.call.rate <- 1 - rowSums(is.na(as.matrix(x)))/nLoc(x)
    # Plot a histogram of Call Rate
    if (plot) {
      hist(ind.call.rate,
           main="Histogram Call Rate by Individual",
           xlab="Call Rate",
           border="blue",
           col="red",
           xlim=c(min(ind.call.rate),1)
      )
    }
    # Report (and return) the number of individuals retained for a given
    # call-rate cut-off.
    s2 <- function(gl, percentile, i=ind.call.rate) {
      a <- length(i) - length(i[i<=percentile])
      if (percentile == 1) {
        cat(paste0("Individuals no missing values = ",a," [",round((a*100/nInd(x)),digits=1),"%] across loci; all individuals would be filtered\n"))
      } else {
        cat(paste0(" with less than or equal to ",(1-percentile)*100,"% = ",a," [",round((a*100/nInd(x)),digits=1),"%]; ",nInd(x)-a," individuals would be filtered\n"))
      }
      return(a)
    }
    cutoffs <- vector()
    counts <- vector()
    for (i in seq(0,100,by=5)) {
      counts[i+1] <- s2(x,percentile=((100-i)/100),i=ind.call.rate)
      cutoffs[i+1] <- i
      if (!is.na(counts[i+1])) {
        if ((round(counts[i+1]*100/nInd(x))) == 100) {break}
      }
    }
    cutoffs <- 1-(cutoffs[!is.na(cutoffs)])/100
    counts <- nInd(x) - counts[!is.na(counts)]
    df <- data.frame(Cutoff=cutoffs, SNPs=counts)
  }
  if (v >= 1) {
    cat("gl.report.callrate Completed\n")
  }
  # Return the cut-off/tally table invisibly. Previously NULL was returned,
  # contradicting the documented @return value; invisible() keeps the old
  # no-print behaviour for interactive callers.
  invisible(df)
}
|
#Program simulates rolling of dice to estimate
#probabilities of certain outcomes
#matrix x holds results of 40 rolls
x <- matrix(sample(1:6, 4*10, replace=TRUE), nrow=4, ncol=10)
#1000 roll simulation
y <- sample(1:6, 1000, replace=TRUE)
#returns num of rolls that were 6
sum(y == 6)
#divide by num of rolls to find ratio(should be about 1/6)
#(fixed: the 1000-roll simulation is y, not x, so sum(y == 6) is divided
#by 1000; dividing sum(x == 6) by 1000 gave a ratio near 1/150)
sum(y == 6)/1000
#i is a 4x10 matrix holding 40 simulated rolls.
#Each column is a trial of 4 rolls.
#j is matrix of booleans saying whether a 6 was rolled or not.
#The colSums counts num of 6's in each trial
#The sum(z > 0) shows how many trials had a 6
#The mean reveals how often a 6 was rolled in a trial of 4 rolls
i <- matrix(sample(1:6, 4*10, replace=TRUE), nrow=4, ncol=10)
j <- (i == 6)
z <- colSums(j)
sum(z > 0)
mean(z > 0)
#The same experiment is repeated with 1000 trials
#(fixed: ncol must be 1000; with ncol=10 only the first 40 of the 4000
#simulated rolls were used, so the "1000 trial" run had just 10 trials)
i <- matrix(sample(1:6, 4*1000, replace=TRUE), nrow=4, ncol=1000)
j <- (i == 6)
z <- colSums(j)
sum(z > 0)
mean(z > 0) #mean should equal about 0.518
#Following shows estimate of probability of summing 7
#when rolling two dice
k <- matrix(sample(1:6, 2*1000, replace=TRUE), nrow=2, ncol=1000)
n <- colSums(k)
mean(n == 7)
#The following shows estimate of probability of
#rolling two 6's
a <- matrix(sample(1:6, 2*1000, replace=TRUE), nrow=2, ncol=1000)
b <- colSums(a)
mean(b == 12)
#Program simulates rolling of dice to estimate
#probabilities of certain outcomes
#matrix x holds results of 40 rolls
x <- matrix(sample(1:6, 4*10, replace=TRUE), nrow=4, ncol=10)
#1000 roll simulation
y <- sample(1:6, 1000, replace=TRUE)
#returns num of rolls that were 6
sum(y == 6)
#divide by num of rolls to find ratio(should be about 1/6)
#(fixed: the 1000-roll simulation is y, not x, so sum(y == 6) is divided
#by 1000; dividing sum(x == 6) by 1000 gave a ratio near 1/150)
sum(y == 6)/1000
#i is a 4x10 matrix holding 40 simulated rolls.
#Each column is a trial of 4 rolls.
#j is matrix of booleans saying whether a 6 was rolled or not.
#The colSums counts num of 6's in each trial
#The sum(z > 0) shows how many trials had a 6
#The mean reveals how often a 6 was rolled in a trial of 4 rolls
i <- matrix(sample(1:6, 4*10, replace=TRUE), nrow=4, ncol=10)
j <- (i == 6)
z <- colSums(j)
sum(z > 0)
mean(z > 0)
#The same experiment is repeated with 1000 trials
#(fixed: ncol must be 1000; with ncol=10 only the first 40 of the 4000
#simulated rolls were used, so the "1000 trial" run had just 10 trials)
i <- matrix(sample(1:6, 4*1000, replace=TRUE), nrow=4, ncol=1000)
j <- (i == 6)
z <- colSums(j)
sum(z > 0)
mean(z > 0) #mean should equal about 0.518
#Following shows estimate of probability of summing 7
#when rolling two dice
k <- matrix(sample(1:6, 2*1000, replace=TRUE), nrow=2, ncol=1000)
n <- colSums(k)
mean(n == 7)
#The following shows estimate of probability of
#rolling two 6's
a <- matrix(sample(1:6, 2*1000, replace=TRUE), nrow=2, ncol=1000)
b <- colSums(a)
mean(b == 12)
|
# Clean up
rm(list=ls())
# Working directory
wd<-"/Users/alistairsenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition" # Work iMac
#wd<-"/Users/asenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition"
setwd(wd)
# Load libraries
library(arm)
library(plyr)
library(ggplot2)
library(mgcv)
library(gridExtra)
library(MortalityLaws)
library(Cairo)
library(doSNOW)
library(MASS)
library(metR)
source("scripts/0. Header_Functions.R")
# Read in the standard from Wilmoth et al 2012
standard<-read.csv("clean_data/wilmoth_standard.csv")
#################################################
#################### FIGURE 4 ###################
#################################################
# # # Load the full data
full_data<-read.csv("brass_data/Brass_complete_cases.csv")
# # Load the AIC favoured model which had Macro*time + GDP
# # So the model with macros * time has the best fit
load("models/Complete_cases_AIC_GAMS.rdata")
# Re-order to put females first
GAM<-list()
GAM[[1]]<-AIC_favoured_models[[2]]
GAM[[2]]<-AIC_favoured_models[[1]]
# Year and dataset to use for plotting
year_plot<-2016
dataset_plot<-full_data[which(full_data$Year == year_plot), ]
med_GDP<-round(median(dataset_plot$GDP_perCapita, na.rm=TRUE))
# We will run each sex over 1 core using doSNOW
sexes<-c("Females", "Males")
# For each age class we will make PCF surfaces (each entry gives the order
# protein/carb/fat for the x axis, y axis and the fixed third nutrient)
surface_order<-list()
surface_order[[1]]<-c("Protein.kcal", "Carbo.kcal", "Fat.kcal")
surface_order[[2]]<-c("Protein.kcal", "Fat.kcal", "Carbo.kcal")
surface_order[[3]]<-c("Carbo.kcal", "Fat.kcal", "Protein.kcal")
# Labels
labels_list<-c("Protein kcal/cap/day", "Carbohydrate kcal/cap/day", "Fat kcal/cap/day")
# Limits for the y.axis - based on a bit of trial and error
y_limits<-list()
y_limits[[1]]<-c(1000, 2100)
y_limits[[2]]<-c(400, 1600)
y_limits[[3]]<-c(750, 1600)
# This specifies the color scheme for surface
rgb.palette<-colorRampPalette(c("blue","cyan","yellow","red"), space="Lab", interpolate="linear")
map<-rgb.palette(256)
# Which ages do we want to plot
ages_plot<-c(5, 60)
# List to hold the plots
plots_list<-list()
# Loop to do for each sex (seq_along/seq_len are used throughout instead of
# 1:length(...), which misbehaves on zero-length input)
for(s in seq_along(sexes)){
  # Ages_list to hold the plots for all the ages
  ages_list<-list()
  # For each age class
  for(a in seq_along(ages_plot)){
    # List to hold the 3 slices
    slices_list<-list()
    # Objects for the min and max on each surface
    surf_min<-NA
    surf_max<-NA
    # First pass: compute predicted mortality (qx) for each PCF slice so a
    # common colour scale can be shared across the three slices
    for(i in seq_along(surface_order)){
      # The order for the ith surface
      surface_i<-surface_order[[i]]
      # Get the right set of labels
      labels<-labels_list[match(surface_i, surface_order[[1]])]
      # Lets work out the protein, carb and fat supply values for which we will get lifetables
      energy_1<-seq(floor(min(dataset_plot[,surface_i[1]])), ceiling(max(dataset_plot[,surface_i[1]])), length=101)
      energy_2<-seq(floor(min(dataset_plot[,surface_i[2]])), ceiling(max(dataset_plot[,surface_i[2]])), length=101)
      energy_3<-round(quantile(dataset_plot[,surface_i[3]])[3])
      # Get all of the combinations
      predict_values<-expand.grid(energy_1, energy_2, energy_3)
      names(predict_values)<-surface_order[[i]]
      # Cull out any unobserved energy supplies
      in.poly<-as.numeric(inhull(predict_values[,c("Protein.kcal", "Carbo.kcal", "Fat.kcal")], dataset_plot[,c("Protein.kcal", "Carbo.kcal", "Fat.kcal")]) != -1)
      predict_values<-predict_values[which(in.poly==1),]
      # Now convert each value to the ASM
      predict_values$qx<-NA
      for(v in seq_len(nrow(predict_values))){
        predict_values$qx[v]<-as.numeric(convert_brass_general(predict_values[v, surface_order[[1]]], sex=sexes[s], age=ages_plot[a], GDP=med_GDP, year=year_plot, standard=standard, GAM=GAM[[s]]))
      }
      # Save the ith set of predicted values
      slices_list[[i]]<-predict_values
      surf_min<-min(surf_min, predict_values$qx, na.rm=TRUE)
      surf_max<-max(surf_max, predict_values$qx, na.rm=TRUE)
    }
    # Second pass: actually draw each slice
    for(i in seq_along(surface_order)){
      # The order for the ith surface
      surface_i<-surface_order[[i]]
      # Get the right set of labels
      labels<-labels_list[match(surface_i, surface_order[[1]])]
      # Make the surface
      predict_values<-slices_list[[i]]
      locs<-(range(predict_values$qx, na.rm=TRUE) - surf_min) / (surf_max-surf_min) * 256
      contour_use<-signif((max(predict_values$qx, na.rm=TRUE)-min(predict_values$qx, na.rm=TRUE))/5, 1)
      names(predict_values)[c(1,2)]<-c("x", "y")
      # "panel" rather than "plot" so base::plot is not shadowed
      panel<-ggplot(predict_values, aes(x=x, y=y)) +
        geom_raster(aes(fill=qx), show.legend=FALSE, interpolate=FALSE, na.rm=TRUE) +
        scale_fill_gradientn(colors=map[locs[1]:locs[2]]) +
        geom_contour(data=predict_values, aes(x=x, y=y, z=qx), na.rm=TRUE, color="black", binwidth=contour_use) +
        geom_label_contour(data=predict_values, aes(x=x, y=y, z=qx), size=3, binwidth=contour_use, skip=1) +
        theme_bw() +
        labs(x = labels[1], y = labels[2], subtitle=paste0(labels[3], " = ", round(quantile(dataset_plot[,surface_i[3]])[3]))) +
        theme(axis.text=element_text(size=15), axis.title=element_text(size=15)) +
        theme(title=element_text(size=15)) +
        xlim(range(dataset_plot[,surface_i[1]])) +
        ylim(y_limits[[i]]) +
        annotate("text", x = ceiling(min(dataset_plot[,surface_i[1]])), y = max(y_limits[[i]]), label=paste0(sexes[s], ", Age = ", ages_plot[a], " to ", ages_plot[a]+5), hjust = 0, vjust = 1, size = 7)
      # Save slice i
      slices_list[[i]]<-panel
    }
    # save the 3 slices for age a
    ages_list[[a]]<-slices_list
  }
  # Save the slices at each age for the sth sex
  plots_list[[s]]<-ages_list
}
pdf("figures/Figure_4.pdf", height=20, width=15)
grid.arrange(plots_list[[1]][[1]][[1]]+labs(title="A"),
             plots_list[[1]][[1]][[2]]+labs(title="B"),
             plots_list[[1]][[1]][[3]]+labs(title="C"),
             plots_list[[1]][[2]][[1]]+labs(title="D"),
             plots_list[[1]][[2]][[2]]+labs(title="E"),
             plots_list[[1]][[2]][[3]]+labs(title="F"),
             plots_list[[2]][[1]][[1]]+labs(title="G"),
             plots_list[[2]][[1]][[2]]+labs(title="H"),
             plots_list[[2]][[1]][[3]]+labs(title="I"),
             plots_list[[2]][[2]][[1]]+labs(title="J"),
             plots_list[[2]][[2]][[2]]+labs(title="K"),
             plots_list[[2]][[2]][[3]]+labs(title="L"),
             layout_matrix=rbind(c(1,2,3),
                                 c(4,5,6),
                                 c(7,8,9),
                                 c(10,11,12)))
dev.off()
| /9. Figure 4.R | no_license | Barardo/ASM_HumanLT | R | false | false | 6,483 | r |
# Clean up
rm(list=ls())
# Working directory
wd<-"/Users/alistairsenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition" # Work iMac
#wd<-"/Users/asenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition"
setwd(wd)
# Load libraries
library(arm)
library(plyr)
library(ggplot2)
library(mgcv)
library(gridExtra)
library(MortalityLaws)
library(Cairo)
library(doSNOW)
library(MASS)
library(metR)
source("scripts/0. Header_Functions.R")
# Read in the standard from Wilmoth et al 2012
standard<-read.csv("clean_data/wilmoth_standard.csv")
#################################################
#################### FIGURE 4 ###################
#################################################
# # # Load the full data
full_data<-read.csv("brass_data/Brass_complete_cases.csv")
# # Load the AIC favoured model which had Macro*time + GDP
# # So the model with macros * time has the best fit
load("models/Complete_cases_AIC_GAMS.rdata")
# Re-order to put females first
GAM<-list()
GAM[[1]]<-AIC_favoured_models[[2]]
GAM[[2]]<-AIC_favoured_models[[1]]
# Year and dataset to use for plotting
year_plot<-2016
dataset_plot<-full_data[which(full_data$Year == year_plot), ]
med_GDP<-round(median(dataset_plot$GDP_perCapita, na.rm=TRUE))
# We will run each sex over 1 core using doSNOW
sexes<-c("Females", "Males")
# For each age class we will make PCF surfaces (each entry gives the order
# protein/carb/fat for the x axis, y axis and the fixed third nutrient)
surface_order<-list()
surface_order[[1]]<-c("Protein.kcal", "Carbo.kcal", "Fat.kcal")
surface_order[[2]]<-c("Protein.kcal", "Fat.kcal", "Carbo.kcal")
surface_order[[3]]<-c("Carbo.kcal", "Fat.kcal", "Protein.kcal")
# Labels
labels_list<-c("Protein kcal/cap/day", "Carbohydrate kcal/cap/day", "Fat kcal/cap/day")
# Limits for the y.axis - based on a bit of trial and error
y_limits<-list()
y_limits[[1]]<-c(1000, 2100)
y_limits[[2]]<-c(400, 1600)
y_limits[[3]]<-c(750, 1600)
# This specifies the color scheme for surface
rgb.palette<-colorRampPalette(c("blue","cyan","yellow","red"), space="Lab", interpolate="linear")
map<-rgb.palette(256)
# Which ages do we want to plot
ages_plot<-c(5, 60)
# List to hold the plots
plots_list<-list()
# Loop to do for each sex (seq_along/seq_len are used throughout instead of
# 1:length(...), which misbehaves on zero-length input)
for(s in seq_along(sexes)){
  # Ages_list to hold the plots for all the ages
  ages_list<-list()
  # For each age class
  for(a in seq_along(ages_plot)){
    # List to hold the 3 slices
    slices_list<-list()
    # Objects for the min and max on each surface
    surf_min<-NA
    surf_max<-NA
    # First pass: compute predicted mortality (qx) for each PCF slice so a
    # common colour scale can be shared across the three slices
    for(i in seq_along(surface_order)){
      # The order for the ith surface
      surface_i<-surface_order[[i]]
      # Get the right set of labels
      labels<-labels_list[match(surface_i, surface_order[[1]])]
      # Lets work out the protein, carb and fat supply values for which we will get lifetables
      energy_1<-seq(floor(min(dataset_plot[,surface_i[1]])), ceiling(max(dataset_plot[,surface_i[1]])), length=101)
      energy_2<-seq(floor(min(dataset_plot[,surface_i[2]])), ceiling(max(dataset_plot[,surface_i[2]])), length=101)
      energy_3<-round(quantile(dataset_plot[,surface_i[3]])[3])
      # Get all of the combinations
      predict_values<-expand.grid(energy_1, energy_2, energy_3)
      names(predict_values)<-surface_order[[i]]
      # Cull out any unobserved energy supplies
      in.poly<-as.numeric(inhull(predict_values[,c("Protein.kcal", "Carbo.kcal", "Fat.kcal")], dataset_plot[,c("Protein.kcal", "Carbo.kcal", "Fat.kcal")]) != -1)
      predict_values<-predict_values[which(in.poly==1),]
      # Now convert each value to the ASM
      predict_values$qx<-NA
      for(v in seq_len(nrow(predict_values))){
        predict_values$qx[v]<-as.numeric(convert_brass_general(predict_values[v, surface_order[[1]]], sex=sexes[s], age=ages_plot[a], GDP=med_GDP, year=year_plot, standard=standard, GAM=GAM[[s]]))
      }
      # Save the ith set of predicted values
      slices_list[[i]]<-predict_values
      surf_min<-min(surf_min, predict_values$qx, na.rm=TRUE)
      surf_max<-max(surf_max, predict_values$qx, na.rm=TRUE)
    }
    # Second pass: actually draw each slice
    for(i in seq_along(surface_order)){
      # The order for the ith surface
      surface_i<-surface_order[[i]]
      # Get the right set of labels
      labels<-labels_list[match(surface_i, surface_order[[1]])]
      # Make the surface
      predict_values<-slices_list[[i]]
      locs<-(range(predict_values$qx, na.rm=TRUE) - surf_min) / (surf_max-surf_min) * 256
      contour_use<-signif((max(predict_values$qx, na.rm=TRUE)-min(predict_values$qx, na.rm=TRUE))/5, 1)
      names(predict_values)[c(1,2)]<-c("x", "y")
      # "panel" rather than "plot" so base::plot is not shadowed
      panel<-ggplot(predict_values, aes(x=x, y=y)) +
        geom_raster(aes(fill=qx), show.legend=FALSE, interpolate=FALSE, na.rm=TRUE) +
        scale_fill_gradientn(colors=map[locs[1]:locs[2]]) +
        geom_contour(data=predict_values, aes(x=x, y=y, z=qx), na.rm=TRUE, color="black", binwidth=contour_use) +
        geom_label_contour(data=predict_values, aes(x=x, y=y, z=qx), size=3, binwidth=contour_use, skip=1) +
        theme_bw() +
        labs(x = labels[1], y = labels[2], subtitle=paste0(labels[3], " = ", round(quantile(dataset_plot[,surface_i[3]])[3]))) +
        theme(axis.text=element_text(size=15), axis.title=element_text(size=15)) +
        theme(title=element_text(size=15)) +
        xlim(range(dataset_plot[,surface_i[1]])) +
        ylim(y_limits[[i]]) +
        annotate("text", x = ceiling(min(dataset_plot[,surface_i[1]])), y = max(y_limits[[i]]), label=paste0(sexes[s], ", Age = ", ages_plot[a], " to ", ages_plot[a]+5), hjust = 0, vjust = 1, size = 7)
      # Save slice i
      slices_list[[i]]<-panel
    }
    # save the 3 slices for age a
    ages_list[[a]]<-slices_list
  }
  # Save the slices at each age for the sth sex
  plots_list[[s]]<-ages_list
}
pdf("figures/Figure_4.pdf", height=20, width=15)
grid.arrange(plots_list[[1]][[1]][[1]]+labs(title="A"),
             plots_list[[1]][[1]][[2]]+labs(title="B"),
             plots_list[[1]][[1]][[3]]+labs(title="C"),
             plots_list[[1]][[2]][[1]]+labs(title="D"),
             plots_list[[1]][[2]][[2]]+labs(title="E"),
             plots_list[[1]][[2]][[3]]+labs(title="F"),
             plots_list[[2]][[1]][[1]]+labs(title="G"),
             plots_list[[2]][[1]][[2]]+labs(title="H"),
             plots_list[[2]][[1]][[3]]+labs(title="I"),
             plots_list[[2]][[2]][[1]]+labs(title="J"),
             plots_list[[2]][[2]][[2]]+labs(title="K"),
             plots_list[[2]][[2]][[3]]+labs(title="L"),
             layout_matrix=rbind(c(1,2,3),
                                 c(4,5,6),
                                 c(7,8,9),
                                 c(10,11,12)))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all-sim.R
\name{TC_CAR1_sc}
\alias{TC_CAR1_sc}
\title{A Wrap Function to analyze a Simulated Data - All Cases}
\usage{
TC_CAR1_sc(
RFIanalysis,
scenario,
EE,
DE,
C.matrix,
Subject,
Time,
Nboot,
nrep,
ncores,
name_dir_sim = NULL,
print.progress = FALSE,
saveboot = FALSE
)
}
\arguments{
\item{RFIanalysis}{the output from RFI RNA-seq dataset. In the ideal simulation case,
it is res from TC_CAR1, in the misspecified case, it is res from TC_Symm}
\item{scenario}{either 2- 'Symm' or 1- 'CAR1'}
\item{EE}{number of EE genes}
\item{DE}{number of DE genes}
\item{C.matrix}{is a list of matrix Ci in testing H0: Ci*beta = 0.}
\item{Subject}{a vector of subjects or experimental units.}
\item{Time}{a vector of time points.}
\item{Nboot}{number of bootstrap replicates, default is 100.}
\item{nrep}{index of sim replicate}
\item{ncores}{number of cores for embarrassingly parallel procedure. Default
value of \code{ncores} is 1.}
\item{name_dir_sim}{name of directory to contain the output and result of this function}
\item{print.progress}{\code{TRUE} or \code{FALSE}, printing the process or not.}
\item{saveboot}{\code{TRUE} or \code{FALSE} to save or not save bootstrap output}
}
\value{
R, V, FDR, PAUC, and AUC of all 7 methods (2 oracles with unshrunken and shrunken)
with FPR = 0.05, 0.10, 0.20 for each S, R, V, FDR, PAUC.
}
\description{
This function does the following: 1. Generating a simulated
counts data set consisting of EE genes, and DE genes with
respect to some contrast from an ideal case, i.e., the counts
are generated from corCAR1 structure, or misspecified case, i.e., the counts
are generated from corSymm structure; analyzing this simulated data set
using methods: \code{\link{TC_CAR1}},
\code{\link{voomlimmaFit}}, \code{\link{edgeRFit}}
, \code{\link{DESeq2Fit}}.
}
\examples{
\donttest{
data(res)
data(resSymm)
data(design)
data(covset)
RFIanalysis <- list(CAR1 = res, Symm = resSymm)
C.matrix <- list()
# test for Line main effect
C.matrix[[1]] <- limma::makeContrasts(line2, levels = design)
names(C.matrix) <- c("line2")
scenario <- 1; EE <- 3; DE <- 2; ncores <- 1; Subject <- covset$ear;
Time <- covset$time; Nboot <- 2; nrep <- 1;
name_dir_sim <- NULL
print.progress <- FALSE; saveboot <- FALSE;
TC_Symm_scOut <- rmRNAseq:::TC_CAR1_sc(RFIanalysis, scenario, EE, DE, C.matrix,
Subject, Time, Nboot, nrep, ncores, name_dir_sim , print.progress, saveboot)
names(TC_Symm_scOut)
}
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all-sim.R
\name{TC_CAR1_sc}
\alias{TC_CAR1_sc}
\title{A Wrap Function to analyze a Simulated Data - All Cases}
\usage{
TC_CAR1_sc(
RFIanalysis,
scenario,
EE,
DE,
C.matrix,
Subject,
Time,
Nboot,
nrep,
ncores,
name_dir_sim = NULL,
print.progress = FALSE,
saveboot = FALSE
)
}
\arguments{
\item{RFIanalysis}{the output from RFI RNA-seq dataset. In the ideal simulation case,
it is res from TC_CAR1, in the misspecified case, it is res from TC_Symm}
\item{scenario}{either 2- 'Symm' or 1- 'CAR1'}
\item{EE}{number of EE genes}
\item{DE}{number of DE genes}
\item{C.matrix}{is a list of matrix Ci in testing H0: Ci*beta = 0.}
\item{Subject}{a vector of subjects or experimental units.}
\item{Time}{a vector of time points.}
\item{Nboot}{number of bootstrap replicates, default is 100.}
\item{nrep}{index of sim replicate}
\item{ncores}{number of cores for embarrassingly parallel procedure. Default
value of \code{ncores} is 1.}
\item{name_dir_sim}{name of directory to contain the output and result of this function}
\item{print.progress}{\code{TRUE} or \code{FALSE}, printing the process or not.}
\item{saveboot}{\code{TRUE} or \code{FALSE} to save or not save bootstrap output}
}
\value{
R, V, FDR, PAUC, and AUC of all 7 methods (2 oracles with unshrunken and shrunken)
with FPR = 0.05, 0.10, 0.20 for each S, R, V, FDR, PAUC.
}
\description{
This function does the following: 1. Generating a simulated
counts data set consisting of EE genes, and DE genes with
respect to some contrast from an ideal case, i.e., the counts
are generated from corCAR1 structure, or misspecified case, i.e., the counts
are generated from corSymm structure; analyzing this simulated data set
using methods: \code{\link{TC_CAR1}},
\code{\link{voomlimmaFit}}, \code{\link{edgeRFit}}
, \code{\link{DESeq2Fit}}.
}
\examples{
\donttest{
data(res)
data(resSymm)
data(design)
data(covset)
RFIanalysis <- list(CAR1 = res, Symm = resSymm)
C.matrix <- list()
# test for Line main effect
C.matrix[[1]] <- limma::makeContrasts(line2, levels = design)
names(C.matrix) <- c("line2")
scenario <- 1; EE <- 3; DE <- 2; ncores <- 1; Subject <- covset$ear;
Time <- covset$time; Nboot <- 2; nrep <- 1;
name_dir_sim <- NULL
print.progress <- FALSE; saveboot <- FALSE;
TC_Symm_scOut <- rmRNAseq:::TC_CAR1_sc(RFIanalysis, scenario, EE, DE, C.matrix,
Subject, Time, Nboot, nrep, ncores, name_dir_sim , print.progress, saveboot)
names(TC_Symm_scOut)
}
}
|
# Flush all Redis databases before the remaining tests run so that state
# left over from earlier runs cannot affect them. Skipped when no Redis
# server is reachable.
test_that("cleanup Redis", {
  skip_if_not(rpalive())
  backend <- RedisBackend()
  expect_error(.FLUSHALL(backend), NA)
})
# Constructing both flavours of RedisBackend under a fresh job id should
# yield valid objects (manager first, then worker, as before).
test_that("Creating RedisBackend succeeds", {
  skip_if_not(rpalive())
  job <- BiocParallel::ipcid()
  for (role in c("manager", "worker")) {
    expect_true(validObject(RedisBackend(jobname = job, type = role)))
  }
  gc() # drop the unreferenced backends so their finalizers run
})
# A manager should see exactly the workers registered under its job name,
# both by count and by id, and all backends should shut down cleanly.
test_that("Managers and workers start and end correctly", {
  skip_if_not(rpalive())
  job <- BiocParallel::ipcid()
  manager <- RedisBackend(jobname = job, type = "manager")
  workers <- replicate(2L, RedisBackend(jobname = job, type = "worker"))
  expect_equal(length(manager), 2L)
  ids <- vapply(workers, function(w) w[["id"]], character(1))
  expect_true(setequal(bpworkers(manager), ids))
  for (w in workers) .QUIT(w)
  .QUIT(manager)
})
# Integration test of the low-level message-passing primitives against a
# live Redis server (skipped when none is reachable). Exercises, in order:
# .send_to() -> .recv() -> .send() -> .recv_any(), checking the Redis-side
# queue counters via .rpstatus() after every step.
test_that("low-level dispatching function", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisBackend(jobname = jobname, type = "manager")
worker <- RedisBackend(jobname = jobname, type = "worker")
## .send_to
# A task addressed to worker 1 lands in that worker's private queue.
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.send_to(manager, 1, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 1L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 1L, runningTasks = 0L)
)
## .recv
# The worker pops the same message and the task moves to "running".
expect_equal(.recv(worker), taskMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
# Posting a result finishes the task on the manager side.
resultMessage <- "result message"
.send(worker, resultMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
# Collecting the result drains the finished-task counter back to zero.
result <- .recv_any(manager)
expect_equal(result$node, 1L)
expect_equal(result$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
.QUIT(manager)
.QUIT(worker)
gc()
})
# Same round trip as the low-level test, but driven through the task-manager
# wrapper (.manager() on a started RedisParam): .manager_send()/.manager_flush()
# enqueue into the public queue, and .manager_recv() collects the result.
test_that("Task manager: basic", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
## .send_to
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
## .recv
task <- .recv(worker)
# Argument order is not guaranteed across serialisation; normalise the
# names before comparing with the message that was sent.
task$data$args <- task$data$args[sort(names(task$data$args))]
expect_equal(task, taskMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
resultMessage <- "result message"
.send(worker, resultMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
.QUIT(worker)
gc()
})
# The task manager buffers sends: a single .manager_send() without an
# explicit flush leaves the public queue empty, but after waiting past the
# auto-flush interval a second send pushes both buffered tasks out.
test_that("Task manager: auto flush", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
## .send_to
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
# No flush yet: nothing should be visible in Redis.
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
# Wait long enough for the auto-flush timer to expire before sending again.
Sys.sleep(5)
.manager_send(taskManager, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 2L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 2L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv
# Both buffered copies of the task are now retrievable by the worker.
task <- .recv(worker)
expect_equal(task, taskMessage)
task <- .recv(worker)
expect_equal(task, taskMessage)
.QUIT(worker)
gc()
})
# Two tasks sent through the task manager produce two results that
# .manager_recv() returns in FIFO order.
test_that("Task manager: multiple results", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
resultMessage1 <- "result message1"
resultMessage2 <- "result message2"
.manager_send(taskManager, taskMessage)
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
# Worker processes the two tasks one after the other.
task <- .recv(worker)
expect_equal(task, taskMessage)
.send(worker, resultMessage1)
task <- .recv(worker)
expect_equal(task, taskMessage)
.send(worker, resultMessage2)
# Results come back in the order the tasks were completed.
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage1)
expect_equal(result[[2]]$value, resultMessage2)
.QUIT(worker)
gc()
})
# .manager_send_all() broadcasts one message (here a .DONE sentinel) to every
# worker's private queue; each worker receives its own copy, and all replies
# are collected with .manager_recv_all().
test_that("Task manager: send all", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
workers <- replicate(2L, RedisBackend(jobname = jobname, type = "worker"))
bpstart(manager)
taskManager <- .manager(manager)
## .manager_send_all
testMessage <- BiocParallel:::.DONE()
.manager_send_all(taskManager, testMessage)
.manager_flush(taskManager)
# One private task per worker, nothing in the public queue.
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 2L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 2L,
missingTasks = 0L,
workerNum = 2L)
)
task1 <- .recv(workers[[1]])
task2 <- .recv(workers[[2]])
expect_identical(task1, testMessage)
expect_identical(task2, testMessage)
.send(workers[[1]], "DONE1")
.send(workers[[2]], "DONE2")
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 2L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 2L)
)
# The reply order is not deterministic, so compare as a set.
result <- .manager_recv_all(taskManager)
expect_true(
setequal(
c(result[[1]]$value,result[[2]]$value),
c("DONE1", "DONE2")
)
)
## clean up
.QUIT(workers[[1]])
.QUIT(workers[[2]])
gc()
})
# Fault-tolerance test: a worker takes a task and then dies (.QUIT) before
# replying. The manager must detect the task as "missing" and, after
# .resubmitMissingTasks(), requeue it so a second worker can finish it.
test_that("Missing task", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
workers <- replicate(2L, RedisBackend(jobname = jobname, type = "worker"))
bpstart(manager)
taskManager <- .manager(manager)
## send a task to the first worker
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 2L)
)
## .recv
# Worker 1 claims the task and immediately quits without sending a result,
# leaving the task orphaned (missingTasks = 1).
task <- .recv(workers[[1]])
.QUIT(workers[[1]])
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 1L,
workerNum = 1L)
)
## submit twice as the missing value needs
## to be checked twice
.resubmitMissingTasks(bpbackend(manager))
.resubmitMissingTasks(bpbackend(manager))
# The orphaned task is back in the public queue.
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
task <- .recv(workers[[2]])
# Argument order is not guaranteed across serialisation; normalise the
# names before comparing with the message that was sent.
task$data$args <- task$data$args[sort(names(task$data$args))]
expect_equal(task, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 1L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(workers[[2]]),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
resultMessage <- "result message"
.send(workers[[2]], resultMessage)
expect_equal(
.rpstatus(workers[[2]]),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## clean up
.QUIT(workers[[2]])
gc()
})
test_that("Corrupted queue", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker", log = TRUE)
bpstart(manager)
taskManager <- .manager(manager)
## send a task to the first worker
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
## .recv
task <- .recv(worker)
.FLUSHALL(worker)
out <- suppressWarnings(capture.output(.send(worker, "test")))
expect_true(regexpr("WARN", out[1]) == 1)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## clean up
.QUIT(worker)
gc()
})
# Verifies that the preceding simulations left no keys behind in Redis,
# i.e. that the backend cleans up its queues and counters after itself.
# (Fixed typo in the test description: "memery" -> "memory".)
test_that("check Redis memory leak", {
    skip_if_not(rpalive())
    manager <- RedisBackend()
    # Any surviving key would indicate state leaked by an earlier test.
    keys <- manager$redisClient$KEYS("*")
    expect_true(length(keys) == 0L)
})
| /tests/testthat/test_RedisBackend.R | no_license | mtmorgan/RedisParam | R | false | false | 13,086 | r | test_that("cleanup Redis", {
skip_if_not(rpalive())
manager <- RedisBackend()
expect_error(.FLUSHALL(manager), NA)
})
# Smoke test: a manager- and a worker-side RedisBackend can both be
# constructed against the same jobname and pass validObject().
test_that("Creating RedisBackend succeeds", {
skip_if_not(rpalive())
# Fresh jobname so this test does not collide with other tests' queues.
jobname <- BiocParallel::ipcid()
expect_true(validObject(RedisBackend(jobname = jobname, type = "manager")))
expect_true(validObject(RedisBackend(jobname = jobname, type = "worker")))
gc()# clean-up manager & worker
})
# Lifecycle test: two workers registering under one jobname are visible to
# the manager, and both sides shut down cleanly via .QUIT.
test_that("Managers and workers start and end correctly", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisBackend(jobname = jobname, type = "manager")
workers <- replicate(2L, RedisBackend(jobname = jobname, type = "worker"))
# length() of the manager backend reports the number of attached workers.
expect_equal(length(manager), 2L)
# The manager's worker list must match the ids the workers report themselves.
worker_ids <- vapply(workers, `[[`, character(1), "id")
expect_true(setequal(bpworkers(manager), worker_ids))
lapply(workers, .QUIT)
.QUIT(manager)
})
# Exercises the low-level manager/worker protocol one step at a time:
# .send_to (manager -> a specific worker), .recv (worker pulls the task),
# .send (worker returns a result), .recv_any (manager collects it).
# After every step the Redis-side counters reported by .rpstatus are checked,
# so the statement order below is load-bearing - do not reorder.
test_that("low-level dispatching function", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisBackend(jobname = jobname, type = "manager")
worker <- RedisBackend(jobname = jobname, type = "worker")
## .send_to
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
# Direct send to worker 1 places the task on that worker's private queue.
.send_to(manager, 1, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 1L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 1L, runningTasks = 0L)
)
## .recv
# Receiving moves the task from waiting to running on the worker side.
expect_equal(.recv(worker), taskMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
# Returning a result marks the task finished on the manager side.
resultMessage <- "result message"
.send(worker, resultMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
# Collecting the result drains the finished counter; the result records
# which worker (node) produced it.
result <- .recv_any(manager)
expect_equal(result$node, 1L)
expect_equal(result$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
.QUIT(manager)
.QUIT(worker)
gc()
})
test_that("Task manager: basic", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
## .send_to
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
## .recv
task <- .recv(worker)
task$data$args <- task$data$args[sort(names(task$data$args))]
expect_equal(task, taskMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
resultMessage <- "result message"
.send(worker, resultMessage)
expect_equal(
.rpstatus(worker),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
.QUIT(worker)
gc()
})
test_that("Task manager: auto flush", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
## .send_to
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
Sys.sleep(5)
.manager_send(taskManager, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 2L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 2L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv
task <- .recv(worker)
expect_equal(task, taskMessage)
task <- .recv(worker)
expect_equal(task, taskMessage)
.QUIT(worker)
gc()
})
test_that("Task manager: multiple results", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker")
bpstart(manager)
taskManager <- .manager(manager)
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
resultMessage1 <- "result message1"
resultMessage2 <- "result message2"
.manager_send(taskManager, taskMessage)
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
task <- .recv(worker)
expect_equal(task, taskMessage)
.send(worker, resultMessage1)
task <- .recv(worker)
expect_equal(task, taskMessage)
.send(worker, resultMessage2)
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage1)
expect_equal(result[[2]]$value, resultMessage2)
.QUIT(worker)
gc()
})
test_that("Task manager: send all", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
workers <- replicate(2L, RedisBackend(jobname = jobname, type = "worker"))
bpstart(manager)
taskManager <- .manager(manager)
## .manager_send_all
testMessage <- BiocParallel:::.DONE()
.manager_send_all(taskManager, testMessage)
.manager_flush(taskManager)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 2L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 2L,
missingTasks = 0L,
workerNum = 2L)
)
task1 <- .recv(workers[[1]])
task2 <- .recv(workers[[2]])
expect_identical(task1, testMessage)
expect_identical(task2, testMessage)
.send(workers[[1]], "DONE1")
.send(workers[[2]], "DONE2")
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 2L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 2L)
)
result <- .manager_recv_all(taskManager)
expect_true(
setequal(
c(result[[1]]$value,result[[2]]$value),
c("DONE1", "DONE2")
)
)
## clean up
.QUIT(workers[[1]])
.QUIT(workers[[2]])
gc()
})
test_that("Missing task", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
workers <- replicate(2L, RedisBackend(jobname = jobname, type = "worker"))
bpstart(manager)
taskManager <- .manager(manager)
## send a task to the first worker
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 2L)
)
## .recv
task <- .recv(workers[[1]])
.QUIT(workers[[1]])
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 1L,
workerNum = 1L)
)
## submit twice as the missing value needs
## to be checked twice
.resubmitMissingTasks(bpbackend(manager))
.resubmitMissingTasks(bpbackend(manager))
expect_equal(
.rpstatus(manager),
list(publicTasks = 1L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
task <- .recv(workers[[2]])
task$data$args <- task$data$args[sort(names(task$data$args))]
expect_equal(task, taskMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 1L,
finishedTasks = 0L,
allTasks = 1L,
missingTasks = 0L,
workerNum = 1L)
)
expect_equal(
.rpstatus(workers[[2]]),
list(waitingTasks = 0L, runningTasks = 1L)
)
## .send
resultMessage <- "result message"
.send(workers[[2]], resultMessage)
expect_equal(
.rpstatus(workers[[2]]),
list(waitingTasks = 0L, runningTasks = 0L)
)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 1L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## .recv_any
result <- .manager_recv(taskManager)
expect_equal(result[[1]]$value, resultMessage)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## clean up
.QUIT(workers[[2]])
gc()
})
test_that("Corrupted queue", {
skip_if_not(rpalive())
jobname <- BiocParallel::ipcid()
manager <- RedisParam(0, jobname = jobname, is.worker = FALSE)
worker <- RedisBackend(jobname = jobname, type = "worker", log = TRUE)
bpstart(manager)
taskManager <- .manager(manager)
## send a task to the first worker
taskMessage <- BiocParallel:::.EXEC("test", identity,
list(a = runif(1), b = runif(1)),
static.args = "a")
.manager_send(taskManager, taskMessage)
.manager_flush(taskManager)
## .recv
task <- .recv(worker)
.FLUSHALL(worker)
out <- suppressWarnings(capture.output(.send(worker, "test")))
expect_true(regexpr("WARN", out[1]) == 1)
expect_equal(
.rpstatus(manager),
list(publicTasks = 0L,
privateTasks = 0L,
runningTasks = 0L,
finishedTasks = 0L,
allTasks = 0L,
missingTasks = 0L,
workerNum = 1L)
)
## clean up
.QUIT(worker)
gc()
})
# Verifies that the preceding simulations left no keys behind in Redis,
# i.e. that the backend cleans up its queues and counters after itself.
# (Fixed typo in the test description: "memery" -> "memory".)
test_that("check Redis memory leak", {
    skip_if_not(rpalive())
    manager <- RedisBackend()
    # Any surviving key would indicate state leaked by an earlier test.
    keys <- manager$redisClient$KEYS("*")
    expect_true(length(keys) == 0L)
})
|
### Test Lords functions
context("Lords functions")
# Imports ---------------------------------------------------------------------
source("validate.R")
# Mocks -----------------------------------------------------------------------
# Stub standing in for clmnis::fetch_query_data inside with_mock().
# Returns canned fixture data (loaded via the test helper read()) keyed on
# the `data_output` query name; `house` is accepted but ignored.
# NOTE(review): switch() has no default, so an unlisted data_output yields
# NULL silently - failures would surface downstream, not here.
mock_fetch_query_data <- function(house, data_output) {
switch(data_output,
"BasicDetails" = read("lords_basic_details"),
"HouseMemberships" = read("lords_house_memberships"),
"Parties" = read("lords_party_memberships"),
"OtherParliaments" = read("lords_other_parliaments"),
"ElectionsContested" = read("lords_contested_elections"),
"GovernmentPosts" = read("lords_government_roles"),
"OppositionPosts" = read("lords_opposition_roles"),
"ParliamentaryPosts" = read("lords_parliamentary_roles"),
"MaidenSpeeches" = read("lords_maiden_speeches"),
"Addresses" = read("lords_addresses")
)
}
# Tests -----------------------------------------------------------------------
# Checks fetch_lords() against stored expected output: the mocked query
# data is processed and the listed columns are compared row-by-row (keyed
# on mnis_id) via the compare_obs_exp helper, both for the default call and
# for a from_date/to_date-filtered call.
test_that("fetch_lords processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"full_title",
"lord_type",
"current_status",
"current_status_reason",
"gender",
"date_of_death")
obs <- fetch_lords()
exp <- readRDS("data/fetch_lords.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
# Same comparison with date filtering applied.
obs <- fetch_lords(from_date = TEST_DATE, to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_memberships processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"seat_incumbency_start_date",
"seat_incumbency_end_date")
obs <- fetch_lords_memberships()
exp <- readRDS("data/fetch_lords_memberships.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_memberships(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_memberships_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_party_memberships processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"party_mnis_id",
"party_name",
"party_membership_start_date",
"party_membership_end_date")
obs <- fetch_lords_party_memberships()
exp <- readRDS("data/fetch_lords_party_memberships.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_party_memberships_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_party_memberships_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(collapse = TRUE)
exp <- readRDS("data/fetch_lords_party_memberships_collapse.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_other_parliaments processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"other_parliaments_mnis_id",
"other_parliaments_name",
"other_parliaments_incumbency_start_date",
"other_parliaments_incumbency_end_date")
obs <- fetch_lords_other_parliaments()
exp <- readRDS("data/fetch_lords_other_parliaments.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_other_parliaments(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_other_parliaments_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_contested_elections processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"contested_election_mnis_id",
"contested_election_name",
"contested_election_date",
"contested_election_type",
"contested_election_constituency")
obs <- fetch_lords_contested_elections()
exp <- readRDS("data/fetch_lords_contested_elections.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_contested_elections(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_contested_elections_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_government_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"government_role_mnis_id",
"government_role_name",
"government_role_incumbency_start_date",
"government_role_incumbency_end_date",
"government_role_unpaid")
obs <- fetch_lords_government_roles()
exp <- readRDS("data/fetch_lords_government_roles.Rdata")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_government_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_government_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_government_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_government_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_opposition_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"opposition_role_mnis_id",
"opposition_role_name",
"opposition_role_incumbency_start_date",
"opposition_role_incumbency_end_date",
"opposition_role_unpaid")
obs <- fetch_lords_opposition_roles()
exp <- readRDS("data/fetch_lords_opposition_roles.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_opposition_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_opposition_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_opposition_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_opposition_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_parliamentary_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"parliamentary_role_mnis_id",
"parliamentary_role_name",
"parliamentary_role_incumbency_start_date",
"parliamentary_role_incumbency_end_date",
"parliamentary_role_unpaid")
obs <- fetch_lords_parliamentary_roles()
exp <- readRDS("data/fetch_lords_parliamentary_roles.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_parliamentary_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_parliamentary_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_parliamentary_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_parliamentary_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_maiden_speeches processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"maiden_speech_house",
"maiden_speech_date",
"maiden_speech_hansard_reference",
"maiden_speech_subject")
obs <- fetch_lords_maiden_speeches()
exp <- readRDS("data/fetch_lords_maiden_speeches.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_maiden_speeches(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_maiden_speeches_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_addresses processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"address_type_mnis_id",
"address_type",
"address_is_preferred",
"address_is_physical",
"address_note",
"address_1",
"address_2",
"address_3",
"address_4",
"address_5",
"postcode",
"phone",
"fax",
"email",
"address_other")
obs <- fetch_lords_addresses()
exp <- readRDS("data/fetch_lords_addresses.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
| /tests/testthat/test_lords.R | permissive | houseofcommonslibrary/clmnis | R | false | false | 10,381 | r | ### Test Lords functions
context("Lords functions")
# Imports ---------------------------------------------------------------------
source("validate.R")
# Mocks -----------------------------------------------------------------------
# Stub standing in for clmnis::fetch_query_data inside with_mock().
# Returns canned fixture data (loaded via the test helper read()) keyed on
# the `data_output` query name; `house` is accepted but ignored.
# NOTE(review): switch() has no default, so an unlisted data_output yields
# NULL silently - failures would surface downstream, not here.
mock_fetch_query_data <- function(house, data_output) {
switch(data_output,
"BasicDetails" = read("lords_basic_details"),
"HouseMemberships" = read("lords_house_memberships"),
"Parties" = read("lords_party_memberships"),
"OtherParliaments" = read("lords_other_parliaments"),
"ElectionsContested" = read("lords_contested_elections"),
"GovernmentPosts" = read("lords_government_roles"),
"OppositionPosts" = read("lords_opposition_roles"),
"ParliamentaryPosts" = read("lords_parliamentary_roles"),
"MaidenSpeeches" = read("lords_maiden_speeches"),
"Addresses" = read("lords_addresses")
)
}
# Tests -----------------------------------------------------------------------
test_that("fetch_lords processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"full_title",
"lord_type",
"current_status",
"current_status_reason",
"gender",
"date_of_death")
obs <- fetch_lords()
exp <- readRDS("data/fetch_lords.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords(from_date = TEST_DATE, to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_memberships processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"seat_incumbency_start_date",
"seat_incumbency_end_date")
obs <- fetch_lords_memberships()
exp <- readRDS("data/fetch_lords_memberships.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_memberships(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_memberships_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_party_memberships processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"party_mnis_id",
"party_name",
"party_membership_start_date",
"party_membership_end_date")
obs <- fetch_lords_party_memberships()
exp <- readRDS("data/fetch_lords_party_memberships.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_party_memberships_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_party_memberships_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_party_memberships(collapse = TRUE)
exp <- readRDS("data/fetch_lords_party_memberships_collapse.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_other_parliaments processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"other_parliaments_mnis_id",
"other_parliaments_name",
"other_parliaments_incumbency_start_date",
"other_parliaments_incumbency_end_date")
obs <- fetch_lords_other_parliaments()
exp <- readRDS("data/fetch_lords_other_parliaments.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_other_parliaments(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_other_parliaments_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_contested_elections processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"contested_election_mnis_id",
"contested_election_name",
"contested_election_date",
"contested_election_type",
"contested_election_constituency")
obs <- fetch_lords_contested_elections()
exp <- readRDS("data/fetch_lords_contested_elections.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_contested_elections(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_contested_elections_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_government_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"government_role_mnis_id",
"government_role_name",
"government_role_incumbency_start_date",
"government_role_incumbency_end_date",
"government_role_unpaid")
obs <- fetch_lords_government_roles()
exp <- readRDS("data/fetch_lords_government_roles.Rdata")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_government_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_government_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_government_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_government_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_opposition_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"opposition_role_mnis_id",
"opposition_role_name",
"opposition_role_incumbency_start_date",
"opposition_role_incumbency_end_date",
"opposition_role_unpaid")
obs <- fetch_lords_opposition_roles()
exp <- readRDS("data/fetch_lords_opposition_roles.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_opposition_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_opposition_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_opposition_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_opposition_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_parliamentary_roles processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"parliamentary_role_mnis_id",
"parliamentary_role_name",
"parliamentary_role_incumbency_start_date",
"parliamentary_role_incumbency_end_date",
"parliamentary_role_unpaid")
obs <- fetch_lords_parliamentary_roles()
exp <- readRDS("data/fetch_lords_parliamentary_roles.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_parliamentary_roles(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_parliamentary_roles_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_parliamentary_roles(while_lord = FALSE)
exp <- readRDS("data/fetch_lords_parliamentary_roles_while_lord.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_maiden_speeches processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"maiden_speech_house",
"maiden_speech_date",
"maiden_speech_hansard_reference",
"maiden_speech_subject")
obs <- fetch_lords_maiden_speeches()
exp <- readRDS("data/fetch_lords_maiden_speeches.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
obs <- fetch_lords_maiden_speeches(
from_date = TEST_DATE,
to_date = TEST_DATE)
exp <- readRDS("data/fetch_lords_maiden_speeches_from_to.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
test_that("fetch_lords_addresses processes results correctly.", {
with_mock("clmnis::fetch_query_data" = mock_fetch_query_data, {
cols <- c(
"mnis_id",
"given_name",
"family_name",
"display_name",
"address_type_mnis_id",
"address_type",
"address_is_preferred",
"address_is_physical",
"address_note",
"address_1",
"address_2",
"address_3",
"address_4",
"address_5",
"postcode",
"phone",
"fax",
"email",
"address_other")
obs <- fetch_lords_addresses()
exp <- readRDS("data/fetch_lords_addresses.RData")
compare_obs_exp(obs, exp, cols, "mnis_id")
})
})
|
#' Plot estimated group trajectories of a fitted dmZIPt model
#'
#' For each latent group, computes the expected count
#' mu(t) = (1 - p(t)) * lambda(t), where lambda = exp(X beta) and the
#' zero-inflation probability p is logistic in -tau * X beta, then draws
#' all group trajectories over time with matplot().
#'
#' @param x A fitted "dmZIPt" object containing design matrix `X`,
#'   coefficient matrix `beta`, and zero-inflation parameter `tau`.
#' @param ... Further graphical arguments passed on to matplot()
#'   (e.g. `col`, `main`); previously accepted but ignored.
#' @return Invisibly, the time-by-group matrix of estimated means.
plot.dmZIPt <-
function (x, ...)
{
    # Linear predictor: rows = time points, columns = latent groups.
    Xb <- x$X %*% x$beta
    lambda <- exp(Xb)
    # Zero-inflation probability; tau is recycled over groups via the
    # transpose trick, then the result is transposed back.
    p <- exp(-x$tau * t(Xb))
    p <- t(p)
    p <- p/(1 + p)
    # Expected observed count under zero inflation.
    mu <- (1 - p) * lambda
    tt <- seq_len(nrow(mu))
    matplot(tt, mu, type = "l", lty = 1, lwd = 2, xlab = "time",
        ylab = "muhat(time)", ...)
    invisible(mu)
}
| /R/plot.dmZIPt.R | no_license | cran/crimCV | R | false | false | 310 | r | plot.dmZIPt <-
# Plot method for "dmZIPt" objects: draws, per latent group, the expected
# count mu(t) = (1 - p(t)) * lambda(t) over time, where lambda = exp(X beta)
# and p is logistic in -tau * X beta.
function (x, ...)
{
# Extracted but never used below.
prob <- x$prob
# Linear predictor: rows = time points, columns = latent groups.
Xb <- x$X %*% x$beta
lambda <- exp(Xb)
# tau is recycled across groups via the transpose trick, then transposed back.
p <- exp(-x$tau * t(Xb))
p <- t(p)
p <- p/(1 + p)
# Expected observed count under zero inflation.
mu <- (1 - p) * lambda
tt <- 1:nrow(mu)
# NOTE(review): `...` is accepted but not forwarded to matplot().
matplot(tt, mu, type = "l", lty = 1, lwd = 2, xlab = "time",
ylab = "muhat(time)")
}
|
#Multiple-Relation Diffusion Simulation User Script
#Jonathan H. Morgan
#23 December 2020
# Setting Work Directory
# NOTE(review): machine-specific absolute path; setwd() in scripts makes
# them non-portable - consider here::here() or project-relative paths.
setwd("~/Desktop/DNAC/IDEANet/Data_Scripts/MR_EdgeDiffusion")
getwd()
# Options
# Kept for pre-R-4.0 behaviour; stringsAsFactors = FALSE is the default
# since R 4.0, so this is a no-op on modern R.
options(stringsAsFactors = FALSE)
# Loads the simulator functions used later in this script.
source("~/Desktop/DNAC/IDEANet/Data_Scripts/MR_EdgeDiffusion/MR_DiffusionSimulator_22June2021.R")
##############################
#   CELLULAR AUTOMATA DATA   #
##############################
# Note: Creating Cellular automata data for demonstration purposes.
# The simulation, in itself, is not limited to this kind of network.
# For other kinds of networks or when doing a SIS model, make cellular_net 0.
# The cellular automata visualization assumes that each node can have at max 4 alters.

# Builds a grid_dim x grid_dim lattice: each cell is a node tied to its
# von Neumann neighbours (up/right/down/left), producing a directed edge
# list `edges1` that is then decorated with simulated transmission
# attributes (value, stime, ftime, type).
grid_dim <- 30  # lattice side length (previously hard-coded as 30 throughout)
cellular_data <- matrix(seq_len(grid_dim^2), nrow = grid_dim, ncol = grid_dim)

# Retrieving x,y coordinates for every node in the lattice
focus <- seq_len(grid_dim^2)
coordinates <- vector("list", length(focus))
names(coordinates) <- focus
for (i in seq_along(focus)) {
    xy_coords <- which(cellular_data == focus[[i]], arr.ind = TRUE)
    xy_coords <- as.data.frame(xy_coords)
    colnames(xy_coords) <- c("x", "y")
    coordinates[[i]] <- xy_coords
    rm(xy_coords)
}
coordinates <- do.call(rbind, coordinates)
coordinates <- cbind(focus, coordinates)
colnames(coordinates)[[1]] <- c("node")

# Creating the edge list: one row per (focal node, neighbour) pair
edges <- vector("list", length(focus))
for (i in seq_along(edges)) {
    node <- coordinates[coordinates$node == focus[[i]], ]
    # Candidate neighbour coordinates around the focal cell.
    n_coord <- rbind(
        c(node[[2]], node[[3]] + 1),
        c(node[[2]] + 1, node[[3]]),
        c(node[[2]], node[[3]] - 1),
        c(node[[2]] - 1, node[[3]])
    )
    # Drop out-of-grid candidates; drop = FALSE keeps the matrix shape even
    # when only one neighbour survives (guards against small lattices, where
    # single-row subsetting would silently collapse to a vector).
    n_coord <- n_coord[n_coord[, 1] >= 1 & n_coord[, 2] >= 1, , drop = FALSE]
    n_coord <- n_coord[n_coord[, 1] <= grid_dim & n_coord[, 2] <= grid_dim, , drop = FALSE]
    # Translate surviving (row, col) positions back into node ids.
    alters <- vector("numeric", nrow(n_coord))
    for (j in seq_len(nrow(n_coord))) {
        row_coord <- n_coord[j, ]
        alters[[j]] <- cellular_data[row_coord[[1]], row_coord[[2]]]
        rm(row_coord)
    }
    alters <- cbind(rep(node$node, length(alters)), alters)
    colnames(alters)[[1]] <- c("node")
    alters <- as.data.frame(alters)
    edges[[i]] <- alters
    rm(node, alters, n_coord)
}
edges1 <- do.call(rbind, edges)
rm(edges)

# Adding edge values (a simple constant weight of 1 in this case)
value <- rep(1, nrow(edges1))
edges1 <- cbind(edges1, value)
colnames(edges1)[c(1, 2)] <- c("n1", "n2")
rm(value)

# Creating test data: starting time, finish time, and type simulate a
# transmission process (type is 1 or 2, drawn with p(type 2) = 0.4)
stime <- rep(1, nrow(edges1))
ftime <- round(runif(length(stime), 1, 100), digits = 0)
type <- (rbinom(size = 1, n = length(stime), prob = 0.4)) + 1
edges1 <- cbind(edges1, stime, ftime, type)
rm(stime, ftime, type, focus)
#################################
# CREATING INPUT PARAMETERS #
#################################
# GENERATING NODES LIST FROM EDGES FILE
nodes <- unique(c(edges1$n1, edges1$n2))
seq_nodes <- seq(1, length(nodes), 1)
nodes <- cbind(seq_nodes, nodes)
colnames(nodes) <- c('id', 'nodes')
n <- dim(nodes)[[1]]
# SPECIFYING SIMULATION PARAMETERS
ittervec <- c(1, 1) #Relation specific within-step time adjustment
rel_iprob <- c(0.25, 0.25) #Relation specific weights
seed <- c(3, 45) #Starting Seed Nodes
spotprob <- matrix(nrow=n, ncol=1, 0.00) #Vector for spontaneous probability of infection, 0 in this case*/
durmean <- matrix(nrow=n, ncol=1, 10) #Mean infection duration - Nx1 vector
to_change <- sample(which(durmean == 10), 30)
varied_values <- vector('numeric', length(to_change))
for (i in seq_along(varied_values)) {
varied_values[[i]]<- abs(round(rnorm(n=1, mean = 20, sd = 10), digits=0))
}
durmean[to_change] <- varied_values #Adding a bit of variation to the duration times
durlatent <- matrix(nrow=n, ncol=1, 1) #Mean latency length - Nx1 vector
to_change <- sample(which(durlatent == 1), 30)
varied_values <- vector('numeric', length(to_change))
for (i in seq_along(varied_values)) {
varied_values[[i]]<- abs(round(rnorm(n=1, mean = 10, sd = 1), digits=0))
}
durlatent[to_change] <- varied_values #Adding a bit of variation to the latency times
recoverprob <- matrix(nrow=n, ncol=1, .5) #Recovery probability, governing the likelihood of death (not used currently)
sitype <- c("SIR") #Susceptibility process: SI, SIS, SIR: SI, SIS, SIR
randdur <- 0 #Randomize duration vectors, 0=no
printt <- 1 #Iterative steps are printed to the console, 1=yes
R_net <- 1 #Generate R network visualizations
Pajek_net <- 1 #Generate Pajek network objects and macros
cellular_net <- 1 #Generates cellular automata visualization
rm(to_change, varied_values)
##########################
# RUNNING SIMULATION #
##########################
MR_EdgeDiff(edges1, ittervec, rel_iprob, seed, spotprob, durmean, durlatent,
recoverprob, sitype='SIR', randdur, printt = 1, R_net = 1, Pajek_net=0,
cellular_net=1, coordinates)
#########################
# EXAMINING OUTPUTS #
#########################
# Infection History
infection_history
# Growth Curve of the Infection
growth_curve_plot
# Summary Infection Network (Network over all Time Periods)
sna_infection_plot
# If the input network was a lattice, then
lattice_plot
# If R_net is 1, then the following sna network object is generated.
summary(sna_infection_network)
# If R_net is 1, then the following list of cumulative time-slice networks is generated.
sna_timeslice_networks
# If Pajek_net is 1, you will find a Pajek net file,
# Infection_Network.net, of the cumulative infection network.
# If Pajek_net is 1, you will find a .paj file that lists the cumulative time-slice networks,
# with their associated out-degree vectors.
# If Pajek_net is 1, you will find a directory, paj_files, that includes Pajek images for animation.
# INFECTION OVER TIME ANIMATIONS (PAJEK FILES)
# Note: This functionality is similar to the animations generated when the sna package is used to generate the graphs.
# The difference is that Pajek images are used
# This functionality depends on first generating the plots in Pajek by playing Infection_Nets.mcr.
# Infection_Nets.mcr is generated by the simulation if Pajek_net = 1 in the function call.
# Importing Image into a List
paj_dir <- paste0(getwd(),'/','paj_files')
image_list <- list.files(pattern="*.jpg", path=paj_dir, full.names = TRUE)
image_id <- vector('numeric', length(image_list))
for (i in seq_along(image_id)) {
matches <- regmatches(image_list[[i]], gregexpr("[[:digit:]]+", image_list[[i]]))
image_id[[i]] <- as.numeric(unlist(matches))
rm(matches)
}
image_list <- as.data.frame(image_list)
image_list <- cbind(image_id, image_list)
image_list <- image_list[order(image_list$image_id), ]
rm(image_id)
pajek_plots <- vector('list', nrow(image_list))
names(pajek_plots) <- paste('Time', seq(1, length(pajek_plots), 1))
for(i in seq_along(pajek_plots)) {
pajek_plot <- magick::image_read(image_list[i,2])
png("p_1.png", width = 877, height = 676)
par(mar = c(0,0,0,0), family='HersheySerif')
plot(pajek_plot)
title(names(pajek_plots)[[i]], family='HersheySerif', cex.main=1.5, line=-1, adj=0)
dev.off()
g <- magick::image_read('p_1.png')
p_1 <- ggplotify::as.ggplot(g)
file.remove('p_1.png')
pajek_plots[[i]] <- p_1
rm(pajek_plot, p_1, g)
}
# Create HTML Movie showing changes in the infection network over time
animation::saveHTML({
for(i in seq_along(pajek_plots)) {
plot(pajek_plots[[i]], add=T)
}
}, htmlfile = "InfectonNetwork_OverTime.html", autoplay=FALSE, verbose=FALSE,
ani.width = 877, ani.height=676, nmax=length(pajek_plots))
| /Diffusion_Simulations/Multiplex_EdgeDiffusion/MR_Diffusion_UserScript_14Jan2020.R | no_license | ethanbhojani/IDEANet | R | false | false | 8,248 | r | #Multiple-Relation Diffusion Simulation User Script
#Jonathan H. Morgan
#23 December 2020
# Setting Work Directory
setwd("~/Desktop/DNAC/IDEANet/Data_Scripts/MR_EdgeDiffusion")
getwd()
# Options
options(stringsAsFactors = FALSE)
source("~/Desktop/DNAC/IDEANet/Data_Scripts/MR_EdgeDiffusion/MR_DiffusionSimulator_22June2021.R")
##############################
# CELLULAR AUTOMATA DATA #
##############################
# Note: Creating Cellular automata data for demonstration purposes.
# The simulation, in itself, is not limited to this kind of network.
# For other kinds of networks or when doing a SIS model, make cellular_net 0.
# The cellular automata visualization assumes that each node can have at max 4 alters.
# Creating Matrix
cellular_data <- matrix(seq(1, 900, 1), nrow=30, ncol=30)
# Retrieving x,y Coordinates
focus <- seq(1, 900, 1)
coordinates <- vector('list', length(focus))
names(coordinates) <- focus
for (i in seq_along(focus)) {
xy_coords <- which(cellular_data==focus[[i]], arr.ind=TRUE)
xy_coords <- as.data.frame(xy_coords)
colnames(xy_coords) <- c('x', 'y')
coordinates[[i]] <- xy_coords
rm(xy_coords)
}
coordinates <- do.call(rbind, coordinates)
coordinates <- cbind(focus, coordinates)
colnames(coordinates)[[1]] <- c('node')
# Creating Edgelist
edges <- vector('list', length(focus))
for(i in seq_along(edges)) {
node <- coordinates[coordinates$node == focus[[i]], ]
n_coord <- vector('list', 4)
n_coord[[1]] <- c(node[[2]], node[[3]] + 1)
n_coord[[2]] <- c(node[[2]] + 1, node[[3]])
n_coord[[3]] <- c(node[[2]], node[[3]] - 1)
n_coord[[4]] <- c(node[[2]] - 1, node[[3]])
n_coord <- do.call(rbind, n_coord)
n_coord <- n_coord[ n_coord[,1] != 0 & n_coord[,2] != 0, ]
n_coord <- n_coord[ n_coord[,1] <= 30 & n_coord[,2] <= 30, ]
alters <- vector('numeric', nrow(n_coord))
for(j in seq_along(n_coord[,1])) {
row_coord <- n_coord[j,]
alter <- cellular_data[row_coord[[1]], row_coord[[2]]]
alters[[j]] <- alter
rm(row_coord, alter)
}
alters <- cbind(rep(node$node,length(alters)), alters)
colnames(alters)[[1]] <- c('node')
alters <- as.data.frame(alters)
edges[[i]] <- alters
rm(node, alters, n_coord)
}
edges1 <- do.call(rbind, edges)
rm(edges)
# Adding Edge Values (Doing a Simple Value of 1 in this Case)
value <- rep(1, nrow(edges1))
edges1 <- cbind(edges1, value)
colnames(edges1)[c(1,2)] <- c('n1', 'n2')
rm(value)
# Creating Test Data
# Adding Starting Time, Finish Time, and Type to Simulate a Transmission Process
stime <- rep(1, nrow(edges1))
ftime <- round(runif(length(stime), 1, 100), digits=0)
type <- (rbinom(size=c(1), n=length(stime), prob=0.4)) + 1
edges1 <- cbind(edges1, stime, ftime, type)
rm(stime, ftime, type, focus)
#################################
# CREATING INPUT PARAMETERS #
#################################
# GENERATING NODES LIST FROM EDGES FILE
nodes <- unique(c(edges1$n1, edges1$n2))
seq_nodes <- seq(1, length(nodes), 1)
nodes <- cbind(seq_nodes, nodes)
colnames(nodes) <- c('id', 'nodes')
n <- dim(nodes)[[1]]
# SPECIFYING SIMULATION PARAMETERS
ittervec <- c(1, 1) #Relation specific within-step time adjustment
rel_iprob <- c(0.25, 0.25) #Relation specific weights
seed <- c(3, 45) #Starting Seed Nodes
spotprob <- matrix(nrow=n, ncol=1, 0.00) #Vector for spontaneous probability of infection, 0 in this case*/
durmean <- matrix(nrow=n, ncol=1, 10) #Mean infection duration - Nx1 vector
to_change <- sample(which(durmean == 10), 30)
varied_values <- vector('numeric', length(to_change))
for (i in seq_along(varied_values)) {
varied_values[[i]]<- abs(round(rnorm(n=1, mean = 20, sd = 10), digits=0))
}
durmean[to_change] <- varied_values #Adding a bit of variation to the duration times
durlatent <- matrix(nrow=n, ncol=1, 1) #Mean latency length - Nx1 vector
to_change <- sample(which(durlatent == 1), 30)
varied_values <- vector('numeric', length(to_change))
for (i in seq_along(varied_values)) {
varied_values[[i]]<- abs(round(rnorm(n=1, mean = 10, sd = 1), digits=0))
}
durlatent[to_change] <- varied_values #Adding a bit of variation to the latency times
recoverprob <- matrix(nrow=n, ncol=1, .5) #Recovery probability, governing the likelihood of death (not used currently)
sitype <- c("SIR") #Susceptibility process: SI, SIS, SIR: SI, SIS, SIR
randdur <- 0 #Randomize duration vectors, 0=no
printt <- 1 #Iterative steps are printed to the console, 1=yes
R_net <- 1 #Generate R network visualizations
Pajek_net <- 1 #Generate Pajek network objects and macros
cellular_net <- 1 #Generates cellular automata visualization
rm(to_change, varied_values)
##########################
# RUNNING SIMULATION #
##########################
MR_EdgeDiff(edges1, ittervec, rel_iprob, seed, spotprob, durmean, durlatent,
recoverprob, sitype='SIR', randdur, printt = 1, R_net = 1, Pajek_net=0,
cellular_net=1, coordinates)
#########################
# EXAMINING OUTPUTS #
#########################
# Infection History
infection_history
# Growth Curve of the Infection
growth_curve_plot
# Summary Infection Network (Network over all Time Periods)
sna_infection_plot
# If the input network was a lattice, then
lattice_plot
# If R_net is 1, then the following sna network object is generated.
summary(sna_infection_network)
# If R_net is 1, then the following list of cumulative time-slice networks is generated.
sna_timeslice_networks
# If Pajek_net is 1, you will find a Pajek net file,
# Infection_Network.net, of the cumulative infection network.
# If Pajek_net is 1, you will find a .paj file that lists the cumulative time-slice networks,
# with their associated out-degree vectors.
# If Pajek_net is 1, you will find a directory, paj_files, that includes Pajek images for animation.
# INFECTION OVER TIME ANIMATIONS (PAJEK FILES)
# Note: This functionality is similar to the animations generated when the sna package is used to generate the graphs.
# The difference is that Pajek images are used
# This functionality depends on first generating the plots in Pajek by playing Infection_Nets.mcr.
# Infection_Nets.mcr is generated by the simulation if Pajek_net = 1 in the function call.
# Importing Image into a List
paj_dir <- paste0(getwd(),'/','paj_files')
image_list <- list.files(pattern="*.jpg", path=paj_dir, full.names = TRUE)
image_id <- vector('numeric', length(image_list))
for (i in seq_along(image_id)) {
matches <- regmatches(image_list[[i]], gregexpr("[[:digit:]]+", image_list[[i]]))
image_id[[i]] <- as.numeric(unlist(matches))
rm(matches)
}
image_list <- as.data.frame(image_list)
image_list <- cbind(image_id, image_list)
image_list <- image_list[order(image_list$image_id), ]
rm(image_id)
pajek_plots <- vector('list', nrow(image_list))
names(pajek_plots) <- paste('Time', seq(1, length(pajek_plots), 1))
for(i in seq_along(pajek_plots)) {
pajek_plot <- magick::image_read(image_list[i,2])
png("p_1.png", width = 877, height = 676)
par(mar = c(0,0,0,0), family='HersheySerif')
plot(pajek_plot)
title(names(pajek_plots)[[i]], family='HersheySerif', cex.main=1.5, line=-1, adj=0)
dev.off()
g <- magick::image_read('p_1.png')
p_1 <- ggplotify::as.ggplot(g)
file.remove('p_1.png')
pajek_plots[[i]] <- p_1
rm(pajek_plot, p_1, g)
}
# Create HTML Movie showing changes in the infection network over time
animation::saveHTML({
for(i in seq_along(pajek_plots)) {
plot(pajek_plots[[i]], add=T)
}
}, htmlfile = "InfectonNetwork_OverTime.html", autoplay=FALSE, verbose=FALSE,
ani.width = 877, ani.height=676, nmax=length(pajek_plots))
|
## IGFS survey indices
## Build three length-group abundance indices from the Irish groundfish
## survey (IGFS) and register each one as a fixed-slope log-linear
## survey-index likelihood component for the saithe stocks
## ("pokimm" / "pokmat").
si_length_breaks <- list(
  si.gp1 = c(20, 50),
  si.gp2 = c(50, 60, 70),
  si.gp3 = c(70, 80, 90, 100, 140)
)
## One mfdb query per length grouping; the queries are identical apart
## from the interval breakpoints, so run them through a single helper.
si_indices <- lapply(si_length_breaks, function(breaks) {
  mfdb_sample_count(mdb, c('length'), c(list(
    sampling_type = 'IGFS',
    length = mfdb_interval("len", breaks)),
    defaults))
})
## Keep the original per-group object names for any downstream code.
igfs.SI1 <- si_indices$si.gp1
igfs.SI2 <- si_indices$si.gp2
igfs.SI3 <- si_indices$si.gp3
## Register the likelihood components in the same order as before
## (si.gp1, si.gp2, si.gp3).
for (component_name in names(si_indices)) {
  gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
                                                   name = component_name,
                                                   weight = 1,
                                                   data = si_indices[[component_name]][[1]],
                                                   fittype = 'fixedslopeloglinearfit',
                                                   slope = 1,
                                                   stocknames = c("pokimm", "pokmat")))
}
| /03-saithe/00-data/setup-indices.R | no_license | bthe/gadget-models | R | false | false | 1,941 | r | ## IGFS survey indices
# IGFS survey indices: three length-disaggregated abundance indices,
# each fitted as a fixed-slope log-linear survey-index likelihood
# component for the saithe stocks.
igfs_length_index <- function(len_breaks) {
  # Length-binned IGFS sample counts for one set of breakpoints.
  mfdb_sample_count(mdb, c('length'),
                    c(list(sampling_type = 'IGFS',
                           length = mfdb_interval("len", len_breaks)),
                      defaults))
}
igfs.SI1 <- igfs_length_index(c(20, 50))
igfs.SI2 <- igfs_length_index(c(50, 60, 70))
igfs.SI3 <- igfs_length_index(c(70, 80, 90, 100, 140))
write_si_component <- function(component_name, index) {
  # Register one survey-index likelihood component with the model directory.
  gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
                                                   name = component_name,
                                                   weight = 1,
                                                   data = index[[1]],
                                                   fittype = 'fixedslopeloglinearfit',
                                                   slope = 1,
                                                   stocknames = c("pokimm", "pokmat")))
}
write_si_component("si.gp1", igfs.SI1)
write_si_component("si.gp2", igfs.SI2)
write_si_component("si.gp3", igfs.SI3)
|
#! /usr/bin/Rscript
########################################################################################
library(raster)
library(ncdf)
library(rgdal)
library(maptools)
library(rAedesSim)
setwd("/home/XXXXX/procedure")
###########################################################################################
# Meteo & Geo
list_meteo_full=readRDS("simulation/list_meteo_last.rds")
redlav_point_weather=readRDS("vectors/redlav_point_weather.rds")
population_ini=readRDS("vectors/population_ini.rds")
parameters_ini=readRDS("vectors/parameters_ini.rds")
parameter_table=readRDS("vectors/parameter_table.rds")
###########################################################################################
# Water Models
data(trappola_wmodel)
###########################################################################################
# Water Models
list_obj_meteo=list()
list_obj_meteodata=list()
list_obj_biocontainer=list()
list_obj_biometeo=list()
list_obj_biopopulation=list()
list_obj_simulazione=list()
names_points=names(list_meteo_full$tmax)
date_meteo=as.Date(as.numeric(rownames(list_meteo_full$tmax)),origin="1970-01-01")
#############################################################################################
if( is.na(Sys.timezone())) {Sys.setenv(TZ='GMT') }
#############################################################################################
for (i in 1:length(names_points))
{
list_obj_meteo[[i]]=data.frame(dates=date_meteo,
tmed=list_meteo_full$tmed[,i],
tmax=list_meteo_full$tmax[,i],
tmin=list_meteo_full$tmin[,i],
rhum=list_meteo_full$urel[,i],
prec=list_meteo_full$prec[,i]*1000
);
list_obj_meteodata[[i]]=meteodata(station_name=names_points[i],
network="WRF_model",
data_type="Simulation",
standard="rAedesSim",
data_provider="IBIMET CNR-LaMMA",
data_maintainer="",
data_licence="",
date_format="YMD",
lat=coordinates(redlav_point_weather)[i,2],
lon=coordinates(redlav_point_weather)[i,1],
elevation=redlav_point_weather$topo_w[i],
timeformat="daily",
sourcedata=list_obj_meteo[[i]]
)
list_obj_biocontainer[[i]]=biocontainer(nrecipients=50,
watermodel=trappola_wmodel,
model_type="lin",
lat=coordinates(redlav_point_weather)[i,2],
lon=coordinates(redlav_point_weather)[i,1],
elevation=redlav_point_weather$topo_w[i]
)
list_obj_biometeo[[i]]=biometeo(list_obj_meteodata[[i]],
list_obj_biocontainer[[i]]
)
list_obj_biopopulation[[i]]=biopopulation(eggs=100,larvae=0,pupae=0,adults=0,eggs_diap=10)
}
#############################################################################################
res_list_simulation=list()
for (i in 1:length(names_points))
{ res_list_sim=list()
for (j in 1:15) {
list_obj_simulazione[[i]]=biomodel(list_obj_biometeo[[i]],
list_obj_biocontainer[[i]],
list_obj_biopopulation[[i]],
bioparameters(alfa_l=parameters_ini@data[i,j*2+1],
alfa_a=parameters_ini@data[i,j*2],
l_density=40)
)
res_list_sim[[j]]=apply.weekly(list_obj_simulazione[[i]]$ts_population,mean)
}
res_list_simulation[[i]]=res_list_sim;
}
saveRDS(list_obj_meteo,"simulation/list_meteo_obj_last.rds")
saveRDS(res_list_simulation,"simulation/res_list_simulation_last.rds")
saveRDS(list_obj_meteo,paste0("simulation/list_meteo_obj_",Sys.Date(),".rds"))
saveRDS(res_list_simulation,paste0("simulation/res_list_simulation_",Sys.Date(),".rds"))
#############################################################################################
| /proc_B_redlav.r | no_license | alfcrisci/redlav_server_chain | R | false | false | 4,791 | r | #! /usr/bin/Rscript
########################################################################################
library(raster)
library(ncdf)
library(rgdal)
library(maptools)
library(rAedesSim)
setwd("/home/XXXXX/procedure")
###########################################################################################
# Meteo & Geo
list_meteo_full=readRDS("simulation/list_meteo_last.rds")
redlav_point_weather=readRDS("vectors/redlav_point_weather.rds")
population_ini=readRDS("vectors/population_ini.rds")
parameters_ini=readRDS("vectors/parameters_ini.rds")
parameter_table=readRDS("vectors/parameter_table.rds")
###########################################################################################
# Water Models
data(trappola_wmodel)
###########################################################################################
# Water Models
list_obj_meteo=list()
list_obj_meteodata=list()
list_obj_biocontainer=list()
list_obj_biometeo=list()
list_obj_biopopulation=list()
list_obj_simulazione=list()
names_points=names(list_meteo_full$tmax)
date_meteo=as.Date(as.numeric(rownames(list_meteo_full$tmax)),origin="1970-01-01")
#############################################################################################
if( is.na(Sys.timezone())) {Sys.setenv(TZ='GMT') }
#############################################################################################
for (i in 1:length(names_points))
{
list_obj_meteo[[i]]=data.frame(dates=date_meteo,
tmed=list_meteo_full$tmed[,i],
tmax=list_meteo_full$tmax[,i],
tmin=list_meteo_full$tmin[,i],
rhum=list_meteo_full$urel[,i],
prec=list_meteo_full$prec[,i]*1000
);
list_obj_meteodata[[i]]=meteodata(station_name=names_points[i],
network="WRF_model",
data_type="Simulation",
standard="rAedesSim",
data_provider="IBIMET CNR-LaMMA",
data_maintainer="",
data_licence="",
date_format="YMD",
lat=coordinates(redlav_point_weather)[i,2],
lon=coordinates(redlav_point_weather)[i,1],
elevation=redlav_point_weather$topo_w[i],
timeformat="daily",
sourcedata=list_obj_meteo[[i]]
)
list_obj_biocontainer[[i]]=biocontainer(nrecipients=50,
watermodel=trappola_wmodel,
model_type="lin",
lat=coordinates(redlav_point_weather)[i,2],
lon=coordinates(redlav_point_weather)[i,1],
elevation=redlav_point_weather$topo_w[i]
)
list_obj_biometeo[[i]]=biometeo(list_obj_meteodata[[i]],
list_obj_biocontainer[[i]]
)
list_obj_biopopulation[[i]]=biopopulation(eggs=100,larvae=0,pupae=0,adults=0,eggs_diap=10)
}
#############################################################################################
res_list_simulation=list()
for (i in 1:length(names_points))
{ res_list_sim=list()
for (j in 1:15) {
list_obj_simulazione[[i]]=biomodel(list_obj_biometeo[[i]],
list_obj_biocontainer[[i]],
list_obj_biopopulation[[i]],
bioparameters(alfa_l=parameters_ini@data[i,j*2+1],
alfa_a=parameters_ini@data[i,j*2],
l_density=40)
)
res_list_sim[[j]]=apply.weekly(list_obj_simulazione[[i]]$ts_population,mean)
}
res_list_simulation[[i]]=res_list_sim;
}
saveRDS(list_obj_meteo,"simulation/list_meteo_obj_last.rds")
saveRDS(res_list_simulation,"simulation/res_list_simulation_last.rds")
saveRDS(list_obj_meteo,paste0("simulation/list_meteo_obj_",Sys.Date(),".rds"))
saveRDS(res_list_simulation,paste0("simulation/res_list_simulation_",Sys.Date(),".rds"))
#############################################################################################
|
library(suncalc)
library(lubridate)
# Sunrise and sunset for tomorrow at lon 4.8945 / lat 52.3667
# (presumably Amsterdam -- confirm), reported in the system timezone.
getSunlightTimes(date = today() + 1,
                 tz = Sys.timezone(),
                 lon = 4.8945, lat = 52.3667,
                 keep = c("sunrise","sunset"))
#-- Impute missing values
# Demo vector: ten draws from 1:20 mixed with six NAs; seeded so the
# example is reproducible.
set.seed(2)
data <- sample(c(1:20, rep(NA,6)), 10)
# Alternative imputation strategies, kept for reference:
# Use the mean
# data[is.na(data)] <- mean(data, na.rm = TRUE)
# Use some fixed value
# data[is.na(data)] <- 0
# Use the last value (last observation carried forward),
# unless it is the first, keep that NA
# ii <- which(is.na(data))
# ii <- setdiff(ii, 1)
# data[ii] <- data[ii - 1]
# Linear interpolation over the missing positions.
# NOTE: approx() does not extrapolate, so NAs at the very ends of
# `data` would remain NA after this step.
ii <- which(is.na(data))
data_new <- approx(seq_along(data), data, xout = ii)
data[ii] <- data_new$y
| /R/functions_stubs.R | no_license | datapelikaan/marineresearch | R | false | false | 667 | r |
# Tomorrow's sunrise/sunset times at lon 4.8945, lat 52.3667, in the
# system's local timezone.
library(suncalc)
library(lubridate)
getSunlightTimes(date = today() + 1,
                 tz = Sys.timezone(),
                 lon = 4.8945, lat = 52.3667,
                 keep = c("sunrise","sunset"))
#-- Impute missing values
# Demo vector: ten draws from 1:20 mixed with six NAs (seeded).
set.seed(2)
data <- sample(c(1:20, rep(NA,6)), 10)
# Alternative strategies, kept commented out for reference:
# mean imputation:
# data[is.na(data)] <- mean(data, na.rm = TRUE)
# fixed-value imputation:
# data[is.na(data)] <- 0
# last observation carried forward (a leading NA stays NA):
# ii <- which(is.na(data))
# ii <- setdiff(ii, 1)
# data[ii] <- data[ii - 1]
# Linear interpolation over the missing positions; note approx() does
# not extrapolate, so leading/trailing NAs would remain NA.
ii <- which(is.na(data))
data_new <- approx(1:length(data), data, xout = ii)
data[ii] <- data_new$y
|
parse_floats <-
  function( data, N, number_of_elements ) {
    # Decode a raw vector holding N * number_of_elements 4-byte floats
    # into a numeric vector, recoding the sentinel value -1 as NA
    # (presumably -1 marks missing data in the upstream binary
    # format -- confirm).
    expected_bytes <- N * 4 * number_of_elements
    stopifnot( length( data ) == expected_bytes )
    values <- readBin( data, what = "numeric", size = 4,
                       n = N * number_of_elements )
    # which() yields integer positions, so a NaN decoded by readBin
    # cannot leak an NA into the subscripted assignment.
    values[ which( values == -1 ) ] <- NA
    values
  }
| /R/hapdb/R/parse_floats.R | permissive | gavinband/qctool | R | false | false | 264 | r | parse_floats <-
function( data, N, number_of_elements ) {
stopifnot( length( data ) == N*4*number_of_elements ) ;
result = readBin( data, what = "numeric", size = 4, n = N*number_of_elements )
result[ which( result == -1 )] = NA
return( result )
}
|
library(ggplot2)
library(dplyr)
library(scales)
###################################################################################################################################
# ICU BED VS CASES - ITALY
ICU_beds <- readxl::read_xls("italy_ICUbeds.xls")
# HYPOTHESIS 1 - beds = 1000 * ln(b * cases + a)
# Starting values a (intercept) and b (slope) come from fitting the linear model exp(beds/1000) ~ cases below.
model.0 <- lm(exp(beds/1000) ~ cases,
data = ICU_beds)
ICU_beds.i <- ICU_beds[1:(nrow(ICU_beds)-4),]
start <- list(a = coef(model.0)[1], b=coef(model.0)[2])
# HYPOTHESIS 2 - beds = (q * cases) ^ r
out <- nls(beds ~ (q * cases) ^ r,
data = ICU_beds,
start = list(q = 0.001, r = .5),
control = list(maxiter = 5000)
)
# Hypothesis 3 - combination of linear model and q * cases ^ r
ggplot() +
geom_point(data=ICU_beds, aes(x=cases, y=beds, colour = "blue")) +
geom_point(data=ICU_beds, aes(x=cases, y= 1000 * log(start$b * cases + start$a), colour = "orange")) +
geom_point(data=ICU_beds, aes(x=cases, y= sapply(cases, ICU.func), colour = "green")) +
scale_color_identity(name = "Model fit",
breaks = c("blue", "orange", "green"),
labels = c("Beds vs Cases", "Beds = A * ln(B * Cases + C)", "Beds = A * Cases ^ B"),
guide = "legend") +
scale_y_continuous(limits = c(0, NA), expand = c(0,0)) +
ggtitle("Cases vs Beds Assesments")
#####################################################################################################################
# CASE GROWTH ESTIMATES
cases_world <- readxl::read_xls("world_data.xls")
cases_world$Date <- cases_world$Date %>% as.Date
# SET 1
toMatch <- paste(sep = "|", "United Kingdom","Germany","France","Ireland","Spain","Italy")
cases_set1 <- cases_world$Entity %>% {grep(toMatch,.)} %>% cases_world[.,]
# population per country in 100000
pop_set1 <- list(UK = c(664.4,"United Kingdom"),
DE = c(827.9,"Germany"),
FR = c(669.9,"France"),
ES = c(446.6,"Spain"),
IT = c(604.8,"Italy"),
IE = c(48.3, "Ireland")
)
colnames(cases_set1)[4] <- "cases"
ggplot() +
geom_point(data=cases_set1$Entity %in% (pop_set1["UK"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["UK"] %>% unlist())[1] %>% as.numeric,
colour = "blue")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["IT"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["IT"] %>% unlist())[1] %>% as.numeric,
colour = "green")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["IE"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["IE"] %>% unlist())[1] %>% as.numeric,
colour = "orange")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["ES"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["ES"] %>% unlist())[1] %>% as.numeric,
colour = "yellow")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["FR"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["FR"] %>% unlist())[1] %>% as.numeric,
colour = "red")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["DE"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["DE"] %>% unlist())[1] %>% as.numeric,
colour = "black")
) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
scale_x_date(date_breaks = "1 week",
labels=date_format("%b-%Y"),
limits = as.Date(c('2020-02-20',Sys.Date()))) +
labs(y= "date", x = "cases per 100000") +
ggtitle("Ctry cases history per 100000 people")
# normalise data i.e. italy had corona virus before anyone else, move everyone left to match italys start data
IT <- cases_set1$Entity %in% (pop_set1[c("IT")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
UK <- cases_set1$Entity %in% (pop_set1[c("UK")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
DE <- cases_set1$Entity %in% (pop_set1[c("DE")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
ES <- cases_set1$Entity %in% (pop_set1[c("ES")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
IE <- cases_set1$Entity %in% (pop_set1[c("IE")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
FR <- cases_set1$Entity %in% (pop_set1[c("FR")] %>% unlist())[2] %>% cases_set1$cases[.] %>% {which(. > 0)}
UK_set <- cases_set1$Entity %in% (pop_set1["UK"] %>% unlist())[2] %>% cases_set1[.,]
UK_set <- UK_set[UK,]
UK_set <- cbind(UK_set,1:length(UK))
colnames(UK_set)[5] <- "days"
IT_set <- cases_set1$Entity %in% (pop_set1["IT"] %>% unlist())[2] %>% cases_set1[.,]
IT_set <- IT_set[IT,]
IT_set <- cbind(IT_set,1:length(IT))
colnames(IT_set)[5] <- "days"
IE_set <- cases_set1$Entity %in% (pop_set1["IE"] %>% unlist())[2] %>% cases_set1[.,]
IE_set <- IE_set[IE,]
IE_set <- cbind(IE_set,1:length(IE))
colnames(IE_set)[5] <- "days"
ES_set <- cases_set1$Entity %in% (pop_set1["ES"] %>% unlist())[2] %>% cases_set1[.,]
ES_set <- ES_set[ES,]
ES_set <- cbind(ES_set,1:length(ES))
colnames(ES_set)[5] <- "days"
DE_set <- cases_set1$Entity %in% (pop_set1["DE"] %>% unlist())[2] %>% cases_set1[.,]
DE_set <- DE_set[DE,]
DE_set <- cbind(DE_set,1:length(DE))
colnames(DE_set)[5] <- "days"
FR_set <- cases_set1$Entity %in% (pop_set1["FR"] %>% unlist())[2] %>% cases_set1[.,]
FR_set <- FR_set[FR,]
FR_set <- cbind(FR_set,1:length(FR))
colnames(FR_set)[5] <- "days"
ggplot() +
geom_point(data=UK_set, aes(x=days, y=cases, colour = "blue")) +
geom_point(data=IT_set, aes(x=days, y=cases, colour = "green")) +
geom_point(data=IE_set, aes(x=days, y=cases, colour = "orange")) +
geom_point(data=ES_set, aes(x=days, y=cases, colour = "yellow")) +
geom_point(data=FR_set, aes(x=days, y=cases, colour = "red")) +
geom_point(data=DE_set, aes(x=days, y=cases, colour = "black")) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
labs(y= "cases per 100000", x = "Days from CORVID 19 first reported") +
ggtitle("Progress of CORVID 19 since first reported cases")
ggplot() +
geom_point(data=UK_set[UK_set_start:length(UK),], aes(x=days, y=sqrt(cases), colour = "blue")) +
geom_point(data=IT_set[IT_set_start:length(IT),], aes(x=days, y=sqrt(cases), colour = "green")) +
geom_point(data=IE_set[IE_set_start:length(IE),], aes(x=days, y=sqrt(cases), colour = "orange")) +
geom_point(data=ES_set[ES_set_start:length(ES),], aes(x=days, y=sqrt(cases), colour = "yellow")) +
geom_point(data=FR_set[FR_set_start:length(FR),], aes(x=days, y=sqrt(cases), colour = "red")) +
geom_point(data=DE_set[DE_set_start:length(DE),], aes(x=days, y=sqrt(cases), colour = "black")) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
labs(y= "sqrt(cases per 100000)", x = "Days from CORVID 19 first reported") +
ggtitle("Progress of CORVID 19 since first reported cases")
# Take data from when cases are tested widely, i.e. from the point where the series starts trending upwards on the log model.
# NOTE(review): these *_start offsets are referenced by the sqrt(cases) plot above -- define them before running that plot.
UK_set_start <- 28
IT_set_start <- 22
DE_set_start <- 29
ES_set_start <- 24
FR_set_start <- 32
IE_set_start <- 1
set.seed(20)
####################################################################################################################################
####################################################################################################################################
# UK MODELLING
# prep data for Hypothesis 1 and 2
UK_set <- cbind(UK_set,UK_set$cases/as.numeric(pop_set1$UK[1]))
UK_set <- cbind(UK_set,log(UK_set[,6]))
UK_set <- cbind(UK_set,log(UK_set$days))
colnames(UK_set)[6] <- "cases_prop"
colnames(UK_set)[7] <- "log_cases_prop"
colnames(UK_set)[8] <- "log_days"
UK_set.i <- UK_set[UK_set_start:length(UK),]
#HYPOTHESIS 1
# c is cases per 100000
# c = A * days ^ n
# ln(c) = ln(A) + n*ln(d)
UK_model <- lm(formula = log_cases_prop ~ log_days,
data = UK_set.i)
# plot ln(c) = ln(A) + n*ln(d)
ggplot(data = UK_set.i) +
geom_point(aes(x = log_days,y=log_cases_prop, color = "blue")) +
geom_point(aes(x = log_days,y= log_days * coef(UK_model)[2] + coef(UK_model)[1], color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("blue","purple"),
labels = c("UK","estimate"),
guide = "legend")
#HYPOTHESIS 2
# c = A * days ^ n + B
start_UK <- list(
A_UK = exp(coef(UK_model)[1]),
n_UK = coef(UK_model)[2],
B = 0
)
UK_model2 <- nls(formula = cases_prop ~ A_UK * days ^ n_UK + B,
data = UK_set.i,
start = start_UK,
control = list(maxiter = 500))
summary(UK_model2)
UK_coefs <- coef(UK_model2)
# plot c = A * days ^ n + b
ggplot(data = UK_set.i) +
geom_point(aes(x = days,y=cases_prop, color = "blue")) +
geom_point(aes(x = days,y=sapply(days, uk.func1), color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("blue","purple"),
labels = c("UK","estimate"),
guide = "legend")
####################################################################################################################################
####################################################################################################################################
# ES MODELLING
# Same pipeline as the UK section: cases per 100000 (col 6) plus log
# transforms (cols 7-8) for the log-log power-law fit.
ES_set <- cbind(ES_set,ES_set$cases/as.numeric(pop_set1$ES[1]))
ES_set <- cbind(ES_set,log(ES_set[,6]))
ES_set <- cbind(ES_set,log(ES_set$days))
colnames(ES_set)[6] <- "cases_prop"
colnames(ES_set)[7] <- "log_cases_prop"
colnames(ES_set)[8] <- "log_days"
ES_set.i <- ES_set[ES_set_start:length(ES),]
#HYPOTHESIS 1
# c is cases per 100000
# c = A * days ^ n
# ln(c) = ln(A) + n*ln(d)
ES_model <- lm(formula = log_cases_prop ~ log_days,
data = ES_set.i)
# plot ln(c) = ln(A) + n*ln(d)
# NOTE(review): corr_n and corr_A are referenced here but only assigned
# a few lines below -- this plot errors on a fresh top-to-bottom run.
ggplot(data = ES_set.i) +
geom_point(aes(x = log_days,y=log_cases_prop, color = "yellow")) +
geom_point(aes(x = log_days,y=corr_n * log_days * coef(ES_model)[2] + corr_A * coef(ES_model)[1], color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("yellow","purple"),
labels = c("spain","estimate"),
guide = "legend")
#HYPOTHESIS 2
# c = A * days ^ n + B
# Manual correction factors for the nls starting values; currently 1
# (no correction), the commented values are previous experiments.
corr_n <- 1#.975
corr_A <- 1#.995
start_es <- list(
A_es = exp(coef(ES_model)[1] * corr_A),
n_es = coef(ES_model)[2] * corr_n,
B = 0
)
ES_model2 <- nls(formula = cases_prop ~ A_es * days ^ n_es + B,
data = ES_set.i,
start = start_es,
control = list(maxiter = 500))
ES_coefs <- coef(ES_model2)
# plot c = A * days ^ n + B
# NOTE(review): es.func1 is defined near the end of this script.
ggplot(data = ES_set.i) +
geom_point(aes(x = days,y=cases_prop, color = "yellow")) +
geom_point(aes(x = days,y=sapply(ES_set.i$days, es.func1), color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("yellow","purple"),
labels = c("spain","estimate"),
guide = "legend")
####################################################################################################################################
####################################################################################################################################
# FUNCTIONS
# ICU Beds per cases
# Estimate the number of ICU beds required for a given case count.
# Below the breakpoint a power law is used; at and above it, a linear
# extrapolation whose slope comes from two observed points. Note the
# intercept anchor 3356, not the data point 3396: (1.702 * 69176)^0.6953
# is approximately 3356, which keeps the two pieces (approximately)
# continuous at the breakpoint -- presumably deliberate, worth confirming.
ICU.func <- function(cases) {
  breakpoint <- 69176
  if (cases < breakpoint) {
    beds <- (1.702 * cases)^0.6953
  } else {
    slope <- (3732 - 3396) / (86498 - breakpoint)
    beds <- slope * cases + (3356 - breakpoint * slope)
  }
  round(beds, digits = 0)
}
# SPANISH DATA
# Fitted power law for Spain: cases_per_100000 = A * days^n + B.
# es.func1: days since first case -> predicted cases per 100000.
es.func1 <- function(days) 3.319e-14 * days^8.91 - 1.104
# es.func2: inverse of es.func1 -- cases per 100000 -> implied day number.
es.func2 <- function(cases) ((cases + 1.104) / 3.319e-14)^(1 / 8.91)
# UK DATA
# uk.func1: days since first case -> predicted cases per 100000.
uk.func1 <- function(days) 2.052e-17 * days^10.22 + 0.003966
# uk.func2: inverse of uk.func1 -- cases per 100000 -> implied day number.
uk.func2 <- function(cases) ((cases - 0.003966) / 2.052e-17)^(1 / 10.22)
# EXAMPLES
# lets say we have 50 cases per 100000, I want to know how many cases we will have in 5 days time
# Invert the fitted curve to recover the implied "day", step 5 days
# ahead, then evaluate the curve again.
es.func2(50) %>% {es.func1(. + 5)} %>% round(.,digits = 0)
uk.func2(50) %>% {uk.func1(. + 5)} %>% round(.,digits = 0)
# now I want to know how many ICU beds I will need in 5 days time
# Same 5-day projection, then map projected cases to ICU beds.
# NOTE(review): ICU.func's breakpoints look like absolute case counts,
# while these projections are cases per 100000 -- confirm intended units.
es.func2(50) %>% {es.func1(. + 5)} %>% ICU.func(.)
uk.func2(50) %>% {uk.func1(. + 5)} %>% ICU.func(.)
# and now I want to predict how many ICU beds I will need
| /app/pred/ICU_analysis.R | no_license | pm9602/p153_2 | R | false | false | 14,581 | r |
library(ggplot2)
library(dplyr)
library(scales)
###################################################################################################################################
# ICU BED VS CASES - ITALY
ICU_beds <- readxl::read_xls("italy_ICUbeds.xls")
# HYPOTHESES 1 - beds = ln(CASES * B) + C
# START IS A RESULT OF FITTING A LINEAR MODEL TO EXP(CASES) = A * CASES + B
# Linear fit of exp(beds/1000) against cases; its coefficients seed the
# log-model curve drawn below (a = intercept, b = slope).
model.0 <- lm(exp(beds/1000) ~ cases,
data = ICU_beds)
# NOTE(review): ICU_beds.i (all but the last 4 rows) is computed here but
# never used in this section -- confirm whether it is still needed.
ICU_beds.i <- ICU_beds[1:(nrow(ICU_beds)-4),]
start <- list(a = coef(model.0)[1], b=coef(model.0)[2])
# HYPOTHESES 2 - beds = q * CASES ^ r
# Power-law fit of beds against cases.
out <- nls(beds ~ (q * cases) ^ r,
data = ICU_beds,
start = list(q = 0.001, r = .5),
control = list(maxiter = 5000)
)
# Hypothesis 3 - combination of linear model and q * cases ^ r
# Overlay the raw data (blue), the log model (orange), and ICU.func
# (green). NOTE(review): ICU.func is defined at the bottom of this
# script, so this plot needs the file sourced once beforehand; also
# "Assesments" in the title is a typo in the displayed string.
ggplot() +
geom_point(data=ICU_beds, aes(x=cases, y=beds, colour = "blue")) +
geom_point(data=ICU_beds, aes(x=cases, y= 1000 * log(start$b * cases + start$a), colour = "orange")) +
geom_point(data=ICU_beds, aes(x=cases, y= sapply(cases, ICU.func), colour = "green")) +
scale_color_identity(name = "Model fit",
breaks = c("blue", "orange", "green"),
labels = c("Beds vs Cases", "Beds = A * ln(B * Cases + C)", "Beds = A * Cases ^ B"),
guide = "legend") +
scale_y_continuous(limits = c(0, NA), expand = c(0,0)) +
ggtitle("Cases vs Beds Assesments")
#####################################################################################################################
# CASE GROWTH ESTIMATES
cases_world <- readxl::read_xls("world_data.xls")
cases_world$Date <- cases_world$Date %>% as.Date
# SET 1
# Keep only the six countries of interest (regex OR over Entity names).
toMatch <- paste(sep = "|", "United Kingdom","Germany","France","Ireland","Spain","Italy")
cases_set1 <- cases_world$Entity %>% {grep(toMatch,.)} %>% cases_world[.,]
# population per country in 100000
# Each entry pairs the population (units of 100000 people, stored as a
# character because of the c() coercion) with the Entity name used in
# cases_world.
pop_set1 <- list(UK = c(664.4,"United Kingdom"),
DE = c(827.9,"Germany"),
FR = c(669.9,"France"),
ES = c(446.6,"Spain"),
IT = c(604.8,"Italy"),
IE = c(48.3, "Ireland")
)
colnames(cases_set1)[4] <- "cases"
# Per-country case history normalised to cases per 100000: one layer per
# country, each subsetting cases_set1 by the Entity name stored in
# pop_set1 and dividing cases by the population entry (units of 100000).
# Fix: the labs() call had x and y swapped -- x is the Date axis and y is
# cases per 100000 (the later per-day plot has them the right way round).
ggplot() +
geom_point(data=cases_set1$Entity %in% (pop_set1["UK"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["UK"] %>% unlist())[1] %>% as.numeric,
colour = "blue")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["IT"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["IT"] %>% unlist())[1] %>% as.numeric,
colour = "green")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["IE"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["IE"] %>% unlist())[1] %>% as.numeric,
colour = "orange")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["ES"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["ES"] %>% unlist())[1] %>% as.numeric,
colour = "yellow")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["FR"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["FR"] %>% unlist())[1] %>% as.numeric,
colour = "red")
) +
geom_point(data=cases_set1$Entity %in% (pop_set1["DE"] %>% unlist())[2] %>% cases_set1[.,],
aes(x=Date,
y=cases/(pop_set1["DE"] %>% unlist())[1] %>% as.numeric,
colour = "black")
) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
scale_x_date(date_breaks = "1 week",
labels=date_format("%b-%Y"),
limits = as.Date(c('2020-02-20',Sys.Date()))) +
labs(x = "date", y = "cases per 100000") +
ggtitle("Ctry cases history per 100000 people")
# normalise data i.e. italy had corona virus before anyone else, move everyone left to match italys start data
# For each country, keep only the rows from the first reported case
# onwards and index them 1..n in a new "days" column, so every country
# can be compared on a common "days since first case" axis. The original
# code repeated the same two pipe chains six times; factored into two
# helpers (behavior unchanged, plus seq_along is safe for empty input).
#
# country_case_days(): indices (within one country's rows of cases_set1)
# of the days on which at least one case had been reported.
country_case_days <- function(country_name) {
  which(cases_set1$cases[cases_set1$Entity %in% country_name] > 0)
}
# country_day_set(): that country's subset of cases_set1 restricted to
# those days, with column 5 holding the day index and named "days".
country_day_set <- function(country_name, day_idx) {
  ctry_set <- cases_set1[cases_set1$Entity %in% country_name, ]
  ctry_set <- ctry_set[day_idx, ]
  ctry_set <- cbind(ctry_set, seq_along(day_idx))
  colnames(ctry_set)[5] <- "days"
  ctry_set
}
# pop_set1$XX[2] is the Entity name for each country code.
IT <- country_case_days(pop_set1$IT[2])
UK <- country_case_days(pop_set1$UK[2])
DE <- country_case_days(pop_set1$DE[2])
ES <- country_case_days(pop_set1$ES[2])
IE <- country_case_days(pop_set1$IE[2])
FR <- country_case_days(pop_set1$FR[2])
UK_set <- country_day_set(pop_set1$UK[2], UK)
IT_set <- country_day_set(pop_set1$IT[2], IT)
IE_set <- country_day_set(pop_set1$IE[2], IE)
ES_set <- country_day_set(pop_set1$ES[2], ES)
DE_set <- country_day_set(pop_set1$DE[2], DE)
FR_set <- country_day_set(pop_set1$FR[2], FR)
# Raw case counts per country against days since each country's first
# reported case. NOTE(review): the y axis label says "cases per 100000"
# but the plotted column is raw cases -- confirm which was intended.
ggplot() +
geom_point(data=UK_set, aes(x=days, y=cases, colour = "blue")) +
geom_point(data=IT_set, aes(x=days, y=cases, colour = "green")) +
geom_point(data=IE_set, aes(x=days, y=cases, colour = "orange")) +
geom_point(data=ES_set, aes(x=days, y=cases, colour = "yellow")) +
geom_point(data=FR_set, aes(x=days, y=cases, colour = "red")) +
geom_point(data=DE_set, aes(x=days, y=cases, colour = "black")) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
labs(y= "cases per 100000", x = "Days from CORVID 19 first reported") +
ggtitle("Progress of CORVID 19 since first reported cases")
# Same comparison on a sqrt(cases) scale, restricted to each country's
# "widely tested" window. NOTE(review): the *_set_start constants are
# assigned a few lines BELOW this plot, so it errors on a fresh
# top-to-bottom run -- confirm intended order. The y label also says
# "per 100000" while the plotted column is raw cases.
ggplot() +
geom_point(data=UK_set[UK_set_start:length(UK),], aes(x=days, y=sqrt(cases), colour = "blue")) +
geom_point(data=IT_set[IT_set_start:length(IT),], aes(x=days, y=sqrt(cases), colour = "green")) +
geom_point(data=IE_set[IE_set_start:length(IE),], aes(x=days, y=sqrt(cases), colour = "orange")) +
geom_point(data=ES_set[ES_set_start:length(ES),], aes(x=days, y=sqrt(cases), colour = "yellow")) +
geom_point(data=FR_set[FR_set_start:length(FR),], aes(x=days, y=sqrt(cases), colour = "red")) +
geom_point(data=DE_set[DE_set_start:length(DE),], aes(x=days, y=sqrt(cases), colour = "black")) +
scale_color_identity(name = "Ctry",
breaks = c("blue","green","orange","yellow","red","black"),
labels = c("UK","IT","IE","ES","FR","DE"),
guide = "legend") +
labs(y= "sqrt(cases per 100000)", x = "Days from CORVID 19 first reported") +
ggtitle("Progress of CORVID 19 since first reported cases")
# Take data from when cases are tested widely. These offsets were chosen
# as the day each country's data starts trending upwards on the log
# model; they are used by the sqrt plot above and the modelling sections
# below.
UK_set_start <- 28
IT_set_start <- 22
DE_set_start <- 29
ES_set_start <- 24
FR_set_start <- 32
IE_set_start <- 1
# Fixed seed so the fits/plots below are reproducible.
set.seed(20)
####################################################################################################################################
####################################################################################################################################
# UK MODELLING
# prep data for Hypothesis 1 and 2
# Column 6 (cases_prop): cases scaled by the UK population entry (units
# of 100000 people per pop_set1's comment) => cases per 100000.
UK_set <- cbind(UK_set,UK_set$cases/as.numeric(pop_set1$UK[1]))
# Columns 7-8: log transforms for the log-log fit below.
UK_set <- cbind(UK_set,log(UK_set[,6]))
UK_set <- cbind(UK_set,log(UK_set$days))
colnames(UK_set)[6] <- "cases_prop"
colnames(UK_set)[7] <- "log_cases_prop"
colnames(UK_set)[8] <- "log_days"
# Restrict to the widely-tested window defined by UK_set_start above.
UK_set.i <- UK_set[UK_set_start:length(UK),]
#HYPOTHESIS 1
# c is cases per 100000
# c = A * days ^ n
# ln(c) = ln(A) + n*ln(d)
# OLS on the log-log scale: intercept estimates ln(A), slope estimates n.
UK_model <- lm(formula = log_cases_prop ~ log_days,
data = UK_set.i)
# plot ln(c) = ln(A) + n*ln(d)
ggplot(data = UK_set.i) +
geom_point(aes(x = log_days,y=log_cases_prop, color = "blue")) +
geom_point(aes(x = log_days,y= log_days * coef(UK_model)[2] + coef(UK_model)[1], color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("blue","purple"),
labels = c("UK","estimate"),
guide = "legend")
#HYPOTHESIS 2
# c = A * days ^ n + B
# Refit on the raw scale with an additive offset B; the log-log fit
# supplies starting values (A = exp(intercept), n = slope).
start_UK <- list(
A_UK = exp(coef(UK_model)[1]),
n_UK = coef(UK_model)[2],
B = 0
)
UK_model2 <- nls(formula = cases_prop ~ A_UK * days ^ n_UK + B,
data = UK_set.i,
start = start_UK,
control = list(maxiter = 500))
summary(UK_model2)
UK_coefs <- coef(UK_model2)
# plot c = A * days ^ n + b
# NOTE(review): uk.func1 is defined near the end of this script, so this
# plot only works after the whole file has been sourced at least once.
ggplot(data = UK_set.i) +
geom_point(aes(x = days,y=cases_prop, color = "blue")) +
geom_point(aes(x = days,y=sapply(days, uk.func1), color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("blue","purple"),
labels = c("UK","estimate"),
guide = "legend")
####################################################################################################################################
####################################################################################################################################
# ES MODELLING
# Same pipeline as the UK section: cases per 100000 (col 6) plus log
# transforms (cols 7-8) for the log-log power-law fit.
ES_set <- cbind(ES_set,ES_set$cases/as.numeric(pop_set1$ES[1]))
ES_set <- cbind(ES_set,log(ES_set[,6]))
ES_set <- cbind(ES_set,log(ES_set$days))
colnames(ES_set)[6] <- "cases_prop"
colnames(ES_set)[7] <- "log_cases_prop"
colnames(ES_set)[8] <- "log_days"
ES_set.i <- ES_set[ES_set_start:length(ES),]
#HYPOTHESIS 1
# c is cases per 100000
# c = A * days ^ n
# ln(c) = ln(A) + n*ln(d)
ES_model <- lm(formula = log_cases_prop ~ log_days,
data = ES_set.i)
# plot ln(c) = ln(A) + n*ln(d)
# NOTE(review): corr_n and corr_A are referenced here but only assigned
# a few lines below -- this plot errors on a fresh top-to-bottom run.
ggplot(data = ES_set.i) +
geom_point(aes(x = log_days,y=log_cases_prop, color = "yellow")) +
geom_point(aes(x = log_days,y=corr_n * log_days * coef(ES_model)[2] + corr_A * coef(ES_model)[1], color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("yellow","purple"),
labels = c("spain","estimate"),
guide = "legend")
#HYPOTHESIS 2
# c = A * days ^ n + B
# Manual correction factors for the nls starting values; currently 1
# (no correction), the commented values are previous experiments.
corr_n <- 1#.975
corr_A <- 1#.995
start_es <- list(
A_es = exp(coef(ES_model)[1] * corr_A),
n_es = coef(ES_model)[2] * corr_n,
B = 0
)
ES_model2 <- nls(formula = cases_prop ~ A_es * days ^ n_es + B,
data = ES_set.i,
start = start_es,
control = list(maxiter = 500))
ES_coefs <- coef(ES_model2)
# plot c = A * days ^ n + B
# NOTE(review): es.func1 is defined near the end of this script.
ggplot(data = ES_set.i) +
geom_point(aes(x = days,y=cases_prop, color = "yellow")) +
geom_point(aes(x = days,y=sapply(ES_set.i$days, es.func1), color = "purple"), shape = 3) +
scale_color_identity(name = "Ctry",
breaks = c("yellow","purple"),
labels = c("spain","estimate"),
guide = "legend")
####################################################################################################################################
####################################################################################################################################
# FUNCTIONS
# ICU Beds per cases
# Estimate the number of ICU beds required for a given case count.
# Below the breakpoint a power law is used; at and above it, a linear
# extrapolation whose slope comes from two observed points. Note the
# intercept anchor 3356, not the data point 3396: (1.702 * 69176)^0.6953
# is approximately 3356, which keeps the two pieces (approximately)
# continuous at the breakpoint -- presumably deliberate, worth confirming.
ICU.func <- function(cases) {
  breakpoint <- 69176
  if (cases < breakpoint) {
    beds <- (1.702 * cases)^0.6953
  } else {
    slope <- (3732 - 3396) / (86498 - breakpoint)
    beds <- slope * cases + (3356 - breakpoint * slope)
  }
  round(beds, digits = 0)
}
# SPANISH DATA
# Fitted power law for Spain: cases_per_100000 = A * days^n + B.
# es.func1: days since first case -> predicted cases per 100000.
es.func1 <- function(days) 3.319e-14 * days^8.91 - 1.104
# es.func2: inverse of es.func1 -- cases per 100000 -> implied day number.
es.func2 <- function(cases) ((cases + 1.104) / 3.319e-14)^(1 / 8.91)
# UK DATA
# uk.func1: days since first case -> predicted cases per 100000.
uk.func1 <- function(days) 2.052e-17 * days^10.22 + 0.003966
# uk.func2: inverse of uk.func1 -- cases per 100000 -> implied day number.
uk.func2 <- function(cases) ((cases - 0.003966) / 2.052e-17)^(1 / 10.22)
# EXAMPLES
# lets say we have 50 cases per 100000, I want to know how many cases we will have in 5 days time
# Invert the fitted curve to recover the implied "day", step 5 days
# ahead, then evaluate the curve again.
es.func2(50) %>% {es.func1(. + 5)} %>% round(.,digits = 0)
uk.func2(50) %>% {uk.func1(. + 5)} %>% round(.,digits = 0)
# now I want to know how many ICU beds I will need in 5 days time
# Same 5-day projection, then map projected cases to ICU beds.
# NOTE(review): ICU.func's breakpoints look like absolute case counts,
# while these projections are cases per 100000 -- confirm intended units.
es.func2(50) %>% {es.func1(. + 5)} %>% ICU.func(.)
uk.func2(50) %>% {uk.func1(. + 5)} %>% ICU.func(.)
# and now I want to predict how many ICU beds I will need
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{dosePlot}
\alias{dosePlot}
\title{plot dose data}
\usage{
dosePlot(object, model.id, max.time = NULL, treatment.only = FALSE,
vol.normal = FALSE, concurrent.time = FALSE, point.shape = 21,
point.size = 3, line.size = 4, point.color = "#878787",
line.color = "#bababa", fill.col = c("#f5f5f5", "#E55100"),
modify.x.axis = F)
}
\arguments{
\item{object}{Xeva object.}
\item{model.id}{one or multiple model.id}
\item{max.time}{Maximum time point of the plot. Default \code{NULL} will plot complete data}
\item{treatment.only}{Default \code{FALSE}. Given full data \code{treatment.only=TRUE} will plot data only during treatment}
\item{vol.normal}{Default \code{FALSE}. If \code{TRUE}, volume will be normalized}
\item{concurrent.time}{Default \code{FALSE}. If \code{TRUE}, cut the batch data such that control and treatment will end at the same time point}
\item{point.shape}{shape of the point}
\item{point.size}{size of the point}
\item{line.size}{size of the line}
\item{point.color}{color for point}
\item{line.color}{color for line}
\item{fill.col}{a vector with color to fill}
\item{modify.x.axis}{Default \code{FALSE}}
}
\value{
A ggplot2 plot
}
\description{
Plot the dose data for one or more \code{model.id} values.
}
\examples{
data(brca)
dosePlot(brca, model.id=c("X.6047.LJ16","X.6047.LJ16.trab"), fill.col=c("#f5f5f5", "#993404"))
}
| /man/dosePlot.Rd | no_license | RNA-Ninja/Xeva | R | false | true | 1,437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{dosePlot}
\alias{dosePlot}
\title{plot dose data}
\usage{
dosePlot(object, model.id, max.time = NULL, treatment.only = FALSE,
vol.normal = FALSE, concurrent.time = FALSE, point.shape = 21,
point.size = 3, line.size = 4, point.color = "#878787",
line.color = "#bababa", fill.col = c("#f5f5f5", "#E55100"),
modify.x.axis = F)
}
\arguments{
\item{object}{Xeva object.}
\item{model.id}{one or multiple model.id}
\item{max.time}{Maximum time point of the plot. Default \code{NULL} will plot complete data}
\item{treatment.only}{Default \code{FALSE}. Given full data \code{treatment.only=TRUE} will plot data only during treatment}
\item{vol.normal}{Default \code{FALSE}. If \code{TRUE}, volume will be normalized}
\item{concurrent.time}{Default \code{FALSE}. If \code{TRUE}, cut the batch data such that control and treatment will end at the same time point}
\item{point.shape}{shape of the point}
\item{point.size}{size of the point}
\item{line.size}{size of the line}
\item{point.color}{color for point}
\item{line.color}{color for line}
\item{fill.col}{a vector with color to fill}
\item{modify.x.axis}{Default \code{FALSE}}
}
\value{
A ggplot2 plot
}
\description{
Plot the dose data for one or more \code{model.id} values.
}
\examples{
data(brca)
dosePlot(brca, model.id=c("X.6047.LJ16","X.6047.LJ16.trab"), fill.col=c("#f5f5f5", "#993404"))
}
|
\name{marks}
\alias{mark_arc}
\alias{mark_area}
\alias{mark_image}
\alias{mark_line}
\alias{mark_rect}
\alias{mark_symbol}
\alias{mark_text}
\alias{marks}
\title{Vega marks.}
\usage{
mark_symbol(props = NULL, data = NULL)
mark_image(props = NULL, data = NULL)
mark_arc(props = NULL, data = NULL)
mark_area(props = NULL, data = NULL)
mark_line(props = NULL, data = NULL)
mark_rect(props = NULL, data = NULL)
mark_text(props = NULL, data = NULL)
}
\arguments{
\item{props}{A \code{\link{props}} object, named
according to the properties listed below.}
\item{data}{An optional dataset, if you want to override
the usual data inheritance for this mark.}
}
\description{
These functions create mark objects, corresponding to
vega marks. Marks are leaves in the plot tree, and
control the details of the final rendering. Marks are
equivalent to the basic geoms in ggplot2 (e.g. point,
line, polygon), where ggvis branches correspond to
combinations of geoms and statistical transforms.
}
\details{
Note that supplying a fill property to
\code{mark_line} will produce a filled region.
}
\section{Properties}{
You can set the following mark properties:
\itemize{ \item x The first (typically left-most)
x-coordinate. \item x2 The second (typically right-most)
x-coordinate. \item width The width of the mark (if
supported). \item y The first (typically top-most)
y-coordinate. \item y2 The second (typically bottom-most)
y-coordinate. \item height The height of the mark (if
supported). \item opacity The overall opacity. \item fill
The fill color. \item fillOpacity The fill opacity \item
stroke The stroke color. \item strokeWidth The stroke
width, in pixels. \item strokeOpacity The stroke opacity.
\item size [symbol] The pixel area of the symbol. For
example in the case of circles, the radius is determined
in part by the square root of the size value. \item shape
[symbol] The symbol shape to use. One of circle
(default), square, cross, diamond, triangle-up, or
triangle-down (symbol only) \item innerRadius [arc] The
inner radius of the arc, in pixels. \item outerRadius
[arc] The outer radius of the arc, in pixels. \item
startAngle [arc] The start angle of the arc, in radians.
\item endAngle [arc] The end angle of the arc, in
radians. \item interpolate [area, line] The line
interpolation method to use. One of linear, step-before,
step-after, basis, basis-open, cardinal, cardinal-open,
monotone. \item tension [area, line] Depending on the
interpolation type, sets the tension parameter. \item url
[image] The URL from which to retrieve the image. \item
align [image, text] The horizontal alignment of the
object. One of left, right, center. \item baseline
[image, text] The vertical alignment of the object. One
of top, middle, bottom. \item text [text] The text to
display. \item dx [text] The horizontal margin, in
pixels, between the text label and its anchor point. The
value is ignored if the align property is center. \item
dy [text] The vertical margin, in pixels, between the
text label and its anchor point. The value is ignored if
the baseline property is middle. \item angle [text] The
rotation angle of the text, in degrees. \item font [text]
The typeface to set the text in (e.g., Helvetica Neue).
\item fontSize [text] The font size, in pixels. \item
fontWeight [text] The font weight (e.g., bold). \item
fontStyle [text] The font style (e.g., italic). }
To each property, you can assign any property object
(\code{\link{prop}}) either locally (i.e. in the mark),
or in a parent \code{\link{branch}}.
}
\seealso{
The "marks" vignette.
}
| /man/marks.Rd | no_license | imclab/ggvis | R | false | false | 3,690 | rd | \name{marks}
\alias{mark_arc}
\alias{mark_area}
\alias{mark_image}
\alias{mark_line}
\alias{mark_rect}
\alias{mark_symbol}
\alias{mark_text}
\alias{marks}
\title{Vega marks.}
\usage{
mark_symbol(props = NULL, data = NULL)
mark_image(props = NULL, data = NULL)
mark_arc(props = NULL, data = NULL)
mark_area(props = NULL, data = NULL)
mark_line(props = NULL, data = NULL)
mark_rect(props = NULL, data = NULL)
mark_text(props = NULL, data = NULL)
}
\arguments{
\item{props}{A \code{\link{props}} object, named
according to the properties listed below.}
\item{data}{An optional dataset, if you want to override
the usual data inheritance for this mark.}
}
\description{
These functions create mark objects, corresponding to
vega marks. Marks are leaves in the plot tree, and
control the details of the final rendering. Marks are
equivalent to the basic geoms in ggplot2 (e.g. point,
line, polygon), where ggvis branches correspond to
combinations of geoms and statistical transforms.
}
\details{
Note that supplying a fill property to
\code{mark_line} will produce a filled region.
}
\section{Properties}{
You can set the following mark properties:
\itemize{ \item x The first (typically left-most)
x-coordinate. \item x2 The second (typically right-most)
x-coordinate. \item width The width of the mark (if
supported). \item y The first (typically top-most)
y-coordinate. \item y2 The second (typically bottom-most)
y-coordinate. \item height The height of the mark (if
supported). \item opacity The overall opacity. \item fill
The fill color. \item fillOpacity The fill opacity \item
stroke The stroke color. \item strokeWidth The stroke
width, in pixels. \item strokeOpacity The stroke opacity.
\item size [symbol] The pixel area of the symbol. For
example in the case of circles, the radius is determined
in part by the square root of the size value. \item shape
[symbol] The symbol shape to use. One of circle
(default), square, cross, diamond, triangle-up, or
triangle-down (symbol only) \item innerRadius [arc] The
inner radius of the arc, in pixels. \item outerRadius
[arc] The outer radius of the arc, in pixels. \item
startAngle [arc] The start angle of the arc, in radians.
\item endAngle [arc] The end angle of the arc, in
radians. \item interpolate [area, line] The line
interpolation method to use. One of linear, step-before,
step-after, basis, basis-open, cardinal, cardinal-open,
monotone. \item tension [area, line] Depending on the
interpolation type, sets the tension parameter. \item url
[image] The URL from which to retrieve the image. \item
align [image, text] The horizontal alignment of the
object. One of left, right, center. \item baseline
[image, text] The vertical alignment of the object. One
of top, middle, bottom. \item text [text] The text to
display. \item dx [text] The horizontal margin, in
pixels, between the text label and its anchor point. The
value is ignored if the align property is center. \item
dy [text] The vertical margin, in pixels, between the
text label and its anchor point. The value is ignored if
the baseline property is middle. \item angle [text] The
rotation angle of the text, in degrees. \item font [text]
The typeface to set the text in (e.g., Helvetica Neue).
\item fontSize [text] The font size, in pixels. \item
fontWeight [text] The font weight (e.g., bold). \item
fontStyle [text] The font style (e.g., italic). }
To each property, you can assign any property object
(\code{\link{prop}}) either locally (i.e. in the mark),
or in a parent \code{\link{branch}}.
}
\seealso{
The "marks" vignette.
}
|
#' Create a terrain tile with optional image overlay
#'
#' @inheritParams new_scene
#' @param heightmap_path The file path to the heightmap to import as terrain.
#' @param x_pos,z_pos The position of the corner of the terrain.
#' @param width,height,length The dimensions of the terrain tile,
#' in linear units.
#' @param heightmap_resolution The resolution of the heightmap image.
#' @param texture_path Optional: the file path to the image to use as a terrain
#' overlay.
#'
#' @family props
#'
#' @examples
#' if (requireNamespace("terra", quietly = TRUE)) {
#'   raster <- tempfile(fileext = ".tiff")
#'   r <- terra::rast(matrix(rnorm(1000^2, mean = 100, sd = 20), 1000),
#'     extent = terra::ext(0, 1000, 0, 1000)
#'   )
#'   terra::writeRaster(r, raster)
#'
#'   script <- make_script("example_script",
#'     unity = waiver()
#'   )
#'   create_terrain(
#'     script,
#'     heightmap_path = raster,
#'     x_pos = 0,
#'     z_pos = 0,
#'     width = 1000,
#'     height = terra::minmax(r)[[2]],
#'     length = 1000,
#'     heightmap_resolution = 1000
#'   )
#' }
#' @export
create_terrain <- function(script,
                           method_name = NULL,
                           heightmap_path,
                           x_pos,
                           z_pos,
                           width,
                           height,
                           length,
                           heightmap_resolution,
                           texture_path = "",
                           exec = TRUE) {
  # Name of the first registered beat of `beat_type`, or NULL when the
  # script has none. Reads `script` from the enclosing frame, so it
  # always sees the most recently updated script object.
  first_beat_name <- function(beat_type) {
    hits <- script$beats[script$beats$type == beat_type, ]$name
    if (length(hits) > 0) hits[[1]] else NULL
  }

  # Terrain creation relies on an AddTexture method: reuse an existing
  # one, otherwise register it under a predictable auto-generated name.
  add_texture_method <- first_beat_name("AddTexture")
  if (is.null(add_texture_method)) {
    add_texture_method <- "AddTextureAutoAdd"
    script <- add_texture(script, add_texture_method, exec = FALSE)
  }

  # Likewise for the ReadRaw helper that loads the heightmap bytes.
  read_raw_method <- first_beat_name("ReadRaw")
  if (is.null(read_raw_method)) {
    read_raw_method <- "ReadRawAutoAdd"
    script <- read_raw(script, read_raw_method, exec = FALSE)
  }

  prop <- unifir_prop(
    prop_file = system.file("CreateTerrain.cs", package = "unifir"),
    method_name = method_name,
    method_type = "CreateTerrain",
    parameters = list(
      heightmap_path = heightmap_path,
      x_pos = x_pos,
      z_pos = z_pos,
      width = width,
      height = height,
      length = length,
      heightmap_resolution = heightmap_resolution,
      texturePath = texture_path,
      add_texture_method = add_texture_method,
      read_raw_method = read_raw_method
    ),
    build = function(script, prop, debug) {
      # Interpolate the C# template with %...% delimiters (so the
      # braces in the C# source are left untouched by glue).
      template <- readChar(prop$prop_file, file.info(prop$prop_file)$size)
      params <- prop$parameters
      glue::glue(
        template,
        .open = "%",
        .close = "%",
        method_name = prop$method_name,
        heightmap_path = params$heightmap_path,
        base_path = basename(params$heightmap_path),
        x_pos = params$x_pos,
        z_pos = params$z_pos,
        width = params$width,
        height = params$height,
        length = params$length,
        heightmapResolution = params$heightmap_resolution,
        texturePath = params$texturePath,
        add_texture_method = params$add_texture_method,
        read_raw_method = params$read_raw_method
      )
    },
    using = c("System", "System.IO", "UnityEngine")
  )
  add_prop(script, prop, exec)
}
| /R/create_terrain.R | permissive | ropensci/unifir | R | false | false | 3,471 | r | #' Create a terrain tile with optional image overlay
#'
#' @inheritParams new_scene
#' @param heightmap_path The file path to the heightmap to import as terrain.
#' @param x_pos,z_pos The position of the corner of the terrain.
#' @param width,height,length The dimensions of the terrain tile,
#' in linear units.
#' @param heightmap_resolution The resolution of the heightmap image.
#' @param texture_path Optional: the file path to the image to use as a terrain
#' overlay.
#'
#' @family props
#'
#' @examples
#' if (requireNamespace("terra", quietly = TRUE)) {
#' raster <- tempfile(fileext = ".tiff")
#' r <- terra::rast(matrix(rnorm(1000^2, mean = 100, sd = 20), 1000),
#' extent = terra::ext(0, 1000, 0, 1000)
#' )
#' terra::writeRaster(r, raster)
#'
#' script <- make_script("example_script",
#' unity = waiver()
#' )
#' create_terrain(
#' script,
#' heightmap_path = raster,
#' x_pos = 0,
#' z_pos = 0,
#' width = 1000,
#' height = terra::minmax(r)[[2]],
#' length = 1000,
#' heightmap_resolution = 1000
#' )
#' }
#' @export
create_terrain <- function(script,
method_name = NULL,
heightmap_path,
x_pos,
z_pos,
width,
height,
length,
heightmap_resolution,
texture_path = "",
exec = TRUE) {
if (any(script$beats$type == "AddTexture")) {
add_texture_method <- utils::head(
script$beats[script$beats$type == "AddTexture", ]$name,
1
)
} else {
add_texture_method <- "AddTextureAutoAdd"
script <- add_texture(script, add_texture_method, exec = FALSE)
}
if (any(script$beats$type == "ReadRaw")) {
read_raw_method <- utils::head(
script$beats[script$beats$type == "ReadRaw", ]$name,
1
)
} else {
read_raw_method <- "ReadRawAutoAdd"
script <- read_raw(script, read_raw_method, exec = FALSE)
}
prop <- unifir_prop(
prop_file = system.file("CreateTerrain.cs", package = "unifir"),
method_name = method_name,
method_type = "CreateTerrain",
parameters = list(
heightmap_path = heightmap_path,
x_pos = x_pos,
z_pos = z_pos,
width = width,
height = height,
length = length,
heightmap_resolution = heightmap_resolution,
texturePath = texture_path,
add_texture_method = add_texture_method,
read_raw_method = read_raw_method
),
build = function(script, prop, debug) {
glue::glue(
readChar(prop$prop_file, file.info(prop$prop_file)$size),
.open = "%",
.close = "%",
method_name = prop$method_name,
heightmap_path = prop$parameters$heightmap_path,
base_path = basename(prop$parameters$heightmap_path),
x_pos = prop$parameters$x_pos,
z_pos = prop$parameters$z_pos,
width = prop$parameters$width,
height = prop$parameters$height,
length = prop$parameters$length,
heightmapResolution = prop$parameters$heightmap_resolution,
texturePath = prop$parameters$texturePath,
add_texture_method = prop$parameters$add_texture_method,
read_raw_method = prop$parameters$read_raw_method
)
},
using = c("System", "System.IO", "UnityEngine")
)
add_prop(script, prop, exec)
}
|
## these are functions from Rand Wilcox, which have been slightly modified
## see http://www-rcf.usc.edu/~rwilcox/
regci <- function(x, y, regfun = tsreg, nboot = 599, alpha = 0.05, autocor = FALSE, SEED = TRUE,
    pr = TRUE, xout = FALSE, outfun = out, ...) {
    ##
    ## Compute a 1 - alpha confidence interval for each parameter of a
    ## linear regression equation via a (block) bootstrap.  The default
    ## regression method is the Theil-Sen estimator.
    ##
    ## When using the least squares estimator, and when n<250, use
    ## lsfitci instead.
    ##
    ## x: n-by-p matrix of predictor values; y: response vector.
    ## nboot: number of bootstrap samples (default 599).
    ## regfun: any R function that returns the coefficients in
    ##   regfun$coef, intercept first, then one slope per predictor.
    ## autocor: if TRUE, use a block bootstrap with block length n^(1/3)
    ##   (Buhlmann and Kunsch 1994) to respect serial correlation.
    ##   BUGFIX: the original default `autocor = autocor` was a
    ##   self-referential promise and errored whenever the argument was
    ##   not supplied explicitly; FALSE restores a usable default.
    ## xout: if TRUE, drop rows flagged as outliers by outfun first.
    ##
    ## Returns list(regci = M) where M is (p + 1)-by-5: CI bounds, point
    ## estimate, bootstrap SE and two-sided bootstrap p-value per
    ## coefficient (intercept in row 1).
    ##
    ## get rid of R check annoyances
    out = NULL
    x <- as.matrix(x)
    p1 <- ncol(x) + 1
    p <- ncol(x)
    ## drop rows with missing values in either x or y
    xy <- elimna(cbind(x, y))
    x <- xy[, 1:p]
    y <- xy[, p1]
    if (xout) {
        m <- cbind(x, y)
        flag <- outfun(x, plotit = FALSE)$keep
        m <- m[flag, ]
        x <- m[, 1:p]
        y <- m[, p1]
    }
    x <- as.matrix(x)
    if (SEED)
        set.seed(2) ## fixed seed so that results can be duplicated
    if (pr)
        print("Taking bootstrap samples. Please wait.")
    ## Block length 1 gives the ordinary bootstrap; for autocorrelated
    ## data use blocks of length n^(1/3) (Buhlmann and Kunsch 1994).
    block.length <- if (autocor) round(length(y)^(1/3)) else 1
    ## samp.boot.block returns indices column-wise, hence the transpose
    data <- t(samp.boot.block(length(y), nboot, block.length))
    ## bvec is a (p + 1)-by-nboot matrix: row 1 holds the bootstrap
    ## intercepts, row 2 the bootstrap slopes for the first predictor, etc.
    bvec <- apply(data, 1, regboot, x, y, regfun, ...)
    regci <- matrix(0, p1, 5)
    dimnames(regci) <- list(c("intercept", rep("X", ncol(x))),
                            c("ci.low", "ci.up", "Estimate", "S.E.", "p-value"))
    ## percentile CI indices
    ilow <- round((alpha/2) * nboot) + 1
    ihi <- nboot - round((alpha/2) * nboot)
    se <- numeric(p1)    # preallocated (the originals grew element-wise)
    pvec <- numeric(p1)
    for (i in 1:p1) {
        bsort <- sort(bvec[i, ])
        ## one-sided bootstrap p-value; doubled below to make it two-sided
        pvec[i] <- (sum(bvec[i, ] < 0) + 0.5 * sum(bvec[i, ] == 0))/nboot
        if (pvec[i] > 0.5)
            pvec[i] <- 1 - pvec[i]
        regci[i, 1] <- bsort[ilow]
        regci[i, 2] <- bsort[ihi]
        se[i] <- sqrt(var(bvec[i, ]))
    }
    regci[, 3] <- regfun(x, y)$coef   # point estimates on the full data
    regci[, 4] <- se
    regci[, 5] <- 2 * pvec
    list(regci = regci)
}
elimna <- function(m) {
    #
    # remove any rows of data having missing values
    #
    # m: matrix or data frame (coerced to a matrix).  Returns the rows of
    # m with no NAs; as in the original, dimensions are dropped when only
    # a single row survives.
    #
    m <- as.matrix(m)
    # complete.cases() is the vectorized equivalent of the original
    # row-by-row loop, and unlike `for (i in 1:nrow(m))` it is also
    # correct when m has zero rows.  (The original also had a misplaced
    # parenthesis, `sum(is.na(m[i, ]) >= 1)`, which happened to work.)
    m[complete.cases(m), ]
}
regboot <- function(isub, x, y, regfun, ...) {
    #
    # Refit the regression on one bootstrap resample.
    #
    # isub: length-n vector of row indices (a bootstrap sample drawn from
    #   the sequence 1, 2, ..., n) supplied by the caller.
    # x: matrix of predictor values; y: response; regfun: any estimator
    #   returning its intercept and slope estimates in the $coef
    #   component.
    #
    # Returns the coefficient vector fitted to the resampled data.  Used
    # as the per-sample worker by the bootstrap routines (e.g. regci).
    #
    boot.x <- matrix(x[isub, ], nrow(x), ncol(x))  # keep matrix shape even for p = 1
    boot.y <- y[isub]
    fit <- regfun(boot.x, boot.y, ...)
    fit$coef
}
tsreg <- function(x, y, xout = FALSE, outfun = out, iter = 10, varfun = pbvar,
    ...) {
    #
    # Compute Theil-Sen regression estimator
    #
    # Use Gauss-Seidel algorithm
    # when there is more than one predictor
    #
    # x: matrix of predictors; y: response vector.
    # xout: if TRUE, drop rows flagged as outliers by outfun first.
    # iter: number of Gauss-Seidel sweeps (no convergence check; always
    #   runs exactly `iter` sweeps).
    # varfun: unused here -- it belonged to the commented-out
    #   Explanatory.Power computation near the end.
    #
    # Returns list(coef, residuals, Strength.Assoc = NA,
    # Explanatory.Power = NA), where coef = (intercept, slopes).
    #
    ## get rid of R check annoyances
    out = pbvar = NULL
    x <- as.matrix(x)
    # drop rows with missing values in either x or y
    xx <- cbind(x, y)
    xx <- elimna(xx)
    x <- xx[, 1:ncol(x)]
    x <- as.matrix(x)
    y <- xx[, ncol(x) + 1]
    temp <- NA
    x <- as.matrix(x)
    if (xout) {
        x <- as.matrix(x)
        flag <- outfun(x, ...)$keep
        x <- x[flag, ]
        y <- y[flag]
        x <- as.matrix(x)
    }
    if (ncol(x) == 1) {
        # single predictor: delegate to the pairwise-slope estimator
        temp1 <- tsp1reg(x, y)
        coef <- temp1$coef
        # NOTE(review): $res relies on R's partial matching to reach the
        # $residuals component returned by tsp1reg.
        res <- temp1$res
    }
    if (ncol(x) > 1) {
        # initialize each slope from a marginal Theil-Sen fit
        for (p in 1:ncol(x)) {
            temp[p] <- tsp1reg(x[, p], y)$coef[2]
        }
        res <- y - x %*% temp
        alpha <- median(res)  # intercept = median residual
        r <- matrix(NA, ncol = ncol(x), nrow = nrow(x))
        tempold <- temp  # NOTE(review): assigned but never read
        # Gauss-Seidel sweeps: refit one slope at a time against the
        # partial residuals, then update the intercept.
        for (it in 1:iter) {
            for (p in 1:ncol(x)) {
                r[, p] <- y - x %*% temp - alpha + temp[p] * x[, p]
                temp[p] <- tsp1reg(x[, p], r[, p], plotit = FALSE)$coef[2]
            }
            alpha <- median(y - x %*% temp)
            tempold <- temp
        }
        coef <- c(alpha, temp)
        res <- y - x %*% temp - alpha
    }
    yhat <- y - res
    stre = NULL
    # e.pow <- varfun(yhat)/varfun(y)
    #if (!is.na(e.pow)) {
    #if(e.pow>=1)e.pow<-corfun(yhat,y)$cor^2
    #stre=sqrt(e.pow)
    # }
    list(coef = coef, residuals = res, Strength.Assoc = NA, Explanatory.Power = NA)
}
tsp1reg <- function(x, y, plotit = FALSE) {
    #
    # Theil-Sen simple regression (single predictor only): the slope is
    # the median of all pairwise slopes, and the intercept is chosen so
    # the fit passes through (median(x), median(y)).
    #
    xy <- elimna(matrix(c(x, y), ncol = 2))  # remove any pairs with missing values
    x <- xy[, 1]
    y <- xy[, 2]
    # Sort by x so that each unordered pair shows up exactly once among
    # the strictly positive x-differences.
    idx <- order(x)
    dy <- outer(y[idx], y[idx], "-")
    dx <- outer(x[idx], x[idx], "-")
    slope <- median(dy[dx > 0]/dx[dx > 0])
    coef <- median(y) - slope * median(x)
    names(coef) <- "Intercept"
    coef <- c(coef, slope)
    if (plotit) {
        plot(x, y, xlab = "X", ylab = "Y")
        abline(coef)
    }
    res <- y - slope * x - coef[1]
    list(coef = coef, residuals = res)
}
| /functions/senTheil.R | no_license | eliavs/Wind | R | false | false | 6,725 | r | ## these are functions from Rand Wilcox, which have been slightly modified
## see http://www-rcf.usc.edu/~rwilcox/
regci <- function(x, y, regfun = tsreg, nboot = 599, alpha = 0.05, autocor = FALSE, SEED = TRUE,
    pr = TRUE, xout = FALSE, outfun = out, ...) {
    ##
    ## Compute a 1 - alpha confidence interval for each parameter of a
    ## linear regression equation via a (block) bootstrap.  The default
    ## regression method is the Theil-Sen estimator.
    ##
    ## When using the least squares estimator, and when n<250, use
    ## lsfitci instead.
    ##
    ## x: n-by-p matrix of predictor values; y: response vector.
    ## nboot: number of bootstrap samples (default 599).
    ## regfun: any R function that returns the coefficients in
    ##   regfun$coef, intercept first, then one slope per predictor.
    ## autocor: if TRUE, use a block bootstrap with block length n^(1/3)
    ##   (Buhlmann and Kunsch 1994) to respect serial correlation.
    ##   BUGFIX: the original default `autocor = autocor` was a
    ##   self-referential promise and errored whenever the argument was
    ##   not supplied explicitly; FALSE restores a usable default.
    ## xout: if TRUE, drop rows flagged as outliers by outfun first.
    ##
    ## Returns list(regci = M) where M is (p + 1)-by-5: CI bounds, point
    ## estimate, bootstrap SE and two-sided bootstrap p-value per
    ## coefficient (intercept in row 1).
    ##
    ## get rid of R check annoyances
    out = NULL
    x <- as.matrix(x)
    p1 <- ncol(x) + 1
    p <- ncol(x)
    ## drop rows with missing values in either x or y
    xy <- elimna(cbind(x, y))
    x <- xy[, 1:p]
    y <- xy[, p1]
    if (xout) {
        m <- cbind(x, y)
        flag <- outfun(x, plotit = FALSE)$keep
        m <- m[flag, ]
        x <- m[, 1:p]
        y <- m[, p1]
    }
    x <- as.matrix(x)
    if (SEED)
        set.seed(2) ## fixed seed so that results can be duplicated
    if (pr)
        print("Taking bootstrap samples. Please wait.")
    ## Block length 1 gives the ordinary bootstrap; for autocorrelated
    ## data use blocks of length n^(1/3) (Buhlmann and Kunsch 1994).
    block.length <- if (autocor) round(length(y)^(1/3)) else 1
    ## samp.boot.block returns indices column-wise, hence the transpose
    data <- t(samp.boot.block(length(y), nboot, block.length))
    ## bvec is a (p + 1)-by-nboot matrix: row 1 holds the bootstrap
    ## intercepts, row 2 the bootstrap slopes for the first predictor, etc.
    bvec <- apply(data, 1, regboot, x, y, regfun, ...)
    regci <- matrix(0, p1, 5)
    dimnames(regci) <- list(c("intercept", rep("X", ncol(x))),
                            c("ci.low", "ci.up", "Estimate", "S.E.", "p-value"))
    ## percentile CI indices
    ilow <- round((alpha/2) * nboot) + 1
    ihi <- nboot - round((alpha/2) * nboot)
    se <- numeric(p1)    # preallocated (the originals grew element-wise)
    pvec <- numeric(p1)
    for (i in 1:p1) {
        bsort <- sort(bvec[i, ])
        ## one-sided bootstrap p-value; doubled below to make it two-sided
        pvec[i] <- (sum(bvec[i, ] < 0) + 0.5 * sum(bvec[i, ] == 0))/nboot
        if (pvec[i] > 0.5)
            pvec[i] <- 1 - pvec[i]
        regci[i, 1] <- bsort[ilow]
        regci[i, 2] <- bsort[ihi]
        se[i] <- sqrt(var(bvec[i, ]))
    }
    regci[, 3] <- regfun(x, y)$coef   # point estimates on the full data
    regci[, 4] <- se
    regci[, 5] <- 2 * pvec
    list(regci = regci)
}
elimna <- function(m) {
    #
    # remove any rows of data having missing values
    #
    # m: matrix or data frame (coerced to a matrix).  Returns the rows of
    # m with no NAs; as in the original, dimensions are dropped when only
    # a single row survives.
    #
    m <- as.matrix(m)
    # complete.cases() is the vectorized equivalent of the original
    # row-by-row loop, and unlike `for (i in 1:nrow(m))` it is also
    # correct when m has zero rows.  (The original also had a misplaced
    # parenthesis, `sum(is.na(m[i, ]) >= 1)`, which happened to work.)
    m[complete.cases(m), ]
}
regboot <- function(isub, x, y, regfun, ...) {
    #
    # Refit the regression on one bootstrap resample.
    #
    # isub: length-n vector of row indices (a bootstrap sample drawn from
    #   the sequence 1, 2, ..., n) supplied by the caller.
    # x: matrix of predictor values; y: response; regfun: any estimator
    #   returning its intercept and slope estimates in the $coef
    #   component.
    #
    # Returns the coefficient vector fitted to the resampled data.  Used
    # as the per-sample worker by the bootstrap routines (e.g. regci).
    #
    boot.x <- matrix(x[isub, ], nrow(x), ncol(x))  # keep matrix shape even for p = 1
    boot.y <- y[isub]
    fit <- regfun(boot.x, boot.y, ...)
    fit$coef
}
tsreg <- function(x, y, xout = FALSE, outfun = out, iter = 10, varfun = pbvar,
    ...) {
    #
    # Compute Theil-Sen regression estimator
    #
    # Use Gauss-Seidel algorithm
    # when there is more than one predictor
    #
    # x: matrix of predictors; y: response vector.
    # xout: if TRUE, drop rows flagged as outliers by outfun first.
    # iter: number of Gauss-Seidel sweeps (no convergence check; always
    #   runs exactly `iter` sweeps).
    # varfun: unused here -- it belonged to the commented-out
    #   Explanatory.Power computation near the end.
    #
    # Returns list(coef, residuals, Strength.Assoc = NA,
    # Explanatory.Power = NA), where coef = (intercept, slopes).
    #
    ## get rid of R check annoyances
    out = pbvar = NULL
    x <- as.matrix(x)
    # drop rows with missing values in either x or y
    xx <- cbind(x, y)
    xx <- elimna(xx)
    x <- xx[, 1:ncol(x)]
    x <- as.matrix(x)
    y <- xx[, ncol(x) + 1]
    temp <- NA
    x <- as.matrix(x)
    if (xout) {
        x <- as.matrix(x)
        flag <- outfun(x, ...)$keep
        x <- x[flag, ]
        y <- y[flag]
        x <- as.matrix(x)
    }
    if (ncol(x) == 1) {
        # single predictor: delegate to the pairwise-slope estimator
        temp1 <- tsp1reg(x, y)
        coef <- temp1$coef
        # NOTE(review): $res relies on R's partial matching to reach the
        # $residuals component returned by tsp1reg.
        res <- temp1$res
    }
    if (ncol(x) > 1) {
        # initialize each slope from a marginal Theil-Sen fit
        for (p in 1:ncol(x)) {
            temp[p] <- tsp1reg(x[, p], y)$coef[2]
        }
        res <- y - x %*% temp
        alpha <- median(res)  # intercept = median residual
        r <- matrix(NA, ncol = ncol(x), nrow = nrow(x))
        tempold <- temp  # NOTE(review): assigned but never read
        # Gauss-Seidel sweeps: refit one slope at a time against the
        # partial residuals, then update the intercept.
        for (it in 1:iter) {
            for (p in 1:ncol(x)) {
                r[, p] <- y - x %*% temp - alpha + temp[p] * x[, p]
                temp[p] <- tsp1reg(x[, p], r[, p], plotit = FALSE)$coef[2]
            }
            alpha <- median(y - x %*% temp)
            tempold <- temp
        }
        coef <- c(alpha, temp)
        res <- y - x %*% temp - alpha
    }
    yhat <- y - res
    stre = NULL
    # e.pow <- varfun(yhat)/varfun(y)
    #if (!is.na(e.pow)) {
    #if(e.pow>=1)e.pow<-corfun(yhat,y)$cor^2
    #stre=sqrt(e.pow)
    # }
    list(coef = coef, residuals = res, Strength.Assoc = NA, Explanatory.Power = NA)
}
tsp1reg <- function(x, y, plotit = FALSE) {
    #
    # Theil-Sen simple regression (single predictor only): the slope is
    # the median of all pairwise slopes, and the intercept is chosen so
    # the fit passes through (median(x), median(y)).
    #
    xy <- elimna(matrix(c(x, y), ncol = 2))  # remove any pairs with missing values
    x <- xy[, 1]
    y <- xy[, 2]
    # Sort by x so that each unordered pair shows up exactly once among
    # the strictly positive x-differences.
    idx <- order(x)
    dy <- outer(y[idx], y[idx], "-")
    dx <- outer(x[idx], x[idx], "-")
    slope <- median(dy[dx > 0]/dx[dx > 0])
    coef <- median(y) - slope * median(x)
    names(coef) <- "Intercept"
    coef <- c(coef, slope)
    if (plotit) {
        plot(x, y, xlab = "X", ylab = "Y")
        abline(coef)
    }
    res <- y - slope * x - coef[1]
    list(coef = coef, residuals = res)
}
|
context("Omega Total Coefficient")
# Checks omega.tot() point estimates against known population values for
# congeneric (cong*), parallel (par*) and tau-equivalent (tau*) covariance
# matrices with 1, 3 and 5 factors.  Estimates are rounded to 6 decimals.
test_that('omega.tot and population covariance matrices',{
  # expect_that(x, equals(y)) is deprecated; expect_equal() is the
  # supported equivalent.  A small helper removes the repeated
  # round/as.numeric boilerplate.
  omega6 <- function(mat, factors) {
    as.numeric(round(omega.tot(mat, factors = factors)[[1]], 6))
  }
  expect_equal(omega6(cong1f, 1), .856378)
  expect_equal(omega6(par1f, 1), .888889)
  expect_equal(omega6(tau1f, 1), .839789)
  expect_equal(omega6(cong3f, 3), .826696)
  expect_equal(omega6(par3f, 3), .864865)
  expect_equal(omega6(tau3f, 3), .807449)
  expect_equal(omega6(cong5f, 5), .867708)
  expect_equal(omega6(par5f, 5), .897959)
  expect_equal(omega6(tau5f, 5), .852201)
})
}) | /inst/tests/test-omega.tot.R | no_license | JackStat/Lambda4 | R | false | false | 1,068 | r | context("Omega Total Coefficient")
# Checks omega.tot() point estimates against known population values for
# congeneric (cong*), parallel (par*) and tau-equivalent (tau*) covariance
# matrices with 1, 3 and 5 factors.  Estimates are rounded to 6 decimals.
test_that('omega.tot and population covariance matrices',{
  # expect_that(x, equals(y)) is deprecated; expect_equal() is the
  # supported equivalent.  A small helper removes the repeated
  # round/as.numeric boilerplate.
  omega6 <- function(mat, factors) {
    as.numeric(round(omega.tot(mat, factors = factors)[[1]], 6))
  }
  expect_equal(omega6(cong1f, 1), .856378)
  expect_equal(omega6(par1f, 1), .888889)
  expect_equal(omega6(tau1f, 1), .839789)
  expect_equal(omega6(cong3f, 3), .826696)
  expect_equal(omega6(par3f, 3), .864865)
  expect_equal(omega6(tau3f, 3), .807449)
  expect_equal(omega6(cong5f, 5), .867708)
  expect_equal(omega6(par5f, 5), .897959)
  expect_equal(omega6(tau5f, 5), .852201)
})
## Preprocess one 10x Genomics scRNA-seq sample: load CellRanger output,
## QC and filter cells, drop near-silent genes, normalize with
## deconvolution size factors, and save the SingleCellExperiment.
## Diagnostics go to <sample>/plots/*.pdf and <sample>/preproc.log.
suppressPackageStartupMessages({
library(DropletUtils)
library(scater)
library(scran)
library(EnsDb.Hsapiens.v86)
})
## Sample number from the command line.
## NOTE(review): position 8 of commandArgs() assumes a fixed invocation
## (Rscript with a set number of leading arguments) -- confirm against
## the calling script.
snr <- commandArgs()[8]
## 2. Setting up the data
##### Reading in a sparse matrix
sample_num = paste0("sample", snr)
data.dir = "/nobackup/16tb_b/scRNA/10x_genomics/"
sample.dir = file.path(data.dir, snr)
plot.dir <- file.path(sample.dir, "plots")
log.file <- file.path(sample.dir, "preproc.log")
dir.create(plot.dir)  # warns (harmlessly) if the directory already exists
bp <- BiocParallel::registered()[[1]]  # default registered parallel backend
sce = read10xCounts(sample.dir)
cat("Initial dims:\n", file=log.file)  # first write truncates any old log
cat(dim(sce), file=log.file, append=TRUE)
##### Annotating the rows
## Use gene symbols as row names, disambiguated against the Ensembl IDs.
rownames(sce) <- uniquifyFeatureNames(rowData(sce)$ID, rowData(sce)$Symbol)
## Chromosome of each gene; used below to find mitochondrial genes ("MT").
location <- mapIds(EnsDb.Hsapiens.v86, keys=rowData(sce)$ID,
column="SEQNAME", keytype="GENEID")
##### Testing for deviations from ambient expression
bcrank <- barcodeRanks(counts(sce))
# Only showing unique points for plotting speed.
uniq <- !duplicated(bcrank$rank)
pdf(file.path(plot.dir, "barcode_ranks.pdf"))
plot(bcrank$rank[uniq], bcrank$total[uniq], log="xy",
xlab="Rank", ylab="Total UMI count", cex.lab=1.2)
abline(h=bcrank$inflection, col="darkgreen", lty=2)
abline(h=bcrank$knee, col="dodgerblue", lty=2)
legend("bottomleft", legend=c("Inflection", "Knee"),
col=c("darkgreen", "dodgerblue"), lty=2, cex=1.2)
dev.off()
## 4. Quality control on the cells
## Per-cell QC metrics, with mitochondrial genes as a dedicated subset.
df <- perCellQCMetrics(sce, subsets=list(Mito=which(location=="MT")),
BPPARAM = bp)
pdf(file.path(plot.dir, "cellQC.pdf"))
par(mfrow=c(1,3))
# hist(sce$log10_total_counts, breaks=20, col="grey80",
# xlab="Log-total UMI count")
# hist(sce$log10_total_features_by_counts, breaks=20, col="grey80",
# xlab="Log-total number of expressed features")
# hist(sce$pct_counts_Mito, breaks=20, col="grey80",
# xlab="Proportion of reads in mitochondrial genes")
hist(df$sum/1e3, xlab="Library sizes (thousands)", main="",
breaks=20, col="grey80", ylab="Number of cells")
hist(df$detected, xlab="Number of expressed genes", main="",
breaks=20, col="grey80", ylab="Number of cells")
hist(df$subsets_Mito_percent, xlab="Mitochondrial proportion (%)",
ylab="Number of cells", breaks=20, main="", col="grey80")
dev.off()
## Outlier-based cell filters: high mitochondrial fraction (3 MADs),
## low library size and low detected-gene count (1 MAD, log scale).
high.mito <- isOutlier(df$subsets_Mito_percent, nmads = 3, type = "higher")
libsize.drop <- isOutlier(df$sum, nmads = 1, type = "lower", log = TRUE)
feature.drop <- isOutlier(df$detected, nmads = 1, type = "lower", log = TRUE)
sce <- sce[,!(high.mito | libsize.drop | feature.drop)]
## NOTE(review): this overwrites `df` (the QC metrics) with a summary
## table that is never used afterwards -- the cat() calls below recompute
## the same sums directly.
df <- data.frame(ByHighMito=sum(high.mito),
ByLibSize=sum(libsize.drop),
ByFeature=sum(feature.drop),
Remaining=ncol(sce))
cat("\n\n CellQC:\n", file=log.file, append=TRUE)
cat(paste("\n ByHighMito:", sum(high.mito)), file=log.file, append=TRUE)
cat(paste("\n ByLibSize:", sum(libsize.drop)), file=log.file, append=TRUE)
cat(paste("\n ByFeature:", sum(feature.drop)), file=log.file, append=TRUE)
cat(paste("\n Remaining:", ncol(sce)), file=log.file, append=TRUE)
cat("\n\n Dims after CellQC:\n", file=log.file, append=TRUE)
cat(dim(sce), file=log.file, append=TRUE)
## 5. Examining gene expression
ave <- calculateAverage(sce, BPPARAM = bp)
pdf(file.path(plot.dir, "average_expression_hist.pdf"))
hist(log10(ave), breaks=100, main="", col="grey",
xlab=expression(Log[10]~"average count"))
dev.off()
#remove genes that have average counts of zero, as this means that they are not expressed in any cell
rowData(sce)$AveCount <- ave
to.keep = ave > 0.001  # keep genes whose average count exceeds 0.001
sce = sce[to.keep,]
cat(paste("\n\n Exluding", sum(!to.keep),
"genes due to insufficient expression\n\n"), file=log.file, append=TRUE)
cat("Dims after low expression filter:\n", file=log.file, append=TRUE)
cat(dim(sce), file=log.file, append=TRUE)
## 6. Normalizing for cell-specific biases
## Pre-cluster cells, then compute deconvolution size factors within
## clusters (scran), and log-normalize.
clusters <- quickCluster(sce, method="igraph", min.mean=0.1, BPPARAM = bp)
sce <- computeSumFactors(sce, min.mean=0.1, cluster=clusters, BPPARAM = bp)
pdf(file.path(plot.dir, "sizeFactors.pdf"))
plot(librarySizeFactors(sce), sizeFactors(sce), pch=16,
xlab="Library size factors", ylab="Deconvolution factors", log="xy")
dev.off()
sce <- logNormCounts(sce)
saveRDS(sce, file = file.path(sample.dir, paste0("sample", snr, "_sce.rds")))
| /inst/scripts/sc_preproc.R | no_license | waldronlab/subtypeHeterogeneity | R | false | false | 4,254 | r | suppressPackageStartupMessages({
library(DropletUtils)
library(scater)
library(scran)
library(EnsDb.Hsapiens.v86)
})
## Preprocess one 10x Genomics scRNA-seq sample: load CellRanger output,
## QC and filter cells, drop near-silent genes, normalize with
## deconvolution size factors, and save the SingleCellExperiment.
## Diagnostics go to <sample>/plots/*.pdf and <sample>/preproc.log.
## Sample number from the command line.
## NOTE(review): position 8 of commandArgs() assumes a fixed invocation
## (Rscript with a set number of leading arguments) -- confirm against
## the calling script.
snr <- commandArgs()[8]
## 2. Setting up the data
##### Reading in a sparse matrix
sample_num = paste0("sample", snr)
data.dir = "/nobackup/16tb_b/scRNA/10x_genomics/"
sample.dir = file.path(data.dir, snr)
plot.dir <- file.path(sample.dir, "plots")
log.file <- file.path(sample.dir, "preproc.log")
dir.create(plot.dir)  # warns (harmlessly) if the directory already exists
bp <- BiocParallel::registered()[[1]]  # default registered parallel backend
sce = read10xCounts(sample.dir)
cat("Initial dims:\n", file=log.file)  # first write truncates any old log
cat(dim(sce), file=log.file, append=TRUE)
##### Annotating the rows
## Use gene symbols as row names, disambiguated against the Ensembl IDs.
rownames(sce) <- uniquifyFeatureNames(rowData(sce)$ID, rowData(sce)$Symbol)
## Chromosome of each gene; used below to find mitochondrial genes ("MT").
location <- mapIds(EnsDb.Hsapiens.v86, keys=rowData(sce)$ID,
column="SEQNAME", keytype="GENEID")
##### Testing for deviations from ambient expression
bcrank <- barcodeRanks(counts(sce))
# Only showing unique points for plotting speed.
uniq <- !duplicated(bcrank$rank)
pdf(file.path(plot.dir, "barcode_ranks.pdf"))
plot(bcrank$rank[uniq], bcrank$total[uniq], log="xy",
xlab="Rank", ylab="Total UMI count", cex.lab=1.2)
abline(h=bcrank$inflection, col="darkgreen", lty=2)
abline(h=bcrank$knee, col="dodgerblue", lty=2)
legend("bottomleft", legend=c("Inflection", "Knee"),
col=c("darkgreen", "dodgerblue"), lty=2, cex=1.2)
dev.off()
## 4. Quality control on the cells
## Per-cell QC metrics, with mitochondrial genes as a dedicated subset.
df <- perCellQCMetrics(sce, subsets=list(Mito=which(location=="MT")),
BPPARAM = bp)
pdf(file.path(plot.dir, "cellQC.pdf"))
par(mfrow=c(1,3))
# hist(sce$log10_total_counts, breaks=20, col="grey80",
# xlab="Log-total UMI count")
# hist(sce$log10_total_features_by_counts, breaks=20, col="grey80",
# xlab="Log-total number of expressed features")
# hist(sce$pct_counts_Mito, breaks=20, col="grey80",
# xlab="Proportion of reads in mitochondrial genes")
hist(df$sum/1e3, xlab="Library sizes (thousands)", main="",
breaks=20, col="grey80", ylab="Number of cells")
hist(df$detected, xlab="Number of expressed genes", main="",
breaks=20, col="grey80", ylab="Number of cells")
hist(df$subsets_Mito_percent, xlab="Mitochondrial proportion (%)",
ylab="Number of cells", breaks=20, main="", col="grey80")
dev.off()
## Outlier-based cell filters: high mitochondrial fraction (3 MADs),
## low library size and low detected-gene count (1 MAD, log scale).
high.mito <- isOutlier(df$subsets_Mito_percent, nmads = 3, type = "higher")
libsize.drop <- isOutlier(df$sum, nmads = 1, type = "lower", log = TRUE)
feature.drop <- isOutlier(df$detected, nmads = 1, type = "lower", log = TRUE)
sce <- sce[,!(high.mito | libsize.drop | feature.drop)]
## NOTE(review): this overwrites `df` (the QC metrics) with a summary
## table that is never used afterwards -- the cat() calls below recompute
## the same sums directly.
df <- data.frame(ByHighMito=sum(high.mito),
ByLibSize=sum(libsize.drop),
ByFeature=sum(feature.drop),
Remaining=ncol(sce))
cat("\n\n CellQC:\n", file=log.file, append=TRUE)
cat(paste("\n ByHighMito:", sum(high.mito)), file=log.file, append=TRUE)
cat(paste("\n ByLibSize:", sum(libsize.drop)), file=log.file, append=TRUE)
cat(paste("\n ByFeature:", sum(feature.drop)), file=log.file, append=TRUE)
cat(paste("\n Remaining:", ncol(sce)), file=log.file, append=TRUE)
cat("\n\n Dims after CellQC:\n", file=log.file, append=TRUE)
cat(dim(sce), file=log.file, append=TRUE)
## 5. Examining gene expression
ave <- calculateAverage(sce, BPPARAM = bp)
pdf(file.path(plot.dir, "average_expression_hist.pdf"))
hist(log10(ave), breaks=100, main="", col="grey",
xlab=expression(Log[10]~"average count"))
dev.off()
#remove genes that have average counts of zero, as this means that they are not expressed in any cell
rowData(sce)$AveCount <- ave
to.keep = ave > 0.001  # keep genes whose average count exceeds 0.001
sce = sce[to.keep,]
cat(paste("\n\n Exluding", sum(!to.keep),
"genes due to insufficient expression\n\n"), file=log.file, append=TRUE)
cat("Dims after low expression filter:\n", file=log.file, append=TRUE)
cat(dim(sce), file=log.file, append=TRUE)
## 6. Normalizing for cell-specific biases
## Pre-cluster cells, then compute deconvolution size factors within
## clusters (scran), and log-normalize.
clusters <- quickCluster(sce, method="igraph", min.mean=0.1, BPPARAM = bp)
sce <- computeSumFactors(sce, min.mean=0.1, cluster=clusters, BPPARAM = bp)
pdf(file.path(plot.dir, "sizeFactors.pdf"))
plot(librarySizeFactors(sce), sizeFactors(sce), pch=16,
xlab="Library size factors", ylab="Deconvolution factors", log="xy")
dev.off()
sce <- logNormCounts(sce)
saveRDS(sce, file = file.path(sample.dir, paste0("sample", snr, "_sce.rds")))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_dimension_reduction_utils.R
\name{plot_pred_rd}
\alias{plot_pred_rd}
\title{plot_pred_rd}
\usage{
plot_pred_rd(
model,
n.comp,
titles = c("Varianza Explicada en Predictores", "Numero de Componentes",
"Porcentaje de Varianza Explicada")
)
}
\arguments{
\item{model}{a dimension reduction model.}
\item{n.comp}{the optimum number of components.}
\item{titles}{labels on the chart}
}
\value{
echarts4r plot
}
\description{
Graph of the variance explained in the predictors according to the number of components used.
}
\author{
Ariel Arroyo <luis.ariel.arroyo@promidat.com>
}
| /man/plot_pred_rd.Rd | no_license | cran/regressoR | R | false | true | 649 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_dimension_reduction_utils.R
\name{plot_pred_rd}
\alias{plot_pred_rd}
\title{plot_pred_rd}
\usage{
plot_pred_rd(
model,
n.comp,
titles = c("Varianza Explicada en Predictores", "Numero de Componentes",
"Porcentaje de Varianza Explicada")
)
}
\arguments{
\item{model}{a dimension reduction model.}
\item{n.comp}{the optimum number of components.}
\item{titles}{labels on the chart}
}
\value{
echarts4r plot
}
\description{
Graph of the variance explained in the predictors according to the number of components used.
}
\author{
Ariel Arroyo <luis.ariel.arroyo@promidat.com>
}
|
# create the dataset
library(tidyverse)
library(HistData)
data("GaltonFamilies")
set.seed(1983)
# sample one son per family to avoid within-family correlation
galton_heights <- GaltonFamilies %>%
  filter(gender == "male") %>%
  group_by(family) %>%
  sample_n(1) %>%
  ungroup() %>%
  select(father, childHeight) %>%
  rename(son = childHeight)
# means and standard deviations
galton_heights %>%
  summarize(mean(father), sd(father), mean(son), sd(son))
# scatterplot of father and son heights
galton_heights %>%
  ggplot(aes(father, son)) +
  geom_point(alpha = 0.5)
# correlation coefficient: rho is the average product of the standardized
# values.  BUGFIX: the original line used undefined objects `x` and `y`
# and always errored; compute it from the actual columns instead.  (It
# differs from cor() by a factor of (n - 1)/n because scale() uses the
# n - 1 denominator.)
rho <- with(galton_heights, mean(scale(father) * scale(son)))
galton_heights %>% summarize(r = cor(father, son)) %>% pull(r)
# sample correlation from a single resample of 25 father-son pairs
R <- sample_n(galton_heights, 25, replace = TRUE) %>%
summarize(r = cor(father, son))
R
# Monte Carlo simulation to show distribution of sample correlation
B <- 1000
N <- 25
R <- replicate(B, {
sample_n(galton_heights, N, replace = TRUE) %>%
summarize(r = cor(father, son)) %>%
pull(r)
})
qplot(R, geom = "histogram", binwidth = 0.05, color = I("black"))
# expected value and standard error
mean(R)
sd(R)
# QQ-plot to evaluate whether N is large enough
# for the sample correlation to be approximately normal; the reference
# line (completed on the next statement) uses the theoretical SE
# sqrt((1 - r^2)/(N - 2)) evaluated at the Monte Carlo mean.
data.frame(R) %>%
ggplot(aes(sample = R)) +
stat_qq() +
geom_abline(intercept = mean(R), slope = sqrt((1-mean(R)^2)/(N-2))) | /coorelation.R | no_license | DhrubaDR/Linear_Regression | R | false | false | 1,255 | r | # create the dataset
library(tidyverse)
library(HistData)
data("GaltonFamilies")
set.seed(1983)
# sample one son per family to avoid within-family correlation
galton_heights <- GaltonFamilies %>%
  filter(gender == "male") %>%
  group_by(family) %>%
  sample_n(1) %>%
  ungroup() %>%
  select(father, childHeight) %>%
  rename(son = childHeight)
# means and standard deviations
galton_heights %>%
  summarize(mean(father), sd(father), mean(son), sd(son))
# scatterplot of father and son heights
galton_heights %>%
  ggplot(aes(father, son)) +
  geom_point(alpha = 0.5)
# correlation coefficient: rho is the average product of the standardized
# values.  BUGFIX: the original line used undefined objects `x` and `y`
# and always errored; compute it from the actual columns instead.  (It
# differs from cor() by a factor of (n - 1)/n because scale() uses the
# n - 1 denominator.)
rho <- with(galton_heights, mean(scale(father) * scale(son)))
galton_heights %>% summarize(r = cor(father, son)) %>% pull(r)
# sample correlation from a single resample of 25 father-son pairs
R <- sample_n(galton_heights, 25, replace = TRUE) %>%
summarize(r = cor(father, son))
R
# Monte Carlo simulation to show distribution of sample correlation
B <- 1000
N <- 25
R <- replicate(B, {
sample_n(galton_heights, N, replace = TRUE) %>%
summarize(r = cor(father, son)) %>%
pull(r)
})
qplot(R, geom = "histogram", binwidth = 0.05, color = I("black"))
# expected value and standard error
mean(R)
sd(R)
# QQ-plot to evaluate whether N is large enough
# for the sample correlation to be approximately normal; the reference
# line (completed on the next statement) uses the theoretical SE
# sqrt((1 - r^2)/(N - 2)) evaluated at the Monte Carlo mean.
data.frame(R) %>%
ggplot(aes(sample = R)) +
stat_qq() +
geom_abline(intercept = mean(R), slope = sqrt((1-mean(R)^2)/(N-2))) |
OTClass <-
function(XTraining, YTraining, p = 0.2, t.initial = NULL, nf = NULL, ns = NULL, info = TRUE) {
  ## Optimal Trees selection for classification: grow `t.initial`
  ## single-tree random forests, rank them by their individual error,
  ## then greedily accept a tree into the final ensemble only if adding
  ## it to the (ranked) candidate ensemble lowers the Brier score on a
  ## 5% hold-out of the training data.
  ##
  ## XTraining: predictors; YTraining: class labels.
  ## p: fraction of the ranked trees considered in the second phase.
  ## t.initial: number of candidate trees (default 1000).
  ## nf: mtry (default round(sqrt(#predictors))); ns: nodesize (default 1).
  ## Returns list("t.object" = ensemble, "selected trees" = ntree).
  if (is.null(t.initial))  # scalar default; ifelse() was an anti-pattern here
    t.initial <- 1000
  ntr <- nrow(XTraining)  # number of observations in the training data provided
  ## Sample of indices of training observations for second phase selection
  training2 <- sample(seq_len(ntr), 0.05 * ntr)
  Xtraining1 <- XTraining[-training2, ]
  Ytraining1 <- as.factor(YTraining[-training2])
  rff <- list()
  er <- numeric(t.initial)  # preallocated (was grown element-wise; `rf.use` was unused and is removed)
  if (info)
    cat("Assessing trees for individual performance..............\n")
  for (j in seq_len(t.initial)) {
    rff[[j]] <- randomForest(x = Xtraining1, y = Ytraining1, ntree = 1,
                             keep.forest = TRUE, norm.votes = FALSE,
                             mtry = ifelse(is.null(nf), round(sqrt(length(Xtraining1))), nf),
                             nodesize = ifelse(is.null(ns), 1, ns))
    er[j] <- rff[[j]]$err.rate[[1]]  # error of the single tree
  }
  rff <- rff[order(er)]  # best individual trees first
  rf.all <- rff[[1]]  # accumulates every candidate tree, in ranked order
  RF.ALL <- rff[[1]]  # accumulates only the accepted trees (the result)
  if (info)
    cat("Assessing trees for collective performance..............\n")
  for (k in 1:(p * length(rff) - 1)) {
    ## Brier score of the first k candidates vs the first k + 1 on the
    ## hold-out; accept tree k + 1 into RF.ALL only if it helps.
    ## NOTE(review): as in the original, as.numeric() of the hold-out
    ## labels is compared to the class-2 probability, which assumes a
    ## binary problem with a compatible numeric coding of YTraining.
    p1 <- predict(rf.all, XTraining[training2, ], type = 'prob')[, 2]
    bs1 <- sum((as.numeric(YTraining[training2]) - as.vector(p1))^2)
    rf.all <- combine(rf.all, rff[[k + 1]])
    p2 <- predict(rf.all, XTraining[training2, ], type = 'prob')[, 2]
    bs2 <- sum((as.numeric(YTraining[training2]) - as.vector(p2))^2)
    if (bs1 > bs2)
      RF.ALL <- combine(RF.ALL, rff[[k + 1]])
  }
  if (info)
    cat("Number of trees selected............................ = ", RF.ALL$ntree, "\n")
  results <- list("t.object" = RF.ALL, "selected trees" = RF.ALL$ntree)
  return(results)
}
| /OTE/R/OTClass.R | no_license | ingted/R-Examples | R | false | false | 1,873 | r | OTClass <-
function(XTraining, YTraining, p = 0.2, t.initial = NULL, nf = NULL, ns = NULL, info = TRUE) {
  ## Optimal Trees selection for classification: grow `t.initial`
  ## single-tree random forests, rank them by their individual error,
  ## then greedily accept a tree into the final ensemble only if adding
  ## it to the (ranked) candidate ensemble lowers the Brier score on a
  ## 5% hold-out of the training data.
  ##
  ## XTraining: predictors; YTraining: class labels.
  ## p: fraction of the ranked trees considered in the second phase.
  ## t.initial: number of candidate trees (default 1000).
  ## nf: mtry (default round(sqrt(#predictors))); ns: nodesize (default 1).
  ## Returns list("t.object" = ensemble, "selected trees" = ntree).
  if (is.null(t.initial))  # scalar default; ifelse() was an anti-pattern here
    t.initial <- 1000
  ntr <- nrow(XTraining)  # number of observations in the training data provided
  ## Sample of indices of training observations for second phase selection
  training2 <- sample(seq_len(ntr), 0.05 * ntr)
  Xtraining1 <- XTraining[-training2, ]
  Ytraining1 <- as.factor(YTraining[-training2])
  rff <- list()
  er <- numeric(t.initial)  # preallocated (was grown element-wise; `rf.use` was unused and is removed)
  if (info)
    cat("Assessing trees for individual performance..............\n")
  for (j in seq_len(t.initial)) {
    rff[[j]] <- randomForest(x = Xtraining1, y = Ytraining1, ntree = 1,
                             keep.forest = TRUE, norm.votes = FALSE,
                             mtry = ifelse(is.null(nf), round(sqrt(length(Xtraining1))), nf),
                             nodesize = ifelse(is.null(ns), 1, ns))
    er[j] <- rff[[j]]$err.rate[[1]]  # error of the single tree
  }
  rff <- rff[order(er)]  # best individual trees first
  rf.all <- rff[[1]]  # accumulates every candidate tree, in ranked order
  RF.ALL <- rff[[1]]  # accumulates only the accepted trees (the result)
  if (info)
    cat("Assessing trees for collective performance..............\n")
  for (k in 1:(p * length(rff) - 1)) {
    ## Brier score of the first k candidates vs the first k + 1 on the
    ## hold-out; accept tree k + 1 into RF.ALL only if it helps.
    ## NOTE(review): as in the original, as.numeric() of the hold-out
    ## labels is compared to the class-2 probability, which assumes a
    ## binary problem with a compatible numeric coding of YTraining.
    p1 <- predict(rf.all, XTraining[training2, ], type = 'prob')[, 2]
    bs1 <- sum((as.numeric(YTraining[training2]) - as.vector(p1))^2)
    rf.all <- combine(rf.all, rff[[k + 1]])
    p2 <- predict(rf.all, XTraining[training2, ], type = 'prob')[, 2]
    bs2 <- sum((as.numeric(YTraining[training2]) - as.vector(p2))^2)
    if (bs1 > bs2)
      RF.ALL <- combine(RF.ALL, rff[[k + 1]])
  }
  if (info)
    cat("Number of trees selected............................ = ", RF.ALL$ntree, "\n")
  results <- list("t.object" = RF.ALL, "selected trees" = RF.ALL$ntree)
  return(results)
}
|
source("workspace.R")
transition.calc <- function(tree, continuous, discrete, simmap.model="ER", simmap.nsim=1000, simmap.pi="estimated", anc.ML.maxit=100000){
  # Simulate stochastic character maps of `discrete` on `tree`, collect
  # every state transition (with the node it leads to and its age), and
  # reconstruct ancestral values of `continuous` at the internal nodes.
  #
  # tree: a "phylo" object; continuous/discrete: named vectors matching
  #   the tip labels.  simmap.*: forwarded to phytools::make.simmap.
  # NOTE(review): anc.ML.maxit is accepted but never used in this body.
  #
  # Returns an object of class "transition.calc":
  #   $transitions: data.frame(from, to, end.node, age, first, transition)
  #   $cont.sim: the fastAnc() ancestral reconstruction (with CIs).
  #Assertions and argument checking
  if(!inherits(tree, "phylo")) stop("Error: '", deparse(substitute(simmap)), "' must be of class 'phylo'")
  if(!is.factor(discrete)) stop("Error: '", deparse(substitute(discrete)), "' must be a factor; preferably a discrete character!")
  if(is.null(names(discrete))) stop("Error: '", deparse(substitute(discrete)), "' must be named")
  if(!is.numeric(continuous)) stop("Error: '", deparse(substitute(continuous)), "' must be a numeric; preferably a continuous character!")
  if(is.null(names(continuous))) stop("Error: '", deparse(substitute(continuous)), "' must be named")
  if(!identical(sort(tree$tip.label), sort(names(discrete)))) stop("Error: mismatch between'", deparse(substitute(discrete)), "' and phylogeny")
  if(!identical(sort(tree$tip.label), sort(names(continuous)))) stop("Error: mismatch between'", deparse(substitute(continuous)), "' and phylogeny")
  #Make simmap
  simmap <- make.simmap(tree, discrete, model=simmap.model, nsim=simmap.nsim, pi=simmap.pi)
  # lineage-through-time object of the first map; only its node ages
  # ($times) are used below to date the transitions
  t.ltt <- ltt(simmap[[1]], plot=FALSE, gamma=FALSE)
  #Find transitions (can be multiple per branch); pre-allocation could make too long a data.frame (CHECK!)
  states <- levels(discrete)
  tmp <- rep(NA, length(simmap)*length(simmap[[1]]$maps))
  transitions <- data.frame(from=tmp, to=tmp, end.node=tmp)
  x <- 1  # next free row in `transitions`
  for(i in seq_along(simmap)){
    for(j in seq_along(simmap[[i]]$maps)){
      # Branch j ends in an internal node and carries more than one
      # state segment: record each consecutive state change.
      # NOTE(review): branches ending in tips never reach this arm, so
      # any transitions on terminal branches fall into the else branch
      # and are recorded as a single from == to row -- confirm intended.
      if(tree$edge[j,2]>length(tree$tip.label) & length(simmap[[i]]$maps[[j]]) > 1){
        for(k in seq(from=1, to=length(simmap[[i]]$maps[[j]])-1)){
          transitions$from[x] <- states[which(states==names(simmap[[i]]$maps[[j]])[k])]
          transitions$to[x] <- states[which(states==names(simmap[[i]]$maps[[j]])[k+1])]
          transitions$end.node[x] <- tree$edge[j,2]
          x <- x + 1
          # grow the table when the cursor reaches the end
          if(x >= nrow(transitions))
            transitions <- rbind(transitions, data.frame(from=tmp, to=tmp, end.node=tmp))
        }
      } else {
        # no transition on this branch: record a from == to placeholder
        transitions$from[x] <- states[which(states==names(simmap[[i]]$maps[[j]])[1])]
        transitions$to[x] <- states[which(states==names(simmap[[i]]$maps[[j]])[1])]
        transitions$end.node[x] <- tree$edge[j,2]
        x <- x + 1
        # NOTE(review): this arm grows on `x == nrow(...)` while the arm
        # above grows on `x >= nrow(...)` -- the inconsistency looks
        # unintentional, though both keep x within bounds.
        if(x == nrow(transitions))
          transitions <- rbind(transitions, data.frame(from=tmp, to=tmp, end.node=tmp))
      }
    }
  }
  # drop the unused preallocated rows
  transitions <- transitions[!is.na(transitions$from),]
  #Age the transitions
  transitions$age <- max(t.ltt$times) - t.ltt$times[match(transitions$end.node, names(t.ltt$times))]
  transitions$first <- c(TRUE, rep(FALSE, nrow(transitions)-1))
  #Reconstruct continuous state
  anc.continuous <- fastAnc(tree, continuous, CI=TRUE)
  #Get the modal reconstructed nodal value and plot against that
  transitions$transition <- with(transitions, paste(from, to, sep="_"))
  #Prepare output and return
  output <- list(transitions=transitions, cont.sim=anc.continuous)
  class(output) <- "transition.calc"
  return(output)
}
#Silly models!
c.data$data$evergreen <- ifelse(c.data$data$phenology == "EV", 1, 0)
c.data$data$interac <- with(c.data$data, log(vesselSize) * pole.lim)
model <- with(c.data, transition.calc(phy, setNames(data$interac, rownames(data)), setNames(factor(data$evergreen), rownames(data)), simmap.nsim=1000))
counts <- with(model$transitions, table(transition, end.node))
modal.trans <- setNames(rownames(counts)[unlist(apply(counts, 2, function(x) which(max(x) == x)[1]))], unique(model$transitions$end.node))
modal.trans.frac <- setNames(numeric(length(modal.trans)), names(modal.trans))
for(i in seq_along(modal.trans.frac))
modal.trans.frac[i] <- counts[which(rownames(counts)==modal.trans[i]),i] / sum(counts[,i])
modal.trans <- modal.trans[names(modal.trans) %in% names(model$cont.sim$ace)]
t <- model$cont.sim$ace[names(model$cont.sim$ace) %in% names(modal.trans)]
identical(names(modal.trans), names(t))
save.image("trans_wip.RData")
| /transition_regression.R | no_license | Zanne-Lab/vessel_extremes | R | false | false | 4,189 | r | source("workspace.R")
#' Summarise discrete-character transitions from stochastic character mapping,
#' together with an ancestral reconstruction of a continuous character.
#'
#' tree          an object of class "phylo".
#' continuous    named numeric vector of tip values; reconstructed with fastAnc.
#' discrete      named factor of tip states; mapped with phytools::make.simmap.
#' simmap.model, simmap.nsim, simmap.pi  passed straight to make.simmap.
#' anc.ML.maxit  currently unused; retained for backward compatibility.
#'
#' Returns a list of class "transition.calc" with elements:
#'   transitions  data.frame with one row per recorded state change (or one
#'                self-row per unchanged branch) across all simulations:
#'                from/to states, end node, node age, and a "from_to" label.
#'   cont.sim     the fastAnc reconstruction (with CIs).
transition.calc <- function(tree, continuous, discrete, simmap.model="ER", simmap.nsim=1000, simmap.pi="estimated", anc.ML.maxit=100000){
  # --- Assertions and argument checking ---
  # BUGFIX: the first message previously reported 'simmap' (a variable that
  # does not exist at this point) instead of the offending 'tree' argument.
  if(!inherits(tree, "phylo")) stop("Error: '", deparse(substitute(tree)), "' must be of class 'phylo'")
  if(!is.factor(discrete)) stop("Error: '", deparse(substitute(discrete)), "' must be a factor; preferably a discrete character!")
  if(is.null(names(discrete))) stop("Error: '", deparse(substitute(discrete)), "' must be named")
  if(!is.numeric(continuous)) stop("Error: '", deparse(substitute(continuous)), "' must be a numeric; preferably a continuous character!")
  if(is.null(names(continuous))) stop("Error: '", deparse(substitute(continuous)), "' must be named")
  if(!identical(sort(tree$tip.label), sort(names(discrete)))) stop("Error: mismatch between'", deparse(substitute(discrete)), "' and phylogeny")
  if(!identical(sort(tree$tip.label), sort(names(continuous)))) stop("Error: mismatch between'", deparse(substitute(continuous)), "' and phylogeny")
  # --- Stochastic character maps ---
  simmap <- make.simmap(tree, discrete, model=simmap.model, nsim=simmap.nsim, pi=simmap.pi)
  t.ltt <- ltt(simmap[[1]], plot=FALSE, gamma=FALSE)
  # --- Collect transitions (can be multiple per branch) ---
  # Pre-allocate one row per branch per simulation; the table is grown in
  # chunks whenever a simulation yields more transitions than that.
  tmp <- rep(NA, length(simmap)*length(simmap[[1]]$maps))
  transitions <- data.frame(from=tmp, to=tmp, end.node=tmp)
  x <- 1
  for(i in seq_along(simmap)){
    for(j in seq_along(simmap[[i]]$maps)){
      # Names of the per-branch map segments are the states along branch j,
      # so consecutive segment names give from->to pairs directly (the old
      # states[which(states == name)] lookup was an identity and is removed).
      seg.states <- names(simmap[[i]]$maps[[j]])
      # Scalar condition, so use '&&' (was elementwise '&').
      if(tree$edge[j,2] > length(tree$tip.label) && length(seg.states) > 1){
        # Internal branch with at least one state change: record each
        # consecutive pair of map segments.
        for(k in seq_len(length(seg.states) - 1)){
          transitions$from[x] <- seg.states[k]
          transitions$to[x] <- seg.states[k+1]
          transitions$end.node[x] <- tree$edge[j,2]
          x <- x + 1
          # Grow before running off the end (the two branches previously used
          # '>=' and '==' inconsistently; now consistently '>=').
          if(x >= nrow(transitions))
            transitions <- rbind(transitions, data.frame(from=tmp, to=tmp, end.node=tmp))
        }
      } else {
        # Terminal branch, or no state change: record the branch's first
        # state as a self "transition" so every branch is represented.
        transitions$from[x] <- seg.states[1]
        transitions$to[x] <- seg.states[1]
        transitions$end.node[x] <- tree$edge[j,2]
        x <- x + 1
        if(x >= nrow(transitions))
          transitions <- rbind(transitions, data.frame(from=tmp, to=tmp, end.node=tmp))
      }
    }
  }
  # Drop the unused pre-allocated rows.
  transitions <- transitions[!is.na(transitions$from),]
  # --- Age the transitions: time before present of each transition's end node ---
  transitions$age <- max(t.ltt$times) - t.ltt$times[match(transitions$end.node, names(t.ltt$times))]
  transitions$first <- c(TRUE, rep(FALSE, nrow(transitions)-1))
  # --- Ancestral reconstruction of the continuous character ---
  anc.continuous <- fastAnc(tree, continuous, CI=TRUE)
  # Label each transition for tabulation downstream.
  transitions$transition <- with(transitions, paste(from, to, sep="_"))
  # --- Package up and return ---
  output <- list(transitions=transitions, cont.sim=anc.continuous)
  class(output) <- "transition.calc"
  return(output)
}
#Silly models!
# Exploratory (WIP) analysis relating evergreen/deciduous state shifts to a
# vessel-size x pole-limitation interaction.
# NOTE(review): relies on 'c.data' (an object with $phy and $data components)
# created elsewhere -- presumably by the sourced workspace; confirm.
# Binary recode: 1 = evergreen ("EV"), 0 = otherwise.
c.data$data$evergreen <- ifelse(c.data$data$phenology == "EV", 1, 0)
# Continuous predictor: log vessel size scaled by pole limitation.
c.data$data$interac <- with(c.data$data, log(vesselSize) * pole.lim)
# Run the transition/ancestral-state machinery defined above (1000 simmaps).
model <- with(c.data, transition.calc(phy, setNames(data$interac, rownames(data)), setNames(factor(data$evergreen), rownames(data)), simmap.nsim=1000))
# Cross-tabulate transition type by the node each transition ends at.
counts <- with(model$transitions, table(transition, end.node))
# Most frequent transition type per end node (ties broken by first match).
# NOTE(review): the names come from unique(end.node), which assumes the same
# ordering as the table's columns -- table() sorts its levels, so verify.
modal.trans <- setNames(rownames(counts)[unlist(apply(counts, 2, function(x) which(max(x) == x)[1]))], unique(model$transitions$end.node))
# Fraction of simulations supporting the modal transition at each node.
modal.trans.frac <- setNames(numeric(length(modal.trans)), names(modal.trans))
for(i in seq_along(modal.trans.frac))
modal.trans.frac[i] <- counts[which(rownames(counts)==modal.trans[i]),i] / sum(counts[,i])
# Keep only nodes that also have a continuous-trait ancestral estimate.
modal.trans <- modal.trans[names(modal.trans) %in% names(model$cont.sim$ace)]
# NOTE(review): 't' shadows base::t(); harmless here but easy to trip over.
t <- model$cont.sim$ace[names(model$cont.sim$ace) %in% names(modal.trans)]
# Sanity check that the two vectors line up (printed, not asserted).
identical(names(modal.trans), names(t))
# Snapshot the whole workspace (work in progress).
save.image("trans_wip.RData")
|
##!/ifs/home/c2b2/ac_lab/jh3283/tools/R/R-3-02/bin/Rscript
#Author: Jing He
#input: <file1: tumor methlation matrix> <file2: normal methylation matrix>
#output: <file: tumor sample with methylation relative to population mean>
#Description: this file was created for projFocus, ceRNA, used in step1 to eliminate tumor samples
#TODO:
# Detect the platform so the same script runs both locally (macOS, with the
# cluster filesystem mounted under /Volumes) and on the Linux cluster itself;
# only the path prefixes differ between the two branches.
sysInfo = Sys.info()
if(sysInfo['sysname']=="Darwin" ){
# Shared project helper functions.
source("/Volumes/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R")
setwd("/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/02022014/meth/")
rootd = "/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/"
# Output directory for figures.
figd = "/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/report/topDown_02042014/fig/"
}else if(sysInfo['sysname']=="Linux" ){
source("/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R")
print("working from Linux")
setwd("/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/02022014/meth")
# NOTE(review): on Linux 'rootd' points at /ifs/scratch/... rather than the
# /ifs/data/... project root used on Darwin -- confirm this is intentional.
rootd = "/ifs/scratch/c2b2/ac_lab/jh3283/"
figd = "/ifs/data/c2b2/ac_lab/jh3283/projFocus/report/topDown_02042014/fig/"
}
#
# args = getArgs()
# usage = "Usage: Rscript bridegCeRAN.r --file <gene.list> "
# example = "Example: /ifs/home/c2b2/ac_lab/jh3283/tools/R/R_current/bin/Rscript /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/filterGrplasso.r --file grplasso_coeff --cut 0.05 --out gcGenes_GeneVarNet"
# if(length(args) < 3 || is.null(args)){
# print(usage)
# print(example)
# print(args)
# stop("Input parameter error!")
# }else{
# print(args)
# }
#
###-----functions------
# Impute missing values in a vector with the mean of its observed values.
# Names and non-missing entries are left untouched; a vector with no NAs is
# returned unchanged.
fillNA <- function(var){
  missing.idx <- which(is.na(var))
  if (length(missing.idx) > 0) {
    var[missing.idx] <- mean(var, na.rm = TRUE)
  }
  var
}
# Re-anchor the working directory (effectively a no-op after the setwd in the
# platform block above; kept as-is).
setwd(system("pwd",intern=T))
cwd = getwd()
# filepref = args['file']
# cutoff = as.numeric(args['cut'])
# output = paste(args['out'],".rda",sep="")
# print(paste("current working directory:",cwd))
# source("http://bioconductor.org/biocLite.R")
# biocLite("charm")
# Dependencies: 'sva' provides ComBat (used below); 'charm' for methylation
# utilities. NOTE(review): installing with biocLite() at run time on every
# execution is fragile; consider moving installation out of the script.
source("http://bioconductor.org/biocLite.R")
biocLite("sva")
require('sva')
library(charm)
# Level-3 methylation matrices (probes x samples); the first column holds the
# probe barcode and becomes the rownames below.
normal = "brca_methNormal_level3_02072014.mat"
tumor = "brca_methTumor_level3_02072014.mat"
dataT = read.delim(tumor,header=T)
row.names(dataT) = dataT$barcode
dataT = apply(dataT[,-1],c(1,2),as.numeric)
# Drop the last column -- presumably a trailing/empty field; TODO confirm.
dataT = dataT[,-ncol(dataT)]
dataN = read.delim(normal,header=T)
row.names(dataN) = dataN$barcode
dataN = apply(dataN[,-1],c(1,2),as.numeric)
dataN = dataN[,-ncol(dataN)]
# dataT = apply(dataT, c(1,2),betaToM)
# dataN = apply(dataN, c(1,2),betaToM)
# NOTE(review): plotValue() is defined near the bottom of this file, so this
# call only works if the function is already in the workspace.
plotValue(dataT,dataN)
##remove outlier normal samples
# Per-sample mean methylation; keep normal samples whose z-score is below 2.
# NOTE(review): this filter is one-sided -- only high outliers are dropped.
meansN = colSums(dataN)/nrow(dataN)
dataN = dataN[,which((meansN - mean(meansN)) / sd(meansN) < 2)]
dataAll = cbind(dataT,dataN)
# Impute missing probe values with the per-sample mean (fillNA above).
dataAll = apply(dataAll,2,fillNA)
##----normalization
# Batch = array platform: the first 46 columns are assumed to come from 27k
# arrays, the rest from 450k (hard-coded count; TODO confirm against inputs).
batch = as.matrix(c(rep("methylation27k",46),rep("methylation450k",(ncol(dataAll)-46))))
# Model matrix preserving the tumor/normal contrast during batch correction.
mod = model.matrix(~as.factor(c(rep("tumor",ncol(dataT)),rep("normal",ncol(dataN)) ) ))
# Probes with all-zero values across the 27k columns (absent from 27k arrays).
indexNA27k = which(rowSums(dataT[,1:46])==0,arr.ind=T)
# ComBat (sva) batch-corrects across platforms while keeping the
# tumor/normal effect encoded in 'mod'.
dataAll.norm = ComBat(dataAll[-indexNA27k,],batch=batch,mod=mod)
# Convert a methylation beta-value to an M-value: M = log2(beta / (1 - beta)).
# Out-of-range inputs are clamped so the transform never returns +/-Inf:
# beta >= 1 maps to 10 and beta <= 0 maps to -10.
# 'beta' may be anything coercible to numeric; the result is numeric and the
# function is fully vectorised.
betaToM = function(beta){
  # Coerce once and use the coerced value 'b' throughout. (The original mixed
  # 'b' and the raw 'beta' inside the ifelse, and carried unused 'max'/'min'
  # locals; both issues are fixed here.)
  b <- as.numeric(beta)
  ifelse(b >= 1, 10, ifelse(b <= 0, -10, log2(b / (1 - b))))
}
# Convert batch-corrected beta-values to M-values.
# NOTE(review): betaToM is vectorised, so betaToM(dataAll.norm) would give the
# same result without the slow element-wise apply().
dataMAll.norm = apply(dataAll.norm,c(1,2),betaToM)
# Before/after diagnostic boxplots written to the figure directory.
# NOTE(review): the 1:117 / 118:205 splits hard-code the tumor and normal
# sample counts after outlier removal -- confirm they match the data.
pdf(paste(figd,"meth_level3_norm_02072014.pdf",sep=""))
plotValue(dataAll[,1:117],dataAll[,118:205])
plotValue(dataMAll.norm[,1:117],dataMAll.norm[,118:205])
dev.off()
###-------------statistical testing
# Split the normalised M-value matrix back into tumor and normal blocks.
dataT.norm = dataMAll.norm[,1:117]
dataN.norm = dataMAll.norm[,118:205]
# Flag differentially methylated probes per tumour sample.
# For each probe (row), the mean across the normal samples serves as the
# reference; a tumour value is flagged (1) when its z-score -- computed
# against the standard deviation of those per-probe reference means -- exceeds
# 'zcut', and 0 otherwise.
# Inputs are numeric matrices (probes x samples); the result is a 0/1 matrix
# with the same dimensions and dimnames as 'dataT.norm'.
genMethMat <- function(dataT.norm, dataN.norm, zcut = 2.5){
  ref.means <- rowSums(dataN.norm) / ncol(dataN.norm)
  ref.sd <- sd(ref.means)
  # Column-major recycling subtracts ref.means from every tumour column at once.
  z.scores <- (dataT.norm - ref.means) / ref.sd
  flagged <- matrix(as.numeric(z.scores > zcut),
                    nrow = nrow(dataT.norm), ncol = ncol(dataT.norm),
                    dimnames = list(rownames(dataT.norm), colnames(dataT.norm)))
  flagged
}
# Binary differential-methylation matrix: 1 where a tumour probe's z-score
# against the normal reference exceeds 2.5 (see genMethMat above).
result = genMethMat(dataT.norm,dataN.norm,zcut = 2.5)
# barplot(rowSums(result),width=4,col="blue")
# hist(table(rowSums(result)))
# image(result)
# Write the 0/1 matrix next to the input, e.g. "<tumor file>_diffMeth.mat".
out = paste(tumor,"diffMeth.mat",sep="_")
write.table(result,out,sep="\t",quote=F)
### descriptive examining data
# Draw a 2x2 panel of per-sample boxplots: all tumor samples, all normal
# samples, then the tumor samples split by array platform (27k vs 450k).
# Purely a side-effect (plotting) function; no return value of interest.
# NOTE(review): dataT[,-1] / dataN[,-1] drop the first column -- presumably a
# leftover ID column; confirm this is still needed after the earlier cleanup.
plotValue = function(dataT, dataN){
  layout(matrix(1:4, nrow = 2, byrow = TRUE))
  boxplot(dataT[,-1], main = "tumor meth", xlab = "sample")
  boxplot(dataN[,-1], main = " normal meth", xlab = "sample")
  # First 'cut' columns are the 27k-array samples; the rest are 450k.
  # (Previously 'cut' was assigned but never used and the split was
  # hard-coded as 47/48; the behaviour is unchanged.)
  cut <- 47
  boxplot(dataT[, 1:cut], main = "tumor meth27", xlab = "sample")
  boxplot(dataT[, (cut + 1):ncol(dataT)], main = "tumor meth450", xlab = "sample")
}
# Clustered heatmaps of tumor and normal methylation matrices, with the tumor
# matrix also split by array platform. Requires the 'gplots' package.
# NOTE(review): 'mycol' is local to this function, yet heatmap.2 calls near
# the end of the script reference 'mycol' too -- those will fail unless it is
# also defined globally. The 1:47 / 48:96 column splits are hard-coded
# platform boundaries; confirm against the actual sample counts.
plotGene = function(dataT,dataN){
require('gplots')
mycol = bluered(256)
# na.omit drops probes with any missing value before clustering.
heatmap.2(na.omit(dataT), trace= "none", main="tumor meth", col=mycol)
heatmap.2(na.omit(dataT[,1:47]), trace= "none", main="tumor meth27", col=mycol)
heatmap.2(na.omit(dataT[,48:96]), trace= "none", main="tumor meth450", col=mycol)
heatmap.2(na.omit(dataN), trace= "none", main="normal meth", col=mycol)
}
# Diagnostic plots on the raw beta-value matrices.
plotValue(dataT,dataN)
# NOTE(review): dataMT/dataMN are never created in this script (the betaToM
# conversions above are commented out), so this call fails as written.
plotValue(dataMT,dataMN)
# Overlay tumor (red) and normal (blue) density curves for four consecutive
# probes starting at row 'n', arranged in a 2x2 panel.
# NOTE(review): lines() silently ignores its 'main' argument, and the overlay
# assumes row i of dataT and dataN refer to the same probe -- confirm.
plotDensity = function(dataT,dataN, n =5){
layout(matrix(1:4,nrow=2,byrow=T))
for (i in n:(n+3)){
print(plot(density(dataT[i,]),main = rownames(dataT)[i],col="red"))
print(lines(density(dataN[i,]),main = rownames(dataN)[i],col="blue"))
}
}
# ---- Exploratory normalisation comparisons (scratch code) ----
plotDensity(dataT,dataN)
# limma offers alternative between-array normalisations to compare to ComBat.
require(limma)
dataAll.normMedian = normalizeMedianValues(dataAll)
boxplot(dataAll.norm)
# NOTE(review): 'mycol' is local to plotGene(), and 'normQ', 'dataMT',
# 'dataMN', 'dataMT.scale' and 'dataMN.scale' are not defined anywhere in
# this script -- the lines using them will fail unless those objects already
# exist in the workspace.
heatmap.2(na.omit(dataAll.norm), trace= "none", main="All meth", col=mycol)
dataT.norm = normQ(dataT)
dataN.norm = normQ(dataN)
plotValue(dataT.norm,dataN.norm)
plotValue(dataMT,dataMN)
# NOTE(review): these 1:96 / 97:213 splits disagree with the 1:117 / 118:205
# splits used earlier -- confirm which sample counts are correct.
plotDensity(dataAll.norm[,1:96],dataAll.norm[,97:213])
plotDensity(dataAll.normMedian[,1:96],dataAll.normMedian[,97:213])
dataAll.normBtwArray = normalizeBetweenArrays(dataAll)
plotDensity(dataAll.normBtwArray[,1:96],dataAll.normBtwArray[,97:213])
plotValue(dataMT.scale,dataMN.scale)
| /projFocus/ceRNA/step1-5_getDiffMethy.r | no_license | yuankaihuo/scripts | R | false | false | 5,805 | r | ##!/ifs/home/c2b2/ac_lab/jh3283/tools/R/R-3-02/bin/Rscript
#Author: Jing He
#input: <file1: tumor methlation matrix> <file2: normal methylation matrix>
#output: <file: tumor sample with methylation relative to population mean>
#Description: this file was created for projFocus, ceRNA, used in step1 to eliminate tumor samples
#TODO:
sysInfo = Sys.info()
if(sysInfo['sysname']=="Darwin" ){
source("/Volumes/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R")
setwd("/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/02022014/meth/")
rootd = "/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/"
figd = "/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/report/topDown_02042014/fig/"
}else if(sysInfo['sysname']=="Linux" ){
source("/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R")
print("working from Linux")
setwd("/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/02022014/meth")
rootd = "/ifs/scratch/c2b2/ac_lab/jh3283/"
figd = "/ifs/data/c2b2/ac_lab/jh3283/projFocus/report/topDown_02042014/fig/"
}
#
# args = getArgs()
# usage = "Usage: Rscript bridegCeRAN.r --file <gene.list> "
# example = "Example: /ifs/home/c2b2/ac_lab/jh3283/tools/R/R_current/bin/Rscript /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/filterGrplasso.r --file grplasso_coeff --cut 0.05 --out gcGenes_GeneVarNet"
# if(length(args) < 3 || is.null(args)){
# print(usage)
# print(example)
# print(args)
# stop("Input parameter error!")
# }else{
# print(args)
# }
#
###-----functions------
fillNA = function(var){
var[is.na(var)] <- mean(var, na.rm = TRUE)
return(var)
}
setwd(system("pwd",intern=T))
cwd = getwd()
# filepref = args['file']
# cutoff = as.numeric(args['cut'])
# output = paste(args['out'],".rda",sep="")
# print(paste("current working directory:",cwd))
# source("http://bioconductor.org/biocLite.R")
# biocLite("charm")
source("http://bioconductor.org/biocLite.R")
biocLite("sva")
require('sva')
library(charm)
normal = "brca_methNormal_level3_02072014.mat"
tumor = "brca_methTumor_level3_02072014.mat"
dataT = read.delim(tumor,header=T)
row.names(dataT) = dataT$barcode
dataT = apply(dataT[,-1],c(1,2),as.numeric)
dataT = dataT[,-ncol(dataT)]
dataN = read.delim(normal,header=T)
row.names(dataN) = dataN$barcode
dataN = apply(dataN[,-1],c(1,2),as.numeric)
dataN = dataN[,-ncol(dataN)]
# dataT = apply(dataT, c(1,2),betaToM)
# dataN = apply(dataN, c(1,2),betaToM)
plotValue(dataT,dataN)
##remove outlier normal samples
meansN = colSums(dataN)/nrow(dataN)
dataN = dataN[,which((meansN - mean(meansN)) / sd(meansN) < 2)]
dataAll = cbind(dataT,dataN)
dataAll = apply(dataAll,2,fillNA)
##----normalization
batch = as.matrix(c(rep("methylation27k",46),rep("methylation450k",(ncol(dataAll)-46))))
mod = model.matrix(~as.factor(c(rep("tumor",ncol(dataT)),rep("normal",ncol(dataN)) ) ))
indexNA27k = which(rowSums(dataT[,1:46])==0,arr.ind=T)
dataAll.norm = ComBat(dataAll[-indexNA27k,],batch=batch,mod=mod)
betaToM = function(beta){
max = 50
min = -50
b = as.numeric(beta)
m = ifelse(b >= 1,10,ifelse(beta <= 0,-10,log2(beta /(1 - beta)) ) )
return(m)
}
dataMAll.norm = apply(dataAll.norm,c(1,2),betaToM)
pdf(paste(figd,"meth_level3_norm_02072014.pdf",sep=""))
plotValue(dataAll[,1:117],dataAll[,118:205])
plotValue(dataMAll.norm[,1:117],dataMAll.norm[,118:205])
dev.off()
###-------------statistical testing
dataT.norm = dataMAll.norm[,1:117]
dataN.norm = dataMAll.norm[,118:205]
genMethMat = function(dataT.norm, dataN.norm, zcut = 2.5){
meansNormal = rowSums(dataN.norm) /ncol(dataN.norm)
dataT.res = matrix(0,ncol=ncol(dataT.norm),nrow=nrow(dataT.norm))
colnames(dataT.res) = colnames(dataT.norm)
rownames(dataT.res) = rownames(dataT.norm)
for (n in 1:ncol(dataT.norm)){
dataT.res[,n] = ifelse((dataT.norm[,n] - meansNormal)/sd(meansNormal) > zcut,1,0)
}
return(dataT.res)
}
result = genMethMat(dataT.norm,dataN.norm,zcut = 2.5)
# barplot(rowSums(result),width=4,col="blue")
# hist(table(rowSums(result)))
# image(result)
out = paste(tumor,"diffMeth.mat",sep="_")
write.table(result,out,sep="\t",quote=F)
### descriptive examing data
plotValue = function(dataT,dataN){
layout(matrix(1:4,nrow=2,byrow=T))
boxplot(dataT[,-1], main= "tumor meth" , xlab= "sample")
boxplot(dataN[,-1], main= " normal meth",xlab= "sample")
cut = 47
boxplot(dataT[,1:47],main="tumor meth27",xlab= "sample")
boxplot(dataT[,48:ncol(dataT)],main="tumor meth450",xlab= "sample")
}
plotGene = function(dataT,dataN){
require('gplots')
mycol = bluered(256)
heatmap.2(na.omit(dataT), trace= "none", main="tumor meth", col=mycol)
heatmap.2(na.omit(dataT[,1:47]), trace= "none", main="tumor meth27", col=mycol)
heatmap.2(na.omit(dataT[,48:96]), trace= "none", main="tumor meth450", col=mycol)
heatmap.2(na.omit(dataN), trace= "none", main="normal meth", col=mycol)
}
plotValue(dataT,dataN)
plotValue(dataMT,dataMN)
plotDensity = function(dataT,dataN, n =5){
layout(matrix(1:4,nrow=2,byrow=T))
for (i in n:(n+3)){
print(plot(density(dataT[i,]),main = rownames(dataT)[i],col="red"))
print(lines(density(dataN[i,]),main = rownames(dataN)[i],col="blue"))
}
}
plotDensity(dataT,dataN)
require(limma)
dataAll.normMedian = normalizeMedianValues(dataAll)
boxplot(dataAll.norm)
heatmap.2(na.omit(dataAll.norm), trace= "none", main="All meth", col=mycol)
dataT.norm = normQ(dataT)
dataN.norm = normQ(dataN)
plotValue(dataT.norm,dataN.norm)
plotValue(dataMT,dataMN)
plotDensity(dataAll.norm[,1:96],dataAll.norm[,97:213])
plotDensity(dataAll.normMedian[,1:96],dataAll.normMedian[,97:213])
dataAll.normBtwArray = normalizeBetweenArrays(dataAll)
plotDensity(dataAll.normBtwArray[,1:96],dataAll.normBtwArray[,97:213])
plotValue(dataMT.scale,dataMN.scale)
|
# Internal helper: run DropletUtils::emptyDrops on a single barcode matrix.
#
# barcode.matrix  raw counts (features x barcodes); coerced by the package's
#                 .convertToMatrix helper (defined elsewhere) before use.
# lower, niters, test.ambient, ignore, alpha, retain
#                 forwarded unchanged to DropletUtils::emptyDrops; see its
#                 documentation for semantics.
# barcode.args    list of extra arguments forwarded to emptyDrops.
# BPPARAM         BiocParallel parameter object controlling parallelisation.
#
# Returns the emptyDrops result DataFrame with every column name prefixed
# "dropletUtils_emptyDrops_".
#
# BUGFIX: the original declared self-referential defaults (barcode.matrix =
# barcode.matrix, lower = lower, ...), which error with "promise already
# under evaluation" if any of those arguments is ever omitted. The defaults
# carried no information, so they have been removed; every existing call that
# supplied the arguments behaves identically.
.runEmptyDrops <- function(barcode.matrix, lower, niters, test.ambient,
                           ignore, alpha, retain,
                           barcode.args=list(),
                           BPPARAM=BiocParallel::SerialParam()) {
  barcode.matrix <- .convertToMatrix(barcode.matrix)
  result <- DropletUtils::emptyDrops(m = barcode.matrix,
                                     lower = lower,
                                     niters = niters,
                                     test.ambient = test.ambient,
                                     ignore = ignore,
                                     alpha = alpha,
                                     retain = retain,
                                     barcode.args = barcode.args,
                                     BPPARAM = BPPARAM)
  # Namespace the columns so they do not collide with other colData fields.
  colnames(result) <- paste0("dropletUtils_emptyDrops_", colnames(result))
  return(result)
}
#' @title Identify empty droplets using \link[DropletUtils]{emptyDrops}.
#' @description Run \link[DropletUtils]{emptyDrops} on the count matrix in the
#' provided \link[SingleCellExperiment]{SingleCellExperiment} object.
#' Distinguish between droplets containing cells and ambient RNA in a
#' droplet-based single-cell RNA sequencing experiment.
#' @param inSCE Input \link[SingleCellExperiment]{SingleCellExperiment} object.
#' Must contain a raw counts matrix before empty droplets have been removed.
#' @param sample Character vector. Indicates which sample each cell belongs to.
#' \link[DropletUtils]{emptyDrops} will be run on cells from each sample separately.
#' If NULL, then all cells will be processed together. Default NULL.
#' @param useAssay A string specifying which assay in the SCE to use.
#' @param lower See \link[DropletUtils]{emptyDrops} for more information.
#' @param niters See \link[DropletUtils]{emptyDrops} for more information.
#' @param testAmbient See \link[DropletUtils]{emptyDrops} for more information.
#' @param ignore See \link[DropletUtils]{emptyDrops} for more information.
#' @param alpha See \link[DropletUtils]{emptyDrops} for more information.
#' @param retain See \link[DropletUtils]{emptyDrops} for more information.
#' @param barcodeArgs See \link[DropletUtils]{emptyDrops} for more information.
#' @param BPPARAM See \link[DropletUtils]{emptyDrops} for more information.
#' @return A \link[SingleCellExperiment]{SingleCellExperiment} object with the
#' \link[DropletUtils]{emptyDrops} output table appended to the
#' \link{colData} slot. The columns include
#' \emph{emptyDrops_total}, \emph{emptyDrops_logprob},
#' \emph{emptyDrops_pvalue}, \emph{emptyDrops_limited}, \emph{emptyDrops_fdr}.
#' Please refer to the documentation of \link[DropletUtils]{emptyDrops} for
#' details.
#' @examples
#' # The following unfiltered PBMC_1k_v3 data were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/3.0.0
#' # /pbmc_1k_v3
#' # Only the top 10 cells with most counts and the last 10 cells with non-zero
#' # counts are included in this example.
#' # This example only serves as a proof of concept and a tutorial on how to
#' # run the function. The results should not be
#' # used for drawing scientific conclusions.
#' data(scExample, package = "singleCellTK")
#' sce <- runEmptyDrops(inSCE = sce)
#' @import DropletUtils
#' @export
#' @importFrom SummarizedExperiment colData colData<-
runEmptyDrops <- function(inSCE,
                          sample = NULL,
                          useAssay = "counts",
                          lower = 100,
                          niters = 10000,
                          testAmbient = FALSE,
                          ignore = NULL,
                          alpha = NULL,
                          retain = NULL,
                          barcodeArgs = list(),
                          BPPARAM = BiocParallel::SerialParam()
) {
  # Capture the current argument values so the call can be recorded in the
  # object's metadata for provenance (BPPARAM is stripped before storing,
  # further below).
  # getting the current argument values
  #argsList <- as.list(formals(fun = sys.function(sys.parent()), envir = parent.frame()))
  argsList <- mget(names(formals()),sys.frame(sys.nframe()))
  # 'sample' must map every column of the SCE to a sample; when NULL, all
  # cells are treated as one sample.
  if(!is.null(sample)) {
    if(length(sample) != ncol(inSCE)) {
      stop("'sample' must be the same length as the number of columns in 'inSCE'")
    }
  } else {
    sample = rep(1, ncol(inSCE))
  }
  message(date(), " ... Running 'emptyDrops'")
  ## Define result matrix for all samples
  # Pre-allocate one row per cell so per-sample results can be slotted in by
  # logical index, preserving the original column order of 'inSCE'.
  output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
    dropletUtils_emptyDrops_total = integer(ncol(inSCE)),
    dropletUtils_emptyDrops_logprob = numeric(ncol(inSCE)),
    dropletUtils_emptyDrops_pvalue = numeric(ncol(inSCE)),
    dropletUtils_emptyDrops_limited = logical(ncol(inSCE)),
    dropletUtils_emptyDrops_fdr = numeric(ncol(inSCE)))
  ## Loop through each sample and run emptyDrops on its cells.
  samples <- unique(sample)
  for (i in seq_len(length(samples))) {
    sceSampleInd <- sample == samples[i]
    sceSample <- inSCE[, sceSampleInd]
    # Extract the raw counts for this sample's cells.
    mat <- SummarizedExperiment::assay(sceSample, i = useAssay)
    result <- .runEmptyDrops(barcode.matrix = mat,
                             lower = lower,
                             niters = niters,
                             test.ambient = testAmbient,
                             ignore = ignore,
                             alpha = alpha,
                             retain = retain,
                             barcode.args = barcodeArgs,
                             BPPARAM = BPPARAM)
    # NOTE(review): this assignment matches columns by position, assuming
    # emptyDrops returns Total, LogProb, PValue, Limited, FDR in that order --
    # confirm against the installed DropletUtils version.
    output[sceSampleInd, ] <- result
    # NOTE(review): metadata is assigned through a temporary subset of
    # 'output'; verify the metadata actually persists on 'output' and is not
    # simply overwritten on each loop iteration.
    S4Vectors::metadata(output[sceSampleInd, ]) <- S4Vectors::metadata(result)
  }
  # Append the per-cell emptyDrops results to the colData of the SCE.
  colData(inSCE) = cbind(colData(inSCE), output)
  # Record the call (minus BPPARAM and the SCE itself) plus the installed
  # DropletUtils version in the metadata for reproducibility.
  argsList <- argsList[!names(argsList) %in% c("BPPARAM")]
  inSCE@metadata$runEmptyDrops <- argsList[-1]
  inSCE@metadata$runEmptyDrops$packageVersion <- utils::packageDescription("DropletUtils")$Version
  return(inSCE)
}
| /R/dropletUtils_emptyDrops.R | permissive | ykoga07/singleCellTK | R | false | false | 6,160 | r | .runEmptyDrops <- function(barcode.matrix=barcode.matrix, lower=lower,
niters=niters,
test.ambient=test.ambient,
ignore=ignore,
alpha=alpha,
retain=retain,
barcode.args=list(),
BPPARAM=BiocParallel::SerialParam()) {
barcode.matrix <- .convertToMatrix(barcode.matrix)
result <- DropletUtils::emptyDrops(m = barcode.matrix,
lower = lower,
niters = niters,
test.ambient = test.ambient,
ignore = ignore,
alpha = alpha,
retain = retain,
barcode.args = barcode.args,
BPPARAM = BPPARAM)
colnames(result) <- paste0("dropletUtils_emptyDrops_", colnames(result))
return(result)
}
#' @title Identify empty droplets using \link[DropletUtils]{emptyDrops}.
#' @description Run \link[DropletUtils]{emptyDrops} on the count matrix in the
#' provided \link[SingleCellExperiment]{SingleCellExperiment} object.
#' Distinguish between droplets containing cells and ambient RNA in a
#' droplet-based single-cell RNA sequencing experiment.
#' @param inSCE Input \link[SingleCellExperiment]{SingleCellExperiment} object.
#' Must contain a raw counts matrix before empty droplets have been removed.
#' @param sample Character vector. Indicates which sample each cell belongs to.
#' \link[DropletUtils]{emptyDrops} will be run on cells from each sample separately.
#' If NULL, then all cells will be processed together. Default NULL.
#' @param useAssay A string specifying which assay in the SCE to use.
#' @param lower See \link[DropletUtils]{emptyDrops} for more information.
#' @param niters See \link[DropletUtils]{emptyDrops} for more information.
#' @param testAmbient See \link[DropletUtils]{emptyDrops} for more information.
#' @param ignore See \link[DropletUtils]{emptyDrops} for more information.
#' @param alpha See \link[DropletUtils]{emptyDrops} for more information.
#' @param retain See \link[DropletUtils]{emptyDrops} for more information.
#' @param barcodeArgs See \link[DropletUtils]{emptyDrops} for more information.
#' @param BPPARAM See \link[DropletUtils]{emptyDrops} for more information.
#' @return A \link[SingleCellExperiment]{SingleCellExperiment} object with the
#' \link[DropletUtils]{emptyDrops} output table appended to the
#' \link{colData} slot. The columns include
#' \emph{emptyDrops_total}, \emph{emptyDrops_logprob},
#' \emph{emptyDrops_pvalue}, \emph{emptyDrops_limited}, \emph{emptyDrops_fdr}.
#' Please refer to the documentation of \link[DropletUtils]{emptyDrops} for
#' details.
#' @examples
#' # The following unfiltered PBMC_1k_v3 data were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/3.0.0
#' # /pbmc_1k_v3
#' # Only the top 10 cells with most counts and the last 10 cells with non-zero
#' # counts are included in this example.
#' # This example only serves as an proof of concept and a tutorial on how to
#' # run the function. The results should not be
#' # used for drawing scientific conclusions.
#' data(scExample, package = "singleCellTK")
#' sce <- runEmptyDrops(inSCE = sce)
#' @import DropletUtils
#' @export
#' @importFrom SummarizedExperiment colData colData<-
runEmptyDrops <- function(inSCE,
sample = NULL,
useAssay = "counts",
lower = 100,
niters = 10000,
testAmbient = FALSE,
ignore = NULL,
alpha = NULL,
retain = NULL,
barcodeArgs = list(),
BPPARAM = BiocParallel::SerialParam()
) {
# getting the current argument values
#argsList <- as.list(formals(fun = sys.function(sys.parent()), envir = parent.frame()))
argsList <- mget(names(formals()),sys.frame(sys.nframe()))
if(!is.null(sample)) {
if(length(sample) != ncol(inSCE)) {
stop("'sample' must be the same length as the number of columns in 'inSCE'")
}
} else {
sample = rep(1, ncol(inSCE))
}
message(date(), " ... Running 'emptyDrops'")
## Define result matrix for all samples
output <- S4Vectors::DataFrame(row.names = colnames(inSCE),
dropletUtils_emptyDrops_total = integer(ncol(inSCE)),
dropletUtils_emptyDrops_logprob = numeric(ncol(inSCE)),
dropletUtils_emptyDrops_pvalue = numeric(ncol(inSCE)),
dropletUtils_emptyDrops_limited = logical(ncol(inSCE)),
dropletUtils_emptyDrops_fdr = numeric(ncol(inSCE)))
## Loop through each sample and run barcodeRank
samples <- unique(sample)
for (i in seq_len(length(samples))) {
sceSampleInd <- sample == samples[i]
sceSample <- inSCE[, sceSampleInd]
mat <- SummarizedExperiment::assay(sceSample, i = useAssay)
result <- .runEmptyDrops(barcode.matrix = mat,
lower = lower,
niters = niters,
test.ambient = testAmbient,
ignore = ignore,
alpha = alpha,
retain = retain,
barcode.args = barcodeArgs,
BPPARAM = BPPARAM)
output[sceSampleInd, ] <- result
S4Vectors::metadata(output[sceSampleInd, ]) <- S4Vectors::metadata(result)
}
colData(inSCE) = cbind(colData(inSCE), output)
argsList <- argsList[!names(argsList) %in% c("BPPARAM")]
inSCE@metadata$runEmptyDrops <- argsList[-1]
inSCE@metadata$runEmptyDrops$packageVersion <- utils::packageDescription("DropletUtils")$Version
return(inSCE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semanticPage.R
\name{get_cdn_path}
\alias{get_cdn_path}
\title{Get CDN path semantic dependencies}
\usage{
get_cdn_path()
}
\value{
CDN path of semantic dependencies
}
\description{
Internal function that returns path string from `shiny.custom.semantic.cdn` options.
}
\examples{
## Load shiny.semantic dependencies from local domain.
options("shiny.custom.semantic.cdn" = "shiny.semantic")
}
\keyword{internal}
| /man/get_cdn_path.Rd | permissive | Appsilon/shiny.semantic | R | false | true | 491 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semanticPage.R
\name{get_cdn_path}
\alias{get_cdn_path}
\title{Get CDN path semantic dependencies}
\usage{
get_cdn_path()
}
\value{
CDN path of semantic dependencies
}
\description{
Internal function that returns path string from `shiny.custom.semantic.cdn` options.
}
\examples{
## Load shiny.semantic dependencies from local domain.
options("shiny.custom.semantic.cdn" = "shiny.semantic")
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun.PeriodStats.R
\name{PeriodStats}
\alias{PeriodStats}
\title{Daily stats for a given time period}
\usage{
PeriodStats(fun.myDate, fun.myDate.Format = NA, fun.myPeriod.N = 30,
fun.myPeriod.Units = "d", fun.myFile, fun.myDir.import = getwd(),
fun.myDir.export = getwd(), fun.myParam.Name,
fun.myDateTime.Name = "Date.Time", fun.myDateTime.Format = NA,
fun.myThreshold = NA, fun.myConfig = "", fun.myReport.format = "",
fun.myReport.Dir = "")
}
\arguments{
\item{fun.myDate}{Benchmark date.}
\item{fun.myDate.Format}{Format of benchmark date. This should be the same
format of the date in the data file. Default is \%Y-\%m-\%d (e.g., 2017-12-31).}
\item{fun.myPeriod.N}{Period length. Default = 30.}
\item{fun.myPeriod.Units}{Period units (days or years written as d or y). Default is d.}
\item{fun.myFile}{Filename (no directory) of data file. Must be CSV file.}
\item{fun.myDir.import}{Directory for import data. Default is current working directory.}
\item{fun.myDir.export}{Directory for export data. Default is current working directory.}
\item{fun.myParam.Name}{Column name in myFile to perform summary statistics.
One or two parameters can be specified.}
\item{fun.myDateTime.Name}{Column name in myFile for date time. Default = "Date.Time".}
\item{fun.myDateTime.Format}{Format of DateTime field. Default = \%Y-\%m-\%d \%H:\%M:\%S.}
\item{fun.myThreshold}{Value to draw line on plot. For example, a regulatory limit.
Default = NA}
\item{fun.myConfig}{Configuration file to use for this data analysis.
The default is always loaded first so only "new" values need to be included. This is the easiest way to control date and time formats.}
\item{fun.myReport.format}{Report format (docx or html).
Default is specified in config.R (docx).Can be customized in config.R; ContData.env$myReport.Format.}
\item{fun.myReport.Dir}{Report (rmd) template folder.
Default is the package rmd folder. Can be customized in config.R; ContData.env$myReport.Dir.}
}
\value{
Returns a CSV with daily means and a report (HTML or DOCX) with plots, saved to the
specified export directory, for the specified time period before the given date.
}
\description{
Generates daily stats (N, mean, min, max, range, std deviation) for the
specified time period before a given date. Output is a multiple column CSV
(Date and Parameter Name by statistic) and a report (HTML or DOCX) with plots.
Input is the output file of the QC operation of ContDataQC().
}
\details{
The input is the output file of the QC operation in ContDataQC(). That is, a file with
Date.Time, and parameters (matching formats in config.R).
To get different periods (30, 60, or 90 days) change function input "fun.myPeriod.N".
It is possible to provide a vector for Period.N and Period.Units.
If the date range is longer than that in the data provided the stats will not calculate properly.
The dates must be in the standard format (Y-m-d) or the function may not work as intended.
For example, the date is used in the file name and dates with "/" will result in an invalid file name.
One or two parameters can be analyzed at a time.
If provide 2 parameters both will produce period statistic summaries.
And the plots will have both parameters. The 2nd parameter will be on the 2nd (right) y-axis.
Requires doBy library for the period statistics summary and rmarkdown for the report.
}
\examples{
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save example files from Package to use for example
## This step not needed for users working on their own files
df.x <- DATA_period_test2_Aw_20130101_20141231
write.csv(df.x,"DATA_period_test2_Aw_20130101_20141231.csv")
myFile <- "config.ExcludeFailsFalse.R"
file.copy(file.path(path.package("ContDataQC"), "extdata", myFile)
, file.path(getwd(), myFile))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load File to use for PeriodStats
myDir <- getwd()
myFile <- "DATA_period_test2_Aw_20130101_20141231.csv"
df.x <- read.csv(file.path(myDir, myFile))
# function inputs
myDate <- "2013-09-30"
myDate.Format <- "\%Y-\%m-\%d"
myPeriod.N <- c(30, 60, 90, 120, 1)
myPeriod.Units <- c("d", "d", "d", "d", "y")
myFile <- "DATA_period_test2_Aw_20130101_20141231.csv"
myDir.import <- getwd()
myDir.export <- getwd()
myParam.Name <- "Water.Temp.C"
myDateTime.Name <- "Date.Time"
myDateTime.Format <- "\%Y-\%m-\%d \%H:\%M:\%S"
myThreshold <- 20
myConfig <- ""
myReport.format <- "docx"
# Custom Config
myConfig.Fail.Include <- "config.ExcludeFailsFalse.R"
# Run Function
## Example 1. default report format (html)
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig)
## Example 2. DOCX report format
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig
, myReport.format)
## Example 3. DOCX report format and Include Flag Failures
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig.Fail.Include
, myReport.format)
## Example 4. DOCX report format with two parameters
myParam.Name <- c("Water.Temp.C", "Sensor.Depth.ft")
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig
, myReport.format)
}
| /man/PeriodStats.Rd | permissive | utah-dwq/ContDataQC | R | false | true | 6,113 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun.PeriodStats.R
\name{PeriodStats}
\alias{PeriodStats}
\title{Daily stats for a given time period}
\usage{
PeriodStats(fun.myDate, fun.myDate.Format = NA, fun.myPeriod.N = 30,
fun.myPeriod.Units = "d", fun.myFile, fun.myDir.import = getwd(),
fun.myDir.export = getwd(), fun.myParam.Name,
fun.myDateTime.Name = "Date.Time", fun.myDateTime.Format = NA,
fun.myThreshold = NA, fun.myConfig = "", fun.myReport.format = "",
fun.myReport.Dir = "")
}
\arguments{
\item{fun.myDate}{Benchmark date.}
\item{fun.myDate.Format}{Format of benchmark date. This should be the same
format of the date in the data file. Default is \%Y-\%m-\%d (e.g., 2017-12-31).}
\item{fun.myPeriod.N}{Period length. Default = 30.}
\item{fun.myPeriod.Units}{Period units (days or years written as d or y). Default is d.}
\item{fun.myFile}{Filename (no directory) of data file. Must be CSV file.}
\item{fun.myDir.import}{Directory for import data. Default is current working directory.}
\item{fun.myDir.export}{Directory for export data. Default is current working directory.}
\item{fun.myParam.Name}{Column name in myFile to perform summary statistics.
One or two parameters can be specified.}
\item{fun.myDateTime.Name}{Column name in myFile for date time. Default = "Date.Time".}
\item{fun.myDateTime.Format}{Format of DateTime field. Default = \%Y-\%m-\%d \%H:\%M:\%S.}
\item{fun.myThreshold}{Value to draw line on plot. For example, a regulatory limit.
Default = NA}
\item{fun.myConfig}{Configuration file to use for this data analysis.
The default is always loaded first so only "new" values need to be included. This is the easiest way to control date and time formats.}
\item{fun.myReport.format}{Report format (docx or html).
Default is specified in config.R (docx). Can be customized in config.R; ContData.env$myReport.Format.}
\item{fun.myReport.Dir}{Report (rmd) template folder.
Default is the package rmd folder. Can be customized in config.R; ContData.env$myReport.Dir.}
}
\value{
Returns a csv with daily means and a PDF summary with plots into the
specified export directory for the specified time period before the given date.
}
\description{
Generates daily stats (N, mean, min, max, range, std deviation) for the
specified time period before a given date. Output is a multiple column CSV
(Date and Parameter Name by statistic) and a report (HTML or DOCX) with plots.
Input is the output file of the QC operation of ContDataQC().
}
\details{
The input is the output file of the QC operation in ContDataQC(). That is, a file with
Date.Time, and parameters (matching formats in config.R).
To get different periods (30, 60, or 90 days) change function input "fun.myPeriod.N".
It is possible to provide a vector for Period.N and Period.Units.
If the date range is longer than that in the data provided the stats will not calculate properly.
The dates must be in the standard format (Y-m-d) or the function may not work as intended.
For example, the date is used in the file name and dates with "/" will result in an invalid file name.
One or two parameters can be analyzed at a time.
If 2 parameters are provided, both will produce period statistic summaries.
And the plots will have both parameters. The 2nd parameter will be on the 2nd (right) y-axis.
Requires doBy library for the period statistics summary and rmarkdown for the report.
}
\examples{
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save example files from Package to use for example
## This step not needed for users working on their own files
df.x <- DATA_period_test2_Aw_20130101_20141231
write.csv(df.x,"DATA_period_test2_Aw_20130101_20141231.csv")
myFile <- "config.ExcludeFailsFalse.R"
file.copy(file.path(path.package("ContDataQC"), "extdata", myFile)
, file.path(getwd(), myFile))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load File to use for PeriodStats
myDir <- getwd()
myFile <- "DATA_period_test2_Aw_20130101_20141231.csv"
df.x <- read.csv(file.path(myDir, myFile))
# function inputs
myDate <- "2013-09-30"
myDate.Format <- "\%Y-\%m-\%d"
myPeriod.N <- c(30, 60, 90, 120, 1)
myPeriod.Units <- c("d", "d", "d", "d", "y")
myFile <- "DATA_period_test2_Aw_20130101_20141231.csv"
myDir.import <- getwd()
myDir.export <- getwd()
myParam.Name <- "Water.Temp.C"
myDateTime.Name <- "Date.Time"
myDateTime.Format <- "\%Y-\%m-\%d \%H:\%M:\%S"
myThreshold <- 20
myConfig <- ""
myReport.format <- "docx"
# Custom Config
myConfig.Fail.Include <- "config.ExcludeFailsFalse.R"
# Run Function
## Example 1. default report format (html)
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig)
## Example 2. DOCX report format
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig
, myReport.format)
## Example 3. DOCX report format and Include Flag Failures
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig.Fail.Include
, myReport.format)
## Example 4. DOCX report format with two parameters
myParam.Name <- c("Water.Temp.C", "Sensor.Depth.ft")
PeriodStats(myDate
, myDate.Format
, myPeriod.N
, myPeriod.Units
, myFile
, myDir.import
, myDir.export
, myParam.Name
, myDateTime.Name
, myDateTime.Format
, myThreshold
, myConfig
, myReport.format)
}
|
# Expand right-censored survival data into counting-process (start, finish]
# interval rows: one row per subject still at risk at each unique event
# (or evaluation) time.
#
# Args:
#   stimes: numeric vector of survival/censoring times.
#   status: event indicator per subject; 0 (or FALSE) = censored,
#           non-zero = event.
#   etimes: optional evaluation times; if supplied, intervals are built at
#           these times plus event times up to max(etimes).
#
# Returns: data.frame with columns start, finish, newstat, id, where id is
#   the subject's position in the input vectors.
riskidx <- function (stimes, status, etimes=NULL) {
  if (length(stimes) != length(status))
    stop("stimes and status must have same length")
  # Event times must be strictly positive; nudge offenders upward.
  # Bug fix: the original tested min(stimes[status]), which indexes by
  # *position* when status is a 0/1 numeric vector (only stimes[1] is ever
  # inspected), so non-positive event times could slip through unshifted.
  bad <- (status != 0) & (stimes <= 0)
  if (any(bad)) {
    stimes[bad] <- 1e-04
    warning("Survival times <= 0 replaced with 1e-04")
  }
  utimes <- if (is.null(etimes)) {
    unique(stimes[status != 0])
  } else {
    unique(c(stimes[(stimes <= max(etimes)) & status != 0], etimes))
  }
  utimes <- sort(utimes)
  t.evaluate <- c(0, utimes)
  # One chunk of rows per interval. seq_along() (instead of 1:nt) makes the
  # no-event case return a clean 0-row data frame rather than erroring.
  rows <- lapply(seq_along(utimes), function(i) {
    start <- t.evaluate[i]
    finish <- t.evaluate[i + 1]
    keep <- stimes >= finish                       # still at risk at 'finish'
    newstat <- ifelse(stimes[keep] == finish, status[keep], 0)
    id <- which(keep)
    cbind(start, finish, newstat, id)
  })
  as.data.frame(do.call("rbind", rows))
}
#' @useDynLib pcox coxcount1
# Build counting-process risk-set rows for a survival matrix Y using the
# compiled pcox routine coxcount1. Returns a data.frame with columns
# start, finish, newstat, id (one row per subject at risk in each
# interval). Only the default path (evaltimes = NULL) is implemented.
# NOTE(review): assumes Y is a Surv-style matrix with columns
# (time, status) -- confirm against callers.
getRiskSet <- function (Y, evaltimes=NULL) {
  if (is.null(evaltimes)) {
    # Sort by decreasing time; ties broken by the second column.
    sorted <- order(-Y[,1], Y[,2])
    # Single stratum: flag only the first row of the sorted data.
    newstrat <- rep.int(0L, nrow(Y))
    newstrat[1] <- 1L
    if (storage.mode(Y) != "double")
      storage.mode(Y) <- "double"  # the C routine expects doubles
    counts <- .Call(coxcount1, Y[sorted,], as.integer(newstrat))
    # Expand per-event counts into (start, finish] interval rows; ids are
    # mapped back to the original (unsorted) row order via 'sorted'.
    rs <- data.frame(start=rep(c(counts$time[-1],0), counts$nrisk),
                     finish=rep(counts$time, counts$nrisk),
                     newstat=counts$status, id=sorted[counts$index])
    #rs2 <- arrange(rs2, finish, id)
  } else {
    stop("Not yet supported!")
  }
  rs
} | /R/riskidx.R | no_license | jgellar/pcox | R | false | false | 1,437 | r | riskidx <- function (stimes, status, etimes=NULL) {
if (length(stimes)!=length(status))
stop("stimes and status must have same length")
if (min(stimes[status])<=0) {
stimes[status & stimes<=0] <- 1e-04
warning("Survival times <= 0 replaced with 1e-04")
}
utimes <- if (is.null(etimes)) {
unique(stimes[status!=0])
} else {
unique(c(stimes[(stimes <= max(etimes)) & status!=0], etimes))
}
utimes <- utimes[order(utimes)]
nt = length(utimes)
nc = length(stimes)
t.evaluate = c(0, utimes)
as.data.frame(do.call("rbind", sapply(1:nt, function(i) {
start <- t.evaluate[i]
finish <- t.evaluate[i+1]
keep <- stimes>=finish
newstat <- ifelse(stimes[keep]==finish, status[keep], 0)
id <- which(keep)
cbind(start, finish, newstat, id)
})))
}
#' @useDynLib pcox coxcount1
# Build counting-process risk-set rows for a survival matrix Y via the
# compiled pcox routine coxcount1, returning a data.frame with columns
# start, finish, newstat, id. Only the default path (evaltimes = NULL)
# is implemented.
getRiskSet <- function (Y, evaltimes=NULL) {
  if (!is.null(evaltimes)) {
    stop("Not yet supported!")
  }
  # Sort by decreasing time; ties broken by the second column.
  ord <- order(-Y[, 1], Y[, 2])
  # Single stratum: flag only the first row of the sorted data.
  strat <- rep.int(0L, nrow(Y))
  strat[1] <- 1L
  if (storage.mode(Y) != "double") {
    storage.mode(Y) <- "double"  # the C routine expects doubles
  }
  counts <- .Call(coxcount1, Y[ord, ], as.integer(strat))
  # Expand per-event counts into (start, finish] interval rows; ids are
  # mapped back to the original (unsorted) row order via 'ord'.
  data.frame(
    start   = rep(c(counts$time[-1], 0), counts$nrisk),
    finish  = rep(counts$time, counts$nrisk),
    newstat = counts$status,
    id      = ord[counts$index]
  )
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RequestPattern.R
\name{MethodPattern}
\alias{MethodPattern}
\title{MethodPattern}
\description{
method matcher
}
\details{
Matches regardless of case. e.g., POST will match to post
}
\examples{
(x <- MethodPattern$new(pattern = "post"))
x$pattern
x$matches(method = "post")
x$matches(method = "POST")
# all matches() calls should be TRUE
(x <- MethodPattern$new(pattern = "any"))
x$pattern
x$matches(method = "post")
x$matches(method = "GET")
x$matches(method = "HEAD")
}
\keyword{internal}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{pattern}}{(character) an http method}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{MethodPattern$new()}}
\item \href{#method-matches}{\code{MethodPattern$matches()}}
\item \href{#method-to_s}{\code{MethodPattern$to_s()}}
\item \href{#method-clone}{\code{MethodPattern$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{MethodPattern} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$new(pattern)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{pattern}}{(character) a HTTP method, lowercase}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{MethodPattern} object
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-matches"></a>}}
\if{latex}{\out{\hypertarget{method-matches}{}}}
\subsection{Method \code{matches()}}{
test if the pattern matches a given http method
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$matches(method)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{method}}{(character) a HTTP method, lowercase}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
a boolean
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-to_s"></a>}}
\if{latex}{\out{\hypertarget{method-to_s}{}}}
\subsection{Method \code{to_s()}}{
Print pattern for easy human consumption
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$to_s()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
a string
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/MethodPattern.Rd | permissive | aaronwolen/webmockr | R | false | true | 2,890 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RequestPattern.R
\name{MethodPattern}
\alias{MethodPattern}
\title{MethodPattern}
\description{
method matcher
}
\details{
Matches regardless of case. e.g., POST will match to post
}
\examples{
(x <- MethodPattern$new(pattern = "post"))
x$pattern
x$matches(method = "post")
x$matches(method = "POST")
# all matches() calls should be TRUE
(x <- MethodPattern$new(pattern = "any"))
x$pattern
x$matches(method = "post")
x$matches(method = "GET")
x$matches(method = "HEAD")
}
\keyword{internal}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{pattern}}{(character) an http method}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{MethodPattern$new()}}
\item \href{#method-matches}{\code{MethodPattern$matches()}}
\item \href{#method-to_s}{\code{MethodPattern$to_s()}}
\item \href{#method-clone}{\code{MethodPattern$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{MethodPattern} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$new(pattern)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{pattern}}{(character) a HTTP method, lowercase}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{MethodPattern} object
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-matches"></a>}}
\if{latex}{\out{\hypertarget{method-matches}{}}}
\subsection{Method \code{matches()}}{
test if the pattern matches a given http method
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$matches(method)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{method}}{(character) a HTTP method, lowercase}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
a boolean
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-to_s"></a>}}
\if{latex}{\out{\hypertarget{method-to_s}{}}}
\subsection{Method \code{to_s()}}{
Print pattern for easy human consumption
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$to_s()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
a string
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MethodPattern$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# Statistical Rethinking, chapter 9: maximum-entropy worked examples.
# Five ways of placing 10 pebbles into 5 buckets.
p=list()
p$A=c(0,0,10,0,0)
p$B=c(0,1,8,1,0)
p$C=c(0,2,6,2,0)
p$D=c(1,2,4,2,1)
p$E=c(2,2,2,2,2)
# Normalize each configuration into a probability distribution.
p_norm=lapply(p,function(q) q/sum(q))
# Shannon entropy of each distribution; ifelse() treats 0*log(0) as 0.
H=sapply(p_norm,function(q) -sum(ifelse(q==0,0,q*log(q))))
# Number of ways each configuration can be realized; log(ways)/10 is
# log-ways per pebble, which tracks the entropies above.
ways=c(1,90,1260,37800,113400)
logwayspp=log(ways)/10
# Binomial example -- note 'p' is reused as a scalar probability here,
# clobbering the list defined above.
p=0.7
A=c((1-p)^2,p*(1-p),(1-p)*p,p^2)
entropy=-sum(A*log(A))
# Simulate a random distribution p = (p1..p4) over the outcomes
# {ww, wb, bw, bb} whose expected value sum(p * c(0,1,1,2)) equals G,
# returning its entropy H and the distribution itself
# (Statistical Rethinking, chapter 9 maximum-entropy example).
sim.p <- function(G=1.4){
  x123 <- runif(3)
  # Solve for x4 from the expected-value constraint
  #   x2 + x3 + 2*x4 = G * (x1 + x2 + x3 + x4).
  # Bug fix: the original wrote sum(x123 - x123[2] - x123[3]), which
  # recycles the scalars across the whole vector and violates the
  # constraint; the book's formula is (G)*sum(x123) - x123[2] - x123[3].
  x4 <- ((G)*sum(x123)-x123[2]-x123[3])/(2-G)
  z <- sum(c(x123,x4))
  p <- c(x123,x4)/z
  list(H=-sum(p*log(p)),p=p)
}
library(rethinking)
# Draw 1e5 constrained distributions; 'H' is reused here as a 2 x 1e5
# matrix of (entropy, distribution) results, clobbering the entropy
# vector computed earlier.
H=replicate(1e5,sim.p(1.4))
op=par(mfrow=c(2,2))  # NOTE(review): 'op' is never restored with par(op)
# Density of the simulated entropies (rethinking::dens).
dens(as.numeric(H[1,]),adj=0.1)
curve(dgamma(x,5,20))
curve(dnorm(x,10,3))
?dlogis  # interactive help lookup left in the script
curve(dlogis(x,5,1),xlim=c(-1,1))
| /StatisticalRethinking/Chapter 9.R | no_license | EijiGorilla/R-Statistics | R | false | false | 682 | r | p=list()
p$A=c(0,0,10,0,0)
p$B=c(0,1,8,1,0)
p$C=c(0,2,6,2,0)
p$D=c(1,2,4,2,1)
p$E=c(2,2,2,2,2)
p_norm=lapply(p,function(q) q/sum(q))
H=sapply(p_norm,function(q) -sum(ifelse(q==0,0,q*log(q))))
ways=c(1,90,1260,37800,113400)
logwayspp=log(ways)/10
p=0.7
A=c((1-p)^2,p*(1-p),(1-p)*p,p^2)
entropy=-sum(A*log(A))
# Simulate a random distribution p = (p1..p4) over the outcomes
# {ww, wb, bw, bb} whose expected value sum(p * c(0,1,1,2)) equals G,
# returning its entropy H and the distribution itself
# (Statistical Rethinking, chapter 9 maximum-entropy example).
sim.p <- function(G=1.4){
  x123 <- runif(3)
  # Solve for x4 from the expected-value constraint
  #   x2 + x3 + 2*x4 = G * (x1 + x2 + x3 + x4).
  # Bug fix: the original wrote sum(x123 - x123[2] - x123[3]), which
  # recycles the scalars across the whole vector and violates the
  # constraint; the book's formula is (G)*sum(x123) - x123[2] - x123[3].
  x4 <- ((G)*sum(x123)-x123[2]-x123[3])/(2-G)
  z <- sum(c(x123,x4))
  p <- c(x123,x4)/z
  list(H=-sum(p*log(p)),p=p)
}
library(rethinking)
H=replicate(1e5,sim.p(1.4))
op=par(mfrow=c(2,2))
dens(as.numeric(H[1,]),adj=0.1)
curve(dgamma(x,5,20))
curve(dnorm(x,10,3))
?dlogis
curve(dlogis(x,5,1),xlim=c(-1,1))
|
##########################
# STATISTICAL THRESHOLDS #
#############################################################################################################################
minimum_fold <- 8            # Smallest (linear) fold change to be considered 'enriched' in a tissue; converted to log2 before use
testing_correction <- 'fdr'  # P-value adjustment for multiple testing correction (any p.adjust() method name)
maximum_pvalue <- 0.001      # Largest adjusted P-value to be considered significant in ANOVA
flanking <- TRUE             # If TRUE, buffers temporal changes by including adjacent developmental timepoints in analysis
disjoint_sets <- FALSE       # If TRUE, ensures that all tissue-enriched gene sets for each stage are disjoint (non-overlapping)
reference_name <- 'reference_atlas'  # Basename of the default reference table inside 'datasets/'
#############################################################################################################################
# FUNCTIONS #
#############################################################################################################################
# Takes a list of sets and returns the same list with all elements shared between any sets removed
# Takes a list of sets and returns the same list with all elements shared
# between any sets removed (each returned set keeps only the elements that
# appear in no other input set).
#
# Args:
#   input_list: named list of vectors (sets).
# Returns: the list with every shared element removed from every set.
disjoint <- function(input_list){
  disjoint_list <- input_list
  for(i in names(input_list)){
    for(j in names(input_list)[names(input_list)!=i]){
      # Bug fix: compare against the *original* sets. The old code compared
      # against disjoint_list[[j]]; once a shared element had been removed
      # from the first set it was no longer detected in later sets, so the
      # result was not actually disjoint.
      disjoint_list[[i]] <- disjoint_list[[i]][!disjoint_list[[i]]%in%input_list[[j]]]
    }
  }
  return(disjoint_list)
}
#############################################################################################################################
# LOADING ENVIRONMENT #
#############################################################################################################################
# ---- Command-line handling ----
# Optional single argument: name of the reference table inside 'datasets'
# (a '.tsv'/'.description' extension and 'datasets/' prefix are stripped).
args = commandArgs(trailingOnly=TRUE)
if(length(args)==1){
  reference_name = gsub('^datasets/','',gsub('\\.(tsv|description)$','',args[1]))
}
if(grepl('/',reference_name)){
  stop("Reference table must be located in the local directory 'datasets'. Please check setup instructions (See README for details)")
}
# ---- Load the expression atlas and tissue metadata ----
cat('Loading reference atlas...\n')
reference_atlas <- read.delim(paste("datasets/",reference_name,".tsv",sep=''),header = T,stringsAsFactors = F,comment.char = '#')
# tissues.txt: tab-separated "code<TAB>label" lines, turned into a named
# character vector code -> label.
tissues <- scan("tissues.txt",'character',sep='\n',comment.char = '#')
tissues <- strsplit(tissues,'\t')
names(tissues) <- unlist(lapply(tissues,function(x)x[1]))
tissues <- unlist(lapply(tissues,function(x)x[2]))
# Sample columns follow "<tissue>_<timepoint>.<suffix>"; the first capture
# group extracts the tissue code.
tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
# Drop samples whose tissue code is not declared in tissues.txt.
if(!all(tissue_cols%in%names(tissues))){
  cat("WARNING: Unrecognized tissue names! Ignoring samples:\n\t")
  ignr <- colnames(reference_atlas)[!tissue_cols%in%names(tissues)]
  cat(ignr,sep='\t')
  reference_atlas <- reference_atlas[,!colnames(reference_atlas)%in%ignr, drop=FALSE]
}
# Tissues tagged '//exclude' in tissues.txt are removed from the atlas.
excluded_tissues <- names(tissues)[grep('//exclude',tissues)]
if(length(excluded_tissues)>0){
  for(i in 1:length(excluded_tissues)){
    # Recompute tissue codes each pass: columns shrink as samples drop.
    tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
    tissues_to_exclude <- grep(excluded_tissues[i],tissue_cols)
    cat(paste('Removed',length(tissues_to_exclude),gsub('//exclude','',tissues[excluded_tissues[i]]),'samples from reference atlas.\n',sep=' '))
    x <- 1:ncol(reference_atlas)
    reference_atlas <- reference_atlas[,x[!x%in%tissues_to_exclude], drop=FALSE]
  }
}
# ---- Same treatment for timepoints (timepoints.txt) ----
timepoints <- scan("timepoints.txt",'character',sep='\n',comment.char = '#')
timepoints <- strsplit(timepoints,'\t')
names(timepoints) <- unlist(lapply(timepoints,function(x)x[1]))
timepoints <- unlist(lapply(timepoints,function(x)x[2]))
# Second capture group of the sample-name pattern is the timepoint code.
timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
if(!all(timepoint_cols%in%names(timepoints))){
  cat("WARNING: Unrecognized timepoint names! Ignoring samples:\n\t")
  ignr <- colnames(reference_atlas)[!timepoint_cols%in%names(timepoints)]
  cat(ignr,sep='\t')
  reference_atlas <- reference_atlas[,!colnames(reference_atlas)%in%ignr, drop=FALSE]
}
excluded_timepoints <- names(timepoints)[grep('//exclude',timepoints)]
if(length(excluded_timepoints)>0){
  for(i in 1:length(excluded_timepoints)){
    timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
    timepoints_to_exclude <- grep(excluded_timepoints[i],timepoint_cols)
    cat(paste('Removed',length(timepoints_to_exclude),gsub('//exclude','',timepoints[excluded_timepoints[i]]),'samples from reference atlas.\n',sep=' '))
    x=1:ncol(reference_atlas)
    reference_atlas <- reference_atlas[,x[!x%in%timepoints_to_exclude], drop=FALSE]
  }
}
# Discard features whose row name contains ';'.
# NOTE(review): presumably these are ambiguous multi-gene IDs -- confirm
# against the reference-atlas ID format.
reference_atlas <- reference_atlas[grep(';',rownames(reference_atlas),invert=TRUE),]
cat("Reference atlas prepared.\nNumber of samples detected at each stage:\n")
#############################################################################################################################
#                                               PERFORMING GENE SELECTION                                                   #
#############################################################################################################################
# Final tissue/timepoint codes after all filtering above.
tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
# Normalize gene IDs to uppercase.
rownames(reference_atlas) <- toupper(rownames(reference_atlas))
# Print a per-stage tally of samples per tissue.
for(i in names(timepoints)){
  cat(timepoints[i],'\n',sep='')
  tbl <- table(tissue_cols[grep(i,timepoint_cols)])
  if(length(tbl)==0){
    cat('\tNo samples')
  }else{
    cat(paste('\t',tissues[sort(names(tbl))],': ',tbl[sort(names(tbl))],'\n',sep=''))
  }
}
# ---- Identify tissue-enriched genes at each developmental stage ----
# For every timepoint (optionally buffered with flanking stages) and every
# tissue with at least one sample, a gene is "enriched" when its mean
# expression in that tissue exceeds the out-of-tissue mean by
# 'minimum_fold' (log2) AND a per-gene one-way ANOVA (in- vs. out-of-tissue)
# survives multiple-testing correction at 'maximum_pvalue'.
cat('\nIdentifying tissue-enriched gene expression...\n')
enriched <- list()
# Thresholds are compared on the log2 scale; note this overwrites the
# configured (linear) value of minimum_fold.
minimum_fold <- log2(minimum_fold)
for(stage in seq_along(timepoints)){
  enriched[[names(timepoints)[stage]]] <- list()
  # NOTE(review): assumes timepoint identifiers are the integers 1..N so
  # that stage +/- 1 names the adjacent stages -- confirm timepoints.txt.
  if(flanking){
    samples <- which(timepoint_cols%in%as.character((stage-1):(stage+1)))
  }else{
    samples <- which(timepoint_cols==as.character(stage))
  }
  subreference_atlas <- reference_atlas[,samples, drop=FALSE]
  for(tissue in names(tissues)){
    tissuereps <- grep(paste('^',tissue,'_',sep=''),colnames(subreference_atlas),ignore.case = F,value = T)
    nottissuereps <- colnames(subreference_atlas)[!colnames(subreference_atlas)%in%tissuereps]
    if(length(tissuereps)==0){
      next
    }
    # Bug fix: the original used "collapse <- ','", which *assigns* ','
    # and passes it as paste()'s unnamed 'sep' argument, so the sample
    # names were never joined into a single comma-separated string.
    cat(paste(tissuereps,collapse=','),'\nvs.\n',paste(nottissuereps,collapse=','),'\n',sep='')
    # log2 fold change of in-tissue vs. out-of-tissue means; the +.1
    # pseudocount guards against division by zero.
    foldchanges <- log2(rowMeans(subreference_atlas[,tissuereps, drop=FALSE]+.1)/rowMeans(subreference_atlas[,nottissuereps, drop=FALSE]+.1))
    spec_fc <- names(which(foldchanges>=minimum_fold))
    if(length(spec_fc)==0){
      # No gene passed the fold-change filter; record an empty set rather
      # than running apply()/p.adjust() on a zero-row data frame.
      cat(0,'\n\n')
      enriched[[names(timepoints)[stage]]][[tissue]] <- character(0)
      next
    }
    subsubreference_atlas <- subreference_atlas[spec_fc,c(tissuereps,nottissuereps)]
    # Per-gene one-way ANOVA: expression ~ in/out-of-tissue group.
    pvals <- apply(subsubreference_atlas,1,function(x){
      dt <- data.frame(values=as.numeric(x),group=c(rep('in',length(tissuereps)),rep('out',length(nottissuereps))),stringsAsFactors = F)
      dt.aov <- aov(dt$values ~ dt$group)
      return(as.numeric(unlist(summary(dt.aov))['Pr(>F)1']))
    })
    pvals <- p.adjust(pvals,method = testing_correction)
    # which() drops NA p-values (e.g. ANOVA with no residual df), which the
    # original 'pvals[pvals < ...]' subsetting would have kept as NA entries.
    pvalhits <- names(which(pvals<maximum_pvalue))
    cat(length(pvalhits),'\n\n')
    if(disjoint_sets){
      # NOTE(review): disjoint() expects a *list* of sets but receives a
      # character vector here (its names() is NULL, so it returns the input
      # unchanged). Behavior kept as-is -- confirm the intended call site.
      pvalhits <- disjoint(pvalhits)
    }
    enriched[[names(timepoints)[stage]]][[tissue]] <- pvalhits
  }
}
# ---- Write outputs ----
# Background set: every gene that survived filtering, one per line.
cat('',file = 'all_tested_genes.txt',append = F)  # truncate/create the file
cat(paste(rownames(reference_atlas),collapse='\n'),file = 'all_tested_genes.txt',append=T)
cat("All genes in the reference atlas listed in 'all_tested_genes.txt'.\n")
# Enriched sets: one "<timepoint>_<tissue><TAB>gene1,gene2,..." line each.
cat('',file = 'enriched_genes.txt',append = F)    # truncate/create the file
for(i in names(timepoints)){
  for(j in names(enriched[[i]])){
    cat(paste(paste(i,j,sep='_'),paste(enriched[[i]][[j]],collapse=','),sep='\t'),'\n',sep='',file = 'enriched_genes.txt',append=T)
  }
}
cat("Tissue-enriched transcripts saved in 'enriched_genes.txt'.") | /generate-tissue-enriched-genes.R | no_license | Nodine-Group/tissue-enrichment-test | R | false | false | 7,699 | r | ##########################
# STATISTICAL THRESHOLDS #
#############################################################################################################################
minimum_fold <- 8 # Smallest fold change to be considered 'enriched' in a tissue
testing_correction <- 'fdr' # P-value adjustment for multiple testing correction
maximum_pvalue <- 0.001 # Largest adjusted P-value to be considered significant in ANOVA
flanking <- TRUE # If TRUE, buffers temporal changes by including adjacent developmental timepoints in analysis
disjoint_sets <- FALSE # If TRUE, ensures that all tissue-enriched gene sets for each stage are disjoint (non-overlapping)
reference_name <- 'reference_atlas'
#############################################################################################################################
# FUNCTIONS #
#############################################################################################################################
# Takes a list of sets and returns the same list with all elements shared between any sets removed
# Takes a list of sets and returns the same list with all elements shared
# between any sets removed (each returned set keeps only the elements that
# appear in no other input set).
#
# Args:
#   input_list: named list of vectors (sets).
# Returns: the list with every shared element removed from every set.
disjoint <- function(input_list){
  disjoint_list <- input_list
  for(i in names(input_list)){
    for(j in names(input_list)[names(input_list)!=i]){
      # Bug fix: compare against the *original* sets. The old code compared
      # against disjoint_list[[j]]; once a shared element had been removed
      # from the first set it was no longer detected in later sets, so the
      # result was not actually disjoint.
      disjoint_list[[i]] <- disjoint_list[[i]][!disjoint_list[[i]]%in%input_list[[j]]]
    }
  }
  return(disjoint_list)
}
#############################################################################################################################
# LOADING ENVIRONMENT #
#############################################################################################################################
args = commandArgs(trailingOnly=TRUE)
if(length(args)==1){
reference_name = gsub('^datasets/','',gsub('\\.(tsv|description)$','',args[1]))
}
if(grepl('/',reference_name)){
stop("Reference table must be located in the local directory 'datasets'. Please check setup instructions (See README for details)")
}
cat('Loading reference atlas...\n')
reference_atlas <- read.delim(paste("datasets/",reference_name,".tsv",sep=''),header = T,stringsAsFactors = F,comment.char = '#')
tissues <- scan("tissues.txt",'character',sep='\n',comment.char = '#')
tissues <- strsplit(tissues,'\t')
names(tissues) <- unlist(lapply(tissues,function(x)x[1]))
tissues <- unlist(lapply(tissues,function(x)x[2]))
tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
if(!all(tissue_cols%in%names(tissues))){
cat("WARNING: Unrecognized tissue names! Ignoring samples:\n\t")
ignr <- colnames(reference_atlas)[!tissue_cols%in%names(tissues)]
cat(ignr,sep='\t')
reference_atlas <- reference_atlas[,!colnames(reference_atlas)%in%ignr, drop=FALSE]
}
excluded_tissues <- names(tissues)[grep('//exclude',tissues)]
if(length(excluded_tissues)>0){
for(i in 1:length(excluded_tissues)){
tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
tissues_to_exclude <- grep(excluded_tissues[i],tissue_cols)
cat(paste('Removed',length(tissues_to_exclude),gsub('//exclude','',tissues[excluded_tissues[i]]),'samples from reference atlas.\n',sep=' '))
x <- 1:ncol(reference_atlas)
reference_atlas <- reference_atlas[,x[!x%in%tissues_to_exclude], drop=FALSE]
}
}
timepoints <- scan("timepoints.txt",'character',sep='\n',comment.char = '#')
timepoints <- strsplit(timepoints,'\t')
names(timepoints) <- unlist(lapply(timepoints,function(x)x[1]))
timepoints <- unlist(lapply(timepoints,function(x)x[2]))
timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
if(!all(timepoint_cols%in%names(timepoints))){
cat("WARNING: Unrecognized timepoint names! Ignoring samples:\n\t")
ignr <- colnames(reference_atlas)[!timepoint_cols%in%names(timepoints)]
cat(ignr,sep='\t')
reference_atlas <- reference_atlas[,!colnames(reference_atlas)%in%ignr, drop=FALSE]
}
excluded_timepoints <- names(timepoints)[grep('//exclude',timepoints)]
if(length(excluded_timepoints)>0){
for(i in 1:length(excluded_timepoints)){
timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
timepoints_to_exclude <- grep(excluded_timepoints[i],timepoint_cols)
cat(paste('Removed',length(timepoints_to_exclude),gsub('//exclude','',timepoints[excluded_timepoints[i]]),'samples from reference atlas.\n',sep=' '))
x=1:ncol(reference_atlas)
reference_atlas <- reference_atlas[,x[!x%in%timepoints_to_exclude], drop=FALSE]
}
}
reference_atlas <- reference_atlas[grep(';',rownames(reference_atlas),invert=TRUE),]
cat("Reference atlas prepared.\nNumber of samples detected at each stage:\n")
#############################################################################################################################
# PERFORMING GENE SELECTION #
#############################################################################################################################
tissue_cols <- gsub('^(.+?)_?(.+)\\..*$','\\1',colnames(reference_atlas))
timepoint_cols <- gsub('^(.+?)_?(.+)\\..*$','\\2',colnames(reference_atlas))
rownames(reference_atlas) <- toupper(rownames(reference_atlas))
for(i in names(timepoints)){
cat(timepoints[i],'\n',sep='')
tbl <- table(tissue_cols[grep(i,timepoint_cols)])
if(length(tbl)==0){
cat('\tNo samples')
}else{
cat(paste('\t',tissues[sort(names(tbl))],': ',tbl[sort(names(tbl))],'\n',sep=''))
}
}
# ---- Identify tissue-enriched genes at each developmental stage ----
# For every timepoint (optionally buffered with flanking stages) and every
# tissue with at least one sample, a gene is "enriched" when its mean
# expression in that tissue exceeds the out-of-tissue mean by
# 'minimum_fold' (log2) AND a per-gene one-way ANOVA (in- vs. out-of-tissue)
# survives multiple-testing correction at 'maximum_pvalue'.
cat('\nIdentifying tissue-enriched gene expression...\n')
enriched <- list()
# Thresholds are compared on the log2 scale; note this overwrites the
# configured (linear) value of minimum_fold.
minimum_fold <- log2(minimum_fold)
for(stage in seq_along(timepoints)){
  enriched[[names(timepoints)[stage]]] <- list()
  # NOTE(review): assumes timepoint identifiers are the integers 1..N so
  # that stage +/- 1 names the adjacent stages -- confirm timepoints.txt.
  if(flanking){
    samples <- which(timepoint_cols%in%as.character((stage-1):(stage+1)))
  }else{
    samples <- which(timepoint_cols==as.character(stage))
  }
  subreference_atlas <- reference_atlas[,samples, drop=FALSE]
  for(tissue in names(tissues)){
    tissuereps <- grep(paste('^',tissue,'_',sep=''),colnames(subreference_atlas),ignore.case = F,value = T)
    nottissuereps <- colnames(subreference_atlas)[!colnames(subreference_atlas)%in%tissuereps]
    if(length(tissuereps)==0){
      next
    }
    # Bug fix: the original used "collapse <- ','", which *assigns* ','
    # and passes it as paste()'s unnamed 'sep' argument, so the sample
    # names were never joined into a single comma-separated string.
    cat(paste(tissuereps,collapse=','),'\nvs.\n',paste(nottissuereps,collapse=','),'\n',sep='')
    # log2 fold change of in-tissue vs. out-of-tissue means; the +.1
    # pseudocount guards against division by zero.
    foldchanges <- log2(rowMeans(subreference_atlas[,tissuereps, drop=FALSE]+.1)/rowMeans(subreference_atlas[,nottissuereps, drop=FALSE]+.1))
    spec_fc <- names(which(foldchanges>=minimum_fold))
    if(length(spec_fc)==0){
      # No gene passed the fold-change filter; record an empty set rather
      # than running apply()/p.adjust() on a zero-row data frame.
      cat(0,'\n\n')
      enriched[[names(timepoints)[stage]]][[tissue]] <- character(0)
      next
    }
    subsubreference_atlas <- subreference_atlas[spec_fc,c(tissuereps,nottissuereps)]
    # Per-gene one-way ANOVA: expression ~ in/out-of-tissue group.
    pvals <- apply(subsubreference_atlas,1,function(x){
      dt <- data.frame(values=as.numeric(x),group=c(rep('in',length(tissuereps)),rep('out',length(nottissuereps))),stringsAsFactors = F)
      dt.aov <- aov(dt$values ~ dt$group)
      return(as.numeric(unlist(summary(dt.aov))['Pr(>F)1']))
    })
    pvals <- p.adjust(pvals,method = testing_correction)
    # which() drops NA p-values (e.g. ANOVA with no residual df), which the
    # original 'pvals[pvals < ...]' subsetting would have kept as NA entries.
    pvalhits <- names(which(pvals<maximum_pvalue))
    cat(length(pvalhits),'\n\n')
    if(disjoint_sets){
      # NOTE(review): disjoint() expects a *list* of sets but receives a
      # character vector here (its names() is NULL, so it returns the input
      # unchanged). Behavior kept as-is -- confirm the intended call site.
      pvalhits <- disjoint(pvalhits)
    }
    enriched[[names(timepoints)[stage]]][[tissue]] <- pvalhits
  }
}
cat('',file = 'all_tested_genes.txt',append = F)
cat(paste(rownames(reference_atlas),collapse='\n'),file = 'all_tested_genes.txt',append=T)
cat("All genes in the reference atlas listed in 'all_tested_genes.txt'.\n")
cat('',file = 'enriched_genes.txt',append = F)
for(i in names(timepoints)){
for(j in names(enriched[[i]])){
cat(paste(paste(i,j,sep='_'),paste(enriched[[i]][[j]],collapse=','),sep='\t'),'\n',sep='',file = 'enriched_genes.txt',append=T)
}
}
cat("Tissue-enriched transcripts saved in 'enriched_genes.txt'.") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{igraph-vs-indexing2}
\alias{igraph-vs-indexing2}
\alias{[[.igraph.vs}
\title{Select vertices and show their metadata}
\usage{
\method{[[}{igraph.vs}(x, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{...}{Additional arguments, passed to \code{[}.}
}
\value{
The double bracket operator returns another vertex sequence,
with meta-data (attribute) printing turned on. See details below.
}
\description{
The double bracket operator can be used on vertex sequences, to print
the meta-data (vertex attributes) of the vertices in the sequence.
}
\details{
Technically, when used with vertex sequences, the double bracket
operator does exactly the same as the single bracket operator,
but the resulting vertex sequence is printed differently: all
attributes of the vertices in the sequence are printed as well.
See \code{\link{[.igraph.vs}} for more about indexing vertex sequences.
}
\examples{
g <- make_ring(10) \%>\%
set_vertex_attr("color", value = "red") \%>\%
set_vertex_attr("name", value = LETTERS[1:10])
V(g)
V(g)[[]]
V(g)[1:5]
V(g)[[1:5]]
}
\seealso{
Other vertex and edge sequences: \code{\link{E}},
\code{\link{V}}, \code{\link{igraph-es-attributes}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-attributes}},
\code{\link{igraph-vs-indexing}},
\code{\link{print.igraph.es}},
\code{\link{print.igraph.vs}}
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}},
\code{\link{unique.igraph.vs}}
}
| /CRAN/contrib/igraph/man/igraph-vs-indexing2.Rd | no_license | PRL-PRG/dyntrace-instrumented-packages | R | false | true | 2,042 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{igraph-vs-indexing2}
\alias{igraph-vs-indexing2}
\alias{[[.igraph.vs}
\title{Select vertices and show their metadata}
\usage{
\method{[[}{igraph.vs}(x, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{...}{Additional arguments, passed to \code{[}.}
}
\value{
The double bracket operator returns another vertex sequence,
with meta-data (attribute) printing turned on. See details below.
}
\description{
The double bracket operator can be used on vertex sequences, to print
the meta-data (vertex attributes) of the vertices in the sequence.
}
\details{
Technically, when used with vertex sequences, the double bracket
operator does exactly the same as the single bracket operator,
but the resulting vertex sequence is printed differently: all
attributes of the vertices in the sequence are printed as well.
See \code{\link{[.igraph.vs}} for more about indexing vertex sequences.
}
\examples{
g <- make_ring(10) \%>\%
set_vertex_attr("color", value = "red") \%>\%
set_vertex_attr("name", value = LETTERS[1:10])
V(g)
V(g)[[]]
V(g)[1:5]
V(g)[[1:5]]
}
\seealso{
Other vertex and edge sequences: \code{\link{E}},
\code{\link{V}}, \code{\link{igraph-es-attributes}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-attributes}},
\code{\link{igraph-vs-indexing}},
\code{\link{print.igraph.es}},
\code{\link{print.igraph.vs}}
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}},
\code{\link{unique.igraph.vs}}
}
|
\name{removeDuplicatedRulesDT}
\alias{removeDuplicatedRulesDT}
\title{REMOVE DUPLICATED RULES FROM DECISION TABLE}
\description{It returns a new Decision Table object without duplicated rules.}
\usage{
removeDuplicatedRulesDT(object)
}
\arguments{
\item{object}{A Decision Table object}
}
\value{It returns a Decision Table object without duplicated rules.}
\references{Pawlak, Zdzislaw 1991 \emph{Rough Sets: Theoretical Aspects of Reasoning About Data} Dordrecht: Kluwer Academic Publishing.}
\author{Alber Sanchez \email{alber.sanchez@uni-muenster.de}}
\seealso{
\code{\link{ConditionReduct-class}}
}
\examples{
exampleMatrix2 <- matrix(c(1,1,0,1,1,2,2,0,0,0,1,1,1,2,0,0,0,
0,0,0,2,1,0,0,1,2,2,2,1,1,0,0,2,2,2),ncol = 5)
dt <- new(Class="DecisionTable",decisionTable = exampleMatrix2)
dtUnique <- removeDuplicatedRulesDT(dt)
}
\keyword{logic}
\keyword{rough}
\keyword{set}
| /man/removeDuplicatedRulesDT.Rd | no_license | albhasan/RoughSetKnowledgeReduction | R | false | false | 880 | rd | \name{removeDuplicatedRulesDT}
\alias{removeDuplicatedRulesDT}
\title{REMOVE DUPLICATED RULES FROM DECISION TABLE}
\description{It returns a new Decision Table object without duplicated rules.}
\usage{
removeDuplicatedRulesDT(object)
}
\arguments{
\item{object}{A Decision Table object}
}
\value{It returns a Decision Table object without duplicated rules.}
\references{Pawlak, Zdzislaw 1991 \emph{Rough Sets: Theoretical Aspects of Reasoning About Data} Dordrecht: Kluwer Academic Publishing.}
\author{Alber Sanchez \email{alber.sanchez@uni-muenster.de}}
\seealso{
\code{\link{ConditionReduct-class}}
}
\examples{
exampleMatrix2 <- matrix(c(1,1,0,1,1,2,2,0,0,0,1,1,1,2,0,0,0,
0,0,0,2,1,0,0,1,2,2,2,1,1,0,0,2,2,2),ncol = 5)
dt <- new(Class="DecisionTable",decisionTable = exampleMatrix2)
dtUnique <- removeDuplicatedRulesDT(dt)
}
\keyword{logic}
\keyword{rough}
\keyword{set}
|
\name{Menu.contour}
\alias{Menu.contour}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Contour plots for linear models with quantitative regressors}
\description{Contour plots for linear models with quantitative regressors}
\section{Overview}{
This dialog allows to specify pairs of numeric regressors for which the
response surface is to be displayed. Each display can be a contour plot
with or without image colours, a 3D-perspective plot or an image plot.
For looking at several plots simultaneously, numbers of rows and columns
can be specified. On OK, all plots are generated. One should always choose
the number of rows and columns such that all requested plots fit on one display,
otherwise the early plots will be overwritten by the later plots.
The functions behind the functionality are \code{\link[rsm:contour.lm]{contour}},
\code{persp} and \code{image}.
The second tab on the dialogue allows to specify at which values variables not
in the plot are to be fixed. The default is the average for numeric variables,
the first level for factors.
}
\section{Inputs}{
\describe{
\item{number of rows and number of columns}{
These are used in the \code{mfrow} function for creating the
row and column layout for arranging several plots.\cr
Within an R session, modifications are retained for further
calls of the dialog.}
\item{Select plot type }{Contour plots show contours of constant height,
3D perspective plots show the surface in mesh form; both come with or
without coloring. The image plots
show colours only, without contours. It is mainly
a matter of taste which plots to use.\cr
Within an R session, modifications are retained for further
calls of the dialog.}
\item{Select pairs for plotting }{
The default choice plots pairs for all numeric factors.
This can be too many plots for one page in case of many factors. \cr
For the top radio button, pairs can be conveniently formed from two groups
of variables: \cr
1: each pair within group 1\cr
2: each pair within both groups 1 and 2 (but not pairs \emph{between} groups)\cr
3: each pair that involves any factor from group 1\cr
4. each pair between groups 1 and 2 (but no pair \emph{within} any group)\cr
Initially, all variables are in group 1. They can be moved between groups
by selecting them with the mouse and moving them with the arrow buttons.
In case of the bottom (manual selection) radio button choice,
move selected 2-factor pairs between
the available list (those are not selected) and the selected list by
selecting them with the mouse (multiple selections possible) and
moving them with the arrow buttons.\cr
These choices have to be redone with each call to the dialog.}
\item{Modify slice positions }{
Each response surface varies the values of two numeric variables,
keeping all other variables fixed. These fixed values are also called the
slice positions. The default slice positions are the average for numeric variables
and the first level for factors. These can be modified on the second tab of
this dialog. Within an R session, modifications are retained for further
calls of the dialog w.r.t the same model.}
}
}
\author{ Ulrike Groemping }
\seealso{ See also \code{\link[rsm:contour.lm]{contour}} for the functions
that do the actual plotting}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ array }% __ONLY ONE__ keyword per line
| /man/Menu.contour.Rd | no_license | cran/RcmdrPlugin.DoE | R | false | false | 3,732 | rd | \name{Menu.contour}
\alias{Menu.contour}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Contour plots for linear models with quantitative regressors}
\description{Contour plots for linear models with quantitative regressors}
\section{Overview}{
This dialog allows to specify pairs of numeric regressors for which the
response surface is to be displayed. Each display can be a contour plot
with or without image colours, a 3D-perspective plot or an image plot.
For looking at several plots simultaneously, numbers of rows and columns
can be specified. On OK, all plots are generated. One should always choose
the number of rows and columns such that all requested plots fit on one display,
otherwise the early plots will be overwritten by the later plots.
The functions behind the functionality are \code{\link[rsm:contour.lm]{contour}},
\code{persp} and \code{image}.
The second tab on the dialogue allows to specify at which values variables not
in the plot are to be fixed. The default is the average for numeric variables,
the first level for factors.
}
\section{Inputs}{
\describe{
\item{number of rows and number of columns}{
These are used in the \code{mfrow} function for creating the
row and column layout for arranging several plots.\cr
Within an R session, modifications are retained for further
calls of the dialog.}
\item{Select plot type }{Contour plots show contours of constant height,
3D perspective plots show the surface in mesh form; both come with or
without coloring. The image plots
show colours only, without contours. It is mainly
a matter of taste which plots to use.\cr
Within an R session, modifications are retained for further
calls of the dialog.}
\item{Select pairs for plotting }{
The default choice plots pairs for all numeric factors.
This can be too many plots for one page in case of many factors. \cr
For the top radio button, pairs can be conveniently formed from two groups
of variables: \cr
1: each pair within group 1\cr
2: each pair within both groups 1 and 2 (but not pairs \emph{between} groups)\cr
3: each pair that involves any factor from group 1\cr
4. each pair between groups 1 and 2 (but no pair \emph{within} any group)\cr
Initially, all variables are in group 1. They can be moved between groups
by selecting them with the mouse and moving them with the arrow buttons.
In case of the bottom (manual selection) radio button choice,
move selected 2-factor pairs between
the available list (those are not selected) and the selected list by
selecting them with the mouse (multiple selections possible) and
moving them with the arrow buttons.\cr
These choices have to be redone with each call to the dialog.}
\item{Modify slice positions }{
Each response surface varies the values of two numeric variables,
keeping all other variables fixed. These fixed values are also called the
slice positions. The default slice positions are the average for numeric variables
and the first level for factors. These can be modified on the second tab of
this dialog. Within an R session, modifications are retained for further
calls of the dialog w.r.t the same model.}
}
}
\author{ Ulrike Groemping }
\seealso{ See also \code{\link[rsm:contour.lm]{contour}} for the functions
that do the actual plotting}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ array }% __ONLY ONE__ keyword per line
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse `i`:
##   set(y)          - replace the stored matrix and invalidate the cache
##   get()           - return the stored matrix
##   setInverse(inv) - store a computed inverse in the cache
##   getInverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    x <<- y
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  # BUG FIX: `get` was never defined locally, so `list(get = get)` captured
  # base::get() and x$get() failed with 'argument "x" is missing'.
  get <- function() x
  setInverse <- function(inverse) i <<- inverse
  getInverse <- function() i
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix(), computing it at most once.
##
## On a cache hit the stored inverse is returned directly (with a message);
## on a miss the inverse is computed with solve(), cached in the object,
## and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse, store it, and return it.
  mat <- x$get()
  cached <- solve(mat, ...)
  x$setInverse(cached)
  cached
}
| /cachematrix.R | no_license | SaranyaMN/ProgrammingAssignment2 | R | false | false | 785 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse `i`:
##   set(y)          - replace the stored matrix and invalidate the cache
##   get()           - return the stored matrix
##   setInverse(inv) - store a computed inverse in the cache
##   getInverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    x <<- y
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  # BUG FIX: `get` was never defined locally, so `list(get = get)` captured
  # base::get() and x$get() failed with 'argument "x" is missing'.
  get <- function() x
  setInverse <- function(inverse) i <<- inverse
  getInverse <- function() i
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix(), computing it at most once.
##
## On a cache hit the stored inverse is returned directly (with a message);
## on a miss the inverse is computed with solve(), cached in the object,
## and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse, store it, and return it.
  mat <- x$get()
  cached <- solve(mat, ...)
  x$setInverse(cached)
  cached
}
|
\name{gsl-deprecated}
\title{gsl-deprecated}
\alias{gsl-deprecated}
\alias{legendre_Plm_array}
\alias{legendre_Plm_deriv_array}
\alias{legendre_sphPlm_array}
\alias{legendre_sphPlm_deriv_array}
\alias{legendre_array_size}
\alias{deprecated_legendre}
\description{
Deprecated Legendre functions as per the Gnu Scientific Library reference manual
section 7.24.
}
\usage{
legendre_Plm_array(...)
legendre_Plm_deriv_array(...)
legendre_sphPlm_array(...)
legendre_sphPlm_deriv_array(...)
legendre_array_size(...)
deprecated_legendre(...)
}
\arguments{
\item{...}{(ignored)}
}
\note{
As of GSL-2.1, functions
\itemize{
\item\code{gsl_sf_legendre_Plm_array}
\item\code{gsl_sf_legendre_Plm_deriv_array}
\item\code{gsl_sf_legendre_sphPlm_array}
\item\code{gsl_sf_legendre_sphPlm_deriv_array}
\item\code{gsl_sf_legendre_array_size}
}
are deprecated. This functionality is now provided in GSL by the
\code{gsl_sf_legendre_array} suite of functions; in R, use one of:
\itemize{
\item\code{legendre_array()}
\item\code{legendre_deriv_array()}
\item\code{legendre_deriv_alt_array()}
\item\code{legendre_deriv2_array()}
\item\code{legendre_deriv2_alt_array()}.
}
These are documented under \code{?Legendre}.
}
\references{\url{http://www.gnu.org/software/gsl}}
\author{Robin K. S. Hankin}
\seealso{\code{\link{Legendre}}}
\keyword{array}
| /man/gsl-deprecated.Rd | no_license | nsunami/gsl | R | false | false | 1,369 | rd | \name{gsl-deprecated}
\title{gsl-deprecated}
\alias{gsl-deprecated}
\alias{legendre_Plm_array}
\alias{legendre_Plm_deriv_array}
\alias{legendre_sphPlm_array}
\alias{legendre_sphPlm_deriv_array}
\alias{legendre_array_size}
\alias{deprecated_legendre}
\description{
Deprecated Legendre functions as per the Gnu Scientific Library reference manual
section 7.24.
}
\usage{
legendre_Plm_array(...)
legendre_Plm_deriv_array(...)
legendre_sphPlm_array(...)
legendre_sphPlm_deriv_array(...)
legendre_array_size(...)
deprecated_legendre(...)
}
\arguments{
\item{...}{(ignored)}
}
\note{
As of GSL-2.1, functions
\itemize{
\item\code{gsl_sf_legendre_Plm_array}
\item\code{gsl_sf_legendre_Plm_deriv_array}
\item\code{gsl_sf_legendre_sphPlm_array}
\item\code{gsl_sf_legendre_sphPlm_deriv_array}
\item\code{gsl_sf_legendre_array_size}
}
are deprecated. This functionality is now provided in GSL by the
\code{gsl_sf_legendre_array} suite of functions; in R, use one of:
\itemize{
\item\code{legendre_array()}
\item\code{legendre_deriv_array()}
\item\code{legendre_deriv_alt_array()}
\item\code{legendre_deriv2_array()}
\item\code{legendre_deriv2_alt_array()}.
}
These are documented under \code{?Legendre}.
}
\references{\url{http://www.gnu.org/software/gsl}}
\author{Robin K. S. Hankin}
\seealso{\code{\link{Legendre}}}
\keyword{array}
|
# Attach the packages required by the analysis scripts.
library(knitr)
library(readxl)
library(dplyr)
library(ggplot2)
##library(biogas)
# Load latest biogas package functions
# Source every development script from the local folder instead of attaching
# the released biogas package (the library() call above is kept commented out).
ff <- list.files("../biogas functions", full.names = TRUE)
for (i in ff) source(i)
| /scripts/packages.R | no_license | Hannah-L-Hansen/RogDproject | R | false | false | 232 | r | #Gets necessary packages
# Attach the packages required by the analysis scripts.
library(knitr)
library(readxl)
library(dplyr)
library(ggplot2)
##library(biogas)
# Load latest biogas package functions
# Source every development script from the local folder instead of attaching
# the released biogas package (the library() call above is kept commented out).
ff <- list.files("../biogas functions", full.names = TRUE)
for (i in ff) source(i)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEG.R
\name{tools.DEG.Nanostring}
\alias{tools.DEG.Nanostring}
\title{Compute the pvalues of genes being up and downregulated for Nanostring datasets.}
\usage{
tools.DEG.Nanostring(raw.data, tool, data, tool_norm)
}
\arguments{
\item{raw.data}{rcc type file. List of 4 elements:
Samples.IDs is a dataframe with four columns: sample name, status (WildType, Mutated), filename.
rcc.df contains expression levels with samples in column and genes in rows.
annots.df is a dataframe with 3 columns: geneclass (endogenous, housekeeping, negative, positive), gene name, and the accession number.
FOV is a dataframe that contains Field of views information.}
\item{tool}{Method to use to compute the pvalues of differentially expressed genes.
"Wilcox" uses the wilcoxDEG() function implemented in this very same package.
"limma" uses the function DEG_limma() that comes from the limma package.
"RankProduct" and "RankSum" perform, respectively, a Rank Product and a Rank Sum analysis with the RankProducts() function from the RankProd package.
"desq2" uses the DESeq() function from the DESeq2 package. This last one is particular, because if it is chosen it will ignore the argument "tool_norm"}
\item{data}{Dataframe of gene expression levels with sample names in columns and genes in rows}
\item{tool_norm}{Normalization tool used previously in the tools.norm.Nanostring() function}
}
\value{
A dataframe with genes in rows and pvalues of a gene being upregulated and downregulated in columns
}
\description{
Through different functions contained in several packages, this function computes pvalues of differentially expressed genes
}
\examples{
# Import the dataset
Data = matrix(runif(5000, 10, 100), ncol=20)
group = paste0(rep(c("control", "case"), each = 10),rep(c(1:10),each = 1))
genes <- paste0(rep(LETTERS[1:25], each=10), rep(c(1:10),each = 1))
colnames(Data) = group
row.names(Data) = genes
# Normalizing data using one method
# Norm.data = tools.norm.Nanostring(raw.data = Data, tool = "nappa.NS")
# Analyze normalized data with one DEG method
#res.DEG = tools.DEG.Nanostring(raw.data = Data,
# data = Norm.data,
# tool_norm = "nappa.NS",
# tool = "RankProduct")
}
| /man/tools.DEG.Nanostring.Rd | no_license | acolajanni/GENEXPRESSO | R | false | true | 2,352 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEG.R
\name{tools.DEG.Nanostring}
\alias{tools.DEG.Nanostring}
\title{Compute the pvalues of genes being up and downregulated for Nanostring datasets.}
\usage{
tools.DEG.Nanostring(raw.data, tool, data, tool_norm)
}
\arguments{
\item{raw.data}{rcc type file. List of 4 elements:
Samples.IDs is a dataframe with four columns: sample name, status (WildType, Mutated), filename.
rcc.df contains expression levels with samples in column and genes in rows.
annots.df is a dataframe with 3 columns: geneclass (endogenous, housekeeping, negative, positive), gene name, and the accession number.
FOV is a dataframe that contains Field of views information.}
\item{tool}{Method to use to compute the pvalues of differentially expressed genes.
"Wilcox" uses the wilcoxDEG() function implemented in this very same package.
"limma" uses the function DEG_limma() that comes from the limma package.
"RankProduct" and "RankSum" perform, respectively, a Rank Product and a Rank Sum analysis with the RankProducts() function from the RankProd package.
"desq2" uses the DESeq() function from the DESeq2 package. This last one is particular, because if it is chosen it will ignore the argument "tool_norm"}
\item{data}{Dataframe of gene expression levels with sample names in columns and genes in rows}
\item{tool_norm}{Normalization tool used previously in the tools.norm.Nanostring() function}
}
\value{
A dataframe with genes in rows and pvalues of a gene being upregulated and downregulated in columns
}
\description{
Through different functions contained in several packages, this function computes pvalues of differentially expressed genes
}
\examples{
# Import the dataset
Data = matrix(runif(5000, 10, 100), ncol=20)
group = paste0(rep(c("control", "case"), each = 10),rep(c(1:10),each = 1))
genes <- paste0(rep(LETTERS[1:25], each=10), rep(c(1:10),each = 1))
colnames(Data) = group
row.names(Data) = genes
# Normalizing data using one method
# Norm.data = tools.norm.Nanostring(raw.data = Data, tool = "nappa.NS")
# Analyze normalized data with one DEG method
#res.DEG = tools.DEG.Nanostring(raw.data = Data,
# data = Norm.data,
# tool_norm = "nappa.NS",
# tool = "RankProduct")
}
|
# Auto-generated fuzz-harness input (AFL/valgrind test case): A is a 5x7
# numeric matrix whose first four entries are extreme doubles (near-denormal,
# huge, tiny) and whose remaining entries are zero; B is a 1x1 zero matrix.
testlist <- list(A = structure(c(2.32784507357645e-308, 2.92950674282246e+296, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Call the internal (unexported) match_rows() routine with the fuzzed
# arguments; crash/UB detection is handled by the external harness.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104818-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r | testlist <- list(A = structure(c(2.32784507357645e-308, 2.92950674282246e+296, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Call the internal (unexported) match_rows() routine with the fuzzed
# arguments defined in `testlist` above.
result <- do.call(multivariance:::match_rows,testlist)
# Print the structure of the result; crash/UB detection is handled by the
# external valgrind/AFL harness.
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wm_common_id.R
\name{wm_common_id}
\alias{wm_common_id}
\title{Get vernacular names from an AphiaID}
\usage{
wm_common_id(id, ...)
}
\arguments{
\item{id}{(numeric/integer) an AphiaID. required.}
\item{...}{named curl options. see \code{\link[curl]{curl_options}}}
}
\description{
Get vernacular names from an AphiaID
}
\examples{
wm_common_id(id = 105706)
wm_common_id(id = 156806)
wm_common_id(id = 397065)
}
| /man/wm_common_id.Rd | permissive | stevenysw/worrms | R | false | true | 491 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wm_common_id.R
\name{wm_common_id}
\alias{wm_common_id}
\title{Get vernacular names from an AphiaID}
\usage{
wm_common_id(id, ...)
}
\arguments{
\item{id}{(numeric/integer) an AphiaID. required.}
\item{...}{named curl options. see \code{\link[curl]{curl_options}}}
}
\description{
Get vernacular names from an AphiaID
}
\examples{
wm_common_id(id = 105706)
wm_common_id(id = 156806)
wm_common_id(id = 397065)
}
|
#' @rdname ecg_mod
#' @export
ecg_UI <- function(id) {
  # Namespace all input/output IDs so the module can be embedded safely.
  ns <- NS(id)
  # Build the two panes separately, then assemble the ECG tab.
  param_sidebar <- sidebarPanel(width = 3,
                                uiOutput(ns("uio_ecg_param_selector")))
  plot_main <- mainPanel(uiOutput(ns("uio_ECG")))
  tabPanel(title = "| ECG |",
           sidebarLayout(param_sidebar, plot_main))
}
#' ECG Submodule
#'
#' Functions to enable the electrocardiogram (ECG) submodule of the patient profile.
#'
#' @param input,output,session Shiny server arguments.
#' @inheritParams patientProfile_mod
#'
#' @rdname ecg_mod
#' @export
ecg_mod <- function(input, output, session, uid, ADEG){
  # Reactive: table of ECG parameters (PARAMCD/PARAM) that have at least one
  # non-missing AVAL for the currently selected subject `uid()`.
  ecg_choices <- reactive({
    ecg_list <- list()
    # ecg_allowed <- c("QTCF", "QRSDUR", "QTMEAN", "PRMEAN", "HRMEAN", "RRMEAN")
    ecg_param_tab <- ADEG() %>%
      filter(USUBJID == uid() & !is.na(AVAL)) %>% # & PARAMCD %in% ecg_allowed) %>%
      distinct(PARAMCD, PARAM)
    # NOTE(review): console print looks like leftover debug output.
    print(ecg_param_tab)
    ecg_list$ecg_params <- ecg_param_tab
    # NOTE(review): named argument in return() is unusual; presumably
    # return(ecg_list) was intended -- confirm.
    return(ecg_list = ecg_list)
  })
  # Dropdown of available ECG parameters, rebuilt whenever ecg_choices() changes.
  output$uio_ecg_param_selector <- renderUI({
    ns <- session$ns
    tagList(
      selectInput(inputId = ns("param_ecg"), label = "Select parameters",
                  choices = ecg_choices()$ecg_params$PARAM, selected = ecg_choices()$ecg_params$PARAM[1])
      # choices = ecg_choices_tab$PARAM, selected = ecg_choice_tab$PARAM[1])
    )
  })
  # Render the ECG plot for the parameter chosen in the selector above.
  output$uio_ECG <- renderUI({
    ns <- session$ns
    # NOTE(review): console print looks like leftover debug output.
    print("renderUI: ECG")
    ecgpar <- input$param_ecg
    # Map the selected PARAM label back to its PARAMCD code for plot_ECG().
    paramcd <- ecg_choices()$ecg_params %>% filter(PARAM == ecgpar)
    paramcd <- paramcd$PARAMCD
    output$ecg_plot <- renderPlot({
      p7 <- plot_ECG(uid = uid(), paramcd = paramcd,#input$param_ecg,
                     ADEG = ADEG())[["plot"]]
      return(p7)
    })
    plotOutput(ns("ecg_plot"))
  })
}
| /R/module_ECG.R | permissive | vineetrepository/patprofile | R | false | false | 1,798 | r | #' @rdname ecg_mod
#' @export
ecg_UI <- function(id) {
  # Namespace all input/output IDs so the module can be embedded safely.
  ns <- NS(id)
  # Build the two panes separately, then assemble the ECG tab.
  param_sidebar <- sidebarPanel(width = 3,
                                uiOutput(ns("uio_ecg_param_selector")))
  plot_main <- mainPanel(uiOutput(ns("uio_ECG")))
  tabPanel(title = "| ECG |",
           sidebarLayout(param_sidebar, plot_main))
}
#' ECG Submodule
#'
#' Functions to enable the electrocardiogram (ECG) submodule of the patient profile.
#'
#' @param input,output,session Shiny server arguments.
#' @inheritParams patientProfile_mod
#'
#' @rdname ecg_mod
#' @export
ecg_mod <- function(input, output, session, uid, ADEG){
  # Reactive: table of ECG parameters (PARAMCD/PARAM) that have at least one
  # non-missing AVAL for the currently selected subject `uid()`.
  ecg_choices <- reactive({
    ecg_list <- list()
    # ecg_allowed <- c("QTCF", "QRSDUR", "QTMEAN", "PRMEAN", "HRMEAN", "RRMEAN")
    ecg_param_tab <- ADEG() %>%
      filter(USUBJID == uid() & !is.na(AVAL)) %>% # & PARAMCD %in% ecg_allowed) %>%
      distinct(PARAMCD, PARAM)
    # NOTE(review): console print looks like leftover debug output.
    print(ecg_param_tab)
    ecg_list$ecg_params <- ecg_param_tab
    # NOTE(review): named argument in return() is unusual; presumably
    # return(ecg_list) was intended -- confirm.
    return(ecg_list = ecg_list)
  })
  # Dropdown of available ECG parameters, rebuilt whenever ecg_choices() changes.
  output$uio_ecg_param_selector <- renderUI({
    ns <- session$ns
    tagList(
      selectInput(inputId = ns("param_ecg"), label = "Select parameters",
                  choices = ecg_choices()$ecg_params$PARAM, selected = ecg_choices()$ecg_params$PARAM[1])
      # choices = ecg_choices_tab$PARAM, selected = ecg_choice_tab$PARAM[1])
    )
  })
  # Render the ECG plot for the parameter chosen in the selector above.
  output$uio_ECG <- renderUI({
    ns <- session$ns
    # NOTE(review): console print looks like leftover debug output.
    print("renderUI: ECG")
    ecgpar <- input$param_ecg
    # Map the selected PARAM label back to its PARAMCD code for plot_ECG().
    paramcd <- ecg_choices()$ecg_params %>% filter(PARAM == ecgpar)
    paramcd <- paramcd$PARAMCD
    output$ecg_plot <- renderPlot({
      p7 <- plot_ECG(uid = uid(), paramcd = paramcd,#input$param_ecg,
                     ADEG = ADEG())[["plot"]]
      return(p7)
    })
    plotOutput(ns("ecg_plot"))
  })
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.documentation.R
\docType{data}
\name{dietswap}
\alias{dietswap}
\title{Diet swap study data}
\format{The data set in \code{\link{phyloseq-class}} format.}
\usage{
data(dietswap)
}
\description{
The diet swap data set represents a study with African and African American groups undergoing a two-week diet swap.
For details, see \url{http://www.nature.com/ncomms/2015/150428/ncomms7342/full/ncomms7342.html}.
}
\details{
The data is also available for download from the Data Dryad repository \url{http://datadryad.org/resource/doi:10.5061/dryad.1mn1n}. See \code{\link{download_dietswap}}.
}
\author{
Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\references{
O'Keefe et al. Nature Communications 6:6342, 2015.
\url{http://www.nature.com/ncomms/2015/150428/ncomms7342/full/ncomms7342.html}
To cite the microbiome R package, see citation('microbiome')
}
\keyword{data}
| /man/dietswap.Rd | no_license | TTloveTT/microbiome | R | false | true | 962 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.documentation.R
\docType{data}
\name{dietswap}
\alias{dietswap}
\title{Diet swap study data}
\format{The data set in \code{\link{phyloseq-class}} format.}
\usage{
data(dietswap)
}
\description{
The diet swap data set represents a study with African and African American groups undergoing a two-week diet swap.
For details, see \url{http://www.nature.com/ncomms/2015/150428/ncomms7342/full/ncomms7342.html}.
}
\details{
The data is also available for download from the Data Dryad repository \url{http://datadryad.org/resource/doi:10.5061/dryad.1mn1n}. See \code{\link{download_dietswap}}.
}
\author{
Leo Lahti \email{microbiome-admin@googlegroups.com}
}
\references{
O'Keefe et al. Nature Communications 6:6342, 2015.
\url{http://www.nature.com/ncomms/2015/150428/ncomms7342/full/ncomms7342.html}
To cite the microbiome R package, see citation('microbiome')
}
\keyword{data}
|
do.PCA <- function(
    DF.input     = NULL,
    scale.or.not = FALSE
    ) {

    # Run a principal component analysis on the columns x1 and x2 of
    # DF.input and return the resulting stats::prcomp object.
    #
    # Args:
    #   DF.input:     data frame containing numeric columns x1 and x2.
    #   scale.or.not: forwarded to prcomp(scale.); TRUE standardizes each
    #                 variable to unit variance before the rotation.
    #
    # Returns:
    #   The "prcomp" object (sdev, rotation, etc.), after printing it for
    #   logging purposes.

    thisFunctionName <- "do.PCA";
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###");
    cat(paste0("\n# ",thisFunctionName,"() starts.\n"));

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    # CLEANUP: removed require(geigen) -- the geigen package was only
    # referenced by commented-out generalized-eigenproblem exploration code,
    # and require() would merely warn (not fail) when the package is absent.

    results.prcomp <- stats::prcomp(
        formula = ~ x1 + x2,
        data    = DF.input,
        scale.  = scale.or.not
        );

    cat("\nresults.prcomp\n");
    print( results.prcomp );

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    cat(paste0("\n# ",thisFunctionName,"() exits."));
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###\n");
    return( results.prcomp );

    }
| /projects/StatCan/partial-least-squares/code/do-PCA.R | no_license | paradisepilot/statistics | R | false | false | 1,888 | r |
do.PCA <- function(
    DF.input     = NULL,
    scale.or.not = FALSE
    ) {

    # Run a principal component analysis on the columns x1 and x2 of
    # DF.input and return the resulting stats::prcomp object.
    #
    # Args:
    #   DF.input:     data frame containing numeric columns x1 and x2.
    #   scale.or.not: forwarded to prcomp(scale.); TRUE standardizes each
    #                 variable to unit variance before the rotation.
    #
    # Returns:
    #   The "prcomp" object (sdev, rotation, etc.), after printing it for
    #   logging purposes.

    thisFunctionName <- "do.PCA";
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###");
    cat(paste0("\n# ",thisFunctionName,"() starts.\n"));

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    # CLEANUP: removed require(geigen) -- the geigen package was only
    # referenced by commented-out generalized-eigenproblem exploration code,
    # and require() would merely warn (not fail) when the package is absent.

    results.prcomp <- stats::prcomp(
        formula = ~ x1 + x2,
        data    = DF.input,
        scale.  = scale.or.not
        );

    cat("\nresults.prcomp\n");
    print( results.prcomp );

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
    cat(paste0("\n# ",thisFunctionName,"() exits."));
    cat("\n### ~~~~~~~~~~~~~~~~~~~~ ###\n");
    return( results.prcomp );

    }
|
# convert points into lines for ArcMap
# library() (rather than require()) so a missing package fails loudly up front
library(shapefiles)
library(maptools)
library(rgdal)
library(geosphere)
options(stringsAsFactors = FALSE)
options(digits = 9)  # keep full precision on lat/lon coordinates
# get data
path.dat <- read.csv("C:/Users/ned.laman/Desktop/Baker Products/AI1991_2018_towpaths_corrected.csv", header = TRUE)
haul.dat <- read.csv("C:/Users/ned.laman/Desktop/Baker Products/AI1991_2018_linepaths_corrected.csv", header = TRUE)
# eliminate NAs in incoming data ### these mostly originate from allowing hauls with missing parameters (e.g., wire length or
# bottom depth) necessary for the correction computation to make it into the data set being passed
# NOTE(review): with `|` a row is kept when AT LEAST ONE coordinate is non-NA, i.e. only
# rows where all four coordinates are NA get dropped; if the intent is to drop rows with
# ANY missing coordinate, the conditions should be joined with `&` -- confirm against the
# record counts noted below before changing
path.dat <- path.dat[!is.na(path.dat$c_lon_1) | !is.na(path.dat$c_lat_1) | !is.na(path.dat$c_lon_2) | !is.na(path.dat$c_lat_2), ] # 7975 records eliminated 6/18/20
haul.dat <- haul.dat[!is.na(haul.dat$c_start_lon) | !is.na(haul.dat$c_start_lat) | !is.na(haul.dat$c_end_lon) | !is.na(haul.dat$c_end_lat), ] # 16 records eliminated 6/18/20
# identify destination folders for the shapefile output
output.arcgis.paths.file <- "C:/Users/ned.laman/Desktop/Baker Products/towpaths"
output.arcmap.lines.file <- "C:/Users/ned.laman/Desktop/Baker Products/towlines"
################################# frame data for towpaths ###############################
# the data coming in from the CSV contain adjacent points as pairs; only the first
# position of each pair is used here
arcgis.shp <- data.frame(Id = path.dat$hauljoin, X = path.dat$c_lon_1, Y = path.dat$c_lat_1)
# eliminate records without a hauljoin
arcgis.shp <- arcgis.shp[!is.na(arcgis.shp$Id),] # a data frame with 3 variables (Id, X, Y)
# just need column of unique hauljoins here
arcgis.dbf <- data.frame(Id = unique(path.dat$hauljoin))
# old school method for creating the towpaths
# maptools::readShapeLines is deprecated
# first convert points to shapefile where type 3 means polyLine
arcgis.shapefile <- shapefiles::convert.to.shapefile(shpTable = arcgis.shp, attTable = arcgis.dbf, field = "Id", type = 3)
# write the shapefile to the filename in out.name; arcgis = TRUE replaces "." with "_" in column names for ArcGIS
shapefiles::write.shapefile(shapefile = arcgis.shapefile, out.name = paste0(output.arcgis.paths.file, "/towpath"), arcgis = TRUE)
# DEPRECATED writes shapefile at end of path, but really doesn't...I think it makes a memory object accessible in the next step
# arcgis.shapefile <- maptools::readShapeLines(output.arcgis.paths.file, proj4string=CRS("+proj=longlat +datum=NAD83"))
# reads shapefile named above and assigns projection (P4)
rgdal.shp <- rgdal::readOGR(dsn = output.arcgis.paths.file, layer = "towpath", p4s = "+proj=longlat +datum=NAD83")
# re-writes shapefile using ESRI driver, includes projection, overwrites anything same named at the end of the path
rgdal::writeOGR(rgdal.shp, output.arcgis.paths.file, "c_towpaths", driver = "ESRI Shapefile", overwrite_layer = TRUE)
# note that when charting the corrected positions the corrected towpath looks like it is in front of the vessel positions
# the phenomenon is actually that the vessel starts at a point but has to travel the distance the net is set behind the vessel
# before arriving at the corrected position where the net touched down at on bottom
################################# frame data for towlines ###############################
# the data coming in from the CSV contain start and end points of a linear towpath;
# emit two rows per haul (corrected on-bottom position first, then off-bottom) in
# sorted hauljoin order.  Built vectorized instead of rbind()-ing inside a loop,
# which was O(n^2) and forced Id/X/Y through a character round trip.
# NOTE(review): assumes one record per hauljoin in haul.dat (match() takes the first
# occurrence); the original loop effectively required unique hauljoins as well.
hj.sorted <- sort(unique(haul.dat$hauljoin))
idx <- match(hj.sorted, haul.dat$hauljoin)
on.bottom <- data.frame(Id = haul.dat$hauljoin[idx],
                        X = haul.dat$c_start_lon[idx],
                        Y = haul.dat$c_start_lat[idx],
                        OB_FB = "on bottom")
off.bottom <- data.frame(Id = haul.dat$hauljoin[idx],
                         X = haul.dat$c_end_lon[idx],
                         Y = haul.dat$c_end_lat[idx],
                         OB_FB = "off bottom")
# interleave rows so each haul contributes its on-bottom point immediately followed
# by its off-bottom point, matching the original row order
n.haul <- length(idx)
pos.dat <- rbind(on.bottom, off.bottom)[as.vector(rbind(seq_len(n.haul), seq_len(n.haul) + n.haul)), ]
rownames(pos.dat) <- NULL
# eliminate any records without a hauljoin
pos.dat <- pos.dat[!is.na(pos.dat$Id), ]
# just need column of unique hauljoins here
arcmap.dbf <- data.frame(Id = unique(pos.dat$Id))
# old school method for creating the towpaths
# maptools::readShapeLines is deprecated
# first convert points to shapefile where type 3 means polyLine
arcmap.shapefile <- shapefiles::convert.to.shapefile(shpTable = pos.dat, attTable = arcmap.dbf, field = "Id", type = 3)
# write the shapefile; arcgis = TRUE replaces "." with "_" in column names for ArcGIS
shapefiles::write.shapefile(shapefile = arcmap.shapefile, out.name = paste0(output.arcmap.lines.file, "/linepath"), arcgis = TRUE)
# reads shapefile named above and assigns projection (P4)
rgdal.shp <- rgdal::readOGR(dsn = output.arcmap.lines.file, layer = "linepath", p4s = "+proj=longlat +datum=NAD83")
# re-writes shapefile using ESRI driver, includes projection, overwrites anything same named at the end of the path
rgdal::writeOGR(rgdal.shp, output.arcmap.lines.file, "c_linepaths", driver = "ESRI Shapefile", overwrite_layer = TRUE)
| /convert.points.to.paths.061820.R | no_license | Ned-Laman-NOAA/Trawlability | R | false | false | 5,552 | r | # convert points into lines for ArcMap
# library() (rather than require()) so a missing package fails loudly up front
library(shapefiles)
library(maptools)
library(rgdal)
library(geosphere)
options(stringsAsFactors = FALSE)
options(digits = 9)  # keep full precision on lat/lon coordinates
# get data
path.dat <- read.csv("C:/Users/ned.laman/Desktop/Baker Products/AI1991_2018_towpaths_corrected.csv", header = TRUE)
haul.dat <- read.csv("C:/Users/ned.laman/Desktop/Baker Products/AI1991_2018_linepaths_corrected.csv", header = TRUE)
# eliminate NAs in incoming data ### these mostly originate from allowing hauls with missing parameters (e.g., wire length or
# bottom depth) necessary for the correction computation to make it into the data set being passed
# NOTE(review): with `|` a row is kept when AT LEAST ONE coordinate is non-NA, i.e. only
# rows where all four coordinates are NA get dropped; if the intent is to drop rows with
# ANY missing coordinate, the conditions should be joined with `&` -- confirm against the
# record counts noted below before changing
path.dat <- path.dat[!is.na(path.dat$c_lon_1) | !is.na(path.dat$c_lat_1) | !is.na(path.dat$c_lon_2) | !is.na(path.dat$c_lat_2), ] # 7975 records eliminated 6/18/20
haul.dat <- haul.dat[!is.na(haul.dat$c_start_lon) | !is.na(haul.dat$c_start_lat) | !is.na(haul.dat$c_end_lon) | !is.na(haul.dat$c_end_lat), ] # 16 records eliminated 6/18/20
# identify destination folders for the shapefile output
output.arcgis.paths.file <- "C:/Users/ned.laman/Desktop/Baker Products/towpaths"
output.arcmap.lines.file <- "C:/Users/ned.laman/Desktop/Baker Products/towlines"
################################# frame data for towpaths ###############################
# the data coming in from the CSV contain adjacent points as pairs; only the first
# position of each pair is used here
arcgis.shp <- data.frame(Id = path.dat$hauljoin, X = path.dat$c_lon_1, Y = path.dat$c_lat_1)
# eliminate records without a hauljoin
arcgis.shp <- arcgis.shp[!is.na(arcgis.shp$Id),] # a data frame with 3 variables (Id, X, Y)
# just need column of unique hauljoins here
arcgis.dbf <- data.frame(Id = unique(path.dat$hauljoin))
# old school method for creating the towpaths
# maptools::readShapeLines is deprecated
# first convert points to shapefile where type 3 means polyLine
arcgis.shapefile <- shapefiles::convert.to.shapefile(shpTable = arcgis.shp, attTable = arcgis.dbf, field = "Id", type = 3)
# write the shapefile to the filename in out.name; arcgis = TRUE replaces "." with "_" in column names for ArcGIS
shapefiles::write.shapefile(shapefile = arcgis.shapefile, out.name = paste0(output.arcgis.paths.file, "/towpath"), arcgis = TRUE)
# DEPRECATED writes shapefile at end of path, but really doesn't...I think it makes a memory object accessible in the next step
# arcgis.shapefile <- maptools::readShapeLines(output.arcgis.paths.file, proj4string=CRS("+proj=longlat +datum=NAD83"))
# reads shapefile named above and assigns projection (P4)
rgdal.shp <- rgdal::readOGR(dsn = output.arcgis.paths.file, layer = "towpath", p4s = "+proj=longlat +datum=NAD83")
# re-writes shapefile using ESRI driver, includes projection, overwrites anything same named at the end of the path
rgdal::writeOGR(rgdal.shp, output.arcgis.paths.file, "c_towpaths", driver = "ESRI Shapefile", overwrite_layer = TRUE)
# note that when charting the corrected positions the corrected towpath looks like it is in front of the vessel positions
# the phenomenon is actually that the vessel starts at a point but has to travel the distance the net is set behind the vessel
# before arriving at the corrected position where the net touched down at on bottom
################################# frame data for towlines ###############################
# the data coming in from the CSV contain start and end points of a linear towpath;
# emit two rows per haul (corrected on-bottom position first, then off-bottom) in
# sorted hauljoin order.  Built vectorized instead of rbind()-ing inside a loop,
# which was O(n^2) and forced Id/X/Y through a character round trip.
# NOTE(review): assumes one record per hauljoin in haul.dat (match() takes the first
# occurrence); the original loop effectively required unique hauljoins as well.
hj.sorted <- sort(unique(haul.dat$hauljoin))
idx <- match(hj.sorted, haul.dat$hauljoin)
on.bottom <- data.frame(Id = haul.dat$hauljoin[idx],
                        X = haul.dat$c_start_lon[idx],
                        Y = haul.dat$c_start_lat[idx],
                        OB_FB = "on bottom")
off.bottom <- data.frame(Id = haul.dat$hauljoin[idx],
                         X = haul.dat$c_end_lon[idx],
                         Y = haul.dat$c_end_lat[idx],
                         OB_FB = "off bottom")
# interleave rows so each haul contributes its on-bottom point immediately followed
# by its off-bottom point, matching the original row order
n.haul <- length(idx)
pos.dat <- rbind(on.bottom, off.bottom)[as.vector(rbind(seq_len(n.haul), seq_len(n.haul) + n.haul)), ]
rownames(pos.dat) <- NULL
# eliminate any records without a hauljoin
pos.dat <- pos.dat[!is.na(pos.dat$Id), ]
# just need column of unique hauljoins here
arcmap.dbf <- data.frame(Id = unique(pos.dat$Id))
# old school method for creating the towpaths
# maptools::readShapeLines is deprecated
# first convert points to shapefile where type 3 means polyLine
arcmap.shapefile <- shapefiles::convert.to.shapefile(shpTable = pos.dat, attTable = arcmap.dbf, field = "Id", type = 3)
# write the shapefile; arcgis = TRUE replaces "." with "_" in column names for ArcGIS
shapefiles::write.shapefile(shapefile = arcmap.shapefile, out.name = paste0(output.arcmap.lines.file, "/linepath"), arcgis = TRUE)
# reads shapefile named above and assigns projection (P4)
rgdal.shp <- rgdal::readOGR(dsn = output.arcmap.lines.file, layer = "linepath", p4s = "+proj=longlat +datum=NAD83")
# re-writes shapefile using ESRI driver, includes projection, overwrites anything same named at the end of the path
rgdal::writeOGR(rgdal.shp, output.arcmap.lines.file, "c_linepaths", driver = "ESRI Shapefile", overwrite_layer = TRUE)
|
#### Test 64-bit date-time functions.
## R's internal fixes are used on 32-bit platforms.
## macOS gets these wrong: see HAVE_WORKING_64BIT_MKTIME
## The test dates (1848 and 2040) lie outside the range representable by a
## signed 32-bit time_t, so conversion must go through the 64-bit code paths.
## c(unclass(z)) prints the raw numeric value (seconds from the epoch) for
## comparison against the saved reference output.
Sys.setenv(TZ = "UTC")
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
## Repeat in a zone with DST transitions (July vs January differ where DST applies).
Sys.setenv(TZ = "Europe/London") # pretty much portable.
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
Sys.setenv(TZ = "EST5EDT")
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
## PR15613: had day as > 24hrs.
as.POSIXlt(ISOdate(2071,1,13,0,0,tz="Etc/GMT-1"))$wday
as.POSIXlt(ISOdate(2071,1,13,0,1,tz="Etc/GMT-1"))$wday
## Incorrect use of %d should work even though abbreviation does match
old <- Sys.setlocale("LC_TIME", "C") # to be sure
stopifnot(!is.na(strptime("11-August-1903", "%d-%b-%Y")))
| /R-Portable/tests/datetime.R | permissive | ksasso/Electron_ShinyApp_Deployment | R | false | false | 1,051 | r | #### Test 64-bit date-time functions.
## R's internal fixes are used on 32-bit platforms.
## macOS gets these wrong: see HAVE_WORKING_64BIT_MKTIME
## The test dates (1848 and 2040) lie outside the range representable by a
## signed 32-bit time_t, so conversion must go through the 64-bit code paths.
## c(unclass(z)) prints the raw numeric value (seconds from the epoch) for
## comparison against the saved reference output.
Sys.setenv(TZ = "UTC")
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
## Repeat in a zone with DST transitions (July vs January differ where DST applies).
Sys.setenv(TZ = "Europe/London") # pretty much portable.
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
Sys.setenv(TZ = "EST5EDT")
(z <- as.POSIXct("1848-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-01-01 12:00"))
c(unclass(z))
(z <- as.POSIXct("2040-07-01 12:00"))
c(unclass(z))
## PR15613: had day as > 24hrs.
as.POSIXlt(ISOdate(2071,1,13,0,0,tz="Etc/GMT-1"))$wday
as.POSIXlt(ISOdate(2071,1,13,0,1,tz="Etc/GMT-1"))$wday
## Incorrect use of %d should work even though abbreviation does match
old <- Sys.setlocale("LC_TIME", "C") # to be sure
stopifnot(!is.na(strptime("11-August-1903", "%d-%b-%Y")))
|
library(yardstick)
### Name: metrics
### Title: General Function to Estimate Performance
### Aliases: metrics metrics.data.frame
### ** Examples
# Accuracy and kappa
metrics(two_class_example, truth, predicted)
# Add on multinomial log loss and ROC AUC by specifying class prob columns
metrics(two_class_example, truth, predicted, Class1)
# Regression metrics
metrics(solubility_test, truth = solubility, estimate = prediction)
# Multiclass metrics work, but you cannot specify any averaging
# for roc_auc() besides the default, hand_till. Use the specific function
# if you need more customization
library(dplyr)
# compute the metrics separately within each resample (VF:L supply the
# class-probability columns, analogous to Class1 above)
hpc_cv %>%
group_by(Resample) %>%
metrics(obs, pred, VF:L) %>%
print(n = 40)
| /data/genthat_extracted_code/yardstick/examples/metrics.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 709 | r | library(yardstick)
### Name: metrics
### Title: General Function to Estimate Performance
### Aliases: metrics metrics.data.frame
### ** Examples
# Accuracy and kappa
metrics(two_class_example, truth, predicted)
# Add on multinomial log loss and ROC AUC by specifying class prob columns
metrics(two_class_example, truth, predicted, Class1)
# Regression metrics
metrics(solubility_test, truth = solubility, estimate = prediction)
# Multiclass metrics work, but you cannot specify any averaging
# for roc_auc() besides the default, hand_till. Use the specific function
# if you need more customization
library(dplyr)
# compute the metrics separately within each resample (VF:L supply the
# class-probability columns, analogous to Class1 above)
hpc_cv %>%
group_by(Resample) %>%
metrics(obs, pred, VF:L) %>%
print(n = 40)
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# fixed seed so the CV folds (and therefore the reported measures) are reproducible
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "churn")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# `<-` for assignment (was `=`); same objects, so the recorded hash is unaffected
task <- makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn <- makeLearner("classif.gamboost", par.vals = list(mstop = 200, nu = 0.1), predict.type = "prob")
#:# hash
#:# 38ee9ffbdeb015fd868160cef8428c85
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation; report accuracy, AUC, TNR, TPR, precision and F1
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# plain string literal -- the original wrapped it in a pointless paste0()
sink("sessionInfo.txt")
sessionInfo()
sink()
| /models/openml_churn/classification_class/38ee9ffbdeb015fd868160cef8428c85/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 737 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# fixed seed so the CV folds (and therefore the reported measures) are reproducible
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "churn")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# `<-` for assignment (was `=`); same objects, so the recorded hash is unaffected
task <- makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn <- makeLearner("classif.gamboost", par.vals = list(mstop = 200, nu = 0.1), predict.type = "prob")
#:# hash
#:# 38ee9ffbdeb015fd868160cef8428c85
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation; report accuracy, AUC, TNR, TPR, precision and F1
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# plain string literal -- the original wrapped it in a pointless paste0()
sink("sessionInfo.txt")
sessionInfo()
sink()
|
# PARAMETERS FOR EFA
# Central switchboard for the exploratory factor analysis: exactly one value is
# active per setting; the commented lines are the alternatives that were considered.
# Options marked REPORTED are the ones used for the reported results.
# NOTE(review): the option strings look like psych::fa() arguments (cor, rotate,
# fm, scores) -- confirm against the analysis script that sources this file.
# what correlation to use
chosen_cor <- "cor" # pearson - REPORTED
# chosen_cor <- "poly" # polychoric
# what rotation to use
chosen_rot <- "varimax" # orthogonal factors - REPORTED
# chosen_rot <- "oblimin" # correlated factors
# what factoring method to use
chosen_fm <- "minres" # minimum residuals - REPORTED
# chosen_fm <- "ols" # ordinary least squares using empirical first derivative
# chosen_fm <- "wls" # weighted least squares
# chosen_fm <- "gls" # generalized weighted least squares
# chosen_fm <- "pa" # principal factors
# chosen_fm <- "ml" # maximum likelihood
# chosen_fm <- "minchi" # minimize ss-weighted chisq
# chosen_fm <- "minrank" # minimum rank
# chosen_fm <- "old.min" # minres < April 2017
# chosen_fm <- "alpha" # alpha fa (Kaiser & Coffey, 1965)
# what scoring method to use
chosen_scores <- "tenBerge" # correlation-preserving - REPORTED
# chosen_scores <- "regression" # regression approach | /dissertation/scripts/efa_param.R | no_license | kgweisman/dimkid | R | false | false | 946 | r | # PARAMETERS FOR EFA
# Central switchboard for the exploratory factor analysis: exactly one value is
# active per setting; the commented lines are the alternatives that were considered.
# Options marked REPORTED are the ones used for the reported results.
# NOTE(review): the option strings look like psych::fa() arguments (cor, rotate,
# fm, scores) -- confirm against the analysis script that sources this file.
# what correlation to use
chosen_cor <- "cor" # pearson - REPORTED
# chosen_cor <- "poly" # polychoric
# what rotation to use
chosen_rot <- "varimax" # orthogonal factors - REPORTED
# chosen_rot <- "oblimin" # correlated factors
# what factoring method to use
chosen_fm <- "minres" # minimum residuals - REPORTED
# chosen_fm <- "ols" # ordinary least squares using empirical first derivative
# chosen_fm <- "wls" # weighted least squares
# chosen_fm <- "gls" # generalized weighted least squares
# chosen_fm <- "pa" # principal factors
# chosen_fm <- "ml" # maximum likelihood
# chosen_fm <- "minchi" # minimize ss-weighted chisq
# chosen_fm <- "minrank" # minimum rank
# chosen_fm <- "old.min" # minres < April 2017
# chosen_fm <- "alpha" # alpha fa (Kaiser & Coffey, 1965)
# what scoring method to use
chosen_scores <- "tenBerge" # correlation-preserving - REPORTED
# chosen_scores <- "regression" # regression approach |
#' Filter a lookup table to relevant dates for WoW and YoY comparisons
#'
#' Returns the calendar rows needed for a week-over-week comparison (the final
#' week and the week before it in the current fiscal year) and a year-over-year
#' comparison (the same week number in the previous fiscal year).
#'
#' @param fiscal_calendar dataframe lookup table
#' @param final_week_num fiscal week number as character
#' @param this_fiscal_year fiscal year as character
#'
#' @return filtered data frame with date information
#' @export
#' @importFrom magrittr "%>%"
get_week_comparison_lookups = function(fiscal_calendar,
final_week_num,
this_fiscal_year) {
# check that inputs can be coerced to numeric
# NOTE(review): scalar condition -- `||` would be the idiomatic operator here
if(is.na(as.numeric(final_week_num))|
is.na(as.numeric(this_fiscal_year))) {
stop("final_week_num and this_fiscal_year should contain only numerals")
}
previous_week_num = as.character(as.numeric(final_week_num) - 1)
previous_fiscal_year = as.character(as.numeric(this_fiscal_year) - 1)
# WoW rows: the final week plus the preceding week of the current fiscal year
final_two_weeks <-
fiscal_calendar %>%
dplyr::filter(
current_week_num %in% c(
as.character(final_week_num),
as.character(previous_week_num)),
fiscal_year == as.character(this_fiscal_year)
)
# YoY rows: the same week number in the previous fiscal year
final_week_last_year <-
fiscal_calendar %>%
dplyr::filter(current_week_num == final_week_num,
fiscal_year == previous_fiscal_year)
# NOTE(review): dplyr::rbind_list() is deprecated/defunct in current dplyr;
# dplyr::bind_rows() is the documented replacement -- confirm installed version
return(dplyr::rbind_list(final_two_weeks, final_week_last_year))
} | /R/get_week_comparison_lookups.R | no_license | brianwonch/mediamunger | R | false | false | 1,391 | r | #' Filter a lookup table to relevant dates for WoW and YoY comparisons
#'
#' @param fiscal_calendar dataframe lookup table
#' @param final_week_num fiscal week number as character
#' @param this_fiscal_year fiscal year as character
#'
#' @return filtered data frame with date information
#' @export
#' @importFrom magrittr "%>%"
get_week_comparison_lookups = function(fiscal_calendar,
final_week_num,
this_fiscal_year) {
# check that inputs can be coerced to numeric
if(is.na(as.numeric(final_week_num))|
is.na(as.numeric(this_fiscal_year))) {
stop("final_week_num and this_fiscal_year should contain only numerals")
}
previous_week_num = as.character(as.numeric(final_week_num) - 1)
previous_fiscal_year = as.character(as.numeric(this_fiscal_year) - 1)
final_two_weeks <-
fiscal_calendar %>%
dplyr::filter(
current_week_num %in% c(
as.character(final_week_num),
as.character(previous_week_num)),
fiscal_year == as.character(this_fiscal_year)
)
final_week_last_year <-
fiscal_calendar %>%
dplyr::filter(current_week_num == final_week_num,
fiscal_year == previous_fiscal_year)
return(dplyr::rbind_list(final_two_weeks, final_week_last_year))
} |
\name{basicPLM}
\alias{basicPLM}
\title{
Simplified interface to PLM.
}
\description{
Simplified interface to PLM.
}
\usage{
basicPLM(pmMat, pnVec, normalize = TRUE, background = TRUE, transfo =
log2, method = c('plm', 'plmr', 'plmrr', 'plmrc'), verbose = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{pmMat}{Matrix of intensities to be processed.}
\item{pnVec}{Probeset names}
\item{normalize}{Logical flag: normalize?}
\item{background}{Logical flag: background adjustment?}
\item{transfo}{function: function to be used for data transformation
prior to summarization.}
\item{method}{Name of the method to be used for normalization. 'plm'
is the usual PLM model; 'plmr' is the (row and column) robust version of PLM; 'plmrr'
is the row-robust version of PLM; 'plmrc' is the column-robust version
of PLM.}
\item{verbose}{Logical flag: verbose.}
}
\value{
A list with the following components:
\item{Estimates}{A (length(pnVec) x ncol(pmMat)) matrix with probeset summaries.}
\item{StdErrors}{A (length(pnVec) x ncol(pmMat)) matrix with standard errors of 'Estimates'.}
\item{Residuals}{A (nrow(pmMat) x ncol(pmMat)) matrix of residuals.}
}
\author{
Benilton Carvalho
}
\note{
Currently, only RMA-bg-correction and quantile normalization are allowed.
}
\seealso{
\code{\link[preprocessCore]{rcModelPLM}},
\code{\link[preprocessCore]{rcModelPLMr}},
\code{\link[preprocessCore]{rcModelPLMrr}},
\code{\link[preprocessCore]{rcModelPLMrc}},
\code{\link{basicRMA}}
}
\examples{
set.seed(1)
pms <- 2^matrix(rnorm(1000), nc=20)
colnames(pms) <- paste("sample", 1:20, sep="")
pns <- rep(letters[1:10], each=5)
res <- basicPLM(pms, pns, TRUE, TRUE)
res[['Estimates']][1:4, 1:3]
res[['StdErrors']][1:4, 1:3]
res[['Residuals']][1:20, 1:3]
}
\keyword{manip}
| /man/basicPLM.Rd | no_license | benilton/oligo | R | false | false | 1,834 | rd | \name{basicPLM}
\alias{basicPLM}
\title{
Simplified interface to PLM.
}
\description{
Simplified interface to PLM.
}
\usage{
basicPLM(pmMat, pnVec, normalize = TRUE, background = TRUE, transfo =
log2, method = c('plm', 'plmr', 'plmrr', 'plmrc'), verbose = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{pmMat}{Matrix of intensities to be processed.}
\item{pnVec}{Probeset names}
\item{normalize}{Logical flag: normalize?}
\item{background}{Logical flag: background adjustment?}
\item{transfo}{function: function to be used for data transformation
prior to summarization.}
\item{method}{Name of the method to be used for normalization. 'plm'
is the usual PLM model; 'plmr' is the (row and column) robust version of PLM; 'plmrr'
is the row-robust version of PLM; 'plmrc' is the column-robust version
of PLM.}
\item{verbose}{Logical flag: verbose.}
}
\value{
A list with the following components:
\item{Estimates}{A (length(pnVec) x ncol(pmMat)) matrix with probeset summaries.}
\item{StdErrors}{A (length(pnVec) x ncol(pmMat)) matrix with standard errors of 'Estimates'.}
\item{Residuals}{A (nrow(pmMat) x ncol(pmMat)) matrix of residuals.}
}
\author{
Benilton Carvalho
}
\note{
Currently, only RMA-bg-correction and quantile normalization are allowed.
}
\seealso{
\code{\link[preprocessCore]{rcModelPLM}},
\code{\link[preprocessCore]{rcModelPLMr}},
\code{\link[preprocessCore]{rcModelPLMrr}},
\code{\link[preprocessCore]{rcModelPLMrc}},
\code{\link{basicRMA}}
}
\examples{
set.seed(1)
pms <- 2^matrix(rnorm(1000), nc=20)
colnames(pms) <- paste("sample", 1:20, sep="")
pns <- rep(letters[1:10], each=5)
res <- basicPLM(pms, pns, TRUE, TRUE)
res[['Estimates']][1:4, 1:3]
res[['StdErrors']][1:4, 1:3]
res[['Residuals']][1:20, 1:3]
}
\keyword{manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MapAccuracy.R
\docType{methods}
\name{getConfusionMatrix}
\alias{getConfusionMatrix}
\alias{getConfusionMatrix-generic}
\alias{getConfusionMatrix,MapAccuracy-method}
\title{Returns the object's confusion matrix}
\usage{
getConfusionMatrix(object)
\S4method{getConfusionMatrix}{MapAccuracy}(object)
}
\arguments{
\item{object}{A MapAccuracy object}
}
\description{
Returns the object's confusion matrix
}
| /man/getConfusionMatrix.Rd | permissive | albhasan/mapAccuracy | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MapAccuracy.R
\docType{methods}
\name{getConfusionMatrix}
\alias{getConfusionMatrix}
\alias{getConfusionMatrix-generic}
\alias{getConfusionMatrix,MapAccuracy-method}
\title{Returns the object's confusion matrix}
\usage{
getConfusionMatrix(object)
\S4method{getConfusionMatrix}{MapAccuracy}(object)
}
\arguments{
\item{object}{A MapAccuracy object}
}
\description{
Returns the object's confusion matrix
}
|
library(Risk)
### Name: luceg4
### Title: Luce (1980)'s Fourth Risk Measure
### Aliases: luceg4
### Keywords: Luce (1980)'s fourth risk measure
### ** Examples
# example: evaluate the measure for a normal distribution over (-Inf, Inf)
luceg4("norm",-Inf, Inf, 1, 0)
| /data/genthat_extracted_code/Risk/examples/luceg4.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 196 | r | library(Risk)
### Name: luceg4
### Title: Luce (1980)'s Fourth Risk Measure
### Aliases: luceg4
### Keywords: Luce (1980)'s fourth risk measure
### ** Examples
# example: evaluate the measure for a normal distribution over (-Inf, Inf)
luceg4("norm",-Inf, Inf, 1, 0)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/basic.R
\name{mode.value}
\alias{mode.value}
\title{Determine the mode of any given vector \code{x}}
\usage{
mode.value(x)
}
\arguments{
\item{x}{vector of values; can be integers/strings/booleans/etc.}
}
\value{
value: the value of \code{x} that occurs most often
}
\description{
Base R has no built-in function for the statistical mode (\code{mode()} reports an object's storage mode instead), so this helper fills that gap.
}
\examples{
# Construct vector where 10 appears three times.
x <- c(1:10, 10:20, 10:30)
mode.value(x)
}
| /man/mode.value.Rd | no_license | dataframing/archive | R | false | true | 523 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/basic.R
\name{mode.value}
\alias{mode.value}
\title{Determine the mode of any given vector \code{x}}
\usage{
mode.value(x)
}
\arguments{
\item{x}{vector of values; can be integers/strings/booleans/etc.}
}
\value{
value: the value of \code{x} that occurs most often
}
\description{
Base R has no built-in function for the statistical mode (\code{mode()} reports an object's storage mode instead), so this helper fills that gap.
}
\examples{
# Construct vector where 10 appears three times.
x <- c(1:10, 10:20, 10:30)
mode.value(x)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.