content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
##########################
# load and processing NLSY79
#########################
# Purpose: load the NLSY79 selection extract, merge sampling weights, and
# rename the raw survey columns into year-suffixed analysis variables
# (age, edu, income, poverty, hoursworked, mstatus, healthcare, healthw, del*).
# NOTE(review): `lookvar` and `rowscore` are presumably helpers from the
# `sdazar` package — confirm their semantics against that package.

# library
library(sdazar)
library(ggplot2)
library(zoo)

# load data
dat <- readRDS("data/nlsy79selection.rds")
nrow(dat)
wt <- readRDS("data/nlsy79weights.rds")

# Earlier one-off merge of residence type, kept for provenance:
# dat <- readRDS("data/nlsy79selection_missing.rds")
# newdata <- readRDS("data/type_residence.rds")
# setkey(dat, caseid_1979)
# setkey(newdata, caseid_1979)
# dat = newdata[dat]
# saveRDS(dat, "data/nlsy79selection.rds")

####################
# define variables
####################

# identifiers (caseid is the unique respondent id; sample_id is the subsample type)
lookvar(dat, "id")
setnames(dat, "caseid_1979", "id")
summary(dat$id)
anyDuplicated(dat[, id])
setnames(dat, "sample_id_1979", "type")
table(dat$type)

# survey waves: annual 1979-1994, then biennial 1996-2014 (26 waves total)
years = c(1979:1994, seq(1996,2014, 2))

# merge weights (data.table keyed join: wt[dat] keeps all rows of dat)
setkey(wt, id)
setkey(dat, id)
nrow(dat)
dat <- wt[dat]
nrow(dat)

# rename some demographic variables
setnames(dat, "sample_race_78scrn", "race")
setnames(dat, "sample_sex_1979", "gender")
table(dat$gender)
table(dat$race)

# collapse subsample type into 3 groups: 1-8 cross-sectional, 9-14 supplemental, rest military
dat[, sample := ifelse(type %in% 1:8, 1, ifelse(type %in% 9:14, 2, 3))]
table(dat[, .(sample)])

# age: rename the per-wave age columns to age<year>
vars <- lookvar(dat, "^age")
vars
length(vars)
nvars = paste0("age", years)
length(nvars)
setnames(dat, vars, nvars)
table(dat$age1979)
table(dat$age2014)

# missing data codes
# -1 Refused
# -2 Dont know
# -3 Invalid missing
# -4 Valid missing
# -5 Non-interview
# 1 to 4 are item non-response

# NA for non-interview cases: recode -5 to NA across every column
dat = dat[, lapply(.SD, function(x) ifelse(x == -5, NA, x))]
table(dat$age1979)
table(dat$age1980)
table(dat$age1981)

##################################################
# define variables and then change to long format
##################################################

# checklist of variables to process ("ok" = done):
# deaths ok
# incarceration ok
# age ok
# race ok
# gender ok
# parents' education ok max
# respondent's education ok
# welfare
# job
# income ok
# marriage ok
# health insurance (1984) ok
# health issues (job) ok
# substance use (1980)
# cigarette use
# delinquency (1980) ok
# violence
# locus of control
# criminal contact ok
# weights ok

# parents' education: take the max of mother's and father's highest grade
lookvar(dat, "hgc")
setnames(dat, c("hgc-mother_1979", "hgc-father_1979"), c("medu", "fedu"))
table(dat$fedu)
table(dat$medu)
dat[, c("fedu", "medu") := lapply(.SD, function(x) ifelse(x %in% c(-4, -3, -2, -1), NA, x)), .SDcols = c("fedu", "medu")]
dat[, pedu := pmax(fedu, medu, na.rm = TRUE)]
table(dat$pedu)

# education (remaining "hgc" columns are the respondent's per-wave highest grade)
vars = lookvar(dat, "hgc")
setnames(dat, vars, paste0("edu", years))
table(dat$edu1979)
table(dat$edu2014)

# welfare (pending), transform this variable separately
# vars = paste0("welfare-amt-", years, "_revised_xrnd")
# nvars = paste0("welfare.years")
# setnames(dat, vars, nvars)
# lookvar(dat, "welfare")
# names(dat)

# income (total net family income)
vars = lookvar(dat, "tnfi")
setnames(dat, vars, paste0("income", years))
summary(dat$income1979)
summary(dat$income2014)

# poverty
vars = lookvar(dat, "povst")
setnames(dat, vars, paste0("poverty", years))
table(dat$poverty1979)
table(dat$poverty2014)

# job (hours worked per wave)
vars = lookvar(dat, "hrs")
nvars = paste0("hoursworked", years)
length(vars)
length(nvars)
setnames(dat, vars, nvars)
# vars = lookvar(dat, "esr")
# nvars = paste0("working", years[years<2007])
# setnames(dat, vars, nvars)
table(dat$hoursworked1979)
# hist(dat$hoursworked1979)
# dat[, mean(hoursworked1979), esr_col_1979]

# marriage
vars = lookvar(dat, "marst")
nvars = paste0("mstatus", years)
setnames(dat, vars, nvars)

# health coverage (only asked after 1988, and not in 1991)
vars = lookvar(dat, "q11-79")
nvars = paste0("healthcare", years[years > 1988 & years != 1991])
length(nvars)
length(vars)
setnames(dat, vars, nvars)

# health problems (work-limiting health)
vars = lookvar(dat, "q11-4")
nvars = paste0("healthw", years)
setnames(dat, vars, nvars)
table(dat$healthw1979)
table(dat$healthw2014)

# delinquency (1980 module): 17 items, negatives = item non-response -> NA
vars = lookvar(dat, "(delin-[1][0-9])|(delin-[4-9])|delin-20")
length(vars)
nvars = paste0("del", 1:17)
setnames(dat, vars, nvars)
dat[, (nvars) := lapply(.SD, function(x) ifelse(x < 0, NA, x)), .SDcols = nvars]
table(dat$del1)
table(dat$del17)
# total delinquency score; NOTE(review): rowscore is presumably a sdazar
# row-sum helper, p = 0 likely the allowed proportion of missing items — confirm
dat[, deltot := rowscore(dat, nvars, p = 0)] # all
summary(dat$deltot)
# hist(log(dat$deltot))

# criminal justice experiences (ever in jail/prison by 1980, and release year)
lookvar(dat, "police")
table(dat[, "police-7_1980", with = FALSE])
table(dat[, "police-7c_y_1980", with = FALSE])
setnames(dat, "police-7_1980" , "ever_prison1980")
setnames(dat, "police-7c_y_1980" , "ever_prison1980_release")

# weights
vars = lookvar(dat, "sampweight")
nvars = paste0("wgt", years)
setnames(dat, vars, nvars)

# type of residence (key incarceration variable)
vars = lookvar(dat, "hh1")
nvars = paste0("resid", years)
setnames(dat, vars, nvars)
table(dat$resid1979)
table(dat$resid2000)

# code of jail / prison per wave; length must equal length(years) == 26
jail = c(3, rep(5, 25)) # different coding during first year, 3 versus 5
resid = lookvar(dat, "resid")
length(jail)
length(resid)

# prison<year> = 1 if residence type equals that wave's jail/prison code
for (i in seq_along(years)) {
  dat[, paste0("prison", years[i]) := ifelse(get(resid[i]) == jail[i], 1, 0)]
}
table(dat$resid2004 )
table(dat$prison2004)
table(dat$resid1979 )
table(dat$prison1979 )

# deaths: reason-for-non-interview (rni) code 65 = deceased
# (years[-1] because rni only exists from the second wave onward)
rni = lookvar(dat, "rni")
length(rni)
death = paste0("death", years[-1])
death
table(dat$rni_1980)
dat[, (death) := lapply(.SD, function(x) ifelse(x == 65, 1, 0)), .SDcols = rni]
table(dat$death1980)
table(dat$death2014)

# not interviewed people because incarcerated (rni code 73)
rni = lookvar(dat, "rni")
length(rni)
incarcerated = paste0("incarcerated", years[-1])
length(incarcerated)
dat[, (incarcerated) := lapply(.SD, function(x) ifelse(x == 73, 1, 0)), .SDcols = rni]
dat[, (incarcerated) := lapply(.SD, function(x) ifelse(is.na(x), 0, x)), .SDcols = incarcerated]
table(dat$incarcerated1980)
table(dat$prison2014)

# different records: cross-check residence-based vs rni-based incarceration
table(dat[, .(incarcerated2006, prison2006)])
table(dat[, .(incarcerated2014, prison2014)])

# non-response (excluding incarceration): other rni codes treated as non-response
rni = lookvar(dat, "rni")
length(rni)
nonresponse = paste0("nonresponse", years[-1])
length(nonresponse)
values = c(60:64, 66:67, 71:72, 74)
dat[, (nonresponse) := lapply(.SD, function(x) ifelse(x %in% values, 1, 0)), .SDcols = rni]
dat[, (nonresponse) := lapply(.SD, function(x) ifelse(is.na(x), 0, x)), .SDcols = nonresponse]
table(dat$nonresponse1980)
table(dat$nonresponse2012)

#########################################
# set long format
########################################

# create null variables for the base year (no rni exists for 1979)
vars = c("nonresponse1979", "incarcerated1979", "death1979")
dat[, (vars) := 0]
dat[, rni_1979 := -4]

# pad healthcare to all years so the melt patterns align across waves.
# NOTE(review): vars[-which(vars %in% ovars)] errors if no element matches
# (negative empty index selects nothing) — setdiff(vars, ovars) would be safer.
vars = paste0("healthcare", years)
ovars = lookvar(dat, "healthcare")
dat[, (vars[-which(vars %in% ovars)]) := NA]
lookvar(dat, "care")

# create variable names: for each stem, assign a vector of per-year column
# names into the global variables listed in list_names (rage, rprison, ...)
list_variables = c('age', 'prison', 'income', 'poverty', 'healthcare', 'nonresponse', 'incarcerated', 'death', 'mstatus', 'edu', 'healthw', 'wgt', 'hoursworked', 'rni_', 'resid')
list_names = c('rage', 'rprison', 'rinc', 'rpov', 'rhealthcare', 'rnonresponse', 'rincarcerated', 'rdeath', 'rmstatus', 'redu', 'rhealthw', 'rwgt', 'rhoursworked', 'rrni', 'rresid')

# helper kept for reference; the loop below inlines the same paste0 call
create_name_years = function(x, years) { paste0(x, years) }

for (i in seq_along(list_variables)) {
  assign(list_names[i], paste0(list_variables[i], years))
}

lookvar(dat, "ever")
vars = c("id", "sample", "wt", "cluster", "stratum", "gender", "race", "pedu", "ever_prison1980", "ever_prison1980_release", "deltot", rrni, rresid, rhealthw, rhealthcare, rwgt, rhoursworked, rmstatus, redu, rage, rnonresponse, rprison, rincarcerated, rdeath, rinc, rpov)
# vars
wdat = copy(dat[, vars, with = FALSE])
names(wdat)
nrow(wdat) # 12686

# patterns and variables
setkey(wdat, id)

# same order: patt (regexes over wide columns) must align 1:1 with vnames
patt = c("^rni", "^resid", "^nonresponse", "^incarcerated", "^death", "^edu", "^age", "^prison", "^income", "^poverty", "^mstatus", "^healthw", "^healthcare", "^wgt", "^hoursworked")
vnames = c("whynr", "resid", "nonresponse", "incarcerated", "death", "edu", "age", "prison", "income", "poverty", "mstatus", "healthw", "healthcare", "wgt", "hoursworked")
length(patt) == length(vnames)

# adjust variable types before melting (melt requires consistent types per measure set)
vars = names(wdat)[names(wdat) %like% '^rni_']
wdat[, c(vars) := lapply(.SD, as.numeric), .SDcols = vars]
vars = names(wdat)[names(wdat) %like% '^healthcare[0-9]+']
wdat[, c(vars) := lapply(.SD, as.numeric), .SDcols = vars]

# melt data: wide -> long, one row per person-wave; "time" is the wave index 1..26
vars = c("id", "sample", "stratum", "cluster", "wt", "gender", "race", "pedu", "deltot", "ever_prison1980", "ever_prison1980_release")
ldat = data.table::melt(wdat, id.vars = vars, measure.vars = patterns(patt), value.name = vnames, variable.name = "time")
head(ldat)

# set variables in long format: map wave index back to calendar year
for (i in seq_along(years)) {
  ldat[time == i, year := years[i]]
}
table(ldat[, .(year)])

# explore cases (spot-check one random respondent)
ids = unique(ldat$id)
sid = sample(ids, 1)
ldat[id == sid, .(id, deltot, wgt, year, age, gender, nonresponse, prison, incarcerated, death, poverty, income)]

# death: lead the death flag so the event is recorded at the interval start
setkey(ldat, id, year)
table(ldat$death)
ldat[, ldeath := shift(death, type = "lead"), by = id]
table(ldat$ldeath)

# last-observation-carried-forward; leading NAs are backfilled from the first
# non-NA value (cumsum(a)+1 indexes into c(1, 1:sum(a)))
replace_na_with_last = function(x,a=!is.na(x)) {
  x[which(a)[c(1,1:sum(a))][cumsum(a)+1]]
}

ldat[, ldeath := replace_na_with_last(ldeath), by = id]
table(ldat$ldeath)
table(ldat$death)
sid = sample(ids, 1)
ldat[id == sid, .(id, death, ldeath)]
# keep rows up to and including the first death record (drop repeats after death)
ldat[, cumdeath := cumsum(ldeath), by = id]
table(ldat$cumdeath)
ldat = ldat[cumdeath < 2]
table(ldat$cumdeath) # 816
table(ldat$ldeath)

# define non-response and dropouts: a wave counts as a response if age was
# observed or the person was known incarcerated
ldat[, response := 0]
ldat[!is.na(age), response := 1]
ldat[incarcerated == 1, response := 1]
table(ldat$response)
# reverse-time cumulative responses: rows with cresp == 0 are trailing
# never-seen-again waves and are dropped
setorder(ldat, -year)
ldat[, cresp := cumsum(response), by = id]
ldat = ldat[cresp > 0]
table(ldat$incarcerated)
table(ldat$ldeath)

# dropouts: still in reverse-year order, cumsum(cresp) == 1 marks the last
# observed wave; flag it as dropout unless it is 2014 or a death
ldat[, dropout := 0]
ldat[, cresp := cumsum(cresp), id]
ldat[, dropout := ifelse(year < 2014 & cresp == 1 & ldeath == 0, 1, dropout)]
table(ldat$dropout)
setorder(ldat, year, id)
sid = sample(ids, 1)
ldat[id == sid, .(id, cresp, response, wgt, year, age, dropout, ldeath, prison, incarcerated, death, poverty, income)]
table(ldat[, .(sample, dropout)])
table(ldat[, .(year, dropout)])
table(ldat$dropout)
table(ldat[, cumsum(dropout), id]$V1) # 4901

# combine incarceration records: residence-based OR rni-based
ldat[, rprison := pmax(prison, incarcerated, na.rm = TRUE)]
table(ldat$incarcerated)

# prison variable (just combine both records): the 1980 retrospective release
# year (two-digit, < 81) identifies an incarceration spell before the panel
ldat[ever_prison1980_release > 0 & ever_prison1980_release < 81, yprison := as.numeric(ever_prison1980_release) + 1900]
table(ldat$yprison)
ldat[, prison1980 := ifelse(year == yprison, 1, 0)][is.na(prison1980), prison1980 := 0]
table(ldat$prison1980)
table(ldat$rprison)
ldat[, rprison := pmax(rprison, prison1980, na.rm = TRUE)]
table(ldat[, .(whynr, rprison)])
# before 2004 a missing interview gives no residence info, so prison is unknown
ldat[is.na(age) & year < 2004, rprison := NA]
table(ldat$rprison)
setorder(ldat, year, id)
ldat[id == sample(ids, 1), .(id, whynr ,response, year, age, dropout, ldeath, prison1980, prison, rprison, incarcerated, death, income)]

# expand dataset: turn biennial waves (post-1994) into yearly person-rows.
# start/stop index the interval each wave covers; count = rows to replicate.
setkey(ldat, id, year)
ldat[, myear := min(year), id]
table(ldat$myear)
ldat[, start := (year - myear) + 1, id]
ldat[, stop := ifelse(year < 1994, start + 1, start + 2)]
ldat[, count := stop - start]
table(ldat[, .(year, count)])
# last observed row per person is never replicated
ldat[ldat[, .I[.N], id][, V1], count := 1]
table(ldat$count)
ids = sample(unique(ldat$id), 1)
ldat[id %in% ids, .(id, year, count, start, stop)]

# expand data: replicate each row `count` times, then fill calendar years
xx = ldat[rep(seq(1, nrow(ldat)), ldat$count)]
xx[, nyear := year[1]:year[.N], id]
ids = sample(unique(xx$id), 1)
xx[id %in% ids, .(id, response, start, stop, year, nyear, age, count)]
# blank the duplicated age in the second copy of each replicated wave
xx[xx[, .I[2], .(id, year)][, V1], age := NA] # remove repeated age
ldat = xx
ldat[, oyear := year]
ldat[, year := nyear]
ldat[id %in% ids, .(id, start, stop, year, age, count)]
table(ldat$year)
table(ldat[year == 2014, whynr]) # all observed

# impute age function: anchor on the minimum observed age, back-cast earlier
# years by subtracting 1 per year, then forward-fill gaps using the year gap.
# NOTE(review): `Min` (capital M) is presumably the sdazar all-NA-safe min —
# confirm; base min would warn and return Inf on all-NA input.
setkey(ldat, id, year)

impute.age = function(age, year) {
  if (any(is.na(age))) {
    min.age = Min(age)
    position = which(age == min.age)[1] # ties
    if (!is.na(position)) {
      if (position > 1) {
        # initial values
        for (i in 1:(position-1)) {
          age[position - i] = age[position] - i
        }
      }
      missing = which(is.na(age)) # missing data position
      for (i in missing) {
        age[i] = age[i-1] + (year[i] - year[i-1])
      }
    } else {
      # no observed age at all: leave the whole series missing
      age = as.numeric(NA)
    }
  }
  return(age)
}

ldat[, age := as.numeric(age)]
ldat[, age := ifelse(age < 0, NA, age)]
ldat[, agei := impute.age(age, year), by = id]
sid = sample(ids, 1)
ldat[id == sid, .(id, year, age, agei, death, dropout, nonresponse, incarcerated, rprison)]
# ggplot(ldat, aes(x = agei, y = agei)) + geom_jitter()
summary(ldat$agei) # ok!

##############################
# assign missing data values
##############################
# Convention below: negative survey codes -> NA; -4 ("valid missing") -> 0
# where a zero is the substantively correct value (healthw, hoursworked).

# gender
ldat[, male := ifelse(gender == 1, 1, 0)]
table(ldat[, .(male)])

# income
summary(ldat$income)
ldat[, income := ifelse(income < 0, NA, income)]

# died variable
ldat[, died := ldeath]
table(ldat$died) # 681

# education
setkey(ldat, id, year)
table(ldat$edu)
ldat[, edu := ifelse(edu < 0, NA, edu)]
table(ldat$edu)

# parents education
table(ldat$pedu) # time invariant

# married (codes 1 and 5 both count as married here)
table(ldat[, .(mstatus)])
ldat[, mstatus := ifelse(mstatus < 0, NA, mstatus)]
ldat[, married := ifelse(mstatus == 1 | mstatus == 5, 1, 0)]
table(ldat[, .(married)])

# health
table(ldat[, .(healthw)])
ldat[healthw == -4, healthw := 0][, healthw := ifelse(healthw < 0, NA, healthw)]
table(ldat[, .(healthw)])

# jobs: job = any positive hours worked
table(ldat[, .(hoursworked)])
ldat[hoursworked == -4, hoursworked := 0]
ldat[, job := ifelse(hoursworked > 0, 1, 0)][hoursworked < 0, job := NA]
table(ldat$job)
prop.table(table(ldat[year == 2000, job])) # not sure this is the best

# by age (ad-hoc check of employment rate by age; discarded)
x = ldat[, mean(job, na.rm = TRUE), agei]
setorder(x, agei)
remove(x)

# race
ldat[, race := factor(race, labels = c("hispanic", "black", "non-hispanic/non-black"))]
table(ldat$race)

# select records 18 or above
x = ldat[year >= 1980 & agei >= 18]
table(x[, cumsum(died), id]$V1) # only ones
table(x[, cumsum(dropout), id]$V1) # only ones
summary(x$agei)
x[id == sample(unique(x$id), 1), .(id, year, age, agei, died, rprison, dropout)]
ldat = x

# create time variables (re-derive start/stop on the trimmed person-years)
setkey(ldat, id, year)
ldat[, myear := min(as.numeric(year)), id]
ldat[, start := year - myear, id]
ldat[, stop := start + 1]
summary(ldat$start)
summary(ldat$stop) # 35, I removed 1979 to avoid adjusting by the future
table(ldat$start) table(ldat$stop) ldat[, magei := min(agei), id] table(ldat$male) # create cumulative prison variables! setkey(ldat, id, year) ldat[, tprison := rprison][is.na(tprison), tprison := 0] table(ldat$tprison) table(ldat[, .(tprison, died)]) # okey ldat[, cprison := cumsum(tprison), id][, cprison := ifelse(cprison > 0, 1, 0)] table(ldat$cprison) table(ldat[, .(cprison, died)]) # 81 cases table(ldat[male == 1, .(cprison, died)]) # 64 table(ldat[male == 0, .(cprison, died)]) # 17 mean(ldat[, max(stop), id]$V1) # 26 years hist(ldat[died == 1, agei]) # time-varying variables forward and backward # impute forward and backward lookvar(ldat, "prison") ldat[, iprison := ifelse(is.na(prison), 0, prison)] ldat[, prison := cumsum(iprison), id][, prison := ifelse(prison > 0, 1, 0)] ldat[, index_prison := cumsum(iprison), id] table(ldat$index_prison) ldat[, select := 1] # education table(ldat$edu) # education before imprisonment ldat[, index_edu := cumsum(!is.na(edu)), id] if ('before' %in% names(ldat)) { ldat[, before := NULL] } ldat[, before := as.numeric(any(index_prison <= index_edu)), id] table(ldat$before) # define selection variable ldat[before == 0, select:= 0] table(ldat$select) # check nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id)) table(ldat$select) ldat[select == 0, .(id, year, agei, prison, edu)] # impute education forward, and then backward ldat[, iedu:= na.locf(edu, na.rm = FALSE), id] ldat[select == 1, iedu := na.locf(iedu, fromLast=TRUE), id] # all cases, it is not relevant # income # income before imprisonment ldat[, index_inc := cumsum(!is.na(income)), id] ldat[, before := NULL] ldat[, before := as.numeric(any(index_prison <= index_inc)), id] table(ldat$before) # impute education forward, and then backward ldat[, income := ifelse(income == 0, 1, income)] ldat[, iinc := na.locf(income, na.rm = FALSE), id] ldat[select == 1, iinc := na.locf(iinc, fromLast=TRUE), id] # all cases, it is not relevant cpi = fread("data/nlsy_cpi.csv") 
# merge CPI by year and deflate imputed income; liinc = centered log income
setkey(cpi, year)
setkey(ldat, year)
ldat = cpi[ldat]
ldat[, cpi := cpi / 100][, iinc := iinc* cpi]
ldat[, liinc := scale(log(iinc), center = TRUE, scale = FALSE)]

# health at working place
# health before imprisonment (same before/select/LOCF pattern as education)
ldat[, index_health := cumsum(!is.na(healthw)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_health)), id]
table(ldat$before)

# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)

# check
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, healthw)]
# NOTE(review): countmis is presumably a sdazar missing-count helper — confirm
countmis(ldat[, .(healthw)])

# impute health forward, and then backward
ldat[, ihealthw := na.locf(healthw, na.rm = FALSE), id]
ldat[select == 1, ihealthw := na.locf(ihealthw, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$ihealthw)

# job
# job before imprisonment
ldat[, index_job := cumsum(!is.na(job)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_job)), id]
table(ldat$before)

# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)

# check
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, job)]
countmis(ldat[, .(job)])

# impute job forward, and then backward
ldat[, ijob := na.locf(job, na.rm = FALSE), id]
ldat[select == 1, ijob := na.locf(ijob, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$ijob)

# married
# married before imprisonment
ldat[, index_married := cumsum(!is.na(married)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_married)), id]
table(ldat$before)

# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)

# check
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, married)]
countmis(ldat[, .(married)])

# impute married forward, and then backward
ldat[, imarried := na.locf(married, na.rm = FALSE), id]
ldat[select == 1, imarried := na.locf(imarried, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$imarried)

# save data (long-format person-year file with imputed covariates)
saveRDS(ldat, file = "output/nlsy79_long_format_covariates.rds")
/src/nlsy/01_load_data_nlsy79.R
permissive
sdaza/mortality-incarceration-paper
R
false
false
19,113
r
########################## # load and processing NLSY79 ######################### # library library(sdazar) library(ggplot2) library(zoo) # load data dat <- readRDS("data/nlsy79selection.rds") nrow(dat) wt <- readRDS("data/nlsy79weights.rds") # dat <- readRDS("data/nlsy79selection_missing.rds") # newdata <- readRDS("data/type_residence.rds") # setkey(dat, caseid_1979) # setkey(newdata, caseid_1979) # dat = newdata[dat] # saveRDS(dat, "data/nlsy79selection.rds") #################### # define variables #################### # identifiers lookvar(dat, "id") setnames(dat, "caseid_1979", "id") summary(dat$id) anyDuplicated(dat[, id]) setnames(dat, "sample_id_1979", "type") table(dat$type) years = c(1979:1994, seq(1996,2014, 2)) # merge weights setkey(wt, id) setkey(dat, id) nrow(dat) dat <- wt[dat] nrow(dat) # rename some demographic variables setnames(dat, "sample_race_78scrn", "race") setnames(dat, "sample_sex_1979", "gender") table(dat$gender) table(dat$race) dat[, sample := ifelse(type %in% 1:8, 1, ifelse(type %in% 9:14, 2, 3))] table(dat[, .(sample)]) # age vars <- lookvar(dat, "^age") vars length(vars) nvars = paste0("age", years) length(nvars) setnames(dat, vars, nvars) table(dat$age1979) table(dat$age2014) # missing data codes # -1 Refused # -2 Dont know # -3 Invalid missing # -4 Valid missing # -5 Non-interview # 1 to 4 are item non-response # NA for non-interview cases dat = dat[, lapply(.SD, function(x) ifelse(x == -5, NA, x))] table(dat$age1979) table(dat$age1980) table(dat$age1981) ################################################## # define variables and then change to long format ################################################## # deaths ok # incarceration ok # age ok # race ok # gender ok # parents' education ok max # respondent's education ok # welfare # job # income ok # marriage ok # health insurance (1984) ok # health issues (job) ok # substance use (1980) # cigarette use # delinquency (1980) ok # violence # locus of control # criminal contact ok # 
weights ok # parents' education lookvar(dat, "hgc") setnames(dat, c("hgc-mother_1979", "hgc-father_1979"), c("medu", "fedu")) table(dat$fedu) table(dat$medu) dat[, c("fedu", "medu") := lapply(.SD, function(x) ifelse(x %in% c(-4, -3, -2, -1), NA, x)), .SDcols = c("fedu", "medu")] dat[, pedu := pmax(fedu, medu, na.rm = TRUE)] table(dat$pedu) # education vars = lookvar(dat, "hgc") setnames(dat, vars, paste0("edu", years)) table(dat$edu1979) table(dat$edu2014) # welfare (pending), transform this variable separately # vars = paste0("welfare-amt-", years, "_revised_xrnd") # nvars = paste0("welfare.years") # setnames(dat, vars, nvars) # lookvar(dat, "welfare") # names(dat) # income vars = lookvar(dat, "tnfi") setnames(dat, vars, paste0("income", years)) summary(dat$income1979) summary(dat$income2014) # poverty vars = lookvar(dat, "povst") setnames(dat, vars, paste0("poverty", years)) table(dat$poverty1979) table(dat$poverty2014) # job vars = lookvar(dat, "hrs") nvars = paste0("hoursworked", years) length(vars) length(nvars) setnames(dat, vars, nvars) # vars = lookvar(dat, "esr") # nvars = paste0("working", years[years<2007]) # setnames(dat, vars, nvars) table(dat$hoursworked1979) # hist(dat$hoursworked1979) # dat[, mean(hoursworked1979), esr_col_1979] # marriage vars = lookvar(dat, "marst") nvars = paste0("mstatus", years) setnames(dat, vars, nvars) # health coverage vars = lookvar(dat, "q11-79") nvars = paste0("healthcare", years[years > 1988 & years != 1991]) length(nvars) length(vars) setnames(dat, vars, nvars) # health problems vars = lookvar(dat, "q11-4") nvars = paste0("healthw", years) setnames(dat, vars, nvars) table(dat$healthw1979) table(dat$healthw2014) # delinquency vars = lookvar(dat, "(delin-[1][0-9])|(delin-[4-9])|delin-20") length(vars) nvars = paste0("del", 1:17) setnames(dat, vars, nvars) dat[, (nvars) := lapply(.SD, function(x) ifelse(x < 0, NA, x)), .SDcols = nvars] table(dat$del1) table(dat$del17) dat[, deltot := rowscore(dat, nvars, p = 0)] # all 
summary(dat$deltot) # hist(log(dat$deltot)) # criminal justice experiences lookvar(dat, "police") table(dat[, "police-7_1980", with = FALSE]) table(dat[, "police-7c_y_1980", with = FALSE]) setnames(dat, "police-7_1980" , "ever_prison1980") setnames(dat, "police-7c_y_1980" , "ever_prison1980_release") # weights vars = lookvar(dat, "sampweight") nvars = paste0("wgt", years) setnames(dat, vars, nvars) # type of residence (key incarceration variable) vars = lookvar(dat, "hh1") nvars = paste0("resid", years) setnames(dat, vars, nvars) table(dat$resid1979) table(dat$resid2000) # code of jail / prison jail = c(3, rep(5, 25)) # different coding during first year, 3 versus 5 resid = lookvar(dat, "resid") length(jail) length(resid) for (i in seq_along(years)) { dat[, paste0("prison", years[i]) := ifelse(get(resid[i]) == jail[i], 1, 0)] } table(dat$resid2004 ) table(dat$prison2004) table(dat$resid1979 ) table(dat$prison1979 ) # deaths rni = lookvar(dat, "rni") length(rni) death = paste0("death", years[-1]) death table(dat$rni_1980) dat[, (death) := lapply(.SD, function(x) ifelse(x == 65, 1, 0)), .SDcols = rni] table(dat$death1980) table(dat$death2014) # not interviewed people because incarcerated rni = lookvar(dat, "rni") length(rni) incarcerated = paste0("incarcerated", years[-1]) length(incarcerated) dat[, (incarcerated) := lapply(.SD, function(x) ifelse(x == 73, 1, 0)), .SDcols = rni] dat[, (incarcerated) := lapply(.SD, function(x) ifelse(is.na(x), 0, x)), .SDcols = incarcerated] table(dat$incarcerated1980) table(dat$prison2014) # different records table(dat[, .(incarcerated2006, prison2006)]) table(dat[, .(incarcerated2014, prison2014)]) # non-response (excluding incarceration) rni = lookvar(dat, "rni") length(rni) nonresponse = paste0("nonresponse", years[-1]) length(nonresponse) values = c(60:64, 66:67, 71:72, 74) dat[, (nonresponse) := lapply(.SD, function(x) ifelse(x %in% values, 1, 0)), .SDcols = rni] dat[, (nonresponse) := lapply(.SD, function(x) ifelse(is.na(x), 0, 
x)), .SDcols = nonresponse] table(dat$nonresponse1980) table(dat$nonresponse2012) ######################################### # set long format ######################################## # create null variables vars = c("nonresponse1979", "incarcerated1979", "death1979") dat[, (vars) := 0] dat[, rni_1979 := -4] vars = paste0("healthcare", years) ovars = lookvar(dat, "healthcare") dat[, (vars[-which(vars %in% ovars)]) := NA] lookvar(dat, "care") # create variable names list_variables = c('age', 'prison', 'income', 'poverty', 'healthcare', 'nonresponse', 'incarcerated', 'death', 'mstatus', 'edu', 'healthw', 'wgt', 'hoursworked', 'rni_', 'resid') list_names = c('rage', 'rprison', 'rinc', 'rpov', 'rhealthcare', 'rnonresponse', 'rincarcerated', 'rdeath', 'rmstatus', 'redu', 'rhealthw', 'rwgt', 'rhoursworked', 'rrni', 'rresid') create_name_years = function(x, years) { paste0(x, years) } for (i in seq_along(list_variables)) { assign(list_names[i], paste0(list_variables[i], years)) } lookvar(dat, "ever") vars = c("id", "sample", "wt", "cluster", "stratum", "gender", "race", "pedu", "ever_prison1980", "ever_prison1980_release", "deltot", rrni, rresid, rhealthw, rhealthcare, rwgt, rhoursworked, rmstatus, redu, rage, rnonresponse, rprison, rincarcerated, rdeath, rinc, rpov) # vars wdat = copy(dat[, vars, with = FALSE]) names(wdat) nrow(wdat) # 12686 # patterns and variables setkey(wdat, id) # same order patt = c("^rni", "^resid", "^nonresponse", "^incarcerated", "^death", "^edu", "^age", "^prison", "^income", "^poverty", "^mstatus", "^healthw", "^healthcare", "^wgt", "^hoursworked") vnames = c("whynr", "resid", "nonresponse", "incarcerated", "death", "edu", "age", "prison", "income", "poverty", "mstatus", "healthw", "healthcare", "wgt", "hoursworked") length(patt) == length(vnames) # adjust variable types before melting vars = names(wdat)[names(wdat) %like% '^rni_'] wdat[, c(vars) := lapply(.SD, as.numeric), .SDcols = vars] vars = names(wdat)[names(wdat) %like% 
'^healthcare[0-9]+'] wdat[, c(vars) := lapply(.SD, as.numeric), .SDcols = vars] # melt data vars = c("id", "sample", "stratum", "cluster", "wt", "gender", "race", "pedu", "deltot", "ever_prison1980", "ever_prison1980_release") ldat = data.table::melt(wdat, id.vars = vars, measure.vars = patterns(patt), value.name = vnames, variable.name = "time") head(ldat) # set variables in long format for (i in seq_along(years)) { ldat[time == i, year := years[i]] } table(ldat[, .(year)]) # explore cases ids = unique(ldat$id) sid = sample(ids, 1) ldat[id == sid, .(id, deltot, wgt, year, age, gender, nonresponse, prison, incarcerated, death, poverty, income)] # death setkey(ldat, id, year) table(ldat$death) ldat[, ldeath := shift(death, type = "lead"), by = id] table(ldat$ldeath) replace_na_with_last = function(x,a=!is.na(x)) { x[which(a)[c(1,1:sum(a))][cumsum(a)+1]] } ldat[, ldeath := replace_na_with_last(ldeath), by = id] table(ldat$ldeath) table(ldat$death) sid = sample(ids, 1) ldat[id == sid, .(id, death, ldeath)] ldat[, cumdeath := cumsum(ldeath), by = id] table(ldat$cumdeath) ldat = ldat[cumdeath < 2] table(ldat$cumdeath) # 816 table(ldat$ldeath) # define non-response and dropouts ldat[, response := 0] ldat[!is.na(age), response := 1] ldat[incarcerated == 1, response := 1] table(ldat$response) setorder(ldat, -year) ldat[, cresp := cumsum(response), by = id] ldat = ldat[cresp > 0] table(ldat$incarcerated) table(ldat$ldeath) # dropouts ldat[, dropout := 0] ldat[, cresp := cumsum(cresp), id] ldat[, dropout := ifelse(year < 2014 & cresp == 1 & ldeath == 0, 1, dropout)] table(ldat$dropout) setorder(ldat, year, id) sid = sample(ids, 1) ldat[id == sid, .(id, cresp, response, wgt, year, age, dropout, ldeath, prison, incarcerated, death, poverty, income)] table(ldat[, .(sample, dropout)]) table(ldat[, .(year, dropout)]) table(ldat$dropout) table(ldat[, cumsum(dropout), id]$V1) # 4901 # combine incarceration records ldat[, rprison := pmax(prison, incarcerated, na.rm = TRUE)] 
table(ldat$incarcerated) # prison variable (just combine both records) ldat[ever_prison1980_release > 0 & ever_prison1980_release < 81, yprison := as.numeric(ever_prison1980_release) + 1900] table(ldat$yprison) ldat[, prison1980 := ifelse(year == yprison, 1, 0)][is.na(prison1980), prison1980 := 0] table(ldat$prison1980) table(ldat$rprison) ldat[, rprison := pmax(rprison, prison1980, na.rm = TRUE)] table(ldat[, .(whynr, rprison)]) ldat[is.na(age) & year < 2004, rprison := NA] table(ldat$rprison) setorder(ldat, year, id) ldat[id == sample(ids, 1), .(id, whynr ,response, year, age, dropout, ldeath, prison1980, prison, rprison, incarcerated, death, income)] # expand dataset setkey(ldat, id, year) ldat[, myear := min(year), id] table(ldat$myear) ldat[, start := (year - myear) + 1, id] ldat[, stop := ifelse(year < 1994, start + 1, start + 2)] ldat[, count := stop - start] table(ldat[, .(year, count)]) ldat[ldat[, .I[.N], id][, V1], count := 1] table(ldat$count) ids = sample(unique(ldat$id), 1) ldat[id %in% ids, .(id, year, count, start, stop)] # expand data xx = ldat[rep(seq(1, nrow(ldat)), ldat$count)] xx[, nyear := year[1]:year[.N], id] ids = sample(unique(xx$id), 1) xx[id %in% ids, .(id, response, start, stop, year, nyear, age, count)] xx[xx[, .I[2], .(id, year)][, V1], age := NA] # remove repeated age ldat = xx ldat[, oyear := year] ldat[, year := nyear] ldat[id %in% ids, .(id, start, stop, year, age, count)] table(ldat$year) table(ldat[year == 2014, whynr]) # all observed # impute age function setkey(ldat, id, year) impute.age = function(age, year) { if (any(is.na(age))) { min.age = Min(age) position = which(age == min.age)[1] # ties if (!is.na(position)) { if (position > 1) { # initial values for (i in 1:(position-1)) { age[position - i] = age[position] - i } } missing = which(is.na(age)) # missing data position for (i in missing) { age[i] = age[i-1] + (year[i] - year[i-1]) } } else { age = as.numeric(NA) } } return(age) } ldat[, age := as.numeric(age)] ldat[, age 
:= ifelse(age < 0, NA, age)] ldat[, agei := impute.age(age, year), by = id] sid = sample(ids, 1) ldat[id == sid, .(id, year, age, agei, death, dropout, nonresponse, incarcerated, rprison)] # ggplot(ldat, aes(x = agei, y = agei)) + geom_jitter() summary(ldat$agei) # ok! ############################## # assign missing data values ############################## # gender ldat[, male := ifelse(gender == 1, 1, 0)] table(ldat[, .(male)]) # income summary(ldat$income) ldat[, income := ifelse(income < 0, NA, income)] # died variable ldat[, died := ldeath] table(ldat$died) # 681 # education setkey(ldat, id, year) table(ldat$edu) ldat[, edu := ifelse(edu < 0, NA, edu)] table(ldat$edu) # parents education table(ldat$pedu) # time invariant # married table(ldat[, .(mstatus)]) ldat[, mstatus := ifelse(mstatus < 0, NA, mstatus)] ldat[, married := ifelse(mstatus == 1 | mstatus == 5, 1, 0)] table(ldat[, .(married)]) # health table(ldat[, .(healthw)]) ldat[healthw == -4, healthw := 0][, healthw := ifelse(healthw < 0, NA, healthw)] table(ldat[, .(healthw)]) # jobs table(ldat[, .(hoursworked)]) ldat[hoursworked == -4, hoursworked := 0] ldat[, job := ifelse(hoursworked > 0, 1, 0)][hoursworked < 0, job := NA] table(ldat$job) prop.table(table(ldat[year == 2000, job])) # not sure this is the best # by age x = ldat[, mean(job, na.rm = TRUE), agei] setorder(x, agei) remove(x) # race ldat[, race := factor(race, labels = c("hispanic", "black", "non-hispanic/non-black"))] table(ldat$race) # select records 18 or above x = ldat[year >= 1980 & agei >= 18] table(x[, cumsum(died), id]$V1) # only ones table(x[, cumsum(dropout), id]$V1) # only ones summary(x$agei) x[id == sample(unique(x$id), 1), .(id, year, age, agei, died, rprison, dropout)] ldat = x # create time variables setkey(ldat, id, year) ldat[, myear := min(as.numeric(year)), id] ldat[, start := year - myear, id] ldat[, stop := start + 1] summary(ldat$start) summary(ldat$stop) # 35, I removed 1979 to avoid adjusting by the future 
table(ldat$start) table(ldat$stop) ldat[, magei := min(agei), id] table(ldat$male) # create cumulative prison variables! setkey(ldat, id, year) ldat[, tprison := rprison][is.na(tprison), tprison := 0] table(ldat$tprison) table(ldat[, .(tprison, died)]) # okey ldat[, cprison := cumsum(tprison), id][, cprison := ifelse(cprison > 0, 1, 0)] table(ldat$cprison) table(ldat[, .(cprison, died)]) # 81 cases table(ldat[male == 1, .(cprison, died)]) # 64 table(ldat[male == 0, .(cprison, died)]) # 17 mean(ldat[, max(stop), id]$V1) # 26 years hist(ldat[died == 1, agei]) # time-varying variables forward and backward # impute forward and backward lookvar(ldat, "prison") ldat[, iprison := ifelse(is.na(prison), 0, prison)] ldat[, prison := cumsum(iprison), id][, prison := ifelse(prison > 0, 1, 0)] ldat[, index_prison := cumsum(iprison), id] table(ldat$index_prison) ldat[, select := 1] # education table(ldat$edu) # education before imprisonment ldat[, index_edu := cumsum(!is.na(edu)), id] if ('before' %in% names(ldat)) { ldat[, before := NULL] } ldat[, before := as.numeric(any(index_prison <= index_edu)), id] table(ldat$before) # define selection variable ldat[before == 0, select:= 0] table(ldat$select) # check nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id)) table(ldat$select) ldat[select == 0, .(id, year, agei, prison, edu)] # impute education forward, and then backward ldat[, iedu:= na.locf(edu, na.rm = FALSE), id] ldat[select == 1, iedu := na.locf(iedu, fromLast=TRUE), id] # all cases, it is not relevant # income # income before imprisonment ldat[, index_inc := cumsum(!is.na(income)), id] ldat[, before := NULL] ldat[, before := as.numeric(any(index_prison <= index_inc)), id] table(ldat$before) # impute education forward, and then backward ldat[, income := ifelse(income == 0, 1, income)] ldat[, iinc := na.locf(income, na.rm = FALSE), id] ldat[select == 1, iinc := na.locf(iinc, fromLast=TRUE), id] # all cases, it is not relevant cpi = fread("data/nlsy_cpi.csv") 
# Merge CPI by year and deflate income; liinc is mean-centered log income.
setkey(cpi, year)
setkey(ldat, year)
ldat = cpi[ldat]
ldat[, cpi := cpi / 100][, iinc := iinc* cpi]
ldat[, liinc := scale(log(iinc), center = TRUE, scale = FALSE)]

# health at working place
# health before imprisonment
ldat[, index_health := cumsum(!is.na(healthw)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_health)), id]
table(ldat$before)
# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)
# check: select is constant within id
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, healthw)]
countmis(ldat[, .(healthw)])
# impute health forward, and then backward
ldat[, ihealthw := na.locf(healthw, na.rm = FALSE), id]
ldat[select == 1, ihealthw := na.locf(ihealthw, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$ihealthw)

# job
# job before imprisonment
ldat[, index_job := cumsum(!is.na(job)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_job)), id]
table(ldat$before)
# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)
# check: select is constant within id
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, job)]
countmis(ldat[, .(job)])
# impute job forward, and then backward
ldat[, ijob := na.locf(job, na.rm = FALSE), id]
ldat[select == 1, ijob := na.locf(ijob, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$ijob)

# married
# married before imprisonment
ldat[, index_married := cumsum(!is.na(married)), id]
ldat[, before := NULL]
ldat[, before := as.numeric(any(index_prison <= index_married)), id]
table(ldat$before)
# define selection variable
ldat[before == 0, select:= 0]
table(ldat$select)
# check: select is constant within id
nrow(ldat[, max(select), .(select, id)]) == length(unique(ldat$id))
table(ldat$select)
ldat[select == 0, .(id, year, agei, prison, married)]
countmis(ldat[, .(married)])
# impute married forward, and then backward
ldat[, imarried := na.locf(married, na.rm = FALSE), id]
ldat[select == 1, imarried := na.locf(imarried, fromLast=TRUE), id] # all cases, it is not relevant
summary(ldat$imarried)

# save data
saveRDS(ldat, file = "output/nlsy79_long_format_covariates.rds")
##' @title Clump markers according to their LD.
##' @description \code{clump.markers} implements clumping procedure (as described in PLINK documentation) on
##' a \code{\link[GenABEL]{gwaa.data-class}} object.
##' @author Marcin Kierczak <\email{Marcin.Kierczak@@imbim.uu.se}>
##' @param p1 threshold for index markers,
##' @param p2 threshold for clumping,
##' @param r2 threshold for LD,
##' @param bp.dist threshold for inter-marker distance,
##' @param chr chromosome to be clumped,
##' @param gwas.result \code{\link[GenABEL]{gwaa.scan-class}} object with association test results,
##' @param data data object in \code{\link[GenABEL]{gwaa.data-class}},
##' @param image a logical indicating whether to plot clumping results or not,
##' @param verbose a logical indicating whether to print clumping results as it proceeds
##'
##' @return a list of clumps; each clump has elements \code{snpnames}, \code{chr}, \code{coord} and \code{pval}
##'
##' @references \url{http://pngu.mgh.harvard.edu/~purcell/plink/clump.shtml}
##'
##' @examples \dontrun{
##' clumps <- clump.markers(data.qc0, gwas.result = an0, chr = 6, bp.dist = 250e3, p1 = 0.0001, p2 = 0.01, r2 = 0.5, image=TRUE)
##' }
##'
##' @export
clump.markers <- function(data, gwas.result, chr = 1, bp.dist = 250e3,
                          p1 = 0.0001, p2 = 0.01, r2 = 0.5,
                          image = FALSE, verbose = FALSE) {
  an <- gwas.result
  data.chr <- data[, data@gtdata@chromosome == chr]
  result <- gwas.result[gwas.result@annotation$Chromosome == chr, ]
  result.sorted <- result[order(result$P1df), ]

  # Index markers (P1df <= p1, most significant first) and the wider set
  # of clump candidates (P1df <= p2).
  signif.p1 <- rownames(result.sorted[result.sorted$P1df <= p1, ])
  signif.p2 <- rownames(result[result$P1df <= p2, ])
  data.signif <- data.chr[, data.chr@gtdata@snpnames %in% signif.p2]

  # Pairwise LD; r2fast() fills only one triangle, so mirror it into a
  # full symmetric matrix before thresholding.
  r2matrix <- r2fast(data.signif)
  r2matrix[lower.tri(r2matrix)] <- t(r2matrix)[lower.tri(r2matrix)]

  # Pairwise physical distance (bp) between candidate markers.
  d <- as.matrix(dist(cbind(data.signif@gtdata@map,
                            rep(0, times = length(signif.p2)))))

  # Encode the two clumping criteria in one matrix:
  # +1 when within bp.dist, +3 when LD >= r2; a cell of 4 satisfies both.
  n.candidates <- length(signif.p2)
  clumpmatrix <- matrix(0, nrow = n.candidates, ncol = n.candidates)
  close.enough <- which(d <= bp.dist)
  clumpmatrix[close.enough] <- clumpmatrix[close.enough] + 1
  in.ld <- which(r2matrix >= r2)
  clumpmatrix[in.ld] <- clumpmatrix[in.ld] + 3
  colnames(clumpmatrix) <- colnames(d)
  rownames(clumpmatrix) <- rownames(d)

  marker.names <- data.signif@gtdata@snpnames
  used <- rep(0, times = n.candidates)
  clumps <- list()
  for (i in seq_along(signif.p1)) {
    marker <- signif.p1[i]
    marker.index <- which(signif.p2 == marker)
    if (used[marker.index] == 0) {
      used[marker.index] <- 1
      newClump <- list()
      # Candidates satisfying both criteria relative to this index marker,
      # excluding ones already assigned to a more significant clump.
      clump <- which(clumpmatrix[marker, ] == 4)
      unused <- which(used[clump] == 0)
      clump <- clump[unused]
      if (length(clump) > 0) {
        p <- paste("Marker ", marker, " clumps with markers: ",
                   paste(marker.names[clump], collapse = ', '), sep = "")
        snpnames <- c(marker, marker.names[clump])
        newClump[["snpnames"]] <- snpnames
        newClump[["chr"]] <- as.character(an@annotation$Chromosome[which(rownames(an@results) %in% snpnames)])
        newClump[["coord"]] <- an@annotation$Position[which(rownames(an@results) %in% snpnames)]
        newClump[["pval"]] <- an@results$P1df[which(rownames(an@results) %in% snpnames)]
        clumps[[marker]] <- newClump
        if (verbose) {
          print(p)
        }
      }
      #else {warning("No clumps found in dataset!")}
      used[clump] <- 1
    }
  }
  if (isTRUE(image)) {
    par(mfrow = c(1, 3))
    image(r2matrix, col = rev(heat.colors(100)), main = "r2 matrix")
    image(d, col = rev(heat.colors(100)), main = "distance matrix")
    image(clumpmatrix, col = c("cornsilk1", "blue", "tomato", "red"),
          main = "clumping matrix")
  }
  if (length(clumps) == 0) {
    warning("No clumps found in dataset!")
  }
  clumps
}
/R/clump.markers.r
no_license
cgmisc-team/cgmisc
R
false
false
3,699
r
##' @title Clump markers according to their LD.
##' @description \code{clump.markers} implements clumping procedure (as described in PLINK documentation) on
##' a \code{\link[GenABEL]{gwaa.data-class}} object.
##' @author Marcin Kierczak <\email{Marcin.Kierczak@@imbim.uu.se}>
##' @param p1 threshold for index markers,
##' @param p2 threshold for clumping,
##' @param r2 threshold for LD,
##' @param bp.dist threshold for inter-marker distance,
##' @param chr chromosome to be clumped,
##' @param gwas.result \code{\link[GenABEL]{gwaa.scan-class}} object with association test results,
##' @param data data object in \code{\link[GenABEL]{gwaa.data-class}},
##' @param image a logical indicating whether to plot clumping results or not,
##' @param verbose a logical indicating whether to print clumping results as it proceeds
##'
##' @return a list of clumps; each clump has elements \code{snpnames}, \code{chr}, \code{coord} and \code{pval}
##'
##' @references \url{http://pngu.mgh.harvard.edu/~purcell/plink/clump.shtml}
##'
##' @examples \dontrun{
##' clumps <- clump.markers(data.qc0, gwas.result = an0, chr = 6, bp.dist = 250e3, p1 = 0.0001, p2 = 0.01, r2 = 0.5, image=TRUE)
##' }
##'
##' @export
clump.markers <- function(data, gwas.result, chr = 1, bp.dist = 250e3,
                          p1 = 0.0001, p2 = 0.01, r2 = 0.5,
                          image = FALSE, verbose = FALSE) {
  an <- gwas.result
  data.chr <- data[, data@gtdata@chromosome == chr]
  result <- gwas.result[gwas.result@annotation$Chromosome == chr, ]
  result.sorted <- result[order(result$P1df), ]

  # Index markers (P1df <= p1, most significant first) and the wider set
  # of clump candidates (P1df <= p2).
  signif.p1 <- rownames(result.sorted[result.sorted$P1df <= p1, ])
  signif.p2 <- rownames(result[result$P1df <= p2, ])
  data.signif <- data.chr[, data.chr@gtdata@snpnames %in% signif.p2]

  # Pairwise LD; r2fast() fills only one triangle, so mirror it into a
  # full symmetric matrix before thresholding.
  r2matrix <- r2fast(data.signif)
  r2matrix[lower.tri(r2matrix)] <- t(r2matrix)[lower.tri(r2matrix)]

  # Pairwise physical distance (bp) between candidate markers.
  d <- as.matrix(dist(cbind(data.signif@gtdata@map,
                            rep(0, times = length(signif.p2)))))

  # Encode the two clumping criteria in one matrix:
  # +1 when within bp.dist, +3 when LD >= r2; a cell of 4 satisfies both.
  n.candidates <- length(signif.p2)
  clumpmatrix <- matrix(0, nrow = n.candidates, ncol = n.candidates)
  close.enough <- which(d <= bp.dist)
  clumpmatrix[close.enough] <- clumpmatrix[close.enough] + 1
  in.ld <- which(r2matrix >= r2)
  clumpmatrix[in.ld] <- clumpmatrix[in.ld] + 3
  colnames(clumpmatrix) <- colnames(d)
  rownames(clumpmatrix) <- rownames(d)

  marker.names <- data.signif@gtdata@snpnames
  used <- rep(0, times = n.candidates)
  clumps <- list()
  for (i in seq_along(signif.p1)) {
    marker <- signif.p1[i]
    marker.index <- which(signif.p2 == marker)
    if (used[marker.index] == 0) {
      used[marker.index] <- 1
      newClump <- list()
      # Candidates satisfying both criteria relative to this index marker,
      # excluding ones already assigned to a more significant clump.
      clump <- which(clumpmatrix[marker, ] == 4)
      unused <- which(used[clump] == 0)
      clump <- clump[unused]
      if (length(clump) > 0) {
        p <- paste("Marker ", marker, " clumps with markers: ",
                   paste(marker.names[clump], collapse = ', '), sep = "")
        snpnames <- c(marker, marker.names[clump])
        newClump[["snpnames"]] <- snpnames
        newClump[["chr"]] <- as.character(an@annotation$Chromosome[which(rownames(an@results) %in% snpnames)])
        newClump[["coord"]] <- an@annotation$Position[which(rownames(an@results) %in% snpnames)]
        newClump[["pval"]] <- an@results$P1df[which(rownames(an@results) %in% snpnames)]
        clumps[[marker]] <- newClump
        if (verbose) {
          print(p)
        }
      }
      #else {warning("No clumps found in dataset!")}
      used[clump] <- 1
    }
  }
  if (isTRUE(image)) {
    par(mfrow = c(1, 3))
    image(r2matrix, col = rev(heat.colors(100)), main = "r2 matrix")
    image(d, col = rev(heat.colors(100)), main = "distance matrix")
    image(clumpmatrix, col = c("cornsilk1", "blue", "tomato", "red"),
          main = "clumping matrix")
  }
  if (length(clumps) == 0) {
    warning("No clumps found in dataset!")
  }
  clumps
}
#=================================
## GET SWORD ENV DATA FOR HMMOCE
#=================================
# One-off download script: fetches SST and HYCOM environmental data for
# swordfish tag dates via HMMoce's get.env(). Paths are hard-coded to the
# author's machine; intended for interactive, stepwise execution.

load('~/Data/Swordfish/batch/unique_sword_dates.rda')
udates <- udatesList; rm(udatesList)
str(udates)
udates <- udates[order(udates)]

# SET SPATIAL LIMITS (western Atlantic bounding box)
sp.lim <- list(lonmin=-85, lonmax=-15, latmin=8, latmax=53)

#if (exists('sp.lim')){
#  locs.grid <- setup.locs.grid(sp.lim)
#} else{
#  locs.grid <- setup.locs.grid(locs)
#  sp.lim <- list(lonmin = min(locs.grid$lon[1,]), lonmax = max(locs.grid$lon[1,]),
#                 latmin = min(locs.grid$lat[,1]), latmax = max(locs.grid$lat[,1]))
#}

# IF YOU NEED TO DOWNLOAD SST DATA
sst.dir <- '~/EnvData/sst/Swordfish/'
dir.create(file.path(sst.dir), recursive = TRUE, showWarnings = FALSE)
udates <- udates[19:length(udates)]
get.env(udates, ptt='sword', type = 'sst', sst.type='ghr', spatLim = sp.lim, save.dir = sst.dir)
# Dates still missing on disk (file names carry the date at chars 7-16).
get.sst.dates <- udates[!(udates %in% as.Date(substr(list.files(sst.dir), 7, 16)))]
# NOTE(review): 'ptt' is not defined anywhere in this script -- this line
# errors unless ptt exists in the workspace; probably meant ptt='sword'.
get.env(get.sst.dates, ptt = ptt, type = 'sst', sst.type='ghr',spatLim = sp.lim, save.dir = sst.dir)

# HYCOM DATA
hycom.dir <- '~/EnvData/hycom3/Swordfish/'
dir.create(file.path(hycom.dir), recursive = TRUE, showWarnings = FALSE)
setwd('~/HMMoce'); devtools::load_all()
udates <- udates[order(udates)]
get.env(udates[500], filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir)
get.pdt.dates <- udates[!(udates %in% as.Date(substr(list.files(hycom.dir), 7, 16)))]
get.pdt.dates <- rev(get.pdt.dates)
get.env(get.pdt.dates, filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir)

# Retry loop for HYCOM: re-download each date until the resulting NetCDF
# exceeds ~3.7 MB (smaller files indicate a truncated/failed download).
# NOTE(review): the 648 upper bound is hard-coded -- presumably
# length(get.pdt.dates) at the time of writing; confirm before reuse.
hycom.dir <- '~/EnvData/hycom3/Swordfish/'
setwd('~/HMMoce'); devtools::load_all()
get.pdt.dates <- udates[!(udates %in% as.Date(substr(list.files(hycom.dir), 7, 16)))]
for (i in 648:1){
  repeat{
    try(get.env(get.pdt.dates[i], filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir), silent=T)
    if(file.info(paste('sword_', get.pdt.dates[i],'.nc', sep=''))$size > 37e5){
      break
    }
  }
  print(paste('Finished ', i))
}
save(get.pdt.dates,file='get_pdt_dates.rda')
/get_sword_env.r
no_license
camrinbraun/HMMoce_run
R
false
false
2,097
r
#=================================
## GET SWORD ENV DATA FOR HMMOCE
#=================================
# One-off download script: fetches SST and HYCOM environmental data for
# swordfish tag dates via HMMoce's get.env(). Paths are hard-coded to the
# author's machine; intended for interactive, stepwise execution.

load('~/Data/Swordfish/batch/unique_sword_dates.rda')
udates <- udatesList; rm(udatesList)
str(udates)
udates <- udates[order(udates)]

# SET SPATIAL LIMITS (western Atlantic bounding box)
sp.lim <- list(lonmin=-85, lonmax=-15, latmin=8, latmax=53)

#if (exists('sp.lim')){
#  locs.grid <- setup.locs.grid(sp.lim)
#} else{
#  locs.grid <- setup.locs.grid(locs)
#  sp.lim <- list(lonmin = min(locs.grid$lon[1,]), lonmax = max(locs.grid$lon[1,]),
#                 latmin = min(locs.grid$lat[,1]), latmax = max(locs.grid$lat[,1]))
#}

# IF YOU NEED TO DOWNLOAD SST DATA
sst.dir <- '~/EnvData/sst/Swordfish/'
dir.create(file.path(sst.dir), recursive = TRUE, showWarnings = FALSE)
udates <- udates[19:length(udates)]
get.env(udates, ptt='sword', type = 'sst', sst.type='ghr', spatLim = sp.lim, save.dir = sst.dir)
# Dates still missing on disk (file names carry the date at chars 7-16).
get.sst.dates <- udates[!(udates %in% as.Date(substr(list.files(sst.dir), 7, 16)))]
# NOTE(review): 'ptt' is not defined anywhere in this script -- this line
# errors unless ptt exists in the workspace; probably meant ptt='sword'.
get.env(get.sst.dates, ptt = ptt, type = 'sst', sst.type='ghr',spatLim = sp.lim, save.dir = sst.dir)

# HYCOM DATA
hycom.dir <- '~/EnvData/hycom3/Swordfish/'
dir.create(file.path(hycom.dir), recursive = TRUE, showWarnings = FALSE)
setwd('~/HMMoce'); devtools::load_all()
udates <- udates[order(udates)]
get.env(udates[500], filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir)
get.pdt.dates <- udates[!(udates %in% as.Date(substr(list.files(hycom.dir), 7, 16)))]
get.pdt.dates <- rev(get.pdt.dates)
get.env(get.pdt.dates, filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir)

# Retry loop for HYCOM: re-download each date until the resulting NetCDF
# exceeds ~3.7 MB (smaller files indicate a truncated/failed download).
# NOTE(review): the 648 upper bound is hard-coded -- presumably
# length(get.pdt.dates) at the time of writing; confirm before reuse.
hycom.dir <- '~/EnvData/hycom3/Swordfish/'
setwd('~/HMMoce'); devtools::load_all()
get.pdt.dates <- udates[!(udates %in% as.Date(substr(list.files(hycom.dir), 7, 16)))]
for (i in 648:1){
  repeat{
    try(get.env(get.pdt.dates[i], filename='sword', type = 'hycom', spatLim = sp.lim, save.dir = hycom.dir), silent=T)
    if(file.info(paste('sword_', get.pdt.dates[i],'.nc', sep=''))$size > 37e5){
      break
    }
  }
  print(paste('Finished ', i))
}
save(get.pdt.dates,file='get_pdt_dates.rda')
.onLoad <- function(lib, pkg) { library.dynam('solidearthtide', pkg, lib) }
/R/zzz.r
no_license
cran/solidearthtide
R
false
false
79
r
.onLoad <- function(lib, pkg) { library.dynam('solidearthtide', pkg, lib) }
#' @title An introductory plot #' #' @param x A quantitative vector #' #' @return A plot #' @export #' #' @examples #' \dontrun{d <- 1:40; myplot(x = d)} myplot=function(x){ y <- 0.86089580 +1.46959217*x -0.02745726*x^2 plot(y~x, col = "blue", lwd = 2, type = "l", main = "Small plot") }
/R/myplot.R
permissive
MATHSTATSOU/MATH4753STEWART4
R
false
false
293
r
#' @title An introductory plot #' #' @param x A quantitative vector #' #' @return A plot #' @export #' #' @examples #' \dontrun{d <- 1:40; myplot(x = d)} myplot=function(x){ y <- 0.86089580 +1.46959217*x -0.02745726*x^2 plot(y~x, col = "blue", lwd = 2, type = "l", main = "Small plot") }
# testthat suite for AzureML::workspace(): connection, error reporting,
# and settings.json handling. Network-dependent tests are skipped when no
# settings file is available.
if(interactive()) library("testthat")

settingsFile <- AzureML.config.default
# Local wrapper: default .validate = FALSE so workspace construction does
# not hit the service unless a test opts in.
workspace <- function(..., .validate = FALSE) AzureML::workspace(..., .validate = .validate)

# ------------------------------------------------------------------------
context("workspace - connect to workspace")

test_that("Can connect to workspace with supplied id and auth", {
  AzureML:::skip_if_missing_config(settingsFile)
  js <- read.AzureML.config(settingsFile)
  id <- js$workspace$id
  auth <- js$workspace$authorization_token
  expect_true(!is.null(id))
  expect_true(!is.null(auth))
  ws <- workspace(id, auth)
  expect_is(ws, c("Workspace"))
  expect_equal(ls(ws), c("datasets", "experiments", "id", "services"))
  expect_equal(ws$id, id)
})

test_that("Can connect to workspace with config file", {
  AzureML:::skip_if_missing_config(settingsFile)
  ws <- workspace()
  expect_is(ws, c("Workspace"))
  expect_equal(ls(ws), c("datasets", "experiments", "id", "services"))
})

test_that("workspace() throws helpful 401 error with invalid id", {
  # AzureML:::skip_if_missing_config(settingsFile)
  # Capture only the error message from an expression (NULL if no error).
  .catchError <- function(expr){
    tryCatch(expr, error = function(e)e)$message
  }
  # TRUE if the captured message matches any of the expected messages.
  # NOTE(review): the expected strings are interpolated into a regex
  # unescaped, so "(", ")" and "." act as metacharacters here.
  .expect_error_in <- function(object, msgs){
    if(missing(object) || is.null(object)) return(FALSE)
    ptn <- sprintf("[%s]", paste(sprintf("(%s)", msgs), collapse = "|"))
    grepl(ptn, object)
  }
  m <- .catchError(workspace(id = "x", auth = "y", .validate = TRUE))
  msg <- c("invalid workspaceId",
           "401 (Unauthorised). Please check your workspace ID and auth codes."
  )
  # NOTE(review): 'msg' partially matches the 'msgs' parameter, and the
  # returned logical is never wrapped in expect_true(), so this line does
  # not actually assert anything -- likely an oversight; confirm intent.
  .expect_error_in(m, msg = msg)
})

# ------------------------------------------------------------------------
context("workspace - reading from settings.json file")

test_that("workspace() adds api_endpoint and management_endpoint if missing from config", {
  tf <- tempfile(fileext = ".json")
  on.exit(unlink(tf))
  write.AzureML.config("x", "y", file = tf)
  ws <- workspace(config = tf)
  expect_equal(ws$id, "x")
  expect_equal(
    ws$.api_endpoint,
    default_api(ws$.api_endpoint)[["api_endpoint"]]
  )
  expect_equal(
    ws$.management_endpoint,
    default_api(ws$.api_endpoint)[["management_endpoint"]]
  )
})

test_that("workspace() throws helpful error if config file does not exist", {
  expect_error(
    workspace(config = "file_does_not_exist"),
    "config file is missing: 'file_does_not_exist'"
  )
})

test_that("workspace() throws helpful error if config is invalid json", {
  tf <- tempfile(fileext = ".json")
  on.exit(unlink(tf))
  writeLines("garbage", con = tf)
  msg <- tryCatch(workspace(config = tf), error = function(e)e)$message
  expect_true(
    grepl("Your config file contains invalid json", msg)
  )
})
/tests/testthat/test-1-workspace.R
no_license
Henri-Lo/AzureML
R
false
false
2,850
r
# testthat suite for AzureML::workspace(): connection, error reporting,
# and settings.json handling. Network-dependent tests are skipped when no
# settings file is available.
if(interactive()) library("testthat")

settingsFile <- AzureML.config.default
# Local wrapper: default .validate = FALSE so workspace construction does
# not hit the service unless a test opts in.
workspace <- function(..., .validate = FALSE) AzureML::workspace(..., .validate = .validate)

# ------------------------------------------------------------------------
context("workspace - connect to workspace")

test_that("Can connect to workspace with supplied id and auth", {
  AzureML:::skip_if_missing_config(settingsFile)
  js <- read.AzureML.config(settingsFile)
  id <- js$workspace$id
  auth <- js$workspace$authorization_token
  expect_true(!is.null(id))
  expect_true(!is.null(auth))
  ws <- workspace(id, auth)
  expect_is(ws, c("Workspace"))
  expect_equal(ls(ws), c("datasets", "experiments", "id", "services"))
  expect_equal(ws$id, id)
})

test_that("Can connect to workspace with config file", {
  AzureML:::skip_if_missing_config(settingsFile)
  ws <- workspace()
  expect_is(ws, c("Workspace"))
  expect_equal(ls(ws), c("datasets", "experiments", "id", "services"))
})

test_that("workspace() throws helpful 401 error with invalid id", {
  # AzureML:::skip_if_missing_config(settingsFile)
  # Capture only the error message from an expression (NULL if no error).
  .catchError <- function(expr){
    tryCatch(expr, error = function(e)e)$message
  }
  # TRUE if the captured message matches any of the expected messages.
  # NOTE(review): the expected strings are interpolated into a regex
  # unescaped, so "(", ")" and "." act as metacharacters here.
  .expect_error_in <- function(object, msgs){
    if(missing(object) || is.null(object)) return(FALSE)
    ptn <- sprintf("[%s]", paste(sprintf("(%s)", msgs), collapse = "|"))
    grepl(ptn, object)
  }
  m <- .catchError(workspace(id = "x", auth = "y", .validate = TRUE))
  msg <- c("invalid workspaceId",
           "401 (Unauthorised). Please check your workspace ID and auth codes."
  )
  # NOTE(review): 'msg' partially matches the 'msgs' parameter, and the
  # returned logical is never wrapped in expect_true(), so this line does
  # not actually assert anything -- likely an oversight; confirm intent.
  .expect_error_in(m, msg = msg)
})

# ------------------------------------------------------------------------
context("workspace - reading from settings.json file")

test_that("workspace() adds api_endpoint and management_endpoint if missing from config", {
  tf <- tempfile(fileext = ".json")
  on.exit(unlink(tf))
  write.AzureML.config("x", "y", file = tf)
  ws <- workspace(config = tf)
  expect_equal(ws$id, "x")
  expect_equal(
    ws$.api_endpoint,
    default_api(ws$.api_endpoint)[["api_endpoint"]]
  )
  expect_equal(
    ws$.management_endpoint,
    default_api(ws$.api_endpoint)[["management_endpoint"]]
  )
})

test_that("workspace() throws helpful error if config file does not exist", {
  expect_error(
    workspace(config = "file_does_not_exist"),
    "config file is missing: 'file_does_not_exist'"
  )
})

test_that("workspace() throws helpful error if config is invalid json", {
  tf <- tempfile(fileext = ".json")
  on.exit(unlink(tf))
  writeLines("garbage", con = tf)
  msg <- tryCatch(workspace(config = tf), error = function(e)e)$message
  expect_true(
    grepl("Your config file contains invalid json", msg)
  )
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bsw.R \name{bsw} \alias{bsw} \title{Blended sea winds (BSW)} \usage{ bsw(date = NULL, uv_stress = "uv", resolution = "6hrly", ...) } \arguments{ \item{date}{(date/character) date, in the form YYYY-MM-DD if resolution is 6hrly or daily, or in the form YYYY-MM if resolution is monthly. For resolution=clm can be left NULL. If given, must be in the range 1987-07-09 to today-1 (yesterday)} \item{uv_stress}{(character) one of uv or stresss, not sure what these mean exactly yet. Default: uv} \item{resolution}{(character) temporal resolution. one of 6hrly, clm, daily, or monthly. See Details.} \item{...}{curl options passed on to \code{\link[crul]{HttpClient}}} } \value{ an object of class \code{ncdf4} } \description{ The Blended Sea Winds dataset contains globally gridded, high-resolution ocean surface vector winds and wind stresses on a global 0.25° grid, and multiple time resolutions of six-hourly, daily, monthly, and 11-year (1995–2005) climatological monthlies. } \details{ Products are available from July 9th, 1987 - present. Uses \code{ncdf4} under the hood to read NetCDF files Use \code{bsw_cache} object to manage cached files. } \note{ We only handle the netcdf files for now, we're avoiding the ieee files, see http://www.cpc.ncep.noaa.gov/products/wesley/wgrib2/ieee.html } \section{Citing NOAA and BSW data}{ Message from NOAA: "We also ask you to acknowledge us in your use of the data to help us justify continued service. This may be done by including text such as: The wind data are acquired from NOAA's National Climatic Data Center, via their website http://www.ncdc.noaa.gov/oa/rsad/blendedseawinds.html. We would also appreciate receiving a copy of the relevant publication." 
} \section{Temporal resolution}{ \itemize{ \item 6hrly: 6-hourly, 4 global snapshots (u,v) at UTC 00, 06, 12 and 18Z \item clm: climatological monthlies; also provided is the scalar mean (u,v,w) \item daily: averages of the 6hrly time points, thus with a center time 09Z; also provided is the scalar mean, (u,v,w) \item monthly: averages of daily data; also provided is the scalar mean (u,v,w) } } \examples{ \dontrun{ # cache control bsw_cache$list() bsw_cache$details() # bsw_cache$delete_all() # 6hrly data ## uv x <- bsw(date = "2017-10-01") ## stress y <- bsw(date = "2011-08-01", uv_stress = "stress") # daily z <- bsw(date = "2017-10-01", resolution = "daily") # monthly w <- bsw(date = "2011-08", resolution = "monthly") # clm # x <- bsw(resolution = "clm") } } \references{ \url{https://www.ncdc.noaa.gov/data-access/marineocean-data/blended-global/blended-sea-winds} ftp://eclipse.ncdc.noaa.gov/pub/seawinds/ ieee files: http://www.cpc.ncep.noaa.gov/products/wesley/wgrib2/ieee.html }
/man/bsw.Rd
permissive
nemochina2008/rnoaa
R
false
true
2,821
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bsw.R \name{bsw} \alias{bsw} \title{Blended sea winds (BSW)} \usage{ bsw(date = NULL, uv_stress = "uv", resolution = "6hrly", ...) } \arguments{ \item{date}{(date/character) date, in the form YYYY-MM-DD if resolution is 6hrly or daily, or in the form YYYY-MM if resolution is monthly. For resolution=clm can be left NULL. If given, must be in the range 1987-07-09 to today-1 (yesterday)} \item{uv_stress}{(character) one of uv or stresss, not sure what these mean exactly yet. Default: uv} \item{resolution}{(character) temporal resolution. one of 6hrly, clm, daily, or monthly. See Details.} \item{...}{curl options passed on to \code{\link[crul]{HttpClient}}} } \value{ an object of class \code{ncdf4} } \description{ The Blended Sea Winds dataset contains globally gridded, high-resolution ocean surface vector winds and wind stresses on a global 0.25° grid, and multiple time resolutions of six-hourly, daily, monthly, and 11-year (1995–2005) climatological monthlies. } \details{ Products are available from July 9th, 1987 - present. Uses \code{ncdf4} under the hood to read NetCDF files Use \code{bsw_cache} object to manage cached files. } \note{ We only handle the netcdf files for now, we're avoiding the ieee files, see http://www.cpc.ncep.noaa.gov/products/wesley/wgrib2/ieee.html } \section{Citing NOAA and BSW data}{ Message from NOAA: "We also ask you to acknowledge us in your use of the data to help us justify continued service. This may be done by including text such as: The wind data are acquired from NOAA's National Climatic Data Center, via their website http://www.ncdc.noaa.gov/oa/rsad/blendedseawinds.html. We would also appreciate receiving a copy of the relevant publication." 
} \section{Temporal resolution}{ \itemize{ \item 6hrly: 6-hourly, 4 global snapshots (u,v) at UTC 00, 06, 12 and 18Z \item clm: climatological monthlies; also provided is the scalar mean (u,v,w) \item daily: averages of the 6hrly time points, thus with a center time 09Z; also provided is the scalar mean, (u,v,w) \item monthly: averages of daily data; also provided is the scalar mean (u,v,w) } } \examples{ \dontrun{ # cache control bsw_cache$list() bsw_cache$details() # bsw_cache$delete_all() # 6hrly data ## uv x <- bsw(date = "2017-10-01") ## stress y <- bsw(date = "2011-08-01", uv_stress = "stress") # daily z <- bsw(date = "2017-10-01", resolution = "daily") # monthly w <- bsw(date = "2011-08", resolution = "monthly") # clm # x <- bsw(resolution = "clm") } } \references{ \url{https://www.ncdc.noaa.gov/data-access/marineocean-data/blended-global/blended-sea-winds} ftp://eclipse.ncdc.noaa.gov/pub/seawinds/ ieee files: http://www.cpc.ncep.noaa.gov/products/wesley/wgrib2/ieee.html }
# Private helper shared by SpInvQcTBDSpecies() and SpInvQcUNKSpecies():
# returns the unique occurrences of a given unknown-plant USDA Plants code
# ("TBD" or "UNK") in the vegetation inventory data. The optional filter
# arguments (park, spring, field.season) may be missing; missingness is
# forwarded unchanged to ReadAndFilterData().
.SpInvQcUnknownSpecies <- function(conn, path.to.data, park, spring, field.season, data.source, plants.code) {
  unknown.species <- ReadAndFilterData(conn, path.to.data, park, spring, field.season, data.source, "VegetationInventory")

  unknown.species %<>%
    dplyr::filter(USDAPlantsCode == plants.code) %>%
    dplyr::select(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>%
    dplyr::arrange(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>%
    unique()

  return(unknown.species)
}

#' List occurrences of "TBD" unknown plant species in veg species inventory data
#'
#' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}.
#' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}.
#' @param park Optional. Four-letter park code to filter on, e.g. "MOJA".
#' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0".
#' @param field.season Optional. Field season name to filter on, e.g. "2019".
#' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database.
#'
#' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber.
#' @export
#'
#' @importFrom magrittr %>% %<>%
#'
#' @examples
#' \dontrun{
#' conn <- OpenDatabaseConnection()
#' SpInvQcTBDSpecies(conn, park = "LAKE")
#' SpInvQcTBDSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019")
#' SpInvQcTBDSpecies(path.to.data = "path/to/data", data.source = "local")
#' CloseDatabaseConnection(conn)
#' }
SpInvQcTBDSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") {
  # Delegate to the shared unknown-species filter with the "TBD" code.
  .SpInvQcUnknownSpecies(conn, path.to.data, park, spring, field.season, data.source, "TBD")
}

#' List occurrences of "UNK" unknown plant species in veg species inventory data
#'
#' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}.
#' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}.
#' @param park Optional. Four-letter park code to filter on, e.g. "MOJA".
#' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0".
#' @param field.season Optional. Field season name to filter on, e.g. "2019".
#' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database.
#'
#' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber.
#' @export
#'
#' @importFrom magrittr %>% %<>%
#'
#' @examples
#' \dontrun{
#' conn <- OpenDatabaseConnection()
#' SpInvQcUNKSpecies(conn, park = "LAKE")
#' SpInvQcUNKSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019")
#' SpInvQcUNKSpecies(path.to.data = "path/to/data", data.source = "local")
#' CloseDatabaseConnection(conn)
#' }
SpInvQcUNKSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") {
  # Delegate to the shared unknown-species filter with the "UNK" code.
  .SpInvQcUnknownSpecies(conn, path.to.data, park, spring, field.season, data.source, "UNK")
}

#' List occurrences of duplicate plant species in species inventory data
#'
#' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}.
#' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}.
#' @param park Optional. Four-letter park code to filter on, e.g. "MOJA".
#' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0".
#' @param field.season Optional. Field season name to filter on, e.g. "2019".
#' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database.
#'
#' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber, USDAPlantsCode
#' @export
#'
#' @importFrom magrittr %>% %<>%
#'
#' @examples
#' \dontrun{
#' conn <- OpenDatabaseConnection()
#' SpInvQcDuplicateSpecies(conn, park = "LAKE")
#' SpInvQcDuplicateSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019")
#' SpInvQcDuplicateSpecies(path.to.data = "path/to/data", data.source = "local")
#' CloseDatabaseConnection(conn)
#' }
SpInvQcDuplicateSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") {
  sp.inv <- ReadAndFilterData(conn, path.to.data, park, spring, field.season, data.source, "VegetationInventory")

  dup.species <- sp.inv %>%
    dplyr::select(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber, USDAPlantsCode) %>%
    # Unknown codes are handled by the TBD/UNK checks above, not here.
    dplyr::filter(!(USDAPlantsCode %in% c("UNK", "TBD"))) %>%
    # Within a transect visit, the same USDA code appearing more than once
    # is a duplicate; collapse the unknown-plant labels for reporting.
    dplyr::group_by(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, TransectNumber, USDAPlantsCode) %>%
    dplyr::mutate(UnknownPlantCode = paste(UnknownPlantCode, collapse = ", ")) %>%
    dplyr::mutate(DupCount = dplyr::n()) %>%
    dplyr::filter(DupCount > 1) %>%
    dplyr::ungroup() %>%
    dplyr::select(-DupCount) %>%
    unique()

  return(dup.species)
}
/R/species-inv-qc.R
no_license
nationalparkservice/mojn-sv-rpackage
R
false
false
6,490
r
#' List occurrences of "TBD" unknown plant species in veg species inventory data #' #' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}. #' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}. #' @param park Optional. Four-letter park code to filter on, e.g. "MOJA". #' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0". #' @param field.season Optional. Field season name to filter on, e.g. "2019". #' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database. #' #' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber. 
#' @export #' #' @importFrom magrittr %>% %<>% #' #' @examples #' \dontrun{ #' conn <- OpenDatabaseConnection() #' SpInvQcTBDSpecies(conn, park = "LAKE") #' SpInvQcTBDSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019") #' SpInvQcTBDSpecies(path.to.data = "path/to/data", data.source = "local") #' CloseDatabaseConnection(conn) #' } SpInvQcTBDSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") { tbd.species <- ReadAndFilterData(conn, path.to.data, park, spring, field.season, data.source, "VegetationInventory") tbd.species %<>% dplyr::filter(USDAPlantsCode == "TBD") %>% dplyr::select(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>% dplyr::arrange(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>% unique() return(tbd.species) } #' List occurrences of "UNK" unknown plant species in veg species inventory data #' #' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}. #' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}. #' @param park Optional. Four-letter park code to filter on, e.g. "MOJA". #' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0". #' @param field.season Optional. Field season name to filter on, e.g. "2019". #' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database. 
#' #' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber. #' @export #' #' @importFrom magrittr %>% %<>% #' #' @examples #' \dontrun{ #' conn <- OpenDatabaseConnection() #' SpInvQcUNKSpecies(conn, park = "LAKE") #' SpInvQcUNKSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019") #' SpInvQcUNKSpecies(path.to.data = "path/to/data", data.source = "local") #' CloseDatabaseConnection(conn) #' } SpInvQcUNKSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") { unk.species <- ReadAndFilterData(conn, path.to.data, park, spring, field.season, data.source, "VegetationInventory") unk.species %<>% dplyr::filter(USDAPlantsCode == "UNK") %>% dplyr::select(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>% dplyr::arrange(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber) %>% unique() return(unk.species) } #' List occurrences of duplicate plant species in species inventory data #' #' @param conn Database connection generated from call to \code{OpenDatabaseConnection()}. Ignored if \code{data.source} is \code{"local"}. #' @param path.to.data The directory containing the csv data exports generated from \code{SaveDataToCsv()}. Ignored if \code{data.source} is \code{"database"}. #' @param park Optional. Four-letter park code to filter on, e.g. "MOJA". #' @param spring Optional. Spring code to filter on, e.g. "LAKE_P_BLUE0". #' @param field.season Optional. Field season name to filter on, e.g. "2019". #' @param data.source Character string indicating whether to access data in the spring veg database (\code{"database"}, default) or to use data saved locally (\code{"local"}). 
In order to access the most up-to-date data, it is recommended that you select \code{"database"} unless you are working offline or your code will be shared with someone who doesn't have access to the database. #' #' @return A tibble with columns Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber, USDAPlantsCode #' @export #' #' @importFrom magrittr %>% %<>% #' #' @examples #' \dontrun{ #' conn <- OpenDatabaseConnection() #' SpInvQcDuplicateSpecies(conn, park = "LAKE") #' SpInvQcDuplicateSpecies(conn, spring = "LAKE_P_BLUE0", field.season = "2019") #' SpInvQcDuplicateSpecies(path.to.data = "path/to/data", data.source = "local") #' CloseDatabaseConnection(conn) #' } SpInvQcDuplicateSpecies <- function(conn, path.to.data, park, spring, field.season, data.source = "database") { sp.inv <- ReadAndFilterData(conn, path.to.data, park, spring, field.season, data.source, "VegetationInventory") dup.species <- sp.inv %>% dplyr::select(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, UnknownPlantCode, TransectNumber, USDAPlantsCode) %>% dplyr::filter(!(USDAPlantsCode %in% c("UNK", "TBD"))) %>% dplyr::group_by(Park, SpringCode, SpringName, VisitType, FieldSeason, StartDate, TransectNumber, USDAPlantsCode) %>% dplyr::mutate(UnknownPlantCode = paste(UnknownPlantCode, collapse = ", ")) %>% dplyr::mutate(DupCount = dplyr::n()) %>% dplyr::filter(DupCount > 1) %>% dplyr::ungroup() %>% dplyr::select(-DupCount) %>% unique() return(dup.species) }
\name{likeli_4_optim} \alias{likeli_4_optim} \title{Use Likelihood with Optim} \description{ Wraps the function \code{\link{likeli}} so you can use it with \code{\link{optim}}. This allows you to use other optimization methods to find maximum likelihood estimates. } \usage{ likeli_4_optim(par_2_analyze, model, par_names, var, source_data, pdf) } \arguments{ \item{par_2_analyze}{Vector of initial values for those parameters that are to be optimized. This should be a vector, NOT a list. This MUST be a one-dimensional vector - i.e. none of the vector members can be vectors themselves (in contrast to the rules for \code{anneal}). \code{optim} will pass this argument to \code{likeli_4_optim} automatically. See the example for more.} \item{model}{Model function for which to calculate likelihood.} \item{par_names}{Character vector with the name for each value in \code{par_2_analyze}.} \item{var}{List object with the source for all other non-parameter arguments and data used by the model, the PDF, and any sub-functions. This is the same as the argument that you pass to \code{\link{anneal}} or \code{\link{likeli}}.} \item{source_data}{Data frame containing any needed source data, including observed values.} \item{pdf}{Probability density function to use. If you want a log likelihood value, which is usual, the PDF must calculate the log of its result.} } \value{ A single numeric value for the likelihood. It is possible for this to be \code{NAN} or \code{Inf}. } \details{ This wraps the \code{\link{likeli}} function so that it can conform to the requirements of \code{\link{optim}}. Setting up to use this function is exactly like setting up to use \code{likeli}. Remember to set the \code{fnscale} option in the control list for \code{optim} to -1 so that \code{optim} performs a maximization rather than the default minimization (see example for details). 
} \examples{ ################# ## Set up for likeli ################# ## Use the included crown_rad dataset data(crown_rad) ## Create our model function - crown radius is a linear function of DBH. ## DBH is a column of data from the crown_rad dataset; a and b are single ## parameter values. model <- function (a, b, DBH) {a + b * DBH} ## We are planning to get maximum likelihood estimates for a and b. Create ## the list that says where all other functions and data are to be found. ## Indicate that DBH comes from the column marked "DBH" in the crown_rad dataset. var<-list(DBH = "DBH") ## We'll use the normal probability density function dnorm - add its ## arguments to our parameter list ## "x" value in PDF is observed value var$x <- "Radius" ## The mean is the predicted value, the outcome of the model statement. Use ## the reserved word "predicted" var$mean <- "predicted" var$sd <- 1.0 ## Have dnorm calculate log likelihood var$log <- TRUE ## Set up a vector with initial values for a and b par_2_analyze <- c(0.1, 0.1) ## Set up the vector with the names of a and b, so likeli_4_optim knows ## what the values in for_optim are par_names <- c("a", "b") ## Set your choice of optim controls - pass the other likeli_4_optim arguments ## by name so optim knows they are for likeli_4_optim ## Remember to set the fnscale option of optim to a negative value to perform ## a maximization rather than a minimization optim(par_2_analyze, likeli_4_optim, method = "Nelder-Mead", control = list(fnscale = -1), model = model, par_names = par_names, var = var, source_data = crown_rad, pdf = dnorm) }
/man/likeli_4_optim.Rd
no_license
LMurphy186232/likelihood
R
false
false
3,547
rd
\name{likeli_4_optim} \alias{likeli_4_optim} \title{Use Likelihood with Optim} \description{ Wraps the function \code{\link{likeli}} so you can use it with \code{\link{optim}}. This allows you to use other optimization methods to find maximum likelihood estimates. } \usage{ likeli_4_optim(par_2_analyze, model, par_names, var, source_data, pdf) } \arguments{ \item{par_2_analyze}{Vector of initial values for those parameters that are to be optimized. This should be a vector, NOT a list. This MUST be a one-dimensional vector - i.e. none of the vector members can be vectors themselves (in contrast to the rules for \code{anneal}). \code{optim} will pass this argument to \code{likeli_4_optim} automatically. See the example for more.} \item{model}{Model function for which to calculate likelihood.} \item{par_names}{Character vector with the name for each value in \code{par_2_analyze}.} \item{var}{List object with the source for all other non-parameter arguments and data used by the model, the PDF, and any sub-functions. This is the same as the argument that you pass to \code{\link{anneal}} or \code{\link{likeli}}.} \item{source_data}{Data frame containing any needed source data, including observed values.} \item{pdf}{Probability density function to use. If you want a log likelihood value, which is usual, the PDF must calculate the log of its result.} } \value{ A single numeric value for the likelihood. It is possible for this to be \code{NAN} or \code{Inf}. } \details{ This wraps the \code{\link{likeli}} function so that it can conform to the requirements of \code{\link{optim}}. Setting up to use this function is exactly like setting up to use \code{likeli}. Remember to set the \code{fnscale} option in the control list for \code{optim} to -1 so that \code{optim} performs a maximization rather than the default minimization (see example for details). 
} \examples{ ################# ## Set up for likeli ################# ## Use the included crown_rad dataset data(crown_rad) ## Create our model function - crown radius is a linear function of DBH. ## DBH is a column of data from the crown_rad dataset; a and b are single ## parameter values. model <- function (a, b, DBH) {a + b * DBH} ## We are planning to get maximum likelihood estimates for a and b. Create ## the list that says where all other functions and data are to be found. ## Indicate that DBH comes from the column marked "DBH" in the crown_rad dataset. var<-list(DBH = "DBH") ## We'll use the normal probability density function dnorm - add its ## arguments to our parameter list ## "x" value in PDF is observed value var$x <- "Radius" ## The mean is the predicted value, the outcome of the model statement. Use ## the reserved word "predicted" var$mean <- "predicted" var$sd <- 1.0 ## Have dnorm calculate log likelihood var$log <- TRUE ## Set up a vector with initial values for a and b par_2_analyze <- c(0.1, 0.1) ## Set up the vector with the names of a and b, so likeli_4_optim knows ## what the values in for_optim are par_names <- c("a", "b") ## Set your choice of optim controls - pass the other likeli_4_optim arguments ## by name so optim knows they are for likeli_4_optim ## Remember to set the fnscale option of optim to a negative value to perform ## a maximization rather than a minimization optim(par_2_analyze, likeli_4_optim, method = "Nelder-Mead", control = list(fnscale = -1), model = model, par_names = par_names, var = var, source_data = crown_rad, pdf = dnorm) }
#' Spark ML -- Naive-Bayes #' #' Naive Bayes Classifiers. It supports Multinomial NB (see \href{http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html}{here}) which can handle finitely supported discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a binary (0/1) data, it can also be used as Bernoulli NB (see \href{http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html}{here}). The input feature values must be nonnegative. #' #' @template roxlate-ml-algo #' @template roxlate-ml-probabilistic-classifier-params #' @template roxlate-ml-predictor-params #' @template roxlate-ml-formula-params #' @param model_type The model type. Supported options: \code{"multinomial"} #' and \code{"bernoulli"}. (default = \code{multinomial}) #' @param smoothing The (Laplace) smoothing parameter. Defaults to 1. #' @param weight_col (Spark 2.1.0+) Weight column name. If this is not set or empty, we treat all instance weights as 1.0. #' @export ml_naive_bayes <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ... ) { UseMethod("ml_naive_bayes") } #' @export ml_naive_bayes.spark_connection <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ...) 
{ ml_ratify_args() jobj <- ml_new_classifier( x, "org.apache.spark.ml.classification.NaiveBayes", uid, features_col, label_col, prediction_col, probability_col, raw_prediction_col ) %>% invoke("setSmoothing", smoothing) %>% invoke("setModelType", model_type) if(!rlang::is_null(thresholds)) jobj <- invoke(jobj, "setThresholds", thresholds) if (!rlang::is_null(weight_col)) jobj <- jobj_set_param(jobj, "setWeightCol", weight_col, NULL, "2.1.0") new_ml_naive_bayes(jobj) } #' @export ml_naive_bayes.ml_pipeline <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ...) { transformer <- ml_new_stage_modified_args() ml_add_stage(x, transformer) } #' @export ml_naive_bayes.tbl_spark <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), response = NULL, features = NULL, predicted_label_col = "predicted_label", ...) 
{ predictor <- ml_new_stage_modified_args() ml_formula_transformation() if (is.null(formula)) { predictor %>% ml_fit(x) } else { ml_generate_ml_model( x, predictor, formula, features_col, label_col, "classification", new_ml_model_naive_bayes, predicted_label_col ) } } # Validator ml_validator_naive_bayes <- function(args, nms) { old_new_mapping <- list( lambda = "smoothing" ) args %>% ml_validate_args({ if (!rlang::is_null(thresholds)) thresholds <- lapply(thresholds, ensure_scalar_double) smoothing <- ensure_scalar_double(smoothing) if (!rlang::is_null(weight_col)) weight_col <- ensure_scalar_character(weight_col) model_type <- rlang::arg_match(model_type, c("multinomial", "bernoulli")) }, old_new_mapping) %>% ml_extract_args(nms, old_new_mapping) } # Constructors new_ml_naive_bayes <- function(jobj) { new_ml_predictor(jobj, subclass = "ml_naive_bayes") } new_ml_naive_bayes_model <- function(jobj) { new_ml_prediction_model( jobj, num_features = invoke(jobj, "numFeatures"), num_classes = invoke(jobj, "numClasses"), pi = read_spark_vector(jobj, "pi"), theta = read_spark_matrix(jobj, "theta"), features_col = invoke(jobj, "getFeaturesCol"), prediction_col = invoke(jobj, "getPredictionCol"), probability_col = invoke(jobj, "getProbabilityCol"), raw_prediction_col = invoke(jobj, "getRawPredictionCol"), thresholds = try_null(invoke(jobj, "getThresholds")), subclass = "ml_naive_bayes_model") } new_ml_model_naive_bayes <- function( pipeline, pipeline_model, model, dataset, formula, feature_names, index_labels, call) { pi <- model$pi names(pi) <- index_labels theta <- model$theta rownames(theta) <- index_labels colnames(theta) <- feature_names new_ml_model_classification( pipeline, pipeline_model, model, dataset, formula, subclass = "ml_model_naive_bayes", !!! list(pi = pi, theta = theta, .features = feature_names, .index_labels = index_labels, .call = call) ) } # Generic implementations #' @export print.ml_model_naive_bayes <- function(x, ...) 
{ ml_model_print_call(x) print_newline() printf("A-priority probabilities:\n") print(exp(x$pi)) print_newline() printf("Conditional probabilities:\n") print(exp(x$theta)) print_newline() x } #' @export summary.ml_model_naive_bayes <- function(object, ...) { print(object, ...) object }
/R/ml_classification_naive_bayes.R
permissive
iffmainak/sparklyr
R
false
false
5,749
r
#' Spark ML -- Naive-Bayes #' #' Naive Bayes Classifiers. It supports Multinomial NB (see \href{http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html}{here}) which can handle finitely supported discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a binary (0/1) data, it can also be used as Bernoulli NB (see \href{http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html}{here}). The input feature values must be nonnegative. #' #' @template roxlate-ml-algo #' @template roxlate-ml-probabilistic-classifier-params #' @template roxlate-ml-predictor-params #' @template roxlate-ml-formula-params #' @param model_type The model type. Supported options: \code{"multinomial"} #' and \code{"bernoulli"}. (default = \code{multinomial}) #' @param smoothing The (Laplace) smoothing parameter. Defaults to 1. #' @param weight_col (Spark 2.1.0+) Weight column name. If this is not set or empty, we treat all instance weights as 1.0. #' @export ml_naive_bayes <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ... ) { UseMethod("ml_naive_bayes") } #' @export ml_naive_bayes.spark_connection <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ...) 
{ ml_ratify_args() jobj <- ml_new_classifier( x, "org.apache.spark.ml.classification.NaiveBayes", uid, features_col, label_col, prediction_col, probability_col, raw_prediction_col ) %>% invoke("setSmoothing", smoothing) %>% invoke("setModelType", model_type) if(!rlang::is_null(thresholds)) jobj <- invoke(jobj, "setThresholds", thresholds) if (!rlang::is_null(weight_col)) jobj <- jobj_set_param(jobj, "setWeightCol", weight_col, NULL, "2.1.0") new_ml_naive_bayes(jobj) } #' @export ml_naive_bayes.ml_pipeline <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), ...) { transformer <- ml_new_stage_modified_args() ml_add_stage(x, transformer) } #' @export ml_naive_bayes.tbl_spark <- function( x, formula = NULL, model_type = "multinomial", smoothing = 1, thresholds = NULL, weight_col = NULL, features_col = "features", label_col = "label", prediction_col = "prediction", probability_col = "probability", raw_prediction_col = "rawPrediction", uid = random_string("naive_bayes_"), response = NULL, features = NULL, predicted_label_col = "predicted_label", ...) 
{ predictor <- ml_new_stage_modified_args() ml_formula_transformation() if (is.null(formula)) { predictor %>% ml_fit(x) } else { ml_generate_ml_model( x, predictor, formula, features_col, label_col, "classification", new_ml_model_naive_bayes, predicted_label_col ) } } # Validator ml_validator_naive_bayes <- function(args, nms) { old_new_mapping <- list( lambda = "smoothing" ) args %>% ml_validate_args({ if (!rlang::is_null(thresholds)) thresholds <- lapply(thresholds, ensure_scalar_double) smoothing <- ensure_scalar_double(smoothing) if (!rlang::is_null(weight_col)) weight_col <- ensure_scalar_character(weight_col) model_type <- rlang::arg_match(model_type, c("multinomial", "bernoulli")) }, old_new_mapping) %>% ml_extract_args(nms, old_new_mapping) } # Constructors new_ml_naive_bayes <- function(jobj) { new_ml_predictor(jobj, subclass = "ml_naive_bayes") } new_ml_naive_bayes_model <- function(jobj) { new_ml_prediction_model( jobj, num_features = invoke(jobj, "numFeatures"), num_classes = invoke(jobj, "numClasses"), pi = read_spark_vector(jobj, "pi"), theta = read_spark_matrix(jobj, "theta"), features_col = invoke(jobj, "getFeaturesCol"), prediction_col = invoke(jobj, "getPredictionCol"), probability_col = invoke(jobj, "getProbabilityCol"), raw_prediction_col = invoke(jobj, "getRawPredictionCol"), thresholds = try_null(invoke(jobj, "getThresholds")), subclass = "ml_naive_bayes_model") } new_ml_model_naive_bayes <- function( pipeline, pipeline_model, model, dataset, formula, feature_names, index_labels, call) { pi <- model$pi names(pi) <- index_labels theta <- model$theta rownames(theta) <- index_labels colnames(theta) <- feature_names new_ml_model_classification( pipeline, pipeline_model, model, dataset, formula, subclass = "ml_model_naive_bayes", !!! list(pi = pi, theta = theta, .features = feature_names, .index_labels = index_labels, .call = call) ) } # Generic implementations #' @export print.ml_model_naive_bayes <- function(x, ...) 
{ ml_model_print_call(x) print_newline() printf("A-priority probabilities:\n") print(exp(x$pi)) print_newline() printf("Conditional probabilities:\n") print(exp(x$theta)) print_newline() x } #' @export summary.ml_model_naive_bayes <- function(object, ...) { print(object, ...) object }
#' @title #' Bias of an estimator in percentage #' @description #' Function for calculating the bias of an estimator. #' @details #' Function for calculating the bias of an estimator, given the observed values, and the estimated values. #' #' @param df a data frame. #' @param y Quoted name of the variable representing the observed values in the data frame. If a data frame is not provided, \code{y} can also be a numeric vector. #' @param yhat Quoted name of the variable representing the estimated values in the data frame. If a data frame is not provided, \code{yhat} can also be a numeric vector. #' @param na.rm a logical value indicating whether NA values should be stripped before the computation proceeds. default: \code{TRUE} #' @return Numeric vector with the bias value, in percentage. #' #' @keywords bias #' #' @seealso other statistics to evaluate estimators: #' \code{\link{rmse_per}} for the Root mean square error of an estimator #' @export #' @examples #' library(forestmangr) #' data("exfm11") #' head(exfm11) #' #' # Bias of an estimator, given the data frame and quoted variable names: #' bias_per(exfm11, "TH", "TH_EST3") #' #' # Bias of an estimator, given the vectors for observed and estimated values: #' bias_per(y = exfm11$TH, yhat = exfm11$TH_EST3) #' #' @author Sollano Rabelo Braga \email{sollanorb@@gmail.com} bias_per <- function(df, y, yhat, na.rm = TRUE){ # Checagem de variaveis #### # se df nao for fornecido, nulo, ou nao for dataframe, ou nao tiver tamanho e nrow maior que 1,tratar como vetores if( missing(df) ){ df <- data.frame("y"=y,"yhat"=yhat) y <- "y" yhat <- "yhat" }else if(!is.data.frame(df)){ stop("df must be a dataframe", call.=F) }else if(length(df)<=1 | nrow(df)<=1){ stop("Length and number of rows of 'df' must be greater than 1", call.=F) } # se y nao for fornecido nao for character, ou nao for um nome de variavel,ou nao for de tamanho 1, parar if( missing(y) ){ stop("y not set", call. 
= F) }else if( !is.character(y) ){ stop("'y' must be a character containing a variable name", call.=F) }else if(length(y)!=1){ stop("Length of 'y' must be 1", call.=F) }else if(forestmangr::check_names(df, y)==F){ stop(forestmangr::check_names(df, y, boolean=F), call.=F) } # se yhat nao for fornecido nao for character, ou nao for um nome de variavel,ou nao for de tamanho 1, parar if( missing(yhat) ){ stop("yhat not set", call. = F) }else if( !is.character(yhat) ){ stop("'yhat' must be a character containing a variable name", call.=F) }else if(length(yhat)!=1){ stop("Length of 'yhat' must be 1", call.=F) }else if(forestmangr::check_names(df, yhat)==F){ stop(forestmangr::check_names(df, yhat, boolean=F), call.=F) } # #### y_sym <- rlang::sym(y) yhat_sym <- rlang::sym(yhat) if(na.rm==TRUE) df <- df %>% dplyr::select(!!y_sym, !!yhat_sym) %>% stats::na.omit() y_vec <- df %>% dplyr::pull(!!y_sym) yhat_vec <- df %>% dplyr::pull(!!yhat_sym) sum(y_vec - yhat_vec)/sum(y_vec) * 100 }
/R/bias_per.R
permissive
cran/forestmangr
R
false
false
3,167
r
#' @title #' Bias of an estimator in percentage #' @description #' Function for calculating the bias of an estimator. #' @details #' Function for calculating the bias of an estimator, given the observed values, and the estimated values. #' #' @param df a data frame. #' @param y Quoted name of the variable representing the observed values in the data frame. If a data frame is not provided, \code{y} can also be a numeric vector. #' @param yhat Quoted name of the variable representing the estimated values in the data frame. If a data frame is not provided, \code{yhat} can also be a numeric vector. #' @param na.rm a logical value indicating whether NA values should be stripped before the computation proceeds. default: \code{TRUE} #' @return Numeric vector with the bias value, in percentage. #' #' @keywords bias #' #' @seealso other statistics to evaluate estimators: #' \code{\link{rmse_per}} for the Root mean square error of an estimator #' @export #' @examples #' library(forestmangr) #' data("exfm11") #' head(exfm11) #' #' # Bias of an estimator, given the data frame and quoted variable names: #' bias_per(exfm11, "TH", "TH_EST3") #' #' # Bias of an estimator, given the vectors for observed and estimated values: #' bias_per(y = exfm11$TH, yhat = exfm11$TH_EST3) #' #' @author Sollano Rabelo Braga \email{sollanorb@@gmail.com} bias_per <- function(df, y, yhat, na.rm = TRUE){ # Checagem de variaveis #### # se df nao for fornecido, nulo, ou nao for dataframe, ou nao tiver tamanho e nrow maior que 1,tratar como vetores if( missing(df) ){ df <- data.frame("y"=y,"yhat"=yhat) y <- "y" yhat <- "yhat" }else if(!is.data.frame(df)){ stop("df must be a dataframe", call.=F) }else if(length(df)<=1 | nrow(df)<=1){ stop("Length and number of rows of 'df' must be greater than 1", call.=F) } # se y nao for fornecido nao for character, ou nao for um nome de variavel,ou nao for de tamanho 1, parar if( missing(y) ){ stop("y not set", call. 
= F) }else if( !is.character(y) ){ stop("'y' must be a character containing a variable name", call.=F) }else if(length(y)!=1){ stop("Length of 'y' must be 1", call.=F) }else if(forestmangr::check_names(df, y)==F){ stop(forestmangr::check_names(df, y, boolean=F), call.=F) } # se yhat nao for fornecido nao for character, ou nao for um nome de variavel,ou nao for de tamanho 1, parar if( missing(yhat) ){ stop("yhat not set", call. = F) }else if( !is.character(yhat) ){ stop("'yhat' must be a character containing a variable name", call.=F) }else if(length(yhat)!=1){ stop("Length of 'yhat' must be 1", call.=F) }else if(forestmangr::check_names(df, yhat)==F){ stop(forestmangr::check_names(df, yhat, boolean=F), call.=F) } # #### y_sym <- rlang::sym(y) yhat_sym <- rlang::sym(yhat) if(na.rm==TRUE) df <- df %>% dplyr::select(!!y_sym, !!yhat_sym) %>% stats::na.omit() y_vec <- df %>% dplyr::pull(!!y_sym) yhat_vec <- df %>% dplyr::pull(!!yhat_sym) sum(y_vec - yhat_vec)/sum(y_vec) * 100 }
library(car) library(ggplot2) ###1a Load in babyskin.csv. This spreadsheet contains data on the orangeness of different babies' skin and how many cans of carrot baby food they eat each month. #Run a simple regression with orangeness as your outcome and carrots as your predictor. Discuss significance, amount of variance explained, and direction of relationship. babyskin <- read.table("babyskin.csv",sep="\t",header=T,comment.char="",quote="") head(babyskin) linearModel <- lm(Orangeness~Carrots, data=babyskin) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 103.2869 11.1231 9.286 <2e-16 *** # Carrots 7.8537 0.3213 24.445 <2e-16 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 78.72 on 198 degrees of freedom #Multiple R-squared: 0.7511, Adjusted R-squared: 0.7499 #F-statistic: 597.6 on 1 and 198 DF, p-value: < 2.2e-16 #The models formula is Orangeness~Carrots. #Carrots as a prediction is significant. #The model is significant overall (F 1,198=597.6, p < 2.2e-16) and has a high variance explanation (mult.R-squared=0.7111,adj.R-squared=0.7499) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###1b Now check assumptions. #does violate the linearity assumption ggplot(babyskin, aes(Carrots, Orangeness)) + geom_point() #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points do not fan out a bit #normally distributed residuals, #the residuals do seem normal distributed as the data does not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 ###1c You should have noticed that the assumption of linearity was violated. 
#Apply a regression technique that's appropriate to use when your data is curved. #Is your new model significant? How has R^2 changed? model_2 <- lm(Orangeness~poly(Carrots,2), data=babyskin) summary(model_2) #Amount of explained variance (R^2) increased from 0.7511 to 0.8128 ###1d Plot a curved line that corresponds to your new regression on top of a scatterplot of the data points. predict(model_2, newdata=data.frame(Carrots=c(10,20,30))) babyskin$predicted_Orangeness <- predict(model_2) head(babyskin) ggplot(babyskin, aes(Carrots, Orangeness)) + geom_point() + geom_smooth(method="lm") + geom_line(aes(Carrots,predicted_Orangeness), colour="red") ###2a Load in healing.csv. This spreadsheet contains data on the time it takes for small cuts in the skin heal for people of different ages. #Run a simple linear regression, discuss significance, variance explained, and direction of relationship, #and check assumptions (HINT: your plot for checking the assumption of homoscedasticity is going to look weird, #but remember that the assumption is violated if there is a "fan shape" to the data points) healing <- read.table("healing.csv",sep="\t",header=T,comment.char="",quote="") head(healing) linearModel <- lm(healing_rate~age, data=healing) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 7.374313 0.220436 33.45 <2e-16 *** # age -0.052480 0.004717 -11.12 <2e-16 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 0.9526 on 98 degrees of freedom #Multiple R-squared: 0.5581, Adjusted R-squared: 0.5536 #F-statistic: 123.8 on 1 and 98 DF, p-value: < 2.2e-16 #The models formula is healing_rate~age #age as a prediction is significant. #The model is significant overall (F 1,98=123.8, p < 2.2e-16) and has a high variance explanation (mult.R-squared=0.5581,adj.R-squared=0.5536) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. 
#does violate the linearity assumption ggplot(healing, aes(age, healing_rate)) + geom_point() #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points do not fan out a bit #normally distributed residuals, #the residuals do seem normal distributed as the data does not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 #2b As in 1, the assumption of linearity has been violated. #Again, you need to apply a regression technique that's appropriate to use when your data is curved. #(HINT: HOW MANY CURVES ARE THERE IN THIS DATA??) Is your new model significant? How has R^2 changed? model_2 <- lm(healing_rate~poly(age,3), data=healing) summary(model_2) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 5.16281 0.07245 71.259 < 2e-16 *** # poly(age, 3)1 -10.59784 0.72452 -14.627 < 2e-16 *** # poly(age, 3)2 1.28423 0.72452 1.773 0.0795 . # poly(age, 3)3 -6.07394 0.72452 -8.383 4.36e-13 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 0.7245 on 96 degrees of freedom #Multiple R-squared: 0.7496, Adjusted R-squared: 0.7418 #F-statistic: 95.8 on 3 and 96 DF, p-value: < 2.2e-16 #The predicotors appears to be sigificant with the only not significant predictor having a low significant of 0.0797 #The model is significant overall (F 3,96 = 95.8, p < 2.2e-16) #Amount of explained variance (R^2) increased from 0.5581 to 0.7245 ###2c Plot a curved line that corresponds to your new regression on top of a scatterplot of the data points. 
predict(model_2, newdata=data.frame(age=c(10,20,30))) healing$predicted_healing_rate <- predict(model_2) head(healing) ggplot(healing, aes(age, healing_rate)) + geom_point() + geom_smooth(method="lm") + geom_line(aes(age,predicted_healing_rate), colour="red") ###3a Load in uscrime.csv. This spreadsheet contains data on U.S. crime statistics in 1960 for different U.S. states. #You are going to run a multiple regression (in 3b) to determine which variables predict crime. The variables in this spreadsheet include: #Crime: Crime rate (offenses per 100,000 people) #M: Percentage of males aged 14-24 in state population #Ed: mean years of schooling of the population aged 25 or older #Po1: per capita expenditure on police protection #U2: unemployment rate of males 35-39 #Ineq: income inequality #Prob: probability of imprisonment for crime #LF: labor force participation rate of urban males 14-24 #M.F: number of males per 100 females #NW: percentage nonwhites in population #Time: average time in months served vy offenders in state prisons UScrime <- read.table("uscrime.csv",sep="\t",header=T,comment.char="",quote="") head(UScrime) ###3b Run a multiple regression with Crime as your outcome variable and all other variables as predictors. #Is the model significant? How much variance is explained in the sample? How much variance would we expect to be explained in the general population? Which predictors are significant? linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time, data=UScrime) summary(linearModel) #Coefficients: # Estimate Std. Error tvalue Pr(>|t|) #(Intercept) -5549.136 1383.300 -4.012 0.000292 *** # M 95.100 40.549 2.345 0.024640 * # Ed 168.424 58.961 2.857 0.007071 ** # Po1 113.991 16.692 6.829 0.51e-08 *** # LF 234.612 1172.477 0.200 0.842529 #M.F 9.765 16.046 0.609 0.546650 #NW 1.789 5.583 0.321 0.750426 #U2 82.664 50.240 1.645 0.108596 #Ineq 63.036 16.374 3.850 0.000466 *** # Prob -4169.393 2143.516 -1.945 0.059603 . 
#Time -1.084 6.421 -0.169 0.866836 #--- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 209 on 36 degrees of freedom #Multiple R-squared: 0.7714, Adjusted R-squared: 0.7079 #F-statistic: 12.15 on 10 and 36 DF, p-value: 8.027e-09 #The models formula is Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time. #Not all the main effects were significant. M, Ed, Pol and Ineq are significant. LF,M.F,NW,U2,Prob and Time are not. #The model is significant overall (F 10,36 = 12.15, p=8.027e-09) and has a high variance explanation (mult.R-squared=0.7714,adj.R-squared=0.7079) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###3c Check assumptions of the multiple regression technique (don't worry about linearity here). #Is there a problem with multicollinearity in this model? vif(linearModel) #avg=2.915987 = (2.733841+4.580025+2.590702+2.363404+2.353677+3.469670+1.895208+4.492696+2.500527+2.180123)/10 cor(UScrime[,1:10]) #Yes there is a problem. Te avg is three times higher then 1 which is a sign for multicollinearity. #If we look at the cor of the predictors there also seems to be quite some correlation between some predictors. #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points have a nice cloud shape #normally distributed residuals, #the residuals do seem normal distributed as the tail and the head do not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 ###3d Now perform model selection, removing predictors until you arrive at a final model. #At each step, state which predictor you have chosen to remove and why. When you get to the final model, explain why you aren't removing any more predictors. 
How do multiple R^2 and adjusted R^2 change each time you remove a predictor? Why do you think you see this pattern? linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time, data=UScrime) summary(linearModel) #At start : Multiple R-squared: 0.7714, Adjusted R-squared: 0.7079 drop1(linearModel, test="F") #Remove time as AIC decreases from 511.66 to 509.70 and Time has a significance of 0.866 linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7712, Adjusted R-squared: 0.7156 drop1(linearModel, test="F") #Remove LF as AIC decreases from 509.70 to 507.82 and LF has a significance of 0.86 linearModel <- lm(Crime~M+Ed+Po1+M.F+NW+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.771, Adjusted R-squared: 0.7228 drop1(linearModel, test="F") #Remove NW as AIC decreases from 507.82 to 505.87 and LF has a significance of 0.745 linearModel <- lm(Crime~M+Ed+Po1+M.F+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7704, Adjusted R-squared: 0.7291 drop1(linearModel, test="F") #Remove M.F as AIC decreases from 505.87 to 504.79 and LF has a significance of 0.387499 linearModel <- lm(Crime~M+Ed+Po1+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7659, Adjusted R-squared: 0.7307 drop1(linearModel, test="F") #Removing more predictors would increase the AIC value and all the predictors are significant #We see that Multiple R-squared goes down as we remove predictors bit Adjusted R-squared goes up. We get a better prediction so Adjusted R-squared goed up and because we use less predictors it follows that Multiple R-squared goed down. ###3e Refer back to what each predictor variable name means as described in the instructions for exercise 3a. #Provide a description of the different societal factors that predict crime rate in U.S. 
states (what I mean is don't just list the variable names from the spreadsheet, #but actually say what they refer to and how they relate to crime rate). Describe the direction of the relationship between each of these factors and crime rate. #Crime: Crime rate (offenses per 100,000 people) #M: Percentage of males aged 14-24 in state population #The relation is 105.0 which is avg compared to the other predictors. It appears that the Percentage of males aged 14-24 in state population is a decent predictor of crime. #This would be the case if Males are more criminal then Females. #Ed: mean years of schooling of the population aged 25 or older #The relation is 196.47 which is high compared to the other predictors. This is a bit weird of a relation as you would expect that education in general reduces crime. #The reason could be that if people of 25 and older receive a lot of education it means they didnt had it when young, and this can mean that young people are doing crimes. #Po1: per capita expenditure on police protection #The relation is 115.02 which is avg compared to the other predictors. This relation is also a bit weird as more money to the police should reduce crime. #The reason is probably that when there is high crime somewhere there is more money for the police to fight it. Then it if also often the case that where there is alot of money for the police #it is probably for the high crimes that are there #U2: unemployment rate of males 35-39 #The relation is 89.37 which is little below avg compared to the other predictors. Unemployemt is a logical predictor of crime, people do not have anything to do and want money #Ineq: income inequality #The relation is 67.65 which is below avg compared to the other predictors. This means that when rich people earn a lot and people in a lower class earn less there is more crime. #This is also very reasonable. 
The poor people want money, and the rich have stuff they can steal, there might also even be a touch of sense of fairness. #Prob: probability of imprisonment for crime #The relation is -3801.84 which is a very high negative relation. This means that as the probability of imprisonment increases a little the crimes reduce much more. #This is expected, the higher the consequences the less easy people commit crimes ###3f The directions of several of the relationships you described in 3e make some sense. #However, a couple are very strange. First, it is strange that, as per capita expenditure on police protection increases, so does crime rate. #What does this relationship suggest about causality between predictors and outcome variables in observational studies like this one? #A predictor is not nessesaraly a cause of the thing you want to predict. In this case its plausible that it is the other way around, crime causes more per capita expenditure on police protection. #Because there is a relation per capita expenditure will predict crime to some extend ###3g Another strange relationship is the positive one between number of years of schooling in the population and crime rate. #Do you think there is a direct relationship between these two? #No, there is higly likely a third variable that explains this relation ###4a load in mtcars.csv (set row.names=1 in the read.table function). You've worked with this data before; it describes characteristics of different cars. #Perform a multiple regression, predicting mpg (miles per gallon, which is a measure of fuel efficiency) based on the following predictors: #cyl: number of cylinders in the engine (a cylinder is an engine component where power is created) #disp: size of engine #hp: horsepower (how powerful the engine is) #wt: the weight of the car. mtcars <- read.table("mtcars.csv",sep="\t",header=T,comment.char="",quote="",row.names = 1) head(mtcars) ###4b Is the model significant? How much variance in the sample is explained? 
#How much variance in the population would we expect to be explained? Which predictors are significant? linearModel <- lm(mpg~cyl+disp+hp+drat+wt+qsec+vs+am+gear+carb, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 12.30337 18.71788 0.657 0.5181 #cyl -0.11144 1.04502 -0.107 0.9161 #disp 0.01334 0.01786 0.747 0.4635 #hp -0.02148 0.02177 -0.987 0.3350 #drat 0.78711 1.63537 0.481 0.6353 #wt -3.71530 1.89441 -1.961 0.0633 . #qsec 0.82104 0.73084 1.123 0.2739 #vs 0.31776 2.10451 0.151 0.8814 #am 2.52023 2.05665 1.225 0.2340 #gear 0.65541 1.49326 0.439 0.6652 #carb -0.19942 0.82875 -0.241 0.8122 #--- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 2.65 on 21 degrees of freedom #Multiple R-squared: 0.869, Adjusted R-squared: 0.8066 #F-statistic: 13.93 on 10 and 21 DF, p-value: 3.793e-07 #The models formula is mpg~cyl+disp+hp+drat+wt+qsec+vs+am+gear+carb. #None of the main effects were significant. #The model is significant overall (F 10,21 = 13.93, p=3.793e-07) and has a high variance explanation (mult.R-squared=0.869,adj.R-squared=0.8066) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###4c Test the model assumptions. Are any of them violated? #Is there a problem with multicollinearity in this model? vif(linearModel) #avg=9.577414 = (15.373833+21.620241+9.832037+3.374620+15.164887+7.527958+4.965873+4.648487+5.357452+7.908747)/10 cor(mtcars[,2:10]) #Yes there is a problem. Te avg is 9.5 times higher then 1 which is a sign for multicollinearity. And for cyl,disp,hp,wt the value is very close or over 10 #If we look at the cor of the predictors there also seems to be quite some correlation between some predictors. 
#no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points have a nice cloud shape #normally distributed residuals, #the residuals do seem normal distributed as the tail and the head do not deviate a lot from the line #and check that there are no highly influential points. #there is one highly influential point that has a Cook's distance of > 0.5 but not > 1 ###4d Run the drop1() function on your regression model. #Which predictor would be the best one to drop? Which would be the second best to drop? Why? (don't actually drop them!) drop1(linearModel, test="F") #The best predictor to drop is cyl with an decrease of AIC from 70.989 to 68.915 and has a significance of 0.91609 #The second best predictor to drop is vs with an decrease of AIC from 70.989 to 68.932 and has a significance of 0.88142 ###4e Now you're going to run 2 simple regressions: one predicting mpg as a function of disp, and a second predicting mpg as a function of hp. Are these models significant? linearModel <- lm(mpg~disp, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 29.599855 1.229720 24.070 < 2e-16 *** # disp -0.041215 0.004712 -8.747 9.38e-10 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 3.251 on 30 degrees of freedom #Multiple R-squared: 0.7183, Adjusted R-squared: 0.709 #F-statistic: 76.51 on 1 and 30 DF, p-value: 9.38e-10 #The models formula is mpg~disp #disp as a prediction is significant. #The model is significant overall (F 1,30=76.51 p = 9.38e-10) and has a high variance explanation (mult.R-squared=0.7183) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. linearModel <- lm(mpg~hp, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. 
Error t value Pr(>|t|) #(Intercept) 30.09886 1.63392 18.421 < 2e-16 *** # hp -0.06823 0.01012 -6.742 1.79e-07 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 3.863 on 30 degrees of freedom #Multiple R-squared: 0.6024, Adjusted R-squared: 0.5892 #F-statistic: 45.46 on 1 and 30 DF, p-value: 1.788e-07 #The models formula is mpg~hp #hp as a prediction is significant. #The model is significant overall (F 1,30=45.46 p = 1.788e-07) and has a high variance explanation (mult.R-squared=0.6024) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###4f How do disp and hp behave differently in 4d versus 4e? Why do you think this is? (consider what assumption was violated in 4c. It may also help to think back to partial correlations.) #in 4e they were not significant predictors, in 4d they are. In 4c we discovered that there is a multicollinarity violation, there is a correlation between the variables themselves. #The model is better without the "double" predictors.
/Week8/Rouwmaat_Heuvel_U8.R
no_license
anhnguyendepocen/FrequentistStatistics
R
false
false
21,348
r
library(car) library(ggplot2) ###1a Load in babyskin.csv. This spreadsheet contains data on the orangeness of different babies' skin and how many cans of carrot baby food they eat each month. #Run a simple regression with orangeness as your outcome and carrots as your predictor. Discuss significance, amount of variance explained, and direction of relationship. babyskin <- read.table("babyskin.csv",sep="\t",header=T,comment.char="",quote="") head(babyskin) linearModel <- lm(Orangeness~Carrots, data=babyskin) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 103.2869 11.1231 9.286 <2e-16 *** # Carrots 7.8537 0.3213 24.445 <2e-16 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 78.72 on 198 degrees of freedom #Multiple R-squared: 0.7511, Adjusted R-squared: 0.7499 #F-statistic: 597.6 on 1 and 198 DF, p-value: < 2.2e-16 #The models formula is Orangeness~Carrots. #Carrots as a prediction is significant. #The model is significant overall (F 1,198=597.6, p < 2.2e-16) and has a high variance explanation (mult.R-squared=0.7111,adj.R-squared=0.7499) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###1b Now check assumptions. #does violate the linearity assumption ggplot(babyskin, aes(Carrots, Orangeness)) + geom_point() #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points do not fan out a bit #normally distributed residuals, #the residuals do seem normal distributed as the data does not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 ###1c You should have noticed that the assumption of linearity was violated. 
#Apply a regression technique that's appropriate to use when your data is curved. #Is your new model significant? How has R^2 changed? model_2 <- lm(Orangeness~poly(Carrots,2), data=babyskin) summary(model_2) #Amount of explained variance (R^2) increased from 0.7511 to 0.8128 ###1d Plot a curved line that corresponds to your new regression on top of a scatterplot of the data points. predict(model_2, newdata=data.frame(Carrots=c(10,20,30))) babyskin$predicted_Orangeness <- predict(model_2) head(babyskin) ggplot(babyskin, aes(Carrots, Orangeness)) + geom_point() + geom_smooth(method="lm") + geom_line(aes(Carrots,predicted_Orangeness), colour="red") ###2a Load in healing.csv. This spreadsheet contains data on the time it takes for small cuts in the skin heal for people of different ages. #Run a simple linear regression, discuss significance, variance explained, and direction of relationship, #and check assumptions (HINT: your plot for checking the assumption of homoscedasticity is going to look weird, #but remember that the assumption is violated if there is a "fan shape" to the data points) healing <- read.table("healing.csv",sep="\t",header=T,comment.char="",quote="") head(healing) linearModel <- lm(healing_rate~age, data=healing) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 7.374313 0.220436 33.45 <2e-16 *** # age -0.052480 0.004717 -11.12 <2e-16 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 0.9526 on 98 degrees of freedom #Multiple R-squared: 0.5581, Adjusted R-squared: 0.5536 #F-statistic: 123.8 on 1 and 98 DF, p-value: < 2.2e-16 #The models formula is healing_rate~age #age as a prediction is significant. #The model is significant overall (F 1,98=123.8, p < 2.2e-16) and has a high variance explanation (mult.R-squared=0.5581,adj.R-squared=0.5536) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. 
#does violate the linearity assumption ggplot(healing, aes(age, healing_rate)) + geom_point() #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points do not fan out a bit #normally distributed residuals, #the residuals do seem normal distributed as the data does not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 #2b As in 1, the assumption of linearity has been violated. #Again, you need to apply a regression technique that's appropriate to use when your data is curved. #(HINT: HOW MANY CURVES ARE THERE IN THIS DATA??) Is your new model significant? How has R^2 changed? model_2 <- lm(healing_rate~poly(age,3), data=healing) summary(model_2) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 5.16281 0.07245 71.259 < 2e-16 *** # poly(age, 3)1 -10.59784 0.72452 -14.627 < 2e-16 *** # poly(age, 3)2 1.28423 0.72452 1.773 0.0795 . # poly(age, 3)3 -6.07394 0.72452 -8.383 4.36e-13 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 0.7245 on 96 degrees of freedom #Multiple R-squared: 0.7496, Adjusted R-squared: 0.7418 #F-statistic: 95.8 on 3 and 96 DF, p-value: < 2.2e-16 #The predicotors appears to be sigificant with the only not significant predictor having a low significant of 0.0797 #The model is significant overall (F 3,96 = 95.8, p < 2.2e-16) #Amount of explained variance (R^2) increased from 0.5581 to 0.7245 ###2c Plot a curved line that corresponds to your new regression on top of a scatterplot of the data points. 
predict(model_2, newdata=data.frame(age=c(10,20,30))) healing$predicted_healing_rate <- predict(model_2) head(healing) ggplot(healing, aes(age, healing_rate)) + geom_point() + geom_smooth(method="lm") + geom_line(aes(age,predicted_healing_rate), colour="red") ###3a Load in uscrime.csv. This spreadsheet contains data on U.S. crime statistics in 1960 for different U.S. states. #You are going to run a multiple regression (in 3b) to determine which variables predict crime. The variables in this spreadsheet include: #Crime: Crime rate (offenses per 100,000 people) #M: Percentage of males aged 14-24 in state population #Ed: mean years of schooling of the population aged 25 or older #Po1: per capita expenditure on police protection #U2: unemployment rate of males 35-39 #Ineq: income inequality #Prob: probability of imprisonment for crime #LF: labor force participation rate of urban males 14-24 #M.F: number of males per 100 females #NW: percentage nonwhites in population #Time: average time in months served vy offenders in state prisons UScrime <- read.table("uscrime.csv",sep="\t",header=T,comment.char="",quote="") head(UScrime) ###3b Run a multiple regression with Crime as your outcome variable and all other variables as predictors. #Is the model significant? How much variance is explained in the sample? How much variance would we expect to be explained in the general population? Which predictors are significant? linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time, data=UScrime) summary(linearModel) #Coefficients: # Estimate Std. Error tvalue Pr(>|t|) #(Intercept) -5549.136 1383.300 -4.012 0.000292 *** # M 95.100 40.549 2.345 0.024640 * # Ed 168.424 58.961 2.857 0.007071 ** # Po1 113.991 16.692 6.829 0.51e-08 *** # LF 234.612 1172.477 0.200 0.842529 #M.F 9.765 16.046 0.609 0.546650 #NW 1.789 5.583 0.321 0.750426 #U2 82.664 50.240 1.645 0.108596 #Ineq 63.036 16.374 3.850 0.000466 *** # Prob -4169.393 2143.516 -1.945 0.059603 . 
#Time -1.084 6.421 -0.169 0.866836 #--- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 209 on 36 degrees of freedom #Multiple R-squared: 0.7714, Adjusted R-squared: 0.7079 #F-statistic: 12.15 on 10 and 36 DF, p-value: 8.027e-09 #The models formula is Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time. #Not all the main effects were significant. M, Ed, Pol and Ineq are significant. LF,M.F,NW,U2,Prob and Time are not. #The model is significant overall (F 10,36 = 12.15, p=8.027e-09) and has a high variance explanation (mult.R-squared=0.7714,adj.R-squared=0.7079) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###3c Check assumptions of the multiple regression technique (don't worry about linearity here). #Is there a problem with multicollinearity in this model? vif(linearModel) #avg=2.915987 = (2.733841+4.580025+2.590702+2.363404+2.353677+3.469670+1.895208+4.492696+2.500527+2.180123)/10 cor(UScrime[,1:10]) #Yes there is a problem. Te avg is three times higher then 1 which is a sign for multicollinearity. #If we look at the cor of the predictors there also seems to be quite some correlation between some predictors. #no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points have a nice cloud shape #normally distributed residuals, #the residuals do seem normal distributed as the tail and the head do not deviate a lot from the line #and check that there are no highly influential points. #there is no highly influential point that has a Cook's distance of > 1 ###3d Now perform model selection, removing predictors until you arrive at a final model. #At each step, state which predictor you have chosen to remove and why. When you get to the final model, explain why you aren't removing any more predictors. 
How do multiple R^2 and adjusted R^2 change each time you remove a predictor? Why do you think you see this pattern? linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob+Time, data=UScrime) summary(linearModel) #At start : Multiple R-squared: 0.7714, Adjusted R-squared: 0.7079 drop1(linearModel, test="F") #Remove time as AIC decreases from 511.66 to 509.70 and Time has a significance of 0.866 linearModel <- lm(Crime~M+Ed+Po1+LF+M.F+NW+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7712, Adjusted R-squared: 0.7156 drop1(linearModel, test="F") #Remove LF as AIC decreases from 509.70 to 507.82 and LF has a significance of 0.86 linearModel <- lm(Crime~M+Ed+Po1+M.F+NW+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.771, Adjusted R-squared: 0.7228 drop1(linearModel, test="F") #Remove NW as AIC decreases from 507.82 to 505.87 and LF has a significance of 0.745 linearModel <- lm(Crime~M+Ed+Po1+M.F+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7704, Adjusted R-squared: 0.7291 drop1(linearModel, test="F") #Remove M.F as AIC decreases from 505.87 to 504.79 and LF has a significance of 0.387499 linearModel <- lm(Crime~M+Ed+Po1+U2+Ineq+Prob, data=UScrime) summary(linearModel) #Multiple R-squared: 0.7659, Adjusted R-squared: 0.7307 drop1(linearModel, test="F") #Removing more predictors would increase the AIC value and all the predictors are significant #We see that Multiple R-squared goes down as we remove predictors bit Adjusted R-squared goes up. We get a better prediction so Adjusted R-squared goed up and because we use less predictors it follows that Multiple R-squared goed down. ###3e Refer back to what each predictor variable name means as described in the instructions for exercise 3a. #Provide a description of the different societal factors that predict crime rate in U.S. 
states (what I mean is don't just list the variable names from the spreadsheet, #but actually say what they refer to and how they relate to crime rate). Describe the direction of the relationship between each of these factors and crime rate. #Crime: Crime rate (offenses per 100,000 people) #M: Percentage of males aged 14-24 in state population #The relation is 105.0 which is avg compared to the other predictors. It appears that the Percentage of males aged 14-24 in state population is a decent predictor of crime. #This would be the case if Males are more criminal then Females. #Ed: mean years of schooling of the population aged 25 or older #The relation is 196.47 which is high compared to the other predictors. This is a bit weird of a relation as you would expect that education in general reduces crime. #The reason could be that if people of 25 and older receive a lot of education it means they didnt had it when young, and this can mean that young people are doing crimes. #Po1: per capita expenditure on police protection #The relation is 115.02 which is avg compared to the other predictors. This relation is also a bit weird as more money to the police should reduce crime. #The reason is probably that when there is high crime somewhere there is more money for the police to fight it. Then it if also often the case that where there is alot of money for the police #it is probably for the high crimes that are there #U2: unemployment rate of males 35-39 #The relation is 89.37 which is little below avg compared to the other predictors. Unemployemt is a logical predictor of crime, people do not have anything to do and want money #Ineq: income inequality #The relation is 67.65 which is below avg compared to the other predictors. This means that when rich people earn a lot and people in a lower class earn less there is more crime. #This is also very reasonable. 
The poor people want money, and the rich have stuff they can steal, there might also even be a touch of sense of fairness. #Prob: probability of imprisonment for crime #The relation is -3801.84 which is a very high negative relation. This means that as the probability of imprisonment increases a little the crimes reduce much more. #This is expected, the higher the consequences the less easy people commit crimes ###3f The directions of several of the relationships you described in 3e make some sense. #However, a couple are very strange. First, it is strange that, as per capita expenditure on police protection increases, so does crime rate. #What does this relationship suggest about causality between predictors and outcome variables in observational studies like this one? #A predictor is not nessesaraly a cause of the thing you want to predict. In this case its plausible that it is the other way around, crime causes more per capita expenditure on police protection. #Because there is a relation per capita expenditure will predict crime to some extend ###3g Another strange relationship is the positive one between number of years of schooling in the population and crime rate. #Do you think there is a direct relationship between these two? #No, there is higly likely a third variable that explains this relation ###4a load in mtcars.csv (set row.names=1 in the read.table function). You've worked with this data before; it describes characteristics of different cars. #Perform a multiple regression, predicting mpg (miles per gallon, which is a measure of fuel efficiency) based on the following predictors: #cyl: number of cylinders in the engine (a cylinder is an engine component where power is created) #disp: size of engine #hp: horsepower (how powerful the engine is) #wt: the weight of the car. mtcars <- read.table("mtcars.csv",sep="\t",header=T,comment.char="",quote="",row.names = 1) head(mtcars) ###4b Is the model significant? How much variance in the sample is explained? 
#How much variance in the population would we expect to be explained? Which predictors are significant? linearModel <- lm(mpg~cyl+disp+hp+drat+wt+qsec+vs+am+gear+carb, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 12.30337 18.71788 0.657 0.5181 #cyl -0.11144 1.04502 -0.107 0.9161 #disp 0.01334 0.01786 0.747 0.4635 #hp -0.02148 0.02177 -0.987 0.3350 #drat 0.78711 1.63537 0.481 0.6353 #wt -3.71530 1.89441 -1.961 0.0633 . #qsec 0.82104 0.73084 1.123 0.2739 #vs 0.31776 2.10451 0.151 0.8814 #am 2.52023 2.05665 1.225 0.2340 #gear 0.65541 1.49326 0.439 0.6652 #carb -0.19942 0.82875 -0.241 0.8122 #--- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 2.65 on 21 degrees of freedom #Multiple R-squared: 0.869, Adjusted R-squared: 0.8066 #F-statistic: 13.93 on 10 and 21 DF, p-value: 3.793e-07 #The models formula is mpg~cyl+disp+hp+drat+wt+qsec+vs+am+gear+carb. #None of the main effects were significant. #The model is significant overall (F 10,21 = 13.93, p=3.793e-07) and has a high variance explanation (mult.R-squared=0.869,adj.R-squared=0.8066) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###4c Test the model assumptions. Are any of them violated? #Is there a problem with multicollinearity in this model? vif(linearModel) #avg=9.577414 = (15.373833+21.620241+9.832037+3.374620+15.164887+7.527958+4.965873+4.648487+5.357452+7.908747)/10 cor(mtcars[,2:10]) #Yes there is a problem. Te avg is 9.5 times higher then 1 which is a sign for multicollinearity. And for cyl,disp,hp,wt the value is very close or over 10 #If we look at the cor of the predictors there also seems to be quite some correlation between some predictors. 
#no autocorrelation, durbinWatsonTest(linearModel) #dw-test return no significant autocorrelation #homoscedasticity, par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) plot(linearModel) # There does not seem to be homoscedasticity as the data points have a nice cloud shape #normally distributed residuals, #the residuals do seem normal distributed as the tail and the head do not deviate a lot from the line #and check that there are no highly influential points. #there is one highly influential point that has a Cook's distance of > 0.5 but not > 1 ###4d Run the drop1() function on your regression model. #Which predictor would be the best one to drop? Which would be the second best to drop? Why? (don't actually drop them!) drop1(linearModel, test="F") #The best predictor to drop is cyl with an decrease of AIC from 70.989 to 68.915 and has a significance of 0.91609 #The second best predictor to drop is vs with an decrease of AIC from 70.989 to 68.932 and has a significance of 0.88142 ###4e Now you're going to run 2 simple regressions: one predicting mpg as a function of disp, and a second predicting mpg as a function of hp. Are these models significant? linearModel <- lm(mpg~disp, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. Error t value Pr(>|t|) #(Intercept) 29.599855 1.229720 24.070 < 2e-16 *** # disp -0.041215 0.004712 -8.747 9.38e-10 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 3.251 on 30 degrees of freedom #Multiple R-squared: 0.7183, Adjusted R-squared: 0.709 #F-statistic: 76.51 on 1 and 30 DF, p-value: 9.38e-10 #The models formula is mpg~disp #disp as a prediction is significant. #The model is significant overall (F 1,30=76.51 p = 9.38e-10) and has a high variance explanation (mult.R-squared=0.7183) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. linearModel <- lm(mpg~hp, data=mtcars) summary(linearModel) #Coefficients: # Estimate Std. 
Error t value Pr(>|t|) #(Intercept) 30.09886 1.63392 18.421 < 2e-16 *** # hp -0.06823 0.01012 -6.742 1.79e-07 *** # --- # Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 # #Residual standard error: 3.863 on 30 degrees of freedom #Multiple R-squared: 0.6024, Adjusted R-squared: 0.5892 #F-statistic: 45.46 on 1 and 30 DF, p-value: 1.788e-07 #The models formula is mpg~hp #hp as a prediction is significant. #The model is significant overall (F 1,30=45.46 p = 1.788e-07) and has a high variance explanation (mult.R-squared=0.6024) #All regression coefficients, as well as their SE, t-scores and p-values are provided in the appendix. ###4f How do disp and hp behave differently in 4d versus 4e? Why do you think this is? (consider what assumption was violated in 4c. It may also help to think back to partial correlations.) #in 4e they were not significant predictors, in 4d they are. In 4c we discovered that there is a multicollinarity violation, there is a correlation between the variables themselves. #The model is better without the "double" predictors.
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hfr_dqa_checks.R
\name{is_ou_valid}
\alias{is_ou_valid}
\title{Check if OperatingUnit is valid}
\usage{
is_ou_valid(.data, df_orgs)
}
\arguments{
\item{df_orgs}{df_orgs}

\item{.data}{df}
}
\value{
boolean
}
\description{
Check if OperatingUnit is valid
}
\examples{
\dontrun{
data \%>\% is_ou_valid(df_orgs)
}
}
/man/is_ou_valid.Rd
permissive
USAID-OHA-SI/Wavelength
R
false
true
393
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hfr_dqa_checks.R
\name{is_ou_valid}
\alias{is_ou_valid}
\title{Check if OperatingUnit is valid}
\usage{
is_ou_valid(.data, df_orgs)
}
\arguments{
\item{df_orgs}{df_orgs}

\item{.data}{df}
}
\value{
boolean
}
\description{
Check if OperatingUnit is valid
}
\examples{
\dontrun{
data \%>\% is_ou_valid(df_orgs)
}
}
library(dplyr) library(gbm) mmsd = read.csv("data/MMSDvectorized.csv") glri = read.csv("data/GLRIvectorized.csv") ss = read.csv("data/SSvectorized.csv") pred = as.data.frame(cbind(mmsd$Human_virus, mmsd[,24:ncol(mmsd)])) names(pred)[1] = "Human_virus" gbm(Human_virus~., data = pred, distribution = "gaussian", n.trees = 2000, interaction.depth = 5, n.minobsinnode = 3, shrinkage = 0.001, bag.fraction = 0.7 )
/scratch/scratch.r
no_license
wes-brooks/opticalww
R
false
false
447
r
library(dplyr) library(gbm) mmsd = read.csv("data/MMSDvectorized.csv") glri = read.csv("data/GLRIvectorized.csv") ss = read.csv("data/SSvectorized.csv") pred = as.data.frame(cbind(mmsd$Human_virus, mmsd[,24:ncol(mmsd)])) names(pred)[1] = "Human_virus" gbm(Human_virus~., data = pred, distribution = "gaussian", n.trees = 2000, interaction.depth = 5, n.minobsinnode = 3, shrinkage = 0.001, bag.fraction = 0.7 )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fslrand.help.R \name{fslrand.help} \alias{fslrand.help} \title{fslrand Help} \usage{ fslrand.help(...) } \arguments{ \item{...}{passed to \code{\link{fslmaths.help}}} } \value{ Prints help output and returns output as character vector } \description{ This function calls \code{fslmaths}'s help, as \code{fslrand} is a wrapper for \code{fslmaths} } \examples{ if (have.fsl()){ fslrand.help() } }
/man/fslrand.help.Rd
no_license
muschellij2/fslr
R
false
true
476
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fslrand.help.R \name{fslrand.help} \alias{fslrand.help} \title{fslrand Help} \usage{ fslrand.help(...) } \arguments{ \item{...}{passed to \code{\link{fslmaths.help}}} } \value{ Prints help output and returns output as character vector } \description{ This function calls \code{fslmaths}'s help, as \code{fslrand} is a wrapper for \code{fslmaths} } \examples{ if (have.fsl()){ fslrand.help() } }
getValues <- function(times){ u <- runif(times) return((- 1 + sqrt(1 + 8 * u)) / 2) } getValuesAlternateWithComposition <- function(times, alpha = 0.5){ ans <- NULL for(i in 1 : times){ u = runif(1) if(u < alpha){ ans <- c(ans, sqrt(runif(1))) } else { ans <- c(ans, runif(1)) } } return(ans) } pdf("InversaContinua2.pdf") xx <- getValues(1000) yy <- getValuesAlternateWithComposition(1000) hist(xx, main = "Metodo de Transformada Inversa a lo bruto") hist(yy, main = "Metodo de Transformada Inversa con Composicion")
/Clase 3/InversaContinua2.R
no_license
racsosabe/Simulacion
R
false
false
543
r
getValues <- function(times){ u <- runif(times) return((- 1 + sqrt(1 + 8 * u)) / 2) } getValuesAlternateWithComposition <- function(times, alpha = 0.5){ ans <- NULL for(i in 1 : times){ u = runif(1) if(u < alpha){ ans <- c(ans, sqrt(runif(1))) } else { ans <- c(ans, runif(1)) } } return(ans) } pdf("InversaContinua2.pdf") xx <- getValues(1000) yy <- getValuesAlternateWithComposition(1000) hist(xx, main = "Metodo de Transformada Inversa a lo bruto") hist(yy, main = "Metodo de Transformada Inversa con Composicion")
\name{masses} \alias{masses} \alias{masses.default} \alias{masses.pdb} \title{Mass of Chemical Elements} \description{Determine the mass of chemical elements} \usage{ masses(...) \method{masses}{default}(x, ...) \method{masses}{pdb}(x, ...) } \arguments{ \item{\dots}{further arguments passed to or from other methods.} \item{x}{either a character or an integer vector containing element symbols or atomic numbers, or an object of class \sQuote{pdb} from which element symbols are determined (see details).} } \details{ \code{masses} is a generic function to determine the mass of chemical elements. For objects of class \sQuote{pdb}: \itemize{ \item First the element names are converted into element symbols using the \code{toSymbols} function. \item Then their masses are taken from the \code{elements} data set. } \code{NA} values are returned for unrecognized elements. } \value{Return a numeric vector containing the mass of chemical elements.} \seealso{ \code{\link{toSymbols}} } \examples{ x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb")) masses(x) masses(c("C","Cl",NA,"AA","N")) } \keyword{ manip }
/man/masses.Rd
no_license
lptolik/Rpdb
R
false
false
1,137
rd
\name{masses} \alias{masses} \alias{masses.default} \alias{masses.pdb} \title{Mass of Chemical Elements} \description{Determine the mass of chemical elements} \usage{ masses(...) \method{masses}{default}(x, ...) \method{masses}{pdb}(x, ...) } \arguments{ \item{\dots}{further arguments passed to or from other methods.} \item{x}{either a character or an integer vector containing element symbols or atomic numbers, or an object of class \sQuote{pdb} from which element symbols are determined (see details).} } \details{ \code{masses} is a generic function to determine the mass of chemical elements. For objects of class \sQuote{pdb}: \itemize{ \item First the element names are converted into element symbols using the \code{toSymbols} function. \item Then their masses are taken from the \code{elements} data set. } \code{NA} values are returned for unrecognized elements. } \value{Return a numeric vector containing the mass of chemical elements.} \seealso{ \code{\link{toSymbols}} } \examples{ x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb")) masses(x) masses(c("C","Cl",NA,"AA","N")) } \keyword{ manip }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/formality.R \name{print.formality} \alias{print.formality} \title{Prints a formality Object} \usage{ \method{print}{formality}(x, digits, ...) } \arguments{ \item{x}{The formality object.} \item{digits}{The number of digits to print.} \item{\ldots}{ignored} } \description{ Prints a formality object. }
/man/print.formality.Rd
no_license
Maddocent/qdap
R
false
false
393
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/formality.R \name{print.formality} \alias{print.formality} \title{Prints a formality Object} \usage{ \method{print}{formality}(x, digits, ...) } \arguments{ \item{x}{The formality object.} \item{digits}{The number of digits to print.} \item{\ldots}{ignored} } \description{ Prints a formality object. }
#### Analytical script Run 1 #### #### Assumptions: #### 1. baseline model #### 2. Variable wood NC #### 3. baseline N cycle #### ################################################################################ #### Functions Perform_Analytical_Run1 <- function(f.flag = 1) { #### Function to perform analytical run 1 simulations #### eDF: stores equilibrium points #### cDF: stores constraint points (curves) #### f.flag: = 1 simply plot analytical solution and create individual pdf file #### f.flag: = 2 return a list consisting of two dataframes ######### Main program source("Parameters/Analytical_Run1_Parameters.R") ### create a range of nc for shoot to initiate nfseq <- round(seq(0.001, 0.1, by = 0.001),5) ### create nc ratio for wood, root, and allocation coefficients a_nf <- as.data.frame(alloc(nfseq)) ### calculate photosynthetic constraint at CO2 = 350 P350 <- photo_constraint_full(nf=nfseq, nfdf=a_nf, CO2=CO2_1) ### calculate very long term NC constraint on NPP, respectively VL <- VL_constraint(nf=nfseq, nfdf=a_nf) ### finding the equilibrium point between photosynthesis and very long term nutrient constraints VL_eq <- solve_VL_full(CO2=CO2_1) ### calculate nw and nr for VL equilibrated nf value a_eq <- alloc(VL_eq$nf) ### calculate soil parameters, e.g. reburial coef. 
s_coef <- soil_coef(df=VL_eq$nf, a=a_eq) omega_ap <- a_eq$af*s_coef$omega_af_pass + a_eq$ar*s_coef$omega_ar_pass omega_as <- a_eq$af*s_coef$omega_af_slow + a_eq$ar*s_coef$omega_ar_slow ### Get C from very-long term nutrient cycling solution ### return in g C m-2 C_pass_VL <- omega_ap*VL_eq$NPP/s_coef$decomp_pass/(1-s_coef$qq_pass)*1000.0 ### Calculate long term nutrient constraint L <- L_constraint(df=nfseq, a=a_nf, C_pass=C_pass_VL, Nin_L = Nin) ### Find long term equilibrium point L_eq <- solve_L_full(CO2=CO2_1, C_pass=C_pass_VL, Nin_L = Nin) ### Get Cslow from long nutrient cycling solution ### return in g C m-2 C_slow_L <- omega_as*L_eq$NPP/s_coef$decomp_slow/(1-s_coef$qq_slow)*1000.0 ### Calculate nutrient release from slow woody pool ### return in g N m-2 yr-1 N_wood_L <- a_eq$aw*a_eq$nw*VL_eq$NPP*1000.0 ### Calculate medium term nutrient constraint M <- M_constraint(df=nfseq,a=a_nf, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) ### calculate M equilibrium point M_eq <- solve_M_full(CO2=CO2_1, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) out350DF <- data.frame(CO2_1, nfseq, P350, VL$NPP, L$NPP, M$NPP) colnames(out350DF) <- c("CO2", "nc", "NPP_photo", "NPP_VL", "NPP_L", "NPP_M") equil350DF <- data.frame(CO2_1, VL_eq, L_eq, M_eq) colnames(equil350DF) <- c("CO2", "nc_VL", "NPP_VL", "nc_L", "NPP_L", "nc_M", "NPP_M") ##### CO2 = 700 ### photo constraint P700 <- photo_constraint_full(nf=nfseq, nfdf=a_nf, CO2=CO2_2) ### VL equilibrated point with eCO2 VL_eq <- solve_VL_full(CO2=CO2_2) ### Find long term equilibrium point L_eq <- solve_L_full(CO2=CO2_2, C_pass=C_pass_VL, Nin_L = Nin) ### Find medium term equilibrium point M_eq <- solve_M_full(CO2=CO2_2, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) out700DF <- data.frame(CO2_2, nfseq, P700, VL$NPP, L$NPP, M$NPP) colnames(out700DF) <- c("CO2", "nc", "NPP_photo", "NPP_VL", "NPP_L", "NPP_M") equil700DF <- data.frame(CO2_2, VL_eq, L_eq, M_eq) colnames(equil700DF) <- c("CO2", "nc_VL", 
"NPP_VL", "nc_L", "NPP_L", "nc_M", "NPP_M") ### get the point instantaneous NPP response to doubling of CO2 df700 <- as.data.frame(cbind(round(nfseq,3), P700)) inst700 <- inst_NPP(equil350DF$nc_VL, df700) equil350DF$NPP_I <- inst700$equilNPP equil700DF$NPP_I <- inst700$equilNPP if (f.flag == 1) { ### plot 2-d plots of nf vs. npp and nf vs. pf tiff("Plots/Analytical_Run1_2d.tiff", width = 5, height = 5, units = "in", res = 300) par(mar=c(5.1,6.1,2.1,2.1)) ### shoot nc vs. NPP plot(out350DF$nc, out350DF$NPP_photo, xlim=c(0.001, 0.03), ylim=c(1.0, 2.0), type = "l", xlab = "Leaf N:C ratio", ylab = expression(paste("NPP [kg C ", m^-2, " ", yr^-1, "]")), col="cyan", lwd = 3, cex.lab=1.5) abline(h = seq(0.5, 3.0, 0.5), v = seq(0.01, 0.05, 0.01), col="lightgray", lty = 3) points(out350DF$nc, out350DF$NPP_VL, type="l", col="tomato", lwd = 3) points(equil350DF$nc_VL, equil350DF$NPP_VL, type="p", pch = 19, col = "blue", cex = 2) points(out350DF$nc, out350DF$NPP_L, type='l',col="violet", lwd = 3) points(out350DF$nc, out350DF$NPP_M, type="l", col="darkred", lwd = 3) points(out700DF$nc, out700DF$NPP_photo, col="green", type="l", lwd = 3) points(equil350DF$nc_VL, inst700$equilNPP, type="p", col = "darkgreen", pch=19, cex = 2) points(equil700DF$nc_VL, equil700DF$NPP_VL, type="p", col="orange", pch = 19, cex = 2) points(equil700DF$nc_L, equil700DF$NPP_L,type="p", col="red", pch = 19, cex = 2) points(equil700DF$nc_M, equil700DF$NPP_M, type="p", col="purple", pch = 19, cex = 2) legend("bottomright", c("P350", "P700", "VL", "L", "M", "A", "B", "C", "D", "E"), col=c("cyan","green", "tomato", "violet","darkred","blue", "darkgreen","purple","red", "orange"), lwd=c(2,2,2,2,2,NA,NA,NA,NA,NA), pch=c(NA,NA,NA,NA,NA,19,19,19,19,19), cex = 1.0, bg = adjustcolor("grey", 0.8), ncol=2) dev.off() } else if (f.flag == 2) { my.list <- list(cDF = data.frame(rbind(out350DF, out700DF)), eDF = data.frame(rbind(equil350DF, equil700DF))) return(my.list) } }
/QE_Scripts/Analytical_Run1.R
no_license
SoilTSSM/QEframework
R
false
false
6,514
r
#### Analytical script Run 1 #### #### Assumptions: #### 1. baseline model #### 2. Variable wood NC #### 3. baseline N cycle #### ################################################################################ #### Functions Perform_Analytical_Run1 <- function(f.flag = 1) { #### Function to perform analytical run 1 simulations #### eDF: stores equilibrium points #### cDF: stores constraint points (curves) #### f.flag: = 1 simply plot analytical solution and create individual pdf file #### f.flag: = 2 return a list consisting of two dataframes ######### Main program source("Parameters/Analytical_Run1_Parameters.R") ### create a range of nc for shoot to initiate nfseq <- round(seq(0.001, 0.1, by = 0.001),5) ### create nc ratio for wood, root, and allocation coefficients a_nf <- as.data.frame(alloc(nfseq)) ### calculate photosynthetic constraint at CO2 = 350 P350 <- photo_constraint_full(nf=nfseq, nfdf=a_nf, CO2=CO2_1) ### calculate very long term NC constraint on NPP, respectively VL <- VL_constraint(nf=nfseq, nfdf=a_nf) ### finding the equilibrium point between photosynthesis and very long term nutrient constraints VL_eq <- solve_VL_full(CO2=CO2_1) ### calculate nw and nr for VL equilibrated nf value a_eq <- alloc(VL_eq$nf) ### calculate soil parameters, e.g. reburial coef. 
s_coef <- soil_coef(df=VL_eq$nf, a=a_eq) omega_ap <- a_eq$af*s_coef$omega_af_pass + a_eq$ar*s_coef$omega_ar_pass omega_as <- a_eq$af*s_coef$omega_af_slow + a_eq$ar*s_coef$omega_ar_slow ### Get C from very-long term nutrient cycling solution ### return in g C m-2 C_pass_VL <- omega_ap*VL_eq$NPP/s_coef$decomp_pass/(1-s_coef$qq_pass)*1000.0 ### Calculate long term nutrient constraint L <- L_constraint(df=nfseq, a=a_nf, C_pass=C_pass_VL, Nin_L = Nin) ### Find long term equilibrium point L_eq <- solve_L_full(CO2=CO2_1, C_pass=C_pass_VL, Nin_L = Nin) ### Get Cslow from long nutrient cycling solution ### return in g C m-2 C_slow_L <- omega_as*L_eq$NPP/s_coef$decomp_slow/(1-s_coef$qq_slow)*1000.0 ### Calculate nutrient release from slow woody pool ### return in g N m-2 yr-1 N_wood_L <- a_eq$aw*a_eq$nw*VL_eq$NPP*1000.0 ### Calculate medium term nutrient constraint M <- M_constraint(df=nfseq,a=a_nf, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) ### calculate M equilibrium point M_eq <- solve_M_full(CO2=CO2_1, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) out350DF <- data.frame(CO2_1, nfseq, P350, VL$NPP, L$NPP, M$NPP) colnames(out350DF) <- c("CO2", "nc", "NPP_photo", "NPP_VL", "NPP_L", "NPP_M") equil350DF <- data.frame(CO2_1, VL_eq, L_eq, M_eq) colnames(equil350DF) <- c("CO2", "nc_VL", "NPP_VL", "nc_L", "NPP_L", "nc_M", "NPP_M") ##### CO2 = 700 ### photo constraint P700 <- photo_constraint_full(nf=nfseq, nfdf=a_nf, CO2=CO2_2) ### VL equilibrated point with eCO2 VL_eq <- solve_VL_full(CO2=CO2_2) ### Find long term equilibrium point L_eq <- solve_L_full(CO2=CO2_2, C_pass=C_pass_VL, Nin_L = Nin) ### Find medium term equilibrium point M_eq <- solve_M_full(CO2=CO2_2, C_pass=C_pass_VL, C_slow=C_slow_L, Nin_L = Nin+N_wood_L) out700DF <- data.frame(CO2_2, nfseq, P700, VL$NPP, L$NPP, M$NPP) colnames(out700DF) <- c("CO2", "nc", "NPP_photo", "NPP_VL", "NPP_L", "NPP_M") equil700DF <- data.frame(CO2_2, VL_eq, L_eq, M_eq) colnames(equil700DF) <- c("CO2", "nc_VL", 
"NPP_VL", "nc_L", "NPP_L", "nc_M", "NPP_M") ### get the point instantaneous NPP response to doubling of CO2 df700 <- as.data.frame(cbind(round(nfseq,3), P700)) inst700 <- inst_NPP(equil350DF$nc_VL, df700) equil350DF$NPP_I <- inst700$equilNPP equil700DF$NPP_I <- inst700$equilNPP if (f.flag == 1) { ### plot 2-d plots of nf vs. npp and nf vs. pf tiff("Plots/Analytical_Run1_2d.tiff", width = 5, height = 5, units = "in", res = 300) par(mar=c(5.1,6.1,2.1,2.1)) ### shoot nc vs. NPP plot(out350DF$nc, out350DF$NPP_photo, xlim=c(0.001, 0.03), ylim=c(1.0, 2.0), type = "l", xlab = "Leaf N:C ratio", ylab = expression(paste("NPP [kg C ", m^-2, " ", yr^-1, "]")), col="cyan", lwd = 3, cex.lab=1.5) abline(h = seq(0.5, 3.0, 0.5), v = seq(0.01, 0.05, 0.01), col="lightgray", lty = 3) points(out350DF$nc, out350DF$NPP_VL, type="l", col="tomato", lwd = 3) points(equil350DF$nc_VL, equil350DF$NPP_VL, type="p", pch = 19, col = "blue", cex = 2) points(out350DF$nc, out350DF$NPP_L, type='l',col="violet", lwd = 3) points(out350DF$nc, out350DF$NPP_M, type="l", col="darkred", lwd = 3) points(out700DF$nc, out700DF$NPP_photo, col="green", type="l", lwd = 3) points(equil350DF$nc_VL, inst700$equilNPP, type="p", col = "darkgreen", pch=19, cex = 2) points(equil700DF$nc_VL, equil700DF$NPP_VL, type="p", col="orange", pch = 19, cex = 2) points(equil700DF$nc_L, equil700DF$NPP_L,type="p", col="red", pch = 19, cex = 2) points(equil700DF$nc_M, equil700DF$NPP_M, type="p", col="purple", pch = 19, cex = 2) legend("bottomright", c("P350", "P700", "VL", "L", "M", "A", "B", "C", "D", "E"), col=c("cyan","green", "tomato", "violet","darkred","blue", "darkgreen","purple","red", "orange"), lwd=c(2,2,2,2,2,NA,NA,NA,NA,NA), pch=c(NA,NA,NA,NA,NA,19,19,19,19,19), cex = 1.0, bg = adjustcolor("grey", 0.8), ncol=2) dev.off() } else if (f.flag == 2) { my.list <- list(cDF = data.frame(rbind(out350DF, out700DF)), eDF = data.frame(rbind(equil350DF, equil700DF))) return(my.list) } }
\name{Signaldata} \alias{Signaldata} \docType{data} \title{ Decay curves datasets } \description{ CW-OSL and LM-OSL decay curves. } \usage{data(Signaldata)} \format{ A list that contains CW-OSL and LM-OSL decay curves: \describe{ \item{cw}{a number of CW-OSL decay curves of a sand sample from the Tengger Desert in northern china (Peng and Han, 2013)} \item{lm}{a LM-OSL decay curve from Li and Li (2006)} } } \references{ Li SH, Li B, 2006. Dose measurement using the fast component of LM-OSL signals from quartz. Radiation Measurements, 41(5): 534-541. Peng J, Han FQ, 2013. Selections of fast-component OSL signal using sediments from the south edge of Tengger Desert. Acta Geoscientica Sinica, 34(6): 757-762. } \seealso{ \link{decomp}; \link{fastED} } \examples{ # Not run. # data(Signaldata) # names(Signaldata) } \keyword{OSL dating} \keyword{decay curve}
/man/Signaldata.Rd
no_license
cran/numOSL
R
false
false
881
rd
\name{Signaldata} \alias{Signaldata} \docType{data} \title{ Decay curves datasets } \description{ CW-OSL and LM-OSL decay curves. } \usage{data(Signaldata)} \format{ A list that contains CW-OSL and LM-OSL decay curves: \describe{ \item{cw}{a number of CW-OSL decay curves of a sand sample from the Tengger Desert in northern china (Peng and Han, 2013)} \item{lm}{a LM-OSL decay curve from Li and Li (2006)} } } \references{ Li SH, Li B, 2006. Dose measurement using the fast component of LM-OSL signals from quartz. Radiation Measurements, 41(5): 534-541. Peng J, Han FQ, 2013. Selections of fast-component OSL signal using sediments from the south edge of Tengger Desert. Acta Geoscientica Sinica, 34(6): 757-762. } \seealso{ \link{decomp}; \link{fastED} } \examples{ # Not run. # data(Signaldata) # names(Signaldata) } \keyword{OSL dating} \keyword{decay curve}
library(tidyverse) library(cowplot) library(colorspace) # Plot 1000 Genomes PCAs labels_df <- read_tsv('data/kgp_meta/integrated_call_samples_v2.20130502.ALL.ped', col_types = cols_only('Individual ID' = col_character(), 'Population' = col_character())) super_pop_df <- read_tsv('data/kgp_meta/20131219.populations.tsv') labels_df <- labels_df %>% inner_join(super_pop_df, by = c('Population' = 'Population Code')) kgp_pc_df <- read_tsv( 'data/kgp_merged/projection.sscore', col_types = cols_only(IID = col_character(), PC1_AVG = col_double(), PC2_AVG = col_double()) ) kgp_pc_plot <- kgp_pc_df %>% inner_join(labels_df, by = c('IID' = 'Individual ID')) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = !!as.name('Super Population'))) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') ggsave(filename = 'img/1000_genomes_pca.png', plot = kgp_pc_plot, dpi = 300, width = 6, height = 5) # Plot UK Biobank PCA (computed on 1000 genomes) ukb_pca_df <- read_tsv('data/ukb_merged/projection.sscore') ukb_labels_df <- read_tsv('data/ukb_merged/population_labels_10PCS.tsv.gz') ukb_df <- ukb_pca_df %>% inner_join(ukb_labels_df, by = c('IID')) rm(ukb_pca_df) rm(ukb_labels_df) ukb_pc_plot <- ukb_df %>% filter(IID > 0) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = predicted, shape = inconclusive)) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') ggsave(filename = 'img/ukb_pca.png', plot = ukb_pc_plot, dpi = 300, width = 6, height = 5) # Plot UK Biobank with conclusiveness separated counts_df <- ukb_df %>% filter(IID > 0) %>% mutate(conclusive = inconclusive %>% as.integer %>% recode_factor('0' = 'Conclusive population label (P ≥ 0.9)', '1' = 'Inconclusive population label (P < 0.9)')) %>% group_by(inconclusive, conclusive) %>% summarize(num = n()) %>% ungroup %>% mutate( num_f = format(num, big.mark = ",", scientific = FALSE), label = str_glue('N = {num_f}') ) ukb_pc_sep_plot <- ukb_df %>% filter(IID > 0) %>% mutate(conclusive = inconclusive %>% as.integer %>% recode_factor('0' = 
'Conclusive population label (P ≥ 0.9)', '1' = 'Inconclusive population label (P < 0.9)')) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = predicted, shape = inconclusive)) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') + facet_wrap(vars(conclusive)) + geom_text(data = counts_df, aes(label = label), x = -0.015, y = 0.075, color = 'black') + scale_shape_discrete(guide = "none") + scale_color_discrete(name = 'Predicted\npopulation') ggsave(filename = 'img/ukb_pca_sep.png', plot = ukb_pc_sep_plot, dpi = 300, width = 12, height = 5) # Compare PCA projections between 1000 Genomes and UK Biobank kgp_ukb_pca_plot <- plot_grid(kgp_pc_plot, ukb_pc_plot, nrow = 1) ggsave(filename = 'img/kgp_ukb_combined_pca.png', plot = kgp_ukb_pca_plot, dpi = 300, width = 12, height = 5)
/scripts/05a_plot_gwas.R
no_license
zietzm/prs-portability
R
false
false
3,146
r
library(tidyverse) library(cowplot) library(colorspace) # Plot 1000 Genomes PCAs labels_df <- read_tsv('data/kgp_meta/integrated_call_samples_v2.20130502.ALL.ped', col_types = cols_only('Individual ID' = col_character(), 'Population' = col_character())) super_pop_df <- read_tsv('data/kgp_meta/20131219.populations.tsv') labels_df <- labels_df %>% inner_join(super_pop_df, by = c('Population' = 'Population Code')) kgp_pc_df <- read_tsv( 'data/kgp_merged/projection.sscore', col_types = cols_only(IID = col_character(), PC1_AVG = col_double(), PC2_AVG = col_double()) ) kgp_pc_plot <- kgp_pc_df %>% inner_join(labels_df, by = c('IID' = 'Individual ID')) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = !!as.name('Super Population'))) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') ggsave(filename = 'img/1000_genomes_pca.png', plot = kgp_pc_plot, dpi = 300, width = 6, height = 5) # Plot UK Biobank PCA (computed on 1000 genomes) ukb_pca_df <- read_tsv('data/ukb_merged/projection.sscore') ukb_labels_df <- read_tsv('data/ukb_merged/population_labels_10PCS.tsv.gz') ukb_df <- ukb_pca_df %>% inner_join(ukb_labels_df, by = c('IID')) rm(ukb_pca_df) rm(ukb_labels_df) ukb_pc_plot <- ukb_df %>% filter(IID > 0) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = predicted, shape = inconclusive)) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') ggsave(filename = 'img/ukb_pca.png', plot = ukb_pc_plot, dpi = 300, width = 6, height = 5) # Plot UK Biobank with conclusiveness separated counts_df <- ukb_df %>% filter(IID > 0) %>% mutate(conclusive = inconclusive %>% as.integer %>% recode_factor('0' = 'Conclusive population label (P ≥ 0.9)', '1' = 'Inconclusive population label (P < 0.9)')) %>% group_by(inconclusive, conclusive) %>% summarize(num = n()) %>% ungroup %>% mutate( num_f = format(num, big.mark = ",", scientific = FALSE), label = str_glue('N = {num_f}') ) ukb_pc_sep_plot <- ukb_df %>% filter(IID > 0) %>% mutate(conclusive = inconclusive %>% as.integer %>% recode_factor('0' = 
'Conclusive population label (P ≥ 0.9)', '1' = 'Inconclusive population label (P < 0.9)')) %>% ggplot(aes(x = PC1_AVG, y = PC2_AVG, color = predicted, shape = inconclusive)) + geom_point(alpha = 0.5) + xlab('PC1') + ylab('PC2') + facet_wrap(vars(conclusive)) + geom_text(data = counts_df, aes(label = label), x = -0.015, y = 0.075, color = 'black') + scale_shape_discrete(guide = "none") + scale_color_discrete(name = 'Predicted\npopulation') ggsave(filename = 'img/ukb_pca_sep.png', plot = ukb_pc_sep_plot, dpi = 300, width = 12, height = 5) # Compare PCA projections between 1000 Genomes and UK Biobank kgp_ukb_pca_plot <- plot_grid(kgp_pc_plot, ukb_pc_plot, nrow = 1) ggsave(filename = 'img/kgp_ukb_combined_pca.png', plot = kgp_ukb_pca_plot, dpi = 300, width = 12, height = 5)
################################################################################
### 1. Download and extract the data set into the WORKING directory          ###
################################################################################

# 1. If the data set is not loaded yet, get and clean it -----------------------
search <- ls(pattern = "house.power.comp")
if (length(search) == 0) {

  # 1.1 File locations
  file.url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  zip.name <- "household_power_consumption.zip"
  file.name <- "household_power_consumption.txt"

  # 1.2 Download and unzip the data set (skipped when the files already exist)
  if (!file.exists(zip.name)) {
    download.file(url = file.url, destfile = zip.name, method = "curl",
                  quiet = FALSE)
  }
  if (!file.exists(file.name)) {
    unzip(zipfile = zip.name, setTimes = TRUE)
  }

  # 1.3 Peek at the first 1000 rows to learn the layout of each variable
  initial <- read.table(file.name, nrows = 1000, sep = ";",
                        stringsAsFactors = FALSE, header = TRUE)
  head(initial, n = 10)

  # 1.4 Column names and types for the full import
  col.names <- colnames(initial)
  col.classes <- c("character", "character", "numeric", "numeric", "numeric",
                   "numeric", "numeric", "numeric", "numeric")

  # 1.5 Row ids for February 1st and 2nd, 2007 (dates stored as day/month/year)
  lines <- grep("^1/2/2007|^2/2/2007", readLines(file.name), value = FALSE)

  # 1.5 Import only those rows into a data frame
  #     (assumes the matching rows are contiguous, which holds for this file)
  house.power.comp <- read.table(file = file.name, header = FALSE, sep = ";",
                                 dec = ".", stringsAsFactors = FALSE,
                                 skip = lines[1] - 1, nrows = length(lines),
                                 colClasses = col.classes, na.strings = "?",
                                 col.names = col.names)

  # 1.6 Clean the data frame: merge Date and Time into a single column
  str(house.power.comp)
  house.power.comp$Date <- as.Date(house.power.comp$Date, format = "%d/%m/%Y")
  house.power.comp$Date <- with(house.power.comp,
                                paste(Date, "T", Time, sep = " "))

  # 1.7 Parse the timestamp (household located in Sceaux, near Paris)
  house.power.comp$Date <- strptime(house.power.comp$Date,
                                    format = "%Y-%m-%d T %H:%M:%S",
                                    tz = "Europe/Paris")

  # 1.8 Delete the downloaded and extracted files to save disk space
  file.remove(file.name)
  file.remove(zip.name)
}

# 2. Only keep the clean data frame in the R environment -----------------------
rm(list = setdiff(ls(), "house.power.comp"))

################################################################################
### 2. Plot a histogram into a png file                                      ###
################################################################################

# 1. Open the PNG graphic device------------------------------------------------
png(filename = "plot1.png", width = 480, height = 480, res = 100)

# 2. Draw the histogram---------------------------------------------------------
hist(x = house.power.comp$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (Kilowatts)")

# 3. Close the PNG graphic device-----------------------------------------------
dev.off()
/Plot1.R
no_license
sudha-png/Power-Consumption-Measurements
R
false
false
3,760
r
################################################################################
### 1. Download and extract the data set into the WORKING directory          ###
################################################################################

# 1. If the data set is not loaded yet, get and clean it -----------------------
search <- ls(pattern = "house.power.comp")
if (length(search) == 0) {

  # 1.1 File locations
  file.url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  zip.name <- "household_power_consumption.zip"
  file.name <- "household_power_consumption.txt"

  # 1.2 Download and unzip the data set (skipped when the files already exist)
  if (!file.exists(zip.name)) {
    download.file(url = file.url, destfile = zip.name, method = "curl",
                  quiet = FALSE)
  }
  if (!file.exists(file.name)) {
    unzip(zipfile = zip.name, setTimes = TRUE)
  }

  # 1.3 Peek at the first 1000 rows to learn the layout of each variable
  initial <- read.table(file.name, nrows = 1000, sep = ";",
                        stringsAsFactors = FALSE, header = TRUE)
  head(initial, n = 10)

  # 1.4 Column names and types for the full import
  col.names <- colnames(initial)
  col.classes <- c("character", "character", "numeric", "numeric", "numeric",
                   "numeric", "numeric", "numeric", "numeric")

  # 1.5 Row ids for February 1st and 2nd, 2007 (dates stored as day/month/year)
  lines <- grep("^1/2/2007|^2/2/2007", readLines(file.name), value = FALSE)

  # 1.5 Import only those rows into a data frame
  #     (assumes the matching rows are contiguous, which holds for this file)
  house.power.comp <- read.table(file = file.name, header = FALSE, sep = ";",
                                 dec = ".", stringsAsFactors = FALSE,
                                 skip = lines[1] - 1, nrows = length(lines),
                                 colClasses = col.classes, na.strings = "?",
                                 col.names = col.names)

  # 1.6 Clean the data frame: merge Date and Time into a single column
  str(house.power.comp)
  house.power.comp$Date <- as.Date(house.power.comp$Date, format = "%d/%m/%Y")
  house.power.comp$Date <- with(house.power.comp,
                                paste(Date, "T", Time, sep = " "))

  # 1.7 Parse the timestamp (household located in Sceaux, near Paris)
  house.power.comp$Date <- strptime(house.power.comp$Date,
                                    format = "%Y-%m-%d T %H:%M:%S",
                                    tz = "Europe/Paris")

  # 1.8 Delete the downloaded and extracted files to save disk space
  file.remove(file.name)
  file.remove(zip.name)
}

# 2. Only keep the clean data frame in the R environment -----------------------
rm(list = setdiff(ls(), "house.power.comp"))

################################################################################
### 2. Plot a histogram into a png file                                      ###
################################################################################

# 1. Open the PNG graphic device------------------------------------------------
png(filename = "plot1.png", width = 480, height = 480, res = 100)

# 2. Draw the histogram---------------------------------------------------------
hist(x = house.power.comp$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (Kilowatts)")

# 3. Close the PNG graphic device-----------------------------------------------
dev.off()
# Plot airport connectivity as a weighted, undirected graph and build its
# cost (adjacency) matrix, with Inf marking absent edges so that "missing"
# reads as "unreachable" for shortest-path style algorithms.

#install.packages("igraph")
library("igraph")

# Defining Basic Graph: 7 vertices, edges listed as consecutive vertex pairs
size <- 7
edges <- c(1, 2, 1, 3, 1, 5, 1, 6, 1, 7,
           2, 5, 2, 7,
           3, 4, 3, 5, 3, 6,
           4, 5, 4, 6,
           5, 6,
           6, 7)
my.graph <- graph(edges, n = size, directed = FALSE)
plot(my.graph)

# Giving weightage to each edge (same order as the pairs in `edges`)
g.weights <- c(580, 2878, 10003, 9928, 6222, 10562, 5767,
               1206, 7490, 7250, 6313, 6104, 444, 8210)
E(my.graph)$weights <- g.weights
plot(my.graph, edge.label = g.weights)

# Constructing adjacency/cost matrix from the "weights" edge attribute
MyMatrix <- matrix(get.adjacency(my.graph, attr = "weights"), size, size)

# Replace 0 (no edge, including the diagonal) with Inf.  Vectorised logical
# subsetting replaces the original O(size^2) double for-loop.
MyMatrix[MyMatrix == 0] <- Inf
MyMatrix
/3. Plotting Airport Connectivity As A Graph And Obtaining Adjacency Matrix.R
no_license
akshitbhalla2/GraphTheory
R
false
false
962
r
# Plot airport connectivity as a weighted, undirected graph and build its
# cost (adjacency) matrix, with Inf marking absent edges so that "missing"
# reads as "unreachable" for shortest-path style algorithms.

#install.packages("igraph")
library("igraph")

# Defining Basic Graph: 7 vertices, edges listed as consecutive vertex pairs
size <- 7
edges <- c(1, 2, 1, 3, 1, 5, 1, 6, 1, 7,
           2, 5, 2, 7,
           3, 4, 3, 5, 3, 6,
           4, 5, 4, 6,
           5, 6,
           6, 7)
my.graph <- graph(edges, n = size, directed = FALSE)
plot(my.graph)

# Giving weightage to each edge (same order as the pairs in `edges`)
g.weights <- c(580, 2878, 10003, 9928, 6222, 10562, 5767,
               1206, 7490, 7250, 6313, 6104, 444, 8210)
E(my.graph)$weights <- g.weights
plot(my.graph, edge.label = g.weights)

# Constructing adjacency/cost matrix from the "weights" edge attribute
MyMatrix <- matrix(get.adjacency(my.graph, attr = "weights"), size, size)

# Replace 0 (no edge, including the diagonal) with Inf.  Vectorised logical
# subsetting replaces the original O(size^2) double for-loop.
MyMatrix[MyMatrix == 0] <- Inf
MyMatrix
# Reformat raw n-gram frequency lines ("a-b-c<TAB>freq") into data frames
# and write them back out in CSV chunks.

library(dplyr)
library(pbapply)
library(parallel)

setwd("C:/Users/Alejandro/Desktop/")

# link = "triGramsFreq.txt.tar.gz"
# untar(link, "triGramsFreq.txt")

con <- file("triGramsFreq.txt")
myLines <- readLines(con)
close.connection(con)

# Turn one raw line into a one-row data frame: the hyphen-separated n-gram
# split into N columns plus its frequency.
reformat <- function(line, N = 2) {
  library(dplyr)  # needed on cluster workers
  line <- line %>%
    gsub("\\t", " ", .) %>%
    strsplit(" ") %>%
    unlist()
  nGram <- line[1] %>%
    strsplit("-") %>%
    unlist() %>%
    matrix(ncol = N)
  data.frame(nGram, Freq = line[2])
}

# Reformat a batch of lines on a 2-worker cluster and stack the results
# into a single data frame.
makeDF <- function(myLines, N = 2) {
  cl <- makeCluster(2L)
  df <- myLines %>%
    pblapply(reformat, N, cl = cl) %>%
    pbsapply(unlist, cl = cl) %>%
    t() %>%
    as.data.frame()
  stopCluster(cl)
  df
}

print("Mapping...")
# Split the input into (up to) 30 roughly equal partitions
nPartitions <- 30
totalDFS <- myLines %>% length()
partLength <- round(totalDFS / nPartitions)
parts <- gl(n = nPartitions, k = partLength, length = totalDFS)

print("Shuffling...")
i <- 1
# NOTE(review): only the first 10 of nPartitions parts are processed here —
# presumably intentional ("Part1" in the file name); verify with the author.
for (i in 1:10) {
  ngrams <- makeDF(myLines[parts == as.character(i)], N = 3)
  print(paste("finished part", i, "of", nPartitions))
  write.csv(ngrams, paste0("triGramsFreqFinal-Part1-", i, ".csv"))
}
/reformatNGrams(virtual).R
no_license
laandrad/CDSCapstone
R
false
false
1,118
r
# Reformat raw n-gram frequency lines ("a-b-c<TAB>freq") into data frames
# and write them back out in CSV chunks.

library(dplyr)
library(pbapply)
library(parallel)

setwd("C:/Users/Alejandro/Desktop/")

# link = "triGramsFreq.txt.tar.gz"
# untar(link, "triGramsFreq.txt")

con <- file("triGramsFreq.txt")
myLines <- readLines(con)
close.connection(con)

# Turn one raw line into a one-row data frame: the hyphen-separated n-gram
# split into N columns plus its frequency.
reformat <- function(line, N = 2) {
  library(dplyr)  # needed on cluster workers
  line <- line %>%
    gsub("\\t", " ", .) %>%
    strsplit(" ") %>%
    unlist()
  nGram <- line[1] %>%
    strsplit("-") %>%
    unlist() %>%
    matrix(ncol = N)
  data.frame(nGram, Freq = line[2])
}

# Reformat a batch of lines on a 2-worker cluster and stack the results
# into a single data frame.
makeDF <- function(myLines, N = 2) {
  cl <- makeCluster(2L)
  df <- myLines %>%
    pblapply(reformat, N, cl = cl) %>%
    pbsapply(unlist, cl = cl) %>%
    t() %>%
    as.data.frame()
  stopCluster(cl)
  df
}

print("Mapping...")
# Split the input into (up to) 30 roughly equal partitions
nPartitions <- 30
totalDFS <- myLines %>% length()
partLength <- round(totalDFS / nPartitions)
parts <- gl(n = nPartitions, k = partLength, length = totalDFS)

print("Shuffling...")
i <- 1
# NOTE(review): only the first 10 of nPartitions parts are processed here —
# presumably intentional ("Part1" in the file name); verify with the author.
for (i in 1:10) {
  ngrams <- makeDF(myLines[parts == as.character(i)], N = 3)
  print(paste("finished part", i, "of", nPartitions))
  write.csv(ngrams, paste0("triGramsFreqFinal-Part1-", i, ".csv"))
}
library(tableMatrix)

### Name: [.tableList
### Title: Bracket
### Aliases: [.tableList [<-.tableList

### ** Examples

data(images8By8)

# Build a tableList from the first three columns of images8By8
TL <- tableList(images8By8[, 1:3])

# data.table-style bracket filtering on a tableList object
TL[direction == "both"]

# Bracket assignment: single cell, then a whole row, then a whole column
TL[2, 1] <- "aaa"
TL[2, ] <- list("aaa", 1000, 1000)
TL[, 2] <- 1
/data/genthat_extracted_code/tableMatrix/examples/sub-.tableList.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
389
r
library(tableMatrix)

### Name: [.tableList
### Title: Bracket
### Aliases: [.tableList [<-.tableList

### ** Examples

data(images8By8)

# Build a tableList from the first three columns of images8By8
TL <- tableList(images8By8[, 1:3])

# data.table-style bracket filtering on a tableList object
TL[direction == "both"]

# Bracket assignment: single cell, then a whole row, then a whole column
TL[2, 1] <- "aaa"
TL[2, ] <- list("aaa", 1000, 1000)
TL[, 2] <- 1
## Build a gene expression profile from raw Affymetrix CEL files (GSE64614)

## --- CEL file handling ---
source("http://bioconductor.org/biocLite.R")
cels <- list.files("GSE64614_RAW/", pattern = '[gz]')

## Install and load R.utils (and its dependencies)
# biocLite("R.utils")
# biocLite("R.oo")
# biocLite("R.methodsS3")
library(R.utils)
library(R.oo)
library(R.methodsS3)
library(hgu133plus2cdf)

# Decompress every .gz CEL file, then point the working directory at them
sapply(paste('GSE64614_RAW', cels, sep = '/'), gunzip)
celpath <- paste(getwd(), 'GSE64614_RAW', sep = '/')
oldWD <- setwd(celpath)

## --- Data preprocessing ---
# biocLite("affy")
library(affy)
library(BiocGenerics)
library(parallel)
library(Biobase)

raw_data <- ReadAffy()
setwd(oldWD)

# Delete the GSE64614_RAW folder and its contents
unlink('GSE64614_RAW', recursive = TRUE)

# Normalise with rma(), whose output is log-transformed; mas5() could be
# used instead, which yields raw signal intensities.
# mas_data = mas5(raw_data)
rma_data <- rma(raw_data)
rma_exp <- exprs(rma_data)
write.table(rma_exp, 'probeExpData/GSE64614.txt', sep = '\t',
            row.names = TRUE, col.names = TRUE, quote = F)

## --- Annotation ---
# biocLite('annotate')
# biocLite('hgu133plus2.db')
library(annotate)
library(AnnotationDbi)
library(stats4)
library(IRanges)
library(S4Vectors)
library(XML)

affydb <- annPkgName(rma_data@annotation, type = 'db')
library(affydb, character.only = TRUE)
library(org.Hs.eg.db)

## Remove probes that map to no gene and probes that map to several genes
# raw_symbols = as.matrix(getSYMBOL(rownames(rma_exp),affydb))
raw_geneid <- as.matrix(getEG(rownames(rma_exp), affydb))
colnames(raw_geneid) <- c('geneid')
new_geneid <- as.numeric(as.matrix(raw_geneid))
rma_exp2 <- cbind(new_geneid, rma_exp)

# Drop NA gene ids
geneid <- na.omit(new_geneid)
# loc=match(rownames(rma_exp),rownames(geneid))
dim(rma_exp2)
rma_exp3 <- na.omit(rma_exp2)[, -1]
dim(rma_exp3)

## Average the probe sets that map to the same gene
geneid_factor <- factor(geneid)
gene_exp_matrix <- apply(rma_exp3, 2,
                         function(x) tapply(x, geneid_factor, mean))
dim(gene_exp_matrix)
rownames(gene_exp_matrix) <- levels(geneid_factor)
gene_exp_matrix_1 <- data.frame(Entrez_ID = rownames(gene_exp_matrix),
                                gene_exp_matrix)
write.table(gene_exp_matrix_1, 'normalize_data/GSE69438_1.txt', sep = '\t',
            quote = F, row.names = F)
/AnalyseMethod/表达谱构成/GEO_rawdata.r
no_license
lifeonsci-com/lifeonsci
R
false
false
2,137
r
## Build a gene expression profile from raw Affymetrix CEL files (GSE64614)

## --- CEL file handling ---
source("http://bioconductor.org/biocLite.R")
cels <- list.files("GSE64614_RAW/", pattern = '[gz]')

## Install and load R.utils (and its dependencies)
# biocLite("R.utils")
# biocLite("R.oo")
# biocLite("R.methodsS3")
library(R.utils)
library(R.oo)
library(R.methodsS3)
library(hgu133plus2cdf)

# Decompress every .gz CEL file, then point the working directory at them
sapply(paste('GSE64614_RAW', cels, sep = '/'), gunzip)
celpath <- paste(getwd(), 'GSE64614_RAW', sep = '/')
oldWD <- setwd(celpath)

## --- Data preprocessing ---
# biocLite("affy")
library(affy)
library(BiocGenerics)
library(parallel)
library(Biobase)

raw_data <- ReadAffy()
setwd(oldWD)

# Delete the GSE64614_RAW folder and its contents
unlink('GSE64614_RAW', recursive = TRUE)

# Normalise with rma(), whose output is log-transformed; mas5() could be
# used instead, which yields raw signal intensities.
# mas_data = mas5(raw_data)
rma_data <- rma(raw_data)
rma_exp <- exprs(rma_data)
write.table(rma_exp, 'probeExpData/GSE64614.txt', sep = '\t',
            row.names = TRUE, col.names = TRUE, quote = F)

## --- Annotation ---
# biocLite('annotate')
# biocLite('hgu133plus2.db')
library(annotate)
library(AnnotationDbi)
library(stats4)
library(IRanges)
library(S4Vectors)
library(XML)

affydb <- annPkgName(rma_data@annotation, type = 'db')
library(affydb, character.only = TRUE)
library(org.Hs.eg.db)

## Remove probes that map to no gene and probes that map to several genes
# raw_symbols = as.matrix(getSYMBOL(rownames(rma_exp),affydb))
raw_geneid <- as.matrix(getEG(rownames(rma_exp), affydb))
colnames(raw_geneid) <- c('geneid')
new_geneid <- as.numeric(as.matrix(raw_geneid))
rma_exp2 <- cbind(new_geneid, rma_exp)

# Drop NA gene ids
geneid <- na.omit(new_geneid)
# loc=match(rownames(rma_exp),rownames(geneid))
dim(rma_exp2)
rma_exp3 <- na.omit(rma_exp2)[, -1]
dim(rma_exp3)

## Average the probe sets that map to the same gene
geneid_factor <- factor(geneid)
gene_exp_matrix <- apply(rma_exp3, 2,
                         function(x) tapply(x, geneid_factor, mean))
dim(gene_exp_matrix)
rownames(gene_exp_matrix) <- levels(geneid_factor)
gene_exp_matrix_1 <- data.frame(Entrez_ID = rownames(gene_exp_matrix),
                                gene_exp_matrix)
write.table(gene_exp_matrix_1, 'normalize_data/GSE69438_1.txt', sep = '\t',
            quote = F, row.names = F)
\name{NEC} \Rdversion{1.1} \alias{NEC} \alias{NEC.2} \alias{NEC.3} \alias{NEC.4} \title{ Dose-response model for estimation of no effect concentration (NEC). } \description{ The no effect concentration has been proposed as an alternative to both the classical no observed effect concentration (NOEC) and the regression-based EC/ED approach. The NEC model is a dose-response model with a threshold below which the response is assumed constant and equal to the control response. } \usage{ NEC(fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"), fctName, fctText) NEC.2(upper = 1, fixed = c(NA, NA), names = c("b", "e"), ...) NEC.3(fixed = c(NA, NA, NA), names = c("b", "d", "e"), ...) NEC.4(fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"), ...) } \arguments{ \item{fixed}{numeric vector specifying which parameters are fixed and at what value they are fixed. NAs are used for parameters that are not fixed.} \item{names}{a vector of character strings giving the names of the parameters (should not contain ":"). The default is reasonable (see under 'Usage').} \item{fctName}{optional character string used internally by convenience functions.} \item{fctText}{optional character string used internally by convenience functions.} \item{upper}{numeric value. The fixed, upper limit in the model. Default is 1.} \item{...}{additional arguments in \code{\link{NEC}}} } \details{ The NEC model function proposed by Pires \emph{et al} (2002) is defined as follows \deqn{ f(x) = c + (d-c)\exp(-b(x-e)I(x-e)) + \frac{d2}{1+\exp(b2(\log(x)-\log(e2)))}} where \eqn{I(x-e)} is the indicator function. It is equal to 0 for \eqn{x<=e} and equal 1 for \eqn{x>e}. 
In other words: The parameter e in \code{NEC} in "drc" corresponds to the parameter c' in Pires \emph{et al} (2002), the parameter b in \code{NEC} in "drc" corresponds to the parameter m' in Pires \emph{et al} (2002), the parameter d in \code{NEC} in "drc" corresponds to the parameter l' in Pires \emph{et al} (2002), and finally the parameter c in \code{NEC} in "drc" (the lower horizontal limit) is (implicitly) fixed at 0 in Pires \emph{et al} (2002) } \value{ The value returned is a list containing the nonlinear function, the self starter function and the parameter names. } \references{ Pires, A. M., Branco, J. A., Picado, A., Mendonca, E. (2002) Models for the estimation of a 'no effect concentration', \emph{Environmetrics}, \bold{13}, 15--27. } \author{ Christian Ritz } %\note{} %\seealso{} \examples{ nec.m1 <- drm(rootl~conc, data=ryegrass, fct=NEC.4()) summary(nec.m1) plot(nec.m1) abline(v=coef(nec.m1)[4], lty=2) # showing the estimated threshold } \keyword{models} \keyword{nonlinear}
/man/NEC.Rd
no_license
csetraynor/drc
R
false
false
2,860
rd
\name{NEC} \Rdversion{1.1} \alias{NEC} \alias{NEC.2} \alias{NEC.3} \alias{NEC.4} \title{ Dose-response model for estimation of no effect concentration (NEC). } \description{ The no effect concentration has been proposed as an alternative to both the classical no observed effect concentration (NOEC) and the regression-based EC/ED approach. The NEC model is a dose-response model with a threshold below which the response is assumed constant and equal to the control response. } \usage{ NEC(fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"), fctName, fctText) NEC.2(upper = 1, fixed = c(NA, NA), names = c("b", "e"), ...) NEC.3(fixed = c(NA, NA, NA), names = c("b", "d", "e"), ...) NEC.4(fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"), ...) } \arguments{ \item{fixed}{numeric vector specifying which parameters are fixed and at what value they are fixed. NAs are used for parameters that are not fixed.} \item{names}{a vector of character strings giving the names of the parameters (should not contain ":"). The default is reasonable (see under 'Usage').} \item{fctName}{optional character string used internally by convenience functions.} \item{fctText}{optional character string used internally by convenience functions.} \item{upper}{numeric value. The fixed, upper limit in the model. Default is 1.} \item{...}{additional arguments in \code{\link{NEC}}} } \details{ The NEC model function proposed by Pires \emph{et al} (2002) is defined as follows \deqn{ f(x) = c + (d-c)\exp(-b(x-e)I(x-e)) + \frac{d2}{1+\exp(b2(\log(x)-\log(e2)))}} where \eqn{I(x-e)} is the indicator function. It is equal to 0 for \eqn{x<=e} and equal 1 for \eqn{x>e}. 
In other words: The parameter e in \code{NEC} in "drc" corresponds to the parameter c' in Pires \emph{et al} (2002), the parameter b in \code{NEC} in "drc" corresponds to the parameter m' in Pires \emph{et al} (2002), the parameter d in \code{NEC} in "drc" corresponds to the parameter l' in Pires \emph{et al} (2002), and finally the parameter c in \code{NEC} in "drc" (the lower horizontal limit) is (implicitly) fixed at 0 in Pires \emph{et al} (2002) } \value{ The value returned is a list containing the nonlinear function, the self starter function and the parameter names. } \references{ Pires, A. M., Branco, J. A., Picado, A., Mendonca, E. (2002) Models for the estimation of a 'no effect concentration', \emph{Environmetrics}, \bold{13}, 15--27. } \author{ Christian Ritz } %\note{} %\seealso{} \examples{ nec.m1 <- drm(rootl~conc, data=ryegrass, fct=NEC.4()) summary(nec.m1) plot(nec.m1) abline(v=coef(nec.m1)[4], lty=2) # showing the estimated threshold } \keyword{models} \keyword{nonlinear}
###############################################################################
# Multi-resolution blockmodel
#
# file: indian_village_postprocess.R
#
# Convergence checks, post-processing, and plots for the India village data,
# assuming India_village_estimation.R has already been run and produced
# results.
#
# Set up here to use four chains, but can be run with more/fewer. To use this
# file, you'll need to be sure of the names of the chain result files.
#
# Author: tedwestling
###############################################################################
rm(list = ls())

## set your working directory to the top level multiresolution_networks folder
# setwd("")

# Source header file (should be in top level of working directory)
source('header.R')

# We set the seed numerous times throughout the document at natural reload
# points
set.seed(22311)

vilno <- 59
edgetype <- 'visitcome'

# Load in household-level network data for the particular edgetype and village
network <- as.matrix(read.csv(paste0('data/indian_village_raw/1. Network Data/Adjacency Matrices/adj_',
                                     edgetype, '_HH_vilno_', vilno, '.csv'),
                              header = FALSE))

# Remove nodes with degree 0
fullnetwork <- network
zros <- which(colSums(network) == 0)
network <- network[-zros, -zros]
N <- nrow(network)
K <- Khat <- 6

thinfirst <- TRUE
thinfac <- 4  # keep every fourth
# note: on these runs we've already thinned by 10 within the sampler

#### for the longer chains, add some thinning
load('data/results/village_59_mcmc_chain1.Rdata')
chain1a <- chain1
load('data/results/village_59_mcmc_chain2.Rdata')
chain2 <- chain1
load('data/results/village_59_mcmc_chain3.Rdata')
chain3 <- chain1
load('data/results/village_59_mcmc_chain4.Rdata')
chain4 <- chain1
chain1 <- chain1a

if (thinfirst == TRUE) {
  # chain components: beta, sigma, pi, mu, Sigma, B, Z, gamma
  keep <- which(c(1:length(chain1$beta[, 1])) %% thinfac == 0)
  chain1 <- list(beta = chain1$beta[keep, ],
                 sigma = chain1$sigma[keep, ],
                 pi = chain1$pi[keep, ],
                 mu = chain1$mu[keep, ],
                 Sigma = chain1$Sigma[keep, , ],
                 B = chain1$B[keep, , ],
                 Z = chain1$Z[keep, , ],
                 gamma = chain1$gamma[keep, ])
  # chain2 <- list(beta = chain2$beta[keep, ], sigma = chain2$sigma[keep, ], pi = chain2$pi[keep, ], mu = chain2$mu[keep, ], Sigma = chain2$Sigma[keep, , ], B = chain2$B[keep, , ], Z = chain2$Z[keep, , ], gamma = chain2$gamma[keep, ])
  # chain3 <- list(beta = chain3$beta[keep, ], sigma = chain3$sigma[keep, ], pi = chain3$pi[keep, ], mu = chain3$mu[keep, ], Sigma = chain3$Sigma[keep, , ], B = chain3$B[keep, , ], Z = chain3$Z[keep, , ], gamma = chain3$gamma[keep, ])
  # chain4 <- list(beta = chain4$beta[keep, ], sigma = chain4$sigma[keep, ], pi = chain4$pi[keep, ], mu = chain4$mu[keep, ], Sigma = chain4$Sigma[keep, , ], B = chain4$B[keep, , ], Z = chain4$Z[keep, , ], gamma = chain4$gamma[keep, ])
}

# Pool the samples from all four chains for post-processing
all_chains <- list(beta = rbind(chain1$beta, chain2$beta, chain3$beta, chain4$beta),
                   sigma = rbind(chain1$sigma, chain2$sigma, chain3$sigma, chain4$sigma),
                   pi = rbind(chain1$pi, chain2$pi, chain3$pi, chain4$pi),
                   mu = rbind(chain1$mu, chain2$mu, chain3$mu, chain4$mu),
                   Sigma = abind(chain1$Sigma, chain2$Sigma, chain3$Sigma, chain4$Sigma, along = 1),
                   B = abind(chain1$B, chain2$B, chain3$B, chain4$B, along = 1),
                   Z = abind(chain1$Z, chain2$Z, chain3$Z, chain4$Z, along = 1),
                   gamma = rbind(chain1$gamma, chain2$gamma, chain3$gamma, chain4$gamma))

set.seed(303854)

# Use assortative spectral clustering estimate as fixed membership vector to
# rotate towards
spec <- spectral_cluster(network, Krange = Khat, assortative = TRUE,
                         plot = FALSE, degree_correct = FALSE)

# uncomment to plot spectral clusters
# plot_blocked_matrix(network, spec[[1]]$clusters)

mcmc_samplesc1 <- postprocess_MCMC(chain1, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc2 <- postprocess_MCMC(chain2, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc3 <- postprocess_MCMC(chain3, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc4 <- postprocess_MCMC(chain4, network, fixed_memb = spec[[1]]$clusters)

# save your post-processed samples
# save.image("data/results/village_59_mcmc_strongass_v1_postprocessed.Rdata")
save(network, chain1, chain2, chain3, chain4,
     mcmc_samplesc1, mcmc_samplesc2, mcmc_samplesc3, mcmc_samplesc4,
     N, K, Khat, vilno,
     file = "data/results/village_59_mcmc_strongass_v1_postprocessed.Rdata")
/code/scripts/India_village_postprocess.R
permissive
freekang/multiresolution_networks
R
false
false
4,323
r
###############################################################################
# Multi-resolution blockmodel
#
# file: indian_village_postprocess.R
#
# Convergence checks, post-processing, and plots for the India village data,
# assuming India_village_estimation.R has already been run and produced
# results.
#
# Set up here to use four chains, but can be run with more/fewer. To use this
# file, you'll need to be sure of the names of the chain result files.
#
# Author: tedwestling
###############################################################################
rm(list = ls())

## set your working directory to the top level multiresolution_networks folder
# setwd("")

# Source header file (should be in top level of working directory)
source('header.R')

# We set the seed numerous times throughout the document at natural reload
# points
set.seed(22311)

vilno <- 59
edgetype <- 'visitcome'

# Load in household-level network data for the particular edgetype and village
network <- as.matrix(read.csv(paste0('data/indian_village_raw/1. Network Data/Adjacency Matrices/adj_',
                                     edgetype, '_HH_vilno_', vilno, '.csv'),
                              header = FALSE))

# Remove nodes with degree 0
fullnetwork <- network
zros <- which(colSums(network) == 0)
network <- network[-zros, -zros]
N <- nrow(network)
K <- Khat <- 6

thinfirst <- TRUE
thinfac <- 4  # keep every fourth
# note: on these runs we've already thinned by 10 within the sampler

#### for the longer chains, add some thinning
load('data/results/village_59_mcmc_chain1.Rdata')
chain1a <- chain1
load('data/results/village_59_mcmc_chain2.Rdata')
chain2 <- chain1
load('data/results/village_59_mcmc_chain3.Rdata')
chain3 <- chain1
load('data/results/village_59_mcmc_chain4.Rdata')
chain4 <- chain1
chain1 <- chain1a

if (thinfirst == TRUE) {
  # chain components: beta, sigma, pi, mu, Sigma, B, Z, gamma
  keep <- which(c(1:length(chain1$beta[, 1])) %% thinfac == 0)
  chain1 <- list(beta = chain1$beta[keep, ],
                 sigma = chain1$sigma[keep, ],
                 pi = chain1$pi[keep, ],
                 mu = chain1$mu[keep, ],
                 Sigma = chain1$Sigma[keep, , ],
                 B = chain1$B[keep, , ],
                 Z = chain1$Z[keep, , ],
                 gamma = chain1$gamma[keep, ])
  # chain2 <- list(beta = chain2$beta[keep, ], sigma = chain2$sigma[keep, ], pi = chain2$pi[keep, ], mu = chain2$mu[keep, ], Sigma = chain2$Sigma[keep, , ], B = chain2$B[keep, , ], Z = chain2$Z[keep, , ], gamma = chain2$gamma[keep, ])
  # chain3 <- list(beta = chain3$beta[keep, ], sigma = chain3$sigma[keep, ], pi = chain3$pi[keep, ], mu = chain3$mu[keep, ], Sigma = chain3$Sigma[keep, , ], B = chain3$B[keep, , ], Z = chain3$Z[keep, , ], gamma = chain3$gamma[keep, ])
  # chain4 <- list(beta = chain4$beta[keep, ], sigma = chain4$sigma[keep, ], pi = chain4$pi[keep, ], mu = chain4$mu[keep, ], Sigma = chain4$Sigma[keep, , ], B = chain4$B[keep, , ], Z = chain4$Z[keep, , ], gamma = chain4$gamma[keep, ])
}

# Pool the samples from all four chains for post-processing
all_chains <- list(beta = rbind(chain1$beta, chain2$beta, chain3$beta, chain4$beta),
                   sigma = rbind(chain1$sigma, chain2$sigma, chain3$sigma, chain4$sigma),
                   pi = rbind(chain1$pi, chain2$pi, chain3$pi, chain4$pi),
                   mu = rbind(chain1$mu, chain2$mu, chain3$mu, chain4$mu),
                   Sigma = abind(chain1$Sigma, chain2$Sigma, chain3$Sigma, chain4$Sigma, along = 1),
                   B = abind(chain1$B, chain2$B, chain3$B, chain4$B, along = 1),
                   Z = abind(chain1$Z, chain2$Z, chain3$Z, chain4$Z, along = 1),
                   gamma = rbind(chain1$gamma, chain2$gamma, chain3$gamma, chain4$gamma))

set.seed(303854)

# Use assortative spectral clustering estimate as fixed membership vector to
# rotate towards
spec <- spectral_cluster(network, Krange = Khat, assortative = TRUE,
                         plot = FALSE, degree_correct = FALSE)

# uncomment to plot spectral clusters
# plot_blocked_matrix(network, spec[[1]]$clusters)

mcmc_samplesc1 <- postprocess_MCMC(chain1, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc2 <- postprocess_MCMC(chain2, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc3 <- postprocess_MCMC(chain3, network, fixed_memb = spec[[1]]$clusters)
mcmc_samplesc4 <- postprocess_MCMC(chain4, network, fixed_memb = spec[[1]]$clusters)

# save your post-processed samples
# save.image("data/results/village_59_mcmc_strongass_v1_postprocessed.Rdata")
save(network, chain1, chain2, chain3, chain4,
     mcmc_samplesc1, mcmc_samplesc2, mcmc_samplesc3, mcmc_samplesc4,
     N, K, Khat, vilno,
     file = "data/results/village_59_mcmc_strongass_v1_postprocessed.Rdata")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/7SLwrappers.R \name{SL.QAW.W4} \alias{SL.QAW.W4} \title{QAW for W5} \usage{ SL.QAW.W4(Y, X, newX, family, obsWeights, model = TRUE, ...) } \arguments{ \item{Y}{outcome} \item{X}{predictors} \item{newX}{new X} \item{family}{family} \item{obsWeights}{obsWeights} \item{model}{model} \item{...}{other} \item{SL.library}{SL library} } \value{ SL.QAW.W4 } \description{ QAW for W5. }
/man/SL.QAW.W4.Rd
no_license
lmmontoya/SL.ODTR
R
false
true
464
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/7SLwrappers.R \name{SL.QAW.W4} \alias{SL.QAW.W4} \title{QAW for W5} \usage{ SL.QAW.W4(Y, X, newX, family, obsWeights, model = TRUE, ...) } \arguments{ \item{Y}{outcome} \item{X}{predictors} \item{newX}{new X} \item{family}{family} \item{obsWeights}{obsWeights} \item{model}{model} \item{...}{other} \item{SL.library}{SL library} } \value{ SL.QAW.W4 } \description{ QAW for W5. }
# Find a decreasing grid of lambda values for a BTLLasso fit.
#
# Starting from lambda = 100, the search doubles lambda until every penalized
# coefficient contrast is shrunk to (numerical) zero, then narrows the
# bracket [cur.low, cur.up] (stepping to 70% of the way up) until its
# relative width is <= 0.15.  The returned grid runs from that maximal
# lambda down to control$lambda.min, log- or linearly spaced depending on
# control$log.lambda.
#
# Arguments mirror fit.BTLLasso(); `trace` toggles a progress message.
# Returns a numeric vector of length control$l.lambda.
find.lambda <- function(response, design, penalties, k, m, control, trace) {
  if (trace) {
    cat("Find maximal lambda...\n")
  }
  cur.low <- 0
  cur.up <- lambda.ratio <- Inf
  cur.lambda <- 100

  while (lambda.ratio > 0.15) {
    m.cur <- try(fit.BTLLasso(response, design, penalties, cur.lambda, k, m,
                              control, trace = FALSE))
    # A failed fit previously slipped through the try() unchecked and
    # surfaced later as a confusing "$ operator" error; fail loudly instead.
    if (inherits(m.cur, "try-error")) {
      stop("fit.BTLLasso failed while searching for the maximal lambda ",
           "(lambda = ", cur.lambda, ")", call. = FALSE)
    }

    # Penalized contrasts of the current coefficients; values below the
    # configured precision are treated as exactly zero.
    coefs.cur <- m.cur$coefs %*% penalties$acoefs
    coefs.cur[abs(coefs.cur) < 1 / (10^control$precision)] <- 0

    if (sum(abs(coefs.cur)) == 0) {
      cur.up <- cur.lambda   # all contrasts shrunk: lambda is large enough
    } else {
      cur.low <- cur.lambda  # active contrasts remain: lambda too small
    }

    if (is.finite(cur.up)) {
      cur.lambda <- (cur.up - cur.low) * 0.7 + cur.low
      lambda.ratio <- (cur.up - cur.low) / cur.up
    } else {
      cur.lambda <- cur.lambda * 2
    }
  }

  if (control$log.lambda) {
    # Log-spaced grid; the 0.01 * cur.up offset keeps log() finite even when
    # lambda.min == 0, and the last entry is pinned to lambda.min exactly.
    lambda <- exp(seq(log(cur.up + 0.01 * cur.up),
                      log(control$lambda.min + 0.01 * cur.up),
                      length = control$l.lambda)) - 0.01 * cur.up
    lambda[control$l.lambda] <- control$lambda.min
  } else {
    lambda <- seq(cur.up, control$lambda.min, length = control$l.lambda)
  }
  lambda
}
/fuzzedpackages/BTLLasso/R/find.lambda.R
no_license
akhikolla/testpackages
R
false
false
1,211
r
# Find a decreasing grid of lambda values for a BTLLasso fit.
#
# Starting from lambda = 100, the search doubles lambda until every penalized
# coefficient contrast is shrunk to (numerical) zero, then narrows the
# bracket [cur.low, cur.up] (stepping to 70% of the way up) until its
# relative width is <= 0.15.  The returned grid runs from that maximal
# lambda down to control$lambda.min, log- or linearly spaced depending on
# control$log.lambda.
#
# Arguments mirror fit.BTLLasso(); `trace` toggles a progress message.
# Returns a numeric vector of length control$l.lambda.
find.lambda <- function(response, design, penalties, k, m, control, trace) {
  if (trace) {
    cat("Find maximal lambda...\n")
  }
  cur.low <- 0
  cur.up <- lambda.ratio <- Inf
  cur.lambda <- 100

  while (lambda.ratio > 0.15) {
    m.cur <- try(fit.BTLLasso(response, design, penalties, cur.lambda, k, m,
                              control, trace = FALSE))
    # A failed fit previously slipped through the try() unchecked and
    # surfaced later as a confusing "$ operator" error; fail loudly instead.
    if (inherits(m.cur, "try-error")) {
      stop("fit.BTLLasso failed while searching for the maximal lambda ",
           "(lambda = ", cur.lambda, ")", call. = FALSE)
    }

    # Penalized contrasts of the current coefficients; values below the
    # configured precision are treated as exactly zero.
    coefs.cur <- m.cur$coefs %*% penalties$acoefs
    coefs.cur[abs(coefs.cur) < 1 / (10^control$precision)] <- 0

    if (sum(abs(coefs.cur)) == 0) {
      cur.up <- cur.lambda   # all contrasts shrunk: lambda is large enough
    } else {
      cur.low <- cur.lambda  # active contrasts remain: lambda too small
    }

    if (is.finite(cur.up)) {
      cur.lambda <- (cur.up - cur.low) * 0.7 + cur.low
      lambda.ratio <- (cur.up - cur.low) / cur.up
    } else {
      cur.lambda <- cur.lambda * 2
    }
  }

  if (control$log.lambda) {
    # Log-spaced grid; the 0.01 * cur.up offset keeps log() finite even when
    # lambda.min == 0, and the last entry is pinned to lambda.min exactly.
    lambda <- exp(seq(log(cur.up + 0.01 * cur.up),
                      log(control$lambda.min + 0.01 * cur.up),
                      length = control$l.lambda)) - 0.01 * cur.up
    lambda[control$l.lambda] <- control$lambda.min
  } else {
    lambda <- seq(cur.up, control$lambda.min, length = control$l.lambda)
  }
  lambda
}
% Generated by roxygen2 (4.0.1): do not edit by hand \name{DownsideFrequency} \alias{DownsideFrequency} \title{downside frequency of the return distribution} \usage{ DownsideFrequency(R, MAR = 0, ...) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{MAR}{Minimum Acceptable Return, in the same periodicity as your returns} \item{\dots}{any other passthru parameters} } \description{ To calculate Downside Frequency, we take the subset of returns that are less than the target (or Minimum Acceptable Returns (MAR)) returns and divide the length of this subset by the total number of returns. } \details{ \deqn{ DownsideFrequency(R , MAR) = \sum^{n}_{t=1}\frac{min[(R_{t} - MAR), 0]}{R_{t}*n}}{DownsideFrequency(R, MAR) = length(subset of returns below MAR) / length(total returns)} where \eqn{n} is the number of observations of the entire series } \examples{ data(portfolio_bacon) MAR = 0.005 print(DownsideFrequency(portfolio_bacon[,1], MAR)) #expected 0.458 data(managers) print(DownsideFrequency(managers['1996'])) print(DownsideFrequency(managers['1996',1])) #expected 0.25 } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.94 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
/man/DownsideFrequency.Rd
no_license
ecjbosu/PerformanceAnalytics
R
false
false
1,417
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{DownsideFrequency} \alias{DownsideFrequency} \title{downside frequency of the return distribution} \usage{ DownsideFrequency(R, MAR = 0, ...) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{MAR}{Minimum Acceptable Return, in the same periodicity as your returns} \item{\dots}{any other passthru parameters} } \description{ To calculate Downside Frequency, we take the subset of returns that are less than the target (or Minimum Acceptable Returns (MAR)) returns and divide the length of this subset by the total number of returns. } \details{ \deqn{ DownsideFrequency(R , MAR) = \sum^{n}_{t=1}\frac{min[(R_{t} - MAR), 0]}{R_{t}*n}}{DownsideFrequency(R, MAR) = length(subset of returns below MAR) / length(total returns)} where \eqn{n} is the number of observations of the entire series } \examples{ data(portfolio_bacon) MAR = 0.005 print(DownsideFrequency(portfolio_bacon[,1], MAR)) #expected 0.458 data(managers) print(DownsideFrequency(managers['1996'])) print(DownsideFrequency(managers['1996',1])) #expected 0.25 } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.94 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
# set up ----- rm(list = ls()) invisible(gc(verbose = F)) options( scipen = 999, max.print = 100 ) # pakages ----- suppressMessages({ library(shiny) library(shinyWidgets) library(shinyjs) library(dplyr) library(googlesheets) library(googledrive) library(xlsx) }) # source ----- source("functions.R") # google authorization ----- gs_auth() drive_auth() # static data ----- scale_anchors <- list( evaluative = list(c("Bad", "Awful"), c("Good", "Nice")), potency = list(c("Powerless", "Little"), c("Powerful", "Big")), activity = list(c("Slow", "Quiet", "Inactive"), c("Fast", "Noisy", "Active")) )
/WebApp/webapp-bundle/global.R
no_license
AustinVL/AustinVL-Word-embeddings-reveal-how-fundamental-sentiments-structure-natural-language
R
false
false
659
r
# set up ----- rm(list = ls()) invisible(gc(verbose = F)) options( scipen = 999, max.print = 100 ) # pakages ----- suppressMessages({ library(shiny) library(shinyWidgets) library(shinyjs) library(dplyr) library(googlesheets) library(googledrive) library(xlsx) }) # source ----- source("functions.R") # google authorization ----- gs_auth() drive_auth() # static data ----- scale_anchors <- list( evaluative = list(c("Bad", "Awful"), c("Good", "Nice")), potency = list(c("Powerless", "Little"), c("Powerful", "Big")), activity = list(c("Slow", "Quiet", "Inactive"), c("Fast", "Noisy", "Active")) )
#' Feature importance #' #' @description #' \code{FeatureImp} computes feature importances for prediction models. #' The importance is measured as the factor by which the model's prediction error increases when the feature is shuffled. #' #' @format \code{\link{R6Class}} object. #' @name FeatureImp #' #' @section Usage: #' \preformatted{ #' imp = FeatureImp$new(predictor, loss, method = "shuffle", run = TRUE) #' #' plot(imp) #' imp$results #' print(imp) #' } #' #' @section Arguments: #' #' For FeatureImp$new(): #' \describe{ #' \item{predictor: }{(Predictor)\cr #' The object (created with Predictor$new()) holding the machine learning model and the data.} #' \item{loss: }{(`character(1)` | function)\cr The loss function. Either the name of a loss (e.g. "ce" for classification or "mse") or a loss function. See Details for allowed losses.} #' \item{method: }{(`character(1)`)\cr Either "shuffle" or "cartesian". See Details.} #' \item{run: }{(`logical(1)`)\cr Should the Interpretation method be run?} #' } #' #' @section Details: #' Read the Interpretable Machine Learning book to learn in detail about feature importance: #' \url{https://christophm.github.io/interpretable-ml-book/feature-importance.html} #' #' Two permutation schemes are implemented: #' \itemize{ #' \item shuffle: A simple shuffling of the feature values, yielding n perturbed instances per feature (fast) #' \item cartesian: Matching every instance with the feature value of all other instances, yielding n x (n-1) perturbed instances per feature (very slow) #' } #' #' The loss function can be either specified via a string, or by handing a function to \code{FeatureImp()}. #' If you want to use your own loss function it should have this signature: function(actual, predicted). #' Using the string is a shortcut to using loss functions from the \code{Metrics} package. #' Only use functions that return a single performance value, not a vector. 
#' Allowed losses are: "ce", "f1", "logLoss", "mae", "mse", "rmse", "mape", "mdae", #' "msle", "percent_bias", "rae", "rmse", "rmsle", "rse", "rrse", "smape" #' See \code{library(help = "Metrics")} to get a list of functions. #' #' #' @section Fields: #' \describe{ #' \item{original.error: }{(`numeric(1)`)\cr The loss of the model before perturbing features.} #' \item{predictor: }{(Predictor)\cr The prediction model that was analysed.} #' \item{results: }{(data.frame)\cr data.frame with the results of the feature importance computation.} #' } #' #' @section Methods: #' \describe{ #' \item{loss(actual,predicted)}{The loss function. Can also be applied to data: \code{object$loss(actual, predicted)}} #' \item{plot()}{method to plot the feature importances. See \link{plot.FeatureImp}} #' \item{\code{run()}}{[internal] method to run the interpretability method. Use \code{obj$run(force = TRUE)} to force a rerun.} #' \item{\code{clone()}}{[internal] method to clone the R6 object.} #' \item{\code{initialize()}}{[internal] method to initialize the R6 object.} #' } #' #' @references #' Fisher, A., Rudin, C., and Dominici, F. (2018). Model Class Reliance: Variable Importance Measures for any Machine Learning Model Class, from the "Rashomon" Perspective. 
Retrieved from http://arxiv.org/abs/1801.01489 #' #' @import Metrics #' @importFrom data.table copy rbindlist #' @examples #' if (require("rpart")) { #' # We train a tree on the Boston dataset: #' data("Boston", package = "MASS") #' tree = rpart(medv ~ ., data = Boston) #' y = Boston$medv #' X = Boston[-which(names(Boston) == "medv")] #' mod = Predictor$new(tree, data = X, y = y) #' #' # Compute feature importances as the performance drop in mean absolute error #' imp = FeatureImp$new(mod, loss = "mae") #' #' # Plot the results directly #' plot(imp) #' #' #' # Since the result is a ggplot object, you can extend it: #' if (require("ggplot2")) { #' plot(imp) + theme_bw() #' # If you want to do your own thing, just extract the data: #' imp.dat = imp$results #' head(imp.dat) #' ggplot(imp.dat, aes(x = feature, y = importance)) + geom_point() + #' theme_bw() #' } #' #' # FeatureImp also works with multiclass classification. #' # In this case, the importance measurement regards all classes #' tree = rpart(Species ~ ., data= iris) #' X = iris[-which(names(iris) == "Species")] #' y = iris$Species #' mod = Predictor$new(tree, data = X, y = y, type = "prob") #' #' # For some models we have to specify additional arguments for the predict function #' imp = FeatureImp$new(mod, loss = "ce") #' plot(imp) #' #' # For multiclass classification models, you can choose to only compute performance for one class. 
#' # Make sure to adapt y #' mod = Predictor$new(tree, data = X, y = y == "virginica", #' type = "prob", class = "virginica") #' imp = FeatureImp$new(mod, loss = "ce") #' plot(imp) #' } NULL #' @export FeatureImp = R6::R6Class("FeatureImp", inherit = InterpretationMethod, public = list( loss = NULL, original.error = NULL, initialize = function(predictor, loss, method = "shuffle", run = TRUE) { assert_choice(method, c("shuffle", "cartesian")) if (!inherits(loss, "function")) { ## Only allow metrics from Metrics package allowedLosses = c("ce", "f1", "logLoss", "mae", "mse", "rmse", "mape", "mdae", "msle", "percent_bias", "rae", "rmse", "rmsle", "rse", "rrse", "smape") checkmate::assert_choice(loss, allowedLosses) private$loss.string = loss loss = getFromNamespace(loss, "Metrics") } else { private$loss.string = head(loss) } if (is.null(predictor$data$y)) { stop("Please call Predictor$new() with the y target vector.") } super$initialize(predictor = predictor) self$loss = private$set.loss(loss) private$method = method private$getData = private$sampler$get.xy actual = private$sampler$y[[1]] predicted = private$q(self$predictor$predict(private$sampler$X))[[1]] # Assuring that levels are the same self$original.error = loss(actual, predicted) if(run) self$run() } ), private = list( method = NULL, # for printing loss.string = NULL, q = function(pred) probs.to.labels(pred), intervene = function() { X.inter.list = lapply(private$sampler$feature.names, function(i) { n.times = ifelse(private$method == "cartesian", nrow(private$dataSample), 1) mg = generate.marginals(private$dataSample, private$dataSample, features = i, n.sample.dist = n.times) mg$.feature = i mg }) rbindlist(X.inter.list, use.names = TRUE) }, aggregate = function() { y = private$dataDesign[, private$sampler$y.names, with = FALSE] y.hat = private$qResults # For classification we work with the class labels instead of probs result = data.table(feature = private$dataDesign$.feature, actual = y[[1]], predicted = 
y.hat[[1]]) result = result[, list("original.error" = self$original.error, "permutation.error" = self$loss(actual, predicted)), by = feature] result[, importance := permutation.error / self$original.error] result = result[order(result$importance, decreasing = TRUE),] result }, generatePlot = function(sort = TRUE, ...) { res = self$results if (sort) { res$feature = factor(res$feature, levels = res$feature[order(res$importance)]) } ggplot(res, aes(y = feature, x = importance)) + geom_point()+ geom_segment(aes(y = feature, yend = feature, x=1, xend = importance)) + scale_x_continuous("Feature Importance") + scale_y_discrete("Feature") }, set.loss = function(loss) { self$loss = loss }, printParameters = function() { cat("error function:", private$loss.string) } ) ) #' Plot Feature Importance #' #' plot.FeatureImp() plots the feature importance results of a FeatureImp object. #' #' @param x A FeatureImp R6 object #' @param sort logical. Should the features be sorted in descending order? Defaults to TRUE. #' @param ... Further arguments for the objects plot function #' @return ggplot2 plot object #' @export #' @seealso #' \link{FeatureImp} #' @examples #' if (require("rpart")) { #' # We train a tree on the Boston dataset: #' data("Boston", package = "MASS") #' tree = rpart(medv ~ ., data = Boston) #' y = Boston$medv #' X = Boston[-which(names(Boston) == "medv")] #' mod = Predictor$new(tree, data = X, y = y) #' #' # Compute feature importances as the performance drop in mean absolute error #' imp = FeatureImp$new(mod, loss = "mae") #' #' # Plot the results directly #' plot(imp) #' } plot.FeatureImp = function(x, sort = TRUE, ...) { x$plot(sort = sort, ...) }
/R/FeatureImp.R
no_license
ezanutto/iml
R
false
false
8,805
r
#' Feature importance
#'
#' @description
#' \code{FeatureImp} computes feature importances for prediction models.
#' The importance is measured as the factor by which the model's prediction
#' error increases when the feature is shuffled.
#'
#' @format \code{\link{R6Class}} object.
#' @name FeatureImp
#'
#' @section Usage:
#' \preformatted{
#' imp = FeatureImp$new(predictor, loss, method = "shuffle", run = TRUE)
#'
#' plot(imp)
#' imp$results
#' print(imp)
#' }
#'
#' @section Arguments:
#'
#' For FeatureImp$new():
#' \describe{
#' \item{predictor: }{(Predictor)\cr
#' The object (created with Predictor$new()) holding the machine learning model and the data.}
#' \item{loss: }{(`character(1)` | function)\cr The loss function. Either the name of a loss (e.g. "ce" for classification or "mse") or a loss function. See Details for allowed losses.}
#' \item{method: }{(`character(1)`)\cr Either "shuffle" or "cartesian". See Details.}
#' \item{run: }{(`logical(1)`)\cr Should the Interpretation method be run?}
#' }
#'
#' @section Details:
#' Read the Interpretable Machine Learning book to learn in detail about feature importance:
#' \url{https://christophm.github.io/interpretable-ml-book/feature-importance.html}
#'
#' Two permutation schemes are implemented:
#' \itemize{
#' \item shuffle: A simple shuffling of the feature values, yielding n perturbed instances per feature (fast)
#' \item cartesian: Matching every instance with the feature value of all other instances, yielding n x (n-1) perturbed instances per feature (very slow)
#' }
#'
#' The loss function can be either specified via a string, or by handing a function to \code{FeatureImp()}.
#' If you want to use your own loss function it should have this signature: function(actual, predicted).
#' Using the string is a shortcut to using loss functions from the \code{Metrics} package.
#' Only use functions that return a single performance value, not a vector.
#' Allowed losses are: "ce", "f1", "logLoss", "mae", "mse", "rmse", "mape", "mdae",
#' "msle", "percent_bias", "rae", "rmsle", "rse", "rrse", "smape"
#' See \code{library(help = "Metrics")} to get a list of functions.
#'
#'
#' @section Fields:
#' \describe{
#' \item{original.error: }{(`numeric(1)`)\cr The loss of the model before perturbing features.}
#' \item{predictor: }{(Predictor)\cr The prediction model that was analysed.}
#' \item{results: }{(data.frame)\cr data.frame with the results of the feature importance computation.}
#' }
#'
#' @section Methods:
#' \describe{
#' \item{loss(actual,predicted)}{The loss function. Can also be applied to data: \code{object$loss(actual, predicted)}}
#' \item{plot()}{method to plot the feature importances. See \link{plot.FeatureImp}}
#' \item{\code{run()}}{[internal] method to run the interpretability method. Use \code{obj$run(force = TRUE)} to force a rerun.}
#' \item{\code{clone()}}{[internal] method to clone the R6 object.}
#' \item{\code{initialize()}}{[internal] method to initialize the R6 object.}
#' }
#'
#' @references
#' Fisher, A., Rudin, C., and Dominici, F. (2018). Model Class Reliance:
#' Variable Importance Measures for any Machine Learning Model Class, from
#' the "Rashomon" Perspective. Retrieved from http://arxiv.org/abs/1801.01489
#'
#' @import Metrics
#' @importFrom data.table copy rbindlist
#' @examples
#' if (require("rpart")) {
#' # We train a tree on the Boston dataset:
#' data("Boston", package = "MASS")
#' tree = rpart(medv ~ ., data = Boston)
#' y = Boston$medv
#' X = Boston[-which(names(Boston) == "medv")]
#' mod = Predictor$new(tree, data = X, y = y)
#'
#' # Compute feature importances as the performance drop in mean absolute error
#' imp = FeatureImp$new(mod, loss = "mae")
#'
#' # Plot the results directly
#' plot(imp)
#'
#'
#' # Since the result is a ggplot object, you can extend it:
#' if (require("ggplot2")) {
#' plot(imp) + theme_bw()
#' # If you want to do your own thing, just extract the data:
#' imp.dat = imp$results
#' head(imp.dat)
#' ggplot(imp.dat, aes(x = feature, y = importance)) + geom_point() +
#' theme_bw()
#' }
#'
#' # FeatureImp also works with multiclass classification.
#' # In this case, the importance measurement regards all classes
#' tree = rpart(Species ~ ., data= iris)
#' X = iris[-which(names(iris) == "Species")]
#' y = iris$Species
#' mod = Predictor$new(tree, data = X, y = y, type = "prob")
#'
#' # For some models we have to specify additional arguments for the predict function
#' imp = FeatureImp$new(mod, loss = "ce")
#' plot(imp)
#'
#' # For multiclass classification models, you can choose to only compute performance for one class.
#' # Make sure to adapt y
#' mod = Predictor$new(tree, data = X, y = y == "virginica",
#'                     type = "prob", class = "virginica")
#' imp = FeatureImp$new(mod, loss = "ce")
#' plot(imp)
#' }
NULL

#' @export
FeatureImp = R6::R6Class("FeatureImp",
  inherit = InterpretationMethod,
  public = list(
    loss = NULL,
    original.error = NULL,
    # Validates arguments, resolves string losses via the Metrics package,
    # computes the unperturbed baseline error, and (optionally) runs the
    # importance computation immediately.
    initialize = function(predictor, loss, method = "shuffle", run = TRUE) {
      assert_choice(method, c("shuffle", "cartesian"))
      if (!inherits(loss, "function")) {
        ## Only allow metrics from Metrics package
        # FIX(review): "rmse" was listed twice in this vector; deduplicated.
        allowedLosses = c("ce", "f1", "logLoss", "mae", "mse", "rmse", "mape",
          "mdae", "msle", "percent_bias", "rae", "rmsle", "rse", "rrse",
          "smape")
        checkmate::assert_choice(loss, allowedLosses)
        private$loss.string = loss
        loss = getFromNamespace(loss, "Metrics")
      } else {
        # For user-supplied functions, print the (deparsed) function head.
        private$loss.string = head(loss)
      }
      if (is.null(predictor$data$y)) {
        stop("Please call Predictor$new() with the y target vector.")
      }
      super$initialize(predictor = predictor)
      self$loss = private$set.loss(loss)
      private$method = method
      private$getData = private$sampler$get.xy
      actual = private$sampler$y[[1]]
      predicted = private$q(self$predictor$predict(private$sampler$X))[[1]]
      # Assuring that levels are the same
      self$original.error = loss(actual, predicted)
      if (run) self$run()
    }
  ),
  private = list(
    method = NULL,
    # for printing
    loss.string = NULL,
    # For classification, map predicted probabilities to class labels.
    q = function(pred) probs.to.labels(pred),
    # Build the perturbed dataset: one shuffled copy per feature ("shuffle")
    # or all cross-matched values ("cartesian").
    intervene = function() {
      X.inter.list = lapply(private$sampler$feature.names, function(i) {
        n.times = ifelse(private$method == "cartesian",
          nrow(private$dataSample), 1)
        mg = generate.marginals(private$dataSample, private$dataSample,
          features = i, n.sample.dist = n.times)
        mg$.feature = i
        mg
      })
      rbindlist(X.inter.list, use.names = TRUE)
    },
    # Compute per-feature permutation error and the importance ratio
    # permutation.error / original.error, sorted decreasing.
    aggregate = function() {
      y = private$dataDesign[, private$sampler$y.names, with = FALSE]
      y.hat = private$qResults
      # For classification we work with the class labels instead of probs
      result = data.table(feature = private$dataDesign$.feature,
        actual = y[[1]], predicted = y.hat[[1]])
      result = result[, list("original.error" = self$original.error,
        "permutation.error" = self$loss(actual, predicted)), by = feature]
      result[, importance := permutation.error / self$original.error]
      result = result[order(result$importance, decreasing = TRUE), ]
      result
    },
    # Lollipop chart of importances; segments are anchored at x = 1
    # (the "no importance" baseline of the ratio).
    generatePlot = function(sort = TRUE, ...) {
      res = self$results
      if (sort) {
        res$feature = factor(res$feature,
          levels = res$feature[order(res$importance)])
      }
      ggplot(res, aes(y = feature, x = importance)) +
        geom_point() +
        geom_segment(aes(y = feature, yend = feature, x = 1,
          xend = importance)) +
        scale_x_continuous("Feature Importance") +
        scale_y_discrete("Feature")
    },
    set.loss = function(loss) {
      self$loss = loss
    },
    printParameters = function() {
      cat("error function:", private$loss.string)
    }
  )
)

#' Plot Feature Importance
#'
#' plot.FeatureImp() plots the feature importance results of a FeatureImp object.
#'
#' @param x A FeatureImp R6 object
#' @param sort logical. Should the features be sorted in descending order? Defaults to TRUE.
#' @param ... Further arguments for the objects plot function
#' @return ggplot2 plot object
#' @export
#' @seealso
#' \link{FeatureImp}
#' @examples
#' if (require("rpart")) {
#' # We train a tree on the Boston dataset:
#' data("Boston", package = "MASS")
#' tree = rpart(medv ~ ., data = Boston)
#' y = Boston$medv
#' X = Boston[-which(names(Boston) == "medv")]
#' mod = Predictor$new(tree, data = X, y = y)
#'
#' # Compute feature importances as the performance drop in mean absolute error
#' imp = FeatureImp$new(mod, loss = "mae")
#'
#' # Plot the results directly
#' plot(imp)
#' }
plot.FeatureImp = function(x, sort = TRUE, ...) {
  x$plot(sort = sort, ...)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_edges.R \name{get_edges} \alias{get_edges} \title{Get node IDs associated with edges} \usage{ get_edges(x, edge_attr = NULL, match = NULL, return_type = "vector") } \arguments{ \item{x}{either a graph object of class \code{dgr_graph} that is created using \code{create_graph} or an edge data frame.} \item{edge_attr}{an optional character vector of edge attribute values for filtering the edges returned.} \item{match}{an option to provide a logical expression with a comparison operator (\code{>}, \code{<}, \code{==}, or \code{!=}) followed by a number for numerical filtering, or, a character string for filtering the edges returned through string matching.} \item{return_type}{using \code{vector} (the default), a vector of character objects representing the edges is provided. With \code{list} a list object will be provided that contains vectors of outgoing and incoming node IDs associated with edges. With \code{df}, a data frame containing outgoing and incoming node IDs associated with edges.} } \value{ a list, data frame, or a vector object, depending on the value given to \code{return_type}. } \description{ Obtain a vector, data frame, or list of node IDs from a graph object or an edge data frame. An optional filter by edge attribute can limit the set of edges returned. 
}
\examples{
# Create a node data frame (ndf)
nodes <- create_nodes(
  nodes = c("a", "b", "c", "d"),
  type = "letter",
  color = c("red", "green", "grey", "blue"),
  value = c(3.5, 2.6, 9.4, 2.7))

# Create an edge data frame (edf)
edges <- create_edges(
  from = c("a", "b", "c"),
  to = c("d", "c", "a"),
  rel = "leading_to",
  color = c("pink", "blue", "red"),
  value = c(3.9, 2.5, 7.3))

# Create a graph
graph <- create_graph(
  nodes_df = nodes,
  edges_df = edges)

# Get all edges within a graph, returned as a list
# (the default `return_type` is "vector", so request "list" explicitly)
get_edges(graph, return_type = "list")
#> [[1]]
#> [1] "a" "b" "c"
#>
#> [[2]]
#> [1] "d" "c" "a"

# Get all edges within a graph, returned as a
# data frame
get_edges(graph, return_type = "df")
#>   from to
#> 1    a  d
#> 2    b  c
#> 3    c  a

# Get all edges within a graph, returned as a vector
get_edges(graph, return_type = "vector")
#> [1] "a -> d" "b -> c" "c -> a"

# Get a vector of edges using a numeric
# comparison (i.e., all edges with a `value`
# attribute greater than 3)
get_edges(
  graph,
  edge_attr = "value",
  match = "> 3",
  return_type = "vector")
#> [1] "a -> d" "c -> a"

# Get a vector of edges using a match
get_edges(
  graph,
  edge_attr = "color",
  match = "pink",
  return_type = "vector")
#> [1] "a -> d"
}
/man/get_edges.Rd
no_license
timelyportfolio/DiagrammeR
R
false
true
2,653
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_edges.R \name{get_edges} \alias{get_edges} \title{Get node IDs associated with edges} \usage{ get_edges(x, edge_attr = NULL, match = NULL, return_type = "vector") } \arguments{ \item{x}{either a graph object of class \code{dgr_graph} that is created using \code{create_graph} or an edge data frame.} \item{edge_attr}{an optional character vector of edge attribute values for filtering the edges returned.} \item{match}{an option to provide a logical expression with a comparison operator (\code{>}, \code{<}, \code{==}, or \code{!=}) followed by a number for numerical filtering, or, a character string for filtering the edges returned through string matching.} \item{return_type}{using \code{vector} (the default), a vector of character objects representing the edges is provided. With \code{list} a list object will be provided that contains vectors of outgoing and incoming node IDs associated with edges. With \code{df}, a data frame containing outgoing and incoming node IDs associated with edges.} } \value{ a list, data frame, or a vector object, depending on the value given to \code{return_type}. } \description{ Obtain a vector, data frame, or list of node IDs from a graph object or an edge data frame. An optional filter by edge attribute can limit the set of edges returned. 
}
\examples{
# Create a node data frame (ndf)
nodes <- create_nodes(
  nodes = c("a", "b", "c", "d"),
  type = "letter",
  color = c("red", "green", "grey", "blue"),
  value = c(3.5, 2.6, 9.4, 2.7))

# Create an edge data frame (edf)
edges <- create_edges(
  from = c("a", "b", "c"),
  to = c("d", "c", "a"),
  rel = "leading_to",
  color = c("pink", "blue", "red"),
  value = c(3.9, 2.5, 7.3))

# Create a graph
graph <- create_graph(
  nodes_df = nodes,
  edges_df = edges)

# Get all edges within a graph, returned as a list
# (the default `return_type` is "vector", so request "list" explicitly)
get_edges(graph, return_type = "list")
#> [[1]]
#> [1] "a" "b" "c"
#>
#> [[2]]
#> [1] "d" "c" "a"

# Get all edges within a graph, returned as a
# data frame
get_edges(graph, return_type = "df")
#>   from to
#> 1    a  d
#> 2    b  c
#> 3    c  a

# Get all edges within a graph, returned as a vector
get_edges(graph, return_type = "vector")
#> [1] "a -> d" "b -> c" "c -> a"

# Get a vector of edges using a numeric
# comparison (i.e., all edges with a `value`
# attribute greater than 3)
get_edges(
  graph,
  edge_attr = "value",
  match = "> 3",
  return_type = "vector")
#> [1] "a -> d" "c -> a"

# Get a vector of edges using a match
get_edges(
  graph,
  edge_attr = "color",
  match = "pink",
  return_type = "vector")
#> [1] "a -> d"
}
# Validate the extensions of the input path and, when given, the output path.
assert_io_paths <- function(input, input_ext, output_file, output_file_ext) {
  assert_path_ext(input, input_ext, arg = "input")
  if (!is.null(output_file)) {
    assert_path_ext(output_file, output_file_ext, arg = "output_file")
  }
}

# Stop with an informative message when `path` does not carry one of the
# expected extensions; `arg` names the offending argument in the message.
assert_path_ext <- function(path, expected_ext, arg) {
  if (test_path_ext(path, expected_ext)) {
    return(invisible(NULL))
  }
  wanted <- paste0(".", expected_ext, collapse = ", ")
  stop("`", arg, "` must have extension: ", wanted, call. = FALSE)
}

# TRUE when the (case-insensitive) extension of `path` is one of
# `expected_ext`.
test_path_ext <- function(path, expected_ext) {
  tolower(fs::path_ext(path)) %in% expected_ext
}

# Fail loudly when no Chrome/Chromium-based browser can be located.
assert_chrome_installed <- function() {
  assert_chromote()
  error <- paste0(
    "This function requires a local installation of the Chrome ",
    "browser. You can also use other browsers based on Chromium, ",
    "such as Chromium itself, Edge, Vivaldi, Brave, or Opera.")
  chromePath <- tryCatch(
    chromote::find_chrome(),
    error = function(e) {
      message(error)
      NULL
    }
  )
  if (is.null(chromePath)) {
    stop(error)
  }
}

# Ensure the chromote package is installed and reasonably up to date.
assert_chromote <- function() {
  if (!requireNamespace("chromote", quietly = TRUE)) {
    stop("`chromote` is required: remotes::install_github('rstudio/chromote')")
  }
  minimum <- package_version("0.0.0.9003")
  if (utils::packageVersion("chromote") < minimum) {
    warning("Please upgrade `chromote` to version 0.0.0.9003 or later.")
  }
}

# Derive every input/output path variant (html, rmd, pdf, gif, pptx, mp4,
# zip, png, social card) from `input` and the optional `output_file`.
build_paths <- function(input, output_file = NULL) {
  from_url <- is_url(input)

  # Input paths: URLs keep their address; local files are made absolute.
  if (from_url) {
    input_root <- fs::path_abs(fs::path_file(input))
    input_html <- input
    input_url <- input
  } else {
    input_root <- fs::path_abs(input)
    input_html <- fs::path_ext_set(input_root, "html")
    input_url <- paste0("file://", input_html)
  }
  input_rmd <- fs::path_ext_set(input_root, "rmd")
  input_pdf <- fs::path_ext_set(input_root, "pdf")

  # Output root: defaults to the input's root when no output_file is given.
  output_root <- if (is.null(output_file)) {
    input_root
  } else {
    fs::path_abs(output_file)
  }

  # One output path per supported format.
  formats <- c(html = "html", pdf = "pdf", gif = "gif", pptx = "pptx",
               mp4 = "mp4", zip = "zip", png = "png")
  out <- lapply(formats, function(ext) fs::path_ext_set(output_root, ext))

  # Append "_social" to png outputs
  output_social <- out$png
  if (is.null(output_file)) {
    output_social <- append_to_file_path(out$png, "_social")
  }

  # Return path list
  list(
    input = list(
      url = input_url,
      html = input_html,
      rmd = input_rmd,
      pdf = input_pdf
    ),
    output = list(
      html = out$html,
      pdf = out$pdf,
      gif = out$gif,
      pptx = out$pptx,
      mp4 = out$mp4,
      zip = out$zip,
      png = out$png,
      social = output_social
    )
  )
}

# TRUE for http(s) and ftp addresses (case-insensitive prefix check).
is_url <- function(input) {
  grepl("^(ht|f)tp", tolower(input))
}

# Insert `s` between the file stem and its extension, e.g.
# append_to_file_path("file.png", "_social") -> "file_social.png"
append_to_file_path <- function(path, s) {
  stem <- fs::path_ext_remove(path)
  ext <- fs::path_ext(path)
  fs::path_ext_set(paste0(stem, s), ext)
}

# Emit a CLI progress line "Building <output> from <input>".
print_build_status <- function(input, output_file) {
  msg <- paste0("Building ", fs::path_file(output_file),
                " from ", fs::path_file(input))
  cli::cli_process_start(msg, on_exit = "done", .envir = parent.frame(n = 2))
}

# Render each page of a PDF as a magick image at the requested density.
pdf_to_pngs <- function(input, density) {
  magick::image_read_pdf(input, density = density)
}

# Build a PDF from either an .Rmd or an .html input; other extensions are
# silently ignored.
build_to_pdf <- function(input, paths, complex_slides, partial_slides, delay) {
  if (test_path_ext(input, "rmd")) {
    build_pdf(
      input = paths$input$rmd,
      output_file = paths$output$pdf,
      complex_slides, partial_slides, delay)
  } else if (test_path_ext(input, "html")) {
    build_pdf(
      input = input,
      output_file = paths$output$pdf,
      complex_slides, partial_slides, delay)
  }
}
/R/utils.R
permissive
vsntos/xaringanBuilder
R
false
false
4,475
r
# Validate the extensions of the input path and, when given, the output path.
assert_io_paths <- function(input, input_ext, output_file, output_file_ext) {
  assert_path_ext(input, input_ext, arg = "input")
  if (!is.null(output_file)) {
    assert_path_ext(output_file, output_file_ext, arg = "output_file")
  }
}

# Stop with an informative message when `path` does not carry one of the
# expected extensions; `arg` names the offending argument in the message.
assert_path_ext <- function(path, expected_ext, arg) {
  if (test_path_ext(path, expected_ext)) {
    return(invisible(NULL))
  }
  wanted <- paste0(".", expected_ext, collapse = ", ")
  stop("`", arg, "` must have extension: ", wanted, call. = FALSE)
}

# TRUE when the (case-insensitive) extension of `path` is one of
# `expected_ext`.
test_path_ext <- function(path, expected_ext) {
  tolower(fs::path_ext(path)) %in% expected_ext
}

# Fail loudly when no Chrome/Chromium-based browser can be located.
assert_chrome_installed <- function() {
  assert_chromote()
  error <- paste0(
    "This function requires a local installation of the Chrome ",
    "browser. You can also use other browsers based on Chromium, ",
    "such as Chromium itself, Edge, Vivaldi, Brave, or Opera.")
  chromePath <- tryCatch(
    chromote::find_chrome(),
    error = function(e) {
      message(error)
      NULL
    }
  )
  if (is.null(chromePath)) {
    stop(error)
  }
}

# Ensure the chromote package is installed and reasonably up to date.
assert_chromote <- function() {
  if (!requireNamespace("chromote", quietly = TRUE)) {
    stop("`chromote` is required: remotes::install_github('rstudio/chromote')")
  }
  minimum <- package_version("0.0.0.9003")
  if (utils::packageVersion("chromote") < minimum) {
    warning("Please upgrade `chromote` to version 0.0.0.9003 or later.")
  }
}

# Derive every input/output path variant (html, rmd, pdf, gif, pptx, mp4,
# zip, png, social card) from `input` and the optional `output_file`.
build_paths <- function(input, output_file = NULL) {
  from_url <- is_url(input)

  # Input paths: URLs keep their address; local files are made absolute.
  if (from_url) {
    input_root <- fs::path_abs(fs::path_file(input))
    input_html <- input
    input_url <- input
  } else {
    input_root <- fs::path_abs(input)
    input_html <- fs::path_ext_set(input_root, "html")
    input_url <- paste0("file://", input_html)
  }
  input_rmd <- fs::path_ext_set(input_root, "rmd")
  input_pdf <- fs::path_ext_set(input_root, "pdf")

  # Output root: defaults to the input's root when no output_file is given.
  output_root <- if (is.null(output_file)) {
    input_root
  } else {
    fs::path_abs(output_file)
  }

  # One output path per supported format.
  formats <- c(html = "html", pdf = "pdf", gif = "gif", pptx = "pptx",
               mp4 = "mp4", zip = "zip", png = "png")
  out <- lapply(formats, function(ext) fs::path_ext_set(output_root, ext))

  # Append "_social" to png outputs
  output_social <- out$png
  if (is.null(output_file)) {
    output_social <- append_to_file_path(out$png, "_social")
  }

  # Return path list
  list(
    input = list(
      url = input_url,
      html = input_html,
      rmd = input_rmd,
      pdf = input_pdf
    ),
    output = list(
      html = out$html,
      pdf = out$pdf,
      gif = out$gif,
      pptx = out$pptx,
      mp4 = out$mp4,
      zip = out$zip,
      png = out$png,
      social = output_social
    )
  )
}

# TRUE for http(s) and ftp addresses (case-insensitive prefix check).
is_url <- function(input) {
  grepl("^(ht|f)tp", tolower(input))
}

# Insert `s` between the file stem and its extension, e.g.
# append_to_file_path("file.png", "_social") -> "file_social.png"
append_to_file_path <- function(path, s) {
  stem <- fs::path_ext_remove(path)
  ext <- fs::path_ext(path)
  fs::path_ext_set(paste0(stem, s), ext)
}

# Emit a CLI progress line "Building <output> from <input>".
print_build_status <- function(input, output_file) {
  msg <- paste0("Building ", fs::path_file(output_file),
                " from ", fs::path_file(input))
  cli::cli_process_start(msg, on_exit = "done", .envir = parent.frame(n = 2))
}

# Render each page of a PDF as a magick image at the requested density.
pdf_to_pngs <- function(input, density) {
  magick::image_read_pdf(input, density = density)
}

# Build a PDF from either an .Rmd or an .html input; other extensions are
# silently ignored.
build_to_pdf <- function(input, paths, complex_slides, partial_slides, delay) {
  if (test_path_ext(input, "rmd")) {
    build_pdf(
      input = paths$input$rmd,
      output_file = paths$output$pdf,
      complex_slides, partial_slides, delay)
  } else if (test_path_ext(input, "html")) {
    build_pdf(
      input = input,
      output_file = paths$output$pdf,
      complex_slides, partial_slides, delay)
  }
}
# --------------------------- DRM cohort new 2----------------------------------- # --------------------------- 2017-06-28 --------------------------------------- library(gemini) lib.pa() rm(list = ls()) smh.phar <- readg(smh, phar, dt = T) sbk.phar <- readg(sbk, phar, dt = T) sbk.phar$EncID.new <- as.character(sbk.phar$EncID.new) sbk.phar$ndc_din[!is.na(sbk.phar$ndc_din)&str_detect(sbk.phar$ndc_din, "-")] <- str_split(sbk.phar$ndc_din[!is.na(sbk.phar$ndc_din)&str_detect(sbk.phar$ndc_din, "-")], "-") %>% unlist %>% matrix(ncol = 2, byrow = T) %>% `[`(,1) sbk.phar$ndc_din <- gsub("(?<![0-9])0+", "", sbk.phar$ndc_din, perl = TRUE) uhn.phar <- readg(uhn, phar.nophi, dt = T) uhn.phar$DIN <- gsub("(?<![0-9])0+", "", uhn.phar$DIN, perl = TRUE) drm.din <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/FINALDINLIST.xlsx") drm.din2 <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/FINALDINLIST2.xls") drm.din2$din <- gsub("(?<![0-9])0+", "", drm.din2$din, perl = TRUE) drm.din2 <- drm.din2[!is.na(drm.din2$din),] din.drm <- union(drm.din$`FINAL DINS`, drm.din2$din) smh.generic <- readxl::read_excel("H:/GEMINI/Results/DRM/abx_generic_not_din/SMH_GENERIC_PICKUPS.xlsx")%>% filter(include==1) %>% select(generic_name) uhn.generic <- readxl::read_excel("H:/GEMINI/Results/DRM/abx_generic_not_din/UHNlist_GENERICPICKUPS.xlsx")%>% filter(include==1) %>% select(Generic_Name) # include only those receiving abox within 48 h smh.abx <- smh.phar[din%in%din.drm|generic_name%in%smh.generic$generic_name] sbk.abx <- sbk.phar[(ndc_din%in%din.drm)] uhn.abx <- uhn.phar[(DIN%in%din.drm|toupper(Generic_Name)%in%toupper(uhn.generic$Generic_Name))] # # check how many captured by din and how many be generic name # smh.abx[, ':='(bydin = din%in%din.drm, bygene = generic_name%in%generic)] # sbk.abx[, ':='(bydin = ndc_din%in%din.drm, bygene = generic_name%in%generic)] # uhn.abx[, ':='(bydin = DIN%in%din.drm, bygene = toupper(Generic_Name)%in%generic)] # # checkdin_abx <- function(x){ # table(x[, .(bydin, 
bygene)]) # } # # checkdin_abx(smh.abx) # checkdin_abx(sbk.abx) # checkdin_abx(uhn.abx) # smh.abx[bygene&!bydin, din] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/smh.csv") # sbk.abx[bygene&!bydin, ndc_din] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/sbk.csv") # uhn.abx[bygene&!bydin, DIN] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/uhn.csv") # # # smh.abx[bygene&!bydin, .(generic_name, din, route, ord_frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/smh.generic.not.din.csv") # sbk.abx[bygene&!bydin, .(generic_name, ord_description, # ndc_din, route, frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/sbk.generic.not.din.csv") # uhn.abx[bygene&!bydin, .(Generic_Name, DIN, Route_Code, Frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/uhn.generic.not.din.csv") # apply(smh.abx, 2, function(x)sum(is.na(x))) # apply(sbk.abx, 2, function(x)sum(is.na(x))) # apply(uhn.abx, 2, function(x)sum(is.na(x))) # # frequency table of frequency # data.table(table(smh.abx[,ord_frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/smh.abx.freq.csv") # data.table(table(sbk.abx[,frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/sbk.abx.freq.csv") # data.table(table(uhn.abx[,Frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/uhn.abx.freq.csv") abx.inc <- rbind(smh.abx[,.(abx.dttm = ymd_hm(paste(start_date, start_time)), abx.stop.dttm = ymd_hm(paste(stop_date, stop_time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din, generic.name = generic_name, EncID.new)], sbk.abx[,.(abx.dttm = mdy_hms(paste(start_date, start_time)), abx.stop.dttm = mdy_hms(paste(stop_date, stop_time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din = ndc_din, generic.name = generic_name, EncID.new)], 
uhn.abx[,.(abx.dttm = dmy_hm(paste(str_sub(Order_Sta, 1, 10), Order_Start_Time)), abx.stop.dttm = dmy_hm(paste(Order_Sto, Order_Stop_Time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din = DIN, generic.name = toupper(Generic_Name), EncID.new)]) abx.inc[, .N, by = .(din, generic.name)][order(generic.name)] %>% fwrite("H:/GEMINI/Results/DRM/cohort_new/medication.frequency.csv") abx.inc[is.na(abx.stop.dttm), abx.stop.dttm := dis.dttm] apply(abx.inc, 2, function(x)sum(is.na(x))) abx.d1 <- abx.inc[date(abx.dttm)<=date(adm.dttm)&date(abx.stop.dttm)>=date(adm.dttm)] abx.d2 <- abx.inc[date(abx.dttm)<=date(adm.dttm)+days(1)&date(abx.stop.dttm)>=date(adm.dttm)+days(1)] abx.d3 <- abx.inc[date(abx.dttm)<=date(adm.dttm)+days(2)&date(abx.stop.dttm)>=date(adm.dttm)+days(2)] abx <- data.table(EncID.new = unique(abx.inc$EncID.new)) abx[, ':='(d1 = EncID.new%in%abx.d1$EncID.new, d2 = EncID.new%in%abx.d2$EncID.new, d3 = EncID.new%in%abx.d3$EncID.new)] abx[, n.abx := d1 + d2 + d3] table(abx$n.abx, useNA = "ifany") fwrite(abx, "H:/GEMINI/Results/DRM/new_170424/abx.new.csv") # --------------------------- culture ------------------------------------------ library(gemini) lib.pa() rm(list = ls()) smh.mic <- readg(smh, micro, dt = T) smh.mic <- smh.mic[!Org_Name%in%c("Coagulase negative Staphylococcus species", "Methicillin Resistant Staphylococcus aureus")] smh.mic[, paste := paste(Test_ID, Source)] culture.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/CultureClassification_Feb9.xlsx", sheet = 1) %>% data.table culture.marked[,paste := paste(Test_ID, Source)] names(culture.marked)[6] <- "Urine" names(smh.mic)[3] <- "Specimen_Collection_Date/Time" cul.ns.smh <- smh.mic[paste%in%culture.marked[NonScreening==1, paste]& !is.na(Isolate_num)] smh.mic[is.na(Isolate_num)] %>% dim cul.ns.smh <- cul.ns.smh[mdy_hm(`Specimen_Collection_Date/Time`)>=ymd_hm(paste(Admit.Date, Admit.Time))& 
mdy_hm(`Specimen_Collection_Date/Time`)<=(ymd_hm(paste(Admit.Date, Admit.Time))+hours(48))] smh.drm.cul <- merge(cul.ns.smh, culture.marked[,.(paste, Urine , Blood, Resp, Screening, NonScreening, NonBacterial, unknown)], by = "paste", all.x = T) smh.drm.cul <- smh.drm.cul[,.(EncID.new, Urine , Blood, Resp, Screening, NonScreening, NonBacterial)] smh.mic[!is.na(Isolate_num)] -> pos length(unique(pos$Order_Number))/length(unique(smh.mic$Order_Number)) length(pos$Order_Number)/length(smh.mic$Order_Number) #sbk culture.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/CultureClassification_Feb9.xlsx", sheet = 2)%>% data.table names(culture.marked)[5] <- "Urine" culture.marked[, paste := paste(culture_test_cd, description, specimen_source)] cul.ns <- culture.marked[NonScreening==1] sbk.mic <- readg(sbk, micro_pos.csv, dt = T) sbk.mic <- sbk.mic[!Organism%in%c("Staphylococcus capitis", "Staphylococcus epidermidis", "Staphylococcus schleiferi", "Staphylococcus species", "Coagulase negative staphylococci")] 12315/(115887+12315 ) sbk.mic[, paste:= paste(culture_test_cd, description, specimen_source)] cul.ns.sbk <- sbk.mic[paste%in%cul.ns$paste& mdy_hm(specimen_collection_datetime)>=ymd_hm(paste(Admit.Date, Admit.Time))& mdy_hm(specimen_collection_datetime)<=(ymd_hm(paste(Admit.Date, Admit.Time))+hours(48))] sbk.drm.cul <- merge(cul.ns.sbk, culture.marked[,.(paste, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)], by = "paste", all.x = T) sbk.drm.cul <- sbk.drm.cul[,.(EncID.new, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)] # UHN # Check the concordance bewteen the two marked files uhn.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/UHN_Source Setup.xls", sheet = 1)%>%data.table names(uhn.marked)[3] <- "Urine" uhn.micro <- rbind(readg(uhn.tgh, micro), readg(uhn.twh, micro), fill = T) uhn.micro <- uhn.micro[!ORG%in%c("Coagulase negative Staphylococcus", "Coagulase negative staphylococcus (not S. 
lugdunensis)", "Staphylococcus saprophyticus")] cul.ns.uhn <- uhn.micro[SRC%in%uhn.marked[NonScreening==1, SOURCE_ID]| (SRC=="NONE"&TEST%in%c("BC", "UCS")), ] cul.ns.uhn <- merge(cul.ns.uhn[,.(EncID.new, TEST, SRC)], uhn.marked[NonScreening==1, .(SOURCE_ID, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)], by.x = "SRC", by.y = "SOURCE_ID", all.x = T, all.y = F) cul.ns.uhn[SRC=="NONE"&TEST=="BC", ':='(Blood =1, NonScreening=1)] cul.ns.uhn[SRC=="NONE"&TEST=="UCS", ':='(Urine = 1, NonScreening=1)] drm.cul <- rbind(smh.drm.cul, sbk.drm.cul, cul.ns.uhn[,.(EncID.new, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)]) fwrite(drm.cul, "H:/GEMINI/Results/DRM/new_170424/drm.cul.ns.new.csv") # ------------------------------ feasibility ----------------------------------- rm(list = ls()) antibio.inc <- fread("H:/GEMINI/Results/DRM/new_170424/abx.new.csv") ns.cul <- fread("H:/GEMINI/Results/DRM/new_170424/drm.cul.ns.new.csv") drm.cohort <- intersect(antibio.inc[n.abx>=2, EncID.new], ns.cul$EncID.new) all.dad <- fread("H:/GEMINI/Results/DesignPaper/design.paper.dad.v4.csv") # ---------- Sample for Chart review --------------- # smh.drm <- all.dad[EncID.new%in%drm.cohort&Institution.Number=="SMH", # .(EncID.new, Admit.Date, Admit.Time, # Discharge.Date, Discharge.Time)] # smh.drm <- add_smh_mrn(smh.drm) # fwrite(smh.drm, "H:/GEMINI/Results/DRM/chart_review/DRM.smh.csv") # # # all.dad[EncID.new%in%antibio.inc[n.abx>=3, EncID.new], # Institution.Number] %>% table # all.dad[EncID.new%in%ns.cul$EncID.new, Institution.Number] %>% table # # drm.cohort <- drm.cohort[!drm.cohort%in%all.dad[Discharge.Disposition=="7"&LoS<=3, EncID.new]] # ---------------------- CREATE LIST FOR CHART REVIEW -------------------------- drm.cohort <- all.dad[EncID.new%in%drm.cohort] table(drm.cohort$Institution.Number) for(i in unique(drm.cohort$Institution.Number)){ dat <- drm.cohort[Institution.Number==i, .(EncID.new, Admit.Date, Admit.Time, Discharge.Date, Discharge.Time)] if(i 
=="SMH"){ link <- fread("R:/GEMINI/_RESTORE/SMH/CIHI/SMH.LINKLIST_NEWHASH.csv") link$EncID.new <- paste("11", link$EncID.new, sep = "") dat$EncID.new <- as.character(dat$EncID.new) dat <- merge(dat, link[,.(MRN, FIRSTNAME, LASTNAME,EncID.new)], by = "EncID.new", all.x = T, all.y = F) dat <- dat[,.(MRN, EncID.new, First.Name = FIRSTNAME, Last.Name = LASTNAME, Admit.Date, Admit.Time, Discharge.Date, Discharge.Time)] } fwrite(dat, paste("R:/GEMINI-DRM-TEAM/DRM-TEAM/cohort_", i, ".csv", sep = "")) }
/DRM/drm.cohort.new2.r
no_license
yishan-guo/gemini
R
false
false
11,971
r
# --------------------------- DRM cohort new 2----------------------------------- # --------------------------- 2017-06-28 --------------------------------------- library(gemini) lib.pa() rm(list = ls()) smh.phar <- readg(smh, phar, dt = T) sbk.phar <- readg(sbk, phar, dt = T) sbk.phar$EncID.new <- as.character(sbk.phar$EncID.new) sbk.phar$ndc_din[!is.na(sbk.phar$ndc_din)&str_detect(sbk.phar$ndc_din, "-")] <- str_split(sbk.phar$ndc_din[!is.na(sbk.phar$ndc_din)&str_detect(sbk.phar$ndc_din, "-")], "-") %>% unlist %>% matrix(ncol = 2, byrow = T) %>% `[`(,1) sbk.phar$ndc_din <- gsub("(?<![0-9])0+", "", sbk.phar$ndc_din, perl = TRUE) uhn.phar <- readg(uhn, phar.nophi, dt = T) uhn.phar$DIN <- gsub("(?<![0-9])0+", "", uhn.phar$DIN, perl = TRUE) drm.din <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/FINALDINLIST.xlsx") drm.din2 <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/FINALDINLIST2.xls") drm.din2$din <- gsub("(?<![0-9])0+", "", drm.din2$din, perl = TRUE) drm.din2 <- drm.din2[!is.na(drm.din2$din),] din.drm <- union(drm.din$`FINAL DINS`, drm.din2$din) smh.generic <- readxl::read_excel("H:/GEMINI/Results/DRM/abx_generic_not_din/SMH_GENERIC_PICKUPS.xlsx")%>% filter(include==1) %>% select(generic_name) uhn.generic <- readxl::read_excel("H:/GEMINI/Results/DRM/abx_generic_not_din/UHNlist_GENERICPICKUPS.xlsx")%>% filter(include==1) %>% select(Generic_Name) # include only those receiving abox within 48 h smh.abx <- smh.phar[din%in%din.drm|generic_name%in%smh.generic$generic_name] sbk.abx <- sbk.phar[(ndc_din%in%din.drm)] uhn.abx <- uhn.phar[(DIN%in%din.drm|toupper(Generic_Name)%in%toupper(uhn.generic$Generic_Name))] # # check how many captured by din and how many be generic name # smh.abx[, ':='(bydin = din%in%din.drm, bygene = generic_name%in%generic)] # sbk.abx[, ':='(bydin = ndc_din%in%din.drm, bygene = generic_name%in%generic)] # uhn.abx[, ':='(bydin = DIN%in%din.drm, bygene = toupper(Generic_Name)%in%generic)] # # checkdin_abx <- function(x){ # table(x[, .(bydin, 
bygene)]) # } # # checkdin_abx(smh.abx) # checkdin_abx(sbk.abx) # checkdin_abx(uhn.abx) # smh.abx[bygene&!bydin, din] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/smh.csv") # sbk.abx[bygene&!bydin, ndc_din] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/sbk.csv") # uhn.abx[bygene&!bydin, DIN] %>% table %>% data.table %>% fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/uhn.csv") # # # smh.abx[bygene&!bydin, .(generic_name, din, route, ord_frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/smh.generic.not.din.csv") # sbk.abx[bygene&!bydin, .(generic_name, ord_description, # ndc_din, route, frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/sbk.generic.not.din.csv") # uhn.abx[bygene&!bydin, .(Generic_Name, DIN, Route_Code, Frequency)] %>% unique %>% # fwrite("H:/GEMINI/Results/DRM/abx_generic_not_din/uhn.generic.not.din.csv") # apply(smh.abx, 2, function(x)sum(is.na(x))) # apply(sbk.abx, 2, function(x)sum(is.na(x))) # apply(uhn.abx, 2, function(x)sum(is.na(x))) # # frequency table of frequency # data.table(table(smh.abx[,ord_frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/smh.abx.freq.csv") # data.table(table(sbk.abx[,frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/sbk.abx.freq.csv") # data.table(table(uhn.abx[,Frequency])) %>% # fwrite("H:/GEMINI/Results/DRM/uhn.abx.freq.csv") abx.inc <- rbind(smh.abx[,.(abx.dttm = ymd_hm(paste(start_date, start_time)), abx.stop.dttm = ymd_hm(paste(stop_date, stop_time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din, generic.name = generic_name, EncID.new)], sbk.abx[,.(abx.dttm = mdy_hms(paste(start_date, start_time)), abx.stop.dttm = mdy_hms(paste(stop_date, stop_time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din = ndc_din, generic.name = generic_name, EncID.new)], 
uhn.abx[,.(abx.dttm = dmy_hm(paste(str_sub(Order_Sta, 1, 10), Order_Start_Time)), abx.stop.dttm = dmy_hm(paste(Order_Sto, Order_Stop_Time)), adm.dttm = ymd_hm(paste(Admit.Date, Admit.Time)), dis.dttm = ymd_hm(paste(Discharge.Date, Discharge.Time)), din = DIN, generic.name = toupper(Generic_Name), EncID.new)]) abx.inc[, .N, by = .(din, generic.name)][order(generic.name)] %>% fwrite("H:/GEMINI/Results/DRM/cohort_new/medication.frequency.csv") abx.inc[is.na(abx.stop.dttm), abx.stop.dttm := dis.dttm] apply(abx.inc, 2, function(x)sum(is.na(x))) abx.d1 <- abx.inc[date(abx.dttm)<=date(adm.dttm)&date(abx.stop.dttm)>=date(adm.dttm)] abx.d2 <- abx.inc[date(abx.dttm)<=date(adm.dttm)+days(1)&date(abx.stop.dttm)>=date(adm.dttm)+days(1)] abx.d3 <- abx.inc[date(abx.dttm)<=date(adm.dttm)+days(2)&date(abx.stop.dttm)>=date(adm.dttm)+days(2)] abx <- data.table(EncID.new = unique(abx.inc$EncID.new)) abx[, ':='(d1 = EncID.new%in%abx.d1$EncID.new, d2 = EncID.new%in%abx.d2$EncID.new, d3 = EncID.new%in%abx.d3$EncID.new)] abx[, n.abx := d1 + d2 + d3] table(abx$n.abx, useNA = "ifany") fwrite(abx, "H:/GEMINI/Results/DRM/new_170424/abx.new.csv") # --------------------------- culture ------------------------------------------ library(gemini) lib.pa() rm(list = ls()) smh.mic <- readg(smh, micro, dt = T) smh.mic <- smh.mic[!Org_Name%in%c("Coagulase negative Staphylococcus species", "Methicillin Resistant Staphylococcus aureus")] smh.mic[, paste := paste(Test_ID, Source)] culture.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/CultureClassification_Feb9.xlsx", sheet = 1) %>% data.table culture.marked[,paste := paste(Test_ID, Source)] names(culture.marked)[6] <- "Urine" names(smh.mic)[3] <- "Specimen_Collection_Date/Time" cul.ns.smh <- smh.mic[paste%in%culture.marked[NonScreening==1, paste]& !is.na(Isolate_num)] smh.mic[is.na(Isolate_num)] %>% dim cul.ns.smh <- cul.ns.smh[mdy_hm(`Specimen_Collection_Date/Time`)>=ymd_hm(paste(Admit.Date, Admit.Time))& 
mdy_hm(`Specimen_Collection_Date/Time`)<=(ymd_hm(paste(Admit.Date, Admit.Time))+hours(48))] smh.drm.cul <- merge(cul.ns.smh, culture.marked[,.(paste, Urine , Blood, Resp, Screening, NonScreening, NonBacterial, unknown)], by = "paste", all.x = T) smh.drm.cul <- smh.drm.cul[,.(EncID.new, Urine , Blood, Resp, Screening, NonScreening, NonBacterial)] smh.mic[!is.na(Isolate_num)] -> pos length(unique(pos$Order_Number))/length(unique(smh.mic$Order_Number)) length(pos$Order_Number)/length(smh.mic$Order_Number) #sbk culture.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/CultureClassification_Feb9.xlsx", sheet = 2)%>% data.table names(culture.marked)[5] <- "Urine" culture.marked[, paste := paste(culture_test_cd, description, specimen_source)] cul.ns <- culture.marked[NonScreening==1] sbk.mic <- readg(sbk, micro_pos.csv, dt = T) sbk.mic <- sbk.mic[!Organism%in%c("Staphylococcus capitis", "Staphylococcus epidermidis", "Staphylococcus schleiferi", "Staphylococcus species", "Coagulase negative staphylococci")] 12315/(115887+12315 ) sbk.mic[, paste:= paste(culture_test_cd, description, specimen_source)] cul.ns.sbk <- sbk.mic[paste%in%cul.ns$paste& mdy_hm(specimen_collection_datetime)>=ymd_hm(paste(Admit.Date, Admit.Time))& mdy_hm(specimen_collection_datetime)<=(ymd_hm(paste(Admit.Date, Admit.Time))+hours(48))] sbk.drm.cul <- merge(cul.ns.sbk, culture.marked[,.(paste, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)], by = "paste", all.x = T) sbk.drm.cul <- sbk.drm.cul[,.(EncID.new, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)] # UHN # Check the concordance bewteen the two marked files uhn.marked <- readxl::read_excel("H:/GEMINI/Feasibility/DRM/UHN_Source Setup.xls", sheet = 1)%>%data.table names(uhn.marked)[3] <- "Urine" uhn.micro <- rbind(readg(uhn.tgh, micro), readg(uhn.twh, micro), fill = T) uhn.micro <- uhn.micro[!ORG%in%c("Coagulase negative Staphylococcus", "Coagulase negative staphylococcus (not S. 
lugdunensis)", "Staphylococcus saprophyticus")] cul.ns.uhn <- uhn.micro[SRC%in%uhn.marked[NonScreening==1, SOURCE_ID]| (SRC=="NONE"&TEST%in%c("BC", "UCS")), ] cul.ns.uhn <- merge(cul.ns.uhn[,.(EncID.new, TEST, SRC)], uhn.marked[NonScreening==1, .(SOURCE_ID, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)], by.x = "SRC", by.y = "SOURCE_ID", all.x = T, all.y = F) cul.ns.uhn[SRC=="NONE"&TEST=="BC", ':='(Blood =1, NonScreening=1)] cul.ns.uhn[SRC=="NONE"&TEST=="UCS", ':='(Urine = 1, NonScreening=1)] drm.cul <- rbind(smh.drm.cul, sbk.drm.cul, cul.ns.uhn[,.(EncID.new, Urine, Blood, Resp, Screening, NonScreening, NonBacterial)]) fwrite(drm.cul, "H:/GEMINI/Results/DRM/new_170424/drm.cul.ns.new.csv") # ------------------------------ feasibility ----------------------------------- rm(list = ls()) antibio.inc <- fread("H:/GEMINI/Results/DRM/new_170424/abx.new.csv") ns.cul <- fread("H:/GEMINI/Results/DRM/new_170424/drm.cul.ns.new.csv") drm.cohort <- intersect(antibio.inc[n.abx>=2, EncID.new], ns.cul$EncID.new) all.dad <- fread("H:/GEMINI/Results/DesignPaper/design.paper.dad.v4.csv") # ---------- Sample for Chart review --------------- # smh.drm <- all.dad[EncID.new%in%drm.cohort&Institution.Number=="SMH", # .(EncID.new, Admit.Date, Admit.Time, # Discharge.Date, Discharge.Time)] # smh.drm <- add_smh_mrn(smh.drm) # fwrite(smh.drm, "H:/GEMINI/Results/DRM/chart_review/DRM.smh.csv") # # # all.dad[EncID.new%in%antibio.inc[n.abx>=3, EncID.new], # Institution.Number] %>% table # all.dad[EncID.new%in%ns.cul$EncID.new, Institution.Number] %>% table # # drm.cohort <- drm.cohort[!drm.cohort%in%all.dad[Discharge.Disposition=="7"&LoS<=3, EncID.new]] # ---------------------- CREATE LIST FOR CHART REVIEW -------------------------- drm.cohort <- all.dad[EncID.new%in%drm.cohort] table(drm.cohort$Institution.Number) for(i in unique(drm.cohort$Institution.Number)){ dat <- drm.cohort[Institution.Number==i, .(EncID.new, Admit.Date, Admit.Time, Discharge.Date, Discharge.Time)] if(i 
=="SMH"){ link <- fread("R:/GEMINI/_RESTORE/SMH/CIHI/SMH.LINKLIST_NEWHASH.csv") link$EncID.new <- paste("11", link$EncID.new, sep = "") dat$EncID.new <- as.character(dat$EncID.new) dat <- merge(dat, link[,.(MRN, FIRSTNAME, LASTNAME,EncID.new)], by = "EncID.new", all.x = T, all.y = F) dat <- dat[,.(MRN, EncID.new, First.Name = FIRSTNAME, Last.Name = LASTNAME, Admit.Date, Admit.Time, Discharge.Date, Discharge.Time)] } fwrite(dat, paste("R:/GEMINI-DRM-TEAM/DRM-TEAM/cohort_", i, ".csv", sep = "")) }
# Data Analysis | Reporting Practices # Import data: myData <- read.csv("AllData.csv", header = TRUE) head(myData) ### Correlations: correlationMatrix <- round(cor(myData), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 correlationMatrix[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 correlationMatrix[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 correlationMatrix[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 correlationMatrix[7, 12] # Bias.Strength & Belief.Var.Error = 0.43 correlationMatrix[4, 10] # Hyperbole & Belief.Var.Error = 0.33 correlationMatrix[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 correlationMatrix[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 correlationMatrix[7, 10] colnames(myData) ### Conditional Correlation (Bias.Strength = 1): conditionalcorrelationMatrix1 <- round(cor(myData[which(myData$Bias.Strength == 1),]), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 conditionalcorrelationMatrix1[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 conditionalcorrelationMatrix1[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 conditionalcorrelationMatrix1[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 conditionalcorrelationMatrix1[7, 12] # Bias.Strength & Belief.Var.Error = 0.43 conditionalcorrelationMatrix1[4, 10] # Hyperbole & Belief.Var.Error = 0.33 conditionalcorrelationMatrix1[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 conditionalcorrelationMatrix1[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 conditionalcorrelationMatrix1[7, 10] ### Conditional Correlation (Bias.Strength = 0.1): conditionalcorrelationMatrix0 <- round(cor(myData[which(myData$Bias.Strength == 0.1),]), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 conditionalcorrelationMatrix0[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 conditionalcorrelationMatrix0[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 conditionalcorrelationMatrix0[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 conditionalcorrelationMatrix0[7, 12] # Bias.Strength & Belief.Var.Error = 
0.43 conditionalcorrelationMatrix0[4, 10] # Hyperbole & Belief.Var.Error = 0.33 conditionalcorrelationMatrix0[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 conditionalcorrelationMatrix0[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 conditionalcorrelationMatrix0[7, 10]
/resources/Data Analysis + Reporting Practices.R
permissive
amohseni/Reporting-and-Bias-Gamma
R
false
false
2,284
r
# Data Analysis | Reporting Practices # Import data: myData <- read.csv("AllData.csv", header = TRUE) head(myData) ### Correlations: correlationMatrix <- round(cor(myData), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 correlationMatrix[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 correlationMatrix[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 correlationMatrix[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 correlationMatrix[7, 12] # Bias.Strength & Belief.Var.Error = 0.43 correlationMatrix[4, 10] # Hyperbole & Belief.Var.Error = 0.33 correlationMatrix[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 correlationMatrix[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 correlationMatrix[7, 10] colnames(myData) ### Conditional Correlation (Bias.Strength = 1): conditionalcorrelationMatrix1 <- round(cor(myData[which(myData$Bias.Strength == 1),]), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 conditionalcorrelationMatrix1[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 conditionalcorrelationMatrix1[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 conditionalcorrelationMatrix1[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 conditionalcorrelationMatrix1[7, 12] # Bias.Strength & Belief.Var.Error = 0.43 conditionalcorrelationMatrix1[4, 10] # Hyperbole & Belief.Var.Error = 0.33 conditionalcorrelationMatrix1[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 conditionalcorrelationMatrix1[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 conditionalcorrelationMatrix1[7, 10] ### Conditional Correlation (Bias.Strength = 0.1): conditionalcorrelationMatrix0 <- round(cor(myData[which(myData$Bias.Strength == 0.1),]), digits = 2) # Bias.Strength & Belief-Mean.MSE = 0.43 conditionalcorrelationMatrix0[4, 12] # Hyperbole & Belief-Mean.MSE = 0.33 conditionalcorrelationMatrix0[5, 12] # Extermity Bias & Belief-Mean.MSE = 0.37 conditionalcorrelationMatrix0[6, 12] # Fair+Balanced & Belief-Mean.MSE = 0.09 conditionalcorrelationMatrix0[7, 12] # Bias.Strength & Belief.Var.Error = 
0.43 conditionalcorrelationMatrix0[4, 10] # Hyperbole & Belief.Var.Error = 0.33 conditionalcorrelationMatrix0[5, 10] # Extermity Bias & Belief.Var.Error = 0.37 conditionalcorrelationMatrix0[6, 10] # Fair+Balanced & Belief.Var.Error = 0.09 conditionalcorrelationMatrix0[7, 10]
require(car) require(ggplot2) require(qcc) require(gdata) require(readxl) require(reshape2) dadosentry <- read.table("dadoscontrol.txt", h=T) dadosentry entrada1 <- read_excel("Procedimentos_2017-2018.xls",sheet=3, na = "NA",skip = 1) summary(entrada1) transpose1 <- melt(entrada1, value.name = "Qtd. Util.", variable.name = c("Meses")) entrada2 <- read_excel("Procedimentos_2017-2018.xls",sheet=4, na = "NA",skip = 1) summary(entrada2) transpose2 <- melt(entrada2, value.name = "Qtd. Benef.", variable.name = c("Meses")) entrada3 <- read_excel("Procedimentos_2017-2018.xls",sheet=5, na = "NA",skip = 1) summary(entrada3) transpose3 <- melt(entrada3, value.name = "Valor Proced.", variable.name = c("Meses")) uniao1 = merge(x=transpose1, y=transpose2, by = c("Cod. Procedimento", "Meses")) uniaofinal = merge(x=uniao1, y=transpose3, by = c("Cod. Procedimento", "Meses")) dataframe <- as.data.frame(cod = uniaofinal$`Cod. Procedimento`, meses = uniaofinal$Meses, valor = uniaofinal$`Valor Proced.`, limiteinf = uniaofinal$limits[[1]], limitesup = uniaofinal$limits[[2]]) require(qcc) require(dplyr) require(broom) ze = uniaofinal %>% group_by("Cod. Procedimento") %>% data.frame(qcc(.$`Valor Proced.`, type="xbar.one", confidence.level = 0.90, plot = F))
/Rotina Procedimentos.R
no_license
maguiiiar/projetos_unimed
R
false
false
1,271
r
require(car) require(ggplot2) require(qcc) require(gdata) require(readxl) require(reshape2) dadosentry <- read.table("dadoscontrol.txt", h=T) dadosentry entrada1 <- read_excel("Procedimentos_2017-2018.xls",sheet=3, na = "NA",skip = 1) summary(entrada1) transpose1 <- melt(entrada1, value.name = "Qtd. Util.", variable.name = c("Meses")) entrada2 <- read_excel("Procedimentos_2017-2018.xls",sheet=4, na = "NA",skip = 1) summary(entrada2) transpose2 <- melt(entrada2, value.name = "Qtd. Benef.", variable.name = c("Meses")) entrada3 <- read_excel("Procedimentos_2017-2018.xls",sheet=5, na = "NA",skip = 1) summary(entrada3) transpose3 <- melt(entrada3, value.name = "Valor Proced.", variable.name = c("Meses")) uniao1 = merge(x=transpose1, y=transpose2, by = c("Cod. Procedimento", "Meses")) uniaofinal = merge(x=uniao1, y=transpose3, by = c("Cod. Procedimento", "Meses")) dataframe <- as.data.frame(cod = uniaofinal$`Cod. Procedimento`, meses = uniaofinal$Meses, valor = uniaofinal$`Valor Proced.`, limiteinf = uniaofinal$limits[[1]], limitesup = uniaofinal$limits[[2]]) require(qcc) require(dplyr) require(broom) ze = uniaofinal %>% group_by("Cod. Procedimento") %>% data.frame(qcc(.$`Valor Proced.`, type="xbar.one", confidence.level = 0.90, plot = F))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/day05.R \name{day05_decodeop} \alias{day05_decodeop} \title{Decode operation} \usage{ day05_decodeop(opcode) } \arguments{ \item{opcode}{two digit operation code} } \description{ Decode operation }
/man/day05_decodeop.Rd
permissive
EvgenyPetrovsky/aoc2019
R
false
true
276
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/day05.R \name{day05_decodeop} \alias{day05_decodeop} \title{Decode operation} \usage{ day05_decodeop(opcode) } \arguments{ \item{opcode}{two digit operation code} } \description{ Decode operation }
require(shiny) require(shinythemes) # Define input widget for lognormal distribution logN.input <- function(title, suffix, E.value, cv.value){ wellPanel( style = "padding: 5px;", h5(title), sliderInput(paste0("E_", suffix), "Mean catch:", min = 0, max = 2, step = 0.05, value = E.value, ticks = FALSE), sliderInput(paste0("cv_", suffix), "Coef. variation (%):", min = 0, max = 100, step = 5, value = cv.value, ticks = FALSE) ) } # Define input widget for beta distribution beta.input <- function(title, suffix, p.value, cv.value){ wellPanel( style = "padding: 5px;", h5(title), sliderInput(paste0("p_", suffix), "Expected probability:", min = 0, max = 1, step = 0.05, value = p.value, ticks = FALSE), sliderInput(paste0("cv_", suffix), "Coef. variation (%):", min = 0, max = 100, step = 5, value = cv.value, ticks = FALSE) ) } # Define input widget for management options mng_input <- function(id, title){ # wellPanel( #style = "padding: 5px;", checkboxGroupInput(id, label = h4(title), choices = list("Ban Shark lines" = "NoShkln", "Ban wire trace" = "NoWire", "Ban shallow hooks" = "NoShallow", "Restrict to Circle-hooks only" = "AllCircle"), selected = NULL, inline = FALSE) # ) } # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ shinyUI( navbarPage("Impact of longlining in sharks: simulation of mitigation measures", theme = shinytheme("flatly"), # 1st Tab ------------------------------------------------------------------ tabPanel("Step 1: Choose species & input distributions", tags$style(type="text/css", "label {font-size: 13px;}", ".recalculating {opacity: 1.0;}" ), tags$head( tags$link(rel = "stylesheet", type = "text/css", href = "slidebarColours.css") ), sidebarLayout( sidebarPanel(width = 3, selectInput("spp", label = h3("Choose species"), choices = list("Oceanic whitetip shark", "Silky shark"), 
selected = "Oceanic whitetip shark"), em("NOTE: Hyperparameter's default values as specified in ", a("Shelton et al. (2015)", href = "https://dl.dropboxusercontent.com/u/250971/EB-WP-02-%5BMC_sharks%5D.pdf")) ), mainPanel( br(), h3("Specify the input distributions"), br(), p("<Some text here (explaining the two components of the simulation model?)>"), br(), tabsetPanel( tabPanel(h4("Catch Component"), h4("Catch Rate per 100 hooks in:"), br(), fluidRow( column(5, fluidRow( column(6, logN.input("Shark lines", "shkln", E.value = 0.620, cv.value = 5)), column(6, logN.input("Shallow Hooks", "shll", E.value = 0.008, cv.value = 4)) ), fluidRow( column(6, logN.input("Deep Hooks", "deep", E.value = 0.016, cv.value = 3)) )), br(), column(7, plotOutput("cbtyPlot")) ) #fluidRow(column(3, verbatimTextOutput("value3"))), #fluidRow(column(3, verbatimTextOutput("value4"))), ), tabPanel(h4("Fate Component"), br(), p(em("NOTE: The upper limit of the CV for probability inputs are defined by the Beta distn constraint: CV < sqrt((1-p)/p)")), br(), h4("Probability of lip hook (vs. 
gut hook) given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("J-Hook", "LHP.J", p.value = 0.3, cv.value = 43)), column(6, beta.input("T-Hook", "LHP.T", p.value = 0.33, cv.value = 40)) ), fluidRow( column(6, beta.input("C-Hook", "LHP.C", p.value = 0.9, cv.value = 10)) )), br(), column(7, plotOutput("LHP")) ), hr(), h4("Probability of bite-off given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Mono Leader & lip-hooked", "BOP.ML", p.value = 0.33, cv.value = 10)), column(6, beta.input("Mono Leader & gut-hooked", "BOP.MG", p.value = 0.72, cv.value = 20)) ), fluidRow( column(6, beta.input("Wire Leader & lip-hooked", "BOP.WL", p.value = 0.01, cv.value = 10)), column(6, beta.input("Wire Leader & gut-hooked", "BOP.WG", p.value = 0.01, cv.value = 10)) )), br(), column(7, plotOutput("BOP")) ), hr(), h4("Probability of mortality given bite-off and:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Lip-hooked", "BOM.L", p.value = 0.03, cv.value = 95)), column(6, beta.input("Gut-hooked", "BOM.G", p.value = 0.06, cv.value = 80)) )), br(), column(7, plotOutput("BOM")) ), hr(), h4("Probability of mortality at retrieval given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Lip-hooked", "RM.L", p.value = 0.19, cv.value = 5)), column(6, beta.input("Gut-hooked", "RM.G", p.value = 0.19, cv.value = 5)) )), br(), column(7, plotOutput("RM")) ), hr(), h4("Probability of release in-water (vs. 
brought-on then released):"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("", "WRP", p.value = 0.5, cv.value = 30)) )), br(), column(7, plotOutput("WRP")) ), hr(), h4("Probability of mortality upon release given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("In-water release & lip-hooked", "URM.WL", p.value = 0.15, cv.value = 25)), column(6, beta.input("In-water release & gut-hooked", "URM.WG", p.value = 0.19, cv.value = 20)) ), fluidRow( column(6, beta.input("Landed release & lip-hooked", "URM.LL", p.value = 0.34, cv.value = 15)), column(6, beta.input("Landed release & gut-hooked", "URM.LG", p.value = 0.44, cv.value = 12)) )), br(), column(7, plotOutput("URM")) ) ))))), # 2nd Tab ------------------------------------------------------------------ tabPanel("Step 2: Choose management scenarios & Run MC simulation", sidebarLayout( sidebarPanel(width = 3, h3("Management scenarios"), br(), p("Within each management scenario frame:"), tags$ul( tags$li("Select one or a combination of options"), tags$li("If none of the boxes is selected, the management scenario is not considered") ), br(), mng_input("MngScn1", "Management Scenario 1"), hr(), mng_input("MngScn2", "Management Scenario 2"), hr(), mng_input("MngScn3", "Management Scenario 3"), hr(), mng_input("MngScn4", "Management Scenario 4") ), mainPanel( fluidRow( column(3, numericInput("nsims", label = h4("Number of simulations"), value = 1000)), column(3, selectInput("bskSize", label = h4("Basket Size"), choices = list("20", "25", "30", "35", "40"), selected = "30")) ), br(), fluidRow( column(5, actionButton("simButton", h4("Run Simulation"))) ), # fluidRow( # column(6, verbatimTextOutput("value1")) # ), br(), br(), br(), tabsetPanel( tabPanel(h4("Catch and mortality"), br(), h5("Monte Carlo distributions of catch and mortality under each scenario"), fluidRow(column(8, plotOutput("MCplots_catchMort", height = "550px"), offset = 2)), br(), br(), hr(), h5("Monte Carlo percentiles"), br(), 
fluidRow(column(8, p("Total Catch"), tableOutput("tab_summCatch"))), fluidRow(column(8, p("Mortality"), tableOutput("tab_summMort"))) ), tabPanel(h4("Mortality rate"), br(), h5("Monte Carlo distributions of mortality rate (i.e. deaths/catch) under each scenario"), fluidRow(column(6, plotOutput("MCplots_MortRate", height = "550px"), offset = 3)), br(), br(), hr(), h5("Monte Carlo percentiles"), br(), fluidRow(column(8, tableOutput("tab_summMortRate"), offset = 3)) ), tabPanel(h4("Mortality components"), br(), h5("Median of Monte Carlo distributions of mortality components under each scenario"), fluidRow(column(10, plotOutput("MCplots_MedianMortElem", height = "500px"), offset = 1)) )) ))) # # 3nd Tab ------------------------------------------------------------------ # # tabPanel("Step 3: Run simulation & Outputs", # tabsetPanel( # tabPanel("Contrast Plots", # h4("Monte Carlo distributions of catch and mortality under each scenario"), # fluidRow(column(6, plotOutput("MCplots_catchMort", height = "650px"), offset = 2)), # br(), # # hr(), # br(), # h4("Monte Carlo distributions of mortality rate (i.e. deaths/catch) under each scenario"), # fluidRow( # column(5, plotOutput("MCplots_MortRate", height = "650px"), offset = 2) # # column(4, # # br(), br(), br(), br(), # # tableOutput("table")) # ), # # hr(), # br(), # h4("Median of Monte Carlo distributions of mortality components under each scenario"), # fluidRow(column(8, plotOutput("MCplots_MedianMortElem", height = "650px"), offset = 2)) # # ), # # # tabPanel("Contrast summary tables", # # h4("Overall mortality rate (i.e. deaths/catch)"), # tableOutput("table") # ) # )) ))
/ui.R
no_license
dmpstats/shiny-shark
R
false
false
17,204
r
require(shiny) require(shinythemes) # Define input widget for lognormal distribution logN.input <- function(title, suffix, E.value, cv.value){ wellPanel( style = "padding: 5px;", h5(title), sliderInput(paste0("E_", suffix), "Mean catch:", min = 0, max = 2, step = 0.05, value = E.value, ticks = FALSE), sliderInput(paste0("cv_", suffix), "Coef. variation (%):", min = 0, max = 100, step = 5, value = cv.value, ticks = FALSE) ) } # Define input widget for beta distribution beta.input <- function(title, suffix, p.value, cv.value){ wellPanel( style = "padding: 5px;", h5(title), sliderInput(paste0("p_", suffix), "Expected probability:", min = 0, max = 1, step = 0.05, value = p.value, ticks = FALSE), sliderInput(paste0("cv_", suffix), "Coef. variation (%):", min = 0, max = 100, step = 5, value = cv.value, ticks = FALSE) ) } # Define input widget for management options mng_input <- function(id, title){ # wellPanel( #style = "padding: 5px;", checkboxGroupInput(id, label = h4(title), choices = list("Ban Shark lines" = "NoShkln", "Ban wire trace" = "NoWire", "Ban shallow hooks" = "NoShallow", "Restrict to Circle-hooks only" = "AllCircle"), selected = NULL, inline = FALSE) # ) } # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ shinyUI( navbarPage("Impact of longlining in sharks: simulation of mitigation measures", theme = shinytheme("flatly"), # 1st Tab ------------------------------------------------------------------ tabPanel("Step 1: Choose species & input distributions", tags$style(type="text/css", "label {font-size: 13px;}", ".recalculating {opacity: 1.0;}" ), tags$head( tags$link(rel = "stylesheet", type = "text/css", href = "slidebarColours.css") ), sidebarLayout( sidebarPanel(width = 3, selectInput("spp", label = h3("Choose species"), choices = list("Oceanic whitetip shark", "Silky shark"), 
selected = "Oceanic whitetip shark"), em("NOTE: Hyperparameter's default values as specified in ", a("Shelton et al. (2015)", href = "https://dl.dropboxusercontent.com/u/250971/EB-WP-02-%5BMC_sharks%5D.pdf")) ), mainPanel( br(), h3("Specify the input distributions"), br(), p("<Some text here (explaining the two components of the simulation model?)>"), br(), tabsetPanel( tabPanel(h4("Catch Component"), h4("Catch Rate per 100 hooks in:"), br(), fluidRow( column(5, fluidRow( column(6, logN.input("Shark lines", "shkln", E.value = 0.620, cv.value = 5)), column(6, logN.input("Shallow Hooks", "shll", E.value = 0.008, cv.value = 4)) ), fluidRow( column(6, logN.input("Deep Hooks", "deep", E.value = 0.016, cv.value = 3)) )), br(), column(7, plotOutput("cbtyPlot")) ) #fluidRow(column(3, verbatimTextOutput("value3"))), #fluidRow(column(3, verbatimTextOutput("value4"))), ), tabPanel(h4("Fate Component"), br(), p(em("NOTE: The upper limit of the CV for probability inputs are defined by the Beta distn constraint: CV < sqrt((1-p)/p)")), br(), h4("Probability of lip hook (vs. 
gut hook) given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("J-Hook", "LHP.J", p.value = 0.3, cv.value = 43)), column(6, beta.input("T-Hook", "LHP.T", p.value = 0.33, cv.value = 40)) ), fluidRow( column(6, beta.input("C-Hook", "LHP.C", p.value = 0.9, cv.value = 10)) )), br(), column(7, plotOutput("LHP")) ), hr(), h4("Probability of bite-off given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Mono Leader & lip-hooked", "BOP.ML", p.value = 0.33, cv.value = 10)), column(6, beta.input("Mono Leader & gut-hooked", "BOP.MG", p.value = 0.72, cv.value = 20)) ), fluidRow( column(6, beta.input("Wire Leader & lip-hooked", "BOP.WL", p.value = 0.01, cv.value = 10)), column(6, beta.input("Wire Leader & gut-hooked", "BOP.WG", p.value = 0.01, cv.value = 10)) )), br(), column(7, plotOutput("BOP")) ), hr(), h4("Probability of mortality given bite-off and:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Lip-hooked", "BOM.L", p.value = 0.03, cv.value = 95)), column(6, beta.input("Gut-hooked", "BOM.G", p.value = 0.06, cv.value = 80)) )), br(), column(7, plotOutput("BOM")) ), hr(), h4("Probability of mortality at retrieval given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("Lip-hooked", "RM.L", p.value = 0.19, cv.value = 5)), column(6, beta.input("Gut-hooked", "RM.G", p.value = 0.19, cv.value = 5)) )), br(), column(7, plotOutput("RM")) ), hr(), h4("Probability of release in-water (vs. 
brought-on then released):"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("", "WRP", p.value = 0.5, cv.value = 30)) )), br(), column(7, plotOutput("WRP")) ), hr(), h4("Probability of mortality upon release given:"), br(), fluidRow( column(5, fluidRow( column(6, beta.input("In-water release & lip-hooked", "URM.WL", p.value = 0.15, cv.value = 25)), column(6, beta.input("In-water release & gut-hooked", "URM.WG", p.value = 0.19, cv.value = 20)) ), fluidRow( column(6, beta.input("Landed release & lip-hooked", "URM.LL", p.value = 0.34, cv.value = 15)), column(6, beta.input("Landed release & gut-hooked", "URM.LG", p.value = 0.44, cv.value = 12)) )), br(), column(7, plotOutput("URM")) ) ))))), # 2nd Tab ------------------------------------------------------------------ tabPanel("Step 2: Choose management scenarios & Run MC simulation", sidebarLayout( sidebarPanel(width = 3, h3("Management scenarios"), br(), p("Within each management scenario frame:"), tags$ul( tags$li("Select one or a combination of options"), tags$li("If none of the boxes is selected, the management scenario is not considered") ), br(), mng_input("MngScn1", "Management Scenario 1"), hr(), mng_input("MngScn2", "Management Scenario 2"), hr(), mng_input("MngScn3", "Management Scenario 3"), hr(), mng_input("MngScn4", "Management Scenario 4") ), mainPanel( fluidRow( column(3, numericInput("nsims", label = h4("Number of simulations"), value = 1000)), column(3, selectInput("bskSize", label = h4("Basket Size"), choices = list("20", "25", "30", "35", "40"), selected = "30")) ), br(), fluidRow( column(5, actionButton("simButton", h4("Run Simulation"))) ), # fluidRow( # column(6, verbatimTextOutput("value1")) # ), br(), br(), br(), tabsetPanel( tabPanel(h4("Catch and mortality"), br(), h5("Monte Carlo distributions of catch and mortality under each scenario"), fluidRow(column(8, plotOutput("MCplots_catchMort", height = "550px"), offset = 2)), br(), br(), hr(), h5("Monte Carlo percentiles"), br(), 
fluidRow(column(8, p("Total Catch"), tableOutput("tab_summCatch"))), fluidRow(column(8, p("Mortality"), tableOutput("tab_summMort"))) ), tabPanel(h4("Mortality rate"), br(), h5("Monte Carlo distributions of mortality rate (i.e. deaths/catch) under each scenario"), fluidRow(column(6, plotOutput("MCplots_MortRate", height = "550px"), offset = 3)), br(), br(), hr(), h5("Monte Carlo percentiles"), br(), fluidRow(column(8, tableOutput("tab_summMortRate"), offset = 3)) ), tabPanel(h4("Mortality components"), br(), h5("Median of Monte Carlo distributions of mortality components under each scenario"), fluidRow(column(10, plotOutput("MCplots_MedianMortElem", height = "500px"), offset = 1)) )) ))) # # 3nd Tab ------------------------------------------------------------------ # # tabPanel("Step 3: Run simulation & Outputs", # tabsetPanel( # tabPanel("Contrast Plots", # h4("Monte Carlo distributions of catch and mortality under each scenario"), # fluidRow(column(6, plotOutput("MCplots_catchMort", height = "650px"), offset = 2)), # br(), # # hr(), # br(), # h4("Monte Carlo distributions of mortality rate (i.e. deaths/catch) under each scenario"), # fluidRow( # column(5, plotOutput("MCplots_MortRate", height = "650px"), offset = 2) # # column(4, # # br(), br(), br(), br(), # # tableOutput("table")) # ), # # hr(), # br(), # h4("Median of Monte Carlo distributions of mortality components under each scenario"), # fluidRow(column(8, plotOutput("MCplots_MedianMortElem", height = "650px"), offset = 2)) # # ), # # # tabPanel("Contrast summary tables", # # h4("Overall mortality rate (i.e. deaths/catch)"), # tableOutput("table") # ) # )) ))
#----hash---- # compute hash using md5 hash <- digest::digest(res, "md5") if(hash != "e9e0dfe4b4753c1e00bb02e4205d8772"){ warning("Mismatch between original and current simulations!\nHash now is:\n '", hash, "'") }
/R/hash.R
permissive
aaronpeikert/repro-tutorial
R
false
false
220
r
#----hash---- # compute hash using md5 hash <- digest::digest(res, "md5") if(hash != "e9e0dfe4b4753c1e00bb02e4205d8772"){ warning("Mismatch between original and current simulations!\nHash now is:\n '", hash, "'") }
#' Function implementing the Rescorla-Wagner learning equations without #' outcome competition (for illustration purposes) for a single learning #' event. #' #' @description Function implementing the Rescorla-Wagner learning equations #' without outcome competition (for illustration purposes) for a single #' learning event. A set of cues and outcomes are provided, #' and a weightmatrix that needs to be updated. #' @export #' @param cur.cues A vector with cues. #' @param cur.outcomes A vector with outcomes. #' @param wm A weightmatrix of class matrix. If not provided a new #' weightmatrix is returned. Note that the cues and outcomes do not #' necessarily need to be available as cues and outcomes in the #' weightmatrix: if not present, they will be added. #' @param eta Learning parameter, typically set to 0.01. #' If \code{eta} is not specified and set to the value NULL, #' the values of \code{alpha}, \code{beta1}, and \code{beta2} #' determine the learning rate. However, changing these settings #' is generally not very useful (see Hoppe et al, submitted). #' @param lambda Constant constraining the connection strength. #' @param alpha Learning parameter (scaling both positive and negative #' evidence adjustments), typically set to 0.1. #' @param beta1 Learning parameter for positive evidence, typically set to #' 0.1. #' @param beta2 Learning parameter for negative evidence, typically set to #' 0.1. 
#' @return A weightmatrix (matrix) #' @seealso \code{\link[ndl]{RescorlaWagner}}, \code{\link{RWlearning}} #' @author Dorothee Hoppe, based on \code{\link[ndl]{RescorlaWagner}} #' @family functions for explaining error-driven learning #' @examples #' # load example data: #' data(dat) #' #' # add obligatory columns Cues, Outcomes, and Frequency: #' dat$Cues <- paste("BG", dat$Shape, dat$Color, sep="_") #' dat$Outcomes <- dat$Category #' dat$Frequency <- dat$Frequency1 #' head(dat) #' dim(dat) #' #' # now use createTrainingData to sample from the specified frequencies: #' train <- createTrainingData(dat) #' #' # this training data can actually be used train network: #' wm <- RWlearningNoOutcomeCompetition(train) #' # retrieve trained network: #' new <- getWM(wm) #' #' train2 <- createTrainingData(dat) #' updateWeightsNoOutcomeCompetition(getValues(train2$Cues[1]), #' getValues(train2$Outcomes[1]), wm=new) #' #' # comparison between eta and alpha, beta1, beta2: #' check.cues <- c("BG", "car", "red") #' new[check.cues,] #' tmp1 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new) #' tmp2 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, eta=NULL) #' tmp3 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, beta1=0.2) #' tmp4 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, eta=NULL, beta1=0.2) #' # these two should be the same: #' tmp1[check.cues,] #' tmp2[check.cues,] #' # now we change beta2, but this does not change anything, #' # because eta is being used: #' tmp3[check.cues,] #' # when we turn eta off, beta2 changes the values: #' tmp4[check.cues,] #' updateWeightsNoOutcomeCompetition <- function(cur.cues, cur.outcomes, wm=NULL, eta=0.01, lambda = 1, alpha = 0.1, beta1 = 0.1, beta2 = 0.1){ bg <- getOption("background") cur.cues <- c(bg, cur.cues) # if no wm is specified, create new wm: if(is.null(wm)){ wm <- createWM(cues=cur.cues, 
outcomes=cur.outcomes) }else{ wm <- checkWM(cues=cur.cues, outcomes=cur.outcomes, wm=wm) } Vtotal = 0 if(length(cur.cues) <= 1){ Vtotal = wm[cur.cues,] }else{ if(ncol(wm) > 1){ Vtotal = colSums(wm[cur.cues,], na.rm = TRUE) }else{ Vtotal = sum(wm[cur.cues,], na.rm = TRUE) } } Lambda = rep(0, ncol(wm)) Lambda[which(colnames(wm) %in% cur.outcomes)] <- lambda # determine learning rate: lr = rep(eta, length(Lambda)) if(is.null(eta)){ lr = alpha * (beta1*Lambda + beta2*(lambda-Lambda)) } wm[cur.cues,] = wm[cur.cues,] + matrix( rep(lr * (Lambda - Vtotal) * Lambda, length(cur.cues)), nrow=length(cur.cues), byrow=TRUE ) return(wm) }
/R/updateWeightsNoOutcomeCompetition.R
no_license
cran/edl
R
false
false
4,264
r
#' Function implementing the Rescorla-Wagner learning equations without #' outcome competition (for illustration purposes) for a single learning #' event. #' #' @description Function implementing the Rescorla-Wagner learning equations #' without outcome competition (for illustration purposes) for a single #' learning event. A set of cues and outcomes are provided, #' and a weightmatrix that needs to be updated. #' @export #' @param cur.cues A vector with cues. #' @param cur.outcomes A vector with outcomes. #' @param wm A weightmatrix of class matrix. If not provided a new #' weightmatrix is returned. Note that the cues and outcomes do not #' necessarily need to be available as cues and outcomes in the #' weightmatrix: if not present, they will be added. #' @param eta Learning parameter, typically set to 0.01. #' If \code{eta} is not specified and set to the value NULL, #' the values of \code{alpha}, \code{beta1}, and \code{beta2} #' determine the learning rate. However, changing these settings #' is generally not very useful (see Hoppe et al, submitted). #' @param lambda Constant constraining the connection strength. #' @param alpha Learning parameter (scaling both positive and negative #' evidence adjustments), typically set to 0.1. #' @param beta1 Learning parameter for positive evidence, typically set to #' 0.1. #' @param beta2 Learning parameter for negative evidence, typically set to #' 0.1. 
#' @return A weightmatrix (matrix) #' @seealso \code{\link[ndl]{RescorlaWagner}}, \code{\link{RWlearning}} #' @author Dorothee Hoppe, based on \code{\link[ndl]{RescorlaWagner}} #' @family functions for explaining error-driven learning #' @examples #' # load example data: #' data(dat) #' #' # add obligatory columns Cues, Outcomes, and Frequency: #' dat$Cues <- paste("BG", dat$Shape, dat$Color, sep="_") #' dat$Outcomes <- dat$Category #' dat$Frequency <- dat$Frequency1 #' head(dat) #' dim(dat) #' #' # now use createTrainingData to sample from the specified frequencies: #' train <- createTrainingData(dat) #' #' # this training data can actually be used train network: #' wm <- RWlearningNoOutcomeCompetition(train) #' # retrieve trained network: #' new <- getWM(wm) #' #' train2 <- createTrainingData(dat) #' updateWeightsNoOutcomeCompetition(getValues(train2$Cues[1]), #' getValues(train2$Outcomes[1]), wm=new) #' #' # comparison between eta and alpha, beta1, beta2: #' check.cues <- c("BG", "car", "red") #' new[check.cues,] #' tmp1 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new) #' tmp2 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, eta=NULL) #' tmp3 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, beta1=0.2) #' tmp4 <- updateWeightsNoOutcomeCompetition(check.cues, #' c("vehicle", "animal"), wm=new, eta=NULL, beta1=0.2) #' # these two should be the same: #' tmp1[check.cues,] #' tmp2[check.cues,] #' # now we change beta2, but this does not change anything, #' # because eta is being used: #' tmp3[check.cues,] #' # when we turn eta off, beta2 changes the values: #' tmp4[check.cues,] #' updateWeightsNoOutcomeCompetition <- function(cur.cues, cur.outcomes, wm=NULL, eta=0.01, lambda = 1, alpha = 0.1, beta1 = 0.1, beta2 = 0.1){ bg <- getOption("background") cur.cues <- c(bg, cur.cues) # if no wm is specified, create new wm: if(is.null(wm)){ wm <- createWM(cues=cur.cues, 
outcomes=cur.outcomes) }else{ wm <- checkWM(cues=cur.cues, outcomes=cur.outcomes, wm=wm) } Vtotal = 0 if(length(cur.cues) <= 1){ Vtotal = wm[cur.cues,] }else{ if(ncol(wm) > 1){ Vtotal = colSums(wm[cur.cues,], na.rm = TRUE) }else{ Vtotal = sum(wm[cur.cues,], na.rm = TRUE) } } Lambda = rep(0, ncol(wm)) Lambda[which(colnames(wm) %in% cur.outcomes)] <- lambda # determine learning rate: lr = rep(eta, length(Lambda)) if(is.null(eta)){ lr = alpha * (beta1*Lambda + beta2*(lambda-Lambda)) } wm[cur.cues,] = wm[cur.cues,] + matrix( rep(lr * (Lambda - Vtotal) * Lambda, length(cur.cues)), nrow=length(cur.cues), byrow=TRUE ) return(wm) }
#'Calculate non-wadeable littoral depth metrics #' #'\code{calculateLittoralDepth} calculates the non-wadeable #'littoral depth metrics. There are two different implementations that vary #'primarily in speed. \code{calculateLittoralDepth} is faster although the #'implementation is somewhat more obtuse. \code{calculateLittoralDepth2} uses #'\link{ddply} and is fairly simple. #'@param uid a vector of site-visit indicators #'@param depth a vector of depth measurements (with the same units) #'@export #'@examples #'calculateLittoralDepth(uid = rep(1:10, each = 10), depth = rnorm(100)) #'calculateLittoralDepth(uid = rep(1, 5), depth = rep(NA, 5)) calculateLittoralDepth <- function(uid, depth){ kMetrics <- c('xlit', 'mnlit', 'mxlit', 'vlit') nas <- expand.grid(uid = unique(uid), metric = kMetrics) dots <- list(xlit = ~mean(depth), mnlit = ~min(depth), mxlit = ~max(depth), vlit = ~sd(depth)) ans <- dplyr::data_frame(uid = uid, depth = depth) %>% na.omit() %>% dplyr::group_by_(~uid) %>% dplyr::summarize_(.dots = dots) %>% tidyr::gather_('metric', 'result', kMetrics) %>% dplyr::right_join(nas, by = c('uid', 'metric')) %>% dplyr::arrange_(~uid, ~metric) progressReport('Finished non-wadeable littoral depth metrics') return(ans) } #calculateLittoralDepth <- function(uid, depth){ # uid <- as.factor(uid) # allna <- igroupAlls(is.na(depth), uid) # x <- igroupMeans(depth, uid, na.rm = T) # mn <- igroupMins(depth, uid, na.rm = T) # mx <- igroupMaxs(depth, uid, na.rm = T) # std <- tapply(depth, uid, sd, na.rm = T) # ans <- cbind(x, mn, mx, std) # dimnames(ans) <- list(uid = levels(uid), # metric = c('xlit', 'mnlit', 'mxlit', 'vlit')) # nas <- matrix(allna, nrow(ans), ncol(ans)) # is.na(ans) <- nas # progressReport('Finished non-wadeable littoral depth metrics') # ans #} # calculateLittoralDepth <- function(uid, depth){ # x <- data.frame(uid = as.factor(uid), # depth = depth) # f <- function(x){ # allna <- all(is.na(x$depth)) # if(!allna){ # return(c(xlit = mean(x$depth, na.rm = T), # 
mnlit = min(x$depth, na.rm = T), # mxlit = max(x$depth, na.rm = T), # vlit = sd(x$depth, na.rm = T))) # } else { # return(c(xlit = NA, mnlit = NA, mxlit = NA, vlit = NA)) # } # } # ans <- ddply(x, .(uid), f) # progressReport('Finished non-wadeable littoral depth metrics') # return(ans) # }
/R/calculateLittoralDepth.r
no_license
PDXChris/nrsa
R
false
false
2,544
r
#'Calculate non-wadeable littoral depth metrics #' #'\code{calculateLittoralDepth} calculates the non-wadeable #'littoral depth metrics. There are two different implementations that vary #'primarily in speed. \code{calculateLittoralDepth} is faster although the #'implementation is somewhat more obtuse. \code{calculateLittoralDepth2} uses #'\link{ddply} and is fairly simple. #'@param uid a vector of site-visit indicators #'@param depth a vector of depth measurements (with the same units) #'@export #'@examples #'calculateLittoralDepth(uid = rep(1:10, each = 10), depth = rnorm(100)) #'calculateLittoralDepth(uid = rep(1, 5), depth = rep(NA, 5)) calculateLittoralDepth <- function(uid, depth){ kMetrics <- c('xlit', 'mnlit', 'mxlit', 'vlit') nas <- expand.grid(uid = unique(uid), metric = kMetrics) dots <- list(xlit = ~mean(depth), mnlit = ~min(depth), mxlit = ~max(depth), vlit = ~sd(depth)) ans <- dplyr::data_frame(uid = uid, depth = depth) %>% na.omit() %>% dplyr::group_by_(~uid) %>% dplyr::summarize_(.dots = dots) %>% tidyr::gather_('metric', 'result', kMetrics) %>% dplyr::right_join(nas, by = c('uid', 'metric')) %>% dplyr::arrange_(~uid, ~metric) progressReport('Finished non-wadeable littoral depth metrics') return(ans) } #calculateLittoralDepth <- function(uid, depth){ # uid <- as.factor(uid) # allna <- igroupAlls(is.na(depth), uid) # x <- igroupMeans(depth, uid, na.rm = T) # mn <- igroupMins(depth, uid, na.rm = T) # mx <- igroupMaxs(depth, uid, na.rm = T) # std <- tapply(depth, uid, sd, na.rm = T) # ans <- cbind(x, mn, mx, std) # dimnames(ans) <- list(uid = levels(uid), # metric = c('xlit', 'mnlit', 'mxlit', 'vlit')) # nas <- matrix(allna, nrow(ans), ncol(ans)) # is.na(ans) <- nas # progressReport('Finished non-wadeable littoral depth metrics') # ans #} # calculateLittoralDepth <- function(uid, depth){ # x <- data.frame(uid = as.factor(uid), # depth = depth) # f <- function(x){ # allna <- all(is.na(x$depth)) # if(!allna){ # return(c(xlit = mean(x$depth, na.rm = T), # 
mnlit = min(x$depth, na.rm = T), # mxlit = max(x$depth, na.rm = T), # vlit = sd(x$depth, na.rm = T))) # } else { # return(c(xlit = NA, mnlit = NA, mxlit = NA, vlit = NA)) # } # } # ans <- ddply(x, .(uid), f) # progressReport('Finished non-wadeable littoral depth metrics') # return(ans) # }
#3 coin_sides <- c("head", "tail") n_flips <- 10 # Sample from coin_sides n_flips times with replacement sample(coin_sides, n_flips, replace = TRUE) # Update the function to return n_flips coin tosses toss_coin <- function(n_flips) { coin_sides <- c("head", "tail") sample(coin_sides, n_flips, replace = TRUE) } # Generate 10 coin tosses toss_coin(10)
/Introduction to Writing Functions in R/how to write a function/3.R
no_license
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
R
false
false
378
r
#3 coin_sides <- c("head", "tail") n_flips <- 10 # Sample from coin_sides n_flips times with replacement sample(coin_sides, n_flips, replace = TRUE) # Update the function to return n_flips coin tosses toss_coin <- function(n_flips) { coin_sides <- c("head", "tail") sample(coin_sides, n_flips, replace = TRUE) } # Generate 10 coin tosses toss_coin(10)
# Load R packages #rm(list=ls()) library(rstan) rstan_options(auto_write = TRUE) options(mc.cores = parallel::detectCores()) library(ggplot2) library(mvtnorm) set.seed(12345) # Set paramters for the simulated data I <- 34 # questions J <- 100 # students K <- 2 # years N <- I*J*K # item responses #means and variance-covariance matrix for IRT parameters mu <- rep(c(0, 0),K) tau <- rep(c(0.25, 1),K) variance_block <- matrix(c(1, 0.3, 0.3, 1), ncol = 2) Omega <- matrix(rep(0,4*K^2),nrow=K*2) for (k in 1:K){ Omega[(2*k-1):(2*k),(2*k-1):(2*k)] <- variance_block } # Calculate or sample remaining paramters Sigma <- tau %*% t(tau) * Omega xi <- MASS::mvrnorm(I, rep(0,2*K), Sigma) alpha=numeric() beta= numeric() for (i in seq(1,K)){ alpha=c(alpha,exp(mu[(2*i-1)] + as.vector(xi[, (2*i-1)]))) beta =c(beta,beta <- as.vector(mu[(2*i)] + xi[, (2*i)])) } theta_rho = -1/2 + runif(J) theta_m = matrix(rep(0,J*K),nrow=J) theta_m for (i in seq(1:J)){ theta_sigma = matrix(c(1,theta_rho[i],theta_rho[i],1),nrow=2) theta_m[i,] = rmvnorm(1,mean=rep(0,K),sigma=theta_sigma) } # Assemble data and simulate response ii = rep(rep(1:I,times=K), times=J) #column of item numbers jj = rep(1:J,each=(K*I)) #column of student numbers kk = rep(rep(1:K,each=I),times=J) #column of year numbers eta=rep(0,N) # a(theta-b) y = rep(0,N) # question responses for (i in 1:N){ eta[i] <- alpha[ii[i]] * (theta_m[jj[i],kk[i]] - beta[ii[i]]) y[i] <- as.numeric(boot::inv.logit(eta[i]) > runif(1)) } LKJF = 5 data_list <- list(I=I,J=J, K=K, N=N, y=y, LKJF=LKJF) # Fit model to simulated data sim_fit <- stan(file = "hierarchical_2pl_multiyear_V4_local.stan", data=data_list, chains = 2, iter = 500) summary(sim_fit) pd <- extract(sim_fit) k=22 mean(pd$zi[,k,1]) mean(pd$zi[,k,2]) cov(pd$zi[,k,]) library(shinystan) launch_shinystan(sim_fit) sim_summary <- as.data.frame(summary(sim_fit)[[1]]) sim_summary$Parameter <- as.factor(gsub("\\[.*]", "", rownames(sim_summary))) ggplot(sim_summary) + aes(x = Parameter, y = Rhat, color = 
Parameter) + geom_jitter(height = 0, width = 0.5, show.legend = FALSE) + ylab(expression(hat(italic(R)))) # Make vector of wanted parameter names wanted_pars <- c(paste0("alpha[", 1:I, "]"), paste0("beta[", 1:I, "]"), c("mu[1]", "mu[2]", "tau[1]", "tau[2]", "Omega[1,2]")) # Get estimated and generating values for wanted parameters generating_values = c(alpha, beta, mu, tau, Omega[1, 2]) estimated_values <- sim_summary[wanted_pars, c("mean", "2.5%", "97.5%")] # Assesmble a data frame to pass to ggplot() sim_df <- data.frame(parameter = factor(wanted_pars, rev(wanted_pars)), row.names = NULL) sim_df$middle <- estimated_values[, "mean"] - generating_values sim_df$lower <- estimated_values[, "2.5%"] - generating_values sim_df$upper <- estimated_values[, "97.5%"] - generating_values # Plot the discrepancy ggplot(sim_df) + aes(x = parameter, y = middle, ymin = lower, ymax = upper) + scale_x_discrete() + geom_abline(intercept = 0, slope = 0, color = "white") + geom_linerange() + geom_point(size = 2) + labs(y = "Discrepancy", x = NULL) + theme(panel.grid = element_blank()) + coord_flip()
/hierarchical_2pl_2year_V4.R
no_license
equinn1/NESS_2019
R
false
false
3,336
r
# Load R packages #rm(list=ls()) library(rstan) rstan_options(auto_write = TRUE) options(mc.cores = parallel::detectCores()) library(ggplot2) library(mvtnorm) set.seed(12345) # Set paramters for the simulated data I <- 34 # questions J <- 100 # students K <- 2 # years N <- I*J*K # item responses #means and variance-covariance matrix for IRT parameters mu <- rep(c(0, 0),K) tau <- rep(c(0.25, 1),K) variance_block <- matrix(c(1, 0.3, 0.3, 1), ncol = 2) Omega <- matrix(rep(0,4*K^2),nrow=K*2) for (k in 1:K){ Omega[(2*k-1):(2*k),(2*k-1):(2*k)] <- variance_block } # Calculate or sample remaining paramters Sigma <- tau %*% t(tau) * Omega xi <- MASS::mvrnorm(I, rep(0,2*K), Sigma) alpha=numeric() beta= numeric() for (i in seq(1,K)){ alpha=c(alpha,exp(mu[(2*i-1)] + as.vector(xi[, (2*i-1)]))) beta =c(beta,beta <- as.vector(mu[(2*i)] + xi[, (2*i)])) } theta_rho = -1/2 + runif(J) theta_m = matrix(rep(0,J*K),nrow=J) theta_m for (i in seq(1:J)){ theta_sigma = matrix(c(1,theta_rho[i],theta_rho[i],1),nrow=2) theta_m[i,] = rmvnorm(1,mean=rep(0,K),sigma=theta_sigma) } # Assemble data and simulate response ii = rep(rep(1:I,times=K), times=J) #column of item numbers jj = rep(1:J,each=(K*I)) #column of student numbers kk = rep(rep(1:K,each=I),times=J) #column of year numbers eta=rep(0,N) # a(theta-b) y = rep(0,N) # question responses for (i in 1:N){ eta[i] <- alpha[ii[i]] * (theta_m[jj[i],kk[i]] - beta[ii[i]]) y[i] <- as.numeric(boot::inv.logit(eta[i]) > runif(1)) } LKJF = 5 data_list <- list(I=I,J=J, K=K, N=N, y=y, LKJF=LKJF) # Fit model to simulated data sim_fit <- stan(file = "hierarchical_2pl_multiyear_V4_local.stan", data=data_list, chains = 2, iter = 500) summary(sim_fit) pd <- extract(sim_fit) k=22 mean(pd$zi[,k,1]) mean(pd$zi[,k,2]) cov(pd$zi[,k,]) library(shinystan) launch_shinystan(sim_fit) sim_summary <- as.data.frame(summary(sim_fit)[[1]]) sim_summary$Parameter <- as.factor(gsub("\\[.*]", "", rownames(sim_summary))) ggplot(sim_summary) + aes(x = Parameter, y = Rhat, color = 
Parameter) + geom_jitter(height = 0, width = 0.5, show.legend = FALSE) + ylab(expression(hat(italic(R)))) # Make vector of wanted parameter names wanted_pars <- c(paste0("alpha[", 1:I, "]"), paste0("beta[", 1:I, "]"), c("mu[1]", "mu[2]", "tau[1]", "tau[2]", "Omega[1,2]")) # Get estimated and generating values for wanted parameters generating_values = c(alpha, beta, mu, tau, Omega[1, 2]) estimated_values <- sim_summary[wanted_pars, c("mean", "2.5%", "97.5%")] # Assesmble a data frame to pass to ggplot() sim_df <- data.frame(parameter = factor(wanted_pars, rev(wanted_pars)), row.names = NULL) sim_df$middle <- estimated_values[, "mean"] - generating_values sim_df$lower <- estimated_values[, "2.5%"] - generating_values sim_df$upper <- estimated_values[, "97.5%"] - generating_values # Plot the discrepancy ggplot(sim_df) + aes(x = parameter, y = middle, ymin = lower, ymax = upper) + scale_x_discrete() + geom_abline(intercept = 0, slope = 0, color = "white") + geom_linerange() + geom_point(size = 2) + labs(y = "Discrepancy", x = NULL) + theme(panel.grid = element_blank()) + coord_flip()
#' @title Simulate the Confidence Interval for a Ratio #' @description Simulate the Confidence Interval for a Ratio #' @usage ciprob.sim(n, p = 0.5, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE) #' #' @param n Sample size #' @param p Population ratio value, Default: 0 #' @param alp Level of significance, Default: 0.05 #' @param seed Seed value for generating random numbers, Default: 9857 #' @param dig Number of digits below the decimal point, Default: 4 #' @param plot Plot confidence intervals? Default: TRUE #' @param n Sample size, Default: 100 #' #' @return None. #' @examples #' ciprob.sim(n = 16, p = 0.6, alp = 0.05, N = 100) #' ciprob.sim(n = 16, p = 0.6, alp = 0.05, N = 10000, plot = FALSE) #' @export ciprob.sim <- function(n, p = 0.5, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE) { ir <- 1:N zv <- qnorm(1 - alp / 2) set.seed(seed) xm <- rbinom(N, n, p) xp <- xm / n xv <- xp * (1 - xp) / n lcl <- pmax(0, xp - zv * sqrt(xv)) ucl <- pmin(1, xp + zv * sqrt(xv)) ci <- cbind(lcl, xp, ucl) if (plot) { win.graph(7, 4) plot(ir, ci[, 2], type = "p", pch = 19, cex = 0.6, col = 1, ylim = c(min(ci), max(ci)), main = "Confidence Intervals for a Population Ratio", ylab = "Confidence Interval", xlab = "Iteration" ) abline(h = p, col = 2) arrows(ir, ci[, 1], ir, ci[, 3], length = 0.03, code = 3, angle = 90, lwd = 1.5, col = ifelse((ci[, 1] > p | ci[, 3] < p), 2, 4) ) } nup <- sum(ci[, 1] > p) nlow <- sum(ci[, 3] < p) cat(paste0( "P(LCL > ", p, ") = ", nup, " / ", N, " = ", nup / N, "\t P(UCL < ", p, ") = ", nlow, " / ", N, " = ", nlow / N ), "\n") }
/R/ciprob.sim.R
permissive
jhk0530/Rstat
R
false
false
1,713
r
#' @title Simulate the Confidence Interval for a Ratio #' @description Simulate the Confidence Interval for a Ratio #' @usage ciprob.sim(n, p = 0.5, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE) #' #' @param n Sample size #' @param p Population ratio value, Default: 0 #' @param alp Level of significance, Default: 0.05 #' @param seed Seed value for generating random numbers, Default: 9857 #' @param dig Number of digits below the decimal point, Default: 4 #' @param plot Plot confidence intervals? Default: TRUE #' @param n Sample size, Default: 100 #' #' @return None. #' @examples #' ciprob.sim(n = 16, p = 0.6, alp = 0.05, N = 100) #' ciprob.sim(n = 16, p = 0.6, alp = 0.05, N = 10000, plot = FALSE) #' @export ciprob.sim <- function(n, p = 0.5, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE) { ir <- 1:N zv <- qnorm(1 - alp / 2) set.seed(seed) xm <- rbinom(N, n, p) xp <- xm / n xv <- xp * (1 - xp) / n lcl <- pmax(0, xp - zv * sqrt(xv)) ucl <- pmin(1, xp + zv * sqrt(xv)) ci <- cbind(lcl, xp, ucl) if (plot) { win.graph(7, 4) plot(ir, ci[, 2], type = "p", pch = 19, cex = 0.6, col = 1, ylim = c(min(ci), max(ci)), main = "Confidence Intervals for a Population Ratio", ylab = "Confidence Interval", xlab = "Iteration" ) abline(h = p, col = 2) arrows(ir, ci[, 1], ir, ci[, 3], length = 0.03, code = 3, angle = 90, lwd = 1.5, col = ifelse((ci[, 1] > p | ci[, 3] < p), 2, 4) ) } nup <- sum(ci[, 1] > p) nlow <- sum(ci[, 3] < p) cat(paste0( "P(LCL > ", p, ") = ", nup, " / ", N, " = ", nup / N, "\t P(UCL < ", p, ") = ", nlow, " / ", N, " = ", nlow / N ), "\n") }
#' Document-Term Matrix Categorization
#'
#' Reduces the dimensions of a document-term matrix by dictionary-based categorization.
#' @param dtm A matrix with terms as column names.
#' @param dict The name of a provided dictionary
#'   (\href{https://osf.io/y6g5b/wiki/home}{osf.io/y6g5b/wiki}) or of a file found in
#'   \code{dir}, or a \code{list} object with named character vectors as word lists,
#'   or the path to a file to be read in by \code{\link{read.dic}}.
#' @param term.weights A \code{list} object with named numeric vectors lining up with the character
#'   vectors in \code{dict}, used to weight the terms in each \code{dict} vector. If a category in
#'   \code{dict} is not specified in \code{term.weights}, or the \code{dict} and \code{term.weights}
#'   vectors aren't the same length, the weights for that category will be 1.
#' @param bias A list or named vector specifying a constant to add to the named category. If a term
#'   matching \code{bias.name} is included in a category, its associated \code{weight} will be used
#'   as the \code{bias} for that category.
#' @param bias.name A character specifying a term to be used as a category bias; default is
#'   \code{'_intercept'}.
#' @param escape Logical indicating whether the terms in \code{dict} should not be treated as plain
#'   text (including asterisk wild cards). If \code{TRUE}, regular expression related characters are
#'   escaped. Set to \code{TRUE} if you get PCRE compilation errors.
#' @param partial Logical; if \code{TRUE} terms are partially matched (not padded by ^ and $).
#' @param glob Logical; if \code{TRUE} (default), will convert initial and terminal asterisks to
#'   partial matches.
#' @param to.lower Logical; if \code{TRUE} will lowercase dictionary terms. Otherwise, dictionary
#'   terms will be converted to match the terms if they are single-cased. Set to \code{FALSE} to
#'   always keep dictionary terms as entered.
#' @param term.filter A regular expression string used to format the text of each term (passed to
#'   \code{gsub}). For example, if terms are part-of-speech tagged (e.g.,
#'   \code{'a_DT'}), \code{'_.*'} would remove the tag.
#' @param term.break If a category has more than \code{term.break} characters, it will be processed
#'   in chunks. Reduce from 20000 if you get a PCRE compilation error.
#' @param dir Path to a folder in which to look for \code{dict}; \cr
#'   will look in \code{'~/Dictionaries'} by default. \cr
#'   Set a session default with \code{options(lingmatch.dict.dir = 'desired/path')}.
#' @seealso For applying pattern-based dictionaries (to raw text) see \code{\link{lma_patcat}}.
#' @family Dictionary functions
#' @return A matrix with a row per \code{dtm} row and columns per dictionary category, and a
#'   \code{WC} attribute with original word counts.
#' @examples
#' \dontrun{
#'
#' # Score texts with the NRC Affect Intensity Lexicon
#'
#' dict <- readLines("https://saifmohammad.com/WebDocs/NRC-AffectIntensity-Lexicon.txt")
#' dict <- read.table(
#'   text = dict[-seq_len(grep("term\tscore", dict, fixed = TRUE)[[1]])],
#'   col.names = c("term", "weight", "category")
#' )
#'
#' text <- c(
#'   angry = paste(
#'     "We are outraged by their hateful brutality,",
#'     "and by the way they terrorize us with their hatred."
#'   ),
#'   fearful = paste(
#'     "The horrific torture of that terrorist was tantamount",
#'     "to the terrorism of terrorists."
#'   ),
#'   joyous = "I am jubilant to be celebrating the bliss of this happiest happiness.",
#'   sad = paste(
#'     "They are nearly suicidal in their mourning after",
#'     "the tragic and heartbreaking holocaust."
#'   )
#' )
#'
#' emotion_scores <- lma_termcat(text, dict)
#' if (require("splot")) splot(emotion_scores ~ names(text), leg = "out")
#'
#' ## or use the standardized version (which includes more categories)
#'
#' emotion_scores <- lma_termcat(text, "nrc_eil", dir = "~/Dictionaries")
#' emotion_scores <- emotion_scores[, c("anger", "fear", "joy", "sadness")]
#' if (require("splot")) splot(emotion_scores ~ names(text), leg = "out")
#' }
#' @export
lma_termcat <- function(dtm, dict, term.weights = NULL, bias = NULL, bias.name = "_intercept",
                        escape = TRUE, partial = FALSE, glob = TRUE, term.filter = NULL,
                        term.break = 2e4, to.lower = FALSE,
                        dir = getOption("lingmatch.dict.dir")) {
  st <- proc.time()[[3]]
  # an unset option yields NULL, which would make `dir == ""` error in `if`;
  # treat NULL like "" and fall back to the default dictionary folder
  if (ckd <- is.null(dir) || dir == "") dir <- "~/Dictionaries"
  if (missing(dict)) dict <- lma_dict(1:9)
  if (is.factor(dict)) dict <- as.character(dict)
  # a single space-free string may name a provided dictionary or a file
  if (is.character(dict) && length(dict) == 1 && missing(term.weights) &&
    (file.exists(dict) || !grepl("\\s", dict))) {
    if (!file.exists(dict) && any(file.exists(normalizePath(paste0(dir, "/", dict), "/", FALSE)))) {
      dict <- normalizePath(paste0(dir, "/", dict))
    }
    td <- tryCatch(read.dic(dict, dir = if (ckd) "" else dir), error = function(e) NULL)
    dict <- if (is.null(td)) list(cat1 = dict) else td
  }
  # data.frame/matrix dictionaries: separate term columns from numeric weight columns
  if (!is.null(dim(dict))) {
    if (!is.null(term.weights)) {
      if (is.character(term.weights) && any(su <- term.weights %in% colnames(dict))) {
        term.weights <- dict[, term.weights[su], drop = FALSE]
      }
      if (!is.null(dim(term.weights))) {
        term.weights <- term.weights[, vapply(
          seq_len(ncol(term.weights)),
          function(col) is.numeric(term.weights[, col]), TRUE
        )]
      }
    } else if (any(su <- vapply(seq_len(ncol(dict)), function(col) is.numeric(dict[, col]), TRUE))) {
      term.weights <- dict[, su, drop = FALSE]
      dict <- if (all(su)) {
        if (!is.null(rownames(dict))) {
          data.frame(term = rownames(dict), stringsAsFactors = FALSE)
        } else {
          term.weights <- if (ncol(term.weights) == 1) NULL else term.weights[, -1, drop = FALSE]
          dict[, 1, drop = FALSE]
        }
      } else {
        dict[, !su, drop = FALSE]
      }
    }
    if (!is.null(rownames(dict)) && ncol(dict) == 1 && any(grepl("^[a-z]", rownames(dict), TRUE))) {
      dict <- rownames(dict)
    } else {
      su <- vapply(seq_len(ncol(dict)), function(col) !is.numeric(dict[, col]), TRUE)
      if (!any(su)) stop("no terms found in dictionary")
      dict <- if (sum(su) > 1) {
        # multiple non-numeric columns: pick term and category columns heuristically
        su <- which(su)
        if (!is.null(term.weights) && (!is.list(term.weights) || ncol(term.weights) == 1)) {
          if (is.list(term.weights)) term.weights <- term.weights[, 1]
          ssu <- vapply(su, function(col) length(unique(dict[, col])), 0) + seq(length(su), 1)
          term.weights <- split(term.weights, dict[, which.min(ssu)])
          dict <- split(dict[, which.max(ssu)], dict[, which.min(ssu)])
        } else {
          ssu <- vapply(su, function(col) anyDuplicated(dict[, col]) == 0, TRUE)
          if (any(ssu)) dict[, su[ssu][1]] else dict[, su[1]]
        }
      } else {
        dict[, su]
      }
    }
  }
  # a named numeric vector is terms (names) plus weights (values)
  if (is.numeric(dict) && is.null(term.weights)) {
    term.weights <- dict
    dict <- names(dict)
  }
  if (is.factor(dict)) dict <- as.character(dict)
  # normalize matrix-like term.weights to a numeric data.frame with term rownames
  if (!is.null(dim(term.weights))) {
    if (is.null(colnames(term.weights))) {
      colnames(term.weights) <- if (length(dict) == length(term.weights)) {
        names(dict)
      } else {
        paste0("cat", seq_len(ncol(term.weights)))
      }
    }
    if (!is.data.frame(term.weights)) {
      term.weights <- as.data.frame(term.weights, stringsAsFactors = FALSE)
    }
    su <- vapply(term.weights, is.numeric, TRUE)
    if (any(!su)) {
      if (any(ssu <- !su & vapply(term.weights, anyDuplicated, 0) == 0)) {
        rownames(term.weights) <- term.weights[, which(ssu)[1]]
      }
      term.weights <- term.weights[, su]
    }
    if (!length(term.weights)) stop("no numeric columns in term.weights")
  }
  if (!is.list(dict)) {
    dict <- if (is.matrix(dict)) {
      as.data.frame(dict, stringsAsFactors = FALSE)
    } else if (is.character(dict) && length(dict) == 1 &&
      (file.exists(dict) || dict %in% rownames(select.dict()$info))) {
      read.dic(dict, dir = if (ckd) "" else dir)
    } else {
      list(dict)
    }
  }
  # make sure every category has a name
  if (is.list(dict)) {
    if (is.null(names(dict))) {
      tn <- if (!is.null(colnames(term.weights))) colnames(term.weights) else names(term.weights)
      names(dict) <- if (!is.null(tn) && length(tn) == length(dict)) tn else paste0("cat", seq_along(dict))
    } else if (any(su <- names(dict) == "")) {
      names(dict)[su] <- if (sum(su) == 1) "cat_unnamed" else paste0("cat_unnamed", seq_len(sum(su)))
      if (!is.null(term.weights) && any(su <- names(term.weights) == "")) {
        names(term.weights)[su] <- if (sum(su) == 1) "cat_unnamed" else paste0("cat_unnamed", seq_len(sum(su)))
      }
    }
  } else {
    dict <- list(dict)
  }
  # align term.weights with dict: same categories, weights named by term
  if (!is.null(term.weights)) {
    if (is.null(dim(term.weights))) {
      if (is.list(term.weights)) {
        if (length(dict) != length(term.weights) && !is.null(names(term.weights[[1]]))) dict <- term.weights
        if (length(dict) == length(term.weights) && !all(names(dict) %in% names(term.weights))) {
          if (is.null(names(term.weights)) || !any(names(term.weights) %in% names(dict))) {
            names(term.weights) <- names(dict)
          } else {
            # categories without weights default to a weight of 1 per term
            for (cat in names(dict)[!names(dict) %in% names(term.weights)]) {
              term.weights[cat] <- structure(rep(1, length(dict[[cat]])), names = dict[[cat]])
            }
          }
        }
        for (cat in names(dict)) {
          if (is.null(names(term.weights[[cat]]))) {
            if (length(term.weights[[cat]]) == length(dict[[cat]])) {
              names(term.weights[[cat]]) <- dict[[cat]]
            } else {
              term.weights[[cat]] <- structure(rep(1, length(dict[[cat]])), names = dict[[cat]])
            }
          }
        }
      } else {
        if (is.null(names(term.weights))) {
          if (length(dict[[1]]) == length(term.weights)) {
            term.weights <- list(term.weights)
            names(term.weights) <- names(dict)
            names(term.weights[[1]]) <- dict[[1]]
          } else {
            term.weights <- NULL
            warning("term.weights were dropped as they could not be aligned with dict")
          }
        }
      }
    } else {
      if (length(dict) == 1 && length(dict[[1]]) == nrow(term.weights) &&
        !any(grepl("[a-z]", rownames(term.weights), TRUE))) {
        if (is.factor(dict[[1]])) dict[[1]] <- as.character(dict[[1]])
        if (anyDuplicated(dict[[1]])) {
          # collapse duplicated terms by averaging their weights
          dt <- unique(dict[[1]][duplicated(dict[[1]])])
          su <- dict[[1]] %in% dt
          td <- term.weights[su, ]
          tw <- matrix(0, length(dt), ncol(term.weights), dimnames = list(dt, colnames(term.weights)))
          for (term in dt) tw[term, ] <- colMeans(term.weights[dict[[1]] == term, , drop = FALSE], na.rm = TRUE)
          term.weights <- rbind(term.weights[!su, ], tw)
          rownames(term.weights) <- c(dict[[1]][!su], dt)
          dict[[1]] <- rownames(term.weights)
        } else {
          rownames(term.weights) <- dict[[1]]
        }
      }
    }
    if (!is.null(term.weights)) {
      if (!is.list(term.weights)) term.weights <- list(term.weights)
      dlen <- length(dict)
      if (is.null(names(term.weights))) {
        names(term.weights) <- if (length(term.weights) == dlen) names(dict) else seq_along(term.weights)
      }
      if (length(term.weights) > dlen && dlen == 1 &&
        all(vapply(term.weights, length, 0) == length(dict[[1]]))) {
        dict <- lapply(term.weights, function(ws) dict[[1]])
      }
    }
  }
  # ensure each category is a character vector of terms
  dict <- lapply(dict, function(cat) {
    if (!is.character(cat)) {
      if (is.null(names(cat))) as.character(cat) else names(cat)
    } else {
      cat
    }
  })
  if (!is.null(bias) && is.null(names(bias))) {
    names(bias) <- if (length(bias) == length(dict)) names(dict) else seq_along(bias)
  }
  if (!is.null(names(term.weights)) && length(names(term.weights)) == length(dict)) names(dict) <- names(term.weights)
  # a term matching bias.name becomes that category's intercept
  for (n in names(dict)) {
    if (!n %in% names(bias) && any(ii <- !is.na(dict[[n]]) & dict[[n]] == bias.name)) {
      bias[n] <- term.weights[[n]][ii]
      term.weights[[n]][ii] <- 0
    }
  }
  # characterize the dictionary's character set to guide casing and tokenization
  dict_chars <- list(
    all = paste(unique(strsplit(paste0(unique(unlist(dict, use.names = FALSE)), collapse = ""), "")[[1]]),
      collapse = ""
    )
  )
  dict_chars$alpha <- gsub("[^A-Za-z]", "", dict_chars$all)
  dict_chars$case <- if (grepl("[A-Z]", dict_chars$alpha)) {
    if (grepl("[a-z]", dict_chars$alpha)) "mixed" else "upper"
  } else {
    "lower"
  }
  edtm <- substitute(dtm)
  if (is.factor(dtm)) dtm <- as.character(dtm)
  if (is.character(dtm) || !any(grepl("\\s", colnames(dtm)))) {
    if (any(grepl("\\s", unlist(dict, use.names = FALSE)))) {
      # multi-word dictionary terms cannot be matched in a dtm; defer to lma_patcat
      if (is.character(dtm)) {
        warning(
          "dict has terms with spaces, so using lma_patcat instead;",
          "\n enter a dtm (e.g., lma_dtm(", edtm, ")) to force lma_termcat use"
        )
        args <- list(text = dtm, dict = dict)
        if (!is.null(term.weights)) args$pattern.weights <- term.weights
        if (!is.null(bias)) args$bias <- bias
        if (!missing(glob)) args$globtoregex <- glob
        if (!missing(partial) && !partial) args$boundary <- "\\b"
        if (!missing(dir)) args$dir <- if (ckd) "" else dir
        return(do.call(lma_patcat, args))
      }
    }
    if (is.character(dtm)) {
      if (dict_chars$case == "upper") dtm <- toupper(dtm)
      dtm <- lma_dtm(dtm,
        numbers = grepl("[0-9]", dict_chars$all),
        punct = grepl('[_/\\?!."-]', dict_chars$all),
        to.lower = dict_chars$case == "lower"
      )
    }
  }
  if (is.null(dim(dtm))) dtm <- t(dtm)
  ats <- attributes(dtm)[c("opts", "WC", "type")]
  ats <- ats[!vapply(ats, is.null, TRUE)]
  atsn <- names(ats)
  ws <- if (is.null(term.filter)) colnames(dtm) else gsub(term.filter, "", colnames(dtm), perl = TRUE)
  # match dictionary case to the dtm's terms when both are single-cased
  if ((missing(to.lower) || !is.logical(to.lower)) && dict_chars$case != "mixed") {
    text_case <- if (any(grepl("[A-Z]", ws))) if (any(grepl("[a-z]", ws))) "mixed" else "upper" else "lower"
    if (text_case == "upper") {
      dict <- lapply(dict, toupper)
      dict_chars$case <- "upper"
    }
    to.lower <- text_case == "lower"
  }
  if (to.lower && dict_chars$case != "lower") {
    dict <- lapply(dict, tolower)
    dict_chars$case <- "lower"
  }
  if (dict_chars$case != "mixed") ws <- (if (dict_chars$case == "lower") tolower else toupper)(ws)
  odict <- dict
  boundaries <- FALSE
  # convert each category's terms into a single (or per-term) regular expression
  formatdict <- function(dict, collapse = "|") {
    lab <- if (!escape) {
      lab <- lapply(dict, function(l) {
        if (!any(grepl("[][)(}{]", l))) {
          return(FALSE)
        }
        sl <- strsplit(l, "")
        # flag categories whose terms have unbalanced brackets; the per-term
        # character vector is `cs` (the original tested the list `sl`, which
        # always compared FALSE), and any one unbalanced bracket type breaks
        # the resulting regex, hence || rather than &
        !any(grepl("\\[.+\\]|\\(.+\\)|\\{.+\\}", l)) || any(vapply(
          sl, function(cs) {
            sum(cs == "[") != sum(cs == "]") ||
              sum(cs == "{") != sum(cs == "}") ||
              sum(cs == "(") != sum(cs == ")")
          }, TRUE
        ))
      })
      Filter(isTRUE, lab)
    } else {
      logical()
    }
    if (!partial) {
      s <- "^"
      e <- "$"
    } else {
      s <- e <- ""
    }
    rec <- "([][)(}{*.^$+?\\|\\\\])"
    if (length(lab)) {
      for (l in names(lab)) {
        dict[[l]][lab[[l]]] <- gsub("([][)(}{])", "\\\\\\1", dict[[l]][lab[[l]]])
      }
      rec <- "([*.^$+?\\|])"
    }
    res <- if (escape) {
      lapply(dict, function(l) {
        paste0(s, gsub(rec, "\\\\\\1", l, perl = TRUE), e, collapse = collapse)
      })
    } else {
      lapply(dict, function(l) paste(paste0(s, gsub("([+*])[+*]+", "\\\\\\1+", l), e), collapse = collapse))
    }
    if (glob) {
      # initial/terminal asterisks become open-ended (partial) matches
      lapply(res, function(l) {
        gsub(paste0(
          if (s == "^") "\\" else "", s, if (escape) "\\\\" else "", "\\*|",
          if (escape) "\\\\" else "", "\\*", if (e == "$") "\\" else "", e
        ), "", l)
      })
    } else {
      res
    }
  }
  # infer regex-related argument defaults from the dictionary's contents
  for (l in dict) {
    if (!boundaries) boundaries <- !any(grepl("^\\*|\\*$", l)) && any(grepl("^\\^|\\$$", l))
    if (missing(partial) && boundaries) partial <- TRUE
    if (missing(glob) && (any(grepl("([][}{.^$+?\\|\\\\])", l)) || any(grepl("\\w\\*\\w", l)))) glob <- FALSE
    if (missing(escape) && (boundaries || any(grepl("[.])][+*]|[.+*]\\?|\\[\\^", l))) &&
      !any(grepl("[({[][^])}]*$|^[^({[]*[])}]", l))) {
      escape <- FALSE
    }
  }
  cls <- 0
  if (is.null(term.weights)) {
    # total characters per category, to decide whether to chunk the pattern
    cls <- structure(numeric(length(dict)), names = names(dict))
    for (cat in seq_along(dict)) {
      ccls <- tryCatch(nchar(dict[[cat]]), error = function(e) NULL)
      if (is.null(ccls)) {
        warning(
          "dict appears to be misencoded, so results may not be as expected;\n",
          'might try reading the dictionary in with encoding = "latin1"'
        )
        dict[[cat]] <- iconv(dict[[cat]], sub = "#")
        ccls <- nchar(dict[[cat]])
      }
      cls[cat] <- sum(ccls)
    }
  }
  if (any(cls > term.break)) {
    # very large categories: match in chunks to avoid PCRE pattern-size limits
    br <- function(l, e = term.break) {
      f <- ceiling(cls[[l]] / e)
      l <- length(dict[[l]])
      e <- ceiling(l / f)
      o <- lapply(seq_len(f), function(i) seq_len(e) + e * (i - 1))
      o[[f]] <- o[[f]][o[[f]] <= l]
      o
    }
    op <- matrix(0, nrow(dtm), length(dict), dimnames = list(rownames(dtm), names(dict)))
    for (cat in names(dict)) {
      matches <- if (cls[[cat]] > term.break) {
        unique(unlist(lapply(br(cat), function(s) {
          grep(formatdict(list(dict[[cat]][s]))[[1]], ws, perl = TRUE)
        })))
      } else {
        grep(formatdict(list(dict[[cat]])), ws, perl = TRUE)
      }
      if (length(matches)) {
        op[, cat] <- rowSums(dtm[, matches, drop = FALSE], na.rm = TRUE)
      }
    }
  } else {
    if (!is.null(term.weights)) {
      # weighted scoring: map each dictionary pattern to matching dtm terms
      dict <- formatdict(dict, NULL)
      terms <- unique(unlist(dict))
      termmap <- lapply(terms, grep, ws, perl = TRUE, value = TRUE)
      names(termmap) <- unique(unlist(odict))
      termmap <- Filter(length, termmap)
      if (is.null(dim(term.weights))) {
        op <- matrix(0, nrow(dtm), length(dict), dimnames = list(rownames(dtm), names(dict)))
        if (length(termmap)) {
          weights <- lapply(names(term.weights), function(n) {
            l <- term.weights[[n]]
            if (is.null(names(l)) && n %in% names(dict) && length(dict[[n]]) == length(l)) {
              names(term.weights[[n]]) <- dict[[n]]
              l <- term.weights[[n]]
            }
            if (any(su <- !names(termmap) %in% names(l))) l[names(termmap)[su]] <- 0
            do.call(c, lapply(names(termmap), function(p) {
              structure(rep(l[[p]], length(termmap[[p]])), names = termmap[[p]])
            }))
          })
          names(weights) <- names(term.weights)
          for (cat in names(dict)) {
            if (length(weights[[cat]])) {
              op[, cat] <- as.numeric(dtm[, names(weights[[cat]]), drop = FALSE] %*% weights[[cat]])
            }
          }
        }
      } else {
        if (length(termmap)) {
          weights <- do.call(rbind, lapply(names(termmap), function(p) {
            matrix(
              rep(as.numeric(term.weights[p, ]), length(termmap[[p]])),
              ncol = ncol(term.weights),
              dimnames = list(termmap[[p]], colnames(term.weights))
            )
          }))
          op <- matrix(0, nrow(dtm), ncol(weights), dimnames = list(rownames(dtm), colnames(weights)))
          for (cat in colnames(op)) {
            op[, cat] <- as.numeric(dtm[, rownames(weights), drop = FALSE] %*% weights[, cat])
          }
        } else {
          # no dictionary term matched: return all-zero scores; dimensions come
          # from term.weights (the original referenced the undefined `weights`
          # here, which errored)
          op <- matrix(0, nrow(dtm), ncol(term.weights),
            dimnames = list(rownames(dtm), colnames(term.weights))
          )
        }
      }
    } else {
      # unweighted scoring: one combined pattern per category
      dict <- formatdict(dict)
      op <- vapply(names(dict), function(cat) {
        rowSums(dtm[, grep(dict[[cat]], ws, perl = TRUE), drop = FALSE], na.rm = TRUE)
      }, numeric(nrow(dtm)))
      if (nrow(dtm) == 1) {
        op <- t(op)
        rownames(op) <- 1
      }
    }
  }
  if (!is.null(bias)) for (n in names(bias)) if (n %in% colnames(op)) op[, n] <- op[, n] + bias[[n]]
  # carry over (or compute) per-document word counts
  attr(op, "WC") <- if ("WC" %in% atsn) {
    ats$WC
  } else if (all(vapply(seq_len(ncol(dtm)), function(i) {
    is.numeric(dtm[, i]) || is.integer(dtm[, i])
  }, TRUE))) {
    rowSums(dtm, na.rm = TRUE)
  } else {
    NULL
  }
  attr(op, "time") <- c(attr(dtm, "time"), termcat = proc.time()[[3]] - st)
  if ("type" %in% atsn) attr(op, "type") <- ats$type
  op
}
/R/lma_termcat.R
no_license
miserman/lingmatch
R
false
false
20,399
r
#' Document-Term Matrix Categorization
#'
#' Reduces the dimensions of a document-term matrix by dictionary-based categorization.
#' @param dtm A matrix with terms as column names.
#' @param dict The name of a provided dictionary
#'   (\href{https://osf.io/y6g5b/wiki/home}{osf.io/y6g5b/wiki}) or of a file found in
#'   \code{dir}, or a \code{list} object with named character vectors as word lists,
#'   or the path to a file to be read in by \code{\link{read.dic}}.
#' @param term.weights A \code{list} object with named numeric vectors lining up with the character
#'   vectors in \code{dict}, used to weight the terms in each \code{dict} vector. If a category in
#'   \code{dict} is not specified in \code{term.weights}, or the \code{dict} and \code{term.weights}
#'   vectors aren't the same length, the weights for that category will be 1.
#' @param bias A list or named vector specifying a constant to add to the named category. If a term
#'   matching \code{bias.name} is included in a category, its associated \code{weight} will be used
#'   as the \code{bias} for that category.
#' @param bias.name A character specifying a term to be used as a category bias; default is
#'   \code{'_intercept'}.
#' @param escape Logical indicating whether the terms in \code{dict} should not be treated as plain
#'   text (including asterisk wild cards). If \code{TRUE}, regular expression related characters are
#'   escaped. Set to \code{TRUE} if you get PCRE compilation errors.
#' @param partial Logical; if \code{TRUE} terms are partially matched (not padded by ^ and $).
#' @param glob Logical; if \code{TRUE} (default), will convert initial and terminal asterisks to
#'   partial matches.
#' @param to.lower Logical; if \code{TRUE} will lowercase dictionary terms. Otherwise, dictionary
#'   terms will be converted to match the terms if they are single-cased. Set to \code{FALSE} to
#'   always keep dictionary terms as entered.
#' @param term.filter A regular expression string used to format the text of each term (passed to
#'   \code{gsub}). For example, if terms are part-of-speech tagged (e.g.,
#'   \code{'a_DT'}), \code{'_.*'} would remove the tag.
#' @param term.break If a category has more than \code{term.break} characters, it will be processed
#'   in chunks. Reduce from 20000 if you get a PCRE compilation error.
#' @param dir Path to a folder in which to look for \code{dict}; \cr
#'   will look in \code{'~/Dictionaries'} by default. \cr
#'   Set a session default with \code{options(lingmatch.dict.dir = 'desired/path')}.
#' @seealso For applying pattern-based dictionaries (to raw text) see \code{\link{lma_patcat}}.
#' @family Dictionary functions
#' @return A matrix with a row per \code{dtm} row and columns per dictionary category, and a
#'   \code{WC} attribute with original word counts.
#' @examples
#' \dontrun{
#'
#' # Score texts with the NRC Affect Intensity Lexicon
#'
#' dict <- readLines("https://saifmohammad.com/WebDocs/NRC-AffectIntensity-Lexicon.txt")
#' dict <- read.table(
#'   text = dict[-seq_len(grep("term\tscore", dict, fixed = TRUE)[[1]])],
#'   col.names = c("term", "weight", "category")
#' )
#'
#' text <- c(
#'   angry = paste(
#'     "We are outraged by their hateful brutality,",
#'     "and by the way they terrorize us with their hatred."
#'   ),
#'   fearful = paste(
#'     "The horrific torture of that terrorist was tantamount",
#'     "to the terrorism of terrorists."
#'   ),
#'   joyous = "I am jubilant to be celebrating the bliss of this happiest happiness.",
#'   sad = paste(
#'     "They are nearly suicidal in their mourning after",
#'     "the tragic and heartbreaking holocaust."
#'   )
#' )
#'
#' emotion_scores <- lma_termcat(text, dict)
#' if (require("splot")) splot(emotion_scores ~ names(text), leg = "out")
#'
#' ## or use the standardized version (which includes more categories)
#'
#' emotion_scores <- lma_termcat(text, "nrc_eil", dir = "~/Dictionaries")
#' emotion_scores <- emotion_scores[, c("anger", "fear", "joy", "sadness")]
#' if (require("splot")) splot(emotion_scores ~ names(text), leg = "out")
#' }
#' @export
lma_termcat <- function(dtm, dict, term.weights = NULL, bias = NULL, bias.name = "_intercept",
                        escape = TRUE, partial = FALSE, glob = TRUE, term.filter = NULL,
                        term.break = 2e4, to.lower = FALSE,
                        dir = getOption("lingmatch.dict.dir")) {
  st <- proc.time()[[3]]
  # an unset option yields NULL, which would make `dir == ""` error in `if`;
  # treat NULL like "" and fall back to the default dictionary folder
  if (ckd <- is.null(dir) || dir == "") dir <- "~/Dictionaries"
  if (missing(dict)) dict <- lma_dict(1:9)
  if (is.factor(dict)) dict <- as.character(dict)
  # a single space-free string may name a provided dictionary or a file
  if (is.character(dict) && length(dict) == 1 && missing(term.weights) &&
    (file.exists(dict) || !grepl("\\s", dict))) {
    if (!file.exists(dict) && any(file.exists(normalizePath(paste0(dir, "/", dict), "/", FALSE)))) {
      dict <- normalizePath(paste0(dir, "/", dict))
    }
    td <- tryCatch(read.dic(dict, dir = if (ckd) "" else dir), error = function(e) NULL)
    dict <- if (is.null(td)) list(cat1 = dict) else td
  }
  # data.frame/matrix dictionaries: separate term columns from numeric weight columns
  if (!is.null(dim(dict))) {
    if (!is.null(term.weights)) {
      if (is.character(term.weights) && any(su <- term.weights %in% colnames(dict))) {
        term.weights <- dict[, term.weights[su], drop = FALSE]
      }
      if (!is.null(dim(term.weights))) {
        term.weights <- term.weights[, vapply(
          seq_len(ncol(term.weights)),
          function(col) is.numeric(term.weights[, col]), TRUE
        )]
      }
    } else if (any(su <- vapply(seq_len(ncol(dict)), function(col) is.numeric(dict[, col]), TRUE))) {
      term.weights <- dict[, su, drop = FALSE]
      dict <- if (all(su)) {
        if (!is.null(rownames(dict))) {
          data.frame(term = rownames(dict), stringsAsFactors = FALSE)
        } else {
          term.weights <- if (ncol(term.weights) == 1) NULL else term.weights[, -1, drop = FALSE]
          dict[, 1, drop = FALSE]
        }
      } else {
        dict[, !su, drop = FALSE]
      }
    }
    if (!is.null(rownames(dict)) && ncol(dict) == 1 && any(grepl("^[a-z]", rownames(dict), TRUE))) {
      dict <- rownames(dict)
    } else {
      su <- vapply(seq_len(ncol(dict)), function(col) !is.numeric(dict[, col]), TRUE)
      if (!any(su)) stop("no terms found in dictionary")
      dict <- if (sum(su) > 1) {
        # multiple non-numeric columns: pick term and category columns heuristically
        su <- which(su)
        if (!is.null(term.weights) && (!is.list(term.weights) || ncol(term.weights) == 1)) {
          if (is.list(term.weights)) term.weights <- term.weights[, 1]
          ssu <- vapply(su, function(col) length(unique(dict[, col])), 0) + seq(length(su), 1)
          term.weights <- split(term.weights, dict[, which.min(ssu)])
          dict <- split(dict[, which.max(ssu)], dict[, which.min(ssu)])
        } else {
          ssu <- vapply(su, function(col) anyDuplicated(dict[, col]) == 0, TRUE)
          if (any(ssu)) dict[, su[ssu][1]] else dict[, su[1]]
        }
      } else {
        dict[, su]
      }
    }
  }
  # a named numeric vector is terms (names) plus weights (values)
  if (is.numeric(dict) && is.null(term.weights)) {
    term.weights <- dict
    dict <- names(dict)
  }
  if (is.factor(dict)) dict <- as.character(dict)
  # normalize matrix-like term.weights to a numeric data.frame with term rownames
  if (!is.null(dim(term.weights))) {
    if (is.null(colnames(term.weights))) {
      colnames(term.weights) <- if (length(dict) == length(term.weights)) {
        names(dict)
      } else {
        paste0("cat", seq_len(ncol(term.weights)))
      }
    }
    if (!is.data.frame(term.weights)) {
      term.weights <- as.data.frame(term.weights, stringsAsFactors = FALSE)
    }
    su <- vapply(term.weights, is.numeric, TRUE)
    if (any(!su)) {
      if (any(ssu <- !su & vapply(term.weights, anyDuplicated, 0) == 0)) {
        rownames(term.weights) <- term.weights[, which(ssu)[1]]
      }
      term.weights <- term.weights[, su]
    }
    if (!length(term.weights)) stop("no numeric columns in term.weights")
  }
  if (!is.list(dict)) {
    dict <- if (is.matrix(dict)) {
      as.data.frame(dict, stringsAsFactors = FALSE)
    } else if (is.character(dict) && length(dict) == 1 &&
      (file.exists(dict) || dict %in% rownames(select.dict()$info))) {
      read.dic(dict, dir = if (ckd) "" else dir)
    } else {
      list(dict)
    }
  }
  # make sure every category has a name
  if (is.list(dict)) {
    if (is.null(names(dict))) {
      tn <- if (!is.null(colnames(term.weights))) colnames(term.weights) else names(term.weights)
      names(dict) <- if (!is.null(tn) && length(tn) == length(dict)) tn else paste0("cat", seq_along(dict))
    } else if (any(su <- names(dict) == "")) {
      names(dict)[su] <- if (sum(su) == 1) "cat_unnamed" else paste0("cat_unnamed", seq_len(sum(su)))
      if (!is.null(term.weights) && any(su <- names(term.weights) == "")) {
        names(term.weights)[su] <- if (sum(su) == 1) "cat_unnamed" else paste0("cat_unnamed", seq_len(sum(su)))
      }
    }
  } else {
    dict <- list(dict)
  }
  # align term.weights with dict: same categories, weights named by term
  if (!is.null(term.weights)) {
    if (is.null(dim(term.weights))) {
      if (is.list(term.weights)) {
        if (length(dict) != length(term.weights) && !is.null(names(term.weights[[1]]))) dict <- term.weights
        if (length(dict) == length(term.weights) && !all(names(dict) %in% names(term.weights))) {
          if (is.null(names(term.weights)) || !any(names(term.weights) %in% names(dict))) {
            names(term.weights) <- names(dict)
          } else {
            # categories without weights default to a weight of 1 per term
            for (cat in names(dict)[!names(dict) %in% names(term.weights)]) {
              term.weights[cat] <- structure(rep(1, length(dict[[cat]])), names = dict[[cat]])
            }
          }
        }
        for (cat in names(dict)) {
          if (is.null(names(term.weights[[cat]]))) {
            if (length(term.weights[[cat]]) == length(dict[[cat]])) {
              names(term.weights[[cat]]) <- dict[[cat]]
            } else {
              term.weights[[cat]] <- structure(rep(1, length(dict[[cat]])), names = dict[[cat]])
            }
          }
        }
      } else {
        if (is.null(names(term.weights))) {
          if (length(dict[[1]]) == length(term.weights)) {
            term.weights <- list(term.weights)
            names(term.weights) <- names(dict)
            names(term.weights[[1]]) <- dict[[1]]
          } else {
            term.weights <- NULL
            warning("term.weights were dropped as they could not be aligned with dict")
          }
        }
      }
    } else {
      if (length(dict) == 1 && length(dict[[1]]) == nrow(term.weights) &&
        !any(grepl("[a-z]", rownames(term.weights), TRUE))) {
        if (is.factor(dict[[1]])) dict[[1]] <- as.character(dict[[1]])
        if (anyDuplicated(dict[[1]])) {
          # collapse duplicated terms by averaging their weights
          dt <- unique(dict[[1]][duplicated(dict[[1]])])
          su <- dict[[1]] %in% dt
          td <- term.weights[su, ]
          tw <- matrix(0, length(dt), ncol(term.weights), dimnames = list(dt, colnames(term.weights)))
          for (term in dt) tw[term, ] <- colMeans(term.weights[dict[[1]] == term, , drop = FALSE], na.rm = TRUE)
          term.weights <- rbind(term.weights[!su, ], tw)
          rownames(term.weights) <- c(dict[[1]][!su], dt)
          dict[[1]] <- rownames(term.weights)
        } else {
          rownames(term.weights) <- dict[[1]]
        }
      }
    }
    if (!is.null(term.weights)) {
      if (!is.list(term.weights)) term.weights <- list(term.weights)
      dlen <- length(dict)
      if (is.null(names(term.weights))) {
        names(term.weights) <- if (length(term.weights) == dlen) names(dict) else seq_along(term.weights)
      }
      if (length(term.weights) > dlen && dlen == 1 &&
        all(vapply(term.weights, length, 0) == length(dict[[1]]))) {
        dict <- lapply(term.weights, function(ws) dict[[1]])
      }
    }
  }
  # ensure each category is a character vector of terms
  dict <- lapply(dict, function(cat) {
    if (!is.character(cat)) {
      if (is.null(names(cat))) as.character(cat) else names(cat)
    } else {
      cat
    }
  })
  if (!is.null(bias) && is.null(names(bias))) {
    names(bias) <- if (length(bias) == length(dict)) names(dict) else seq_along(bias)
  }
  if (!is.null(names(term.weights)) && length(names(term.weights)) == length(dict)) names(dict) <- names(term.weights)
  # a term matching bias.name becomes that category's intercept
  for (n in names(dict)) {
    if (!n %in% names(bias) && any(ii <- !is.na(dict[[n]]) & dict[[n]] == bias.name)) {
      bias[n] <- term.weights[[n]][ii]
      term.weights[[n]][ii] <- 0
    }
  }
  # characterize the dictionary's character set to guide casing and tokenization
  dict_chars <- list(
    all = paste(unique(strsplit(paste0(unique(unlist(dict, use.names = FALSE)), collapse = ""), "")[[1]]),
      collapse = ""
    )
  )
  dict_chars$alpha <- gsub("[^A-Za-z]", "", dict_chars$all)
  dict_chars$case <- if (grepl("[A-Z]", dict_chars$alpha)) {
    if (grepl("[a-z]", dict_chars$alpha)) "mixed" else "upper"
  } else {
    "lower"
  }
  edtm <- substitute(dtm)
  if (is.factor(dtm)) dtm <- as.character(dtm)
  if (is.character(dtm) || !any(grepl("\\s", colnames(dtm)))) {
    if (any(grepl("\\s", unlist(dict, use.names = FALSE)))) {
      # multi-word dictionary terms cannot be matched in a dtm; defer to lma_patcat
      if (is.character(dtm)) {
        warning(
          "dict has terms with spaces, so using lma_patcat instead;",
          "\n enter a dtm (e.g., lma_dtm(", edtm, ")) to force lma_termcat use"
        )
        args <- list(text = dtm, dict = dict)
        if (!is.null(term.weights)) args$pattern.weights <- term.weights
        if (!is.null(bias)) args$bias <- bias
        if (!missing(glob)) args$globtoregex <- glob
        if (!missing(partial) && !partial) args$boundary <- "\\b"
        if (!missing(dir)) args$dir <- if (ckd) "" else dir
        return(do.call(lma_patcat, args))
      }
    }
    if (is.character(dtm)) {
      if (dict_chars$case == "upper") dtm <- toupper(dtm)
      dtm <- lma_dtm(dtm,
        numbers = grepl("[0-9]", dict_chars$all),
        punct = grepl('[_/\\?!."-]', dict_chars$all),
        to.lower = dict_chars$case == "lower"
      )
    }
  }
  if (is.null(dim(dtm))) dtm <- t(dtm)
  ats <- attributes(dtm)[c("opts", "WC", "type")]
  ats <- ats[!vapply(ats, is.null, TRUE)]
  atsn <- names(ats)
  ws <- if (is.null(term.filter)) colnames(dtm) else gsub(term.filter, "", colnames(dtm), perl = TRUE)
  # match dictionary case to the dtm's terms when both are single-cased
  if ((missing(to.lower) || !is.logical(to.lower)) && dict_chars$case != "mixed") {
    text_case <- if (any(grepl("[A-Z]", ws))) if (any(grepl("[a-z]", ws))) "mixed" else "upper" else "lower"
    if (text_case == "upper") {
      dict <- lapply(dict, toupper)
      dict_chars$case <- "upper"
    }
    to.lower <- text_case == "lower"
  }
  if (to.lower && dict_chars$case != "lower") {
    dict <- lapply(dict, tolower)
    dict_chars$case <- "lower"
  }
  if (dict_chars$case != "mixed") ws <- (if (dict_chars$case == "lower") tolower else toupper)(ws)
  odict <- dict
  boundaries <- FALSE
  # convert each category's terms into a single (or per-term) regular expression
  formatdict <- function(dict, collapse = "|") {
    lab <- if (!escape) {
      lab <- lapply(dict, function(l) {
        if (!any(grepl("[][)(}{]", l))) {
          return(FALSE)
        }
        sl <- strsplit(l, "")
        # flag categories whose terms have unbalanced brackets; the per-term
        # character vector is `cs` (the original tested the list `sl`, which
        # always compared FALSE), and any one unbalanced bracket type breaks
        # the resulting regex, hence || rather than &
        !any(grepl("\\[.+\\]|\\(.+\\)|\\{.+\\}", l)) || any(vapply(
          sl, function(cs) {
            sum(cs == "[") != sum(cs == "]") ||
              sum(cs == "{") != sum(cs == "}") ||
              sum(cs == "(") != sum(cs == ")")
          }, TRUE
        ))
      })
      Filter(isTRUE, lab)
    } else {
      logical()
    }
    if (!partial) {
      s <- "^"
      e <- "$"
    } else {
      s <- e <- ""
    }
    rec <- "([][)(}{*.^$+?\\|\\\\])"
    if (length(lab)) {
      for (l in names(lab)) {
        dict[[l]][lab[[l]]] <- gsub("([][)(}{])", "\\\\\\1", dict[[l]][lab[[l]]])
      }
      rec <- "([*.^$+?\\|])"
    }
    res <- if (escape) {
      lapply(dict, function(l) {
        paste0(s, gsub(rec, "\\\\\\1", l, perl = TRUE), e, collapse = collapse)
      })
    } else {
      lapply(dict, function(l) paste(paste0(s, gsub("([+*])[+*]+", "\\\\\\1+", l), e), collapse = collapse))
    }
    if (glob) {
      # initial/terminal asterisks become open-ended (partial) matches
      lapply(res, function(l) {
        gsub(paste0(
          if (s == "^") "\\" else "", s, if (escape) "\\\\" else "", "\\*|",
          if (escape) "\\\\" else "", "\\*", if (e == "$") "\\" else "", e
        ), "", l)
      })
    } else {
      res
    }
  }
  # infer regex-related argument defaults from the dictionary's contents
  for (l in dict) {
    if (!boundaries) boundaries <- !any(grepl("^\\*|\\*$", l)) && any(grepl("^\\^|\\$$", l))
    if (missing(partial) && boundaries) partial <- TRUE
    if (missing(glob) && (any(grepl("([][}{.^$+?\\|\\\\])", l)) || any(grepl("\\w\\*\\w", l)))) glob <- FALSE
    if (missing(escape) && (boundaries || any(grepl("[.])][+*]|[.+*]\\?|\\[\\^", l))) &&
      !any(grepl("[({[][^])}]*$|^[^({[]*[])}]", l))) {
      escape <- FALSE
    }
  }
  cls <- 0
  if (is.null(term.weights)) {
    # total characters per category, to decide whether to chunk the pattern
    cls <- structure(numeric(length(dict)), names = names(dict))
    for (cat in seq_along(dict)) {
      ccls <- tryCatch(nchar(dict[[cat]]), error = function(e) NULL)
      if (is.null(ccls)) {
        warning(
          "dict appears to be misencoded, so results may not be as expected;\n",
          'might try reading the dictionary in with encoding = "latin1"'
        )
        dict[[cat]] <- iconv(dict[[cat]], sub = "#")
        ccls <- nchar(dict[[cat]])
      }
      cls[cat] <- sum(ccls)
    }
  }
  if (any(cls > term.break)) {
    # very large categories: match in chunks to avoid PCRE pattern-size limits
    br <- function(l, e = term.break) {
      f <- ceiling(cls[[l]] / e)
      l <- length(dict[[l]])
      e <- ceiling(l / f)
      o <- lapply(seq_len(f), function(i) seq_len(e) + e * (i - 1))
      o[[f]] <- o[[f]][o[[f]] <= l]
      o
    }
    op <- matrix(0, nrow(dtm), length(dict), dimnames = list(rownames(dtm), names(dict)))
    for (cat in names(dict)) {
      matches <- if (cls[[cat]] > term.break) {
        unique(unlist(lapply(br(cat), function(s) {
          grep(formatdict(list(dict[[cat]][s]))[[1]], ws, perl = TRUE)
        })))
      } else {
        grep(formatdict(list(dict[[cat]])), ws, perl = TRUE)
      }
      if (length(matches)) {
        op[, cat] <- rowSums(dtm[, matches, drop = FALSE], na.rm = TRUE)
      }
    }
  } else {
    if (!is.null(term.weights)) {
      # weighted scoring: map each dictionary pattern to matching dtm terms
      dict <- formatdict(dict, NULL)
      terms <- unique(unlist(dict))
      termmap <- lapply(terms, grep, ws, perl = TRUE, value = TRUE)
      names(termmap) <- unique(unlist(odict))
      termmap <- Filter(length, termmap)
      if (is.null(dim(term.weights))) {
        op <- matrix(0, nrow(dtm), length(dict), dimnames = list(rownames(dtm), names(dict)))
        if (length(termmap)) {
          weights <- lapply(names(term.weights), function(n) {
            l <- term.weights[[n]]
            if (is.null(names(l)) && n %in% names(dict) && length(dict[[n]]) == length(l)) {
              names(term.weights[[n]]) <- dict[[n]]
              l <- term.weights[[n]]
            }
            if (any(su <- !names(termmap) %in% names(l))) l[names(termmap)[su]] <- 0
            do.call(c, lapply(names(termmap), function(p) {
              structure(rep(l[[p]], length(termmap[[p]])), names = termmap[[p]])
            }))
          })
          names(weights) <- names(term.weights)
          for (cat in names(dict)) {
            if (length(weights[[cat]])) {
              op[, cat] <- as.numeric(dtm[, names(weights[[cat]]), drop = FALSE] %*% weights[[cat]])
            }
          }
        }
      } else {
        if (length(termmap)) {
          weights <- do.call(rbind, lapply(names(termmap), function(p) {
            matrix(
              rep(as.numeric(term.weights[p, ]), length(termmap[[p]])),
              ncol = ncol(term.weights),
              dimnames = list(termmap[[p]], colnames(term.weights))
            )
          }))
          op <- matrix(0, nrow(dtm), ncol(weights), dimnames = list(rownames(dtm), colnames(weights)))
          for (cat in colnames(op)) {
            op[, cat] <- as.numeric(dtm[, rownames(weights), drop = FALSE] %*% weights[, cat])
          }
        } else {
          # no dictionary term matched: return all-zero scores; dimensions come
          # from term.weights (the original referenced the undefined `weights`
          # here, which errored)
          op <- matrix(0, nrow(dtm), ncol(term.weights),
            dimnames = list(rownames(dtm), colnames(term.weights))
          )
        }
      }
    } else {
      # unweighted scoring: one combined pattern per category
      dict <- formatdict(dict)
      op <- vapply(names(dict), function(cat) {
        rowSums(dtm[, grep(dict[[cat]], ws, perl = TRUE), drop = FALSE], na.rm = TRUE)
      }, numeric(nrow(dtm)))
      if (nrow(dtm) == 1) {
        op <- t(op)
        rownames(op) <- 1
      }
    }
  }
  if (!is.null(bias)) for (n in names(bias)) if (n %in% colnames(op)) op[, n] <- op[, n] + bias[[n]]
  # carry over (or compute) per-document word counts
  attr(op, "WC") <- if ("WC" %in% atsn) {
    ats$WC
  } else if (all(vapply(seq_len(ncol(dtm)), function(i) {
    is.numeric(dtm[, i]) || is.integer(dtm[, i])
  }, TRUE))) {
    rowSums(dtm, na.rm = TRUE)
  } else {
    NULL
  }
  attr(op, "time") <- c(attr(dtm, "time"), termcat = proc.time()[[3]] - st)
  if ("type" %in% atsn) attr(op, "type") <- ats$type
  op
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.rekognition_operations.R \name{start_face_detection} \alias{start_face_detection} \title{Starts asynchronous detection of faces in a stored video} \usage{ start_face_detection(Video, ClientRequestToken = NULL, NotificationChannel = NULL, FaceAttributes = NULL, JobTag = NULL) } \arguments{ \item{Video}{[required] The video in which you want to detect faces. The video must be stored in an Amazon S3 bucket.} \item{ClientRequestToken}{Idempotent token used to identify the start request. If you use the same token with multiple \code{StartFaceDetection} requests, the same \code{JobId} is returned. Use \code{ClientRequestToken} to prevent the same job from being accidentally started more than once.} \item{NotificationChannel}{The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation.} \item{FaceAttributes}{The face attributes you want returned. \code{DEFAULT} - The following subset of facial attributes are returned: BoundingBox, Confidence, Pose, Quality and Landmarks. \code{ALL} - All facial attributes are returned.} \item{JobTag}{Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.} } \description{ Starts asynchronous detection of faces in a stored video. } \details{ Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. \code{StartFaceDetection} returns a job identifier (\code{JobId}) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in \code{NotificationChannel}. 
To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is \code{SUCCEEDED}. If so, call GetFaceDetection and pass the job identifier (\code{JobId}) from the initial call to \code{StartFaceDetection}. For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide. } \section{Accepted Parameters}{ \preformatted{start_face_detection( Video = list( S3Object = list( Bucket = "string", Name = "string", Version = "string" ) ), ClientRequestToken = "string", NotificationChannel = list( SNSTopicArn = "string", RoleArn = "string" ), FaceAttributes = "DEFAULT"|"ALL", JobTag = "string" ) } }
/service/paws.rekognition/man/start_face_detection.Rd
permissive
CR-Mercado/paws
R
false
true
2,626
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.rekognition_operations.R \name{start_face_detection} \alias{start_face_detection} \title{Starts asynchronous detection of faces in a stored video} \usage{ start_face_detection(Video, ClientRequestToken = NULL, NotificationChannel = NULL, FaceAttributes = NULL, JobTag = NULL) } \arguments{ \item{Video}{[required] The video in which you want to detect faces. The video must be stored in an Amazon S3 bucket.} \item{ClientRequestToken}{Idempotent token used to identify the start request. If you use the same token with multiple \code{StartFaceDetection} requests, the same \code{JobId} is returned. Use \code{ClientRequestToken} to prevent the same job from being accidentally started more than once.} \item{NotificationChannel}{The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation.} \item{FaceAttributes}{The face attributes you want returned. \code{DEFAULT} - The following subset of facial attributes are returned: BoundingBox, Confidence, Pose, Quality and Landmarks. \code{ALL} - All facial attributes are returned.} \item{JobTag}{Unique identifier you specify to identify the job in the completion status published to the Amazon Simple Notification Service topic.} } \description{ Starts asynchronous detection of faces in a stored video. } \details{ Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. \code{StartFaceDetection} returns a job identifier (\code{JobId}) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in \code{NotificationChannel}. 
To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is \code{SUCCEEDED}. If so, call GetFaceDetection and pass the job identifier (\code{JobId}) from the initial call to \code{StartFaceDetection}. For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide. } \section{Accepted Parameters}{ \preformatted{start_face_detection( Video = list( S3Object = list( Bucket = "string", Name = "string", Version = "string" ) ), ClientRequestToken = "string", NotificationChannel = list( SNSTopicArn = "string", RoleArn = "string" ), FaceAttributes = "DEFAULT"|"ALL", JobTag = "string" ) } }
# plot2.R -- line plot of Global Active Power over time for 2007-02-01/02.

# Remember the current LC_TIME locale so it can be restored at the end,
# then switch to English so date labels render consistently.
saved_locale <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")

# Read the full dataset; fields are ";"-separated and "?" marks missing values.
power <- read.csv("household_power_consumption.txt",
                  header = TRUE,
                  sep = ";",
                  colClasses = c("character", "character", rep("numeric", 7)),
                  na.strings = "?")

# Keep only the two days of interest (dates are still d/m/Y strings here).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

# Build proper date-time columns from the raw character fields.
power$Time <- strptime(paste(power$Date, power$Time),
                       format = "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

# Render the plot to a 480x480 PNG file.
png(filename = "plot2.png", width = 480, height = 480, units = "px",
    pointsize = 12, bg = "white")
plot(x = power$Time, y = power$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()

# Put the locale back the way it was.
Sys.setlocale("LC_TIME", saved_locale)
/plot2.R
no_license
h1427096/ExData_Plotting1
R
false
false
931
r
# plot2.R -- line plot of Global Active Power over time for 2007-02-01/02.

# Remember the current LC_TIME locale so it can be restored at the end,
# then switch to English so date labels render consistently.
saved_locale <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")

# Read the full dataset; fields are ";"-separated and "?" marks missing values.
power <- read.csv("household_power_consumption.txt",
                  header = TRUE,
                  sep = ";",
                  colClasses = c("character", "character", rep("numeric", 7)),
                  na.strings = "?")

# Keep only the two days of interest (dates are still d/m/Y strings here).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

# Build proper date-time columns from the raw character fields.
power$Time <- strptime(paste(power$Date, power$Time),
                       format = "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

# Render the plot to a 480x480 PNG file.
png(filename = "plot2.png", width = 480, height = 480, units = "px",
    pointsize = 12, bg = "white")
plot(x = power$Time, y = power$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()

# Put the locale back the way it was.
Sys.setlocale("LC_TIME", saved_locale)
# setup -------------------------------------------------------------------

library(tidyverse)

# data --------------------------------------------------------------------

data <- readRDS("~/../Downloads/06_dvars.rds")

glimpse(data)

data %>% count(segmento_ori, segmento.x, segmento.y)
data %>% count(incum)

# remover variables en monto ----------------------------------------------

# Drop raw amount columns (period means/maxes and available-purchase max)
# while keeping percentage, ratio, installment-count and delinquency columns.
vars_to_rm <- data %>%
  select(
    matches("mean_\\d{2}$"),
    matches("max_\\d{2}$"),
    matches("max_disponible_compra"),
    -matches("porc"),
    -matches("ratio"),
    -matches("n_inst"),
    -matches("mora")
  ) %>%
  names()

data <- select(data, setdiff(names(data), vars_to_rm))

# select ------------------------------------------------------------------

# Keep one segment, drop identifying / target-leaking columns, standardise
# the key column names, shuffle the rows, and replace the real id column by
# the (shuffled) row number.
d <- data %>%
  filter(segmento.x == "Segmento 4 Montos Bajos") %>%
  select(
    -contains("segmento"),
    -incum,
    -periodo_defuncion_m12,
    -sexo
  ) %>%
  rename(
    id = rut,
    time = periodo,
    bad_good = incum12
  ) %>%
  select(id, time, bad_good, everything()) %>%
  sample_frac(1) %>%
  mutate(id = row_number())

names(d)
names(d %>% select(where(negate(is.numeric))))

# rename ------------------------------------------------------------------

# Map Spanish variable-name tokens to neutral/anonymised English-ish tokens.
# Fix: the "_porc_" -> "_perc_" and "_porc$" -> "_perc" replacements appeared
# twice in the original chain; the trailing duplicates were no-ops (the text
# is already replaced by the first pass) and have been removed.
rename_vars <- function(x = c("saldo", "saldo_car", "saldo_car_max")) {
  x %>%
    str_replace_all("banca_", "matur_") %>%
    str_replace_all("marca_base", "basemark") %>%
    str_replace_all("producto_integrado", "interprod") %>%
    str_replace_all("m4", "n5") %>%
    str_replace_all("r04", "r50") %>%
    str_replace_all("edad", "age") %>%
    str_replace_all("hipoteca", "jom") %>%
    str_replace_all("saldo_", "monte_") %>%
    str_replace_all("_car_", "_auto_") %>%
    str_replace_all("_car$", "_auto") %>%
    str_replace_all("_banco_", "_bank_") %>%
    str_replace_all("_banco$", "_bank") %>%
    str_replace_all("_avance_", "_advance_") %>%
    str_replace_all("_avance$", "_advance") %>%
    str_replace_all("_sav_", "_save_") %>%
    str_replace_all("_sav$", "_save") %>%
    str_replace_all("_cad_", "_dac_") %>%
    str_replace_all("_cad$", "_dac") %>%
    str_replace_all("_deuda_", "_dado_") %>%
    str_replace_all("_deuda$", "_dado") %>%
    str_replace_all("uso_", "puso_") %>%
    str_replace_all("_uso_", "_puso_") %>%
    str_replace_all("_uso$", "_puso") %>%
    str_replace_all("_porc_", "_perc_") %>%
    str_replace_all("_porc$", "_perc") %>%
    str_replace_all("dias_", "time_") %>%
    str_replace_all("_prom_", "_mean_")
}

d <- d %>% rename_with(rename_vars)

# Quick visual check of the anonymised names (in random order).
d %>%
  rename_with(rename_vars) %>%
  names() %>%
  sample()

# change var --------------------------------------------------------------

# Recode a non-numeric column to anonymous letter levels (A, B, C, ...).
change_char <- function(x = head(data$marca_base, 10)) {
  x %>%
    as.factor() %>%
    as.numeric() %>%
    LETTERS[.]
}

d <- d %>% mutate(across(where(negate(is.numeric)), change_char))

# export ------------------------------------------------------------------

glimpse(d %>% select(1:30))
dim(d)

saveRDS(d, "data-raw/cd.rds")
/dev/data_2_cd.R
permissive
jbkunst/risk3r
R
false
false
3,093
r
# setup -------------------------------------------------------------------

library(tidyverse)

# data --------------------------------------------------------------------

data <- readRDS("~/../Downloads/06_dvars.rds")

glimpse(data)

data %>% count(segmento_ori, segmento.x, segmento.y)
data %>% count(incum)

# remover variables en monto ----------------------------------------------

# Drop raw amount columns (period means/maxes and available-purchase max)
# while keeping percentage, ratio, installment-count and delinquency columns.
vars_to_rm <- data %>%
  select(
    matches("mean_\\d{2}$"),
    matches("max_\\d{2}$"),
    matches("max_disponible_compra"),
    -matches("porc"),
    -matches("ratio"),
    -matches("n_inst"),
    -matches("mora")
  ) %>%
  names()

data <- select(data, setdiff(names(data), vars_to_rm))

# select ------------------------------------------------------------------

# Keep one segment, drop identifying / target-leaking columns, standardise
# the key column names, shuffle the rows, and replace the real id column by
# the (shuffled) row number.
d <- data %>%
  filter(segmento.x == "Segmento 4 Montos Bajos") %>%
  select(
    -contains("segmento"),
    -incum,
    -periodo_defuncion_m12,
    -sexo
  ) %>%
  rename(
    id = rut,
    time = periodo,
    bad_good = incum12
  ) %>%
  select(id, time, bad_good, everything()) %>%
  sample_frac(1) %>%
  mutate(id = row_number())

names(d)
names(d %>% select(where(negate(is.numeric))))

# rename ------------------------------------------------------------------

# Map Spanish variable-name tokens to neutral/anonymised English-ish tokens.
# Fix: the "_porc_" -> "_perc_" and "_porc$" -> "_perc" replacements appeared
# twice in the original chain; the trailing duplicates were no-ops (the text
# is already replaced by the first pass) and have been removed.
rename_vars <- function(x = c("saldo", "saldo_car", "saldo_car_max")) {
  x %>%
    str_replace_all("banca_", "matur_") %>%
    str_replace_all("marca_base", "basemark") %>%
    str_replace_all("producto_integrado", "interprod") %>%
    str_replace_all("m4", "n5") %>%
    str_replace_all("r04", "r50") %>%
    str_replace_all("edad", "age") %>%
    str_replace_all("hipoteca", "jom") %>%
    str_replace_all("saldo_", "monte_") %>%
    str_replace_all("_car_", "_auto_") %>%
    str_replace_all("_car$", "_auto") %>%
    str_replace_all("_banco_", "_bank_") %>%
    str_replace_all("_banco$", "_bank") %>%
    str_replace_all("_avance_", "_advance_") %>%
    str_replace_all("_avance$", "_advance") %>%
    str_replace_all("_sav_", "_save_") %>%
    str_replace_all("_sav$", "_save") %>%
    str_replace_all("_cad_", "_dac_") %>%
    str_replace_all("_cad$", "_dac") %>%
    str_replace_all("_deuda_", "_dado_") %>%
    str_replace_all("_deuda$", "_dado") %>%
    str_replace_all("uso_", "puso_") %>%
    str_replace_all("_uso_", "_puso_") %>%
    str_replace_all("_uso$", "_puso") %>%
    str_replace_all("_porc_", "_perc_") %>%
    str_replace_all("_porc$", "_perc") %>%
    str_replace_all("dias_", "time_") %>%
    str_replace_all("_prom_", "_mean_")
}

d <- d %>% rename_with(rename_vars)

# Quick visual check of the anonymised names (in random order).
d %>%
  rename_with(rename_vars) %>%
  names() %>%
  sample()

# change var --------------------------------------------------------------

# Recode a non-numeric column to anonymous letter levels (A, B, C, ...).
change_char <- function(x = head(data$marca_base, 10)) {
  x %>%
    as.factor() %>%
    as.numeric() %>%
    LETTERS[.]
}

d <- d %>% mutate(across(where(negate(is.numeric)), change_char))

# export ------------------------------------------------------------------

glimpse(d %>% select(1:30))
dim(d)

saveRDS(d, "data-raw/cd.rds")
context("sfattributes")

# Fix: `quiet = T` used the reassignable alias T; spelled out as TRUE.

test_that("sf attributes are extracted", {
  testthat::skip_on_cran()
  library(sf)

  nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)
  enc <- encode(nc)
  sfAttrs <- sfAttributes(enc)

  expect_equal(sfAttrs$type, "MULTIPOLYGON")
  expect_equal(sfAttrs$dim, "XY")
  # bbox order: xmin, ymin, xmax, ymax (rounded to 3 decimals)
  expect_equal(round(sfAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfAttrs$bbox[[4]], 3), 36.59)
  expect_equal(sfAttrs$epsg, 4267)
  expect_equal(sfAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
})

test_that("sf attributes extracted", {
  testthat::skip_on_cran()
  library(sf)

  nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)

  ## from sf obj
  sfAttrs <- googlePolylines:::sfGeometryAttributes(nc)
  ## from sfc column
  sfcAttrs <- googlePolylines:::sfGeometryAttributes(nc[['geometry']])

  expect_equal(sfAttrs$type, "MULTIPOLYGON")
  expect_equal(sfcAttrs$type, "MULTIPOLYGON")
  expect_equal(sfAttrs$dim, "XY")
  expect_equal(sfcAttrs$dim, "XY")
  expect_equal(round(sfAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfcAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfcAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfcAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfAttrs$bbox[[4]], 3), 36.59)
  expect_equal(round(sfcAttrs$bbox[[4]], 3), 36.59)
  expect_equal(sfAttrs$epsg, 4267)
  expect_equal(sfcAttrs$epsg, 4267)
  expect_equal(sfAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
  expect_equal(sfcAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
})

test_that("geometry rows extracted", {
  testthat::skip_on_cran()
  library(sf)

  df <- data.frame(
    myId = c(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2),
    lineId = c(1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2),
    lon = c(-80.190, -66.118, -64.757, -80.190, -70.579, -67.514, -66.668,
            -70.579, -70, -49, -51, -70),
    lat = c(26.774, 18.466, 32.321, 26.774, 28.745, 29.570, 27.339, 28.745,
            22, 23, 22, 22)
  )

  p1 <- as.matrix(df[1:4, c("lon", "lat")])
  p2 <- as.matrix(df[5:8, c("lon", "lat")])
  p3 <- as.matrix(df[9:12, c("lon", "lat")])

  # multipoint / multipolygon are constructed but not included in `sf`
  # below; they only exercise the sfc constructors.
  point <- sf::st_sfc(sf::st_point(x = c(df[1, "lon"], df[1, "lat"])))
  multipoint <- sf::st_sfc(sf::st_multipoint(x = as.matrix(df[1:2, c("lon", "lat")])))
  polygon <- sf::st_sfc(sf::st_polygon(x = list(p1, p2)))
  linestring <- sf::st_sfc(sf::st_linestring(p3))
  multilinestring <- sf::st_sfc(sf::st_multilinestring(list(p1, p2)))
  multipolygon <- sf::st_sfc(sf::st_multipolygon(x = list(list(p1, p2), list(p3))))

  sf <- rbind(
    sf::st_sf(geo = polygon),
    sf::st_sf(geo = multilinestring),
    sf::st_sf(geo = linestring),
    sf::st_sf(geo = point)
  )

  enc <- encode(sf)

  expect_equal(geometryRow(enc, "POINT"), 4)
  expect_true(all(c(2, 3) %in% geometryRow(enc, "LINESTRING")))
  expect_equal(geometryRow(enc, "POLYGON"), 1)
  expect_error(geometryRow(sf), "This function should be called on an sfencoded object")

  enc <- encode(sf, strip = TRUE)
  expect_error(polyline_wkt(enc), "No geometry attribute found")
})
/tests/testthat/test-Utils_sf.R
permissive
SymbolixAU/googlePolylines
R
false
false
3,171
r
context("sfattributes")

# Fix: `quiet = T` used the reassignable alias T; spelled out as TRUE.

test_that("sf attributes are extracted", {
  testthat::skip_on_cran()
  library(sf)

  nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)
  enc <- encode(nc)
  sfAttrs <- sfAttributes(enc)

  expect_equal(sfAttrs$type, "MULTIPOLYGON")
  expect_equal(sfAttrs$dim, "XY")
  # bbox order: xmin, ymin, xmax, ymax (rounded to 3 decimals)
  expect_equal(round(sfAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfAttrs$bbox[[4]], 3), 36.59)
  expect_equal(sfAttrs$epsg, 4267)
  expect_equal(sfAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
})

test_that("sf attributes extracted", {
  testthat::skip_on_cran()
  library(sf)

  nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)

  ## from sf obj
  sfAttrs <- googlePolylines:::sfGeometryAttributes(nc)
  ## from sfc column
  sfcAttrs <- googlePolylines:::sfGeometryAttributes(nc[['geometry']])

  expect_equal(sfAttrs$type, "MULTIPOLYGON")
  expect_equal(sfcAttrs$type, "MULTIPOLYGON")
  expect_equal(sfAttrs$dim, "XY")
  expect_equal(sfcAttrs$dim, "XY")
  expect_equal(round(sfAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfcAttrs$bbox[[1]], 3), -84.324)
  expect_equal(round(sfAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfcAttrs$bbox[[2]], 3), 33.882)
  expect_equal(round(sfAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfcAttrs$bbox[[3]], 3), -75.457)
  expect_equal(round(sfAttrs$bbox[[4]], 3), 36.59)
  expect_equal(round(sfcAttrs$bbox[[4]], 3), 36.59)
  expect_equal(sfAttrs$epsg, 4267)
  expect_equal(sfcAttrs$epsg, 4267)
  expect_equal(sfAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
  expect_equal(sfcAttrs$proj, "+proj=longlat +datum=NAD27 +no_defs")
})

test_that("geometry rows extracted", {
  testthat::skip_on_cran()
  library(sf)

  df <- data.frame(
    myId = c(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2),
    lineId = c(1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2),
    lon = c(-80.190, -66.118, -64.757, -80.190, -70.579, -67.514, -66.668,
            -70.579, -70, -49, -51, -70),
    lat = c(26.774, 18.466, 32.321, 26.774, 28.745, 29.570, 27.339, 28.745,
            22, 23, 22, 22)
  )

  p1 <- as.matrix(df[1:4, c("lon", "lat")])
  p2 <- as.matrix(df[5:8, c("lon", "lat")])
  p3 <- as.matrix(df[9:12, c("lon", "lat")])

  # multipoint / multipolygon are constructed but not included in `sf`
  # below; they only exercise the sfc constructors.
  point <- sf::st_sfc(sf::st_point(x = c(df[1, "lon"], df[1, "lat"])))
  multipoint <- sf::st_sfc(sf::st_multipoint(x = as.matrix(df[1:2, c("lon", "lat")])))
  polygon <- sf::st_sfc(sf::st_polygon(x = list(p1, p2)))
  linestring <- sf::st_sfc(sf::st_linestring(p3))
  multilinestring <- sf::st_sfc(sf::st_multilinestring(list(p1, p2)))
  multipolygon <- sf::st_sfc(sf::st_multipolygon(x = list(list(p1, p2), list(p3))))

  sf <- rbind(
    sf::st_sf(geo = polygon),
    sf::st_sf(geo = multilinestring),
    sf::st_sf(geo = linestring),
    sf::st_sf(geo = point)
  )

  enc <- encode(sf)

  expect_equal(geometryRow(enc, "POINT"), 4)
  expect_true(all(c(2, 3) %in% geometryRow(enc, "LINESTRING")))
  expect_equal(geometryRow(enc, "POLYGON"), 1)
  expect_error(geometryRow(sf), "This function should be called on an sfencoded object")

  enc <- encode(sf, strip = TRUE)
  expect_error(polyline_wkt(enc), "No geometry attribute found")
})
#!/usr/local/bin/Rscript

# Build a reproducible toy trajectory dataset for the merlot TI method.
set.seed(2)
dataset <- dyntoy::generate_dataset(
  id = "specific_example/merlot",
  num_cells = 500,
  num_features = 200,
  model = "tree",
  normalise = FALSE
)

# Method-specific parameters (none required here) plus the seed used above.
dataset$parameters <- list()
dataset$seed <- 2L

# The output path is supplied as the first command-line argument.
out_path <- commandArgs(trailingOnly = TRUE)[[1]]
dynutils::write_h5(dataset, out_path)
/example.sh
permissive
dynverse/ti_merlot
R
false
false
416
sh
#!/usr/local/bin/Rscript

# Build a reproducible toy trajectory dataset for the merlot TI method.
set.seed(2)
dataset <- dyntoy::generate_dataset(
  id = "specific_example/merlot",
  num_cells = 500,
  num_features = 200,
  model = "tree",
  normalise = FALSE
)

# Method-specific parameters (none required here) plus the seed used above.
dataset$parameters <- list()
dataset$seed <- 2L

# The output path is supplied as the first command-line argument.
out_path <- commandArgs(trailingOnly = TRUE)[[1]]
dynutils::write_h5(dataset, out_path)
# ts.R -- time-series projection of COVID-19 case counts (El Salvador),
# using forecast::forecast() with plotly traces, plus a shiny UI fragment.
# NOTE(review): this looks like a fragment of a larger shiny app -- it uses
# %>%, group_by()/summarise(), epiweek(), plot_ly() and shiny UI functions,
# but only library(forecast) is loaded here; the trailing "," after the
# tabPanel() call below also means this file does not parse on its own.
# Confirm against the rest of the app before changing anything.

library(forecast)

# Load the raw daily case file; fields are ";"-separated.
esa_cases <- read.csv("esa_casos.csv", sep = ";", header = T)

# ESA cases per epidemiological week
esa_cases$fecha <- as.Date(esa_cases$fecha, format="%d/%m/%Y")
# epiweek() is from lubridate -- NOTE(review): lubridate is not loaded here.
esa_cases$weeks <- epiweek(esa_cases$fecha)
as.data.frame(esa_cases)

#Group by weeks
esa_week <- esa_cases %>% group_by(weeks) %>% summarise(importados=sum(importados),locales=sum(locales),import_acum=max(import_acum), locales_acum=max(locales_acum))
as.data.frame(esa_week)

#-------------------------------------------------------------------------
#data frame to be plotted
df <- data.frame(fecha=esa_cases$fecha, impor=esa_cases$importados, loc=esa_cases$locales)
df$fecha <- format(as.Date(df$fecha, format="%Y/%m/%d"), "%d-%m")
df_ac <- data.frame(fecha=esa_cases$fecha, impor_ac=esa_cases$import_acum, loc_ac=esa_cases$locales_acum)
df_ac$fecha <- format(as.Date(df_ac$fecha, format="%Y/%m/%d"), "%d-%m")

# per week
mdf <- data.frame(weeks=esa_week$weeks, impor=esa_week$importados, loc=esa_week$locales)
mdf_ac <- data.frame(weeks=esa_week$weeks, impor_ac=esa_week$import_acum, loc_ac=esa_week$locales_acum)

# Day index used as the x axis for the forecast plots.
df$x <- seq(1,length(df$fecha),1)
df_ac$x <- seq(1,length(df_ac$fecha),1)

# (removed: a long commented-out earlier draft of proy() that duplicated the
# active definition below with older axis/title labels)

# function to plot forecast
# proy(x, y, tipom, ym): fits forecast::forecast(y, level = c(90, 95)) and
# builds a plotly figure with the observed series, the 90%/95% prediction
# interval ribbons and the 5-day point forecast, plus a summary table.
#   x     - numeric day index aligned with y
#   y     - numeric series to forecast (e.g. cumulative cases)
#   tipom - legend label for the observed series
#   ym    - y-axis title
# Returns list(plotly figure, data.frame(Día, Confirmados, LI, LS)).
proy<-function(x,y,tipom,ym){
  # 5-step-ahead forecast with 90% and 95% prediction intervals.
  P<-forecast(y, level=c(90,95))
  # Observed series.
  trace1 <- list(
    line = list(color = "#03ddff", fillcolor = "#03ddff"),
    mode = 'lines+markers',
    name = tipom,
    type = "scatter",
    x = x-1,
    y = y)
  # 95% interval ribbon; x runs forward then backward so the polygon closes.
  trace2 <- list(
    line = list(color = "#f0f2f2"),
    mode = 'lines+markers',
    name = "95% de Confianza",
    fill = "toself",
    type = "scatter",
    x = c(max(x)+0:4,max(x)+4:0),
    y = c(P$lower[1:5,2],P$upper[5:1,2]),
    xaxis = "x",
    yaxis = "y",
    hoveron = "points")
  # 90% interval ribbon.
  trace3 <- list(
    line = list(color = "#b4bfbf", fillcolor = "#b4bfbf"),
    mode = 'lines+markers',
    name = "90% de Confianza",
    fill = "toself",
    type = "scatter",
    x = c(max(x)+0:4,max(x)+4:0),
    y = c(P$lower[1:5,1],P$upper[5:1,1]),
    xaxis = "x",
    yaxis = "y",
    hoveron = "points")
  # Point forecast for the next 5 days.
  trace4 <- list(
    line = list(color = "#6b89b0", fillcolor = "#6b89b0"),
    mode = 'lines+markers',
    name = "Predicción",
    type = "scatter",
    x = max(x)+0:4,
    y = (P$mean[1:5]))
  layout <- list(
    title = "Proyección de casos",
    xaxis = list(title = "Días", domain = c(0, 1)),
    yaxis = list(title = ym, domain = c(0, 1)),
    margin = list(b = 40, l = 60, r = 10, t = 25))
  # Assemble the figure trace by trace (plot_ly/add_trace are from plotly).
  gf <- plot_ly()
  gf <- add_trace(gf, line=trace1$line, mode=trace1$mode, name=trace1$name, type=trace1$type, x=trace1$x,
                  y=trace1$y)
  gf <- add_trace(gf, fill=trace2$fill, line=trace2$line, mode=trace2$mode, name=trace2$name, type=trace2$type, x=trace2$x,
                  y=trace2$y, hoveron=trace2$hoveron)
  gf <- add_trace(gf,fill=trace3$fill, line=trace3$line, mode=trace3$mode,
                  name=trace3$name, type=trace3$type, x=trace3$x, y=trace3$y,
                  xaxis=trace3$xaxis, yaxis=trace3$yaxis, hoveron=trace3$hoveron)
  gf <- add_trace(gf, line=trace4$line, mode=trace4$mode, name=trace4$name, type=trace4$type,
                  x=trace4$x, y=trace4$y)
  gf <- layout(gf, title=layout$title, xaxis=layout$xaxis, yaxis=layout$yaxis, margin=layout$margin)
  # Forecast table: day label, rounded point forecast, 90% lower / 95% upper.
  tabla<-data.frame(Día = as.character.Date(max(x)+0:4), Confirmados = round(P$mean[1:5]), LI =round(P$lower[1:5,1]), LS=round(P$upper[1:5,2]))
  return(list(gf,tabla))
}

# Shiny UI fragment for the "Proyecciones" tab.
# NOTE(review): the trailing "," after the closing parenthesis suggests this
# tabPanel() was meant to sit inside a navbarPage()/tabsetPanel() call in the
# app's ui definition -- confirm where this fragment belongs.
tabPanel("Proyecciones",
  sidebarLayout(
    sidebarPanel(width = 3,
      h3("Proyección de casos"),
      p("Para realizar el pronóstico de la evolución de contagios locales y totales se ha usado el análisis de series de tiempo."),
      br(),
      p("Dado que se disponen de pocos datos hasta la fecha, se genera la proyección para cinco días con un nivel de confianza del 90% y del 95%, se muestran los casos que podrían caer dentro de los intervalos de confianza.")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Casos Locales",
          fluidRow(
            column(width = 6.5, class="well",
              h4("Casos locales por día"),
              plotlyOutput("plotf1")
            ),
            column(width = 6.5, class="well",
              h4("Casos locales acumulados"),
              plotlyOutput("plotf2")
            )
          )
        ),
        tabPanel("Casos Totales",
          fluidRow(
            column(width = 6.5, class="well",
              h4("Casos diarios totales"),
              plotlyOutput("plotf3")
            ),
            column(width = 6.5, class="well",
              h4("Casos acumulados totales"),
              plotlyOutput("plotf4")
            )
          )
        ),
        tabPanel("Tablas",
          fluidRow(
            column(width = 6, class="well",
              h4("Casos locales"),
              tableOutput("tabla1")
            ),
            column(width = 6, class="well",
              h4("Casos totales"),
              tableOutput("table2")
            )
          )
        )
      )
    )
  )
),

# plot forecast for cumulative local cases
# NOTE(review): f1[1] keeps a one-element list; f1[[1]] would extract the
# figure itself -- confirm which one the consumers of plotf1/tabla1 expect.
f1 <- proy(df_ac$x,df_ac$loc_ac,"Casos locales" ,"Casos")
plotf1 <- f1[1]
#table
tabla1 <- f1[2]

# plot forecast for cumulative cases: locales + importados
# NOTE(review): esa_cases$x and esa_cases$acumulado_total are never created
# above -- presumably defined elsewhere in the app; confirm.
f2 <-proy(esa_cases$x,esa_cases$acumulado_total,"Casos diarios" ,"Casos")
plotf2 <- f2[1]
# table
tabla2 <- f2[2]

#*20Dat0s20*
/COVID19/ts.R
no_license
IrysArteaga/COVID19
R
false
false
8,801
r
library(forecast) esa_cases <- read.csv("esa_casos.csv", sep = ";", header = T) # ESA cases per epidemiological week esa_cases$fecha <- as.Date(esa_cases$fecha, format="%d/%m/%Y") esa_cases$weeks <- epiweek(esa_cases$fecha) as.data.frame(esa_cases) #Group by weeks esa_week <- esa_cases %>% group_by(weeks) %>% summarise(importados=sum(importados),locales=sum(locales),import_acum=max(import_acum), locales_acum=max(locales_acum)) as.data.frame(esa_week) #------------------------------------------------------------------------- #data frame to be plotted df <- data.frame(fecha=esa_cases$fecha, impor=esa_cases$importados, loc=esa_cases$locales) df$fecha <- format(as.Date(df$fecha, format="%Y/%m/%d"), "%d-%m") df_ac <- data.frame(fecha=esa_cases$fecha, impor_ac=esa_cases$import_acum, loc_ac=esa_cases$locales_acum) df_ac$fecha <- format(as.Date(df_ac$fecha, format="%Y/%m/%d"), "%d-%m") # per week mdf <- data.frame(weeks=esa_week$weeks, impor=esa_week$importados, loc=esa_week$locales) mdf_ac <- data.frame(weeks=esa_week$weeks, impor_ac=esa_week$import_acum, loc_ac=esa_week$locales_acum) df$x <- seq(1,length(df$fecha),1) df_ac$x <- seq(1,length(df_ac$fecha),1) # proy<-function(x,y,tipom,ym){ # P<-forecast(y, level=c(90,95)) # trace1 <- list( # line = list( # color = "#03ddff", # fillcolor = "#03ddff"), # mode = 'lines+markers', # name = tipom, # type = "scatter", # x = x-1, # y = y) # trace2 <- list( # line = list( # color = "#f0f2f2"), # mode = 'lines+markers', # name = "95% de Confianza", # fill = "toself", # type = "scatter", # x = c(max(x)+0:4,max(x)+4:0), # y = c(P$lower[1:5,2],P$upper[5:1,2]), # xaxis = "x", # yaxis = "y", # hoveron = "points") # # trace3 <- list( # line = list( # color = "#b4bfbf", # fillcolor = "#b4bfbf"), # mode = 'lines+markers', # name = "90% de Confianza", # fill = "toself", # type = "scatter", # x = c(max(x)+0:4,max(x)+4:0), # y = c(P$lower[1:5,1],P$upper[5:1,1]), # xaxis = "x", # yaxis = "y", # hoveron = "points") # trace4 <- list( # line = 
#' Build a plotly forecast figure and a summary table.
#'
#' Fits an automatic time-series forecast to the observed counts and draws
#' the observed series, the 5-day point forecast, and the 90%/95%
#' confidence bands.
#'
#' @param x     integer day index (1, 2, ...) of the observed series.
#' @param y     observed counts, same length as `x`.
#' @param tipom legend label for the observed series.
#' @param ym    y-axis title.
#' @return list(plot, table): the plotly object and a data.frame with the
#'   5-day point forecast plus lower (90%) / upper (95%) bounds.
proy <- function(x, y, tipom, ym) {
  # forecast() defaults to h = 10; only the first 5 horizons are used below.
  P <- forecast(y, level = c(90, 95))
  fut <- max(x) + 0:4  # forecast-day indices: today plus the next four days

  gf <- plot_ly()
  # Observed series.
  gf <- add_trace(gf, mode = "lines+markers", type = "scatter",
                  name = tipom, x = x - 1, y = y,
                  line = list(color = "#03ddff", fillcolor = "#03ddff"))
  # 95% confidence band, drawn as a closed polygon (out along the lower
  # bound, back along the upper bound).
  gf <- add_trace(gf, mode = "lines+markers", type = "scatter",
                  name = "95% de Confianza", fill = "toself",
                  x = c(fut, rev(fut)),
                  y = c(P$lower[1:5, 2], P$upper[5:1, 2]),
                  xaxis = "x", yaxis = "y", hoveron = "points",
                  line = list(color = "#f0f2f2"))
  # 90% confidence band.
  gf <- add_trace(gf, mode = "lines+markers", type = "scatter",
                  name = "90% de Confianza", fill = "toself",
                  x = c(fut, rev(fut)),
                  y = c(P$lower[1:5, 1], P$upper[5:1, 1]),
                  xaxis = "x", yaxis = "y", hoveron = "points",
                  line = list(color = "#b4bfbf", fillcolor = "#b4bfbf"))
  # Point forecast.
  gf <- add_trace(gf, mode = "lines+markers", type = "scatter",
                  name = "Predicción", x = fut, y = P$mean[1:5],
                  line = list(color = "#6b89b0", fillcolor = "#6b89b0"))
  gf <- layout(gf,
               title  = "Proyección de casos",
               xaxis  = list(title = "Días", domain = c(0, 1)),
               yaxis  = list(title = ym, domain = c(0, 1)),
               margin = list(b = 40, l = 60, r = 10, t = 25))

  # NOTE(review): the table mixes confidence levels (LI from the 90% band,
  # LS from the 95% band) exactly as the original did — confirm whether
  # that asymmetry is intentional.
  # Bug fix: as.character.Date() was being called directly on a numeric
  # vector (S3 method misuse); plain as.character() gives the same labels.
  tabla <- data.frame(Día = as.character(fut),
                      Confirmados = round(P$mean[1:5]),
                      LI = round(P$lower[1:5, 1]),
                      LS = round(P$upper[1:5, 2]))
  list(gf, tabla)
}

# UI fragment for the "Proyecciones" tab.
# NOTE(review): this tabPanel() belongs inside a tabsetPanel()/navbarPage()
# in the app's ui definition; the original's stray trailing comma (which
# breaks top-level parsing) was dropped.
tabPanel("Proyecciones",
  sidebarLayout(
    sidebarPanel(width = 3,
      h3("Proyección de casos"),
      p("Para realizar el pronóstico de la evolución de contagios locales y totales se ha usado el análisis de series de tiempo."),
      br(),
      p("Dado que se disponen de pocos datos hasta la fecha, se genera la proyección para cinco días con un nivel de confianza del 90% y del 95%, se muestran los casos que podrían caer dentro de los intervalos de confianza.")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Casos Locales",
          fluidRow(
            # Bug fix: Bootstrap column widths must be whole numbers in
            # 1..12; the original used 6.5.
            column(width = 6, class = "well",
                   h4("Casos locales por día"),
                   plotlyOutput("plotf1")),
            column(width = 6, class = "well",
                   h4("Casos locales acumulados"),
                   plotlyOutput("plotf2"))
          )
        ),
        tabPanel("Casos Totales",
          fluidRow(
            column(width = 6, class = "well",
                   h4("Casos diarios totales"),
                   plotlyOutput("plotf3")),
            column(width = 6, class = "well",
                   h4("Casos acumulados totales"),
                   plotlyOutput("plotf4"))
          )
        ),
        tabPanel("Tablas",
          fluidRow(
            column(width = 6, class = "well",
                   h4("Casos locales"),
                   tableOutput("tabla1")),
            column(width = 6, class = "well",
                   h4("Casos totales"),
                   # Bug fix: was "table2", which matches no server object;
                   # the table built below is named tabla2.
                   tableOutput("tabla2"))
          )
        )
      )
    )
  )
)

# Forecast for cumulative local cases.
f1 <- proy(df_ac$x, df_ac$loc_ac, "Casos locales", "Casos")
plotf1 <- f1[[1]]  # bug fix: `[1]` kept a one-element list instead of the plot
tabla1 <- f1[[2]]

# Forecast for cumulative total cases (local + imported).
# Bug fix: the original referenced esa_cases$x and esa_cases$acumulado_total,
# neither of which is created anywhere in this file — define them here.
# (Presumably "acumulado_total" = imported + local running totals; confirm.)
esa_cases$acumulado_total <- esa_cases$import_acum + esa_cases$locales_acum
esa_cases$x <- seq_len(nrow(esa_cases))
f2 <- proy(esa_cases$x, esa_cases$acumulado_total, "Casos diarios", "Casos")
plotf2 <- f2[[1]]
tabla2 <- f2[[2]]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Filter.R \docType{data} \name{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{rf.importance} \alias{makeFilter} \alias{rf.min.depth} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{univariate} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \title{Create a feature filter.} \format{An object of class \code{Filter} of length 6.} \usage{ makeFilter(name, desc, pkg, supported.tasks, supported.features, fun) rf.importance rf.min.depth univariate } \arguments{ \item{name}{(\code{character(1)})\cr Identifier for the filter.} \item{desc}{(\code{character(1)})\cr Short description of the filter.} \item{pkg}{(\code{character(1)})\cr Source package where the filter is implemented.} \item{supported.tasks}{(\link{character})\cr Task types supported.} \item{supported.features}{(\link{character})\cr Feature types supported.} \item{fun}{(\code{function(task, nselect, ...})\cr Function which takes a task and returns a named numeric vector of scores, one score for each feature of \code{task}. Higher scores mean higher importance of the feature. At least \code{nselect} features must be calculated, the remaining may be set to \code{NA} or omitted, and thus will not be selected. the original order will be restored if necessary.} } \value{ Object of class \dQuote{Filter}. } \description{ Creates and registers custom feature filters. Implemented filters can be listed with \link{listFilterMethods}. Additional documentation for the \code{fun} parameter specific to each filter can be found in the description. 
Minimum redundancy, maximum relevance filter \dQuote{mrmr} computes the mutual information between the target and each individual feature minus the average mutual information of previously selected features and this feature using the \pkg{mRMRe} package. Filter \dQuote{carscore} determines the \dQuote{Correlation-Adjusted (marginal) coRelation scores} (short CAR scores). The CAR scores for a set of features are defined as the correlations between the target and the decorrelated features. Filter \dQuote{randomForestSRC.rfsrc} computes the importance of random forests fitted in package \pkg{randomForestSRC}. The concrete method is selected via the \code{method} parameter. Possible values are \code{permute} (default), \code{random}, \code{anti}, \code{permute.ensemble}, \code{random.ensemble}, \code{anti.ensemble}. See the VIMP section in the docs for \link[randomForestSRC:rfsrc]{randomForestSRC::rfsrc} for details. Filter \dQuote{randomForestSRC.var.select} uses the minimal depth variable selection proposed by Ishwaran et al. (2010) (\code{method = "md"}) or a variable hunting approach (\code{method = "vh"} or \code{method = "vh.vimp"}). The minimal depth measure is the default. Permutation importance of random forests fitted in package \pkg{party}. The implementation follows the principle of mean decrese in accuracy used by the \pkg{randomForest} package (see description of \dQuote{randomForest.importance}) filter. Filter \dQuote{randomForest.importance} makes use of the \link[randomForest:importance]{randomForest::importance} from package \pkg{randomForest}. The importance measure to use is selected via the \code{method} parameter: \describe{ \item{oob.accuracy}{Permutation of Out of Bag (OOB) data.} \item{node.impurity}{Total decrease in node impurity.} } The Pearson correlation between each feature and the target is used as an indicator of feature importance. Rows with NA values are not taken into consideration. 
The Spearman correlation between each feature and the target is used as an indicator of feature importance. Rows with NA values are not taken into consideration. Filter \dQuote{information.gain} uses the entropy-based information gain between each feature and target individually as an importance measure. Filter \dQuote{gain.ratio} uses the entropy-based information gain ratio between each feature and target individually as an importance measure. Filter \dQuote{symmetrical.uncertainty} uses the entropy-based symmetrical uncertainty between each feature and target individually as an importance measure. The chi-square test is a statistical test of independence to determine whether two variables are independent. Filter \dQuote{chi.squared} applies this test in the following way. For each feature the chi-square test statistic is computed checking if there is a dependency between the feature and the target variable. Low values of the test statistic indicate a poor relationship. High values, i.e., high dependency identifies a feature as more important. Filter \dQuote{relief} is based on the feature selection algorithm \dQuote{ReliefF} by Kononenko et al., which is a generalization of the orignal \dQuote{Relief} algorithm originally proposed by Kira and Rendell. Feature weights are initialized with zeros. Then for each instance \code{sample.size} instances are sampled, \code{neighbours.count} nearest-hit and nearest-miss neighbours are computed and the weight vector for each feature is updated based on these values. Filter \dQuote{oneR} makes use of a simple \dQuote{One-Rule} (OneR) learner to determine feature importance. For this purpose the OneR learner generates one simple association rule for each feature in the data individually and computes the total error. The lower the error value the more important the correspoding feature. 
The \dQuote{univariate.model.score} feature filter resamples an \pkg{mlr}
learner specified via \code{perf.learner} for each feature individually
with randomForest from package \pkg{rpart} being the default learner.
Further parameters are the resampling strategy \code{perf.resampling} and
the performance measure \code{perf.measure}.

Filter \dQuote{anova.test} is based on the Analysis of Variance (ANOVA)
between feature and class. The value of the F-statistic is used as a
measure of feature importance.

Filter \dQuote{kruskal.test} applies a Kruskal-Wallis rank sum test of
the null hypothesis that the location parameters of the distribution of a
feature are the same in each class and considers the test statistic as a
variable importance measure: if the location parameters do not differ in
at least one case, i.e., the null hypothesis cannot be rejected, there is
little evidence that the corresponding feature is suitable for
classification.

Simple filter based on the variance of the features independent of each
other. Features with higher variance are considered more important than
features with low variance.

Filter \dQuote{permutation.importance} computes a loss function between
predictions made by a learner before and after a feature is permuted.
Special arguments to the filter function are \code{imp.learner}, a
(\link{Learner} or \code{character(1)}) which specifies the learner to
use when computing the permutation importance, \code{contrast}, a
\code{function} which takes two numeric vectors and returns one (default
is the difference), \code{aggregation}, a \code{function} which takes a
\code{numeric} and returns a \code{numeric(1)} (default is the mean),
\code{nmc}, an \code{integer(1)}, and \code{replace}, a \code{logical(1)}
which determines whether the feature being permuted is sampled with or
without replacement.

Filter \dQuote{auc} determines for each feature, how well the target
variable can be predicted only based on this feature.
More precisely, the prediction rule is: class 1 if the feature exceeds a threshold and class 0 otherwise. The performance of this classification rule is measured by the AUC and the resulting filter score is |0.5 - AUC|. } \references{ Kira, Kenji and Rendell, Larry (1992). The Feature Selection Problem: Traditional Methods and a New Algorithm. AAAI-92 Proceedings. Kononenko, Igor et al. Overcoming the myopia of inductive learning algorithms with RELIEFF (1997), Applied Intelligence, 7(1), p39-55. } \seealso{ Other filter: \code{\link{filterFeatures}}, \code{\link{generateFilterValuesData}}, \code{\link{getFilterValues}}, \code{\link{getFilteredFeatures}}, \code{\link{listFilterMethods}}, \code{\link{makeFilterWrapper}}, \code{\link{plotFilterValuesGGVIS}}, \code{\link{plotFilterValues}} } \keyword{datasets}
/man/makeFilter.Rd
no_license
praneesh12/mlr
R
false
true
8,473
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Filter.R \docType{data} \name{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{rf.importance} \alias{makeFilter} \alias{rf.min.depth} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{univariate} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \alias{makeFilter} \title{Create a feature filter.} \format{An object of class \code{Filter} of length 6.} \usage{ makeFilter(name, desc, pkg, supported.tasks, supported.features, fun) rf.importance rf.min.depth univariate } \arguments{ \item{name}{(\code{character(1)})\cr Identifier for the filter.} \item{desc}{(\code{character(1)})\cr Short description of the filter.} \item{pkg}{(\code{character(1)})\cr Source package where the filter is implemented.} \item{supported.tasks}{(\link{character})\cr Task types supported.} \item{supported.features}{(\link{character})\cr Feature types supported.} \item{fun}{(\code{function(task, nselect, ...})\cr Function which takes a task and returns a named numeric vector of scores, one score for each feature of \code{task}. Higher scores mean higher importance of the feature. At least \code{nselect} features must be calculated, the remaining may be set to \code{NA} or omitted, and thus will not be selected. the original order will be restored if necessary.} } \value{ Object of class \dQuote{Filter}. } \description{ Creates and registers custom feature filters. Implemented filters can be listed with \link{listFilterMethods}. Additional documentation for the \code{fun} parameter specific to each filter can be found in the description. 
Minimum redundancy, maximum relevance filter \dQuote{mrmr} computes the mutual information between the target and each individual feature minus the average mutual information of previously selected features and this feature using the \pkg{mRMRe} package. Filter \dQuote{carscore} determines the \dQuote{Correlation-Adjusted (marginal) coRelation scores} (short CAR scores). The CAR scores for a set of features are defined as the correlations between the target and the decorrelated features. Filter \dQuote{randomForestSRC.rfsrc} computes the importance of random forests fitted in package \pkg{randomForestSRC}. The concrete method is selected via the \code{method} parameter. Possible values are \code{permute} (default), \code{random}, \code{anti}, \code{permute.ensemble}, \code{random.ensemble}, \code{anti.ensemble}. See the VIMP section in the docs for \link[randomForestSRC:rfsrc]{randomForestSRC::rfsrc} for details. Filter \dQuote{randomForestSRC.var.select} uses the minimal depth variable selection proposed by Ishwaran et al. (2010) (\code{method = "md"}) or a variable hunting approach (\code{method = "vh"} or \code{method = "vh.vimp"}). The minimal depth measure is the default. Permutation importance of random forests fitted in package \pkg{party}. The implementation follows the principle of mean decrese in accuracy used by the \pkg{randomForest} package (see description of \dQuote{randomForest.importance}) filter. Filter \dQuote{randomForest.importance} makes use of the \link[randomForest:importance]{randomForest::importance} from package \pkg{randomForest}. The importance measure to use is selected via the \code{method} parameter: \describe{ \item{oob.accuracy}{Permutation of Out of Bag (OOB) data.} \item{node.impurity}{Total decrease in node impurity.} } The Pearson correlation between each feature and the target is used as an indicator of feature importance. Rows with NA values are not taken into consideration. 
The Spearman correlation between each feature and the target is used as an indicator of feature importance. Rows with NA values are not taken into consideration. Filter \dQuote{information.gain} uses the entropy-based information gain between each feature and target individually as an importance measure. Filter \dQuote{gain.ratio} uses the entropy-based information gain ratio between each feature and target individually as an importance measure. Filter \dQuote{symmetrical.uncertainty} uses the entropy-based symmetrical uncertainty between each feature and target individually as an importance measure. The chi-square test is a statistical test of independence to determine whether two variables are independent. Filter \dQuote{chi.squared} applies this test in the following way. For each feature the chi-square test statistic is computed checking if there is a dependency between the feature and the target variable. Low values of the test statistic indicate a poor relationship. High values, i.e., high dependency identifies a feature as more important. Filter \dQuote{relief} is based on the feature selection algorithm \dQuote{ReliefF} by Kononenko et al., which is a generalization of the orignal \dQuote{Relief} algorithm originally proposed by Kira and Rendell. Feature weights are initialized with zeros. Then for each instance \code{sample.size} instances are sampled, \code{neighbours.count} nearest-hit and nearest-miss neighbours are computed and the weight vector for each feature is updated based on these values. Filter \dQuote{oneR} makes use of a simple \dQuote{One-Rule} (OneR) learner to determine feature importance. For this purpose the OneR learner generates one simple association rule for each feature in the data individually and computes the total error. The lower the error value the more important the correspoding feature. 
The \dQuote{univariate.model.score} feature filter resamples an \pkg{mlr}
learner specified via \code{perf.learner} for each feature individually
with randomForest from package \pkg{rpart} being the default learner.
Further parameters are the resampling strategy \code{perf.resampling} and
the performance measure \code{perf.measure}.

Filter \dQuote{anova.test} is based on the Analysis of Variance (ANOVA)
between feature and class. The value of the F-statistic is used as a
measure of feature importance.

Filter \dQuote{kruskal.test} applies a Kruskal-Wallis rank sum test of
the null hypothesis that the location parameters of the distribution of a
feature are the same in each class and considers the test statistic as a
variable importance measure: if the location parameters do not differ in
at least one case, i.e., the null hypothesis cannot be rejected, there is
little evidence that the corresponding feature is suitable for
classification.

Simple filter based on the variance of the features independent of each
other. Features with higher variance are considered more important than
features with low variance.

Filter \dQuote{permutation.importance} computes a loss function between
predictions made by a learner before and after a feature is permuted.
Special arguments to the filter function are \code{imp.learner}, a
(\link{Learner} or \code{character(1)}) which specifies the learner to
use when computing the permutation importance, \code{contrast}, a
\code{function} which takes two numeric vectors and returns one (default
is the difference), \code{aggregation}, a \code{function} which takes a
\code{numeric} and returns a \code{numeric(1)} (default is the mean),
\code{nmc}, an \code{integer(1)}, and \code{replace}, a \code{logical(1)}
which determines whether the feature being permuted is sampled with or
without replacement.

Filter \dQuote{auc} determines for each feature, how well the target
variable can be predicted only based on this feature.
More precisely, the prediction rule is: class 1 if the feature exceeds a threshold and class 0 otherwise. The performance of this classification rule is measured by the AUC and the resulting filter score is |0.5 - AUC|. } \references{ Kira, Kenji and Rendell, Larry (1992). The Feature Selection Problem: Traditional Methods and a New Algorithm. AAAI-92 Proceedings. Kononenko, Igor et al. Overcoming the myopia of inductive learning algorithms with RELIEFF (1997), Applied Intelligence, 7(1), p39-55. } \seealso{ Other filter: \code{\link{filterFeatures}}, \code{\link{generateFilterValuesData}}, \code{\link{getFilterValues}}, \code{\link{getFilteredFeatures}}, \code{\link{listFilterMethods}}, \code{\link{makeFilterWrapper}}, \code{\link{plotFilterValuesGGVIS}}, \code{\link{plotFilterValues}} } \keyword{datasets}
## Hydrological alteration vs. macroinvertebrate communities: restrict
## altered sites to the environmental ranges observed at natural sites,
## then produce boxplots and Wilcoxon tests per variable and per
## hydrological class. Output files are unchanged from the original.
library(ggplot2)  # v. 3.2.0
library(purrr)    # v. 0.3.2
library(ggpubr)
library(dplyr)
library(psych)

# Select altered -----------------------------------------------------------
# NOTE(review): setwd() with an absolute home path makes the script
# non-portable; kept for compatibility, but consider relative paths.
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Family_MI <- read.csv("Macroinvertebrates_family_Spain_att.csv")

# Summer samples (June-October); conductivity capped at 800.
summer_months <- c("6", "7", "8", "9", "10")
Family_MI_Summer <- Family_MI %>%
  filter(MONTH %in% summer_months) %>%
  filter(A_COND20SITU < 800)

Altered <- Family_MI %>%
  filter(Flow.regim == "Altered") %>%
  filter(MONTH %in% summer_months)
Natural <- Family_MI %>%
  filter(Flow.regim == "Natural") %>%
  filter(MONTH %in% summer_months) %>%
  filter(A_COND20SITU < 800)

# TRUE where x lies within the range observed at natural sites.
# na.rm = TRUE throughout (the original used it only for the first two
# variables, which would silently drop all rows if a later column had NAs).
in_natural_range <- function(x, nat) {
  between(x, min(nat, na.rm = TRUE), max(nat, na.rm = TRUE))
}

Altered_filter_range <- Altered %>%
  filter(in_natural_range(A_COND20SITU, Natural$A_COND20SITU)) %>%
  filter(in_natural_range(A_PHSITU,  Natural$A_PHSITU)) %>%
  filter(in_natural_range(MN_UHD,    Natural$MN_UHD)) %>%
  filter(in_natural_range(MN_AGR,    Natural$MN_AGR)) %>%
  filter(in_natural_range(MN_PAS,    Natural$MN_PAS)) %>%
  # BUG FIX: the original compared MN_SSH against max(Natural$MN_PAS).
  filter(in_natural_range(MN_SSH,    Natural$MN_SSH)) %>%
  filter(in_natural_range(MN_TEMP,   Natural$MN_TEMP)) %>%
  filter(in_natural_range(MN_PREC,   Natural$MN_PREC)) %>%
  filter(in_natural_range(MN_ETP,    Natural$MN_ETP)) %>%
  filter(in_natural_range(MN_HARD,   Natural$MN_HARD)) %>%
  filter(in_natural_range(MN_COND,   Natural$MN_COND)) %>%
  filter(in_natural_range(AREA_KM2,  Natural$AREA_KM2)) %>%
  filter(in_natural_range(DEPTH,     Natural$DEPTH)) %>%
  filter(in_natural_range(WIDTH,     Natural$WIDTH)) %>%
  filter(in_natural_range(ELEV_M,    Natural$ELEV_M)) %>%
  filter(in_natural_range(SINUOSITY, Natural$SINUOSITY)) %>%
  filter(in_natural_range(MnSlope,   Natural$MnSlope))

Nat_alt_summer <- merge(Natural, Altered_filter_range, all = TRUE)
write.csv2(Nat_alt_summer, "Nat_alt_summer_614.csv", row.names = TRUE)

# Graphs Hyd ---------------------------------------------------------------
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Nat_alt_summer <- read.csv2("Nat_alt_summer_614.csv", stringsAsFactors = FALSE)

FQ  <- select(Nat_alt_summer, A_O2_SAT:A_PHSITU)  # physico-chemical
Hyd <- select(Nat_alt_summer, MeanJan:X95per)     # hydrological metrics
Env <- select(Nat_alt_summer, AREA_KM2:MN_SSH)    # environmental descriptors
Lev_20    <- as.factor(Nat_alt_summer$Lev_20)
Tipologia <- as.factor(Nat_alt_summer$Tipologia)
FQ_names  <- names(FQ)
Hyd_names <- names(Hyd)
Env_names <- names(Env)

theme_set(theme_bw())

# Notched boxplot of one variable by flow regime, with jittered points.
regime_boxplot <- function(data, var, colors) {
  ggplot(data, aes_string(x = "Flow.regim", y = var)) +
    geom_boxplot(width = 0.45, fill = "white", notch = TRUE) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim),
                width = 0.1, size = 0.5) +
    scale_color_manual(values = colors) +
    theme(legend.position = "none") +
    labs(x = NULL)
}

# Wilcoxon rank-sum p-value of each variable between flow regimes.
regime_wilcox <- function(vars, data) {
  lapply(vars, function(resp) {
    mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
    wilcox.test(mF, data = data)$p.value
  })
}

myplots     <- lapply(Hyd_names, function(q)
  regime_boxplot(Nat_alt_summer, q, c("tomato2", "palegreen3")))
myplots_env <- lapply(Env_names, function(q)
  regime_boxplot(Nat_alt_summer, q, c("#00AFBB", "#E7B800")))
myplots_FQ  <- lapply(FQ_names, function(q)
  regime_boxplot(Nat_alt_summer, q, c("#00AFBB", "#E7B800")))

p.value.list <- regime_wilcox(Hyd_names, Nat_alt_summer)
modelList <- cbind(Hyd_names, p.value.list)
write.csv2(modelList, "p_values_Hyd2.csv", row.names = FALSE)

multi.page <- ggarrange(plotlist = myplots, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_614.pdf")
multi.page_env <- ggarrange(plotlist = myplots_env, nrow = 3, ncol = 4)
ggexport(multi.page_env, filename = "Graphs_Env.pdf")
multi.page_FQ <- ggarrange(plotlist = myplots_FQ, nrow = 3, ncol = 4)
ggexport(multi.page_FQ, filename = "Graphs_FQ.pdf")

# Classes hidrológicas -----------------------------------------------------
# Same boxplots and Wilcoxon tests, repeated within each hydrological class.
# (Also fixes the class-3 plots, where a stray leading "+" meant the
# labs(x = NULL) layer was never actually applied.)
for (cls in c("10", "13", "3", "8")) {
  class_data <- Nat_alt_summer %>% filter(Lev_20 == cls)
  class_plots <- lapply(Hyd_names, function(q)
    regime_boxplot(class_data, q, c("tomato2", "palegreen3")))
  multi.page <- ggarrange(plotlist = class_plots, nrow = 3, ncol = 4)
  ggexport(multi.page, filename = paste0("Graphs_Hyd_Class", cls, ".pdf"))
  modelList <- cbind(Hyd_names, regime_wilcox(Hyd_names, class_data))
  write.csv2(modelList, paste0("p_values_Class", cls, ".csv"),
             row.names = FALSE)
}
# Per-class subsets kept for interactive use, as in the original.
Nat_alt_summer_3  <- Nat_alt_summer %>% filter(Lev_20 == "3")
Nat_alt_summer_8  <- Nat_alt_summer %>% filter(Lev_20 == "8")
Nat_alt_summer_10 <- Nat_alt_summer %>% filter(Lev_20 == "10")
Nat_alt_summer_13 <- Nat_alt_summer %>% filter(Lev_20 == "13")

# Gráficos 1x1 -------------------------------------------------------------
# One boxplot per monthly mean-flow variable (January-September), combined
# into a single grid. Replaces nine copy-pasted, functionally identical
# ggplot blocks.
month_vars <- c("MeanJan", "MeanFeb", "MeanMar", "MeanApr", "MeanMay",
                "MeanJun", "MeanJul", "MeanAug", "MeanSep")
month_plots <- lapply(month_vars, function(v) {
  ggplot(Nat_alt_summer, aes_string(x = "factor(Flow.regim)", y = v)) +
    geom_boxplot(width = 0.4, fill = "white") +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim),
                width = 0.1, size = 1) +
    scale_color_manual(values = c("#00AFBB", "#E7B800")) +
    labs(x = NULL) +
    theme(legend.position = "none")  # remove legend and x axis label
})
library("cowplot")
Hyd_1_plot <- plot_grid(plotlist = month_plots, ncol = 3, nrow = 3)
save_plot("Hyd1.png", Hyd_1_plot, base_width = 5)

# One PNG per hydrological variable.
# BUG FIX: the original loop header was commented out and the body
# referenced undefined `i` and `Hyd1`; rewritten against Hyd_names.
for (v in Hyd_names) {
  myfile <- file.path(
    "~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Coord/Graphs",
    paste0(v, ".png"))
  p <- ggplot(Nat_alt_summer, aes_string(x = "Flow.regim", y = v)) +
    geom_boxplot(width = 0.3, fill = "white") +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim),
                width = 0.1, size = 1.5) +
    scale_color_manual(values = c("#00AFBB", "#E7B800")) +
    theme(legend.position = "none")
  ggsave(myfile, p)
}

# Single-variable tests kept from the original. Two broken calls were
# removed: one referenced the undefined Nat_alt_summer2, and one had an
# empty formula left-hand side (always errors).
Test_MeanJan <- wilcox.test(MeanJan ~ as.factor(Flow.regim),
                            data = Nat_alt_summer)
wilcox.test(MeanMar ~ as.factor(Flow.regim), data = Nat_alt_summer)

# Biotic indices -----------------------------------------------------------
library(biomonitoR)
library(biotic)
library(BBI)
library(data.table)  # setnames(), melt(), dcast(); require() -> library()
# (A redundant setwd() to the Coord subdirectory, immediately overridden,
# was removed.)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Nat_alt_summer <- read.csv2("Nat_alt_summer_614.csv")

# Abundance matrix: site ID plus one column per taxon.
Taxa <- select(Nat_alt_summer, ID, Hydrobiidae:Plumatellidae)
setnames(Taxa, "Ancylidae", "Ancylus")
Taxa_transp <- dcast(melt(Taxa, id.vars = "ID"), variable ~ ID) ##Ítranspose to use the packages setnames(Taxa_transp, "variable", "Taxa") #Indices <- select(Nat_alt_summer, EPT:IASPT) #Bio_indices <- names(Indices) #Indices_p_value <- c('MeanJan', 'MeanFeb', 'MeanMar', 'MeanApr', 'MeanJun', 'MeanJul', 'MeanAug', 'MeanSep', 'MeanNov', 'MeanDec', 'Mean1DayFlowMaxs', 'Mean3DayFlowMaxs', 'Mean7DayFlowMaxs', 'Mean90DayFlowMins', 'ZeroFlowDays', 'JulianMin', 'JulianMax', 'nPulsesLow', 'MeanPulseLengthLow', 'nPos', 'nNeg', 'meanNeg', 'FRE3', 'FRE7', 'Reversals', 'StDevMeanMay', 'StDevMeanJun', 'StDevMeanJul', 'StDevMeanAug', 'StDevMeanSep', 'StDevMeanOct', 'StDevMeanNov', 'StDevMean1DayFlowMins', 'StDevMean3DayFlowMins', 'StDevMean3DayFlowMaxs', 'StDevMean7DayFlowMins', 'StDevMean7DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevMean30DayFlowMaxs', 'StDevMean90DayFlowMins', 'StDevMean90DayFlowMaxs', 'StDevZeroFlowDays', 'StDevBFI', 'StDevJulianMin', 'StDevJulianMax', 'StDevnPulsesLow', 'StDevnPulsesHigh', 'StDevMeanPulseLengthHigh', 'StDevnPos', 'StDevnNeg', 'StDevFRE1', 'StDevFRE3', 'StDevFRE7', 'StDevReversals', 'Predictability', 'X5per') # Calculate indices ------------------------------------------------------- library(biotic) Index <- calcindex(Taxa_transp) Index$Life_biotic <- calcindex(Taxa_transp, "LIFE", "num") library("biomonitoR") data(macro_ex) # Prepare data for the analysis. 
# Taxonomic/biotic indices via biomonitoR ----------------------------------
data.bio <- asBiomonitor(Taxa_transp)
data.agR <- aggregatoR(data.bio)

# Printed for inspection only; the values are not stored.
richness(data.agR, "Genus")
richness(data.agR, "Family")

Index$Richness  <- allrich(data.agR)
Index$Shannon   <- biomonitoR::shannon(data.agR)
Index$Simpson   <- simpson(data.agR)
Index$Margalef  <- margalef(data.agR)
Index$Menhinick <- menhinick(data.agR)
Index$Life2     <- life(data.agR)

# calculate iberian bmwp and aspt (Spanish scoring), plus the default BMWP
Index$IBMWP <- bmwp(data.agR, method = "spa")
Index$IASPT <- aspt(data.agR, method = "spa")
Index$BMWP  <- bmwp(data.agR)

# NOTE(review): diversity() is presumably vegan::diversity() — vegan is not
# attached anywhere visible in this script; confirm before running.
Index$Div_Shannon <- diversity(Taxa, index = "shannon")

write.csv2(Index, "indices_614.csv", dec = ",")

# plot indices bio ---------------------------------------------------------
# One scatter of `yvar` against `xvar`, coloured by flow regime, with a
# per-regime linear fit and no legend. Factored out because the original
# repeated this ggplot verbatim in every loop below.
# NOTE: aes_string() is soft-deprecated in recent ggplot2 but kept to match
# the string-name driven loops.
flow_scatter <- function(dat, xvar, yvar, colors, shape = 19, size = 0.7) {
  ggplot(dat, aes_string(x = xvar, y = yvar, color = "Flow.regim")) +
    geom_point(shape = shape, size = size) +
    theme(legend.position = "none") +
    geom_smooth(method = lm, se = FALSE) +
    scale_color_manual(values = colors)
}

# NOTE(review): FQ_names is not defined in the visible part of this script,
# and Bio_indices is only created by a commented-out line above — confirm
# both exist in the session before running these loops.

# EPT vs physico-chemical variables.
plots_indices_bio <- lapply(FQ_names, function(q)
  flow_scatter(Nat_alt_summer, q, "EPT", c("tomato2", "springgreen3")))
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_FQ.pdf")

# EPT vs the significant hydrologic metrics.
plots_indices_bio <- lapply(Indices_p_value, function(q)
  flow_scatter(Nat_alt_summer, q, "EPT", c("tomato2", "springgreen3")))
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT.pdf")

#---- OCH
plots_indices_bio <- lapply(Indices_p_value, function(q)
  flow_scatter(Nat_alt_summer, q, "OCH", c("brown2", "seagreen")))
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_OCH, filename = "Graphs_OCH.pdf")

#---- Diptera
plots_indices_bio <- lapply(Indices_p_value, function(q)
  flow_scatter(Nat_alt_summer, q, "Diptera", c("brown2", "seagreen")))
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
# BUG FIX: the original exported multi.page_OCH here, so Graphs_Diptera.pdf
# contained the OCH plots instead of the Diptera ones.
ggexport(multi.page_Diptera, filename = "Graphs_Diptera.pdf")

#---- EPT + OCH combined
plots_indices_bio <- lapply(Indices_p_value, function(q)
  flow_scatter(Nat_alt_summer, q, "EPT_OCH", c("brown2", "seagreen")))
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH.pdf")

#---- Non-insects
plots_indices_bio <- lapply(Indices_p_value, function(q)
  flow_scatter(Nat_alt_summer, q, "Non.Insect", c("brown2", "seagreen")))
multi.page_Non_insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Non_insect, filename = "Graphs_Non_insect.pdf")

#---- Insects, with per-regime correlation labels (ggpubr::ggscatter)
plots_indices_bio <- lapply(Indices_p_value, function(q) {
  ggscatter(Nat_alt_summer, x = q, y = "Insect", color = "Flow.regim",
            palette = "nrc", add = "reg.line", conf.int = TRUE) +
    stat_cor(aes(color = Flow.regim), label.x = 2.5, label.y.npc = "bottom") +
    theme_bw() +
    theme(legend.position = "none")
})
multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 1, ncol = 2)
ggexport(multi.page_Insect, filename = "Graphs_Insect.pdf")

#---- Each biotic index against one fixed hydrologic metric
plots_indices_bio <- lapply(Bio_indices, function(q)
  flow_scatter(Nat_alt_summer, "MeanJul", q, c("brown2", "seagreen"),
               shape = 20))
multi.page_MeanJul <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_MeanJul, filename = "Graphs_MeanJul.pdf")

plots_indices_bio <- lapply(Bio_indices, function(q)
  flow_scatter(Nat_alt_summer, "nPos", q, c("brown1", "springgreen3"),
               shape = 20, size = 0.8))
multi.page_nPos <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_nPos, filename = "Graphs_nPos.pdf")

plots_indices_bio <- lapply(Bio_indices, function(q)
  flow_scatter(Nat_alt_summer, "Mean90DayFlowMins", q, c("brown1", "springgreen3"),
               shape = 20, size = 0.8))
multi.page_Mean90Min <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Mean90Min, filename = "Graphs_Mean90Min.pdf")

# Plot Bio_Class -----------------------------------------------------------
# Significant metrics per flow class. The original redefined each of these
# vectors identically before every index plotted; they are defined once here.
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg',
                       'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs',
                       'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs',
                       'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax',
                       'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs',
                       'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar',
                       'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay',
                       'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh',
                       'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins',
                       'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs',
                       'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per',
                       'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals')

Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb',
                       'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays',
                       'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs',
                       'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay',
                       'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg',
                       'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs',
                       'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg',
                       'StDevMeanJul', 'StDevnPos', 'StDevJulianMin',
                       'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar',
                       'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per',
                       'X25per', 'StDevMean90DayFlowMaxs', 'FRE7',
                       'Mean1DayFlowMins', 'StDevJulianMax',
                       'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin',
                       'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs',
                       'Mean30DayFlowMaxs')

Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos',
                        'StDevnPulsesLow', 'Mean90DayFlowMins',
                        'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7',
                        'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow',
                        'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins',
                        'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per',
                        'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins',
                        'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov',
                        'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins',
                        'X5per', 'StDevMean90DayFlowMins', 'StDevnPos',
                        'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep',
                        'StDevMean7DayFlowMins', 'MeanNov',
                        'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb',
                        'StDevFRE1', 'StDevMean1DayFlowMins',
                        'StDevMean1DayFlowMaxs', 'MeanMar',
                        'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7',
                        'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3',
                        'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')

Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos',
                        'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins',
                        'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
                        'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow',
                        'Reversals', 'nPulsesLow', 'MeanAug',
                        'StDevMean7DayFlowMins', 'MeanNov',
                        'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2',
                        'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov',
                        'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7',
                        'MeanJan', 'StDevFRE7', 'StDevMean3DayFlowMins',
                        'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
                        'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins',
                        'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca',
                        'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar',
                        'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs',
                        'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul',
                        'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur',
                        'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun',
                        'StDevMeanSep', 'StDevMean7DayFlowMaxs',
                        'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

# Per-class scatter lists, all using the tomato/green regime colours.
# NOTE(review): Nat_alt_summer_3/_8/_10/_13 are the per-flow-class subsets —
# they are created elsewhere in the session; confirm they exist.
class_scatters <- function(dat, metrics, yvar) {
  lapply(metrics, function(q)
    flow_scatter(dat, q, yvar, c("tomato2", "springgreen3")))
}

##### EPT #####
# These EPT/OCH PDFs are written to the current directory (Biological_data);
# only the IASPT exports below switch to Bio_classes.
plots_indices_bio <- class_scatters(Nat_alt_summer_3, Indices_p_value_3, "EPT")
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class3.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_8, Indices_p_value_8, "EPT")
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class8.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_10, Indices_p_value_10, "EPT")
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class10.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_13, Indices_p_value_13, "EPT")
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class13.pdf")

##### OCH #####
plots_indices_bio <- class_scatters(Nat_alt_summer_3, Indices_p_value_3, "OCH")
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class3.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_8, Indices_p_value_8, "OCH")
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class8.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_10, Indices_p_value_10, "OCH")
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class10.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_13, Indices_p_value_13, "OCH")
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class13.pdf")

##### IASPT #####
plots_indices_bio <- class_scatters(Nat_alt_summer_3, Indices_p_value_3, "IASPT")
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
# From here on the exports go to the Bio_classes directory.
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class3.pdf")

plots_indices_bio <- class_scatters(Nat_alt_summer_8, Indices_p_value_8, "IASPT")
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class8.pdf")

# IASPT, class 10 — the matching ggarrange()/ggexport() follow on the next
# lines of the script.
plots_indices_bio <- class_scatters(Nat_alt_summer_10, Indices_p_value_10, "IASPT")
# IASPT, class 10: assemble and export the plot list built just above.
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
# All exports in this section go to Bio_classes; the original repeated this
# setwd() before every ggexport(), which is redundant once set.
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class10.pdf")

# Significant metrics per flow class. The original redefined each of these
# vectors identically before every index plotted; they are defined once here.
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg',
                       'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs',
                       'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs',
                       'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax',
                       'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs',
                       'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar',
                       'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay',
                       'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh',
                       'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins',
                       'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs',
                       'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per',
                       'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals')

Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb',
                       'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays',
                       'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs',
                       'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay',
                       'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg',
                       'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs',
                       'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg',
                       'StDevMeanJul', 'StDevnPos', 'StDevJulianMin',
                       'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar',
                       'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per',
                       'X25per', 'StDevMean90DayFlowMaxs', 'FRE7',
                       'Mean1DayFlowMins', 'StDevJulianMax',
                       'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin',
                       'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs',
                       'Mean30DayFlowMaxs')

Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos',
                        'StDevnPulsesLow', 'Mean90DayFlowMins',
                        'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7',
                        'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow',
                        'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins',
                        'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per',
                        'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins',
                        'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov',
                        'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins',
                        'X5per', 'StDevMean90DayFlowMins', 'StDevnPos',
                        'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep',
                        'StDevMean7DayFlowMins', 'MeanNov',
                        'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb',
                        'StDevFRE1', 'StDevMean1DayFlowMins',
                        'StDevMean1DayFlowMaxs', 'MeanMar',
                        'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7',
                        'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3',
                        'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')

Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos',
                        'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins',
                        'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
                        'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow',
                        'Reversals', 'nPulsesLow', 'MeanAug',
                        'StDevMean7DayFlowMins', 'MeanNov',
                        'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2',
                        'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov',
                        'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7',
                        'MeanJan', 'StDevFRE7', 'StDevMean3DayFlowMins',
                        'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
                        'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins',
                        'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca',
                        'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar',
                        'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs',
                        'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul',
                        'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur',
                        'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun',
                        'StDevMeanSep', 'StDevMean7DayFlowMaxs',
                        'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

# One regime-coloured scatter (points + per-regime lm fit, no legend) per
# metric in `metrics`, for the given per-class subset `dat` and index `yvar`.
# Defined locally so this section is self-contained.
plot_class_metrics <- function(dat, metrics, yvar) {
  lapply(metrics, function(q) {
    ggplot(dat, aes_string(x = q, y = yvar, color = "Flow.regim")) +
      geom_point(shape = 19, size = 0.7) +
      theme(legend.position = "none") +
      geom_smooth(method = lm, se = FALSE) +
      scale_color_manual(values = c("tomato2", "springgreen3"))
  })
}

## IASPT, class 13
plots_indices_bio <- plot_class_metrics(Nat_alt_summer_13, Indices_p_value_13, "IASPT")
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class13.pdf")

##### IBMWP #####
plots_indices_bio <- plot_class_metrics(Nat_alt_summer_3, Indices_p_value_3, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class3.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_8, Indices_p_value_8, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class8.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_10, Indices_p_value_10, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class10.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_13, Indices_p_value_13, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class13.pdf")

# Interactive sanity check of the column type (prints to console only).
class(Nat_alt_summer_10$IBMWP)

##### LIFE #####
plots_indices_bio <- plot_class_metrics(Nat_alt_summer_3, Indices_p_value_3, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class3.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_8, Indices_p_value_8, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class8.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_10, Indices_p_value_10, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class10.pdf")

plots_indices_bio <- plot_class_metrics(Nat_alt_summer_13, Indices_p_value_13, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class13.pdf")

##### EPT_OCH #####
plots_indices_bio <- plot_class_metrics(Nat_alt_summer_3, Indices_p_value_3, "EPT_OCH")
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class3.pdf")

## Class 8
Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb', 'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay', 'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg', 'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per', 'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins', 'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin', 'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs') plots_indices_bio <- lapply(Indices_p_value_8, function(q){ Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "EPT_OCH", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class8.pdf") ##Class 10 Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per', 'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1', 
'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay') plots_indices_bio <- lapply(Indices_p_value_10, function(q){ Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "EPT_OCH", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class10.pdf") ##Class 13 Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul', 'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow', 'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7', 'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per', 'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep', 'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr') plots_indices_bio <- lapply(Indices_p_value_13, function(q){ Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "EPT_OCH", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + 
geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio,nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class13.pdf") ### Non.Insect ### ##Class 3 Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg', 'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax', 'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs', 'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals') plots_indices_bio <- lapply(Indices_p_value_3, function(q){ Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "Non.Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) return(Bio_plots) }) multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class3.pdf") ##Class 8 Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb', 'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay', 'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg', 'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 
'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per', 'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins', 'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin', 'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs') plots_indices_bio <- lapply(Indices_p_value_8, function(q){ Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "Non.Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class8.pdf") ##Class 10 Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per', 'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1', 'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay') plots_indices_bio <- lapply(Indices_p_value_10, function(q){ Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "Non.Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ 
theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class10.pdf") ##Class 13 Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul', 'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow', 'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7', 'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per', 'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep', 'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr') plots_indices_bio <- lapply(Indices_p_value_13, function(q){ Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "Non.Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio,nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class13.pdf") ### Diptera ### 
##Class 3 Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg', 'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax', 'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs', 'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals') plots_indices_bio <- lapply(Indices_p_value_3, function(q){ Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "Diptera", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) return(Bio_plots) }) multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class3.pdf") ##Class 8 Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb', 'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay', 'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg', 'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per', 'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins', 'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin', 'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs') plots_indices_bio <- lapply(Indices_p_value_8, function(q){ Bio_plots <- 
ggplot(Nat_alt_summer_8, aes_string(x = q, y = "Diptera", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class8.pdf") ##Class 10 Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per', 'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1', 'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay') plots_indices_bio <- lapply(Indices_p_value_10, function(q){ Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "Diptera", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class10.pdf") ##Class 
13 Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul', 'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow', 'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7', 'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per', 'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep', 'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr') plots_indices_bio <- lapply(Indices_p_value_13, function(q){ Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "Diptera", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio,nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class13.pdf") ### Insect ### ##Class 3 Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg', 'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax', 'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh', 
'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs', 'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals') plots_indices_bio <- lapply(Indices_p_value_3, function(q){ Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) return(Bio_plots) }) multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Insect, filename = "Graphs_Insect_Class3.pdf") ##Class 8 Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb', 'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay', 'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg', 'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per', 'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins', 'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin', 'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs') plots_indices_bio <- lapply(Indices_p_value_8, function(q){ Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") 
ggexport(multi.page_Insect, filename = "Graphs_Insect_Class8.pdf") ##Class 10 Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per', 'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1', 'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay') plots_indices_bio <- lapply(Indices_p_value_10, function(q){ Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Insect, filename = "Graphs_Insect_Class10.pdf") ##Class 13 Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul', 'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow', 'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 
# Tail of the Insect Indices_p_value_13 vector opened on the previous line.
  'StDevFRE7', 'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec',
  'X95per', 'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins',
  'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca', 'StDevnNeg',
  'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow',
  'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul',
  'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin',
  'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep',
  'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

# Insect metric, class 13: one scatter plot per significant hydrological
# index, coloured by flow regime, per-regime linear fit, exported 2x2 per
# PDF page.
plots_indices_bio <- lapply(Indices_p_value_13, function(q){ Bio_plots <-
    ggplot(Nat_alt_summer_13, aes_string(x = q, y = "Insect", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_Insect <- ggarrange(plotlist = plots_indices_bio,nrow = 2, ncol =2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_Insect, filename = "Graphs_Insect_Class13.pdf")

# Ordinations -------------------------------------------------------------
# For each flow-regime class: PCoA (cmdscale) of the macroinvertebrate
# community on a Bray-Curtis distance of square-rooted relative abundances,
# plus a Jaccard/presence-absence variant, with environmental (Env),
# hydrological (Hyd) and physico-chemical (FQ) vectors fitted post hoc
# via vegan::envfit() and drawn on the ordination plot.

## Macroinvertebrates - CLASS 3
Nat_alt_summer_3 <- Nat_alt_summer %>% filter(Lev_20 == "3")
row.names(Nat_alt_summer_3) <- Nat_alt_summer_3$ID
# Community matrix and explanatory-variable blocks (column ranges come from
# the attribute file; assumes Hydrobiidae:Plumatellidae spans all taxa).
Taxa <- select(Nat_alt_summer_3, Hydrobiidae:Plumatellidae)
Env <- select(Nat_alt_summer_3, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN)
Hyd <- select(Nat_alt_summer_3, MeanJan:X95per)
FQ <- select(Nat_alt_summer_3, A_COND20SITU,A_PHSITU)
All <- cbind(Env, Hyd, FQ)
# Point colours/symbols prepared earlier in the data frame.
Col_EPT <- as.character(Nat_alt_summer_3$Color)
Col_Flow <- as.character(Nat_alt_summer_3$Flow_Color)
Sym_EPT <- Nat_alt_summer_3$Symbol
Taxa_RA <- decostand(Taxa, "total") ## Relative abundance
Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site
Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site
Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site
# Sanity checks: row totals of the raw and transformed matrices (printed).
rowSums (Taxa, na.rm = FALSE, dims = 1)
rowSums (Taxa_RA, na.rm = FALSE, dims = 1)
rowSums (Taxa_H, na.rm = FALSE, dims = 1)
rowSums (Taxa_PA, na.rm = FALSE, dims = 1)
Dist_bray_H <- vegdist(Taxa_H, method = "bray")
Dist_Jac <- vegdist(Taxa_PA, method = "jac")
PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302
PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21
# Fit explanatory vectors onto the PCoA axes; outer parentheses print the
# envfit summaries as a side effect.
(fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T))
(fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T))
(fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, scale(FQ), perm = 1000, na.rm = T))
(fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T))
# NOTE(review): the "226,8%" axis label looks like a typo (variance explained
# cannot exceed 100%) - confirm against the eigenvalues.
pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 3 - MI - Community Structure",#xlim = c(-0.5,0.2),
                     # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01),
                     xlab="PCoA 1 (226,8%)", ylab="PCoA 2 (3,37%)")
abline(h = 0, lty = 1)
abline(v = 0, lty = 1)
#mtext("GOF = 0,213", side=3, cex=0.8)
# Weighted-average taxon scores on the first two PCoA axes.
Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H)
###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv")
##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4")
#text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4")
#♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect)
points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT)
points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow)
#points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7)
plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7)
# NOTE(review): fit_Env is plotted twice (grey20 above, darkorange here);
# possibly one of the two calls was meant to draw a different fit - confirm.
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7)
plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)

### Macroinvertebrates - CLASS 8 ###
# Same analysis as class 3, on the class-8 subset.
Nat_alt_summer_8 <-
Nat_alt_summer %>% filter(Lev_20 == "8")
row.names(Nat_alt_summer_8) <- Nat_alt_summer_8$ID
Taxa <- select(Nat_alt_summer_8, Hydrobiidae:Plumatellidae)
Env <- select(Nat_alt_summer_8, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN)
Hyd <- select(Nat_alt_summer_8, MeanJan:X95per)
FQ <- select(Nat_alt_summer_8, A_COND20SITU,A_PHSITU)
All <- cbind(Env, Hyd, FQ)
Col_EPT <- as.character(Nat_alt_summer_8$Color)
Col_Flow <- as.character(Nat_alt_summer_8$Flow_Color)
Sym_EPT <- Nat_alt_summer_8$Symbol
Taxa_RA <- decostand(Taxa, "total") ## Relative abundance
Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site
Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site
Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site
rowSums (Taxa, na.rm = FALSE, dims = 1)
rowSums (Taxa_RA, na.rm = FALSE, dims = 1)
rowSums (Taxa_H, na.rm = FALSE, dims = 1)
rowSums (Taxa_PA, na.rm = FALSE, dims = 1)
Dist_bray_H <- vegdist(Taxa_H, method = "bray")
Dist_Jac <- vegdist(Taxa_PA, method = "jac")
PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302
PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21
(fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T))
(fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T))
(fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, scale(FQ), perm = 1000, na.rm = T))
(fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T))
pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 8 - MI - Community Structure",#xlim = c(-0.5,0.2),
                     # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01),
                     xlab="PCoA 1 (37,68%)", ylab="PCoA 2 (3,0%)")
abline(h = 0, lty = 1)
abline(v = 0, lty = 1)
#mtext("GOF = 0,213", side=3, cex=0.8)
Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H)
###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv")
##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4")
#text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4")
#♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect)
points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT)
points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow)
#points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7)
plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7)
# NOTE(review): fit_Env plotted twice again - see class 3 note.
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7)
plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)

## Macroinvertebrates - CLASS 10
Nat_alt_summer_10 <- Nat_alt_summer %>% filter(Lev_20 == "10")
row.names(Nat_alt_summer_10) <- Nat_alt_summer_10$ID
Taxa <- select(Nat_alt_summer_10, Hydrobiidae:Plumatellidae)
Env <- select(Nat_alt_summer_10, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN)
Hyd <- select(Nat_alt_summer_10, MeanJan:X95per)
FQ <- select(Nat_alt_summer_10, A_COND20SITU,A_PHSITU)
# NOTE(review): unlike classes 3/8, `All` here excludes FQ - confirm this
# is intentional and not a copy-paste omission.
All <- cbind(Env, Hyd)
Col_EPT <- as.character(Nat_alt_summer_10$Color)
Col_Flow <- as.character(Nat_alt_summer_10$Flow_Color)
Sym_EPT <- Nat_alt_summer_10$Symbol
Taxa_RA <- decostand(Taxa, "total") ## Relative abundance
Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site
Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site
Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site
rowSums (Taxa, na.rm = FALSE, dims = 1)
rowSums (Taxa_RA, na.rm = FALSE, dims = 1)
rowSums (Taxa_H, na.rm = FALSE, dims = 1)
rowSums (Taxa_PA, na.rm = FALSE, dims = 1)
Dist_bray_H <- vegdist(Taxa_H, method = "bray")
Dist_Jac <- vegdist(Taxa_PA, method = "jac")
PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302
PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21
(fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T))
(fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T))
# NOTE(review): FQ is fitted unscaled here, whereas classes 3/8 use
# scale(FQ) - confirm the inconsistency is deliberate.
(fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, (FQ), perm = 1000, na.rm = T))
(fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T))
pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 10 - MI - Community Structure",#xlim = c(-0.5,0.2),
                     # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01),
                     xlab="PCoA 1 (28,3%)", ylab="PCoA 2 (3,2%)")
abline(h = 0, lty = 1)
abline(v = 0, lty = 1)
#mtext("GOF = 0,213", side=3, cex=0.8)
Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H)
###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv")
##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4")
#text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4")
#♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect)
points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT)
points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow)
#points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7)
plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7)
plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)

## Macroinvertebrates - CLASS 13
# Same analysis; this section omits the Taxa_Norm transform and the rowSums
# sanity checks present in the other classes.
Nat_alt_summer_13 <- Nat_alt_summer %>% filter(Lev_20 == "13")
row.names(Nat_alt_summer_13) <- Nat_alt_summer_13$ID
Taxa <- select(Nat_alt_summer_13, Hydrobiidae:Plumatellidae)
Env <- select(Nat_alt_summer_13, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN)
Hyd <- select(Nat_alt_summer_13, MeanJan:X95per)
FQ <- select(Nat_alt_summer_13, A_COND20SITU,A_PHSITU)
# NOTE(review): FQ again excluded from `All` (as in class 10) - confirm.
All <- cbind(Env, Hyd)
Col_EPT <- as.character(Nat_alt_summer_13$Color)
Col_Flow <-
as.character(Nat_alt_summer_13$Flow_Color)
Sym_EPT <- Nat_alt_summer_13$Symbol
Taxa_RA <- decostand(Taxa, "total") ## Relative abundance
Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site
Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site
Dist_bray_H <- vegdist(Taxa_H, method = "bray")
Dist_Jac <- vegdist(Taxa_PA, method = "jac")
PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302
PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21
(fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T))
(fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T))
# NOTE(review): FQ unscaled here too (cf. classes 3/8) - confirm.
(fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, (FQ), perm = 1000, na.rm = T))
(fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T))
pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 13 - MI - Community Structure",#xlim = c(-0.5,0.2),
                     # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01),
                     xlab="PCoA 1 (25,9%)", ylab="PCoA 2 (2,5%)")
abline(h = 0, lty = 1)
abline(v = 0, lty = 1)
#mtext("GOF = 0,213", side=3, cex=0.8)
Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H)
points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT)
points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7)
plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7)
plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7)
plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
/Loop_graphs_MI.R
no_license
cassrp/Hyd_alteration
R
false
false
83,293
r
# Loop_graphs_MI.R -- data preparation and Natural-vs-Altered boxplots.
# Builds the comparable Natural/Altered summer data set, then boxplots of
# hydrological / environmental / physico-chemical variables (overall and per
# hydrological class) with Wilcoxon p-value tables. Side effects: writes CSV,
# PDF and PNG files under the working directory.
library(ggplot2) # v. 3.2.0
library(purrr) # v. 0.3.2
library(ggpubr)
library(dplyr)
library(psych)

# Select altered ----------------------------------------------------------
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Family_MI <- read.csv("Macroinvertebrates_family_Spain_att.csv")

# Summer samples (June-October) below the conductivity cut-off.
Family_MI_Summer <- Family_MI %>%
  filter(MONTH == "6" | MONTH == "7" | MONTH == "8"| MONTH == "9"| MONTH == "10")%>%
  filter(A_COND20SITU < 800)
#Family_MI_Summer_ranges <- describeBy(Family_MI_Summer, Family_MI_Summer$Flow.regim)
#Family_MI_Summer_ranges_natural <- Family_MI_Summer_ranges$Natural
#Family_MI_Summer_ranges_altered <- Family_MI_Summer_ranges$Altered
#write.csv2(Family_MI_Summer_ranges_natural, "Family_MI_Summer_ranges_natural.csv")
#write.csv2(Family_MI_Summer_ranges_altered, "Family_MI_Summer_ranges_altered.csv")
#MI_ranges_natural <- read.csv2("Family_MI_Summer_natural.csv", row.names = 1)

# Split summer samples by flow regime; Altered is range-filtered against
# Natural below instead of getting the conductivity cut here.
Altered <- Family_MI %>%
  filter(Flow.regim == "Altered") %>%
  filter(MONTH == "6" | MONTH == "7" | MONTH == "8"| MONTH == "9"| MONTH == "10" )
Natural <- Family_MI %>%
  filter(Flow.regim == "Natural") %>%
  filter(MONTH == "6" | MONTH == "7" | MONTH == "8"| MONTH == "9"| MONTH == "10") %>%
  filter(A_COND20SITU < 800)

# Keep only Altered sites whose descriptors fall inside the range observed at
# Natural sites, so the two groups are environmentally comparable.
Altered_filter_range <- Altered %>%
  #filter(between(A_TAGUA, min(Natural$A_TAGUA, na.rm = T), max(Natural$A_TAGUA, na.rm = T)))%>%
  filter(between(A_COND20SITU, min(Natural$A_COND20SITU, na.rm = T), max(Natural$A_COND20SITU, na.rm = T)))%>%
  filter(between(A_PHSITU, min(Natural$A_PHSITU, na.rm = T), max(Natural$A_PHSITU, na.rm = T)))%>%
  filter(between(MN_UHD, min(Natural$MN_UHD), max(Natural$MN_UHD)))%>%
  filter(between(MN_AGR, min(Natural$MN_AGR), max(Natural$MN_AGR)))%>%
  filter(between(MN_PAS, min(Natural$MN_PAS), max(Natural$MN_PAS)))%>%
  # FIX: upper bound previously used max(Natural$MN_PAS) (copy-paste error);
  # the MN_SSH range must be bounded by MN_SSH itself.
  filter(between(MN_SSH, min(Natural$MN_SSH), max(Natural$MN_SSH)))%>%
  filter(between(MN_TEMP, min(Natural$MN_TEMP), max(Natural$MN_TEMP)))%>%
  filter(between(MN_PREC, min(Natural$MN_PREC), max(Natural$MN_PREC)))%>%
  filter(between(MN_ETP, min(Natural$MN_ETP), max(Natural$MN_ETP)))%>%
  filter(between(MN_HARD, min(Natural$MN_HARD), max(Natural$MN_HARD)))%>%
  filter(between(MN_COND, min(Natural$MN_COND), max(Natural$MN_COND)))%>%
  filter(between(AREA_KM2, min(Natural$AREA_KM2), max(Natural$AREA_KM2)))%>%
  filter(between(DEPTH, min(Natural$DEPTH), max(Natural$DEPTH)))%>%
  filter(between(WIDTH, min(Natural$WIDTH), max(Natural$WIDTH)))%>%
  filter(between(ELEV_M, min(Natural$ELEV_M), max(Natural$ELEV_M)))%>%
  filter(between(SINUOSITY, min(Natural$SINUOSITY), max(Natural$SINUOSITY)))%>%
  filter(between(MnSlope, min(Natural$MnSlope), max(Natural$MnSlope)))

# Combined Natural + comparable Altered data set used by everything below.
Nat_alt_summer <- merge(Natural, Altered_filter_range, all = T)
write.csv2(Nat_alt_summer, "Nat_alt_summer_614.csv", row.names = T)

# Graphs Hyd --------------------------------------------------------------
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Nat_alt_summer <- read.csv2("Nat_alt_summer_614.csv", stringsAsFactors=F)
#Nat_alt_summer2 <- Nat_alt_summer[!rownames(Nat_alt_summer) %in% c("CAN_M_097", "GAL_M_010", "GAL_M_011", "GAL_M_012", "GAL_M_013"), ]

# Variable groups: physico-chemistry, hydrology, environment.
FQ <- select(Nat_alt_summer, A_O2_SAT:A_PHSITU)
Hyd <- select(Nat_alt_summer, MeanJan:X95per)
Env <- select(Nat_alt_summer, AREA_KM2:MN_SSH)
Lev_20 <- as.factor(Nat_alt_summer$Lev_20)
Tipologia <- as.factor(Nat_alt_summer$Tipologia)
FQ_names = names(FQ)
Hyd_names = names(Hyd)
Env_names = names(Env)

# Per-hydrological-class subsets.
Nat_alt_summer_3 <- Nat_alt_summer %>% filter(Lev_20 == "3")
Nat_alt_summer_8 <- Nat_alt_summer %>% filter(Lev_20 == "8")
Nat_alt_summer_10 <- Nat_alt_summer %>% filter(Lev_20 == "10")
Nat_alt_summer_13 <- Nat_alt_summer %>% filter(Lev_20 == "13")
theme_set(theme_bw())

# One notched boxplot + jittered points per hydrological variable.
myplots <- lapply(Hyd_names, function(q){
  Hyd_plots <- ggplot(Nat_alt_summer, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("tomato2", "palegreen3")) +
    theme(legend.position = "none") +
    labs(x = NULL) #+ annotate("text", x = 4, y = 25, label = "Test_p_value", parse = TRUE)
  return(Hyd_plots)
})

# Same layout for environmental variables (different palette).
myplots_env <- lapply(Env_names, function(q){
  Env_plots <- ggplot(Nat_alt_summer, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("#00AFBB", "#E7B800")) +
    theme(legend.position = "none") +
    labs(x = NULL) #+ annotate("text", x = 4, y = 25, label = "Test_p_value", parse = TRUE)
  return(Env_plots)
})

# Same layout for physico-chemical variables.
myplots_FQ <- lapply(FQ_names, function(q){
  FQ_plots <- ggplot(Nat_alt_summer, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("#00AFBB", "#E7B800")) +
    theme(legend.position = "none") +
    labs(x = NULL) #+ annotate("text", x = 4, y = 25, label = "Test_p_value", parse = TRUE)
  return(FQ_plots)
})

# Wilcoxon rank-sum test of each hydrological variable against flow regime.
p.value.list <- lapply(Hyd_names, function(resp) {
  mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
  Test <- wilcox.test(mF, data=Nat_alt_summer)
  return(Test$p.value)
})
class(p.value.list)
modelList <- cbind(Hyd_names,p.value.list)
write.csv2(modelList, "p_values_Hyd2.csv", row.names = F)

# Export the plot lists as multi-page PDFs.
multi.page <- ggarrange(plotlist = myplots, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_614.pdf")
multi.page_env <- ggarrange(plotlist = myplots_env, nrow = 3, ncol = 4)
ggexport(multi.page_env, filename = "Graphs_Env.pdf")
multi.page_FQ <- ggarrange(plotlist = myplots_FQ, nrow = 3, ncol = 4)
ggexport(multi.page_FQ, filename = "Graphs_FQ.pdf")

# Hydrological classes ----------------------------------------------------
## Class 10: same boxplots + Wilcoxon tests restricted to one class.
Nat_alt_summer_10 <- Nat_alt_summer %>% filter(Lev_20 == "10")
myplots_10 <- lapply(Hyd_names, function(q){
  Hyd_plots_10 <- ggplot(Nat_alt_summer_10, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("tomato2", "palegreen3")) +
    theme(legend.position = "none") +
    labs(x = NULL)
  return(Hyd_plots_10)
})
multi.page <- ggarrange(plotlist = myplots_10, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_Class10.pdf")
p.value.list <- lapply(Hyd_names, function(resp) {
  mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
  Test <- wilcox.test(mF, data=Nat_alt_summer_10)
  return(Test$p.value)
})
class(p.value.list)
modelList <- cbind(Hyd_names,p.value.list)
write.csv2(modelList, "p_values_Class10.csv", row.names = F)

## Class 13.
Nat_alt_summer_13 <- Nat_alt_summer %>% filter(Lev_20 == "13")
myplots_13 <- lapply(Hyd_names, function(q){
  Hyd_plots_13 <- ggplot(Nat_alt_summer_13, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("tomato2", "palegreen3")) +
    theme(legend.position = "none") +
    labs(x = NULL)
  return(Hyd_plots_13)
})
multi.page <- ggarrange(plotlist = myplots_13, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_Class13.pdf")
p.value.list <- lapply(Hyd_names, function(resp) {
  mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
  Test <- wilcox.test(mF, data=Nat_alt_summer_13)
  return(Test$p.value)
})
class(p.value.list)
modelList <- cbind(Hyd_names,p.value.list)
write.csv2(modelList, "p_values_Class13.csv", row.names = F)

## Class 3.
Nat_alt_summer_3 <- Nat_alt_summer %>% filter(Lev_20 == "3")
myplots_3 <- lapply(Hyd_names, function(q){
  Hyd_plots_3 <- ggplot(Nat_alt_summer_3, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("tomato2", "palegreen3")) +
    theme(legend.position = "none") +
    labs(x = NULL)
  return(Hyd_plots_3)
})
multi.page <- ggarrange(plotlist = myplots_3, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_Class3.pdf")
p.value.list <- lapply(Hyd_names, function(resp) {
  mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
  Test <- wilcox.test(mF, data=Nat_alt_summer_3)
  return(Test$p.value)
})
class(p.value.list)
modelList <- cbind(Hyd_names,p.value.list)
write.csv2(modelList, "p_values_Class3.csv", row.names = F)

## Class 8.
Nat_alt_summer_8 <- Nat_alt_summer %>% filter(Lev_20 == "8")
myplots_8 <- lapply(Hyd_names, function(q){
  Hyd_plots_8 <- ggplot(Nat_alt_summer_8, aes_string(x = "Flow.regim", y = q)) +
    geom_boxplot(width = 0.45, fill = "white", notch = T) +
    geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 0.5) +
    scale_color_manual(values = c("tomato2", "palegreen3")) +
    theme(legend.position = "none") +
    labs(x = NULL)
  return(Hyd_plots_8)
})
multi.page <- ggarrange(plotlist = myplots_8, nrow = 3, ncol = 4)
ggexport(multi.page, filename = "Graphs_Hyd_Class8.pdf")
p.value.list <- lapply(Hyd_names, function(resp) {
  mF <- formula(paste(resp, " ~ as.factor(Flow.regim)"))
  Test <- wilcox.test(mF, data=Nat_alt_summer_8)
  return(Test$p.value)
})
class(p.value.list)
modelList <- cbind(Hyd_names,p.value.list)
write.csv2(modelList, "p_values_Class8.csv", row.names = F)

# 1x1 plots ---------------------------------------------------------------
### These work fine (one standalone boxplot per monthly mean flow).
### NOTE(review): each plot object shadows a column name (MeanJan etc.).
MeanJan <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanJan)) +
  geom_boxplot(width = 0.4, fill = "white") +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL) +
  theme(legend.position = "none") # Remove x axis label
MeanFeb <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanFeb)) +
  geom_boxplot(width = 0.4, fill = "white") +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL) +
  theme(legend.position = "none") # Remove x axis label
MeanMar <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanMar)) +
  geom_boxplot(width = 0.4, fill = "white") +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL) +
  theme(legend.position = "none") # Remove x axis label
MeanApr <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanApr)) +
  geom_boxplot(width = 0.4, fill = "white") +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL) +
  theme(legend.position = "none") # Remove x axis label
MeanMay <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanMay)) +
  geom_boxplot(width = 0.4, fill = "white") +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL) +
  theme(legend.position = "none") # Remove x axis label
MeanJun <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanJun)) +
  geom_boxplot(width = 0.4, fill = "white", show.legend = FALSE) +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL)+
  theme(legend.position = "none") # Remove x axis label
MeanJul <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanJul)) +
  geom_boxplot(width = 0.4, fill = "white", show.legend = FALSE) +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL)+
  theme(legend.position = "none") # Remove x axis label
MeanAug <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanAug)) +
  geom_boxplot(width = 0.4, fill = "white", show.legend = FALSE) +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL)+
  theme(legend.position = "none") # Remove x axis label
MeanSep <- ggplot(Nat_alt_summer, aes(x = factor(Flow.regim), y = MeanSep)) +
  geom_boxplot(width = 0.4, fill = "white", show.legend = FALSE) +
  geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1) +
  scale_color_manual(values = c("#00AFBB", "#E7B800")) +
  labs(x = NULL)+
  theme(legend.position = "none")

# 3x3 grid of the monthly plots exported as a PNG.
library("cowplot")
Hyd_1_plot <- plot_grid(MeanJan, MeanFeb, MeanMar, MeanApr, MeanMay, MeanJun,
                        MeanJul, MeanAug, MeanSep, ncol = 3, nrow = 3)
save_plot("Hyd1.png", Hyd_1_plot, base_width = 5)

#for (i in seq_along(Hyd1)) {
# NOTE(review): the for-header above is commented out but its body is not.
# 'Hyd1' and 'i' are not defined anywhere visible, so running these lines as-is
# errors. Restore the loop (and define Hyd1) before using this export snippet.
print(i)
myfile <- file.path(paste("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Coord/Graphs/", Hyd1[[i]],".png",sep=""))
print(myfile)
print( ggplot(Nat_alt_summer, aes_string(x = "Flow.regim", y = Hyd[i])) +
         geom_boxplot(width = 0.3, fill = "white") +
         geom_jitter(aes(color = Flow.regim, shape = Flow.regim), width = 0.1, size = 1.5) +
         scale_color_manual(values = c("#00AFBB", "#E7B800")) +
         theme(legend.position = "none") +
         ggsave(myfile) )
#plot(value)}

# Ad-hoc Wilcoxon checks.
Test_MeanJan <- wilcox.test(MeanJan ~ as.factor(Flow.regim), data=Nat_alt_summer)
# NOTE(review): Nat_alt_summer2 is only defined in a commented-out line above.
wilcox.test(JulianMin ~ Flow.regim, data=Nat_alt_summer2)
wilcox.test(MeanMar ~ as.factor(Flow.regim), data=Nat_alt_summer)
# NOTE(review): the formula below is missing its response variable and errors.
wilcox.test( ~ as.factor(Flow.regim), data=Nat_alt_summer)

# Biotic indices ----------------------------------------------------------
library(biomonitoR)
library(biotic)
library(BBI)
library(data.table)  # was require(); library() errors loudly if missing
# NOTE(review): the second setwd() immediately overrides the first.
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Coord")
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/")
Nat_alt_summer <- read.csv2("Nat_alt_summer_614.csv")
#Nat_alt_summer <- Nat_alt_summer[!rownames(Nat_alt_summer) %in% c("GAL_M_010", "GAL_M_011", "GAL_M_012", "GAL_M_013"), ]
# Taxa table keeps the site ID for the transpose step that follows.
Taxa <- select(Nat_alt_summer, ID, Hydrobiidae:Plumatellidae)
setnames(Taxa, "Ancylidae", "Ancylus")
## Transpose the taxa table (taxa in rows, sites in columns) as required by the
## biotic-index packages below.
Taxa_transp <- dcast(melt(Taxa, id.vars = "ID"), variable ~ ID) ## transpose to use the packages
setnames(Taxa_transp, "variable", "Taxa")

## Candidate variable-name vectors, kept commented out. NOTE(review): the
## plotting sections further down reference 'Indices_p_value' and
## 'Bio_indices' -- uncomment these before running that code.
#Indices <- select(Nat_alt_summer, EPT:IASPT)
#Bio_indices <- names(Indices)
#Indices_p_value <- c('MeanJan', 'MeanFeb', 'MeanMar', 'MeanApr', 'MeanJun',
#  'MeanJul', 'MeanAug', 'MeanSep', 'MeanNov', 'MeanDec', 'Mean1DayFlowMaxs',
#  'Mean3DayFlowMaxs', 'Mean7DayFlowMaxs', 'Mean90DayFlowMins', 'ZeroFlowDays',
#  'JulianMin', 'JulianMax', 'nPulsesLow', 'MeanPulseLengthLow', 'nPos', 'nNeg',
#  'meanNeg', 'FRE3', 'FRE7', 'Reversals', 'StDevMeanMay', 'StDevMeanJun',
#  'StDevMeanJul', 'StDevMeanAug', 'StDevMeanSep', 'StDevMeanOct', 'StDevMeanNov',
#  'StDevMean1DayFlowMins', 'StDevMean3DayFlowMins', 'StDevMean3DayFlowMaxs',
#  'StDevMean7DayFlowMins', 'StDevMean7DayFlowMaxs', 'StDevMean30DayFlowMins',
#  'StDevMean30DayFlowMaxs', 'StDevMean90DayFlowMins', 'StDevMean90DayFlowMaxs',
#  'StDevZeroFlowDays', 'StDevBFI', 'StDevJulianMin', 'StDevJulianMax',
#  'StDevnPulsesLow', 'StDevnPulsesHigh', 'StDevMeanPulseLengthHigh',
#  'StDevnPos', 'StDevnNeg', 'StDevFRE1', 'StDevFRE3', 'StDevFRE7',
#  'StDevReversals', 'Predictability', 'X5per')

# Calculate indices -------------------------------------------------------
library(biotic)
## Base biotic indices (default set, then LIFE as numeric) from 'biotic'.
Index <- calcindex(Taxa_transp)
Index$Life_biotic <- calcindex(Taxa_transp, "LIFE", "num")
library("biomonitoR")
data(macro_ex)
# Prepare data for the analysis.
# Biotic index calculation (biomonitoR) and index-vs-predictor scatterplots.
# Depends on 'Taxa_transp', 'Taxa', 'Index', 'Nat_alt_summer*' and 'FQ_names'
# defined earlier in this file.
data.bio <- asBiomonitor(Taxa_transp)
data.agR <- aggregatoR(data.bio)

# Quick interactive richness checks (printed, not stored).
richness(data.agR, "Genus")
richness(data.agR, "Family")

# Diversity and biotic indices appended to the 'Index' table.
Index$Richness <- allrich(data.agR)
Index$Shannon <- biomonitoR::shannon(data.agR)
Index$Simpson <- simpson(data.agR)
Index$Margalef <- margalef(data.agR)
Index$Menhinick <- menhinick(data.agR)
Index$Life2 <- life(data.agR)
# calculate iberian bmwp and aspt
Index$IBMWP <- bmwp(data.agR, method="spa")
Index$IASPT <- aspt(data.agR, method="spa")
Index$BMWP <- bmwp(data.agR)
# NOTE(review): 'Taxa' still contains the character ID column here, so
# vegan::diversity receives a non-numeric column -- confirm it should be
# dropped before this call.
Index$Div_Shannon <- diversity(Taxa, index = "shannon")
write.csv2(Index, "indices_614.csv", dec = "," )

# plot indices bio --------------------------------------------------------
# NOTE(review): 'Indices_p_value' and 'Bio_indices' are only defined in
# commented-out lines earlier in this file; uncomment them before running
# the sections that use them.
#FQ
plots_indices_bio <- lapply(FQ_names, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_FQ.pdf")

# EPT vs every hydrological index.
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT.pdf")
#----
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown2", "seagreen")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_OCH, filename = "Graphs_OCH.pdf")
#----
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "Diptera", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown2", "seagreen")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
# FIX: previously exported multi.page_OCH here (copy-paste error), which wrote
# the OCH panel into Graphs_Diptera.pdf.
ggexport(multi.page_Diptera, filename = "Graphs_Diptera.pdf")
#----
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "EPT_OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown2", "seagreen")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH.pdf")
#----
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = q, y = "Non.Insect", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown2", "seagreen")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_Non_insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_Non_insect, filename = "Graphs_Non_insect.pdf")
#----
# Insect: ggpubr scatter with regression line, CI and correlation label.
plots_indices_bio <- lapply(Indices_p_value, function(q){
  Bio_plots <- ggscatter(Nat_alt_summer, x = q, y = "Insect", color = "Flow.regim",
                         palette = "nrc", add = "reg.line", conf.int = T) +
    stat_cor(aes(color = Flow.regim), label.x = 2.5, label.y.npc = "bottom") +
    theme_bw()+
    theme(legend.position = "none")
  return(Bio_plots)
})
multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 1, ncol =2)
ggexport(multi.page_Insect, filename = "Graphs_Insect.pdf")
#----
# Every biotic index vs selected hydrological predictors.
plots_indices_bio <- lapply(Bio_indices, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = "MeanJul", y = q, color="Flow.regim")) +
    geom_point(shape=20, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown2", "seagreen")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_MeanJul <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_MeanJul, filename = "Graphs_MeanJul.pdf")

plots_indices_bio <- lapply(Bio_indices, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = "nPos", y = q, color="Flow.regim")) +
    geom_point(shape=20, size=0.8)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown1", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_nPos <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_nPos, filename = "Graphs_nPos.pdf")

plots_indices_bio <- lapply(Bio_indices, function(q){
  Bio_plots <- ggplot(Nat_alt_summer, aes_string(x = "Mean90DayFlowMins", y = q, color="Flow.regim")) +
    geom_point(shape=20, size=0.8)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("brown1", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_Mean90Min <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_Mean90Min, filename = "Graphs_Mean90Min.pdf")

# Plot Bio_Class ----------------------------------------------------------
# Per-class hydrological index name vectors. These were previously re-assigned
# with identical contents before each of the EPT, OCH and IASPT sections; they
# are defined once here and reused (values unchanged).
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg', 'MeanApr',
  'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul', 'StDevMean30DayFlowMaxs',
  'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax', 'MeanAug', 'StDevMeanSep',
  'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug', 'l2', 'MeanMar', 'StDevMeanJul',
  'Mean30DayFlowMaxs', 'MeanMay', 'X75per', 'MeanDec', 'lca', 'StDevnPulsesHigh',
  'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'Mean7DayFlowMins',
  'StDevMean90DayFlowMaxs', 'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per',
  'nPulsesHigh', 'Mean30DayFlowMins', 'StDevReversals')
Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb', 'StDevReversals',
  'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays', 'StDevMean7DayFlowMaxs',
  'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay', 'StDevMeanSep', 'lkur',
  'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr', 'StDevMean30DayFlowMaxs',
  'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg', 'StDevMeanJul', 'StDevnPos',
  'StDevJulianMin', 'MeanMar', 'nPulsesHigh', 'nNeg', 'StDevMeanMar', 'StDevMeanJun',
  'StDevMean1DayFlowMaxs', 'X95per', 'X25per', 'StDevMean90DayFlowMaxs', 'FRE7',
  'Mean1DayFlowMins', 'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1',
  'JulianMin', 'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs')
Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow',
  'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg',
  'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul',
  'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals',
  'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins',
  'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per',
  'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca',
  'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins',
  'MeanDec', 'MeanFeb', 'StDevFRE1', 'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs',
  'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec',
  'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')
Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs',
  'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
  'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow',
  'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per',
  'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per',
  'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7',
  'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
  'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI',
  'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow',
  'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul', 'MeanMay',
  'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs',
  'StDevMeanJun', 'StDevMeanSep', 'StDevMean7DayFlowMaxs', 'StDevMeanFeb',
  'StDevMeanMar', 'MeanApr')

##### EPT #####
##Class 3
plots_indices_bio <- lapply(Indices_p_value_3, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class3.pdf")
##Class 8
plots_indices_bio <- lapply(Indices_p_value_8, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class8.pdf")
##Class 10
plots_indices_bio <- lapply(Indices_p_value_10, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class10.pdf")
##Class 13
plots_indices_bio <- lapply(Indices_p_value_13, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "EPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_EPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_EPT, filename = "Graphs_EPT_Class13.pdf")

##### OCH #####
##Class 3
plots_indices_bio <- lapply(Indices_p_value_3, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class3.pdf")
##Class 8
plots_indices_bio <- lapply(Indices_p_value_8, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class8.pdf")
##Class 10
plots_indices_bio <- lapply(Indices_p_value_10, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class10.pdf")
##Class 13
plots_indices_bio <- lapply(Indices_p_value_13, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "OCH", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10')
  return(Bio_plots)
})
multi.page_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
ggexport(multi.page_OCH, filename = "Graphs_OCH_Class13.pdf")

##### IASPT #####
# NOTE(review): each IASPT export switches the working directory to Bio_classes
# first, unlike the EPT/OCH sections above.
##Class 3
plots_indices_bio <- lapply(Indices_p_value_3, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_3, aes_string(x = q, y = "IASPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3"))
  return(Bio_plots)
})
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class3.pdf")
##Class 8
plots_indices_bio <- lapply(Indices_p_value_8, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_8, aes_string(x = q, y = "IASPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3"))
  return(Bio_plots)
})
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class8.pdf")
##Class 10
plots_indices_bio <- lapply(Indices_p_value_10, function(q){
  Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "IASPT", color="Flow.regim")) +
    geom_point(shape=19, size=0.7)+
    theme(legend.position = "none") +
    geom_smooth(method=lm, se=F) +
    scale_color_manual(values = c("tomato2", "springgreen3"))
  return(Bio_plots)
})
## ---- IASPT (continued) / IBMWP / LIFE / EPT_OCH vs hydrologic indices ----
## The IASPT class-10 plot list was built just above; arrange it four panels
## per page and export as a multi-page PDF. The remaining metric/class
## figures are then produced through a single helper instead of repeating
## the same ggplot recipe for every combination.
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class10.pdf")

## Build one scatter plot per hydrologic index: the index on x, the chosen
## biological metric on y, points coloured by flow regime, with a per-regime
## least-squares fit (no confidence band).
## aes_string() is deprecated in ggplot2 (>= 3.4); the `.data` pronoun is the
## supported way to map columns named by character strings.
##   data    - data frame holding the indices, the metric and Flow.regim
##   indices - character vector of index column names (one plot each)
##   metric  - name of the biological metric column (y axis)
## Returns a list of ggplot objects, one per index.
index_plots <- function(data, indices, metric) {
  lapply(indices, function(idx) {
    ggplot(data, aes(x = .data[[idx]], y = .data[[metric]],
                     color = .data$Flow.regim)) +
      geom_point(shape = 19, size = 0.7) +
      theme(legend.position = "none") +
      geom_smooth(method = "lm", se = FALSE) +
      scale_color_manual(values = c("tomato2", "springgreen3"))
  })
}

## Hydrologic indices retained per flow-regime class (name suggests the
## subset with significant p-values from an earlier screening -- TODO
## confirm). The vectors are identical for every biological metric, so they
## are defined once here instead of once per metric.
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg',
  'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul',
  'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax',
  'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug',
  'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per',
  'MeanDec', 'lca', 'StDevnPulsesHigh', 'StDevMean1DayFlowMaxs',
  'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs',
  'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh',
  'Mean30DayFlowMins', 'StDevReversals')
Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb',
  'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays',
  'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay',
  'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr',
  'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg',
  'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh',
  'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per',
  'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins',
  'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin',
  'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs')
Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos',
  'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs',
  'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow',
  'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs',
  'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs',
  'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov',
  'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per',
  'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg',
  'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov',
  'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1',
  'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar',
  'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec',
  'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')
Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs',
  'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
  'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow',
  'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins',
  'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov',
  'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7',
  'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
  'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI',
  'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar',
  'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins',
  'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax',
  'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep',
  'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

## IASPT, class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "IASPT")
multi.page_IASPT <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IASPT, filename = "Graphs_IASPT_Class13.pdf")

##### IBMWP #####
## Class 3
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class3.pdf")
## Class 8
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class8.pdf")
## Class 10
plots_indices_bio <- index_plots(Nat_alt_summer_10, Indices_p_value_10, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class10.pdf")
## Class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "IBMWP")
multi.page_IBMWP <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_IBMWP, filename = "Graphs_IBMWP_Class13.pdf")

class(Nat_alt_summer_10$IBMWP) # quick type check kept from the original script

##### LIFE #####
## Class 3
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class3.pdf")
## Class 8
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class8.pdf")
## Class 10
plots_indices_bio <- index_plots(Nat_alt_summer_10, Indices_p_value_10, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class10.pdf")
## Class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "LIFE")
multi.page_LIFE <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_LIFE, filename = "Graphs_LIFE_Class13.pdf")

##### EPT_OCH #####
## Class 3 (classes 8, 10 and 13 continue below)
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "EPT_OCH")
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class3.pdf")
## ---- EPT_OCH (classes 8/10/13) and Non.Insect vs hydrologic indices ----
## Same figure recipe as the sections above, expressed once as a helper.
## aes_string() is deprecated in ggplot2 (>= 3.4); the `.data` pronoun is the
## supported way to map columns named by character strings. (The helper is
## redefined here so this section runs on its own.)
index_plots <- function(data, indices, metric) {
  lapply(indices, function(idx) {
    ggplot(data, aes(x = .data[[idx]], y = .data[[metric]],
                     color = .data$Flow.regim)) +
      geom_point(shape = 19, size = 0.7) +
      theme(legend.position = "none") +
      geom_smooth(method = "lm", se = FALSE) +
      scale_color_manual(values = c("tomato2", "springgreen3"))
  })
}

## Hydrologic indices retained per flow-regime class (name suggests the
## subset with significant p-values from an earlier screening -- TODO
## confirm). Defined once; the same vectors serve both metrics below.
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg',
  'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul',
  'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax',
  'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug',
  'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per',
  'MeanDec', 'lca', 'StDevnPulsesHigh', 'StDevMean1DayFlowMaxs',
  'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs',
  'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh',
  'Mean30DayFlowMins', 'StDevReversals')
Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb',
  'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays',
  'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay',
  'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr',
  'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg',
  'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh',
  'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per',
  'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins',
  'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin',
  'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs')
Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos',
  'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs',
  'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow',
  'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs',
  'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs',
  'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov',
  'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per',
  'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg',
  'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov',
  'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1',
  'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar',
  'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec',
  'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')
Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs',
  'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
  'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow',
  'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins',
  'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov',
  'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7',
  'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
  'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI',
  'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar',
  'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins',
  'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax',
  'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep',
  'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

## EPT_OCH, class 8
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "EPT_OCH")
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class8.pdf")
## EPT_OCH, class 10
plots_indices_bio <- index_plots(Nat_alt_summer_10, Indices_p_value_10, "EPT_OCH")
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class10.pdf")
## EPT_OCH, class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "EPT_OCH")
multi.page_EPT_OCH <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_EPT_OCH, filename = "Graphs_EPT_OCH_Class13.pdf")

### Non.Insect ###
## Class 3
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "Non.Insect")
multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class3.pdf")
## Class 8
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "Non.Insect")
multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class8.pdf")
## Class 10
plots_indices_bio <- index_plots(Nat_alt_summer_10, Indices_p_value_10, "Non.Insect")
multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class10.pdf")
## Class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "Non.Insect")
multi.page_Non.Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Non.Insect, filename = "Graphs_Non.Insect_Class13.pdf")

### Diptera (continues below) ###
## ---- Diptera and Insect vs hydrologic indices ----
## Same figure recipe as the sections above, expressed once as a helper.
## aes_string() is deprecated in ggplot2 (>= 3.4); the `.data` pronoun is the
## supported way to map columns named by character strings. (The helper is
## redefined here so this section runs on its own.)
index_plots <- function(data, indices, metric) {
  lapply(indices, function(idx) {
    ggplot(data, aes(x = .data[[idx]], y = .data[[metric]],
                     color = .data$Flow.regim)) +
      geom_point(shape = 19, size = 0.7) +
      theme(legend.position = "none") +
      geom_smooth(method = "lm", se = FALSE) +
      scale_color_manual(values = c("tomato2", "springgreen3"))
  })
}

## Hydrologic indices retained per flow-regime class (name suggests the
## subset with significant p-values from an earlier screening -- TODO
## confirm). Defined once; the same vectors serve both metrics below.
Indices_p_value_3 <- c('StDevZeroFlowDays', 'ZeroFlowDays', 'nPos', 'nNeg',
  'MeanApr', 'JulianMax', 'StDevMean7DayFlowMaxs', 'StDevnPos', 'MeanJul',
  'StDevMean30DayFlowMaxs', 'Mean90DayFlowMaxs', 'StDevnNeg', 'StDevJulianMax',
  'MeanAug', 'StDevMeanSep', 'StDevMean3DayFlowMaxs', 'MeanSep', 'StDevMeanAug',
  'l2', 'MeanMar', 'StDevMeanJul', 'Mean30DayFlowMaxs', 'MeanMay', 'X75per',
  'MeanDec', 'lca', 'StDevnPulsesHigh', 'StDevMean1DayFlowMaxs',
  'Mean1DayFlowMins', 'Mean7DayFlowMins', 'StDevMean90DayFlowMaxs',
  'Mean3DayFlowMins', 'Mean7DayFlowMaxs', 'X95per', 'nPulsesHigh',
  'Mean30DayFlowMins', 'StDevReversals')
Indices_p_value_8 <- c('StDevnPulsesHigh', 'nPos', 'StDevMeanFeb',
  'StDevReversals', 'StDevnPulsesLow', 'ZeroFlowDays', 'StDevZeroFlowDays',
  'StDevMean7DayFlowMaxs', 'StDevMean3DayFlowMaxs', 'Predictability', 'MeanMay',
  'StDevMeanSep', 'lkur', 'nPulsesLow', 'meanNeg', 'StDevMeanAug', 'MeanApr',
  'StDevMean30DayFlowMaxs', 'Reversals', 'FRE3', 'StDevBFI', 'StDevnNeg',
  'StDevMeanJul', 'StDevnPos', 'StDevJulianMin', 'MeanMar', 'nPulsesHigh',
  'nNeg', 'StDevMeanMar', 'StDevMeanJun', 'StDevMean1DayFlowMaxs', 'X95per',
  'X25per', 'StDevMean90DayFlowMaxs', 'FRE7', 'Mean1DayFlowMins',
  'StDevJulianMax', 'StDevMean90DayFlowMins', 'StDevFRE1', 'JulianMin',
  'Mean90DayFlowMaxs', 'MeanSep', 'Mean7DayFlowMaxs', 'Mean30DayFlowMaxs')
Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos',
  'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs',
  'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow',
  'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs',
  'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs',
  'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov',
  'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per',
  'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg',
  'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov',
  'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1',
  'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar',
  'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec',
  'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay')
Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs',
  'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul',
  'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow',
  'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins',
  'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov',
  'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 'StDevFRE7',
  'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per',
  'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI',
  'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar',
  'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins',
  'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax',
  'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep',
  'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr')

## Diptera, class 3
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "Diptera")
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class3.pdf")
## Diptera, class 8
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "Diptera")
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class8.pdf")
## Diptera, class 10
plots_indices_bio <- index_plots(Nat_alt_summer_10, Indices_p_value_10, "Diptera")
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class10.pdf")
## Diptera, class 13
plots_indices_bio <- index_plots(Nat_alt_summer_13, Indices_p_value_13, "Diptera")
multi.page_Diptera <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Diptera, filename = "Graphs_Diptera_Class13.pdf")

### Insect ###
## Class 3
plots_indices_bio <- index_plots(Nat_alt_summer_3, Indices_p_value_3, "Insect")
multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
ggexport(multi.page_Insect, filename = "Graphs_Insect_Class3.pdf")
## Class 8: build and arrange the figure only -- the matching ggexport()
## statement follows on the next line of the script.
plots_indices_bio <- index_plots(Nat_alt_summer_8, Indices_p_value_8, "Insect")
multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol = 2)
setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes")
ggexport(multi.page_Insect, filename = "Graphs_Insect_Class8.pdf") ##Class 10 Indices_p_value_10 <- c('JulianMin', 'MeanSep', 'MeanAug', 'nPos', 'StDevnPulsesLow', 'Mean90DayFlowMins', 'StDevJulianMin', 'Mean90DayFlowMaxs', 'l2', 'FRE7', 'nNeg', 'Mean3DayFlowMaxs', 'MeanJun', 'nPulsesLow', 'Mean1DayFlowMaxs', 'MeanJul', 'Mean30DayFlowMins', 'Mean30DayFlowMaxs', 'BFI', 'MeanJan', 'X75per', 'Reversals', 'Mean7DayFlowMaxs', 'Mean7DayFlowMins', 'StDevBFI', 'Mean3DayFlowMins', 'StDevMeanNov', 'StDevMeanAug', 'X95per', 'StDevMean30DayFlowMins', 'X5per', 'StDevMean90DayFlowMins', 'StDevnPos', 'Mean1DayFlowMins', 'StDevnNeg', 'lca', 'StDevMeanSep', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean3DayFlowMins', 'MeanDec', 'MeanFeb', 'StDevFRE1', 'StDevMean1DayFlowMins', 'StDevMean1DayFlowMaxs', 'MeanMar', 'MeanPulseLengthLow', 'StDevReversals', 'StDevFRE7', 'StDevMeanDec', 'StDevnPulsesHigh', 'FRE3', 'StDevMean3DayFlowMaxs', 'meanPos', 'MeanMay') plots_indices_bio <- lapply(Indices_p_value_10, function(q){ Bio_plots <- ggplot(Nat_alt_summer_10, aes_string(x = q, y = "Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Insect <- ggarrange(plotlist = plots_indices_bio, nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Insect, filename = "Graphs_Insect_Class10.pdf") ##Class 13 Indices_p_value_13 <- c('Mean90DayFlowMins', 'nNeg', 'nPos', 'Mean1DayFlowMaxs', 'StDevMean30DayFlowMins', 'StDevJulianMin', 'Mean7DayFlowMaxs', 'MeanJul', 'Mean3DayFlowMaxs', 'MeanSep', 'StDevnPulsesLow', 'Reversals', 'nPulsesLow', 'MeanAug', 'StDevMean7DayFlowMins', 'MeanNov', 'StDevMean90DayFlowMins', 'X5per', 'StDevBFI', 'l2', 'MeanDec', 'Mean30DayFlowMins', 'StDevMeanNov', 'X75per', 'MeanJun', 'Mean30DayFlowMaxs', 'FRE7', 'MeanJan', 
'StDevFRE7', 'StDevMean3DayFlowMins', 'Mean90DayFlowMaxs', 'StDevMeanDec', 'X95per', 'StDevMeanApr', 'FRE3', 'StDevMean1DayFlowMins', 'Mean7DayFlowMins', 'BFI', 'MeanFeb', 'lca', 'StDevnNeg', 'Mean3DayFlowMins', 'MeanMar', 'MeanPulseLengthLow', 'StDevMean1DayFlowMaxs', 'Mean1DayFlowMins', 'StDevFRE1', 'StDevMeanJul', 'MeanMay', 'StDevMeanJan', 'StDevJulianMax', 'lkur', 'JulianMin', 'StDevMean3DayFlowMaxs', 'StDevMeanJun', 'StDevMeanSep', 'StDevMean7DayFlowMaxs', 'StDevMeanFeb', 'StDevMeanMar', 'MeanApr') plots_indices_bio <- lapply(Indices_p_value_13, function(q){ Bio_plots <- ggplot(Nat_alt_summer_13, aes_string(x = q, y = "Insect", color="Flow.regim")) + geom_point(shape=19, size=0.7)+ theme(legend.position = "none") + geom_smooth(method=lm, se=F) + scale_color_manual(values = c("tomato2", "springgreen3")) #+ scale_x_continuous(trans = 'log10') return(Bio_plots) }) multi.page_Insect <- ggarrange(plotlist = plots_indices_bio,nrow = 2, ncol =2) setwd("~/Cássia/11 Nabia/Hyd_alteration/Biological_data/Bio_classes") ggexport(multi.page_Insect, filename = "Graphs_Insect_Class13.pdf") # Ordinations ------------------------------------------------------------- ## Macroinvertebrates - CLASS 3 Nat_alt_summer_3 <- Nat_alt_summer %>% filter(Lev_20 == "3") row.names(Nat_alt_summer_3) <- Nat_alt_summer_3$ID Taxa <- select(Nat_alt_summer_3, Hydrobiidae:Plumatellidae) Env <- select(Nat_alt_summer_3, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN) Hyd <- select(Nat_alt_summer_3, MeanJan:X95per) FQ <- select(Nat_alt_summer_3, A_COND20SITU,A_PHSITU) All <- cbind(Env, Hyd, FQ) Col_EPT <- as.character(Nat_alt_summer_3$Color) Col_Flow <- as.character(Nat_alt_summer_3$Flow_Color) Sym_EPT <- Nat_alt_summer_3$Symbol Taxa_RA <- decostand(Taxa, "total") ## Relative abundance Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the 
site Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site rowSums (Taxa, na.rm = FALSE, dims = 1) rowSums (Taxa_RA, na.rm = FALSE, dims = 1) rowSums (Taxa_H, na.rm = FALSE, dims = 1) rowSums (Taxa_PA, na.rm = FALSE, dims = 1) Dist_bray_H <- vegdist(Taxa_H, method = "bray") Dist_Jac <- vegdist(Taxa_PA, method = "jac") PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302 PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21 (fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T)) (fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T)) (fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, scale(FQ), perm = 1000, na.rm = T)) (fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T)) pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 3 - MI - Community Structure",#xlim = c(-0.5,0.2), # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01), xlab="PCoA 1 (226,8%)", ylab="PCoA 2 (3,37%)") abline(h = 0, lty = 1) abline(v = 0, lty = 1) #mtext("GOF = 0,213", side=3, cex=0.8) Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H) ###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv") ##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4") #text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4") #♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect) points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT) points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow) #points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7) plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7) plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) ### Macroinvertebrates - CLASS 8 ### Nat_alt_summer_8 <- 
Nat_alt_summer %>% filter(Lev_20 == "8") row.names(Nat_alt_summer_8) <- Nat_alt_summer_8$ID Taxa <- select(Nat_alt_summer_8, Hydrobiidae:Plumatellidae) Env <- select(Nat_alt_summer_8, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN) Hyd <- select(Nat_alt_summer_8, MeanJan:X95per) FQ <- select(Nat_alt_summer_8, A_COND20SITU,A_PHSITU) All <- cbind(Env, Hyd, FQ) Col_EPT <- as.character(Nat_alt_summer_8$Color) Col_Flow <- as.character(Nat_alt_summer_8$Flow_Color) Sym_EPT <- Nat_alt_summer_8$Symbol Taxa_RA <- decostand(Taxa, "total") ## Relative abundance Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site rowSums (Taxa, na.rm = FALSE, dims = 1) rowSums (Taxa_RA, na.rm = FALSE, dims = 1) rowSums (Taxa_H, na.rm = FALSE, dims = 1) rowSums (Taxa_PA, na.rm = FALSE, dims = 1) Dist_bray_H <- vegdist(Taxa_H, method = "bray") Dist_Jac <- vegdist(Taxa_PA, method = "jac") PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302 PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21 (fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T)) (fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T)) (fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, scale(FQ), perm = 1000, na.rm = T)) (fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T)) pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 8 - MI - Community Structure",#xlim = c(-0.5,0.2), # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01), xlab="PCoA 1 (37,68%)", ylab="PCoA 2 (3,0%)") abline(h = 0, lty = 1) abline(v = 0, lty = 1) #mtext("GOF = 0,213", side=3, cex=0.8) Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H) ###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv") 
##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4") #text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4") #♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect) points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT) points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow) #points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7) plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7) plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) ## Macroinvertebrates - CLASS 10 Nat_alt_summer_10 <- Nat_alt_summer %>% filter(Lev_20 == "10") row.names(Nat_alt_summer_10) <- Nat_alt_summer_10$ID Taxa <- select(Nat_alt_summer_10, Hydrobiidae:Plumatellidae) Env <- select(Nat_alt_summer_10, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN) Hyd <- select(Nat_alt_summer_10, MeanJan:X95per) FQ <- select(Nat_alt_summer_10, A_COND20SITU,A_PHSITU) All <- cbind(Env, Hyd) Col_EPT <- as.character(Nat_alt_summer_10$Color) Col_Flow <- as.character(Nat_alt_summer_10$Flow_Color) Sym_EPT <- Nat_alt_summer_10$Symbol Taxa_RA <- decostand(Taxa, "total") ## Relative abundance Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site Taxa_Norm <- decostand(Taxa, "normalize") ## Relative abundance, percentage of each species based on total of the site rowSums (Taxa, na.rm = FALSE, dims = 1) rowSums (Taxa_RA, na.rm = FALSE, dims = 1) rowSums (Taxa_H, na.rm = FALSE, dims = 1) rowSums (Taxa_PA, na.rm = FALSE, dims = 1) Dist_bray_H <- vegdist(Taxa_H, method = "bray") Dist_Jac <- vegdist(Taxa_PA, method = "jac") PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302 
PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21 (fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T)) (fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T)) (fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, (FQ), perm = 1000, na.rm = T)) (fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T)) pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 10 - MI - Community Structure",#xlim = c(-0.5,0.2), # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01), xlab="PCoA 1 (28,3%)", ylab="PCoA 2 (3,2%)") abline(h = 0, lty = 1) abline(v = 0, lty = 1) #mtext("GOF = 0,213", side=3, cex=0.8) Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H) ###write.csv2(Macroinv_wa, "macroinv_Com_H_taxa.csv") ##text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.7, col = "azure4") #text(Macroinv_wa, rownames(Macroinv_wa),cex = 0.9, col = "azure4") #♥points(Macroinv_wa,cex =1, pch=Simbolos, col=Col_insect) points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT) points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow) #points(pcoa.C.H, "sites", pch = 19, cex=0.55, col = Col_Flow) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7) plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7) plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) ## Macroinvertebrates - CLASS 13 Nat_alt_summer_13 <- Nat_alt_summer %>% filter(Lev_20 == "13") row.names(Nat_alt_summer_13) <- Nat_alt_summer_13$ID Taxa <- select(Nat_alt_summer_13, Hydrobiidae:Plumatellidae) Env <- select(Nat_alt_summer_13, AREA_KM2:MN_calc, MN_PERM:MN_HARD, MN_UHD:MN_PLT, MN_DEN) Hyd <- select(Nat_alt_summer_13, MeanJan:X95per) FQ <- select(Nat_alt_summer_13, A_COND20SITU,A_PHSITU) All <- cbind(Env, Hyd) Col_EPT <- as.character(Nat_alt_summer_13$Color) Col_Flow <- 
as.character(Nat_alt_summer_13$Flow_Color) Sym_EPT <- Nat_alt_summer_13$Symbol Taxa_RA <- decostand(Taxa, "total") ## Relative abundance Taxa_H <- sqrt(Taxa_RA) ## Relative abundance, percentage of each species based on total of the site Taxa_PA <- decostand(Taxa, "pa") ## Relative abundance, percentage of each species based on total of the site Dist_bray_H <- vegdist(Taxa_H, method = "bray") Dist_Jac <- vegdist(Taxa_PA, method = "jac") PCoA_Com_H <- cmdscale(Dist_bray_H, k = 2, eig = TRUE) ##0.302 PCoA_Com_PA <- cmdscale(Dist_Jac, k = 2, eig = TRUE) ##0.21 (fit_Env_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Env), perm = 1000, na.rm = T)) (fit_Hyd_PCoA_Com_H <- envfit(PCoA_Com_H, scale(Hyd), perm = 1000, na.rm = T)) (fit_FQ_PCoA_Com_H <- envfit(PCoA_Com_H, (FQ), perm = 1000, na.rm = T)) (fit_All_PCoA_Com_H <- envfit(PCoA_Com_H, scale(All), perm = 1000, na.rm = T)) pcoa.C.H <- ordiplot(PCoA_Com_H, type = "n", main = "CLASS 13 - MI - Community Structure",#xlim = c(-0.5,0.2), # xlim = c(-0.22,0.17), ylim= c(-0.01,0.01), xlab="PCoA 1 (25,9%)", ylab="PCoA 2 (2,5%)") abline(h = 0, lty = 1) abline(v = 0, lty = 1) #mtext("GOF = 0,213", side=3, cex=0.8) Macroinv_wa <- wascores(PCoA_Com_H$points[, 1:2], Taxa_H) points(pcoa.C.H, "sites", pch = Sym_EPT, cex=1, col = Col_EPT) points(pcoa.C.H, "sites", pch = 19, cex=1, col = Col_Flow) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "grey20", cex=0.7) plot(fit_Hyd_PCoA_Com_H, p.max = 0.001, col = "brown2", cex=0.7) plot(fit_Env_PCoA_Com_H, p.max = 0.001, col = "darkorange", cex=0.7) plot(fit_FQ_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7) plot(fit_All_PCoA_Com_H, p.max = 0.001, col = "darkblue", cex=0.7)
## SARS-CoV-2 agent-based campus-reopening simulation
## (this scenario: no group-size limit, no testing, no contact tracing).
## Loads pre-computed within-host titer trajectories plus virus/population
## parameter tables, then defines the core transmission helper functions.
## NOTE(review): rm(list=ls()) and the hard-coded .libPaths() are
## cluster-specific side effects; confirm before reusing this script elsewhere.
rm(list=ls())
.libPaths("/global/home/users/cbrook/R/x86_64-pc-linux-gnu-library/4.0")
#setwd("/Users/caraebrook/Documents/R/R_repositories/Berkeley-Reopening/Dec-2020/all-runs/Re-Run-12-24/FigS1/")
#no group, no test, no trace
library(data.table)
library(plyr)
library(dplyr)
library(deSolve)
library(matrixStats)
library(fitdistrplus)

#load parameters including pre-run titer trajectories for each individual
load("titer.dat.20K.Rdata")
#load("titer.dat.2K.Rdata")
load("virus.par.12.15.Rdata")
load("pop.par.base.Rdata")

## get.real.cases: for a single infector, distribute that person's drawn
## number of potential cases across discrete contact events, thin each
## transmission by a titer-dependent probability (URT model of Ke et al. 2020),
## optionally impose a group-size limit, and return:
##   [[1]] the updated one-row pop.dat (post-titer / post-limit case counts)
##   [[2]] a table of generation times for the cases that actually occur
##         (employ_ids, generation_time; NA row when none occur).
## Arguments:
##   pop.dat           - one-row data frame for this infector
##   event.dat         - vector of contact-event times for this infector
##   titer.dat1        - this infector's titer trajectory (columns time, V)
##   within.host.theta - scaling from titer to transmission probability
##   group.limit       - max successful transmissions per event (0 = no limit)
get.real.cases <- function(pop.dat, event.dat, titer.dat1, within.host.theta, group.limit){
  #if no cases caused, then ignore
  if((pop.dat$original_potential_cases_caused_UCB>0) & (pop.dat$num_infection_events>0)){
    #then allocate all the cases to the events
    #distribute cases at random amongst the events
    event.names <- 1:as.numeric(pop.dat$num_infection_events)
    actual.events <- sample(x=event.names, size=as.numeric(pop.dat$original_potential_cases_caused_UCB), replace = T)
    event.data <- cbind.data.frame(actual.events, event.dat[actual.events])
    names(event.data) <- c("event", "gentime")
    #and add the titer at the time of the event
    gen.tmp = as.list(event.data$gentime)
    event.data$titer <- c(unlist(lapply(gen.tmp, grab.titer, dat.vir =titer.dat1)))
    #now that you have titer, here calculate the probability of transmission, given a certain viral load,
    #based off of the probability model from the URT in Ke et al. 2020
    # in Ke et al. 2020, theta is fixed at 0.05 (could be modulated and/or fit to data)
    #draw Km from a normal distribution centered at the midpoint between the two values explored in Ke et al. 2020 (10^3 and 10^4)
    event.data$Km <- rnorm(nrow(event.data),mean=5500, sd=1000)
    event.data$prob_exposure = within.host.theta*(event.data$titer/(event.data$titer + event.data$Km))
    event.data$prob_exposure[event.data$prob_exposure<0] <- 0
    #probability is small: ~5% for a typical contact if theta = 0.05 as in Ke.
    #for theta = .7 here, up to 50% depending on theta
    #does the infection happen? make it a probabilistic outcome of the titer
    #then, you roll a dice to see if this exposure causes an infection
    tmp.prob <- as.list(event.data$prob_exposure)
    event.data$InfectionYN = c(unlist(lapply(tmp.prob, test.titer)))
    #then total the events that actually happen to incorporate into the original data
    pop.dat$post_titer_potential_cases_caused_UCB <- sum(event.data$InfectionYN)
    #and then, if there is a group size limit, impose it here
    #(only applies to individuals who observe distancing limits)
    if((group.limit>0) & (pop.dat$obs_dist_limits==TRUE)){
      #gives you the number of successful transmissions per event
      event.sum <- ddply(event.data, .(event),summarize, N=sum(InfectionYN))
      event.sum$over_lim = event.sum$N-group.limit
      event.sum$over_lim[event.sum$over_lim<0] <- 0
      #truncate # of events for the IDs listed above to the group limit.
      event.data.list = dlply(subset(event.data, InfectionYN==1), .(event))
      new.event.list <- lapply(event.data.list, impose.group, group.limit=group.limit)
      #new.event.data <- do.call("rbind", new.event.list)
      new.event.data <-data.table::rbindlist(new.event.list)
      pop.dat$potential_cases_caused = sum(new.event.data$InfectionYN)
      #in this case, return the generation time table after the group intervention
      if(pop.dat$potential_cases_caused >0){
        dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(new.event.data)), new.event.data$gentime)
        names(dat.gen.tab) <- c("employ_ids", "generation_time")
      }else{
        dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
        names(dat.gen.tab) <- c("employ_ids", "generation_time")
      }
    }else{
      #no group limit: post-titer count is the final potential count
      pop.dat$potential_cases_caused <- pop.dat$post_titer_potential_cases_caused_UCB
      if(pop.dat$potential_cases_caused >0){
        event.data.out = subset(event.data, InfectionYN==1)
        dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(event.data.out)), event.data.out$gentime)
        names(dat.gen.tab) <- c("employ_ids", "generation_time")
      }else{
        dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
        names(dat.gen.tab) <- c("employ_ids", "generation_time")
      }
    }
  }else{
    #none take place
    #return the original data with 0s
    pop.dat$post_titer_potential_cases_caused_UCB <- 0
    pop.dat$potential_cases_caused <- 0
    #and return a table of generation times with nothing
    dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA)
    names(dat.gen.tab) <- c("employ_ids", "generation_time")
  }
  return(list(pop.dat, dat.gen.tab))
}

## test.titer: single Bernoulli draw - returns 1 (transmission occurs) with
## probability prob1, else 0.
test.titer <- function(prob1){
  Y_N =sample(c(0,1), size=1, prob = c(1-prob1, prob1))
  return(Y_N)
}

## impose.group: if one event generated more successful transmissions than the
## group limit allows, keep a random subset of size group.limit (worst-case:
## all attendees up to the limit get infected).
impose.group <- function(event.dat1, group.limit){
  tot.transmissions = nrow(event.dat1)
  if(tot.transmissions>group.limit){
    choose.events <- sample(x=1:tot.transmissions, size=group.limit, replace = F)
    event.dat2 = event.dat1[choose.events,]
    return(event.dat2)
  }else{
    return(event.dat1)
  }
}

## get.event.time: draw one time per contact event from the generation-time
## sampler genTime (a closure returning n random draws).
get.event.time <- function(dat, genTime){
  event.times = genTime(as.numeric(dat$num_infection_events))
  return(event.times)
}

## grab.titer: titer (column V) at the first trajectory timepoint after dat1.
## NOTE(review): returns NA when dat1 falls beyond the trajectory's last time.
grab.titer <- function(dat1, dat.vir){
  titer.out <- dat.vir$V[dat.vir$time>dat1][1]
  return(titer.out)
}

## Closure factories: each returns a random-number generator with its
## parameters baked in via purrr::partial.
normal_fn <- function(meanpar=NULL, sdpar=NULL){
  out <- purrr::partial(rnorm, mean = meanpar, sd = sdpar)
  return(out)
}
poisson_fn <- function(lambda=NULL){
  out <- purrr::partial(rpois, lambda = lambda)
  return(out)
}
lognormal_fn <- function(meanlogpar=NULL, sdlogpar=NULL){
  out <- purrr::partial(rlnorm, meanlog = meanlogpar, sdlog = sdlogpar)
  return(out)
}

## add.risk.cat: tally isolations per (ceiling) day from the individual-level
## table dat, map the counts onto the daily time-series frame pop_dat, and
## return them as a one-column data frame.
add.risk.cat <- function(dat, pop_dat){
  dat = data.table(dat)
  daily_new <- dat[, day := ceiling(time_isolation) #time_isolated
  ][, .(daily_isolations = .N), by = day ]
  pop_dat$add <- 0
  for (i in 1:length(daily_new$day)){
    pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_isolations[i]
  }
  out.vect <- as.data.frame(pop_dat$add)
  return( out.vect)
}

## add.risk.cat.exp: as add.risk.cat, but tallies exposures per day and also
## derives expected deaths (exposures * category-specific CFR from input_par).
add.risk.cat.exp <- function(dat, pop_dat, input_par){
  dat = data.table(dat)
  daily_new <- dat[, day := ceiling(exposure_time) ][, .(daily_exposures = .N), by = day ]
  pop_dat$add <- 0
  for (i in
1:length(daily_new$day)){
    pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_exposures[i]
  }
  out.vect <- as.data.frame(pop_dat$add)
  #then add deaths based on each pop cat
  pop.cat = unique(dat$employ_cat)
  out.vect2 <- as.data.frame(as.numeric(input_par$par1[input_par$parameter=="CFR" & input_par$population==pop.cat])*out.vect)
  # dat.out = cbind.data.frame(out.vect, out.vect2)
  return(list(out.vect, out.vect2))
  #return(dat.out)
}

## cross.infect: choose a destination sub-population ("new_cat") for each new
## case generated by one infector, using the "meta-pop" mixing probabilities
## in input.par. Categories with no remaining susceptibles are removed first;
## if fewer categories remain than the parameter table covers, cases are
## re-allocated uniformly across the remaining categories.
##   dat       - one infector's expanded rows (one row per new case)
##   all.sus   - data frame of remaining susceptibles (employ_ids, employ_cat)
##   input.par - full parameter table (rows with parameter=="meta-pop" hold
##               the category ids in par2 and their probabilities in par1)
cross.infect <- function(dat, all.sus, input.par){
  pop.par = subset(input.par, population == unique(dat$infector_cat))
  #first, elim any populations for which there are no longer remaining susceptibles
  rem.cat = unique(all.sus$employ_cat)
  all.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
  missed.cat = setdiff(all.cat, rem.cat)
  pop.par$sub = 0
  #BUGFIX(review): the original loop `for (i in 1: length(missed.cat))` ran
  #1:0 when no category was exhausted, indexing missed.cat[NA] and producing
  #an NA logical subscript in the assignment below (a runtime error).
  #seq_along() skips the loop entirely when missed.cat is empty and is
  #identical otherwise.
  for (i in seq_along(missed.cat)) {
    pop.par$sub[pop.par$parameter=="meta-pop" & pop.par$par2==missed.cat[i]] <- 1
  }
  pop.par = subset(pop.par, sub==0)
  #then allocate the population of the new cases based on the proportion within and without
  tot.cases = nrow(dat)
  #then need to reallocate probabilities comparatively without the remaining
  possible.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"])
  old.cat = as.numeric(unique(input.par$par2[input.par$parameter=="meta-pop"]))
  old.prob = as.numeric(input.par$par1[input.par$parameter=="meta-pop"])[1:length(old.cat)]
  if(length(possible.cat)<length(old.cat)){
    if(length(possible.cat)==1){
      dat$new_cat = possible.cat
    }else{
      #if you've run out of probabilities, just reallocate proportionally
      new.prob = rep((1/length(possible.cat)), length(possible.cat))
      dat$new_cat = sample(x=possible.cat, size = tot.cases, replace = TRUE, prob = new.prob)
    }
  }else{
    dat$new_cat = sample(x=old.cat, size = tot.cases, replace = TRUE, prob = old.prob)
  }
  return(dat)
}

## assign.ID: pair each new infection with a concrete susceptible individual
## drawn (without replacement) from the matching category.
assign.ID = function(sus.dat.sub, dat.new.sub){
  #at the very end of the time series, you may run out of susceptibles in the right category, in which case, these just become lost infections
  ## (continuation of assign.ID)
  if(nrow(dat.new.sub)<=length(sus.dat.sub$employ_ids)){
    dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
  }else{
    #more new cases than susceptibles left in this category: keep a random
    #subset of the cases equal to the susceptible count; the remainder are
    #silently dropped ("lost infections")
    new.count = length(sus.dat.sub$employ_ids)
    new.missed = nrow(dat.new.sub) - new.count
    row.tmp = seq(1, nrow(dat.new.sub),1)
    row.take = sample(row.tmp, size = new.count, replace = FALSE)
    dat.new.sub <- dat.new.sub[row.take,]
    dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE)
  }
  return(dat.new.sub)
}

## assign.infections: core per-timestep bookkeeping. Expands every infectious
## individual with known 'actual_cases_caused' into one row per new case,
## attaches generation times (from gen_list), picks a destination category
## (cross.infect) and a concrete susceptible (assign.ID), writes the new
## exposures back into pop.mat, then moves spent infectors to state 5
## (isolated/recovered). Returns the updated pop.mat.
assign.infections <- function(pop.mat, gen_list, timestep, input.par){
  # assign new exposures (and times) based on 'actual cases caused' above
  # and move those that have transmitted to isolated/recovered state
  #(asymptomatics will be missed in iso time unless tested)
  # timestep.prev = unique(pop.mat$timestep)
  #first, pair each case with its generation times
  new.mat <- dplyr::select(pop.mat, employ_ids, employ_cat, state, exposure_time, actual_cases_caused, time_isolation)
  new.mat <- new.mat[!is.na(new.mat$actual_cases_caused) & new.mat$state==1,]
  #only matters if it actually causes cases.
  ## (continuation of assign.infections)
  #NOTE(review): new.mat.zero is computed but never used below
  new.mat.zero = subset(new.mat, actual_cases_caused<1)
  new.mat <- subset(new.mat, actual_cases_caused>0)
  if(nrow(new.mat)>0){
    new.mat.list <- dlply(new.mat, .(employ_ids))
    #print("1")
    new.mat.list <- lapply(new.mat.list, make.rows)
    #the new new mat - no longer includes those which caused 0 actual cases
    #should always have at least one row because of the if-statement above
    #new.mat <- do.call("rbind", new.mat.list)
    new.mat <- data.table::rbindlist(new.mat.list)
    #now attach a generation time with each of these cases and a random sample from the susceptibles
    new.mat$generation_time <- NA
    index.ids = unique(new.mat$employ_ids)
    for(i in 1:length(index.ids )){
      tmp = nrow(new.mat[new.mat$employ_ids == index.ids[i],])
      #print(index.ids[[i]])
      new.mat$generation_time[new.mat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:tmp]
    }
    #now, attach a place to infect (susceptible)
    #bias the sampling based on the proportion of infections within and without of your direct cohort
    #first, pair the remaining susceptibles with their category
    all.sus <- cbind.data.frame(pop.mat$employ_ids[pop.mat$state==0],pop.mat$employ_cat[pop.mat$state==0])
    names(all.sus) = c("employ_ids", "employ_cat")
    new.list = dlply(new.mat, .(employ_ids))
    #cross infect by cat
    #print("2")
    new.list.out <- lapply(new.list, cross.infect, all.sus=all.sus, input.par=input.par)
    new.mat = data.table::rbindlist(new.list.out)
    #new.mat = do.call("rbind", new.list.out)
    rownames(new.mat) <- c()
    #then, assign names by category of new infections
    id.cat = data.frame(sort(unique(new.mat$new_cat)))
    all.sus = arrange(all.sus, employ_cat)
    names(id.cat) <- "employ_cat"
    tmp.sus = merge(x=all.sus, y=id.cat)
    tmp.sus.split = dlply(tmp.sus, .(employ_cat))
    new.mat.split <- dlply(new.mat, .(new_cat))
    #print("3")
    #pair each category's cases with that category's susceptibles
    dat.new.split.out = mapply(FUN=assign.ID, sus.dat.sub= tmp.sus.split, dat.new.sub= new.mat.split, SIMPLIFY = FALSE)
    new.mat = data.table::rbindlist(dat.new.split.out)
    #new.mat = do.call("rbind", dat.new.split.out)
    new.mat$new_exposure_time = new.mat$exposure_time + new.mat$generation_time
    #and merge into pop.mat
    #new.merge <- dplyr::select(new.mat, new_infected, employ_ids, infector_iso_time, new_exposure_time)
    #names(new.merge) <- c("employ_ids", "infector", "infector_iso_time", "exposure_time")
    #now put them into pop.mat
    for(i in 1:nrow(new.mat)){
      #identify infector and iso time
      pop.mat$infector[pop.mat$employ_ids==new.mat$new_infected[i] ] <- new.mat$employ_ids[i]
      pop.mat$infector_iso_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$infector_iso_time[i]
      pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] + new.mat$infector_iso_time[i]
      pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
      #and exposure time
      pop.mat$exposure_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$new_exposure_time[i]
      #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
      #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
      pop.mat$case_source[pop.mat$employ_ids==new.mat$new_infected[i]] <- "UCB" #transmission within berkeley
      #change state - only if exposure time is already achieved
      pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1
      #otherwise, they still stay susceptible - but you mark them
      pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
    }
    #else, just return pop mat
  }
  #now, make those that already transmitted recovered/isolated
  pop.mat$state[(pop.mat$state==1 &
!is.na(pop.mat$actual_cases_caused))] <- 5
  #and, if any of the old "pre-exposed" have reached their exposure time, you can
  #and return pop.mat
  return(pop.mat)
}

## assign.last.infections: end-of-epidemic variant of assign.infections, used
## when the infectious cohort would generate more cases than there are
## susceptibles left. Samples exactly 'remaining.susceptibles' of the pending
## transmissions, assigns them to the remaining susceptibles (ignoring
## category structure), retires all other active infections, and returns the
## updated pop.mat.
assign.last.infections <- function(pop.mat, gen_list, remaining.susceptibles, timestep){
  # assign new exposures (and times) based on 'actual cases caused' above
  # and move those that have transmitted to isolated/recovered state
  #(asymptomatics will be missed in iso time unless tested)
  timestep.prev = unique(pop.mat$timestep)
  if(remaining.susceptibles>0){
    #first, pair each case with its generation times
    new.mat <- dplyr::select(pop.mat, employ_ids, state, exposure_time, actual_cases_caused, time_isolation)#, time_of_tracing_iso)
    new.mat <- new.mat[ new.mat$state==1 & !is.na(new.mat$actual_cases_caused) ,]
    #get rid of those that cause no cases
    new.mat <- new.mat[new.mat$actual_cases_caused>0,]
    #sum(new.mat$actual_cases_caused)>remaining susceptibles
    #so need to pick these at random to generate new infections instead
    all.possible = c(rep(new.mat$employ_ids, times=new.mat$actual_cases_caused))
    last.infector.ids = sample(all.possible, size=remaining.susceptibles, replace=FALSE)
    last.infector.ids = data.frame(last.infector.ids)
    names( last.infector.ids) ="employ_ids"
    new.dat = ddply(last.infector.ids,.(employ_ids), summarise, actual_cases_caused=length(employ_ids))
    #and new.mat becomes just these
    new.dat$state <- new.dat$time_isolation <- new.dat$exposure_time <- NA
    for (i in 1:nrow(new.mat)){
      new.dat$state[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$state[i]
      new.dat$time_isolation[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$time_isolation[i]
      new.dat$exposure_time[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$exposure_time[i]
    }
    #then, new dat takes the place of new mat
    new.dat.list <- dlply(new.dat, .(employ_ids))
    new.dat.list <- lapply(new.dat.list, make.rows)
    new.dat <- data.table::rbindlist(new.dat.list)
    #new.dat <- do.call("rbind", new.dat.list)
    #now attach a generation time with each of these cases and a random sample from the susceptibles
    new.dat$generation_time <- NA
    index.ids = unique(new.dat$employ_ids)
    for(i in 1:length(index.ids )){
      #print(index.ids[[i]])
      #BUGFIX(review): this loop previously wrote into new.mat$generation_time
      #(the pre-aggregation frame, which has no such column) while everything
      #downstream reads new.dat$generation_time - so new.dat$generation_time
      #stayed all-NA and new_exposure_time below was NA. Write into new.dat,
      #mirroring the equivalent loop in assign.infections.
      new.dat$generation_time[new.dat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:length(new.dat$generation_time[new.dat$employ_ids == index.ids[i]])]
    }
    #now, attach a place to infect (susceptible) -- should be enough
    all.sus <- pop.mat$employ_ids[pop.mat$state==0]
    new.dat$new_infected <- sample(all.sus, size=nrow(new.dat), replace=FALSE)
    new.dat$new_exposure_time = new.dat$exposure_time + new.dat$generation_time
    #now put them into pop.mat
    for(i in 1:nrow(new.dat)){
      #identify infector and iso time
      pop.mat$infector[pop.mat$employ_ids==new.dat$new_infected[i] ] <- new.dat$employ_ids[i]
      pop.mat$infector_iso_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$infector_iso_time[i]
      pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] + new.dat$infector_iso_time[i]
      pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked
      #and exposure time
      pop.mat$exposure_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$new_exposure_time[i]
      #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]]
      #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]]
      pop.mat$case_source[pop.mat$employ_ids==new.dat$new_infected[i]] <- "UCB" #transmission within berkeley
      #change state - only if exposure time is already achieved
      pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] &
pop.mat$exposure_time<=timestep] <- 1
      #otherwise, they still stay susceptible - but you mark them
      pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3
    }
    #else, just return pop mat
  }
  #other
  #now, make those that already transmitted recovered/isolated
  pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5
  #and, if any of the old "pre-exposed" have reached their exposure time, you can
  #then, those active infections will cause no more new cases
  pop.mat$actual_cases_caused[pop.mat$state==1] <- 0
  #and return pop.mat
  return(pop.mat)
}

## get.actual.cases: of the generation times drawn for this infector, count
## how many land (exposure_time + generation_time) at or before the infector's
## isolation time - those are the cases actually caused.
get.actual.cases = function(pop.dat, dat.gen, timestep){
  sub.gen =subset(dat.gen, employ_ids==unique(pop.dat$employ_ids))
  #count the number of exposure time + generation time combos that take place before the iso time
  sub.gen$new_exposures = sub.gen$generation_time + pop.dat$exposure_time
  n.out = length(sub.gen$new_exposures[sub.gen$new_exposures<=pop.dat$time_isolation])
  return(n.out)
}

## get.symptom.onset: from one person's titer trajectory, record the first
## time titer crosses the symptom threshold (symptom onset) and the first/last
## times titer exceeds the assay limit of detection (test-sensitive window).
## NOTE: min()/max() over an empty selection yield Inf/-Inf - i.e. an
## "asymptomatic" infection when titer never crosses the symptom limit.
get.symptom.onset <- function(dat, dat.vir, LOD){
  #get titer limit
  symptom.lim <- as.numeric(unique(dat$titer_lim_for_symptoms))
  #get the timing in the trajectory that first crosses this limit
  dat$time_of_symptom_onset <- min(dat.vir$time[dat.vir$V>symptom.lim])
  dat$time_test_sensitive_start <- min(dat.vir$time[dat.vir$V>LOD])
  dat$time_test_sensitive_end <- max(dat.vir$time[dat.vir$V>LOD])
  #will return infinity if wrong
  return(dat)
}

## make.rows: expand one infector's row into one row per actual case caused
## (returns NULL invisibly when no cases were caused).
make.rows <- function(dat){
  n = dat$actual_cases_caused
  new.dat <- data.frame(matrix(NA, nrow=n, ncol=5) )
  names(new.dat) <- c("employ_ids", "exposure_time", "actual_cases_caused", "infector_iso_time", "infector_cat")#, "time_of_test_sensitivity")#, "time_of_tracing_iso")
  new.dat$employ_ids <- rep(dat$employ_ids, nrow(new.dat))
  new.dat$infector_iso_time <- rep(dat$time_isolation, nrow(new.dat))
  new.dat$infector_cat <- rep(dat$employ_cat, nrow(new.dat))
  new.dat$exposure_time <- rep(dat$exposure_time, nrow(new.dat))
  #new.dat$time_of_tracing_iso <- rep(dat$time_of_tracing_iso, nrow(new.dat))
  if(nrow(new.dat)>0){
    new.dat$actual_cases_caused <- 1
    return(new.dat)
  }
  #else, return nothing
}

## delayfn_surv: normal-distribution delay sampler (used for symptomatic
## surveillance and test turnaround lags).
delayfn_surv <- function(delay_mean, delay_sd){
  out <- purrr::partial(rnorm, mean = delay_mean, sd = delay_sd)
  return(out)
}#symptomatic surveillance/TAT delay

## generationTime_fn: weibull or gamma generation-interval sampler, as the
## virus parameter table dictates.
generationTime_fn <- function(serial_dist=NULL, serial_shape = NULL, serial_scale = NULL) {
  if(serial_dist=="weibull"){
    out <- purrr::partial(rweibull, shape = serial_shape, scale = serial_scale)
  }else if(serial_dist=="gamma"){
    out <- purrr::partial(rgamma, shape = serial_shape, scale = serial_scale)
  }
  return(out)
} #weibull or gamma serial interval as the case may be

## inc_fn: lognormal incubation-time sampler.
inc_fn <- function(n_inc_samp = NULL, meanInc=NULL, sdInc=NULL) {
  out= purrr::partial(rlnorm, meanlog = log(meanInc), sdlog = log(sdInc))
  #out[out < 1] <- 1
  return(out)
} #lognormal incubation time draw

## R0_fn: lognormal individual-R0 sampler.
R0_fn <- function(meanR0=NULL, sdR0=NULL){
  out <- purrr::partial(rlnorm, meanlog = log(meanR0), sdlog = log(sdR0))
  return(out)
} #lognormal R0

## R0_fn_nb: negative-binomial individual-R0 sampler.
R0_fn_nb <- function(muR0=NULL, sizeR0=NULL){
  out <- purrr::partial(rnbinom, mu = muR0, size = sizeR0)
  return(out)
} #nb R0

## initiate.pop: build the initial population matrix for one sub-population
## (pop.ID): assign ids, testing schedules, tracing/distancing flags, initial
## exposures, per-person symptom/testing/tracing delays, and pre-compute each
## person's potential and actual cases caused from their titer trajectory.
## Returns list(pop.mat, dat.gen) - the population table and the generation
## times of the cases that actually occur.
##   start.ID.employ   - first employee id for this sub-population
##   pop.UCB           - sub-population size
##   n.init.exposed    - number of initially exposed individuals
##   within.host.theta - titer -> transmission probability scaling
##   R0fn / eventFn    - samplers for individual R0 and contact-event counts
##   titer.dat         - pre-run titer trajectories (one per employ_ids)
##   LOD               - assay limit of detection
initiate.pop <- function(start.ID.employ, pop.UCB, n.init.exposed, pop.ID, within.host.theta, input.par, R0fn, eventFn, titer.dat, LOD, virus.par){
  #sample serial interval
  genTime = generationTime_fn(serial_dist = virus.par$distribution[virus.par$parameter=="generation_time"], serial_shape= virus.par$par1[virus.par$parameter=="generation_time"], serial_scale= virus.par$par2[virus.par$parameter=="generation_time"])
  pop.par = subset(input.par, population==pop.ID)
  #make table one
  pop.mat = cbind.data.frame(matrix(NA, nrow=pop.UCB, ncol =27))
  names(pop.mat) <- c( "employ_ids","employ_cat", "state", "traced", "testing", "obs_dist_limits", "exposure_time", "total_potential_cases_caused", "original_potential_cases_caused_UCB", "num_infection_events", "post_titer_potential_cases_caused_UCB", "potential_cases_caused", "actual_cases_caused", "case_source",
"infector", "time_test_sensitive_start", "time_test_sensitive_end", "infector_iso_time", "time_of_tracing_iso", "time_of_next_test", "time_of_testing_iso", "titer_lim_for_symptoms", "time_of_symptom_onset", "time_of_symptom_iso", "time_isolation", "reason_isolated", "timestep") #and fill in all you can pop.mat$testing = pop.par$par1[pop.par$parameter=="test-on"] pop.mat$timestep = 0 pop.mat$employ_cat = pop.ID #assign them all an employer ID pop.mat$employ_ids = start.ID.employ:(pop.UCB+start.ID.employ-1) #assign a "first test" depending on how many days of testing per week... test_rotation = as.character(pop.par$par1[pop.par$parameter=="test-rotation"] ) n.test.day.per.wk = as.numeric(pop.par$par1[pop.par$parameter=="n-test-days-per-week"]) #then, if this is bigger than a weekly regime, half of them must happen one week and half the other if ((test_rotation=="biweekly" & n.test.day.per.wk==2) | (test_rotation=="weekly" & n.test.day.per.wk==2)){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if((test_rotation=="biweekly" & n.test.day.per.wk==5) | (test_rotation=="weekly" & n.test.day.per.wk==5)){ pop.mat$time_of_next_test = rep(c(3,4,5,6,7), length=pop.UCB) }else if((test_rotation=="biweekly" & n.test.day.per.wk==7) | (test_rotation=="weekly" & n.test.day.per.wk==7)){ pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==7){ pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==5){ pop.mat$time_of_next_test = rep(c(3,4,5,6,7,10,11,12,13,14), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==2){ pop.mat$time_of_next_test = rep(c(3,7, 10,14), length=pop.UCB) }else if (test_rotation=="two-week-ThFri"){ pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1),seq((14-n.test.day.per.wk+1),14,1)), length=pop.UCB) #end of week. 
if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-daily"){ pop.mat$time_of_next_test = rep(c(seq(1,14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonTues"){ pop.mat$time_of_next_test = rep(c(seq(3,4,1),seq(10,11,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-TuesWed"){ pop.mat$time_of_next_test = rep(c(seq(4,5,1),seq(11,12,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonFri"){ pop.mat$time_of_next_test = rep(c(3,7,10,14), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonWed"){ pop.mat$time_of_next_test = rep(c(3,5,10,12), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="one-week-ThFri"){ pop.mat$time_of_next_test = rep(seq((7-n.test.day.per.wk+1),7,1), length=pop.UCB) }else if (test_rotation=="one-week-MonTues"){ pop.mat$time_of_next_test = rep(seq(3,4,1), length=pop.UCB) }else if (test_rotation=="one-week-MonFri"){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if(test_rotation=="one-week-daily"){ pop.mat$time_of_next_test = rep(c(seq(1,7,1)), length=pop.UCB) }else if(test_rotation=="none"){ pop.mat$time_of_next_test=Inf }else if(test_rotation=="thrice-weekly-MonTues"){ pop.mat$time_of_next_test = rep(c(3,4,10,11,17,18), length=pop.UCB) }else if (test_rotation=="two_day"){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if (test_rotation=="four_week"){ pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1), seq((14-n.test.day.per.wk+1),14,1), seq((21-n.test.day.per.wk+1),21,1), seq((28-n.test.day.per.wk+1),28,1)), length=pop.UCB) } pop.mat$time_of_next_test = 
sample(pop.mat$time_of_next_test, size=length(pop.mat$time_of_next_test), replace = FALSE) #scramble prop.traced = as.numeric(pop.par$par1[pop.par$parameter=="prop.trace"]) #for all, based on proportions, give whether traced or not pop.mat$traced = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$traced), replace=TRUE, prob=c(prop.traced, 1-prop.traced)) #and the same for proportion observign distancing limits prop.obs = as.numeric(pop.par$par1[pop.par$parameter=="percent-obs-dist-lim"]) pop.mat$obs_dist_limits = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$obs_dist_limits), replace=TRUE, prob=c(prop.obs, 1-prop.obs)) # and whether asymp or not #pop.mat$stat_asymp = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$stat_asymp), replace=TRUE, prob=c(prop.asym, 1-prop.asym)) #make initial state variable pop.mat$state <- rep(as.integer(0),pop.UCB) #based on the proportion vaccinated, some get moved to recovered (state 5) right away #for all of our model runs, this is 0, so this gets skipped if(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])>0){ tot.vacc <- round(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])*pop.UCB,0) index.vacc = sample(1:pop.UCB, size=tot.vacc, replace=FALSE) pop.mat$state[index.vacc] <- 5 } #then, regardless of vaccination, overwrite susceptibles with those initially exposed #initially exposed get distributed at random index.init = sample(as.numeric(rownames(pop.mat[pop.mat$state==0,])), size=n.init.exposed, replace=FALSE) pop.mat$state[index.init] <- 1 #here, build distributions #symptomatic isolation delay delayfn_symp = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="iso-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="iso-lag"])) #turnaround testing delay delayfn_TAT = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="TAT-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="TAT-lag"])) #contact tracing lag delayfn_trace = 
delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="trace-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="trace-lag"])) #titer limit for symptoms titer_lim = lognormal_fn(meanlogpar=log(as.numeric(pop.par$par1[pop.par$parameter=="symptom-lim"])), sdlogpar = log(as.numeric(pop.par$par2[pop.par$parameter=="symptom-lim"]))) prop.cases.UCB = as.numeric(pop.par$par1[pop.par$parameter=="prop.cases.UCB"]) #now generate potential new infections based on your status #this gives the weekend average number of infections #pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB) #pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE] = floor(R0fn.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE]))*prop.cases.UCB) #and during the week, fewer cases #pop.mat$wk_tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB) #here are all possible cases pop.mat$total_potential_cases_caused = R0fn(length(pop.mat$employ_ids)) #here are all possible at UC Berkeley - before the titer cull pop.mat$original_potential_cases_caused_UCB = floor(pop.mat$total_potential_cases_caused*prop.cases.UCB) #you should have already brought in a titer trajectory for everyone in your population #choose a threshold titer for symptom onset pop.mat$titer_lim_for_symptoms = titer_lim(pop.UCB) pop.split <- dlply(pop.mat, .(employ_ids)) titer.split <- dlply(titer.dat, .(employ_ids)) #now, based on this, go into each person's virus trajectory and calculate the timing of symptom onset #while you are at it, you can also look at their titer and the LOD and calculate the start/end times for which they are test sensitive pop.split.new <- mapply(get.symptom.onset, dat = pop.split, dat.vir=titer.split, MoreArgs = list(LOD=LOD), SIMPLIFY = FALSE) #when there 
is nothing that is under the limit, these infections become "asymptomatic" -- #we can later play with the proportion that classify as this by modulating the mean value for the symptom onset limit pop.mat <- data.table::rbindlist(pop.split.new) #pop.mat <- do.call("rbind", pop.split.new) #and the delay to isolation pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset<0]<-0 pop.mat$time_of_symptom_iso = delayfn_symp(pop.UCB) pop.mat$time_of_symptom_iso[pop.mat$time_of_symptom_iso<0]<- 0 pop.mat$time_of_symptom_iso <- pop.mat$time_of_symptom_iso + pop.mat$time_of_symptom_onset pop.mat$time_of_testing_iso = delayfn_TAT(pop.UCB) pop.mat$time_of_testing_iso[pop.mat$time_of_testing_iso<0] <- 0 pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test pop.mat$time_of_tracing_iso = delayfn_trace(pop.UCB) pop.mat$time_of_tracing_iso[pop.mat$time_of_tracing_iso<0] <- 0 #now, if not traced, never: pop.mat$time_of_tracing_iso[pop.mat$traced==FALSE] <- Inf pop.mat$time_of_tracing_iso[pop.mat$state>0] <- Inf # new introductions cannot be traced pop.mat$infector[pop.mat$state>0] <- 0 # new introductions cannot be traced pop.mat$infector_iso_time[pop.mat$state>0] <- Inf # new introductions cannot be traced pop.mat$case_source[pop.mat$state>0] <- "alameda" #NOW, we generate new cases: #we break down each infectious individual based on that individual's: #(a) within-host titer trajectory, (b) the selected value for within-host theta (how viral load translates toinfection probability), #(c) the number of discrete transmission events that we draw for each person, and #(d) the generation time of those contact events #(for d, we currently use the Ferretti weibull, but we are hoping that a constant hazard of events # + the titer trajectory of the pathogen should roughly produce the expected generation time) #(1) First, for each person, we draw the number of possible cases from R0 - this equates to individual heterogeneity in infectiousness # (one type 
of superspreading) and is already captured in the "total_potential_cases_caused" column, which then gets reduced down to the # proportion in the UCB community in the "original_potential_cases_caused_UCB" column #(2) Then, we draw a number of contact events, among which the above cases get distributed. (this equates to event-based superspreading # - fewer event draws and a high number of transmissions from #1 generate the biggest superspreading events). Current, we draw this # from a Poisson with lambda=3 #(3) Then, for each "event", we draw a time that this event took place (here, represented from the generation time Weibull, though this could change) #(4) Then, for each event + time, we go into individual's titer trajectory to determine if each transmission actually # takes place, based on the person's titer load at the point of infection. Since our initial R0 is 2.5, we fix theta at .7, such that the max # probability of infection taking place is ~50% at peak viral load. If one 'event' generates multiple cases, each case is treated independently # with this titer-based transmission probability. #(5) If there is a group size limit, it gets imposed here. Say that group limit is 6 and one event is supposed to generate 10 cases. # If this person abides by group limits (there is a parameter for this), we truncate the 10 person event to a 6 person event, and assume # as a worst-case scenario that all 6 of those people get infected #first, draw number of transmission events per person pop.mat$num_infection_events <- eventFn(pop.UCB) #then get a list of event times per person for each of these events pop.list <- dlply(pop.mat, .(employ_ids)) event.times.list <- lapply(pop.list, get.event.time, genTime=genTime) # now, each person has a number of cases, a number of events, a time for each event, # and a virus titer trajectory. 
  ## (continuation of initiate.pop)
  # take this information and determine which events actually take place and when they occur
  # also, if applicable, here impose the group size limit and record cases both before
  # and after that limit occurs
  # return the data as well as the edited event times list that replaces each
  # failed case generation with NA
  double.list <- mapply(FUN=get.real.cases, pop.dat=pop.list, event.dat=event.times.list, titer.dat1 = titer.split, MoreArgs = list(within.host.theta=within.host.theta, group.limit=as.numeric(pop.par$par1[pop.par$parameter=="group-size-limit"])), SIMPLIFY = FALSE)
  #double.list holds, per person: [[1]] the updated row, [[2]] the generation times
  pop.mat.list <- sapply(double.list, "[",1)
  pop.mat <- data.table::rbindlist(pop.mat.list)
  #pop.mat <- do.call("rbind", pop.mat.list)
  gen_time_list <- sapply(double.list, "[",2)
  dat.gen <- data.table::rbindlist(gen_time_list)
  #dat.gen = do.call("rbind", gen_time_list)
  #now cases from potential get distributed among events
  #then we determine how many take place based on titers
  #then we remove those that don't take place based on group size limitation
  #then, we set an exposure time for those cases that actually occur
  pop.mat$exposure_time[pop.mat$state>0] <- 0
  #first, assume that isolation time is symptomatic
  pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1 ])
  pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
  pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"
  #now, if testing (and, for other cases, tracing) comes first, we replace it
  #test needs to be AFTER start time of test sensitive and before end time of test sensitive
  pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
  #NOTE(review): the LHS subset below carries an extra complete.cases(pop.mat)
  #conjunct that the RHS subset lacks; if any selected row were incomplete the
  #replacement lengths would differ - confirm this asymmetry is intentional.
  pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation > pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]
  #and then, if any of these are Inf, change the reason to NA
  pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA
  #now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
  #then export, regroup with other half of population and assign those new infections in the next time step
  new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))
  #if potential cases were 0, then actual cases are too:
  pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0] <- 0
  pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen, timestep)))
  #now pop it back out, join with other sub-mats and assign those infections in time and space using your generation time vector.
return(list(pop.mat, dat.gen)) }

# epidemic.step: advance ONE sub-population (a single employ_cat) through one
# timestep of the simulation.
#   pop.mat         - state table for this sub-population (one row per person)
#   timestep        - current absolute simulation time
#   length_timestep - size of one time increment
#   prob.out        - per-susceptible probability of an outside exposure this step
#   gen_list        - per-individual generation-time draws (list keyed by employ_ids)
#   input.par       - parameter table; rows are filtered to this population below
# Returns the updated pop.mat for this sub-population.
epidemic.step = function(pop.mat, timestep, length_timestep, prob.out, gen_list, input.par){

  #pop.mat <- as.data.frame(pop.mat)

  # parameter rows for this sub-population only (pop.mat holds a single employ_cat here)
  pop.par = subset(input.par, population ==unique(pop.mat$employ_cat))

  #advance timestep
  pop.mat$timestep = timestep

  #introduce outside infections into susceptible spaces (cannot misplace those "exposed" by UCB above since we are guaranteeing those transmissions to take place)
  #could easily modulate this for risk cohorts in future
  #check if weekend
  # we say days 1 and 2 are testing
  # days ###MULTIPLE
  # if(timestep ==1 | timestep ==2 | (timestep%%7==1)| (timestep%%7==2)){
  #  n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out.wk, prob.out.wk)))
  #}else{

  # one Bernoulli(prob.out) draw per current susceptible (state==0); the sum is
  # the number of community introductions this timestep
  n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out, prob.out)))
  #}

  if(n.outside.exposures>0){

    #if you find some, fill them in with an exposure time of now, distributed at random
    #could add in higher introduction rate for certain sub-groups in this case
    # NOTE(review): if exactly one susceptible remains, sample(x) with a length-1
    # numeric x samples from 1:x (base-R sample() footgun) — confirm this edge case
    new.case.ids = sample(pop.mat$employ_ids[pop.mat$state==0], size = n.outside.exposures, replace=FALSE)

    # print(new.case.ids)

    #and assign
    for (i in 1:length(new.case.ids)){
      #print(i)
      #print(new.case.ids[i])

      #expose the new cases immediately - but only those that have reached the current timestep already
      #those "predestined" for exposure get passed over for now.
      pop.mat$state[pop.mat$employ_ids==new.case.ids[i]] <- 1
      #pop.mat$state[pop.mat$employ_ids==new.case.ids[i] & pop.mat$stat_asymp==TRUE] <- 2

      #exposure time is this timestep
      pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] <- timestep

      #infection kicks off so you can now calculate symptom onset time
      #pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] + timestep
      #tmp <- pop.mat$time_of_test_positivity[pop.mat$employ_ids==new.case.ids[i]]
      #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]]
      #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]]

      #infector is outside source that cannot be tracked
      pop.mat$infector[pop.mat$employ_ids==new.case.ids[i]] <- 0
      pop.mat$infector_iso_time[pop.mat$employ_ids==new.case.ids[i]] <- Inf
      pop.mat$case_source[pop.mat$employ_ids==new.case.ids[i]] <- "alameda"

      #introduced cases cannot be traced
      pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.case.ids[i]] <- Inf
    }
  }

  #pop.mat <- subset(pop.mat, !is.na(employ_ids))

  #now, for those that are currently exposed (both from outside and UCB),
  # compute distributions of iso time
  # and new actual cases caused.
#then, we can assign those times and move them to recovered status

# snapshot: pop.mat.old keeps the actual_cases_caused column so it can be
# re-attached after the isolation columns are recomputed below
pop.mat.old <- pop.mat
pop.mat <- dplyr::select(pop.mat, -(actual_cases_caused))

#first, go ahead and move test postivity to the appropriate degree
#pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] <- pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] + pop.mat$exposure_time[pop.mat$state==1 | pop.mat$state==2]

#print("7")

# --- isolation assignment for active cases (state==1) ---
# priority order: symptoms by default, then replaced by tracing if it happens
# earlier, then replaced by testing if that happens earlier still AND the
# infection is within its test-sensitive window at the next test date.
#first, assume that isolation time is symptomatic
pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1])
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)
pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso"

#now, if tracing comes first, we replace it
#tracing only applicable within our community
#print("8")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- "tracing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- pop.mat$time_of_tracing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat)]

#or, finally, if testing comes first, we replace it here - IF the infection is test sensitive at the time of testing
#print("9")
pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso"
pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end]

#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA

#first, double-check that nothing was exposed after the isolation time (would be based on tracing only)
#if that ever happens, that person becomes susceptible again because that infection was never generated
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
# state 7 is a transient marker: "traced exposure that never actually happened"
pop.mat$state[pop.mat$exposure_time>pop.mat$time_isolation & pop.mat$state==1 & pop.mat$reason_isolated=="tracing_iso" ] <- 7
pop.mat$reason_isolated[pop.mat$state==7] <- NA
pop.mat$time_isolation[pop.mat$state==7] <- NA
pop.mat$case_source[pop.mat$state==7 ] <- NA

#now remove a case from the infectors that "caused" these events
infector.sub1 = pop.mat[pop.mat$state==7,]
infector.sum1 = ddply(infector.sub1, .(infector), summarize, cases_removed = length(employ_ids))

#save this for the end
pop.mat$infector[pop.mat$state==7] <- NA
pop.mat$infector_iso_time[pop.mat$state==7] <- NA
pop.mat$exposure_time[pop.mat$state==7]<- NA
pop.mat$state[pop.mat$state==7] <- 0

#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, and assign those new infections in the next time step

#now, advance forward all of the "time of etc." for susceptibles
#and time of next testing for all
new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids))

#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0| pop.mat$state==2 & pop.mat$potential_cases_caused ==0] <- 0

#but, if potential cases were greater than 0, then actual might be as well, depending on the isolation times
#dat.gen.new = do.call("rbind", gen_list)
dat.gen.new = data.table::rbindlist(gen_list)

#pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0| pop.mat$state==2 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep, weekend.amp=weekend.amp)))
# NOTE(review): new.actual.cases is assigned back to pop.mat later (after the
# actual_cases_caused column is restored) — see the re-merge further below
new.actual.cases <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))

#these have not kicked off, so let them kick forward
pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] <- pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3] + length_timestep
pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3] + length_timestep

#tracing only gets started when infector iso time is assigned, so we don't touch it here

#if you are at your current testing date, then next test is bumped into the future.
#Otherwise, you just advance in time until you reach it
#but the lag time is maintained after the new test date, so deal with that first
pop.mat$time_of_testing_iso = pop.mat$time_of_testing_iso - pop.mat$time_of_next_test #now this is just the lag time

#now, compute actual next test day if today is the test day of the runs in question - add different frequencies depending on the type
pop.mat$time_of_next_test[pop.mat$time_of_next_test==timestep] <- timestep + as.numeric(pop.par$par1[pop.par$parameter=="test-freq"])

#now put the lag back on to the new test day for isolation
pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test
pop.mat$time_of_testing_iso[pop.mat$time_of_next_test==Inf] <- Inf

#and, finally, check in on those that were "pre-exposed" up above.
#move them up to their appropriate status if they should be exposed now
#if they reach it, go ahead and assign their actual cases

#first, eliminate if they should not occur
#first flag
#then, go in and find that person's infector and reduce their actual cases by one
#based on this case that did not occur
# state 8 is a transient marker: "pre-exposed case whose infector isolated first"
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 8
pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] - pop.mat$infector_iso_time[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time]
pop.mat$case_source[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time] <- NA

#now remove a case from the infectors that "caused" these events
infector.sub2 = pop.mat[pop.mat$state==8,]
infector.sum2 = ddply(infector.sub2, .(infector), summarize, cases_removed = length(employ_ids))

#save this for the end
pop.mat$infector[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- NA
pop.mat$state[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 0
# anyone returned to susceptible (state==0) carries no infector/exposure info
pop.mat$infector_iso_time[pop.mat$state==0] <- NA
pop.mat$exposure_time[pop.mat$state==0] <- NA

# combine the voided-case tallies from the state-7 and state-8 passes above
# NOTE(review): scalar condition uses `&`; `&&` is the conventional scalar form.
# Both infector.sum1 and infector.sum2 are created unconditionally above, so
# the first branch is the one normally taken (possibly with zero rows).
if (exists('infector.sum1') & exists('infector.sum2')){
  infector.sum <- rbind(infector.sum1, infector.sum2)
}else if(exists('infector.sum1')){
  infector.sum <- infector.sum1
}else if(exists('infector.sum2')){
  infector.sum <- infector.sum2
}

#then, if they pass that test and still remain 'pre-exposed', check to see if they should be elevated in status to 1 or 2
#(meaning they have reached the exposure time)
#if so, assign them isolation time and actual cases which get allocated in the next round.
#otherwise, they just keep current status as "pre-exposed"

#first make them complete cases, so that R is not angry with new columns being filled in
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- Inf
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- "in progress"

# --- isolation assignment for newly-activated pre-exposed cases (state==3) ---
# same symptom -> tracing -> testing precedence as for state==1 above
#first, assume that isolation time is symptomatic
#print("1")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- "symptom_iso"
# print("2")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- pop.mat$time_of_symptom_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)]
pop.mat$time_isolation = as.numeric(pop.mat$time_isolation)

#now, if tracing comes first, we replace it
#print("3")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- "tracing_iso"
# print("4")
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- pop.mat$time_of_tracing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)]

#finally, if testing comes first, we replace it
#print("5")
pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- "testing_iso"
#print("6")
#print(pop.mat[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & complete.cases(pop.mat) ,])
#print(pop.mat)
pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)]

#and then, if any of these are Inf, change the reason to NA
pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA

#now, based on isolation time and the generation times in the vector, determine the actual number of cases caused,
#then export, regroup with other half of population and assign those new infections in the next time step
new.cases = dlply(pop.mat[pop.mat$state== 3& pop.mat$potential_cases_caused>0 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat) ,], .(employ_ids))

#if potential cases were 0, then actual cases are too:
#pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==4 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep] <- 0

tmp.dat = pop.mat[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ,]
new.cases.2 <- dlply(tmp.dat, .(employ_ids))
new.actual.cases.3 <- c(unlist(lapply(new.cases.2, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep)))

#now add the actual cases back in
# re-attach the actual_cases_caused column snapshotted into pop.mat.old earlier
pop.mat <- cbind.data.frame(pop.mat, pop.mat.old$actual_cases_caused)
names(pop.mat)[length(names(pop.mat))] <- "actual_cases_caused"

#reorder
pop.mat <- dplyr::select(pop.mat, names(pop.mat.old))

#and add in the new actual cases
# NOTE(review): new.actual.cases was computed from the state==1 & potential>0
# subset WITHOUT the exposure_time filter used on the left-hand side here —
# lengths match only if all state==1 rows have exposure_time<=timestep; confirm
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==1 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep ] <- 0
pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep] <- new.actual.cases
pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ] <- new.actual.cases.3

#and, finally, change state so these cases can get allocated in the next round.
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time<=timestep ] <- 1

#and remove any avoided cases if there were some
# subtract the voided transmissions (tallied in infector.sum) from each
# infector's actual case count
if (exists('infector.sum')){
  if(nrow(infector.sum)>0){
    for(i in 1:length(infector.sum$infector)){
      pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] <- pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] - infector.sum$cases_removed[i]
    }
  }
}

#and return
return(pop.mat)
}

# get.mean.sd: given a list of per-replicate numeric vectors, truncate all to
# the shortest replicate, then return a data frame of the elementwise mean and
# a normal-approximation 95% interval (mean +/- 1.96*sd), floored at 0.
# Columns are named "mean_<name>", "lci_<name>", "uci_<name>".
get.mean.sd <- function(vector, name){
  #first, trim to same length
  min.length <- min(unlist(lapply(vector, length)))
  for (i in 1:length(vector)){
    vector[[i]] <- vector[[i]][1:min.length]
  }
  vec <- unlist(vector, use.names = FALSE)
  DIM <- c(length(vector[[1]]),1)
  n <- length(vector)
  # elementwise mean across replicates: positions repeat every prod(DIM) entries
  list.mean <- tapply(vec, rep(1:prod(DIM),times = n), mean)
  attr(list.mean, "dim") <- DIM
  list.mean <- as.data.frame(list.mean)
  list.sd <- tapply(vec, rep(1:prod(DIM),times = n), sd)
  attr(list.sd, "dim") <- DIM
  list.sd <- as.data.frame(list.sd)
  list.uci = list.mean + 1.96*list.sd
  list.lci = list.mean - 1.96*list.sd
  # clip impossible negative counts to zero
  list.lci[list.lci<0] <- 0
  list.uci[list.uci<0] <- 0
  dat= cbind.data.frame(list.mean, list.lci, list.uci)
  names(dat) = paste(c("mean", "lci", "uci"), name, sep="_")
  return(dat)
}

# get.mean.matrix: like get.mean.sd but for a list of per-replicate matrices
# (columns = 3 stats x n.cat categories). Rows are truncated to the shortest
# replicate; the mean matrix is the elementwise average across replicates.
get.mean.matrix <- function(mat){
  #first, trim to same length
  min.length <- min(unlist(lapply(mat, nrow)))
  n.cat = ncol(mat[[1]])/3
  for (i in 1:length(mat)){
    mat[[i]] <- mat[[i]][1:min.length,]
  }
  list.mean <- Reduce("+",mat) / length(mat)
  mat.2 <- do.call("cbind", mat)
  #mat.2 <- data.table::rbindlist(mat)
  # NOTE(review): list.sd is one sd per ROW taken across ALL cbound columns,
  # while list.mean is a matrix — the +/- below recycles the vector column-wise.
  # Confirm this pooled-sd interval is intended rather than a per-column sd.
  list.sd <- apply(mat.2, 1, sd)
  list.uci = list.mean + 1.96*list.sd
  list.lci = list.mean - 1.96*list.sd
  list.lci[list.lci<0] <- 0
  list.uci[list.uci<0] <- 0
  dat= cbind.data.frame(list.mean, list.lci, list.uci)
  names(dat) = c(paste0("mean_iso_cat_",seq(1,n.cat,1)), paste0("lci_iso_cat_",seq(1,n.cat,1)), paste0("uci_iso_cat_",seq(1,n.cat,1)), paste0("mean_exp_cat_",seq(1,n.cat,1)), paste0("lci_exp_cat_",seq(1,n.cat,1)), paste0("uci_exp_cat_",seq(1,n.cat,1)),
paste0("mean_deaths_cat_",seq(1,n.cat,1)), paste0("lci_deaths_cat_",seq(1,n.cat,1)), paste0("uci_deaths_cat_",seq(1,n.cat,1)))
  return(dat)
}

# convert.cat: reshape the wide summary produced by get.mean.matrix (9 columns
# per category: mean/lci/uci for isolations, exposures, deaths) into three long
# tables — one each for isolations, exposures, deaths — with columns
# mean/lci/uci/type, where type encodes the sub-population index.
convert.cat = function(dat){
  n.cat = ncol(dat)/9
  max.times = nrow(dat)
  # column layout follows get.mean.matrix: iso block, then exp block, then deaths
  iso.dat = dat[,1:(n.cat*3)]
  exp.dat = dat[,(n.cat*3+1):(n.cat*3*2)]
  death.dat = dat[,(n.cat*3*2+1):ncol(dat)]
  #then, sep by cat
  list.iso <- list.exp <- list.deaths <- list()
  for(i in 1:n.cat){
    # within each block, mean columns come first, then lci, then uci
    list.iso[[i]] <- cbind.data.frame(iso.dat[,i], iso.dat[,i+n.cat],iso.dat[,i+(n.cat*2)])
    list.exp[[i]] <- cbind.data.frame(exp.dat[,i],exp.dat[,i+n.cat],exp.dat[,i+(n.cat*2)])
    list.deaths[[i]] <- cbind.data.frame(death.dat[,i], death.dat[,i+n.cat],death.dat[,i+(n.cat*2)])
  }
  #iso.db <- do.call("rbind", list.iso)
  iso.db <- data.table::rbindlist(list.iso)
  iso.db$type = rep(1:n.cat, each = max.times)
  iso.db$type <- paste0("iso-pop-", iso.db$type)
  names(iso.db) <- c("mean", "lci", "uci", "type")
  #exp.db <- do.call("rbind", list.exp)
  exp.db <- data.table::rbindlist(list.exp)
  exp.db$type = rep(1:n.cat, each = max.times)
  exp.db$type <- paste0("exp-pop-", exp.db$type)
  names(exp.db) <- c("mean", "lci", "uci", "type")
  #death.db <- do.call("rbind", list.deaths)
  death.db <- data.table::rbindlist(list.deaths)
  death.db$type = rep(1:n.cat, each = max.times)
  death.db$type <- paste0("death-pop-", death.db$type)
  names(death.db) <- c("mean", "lci", "uci", "type")
  return(list(iso.db, exp.db, death.db))
}

# R.fit.sum: per-column summary of a replicate matrix of R values — mean with
# normal-approximation 95% interval (lci floored at 0) plus a negative-binomial
# fit (mu, size) per column.
R.fit.sum <- function(mat.df){
  #apply across all columns
  mean.all <- apply(mat.df, 2,mean)
  sd.all <- apply(mat.df, 2,sd)
  lci.all <- mean.all-1.96*sd.all
  lci.all[ lci.all < 0] <- 0
  uci.all <- mean.all+1.96*sd.all
  #and nbinom fit
  # NOTE(review): fitdist is presumably fitdistrplus::fitdist, loaded elsewhere
  # in this script — confirm the package is attached before this is called
  all.fit <- apply(mat.df, 2, fitdist, distr="nbinom")
  #and return
  out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
  out.dat$class <- names(mat.df)
  #names(out.dat) <- names(mat.df)
  #out.dat$estimate <- c("mean", "lci", "uci")
  #out.dat[out.dat<0] <- 0
  #and add fit
  size.out <- list()
  mu.out <- list()
  for(i in 1:length(all.fit)){
    size.out[[i]] <-
all.fit[[i]]$estimate[1]
    mu.out[[i]] <- all.fit[[i]]$estimate[2]
  }
  size.out <- c(unlist(size.out))
  mu.out <- c(unlist(mu.out))
  out.dat$nb_mu <- mu.out
  out.dat$nb_size <- size.out
  # names(size.out) <- names(mu.out) <- names(out.dat)
  # out.dat <- rbind(out.dat, size.out, mu.out)
  #
  # out.dat$total_potential_cases <- as.numeric(out.dat$total_potential_cases)
  # out.dat$UCB_potential_cases <- as.numeric(out.dat$UCB_potential_cases)
  # out.dat$UCB_post_group_potential_cases <- as.numeric(out.dat$UCB_post_group_potential_cases)
  # out.dat$UCB_post_titer_potential_cases <- as.numeric(out.dat$UCB_post_titer_potential_cases)
  # out.dat$UCB_post_isolations_actual_cases <- as.numeric(out.dat$UCB_post_isolations_actual_cases)
  #
  return(out.dat)
}

# R.fit.sum.lognorm: same per-column mean / 95% interval summary as R.fit.sum
# but without the negative-binomial fit (used when R0 is log-normal).
R.fit.sum.lognorm <- function(mat.df){
  #apply across all columns
  mean.all <- apply(mat.df, 2,mean)
  sd.all <- apply(mat.df, 2,sd)
  lci.all <- mean.all-1.96*sd.all
  lci.all[ lci.all < 0] <- 0
  uci.all <- mean.all+1.96*sd.all
  #and return
  out.dat <- cbind.data.frame(mean.all, lci.all, uci.all)
  out.dat$class <- names(mat.df)
  #names(out.dat) <- names(mat.df)
  #out.dat$estimate <- c("mean", "lci", "uci")
  #out.dat[out.dat<0] <- 0
  #
  return(out.dat)
}

# simulate.epidemic: run one full stochastic outbreak. Builds the R0 and
# transmission-event samplers, initializes each sub-population, iterates the
# daily epidemic steps, and returns a list of
#   (1) daily case/isolation time series, (2) the final pop.mat,
#   (3) the realized proportion asymptomatic, (4) the R-reduction matrix.
simulate.epidemic <- function(input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, titer.dat, LOD, test_rotation_name){

  # choose the R0 sampler from the virus parameter table
  if (virus.par$distribution[virus.par$parameter=="R0"]=="log-normal"){
    #sample R0 normal
    R0fn = R0_fn(meanR0=virus.par$par1[virus.par$parameter=="R0"], sdR0=virus.par$par2[virus.par$parameter=="R0"])
  }else if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){
    #sample R0 normal
    R0fn = R0_fn_nb(muR0=virus.par$par1[virus.par$parameter=="R0"], sizeR0=virus.par$par2[virus.par$parameter=="R0"])
  }

  #and the number of transmission events, from a negbinom
  #remember that fewer events = higher likelihood of a big superspreading event.
#but the vast majority of people have both few events and few cases
# (despite the comment above, events are drawn from a Poisson here)
eventFn = poisson_fn(lambda =as.numeric(input.par$par1[input.par$parameter=="transmission-events"]))

#and normal distribution of the detection limit

#then, form your new populations
#now split the population based on risk
tot.pop = length(input.pop)
pop.num = 1:tot.pop

# assign each titer trajectory to a sub-population by employ-id range
titer.dat$cat <- NA
for (i in 1:(length(pop.num)-1)){
  titer.dat$cat[titer.dat$employ_ids < employ.id.vector [i+1] & titer.dat$employ_ids >= employ.id.vector [i]] <- pop.num[i]
}
titer.dat$cat[is.na(titer.dat$cat)] <- pop.num[length(pop.num)]

#and split
titer.dat.split <- dlply(titer.dat, .(cat))

#make the proper number of pop.mat depending on the total number of subpopulations
#populate each using the appropriate parameters
out.list = mapply(FUN=initiate.pop, start.ID.employ = as.list(employ.id.vector), pop.UCB=as.list(input.pop), n.init.exposed= as.list(n.init.exposed.vector), pop.ID = as.list(pop.num), titer.dat=titer.dat.split, MoreArgs= list(input.par=input.par, virus.par=virus.par, R0fn=R0fn, eventFn=eventFn, within.host.theta=within.host.theta, LOD=LOD))

pop.list = out.list[1,]
gen_list_long <- out.list[2,]
#original.r0 <- out.list[3,][[1]]
#gen_list_long_wkend <- out.list[3,]
#pop.mat <- do.call("rbind", pop.list)
pop.mat <- data.table::rbindlist(pop.list)
#gen.dat.all <- do.call("rbind", gen_list_long)
gen.dat.all <- data.table::rbindlist(gen_list_long)

#now, double-check that the generation time dataframe is the same length as the number of unique employ ids
# NOTE(review): sum(setdiff(...)) > 0 only detects missing ids whose values sum
# to a positive number — fine for positive employ_ids; confirm ids are never 0
if(sum(setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids))>0){
  missing.ids <- setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids)
  missing.cases <- list()
  for(i in 1:length(missing.ids)){
    missing.cases[[i]] <- pop.mat$potential_cases_caused[pop.mat$employ_ids==missing.ids[i]]
  }
  missing.cases <- c(unlist(missing.cases))
  if(sum(missing.cases)>0){
    missing.gen <- genTime(missing.cases)
    add.dat <- cbind.data.frame(rep(missing.ids, missing.cases), missing.gen)
  }else{
    missing.gen <- rep(NA, length(missing.cases))
    add.dat <- cbind.data.frame(missing.ids, missing.gen)
  }
  names(add.dat) <- names(gen.dat.all)
  gen.dat.all <- rbind(gen.dat.all, add.dat)
  gen.dat.all <- arrange(gen.dat.all, employ_ids)
}

gen_list = dlply(gen.dat.all, .(employ_ids))
#gen_list_wk = dlply(gen.dat.all.wk, .(employ_ids))

# daily force of infection from outside (Bay Area) prevalence, converted to a
# per-person per-timestep exposure probability
foi.bay.area = initial.R*bay.area.prev*length_timestep #rate per day at which susceptibles become infected
#foi.wkend = bay.area.R*bay.area.prev*length_timestep*weekend.amp
prob.outside.exposure =1-(exp(-1*foi.bay.area)) #for each person in berkeley, this is the probability of getting exposed each day
prob.outside.exposure[prob.outside.exposure<0] <- 0
#prob.outside.exposure.wk =1-(exp(-1*foi.wkend))
#could also be a vector

# --- main time loop ---
times_vect = seq(length_timestep,times, by = length_timestep)
for(i in 1: length(times_vect)){
  #print(i)
  timestep = times_vect[i]

  #could make other functions here if people mostly infect their own subgroups
  #here, we distribute the infections amongst new people and retire the old
  pop.mat = assign.infections(pop.mat = pop.mat, gen_list=gen_list, timestep = timestep, input.par = input.par)

  #now split it by population to introduce outside exposures
  pop.split = dlply(pop.mat, .(employ_cat))
  pop.mat.list = lapply(pop.split, FUN=epidemic.step, timestep= timestep, prob.out = prob.outside.exposure, gen_list=gen_list, input.par=input.par, length_timestep = length_timestep)

  #then, rejoin
  #pop.mat = do.call("rbind", pop.mat.list)#print(i)
  pop.mat = data.table::rbindlist(pop.mat.list)

  #then, just keep tabs that there are enough susceptibles to fill the new cases in the next step
  remaining.susceptibles = length(pop.mat$state[pop.mat$state==0])
  future.cases = sum(pop.mat$actual_cases_caused[pop.mat$state==1])
  if(future.cases>remaining.susceptibles){
    #if there are not enough susceptibles left for all of the assigned cases before you reach the end of the time series, then you go into the next step
    #print(i)
    pop.mat = assign.last.infections(pop.mat = pop.mat, gen_list = gen_list, remaining.susceptibles = remaining.susceptibles, timestep = timestep)
    #print(i)
  }
}

#collect all the "R" reduction info:
R.mat <- dplyr::select(pop.mat, total_potential_cases_caused, original_potential_cases_caused_UCB, post_titer_potential_cases_caused_UCB, potential_cases_caused, actual_cases_caused)
names(R.mat) <- c( "total_potential_cases", "UCB_potential_cases", "UCB_post_titer_potential_cases", "UCB_post_group_potential_cases", "UCB_post_isolations_actual_cases")
R.mat <- arrange(R.mat, desc(total_potential_cases))
R.mat$UCB_post_isolations_actual_cases[is.na(R.mat$UCB_post_isolations_actual_cases)] <- 0
#R.mat <- as.matrix(R.mat)

# #new R0
# new.R0 = subset(pop.mat, !is.na(infector))
# new.R0 = ddply(new.R0, .(infector), summarize, cases_caused=length(employ_ids))
# tot.introductions = new.R0$cases_caused[new.R0$infector=="0"]
# new.R0 = subset(new.R0, infector!="0")
#
# maxID = max(pop.mat$employ_ids)
# missing_ids <- (1:maxID)[!(1:maxID %in% new.R0$infector)]
#
# # add in missing days if any are missing
# if (length(missing_ids > 0)) {
#   R0comp <- data.table::rbindlist(list(new.R0,
#                                        data.table(infector = missing_ids,
#                                                   cases_caused = 0)))
# }
#
# R0comp <- arrange(R0comp, infector)
#
# #now add back in those cases not at UCB...
# #original.r0$actual_cases_caused_UCB <- R0comp$cases_caused

#get prop.asymptomatic at this cutoff
prop.asym <- length(pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset==Inf])/length(pop.mat$time_of_symptom_iso)

#from here, compute Reffective
R.dat = dplyr::select(pop.mat, employ_ids, infector, time_isolation, case_source)
R.dat = arrange(R.dat, time_isolation)

#icidence will just be cases by time isolated
#if not isolated, you don't count for incidence...
R.dat = R.dat[!is.na(R.dat$time_isolation),]
R.dat$time_isolation = ceiling(R.dat$time_isolation)

#could add source. don't for now
R.sum = ddply(R.dat, .(time_isolation), summarise, length(employ_ids))
#R.sum = ddply(R.dat, .(time_isolated, source), summarise, length(employ_ids))
names(R.sum) = c( "day", "incidence")

#plot as incidence
#plot(as.incidence(R.sum$incidence, dates = R.sum$day))
#this will go in as your incidence data
#now add in pairs to estimate the serial interval
#T <- nrow(R.sum)
#t_start <- seq(2, T-13) # starting at 2 as conditional on the past observations
#t_end <- t_start + 13
#
# R.est = estimate_R(R.sum$incidence,
#                    method="parametric_si",
#                    config = make_config(list(#t_start = t_start,
#                      #t_end = t_end,
#                      mean_si = serial_mean, std_si = serial_sd)))
#
# #plot(R.est, "R")
# #get midpoint and R values and extract
# R.out = cbind.data.frame(get.midpoint(par.low = R.est$R$t_start, par.hi = R.est$R$t_end), R.est$R$`Mean(R)`)
# names(R.out) = c("day", "Reffective")
# #and try it based on pairs

pop.mat = data.table(pop.mat)

#now, get broad incidence data to report
# per-stratum daily tallies: by source (UCB vs alameda) and by isolation reason
UCB.mat = subset(pop.mat, case_source=="UCB")
alameda.mat = subset(pop.mat, case_source=="alameda")
symp.mat = subset(pop.mat, reason_isolated=="symptom_iso")
trace.mat = subset(pop.mat, reason_isolated=="tracing_iso")
test.mat = subset(pop.mat, reason_isolated=="testing_iso")

daily_exposures <- pop.mat[, day := ceiling(exposure_time) #time_isolated
][, .(daily_exposures = .N), by = day ]

# #daily isolations
daily_isolations <- pop.mat[, day := ceiling(time_isolation) #
][, .(daily_isolations = .N), by = day ]

daily_cal <- UCB.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day ]

daily_alameda <- alameda.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day ]

daily_symp <- symp.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day ]

daily_trace <- trace.mat[, day := ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day ]

daily_test <- test.mat[, day :=
ceiling(time_isolation) #time_isolated
][, .(daily_isolations = .N), by = day ]

# --- assemble the daily_cases time series -------------------------------
# maximum outbreak day
max_day <- ceiling(times)

# days with 0 exposures in 0:max_day
#missing_days <- (0:max_day)[!(0:max_day %in% daily_isolations$day)]
missing_days <- (0:max_day)[!(0:max_day %in% daily_exposures$day)]

# add in missing days if any are missing, so zero-exposure days appear as
# explicit rows. BUG FIX (two issues):
#  - `length(missing_days > 0)` had the parenthesis misplaced; it only worked
#    by accident because length(x > 0) == length(x).
#  - the padded table was previously discarded: the next statement rebuilt
#    daily_cases from the raw daily_exposures, so zero days vanished again.
if (length(missing_days) > 0) {
  daily_cases <- data.table::rbindlist(list(daily_exposures,
                                            data.table(day = missing_days, daily_exposures = 0)))
} else {
  daily_cases <- daily_exposures
}

# order by day and accumulate total exposures (now over the PADDED table)
daily_cases <- daily_cases[order(day)
][, cumulative := cumsum(daily_exposures)]

# cut at max_day (rows beyond the simulated horizon, e.g. day == Inf/NA bins)
daily_cases <- daily_cases[day<=max_day]

# and isolations: scatter each per-stratum daily tally into its matching day row.
# seq_along() replaces 1:length(...) so an EMPTY stratum (e.g. no testing
# isolations) iterates zero times instead of hitting an NA subscript error.
daily_cases$daily_isolations <- 0
for (i in seq_along(daily_isolations$day)){
  daily_cases$daily_isolations[daily_cases$day==daily_isolations$day[i]] <- daily_isolations$daily_isolations[i]
}

#and cumulative isolations
daily_cases$cumulative_iso = cumsum(daily_cases$daily_isolations)

#and cases in UCB vs out
daily_cases$daily_UCB_isolations <- 0
for (i in seq_along(daily_cal$day)){
  daily_cases$daily_UCB_isolations[daily_cases$day==daily_cal$day[i]] <- daily_cal$daily_isolations[i]
}

#and cases from the outside (alameda) introductions
daily_cases$daily_alameda_isolations <- 0
for (i in seq_along(daily_alameda$day)){
  daily_cases$daily_alameda_isolations[daily_cases$day==daily_alameda$day[i]] <- daily_alameda$daily_isolations[i]
}

daily_cases$daily_symptomatic_isolations <- 0
for (i in seq_along(daily_symp$day)){
  daily_cases$daily_symptomatic_isolations[daily_cases$day==daily_symp$day[i]] <- daily_symp$daily_isolations[i]
}

daily_cases$daily_tracing_isolations <- 0
for (i in seq_along(daily_trace$day)){
  daily_cases$daily_tracing_isolations[daily_cases$day==daily_trace$day[i]] <- daily_trace$daily_isolations[i]
}

daily_cases$daily_testing_isolations <- 0
for (i in seq_along(daily_test$day)){
  daily_cases$daily_testing_isolations[daily_cases$day==daily_test$day[i]] <-
daily_test$daily_isolations[i] } # # #now attach R-effective # daily_cases$Reffective = NA # # for(i in 1:nrow(R.out)){ # daily_cases$Reffective[daily_cases$day==R.out$day[i]] <- R.out$Reffective[i] # } # #add category pop.mat.cat= dlply(pop.mat, .(employ_cat)) new_col <- lapply(pop.mat.cat, FUN=add.risk.cat, pop_dat=daily_cases) #and also the daily exposures new_col2 <- lapply(pop.mat.cat, FUN=add.risk.cat.exp, pop_dat=daily_cases, input_par=input.par) new_col_exp <- sapply(new_col2, "[", 1) new_col_deaths <- sapply(new_col2, "[", 2) #tmp = data.table::rbindlist(new_col) tmp = as.data.frame(do.call("cbind", new_col)) names(tmp) <- paste0("isolations-employ-cat-", unique(input.par$population)) tmp2 = as.data.frame(do.call("cbind", new_col_exp)) #tmp2 = data.table::rbindlist(new_col_exp) names(tmp2) <- paste0("exposures-employ-cat-", unique(input.par$population)) tmp3 = as.data.frame(do.call("cbind", new_col_deaths)) #tmp3 = data.table::rbindlist(new_col_deaths) names(tmp3) <- paste0("deaths-employ-cat-", unique(input.par$population)) #and attach to daily cases daily_cases <- cbind.data.frame(daily_cases, tmp, tmp2, tmp3) # #finally, calculate some summary statistics from the epidemic # tot.exposures = sum(daily_cases$daily_exposures, na.rm=T) # tot.isolations = sum(daily_cases$daily_isolations, na.rm=T) # #time.to.control = max(daily_cases$day[!is.na(daily_cases$Reffective)]) # max.exposures.per.day = max(daily_cases$daily_exposures, na.rm=T) # mean.exposures.per.day = mean(daily_cases$daily_exposures, na.rm=T) # max.iso.per.day = max(daily_cases$daily_isolations, na.rm=T) # mean.iso.per.day = mean(daily_cases$daily_isolations, na.rm=T) # time.of.peak.iso = min(daily_cases$day[daily_cases$daily_isolations==max(daily_cases$daily_isolations, na.rm=T)]) # time.of.peak.exposure = min(daily_cases$day[daily_cases$daily_exposures==max(daily_cases$daily_exposures, na.rm=T)]) # #and report out the max day before your cases are too few to calculate Reffective #out.stat <- 
c(tot.exposures, tot.isolations, max.exposures.per.day, mean.exposures.per.day, max.iso.per.day, mean.iso.per.day, time.of.peak.exposure, time.of.peak.iso) #names(out.stat) <- c("total_exposures", "total_isolations", "max_exp_per_day", "mean_exp_per_day", "max_iso_per_day", "mean_iso_per_day", "time_peak_exposure", "time_peak_isolation") pop.mat$LOD <- LOD #add TAT if this is a single population model, but if it is mixed in a multipop, note that if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ pop.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ pop.mat$TAT <- "multiple" } pop.mat$test_rotation <- test_rotation_name return(list(daily_cases,pop.mat, prop.asym, R.mat)) } replicate.epidemic = function(n.reps, input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, test_rotation_name, LOD, titer.dat){ out = replicate(n.reps, simulate.epidemic(virus.par = virus.par, input.par = input.par, input.pop=input.pop, n.init.exposed.vector=n.init.exposed.vector, times=times, bay.area.prev = bay.area.prev, initial.R = initial.R, within.host.theta = within.host.theta, burnin = burnin, length_timestep=length_timestep, employ.id.vector =employ.id.vector, LOD = LOD, titer.dat = titer.dat, test_rotation_name = test_rotation_name), simplify = "array") #make list out.time<- out.daily <- out.cal <- out.iso <- out.cumulative <- out.ala <- out.symp <- out.trace <- out.test <- out.iso <-out.cum.iso <- pop.mat.chain <- out.prop.asym <- R.mat.out <- list() #and make list of all the categories of sub-pop out.cat <- list() for (i in 1:ncol(out)){ #tmp <- data.table::cbindlist(out[,i][[1]]) tmp <- do.call("cbind", out[,i][[1]]) out.time[[i]] <- c(unlist(tmp[,1])) out.daily[[i]] <- c(unlist(tmp[,2])) out.cumulative[[i]] <- c(unlist(tmp[,3])) out.iso[[i]] <- c(unlist(tmp[,4])) out.cum.iso[[i]] <- c(unlist(tmp[,5])) out.cal[[i]] <- 
c(unlist(tmp[,6])) out.ala[[i]] <- c(unlist(tmp[,7])) out.symp[[i]] <- c(unlist(tmp[,8])) out.trace[[i]] <- c(unlist(tmp[,9])) out.test[[i]] <- c(unlist(tmp[,10])) #out.R[[i]] <- c(unlist(tmp[,11])) out.cat[[i]] <- cbind(unlist(tmp[,11:(10+(length(unique(input.par$population)))*3)])) #and save a chain of pop.mat tmp2 <- out[,i][[2]] pop.mat.chain[[i]] <- tmp2 #and the prop.asym tmp3 <- out[,i][[3]] out.prop.asym[[i]] <- tmp3 tmp4 <- out[,i][[4]] rownames(tmp4) <- c() R.mat.out[[i]] <- tmp4 #unique(input.par$population) } #now shorten them all to the same length and get mean + sd #print(out.time) mean.time = get.mean.sd(vector= out.time, name = "day")[,1] #print(out.daily) mean.daily = get.mean.sd(vector=out.daily, name = "exposures") #print(out.cumulative) mean.cumulative= get.mean.sd(vector=out.cumulative, name = "cumulative") #print(out.cal) mean.cal = get.mean.sd(vector=out.cal, name="UCB") #print(out.ala) mean.ala = get.mean.sd(vector=out.ala, name = "AlamedaCo") #print(out.low) mean.symp = get.mean.sd(vector=out.symp, name="symptomatic_iso") mean.trace = get.mean.sd(vector=out.trace, name="tracing_iso") mean.test = get.mean.sd(vector=out.test, name="testing_iso") #print(out.iso) mean.iso = get.mean.sd(vector=out.iso, name = "isolations") #print(out.cum.iso) mean.cum.iso = get.mean.sd(vector=out.cum.iso, name = "cumulative_isolations") #print(out.sum) #mean.sum = get.mean.sd.summary(out.sum) #and the employ-cat mean.cat = get.mean.matrix(mat=out.cat) #print(out.hi) mean.dat = cbind.data.frame(mean.time, mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace,mean.test, mean.iso, mean.cum.iso, mean.cat)#, mean.R) names(mean.dat)[1] = "day" #all of the descriptors can now change within the pop mean.dat$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.dat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.dat$TAT <- "multiple" } mean.dat$test_rotation <- test_rotation_name 
#mean.dat$prop_asym = prop.asym mean.dat$virus_par = unique(virus.par$version) mean.dat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) avg.prop.asym <- mean(c(unlist(out.prop.asym))) mean.dat$prop_asym= avg.prop.asym #and the long version mean.daily$type = "all_exposures" mean.cumulative$type = "cumulative" mean.cal$type = "UCB" mean.ala$type = "AlamedaCo" mean.symp$type = "symptomatic_iso" mean.trace$type = "tracing_iso" mean.test$type = "testing_iso" #mean.R$type = "Reffective" mean.iso$type= "isolations" #don't bother with employ-cat 00 can add later if needed mean.cat.long.list = convert.cat(mean.cat) mean.cat.long = data.table::rbindlist(mean.cat.long.list) #mean.cat.long = do.call("rbind", mean.cat.long.list) names(mean.daily) <- names(mean.cumulative) <- names(mean.cal) <- names(mean.ala) <- names(mean.symp) <- names(mean.trace) <- names(mean.test) <- names(mean.iso) <- c("mean", "lci", "uci", "type") #<- names(mean.R) mean.long <- rbind(mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace, mean.test, mean.iso, mean.cat.long)#, mean.R) n.cat = length(input.pop) mean.long$day = c(rep(mean.time, (8+(3*n.cat))))#, mean.time[-1]) mean.long$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.long$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.long$TAT <- "multiple" } mean.long$test_rotation <- test_rotation_name #mean.long$prop_asym = prop.asym mean.long$virus_par = unique(virus.par$version) mean.long$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) mean.long$prop_asym = avg.prop.asym # mean.sum$sim_cat = sim_cat # #mean.sum$prop_asym = prop.asym # mean.sum$virus_par = unique(virus.par$version) # # mean.sum$superspread = superspread # mean.sum$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) # # # #and summarize R # mean.R = summarise.R(out.list.R=out.R, day.vec = mean.dat$day, 
n.reps=n.reps) # mean.R$LOD <- LOD # mean.R$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) # mean.R$test_rotation <- test_rotation_name # #mean.R$sim_cat = sim_cat # #mean.R$prop_asym = prop.asym # mean.R$virus_par = unique(virus.par$version) # # mean.R$superspread = superspread # mean.R$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) # # mean.R$prop_asym <- avg.prop.asym # # # mean.R.mat = manage.R.matrix(mat.list=R.mat.out) #and do the best you can with the R-output #put it all together #R.mat.use <- do.call("rbind", R.mat.out) R.mat.use <- data.table::rbindlist(R.mat.out) R.mat.use <- arrange(R.mat.use, total_potential_cases) if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){ mean.R.mat = R.fit.sum(R.mat.use) }else{ mean.R.mat = R.fit.sum.lognorm(R.mat.use) } rownames(mean.R.mat) <- c() mean.R.mat$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.R.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.R.mat$TAT <- "multiple" } mean.R.mat$test_rotation <- test_rotation_name mean.R.mat$virus_par = unique(virus.par$version) mean.R.mat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) mean.R.mat$prop_asym <- avg.prop.asym #return these summaries and the list of pop.mats return(list(mean.dat, mean.long, pop.mat.chain, mean.R.mat)) } pop.par.base$par1[pop.par.base$parameter=="TAT-lag"] <- 1 pop.par.base$par2[pop.par.base$parameter=="TAT-lag"] <- .5 pop.par.base$par1[pop.par.base$parameter=="test-rotation"] <- "two-week" pop.par.base$par1[pop.par.base$parameter=="n-test-days-per-week"] <- 7 pop.par.base$par1[pop.par.base$parameter=="test-on"] <- TRUE pop.par.base$par1[pop.par.base$parameter=="test-freq"] <- 14 pop.par.base$par1[pop.par.base$parameter=="trace-lag"] <- 1 pop.par.base$par2[pop.par.base$parameter=="trace-lag"] <- .5 pop.par.base$par1[pop.par.base$parameter=="prop.trace"] <- .9 
pop.par.base$par1[pop.par.base$parameter=="iso-lag"] <- 1 pop.par.base$par2[pop.par.base$parameter=="iso-lag"] <- .5 pop.par.base$par1[pop.par.base$parameter=="prop.cases.UCB"] <- .5 out = replicate.epidemic(n.reps = 100, virus.par = virus.par, input.par = pop.par.base, input.pop=c(20000),#2000 n.init.exposed.vector=c(100),#10 times=365*2, bay.area.prev = .1/100, initial.R = 2.5, within.host.theta = .72, burnin = 0, length_timestep=1, employ.id.vector = c(1), LOD=(10^1), titer.dat = titer.dat, test_rotation_name = "two-week-7-test-days") save(out, file = "two-week-7-test-days-trace-1-symp.Rdata")
/all-model-runs/Fig4-Layered-Interventions-lower-prop-UCB/two-week/two-week-7-test-days-trace-1-symp.R
no_license
carabrook/Berkeley-COVID-testing
R
false
false
85,544
r
rm(list=ls()) .libPaths("/global/home/users/cbrook/R/x86_64-pc-linux-gnu-library/4.0") #setwd("/Users/caraebrook/Documents/R/R_repositories/Berkeley-Reopening/Dec-2020/all-runs/Re-Run-12-24/FigS1/") #no group, no test, no trace library(data.table) library(plyr) library(dplyr) library(deSolve) library(matrixStats) library(fitdistrplus) #load parameters including pre-run titer trajectories for each individual load("titer.dat.20K.Rdata") #load("titer.dat.2K.Rdata") load("virus.par.12.15.Rdata") load("pop.par.base.Rdata") get.real.cases <- function(pop.dat, event.dat, titer.dat1, within.host.theta, group.limit){ #if no cases caused, then ignore if((pop.dat$original_potential_cases_caused_UCB>0) & (pop.dat$num_infection_events>0)){ #then allocate all the cases to the events #distribute cases at random amongst the events event.names <- 1:as.numeric(pop.dat$num_infection_events) actual.events <- sample(x=event.names, size=as.numeric(pop.dat$original_potential_cases_caused_UCB), replace = T) event.data <- cbind.data.frame(actual.events, event.dat[actual.events]) names(event.data) <- c("event", "gentime") #and add the titer at the time of the event gen.tmp = as.list(event.data$gentime) event.data$titer <- c(unlist(lapply(gen.tmp, grab.titer, dat.vir =titer.dat1))) #now that you have titer, here calculate the probability of transmission, given a certain viral load, #based off of the probabiliy model from the URT in Ke et al. 2020 # in Ke et al. 2020, theta is fixed at 0.05 (could be modulated and/or fit to data) #draw Km from a normal disribution centered at the midpoint between the two values explored in Ke et al. 2020 (10^3 and 10^4) event.data$Km <- rnorm(nrow(event.data),mean=5500, sd=1000) event.data$prob_exposure = within.host.theta*(event.data$titer/(event.data$titer + event.data$Km)) event.data$prob_exposure[event.data$prob_exposure<0] <- 0 #probability is small: ~5% for a typical contact if theta = 0.05 as in Ke. 
#for theta = .7 here, up to 50% depending on theta #does the infection happen? make it a probabilistic outcome of the titer #then, you role a dice to see if this exposure causes an infection tmp.prob <- as.list(event.data$prob_exposure) event.data$InfectionYN = c(unlist(lapply(tmp.prob, test.titer))) #then total the events that actually happen to incorporate into the original data pop.dat$post_titer_potential_cases_caused_UCB <- sum(event.data$InfectionYN) #and then, if there is a group size limit, impose it here if((group.limit>0) & (pop.dat$obs_dist_limits==TRUE)){ #gives you the number of successful transmissions per event event.sum <- ddply(event.data, .(event),summarize, N=sum(InfectionYN)) event.sum$over_lim = event.sum$N-group.limit event.sum$over_lim[event.sum$over_lim<0] <- 0 #truncate # of events for the IDs listed above to the group limit. event.data.list = dlply(subset(event.data, InfectionYN==1), .(event)) new.event.list <- lapply(event.data.list, impose.group, group.limit=group.limit) #new.event.data <- do.call("rbind", new.event.list) new.event.data <-data.table::rbindlist(new.event.list) pop.dat$potential_cases_caused = sum(new.event.data$InfectionYN) #in this case, return the generation time table after the group intervention if(pop.dat$potential_cases_caused >0){ dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(new.event.data)), new.event.data$gentime) names(dat.gen.tab) <- c("employ_ids", "generation_time") }else{ dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA) names(dat.gen.tab) <- c("employ_ids", "generation_time") } }else{ pop.dat$potential_cases_caused <- pop.dat$post_titer_potential_cases_caused_UCB if(pop.dat$potential_cases_caused >0){ event.data.out = subset(event.data, InfectionYN==1) dat.gen.tab <- cbind.data.frame(rep(unique(pop.dat$employ_ids), nrow(event.data.out)), event.data.out$gentime) names(dat.gen.tab) <- c("employ_ids", "generation_time") }else{ dat.gen.tab <- 
cbind.data.frame(unique(pop.dat$employ_ids), NA) names(dat.gen.tab) <- c("employ_ids", "generation_time") } } }else{ #none take place #return the original data with 0s pop.dat$post_titer_potential_cases_caused_UCB <- 0 pop.dat$potential_cases_caused <- 0 #and return a table of generation times with nothing dat.gen.tab <- cbind.data.frame(unique(pop.dat$employ_ids), NA) names(dat.gen.tab) <- c("employ_ids", "generation_time") } return(list(pop.dat, dat.gen.tab)) } test.titer <- function(prob1){ Y_N =sample(c(0,1), size=1, prob = c(1-prob1, prob1)) return(Y_N) } impose.group <- function(event.dat1, group.limit){ tot.transmissions = nrow(event.dat1) if(tot.transmissions>group.limit){ choose.events <- sample(x=1:tot.transmissions, size=group.limit, replace = F) event.dat2 = event.dat1[choose.events,] return(event.dat2) }else{ return(event.dat1) } } get.event.time <- function(dat, genTime){ event.times = genTime(as.numeric(dat$num_infection_events)) return(event.times) } grab.titer <- function(dat1, dat.vir){ titer.out <- dat.vir$V[dat.vir$time>dat1][1] return(titer.out) } normal_fn <- function(meanpar=NULL, sdpar=NULL){ out <- purrr::partial(rnorm, mean = meanpar, sd = sdpar) return(out) } poisson_fn <- function(lambda=NULL){ out <- purrr::partial(rpois, lambda = lambda) return(out) } lognormal_fn <- function(meanlogpar=NULL, sdlogpar=NULL){ out <- purrr::partial(rlnorm, meanlog = meanlogpar, sdlog = sdlogpar) return(out) } add.risk.cat <- function(dat, pop_dat){ dat = data.table(dat) daily_new <- dat[, day := ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] pop_dat$add <- 0 for (i in 1:length(daily_new$day)){ pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_isolations[i] } out.vect <- as.data.frame(pop_dat$add) return( out.vect) } add.risk.cat.exp <- function(dat, pop_dat, input_par){ dat = data.table(dat) daily_new <- dat[, day := ceiling(exposure_time) ][, .(daily_exposures = .N), by = day ] pop_dat$add <- 0 for (i in 
1:length(daily_new$day)){ pop_dat$add[pop_dat$day==daily_new$day[i]] <- daily_new$daily_exposures[i] } out.vect <- as.data.frame(pop_dat$add) #then add deaths based on each pop cat pop.cat = unique(dat$employ_cat) out.vect2 <- as.data.frame(as.numeric(input_par$par1[input_par$parameter=="CFR" & input_par$population==pop.cat])*out.vect) # dat.out = cbind.data.frame(out.vect, out.vect2) return(list(out.vect, out.vect2)) #return(dat.out) } cross.infect <- function(dat, all.sus, input.par){ pop.par = subset(input.par, population == unique(dat$infector_cat)) #first, elim any populations for which there are no longer remaining susceptibles rem.cat = unique(all.sus$employ_cat) all.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"]) missed.cat = setdiff(all.cat, rem.cat) pop.par$sub = 0 for (i in 1: length(missed.cat)) { pop.par$sub[pop.par$parameter=="meta-pop" & pop.par$par2==missed.cat[i]] <- 1 } pop.par = subset(pop.par, sub==0) #then allocate the population of the new cases based on the proportion within and without tot.cases = nrow(dat) #then need to reallocate probabilities comparatively without the remaining possible.cat = unique(pop.par$par2[pop.par$parameter=="meta-pop"]) old.cat = as.numeric(unique(input.par$par2[input.par$parameter=="meta-pop"])) old.prob = as.numeric(input.par$par1[input.par$parameter=="meta-pop"])[1:length(old.cat)] if(length(possible.cat)<length(old.cat)){ if(length(possible.cat)==1){ dat$new_cat = possible.cat }else{ #if you've run out of probabilities, just, rellocate proportionally new.prob = rep((1/length(possible.cat)), length(possible.cat)) dat$new_cat = sample(x=possible.cat, size = tot.cases, replace = TRUE, prob = new.prob) } }else{ dat$new_cat = sample(x=old.cat, size = tot.cases, replace = TRUE, prob = old.prob) } return(dat) } assign.ID = function(sus.dat.sub, dat.new.sub){ #at the very end of the time series, you may run out of susceptibles in the right category, in which case, these just become lost infections 
if(nrow(dat.new.sub)<=length(sus.dat.sub$employ_ids)){ dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE) }else{ new.count = length(sus.dat.sub$employ_ids) new.missed = nrow(dat.new.sub) - new.count row.tmp = seq(1, nrow(dat.new.sub),1) row.take = sample(row.tmp, size = new.count, replace = FALSE) dat.new.sub <- dat.new.sub[row.take,] dat.new.sub$new_infected = sample(sus.dat.sub$employ_ids, size=nrow(dat.new.sub), replace=FALSE) } return(dat.new.sub) } assign.infections <- function(pop.mat, gen_list, timestep, input.par){ # assign new exposures (and times) based on 'actual cases caused' above # and move those that have transmitted to isolated/recovered state #(asymptomatics will be missed in iso time unless tested) # timestep.prev = unique(pop.mat$timestep) #first, pair each case with its generation times new.mat <- dplyr::select(pop.mat, employ_ids, employ_cat, state, exposure_time, actual_cases_caused, time_isolation) new.mat <- new.mat[!is.na(new.mat$actual_cases_caused) & new.mat$state==1,] #only matters if it actually causes cases. 
new.mat.zero = subset(new.mat, actual_cases_caused<1) new.mat <- subset(new.mat, actual_cases_caused>0) if(nrow(new.mat)>0){ new.mat.list <- dlply(new.mat, .(employ_ids)) #print("1") new.mat.list <- lapply(new.mat.list, make.rows) #the new new mat - no longer includes those which cased 0 actual cases #should always have at least one row because of the if-statement above #new.mat <- do.call("rbind", new.mat.list) new.mat <- data.table::rbindlist(new.mat.list) #now attach a generation time with each of these cases and a random sample from the susceptibles new.mat$generation_time <- NA index.ids = unique(new.mat$employ_ids) for(i in 1:length(index.ids )){ tmp = nrow(new.mat[new.mat$employ_ids == index.ids[i],]) #print(index.ids[[i]]) new.mat$generation_time[new.mat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:tmp] } #now, attach a place to infect (susceptible) #bias the sampling based on the proportion of infections within and without of your direct cohort #first, pair the remaining susceptibles with their category all.sus <- cbind.data.frame(pop.mat$employ_ids[pop.mat$state==0],pop.mat$employ_cat[pop.mat$state==0]) names(all.sus) = c("employ_ids", "employ_cat") new.list = dlply(new.mat, .(employ_ids)) #cross infect by cat #print("2") new.list.out <- lapply(new.list, cross.infect, all.sus=all.sus, input.par=input.par) new.mat = data.table::rbindlist(new.list.out) #new.mat = do.call("rbind", new.list.out) rownames(new.mat) <- c() #then, assign names by category of new infections id.cat = data.frame(sort(unique(new.mat$new_cat))) all.sus = arrange(all.sus, employ_cat) names(id.cat) <- "employ_cat" tmp.sus = merge(x=all.sus, y=id.cat) tmp.sus.split = dlply(tmp.sus, .(employ_cat)) new.mat.split <- dlply(new.mat, .(new_cat)) #print("3") dat.new.split.out = mapply(FUN=assign.ID, sus.dat.sub= tmp.sus.split, dat.new.sub= new.mat.split, SIMPLIFY = FALSE) new.mat = data.table::rbindlist(dat.new.split.out) #new.mat = do.call("rbind", 
dat.new.split.out) new.mat$new_exposure_time = new.mat$exposure_time + new.mat$generation_time #and merge into pop.mat #new.merge <- dplyr::select(new.mat, new_infected, employ_ids, infector_iso_time, new_exposure_time) #names(new.merge) <- c("employ_ids", "infector", "infector_iso_time", "exposure_time") #now put them into pop.mat for(i in 1:nrow(new.mat)){ #identify infector and iso time pop.mat$infector[pop.mat$employ_ids==new.mat$new_infected[i] ] <- new.mat$employ_ids[i] pop.mat$infector_iso_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$infector_iso_time[i] pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==TRUE] + new.mat$infector_iso_time[i] pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked #and exposure time pop.mat$exposure_time[pop.mat$employ_ids==new.mat$new_infected[i]] <- new.mat$new_exposure_time[i] #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] pop.mat$case_source[pop.mat$employ_ids==new.mat$new_infected[i]] <- "UCB" #transmission within berkeley #change state - only if exposure time is already achieved pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time<=timestep] <- 1 #otherwise, they still stay suceptible - but you mark them pop.mat$state[pop.mat$employ_ids==new.mat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3 } #else, just return pop mat } #now, make those that already transmitted recovered/isolated pop.mat$state[(pop.mat$state==1 & 
!is.na(pop.mat$actual_cases_caused))] <- 5 #and, if any of the old "pre-exposed" have reached their exposure time, you can #and return pop.mat return(pop.mat) } assign.last.infections <- function(pop.mat, gen_list, remaining.susceptibles, timestep){ # assign new exposures (and times) based on 'actual cases caused' above # and move those that have transmitted to isolated/recovered state #(asymptomatics will be missed in iso time unless tested) timestep.prev = unique(pop.mat$timestep) if(remaining.susceptibles>0){ #first, pair each case with its generation times new.mat <- dplyr::select(pop.mat, employ_ids, state, exposure_time, actual_cases_caused, time_isolation)#, time_of_tracing_iso) new.mat <- new.mat[ new.mat$state==1 & !is.na(new.mat$actual_cases_caused) ,] #get rid of those that cause no cases new.mat <- new.mat[new.mat$actual_cases_caused>0,] #sum(new.mat$actual_cases_caused)>remaining susceptibles #so need to pick these at random to generate new infections instead all.possible = c(rep(new.mat$employ_ids, times=new.mat$actual_cases_caused)) last.infector.ids = sample(all.possible, size=remaining.susceptibles, replace=FALSE) last.infector.ids = data.frame(last.infector.ids) names( last.infector.ids) ="employ_ids" new.dat = ddply(last.infector.ids,.(employ_ids), summarise, actual_cases_caused=length(employ_ids)) #and new.mat becomes just these new.dat$state <- new.dat$time_isolation <- new.dat$exposure_time <- NA for (i in 1:nrow(new.mat)){ new.dat$state[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$state[i] new.dat$time_isolation[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$time_isolation[i] new.dat$exposure_time[new.dat$employ_ids==new.mat$employ_ids[i]] <- new.mat$exposure_time[i] } #then, new dat takes the place of new mat new.dat.list <- dlply(new.dat, .(employ_ids)) new.dat.list <- lapply(new.dat.list, make.rows) new.dat <- data.table::rbindlist(new.dat.list) #new.dat <- do.call("rbind", new.dat.list) #now attach a generation time with 
each of these cases and a random sample from the susceptibles new.dat$generation_time <- NA index.ids = unique(new.dat$employ_ids) for(i in 1:length(index.ids )){ #print(index.ids[[i]]) new.mat$generation_time[new.mat$employ_ids == index.ids[i]] <- gen_list[[index.ids[i]]]$generation_time[1:length(new.mat$generation_time[new.mat$employ_ids == index.ids[i]])] } #now, attach a place to infect (susceptible) -- should be enough all.sus <- pop.mat$employ_ids[pop.mat$state==0] new.dat$new_infected <- sample(all.sus, size=nrow(new.dat), replace=FALSE) new.dat$new_exposure_time = new.dat$exposure_time + new.dat$generation_time #now put them into pop.mat for(i in 1:nrow(new.dat)){ #identify infector and iso time pop.mat$infector[pop.mat$employ_ids==new.dat$new_infected[i] ] <- new.dat$employ_ids[i] pop.mat$infector_iso_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$infector_iso_time[i] pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] <- pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==TRUE] + new.dat$infector_iso_time[i] pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$traced==FALSE] <- Inf #if traced==FALSE, this is never tracked #and exposure time pop.mat$exposure_time[pop.mat$employ_ids==new.dat$new_infected[i]] <- new.dat$new_exposure_time[i] #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.mat$new_infected[i]] #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] <-new.mat$new_exposure_time[i] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.mat$new_infected[i]] pop.mat$case_source[pop.mat$employ_ids==new.dat$new_infected[i]] <- "UCB" #transmission within berkeley #change state - only if exposure time is already achieved pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & 
pop.mat$exposure_time<=timestep] <- 1 #otherwise, they still stay suceptible - but you mark them pop.mat$state[pop.mat$employ_ids==new.dat$new_infected[i] & pop.mat$exposure_time>timestep] <- 3 } #else, just return pop mat } #other #now, make those that already transmitted recovered/isolated pop.mat$state[(pop.mat$state==1 & !is.na(pop.mat$actual_cases_caused))] <- 5 #and, if any of the old "pre-exposed" have reached their exposure time, you can #then, those active infections will cause no more new cases pop.mat$actual_cases_caused[pop.mat$state==1] <- 0 #and return pop.mat return(pop.mat) } get.actual.cases = function(pop.dat, dat.gen, timestep){ sub.gen =subset(dat.gen, employ_ids==unique(pop.dat$employ_ids)) #count the number of exposure time + generation time combos that take place before the iso time sub.gen$new_exposures = sub.gen$generation_time + pop.dat$exposure_time n.out = length(sub.gen$new_exposures[sub.gen$new_exposures<=pop.dat$time_isolation]) return(n.out) } get.symptom.onset <- function(dat, dat.vir, LOD){ #get titer limit symptom.lim <- as.numeric(unique(dat$titer_lim_for_symptoms)) #get the timing in the trajectory that first crosses this limit dat$time_of_symptom_onset <- min(dat.vir$time[dat.vir$V>symptom.lim]) dat$time_test_sensitive_start <- min(dat.vir$time[dat.vir$V>LOD]) dat$time_test_sensitive_end <- max(dat.vir$time[dat.vir$V>LOD]) #will return infinity if wrong return(dat) } make.rows <- function(dat){ n = dat$actual_cases_caused new.dat <- data.frame(matrix(NA, nrow=n, ncol=5) ) names(new.dat) <- c("employ_ids", "exposure_time", "actual_cases_caused", "infector_iso_time", "infector_cat")#, "time_of_test_sensitivity")#, "time_of_tracing_iso") new.dat$employ_ids <- rep(dat$employ_ids, nrow(new.dat)) new.dat$infector_iso_time <- rep(dat$time_isolation, nrow(new.dat)) new.dat$infector_cat <- rep(dat$employ_cat, nrow(new.dat)) new.dat$exposure_time <- rep(dat$exposure_time, nrow(new.dat)) #new.dat$time_of_tracing_iso <- 
rep(dat$time_of_tracing_iso, nrow(new.dat)) if(nrow(new.dat)>0){ new.dat$actual_cases_caused <- 1 return(new.dat) } #else, return nothing } delayfn_surv <- function(delay_mean, delay_sd){ out <- purrr::partial(rnorm, mean = delay_mean, sd = delay_sd) return(out) }#symptomatic surveillance/TAT delay generationTime_fn <- function(serial_dist=NULL, serial_shape = NULL, serial_scale = NULL) { if(serial_dist=="weibull"){ out <- purrr::partial(rweibull, shape = serial_shape, scale = serial_scale) }else if(serial_dist=="gamma"){ out <- purrr::partial(rgamma, shape = serial_shape, scale = serial_scale) } return(out) } #weibull or gamma serial interval as the case may be inc_fn <- function(n_inc_samp = NULL, meanInc=NULL, sdInc=NULL) { out= purrr::partial(rlnorm, meanlog = log(meanInc), sdlog = log(sdInc)) #out[out < 1] <- 1 return(out) } #lognormal incubation time draw R0_fn <- function(meanR0=NULL, sdR0=NULL){ out <- purrr::partial(rlnorm, meanlog = log(meanR0), sdlog = log(sdR0)) return(out) } #lognormal R0 R0_fn_nb <- function(muR0=NULL, sizeR0=NULL){ out <- purrr::partial(rnbinom, mu = muR0, size = sizeR0) return(out) } #nb R0 initiate.pop <- function(start.ID.employ, pop.UCB, n.init.exposed, pop.ID, within.host.theta, input.par, R0fn, eventFn, titer.dat, LOD, virus.par){ #sample serial interval genTime = generationTime_fn(serial_dist = virus.par$distribution[virus.par$parameter=="generation_time"], serial_shape= virus.par$par1[virus.par$parameter=="generation_time"], serial_scale= virus.par$par2[virus.par$parameter=="generation_time"]) pop.par = subset(input.par, population==pop.ID) #make table one pop.mat = cbind.data.frame(matrix(NA, nrow=pop.UCB, ncol =27)) names(pop.mat) <- c( "employ_ids","employ_cat", "state", "traced", "testing", "obs_dist_limits", "exposure_time", "total_potential_cases_caused", "original_potential_cases_caused_UCB", "num_infection_events", "post_titer_potential_cases_caused_UCB", "potential_cases_caused", "actual_cases_caused", "case_source", 
"infector", "time_test_sensitive_start", "time_test_sensitive_end", "infector_iso_time", "time_of_tracing_iso", "time_of_next_test", "time_of_testing_iso", "titer_lim_for_symptoms", "time_of_symptom_onset", "time_of_symptom_iso", "time_isolation", "reason_isolated", "timestep") #and fill in all you can pop.mat$testing = pop.par$par1[pop.par$parameter=="test-on"] pop.mat$timestep = 0 pop.mat$employ_cat = pop.ID #assign them all an employer ID pop.mat$employ_ids = start.ID.employ:(pop.UCB+start.ID.employ-1) #assign a "first test" depending on how many days of testing per week... test_rotation = as.character(pop.par$par1[pop.par$parameter=="test-rotation"] ) n.test.day.per.wk = as.numeric(pop.par$par1[pop.par$parameter=="n-test-days-per-week"]) #then, if this is bigger than a weekly regime, half of them must happen one week and half the other if ((test_rotation=="biweekly" & n.test.day.per.wk==2) | (test_rotation=="weekly" & n.test.day.per.wk==2)){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if((test_rotation=="biweekly" & n.test.day.per.wk==5) | (test_rotation=="weekly" & n.test.day.per.wk==5)){ pop.mat$time_of_next_test = rep(c(3,4,5,6,7), length=pop.UCB) }else if((test_rotation=="biweekly" & n.test.day.per.wk==7) | (test_rotation=="weekly" & n.test.day.per.wk==7)){ pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==7){ pop.mat$time_of_next_test = rep(c(1,2,3,4,5,6,7,8,9,10,11,12,13,14), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==5){ pop.mat$time_of_next_test = rep(c(3,4,5,6,7,10,11,12,13,14), length=pop.UCB) }else if(test_rotation=="two-week" & n.test.day.per.wk==2){ pop.mat$time_of_next_test = rep(c(3,7, 10,14), length=pop.UCB) }else if (test_rotation=="two-week-ThFri"){ pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1),seq((14-n.test.day.per.wk+1),14,1)), length=pop.UCB) #end of week. 
if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-daily"){ pop.mat$time_of_next_test = rep(c(seq(1,14,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonTues"){ pop.mat$time_of_next_test = rep(c(seq(3,4,1),seq(10,11,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-TuesWed"){ pop.mat$time_of_next_test = rep(c(seq(4,5,1),seq(11,12,1)), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonFri"){ pop.mat$time_of_next_test = rep(c(3,7,10,14), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="two-week-MonWed"){ pop.mat$time_of_next_test = rep(c(3,5,10,12), length=pop.UCB) #end of week. if 1 and 2 are weekend (sat/sun), then this is thursday/friday }else if (test_rotation=="one-week-ThFri"){ pop.mat$time_of_next_test = rep(seq((7-n.test.day.per.wk+1),7,1), length=pop.UCB) }else if (test_rotation=="one-week-MonTues"){ pop.mat$time_of_next_test = rep(seq(3,4,1), length=pop.UCB) }else if (test_rotation=="one-week-MonFri"){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if(test_rotation=="one-week-daily"){ pop.mat$time_of_next_test = rep(c(seq(1,7,1)), length=pop.UCB) }else if(test_rotation=="none"){ pop.mat$time_of_next_test=Inf }else if(test_rotation=="thrice-weekly-MonTues"){ pop.mat$time_of_next_test = rep(c(3,4,10,11,17,18), length=pop.UCB) }else if (test_rotation=="two_day"){ pop.mat$time_of_next_test = rep(c(3,7), length=pop.UCB) }else if (test_rotation=="four_week"){ pop.mat$time_of_next_test = rep(c(seq((7-n.test.day.per.wk+1),7,1), seq((14-n.test.day.per.wk+1),14,1), seq((21-n.test.day.per.wk+1),21,1), seq((28-n.test.day.per.wk+1),28,1)), length=pop.UCB) } pop.mat$time_of_next_test = 
sample(pop.mat$time_of_next_test, size=length(pop.mat$time_of_next_test), replace = FALSE) #scramble prop.traced = as.numeric(pop.par$par1[pop.par$parameter=="prop.trace"]) #for all, based on proportions, give whether traced or not pop.mat$traced = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$traced), replace=TRUE, prob=c(prop.traced, 1-prop.traced)) #and the same for proportion observign distancing limits prop.obs = as.numeric(pop.par$par1[pop.par$parameter=="percent-obs-dist-lim"]) pop.mat$obs_dist_limits = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$obs_dist_limits), replace=TRUE, prob=c(prop.obs, 1-prop.obs)) # and whether asymp or not #pop.mat$stat_asymp = sample(x=c("TRUE", "FALSE"), size=length(pop.mat$stat_asymp), replace=TRUE, prob=c(prop.asym, 1-prop.asym)) #make initial state variable pop.mat$state <- rep(as.integer(0),pop.UCB) #based on the proportion vaccinated, some get moved to recovered (state 5) right away #for all of our model runs, this is 0, so this gets skipped if(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])>0){ tot.vacc <- round(as.numeric(pop.par$par1[pop.par$parameter=="prop-vaccinated"])*pop.UCB,0) index.vacc = sample(1:pop.UCB, size=tot.vacc, replace=FALSE) pop.mat$state[index.vacc] <- 5 } #then, regardless of vaccination, overwrite susceptibles with those initially exposed #initially exposed get distributed at random index.init = sample(as.numeric(rownames(pop.mat[pop.mat$state==0,])), size=n.init.exposed, replace=FALSE) pop.mat$state[index.init] <- 1 #here, build distributions #symptomatic isolation delay delayfn_symp = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="iso-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="iso-lag"])) #turnaround testing delay delayfn_TAT = delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="TAT-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="TAT-lag"])) #contact tracing lag delayfn_trace = 
delayfn_surv(delay_mean=as.numeric(pop.par$par1[pop.par$parameter=="trace-lag"]), delay_sd= as.numeric(pop.par$par2[pop.par$parameter=="trace-lag"])) #titer limit for symptoms titer_lim = lognormal_fn(meanlogpar=log(as.numeric(pop.par$par1[pop.par$parameter=="symptom-lim"])), sdlogpar = log(as.numeric(pop.par$par2[pop.par$parameter=="symptom-lim"]))) prop.cases.UCB = as.numeric(pop.par$par1[pop.par$parameter=="prop.cases.UCB"]) #now generate potential new infections based on your status #this gives the weekend average number of infections #pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB) #pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE] = floor(R0fn.wk(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==FALSE]))*prop.cases.UCB) #and during the week, fewer cases #pop.mat$wk_tot_potential_cases_caused[pop.mat$stat_asymp==TRUE] = floor(R0fn.asym(length(pop.mat$tot_potential_cases_caused[pop.mat$stat_asymp==TRUE]))*prop.cases.UCB) #here are all possible cases pop.mat$total_potential_cases_caused = R0fn(length(pop.mat$employ_ids)) #here are all possible at UC Berkeley - before the titer cull pop.mat$original_potential_cases_caused_UCB = floor(pop.mat$total_potential_cases_caused*prop.cases.UCB) #you should have already brought in a titer trajectory for everyone in your population #choose a threshold titer for symptom onset pop.mat$titer_lim_for_symptoms = titer_lim(pop.UCB) pop.split <- dlply(pop.mat, .(employ_ids)) titer.split <- dlply(titer.dat, .(employ_ids)) #now, based on this, go into each person's virus trajectory and calculate the timing of symptom onset #while you are at it, you can also look at their titer and the LOD and calculate the start/end times for which they are test sensitive pop.split.new <- mapply(get.symptom.onset, dat = pop.split, dat.vir=titer.split, MoreArgs = list(LOD=LOD), SIMPLIFY = FALSE) #when there 
is nothing that is under the limit, these infections become "asymptomatic" -- #we can later play with the proportion that classify as this by modulating the mean value for the symptom onset limit pop.mat <- data.table::rbindlist(pop.split.new) #pop.mat <- do.call("rbind", pop.split.new) #and the delay to isolation pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset<0]<-0 pop.mat$time_of_symptom_iso = delayfn_symp(pop.UCB) pop.mat$time_of_symptom_iso[pop.mat$time_of_symptom_iso<0]<- 0 pop.mat$time_of_symptom_iso <- pop.mat$time_of_symptom_iso + pop.mat$time_of_symptom_onset pop.mat$time_of_testing_iso = delayfn_TAT(pop.UCB) pop.mat$time_of_testing_iso[pop.mat$time_of_testing_iso<0] <- 0 pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test pop.mat$time_of_tracing_iso = delayfn_trace(pop.UCB) pop.mat$time_of_tracing_iso[pop.mat$time_of_tracing_iso<0] <- 0 #now, if not traced, never: pop.mat$time_of_tracing_iso[pop.mat$traced==FALSE] <- Inf pop.mat$time_of_tracing_iso[pop.mat$state>0] <- Inf # new introductions cannot be traced pop.mat$infector[pop.mat$state>0] <- 0 # new introductions cannot be traced pop.mat$infector_iso_time[pop.mat$state>0] <- Inf # new introductions cannot be traced pop.mat$case_source[pop.mat$state>0] <- "alameda" #NOW, we generate new cases: #we break down each infectious individual based on that individual's: #(a) within-host titer trajectory, (b) the selected value for within-host theta (how viral load translates toinfection probability), #(c) the number of discrete transmission events that we draw for each person, and #(d) the generation time of those contact events #(for d, we currently use the Ferretti weibull, but we are hoping that a constant hazard of events # + the titer trajectory of the pathogen should roughly produce the expected generation time) #(1) First, for each person, we draw the number of possible cases from R0 - this equates to individual heterogeneity in infectiousness # (one type 
of superspreading) and is already captured in the "total_potential_cases_caused" column, which then gets reduced down to the # proportion in the UCB community in the "original_potential_cases_caused_UCB" column #(2) Then, we draw a number of contact events, among which the above cases get distributed. (this equates to event-based superspreading # - fewer event draws and a high number of transmissions from #1 generate the biggest superspreading events). Current, we draw this # from a Poisson with lambda=3 #(3) Then, for each "event", we draw a time that this event took place (here, represented from the generation time Weibull, though this could change) #(4) Then, for each event + time, we go into individual's titer trajectory to determine if each transmission actually # takes place, based on the person's titer load at the point of infection. Since our initial R0 is 2.5, we fix theta at .7, such that the max # probability of infection taking place is ~50% at peak viral load. If one 'event' generates multiple cases, each case is treated independently # with this titer-based transmission probability. #(5) If there is a group size limit, it gets imposed here. Say that group limit is 6 and one event is supposed to generate 10 cases. # If this person abides by group limits (there is a parameter for this), we truncate the 10 person event to a 6 person event, and assume # as a worst-case scenario that all 6 of those people get infected #first, draw number of transmission events per person pop.mat$num_infection_events <- eventFn(pop.UCB) #then get a list of event times per person for each of these events pop.list <- dlply(pop.mat, .(employ_ids)) event.times.list <- lapply(pop.list, get.event.time, genTime=genTime) # now, each person has a number of cases, a number of events, a time for each event, # and a virus titer trajectory. 
# take this information and determine which events actually take place and when they occur # also, if applicable, here impose the group size limit and record cases both before # and after that limit occurs # return the data as well as the edited event times list that replaces each # failed case generation with NA double.list <- mapply(FUN=get.real.cases, pop.dat=pop.list, event.dat=event.times.list, titer.dat1 = titer.split, MoreArgs = list(within.host.theta=within.host.theta, group.limit=as.numeric(pop.par$par1[pop.par$parameter=="group-size-limit"])), SIMPLIFY = FALSE) pop.mat.list <- sapply(double.list, "[",1) pop.mat <- data.table::rbindlist(pop.mat.list) #pop.mat <- do.call("rbind", pop.mat.list) gen_time_list <- sapply(double.list, "[",2) dat.gen <- data.table::rbindlist(gen_time_list) #dat.gen = do.call("rbind", gen_time_list) #now cases from potential get distributed among events #then we determine how many take place based on titers #then we remove those that don't take place based on group size limitation #then, we set an exposure time for those cases that actually occur pop.mat$exposure_time[pop.mat$state>0] <- 0 #first, assume that isolation time is symptomatic pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1 ]) pop.mat$time_isolation = as.numeric(pop.mat$time_isolation) pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso" #now, if testing (and, for other cases, tracing) comes first, we replace it #test needs to be AFTER start time of test sensitive and before end time of test sensitive pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso" pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & 
pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & pop.mat$time_isolation > pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] #and then, if any of these are Inf, change the reason to NA pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA #now, based on isolation time and the generation times in the vector, determine the actual number of cases caused, #then export, regroup with other half of population and assign those new infections in the next time step new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids)) #if potential cases were 0, then actual cases are too: pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0] <- 0 pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen, timestep))) #now pop it back out, join with other sub-mats and assign those infections in time and space using your generation time vector. 
return(list(pop.mat, dat.gen)) } epidemic.step = function(pop.mat, timestep, length_timestep, prob.out, gen_list, input.par){ #pop.mat <- as.data.frame(pop.mat) pop.par = subset(input.par, population ==unique(pop.mat$employ_cat)) #advance timestep pop.mat$timestep = timestep #introduce outside infections into susceptible spaces (cannot misplace those "exposed" by UCB above since we are guaranteeing those transmissions to take place) #could easily modulate this for risk cohorts in future #check if weekend # we say days 1 and 2 are testing # days ###MULTIPLE # if(timestep ==1 | timestep ==2 | (timestep%%7==1)| (timestep%%7==2)){ # n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out.wk, prob.out.wk))) #}else{ n.outside.exposures = sum(sample(x=c(0,1), size=length(pop.mat$state[pop.mat$state==0]), replace=TRUE, prob = c(1-prob.out, prob.out))) #} if(n.outside.exposures>0){ #if you find some, fill them in with an exposure time of now, distributed at random #could add in higher introduction rate for certain sub-groups in this case new.case.ids = sample(pop.mat$employ_ids[pop.mat$state==0], size = n.outside.exposures, replace=FALSE) # print(new.case.ids) #and assign for (i in 1:length(new.case.ids)){ #print(i) #print(new.case.ids[i]) #expose the new cases immediately - but only those that have reached the current timestep already #those "predestined" for exposure get passed over for now. 
pop.mat$state[pop.mat$employ_ids==new.case.ids[i]] <- 1 #pop.mat$state[pop.mat$employ_ids==new.case.ids[i] & pop.mat$stat_asymp==TRUE] <- 2 #exposure time is this timestep pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] <- timestep #infection kicks off so you can now calculat symptom onset time #pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$time_of_symptom_onset[pop.mat$employ_ids==new.case.ids[i]] + timestep #tmp <- pop.mat$time_of_test_positivity[pop.mat$employ_ids==new.case.ids[i]] #pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_end[pop.mat$employ_ids==new.case.ids[i]] #pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]] <- pop.mat$exposure_time[pop.mat$employ_ids==new.case.ids[i]] + pop.mat$time_test_sensitive_start[pop.mat$employ_ids==new.case.ids[i]] #infector is outside source that cannot be tracked pop.mat$infector[pop.mat$employ_ids==new.case.ids[i]] <- 0 pop.mat$infector_iso_time[pop.mat$employ_ids==new.case.ids[i]] <- Inf pop.mat$case_source[pop.mat$employ_ids==new.case.ids[i]] <- "alameda" #introduced cases cannot be traced pop.mat$time_of_tracing_iso[pop.mat$employ_ids==new.case.ids[i]] <- Inf } } #pop.mat <- subset(pop.mat, !is.na(employ_ids)) #now, for those that are currently exposed (both from outside and UCB), # compute distributions of iso time # and new actual cases caused. 
#then, we can assign those times and move them to recovered status pop.mat.old <- pop.mat pop.mat <- dplyr::select(pop.mat, -(actual_cases_caused)) #first, go ahead and move test postivity to the appropriate degree #pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] <- pop.mat$time_of_test_positivity[pop.mat$state==1 | pop.mat$state==2] + pop.mat$exposure_time[pop.mat$state==1 | pop.mat$state==2] #print("7") #first, assume that isolation time is symptomatic pop.mat$time_isolation[pop.mat$state==1 ] <- as.numeric(pop.mat$time_of_symptom_iso[pop.mat$state==1]) pop.mat$time_isolation = as.numeric(pop.mat$time_isolation) pop.mat$reason_isolated[pop.mat$state==1 ] <- "symptom_iso" #now, if tracing comes first, we replace it #tracing only applicable within our community #print("8") pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- "tracing_iso" pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat) ] <- pop.mat$time_of_tracing_iso[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_tracing_iso & pop.mat$traced==TRUE & complete.cases(pop.mat)] #or, finally, if testing comes first, we replace it here - IF the infection is test sensitive at the time of testing #print("9") pop.mat$reason_isolated[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- "testing_iso" pop.mat$time_isolation[pop.mat$state==1 & pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] <- pop.mat$time_of_testing_iso[pop.mat$state==1 & 
pop.mat$time_isolation> pop.mat$time_of_testing_iso & pop.mat$testing==TRUE & complete.cases(pop.mat) & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end] #and then, if any of these are Inf, change the reason to NA pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA #first, double-check that nothing was exposed after the isolation time (would be based on tracing only) #if that ever happens, that person becomes susceptible again because that infection was never generated #first flag #then, go in and find that person's infector and reduce their actual cases by one #based on this case that did not occur pop.mat$state[pop.mat$exposure_time>pop.mat$time_isolation & pop.mat$state==1 & pop.mat$reason_isolated=="tracing_iso" ] <- 7 pop.mat$reason_isolated[pop.mat$state==7] <- NA pop.mat$time_isolation[pop.mat$state==7] <- NA pop.mat$case_source[pop.mat$state==7 ] <- NA #now remove a case from the infectors that "caused" these events infector.sub1 = pop.mat[pop.mat$state==7,] infector.sum1 = ddply(infector.sub1, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end pop.mat$infector[pop.mat$state==7] <- NA pop.mat$infector_iso_time[pop.mat$state==7] <- NA pop.mat$exposure_time[pop.mat$state==7]<- NA pop.mat$state[pop.mat$state==7] <- 0 #now, based on isolation time and the generation times in the vector, determine the actual number of cases caused, #then export, and assign those new infections in the next time step #now, advance forward all of the "time of etc." 
for susceptibles #and time of next testing for all new.cases = dlply(pop.mat[pop.mat$state==1& pop.mat$potential_cases_caused>0,], .(employ_ids)) #if potential cases were 0, then actual cases are too: #pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused ==0| pop.mat$state==2 & pop.mat$potential_cases_caused ==0] <- 0 #but, if potential cases were greater than 0, then actual might be as well, depending on the isolation times #dat.gen.new = do.call("rbind", gen_list) dat.gen.new = data.table::rbindlist(gen_list) #pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused > 0| pop.mat$state==2 & pop.mat$potential_cases_caused > 0] <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep, weekend.amp=weekend.amp))) new.actual.cases <- c(unlist(lapply(new.cases, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep))) #these have not kicked off, so let them kick forward pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] <- pop.mat$time_of_symptom_onset[pop.mat$state==0 | pop.mat$state==3] + length_timestep pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_of_symptom_iso[pop.mat$state==0 | pop.mat$state==3] + length_timestep pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_start[pop.mat$state==0 | pop.mat$state==3] + length_timestep pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3 ] <- pop.mat$time_test_sensitive_end[pop.mat$state==0 | pop.mat$state==3] + length_timestep #tracing only gets started when infector iso time is assigned, so we don't touch it here #if you are at your current testing date, then next test is bumped into the future. 
#Otherwise, you just advance in time until you reach it #but the lag time is maintained after the new test date, so deal with that first pop.mat$time_of_testing_iso = pop.mat$time_of_testing_iso - pop.mat$time_of_next_test #now this is just the lag time #now, compute actual next test day if today is the test day of the runs in question - add different frequencies depending on the type pop.mat$time_of_next_test[pop.mat$time_of_next_test==timestep] <- timestep + as.numeric(pop.par$par1[pop.par$parameter=="test-freq"]) #now put the lag back on to the new test day for isolation pop.mat$time_of_testing_iso <- pop.mat$time_of_testing_iso + pop.mat$time_of_next_test pop.mat$time_of_testing_iso[pop.mat$time_of_next_test==Inf] <- Inf #and, finally, check in on those that were "pre-exposed" up above. #move them up to their appropriate status if they should be exposed now #if they reach it, go ahead and assign their actual cases #first, eliminate if they should not occur #first flag #then, go in and find that person's infector and reduce their actual cases by one #based on this case that did not occur pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 8 pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- pop.mat$time_of_tracing_iso[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] - pop.mat$infector_iso_time[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time] pop.mat$case_source[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time] <- NA #now remove a case from the infectors that "caused" these events infector.sub2 = pop.mat[pop.mat$state==8,] infector.sum2 = ddply(infector.sub2, .(infector), summarize, cases_removed = length(employ_ids)) #save this for the end pop.mat$infector[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- NA pop.mat$state[pop.mat$state==8 & pop.mat$exposure_time>pop.mat$infector_iso_time ] <- 0 
pop.mat$infector_iso_time[pop.mat$state==0] <- NA pop.mat$exposure_time[pop.mat$state==0] <- NA if (exists('infector.sum1') & exists('infector.sum2')){ infector.sum <- rbind(infector.sum1, infector.sum2) }else if(exists('infector.sum1')){ infector.sum <- infector.sum1 }else if(exists('infector.sum2')){ infector.sum <- infector.sum2 } #then, if they pass that test and still remain 'pre-exposed', check to see if they should be elevated in status to 1 or 2 #(meaning they have reached the exposure time) #if so, assign them isolation time and actual cases which get allocated in the next round. #otherwise, they just keep current status as "pre-exposed" #first make them complete cases, so that R is not angry with new columns being filled in pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- Inf pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep] <- "in progress" #first, assume that isolation time is symptomatic #print("1") pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- "symptom_iso" # print("2") pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] <- pop.mat$time_of_symptom_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat)] pop.mat$time_isolation = as.numeric(pop.mat$time_isolation) #now, if tracing comes first, we replace it #print("3") pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- "tracing_iso" # print("4") pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & complete.cases(pop.mat)] <- pop.mat$time_of_tracing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_tracing_iso<pop.mat$time_isolation & pop.mat$traced==TRUE & 
complete.cases(pop.mat)] #finally, if testing comes first, we replace it #print("5") pop.mat$reason_isolated[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- "testing_iso" #print("6") #print(pop.mat[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & complete.cases(pop.mat) ,]) #print(pop.mat) pop.mat$time_isolation[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] <- pop.mat$time_of_testing_iso[pop.mat$state==3 & pop.mat$exposure_time<=timestep & pop.mat$time_of_testing_iso<pop.mat$time_isolation & pop.mat$testing==TRUE & pop.mat$time_of_next_test>=pop.mat$time_test_sensitive_start & pop.mat$time_of_next_test<pop.mat$time_test_sensitive_end & complete.cases(pop.mat)] #and then, if any of these are Inf, change the reason to NA pop.mat$reason_isolated[pop.mat$time_isolation==Inf] <- NA #now, based on isolation time and the generation times in the vector, determine the actual number of cases caused, #then export, regroup with other half of population and assign those new infections in the next time step new.cases = dlply(pop.mat[pop.mat$state== 3& pop.mat$potential_cases_caused>0 & pop.mat$exposure_time<=timestep & complete.cases(pop.mat) ,], .(employ_ids)) #if potential cases were 0, then actual cases are too: #pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==4 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep] <- 0 tmp.dat = 
pop.mat[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ,] new.cases.2 <- dlply(tmp.dat, .(employ_ids)) new.actual.cases.3 <- c(unlist(lapply(new.cases.2, get.actual.cases, dat.gen=dat.gen.new, timestep=timestep))) #now add the actual cases back in pop.mat <- cbind.data.frame(pop.mat, pop.mat.old$actual_cases_caused) names(pop.mat)[length(names(pop.mat))] <- "actual_cases_caused" #reorder pop.mat <- dplyr::select(pop.mat, names(pop.mat.old)) #and add in the new actual cases pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep | pop.mat$state==1 & pop.mat$potential_cases_caused ==0 & pop.mat$exposure_time<=timestep ] <- 0 pop.mat$actual_cases_caused[pop.mat$state==1 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep] <- new.actual.cases pop.mat$actual_cases_caused[pop.mat$state==3 & pop.mat$potential_cases_caused >0 & pop.mat$exposure_time<=timestep ] <- new.actual.cases.3 #and, finally, change state so these cases can get allocated in the next round. 
pop.mat$state[pop.mat$state==3 & pop.mat$exposure_time<=timestep ] <- 1 #and remove any avoided cases if there were some if (exists('infector.sum')){ if(nrow(infector.sum)>0){ for(i in 1:length(infector.sum$infector)){ pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] <- pop.mat$actual_cases_caused[pop.mat$employ_ids==infector.sum$infector[i]] - infector.sum$cases_removed[i] } } } #and return return(pop.mat) } get.mean.sd <- function(vector, name){ #first, trim to same length min.length <- min(unlist(lapply(vector, length))) for (i in 1:length(vector)){ vector[[i]] <- vector[[i]][1:min.length] } vec <- unlist(vector, use.names = FALSE) DIM <- c(length(vector[[1]]),1) n <- length(vector) list.mean <- tapply(vec, rep(1:prod(DIM),times = n), mean) attr(list.mean, "dim") <- DIM list.mean <- as.data.frame(list.mean) list.sd <- tapply(vec, rep(1:prod(DIM),times = n), sd) attr(list.sd, "dim") <- DIM list.sd <- as.data.frame(list.sd) list.uci = list.mean + 1.96*list.sd list.lci = list.mean - 1.96*list.sd list.lci[list.lci<0] <- 0 list.uci[list.uci<0] <- 0 dat= cbind.data.frame(list.mean, list.lci, list.uci) names(dat) = paste(c("mean", "lci", "uci"), name, sep="_") return(dat) } get.mean.matrix <- function(mat){ #first, trim to same length min.length <- min(unlist(lapply(mat, nrow))) n.cat = ncol(mat[[1]])/3 for (i in 1:length(mat)){ mat[[i]] <- mat[[i]][1:min.length,] } list.mean <- Reduce("+",mat) / length(mat) mat.2 <- do.call("cbind", mat) #mat.2 <- data.table::rbindlist(mat) list.sd <- apply(mat.2, 1, sd) list.uci = list.mean + 1.96*list.sd list.lci = list.mean - 1.96*list.sd list.lci[list.lci<0] <- 0 list.uci[list.uci<0] <- 0 dat= cbind.data.frame(list.mean, list.lci, list.uci) names(dat) = c(paste0("mean_iso_cat_",seq(1,n.cat,1)), paste0("lci_iso_cat_",seq(1,n.cat,1)), paste0("uci_iso_cat_",seq(1,n.cat,1)), paste0("mean_exp_cat_",seq(1,n.cat,1)), paste0("lci_exp_cat_",seq(1,n.cat,1)), paste0("uci_exp_cat_",seq(1,n.cat,1)), 
paste0("mean_deaths_cat_",seq(1,n.cat,1)), paste0("lci_deaths_cat_",seq(1,n.cat,1)), paste0("uci_deaths_cat_",seq(1,n.cat,1))) return(dat) } convert.cat = function(dat){ n.cat = ncol(dat)/9 max.times = nrow(dat) iso.dat = dat[,1:(n.cat*3)] exp.dat = dat[,(n.cat*3+1):(n.cat*3*2)] death.dat = dat[,(n.cat*3*2+1):ncol(dat)] #then, sep by cat list.iso <- list.exp <- list.deaths <- list() for(i in 1:n.cat){ list.iso[[i]] <- cbind.data.frame(iso.dat[,i], iso.dat[,i+n.cat],iso.dat[,i+(n.cat*2)]) list.exp[[i]] <- cbind.data.frame(exp.dat[,i],exp.dat[,i+n.cat],exp.dat[,i+(n.cat*2)]) list.deaths[[i]] <- cbind.data.frame(death.dat[,i], death.dat[,i+n.cat],death.dat[,i+(n.cat*2)]) } #iso.db <- do.call("rbind", list.iso) iso.db <- data.table::rbindlist(list.iso) iso.db$type = rep(1:n.cat, each = max.times) iso.db$type <- paste0("iso-pop-", iso.db$type) names(iso.db) <- c("mean", "lci", "uci", "type") #exp.db <- do.call("rbind", list.exp) exp.db <- data.table::rbindlist(list.exp) exp.db$type = rep(1:n.cat, each = max.times) exp.db$type <- paste0("exp-pop-", exp.db$type) names(exp.db) <- c("mean", "lci", "uci", "type") #death.db <- do.call("rbind", list.deaths) death.db <- data.table::rbindlist(list.deaths) death.db$type = rep(1:n.cat, each = max.times) death.db$type <- paste0("death-pop-", death.db$type) names(death.db) <- c("mean", "lci", "uci", "type") return(list(iso.db, exp.db, death.db)) } R.fit.sum <- function(mat.df){ #apply across all columns mean.all <- apply(mat.df, 2,mean) sd.all <- apply(mat.df, 2,sd) lci.all <- mean.all-1.96*sd.all lci.all[ lci.all < 0] <- 0 uci.all <- mean.all+1.96*sd.all #and nbinom fit all.fit <- apply(mat.df, 2, fitdist, distr="nbinom") #and return out.dat <- cbind.data.frame(mean.all, lci.all, uci.all) out.dat$class <- names(mat.df) #names(out.dat) <- names(mat.df) #out.dat$estimate <- c("mean", "lci", "uci") #out.dat[out.dat<0] <- 0 #and add fit size.out <- list() mu.out <- list() for(i in 1:length(all.fit)){ size.out[[i]] <- 
all.fit[[i]]$estimate[1] mu.out[[i]] <- all.fit[[i]]$estimate[2] } size.out <- c(unlist(size.out)) mu.out <- c(unlist(mu.out)) out.dat$nb_mu <- mu.out out.dat$nb_size <- size.out # names(size.out) <- names(mu.out) <- names(out.dat) # out.dat <- rbind(out.dat, size.out, mu.out) # # out.dat$total_potential_cases <- as.numeric(out.dat$total_potential_cases) # out.dat$UCB_potential_cases <- as.numeric(out.dat$UCB_potential_cases) # out.dat$UCB_post_group_potential_cases <- as.numeric(out.dat$UCB_post_group_potential_cases) # out.dat$UCB_post_titer_potential_cases <- as.numeric(out.dat$UCB_post_titer_potential_cases) # out.dat$UCB_post_isolations_actual_cases <- as.numeric(out.dat$UCB_post_isolations_actual_cases) # return(out.dat) } R.fit.sum.lognorm <- function(mat.df){ #apply across all columns mean.all <- apply(mat.df, 2,mean) sd.all <- apply(mat.df, 2,sd) lci.all <- mean.all-1.96*sd.all lci.all[ lci.all < 0] <- 0 uci.all <- mean.all+1.96*sd.all #and return out.dat <- cbind.data.frame(mean.all, lci.all, uci.all) out.dat$class <- names(mat.df) #names(out.dat) <- names(mat.df) #out.dat$estimate <- c("mean", "lci", "uci") #out.dat[out.dat<0] <- 0 # return(out.dat) } simulate.epidemic <- function(input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, titer.dat, LOD, test_rotation_name){ if (virus.par$distribution[virus.par$parameter=="R0"]=="log-normal"){ #sample R0 normal R0fn = R0_fn(meanR0=virus.par$par1[virus.par$parameter=="R0"], sdR0=virus.par$par2[virus.par$parameter=="R0"]) }else if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){ #sample R0 normal R0fn = R0_fn_nb(muR0=virus.par$par1[virus.par$parameter=="R0"], sizeR0=virus.par$par2[virus.par$parameter=="R0"]) } #and the number of transmission events, from a negbinom #remember that fewer events = higher likelihood of a big superspreading event. 
#but the vast majority of people have both few events and few cases eventFn = poisson_fn(lambda =as.numeric(input.par$par1[input.par$parameter=="transmission-events"])) #and normal distribution of the detection limit #then, form your new populations #now split the population based on risk tot.pop = length(input.pop) pop.num = 1:tot.pop titer.dat$cat <- NA for (i in 1:(length(pop.num)-1)){ titer.dat$cat[titer.dat$employ_ids < employ.id.vector [i+1] & titer.dat$employ_ids >= employ.id.vector [i]] <- pop.num[i] } titer.dat$cat[is.na(titer.dat$cat)] <- pop.num[length(pop.num)] #and split titer.dat.split <- dlply(titer.dat, .(cat)) #make the proper number of pop.mat depending on the total number of subpopulations #populate each using the appropriate parameters out.list = mapply(FUN=initiate.pop, start.ID.employ = as.list(employ.id.vector), pop.UCB=as.list(input.pop), n.init.exposed= as.list(n.init.exposed.vector), pop.ID = as.list(pop.num), titer.dat=titer.dat.split, MoreArgs= list(input.par=input.par, virus.par=virus.par, R0fn=R0fn, eventFn=eventFn, within.host.theta=within.host.theta, LOD=LOD)) pop.list = out.list[1,] gen_list_long <- out.list[2,] #original.r0 <- out.list[3,][[1]] #gen_list_long_wkend <- out.list[3,] #pop.mat <- do.call("rbind", pop.list) pop.mat <- data.table::rbindlist(pop.list) #gen.dat.all <- do.call("rbind", gen_list_long) gen.dat.all <- data.table::rbindlist(gen_list_long) #now, double-check that the generation time dataframe is the same length as the number of unique employ ids if(sum(setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids))>0){ missing.ids <- setdiff(pop.mat$employ_ids, gen.dat.all$employ_ids) missing.cases <- list() for(i in 1:length(missing.ids)){ missing.cases[[i]] <- pop.mat$potential_cases_caused[pop.mat$employ_ids==missing.ids[i]] } missing.cases <- c(unlist(missing.cases)) if(sum(missing.cases)>0){ missing.gen <- genTime(missing.cases) add.dat <- cbind.data.frame(rep(missing.ids, missing.cases), missing.gen) }else{ 
missing.gen <- rep(NA, length(missing.cases)) add.dat <- cbind.data.frame(missing.ids, missing.gen) } names(add.dat) <- names(gen.dat.all) gen.dat.all <- rbind(gen.dat.all, add.dat) gen.dat.all <- arrange(gen.dat.all, employ_ids) } gen_list = dlply(gen.dat.all, .(employ_ids)) #gen_list_wk = dlply(gen.dat.all.wk, .(employ_ids)) foi.bay.area = initial.R*bay.area.prev*length_timestep #rate per day at which susceptibles become infected #foi.wkend = bay.area.R*bay.area.prev*length_timestep*weekend.amp prob.outside.exposure =1-(exp(-1*foi.bay.area)) #for each person in berkeley, this is the probability of getting exposed each day prob.outside.exposure[prob.outside.exposure<0] <- 0 #prob.outside.exposure.wk =1-(exp(-1*foi.wkend)) #could also be a vector times_vect = seq(length_timestep,times, by = length_timestep) for(i in 1: length(times_vect)){ #print(i) timestep = times_vect[i] #could make other functions here if people mostly infect their own subgroups #here, we distribute the infections amongst new people and retire the old pop.mat = assign.infections(pop.mat = pop.mat, gen_list=gen_list, timestep = timestep, input.par = input.par) #now split it by population to introduce outside exposures pop.split = dlply(pop.mat, .(employ_cat)) pop.mat.list = lapply(pop.split, FUN=epidemic.step, timestep= timestep, prob.out = prob.outside.exposure, gen_list=gen_list, input.par=input.par, length_timestep = length_timestep) #then, rejoin #pop.mat = do.call("rbind", pop.mat.list)#print(i) pop.mat = data.table::rbindlist(pop.mat.list) #then, just keep tabs that there are enough susceptibles to fill the new cases in the next step remaining.susceptibles = length(pop.mat$state[pop.mat$state==0]) future.cases = sum(pop.mat$actual_cases_caused[pop.mat$state==1]) if(future.cases>remaining.susceptibles){ #if there are not enough susceptibles left for all of the assigned cases before you reach the end of the time series, then you go into the next step #print(i) pop.mat = 
assign.last.infections(pop.mat = pop.mat, gen_list = gen_list, remaining.susceptibles = remaining.susceptibles, timestep = timestep) #print(i) } } #collect all the "R" reduction info: R.mat <- dplyr::select(pop.mat, total_potential_cases_caused, original_potential_cases_caused_UCB, post_titer_potential_cases_caused_UCB, potential_cases_caused, actual_cases_caused) names(R.mat) <- c( "total_potential_cases", "UCB_potential_cases", "UCB_post_titer_potential_cases", "UCB_post_group_potential_cases", "UCB_post_isolations_actual_cases") R.mat <- arrange(R.mat, desc(total_potential_cases)) R.mat$UCB_post_isolations_actual_cases[is.na(R.mat$UCB_post_isolations_actual_cases)] <- 0 #R.mat <- as.matrix(R.mat) # #new R0 # new.R0 = subset(pop.mat, !is.na(infector)) # new.R0 = ddply(new.R0, .(infector), summarize, cases_caused=length(employ_ids)) # tot.introductions = new.R0$cases_caused[new.R0$infector=="0"] # new.R0 = subset(new.R0, infector!="0") # # maxID = max(pop.mat$employ_ids) # missing_ids <- (1:maxID)[!(1:maxID %in% new.R0$infector)] # # # add in missing days if any are missing # if (length(missing_ids > 0)) { # R0comp <- data.table::rbindlist(list(new.R0, # data.table(infector = missing_ids, # cases_caused = 0))) # } # # R0comp <- arrange(R0comp, infector) # # #now add back in those cases not at UCB... # #original.r0$actual_cases_caused_UCB <- R0comp$cases_caused #get prop.asymptomatic at this cutoff prop.asym <- length(pop.mat$time_of_symptom_onset[pop.mat$time_of_symptom_onset==Inf])/length(pop.mat$time_of_symptom_iso) #from here, compute Reffective R.dat = dplyr::select(pop.mat, employ_ids, infector, time_isolation, case_source) R.dat = arrange(R.dat, time_isolation) #icidence will just be cases by time isolated #if not isolated, you don't count for incidence... R.dat = R.dat[!is.na(R.dat$time_isolation),] R.dat$time_isolation = ceiling(R.dat$time_isolation) #could add source. 
don't for now R.sum = ddply(R.dat, .(time_isolation), summarise, length(employ_ids)) #R.sum = ddply(R.dat, .(time_isolated, source), summarise, length(employ_ids)) names(R.sum) = c( "day", "incidence") #plot as incidence #plot(as.incidence(R.sum$incidence, dates = R.sum$day)) #this will go in as your incidence data #now add in pairs to estimate the serial interval #T <- nrow(R.sum) #t_start <- seq(2, T-13) # starting at 2 as conditional on the past observations #t_end <- t_start + 13 # # R.est = estimate_R(R.sum$incidence, # method="parametric_si", # config = make_config(list(#t_start = t_start, # #t_end = t_end, # mean_si = serial_mean, std_si = serial_sd))) # # #plot(R.est, "R") # #get midpoint and R values and extract # R.out = cbind.data.frame(get.midpoint(par.low = R.est$R$t_start, par.hi = R.est$R$t_end), R.est$R$`Mean(R)`) # names(R.out) = c("day", "Reffective") # #and try it based on pairs pop.mat = data.table(pop.mat) #now, get broad incidence data to report UCB.mat = subset(pop.mat, case_source=="UCB") alameda.mat = subset(pop.mat, case_source=="alameda") symp.mat = subset(pop.mat, reason_isolated=="symptom_iso") trace.mat = subset(pop.mat, reason_isolated=="tracing_iso") test.mat = subset(pop.mat, reason_isolated=="testing_iso") daily_exposures <- pop.mat[, day := ceiling(exposure_time) #time_isolated ][, .(daily_exposures = .N), by = day ] # #daily isolations daily_isolations <- pop.mat[, day := ceiling(time_isolation) # ][, .(daily_isolations = .N), by = day ] daily_cal <- UCB.mat[, day := ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] daily_alameda <- alameda.mat[, day := ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] daily_symp <- symp.mat[, day := ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] daily_trace <- trace.mat[, day := ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] daily_test <- test.mat[, day := 
ceiling(time_isolation) #time_isolated ][, .(daily_isolations = .N), by = day ] # maximum outbreak day max_day <- ceiling(times) # days with 0 cases in 0:max_week #missing_days <- (0:max_day)[!(0:max_day %in% daily_isolations$day)] missing_days <- (0:max_day)[!(0:max_day %in% daily_exposures$day)] # add in missing days if any are missing if (length(missing_days > 0)) { daily_cases <- data.table::rbindlist(list(daily_exposures, data.table(day = missing_days, daily_exposures = 0))) } #reorder as appropriate #daily_cases <- arrange(daily_cases, day) # order and sum up daily_cases <- daily_exposures[order(day) ][, cumulative := cumsum(daily_exposures)] # cut at max_week daily_cases <- daily_cases[day<=max_day] # and isoaltions daily_cases$daily_isolations <- 0 for (i in 1:length(daily_isolations$day)){ daily_cases$daily_isolations[daily_cases$day==daily_isolations$day[i]] <- daily_isolations$daily_isolations[i] } #and cumulative isolations daily_cases$cumulative_iso = cumsum(daily_cases$daily_isolations) # #and cases in UCB vs out daily_cases$daily_UCB_isolations <- 0 for (i in 1:length(daily_cal$day)){ daily_cases$daily_UCB_isolations[daily_cases$day==daily_cal$day[i]] <- daily_cal$daily_isolations[i] } # # #and cases in UCB vs out daily_cases$daily_alameda_isolations <- 0 for (i in 1:length(daily_alameda$day)){ daily_cases$daily_alameda_isolations[daily_cases$day==daily_alameda$day[i]] <- daily_alameda$daily_isolations[i] } daily_cases$daily_symptomatic_isolations <- 0 for (i in 1:length(daily_symp$day)){ daily_cases$daily_symptomatic_isolations[daily_cases$day==daily_symp$day[i]] <- daily_symp$daily_isolations[i] } daily_cases$daily_tracing_isolations <- 0 for (i in 1:length(daily_trace$day)){ daily_cases$daily_tracing_isolations[daily_cases$day==daily_trace$day[i]] <- daily_trace$daily_isolations[i] } daily_cases$daily_testing_isolations <- 0 for (i in 1:length(daily_test$day)){ daily_cases$daily_testing_isolations[daily_cases$day==daily_test$day[i]] <- 
daily_test$daily_isolations[i] } # # #now attach R-effective # daily_cases$Reffective = NA # # for(i in 1:nrow(R.out)){ # daily_cases$Reffective[daily_cases$day==R.out$day[i]] <- R.out$Reffective[i] # } # #add category pop.mat.cat= dlply(pop.mat, .(employ_cat)) new_col <- lapply(pop.mat.cat, FUN=add.risk.cat, pop_dat=daily_cases) #and also the daily exposures new_col2 <- lapply(pop.mat.cat, FUN=add.risk.cat.exp, pop_dat=daily_cases, input_par=input.par) new_col_exp <- sapply(new_col2, "[", 1) new_col_deaths <- sapply(new_col2, "[", 2) #tmp = data.table::rbindlist(new_col) tmp = as.data.frame(do.call("cbind", new_col)) names(tmp) <- paste0("isolations-employ-cat-", unique(input.par$population)) tmp2 = as.data.frame(do.call("cbind", new_col_exp)) #tmp2 = data.table::rbindlist(new_col_exp) names(tmp2) <- paste0("exposures-employ-cat-", unique(input.par$population)) tmp3 = as.data.frame(do.call("cbind", new_col_deaths)) #tmp3 = data.table::rbindlist(new_col_deaths) names(tmp3) <- paste0("deaths-employ-cat-", unique(input.par$population)) #and attach to daily cases daily_cases <- cbind.data.frame(daily_cases, tmp, tmp2, tmp3) # #finally, calculate some summary statistics from the epidemic # tot.exposures = sum(daily_cases$daily_exposures, na.rm=T) # tot.isolations = sum(daily_cases$daily_isolations, na.rm=T) # #time.to.control = max(daily_cases$day[!is.na(daily_cases$Reffective)]) # max.exposures.per.day = max(daily_cases$daily_exposures, na.rm=T) # mean.exposures.per.day = mean(daily_cases$daily_exposures, na.rm=T) # max.iso.per.day = max(daily_cases$daily_isolations, na.rm=T) # mean.iso.per.day = mean(daily_cases$daily_isolations, na.rm=T) # time.of.peak.iso = min(daily_cases$day[daily_cases$daily_isolations==max(daily_cases$daily_isolations, na.rm=T)]) # time.of.peak.exposure = min(daily_cases$day[daily_cases$daily_exposures==max(daily_cases$daily_exposures, na.rm=T)]) # #and report out the max day before your cases are too few to calculate Reffective #out.stat <- 
c(tot.exposures, tot.isolations, max.exposures.per.day, mean.exposures.per.day, max.iso.per.day, mean.iso.per.day, time.of.peak.exposure, time.of.peak.iso) #names(out.stat) <- c("total_exposures", "total_isolations", "max_exp_per_day", "mean_exp_per_day", "max_iso_per_day", "mean_iso_per_day", "time_peak_exposure", "time_peak_isolation") pop.mat$LOD <- LOD #add TAT if this is a single population model, but if it is mixed in a multipop, note that if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ pop.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ pop.mat$TAT <- "multiple" } pop.mat$test_rotation <- test_rotation_name return(list(daily_cases,pop.mat, prop.asym, R.mat)) } replicate.epidemic = function(n.reps, input.pop, n.init.exposed.vector, employ.id.vector, times, virus.par, input.par, burnin, test.freq, length_timestep, bay.area.prev, initial.R, within.host.theta, test_rotation_name, LOD, titer.dat){ out = replicate(n.reps, simulate.epidemic(virus.par = virus.par, input.par = input.par, input.pop=input.pop, n.init.exposed.vector=n.init.exposed.vector, times=times, bay.area.prev = bay.area.prev, initial.R = initial.R, within.host.theta = within.host.theta, burnin = burnin, length_timestep=length_timestep, employ.id.vector =employ.id.vector, LOD = LOD, titer.dat = titer.dat, test_rotation_name = test_rotation_name), simplify = "array") #make list out.time<- out.daily <- out.cal <- out.iso <- out.cumulative <- out.ala <- out.symp <- out.trace <- out.test <- out.iso <-out.cum.iso <- pop.mat.chain <- out.prop.asym <- R.mat.out <- list() #and make list of all the categories of sub-pop out.cat <- list() for (i in 1:ncol(out)){ #tmp <- data.table::cbindlist(out[,i][[1]]) tmp <- do.call("cbind", out[,i][[1]]) out.time[[i]] <- c(unlist(tmp[,1])) out.daily[[i]] <- c(unlist(tmp[,2])) out.cumulative[[i]] <- c(unlist(tmp[,3])) out.iso[[i]] <- c(unlist(tmp[,4])) out.cum.iso[[i]] <- c(unlist(tmp[,5])) out.cal[[i]] <- 
c(unlist(tmp[,6])) out.ala[[i]] <- c(unlist(tmp[,7])) out.symp[[i]] <- c(unlist(tmp[,8])) out.trace[[i]] <- c(unlist(tmp[,9])) out.test[[i]] <- c(unlist(tmp[,10])) #out.R[[i]] <- c(unlist(tmp[,11])) out.cat[[i]] <- cbind(unlist(tmp[,11:(10+(length(unique(input.par$population)))*3)])) #and save a chain of pop.mat tmp2 <- out[,i][[2]] pop.mat.chain[[i]] <- tmp2 #and the prop.asym tmp3 <- out[,i][[3]] out.prop.asym[[i]] <- tmp3 tmp4 <- out[,i][[4]] rownames(tmp4) <- c() R.mat.out[[i]] <- tmp4 #unique(input.par$population) } #now shorten them all to the same length and get mean + sd #print(out.time) mean.time = get.mean.sd(vector= out.time, name = "day")[,1] #print(out.daily) mean.daily = get.mean.sd(vector=out.daily, name = "exposures") #print(out.cumulative) mean.cumulative= get.mean.sd(vector=out.cumulative, name = "cumulative") #print(out.cal) mean.cal = get.mean.sd(vector=out.cal, name="UCB") #print(out.ala) mean.ala = get.mean.sd(vector=out.ala, name = "AlamedaCo") #print(out.low) mean.symp = get.mean.sd(vector=out.symp, name="symptomatic_iso") mean.trace = get.mean.sd(vector=out.trace, name="tracing_iso") mean.test = get.mean.sd(vector=out.test, name="testing_iso") #print(out.iso) mean.iso = get.mean.sd(vector=out.iso, name = "isolations") #print(out.cum.iso) mean.cum.iso = get.mean.sd(vector=out.cum.iso, name = "cumulative_isolations") #print(out.sum) #mean.sum = get.mean.sd.summary(out.sum) #and the employ-cat mean.cat = get.mean.matrix(mat=out.cat) #print(out.hi) mean.dat = cbind.data.frame(mean.time, mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace,mean.test, mean.iso, mean.cum.iso, mean.cat)#, mean.R) names(mean.dat)[1] = "day" #all of the descriptors can now change within the pop mean.dat$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.dat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.dat$TAT <- "multiple" } mean.dat$test_rotation <- test_rotation_name 
#mean.dat$prop_asym = prop.asym mean.dat$virus_par = unique(virus.par$version) mean.dat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) avg.prop.asym <- mean(c(unlist(out.prop.asym))) mean.dat$prop_asym= avg.prop.asym #and the long version mean.daily$type = "all_exposures" mean.cumulative$type = "cumulative" mean.cal$type = "UCB" mean.ala$type = "AlamedaCo" mean.symp$type = "symptomatic_iso" mean.trace$type = "tracing_iso" mean.test$type = "testing_iso" #mean.R$type = "Reffective" mean.iso$type= "isolations" #don't bother with employ-cat 00 can add later if needed mean.cat.long.list = convert.cat(mean.cat) mean.cat.long = data.table::rbindlist(mean.cat.long.list) #mean.cat.long = do.call("rbind", mean.cat.long.list) names(mean.daily) <- names(mean.cumulative) <- names(mean.cal) <- names(mean.ala) <- names(mean.symp) <- names(mean.trace) <- names(mean.test) <- names(mean.iso) <- c("mean", "lci", "uci", "type") #<- names(mean.R) mean.long <- rbind(mean.daily, mean.cumulative, mean.cal, mean.ala, mean.symp, mean.trace, mean.test, mean.iso, mean.cat.long)#, mean.R) n.cat = length(input.pop) mean.long$day = c(rep(mean.time, (8+(3*n.cat))))#, mean.time[-1]) mean.long$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.long$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.long$TAT <- "multiple" } mean.long$test_rotation <- test_rotation_name #mean.long$prop_asym = prop.asym mean.long$virus_par = unique(virus.par$version) mean.long$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) mean.long$prop_asym = avg.prop.asym # mean.sum$sim_cat = sim_cat # #mean.sum$prop_asym = prop.asym # mean.sum$virus_par = unique(virus.par$version) # # mean.sum$superspread = superspread # mean.sum$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) # # # #and summarize R # mean.R = summarise.R(out.list.R=out.R, day.vec = mean.dat$day, 
n.reps=n.reps) # mean.R$LOD <- LOD # mean.R$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) # mean.R$test_rotation <- test_rotation_name # #mean.R$sim_cat = sim_cat # #mean.R$prop_asym = prop.asym # mean.R$virus_par = unique(virus.par$version) # # mean.R$superspread = superspread # mean.R$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) # # mean.R$prop_asym <- avg.prop.asym # # # mean.R.mat = manage.R.matrix(mat.list=R.mat.out) #and do the best you can with the R-output #put it all together #R.mat.use <- do.call("rbind", R.mat.out) R.mat.use <- data.table::rbindlist(R.mat.out) R.mat.use <- arrange(R.mat.use, total_potential_cases) if(virus.par$distribution[virus.par$parameter=="R0"]=="negbinom"){ mean.R.mat = R.fit.sum(R.mat.use) }else{ mean.R.mat = R.fit.sum.lognorm(R.mat.use) } rownames(mean.R.mat) <- c() mean.R.mat$LOD <- LOD if(length(unique(input.par$par1[input.par$parameter=="TAT-lag"]))==1){ mean.R.mat$TAT <- unique(input.par$par1[input.par$parameter=="TAT-lag"]) }else{ mean.R.mat$TAT <- "multiple" } mean.R.mat$test_rotation <- test_rotation_name mean.R.mat$virus_par = unique(virus.par$version) mean.R.mat$distance_limit = unique(input.par$par1[input.par$parameter=="group-size-limit"]) mean.R.mat$prop_asym <- avg.prop.asym #return these summaries and the list of pop.mats return(list(mean.dat, mean.long, pop.mat.chain, mean.R.mat)) } pop.par.base$par1[pop.par.base$parameter=="TAT-lag"] <- 1 pop.par.base$par2[pop.par.base$parameter=="TAT-lag"] <- .5 pop.par.base$par1[pop.par.base$parameter=="test-rotation"] <- "two-week" pop.par.base$par1[pop.par.base$parameter=="n-test-days-per-week"] <- 7 pop.par.base$par1[pop.par.base$parameter=="test-on"] <- TRUE pop.par.base$par1[pop.par.base$parameter=="test-freq"] <- 14 pop.par.base$par1[pop.par.base$parameter=="trace-lag"] <- 1 pop.par.base$par2[pop.par.base$parameter=="trace-lag"] <- .5 pop.par.base$par1[pop.par.base$parameter=="prop.trace"] <- .9 
# ---- Scenario configuration and run: "two-week-7-test-days" ----
# Isolation lag: par1/par2 are the mean (1 day) and sd (0.5) of the delay
# between a case being flagged and actually entering isolation.
pop.par.base$par1[pop.par.base$parameter=="iso-lag"] <- 1
pop.par.base$par2[pop.par.base$parameter=="iso-lag"] <- .5
# Half of each infector's transmissions are assumed to occur within the
# modeled (UCB) population; the rest land in the outside community.
pop.par.base$par1[pop.par.base$parameter=="prop.cases.UCB"] <- .5

# Run 100 stochastic replicates of the epidemic simulation.
# NOTE(review): `virus.par` and `titer.dat` must already exist in the
# workspace — they are defined elsewhere in this script.
out = replicate.epidemic(n.reps = 100,
                         virus.par = virus.par,
                         input.par = pop.par.base,
                         input.pop=c(20000),#2000      # single sub-population of 20,000 (smaller test value was 2000)
                         n.init.exposed.vector=c(100),#10  # 100 initially exposed (test value was 10)
                         times=365*2,                  # simulate two years of daily time steps
                         bay.area.prev = .1/100,       # 0.1% assumed outside-community prevalence
                         initial.R = 2.5,
                         within.host.theta = .72,
                         burnin = 0,
                         length_timestep=1,            # one day per step
                         employ.id.vector = c(1),      # employ-id offset for the single sub-population
                         LOD=(10^1),                   # test limit of detection (titer units) — presumably copies/ml; confirm upstream
                         titer.dat = titer.dat,
                         test_rotation_name = "two-week-7-test-days")
# Persist the replicate summaries (object must be named `out` for downstream load()).
save(out, file = "two-week-7-test-days-trace-1-symp.Rdata")
# Digit Recognizer: Kaggle Competition
#
# The data directory contains 3 files:
#  - test.csv
#  - train.csv
#  - sample_submission.csv

##### Clean Up Workspace #####
rm(list = ls())   # remove previous workspace
gc(reset = TRUE)  # garbage collection

##### Install and/or Load Packages #####

# Install (if missing) and attach a package given as a bare, unquoted name.
packages <- function(x, repos = "http://cran.r-project.org", ...) {
  x <- deparse(substitute(x))
  if (!require(x, character.only = TRUE)) {
    install.packages(pkgs = x, dependencies = TRUE, repos = repos, ...)
    library(x, character.only = TRUE)
  }
}

# Load libraries
packages(doParallel)    # parallel computing
packages(foreach)       # parallel computing
packages(reshape2)      # manipulate datasets
packages(dplyr)         # splitting, applying, and combining data
packages(boot)          # contains cv.glm
packages(leaps)         # for regsubsets
packages(ggplot2)
packages(glmnet)
packages(forcats)
packages(caret)
packages(randomForest)
packages(e1071)
packages(h2o)

##### DEFINE FUNCTIONS #####

# Render one 28x28 digit from a 784-element pixel vector (grayscale, flipped
# so the image appears upright).
plotDigit <- function(x, zlim = c(-1, 1)) {
  cols <- gray.colors(100)[100:1]
  image(matrix(x, nrow = 28)[, 28:1], col = cols, zlim = zlim, axes = FALSE)
}

# Misclassification rate for predicted vs. true class labels.
# If produceOutput is TRUE, returns the rate; otherwise prints the rate and
# the confusion matrix.
miss.class <- function(pred.class, true.class, produceOutput = FALSE) {
  confusion.mat <- table(pred.class, true.class)
  if (produceOutput) {
    return(1 - sum(diag(confusion.mat)) / sum(confusion.mat))
  } else {
    print('miss-class')
    print(1 - sum(diag(confusion.mat)) / sum(confusion.mat))
    print('confusion mat')
    print(confusion.mat)
  }
}

##### SET UP DATA #####

# Import train and test data
train <- read.csv("train.csv")
test <- read.csv("test.csv")

# Convert label to factor for classification
train[, 1] <- as.factor(train[, 1])

# Create matrices
X <- as.matrix(train[, -1])
Y <- train[, 1]
X_0 <- as.matrix(test)

# Start a local h2o cluster
localH2O <- h2o.init(max_mem_size = '6g', nthreads = -1)

# Create H2O frames
train.h2o <- as.h2o(train)
test.h2o <- as.h2o(test)

##### DATA EXPLORATION #####

# Plot an image
plotDigit(X[1, ])    # from training set
plotDigit(X_0[1, ])  # from test set
# FIX(review): removed a syntactically broken stub `for (i in 1:nrow()) #`
# that called nrow() with no argument and had no loop body.

##### Missing values, duplicate data, etc. #####
str(train)                                        # structure: obs, variables, types
any(is.na(train))                                 # TRUE if any missing values exist
colSums(sapply(train, is.na))                     # missing values per column
sum(is.na(train)) / (nrow(train) * ncol(train))   # fraction of values missing
nrow(train) - nrow(unique(train))                 # number of duplicate rows

##### MODELS #####

# Set timer
s <- proc.time()

### Random forest (h2o)
start <- proc.time()[3]
# FIX: spell out `ntrees` (the h2o argument name) instead of relying on
# partial matching of `ntree`.
rf.h2o <- h2o.randomForest(x = names(train.h2o[, -1]), y = "label",
                           train.h2o, ntrees = 50)
stop <- proc.time()[3]
print(stop - start)
yhat.rf <- as.numeric(h2o.predict(rf.h2o, newdata = test.h2o, type = 'class'))

### Deep learning (h2o)
start <- proc.time()[3]
nn.h2o <- h2o.deeplearning(x = 2:785, y = 1,
                           training_frame = train.h2o,
                           activation = "RectifierWithDropout",
                           input_dropout_ratio = 0.2,
                           hidden_dropout_ratios = c(0.5, 0.5),
                           balance_classes = TRUE,
                           hidden = c(100, 100),
                           momentum_stable = 0.99,
                           nesterov_accelerated_gradient = TRUE,
                           epochs = 15)
stop <- proc.time()[3]
print(stop - start)
h2o.confusionMatrix(nn.h2o)
yhat.nn <- h2o.predict(nn.h2o, test.h2o)
nn.pred <- as.data.frame(yhat.nn)
nn.pred <- data.frame(ImageId = seq_along(nn.pred$predict),
                      Label = nn.pred$predict)
write.csv(nn.pred, "nn_predictions.csv", row.names = FALSE, quote = FALSE)

### Random Forest (randomForest package)
start <- proc.time()[3]
rf.out <- randomForest(x = X, y = as.factor(Y), ntree = 50)
stop <- proc.time()[3]
print(stop - start)
# Factor levels are "0".."9", so predicted level index - 1 gives the digit.
y.hat <- as.numeric(predict(rf.out, newdata = X_0, type = 'class')) - 1

# Create submission file
rf.pred <- data.frame(ImageId = seq_len(nrow(test)), Label = y.hat)
write.csv(rf.pred, "rf_predictions.csv", row.names = FALSE, quote = FALSE)

### Support Vector Machines
# FIX: `ranges` must be a *named* list of parameter vectors; the original
# unnamed `list(0.01, 0.1, 1)` is not a valid tuning grid.
svm.lin <- tune(svm, as.factor(label) ~ ., data = train,
                kernel = "linear", type = 'C',
                ranges = list(cost = c(0.01, 0.1, 1)))
yhat.svm <- predict(svm.lin$best.model, test)

### Linear Discriminant Analysis
packages(MASS)

# Remove (near-)constant pixels so lda() does not fail on zero variance.
train.nzv <- train[, -nearZeroVar(train, freqCut = 99999, uniqueCut = 0.000001)]

# Scale data by max value
train.s <- train.nzv[, -1] / max(train.nzv[, -1])
train.s <- cbind(Y = train$label, train.s)

# Fit an LDA model
lda.out <- lda(Y ~ ., data = train.s)
yhat.lda <- predict(lda.out, test)

# Create submission file
lda.pred <- data.frame(ImageId = seq_len(nrow(test)), Label = yhat.lda$class)
write.csv(lda.pred, "lda_predictions.csv", row.names = FALSE, quote = FALSE)

### Neural Net (mxnet)
install.packages("drat", repos = "https://cran.rstudio.com")
drat::addRepo("dmlc")
install.packages("mxnet")
library(mxnet)

### Boosting trees
packages(gbm)
lambdas <- c(0.01, 0.1)
depths <- c(1, 2)
iterations <- 50   # single value, so the inner loop runs once
distribution <- "multinomial"
for (lambda in lambdas) {
  for (depth in depths) {
    for (iteration in iterations) {
      # FIX: fit with the loop variable `iteration` (the original fit with the
      # constant `iterations` but predicted with `iteration`).
      gbm.fit <- gbm(Y ~ ., data = train.s,
                     distribution = distribution,
                     n.trees = iteration,
                     shrinkage = lambda,
                     interaction.depth = depth,
                     verbose = TRUE)
      yhat.gbm <- predict(gbm.fit, test, n.trees = iteration, type = "response")
      # NOTE(review): the original computed `yhat.gbm[yhat.gbm >= 0.5]` and
      # discarded the result; the thresholded predictions were never stored
      # or scored. Left out as a no-op — confirm intended evaluation.
    }
  }
}
yhat.gbm <- predict(gbm.fit, test, n.trees = 50, type = "response")
/digit_recognizer.R
no_license
risachoe/digit-recognizer-neural-net-R
R
false
false
5,557
r
# Digit Recognizer: Kaggle Competition
#
# The data directory contains 3 files:
#  - test.csv
#  - train.csv
#  - sample_submission.csv

##### Clean Up Workspace #####
rm(list = ls())   # remove previous workspace
gc(reset = TRUE)  # garbage collection

##### Install and/or Load Packages #####

# Install (if missing) and attach a package given as a bare, unquoted name.
packages <- function(x, repos = "http://cran.r-project.org", ...) {
  x <- deparse(substitute(x))
  if (!require(x, character.only = TRUE)) {
    install.packages(pkgs = x, dependencies = TRUE, repos = repos, ...)
    library(x, character.only = TRUE)
  }
}

# Load libraries
packages(doParallel)    # parallel computing
packages(foreach)       # parallel computing
packages(reshape2)      # manipulate datasets
packages(dplyr)         # splitting, applying, and combining data
packages(boot)          # contains cv.glm
packages(leaps)         # for regsubsets
packages(ggplot2)
packages(glmnet)
packages(forcats)
packages(caret)
packages(randomForest)
packages(e1071)
packages(h2o)

##### DEFINE FUNCTIONS #####

# Render one 28x28 digit from a 784-element pixel vector (grayscale, flipped
# so the image appears upright).
plotDigit <- function(x, zlim = c(-1, 1)) {
  cols <- gray.colors(100)[100:1]
  image(matrix(x, nrow = 28)[, 28:1], col = cols, zlim = zlim, axes = FALSE)
}

# Misclassification rate for predicted vs. true class labels.
# If produceOutput is TRUE, returns the rate; otherwise prints the rate and
# the confusion matrix.
miss.class <- function(pred.class, true.class, produceOutput = FALSE) {
  confusion.mat <- table(pred.class, true.class)
  if (produceOutput) {
    return(1 - sum(diag(confusion.mat)) / sum(confusion.mat))
  } else {
    print('miss-class')
    print(1 - sum(diag(confusion.mat)) / sum(confusion.mat))
    print('confusion mat')
    print(confusion.mat)
  }
}

##### SET UP DATA #####

# Import train and test data
train <- read.csv("train.csv")
test <- read.csv("test.csv")

# Convert label to factor for classification
train[, 1] <- as.factor(train[, 1])

# Create matrices
X <- as.matrix(train[, -1])
Y <- train[, 1]
X_0 <- as.matrix(test)

# Start a local h2o cluster
localH2O <- h2o.init(max_mem_size = '6g', nthreads = -1)

# Create H2O frames
train.h2o <- as.h2o(train)
test.h2o <- as.h2o(test)

##### DATA EXPLORATION #####

# Plot an image
plotDigit(X[1, ])    # from training set
plotDigit(X_0[1, ])  # from test set
# FIX(review): removed a syntactically broken stub `for (i in 1:nrow()) #`
# that called nrow() with no argument and had no loop body.

##### Missing values, duplicate data, etc. #####
str(train)                                        # structure: obs, variables, types
any(is.na(train))                                 # TRUE if any missing values exist
colSums(sapply(train, is.na))                     # missing values per column
sum(is.na(train)) / (nrow(train) * ncol(train))   # fraction of values missing
nrow(train) - nrow(unique(train))                 # number of duplicate rows

##### MODELS #####

# Set timer
s <- proc.time()

### Random forest (h2o)
start <- proc.time()[3]
# FIX: spell out `ntrees` (the h2o argument name) instead of relying on
# partial matching of `ntree`.
rf.h2o <- h2o.randomForest(x = names(train.h2o[, -1]), y = "label",
                           train.h2o, ntrees = 50)
stop <- proc.time()[3]
print(stop - start)
yhat.rf <- as.numeric(h2o.predict(rf.h2o, newdata = test.h2o, type = 'class'))

### Deep learning (h2o)
start <- proc.time()[3]
nn.h2o <- h2o.deeplearning(x = 2:785, y = 1,
                           training_frame = train.h2o,
                           activation = "RectifierWithDropout",
                           input_dropout_ratio = 0.2,
                           hidden_dropout_ratios = c(0.5, 0.5),
                           balance_classes = TRUE,
                           hidden = c(100, 100),
                           momentum_stable = 0.99,
                           nesterov_accelerated_gradient = TRUE,
                           epochs = 15)
stop <- proc.time()[3]
print(stop - start)
h2o.confusionMatrix(nn.h2o)
yhat.nn <- h2o.predict(nn.h2o, test.h2o)
nn.pred <- as.data.frame(yhat.nn)
nn.pred <- data.frame(ImageId = seq_along(nn.pred$predict),
                      Label = nn.pred$predict)
write.csv(nn.pred, "nn_predictions.csv", row.names = FALSE, quote = FALSE)

### Random Forest (randomForest package)
start <- proc.time()[3]
rf.out <- randomForest(x = X, y = as.factor(Y), ntree = 50)
stop <- proc.time()[3]
print(stop - start)
# Factor levels are "0".."9", so predicted level index - 1 gives the digit.
y.hat <- as.numeric(predict(rf.out, newdata = X_0, type = 'class')) - 1

# Create submission file
rf.pred <- data.frame(ImageId = seq_len(nrow(test)), Label = y.hat)
write.csv(rf.pred, "rf_predictions.csv", row.names = FALSE, quote = FALSE)

### Support Vector Machines
# FIX: `ranges` must be a *named* list of parameter vectors; the original
# unnamed `list(0.01, 0.1, 1)` is not a valid tuning grid.
svm.lin <- tune(svm, as.factor(label) ~ ., data = train,
                kernel = "linear", type = 'C',
                ranges = list(cost = c(0.01, 0.1, 1)))
yhat.svm <- predict(svm.lin$best.model, test)

### Linear Discriminant Analysis
packages(MASS)

# Remove (near-)constant pixels so lda() does not fail on zero variance.
train.nzv <- train[, -nearZeroVar(train, freqCut = 99999, uniqueCut = 0.000001)]

# Scale data by max value
train.s <- train.nzv[, -1] / max(train.nzv[, -1])
train.s <- cbind(Y = train$label, train.s)

# Fit an LDA model
lda.out <- lda(Y ~ ., data = train.s)
yhat.lda <- predict(lda.out, test)

# Create submission file
lda.pred <- data.frame(ImageId = seq_len(nrow(test)), Label = yhat.lda$class)
write.csv(lda.pred, "lda_predictions.csv", row.names = FALSE, quote = FALSE)

### Neural Net (mxnet)
install.packages("drat", repos = "https://cran.rstudio.com")
drat::addRepo("dmlc")
install.packages("mxnet")
library(mxnet)

### Boosting trees
packages(gbm)
lambdas <- c(0.01, 0.1)
depths <- c(1, 2)
iterations <- 50   # single value, so the inner loop runs once
distribution <- "multinomial"
for (lambda in lambdas) {
  for (depth in depths) {
    for (iteration in iterations) {
      # FIX: fit with the loop variable `iteration` (the original fit with the
      # constant `iterations` but predicted with `iteration`).
      gbm.fit <- gbm(Y ~ ., data = train.s,
                     distribution = distribution,
                     n.trees = iteration,
                     shrinkage = lambda,
                     interaction.depth = depth,
                     verbose = TRUE)
      yhat.gbm <- predict(gbm.fit, test, n.trees = iteration, type = "response")
      # NOTE(review): the original computed `yhat.gbm[yhat.gbm >= 0.5]` and
      # discarded the result; the thresholded predictions were never stored
      # or scored. Left out as a no-op — confirm intended evaluation.
    }
  }
}
yhat.gbm <- predict(gbm.fit, test, n.trees = 50, type = "response")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Algorithm.r \docType{class} \name{AlgorithmiaAlgorithm-class} \alias{AlgorithmiaAlgorithm} \alias{AlgorithmiaAlgorithm-class} \title{Algorithm object which enables you to call Algorithmia algorithms. To create one, call: `client$algo("algoUrl")`} \description{ Algorithm object which enables you to call Algorithmia algorithms. To create one, call: `client$algo("algoUrl")` } \section{Fields}{ \describe{ \item{\code{client}}{Reference to the AlgorithmiaClient object that has the credentials necessary to make API calls.} \item{\code{algoUrl}}{The unique identifier for an algorithm. Follows the pattern: [Algorithm Author]/[Algorithm Name]/[Optional Version] like: 'demo/Hello/0.1.1'.} \item{\code{queryParameters}}{Mutable list of parameters to use while making algorithm calls. These can be changed by calling setOptions.} }} \section{Methods}{ \describe{ \item{\code{pipe(input)}}{Calls an algorithm with the input provided.} \item{\code{setOptions(timeout = 300, stdout = FALSE, output = "default", parameters = list())}}{Allows you to set the timeout duration (in seconds), whether you want the stdout that was produced while running the algorithm (this only works when the algorithm author calls it), and whether this should run as async (output = 'void') or in raw mode (output = 'raw').} }}
/man/AlgorithmiaAlgorithm-class.Rd
permissive
MarkEdmondson1234/algorithmia-r
R
false
true
1,386
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Algorithm.r \docType{class} \name{AlgorithmiaAlgorithm-class} \alias{AlgorithmiaAlgorithm} \alias{AlgorithmiaAlgorithm-class} \title{Algorithm object which enables you to call Algorithmia algorithms. To create one, call: `client$algo("algoUrl")`} \description{ Algorithm object which enables you to call Algorithmia algorithms. To create one, call: `client$algo("algoUrl")` } \section{Fields}{ \describe{ \item{\code{client}}{Reference to the AlgorithmiaClient object that has the credentials necessary to make API calls.} \item{\code{algoUrl}}{The unique identifier for an algorithm. Follows the pattern: [Algorithm Author]/[Algorithm Name]/[Optional Version] like: 'demo/Hello/0.1.1'.} \item{\code{queryParameters}}{Mutable list of parameters to use while making algorithm calls. These can be changed by calling setOptions.} }} \section{Methods}{ \describe{ \item{\code{pipe(input)}}{Calls an algorithm with the input provided.} \item{\code{setOptions(timeout = 300, stdout = FALSE, output = "default", parameters = list())}}{Allows you to set the timeout duration (in seconds), whether you want the stdout that was produced while running the algorithm (this only works when the algorithm author call it), and whether this should run as async (output = 'void') or in raw mode (output = 'raw').} }}
# # areadiff.R # # $Revision: 1.33 $ $Date: 2017/06/05 10:31:58 $ # # Computes sufficient statistic for area-interaction process # # Invokes areadiff.c # # areaLoss = area lost by removing X[i] from X areaLoss <- function(X, r, ..., W=as.owin(X), subset=NULL, exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaLoss.diri(X, r, ..., W=W, subset=subset) else areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid) } # areaGain = area gained by adding u[i] to X areaGain <- function(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaGain.diri(u, X, r, ..., W=W) else areaGain.grid(u, X, r, W=W, ngrid=ngrid) } #//////////////////////////////////////////////////////////// # algorithms using Dirichlet tessellation #/////////////////////////////////////////////////////////// areaLoss.diri <- function(X, r, ..., W=as.owin(X), subset=NULL) { stopifnot(is.ppp(X)) npts <- npoints(X) if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(npts == 0) return(matrix(, nrow=0, ncol=nr)) else if(npts == 1) return(matrix(discpartarea(X, r, W), nrow=1)) # set up output array indices <- 1L:npts if(!is.null(subset)) indices <- indices[subset] out <- matrix(, nrow=length(indices), ncol=nr) # w <- X$window pir2 <- pi * r^2 # dirichlet neighbour relation in entire pattern dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange)) a <- dd$delsgs[,5L] b <- dd$delsgs[,6L] for(k in seq_along(indices)) { i <- indices[k] # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sort(unique(jj)) # extract only these points Yminus <- X[jj] Yplus <- X[c(jj, i)] # dilate aplus <- dilated.areas(Yplus, r, W, exact=TRUE) aminus <- dilated.areas(Yminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be positive and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y 
* pir2) # save out[k, ] <- areas } return(out) } areaGain.diri <- function(u, X, r, ..., W=as.owin(X), verbose=FALSE) { stopifnot(is.ppp(X)) Y <- as.ppp(u, W=W) nX <- X$n nY <- Y$n if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(nY == 0) return(matrix(, nrow=0, ncol=nr)) if(nX == 0) return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE)) if(verbose) splat("areaGain,", nY, ngettext(nY, "point,", "points,"), nr, ngettext(nr, "rvalue", "r values")) out <- matrix(0, nrow=nY, ncol=nr) pir2 <- pi * r^2 wbox <- as.rectangle(as.owin(X)) # state <- list() for(i in 1L:nY) { if(verbose) state <- progressreport(i, nY, state=state) V <- superimpose(Y[i], X, W=wbox, check=FALSE) # Dirichlet neighbour relation for V dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange)) aa <- dd$delsgs[,5L] bb <- dd$delsgs[,6L] # find all Delaunay neighbours of Y[1] in V jj <- c(bb[aa==1L], aa[bb==1L]) jj <- sort(unique(jj)) # extract only these points Zminus <- V[jj] Zplus <- V[c(1, jj)] # dilate aplus <- dilated.areas(Zplus, r, W, exact=TRUE) aminus <- dilated.areas(Zminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be in [0,1] and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y * pir2) # save out[i,] <- areas } return(out) } #//////////////////////////////////////////////////////////////////////// # alternative implementations using grid counting in C #//////////////////////////////////////////////////////////////////////// areaGain.grid <- function(u, X, r, ..., W=NULL, ngrid=spatstat.options("ngrid.disc")) { verifyclass(X, "ppp") u <- as.ppp(u, W=as.owin(X)) stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0)) # nu <- u$n nr <- length(r) if(nr == 0) return(numeric(0)) rmax <- max(r) # constrain <- !is.null(W) if(constrain && (W$type != "rectangle")) { # Constrained to an irregular window # 
initialise to value for small-r result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE) # vector of radii below which b(u,r) is disjoint from U(X,r) rcrit.u <- nncross(u, X, what="dist")/2 rcrit.min <- min(rcrit.u) # Use distance transform and set covariance D <- distmap(X, ...) DW <- D[W, drop=FALSE] # distance from (0,0) - thresholded to make digital discs discWin <- owin(c(-rmax,rmax),c(-rmax,rmax)) discWin <- as.mask(discWin, eps=min(D$xstep, rmax/4)) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin) # for(j in which(r > rcrit.min)) { # rj is above the critical radius rcrit.u[i] for at least one point u[i] rj <- r[j] if(any(above <- (rj > rcrit.u))) { Uncovered <- levelset(DW, rj, ">") DiscRj <- levelset(rad, rj, "<=") AreaGainIm <- setcov(Uncovered, DiscRj) result[above, j] <- safelookup(AreaGainIm, u[above]) } } return(result) } # # xx <- X$x yy <- X$y result <- matrix(, nrow=nu, ncol=nr) # for(i in 1L:nu) { # shift u[i] to origin xu <- u$x[i] yu <- u$y[i] xshift <- xx - xu yshift <- yy - yu # find points within distance 2 rmax of origin close <- (xshift^2 + yshift^2 < 4 * rmax^2) nclose <- sum(close) # invoke C routine if(!constrain) { z <- .C("areadifs", rad = as.double(r), nrads = as.integer(nr), x = as.double(xshift[close]), y = as.double(yshift[close]), nn = as.integer(nclose), ngrid = as.integer(ngrid), answer = as.double(numeric(nr)), PACKAGE = "spatstat") result[i,] <- z$answer } else { z <- .C("areaBdif", rad = as.double(r), nrads = as.integer(nr), x = as.double(xshift[close]), y = as.double(yshift[close]), nn = as.integer(nclose), ngrid = as.integer(ngrid), x0 = as.double(W$xrange[1L] - xu), y0 = as.double(W$yrange[1L] - yu), x1 = as.double(W$xrange[2L] - xu), y1 = as.double(W$yrange[2L] - yu), answer = as.double(numeric(nr)), PACKAGE = "spatstat") result[i,] <- z$answer } } return(result) } areaLoss.grid <- function(X, r, ..., W=as.owin(X), subset=NULL, method = c("count", "distmap"), ngrid = spatstat.options("ngrid.disc"), exact = FALSE) { 
verifyclass(X, "ppp") n <- npoints(X) nr <- length(r) indices <- if(is.null(subset)) 1L:n else (1L:n)[subset] answer <- matrix(, nrow=length(indices), ncol=nr) if(missing(method)) { method <- if(nr <= 20 || exact) "count" else "distmap" } else method <- match.arg(method) switch(method, count = { # one value of r: use grid-counting for(k in seq_along(indices)) { i <- indices[k] answer[k,] <- areaGain(X[i], X[-i], r, W=W, ngrid=ngrid, exact=exact) } }, distmap = { # Many values of r: use distance transform D <- distmap(X, ...) DW <- D[W, drop=FALSE] a <- area(Window(DW)) # empirical cdf of distance values FW <- ecdf(DW[drop=TRUE]) # radii below which there are no overlaps rcrit <- nndist(X)/2 for(k in seq_along(indices)) { i <- indices[k] Di <- distmap(X[-i], ...) FiW <- ecdf(Di[W, drop=TRUE]) answer[k, ] <- ifelseXY(r > rcrit[i], a * (FW(r) - FiW(r)), pi * r^2) } }) return(answer) }
/R/areadiff.R
no_license
edzer/spatstat
R
false
false
8,300
r
# # areadiff.R # # $Revision: 1.33 $ $Date: 2017/06/05 10:31:58 $ # # Computes sufficient statistic for area-interaction process # # Invokes areadiff.c # # areaLoss = area lost by removing X[i] from X areaLoss <- function(X, r, ..., W=as.owin(X), subset=NULL, exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaLoss.diri(X, r, ..., W=W, subset=subset) else areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid) } # areaGain = area gained by adding u[i] to X areaGain <- function(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaGain.diri(u, X, r, ..., W=W) else areaGain.grid(u, X, r, W=W, ngrid=ngrid) } #//////////////////////////////////////////////////////////// # algorithms using Dirichlet tessellation #/////////////////////////////////////////////////////////// areaLoss.diri <- function(X, r, ..., W=as.owin(X), subset=NULL) { stopifnot(is.ppp(X)) npts <- npoints(X) if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(npts == 0) return(matrix(, nrow=0, ncol=nr)) else if(npts == 1) return(matrix(discpartarea(X, r, W), nrow=1)) # set up output array indices <- 1L:npts if(!is.null(subset)) indices <- indices[subset] out <- matrix(, nrow=length(indices), ncol=nr) # w <- X$window pir2 <- pi * r^2 # dirichlet neighbour relation in entire pattern dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange)) a <- dd$delsgs[,5L] b <- dd$delsgs[,6L] for(k in seq_along(indices)) { i <- indices[k] # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sort(unique(jj)) # extract only these points Yminus <- X[jj] Yplus <- X[c(jj, i)] # dilate aplus <- dilated.areas(Yplus, r, W, exact=TRUE) aminus <- dilated.areas(Yminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be positive and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y 
* pir2) # save out[k, ] <- areas } return(out) } areaGain.diri <- function(u, X, r, ..., W=as.owin(X), verbose=FALSE) { stopifnot(is.ppp(X)) Y <- as.ppp(u, W=W) nX <- X$n nY <- Y$n if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(nY == 0) return(matrix(, nrow=0, ncol=nr)) if(nX == 0) return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE)) if(verbose) splat("areaGain,", nY, ngettext(nY, "point,", "points,"), nr, ngettext(nr, "rvalue", "r values")) out <- matrix(0, nrow=nY, ncol=nr) pir2 <- pi * r^2 wbox <- as.rectangle(as.owin(X)) # state <- list() for(i in 1L:nY) { if(verbose) state <- progressreport(i, nY, state=state) V <- superimpose(Y[i], X, W=wbox, check=FALSE) # Dirichlet neighbour relation for V dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange)) aa <- dd$delsgs[,5L] bb <- dd$delsgs[,6L] # find all Delaunay neighbours of Y[1] in V jj <- c(bb[aa==1L], aa[bb==1L]) jj <- sort(unique(jj)) # extract only these points Zminus <- V[jj] Zplus <- V[c(1, jj)] # dilate aplus <- dilated.areas(Zplus, r, W, exact=TRUE) aminus <- dilated.areas(Zminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be in [0,1] and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y * pir2) # save out[i,] <- areas } return(out) } #//////////////////////////////////////////////////////////////////////// # alternative implementations using grid counting in C #//////////////////////////////////////////////////////////////////////// areaGain.grid <- function(u, X, r, ..., W=NULL, ngrid=spatstat.options("ngrid.disc")) { verifyclass(X, "ppp") u <- as.ppp(u, W=as.owin(X)) stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0)) # nu <- u$n nr <- length(r) if(nr == 0) return(numeric(0)) rmax <- max(r) # constrain <- !is.null(W) if(constrain && (W$type != "rectangle")) { # Constrained to an irregular window # 
initialise to value for small-r result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE) # vector of radii below which b(u,r) is disjoint from U(X,r) rcrit.u <- nncross(u, X, what="dist")/2 rcrit.min <- min(rcrit.u) # Use distance transform and set covariance D <- distmap(X, ...) DW <- D[W, drop=FALSE] # distance from (0,0) - thresholded to make digital discs discWin <- owin(c(-rmax,rmax),c(-rmax,rmax)) discWin <- as.mask(discWin, eps=min(D$xstep, rmax/4)) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin) # for(j in which(r > rcrit.min)) { # rj is above the critical radius rcrit.u[i] for at least one point u[i] rj <- r[j] if(any(above <- (rj > rcrit.u))) { Uncovered <- levelset(DW, rj, ">") DiscRj <- levelset(rad, rj, "<=") AreaGainIm <- setcov(Uncovered, DiscRj) result[above, j] <- safelookup(AreaGainIm, u[above]) } } return(result) } # # xx <- X$x yy <- X$y result <- matrix(, nrow=nu, ncol=nr) # for(i in 1L:nu) { # shift u[i] to origin xu <- u$x[i] yu <- u$y[i] xshift <- xx - xu yshift <- yy - yu # find points within distance 2 rmax of origin close <- (xshift^2 + yshift^2 < 4 * rmax^2) nclose <- sum(close) # invoke C routine if(!constrain) { z <- .C("areadifs", rad = as.double(r), nrads = as.integer(nr), x = as.double(xshift[close]), y = as.double(yshift[close]), nn = as.integer(nclose), ngrid = as.integer(ngrid), answer = as.double(numeric(nr)), PACKAGE = "spatstat") result[i,] <- z$answer } else { z <- .C("areaBdif", rad = as.double(r), nrads = as.integer(nr), x = as.double(xshift[close]), y = as.double(yshift[close]), nn = as.integer(nclose), ngrid = as.integer(ngrid), x0 = as.double(W$xrange[1L] - xu), y0 = as.double(W$yrange[1L] - yu), x1 = as.double(W$xrange[2L] - xu), y1 = as.double(W$yrange[2L] - yu), answer = as.double(numeric(nr)), PACKAGE = "spatstat") result[i,] <- z$answer } } return(result) } areaLoss.grid <- function(X, r, ..., W=as.owin(X), subset=NULL, method = c("count", "distmap"), ngrid = spatstat.options("ngrid.disc"), exact = FALSE) { 
verifyclass(X, "ppp") n <- npoints(X) nr <- length(r) indices <- if(is.null(subset)) 1L:n else (1L:n)[subset] answer <- matrix(, nrow=length(indices), ncol=nr) if(missing(method)) { method <- if(nr <= 20 || exact) "count" else "distmap" } else method <- match.arg(method) switch(method, count = { # one value of r: use grid-counting for(k in seq_along(indices)) { i <- indices[k] answer[k,] <- areaGain(X[i], X[-i], r, W=W, ngrid=ngrid, exact=exact) } }, distmap = { # Many values of r: use distance transform D <- distmap(X, ...) DW <- D[W, drop=FALSE] a <- area(Window(DW)) # empirical cdf of distance values FW <- ecdf(DW[drop=TRUE]) # radii below which there are no overlaps rcrit <- nndist(X)/2 for(k in seq_along(indices)) { i <- indices[k] Di <- distmap(X[-i], ...) FiW <- ecdf(Di[W, drop=TRUE]) answer[k, ] <- ifelseXY(r > rcrit[i], a * (FW(r) - FiW(r)), pi * r^2) } }) return(answer) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MBDETES-Model.R \name{MBDETES-Model} \alias{MBDETES-Model} \title{MBDETES: Model Specification} \description{ MBDETES: Model Specification } \section{Background}{ Under some restricted conditions on waiting times, MBITES is what we would get if we translated MBITES into a stochastic model, under the Gillespie algorithm. We can, therefore, use MBDETES to compute expectations for MBITES under these same contitions. The utilties in MBDETES-Auxiliary.R and MBDETES-Model.R outputs state transitions as described in the following. \itemize{ \item Let \eqn{0\leq P_{X,Y}\leq 1} denote the probability of making a transition from state \eqn{X} to state \eqn{Y} after one bout \item The probability of dying, \eqn{0 \leq P_{X,D} \leq 1} must satifsy the condition \deqn{\sum_{Y\neq D}P_{X,Y} =1-P_{X,D}} \item Since there is the probability of repeating a state, we must compute the probability of making a transition out of the state and ending up alive and in a new state. We let this quantity be \eqn{\Psi_{X,Y}} \item Let \eqn{T_{X}} the average time spent in a bout of type \eqn{X}. \item Let \eqn{T_{X,Y}} denote the expected waiting time to make the transition from \eqn{X} to \eqn{Y}. } } \section{Differential equations}{ In some sense, we can rewrite MBDETES as a conservation equation in the following way: \deqn{\left[ \begin{array}{c}dF/dt \\ dB/dt \\ dR/dt \\ dL/dt \\ dO/dt \\ dD/dt\end{array}\right]= \left[ \begin{array}{ccccc} P_{F,F}-1 & P_{F,B} & 0& 0 & 0 & P_{F,D} \\ P_{B,F} & P_{B,B}-1 & P_{B,R} & 0 &0 & P_{B,D} \\ P_{R,F} & P_{R,B} & -1 & P_{R,L} & P_{R,O}& P_{R,D} \\ 0 & 0 & 0 & P_{L,L}-1& P_{L,O} & P_{L,D} \\ P_{O,F} & P_{O,B} & 0 & P_{O,L} & P_{O,O}-1 & P_{O,D} \\ P_{F,D} & P_{B,D} & P_{R,D} & P_{L,D} & P_{O,D} & 0 \end{array}\right] \left[ \begin{array}{c}F/t_F\\B/t_B\\R/t_R\\L/t_R\\O/t_O \\D \\ \end{array}\right]} }
/MASH-dev/SeanWu/MBITES/man/MBDETES-Model.Rd
no_license
aucarter/MASH-Main
R
false
true
1,943
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MBDETES-Model.R \name{MBDETES-Model} \alias{MBDETES-Model} \title{MBDETES: Model Specification} \description{ MBDETES: Model Specification } \section{Background}{ Under some restricted conditions on waiting times, MBITES is what we would get if we translated MBITES into a stochastic model, under the Gillespie algorithm. We can, therefore, use MBDETES to compute expectations for MBITES under these same contitions. The utilties in MBDETES-Auxiliary.R and MBDETES-Model.R outputs state transitions as described in the following. \itemize{ \item Let \eqn{0\leq P_{X,Y}\leq 1} denote the probability of making a transition from state \eqn{X} to state \eqn{Y} after one bout \item The probability of dying, \eqn{0 \leq P_{X,D} \leq 1} must satifsy the condition \deqn{\sum_{Y\neq D}P_{X,Y} =1-P_{X,D}} \item Since there is the probability of repeating a state, we must compute the probability of making a transition out of the state and ending up alive and in a new state. We let this quantity be \eqn{\Psi_{X,Y}} \item Let \eqn{T_{X}} the average time spent in a bout of type \eqn{X}. \item Let \eqn{T_{X,Y}} denote the expected waiting time to make the transition from \eqn{X} to \eqn{Y}. } } \section{Differential equations}{ In some sense, we can rewrite MBDETES as a conservation equation in the following way: \deqn{\left[ \begin{array}{c}dF/dt \\ dB/dt \\ dR/dt \\ dL/dt \\ dO/dt \\ dD/dt\end{array}\right]= \left[ \begin{array}{ccccc} P_{F,F}-1 & P_{F,B} & 0& 0 & 0 & P_{F,D} \\ P_{B,F} & P_{B,B}-1 & P_{B,R} & 0 &0 & P_{B,D} \\ P_{R,F} & P_{R,B} & -1 & P_{R,L} & P_{R,O}& P_{R,D} \\ 0 & 0 & 0 & P_{L,L}-1& P_{L,O} & P_{L,D} \\ P_{O,F} & P_{O,B} & 0 & P_{O,L} & P_{O,O}-1 & P_{O,D} \\ P_{F,D} & P_{B,D} & P_{R,D} & P_{L,D} & P_{O,D} & 0 \end{array}\right] \left[ \begin{array}{c}F/t_F\\B/t_B\\R/t_R\\L/t_R\\O/t_O \\D \\ \end{array}\right]} }
# ---------------------------------------- # Objective: Create "PlotRF_BM" sensitivity curves for each model/data stream # that are based on all sites, all Biomass # Christy Rollinson, crollinson@gmail.com # Date Created: 28 July 2015 # ---------------------------------------- # # ------------------------- # Workflow # ------------------------- # 1. Set up Data # a. Ecosystem model output # b. Tree Ring NPP products # c. Raw Tree Ring widths # 2. Run the gamms (with site intercept) # 3. Bind Models into single list # 4. Diagnostic Graphs # ------------------------- # ---------------------------------------- rm(list=ls()) # ---------------------------------------- # Load Libaries # ---------------------------------------- library(parallel) library(mgcv) library(ggplot2); library(grid) library(car) # ---------------------------------------- # ---------------------------------------- # Define constants # ---------------------------------------- sec2yr <- 1*60*60*24*365 predictors.all <- c("tair", "precipf", "CO2") predictor.suffix <- c(".gs") resolutions <- "t.001" k=3 # ---------------------------------------- # ---------------------------------------- # Set Directories & file paths # ---------------------------------------- setwd("~/Desktop/Research/PalEON_CR/PalEON_MIP_Site/Analyses/Temporal-Scaling") dat.base="Data/gamms" fig.base="Figures/gamms" # Source the gamm file source('R/0_calculate.sensitivity_TPC_PlotRF_BM.R', chdir = TRUE) # Making sure the appropriate file paths exist if(!dir.exists(dat.base)) dir.create(dat.base) if(!dir.exists(fig.base)) dir.create(fig.base) # Setting the data & figure directories fig.dir <- file.path(fig.base, "Sensitivity_PlotRF_BM") dat.dir <- file.path(dat.base, "Sensitivity_PlotRF_BM") # Make sure the appropriate file paths are in place if(!dir.exists(dat.dir)) dir.create(dat.dir) if(!dir.exists(fig.dir)) dir.create(fig.dir) # ---------------------------------------- # 
------------------------------------------------------------------------------- # 1. Set up Data # ------------------------------------------------------------------------------- { paleon.models <- list() # ---------------------------------------- # 1.a. Load & set up Ecosystem Model Output first # ---------------------------------------- { # Define what our response variable will be response <- "NPP" time.mod <- "Year" # Ecosys file = organized, post-processed m.name outputs # generated with 1_generate_ecosys.R load(file.path("Data", "EcosysData.Rdata")) # Get rid of LINKAGES because it's weird & hasn't been updated ecosys <- ecosys[!ecosys$Model=="linkages",] summary(ecosys) for(m in unique(ecosys$Model)){ print("-------------------------------------") print(paste0("------ Processing Model: ", m, " ------")) # Taking the subsets of data we want in a single gam dat.subsets <- ecosys$Resolution == "t.001" & ecosys$Model == m # What will our spatio-temporal explanatory factor ("Biomass") be? if(!is.na(mean(ecosys[dat.subsets,"AGB"]))) biomass.mod="AGB" else biomass.mod="LAI" data.temp <- ecosys[dat.subsets, c("Model", "Model.Order", "Site", "Year")] data.temp$PlotID <- ecosys[dat.subsets,"Site" ] data.temp$TreeID <- as.factor(NA) data.temp$Y <- ecosys[dat.subsets,response] data.temp$Biomass <- ecosys[dat.subsets,biomass.mod] data.temp$Time <- ecosys[dat.subsets,time.mod] data.temp[,predictors.all] <- ecosys[dat.subsets, paste0(predictors.all, predictor.suffix)] data.temp$Resolution <- ecosys[dat.subsets,"Resolution"] # Getting rid of NAs in predictors data.temp <- data.temp[complete.cases(data.temp[,c(predictors.all, "Y", "Biomass", "Time")]),] # Copy the response variable & some other things for the model paleon.models[[paste(m)]] <- data.temp } # End Model Loop } # End Model setup # ---------------------------------------- # ---------------------------------------- # 1.b. 
Load & set up tree ring NPP # ---------------------------------------- { # Define what our response & Biomass variables will be response <- "ABI.area" biomass.mod <- "AB.area" time.mod <- "plot.Age" # Load Tree ring NPP data spp.npp <- read.csv(file.path("Data", "TreeRing_NPP_PlotSpecies.csv")) summary(spp.npp) # aggregate to total plot NPP (ABI.area) plot.npp <- aggregate(spp.npp[,c(response, biomass.mod)], by=spp.npp[,c("Site", "Site2", "PlotID", "Year")], FUN=sum) plot.npp[,c(paste0(predictors.all, predictor.suffix))] <- aggregate(spp.npp[,c(paste0(predictors.all, predictor.suffix))], by=spp.npp[,c("Site", "Site2", "PlotID", "Year")], FUN=mean)[,c(paste0(predictors.all, predictor.suffix))] summary(plot.npp) # ------- # Loading tree ring data & extracting plot age # ------- tree.rings <- read.csv("Data/TreeRing_RingWidths.csv") summary(tree.rings) plot.age <- aggregate(tree.rings[,c("Age", "DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=mean, na.rm=T) plot.age$BA.sum <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){sum(pi*((x/2)^2), na.rm=T)})[,3] plot.age$BA.mean <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){mean(pi*((x/2)^2), na.rm=T)})[,3] names(plot.age)[3:ncol(plot.age)] <- paste0("plot.", names(plot.age)[3:ncol(plot.age)]) summary(plot.age) plot.npp <- merge(plot.npp, plot.age, all.x=T, all.y=F) summary(plot.npp) # ------- # Subset a period where we're not worried about plot.npp <- plot.npp[complete.cases(plot.npp) & plot.npp$Year>=(2010-30),] summary(plot.npp) # Add the data to paleon.models paleon.models[["TreeRingNPP"]] <- plot.npp[,c("Site", "PlotID", "Year")] paleon.models$TreeRingNPP$Model <- as.factor("TreeRingNPP") paleon.models$TreeRingNPP$Model.Order <- as.factor("Tree Ring NPP") paleon.models$TreeRingNPP$TreeID <- as.factor(NA) paleon.models$TreeRingNPP$Y <- plot.npp[,response] paleon.models$TreeRingNPP$Biomass <- plot.npp[,biomass.mod] 
paleon.models$TreeRingNPP$Time <- plot.npp[,time.mod] paleon.models$TreeRingNPP[,predictors.all] <- plot.npp[, paste0(predictors.all, predictor.suffix)] paleon.models$TreeRingNPP$Resolution <- as.factor("t.001") # Make sure everything is complete cases paleon.models$TreeRingNPP <- paleon.models$TreeRingNPP[complete.cases(paleon.models$TreeRingNPP[,c(predictors.all, "Y", "Biomass", "Time")]),] # Order everything the same way to make life easier paleon.models$TreeRingNPP <- paleon.models$TreeRingNPP[,names(paleon.models[[1]])] summary(paleon.models$TreeRingNPP) } # End Tree Ring NPP setup # ---------------------------------------- # ---------------------------------------- # 1.c. Load & set up raw ring widths # ---------------------------------------- { response <- "RW" biomass.mod <- "DBH" time.mod <- "Age" tree.rings <- read.csv("Data/TreeRing_RingWidths.csv") summary(tree.rings) # subset only complete cases where we have met data & ring width measurements tree.rings <- tree.rings[complete.cases(tree.rings[,c(response, paste0(predictors.all, predictor.suffix))]) & tree.rings$Resolution=="t.001",] summary(tree.rings) # ----------- # Adding in a few other factors that could be good predictors # ----------- plot.age <- aggregate(tree.rings[,c("Age", "DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=mean, na.rm=T) plot.age$BA.sum <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){sum(pi*((x/2)^2), na.rm=T)})[,3] plot.age$BA.mean <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){mean(pi*((x/2)^2), na.rm=T)})[,3] names(plot.age)[3:ncol(plot.age)] <- paste0("plot.", names(plot.age)[3:ncol(plot.age)]) summary(plot.age) tree.rings <- merge(tree.rings, plot.age, all.x=T, all.y=T) tree.rings$BA <- pi*(tree.rings$DBH/2)^2 summary(tree.rings) # ----------- # Add the data to paleon.models paleon.models[["TreeRingRW"]] <- tree.rings[,c("Site", "PlotID", "TreeID", "Year")] paleon.models$TreeRingRW$Model 
<- as.factor("TreeRingRW") paleon.models$TreeRingRW$Model.Order <- as.factor("Tree Ring RW") paleon.models$TreeRingRW[,predictors.all] <- tree.rings[,paste0(predictors.all, predictor.suffix)] paleon.models$TreeRingRW$Y <- tree.rings[,response] paleon.models$TreeRingRW$Biomass <- tree.rings[,biomass.mod] paleon.models$TreeRingRW$Time <- tree.rings[,time.mod] paleon.models$TreeRingRW$Resolution <- tree.rings[,"Resolution"] # Make sure everything is complete cases paleon.models$TreeRingRW <- paleon.models$TreeRingRW[complete.cases(paleon.models$TreeRingRW[,c(predictors.all, "Y", "Biomass", "Time")]),] # Order everything the same way to make life easier paleon.models$TreeRingRW <- paleon.models$TreeRingRW[,names(paleon.models[[1]])] summary(paleon.models$TreeRingRW) } # End Ring Width setups # ---------------------------------------- } # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 2. Run the gamms # ------------------------------------------------------------------------------- cores.use <- min(12, length(paleon.models)) # cores.use <- length(paleon.models) models.base <- mclapply(paleon.models, paleon.gams.models, mc.cores=cores.use, k=k, predictors.all=predictors.all, PFT=F) # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 3. 
Bind Models together to put them in a single object to make them easier to work with # ------------------------------------------------------------------------------- { for(i in 1:length(models.base)){ if(i==1) { mod.out <- list() mod.out$data <- models.base[[i]]$data mod.out$weights <- models.base[[i]]$weights mod.out$ci.response <- models.base[[i]]$ci.response mod.out$sim.response <- models.base[[i]]$sim.response mod.out$ci.terms <- models.base[[i]]$ci.terms mod.out$sim.terms <- models.base[[i]]$sim.terms mod.out[[paste("gamm", names(models.base)[i], "PlotRF_BM", sep=".")]] <- models.base[[i]]$gamm } else { mod.out$data <- rbind(mod.out$data, models.base[[i]]$data) mod.out$weights <- rbind(mod.out$weights, models.base[[i]]$weights) mod.out$ci.response <- rbind(mod.out$ci.response, models.base[[i]]$ci.response) mod.out$sim.response <- rbind(mod.out$sim.response, models.base[[i]]$sim.response) mod.out$ci.terms <- rbind(mod.out$ci.terms, models.base[[i]]$ci.terms) mod.out$sim.terms <- rbind(mod.out$sim.terms, models.base[[i]]$sim.terms) mod.out[[paste("gamm", names(models.base)[i], "PlotRF_BM", sep=".")]] <- models.base[[i]]$gamm } } save(mod.out, file=file.path(dat.dir, "gamm_PlotRF_BM.Rdata")) } # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 4. 
Diagnostic Graphs # ------------------------------------------------------------------------------- { m.order <- unique(mod.out$data$Model.Order) col.model <- c(paste(model.colors[model.colors$Model.Order %in% m.order,"color"]), "black", "gray30") pdf(file.path(fig.dir, "GAMM_ModelFit_PlotRF_BM.pdf")) { print( ggplot(data=mod.out$ci.response[!substr(mod.out$ci.response$Model, 1, 8)=="TreeRing",]) + facet_grid(PlotID~ Model, scales="free") + theme_bw() + geom_line(data= mod.out$data[!substr(mod.out$data$Model, 1, 8)=="TreeRing",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="NPP") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingNPP",]) + facet_wrap(~ PlotID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingNPP",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="NPP") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingRW" & mod.out$ci.response$PlotID=="ME029",]) + facet_wrap(~ TreeID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingRW" & mod.out$data$PlotID=="ME029",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, 
fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="RW") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingRW" & mod.out$ci.response$PlotID=="TP1",]) + facet_wrap(~ TreeID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingRW" & mod.out$data$PlotID=="TP1",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="RW") ) } dev.off() mod.out$ci.terms$x <- as.numeric(paste(mod.out$ci.terms$x)) summary(mod.out$ci.terms) pdf(file.path(fig.dir, "GAMM_DriverSensitivity_PlotRF_BM.pdf"), height=8.5, width=11) { print( ggplot(data=mod.out$ci.terms[mod.out$ci.terms$Effect %in% c("tair", "precipf", "CO2"),]) + facet_wrap(~ Effect, scales="free_x") + theme_bw() + geom_ribbon(aes(x=x, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=x, y=mean, color=Model), size=2) + geom_hline(yintercept=0, linetype="dashed") + scale_y_continuous(limits=c(-5, 5)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste0("Driver Sensitivity (not Relativized)"), y=paste0("NPP Contribution")) # + ) print( ggplot(data=mod.out$ci.terms[mod.out$ci.terms$Effect %in% c("Biomass"),]) + facet_wrap( ~ Model, scales="free_x") + theme_bw() + geom_ribbon(aes(x=x, ymin=lwr, 
ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=x, y=mean, color=Model), size=2) + geom_hline(yintercept=0, linetype="dashed") + scale_y_continuous(limits=c(-5, 5)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste0("Driver Sensitivity (not Relativized)"), y=paste0("NPP Contribution")) ) } dev.off() } ## -------------------------------------------------------------------------------
/R/Unused/4d_GAMMs_Baseline_PlotRF_BM.R
no_license
PalEON-Project/Temporal-Scaling-MS
R
false
false
15,648
r
# ---------------------------------------- # Objective: Create "PlotRF_BM" sensitivity curves for each model/data stream # that are based on all sites, all Biomass # Christy Rollinson, crollinson@gmail.com # Date Created: 28 July 2015 # ---------------------------------------- # # ------------------------- # Workflow # ------------------------- # 1. Set up Data # a. Ecosystem model output # b. Tree Ring NPP products # c. Raw Tree Ring widths # 2. Run the gamms (with site intercept) # 3. Bind Models into single list # 4. Diagnostic Graphs # ------------------------- # ---------------------------------------- rm(list=ls()) # ---------------------------------------- # Load Libaries # ---------------------------------------- library(parallel) library(mgcv) library(ggplot2); library(grid) library(car) # ---------------------------------------- # ---------------------------------------- # Define constants # ---------------------------------------- sec2yr <- 1*60*60*24*365 predictors.all <- c("tair", "precipf", "CO2") predictor.suffix <- c(".gs") resolutions <- "t.001" k=3 # ---------------------------------------- # ---------------------------------------- # Set Directories & file paths # ---------------------------------------- setwd("~/Desktop/Research/PalEON_CR/PalEON_MIP_Site/Analyses/Temporal-Scaling") dat.base="Data/gamms" fig.base="Figures/gamms" # Source the gamm file source('R/0_calculate.sensitivity_TPC_PlotRF_BM.R', chdir = TRUE) # Making sure the appropriate file paths exist if(!dir.exists(dat.base)) dir.create(dat.base) if(!dir.exists(fig.base)) dir.create(fig.base) # Setting the data & figure directories fig.dir <- file.path(fig.base, "Sensitivity_PlotRF_BM") dat.dir <- file.path(dat.base, "Sensitivity_PlotRF_BM") # Make sure the appropriate file paths are in place if(!dir.exists(dat.dir)) dir.create(dat.dir) if(!dir.exists(fig.dir)) dir.create(fig.dir) # ---------------------------------------- # 
------------------------------------------------------------------------------- # 1. Set up Data # ------------------------------------------------------------------------------- { paleon.models <- list() # ---------------------------------------- # 1.a. Load & set up Ecosystem Model Output first # ---------------------------------------- { # Define what our response variable will be response <- "NPP" time.mod <- "Year" # Ecosys file = organized, post-processed m.name outputs # generated with 1_generate_ecosys.R load(file.path("Data", "EcosysData.Rdata")) # Get rid of LINKAGES because it's weird & hasn't been updated ecosys <- ecosys[!ecosys$Model=="linkages",] summary(ecosys) for(m in unique(ecosys$Model)){ print("-------------------------------------") print(paste0("------ Processing Model: ", m, " ------")) # Taking the subsets of data we want in a single gam dat.subsets <- ecosys$Resolution == "t.001" & ecosys$Model == m # What will our spatio-temporal explanatory factor ("Biomass") be? if(!is.na(mean(ecosys[dat.subsets,"AGB"]))) biomass.mod="AGB" else biomass.mod="LAI" data.temp <- ecosys[dat.subsets, c("Model", "Model.Order", "Site", "Year")] data.temp$PlotID <- ecosys[dat.subsets,"Site" ] data.temp$TreeID <- as.factor(NA) data.temp$Y <- ecosys[dat.subsets,response] data.temp$Biomass <- ecosys[dat.subsets,biomass.mod] data.temp$Time <- ecosys[dat.subsets,time.mod] data.temp[,predictors.all] <- ecosys[dat.subsets, paste0(predictors.all, predictor.suffix)] data.temp$Resolution <- ecosys[dat.subsets,"Resolution"] # Getting rid of NAs in predictors data.temp <- data.temp[complete.cases(data.temp[,c(predictors.all, "Y", "Biomass", "Time")]),] # Copy the response variable & some other things for the model paleon.models[[paste(m)]] <- data.temp } # End Model Loop } # End Model setup # ---------------------------------------- # ---------------------------------------- # 1.b. 
Load & set up tree ring NPP # ---------------------------------------- { # Define what our response & Biomass variables will be response <- "ABI.area" biomass.mod <- "AB.area" time.mod <- "plot.Age" # Load Tree ring NPP data spp.npp <- read.csv(file.path("Data", "TreeRing_NPP_PlotSpecies.csv")) summary(spp.npp) # aggregate to total plot NPP (ABI.area) plot.npp <- aggregate(spp.npp[,c(response, biomass.mod)], by=spp.npp[,c("Site", "Site2", "PlotID", "Year")], FUN=sum) plot.npp[,c(paste0(predictors.all, predictor.suffix))] <- aggregate(spp.npp[,c(paste0(predictors.all, predictor.suffix))], by=spp.npp[,c("Site", "Site2", "PlotID", "Year")], FUN=mean)[,c(paste0(predictors.all, predictor.suffix))] summary(plot.npp) # ------- # Loading tree ring data & extracting plot age # ------- tree.rings <- read.csv("Data/TreeRing_RingWidths.csv") summary(tree.rings) plot.age <- aggregate(tree.rings[,c("Age", "DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=mean, na.rm=T) plot.age$BA.sum <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){sum(pi*((x/2)^2), na.rm=T)})[,3] plot.age$BA.mean <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){mean(pi*((x/2)^2), na.rm=T)})[,3] names(plot.age)[3:ncol(plot.age)] <- paste0("plot.", names(plot.age)[3:ncol(plot.age)]) summary(plot.age) plot.npp <- merge(plot.npp, plot.age, all.x=T, all.y=F) summary(plot.npp) # ------- # Subset a period where we're not worried about plot.npp <- plot.npp[complete.cases(plot.npp) & plot.npp$Year>=(2010-30),] summary(plot.npp) # Add the data to paleon.models paleon.models[["TreeRingNPP"]] <- plot.npp[,c("Site", "PlotID", "Year")] paleon.models$TreeRingNPP$Model <- as.factor("TreeRingNPP") paleon.models$TreeRingNPP$Model.Order <- as.factor("Tree Ring NPP") paleon.models$TreeRingNPP$TreeID <- as.factor(NA) paleon.models$TreeRingNPP$Y <- plot.npp[,response] paleon.models$TreeRingNPP$Biomass <- plot.npp[,biomass.mod] 
paleon.models$TreeRingNPP$Time <- plot.npp[,time.mod] paleon.models$TreeRingNPP[,predictors.all] <- plot.npp[, paste0(predictors.all, predictor.suffix)] paleon.models$TreeRingNPP$Resolution <- as.factor("t.001") # Make sure everything is complete cases paleon.models$TreeRingNPP <- paleon.models$TreeRingNPP[complete.cases(paleon.models$TreeRingNPP[,c(predictors.all, "Y", "Biomass", "Time")]),] # Order everything the same way to make life easier paleon.models$TreeRingNPP <- paleon.models$TreeRingNPP[,names(paleon.models[[1]])] summary(paleon.models$TreeRingNPP) } # End Tree Ring NPP setup # ---------------------------------------- # ---------------------------------------- # 1.c. Load & set up raw ring widths # ---------------------------------------- { response <- "RW" biomass.mod <- "DBH" time.mod <- "Age" tree.rings <- read.csv("Data/TreeRing_RingWidths.csv") summary(tree.rings) # subset only complete cases where we have met data & ring width measurements tree.rings <- tree.rings[complete.cases(tree.rings[,c(response, paste0(predictors.all, predictor.suffix))]) & tree.rings$Resolution=="t.001",] summary(tree.rings) # ----------- # Adding in a few other factors that could be good predictors # ----------- plot.age <- aggregate(tree.rings[,c("Age", "DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=mean, na.rm=T) plot.age$BA.sum <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){sum(pi*((x/2)^2), na.rm=T)})[,3] plot.age$BA.mean <- aggregate(tree.rings[,c("DBH")], by=tree.rings[,c("PlotID", "Year")], FUN=function(x){mean(pi*((x/2)^2), na.rm=T)})[,3] names(plot.age)[3:ncol(plot.age)] <- paste0("plot.", names(plot.age)[3:ncol(plot.age)]) summary(plot.age) tree.rings <- merge(tree.rings, plot.age, all.x=T, all.y=T) tree.rings$BA <- pi*(tree.rings$DBH/2)^2 summary(tree.rings) # ----------- # Add the data to paleon.models paleon.models[["TreeRingRW"]] <- tree.rings[,c("Site", "PlotID", "TreeID", "Year")] paleon.models$TreeRingRW$Model 
<- as.factor("TreeRingRW") paleon.models$TreeRingRW$Model.Order <- as.factor("Tree Ring RW") paleon.models$TreeRingRW[,predictors.all] <- tree.rings[,paste0(predictors.all, predictor.suffix)] paleon.models$TreeRingRW$Y <- tree.rings[,response] paleon.models$TreeRingRW$Biomass <- tree.rings[,biomass.mod] paleon.models$TreeRingRW$Time <- tree.rings[,time.mod] paleon.models$TreeRingRW$Resolution <- tree.rings[,"Resolution"] # Make sure everything is complete cases paleon.models$TreeRingRW <- paleon.models$TreeRingRW[complete.cases(paleon.models$TreeRingRW[,c(predictors.all, "Y", "Biomass", "Time")]),] # Order everything the same way to make life easier paleon.models$TreeRingRW <- paleon.models$TreeRingRW[,names(paleon.models[[1]])] summary(paleon.models$TreeRingRW) } # End Ring Width setups # ---------------------------------------- } # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 2. Run the gamms # ------------------------------------------------------------------------------- cores.use <- min(12, length(paleon.models)) # cores.use <- length(paleon.models) models.base <- mclapply(paleon.models, paleon.gams.models, mc.cores=cores.use, k=k, predictors.all=predictors.all, PFT=F) # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 3. 
Bind Models together to put them in a single object to make them easier to work with # ------------------------------------------------------------------------------- { for(i in 1:length(models.base)){ if(i==1) { mod.out <- list() mod.out$data <- models.base[[i]]$data mod.out$weights <- models.base[[i]]$weights mod.out$ci.response <- models.base[[i]]$ci.response mod.out$sim.response <- models.base[[i]]$sim.response mod.out$ci.terms <- models.base[[i]]$ci.terms mod.out$sim.terms <- models.base[[i]]$sim.terms mod.out[[paste("gamm", names(models.base)[i], "PlotRF_BM", sep=".")]] <- models.base[[i]]$gamm } else { mod.out$data <- rbind(mod.out$data, models.base[[i]]$data) mod.out$weights <- rbind(mod.out$weights, models.base[[i]]$weights) mod.out$ci.response <- rbind(mod.out$ci.response, models.base[[i]]$ci.response) mod.out$sim.response <- rbind(mod.out$sim.response, models.base[[i]]$sim.response) mod.out$ci.terms <- rbind(mod.out$ci.terms, models.base[[i]]$ci.terms) mod.out$sim.terms <- rbind(mod.out$sim.terms, models.base[[i]]$sim.terms) mod.out[[paste("gamm", names(models.base)[i], "PlotRF_BM", sep=".")]] <- models.base[[i]]$gamm } } save(mod.out, file=file.path(dat.dir, "gamm_PlotRF_BM.Rdata")) } # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # 4. 
Diagnostic Graphs # ------------------------------------------------------------------------------- { m.order <- unique(mod.out$data$Model.Order) col.model <- c(paste(model.colors[model.colors$Model.Order %in% m.order,"color"]), "black", "gray30") pdf(file.path(fig.dir, "GAMM_ModelFit_PlotRF_BM.pdf")) { print( ggplot(data=mod.out$ci.response[!substr(mod.out$ci.response$Model, 1, 8)=="TreeRing",]) + facet_grid(PlotID~ Model, scales="free") + theme_bw() + geom_line(data= mod.out$data[!substr(mod.out$data$Model, 1, 8)=="TreeRing",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="NPP") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingNPP",]) + facet_wrap(~ PlotID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingNPP",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="NPP") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingRW" & mod.out$ci.response$PlotID=="ME029",]) + facet_wrap(~ TreeID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingRW" & mod.out$data$PlotID=="ME029",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, 
fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="RW") ) print( ggplot(data=mod.out$ci.response[mod.out$ci.response$Model=="TreeRingRW" & mod.out$ci.response$PlotID=="TP1",]) + facet_wrap(~ TreeID, scales="fixed") + theme_bw() + geom_line(data= mod.out$data[mod.out$data$Model=="TreeRingRW" & mod.out$data$PlotID=="TP1",], aes(x=Year, y=Y), alpha=0.5) + geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=Year, y=mean, color=Model), size=0.35) + # scale_x_continuous(limits=c(1900,2010)) + # scale_y_continuous(limits=quantile(mod.out$data[mod.out$data$Year>=1900,"response"], c(0.01, 0.99),na.rm=T)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste("PlotRF_BM"), x="Year", y="RW") ) } dev.off() mod.out$ci.terms$x <- as.numeric(paste(mod.out$ci.terms$x)) summary(mod.out$ci.terms) pdf(file.path(fig.dir, "GAMM_DriverSensitivity_PlotRF_BM.pdf"), height=8.5, width=11) { print( ggplot(data=mod.out$ci.terms[mod.out$ci.terms$Effect %in% c("tair", "precipf", "CO2"),]) + facet_wrap(~ Effect, scales="free_x") + theme_bw() + geom_ribbon(aes(x=x, ymin=lwr, ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=x, y=mean, color=Model), size=2) + geom_hline(yintercept=0, linetype="dashed") + scale_y_continuous(limits=c(-5, 5)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste0("Driver Sensitivity (not Relativized)"), y=paste0("NPP Contribution")) # + ) print( ggplot(data=mod.out$ci.terms[mod.out$ci.terms$Effect %in% c("Biomass"),]) + facet_wrap( ~ Model, scales="free_x") + theme_bw() + geom_ribbon(aes(x=x, ymin=lwr, 
ymax=upr, fill=Model), alpha=0.5) + geom_line(aes(x=x, y=mean, color=Model), size=2) + geom_hline(yintercept=0, linetype="dashed") + scale_y_continuous(limits=c(-5, 5)) + scale_fill_manual(values=paste(col.model)) + scale_color_manual(values=paste(col.model)) + labs(title=paste0("Driver Sensitivity (not Relativized)"), y=paste0("NPP Contribution")) ) } dev.off() } ## -------------------------------------------------------------------------------
#Exercise1 x <- c(10.4,5.6,3.1,6.4,21.7) sum(x) # 47.2 mean(x) # 9.44 median(x) # 6.4 sd(x) # 7.33846 var(x) # 53.853 #--------------------------------------------------------------------------------------------------------------------- #Exercise2.1 # Because the data.frame is a data structure that can combine the same category and list data into a table. # Therefore, it is appropriate to combine data that must be matched. names <- c("Iron Man","The Incredible Hulk","Iron Man 2","Thor","Captain America: The First Avenger", "The Avengers","Iron Man 3","Thor: The Dark World","Captain America: The Winter Soldier", "Guardians of the Galaxy","Avengers: Age of Ultron","Ant-Man","Captain America: Civil War", "Doctor Strange","Guardians of the Galaxy 2","Spider-Man: Homecoming","Thor: Ragnarok","Black Panther", "Avengers: Infinity War","Ant-Man and the Wasp","Captain Marvel","Avengers: Endgame", "Spider-Man: Far From Home","WandaVision","Falcon and the Winter Soldier","Loki","Black Widow") years <- c(2008,2008,2010,2011,2011,2012,rep(2013:2016,each=2), rep(2017,4),rep(2018,2),rep(2019,3),rep(2021,4)) marvel_movies <- data.frame(names,years) View(marvel_movies) #--------------------------------------------------------------------------------------------------------------------- #Exercise2.2 #The numbers of movies length(names) # 27 #Finding the 19th movies name names[19] # Avengers: Infinity War #Which year is most released movies table(years) sort(table(years), decreasing = TRUE) # 2017 and 2021
/assignment/HW1/HW01_63130500055.R
permissive
jirasin-c/008-Netflix-Original-Films-And-IMDB-Scores
R
false
false
1,646
r
#Exercise1 x <- c(10.4,5.6,3.1,6.4,21.7) sum(x) # 47.2 mean(x) # 9.44 median(x) # 6.4 sd(x) # 7.33846 var(x) # 53.853 #--------------------------------------------------------------------------------------------------------------------- #Exercise2.1 # Because the data.frame is a data structure that can combine the same category and list data into a table. # Therefore, it is appropriate to combine data that must be matched. names <- c("Iron Man","The Incredible Hulk","Iron Man 2","Thor","Captain America: The First Avenger", "The Avengers","Iron Man 3","Thor: The Dark World","Captain America: The Winter Soldier", "Guardians of the Galaxy","Avengers: Age of Ultron","Ant-Man","Captain America: Civil War", "Doctor Strange","Guardians of the Galaxy 2","Spider-Man: Homecoming","Thor: Ragnarok","Black Panther", "Avengers: Infinity War","Ant-Man and the Wasp","Captain Marvel","Avengers: Endgame", "Spider-Man: Far From Home","WandaVision","Falcon and the Winter Soldier","Loki","Black Widow") years <- c(2008,2008,2010,2011,2011,2012,rep(2013:2016,each=2), rep(2017,4),rep(2018,2),rep(2019,3),rep(2021,4)) marvel_movies <- data.frame(names,years) View(marvel_movies) #--------------------------------------------------------------------------------------------------------------------- #Exercise2.2 #The numbers of movies length(names) # 27 #Finding the 19th movies name names[19] # Avengers: Infinity War #Which year is most released movies table(years) sort(table(years), decreasing = TRUE) # 2017 and 2021
#****************************************************************** #Title: MusicDataSetNormalisation.R Script #Date: 2016 #Author: Shane Coleman #Date Created: March 2016 #Modified: User Created R Script (MusicDataSetNormalisation.R) #Modified / Refactored Code Referenced Below #****************************************************************** #Full List of Attributes #http://www.inside-r.org/packages/cran/dprep/docs/mmnorm install.packages("dprep") library(dprep) musicDataSet <- read.csv("MusicDataSet.csv", sep = ",", stringsAsFactors = TRUE, header = TRUE) #****************************************************************** #Title: Using apply, sapply, lapply in R #Site Owner / Sponsor: r-bloggers.com #Date: 2012 #Author: Pete #Availability: http://www.r-bloggers.com/using-apply-sapply-lapply-in-r/ #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) sapply(musicDataSet, class) #****************************************************************** musicDataSet[1:9] <- sapply(musicDataSet[1:9],as.numeric) sapply(musicDataSet, class) musicData <- musicDataSet head(musicData) write.csv(musicData, file = "MusicData.csv") #****************************************************************** #Title: Min-max normalization #Site Owner / Sponsor: http://finzi.psych.upenn.edu/ #Author: Caroline Rodriguez and Edgar Acuna #Availability: http://finzi.psych.upenn.edu/library/dprep/html/mmnorm.html #Other Availability: http://www.public.iastate.edu/~maitra/stat501/Rcode/mmnorm.R #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) musicDataNorm <- mmnorm(musicData[1:9], minval = 0, maxval = 1) head(musicDataNorm) #****************************************************************** musicDataNorm$genre <- musicDataNorm$genre musicDataNorm$genre2 <- musicDataNorm$genre musicDataNorm$genre3 <- musicDataNorm$genre musicDataNorm$genre4 <- musicDataNorm$genre head(musicDataNorm) musicDataNorm$genre[musicDataNorm$genre == 1] <- 0 
musicDataNorm$genre2[musicDataNorm$genre2 == 1] <- 1 musicDataNorm$genre3[musicDataNorm$genre3 == 1] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 1] <- 0 musicDataNorm$genre[musicDataNorm$genre == 2] <- 0 musicDataNorm$genre2[musicDataNorm$genre2 == 2] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 2] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 2] <- 1 musicDataNorm$genre[musicDataNorm$genre == 3] <- 0 musicDataNorm$genre2[musicDataNorm$genre2 == 3] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 3] <- 1 musicDataNorm$genre4[musicDataNorm$genre4 == 3] <- 0 musicDataNorm$genre[musicDataNorm$genre == 4] <- 1 musicDataNorm$genre2[musicDataNorm$genre2 == 4] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 4] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 4] <- 0 #****************************************************************** #Title: How do you delete the header in a dataframe? #Site Owner / Sponsor: stackoverflow.com #Date: 2013 #Author: Karsten W. #Availability: http://stackoverflow.com/questions/18175602/how-do-you-delete-the-header-in-a-dataframe #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) names(musicDataNorm) <- NULL head(musicDataNorm) #****************************************************************** write.csv(musicDataNorm, file = "MusicDataNorm.csv",row.names = FALSE)
/MusicDataSetNormalisation.R
no_license
ShaneColeman/FYP_RStudio
R
false
false
3,346
r
#****************************************************************** #Title: MusicDataSetNormalisation.R Script #Date: 2016 #Author: Shane Coleman #Date Created: March 2016 #Modified: User Created R Script (MusicDataSetNormalisation.R) #Modified / Refactored Code Referenced Below #****************************************************************** #Full List of Attributes #http://www.inside-r.org/packages/cran/dprep/docs/mmnorm install.packages("dprep") library(dprep) musicDataSet <- read.csv("MusicDataSet.csv", sep = ",", stringsAsFactors = TRUE, header = TRUE) #****************************************************************** #Title: Using apply, sapply, lapply in R #Site Owner / Sponsor: r-bloggers.com #Date: 2012 #Author: Pete #Availability: http://www.r-bloggers.com/using-apply-sapply-lapply-in-r/ #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) sapply(musicDataSet, class) #****************************************************************** musicDataSet[1:9] <- sapply(musicDataSet[1:9],as.numeric) sapply(musicDataSet, class) musicData <- musicDataSet head(musicData) write.csv(musicData, file = "MusicData.csv") #****************************************************************** #Title: Min-max normalization #Site Owner / Sponsor: http://finzi.psych.upenn.edu/ #Author: Caroline Rodriguez and Edgar Acuna #Availability: http://finzi.psych.upenn.edu/library/dprep/html/mmnorm.html #Other Availability: http://www.public.iastate.edu/~maitra/stat501/Rcode/mmnorm.R #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) musicDataNorm <- mmnorm(musicData[1:9], minval = 0, maxval = 1) head(musicDataNorm) #****************************************************************** musicDataNorm$genre <- musicDataNorm$genre musicDataNorm$genre2 <- musicDataNorm$genre musicDataNorm$genre3 <- musicDataNorm$genre musicDataNorm$genre4 <- musicDataNorm$genre head(musicDataNorm) musicDataNorm$genre[musicDataNorm$genre == 1] <- 0 
musicDataNorm$genre2[musicDataNorm$genre2 == 1] <- 1 musicDataNorm$genre3[musicDataNorm$genre3 == 1] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 1] <- 0 musicDataNorm$genre[musicDataNorm$genre == 2] <- 0 musicDataNorm$genre2[musicDataNorm$genre2 == 2] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 2] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 2] <- 1 musicDataNorm$genre[musicDataNorm$genre == 3] <- 0 musicDataNorm$genre2[musicDataNorm$genre2 == 3] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 3] <- 1 musicDataNorm$genre4[musicDataNorm$genre4 == 3] <- 0 musicDataNorm$genre[musicDataNorm$genre == 4] <- 1 musicDataNorm$genre2[musicDataNorm$genre2 == 4] <- 0 musicDataNorm$genre3[musicDataNorm$genre3 == 4] <- 0 musicDataNorm$genre4[musicDataNorm$genre4 == 4] <- 0 #****************************************************************** #Title: How do you delete the header in a dataframe? #Site Owner / Sponsor: stackoverflow.com #Date: 2013 #Author: Karsten W. #Availability: http://stackoverflow.com/questions/18175602/how-do-you-delete-the-header-in-a-dataframe #Date Accessed: March 2016 #Modified: Code refactord (Data frame altered) names(musicDataNorm) <- NULL head(musicDataNorm) #****************************************************************** write.csv(musicDataNorm, file = "MusicDataNorm.csv",row.names = FALSE)
################################################################################################################################### # Functions for doing the gene presence?absence analysis ################################################################################################################################### # Given a metabolite abundance normalized matrix, gets the cluster each strain belong among 4 major clusters getMetClusts <- function(normMets, methodDist = "euclidean", methodAgg = "ward.D"){ distMat <- dist(normMets, method = methodDist) HCA <- hclust(distMat, method = methodAgg) split <- cutree(HCA, k = 2) distMat1 <- dist(normMets[split == 1, ], method = methodDist) HCA1 <- hclust(distMat1, method = methodAgg) split1 <- cutree(HCA1, k = 2) distMat2 <- dist(normMets[split == 2, ], method = methodDist) HCA2 <- hclust(distMat2, method = methodAgg) split2 <- cutree(HCA2, k = 2) metClusts <- c() clustVec <- c() for(i in 1:nrow(normMets)){ if(split[i] == 1){ if(split1[names(split[i])] == 1){clustVec <- c(clustVec, "1.1")} if(split1[names(split[i])] == 2){clustVec <- c(clustVec, "1.2")} } if(split[i] == 2){ if(split2[names(split[i])] == 1){clustVec <- c(clustVec, "2.1")} if(split2[names(split[i])] == 2){clustVec <- c(clustVec, "2.2")} } } clustVec <- as.factor(clustVec) names(clustVec) <- names(split) strainNames <- unique(gsub("\\_.*|(PA14).*", rep = "\\1", names(clustVec))) metClusts <- c() for(i in seq_along(strainNames)){ metClusts <- c(metClusts, names(which.max(table(clustVec[gsub("\\_.*|(PA14).*", rep = "\\1", names(clustVec)) == strainNames[i]])))) } metClusts <- as.factor(metClusts) names(metClusts) <- strainNames return(metClusts) } # Given the gene presence/absence matrix and the metClust vector, yields a contingency table for each gene and cluster getContTab <- function(genePresAbsObjkt, metClustObjkt){ cont_tab <- c() groups <- sort(unique(metClustObjkt)) for(i in seq_along(groups)){ cont_tab <- rbind(cont_tab, 
colSums(genePresAbsObjkt[metClustObjkt == groups[i], ])) } rownames(cont_tab) <- groups return(cont_tab) } # Does a Fisher test between the 4 major clusters for each gene doFisher <- function(cont_tab, metClustObjkt, alpha = 0.05){ pb = txtProgressBar(min = 0, max = ncol(cont_tab), initial = 0) clust_total <- table(metClustObjkt) fishResult <- matrix(nrow = ncol(cont_tab), ncol = 2, dimnames = list(NULL, c("Gene", "p.value"))) for(i in 1:ncol(cont_tab)){ geneContTab <- rbind(cont_tab[, i], clust_total - cont_tab[, i]) fishResult[i, 1] <- colnames(cont_tab)[i] fishResult[i, 2] <- fisher.test(geneContTab)[[1]] setTxtProgressBar(pb, i) } fishSign <- fishResult[fishResult[, 2] <= 0.05, ] return(list("all.genes" = fishResult, "sign.genes" = fishSign)) } doFisher <- function(cont_tab, metClustObjkt, alpha = 0.05, padj = T, method = "BH"){ pb = txtProgressBar(min = 0, max = ncol(cont_tab), initial = 0) clust_total <- table(metClustObjkt) gens <- c() pVals <- c() for(i in 1:ncol(cont_tab)){ geneContTab <- rbind(cont_tab[, i], clust_total - cont_tab[, i]) gen <- colnames(cont_tab)[i] pVal <- fisher.test(geneContTab)[[1]] gens <- c(gens, gen) pVals <- c(pVals, pVal) setTxtProgressBar(pb, i) } fishResult <- data.frame(genes = gens, p.value = pVals, p.adj = p.adjust(pVals, method = method)) if(padj == T){ fishSign <- fishResult[fishResult[, 3] <= 0.05, ] }else{ fishSign <- fishResult[fishResult[, 2] <= 0.05, ] } return(list("all.genes" = fishResult, "sign.genes" = fishSign)) } # Given a metabolite abundance normalized matrix and a gene presence/absence matrix, performs a Mann Whitney test for # each metabolite versus each gene, according to if the strains have or not the gene: determines if the strains that # have or not have a gene are different in the terms of the abundance of each metabolite. The output is a matrix of # p-values and a matrix of z-scores. 
# Runs a Mann-Whitney (Wilcoxon rank-sum) test of every gene against every
# metabolite: for each gene the strains are split into carriers / non-carriers
# of that gene, and each metabolite's abundance is compared between the two
# groups. Returns a list with a genes-x-metabolites matrix of p-values and a
# matching matrix of z-scores (rows containing any NA are dropped from both).
#
# genePresAbsObjkt        strains x genes 0/1 presence/absence matrix.
# metMatObjkt             samples x metabolites abundance matrix; sample
#                         rownames are matched to strain names via grepl()
#                         substring matching. NOTE(review): substring matching
#                         can over-match strain names that are prefixes of
#                         others — confirm exact matching is not intended.
# p_adjust / method       when p_adjust is TRUE, each gene's (complete) row of
#                         p-values is adjusted with p.adjust(method); method
#                         is mandatory in that case (the function stops
#                         otherwise).
# remove.underrepresented when TRUE, a gene is only tested if both the carrier
#   / threshold           and non-carrier groups contain more than `threshold`
#                         strains; untested cells stay NA.
getMannWhitPerGene <- function(genePresAbsObjkt, metMatObjkt, p_adjust = T, method = NULL, remove.underrepresented = T, threshold = 1){
  # wilcoxon() comes from the uwIntroStats package (not base R).
  if(!require(uwIntroStats)) install.packages('uwIntroStats')
  library(uwIntroStats)
  pb = txtProgressBar(min = 0, max = ncol(genePresAbsObjkt), initial = 0)
  mannWhitPerGene <- matrix(nrow = ncol(genePresAbsObjkt), ncol = ncol(metMatObjkt))
  zScore <- matrix(nrow = ncol(genePresAbsObjkt), ncol = ncol(metMatObjkt))
  for(j in 1:ncol(genePresAbsObjkt)){
    # Strains that do / do not carry gene j.
    genePosi <- rownames(genePresAbsObjkt)[genePresAbsObjkt[, j] == 1]
    geneNega <- rownames(genePresAbsObjkt)[genePresAbsObjkt[, j] == 0]
    for(h in 1:ncol(metMatObjkt)){
      if(remove.underrepresented == T){
        if(length(genePosi) > threshold && length(geneNega) > threshold){
          # NOTE(review): wilcoxon() is evaluated twice per (gene, metabolite)
          # pair — once for the p-value and once for the z-score; calling it
          # once and reusing the result would halve the runtime.
          p_val <- wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$p.value
          mannWhitPerGene[j, h] <- p_val
          # $inf[2, 1] is taken to be the z statistic of the test — TODO
          # confirm against the uwIntroStats::wilcoxon() return layout.
          z_score <- as.numeric(wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$inf[2, 1])
          zScore[j, h] <- z_score
        }
      }else{
        # Same computation without the group-size guard.
        p_val <- wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$p.value
        mannWhitPerGene[j, h] <- p_val
        z_score <- as.numeric(wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$inf[2, 1])
        zScore[j, h] <- z_score
      }
    }
    if(p_adjust == T && is.null(method)){
      stop( "Please select p-adjusting method")
    }else if(p_adjust == T && !is.null(method)){
      # Rows with any untested (NA) cell are left unadjusted; such rows are
      # dropped below anyway.
      if(anyNA(mannWhitPerGene[j, ]) == F){
        mannWhitPerGene[j, ] <- p.adjust(mannWhitPerGene[j, ], method)
      }
    }
    setTxtProgressBar(pb, j)
  }
  rownames(mannWhitPerGene) <- colnames(genePresAbsObjkt)
  colnames(mannWhitPerGene) <- colnames(metMatObjkt)
  # Keep only genes with a complete row of results.
  mannWhitPerGene <- mannWhitPerGene[!is.na(rowSums(mannWhitPerGene)), ]
  rownames(zScore) <- colnames(genePresAbsObjkt)
  colnames(zScore) <- colnames(metMatObjkt)
  zScore <- zScore[!is.na(rowSums(zScore)), ]
  return(list("p.value" = mannWhitPerGene, "z.score" = zScore))
}
# Filters the p-value matrix obtained in the previous step (mannWhitPerGeneObjkt)
# to a given alpha (default: alpha = 0.05). Returns, per gene, the names of the
# metabolites whose p-value passes the threshold, both as a list and as a
# ragged character matrix (one column per gene).
filtMannWhitPerGene <- function(mannWhitPerGeneObjkt, alpha = 0.05){
  # rbind.fill.matrix() comes from plyr.
  if(!require(plyr)) install.packages('plyr')
  library(plyr)
  mannWhitPerGeneObjkt <- mannWhitPerGeneObjkt$p.value
  FUN <- function(x) t(as.matrix(names(x)[(x <= alpha)]))
  # NOTE(review): apply() simplifies to a matrix when every gene yields the
  # same number of significant metabolites; in that case difMetsPerGene is not
  # a list and rbind.fill.matrix() may misbehave — confirm.
  difMetsPerGene <- apply(mannWhitPerGeneObjkt, 1, FUN)
  difMetsPerGeneMat <- t(rbind.fill.matrix(difMetsPerGene))
  colnames(difMetsPerGeneMat) <- names(difMetsPerGene)
  return(list("as_list" = difMetsPerGene, "as_mat" = difMetsPerGeneMat))
}
# Given diffMetObjkt (differential metabolites between each pair of clusters,
# one column per cluster pair) and the object generated by the previous
# function (mannWhitPerGeneFiltObjkt), returns, for every differential
# metabolite, the names of all genes whose presence/absence is associated with
# differences in abundance of that metabolite.
getDiffGenePerDiffMet <- function(mannWhitPerGeneFiltObjkt, diffMetObjkt){
  pb = txtProgressBar(min = 0, max = length(diffMetObjkt[!is.na(diffMetObjkt)]), initial = 0)
  pbAv <- 0
  mannWhit <- mannWhitPerGeneFiltObjkt$as_list
  diffGenesPerDiffMet <- list()
  for(i in 1:ncol(diffMetObjkt)){
    genesRelated2Met <- list()
    for(j in 1:length(diffMetObjkt[, i][!is.na(diffMetObjkt[, i])])){
      # Genes whose significant-metabolite list contains metabolite j.
      FUN <- function(x) any(x %in% diffMetObjkt[j, i])
      genes <- names(which(lapply(mannWhit, FUN) == T))
      if(length(genes) > 0){
        genesRelated2Met[[j]] <- genes
      }else{
        # Sentinel string; downstream functions filter on this exact text.
        genesRelated2Met[[j]] <- "Any relationship between the gene and the metabolite"
      }
      pbAv <- pbAv + 1
      setTxtProgressBar(pb, pbAv)
    }
    names(genesRelated2Met) <- diffMetObjkt[, i][!is.na(diffMetObjkt[, i])]
    diffGenesPerDiffMet[[i]] <- genesRelated2Met
  }
  names(diffGenesPerDiffMet) <- colnames(diffMetObjkt)
  return(diffGenesPerDiffMet)
}
# Given the presence/absence matrix (genePresAbsObjkt), the output of the
# previous function (DGenesPerDMetsObjkt) and metClustObjkt (the cluster each
# strain belongs to), builds one matrix per differential metabolite: the genes
# associated with that metabolite (columns) across the strains of the two
# compared clusters (rows), separated by a row of NAs, plus a Total column of
# per-strain gene counts.
getGeneMatsPerMet <- function(DGenesPerDMetsObjkt, genePresAbsObjkt, metClustObjkt){
  geneMatsPerMet <- list()
  for(i in seq_along(DGenesPerDMetsObjkt)){
    # List names encode the compared clusters, e.g. "1&2" or "1.1&1.2".
    groups <- strsplit(names(DGenesPerDMetsObjkt)[i], "&")[[1]]
    metMats <- list()
    notEmpty <- DGenesPerDMetsObjkt[[i]][DGenesPerDMetsObjkt[[i]] != "Any relationship between the gene and the metabolite"]
    for(h in seq_along(notEmpty)){
      if(i == 1){
        # The first comparison is between the top-level clusters ("1" vs "2"):
        # strip the sub-cluster suffix before matching.
        group1 <- names(metClustObjkt[gsub("\\..*", rep = "", metClustObjkt) == groups[1]])
        group2 <- names(metClustObjkt[gsub("\\..*", rep = "", metClustObjkt) == groups[2]])
      }else{
        group1 <- names(metClustObjkt[metClustObjkt == groups[1]])
        group2 <- names(metClustObjkt[metClustObjkt == groups[2]])
      }
      metMat1 <- as.matrix(genePresAbsObjkt[group1, notEmpty[[h]]])
      metMat2 <- as.matrix(genePresAbsObjkt[group2, notEmpty[[h]]])
      # NA spacer row marks the boundary between the two clusters; downstream
      # code (mannWhit_tabs) relies on it to split the groups again.
      metMat <- rbind(metMat1, rep(NA, length(notEmpty[[h]])), metMat2)
      colnames(metMat) <- notEmpty[[h]]
      metMat <- cbind(metMat, Total = rowSums(metMat))
      metMats[[h]] <- metMat
    }
    names(metMats) <- names(notEmpty)
    geneMatsPerMet[[i]] <- metMats
  }
  names(geneMatsPerMet) <- names(DGenesPerDMetsObjkt)
  return(geneMatsPerMet)
}
# Takes the matrices generated in the previous step (geneMatMetObjkt) and runs
# a Mann-Whitney test between the two clusters' strain vectors, gene by gene,
# and also on the per-strain Total column. Returns both p-value tables.
mannWhit_tabs <- function(geneMatMetObjkt){
  mannWhitTabs <- list()
  allDiffMets <- lapply(geneMatMetObjkt, names)
  allDiffMets <- unique(unlist(allDiffMets))
  totalMannWhit <- matrix(nrow = length(allDiffMets), ncol = length(geneMatMetObjkt), dimnames = list(allDiffMets, names(geneMatMetObjkt)))
  for(ii in seq_along(geneMatMetObjkt)){
    allDiffGenes <- lapply(geneMatMetObjkt[[ii]], colnames)
    allDiffGenes <- unique(unlist(allDiffGenes))
    allDiffGenes <- allDiffGenes[-which(allDiffGenes == "Total")]
    # The NA spacer row inserted by getGeneMatsPerMet() separates the two
    # clusters: rows 1..(equator-1) are cluster 1, rows (equator+1)..upper are
    # cluster 2. All matrices of one cluster pair share the same strains, so
    # the split computed from the first matrix applies to all of them.
    equator <- which(is.na(geneMatMetObjkt[[ii]][[1]][, 1]))
    upper <- length(geneMatMetObjkt[[ii]][[1]][, 1])
    metGenes <- matrix(nrow = length(geneMatMetObjkt[[ii]]), ncol = length(allDiffGenes))
    rownames(metGenes) <- names(geneMatMetObjkt[[ii]])
    colnames(metGenes) <- allDiffGenes
    for(j in 1:length(geneMatMetObjkt[[ii]])){
      for(i in 1:(ncol(geneMatMetObjkt[[ii]][[j]]) - 1)){
        # wilcox.test(...)[[3]] extracts the p.value component.
        metGenes[names(geneMatMetObjkt[[ii]][j]), colnames(geneMatMetObjkt[[ii]][[j]])[i]] <- wilcox.test(as.numeric(geneMatMetObjkt[[ii]][[j]][1:(equator - 1), i]), as.numeric(geneMatMetObjkt[[ii]][[j]][(equator + 1):upper, i]))[[3]]
      }
      # Same comparison on the per-strain gene-count totals.
      totalMannWhit[names(geneMatMetObjkt[[ii]])[j], ii] <- wilcox.test(as.numeric(geneMatMetObjkt[[ii]][[j]][1:(equator - 1), "Total"]), as.numeric(geneMatMetObjkt[[ii]][[j]][(equator + 1):upper, "Total"]))[[3]]
    }
    mannWhitTabs[[ii]] <- metGenes
  }
  names(mannWhitTabs) <- names(geneMatMetObjkt)
  return(list("Individual_genes" = mannWhitTabs, "Totals" = totalMannWhit))
}
# Filters the p-values generated by mannWhit_tabs() to a given alpha (default:
# alpha = 0.05). Returns, per cluster pair and metabolite, the genes that pass
# the threshold individually, and separately the genes available for the
# metabolites whose Total comparison is significant.
filtMannWhitTabs <- function(manWhitTabsObjkt, alpha = 0.05){
  single <- manWhitTabsObjkt$Individual_genes
  totals <- manWhitTabsObjkt$Totals
  filteredTabs <- list()
  filteredTabs_Total <- list()
  for(ii in seq_along(single)){
    diffGenesPerMet <- list()
    for(i in 1:nrow(single[[ii]])){
      diffGenes <- c()
      for(j in 1:ncol(single[[ii]])){
        if(!is.na(single[[ii]][i, j]) && single[[ii]][i, j] <= alpha){
          diffGenes <- c(diffGenes, colnames(single[[ii]])[j])
        }
      }
      if(is.null(diffGenes)){
        # Sentinel used when no gene passes the threshold for this metabolite.
        diffGenes <- "Any correlated gene with the metabolite found"
      }
      diffGenesPerMet[[i]] <- diffGenes
    }
    names(diffGenesPerMet) <- rownames(single[[ii]])
    filteredTabs[[ii]] <- diffGenesPerMet
    # Metabolites whose Total (gene-count) comparison is significant: collect
    # the non-NA genes tested for them.
    genesMet <- list()
    totals_sign <- names(which(totals[, ii] <= alpha))
    for(i in seq_along(totals_sign)){
      trueVec <- !is.na(single[[ii]][totals_sign[i], ])
      genesMet[[i]] <- names(single[[ii]][totals_sign[i], ])[trueVec]
    }
    names(genesMet) <- totals_sign
    filteredTabs_Total[[ii]] <- genesMet
  }
  names(filteredTabs) <- names(single)
  names(filteredTabs_Total) <- names(single)
  return(list("Individual_genes" = filteredTabs, "Total" = filteredTabs_Total))
}
/genePresAbs_functions.R
no_license
guisantagui/MSK_repo_new
R
false
false
18,315
r
###################################################################################################################################
# Functions for doing the gene presence/absence analysis
###################################################################################################################################

# Given a normalized metabolite-abundance matrix (samples x metabolites),
# assigns each strain to one of 4 major clusters by two levels of nested
# hierarchical clustering, then collapses replicate samples to one label per
# strain by majority vote.
#
# normMets    samples x metabolites matrix; rownames are sample names of the
#             form "<strain>_<replicate>" (PA14 samples are matched specially).
# methodDist  distance metric passed to dist().
# methodAgg   agglomeration method passed to hclust().
#
# Returns a named factor: one cluster label ("1.1", "1.2", "2.1", "2.2") per
# strain.
getMetClusts <- function(normMets, methodDist = "euclidean", methodAgg = "ward.D"){
  # First split: two major clusters over all samples.
  distMat <- dist(normMets, method = methodDist)
  HCA <- hclust(distMat, method = methodAgg)
  topSplit <- cutree(HCA, k = 2)
  # Second level: split each major cluster in two again (4 clusters total).
  distMat1 <- dist(normMets[topSplit == 1, ], method = methodDist)
  HCA1 <- hclust(distMat1, method = methodAgg)
  split1 <- cutree(HCA1, k = 2)
  distMat2 <- dist(normMets[topSplit == 2, ], method = methodDist)
  HCA2 <- hclust(distMat2, method = methodAgg)
  split2 <- cutree(HCA2, k = 2)
  # Label every sample with its nested cluster id.
  clustVec <- c()
  for(i in seq_len(nrow(normMets))){
    if(topSplit[i] == 1){
      if(split1[names(topSplit[i])] == 1){clustVec <- c(clustVec, "1.1")}
      if(split1[names(topSplit[i])] == 2){clustVec <- c(clustVec, "1.2")}
    }
    if(topSplit[i] == 2){
      if(split2[names(topSplit[i])] == 1){clustVec <- c(clustVec, "2.1")}
      if(split2[names(topSplit[i])] == 2){clustVec <- c(clustVec, "2.2")}
    }
  }
  clustVec <- as.factor(clustVec)
  names(clustVec) <- names(topSplit)
  # Collapse replicate samples to one label per strain by majority vote.
  strainNames <- unique(gsub("\\_.*|(PA14).*", rep = "\\1", names(clustVec)))
  metClusts <- c()
  for(i in seq_along(strainNames)){
    metClusts <- c(metClusts, names(which.max(table(clustVec[gsub("\\_.*|(PA14).*", rep = "\\1", names(clustVec)) == strainNames[i]]))))
  }
  metClusts <- as.factor(metClusts)
  names(metClusts) <- strainNames
  return(metClusts)
}

# Given the gene presence/absence matrix (strains x genes) and the metClust
# vector, yields a contingency table: one row per cluster, one column per
# gene, counting the strains of that cluster carrying each gene.
getContTab <- function(genePresAbsObjkt, metClustObjkt){
  cont_tab <- c()
  groups <- sort(unique(metClustObjkt))
  for(i in seq_along(groups)){
    # Row per cluster: number of strains in that cluster carrying each gene.
    cont_tab <- rbind(cont_tab, colSums(genePresAbsObjkt[metClustObjkt == groups[i], ]))
  }
  rownames(cont_tab) <- groups
  return(cont_tab)
}

# Does a Fisher exact test between the clusters for each gene (2 x k table of
# carriers vs non-carriers per cluster).
#
# NOTE: the original file defined doFisher twice; the first definition was
# dead code (immediately shadowed by this one) and has been removed.
#
# cont_tab      output of getContTab() (clusters x genes counts).
# metClustObjkt cluster label per strain (used only for the per-cluster
#               strain totals).
# alpha         significance threshold for the sign.genes filter.
#               BUG FIX: previously ignored (0.05 was hard-coded).
# padj / method when padj is TRUE, filter on p.adjust(p, method) instead of
#               the raw p-values.
#
# Returns a list: all.genes (data frame of gene, p.value, p.adj) and
# sign.genes (the rows passing alpha).
doFisher <- function(cont_tab, metClustObjkt, alpha = 0.05, padj = TRUE, method = "BH"){
  pb <- txtProgressBar(min = 0, max = ncol(cont_tab), initial = 0)
  clust_total <- table(metClustObjkt)
  n_genes <- ncol(cont_tab)
  gens <- character(n_genes)
  pVals <- numeric(n_genes)
  for(i in seq_len(n_genes)){
    # 2 x k table: strains with the gene vs strains without, per cluster.
    geneContTab <- rbind(cont_tab[, i], clust_total - cont_tab[, i])
    gens[i] <- colnames(cont_tab)[i]
    # $p.value instead of [[1]]: robust to component ordering of the htest.
    pVals[i] <- fisher.test(geneContTab)$p.value
    setTxtProgressBar(pb, i)
  }
  fishResult <- data.frame(genes = gens, p.value = pVals, p.adj = p.adjust(pVals, method = method))
  # Filter on the requested alpha (the original hard-coded 0.05 here and
  # silently ignored the alpha argument).
  if(padj == TRUE){
    fishSign <- fishResult[fishResult$p.adj <= alpha, ]
  }else{
    fishSign <- fishResult[fishResult$p.value <= alpha, ]
  }
  return(list("all.genes" = fishResult, "sign.genes" = fishSign))
}

# Given a metabolite abundance normalized matrix and a gene presence/absence matrix, performs a Mann Whitney test for
# each metabolite versus each gene, according to whether the strains have the gene or not: determines if the strains that
# do or do not have a gene differ in the abundance of each metabolite. The output is a matrix of
# p-values and a matrix of z-scores.
# Runs a Mann-Whitney (Wilcoxon rank-sum) test of every gene against every
# metabolite: for each gene the strains are split into carriers / non-carriers
# of that gene, and each metabolite's abundance is compared between the two
# groups. Returns a list with a genes-x-metabolites matrix of p-values and a
# matching matrix of z-scores (rows containing any NA are dropped from both).
#
# genePresAbsObjkt        strains x genes 0/1 presence/absence matrix.
# metMatObjkt             samples x metabolites abundance matrix; sample
#                         rownames are matched to strain names via grepl()
#                         substring matching. NOTE(review): substring matching
#                         can over-match strain names that are prefixes of
#                         others — confirm exact matching is not intended.
# p_adjust / method       when p_adjust is TRUE, each gene's (complete) row of
#                         p-values is adjusted with p.adjust(method); method
#                         is mandatory in that case (the function stops
#                         otherwise).
# remove.underrepresented when TRUE, a gene is only tested if both the carrier
#   / threshold           and non-carrier groups contain more than `threshold`
#                         strains; untested cells stay NA.
getMannWhitPerGene <- function(genePresAbsObjkt, metMatObjkt, p_adjust = T, method = NULL, remove.underrepresented = T, threshold = 1){
  # wilcoxon() comes from the uwIntroStats package (not base R).
  if(!require(uwIntroStats)) install.packages('uwIntroStats')
  library(uwIntroStats)
  pb = txtProgressBar(min = 0, max = ncol(genePresAbsObjkt), initial = 0)
  mannWhitPerGene <- matrix(nrow = ncol(genePresAbsObjkt), ncol = ncol(metMatObjkt))
  zScore <- matrix(nrow = ncol(genePresAbsObjkt), ncol = ncol(metMatObjkt))
  for(j in 1:ncol(genePresAbsObjkt)){
    # Strains that do / do not carry gene j.
    genePosi <- rownames(genePresAbsObjkt)[genePresAbsObjkt[, j] == 1]
    geneNega <- rownames(genePresAbsObjkt)[genePresAbsObjkt[, j] == 0]
    for(h in 1:ncol(metMatObjkt)){
      if(remove.underrepresented == T){
        if(length(genePosi) > threshold && length(geneNega) > threshold){
          # NOTE(review): wilcoxon() is evaluated twice per (gene, metabolite)
          # pair — once for the p-value and once for the z-score; calling it
          # once and reusing the result would halve the runtime.
          p_val <- wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$p.value
          mannWhitPerGene[j, h] <- p_val
          # $inf[2, 1] is taken to be the z statistic of the test — TODO
          # confirm against the uwIntroStats::wilcoxon() return layout.
          z_score <- as.numeric(wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$inf[2, 1])
          zScore[j, h] <- z_score
        }
      }else{
        # Same computation without the group-size guard.
        p_val <- wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$p.value
        mannWhitPerGene[j, h] <- p_val
        z_score <- as.numeric(wilcoxon(metMatObjkt[grepl(paste(genePosi, collapse = "|"), rownames(metMatObjkt)), h], metMatObjkt[grepl(paste(geneNega, collapse = "|"), rownames(metMatObjkt)), h])$inf[2, 1])
        zScore[j, h] <- z_score
      }
    }
    if(p_adjust == T && is.null(method)){
      stop( "Please select p-adjusting method")
    }else if(p_adjust == T && !is.null(method)){
      # Rows with any untested (NA) cell are left unadjusted; such rows are
      # dropped below anyway.
      if(anyNA(mannWhitPerGene[j, ]) == F){
        mannWhitPerGene[j, ] <- p.adjust(mannWhitPerGene[j, ], method)
      }
    }
    setTxtProgressBar(pb, j)
  }
  rownames(mannWhitPerGene) <- colnames(genePresAbsObjkt)
  colnames(mannWhitPerGene) <- colnames(metMatObjkt)
  # Keep only genes with a complete row of results.
  mannWhitPerGene <- mannWhitPerGene[!is.na(rowSums(mannWhitPerGene)), ]
  rownames(zScore) <- colnames(genePresAbsObjkt)
  colnames(zScore) <- colnames(metMatObjkt)
  zScore <- zScore[!is.na(rowSums(zScore)), ]
  return(list("p.value" = mannWhitPerGene, "z.score" = zScore))
}
# Filters the p-value matrix obtained in the previous step (mannWhitPerGeneObjkt)
# to a given alpha (default: alpha = 0.05). Returns, per gene, the names of the
# metabolites whose p-value passes the threshold, both as a list and as a
# ragged character matrix (one column per gene).
filtMannWhitPerGene <- function(mannWhitPerGeneObjkt, alpha = 0.05){
  # rbind.fill.matrix() comes from plyr.
  if(!require(plyr)) install.packages('plyr')
  library(plyr)
  mannWhitPerGeneObjkt <- mannWhitPerGeneObjkt$p.value
  FUN <- function(x) t(as.matrix(names(x)[(x <= alpha)]))
  # NOTE(review): apply() simplifies to a matrix when every gene yields the
  # same number of significant metabolites; in that case difMetsPerGene is not
  # a list and rbind.fill.matrix() may misbehave — confirm.
  difMetsPerGene <- apply(mannWhitPerGeneObjkt, 1, FUN)
  difMetsPerGeneMat <- t(rbind.fill.matrix(difMetsPerGene))
  colnames(difMetsPerGeneMat) <- names(difMetsPerGene)
  return(list("as_list" = difMetsPerGene, "as_mat" = difMetsPerGeneMat))
}
# Given diffMetObjkt (differential metabolites between each pair of clusters,
# one column per cluster pair) and the object generated by the previous
# function (mannWhitPerGeneFiltObjkt), returns, for every differential
# metabolite, the names of all genes whose presence/absence is associated with
# differences in abundance of that metabolite.
getDiffGenePerDiffMet <- function(mannWhitPerGeneFiltObjkt, diffMetObjkt){
  pb = txtProgressBar(min = 0, max = length(diffMetObjkt[!is.na(diffMetObjkt)]), initial = 0)
  pbAv <- 0
  mannWhit <- mannWhitPerGeneFiltObjkt$as_list
  diffGenesPerDiffMet <- list()
  for(i in 1:ncol(diffMetObjkt)){
    genesRelated2Met <- list()
    for(j in 1:length(diffMetObjkt[, i][!is.na(diffMetObjkt[, i])])){
      # Genes whose significant-metabolite list contains metabolite j.
      FUN <- function(x) any(x %in% diffMetObjkt[j, i])
      genes <- names(which(lapply(mannWhit, FUN) == T))
      if(length(genes) > 0){
        genesRelated2Met[[j]] <- genes
      }else{
        # Sentinel string; downstream functions filter on this exact text.
        genesRelated2Met[[j]] <- "Any relationship between the gene and the metabolite"
      }
      pbAv <- pbAv + 1
      setTxtProgressBar(pb, pbAv)
    }
    names(genesRelated2Met) <- diffMetObjkt[, i][!is.na(diffMetObjkt[, i])]
    diffGenesPerDiffMet[[i]] <- genesRelated2Met
  }
  names(diffGenesPerDiffMet) <- colnames(diffMetObjkt)
  return(diffGenesPerDiffMet)
}
# Given the presence/absence matrix (genePresAbsObjkt), the output of the
# previous function (DGenesPerDMetsObjkt) and metClustObjkt (the cluster each
# strain belongs to), builds one matrix per differential metabolite: the genes
# associated with that metabolite (columns) across the strains of the two
# compared clusters (rows), separated by a row of NAs, plus a Total column of
# per-strain gene counts.
getGeneMatsPerMet <- function(DGenesPerDMetsObjkt, genePresAbsObjkt, metClustObjkt){
  geneMatsPerMet <- list()
  for(i in seq_along(DGenesPerDMetsObjkt)){
    # List names encode the compared clusters, e.g. "1&2" or "1.1&1.2".
    groups <- strsplit(names(DGenesPerDMetsObjkt)[i], "&")[[1]]
    metMats <- list()
    notEmpty <- DGenesPerDMetsObjkt[[i]][DGenesPerDMetsObjkt[[i]] != "Any relationship between the gene and the metabolite"]
    for(h in seq_along(notEmpty)){
      if(i == 1){
        # The first comparison is between the top-level clusters ("1" vs "2"):
        # strip the sub-cluster suffix before matching.
        group1 <- names(metClustObjkt[gsub("\\..*", rep = "", metClustObjkt) == groups[1]])
        group2 <- names(metClustObjkt[gsub("\\..*", rep = "", metClustObjkt) == groups[2]])
      }else{
        group1 <- names(metClustObjkt[metClustObjkt == groups[1]])
        group2 <- names(metClustObjkt[metClustObjkt == groups[2]])
      }
      metMat1 <- as.matrix(genePresAbsObjkt[group1, notEmpty[[h]]])
      metMat2 <- as.matrix(genePresAbsObjkt[group2, notEmpty[[h]]])
      # NA spacer row marks the boundary between the two clusters; downstream
      # code (mannWhit_tabs) relies on it to split the groups again.
      metMat <- rbind(metMat1, rep(NA, length(notEmpty[[h]])), metMat2)
      colnames(metMat) <- notEmpty[[h]]
      metMat <- cbind(metMat, Total = rowSums(metMat))
      metMats[[h]] <- metMat
    }
    names(metMats) <- names(notEmpty)
    geneMatsPerMet[[i]] <- metMats
  }
  names(geneMatsPerMet) <- names(DGenesPerDMetsObjkt)
  return(geneMatsPerMet)
}
# Takes the matrices generated in the previous step (geneMatMetObjkt) and runs
# a Mann-Whitney test between the two clusters' strain vectors, gene by gene,
# and also on the per-strain Total column. Returns both p-value tables.
mannWhit_tabs <- function(geneMatMetObjkt){
  mannWhitTabs <- list()
  allDiffMets <- lapply(geneMatMetObjkt, names)
  allDiffMets <- unique(unlist(allDiffMets))
  totalMannWhit <- matrix(nrow = length(allDiffMets), ncol = length(geneMatMetObjkt), dimnames = list(allDiffMets, names(geneMatMetObjkt)))
  for(ii in seq_along(geneMatMetObjkt)){
    allDiffGenes <- lapply(geneMatMetObjkt[[ii]], colnames)
    allDiffGenes <- unique(unlist(allDiffGenes))
    allDiffGenes <- allDiffGenes[-which(allDiffGenes == "Total")]
    # The NA spacer row inserted by getGeneMatsPerMet() separates the two
    # clusters: rows 1..(equator-1) are cluster 1, rows (equator+1)..upper are
    # cluster 2. All matrices of one cluster pair share the same strains, so
    # the split computed from the first matrix applies to all of them.
    equator <- which(is.na(geneMatMetObjkt[[ii]][[1]][, 1]))
    upper <- length(geneMatMetObjkt[[ii]][[1]][, 1])
    metGenes <- matrix(nrow = length(geneMatMetObjkt[[ii]]), ncol = length(allDiffGenes))
    rownames(metGenes) <- names(geneMatMetObjkt[[ii]])
    colnames(metGenes) <- allDiffGenes
    for(j in 1:length(geneMatMetObjkt[[ii]])){
      for(i in 1:(ncol(geneMatMetObjkt[[ii]][[j]]) - 1)){
        # wilcox.test(...)[[3]] extracts the p.value component.
        metGenes[names(geneMatMetObjkt[[ii]][j]), colnames(geneMatMetObjkt[[ii]][[j]])[i]] <- wilcox.test(as.numeric(geneMatMetObjkt[[ii]][[j]][1:(equator - 1), i]), as.numeric(geneMatMetObjkt[[ii]][[j]][(equator + 1):upper, i]))[[3]]
      }
      # Same comparison on the per-strain gene-count totals.
      totalMannWhit[names(geneMatMetObjkt[[ii]])[j], ii] <- wilcox.test(as.numeric(geneMatMetObjkt[[ii]][[j]][1:(equator - 1), "Total"]), as.numeric(geneMatMetObjkt[[ii]][[j]][(equator + 1):upper, "Total"]))[[3]]
    }
    mannWhitTabs[[ii]] <- metGenes
  }
  names(mannWhitTabs) <- names(geneMatMetObjkt)
  return(list("Individual_genes" = mannWhitTabs, "Totals" = totalMannWhit))
}
# Filters the p-values generated by mannWhit_tabs() to a given alpha (default:
# alpha = 0.05). Returns, per cluster pair and metabolite, the genes that pass
# the threshold individually, and separately the genes available for the
# metabolites whose Total comparison is significant.
filtMannWhitTabs <- function(manWhitTabsObjkt, alpha = 0.05){
  single <- manWhitTabsObjkt$Individual_genes
  totals <- manWhitTabsObjkt$Totals
  filteredTabs <- list()
  filteredTabs_Total <- list()
  for(ii in seq_along(single)){
    diffGenesPerMet <- list()
    for(i in 1:nrow(single[[ii]])){
      diffGenes <- c()
      for(j in 1:ncol(single[[ii]])){
        if(!is.na(single[[ii]][i, j]) && single[[ii]][i, j] <= alpha){
          diffGenes <- c(diffGenes, colnames(single[[ii]])[j])
        }
      }
      if(is.null(diffGenes)){
        # Sentinel used when no gene passes the threshold for this metabolite.
        diffGenes <- "Any correlated gene with the metabolite found"
      }
      diffGenesPerMet[[i]] <- diffGenes
    }
    names(diffGenesPerMet) <- rownames(single[[ii]])
    filteredTabs[[ii]] <- diffGenesPerMet
    # Metabolites whose Total (gene-count) comparison is significant: collect
    # the non-NA genes tested for them.
    genesMet <- list()
    totals_sign <- names(which(totals[, ii] <= alpha))
    for(i in seq_along(totals_sign)){
      trueVec <- !is.na(single[[ii]][totals_sign[i], ])
      genesMet[[i]] <- names(single[[ii]][totals_sign[i], ])[trueVec]
    }
    names(genesMet) <- totals_sign
    filteredTabs_Total[[ii]] <- genesMet
  }
  names(filteredTabs) <- names(single)
  names(filteredTabs_Total) <- names(single)
  return(list("Individual_genes" = filteredTabs, "Total" = filteredTabs_Total))
}
# step3_(group project)_Xiyucode_adjacencyMatrix_outcitations
#
# Builds the out-citation edge list and sparse adjacency matrix for the papers
# that appear in the abstract network, and saves both as .rds files.

# set working directory
# NOTE(review): hard-coded absolute path; consider an RStudio project or the
# here package so the script is portable.
setwd("C:/Users/Xiyu/Desktop/Xiyu's Folder/2020 Fall/Stat 992")

# source file — provides `edgeList` and `identifiers` used below
source("code/step3_(group project)_Xiyucode_adjacencyMatrix_abstract.R")

# packages
library(dplyr)
library(readr)
library(tidyverse)
library(tidytext)

# read in data: one CSV per shard, row-bound into a single data frame.
# FIX: list.files()'s `pattern` is a regular expression, not a glob —
# "*.csv" is not valid there; "\\.csv$" matches files ending in ".csv".
files <- list.files(path = "data/FDR_000_180", pattern = "\\.csv$", full.names = TRUE)
df <- do.call(rbind, lapply(files, read.csv))

# Map the abstract-network identifiers back to raw paper ids.
abstract_ids <- edgeList$id %>% unique()
abstract_ids <- as.data.frame(abstract_ids)
abstract_ids <- abstract_ids %>%
  left_join(identifiers, by = c("abstract_ids" = "identifier")) %>%
  select(id)

## find out papers included in the abstract network
df <- abstract_ids %>% left_join(df)

# check missing data
df %>% filter(outCitation == "") %>% count()

# check how many papers contain only a handful of outCitations
few_outCit <- df %>%
  select(id, outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  group_by(id) %>%
  count() %>%
  filter(n <= 5)

# identifier: assign an integer id to every paper (citing or cited)
outCitations <- df %>%
  select(outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  rename(id = word)
paper_ids <- df %>%
  select(id) %>%
  rbind(outCitations) %>%
  unique()
identifiers_out <- paper_ids %>% mutate(identifier_out = 1:nrow(paper_ids))

# edge list: (citing paper, cited paper) pairs mapped to integer ids
edgeList_out <- df %>%
  select(id, outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  rename(outCitation = word) %>%
  left_join(identifiers_out, by = "id") %>%
  select(identifier_out, outCitation) %>%
  rename(id = identifier_out)
edgeList_out <- edgeList_out %>%
  left_join(identifiers_out, by = c("outCitation" = "id")) %>%
  select(id, identifier_out) %>%
  rename(outCitation = identifier_out)

# adjacency matrix (sparse)
adjMatrix <- cast_sparse(edgeList_out, id, outCitation)

# save adjacency matrix as rds file
saveRDS(adjMatrix, file = "data/outcitation_adjMatrix_new.rds")
saveRDS(identifiers_out, file = "data/identifiers_out_new.rds")
/code/step3_(group project)_Xiyucode_adjacencyMatrix_outcitations.R
no_license
xiyuy98/stat-992-project
R
false
false
2,126
r
# step3_(group project)_Xiyucode_adjacencyMatrix_outcitations
#
# Builds the out-citation edge list and sparse adjacency matrix for the papers
# that appear in the abstract network, and saves both as .rds files.

# set working directory
# NOTE(review): hard-coded absolute path; consider an RStudio project or the
# here package so the script is portable.
setwd("C:/Users/Xiyu/Desktop/Xiyu's Folder/2020 Fall/Stat 992")

# source file — provides `edgeList` and `identifiers` used below
source("code/step3_(group project)_Xiyucode_adjacencyMatrix_abstract.R")

# packages
library(dplyr)
library(readr)
library(tidyverse)
library(tidytext)

# read in data: one CSV per shard, row-bound into a single data frame.
# FIX: list.files()'s `pattern` is a regular expression, not a glob —
# "*.csv" is not valid there; "\\.csv$" matches files ending in ".csv".
files <- list.files(path = "data/FDR_000_180", pattern = "\\.csv$", full.names = TRUE)
df <- do.call(rbind, lapply(files, read.csv))

# Map the abstract-network identifiers back to raw paper ids.
abstract_ids <- edgeList$id %>% unique()
abstract_ids <- as.data.frame(abstract_ids)
abstract_ids <- abstract_ids %>%
  left_join(identifiers, by = c("abstract_ids" = "identifier")) %>%
  select(id)

## find out papers included in the abstract network
df <- abstract_ids %>% left_join(df)

# check missing data
df %>% filter(outCitation == "") %>% count()

# check how many papers contain only a handful of outCitations
few_outCit <- df %>%
  select(id, outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  group_by(id) %>%
  count() %>%
  filter(n <= 5)

# identifier: assign an integer id to every paper (citing or cited)
outCitations <- df %>%
  select(outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  rename(id = word)
paper_ids <- df %>%
  select(id) %>%
  rbind(outCitations) %>%
  unique()
identifiers_out <- paper_ids %>% mutate(identifier_out = 1:nrow(paper_ids))

# edge list: (citing paper, cited paper) pairs mapped to integer ids
edgeList_out <- df %>%
  select(id, outCitation) %>%
  unnest_tokens(word, outCitation) %>%
  rename(outCitation = word) %>%
  left_join(identifiers_out, by = "id") %>%
  select(identifier_out, outCitation) %>%
  rename(id = identifier_out)
edgeList_out <- edgeList_out %>%
  left_join(identifiers_out, by = c("outCitation" = "id")) %>%
  select(id, identifier_out) %>%
  rename(outCitation = identifier_out)

# adjacency matrix (sparse)
adjMatrix <- cast_sparse(edgeList_out, id, outCitation)

# save adjacency matrix as rds file
saveRDS(adjMatrix, file = "data/outcitation_adjMatrix_new.rds")
saveRDS(identifiers_out, file = "data/identifiers_out_new.rds")
#' Download Occurrences from the Paleobiology Database
#'
#' Downloads a data frame of Paleobiology Database fossil occurrences.
#'
#' @param Taxa a character vector
#' @param StartInterval a character vector
#' @param StopInterval a character vector
#'
#' @details Downloads a data frame of Paleobiology Database fossil occurrences matching certain taxonomic groups and age range. This is simply a convenience function for rapid data download, and only returns the most generically useful fields. Go directly to the Paleobiology Database to make more complex searches or access additional fields. This function makes use of the RCurl package.
#'
#' \itemize{
##' \item{occurrence_no:} {The Paleobiology Database occurrence number.}
##' \item{collection_no:} {The Paleobiology Database collection number.}
##' \item{reference_no:} {The Paleobiology Database reference number.}
##' \item{Classifications:} {The stated Linnean classification of the occurrence from phylum through genus. See \code{cleanTaxonomy} for how to simplify these fields.}
##' \item{accepted_name:} {The highest resolution taxonomic name assigned to the occurrence.}
##' \item{Geologic Intervals:} {The earliest possible age of the occurrence and latest possible age of the occurrence, expressed in terms of geologic intervals. See \code{constrainAge} for how to simplify these fields.}
##' \item{Numeric Ages:} {The earliest possible age of the occurrence and latest possible age of the occurrence, expressed as millions of years ago.}
##' \item{Geolocation:} {Both present-day and rotated paleocoordinates of the occurrence. The geoplate id used by the rotation model is also included. The key for geoplate ids can be found in the Paleobiology Database API documentation.}
#' }
#'
#' @return a data frame
#'
#' @author Andrew A. Zaffos
#'
#' @examples
#'
#' # Download a test dataset of Ypresian bivalves.
#' # DataPBDB<-downloadPBDB(Taxa="Bivalvia",StartInterval="Ypresian",StopInterval="Ypresian")
#'
#' # Download a test dataset of Ordovician-Silurian trilobites and brachiopods.
#' # DataPBDB<-downloadPBDB(c("Trilobita","Brachiopoda"),"Ordovician","Silurian")
#'
#' @rdname downloadPBDB
#' @export
# A function for downloading data from the Paleobiology database
downloadPBDB <- function(Taxa, StartInterval = "Pliocene", StopInterval = "Pleistocene") {
  # Multiple taxa are submitted as a single comma-separated list.
  Taxa <- paste(Taxa, collapse = ",")
  # Build the data1.2 occurrence-list request (paste0 instead of the older
  # paste(..., sep = "")).
  # NOTE(review): interval names containing spaces would need URLencode() —
  # confirm whether such names are expected here.
  URL <- paste0("https://paleobiodb.org/data1.2/occs/list.csv?base_name=", Taxa, "&interval=", StartInterval, ",", StopInterval, "&show=coords,paleoloc,phylo&limit=all")
  GotURL <- RCurl::getURL(URL)
  File <- utils::read.csv(text = GotURL, header = TRUE)
  # Subset to include the most generically useful columns
  File <- File[, c("occurrence_no", "collection_no", "reference_no", "phylum", "class", "order", "family", "genus", "accepted_name", "early_interval", "late_interval", "max_ma", "min_ma", "lng", "lat", "paleolng", "paleolat", "geoplate")]
  return(File)
}
/velociraptr/R/downloadPBDB.R
no_license
PeterJWagner3/paleobiodb_utilities
R
false
false
2,891
r
#' Download Occurrences from the Paleobiology Database
#'
#' Downloads a data frame of Paleobiology Database fossil occurrences.
#'
#' @param Taxa a character vector
#' @param StartInterval a character vector
#' @param StopInterval a character vector
#'
#' @details Downloads a data frame of Paleobiology Database fossil occurrences matching certain taxonomic groups and age range. This is simply a convenience function for rapid data download, and only returns the most generically useful fields. Go directly to the Paleobiology Database to make more complex searches or access additional fields. This function makes use of the RCurl package.
#'
#' \itemize{
##' \item{occurrence_no:} {The Paleobiology Database occurrence number.}
##' \item{collection_no:} {The Paleobiology Database collection number.}
##' \item{reference_no:} {The Paleobiology Database reference number.}
##' \item{Classifications:} {The stated Linnean classification of the occurrence from phylum through genus. See \code{cleanTaxonomy} for how to simplify these fields.}
##' \item{accepted_name:} {The highest resolution taxonomic name assigned to the occurrence.}
##' \item{Geologic Intervals:} {The earliest possible age of the occurrence and latest possible age of the occurrence, expressed in terms of geologic intervals. See \code{constrainAge} for how to simplify these fields.}
##' \item{Numeric Ages:} {The earliest possible age of the occurrence and latest possible age of the occurrence, expressed as millions of years ago.}
##' \item{Geolocation:} {Both present-day and rotated paleocoordinates of the occurrence. The geoplate id used by the rotation model is also included. The key for geoplate ids can be found in the Paleobiology Database API documentation.}
#' }
#'
#' @return a data frame
#'
#' @author Andrew A. Zaffos
#'
#' @examples
#'
#' # Download a test dataset of Ypresian bivalves.
#' # DataPBDB<-downloadPBDB(Taxa="Bivalvia",StartInterval="Ypresian",StopInterval="Ypresian")
#'
#' # Download a test dataset of Ordovician-Silurian trilobites and brachiopods.
#' # DataPBDB<-downloadPBDB(c("Trilobita","Brachiopoda"),"Ordovician","Silurian")
#'
#' @rdname downloadPBDB
#' @export
# A function for downloading data from the Paleobiology database
downloadPBDB <- function(Taxa, StartInterval = "Pliocene", StopInterval = "Pleistocene") {
  # Multiple taxa are submitted as a single comma-separated list.
  Taxa <- paste(Taxa, collapse = ",")
  # Build the data1.2 occurrence-list request (paste0 instead of the older
  # paste(..., sep = "")).
  # NOTE(review): interval names containing spaces would need URLencode() —
  # confirm whether such names are expected here.
  URL <- paste0("https://paleobiodb.org/data1.2/occs/list.csv?base_name=", Taxa, "&interval=", StartInterval, ",", StopInterval, "&show=coords,paleoloc,phylo&limit=all")
  GotURL <- RCurl::getURL(URL)
  File <- utils::read.csv(text = GotURL, header = TRUE)
  # Subset to include the most generically useful columns
  File <- File[, c("occurrence_no", "collection_no", "reference_no", "phylum", "class", "order", "family", "genus", "accepted_name", "early_interval", "late_interval", "max_ma", "min_ma", "lng", "lat", "paleolng", "paleolat", "geoplate")]
  return(File)
}
# Script to get endpoints of backwards particles.
# Run on UNSW Katana HPC.

library(dplyr)

mydata <- readRDS("../../srv/scratch/z3374139/Combined_Backwards.rds")

# Keep records with at least 500 accumulated degree-days and drop rows with
# missing values.
total_data2 <- subset(mydata, degree_days >= 500)
total_data2 <- na.omit(total_data2)

# One row per particle: the record with the smallest degree_days (the
# endpoint). slice_min() replaces the superseded top_n(-1, ...); ties are
# kept, matching the old behaviour. ungroup() so the saved object is not a
# grouped data frame.
total_data2 <- total_data2 %>%
  group_by(Particle) %>%
  slice_min(degree_days, n = 1) %>%
  ungroup()

saveRDS(total_data2, file = "../../srv/scratch/z3374139/Backwards_endpoints.rds")
/2_SimulationAnalysis/Backwards_calculating_endpoints.R
no_license
jaseeverett/TailorParticleTracking
R
false
false
423
r
# Script to get endpoints of backwards particles.
# Run on UNSW Katana HPC.

library(dplyr)

mydata <- readRDS("../../srv/scratch/z3374139/Combined_Backwards.rds")

# Keep records with at least 500 accumulated degree-days and drop rows with
# missing values.
total_data2 <- subset(mydata, degree_days >= 500)
total_data2 <- na.omit(total_data2)

# One row per particle: the record with the smallest degree_days (the
# endpoint). slice_min() replaces the superseded top_n(-1, ...); ties are
# kept, matching the old behaviour. ungroup() so the saved object is not a
# grouped data frame.
total_data2 <- total_data2 %>%
  group_by(Particle) %>%
  slice_min(degree_days, n = 1) %>%
  ungroup()

saveRDS(total_data2, file = "../../srv/scratch/z3374139/Backwards_endpoints.rds")
\name{centerx} \alias{centerx} \title{Center Matrix} \description{ Centers a matrix. } \usage{ centerx(x) } \arguments{ \item{x}{ a matrix } } \details{ Returns a centered matrix, i.e., each column of the matrix is replaced by deviations from its column mean. } \value{ The centered matrix. } \author{ John Kloke \email{kloke@biostat.wisc.edu}, Joseph McKean} \seealso{ scale } \examples{ x <- cbind(seq(1,5,length=5),seq(10,20,length=5)) xc <- centerx(x) apply(xc,2,mean) }
/man/centerx.Rd
no_license
kloke/npsm
R
false
false
478
rd
\name{centerx} \alias{centerx} \title{Center Matrix} \description{ Centers a matrix. } \usage{ centerx(x) } \arguments{ \item{x}{ a matrix } } \details{ Returns a centered matrix, i.e., each column of the matrix is replaced by deviations from its column mean. } \value{ The centered matrix. } \author{ John Kloke \email{kloke@biostat.wisc.edu}, Joseph McKean} \seealso{ scale } \examples{ x <- cbind(seq(1,5,length=5),seq(10,20,length=5)) xc <- centerx(x) apply(xc,2,mean) }
#' ks.big_deep_learning
#'
#' Splits the hyperparameter grid into chunks of 1000 rows and runs
#' ks.deep_learning() on each chunk in turn, so a single call never has to
#' process the whole grid at once.
#'
ks.big_deep_learning = function(hyperparameters = expand.grid(layer1 = seq(3,11, by = 2), layer2 = c(0,seq(3,11, by = 2)), layer3 = c(0,seq(3,11, by = 2)), activation_function_layer1 = c("relu","sigmoid"), activation_function_layer2 = c("relu","sigmoid"), activation_function_layer3 = c("relu","sigmoid"), dropout_layer1 = c(0, 0.1), dropout_layer2 = c(0, 0.1), dropout_layer3 = c(0), layer1_regularizer = c(T,F), layer2_regularizer = c(T,F), layer3_regularizer = c(T,F), optimizer = c("adam","rmsprop","sgd"), autoencoder = c(0,7,-7), balanced = balanced, formula = as.character(ks.create_miRNA_formula(selected_miRNAs))[3], scaled = c(F,T), stringsAsFactors = F), nazwa_konfiguracji = "TCGA_wybraneprzezWF+normalizatory.csv", selected_miRNAs = c("hsa.miR.192.5p", "hsa.let.7g.5p", "hsa.let.7a.5p", "hsa.let.7d.5p", "hsa.miR.194.5p", "hsa.miR.98.5p", "hsa.let.7f.5p", "hsa.miR.122.5p", "hsa.miR.340.5p", "hsa.miR.26b.5p" ,"hsa.miR.17.5p", "hsa.miR.199a.3p.hsa.miR.199b.3p.1", "hsa.miR.28.3p", "hsa.miR.92a.3p" ), balanced = F, ...) {
  # The grid itself is only used for its row count; row ranges are forwarded
  # to ks.deep_learning() via start/end.
  total_rows <- nrow(hyperparameters)
  batch_size <- 1000
  batch_count <- ceiling(nrow(hyperparameters) / batch_size)
  batch_start <- 1
  for (batch_no in 1:batch_count) {
    # Clamp the last batch to the end of the grid.
    batch_end <- min(batch_start + (batch_size - 1), total_rows)
    cat(paste0("\n\nProcessing batch no ", batch_no, " of ", batch_count, " (", batch_start, "-", batch_end, ")"))
    ks.deep_learning(selected_miRNAs = selected_miRNAs, wd = getwd(), SMOTE = balanced, start = batch_start, end = batch_end, output_file = nazwa_konfiguracji, ...)
    batch_start <- batch_end + 1
  }
}
/R/ks.big_deep_learning.R
no_license
mgkaszkowiak/miRNAselector
R
false
false
2,056
r
#' ks.big_deep_learning
#'
#' Splits the hyperparameter grid into chunks of 1000 rows and runs
#' ks.deep_learning() on each chunk in turn, so a single call never has to
#' process the whole grid at once.
#'
ks.big_deep_learning = function(hyperparameters = expand.grid(layer1 = seq(3,11, by = 2), layer2 = c(0,seq(3,11, by = 2)), layer3 = c(0,seq(3,11, by = 2)), activation_function_layer1 = c("relu","sigmoid"), activation_function_layer2 = c("relu","sigmoid"), activation_function_layer3 = c("relu","sigmoid"), dropout_layer1 = c(0, 0.1), dropout_layer2 = c(0, 0.1), dropout_layer3 = c(0), layer1_regularizer = c(T,F), layer2_regularizer = c(T,F), layer3_regularizer = c(T,F), optimizer = c("adam","rmsprop","sgd"), autoencoder = c(0,7,-7), balanced = balanced, formula = as.character(ks.create_miRNA_formula(selected_miRNAs))[3], scaled = c(F,T), stringsAsFactors = F), nazwa_konfiguracji = "TCGA_wybraneprzezWF+normalizatory.csv", selected_miRNAs = c("hsa.miR.192.5p", "hsa.let.7g.5p", "hsa.let.7a.5p", "hsa.let.7d.5p", "hsa.miR.194.5p", "hsa.miR.98.5p", "hsa.let.7f.5p", "hsa.miR.122.5p", "hsa.miR.340.5p", "hsa.miR.26b.5p" ,"hsa.miR.17.5p", "hsa.miR.199a.3p.hsa.miR.199b.3p.1", "hsa.miR.28.3p", "hsa.miR.92a.3p" ), balanced = F, ...) {
  # The grid itself is only used for its row count; row ranges are forwarded
  # to ks.deep_learning() via start/end.
  total_rows <- nrow(hyperparameters)
  batch_size <- 1000
  batch_count <- ceiling(nrow(hyperparameters) / batch_size)
  batch_start <- 1
  for (batch_no in 1:batch_count) {
    # Clamp the last batch to the end of the grid.
    batch_end <- min(batch_start + (batch_size - 1), total_rows)
    cat(paste0("\n\nProcessing batch no ", batch_no, " of ", batch_count, " (", batch_start, "-", batch_end, ")"))
    ks.deep_learning(selected_miRNAs = selected_miRNAs, wd = getwd(), SMOTE = balanced, start = batch_start, end = batch_end, output_file = nazwa_konfiguracji, ...)
    batch_start <- batch_end + 1
  }
}
## Decision tree
library(rpart)

# Install rpart.plot only if it is missing: an unconditional
# install.packages() in a script re-downloads the package on every run
# and fails without network access.
if (!requireNamespace("rpart.plot", quietly = TRUE)) {
  install.packages("rpart.plot")
}
library(rpart.plot)

# kyphosis: post-operative spinal deformity data shipped with rpart.
str(kyphosis)

# Classification tree predicting Kyphosis from all remaining columns.
tree <- rpart(Kyphosis ~ ., method = "class", data = kyphosis)
printcp(tree)  # complexity-parameter table for pruning decisions

# Base-graphics rendering of the fitted tree.
plot(tree, uniform = TRUE, main = "Kyphosis Tree")
text(tree, use.n = TRUE, all = TRUE)

# Nicer rendering via rpart.plot.
prp(tree)

## Random Forest
/R-Course-HTML-Notes/R-for-Data-Science-and-Machine-Learning/Machine Learning with R/tree.R
no_license
rsharma11/Machine-learning
R
false
false
283
r
## Decision tree library(rpart) install.packages('rpart.plot') library(rpart.plot) str(kyphosis) tree <- rpart(Kyphosis ~ .,method = 'class', data = kyphosis) printcp(tree) plot(tree, uniform = T, main = 'Kyphosis Tree') text(tree, use.n = T, all = T) prp(tree) ## Random Forest
# Working on setting appropriate parameters for recruitment models.
# (No rm(list = ls()) here: wiping the user's whole workspace from inside a
# script is an anti-pattern; run the script in a fresh session instead.)

# Spawner abundance grid shared by both models (defined once, not twice).
N <- seq(0, 5000, length.out = 100)

# Ricker model: R = a * N * exp(-b * N)  (dome-shaped recruitment curve)
r.a <- 20
r.b <- 0.001
R.r <- r.a * N * exp(-r.b * N)
plot(N, R.r, pch = 16)

# Beverton-Holt model: R = a * N / (b + N)  (saturating recruitment curve)
bh.a <- 5000
bh.b <- 5e2
R.bh <- bh.a * N / (bh.b + N)
plot(N, R.bh, pch = 16)

# Plotting both together for comparison.
plot(N, R.bh, type = "l", lwd = 2,
     ylab = "Recruits", xlab = "Spawners",
     ylim = c(0, max(c(R.bh, R.r))))
lines(N, R.r, lwd = 2, col = "grey")
legend("right", legend = c("Bev-Holt", "Ricker"), lwd = 2,
       col = c("black", "grey"), bty = "n")
/stock-recruitComparison.R
no_license
chelsnieman/MultiSppModel
R
false
false
575
r
rm(list=ls()) # working on setting appropriate parms for recruitment models #Ricker model N=seq(0,5000, length.out = 100) r.a=20 r.b=0.001 R.r=r.a*N*exp(-r.b*N) plot(N,R.r,pch=16) # Beverton Holt model N=seq(0,5000,length.out = 100) bh.a=5000 bh.b=5e2 R.bh=bh.a*N/(bh.b+N) plot(N,R.bh,pch=16) #plotting both together plot(N,R.bh,type="l",lwd=2, ylab = "Recruits", xlab = "Spawners", ylim = c(0,max(c(R.bh,R.r)))) lines(N,R.r, lwd=2, col="grey") legend("right",legend = c("Bev-Holt", "Ricker"), lwd=2, col=c("black","grey"), bty = "n")
#' Find Index for Gaming Statistics
#'
#' A helper function to count for N x N gaming indices.
#'
#' Takes in a numeric vector, assumed sorted in descending order, and returns
#' the highest N such that at least N items are >= N (an h-index-style
#' statistic).
#'
#' @param num_vect A numeric vector, sorted in descending order.
#' @return The largest integer N with `num_vect[N] >= N`; 0 if no element
#'   qualifies (including the empty-vector case).
find_index <- function(num_vect) {
  counter <- 1
  # Bound the scan by the vector length: the previous unguarded loop read
  # past the end when every element satisfied the condition, so
  # num_vect[counter] became NA and while() errored with
  # "missing value where TRUE/FALSE needed".
  while (counter <= length(num_vect) && num_vect[counter] >= counter) {
    counter <- counter + 1
  }
  counter - 1
}
/R/utils.R
permissive
jossives/bgStats
R
false
false
498
r
#' Find Index for Gaming Statistics #' #' A helper function to count for N x N gaming indeces. #' #' Takes in a numeric vector for which we're looking to find a N x N index. #' Returns the highest N where N >= N items deep in a descending sorted list. #' #' @param num_vect A numeric vector find_index <- function(num_vect) { # count items that have value of counter or higher counter <- 1 while (num_vect[counter] >= counter) { counter <- counter + 1 } counter - 1 }
####################################################################
#' Download and plot daily downloads of CRAN packages
#'
#' Download daily downloads stats from CRAN for any package, and plot.
#' It can also be used as an auxiliary function to plot
#' (\code{cranlogs::cran_downloads}) results.
#'
#' @param input Character vector with package names or data.frame product of
#' \code{cranlogs::cran_downloads}.
#' @param from,to Dates. Range of dates to fetch downloads metrics.
#' @param type Character. Any of: "daily" or "total".
#' @param plot Boolean. Create a plot?
#' @examples
#' \donttest{
#' cran_logs(c("lares", "dplyr"), from = "2021-05-31")
#' }
#' @return List with data.frame and plot if \code{plot=TRUE}; otherwise the
#' data.frame alone. On API outage, the attempted URL (invisibly).
#' @export
cran_logs <- function(input = "lares",
                      from = Sys.Date() - 31,
                      to = Sys.Date() - 1,
                      type = "daily",
                      plot = TRUE) {
  if (is.vector(input)) {
    # Character-vector input: build the cranlogs.r-pkg.org API URL and fetch.
    check_opts(type, c("daily", "total"))
    base <- sprintf("https://cranlogs.r-pkg.org/downloads/%s", type)
    dates <- sprintf("%s:%s", as.character(from), as.character(to))
    packages <- paste(unique(input), collapse = ",")
    url <- paste(base, dates, packages, sep = "/")
    scrap <- content(GET(url), encoding = "UTF-8")
    # A well-formed response carries a "downloads" element per package;
    # its absence signals an outage, so bail out returning the URL.
    if (!"downloads" %in% names(scrap[[1]])) {
      warning("Site currently unavailable")
      return(invisible(url))
    }
    cran.df <- bind_rows(scrap)
    if (type == "daily") {
      # Unpack the nested (date, count) pairs into tidy columns,
      # newest dates first.
      cran.df <- cran.df %>%
        mutate(
          date = unlist(lapply(.data$downloads, function(x) x[[1]])),
          count = unlist(lapply(.data$downloads, function(x) x[[2]]))
        ) %>%
        select(.data$date, .data$count, .data$package) %>%
        mutate(date = as.Date(.data$date, origin = "1970-01-01")) %>%
        arrange(desc(.data$date))
    }
  } else {
    # data.frame input (e.g. output of cranlogs::cran_downloads()):
    # validate that the expected columns are present.
    check_opts(colnames(input), c("date", "count", "package"),
      input_name = "column names")
    cran.df <- as_tibble(input)
  }
  package <- unique(cran.df$package)
  # Plot only for daily data with at least two observations per package
  # (a line needs two points); otherwise just return the data.frame.
  if (nrow(cran.df) >= length(package) * 2 && plot && type == "daily") {
    input <- cran.df %>%
      arrange(.data$date) %>%
      group_by(.data$package) %>%
      mutate(
        cum = cumsum(.data$count),
        # Facet label: package name plus abbreviated total downloads.
        package = sprintf("%s\n(%s)", .data$package, formatNum(max(.data$cum), abbr = TRUE)),
        MN = mean(.data$count, na.rm = TRUE)
      )
    # Per-package mean downloads: drawn as a dashed reference line + label.
    dMean <- input %>%
      group_by(.data$package) %>%
      summarise(MN = mean(.data$count, na.rm = TRUE)) %>%
      mutate(
        date = min(input$date),
        MN_label = formatNum(.data$MN, 2, abbr = TRUE)
      )
    plot <- ggplot(input, aes(x = .data$date)) +
      geom_line(aes(y = .data$count), size = 1.2, colour = lares_pal("simple")[1], alpha = 0.8) +
      theme_lares(legend = "top", pal = 2, panel_colour = "grey95") +
      labs(
        title = glued("CRAN: {x} downloads", x = v2t(package, and = "and")),
        subtitle = sprintf("(%s to %s)", min(input$date), max(input$date)),
        # NOTE(review): "Dailly" is a typo in a runtime string; left
        # unchanged here (documentation-only pass) — fix separately.
        x = "Download Date", y = "Dailly Downloads", color = NULL
      ) +
      # One horizontal facet per package, independent y scales.
      facet_grid(.data$package ~ ., scales = "free") +
      geom_line(aes(y = .data$MN), linetype = "dashed") +
      geom_text(
        data = dMean,
        aes(x = .data$date, y = .data$MN, label = .data$MN_label),
        vjust = -0.5, size = 2.5
      ) +
      scale_y_abbr() +
      expand_limits(y = 0)
    return(list(df = as_tibble(cran.df), plot = plot))
  }
  return(cran.df)
}
/R/cran.R
no_license
laresbernardo/lares
R
false
false
3,460
r
#################################################################### #' Download and plot daily downloads of CRAN packages #' #' Download daily downloads stats from CRAN for any package, and plot. #' It can also be used as an auxiliary function to plot #' (\code{cranlogs::cran_downloads}) results. #' #' @param input Character vector with package names or data.frame product of #' \code{cranlogs::cran_downloads}. #' @param from,to Dates. Range of dates to fetch downloads metrics. #' @param type Character. Any of: "daily" or "total". #' @param plot Boolean. Create a plot? #' @examples #' \donttest{ #' cran_logs(c("lares", "dplyr"), from = "2021-05-31") #' } #' @return List with data.frame and plot if \code{plot=TRUE}. #' @export cran_logs <- function(input = "lares", from = Sys.Date() - 31, to = Sys.Date() - 1, type = "daily", plot = TRUE) { if (is.vector(input)) { check_opts(type, c("daily", "total")) base <- sprintf("https://cranlogs.r-pkg.org/downloads/%s", type) dates <- sprintf("%s:%s", as.character(from), as.character(to)) packages <- paste(unique(input), collapse = ",") url <- paste(base, dates, packages, sep = "/") scrap <- content(GET(url), encoding = "UTF-8") if (!"downloads" %in% names(scrap[[1]])) { warning("Site currently unavailable") return(invisible(url)) } cran.df <- bind_rows(scrap) if (type == "daily") { cran.df <- cran.df %>% mutate( date = unlist(lapply(.data$downloads, function(x) x[[1]])), count = unlist(lapply(.data$downloads, function(x) x[[2]])) ) %>% select(.data$date, .data$count, .data$package) %>% mutate(date = as.Date(.data$date, origin = "1970-01-01")) %>% arrange(desc(.data$date)) } } else { check_opts(colnames(input), c("date", "count", "package"), input_name = "column names") cran.df <- as_tibble(input) } package <- unique(cran.df$package) if (nrow(cran.df) >= length(package) * 2 && plot && type == "daily") { input <- cran.df %>% arrange(.data$date) %>% group_by(.data$package) %>% mutate( cum = cumsum(.data$count), package = 
sprintf("%s\n(%s)", .data$package, formatNum(max(.data$cum), abbr = TRUE)), MN = mean(.data$count, na.rm = TRUE) ) dMean <- input %>% group_by(.data$package) %>% summarise(MN = mean(.data$count, na.rm = TRUE)) %>% mutate( date = min(input$date), MN_label = formatNum(.data$MN, 2, abbr = TRUE) ) plot <- ggplot(input, aes(x = .data$date)) + geom_line(aes(y = .data$count), size = 1.2, colour = lares_pal("simple")[1], alpha = 0.8) + theme_lares(legend = "top", pal = 2, panel_colour = "grey95") + labs( title = glued("CRAN: {x} downloads", x = v2t(package, and = "and")), subtitle = sprintf("(%s to %s)", min(input$date), max(input$date)), x = "Download Date", y = "Dailly Downloads", color = NULL ) + facet_grid(.data$package ~ ., scales = "free") + geom_line(aes(y = .data$MN), linetype = "dashed") + geom_text( data = dMean, aes(x = .data$date, y = .data$MN, label = .data$MN_label), vjust = -0.5, size = 2.5 ) + scale_y_abbr() + expand_limits(y = 0) return(list(df = as_tibble(cran.df), plot = plot)) } return(cran.df) }
# Statistical learning methods # Lab 1 - Introduction to R, Logistic regression, shiny, plotly # materials: https://github.com/nosarzewski/SGH_SDM_18 # mail: nosarzewski.aleks@gmail.com, an56786@sgh.waw.pl # Plan # I. About R # II. Working R # III. Objects in R # IV. Data Export/Import # V. Statistics in R # VI. Conditional instructions # VII. Loops # VIII. Functions # I. About R ------------------------------------------------------------- # Where to find information? # http://www.r-project.org/doc/bib/R-books.html ## manuals: # http://cran.r-project.org/manuals.html ## R for data analysis ## Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani ## An Introduction to Statistical Learning with Applications in R #http://www-bcf.usc.edu/~gareth/ISL/ ## Andy Field, Discovering Statistics Using R - very straightforward and easy to understand #Polish books, websites: ## Bogumił Kamiński, Mateusz Zawisza "Receptury w R - Podręcznik dla ekonomistów" ##K. Kopczewska, T. Kopczewski, P. Wójcik, Metody ilościowe w R. Aplikacje ekonomiczne i finansowe, 2009, CeDeWu ## Eugeniusz Gatnar, Marek Walesiak "Analiza danych jakościowych i symbolicznych z wykorzystaniem programu R" ## Eugeniusz Gatnar, Marek Walesiak "Statystyczna analiza danych z wykorzystaniem programu R" ## Przemysław Biecek 'Przewodnik po pakiecie R' # http://www.biecek.pl/R/R.pdf ## Przemysław Biecek 'Na przelaj przez Data Mining z pakietem R' # http://www.biecek.pl/NaPrzelajPrzezDataMining/NaPrzelajPrzezDataMining.pdf ## Project website: # http://www.r-project.org/ ##Some MOOCs: # https://www.datacamp.com/courses/free-introduction-to-r # http://tryr.codeschool.com/ # http://swirlstats.com/ (learning R in R!) # https://www.edx.org/course/introduction-r-data-science-microsoft-dat204x-7 ## I strongly recommend attending prof. 
Ramsza's classes (both in Polish and English) - a bit more demanding but very valueable: # Basic R programming / Podstawy programowania w R # http://michal.ramsza.org/ # What if I did't find there help? # http://r.789695.n4.nabble.com/ # http://stackoverflow.com/ ## Selected keyboard shortcuts in RSTUDIO # CTRL+ENTER: running a instruction (R GUI - F5): active line or # TAB: hints for functions/objects # TAB after opening bracket: function arguments # F1 on function: help # CTRL+1: switching to editor # CTRL+2: switching to console # I. Working with R ----------------------------------------------------------- # How R works: # - console # - scripts # If we need documantation of function (or any other thing) # help(<function_name>) help() help(plot) # Similar to: # ?function_name - looks for functions with identical name as given string # ??"function_name" - looks for functions that have in name or in description given string ?plot ??"plot" # Setting and changing working directory getwd() setwd("D:/") #R uses Unix-type (macOS or Linux) locations, on Windows replace '\' with '/' dir() #content of working directory # We can also change it by menu and "session" tab # Environment # keeps declared variables/functions/data x <- 2 ls() # check what we have in workspace rm("x") # deleting element from workspace rm(list=ls()) # deleting ALL elements from workspace ## Additional packages # Installing # install.packages("package_name") install.packages("randomForest") # First you have to load package, before you use it # library(<package_name>) library(randomForest) require(randomForest) ## Types of variables # There are no seperate types for letters and words typeof() #logical values T == TRUE F == FALSE # Class is different from type. Type defines the way that object is kept # Class is an attribute of the object (in the terms of object programming) # Klasa to co innego ni? typ. 
Typ kt?ry okre?la wewn?trzny spos?b typeof(1) class(1) # Family of function checking and converting types is.integer(1.5) as.character(3) # If we do not have data NA # Not Available is.na(NA) NaN # Not a Number is.na(NaN) #check vetor with NA and "NA" # what to do with na? na.rm abc <- c(1,2,3,NA) mean(abc, na.rm = TRUE) # arrows can be used in both ways 10 -> a a # multiassignement a <- b <- c <- 20 a b c # III. Objects in R -------------------------------------------------------- # !!! In R evertying is an object ### scalars b <- 5 b # Maths # Mathematical functions # square root # absolute value # logarithm 2^3 # power sqrt(5) # square root of 5 5^0.5 5^(-1/3) abs(-99) # absolute value of -99 log(56) # natural logarithm log2(64) # binary logarithm logb(56, base = 5.6) # base of choice exp(2) # Euler number to the power of 2 (e^2) factorial(5) # 5! sin(0) ?Trig #How do we round numbers? ceiling(3.5) floor(3.5) trunc(5.99) # difference between 'trunc' and 'floor' x <- c(-3.2, -1.8, 2.3, 2.9) floor(x) trunc(x) round(6.592, digits = 2) ### Vectors # !!! Vector keeps elements of the same type # Vectors begin with 'c' x <- c(1, 6, 9) x y <- c("a", "b", "c") y z <- c(TRUE, FALSE, TRUE) # !!! Vector keeps elements of the same type f <- c("a", 1, TRUE) f # values automatically converted to strings #Sequences 1:10 x <- seq(1, 8, by = 2) x x <- seq(1, 8, length = 5) x #Vectors with replicated values rep(c(1, 3, 5), times = 3) rep(c(1, 3, 5), each = 3) rep(c(1, 3, 5), times = 3, each = 3) # Different functions useful for vectors #sample #length #rev #unique #sort #order #sum, cummulative sum #product, cumulative prod #difference x <- sample(1:10, 10) # random vector length(x) # haw many elements? 
rev(x) # reversing backwards unique(x) # unique values sort(x) # sorting (by default ascending) sort(x, decreasing = TRUE) order(x) # order in sorted vector x[order(x)] # we get sorted vector sum(x) # sum of vector elements prod(x) # product of vectors elements cumsum(x) # cumulative sum cumprod(x) # cumulative product diff(x) # differences between following elements #Generating random numbers set.seed(1) #setting seed of generator los <- rnorm(1000) # generates 1000 pseudo random numbers from N(0,1) hist(los,freq=0) # draws histogram lines(density(los),col="blue") # adds density function x <- seq(from=min(los),to=max(los),by=0.1) mean(los) #srednia sd(los) #odchylenie standardowe wart.f.gestosci <- dnorm(x,mean=mean(los),sd=sd(los)) # theoretical density lines(x,wart.f.gestosci,col="red") hist(x<-rnorm(100,mean=10,sd=3),freq=0) lines(density(x)) los<-runif(10^6) hist(los,xlim=c(-1,2)) # Indexing, can be done be reffering the position y <- c(3, 5, 1, -0.9, 44, 17, 9) y[5] # 5th element y[3:5] # elements from 3 to 5 y[4] <- 1000 y y[-4] # except 4th element # we can also use vectors for indexing y[c(1, 5, 6)] y[c(-2, -3, -7)] # it is not possible to mix positive and negative values y[c(-3, 1)] # or by logical values # & - and # | - or y <- rnorm(100) y > 0 y[y > 2] # elements bigger than 5 y[y < -2 | y > 2] # elements smaller than -2 OR greater than 2 y[y > 1 & y < 2] # elements greater than 1 AND lower than 2 y[y > mean(y)] x <- y # x becomes y x == y # comparing values all(x == y) # comparing whole vectors (x[6] <- 0) !(x == y) # negation all(x == y) any(x == y) ## Working on vectors # working sperately on each element # or we can use functions # mathematical operations performed on each element seperately (x <- 1:12) x + 2 x * 5 x ^ 2 # working with two vectors x <- 1:5 y <- 6:10 x + y x^y # Vector functions # many functions take numbers, as well as vectors as arguments # function os performed for each element seperately sin(x) exp(x) ## Excercise # Create vector 
named id that consints of numbers from your student number # Check if its type is numeric or character # Replace 3th element with 5 # Take 1st and 2nd element to the power of two # Check which elements are greater than 8 and smaller or equal to 35 # Sort decreasingly # Sum all elements, devide by 3 and round to integer value ## Matrix # Matrices keep elements of the same type x <- seq(1, 99, length = 8) A <- matrix(x, ncol = 4, nrow = 2, byrow = FALSE) A A <- matrix(x, ncol = 4, nrow = 2, byrow = TRUE) A # Indexing A[3:4] # returns elements counting form up to down and then from left to right A[2, ] # whole rows A[, 4] # whole columns A # Multiplying matrixes B <- matrix(1:16, nrow = 4) A %*% B # dimmensins have to be correct B %*% A dim(A) # output: x,y, where: x - rows, y - columns nrow(A) ncol(B) # Multiplying elements by each other A * A # Important # For matrixes and vectors we can use most mathematical functions log(B) sin(A) A * 5 A + 10 # Binding matrixes cbind(B, t(B)) rbind(A, 2 * A) # dimensions must be the same! cbind(A, B) rbind(A, B) ### Lists # Lists are a collection of vectors that can keep elements of different types (or other lists as well) myList <- list(a = 1, b = "a", c = 1:4, d = list(), 6) myList #Indexing # ATTENTION list indexing is similar to vectors but there are some substantioal differences myList[3] # returns a list of one element class(myList[3]) # just to make sure that we get a list instead of vector myList[[3]] # returns an element of list myList[[3]][1] # returns first element of third element from list #or we can reffer to name lista[["c"]] # or use '$' operator lista$c ##### DATA FRAME Dane <- data.frame(aa = B[,1], bb = B[,2], B[,3]) # Vectors have to have the same length Dane Dane$aa # attributes(Dane) head(Dane, 1) tail(Dane) str(Dane) # Most functions can be applied to data.frame sin(Dane) t(Dane) rbind(Dane, Dane) # EXCERCISE # # Create data frame "myData" with following records: # ? name # ? 
second_name (NA if he/she does not have) # ? age # ? sex (TRUE if woman, FALSE if man) # containg data for 3 people # order according to age # choose 1 and 3 record # add new column named occup that has value 'student' for all cases #### Factors eyeColor <- c('n', 'n', 'z', 'b', 'b', 'b', 'n', 'z', 'b', 'z') eyeColor eyeColor <- factor(eyeColor) eyeColor levels(eyeColor) levels(eyeColor) <- c("brown", "blue", "green") #### Working with text value #### # 'paste' - tekst <- c("X","Y","Z") liczba <- c(1:12) paste(tekst, liczba, sep="") paste(tekst, liczba, collapse = "") paste0(tekst, liczba) zdanie <- c("Ala", "ma", "kota") grep("Ala", zdanie) # returns indices of elements that match given pattern grepl("ma", zdanie) # as above but returns logical vector gsub("ma", "nie ma", zdanie) # replacing given string with another # IV. Data export/import --------------------------------------------------- ?read.table ?read.csv ?read.csv2 getwd() setwd("/Desktop/materiały") # in labs it is advised to use desktop as location (due to access issues) ex_data<-read.csv("quantile_health.csv",sep=",",dec=".") read.csv() read.csv2() # Useful libraries: 'foreign' (SPSS, SAS formats), XLConnect (xls, xlsx) # V. Statistics in R ------------------------------------------------- require(moments) require(MASS) #1. 
Basic statistics # Lets use previous data head(ex_data) # first observations and column names tail(ex_data) # last observations and column names colnames(ex_data) # column names ex_data$age # particular column/variable ex_data[,6] # same as '$' ex_data <- rbind(ex_data, rep(NA, length(colnames(ex_data)))) sum(is.na(ex_data)) # counts NAs (lacking observations) summary(ex_data) # basic statistics for each column/variable # A) Mean, median, quantiles mean(ex_data$age) mean(ex_data$age[!is.na(ex_data$age)]) mean(ex_data$age, na.rm=TRUE) ex_data <- ex_data[-nrow(ex_data),] mean(ex_data$age) mean(ex_data$age, trim=0.10) # skips 10% of highest and lowest values quantile(ex_data$age, 0.1) #10th percentile median(ex_data$age) # same as second quartile plot(ex_data$age,pch=20,type="p", ylab = "Age", xlab = "Number of observation", ylim = c(50,100)) # pch - typ punktu - 20 to wypelniona kropka xlab - opis osi x, ylab opis osi y, typ wykresu (tu: p-points) abline(h=mean(ex_data$age),col="red",lwd=2) # linia oznaczajaca wartosc srednia , abline - kreski pozioma (horizontal h) i pionowa (vertrical v) abline(h=quantile(ex_data$age,0.1),col="red",lwd=2,lty=2) # linia oznaczajaca wartosc srednia , abline - kreski pozioma (horizontal h) i pionowa (vertrical v),lty - typ lini, lwd - grubo?? 
lini abline(h=quantile(ex_data$age,0.9),col="red",lwd=2,lty=2) # B) Variance and standard deviation var(ex_data$age) sd(ex_data$age) # standard deviation abline(h=c(mean(ex_data$age)+3*sd(ex_data$age),mean(ex_data$age)-3*sd(ex_data$age)),lwd=2,lty="dashed",col="blue") # C) Maximum and minimum max(ex_data$age) min(ex_data$age) # D) Quantiles quantile(ex_data$age) # E) Range (rozstęp) range(ex_data$age) # F) Quantile range IQR(ex_data$age) # G) Number of elements length(ex_data$age) # H) Skewness skewness(ex_data$age) # I) Kurtosis kurtosis(ex_data$age)-3 # this function does not subtract 3 from kurtosis so we have to do this manually # K) Correlation cor(ex_data$age,ex_data$totexp) cor(ex_data$totexp,ex_data$totchr) # L) Standarization stand<-scale(ex_data$totexp) # scales to normal distribution N(0,1) sigma<-stand[stand<3 & stand>-3] # we get rid of extreme observations (differing from bean more than 3 st. deviations) length(sigma)/length(stand) # N) All-in-One summary(ex_data$age) ##Frequencies tablica <- table(ex_data$white,ex_data$female) prop.table(tablica,1) prop.table(tablica,2) margin.table(tablica,1) margin.table(tablica,2) #Excercise # Load dataset 'cars' # Check number of observations # Check tha values of variables in the last line # Calculate mean, median, standard deviation for variable 'disp'. Chcek the skewness # Draw a scatterplot for variable 'Hight". 
With red line indicate the mean value, change axis nammes # Conditional instructions ------------------------------------------------- (variable <- rnorm(1)) #IF statement # IF condition met THEN do something if (variable < 0){ cat("lower \n") } # IF condition met THEN do something ELSE do something else if (variable < 0){ cat("lower \n") }else{ cat("higher") } # condition must return only one value if (c(-1,0,1) > 0){ cat("higher\n") } # advanced conditions variable <- 1.2 if (variable < 0 && variable^2 > 0.5) { cat("OK\n") } else { cat("Not OK\n") } if (variable < 0 || variable^2 > 0.5){ cat("OK\n") }else{ cat("Not OK\n") } ### IFELSE - vector version of IF statement (variable <- rnorm(5)) ifelse(variable < 0, "lower", "higher") d <- ifelse(variable< 0, "lower", "higher") # it is done for each element d # values returned can also be vectors x <- 1:5 y <- -(1:5) ifelse(variable < 0, x, y) # VII. Loops --------------------------------------------------------------- ### FOR for(i in 1:5) { cat("Current value of i is:", i, "\n") } (macierz <- matrix(1:20, 5, 4)) for(i in 1:nrow(macierz)) { print(mean(macierz[i,])) } # Loops in R are SLOW, it is better to use vector functions instead when possible rowMeans(macierz) # next skips to next element immediately (omits code after itself) ## 'next' i 'break' for(i in 1:5){ if (i == 3) next cat(paste("Current value of i is:", i, "\n")) } # break ends loop for(i in 1:5){ cat(paste("Current value of i is:", i, "\n")) if (i == 3) break } # Nested loops for(i in 1:4){ for (j in 1:4){ if (j == i) break cat(paste("Current value of i is:", i, "and for j:", j, "\n")) } } #VIII. 
Functions myFunction <- function(argumentsNames){ instructions } # sayHello<-function(i){ cat(rep("Hello world!\n",i)) } sayHello(3) jedynka <- function(x, y){ z1 <- sin(x) z2 <- cos(y) z1^2 + z2^2 } jedynka(2,1) # by default only last value calculated is returned # we can declare what should be returned jedynka <- function(x, y){ z1 <- sin(x) z2 <- cos(y) wynik <- z1^2+z2^2 return(wynik) # return ends function so 'blablabla' won't be displayed cat("blablabla") } jedynka(2,1) #-------------------LOGISTIC REGRESSION----------------------------- rm(list=ls()) dane <- read.csv("adult.csv") head(dane) attach(dane) # number of occurances table(ABOVE50K) cut_off <- sum(ABOVE50K)/length(ABOVE50K) # create Training set input_ones <- dane[which(ABOVE50K == 1), ] # all 1's input_zeros <- dane[which(ABOVE50K == 0), ] # all 0's set.seed(100) # for repeatability of samples input_ones_training_rows <- sample(1:nrow(input_ones), 0.7*nrow(input_ones)) # 1's for training input_zeros_training_rows <- sample(1:nrow(input_zeros), 0.7*nrow(input_ones)) # 0's for training. 
# Pick as many 0's as 1's (undersampling the majority class).
training_ones <- input_ones[input_ones_training_rows, ]
training_zeros <- input_zeros[input_zeros_training_rows, ]
trainingData <- rbind(training_ones, training_zeros)  # row bind the 1's and 0's

# Create test set from the rows NOT sampled into training.
test_ones <- input_ones[-input_ones_training_rows, ]
test_zeros <- input_zeros[-input_zeros_training_rows, ]
testData <- rbind(test_ones, test_zeros)  # row bind the 1's and 0's

# Build model: full logit on all predictors, then a reduced specification.
logitMod <- glm(ABOVE50K ~ ., data=trainingData, family=binomial(link="logit"))
summary(logitMod)
logitMod_short <- glm(ABOVE50K ~ RELATIONSHIP + AGE + CAPITALGAIN + OCCUPATION + EDUCATIONNUM,
                      data=trainingData, family=binomial(link="logit"))
summary(logitMod_short)

# Predict for test set
predicted_odds <- predict(logitMod_short, testData)  # linear predictor (log-odds), not in [0,1]
predicted_prob <- plogis(predict(logitMod_short, testData))  # probabilities in [0,1]
predicted_bin <- ifelse(predicted_prob<cut_off,0,1)  # hard 0/1 classification at cut_off
/Class 1/Introduction_R_after class.R
no_license
nosarzewski/SGH_SDM_18
R
false
false
17,670
r
# Statistical learning methods # Lab 1 - Introduction to R, Logistic regression, shiny, plotly # materials: https://github.com/nosarzewski/SGH_SDM_18 # mail: nosarzewski.aleks@gmail.com, an56786@sgh.waw.pl # Plan # I. About R # II. Working R # III. Objects in R # IV. Data Export/Import # V. Statistics in R # VI. Conditional instructions # VII. Loops # VIII. Functions # I. About R ------------------------------------------------------------- # Where to find information? # http://www.r-project.org/doc/bib/R-books.html ## manuals: # http://cran.r-project.org/manuals.html ## R for data analysis ## Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani ## An Introduction to Statistical Learning with Applications in R #http://www-bcf.usc.edu/~gareth/ISL/ ## Andy Field, Discovering Statistics Using R - very straightforward and easy to understand #Polish books, websites: ## Bogumił Kamiński, Mateusz Zawisza "Receptury w R - Podręcznik dla ekonomistów" ##K. Kopczewska, T. Kopczewski, P. Wójcik, Metody ilościowe w R. Aplikacje ekonomiczne i finansowe, 2009, CeDeWu ## Eugeniusz Gatnar, Marek Walesiak "Analiza danych jakościowych i symbolicznych z wykorzystaniem programu R" ## Eugeniusz Gatnar, Marek Walesiak "Statystyczna analiza danych z wykorzystaniem programu R" ## Przemysław Biecek 'Przewodnik po pakiecie R' # http://www.biecek.pl/R/R.pdf ## Przemysław Biecek 'Na przelaj przez Data Mining z pakietem R' # http://www.biecek.pl/NaPrzelajPrzezDataMining/NaPrzelajPrzezDataMining.pdf ## Project website: # http://www.r-project.org/ ##Some MOOCs: # https://www.datacamp.com/courses/free-introduction-to-r # http://tryr.codeschool.com/ # http://swirlstats.com/ (learning R in R!) # https://www.edx.org/course/introduction-r-data-science-microsoft-dat204x-7 ## I strongly recommend attending prof. 
Ramsza's classes (both in Polish and English) - a bit more demanding but very valueable: # Basic R programming / Podstawy programowania w R # http://michal.ramsza.org/ # What if I did't find there help? # http://r.789695.n4.nabble.com/ # http://stackoverflow.com/ ## Selected keyboard shortcuts in RSTUDIO # CTRL+ENTER: running a instruction (R GUI - F5): active line or # TAB: hints for functions/objects # TAB after opening bracket: function arguments # F1 on function: help # CTRL+1: switching to editor # CTRL+2: switching to console # I. Working with R ----------------------------------------------------------- # How R works: # - console # - scripts # If we need documantation of function (or any other thing) # help(<function_name>) help() help(plot) # Similar to: # ?function_name - looks for functions with identical name as given string # ??"function_name" - looks for functions that have in name or in description given string ?plot ??"plot" # Setting and changing working directory getwd() setwd("D:/") #R uses Unix-type (macOS or Linux) locations, on Windows replace '\' with '/' dir() #content of working directory # We can also change it by menu and "session" tab # Environment # keeps declared variables/functions/data x <- 2 ls() # check what we have in workspace rm("x") # deleting element from workspace rm(list=ls()) # deleting ALL elements from workspace ## Additional packages # Installing # install.packages("package_name") install.packages("randomForest") # First you have to load package, before you use it # library(<package_name>) library(randomForest) require(randomForest) ## Types of variables # There are no seperate types for letters and words typeof() #logical values T == TRUE F == FALSE # Class is different from type. Type defines the way that object is kept # Class is an attribute of the object (in the terms of object programming) # Klasa to co innego ni? typ. 
Typ kt?ry okre?la wewn?trzny spos?b typeof(1) class(1) # Family of function checking and converting types is.integer(1.5) as.character(3) # If we do not have data NA # Not Available is.na(NA) NaN # Not a Number is.na(NaN) #check vetor with NA and "NA" # what to do with na? na.rm abc <- c(1,2,3,NA) mean(abc, na.rm = TRUE) # arrows can be used in both ways 10 -> a a # multiassignement a <- b <- c <- 20 a b c # III. Objects in R -------------------------------------------------------- # !!! In R evertying is an object ### scalars b <- 5 b # Maths # Mathematical functions # square root # absolute value # logarithm 2^3 # power sqrt(5) # square root of 5 5^0.5 5^(-1/3) abs(-99) # absolute value of -99 log(56) # natural logarithm log2(64) # binary logarithm logb(56, base = 5.6) # base of choice exp(2) # Euler number to the power of 2 (e^2) factorial(5) # 5! sin(0) ?Trig #How do we round numbers? ceiling(3.5) floor(3.5) trunc(5.99) # difference between 'trunc' and 'floor' x <- c(-3.2, -1.8, 2.3, 2.9) floor(x) trunc(x) round(6.592, digits = 2) ### Vectors # !!! Vector keeps elements of the same type # Vectors begin with 'c' x <- c(1, 6, 9) x y <- c("a", "b", "c") y z <- c(TRUE, FALSE, TRUE) # !!! Vector keeps elements of the same type f <- c("a", 1, TRUE) f # values automatically converted to strings #Sequences 1:10 x <- seq(1, 8, by = 2) x x <- seq(1, 8, length = 5) x #Vectors with replicated values rep(c(1, 3, 5), times = 3) rep(c(1, 3, 5), each = 3) rep(c(1, 3, 5), times = 3, each = 3) # Different functions useful for vectors #sample #length #rev #unique #sort #order #sum, cummulative sum #product, cumulative prod #difference x <- sample(1:10, 10) # random vector length(x) # haw many elements? 
rev(x) # reversing backwards unique(x) # unique values sort(x) # sorting (by default ascending) sort(x, decreasing = TRUE) order(x) # order in sorted vector x[order(x)] # we get sorted vector sum(x) # sum of vector elements prod(x) # product of vectors elements cumsum(x) # cumulative sum cumprod(x) # cumulative product diff(x) # differences between following elements #Generating random numbers set.seed(1) #setting seed of generator los <- rnorm(1000) # generates 1000 pseudo random numbers from N(0,1) hist(los,freq=0) # draws histogram lines(density(los),col="blue") # adds density function x <- seq(from=min(los),to=max(los),by=0.1) mean(los) #srednia sd(los) #odchylenie standardowe wart.f.gestosci <- dnorm(x,mean=mean(los),sd=sd(los)) # theoretical density lines(x,wart.f.gestosci,col="red") hist(x<-rnorm(100,mean=10,sd=3),freq=0) lines(density(x)) los<-runif(10^6) hist(los,xlim=c(-1,2)) # Indexing, can be done be reffering the position y <- c(3, 5, 1, -0.9, 44, 17, 9) y[5] # 5th element y[3:5] # elements from 3 to 5 y[4] <- 1000 y y[-4] # except 4th element # we can also use vectors for indexing y[c(1, 5, 6)] y[c(-2, -3, -7)] # it is not possible to mix positive and negative values y[c(-3, 1)] # or by logical values # & - and # | - or y <- rnorm(100) y > 0 y[y > 2] # elements bigger than 5 y[y < -2 | y > 2] # elements smaller than -2 OR greater than 2 y[y > 1 & y < 2] # elements greater than 1 AND lower than 2 y[y > mean(y)] x <- y # x becomes y x == y # comparing values all(x == y) # comparing whole vectors (x[6] <- 0) !(x == y) # negation all(x == y) any(x == y) ## Working on vectors # working sperately on each element # or we can use functions # mathematical operations performed on each element seperately (x <- 1:12) x + 2 x * 5 x ^ 2 # working with two vectors x <- 1:5 y <- 6:10 x + y x^y # Vector functions # many functions take numbers, as well as vectors as arguments # function os performed for each element seperately sin(x) exp(x) ## Excercise # Create vector 
named id that consints of numbers from your student number # Check if its type is numeric or character # Replace 3th element with 5 # Take 1st and 2nd element to the power of two # Check which elements are greater than 8 and smaller or equal to 35 # Sort decreasingly # Sum all elements, devide by 3 and round to integer value ## Matrix # Matrices keep elements of the same type x <- seq(1, 99, length = 8) A <- matrix(x, ncol = 4, nrow = 2, byrow = FALSE) A A <- matrix(x, ncol = 4, nrow = 2, byrow = TRUE) A # Indexing A[3:4] # returns elements counting form up to down and then from left to right A[2, ] # whole rows A[, 4] # whole columns A # Multiplying matrixes B <- matrix(1:16, nrow = 4) A %*% B # dimmensins have to be correct B %*% A dim(A) # output: x,y, where: x - rows, y - columns nrow(A) ncol(B) # Multiplying elements by each other A * A # Important # For matrixes and vectors we can use most mathematical functions log(B) sin(A) A * 5 A + 10 # Binding matrixes cbind(B, t(B)) rbind(A, 2 * A) # dimensions must be the same! cbind(A, B) rbind(A, B) ### Lists # Lists are a collection of vectors that can keep elements of different types (or other lists as well) myList <- list(a = 1, b = "a", c = 1:4, d = list(), 6) myList #Indexing # ATTENTION list indexing is similar to vectors but there are some substantioal differences myList[3] # returns a list of one element class(myList[3]) # just to make sure that we get a list instead of vector myList[[3]] # returns an element of list myList[[3]][1] # returns first element of third element from list #or we can reffer to name lista[["c"]] # or use '$' operator lista$c ##### DATA FRAME Dane <- data.frame(aa = B[,1], bb = B[,2], B[,3]) # Vectors have to have the same length Dane Dane$aa # attributes(Dane) head(Dane, 1) tail(Dane) str(Dane) # Most functions can be applied to data.frame sin(Dane) t(Dane) rbind(Dane, Dane) # EXCERCISE # # Create data frame "myData" with following records: # ? name # ? 
second_name (NA if he/she does not have) # ? age # ? sex (TRUE if woman, FALSE if man) # containg data for 3 people # order according to age # choose 1 and 3 record # add new column named occup that has value 'student' for all cases #### Factors eyeColor <- c('n', 'n', 'z', 'b', 'b', 'b', 'n', 'z', 'b', 'z') eyeColor eyeColor <- factor(eyeColor) eyeColor levels(eyeColor) levels(eyeColor) <- c("brown", "blue", "green") #### Working with text value #### # 'paste' - tekst <- c("X","Y","Z") liczba <- c(1:12) paste(tekst, liczba, sep="") paste(tekst, liczba, collapse = "") paste0(tekst, liczba) zdanie <- c("Ala", "ma", "kota") grep("Ala", zdanie) # returns indices of elements that match given pattern grepl("ma", zdanie) # as above but returns logical vector gsub("ma", "nie ma", zdanie) # replacing given string with another # IV. Data export/import --------------------------------------------------- ?read.table ?read.csv ?read.csv2 getwd() setwd("/Desktop/materiały") # in labs it is advised to use desktop as location (due to access issues) ex_data<-read.csv("quantile_health.csv",sep=",",dec=".") read.csv() read.csv2() # Useful libraries: 'foreign' (SPSS, SAS formats), XLConnect (xls, xlsx) # V. Statistics in R ------------------------------------------------- require(moments) require(MASS) #1. 
Basic statistics # Lets use previous data head(ex_data) # first observations and column names tail(ex_data) # last observations and column names colnames(ex_data) # column names ex_data$age # particular column/variable ex_data[,6] # same as '$' ex_data <- rbind(ex_data, rep(NA, length(colnames(ex_data)))) sum(is.na(ex_data)) # counts NAs (lacking observations) summary(ex_data) # basic statistics for each column/variable # A) Mean, median, quantiles mean(ex_data$age) mean(ex_data$age[!is.na(ex_data$age)]) mean(ex_data$age, na.rm=TRUE) ex_data <- ex_data[-nrow(ex_data),] mean(ex_data$age) mean(ex_data$age, trim=0.10) # skips 10% of highest and lowest values quantile(ex_data$age, 0.1) #10th percentile median(ex_data$age) # same as second quartile plot(ex_data$age,pch=20,type="p", ylab = "Age", xlab = "Number of observation", ylim = c(50,100)) # pch - typ punktu - 20 to wypelniona kropka xlab - opis osi x, ylab opis osi y, typ wykresu (tu: p-points) abline(h=mean(ex_data$age),col="red",lwd=2) # linia oznaczajaca wartosc srednia , abline - kreski pozioma (horizontal h) i pionowa (vertrical v) abline(h=quantile(ex_data$age,0.1),col="red",lwd=2,lty=2) # linia oznaczajaca wartosc srednia , abline - kreski pozioma (horizontal h) i pionowa (vertrical v),lty - typ lini, lwd - grubo?? 
lini abline(h=quantile(ex_data$age,0.9),col="red",lwd=2,lty=2) # B) Variance and standard deviation var(ex_data$age) sd(ex_data$age) # standard deviation abline(h=c(mean(ex_data$age)+3*sd(ex_data$age),mean(ex_data$age)-3*sd(ex_data$age)),lwd=2,lty="dashed",col="blue") # C) Maximum and minimum max(ex_data$age) min(ex_data$age) # D) Quantiles quantile(ex_data$age) # E) Range (rozstęp) range(ex_data$age) # F) Quantile range IQR(ex_data$age) # G) Number of elements length(ex_data$age) # H) Skewness skewness(ex_data$age) # I) Kurtosis kurtosis(ex_data$age)-3 # this function does not subtract 3 from kurtosis so we have to do this manually # K) Correlation cor(ex_data$age,ex_data$totexp) cor(ex_data$totexp,ex_data$totchr) # L) Standarization stand<-scale(ex_data$totexp) # scales to normal distribution N(0,1) sigma<-stand[stand<3 & stand>-3] # we get rid of extreme observations (differing from bean more than 3 st. deviations) length(sigma)/length(stand) # N) All-in-One summary(ex_data$age) ##Frequencies tablica <- table(ex_data$white,ex_data$female) prop.table(tablica,1) prop.table(tablica,2) margin.table(tablica,1) margin.table(tablica,2) #Excercise # Load dataset 'cars' # Check number of observations # Check tha values of variables in the last line # Calculate mean, median, standard deviation for variable 'disp'. Chcek the skewness # Draw a scatterplot for variable 'Hight". 
With red line indicate the mean value, change axis nammes # Conditional instructions ------------------------------------------------- (variable <- rnorm(1)) #IF statement # IF condition met THEN do something if (variable < 0){ cat("lower \n") } # IF condition met THEN do something ELSE do something else if (variable < 0){ cat("lower \n") }else{ cat("higher") } # condition must return only one value if (c(-1,0,1) > 0){ cat("higher\n") } # advanced conditions variable <- 1.2 if (variable < 0 && variable^2 > 0.5) { cat("OK\n") } else { cat("Not OK\n") } if (variable < 0 || variable^2 > 0.5){ cat("OK\n") }else{ cat("Not OK\n") } ### IFELSE - vector version of IF statement (variable <- rnorm(5)) ifelse(variable < 0, "lower", "higher") d <- ifelse(variable< 0, "lower", "higher") # it is done for each element d # values returned can also be vectors x <- 1:5 y <- -(1:5) ifelse(variable < 0, x, y) # VII. Loops --------------------------------------------------------------- ### FOR for(i in 1:5) { cat("Current value of i is:", i, "\n") } (macierz <- matrix(1:20, 5, 4)) for(i in 1:nrow(macierz)) { print(mean(macierz[i,])) } # Loops in R are SLOW, it is better to use vector functions instead when possible rowMeans(macierz) # next skips to next element immediately (omits code after itself) ## 'next' i 'break' for(i in 1:5){ if (i == 3) next cat(paste("Current value of i is:", i, "\n")) } # break ends loop for(i in 1:5){ cat(paste("Current value of i is:", i, "\n")) if (i == 3) break } # Nested loops for(i in 1:4){ for (j in 1:4){ if (j == i) break cat(paste("Current value of i is:", i, "and for j:", j, "\n")) } } #VIII. 
Functions myFunction <- function(argumentsNames){ instructions } # sayHello<-function(i){ cat(rep("Hello world!\n",i)) } sayHello(3) jedynka <- function(x, y){ z1 <- sin(x) z2 <- cos(y) z1^2 + z2^2 } jedynka(2,1) # by default only last value calculated is returned # we can declare what should be returned jedynka <- function(x, y){ z1 <- sin(x) z2 <- cos(y) wynik <- z1^2+z2^2 return(wynik) # return ends function so 'blablabla' won't be displayed cat("blablabla") } jedynka(2,1) #-------------------LOGISTIC REGRESSION----------------------------- rm(list=ls()) dane <- read.csv("adult.csv") head(dane) attach(dane) # number of occurances table(ABOVE50K) cut_off <- sum(ABOVE50K)/length(ABOVE50K) # create Training set input_ones <- dane[which(ABOVE50K == 1), ] # all 1's input_zeros <- dane[which(ABOVE50K == 0), ] # all 0's set.seed(100) # for repeatability of samples input_ones_training_rows <- sample(1:nrow(input_ones), 0.7*nrow(input_ones)) # 1's for training input_zeros_training_rows <- sample(1:nrow(input_zeros), 0.7*nrow(input_ones)) # 0's for training. 
Pick as many 0's as 1's training_ones <- input_ones[input_ones_training_rows, ] training_zeros <- input_zeros[input_zeros_training_rows, ] trainingData <- rbind(training_ones, training_zeros) # row bind the 1's and 0's # create Test set test_ones <- input_ones[-input_ones_training_rows, ] test_zeros <- input_zeros[-input_zeros_training_rows, ] testData <- rbind(test_ones, test_zeros) # row bind the 1's and 0's # build model logitMod <- glm(ABOVE50K ~ ., data=trainingData, family=binomial(link="logit")) summary(logitMod) logitMod_short <- glm(ABOVE50K ~ RELATIONSHIP + AGE + CAPITALGAIN + OCCUPATION + EDUCATIONNUM, data=trainingData, family=binomial(link="logit")) summary(logitMod_short) # predict for test set predicted_odds <- predict(logitMod_short, testData) # predicted odds (not from [0,1]) predicted_prob <- plogis(predict(logitMod_short, testData)) # predicted probabilities (from [0,1]) predicted_bin <- ifelse(predicted_prob<cut_off,0,1) # predicted classification
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PeriodSelector.R \docType{class} \name{PeriodSelector-class} \alias{PeriodSelector-class} \title{PeriodSelector} \description{ Define the PeriodSelector properties. } \section{Slots}{ \describe{ \item{\code{periods}}{\code{list}. Period object has 4 properties - period, count, label and selected. Possible period values are: "ss" - seconds, "mm" - minutes, "hh" - hours, "DD" - days, "MM" - months and "YYYY" - years. property "count" specifies how many periods this button will select. "label" will be displayed on a button and "selected" is a boolean which specifies if this button is selected when chart is initialized or not.} \item{\code{listeners}}{\code{list} containining the listeners to add to the object. The list must be named as in the official API. Each element must a character string. See examples for details.} \item{\code{otherProperties}}{\code{list}, containing other avalaible properties non coded in the package yet.} \item{\code{value}}{Object of class \code{numeric}.} }} \author{ Dataknowledge }
/man/PeriodSelector-class.Rd
no_license
lucavignali/rAmCharts
R
false
true
1,105
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PeriodSelector.R \docType{class} \name{PeriodSelector-class} \alias{PeriodSelector-class} \title{PeriodSelector} \description{ Define the PeriodSelector properties. } \section{Slots}{ \describe{ \item{\code{periods}}{\code{list}. Period object has 4 properties - period, count, label and selected. Possible period values are: "ss" - seconds, "mm" - minutes, "hh" - hours, "DD" - days, "MM" - months and "YYYY" - years. property "count" specifies how many periods this button will select. "label" will be displayed on a button and "selected" is a boolean which specifies if this button is selected when chart is initialized or not.} \item{\code{listeners}}{\code{list} containining the listeners to add to the object. The list must be named as in the official API. Each element must a character string. See examples for details.} \item{\code{otherProperties}}{\code{list}, containing other avalaible properties non coded in the package yet.} \item{\code{value}}{Object of class \code{numeric}.} }} \author{ Dataknowledge }
#:# libraries library(digest) library(mlr) library(OpenML) library(farff) #:# config set.seed(1) #:# data dataset <- getOMLDataSet(data.name = "sonar") head(dataset$data) #:# preprocessing head(dataset$data) #:# model task = makeClassifTask(id = "task", data = dataset$data, target = "Class") lrn = makeLearner("classif.PART", par.vals = list(R = FALSE, B = FALSE, M = 1, J = TRUE), predict.type = "prob") #:# hash #:# af427dbc4897bc1895327019f2385ed0 hash <- digest(list(task, lrn)) hash #:# audit cv <- makeResampleDesc("CV", iters = 5) r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1)) ACC <- r$aggr ACC #:# session info sink(paste0("sessionInfo.txt")) sessionInfo() sink()
/models/openml_sonar/classification_Class/af427dbc4897bc1895327019f2385ed0/code.R
no_license
pysiakk/CaseStudies2019S
R
false
false
714
r
#:# libraries library(digest) library(mlr) library(OpenML) library(farff) #:# config set.seed(1) #:# data dataset <- getOMLDataSet(data.name = "sonar") head(dataset$data) #:# preprocessing head(dataset$data) #:# model task = makeClassifTask(id = "task", data = dataset$data, target = "Class") lrn = makeLearner("classif.PART", par.vals = list(R = FALSE, B = FALSE, M = 1, J = TRUE), predict.type = "prob") #:# hash #:# af427dbc4897bc1895327019f2385ed0 hash <- digest(list(task, lrn)) hash #:# audit cv <- makeResampleDesc("CV", iters = 5) r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1)) ACC <- r$aggr ACC #:# session info sink(paste0("sessionInfo.txt")) sessionInfo() sink()
#' sim_dauer_biased #' #' simulate unbalanced dauer assay data using 3 groups of strains #' I, A and B. Simulation uses day-to-day and plate-to-plate variance. #' Can be used to simulate any binomial data with hierarchical clusters. #' Contains nested function gen.dauer.data which is specific for unbalanced.data. #' #' #' @param settings input list of settings for the simulation. See sim_dauer #' #' @importFrom magrittr '%>%' #' @importFrom magrittr '%<>%' #' @importFrom dplyr '%>%' #' @importFrom lmerTest 'lmer' #' #' @export #' @examples settings <- list(settings <- list( #' I = 0 #population control intercept (in logit). 0 = p(0.5) #' ,nP = 12 # number of plates in total (for group I) min = 12 #' ,nD = 6 # number of days (some will be randomly missing) min = 6 #' ,sP = 0.1 # plate to plate variance (0.3) #' ,sD = 0.5 # day to day variance (0.2) #' ,sG = 0.5 # genotype variance due to culture history (logit) (0.2) #' ,k = 60 # number animal per plate) #' ,A = 0 # population A intercept (expt - genotype2) #' ,B = 0 #pop B intercept (expt - genotype2) #' )) #' #' ### to plot one simulation #' sim_dauer_biased(settings = c(settings, do.plot = TRUE)) #' #' ### to simulate and perform model tests once - see scripts/sim_dauer_data.r for multiple simulations #' simulation <- sim_dauer_unbal(settings = c(settings, do.plot = FALSE, do.stan = TRUE)) sim_dauer_biased<-function(settings) { # get settings I = settings$I #population control intercept (in logit) nP = settings$nP # number of plates nD = settings$nD # number of days sP <- settings$sP # plate to plate sd sD = settings$sD # day to day sd sG = settings$sG # genotype sd due to culture history (logit) k = settings$k # number animal per plate) A = settings$A # population A intercept (expt) B = settings$B # pop B intercept do.plot = settings$do.plot # plot for vis inpection (no model fits) do.stan = settings$do.stan # fit stan_glmer ############### generate simulated data ############# day = (seq(1:nD)) plate = 
seq(1:(nP)) #correlation matrix for multivariate normal random effect rho <- cbind(c(1, .8, .8), c(.8, 1, .8), c(.8, .8, 1)) # high correlation Sigma <- sD * rho missing.days.1 <- sample(day,3) #missing days for group A = genotype2 missing.days.2 <- day[!day %in% missing.days.1] #missing days for group B = genotype3 #missing.plates.1 <- c((missing.days.1*2) - 1, missing.days.1*2) # missing plates for group A (for indexing) #missing.plates.2 <- c((missing.days.2*2) - 1, missing.days.2*2) # missing plates for group B) gen.dauer.data <- function(...) { # random effects with mean 0 and var = sP,sD or sG N RE.p.I = as.numeric(rnorm(nP, 0, sd = sP)) # random plate intercept based on sP RE.p.A = as.numeric(rnorm(nP, 0, sd = sP)) RE.p.B = as.numeric(rnorm(nP, 0, sd = sP)) RE.GP.I = as.numeric(rnorm(nD, 0, sd = sG)) RE.GP.A = as.numeric(rnorm(nD, 0, sd = sG)) RE.GP.B = as.numeric(rnorm(nD, 0, sd = sG)) RE.d.1 = as.numeric(mvrnorm(1,c(0,0,0),Sigma)) #correlated random effects for 3 days RE.d.2 = as.numeric(mvrnorm(1,c(0,0,0),Sigma)) #correlated random effects for other 3 days.1 <- cbind(day = missing.days.2,RE.d = RE.d.1) days.2 <- cbind(day = missing.days.1,RE.d = RE.d.2) days.all <- rbind(days.1,days.2) %>% data.frame # data for three groups - balanced data.I <- cbind(genotype = 1, plate = plate, mean = I, k = rpois(nP, k), RE.p = RE.p.I, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.I, each = nP/nD), y = NA) %>% data.frame() %>% dplyr::mutate(y=rbinom(nP,k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data.A <- cbind(genotype = 2, plate = plate, mean = A, k = rpois(nP, 60), RE.p = RE.p.A, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.A, each = nP/nD), y = NA)%>% data.frame() %>% dplyr::filter(!day %in% missing.days.1) %>% dplyr::mutate(y=rbinom(nP-(length(missing.days.1)*2),k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data.B <- cbind(genotype = 3, plate = plate, k = 
rpois(nP, 60), mean = B, RE.p = RE.p.B, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.B, each = nP/nD), y = NA) %>% data.frame() %>% dplyr::filter(!day %in% missing.days.2) %>% dplyr::mutate(y=rbinom(nP-(length(missing.days.2)*2),k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data <- rbind(data.I, data.A, data.B) %>% dplyr::mutate(genotype = as.factor(genotype), strainDate = interaction(genotype,day), plateID = interaction(genotype,plate), p = y/k) return(data) } data <- gen.dauer.data() ############# model functions ####################### lm.sim <- function(df) { modsum <- df %>% lm(formula = p~genotype) %>% summary() genotype2 <- as.numeric(modsum$coefficients[,4][2]) genotype3 <- as.numeric(modsum$coefficients[,4][3]) Fp <- as.numeric(1-pf(modsum$fstatistic[1],modsum$fstatistic[2],modsum$fstatistic[3])) Chisq.p = NA model <- "anova" p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } t.sim <- function(df) { genotype2 = data %>% dplyr::filter(genotype != "3" & !day %in% missing.days.1) %$% t.test(p~genotype)$p.value genotype3 = data %>% dplyr::filter(genotype != "2" & !day %in% missing.days.2) %$% t.test(p~genotype)$p.value model = "t" Fp = NA Chisq.p = NA p.val <- data.frame(cbind(model,genotype2, genotype3, Fp, Chisq.p)) return(p.val) } glmm.sim <- function(df) { mod = data %>% lme4::glmer(formula = cbind(y, (k-y)) ~ genotype + (1|day/strainDate/plateID), family = binomial, control=glmerControl(optimizer="bobyqa")) nullmod = data %>% lme4::glmer(formula = cbind(y, (k-y)) ~ 1 + (1|day/strainDate/plateID), family = binomial, control=glmerControl(optimizer="bobyqa")) modsum <- mod %>% summary() genotype2 <- as.numeric(modsum$coefficients[,4][2]) genotype3 <- as.numeric(modsum$coefficients[,4][3]) model <- "glmm" compmod <- anova(nullmod, mod) Fp = NA Chisq.p <- compmod$`Pr(>Chisq)`[2] p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } stan.sim 
<- function(df) { library (rstan) rstan_options (auto_write=TRUE) options (mc.cores=parallel::detectCores ()) # Run on multiple cores # run stan mod with default priors mod <- stan_glmer( cbind(y, k-y) ~ genotype + (1|day) + (1|strainDate) + (1|plateID), data=data, family = binomial(link="logit"), chains = 3, cores =4, seed = 2000, control = list(adapt_delta=0.99) ) model = "stan" # get posterior 95% cred interval, test if it contains 0 (abs(sum) != sum(abs)) mod.pp <- posterior_interval(mod, prob = 0.95, pars = c("genotype2", "genotype3")) # will give TRUE if 95% CI contains 0 genotype2 <- as.numeric(abs(mod.pp[1,1]) + abs(mod.pp[1,2]) != abs(mod.pp[1,1] + mod.pp[1,2])) genotype3 <- as.numeric(abs(mod.pp[2,1]) + abs(mod.pp[2,2]) != abs(mod.pp[2,1] + mod.pp[2,2])) Fp = NA Chisq.p = NA p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) #return(mod) } lmm.sim <- function(df) { library(lme4) #library(lsmeans) mod <- df %>% lmer(formula = p~genotype + (1|day) + (1|strainDate)) rg <- mod %>% lsmeans::ref.grid() modsum <- rg %>% lsmeans::lsmeans("genotype") %>% lsmeans::contrast("trt.vs.ctrl", ref = 1) %>% summary(adjust = "none") genotype2 <- as.numeric(modsum$p.value[1]) genotype3 <- as.numeric(modsum$p.value[2]) Fp <- as.numeric(lmerTest::anova( lmerTest::lmer(formula = p~genotype + (1|day) + (1|strainDate), data = data), test = "F")[6][1,1]) Chisq.p = NA model <- "lmm" p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } # optional plot (use only for single simulation inspection) if(do.plot) { p<-data %>% ggplot(aes(x=genotype, y=p)) + geom_boxplot() + geom_point(aes(x=genotype, colour = factor(day))) return(p) } else { if(do.stan) { lm <- lm.sim(data) t <- t.sim(data) glmm <- glmm.sim(data) stan <- stan.sim(data) lmm <- lmm.sim(data) p.val <- rbind(lm, t, glmm, stan, lmm) return(p.val) } else { lm <- lm.sim(data) t <- t.sim(data) glmm <- glmm.sim(data) lmm <- lmm.sim(data) p.val <- rbind(lm, t, glmm, 
lmm) return(p.val) } } }
/R/sim_dauer_biased.R
no_license
mikeod38/dauergut
R
false
false
9,353
r
#' sim_dauer_biased #' #' simulate unbalanced dauer assay data using 3 groups of strains #' I, A and B. Simulation uses day-to-day and plate-to-plate variance. #' Can be used to simulate any binomial data with hierarchical clusters. #' Contains nested function gen.dauer.data which is specific for unbalanced.data. #' #' #' @param settings input list of settings for the simulation. See sim_dauer #' #' @importFrom magrittr '%>%' #' @importFrom magrittr '%<>%' #' @importFrom dplyr '%>%' #' @importFrom lmerTest 'lmer' #' #' @export #' @examples settings <- list(settings <- list( #' I = 0 #population control intercept (in logit). 0 = p(0.5) #' ,nP = 12 # number of plates in total (for group I) min = 12 #' ,nD = 6 # number of days (some will be randomly missing) min = 6 #' ,sP = 0.1 # plate to plate variance (0.3) #' ,sD = 0.5 # day to day variance (0.2) #' ,sG = 0.5 # genotype variance due to culture history (logit) (0.2) #' ,k = 60 # number animal per plate) #' ,A = 0 # population A intercept (expt - genotype2) #' ,B = 0 #pop B intercept (expt - genotype2) #' )) #' #' ### to plot one simulation #' sim_dauer_biased(settings = c(settings, do.plot = TRUE)) #' #' ### to simulate and perform model tests once - see scripts/sim_dauer_data.r for multiple simulations #' simulation <- sim_dauer_unbal(settings = c(settings, do.plot = FALSE, do.stan = TRUE)) sim_dauer_biased<-function(settings) { # get settings I = settings$I #population control intercept (in logit) nP = settings$nP # number of plates nD = settings$nD # number of days sP <- settings$sP # plate to plate sd sD = settings$sD # day to day sd sG = settings$sG # genotype sd due to culture history (logit) k = settings$k # number animal per plate) A = settings$A # population A intercept (expt) B = settings$B # pop B intercept do.plot = settings$do.plot # plot for vis inpection (no model fits) do.stan = settings$do.stan # fit stan_glmer ############### generate simulated data ############# day = (seq(1:nD)) plate = 
seq(1:(nP)) #correlation matrix for multivariate normal random effect rho <- cbind(c(1, .8, .8), c(.8, 1, .8), c(.8, .8, 1)) # high correlation Sigma <- sD * rho missing.days.1 <- sample(day,3) #missing days for group A = genotype2 missing.days.2 <- day[!day %in% missing.days.1] #missing days for group B = genotype3 #missing.plates.1 <- c((missing.days.1*2) - 1, missing.days.1*2) # missing plates for group A (for indexing) #missing.plates.2 <- c((missing.days.2*2) - 1, missing.days.2*2) # missing plates for group B) gen.dauer.data <- function(...) { # random effects with mean 0 and var = sP,sD or sG N RE.p.I = as.numeric(rnorm(nP, 0, sd = sP)) # random plate intercept based on sP RE.p.A = as.numeric(rnorm(nP, 0, sd = sP)) RE.p.B = as.numeric(rnorm(nP, 0, sd = sP)) RE.GP.I = as.numeric(rnorm(nD, 0, sd = sG)) RE.GP.A = as.numeric(rnorm(nD, 0, sd = sG)) RE.GP.B = as.numeric(rnorm(nD, 0, sd = sG)) RE.d.1 = as.numeric(mvrnorm(1,c(0,0,0),Sigma)) #correlated random effects for 3 days RE.d.2 = as.numeric(mvrnorm(1,c(0,0,0),Sigma)) #correlated random effects for other 3 days.1 <- cbind(day = missing.days.2,RE.d = RE.d.1) days.2 <- cbind(day = missing.days.1,RE.d = RE.d.2) days.all <- rbind(days.1,days.2) %>% data.frame # data for three groups - balanced data.I <- cbind(genotype = 1, plate = plate, mean = I, k = rpois(nP, k), RE.p = RE.p.I, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.I, each = nP/nD), y = NA) %>% data.frame() %>% dplyr::mutate(y=rbinom(nP,k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data.A <- cbind(genotype = 2, plate = plate, mean = A, k = rpois(nP, 60), RE.p = RE.p.A, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.A, each = nP/nD), y = NA)%>% data.frame() %>% dplyr::filter(!day %in% missing.days.1) %>% dplyr::mutate(y=rbinom(nP-(length(missing.days.1)*2),k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data.B <- cbind(genotype = 3, plate = plate, k = 
rpois(nP, 60), mean = B, RE.p = RE.p.B, day = rep(days.all$day, each = nP/nD), RE.d = rep(days.all$RE.d, each = nP/nD), RE.GP = rep(RE.GP.B, each = nP/nD), y = NA) %>% data.frame() %>% dplyr::filter(!day %in% missing.days.2) %>% dplyr::mutate(y=rbinom(nP-(length(missing.days.2)*2),k,boot::inv.logit(RE.p + RE.d + RE.GP + mean))) data <- rbind(data.I, data.A, data.B) %>% dplyr::mutate(genotype = as.factor(genotype), strainDate = interaction(genotype,day), plateID = interaction(genotype,plate), p = y/k) return(data) } data <- gen.dauer.data() ############# model functions ####################### lm.sim <- function(df) { modsum <- df %>% lm(formula = p~genotype) %>% summary() genotype2 <- as.numeric(modsum$coefficients[,4][2]) genotype3 <- as.numeric(modsum$coefficients[,4][3]) Fp <- as.numeric(1-pf(modsum$fstatistic[1],modsum$fstatistic[2],modsum$fstatistic[3])) Chisq.p = NA model <- "anova" p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } t.sim <- function(df) { genotype2 = data %>% dplyr::filter(genotype != "3" & !day %in% missing.days.1) %$% t.test(p~genotype)$p.value genotype3 = data %>% dplyr::filter(genotype != "2" & !day %in% missing.days.2) %$% t.test(p~genotype)$p.value model = "t" Fp = NA Chisq.p = NA p.val <- data.frame(cbind(model,genotype2, genotype3, Fp, Chisq.p)) return(p.val) } glmm.sim <- function(df) { mod = data %>% lme4::glmer(formula = cbind(y, (k-y)) ~ genotype + (1|day/strainDate/plateID), family = binomial, control=glmerControl(optimizer="bobyqa")) nullmod = data %>% lme4::glmer(formula = cbind(y, (k-y)) ~ 1 + (1|day/strainDate/plateID), family = binomial, control=glmerControl(optimizer="bobyqa")) modsum <- mod %>% summary() genotype2 <- as.numeric(modsum$coefficients[,4][2]) genotype3 <- as.numeric(modsum$coefficients[,4][3]) model <- "glmm" compmod <- anova(nullmod, mod) Fp = NA Chisq.p <- compmod$`Pr(>Chisq)`[2] p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } stan.sim 
<- function(df) { library (rstan) rstan_options (auto_write=TRUE) options (mc.cores=parallel::detectCores ()) # Run on multiple cores # run stan mod with default priors mod <- stan_glmer( cbind(y, k-y) ~ genotype + (1|day) + (1|strainDate) + (1|plateID), data=data, family = binomial(link="logit"), chains = 3, cores =4, seed = 2000, control = list(adapt_delta=0.99) ) model = "stan" # get posterior 95% cred interval, test if it contains 0 (abs(sum) != sum(abs)) mod.pp <- posterior_interval(mod, prob = 0.95, pars = c("genotype2", "genotype3")) # will give TRUE if 95% CI contains 0 genotype2 <- as.numeric(abs(mod.pp[1,1]) + abs(mod.pp[1,2]) != abs(mod.pp[1,1] + mod.pp[1,2])) genotype3 <- as.numeric(abs(mod.pp[2,1]) + abs(mod.pp[2,2]) != abs(mod.pp[2,1] + mod.pp[2,2])) Fp = NA Chisq.p = NA p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) #return(mod) } lmm.sim <- function(df) { library(lme4) #library(lsmeans) mod <- df %>% lmer(formula = p~genotype + (1|day) + (1|strainDate)) rg <- mod %>% lsmeans::ref.grid() modsum <- rg %>% lsmeans::lsmeans("genotype") %>% lsmeans::contrast("trt.vs.ctrl", ref = 1) %>% summary(adjust = "none") genotype2 <- as.numeric(modsum$p.value[1]) genotype3 <- as.numeric(modsum$p.value[2]) Fp <- as.numeric(lmerTest::anova( lmerTest::lmer(formula = p~genotype + (1|day) + (1|strainDate), data = data), test = "F")[6][1,1]) Chisq.p = NA model <- "lmm" p.val <- data.frame(cbind(model, genotype2, genotype3, Fp, Chisq.p)) return(p.val) } # optional plot (use only for single simulation inspection) if(do.plot) { p<-data %>% ggplot(aes(x=genotype, y=p)) + geom_boxplot() + geom_point(aes(x=genotype, colour = factor(day))) return(p) } else { if(do.stan) { lm <- lm.sim(data) t <- t.sim(data) glmm <- glmm.sim(data) stan <- stan.sim(data) lmm <- lmm.sim(data) p.val <- rbind(lm, t, glmm, stan, lmm) return(p.val) } else { lm <- lm.sim(data) t <- t.sim(data) glmm <- glmm.sim(data) lmm <- lmm.sim(data) p.val <- rbind(lm, t, glmm, 
lmm) return(p.val) } } }
#' Title: Elastic net example #' Purpose: Build an elastic net for classification #' Author: Ted Kwartler #' email: edwardkwartler@fas.harvard.edu #' License: GPL>=3 #' Date: Nov 21, 2022 #' # Wd setwd("~/Desktop/Harvard_DataMining_Business_Student/personalFiles") # Libs library(text2vec) library(caret) library(tm) library(glmnet) library(readr) # Custom cleaning function diagnosisClean<-function(xVec){ xVec <- removePunctuation(xVec) xVec <- stripWhitespace(xVec) xVec <- tolower(xVec) return(xVec) } # Read diabetes <- read_csv('https://raw.githubusercontent.com/kwartler/Harvard_DataMining_Business_Student/master/Lessons/K_More_TM_DocClass/data/diabetes_subset_8500.csv', locale = locale(encoding = "Latin1")) # Concantenate texts in 3 columns diabetes$diagnosisText <- as.character(paste(diabetes$diag_1_desc, diabetes$diag_2_desc, diabetes$diag_3_desc, sep=' ')) # For your reference head(diabetes$diagnosisText) ### SAMPLE : Partiting idx <- createDataPartition(diabetes$readmitted,p=.7,list=F) trainDiabetesTxt <- diabetes[idx,] testDiabetesTxt <- diabetes[-idx,] ### EXPLORE head(trainDiabetesTxt$diagnosisText,2) table(trainDiabetesTxt$readmitted) ### MODIFY # trainDiabetesTxt$diagnosisText <- diagnosisClean(trainDiabetesTxt$diagnosisText) # Initial iterator to make vocabulary iterMaker <- itoken(trainDiabetesTxt$diagnosisText, preprocess_function = list(tolower), progressbar = T) textVocab <- create_vocabulary(iterMaker, stopwords=stopwords('SMART')) head(textVocab) tail(textVocab) nrow(textVocab) #prune vocab to make DTM smaller prunedtextVocab <- prune_vocabulary(textVocab, term_count_min = 10, doc_proportion_max = 0.5, doc_proportion_min = 0.001) nrow(prunedtextVocab) # Using the pruned vocabulary to declare the DTM vectors vectorizer <- vocab_vectorizer(prunedtextVocab) # Take the vocabulary lexicon and the pruned text function to make a DTM diabetesDTM <- create_dtm(iterMaker, vectorizer) dim(diabetesDTM) # Default is TF but if you want TF-IDF #idf <- 
get_idf(diabetesDTM) #diabetesDTM <- transform_tfidf(diabetesDTM,idf) ### MODEL(s) #train text only model textFit <- cv.glmnet(diabetesDTM, y=as.factor(trainDiabetesTxt$readmitted), alpha=0.9, family='binomial', type.measure='auc', nfolds=5, intercept=F) # Examine head(coefficients(textFit),10) # Subset to impacting terms bestTerms <- subset(as.matrix(coefficients(textFit)), as.matrix(coefficients(textFit)) !=0) head(bestTerms) nrow(bestTerms) ncol(diabetesDTM) # Make training predictions trainingPreds <- predict(textFit, diabetesDTM, type = 'class') confusionMatrix(as.factor(trainingPreds), as.factor(trainDiabetesTxt$readmitted)) # End
/Lessons/K_More_TM_DocClass/scripts/F_ElasticNetExample.R
no_license
kwartler/Harvard_DataMining_Business_Student
R
false
false
3,099
r
#' Title: Elastic net example #' Purpose: Build an elastic net for classification #' Author: Ted Kwartler #' email: edwardkwartler@fas.harvard.edu #' License: GPL>=3 #' Date: Nov 21, 2022 #' # Wd setwd("~/Desktop/Harvard_DataMining_Business_Student/personalFiles") # Libs library(text2vec) library(caret) library(tm) library(glmnet) library(readr) # Custom cleaning function diagnosisClean<-function(xVec){ xVec <- removePunctuation(xVec) xVec <- stripWhitespace(xVec) xVec <- tolower(xVec) return(xVec) } # Read diabetes <- read_csv('https://raw.githubusercontent.com/kwartler/Harvard_DataMining_Business_Student/master/Lessons/K_More_TM_DocClass/data/diabetes_subset_8500.csv', locale = locale(encoding = "Latin1")) # Concantenate texts in 3 columns diabetes$diagnosisText <- as.character(paste(diabetes$diag_1_desc, diabetes$diag_2_desc, diabetes$diag_3_desc, sep=' ')) # For your reference head(diabetes$diagnosisText) ### SAMPLE : Partiting idx <- createDataPartition(diabetes$readmitted,p=.7,list=F) trainDiabetesTxt <- diabetes[idx,] testDiabetesTxt <- diabetes[-idx,] ### EXPLORE head(trainDiabetesTxt$diagnosisText,2) table(trainDiabetesTxt$readmitted) ### MODIFY # trainDiabetesTxt$diagnosisText <- diagnosisClean(trainDiabetesTxt$diagnosisText) # Initial iterator to make vocabulary iterMaker <- itoken(trainDiabetesTxt$diagnosisText, preprocess_function = list(tolower), progressbar = T) textVocab <- create_vocabulary(iterMaker, stopwords=stopwords('SMART')) head(textVocab) tail(textVocab) nrow(textVocab) #prune vocab to make DTM smaller prunedtextVocab <- prune_vocabulary(textVocab, term_count_min = 10, doc_proportion_max = 0.5, doc_proportion_min = 0.001) nrow(prunedtextVocab) # Using the pruned vocabulary to declare the DTM vectors vectorizer <- vocab_vectorizer(prunedtextVocab) # Take the vocabulary lexicon and the pruned text function to make a DTM diabetesDTM <- create_dtm(iterMaker, vectorizer) dim(diabetesDTM) # Default is TF but if you want TF-IDF #idf <- 
get_idf(diabetesDTM) #diabetesDTM <- transform_tfidf(diabetesDTM,idf) ### MODEL(s) #train text only model textFit <- cv.glmnet(diabetesDTM, y=as.factor(trainDiabetesTxt$readmitted), alpha=0.9, family='binomial', type.measure='auc', nfolds=5, intercept=F) # Examine head(coefficients(textFit),10) # Subset to impacting terms bestTerms <- subset(as.matrix(coefficients(textFit)), as.matrix(coefficients(textFit)) !=0) head(bestTerms) nrow(bestTerms) ncol(diabetesDTM) # Make training predictions trainingPreds <- predict(textFit, diabetesDTM, type = 'class') confusionMatrix(as.factor(trainingPreds), as.factor(trainDiabetesTxt$readmitted)) # End
#### Purpose: Calibrate the logit-normal GAS model with a pool of loans, 2 macro variables, one macro factor and one frailty factor
# Inputs expected in scope: DM_1C, a data.table with columns DateQtr, HPI, UR, Default.

# Time-series: quarters after 2003 define the estimation sample
tobs <- unique(DM_1C[DateQtr > 2003]$DateQtr)

# Cross-section: number of loans observed in each quarter
cobs <- matrix(data = 0, nrow = length(tobs), ncol = 1)
for(i in 1:length(tobs)){
  cobs[i] <- nrow(DM_1C[DateQtr == tobs[i]])
}

# Macro: per-quarter (HPI, UR) pair
path_n <- matrix(data = 0, nrow = length(tobs), ncol = 2)
for(i in 1:length(tobs)){
  path_n[i,] <- unique(DM_1C[DateQtr == tobs[i], get(c("HPI", "UR"))])
}

# Default rate: fraction of loans defaulting in each quarter
path_l <- matrix(data = 0, nrow = length(tobs), ncol = 1)
for(i in 1:length(tobs)){
  path_l[i] <- sum(DM_1C[DateQtr == tobs[i], Default])/cobs[i]
}

# Likelihood function: negative joint log-likelihood of the logit (default)
# and normal (macro) measurement equations, with GAS factor recursion
# f_{t+1} = w + A * score_t + B * f_t. Parameters are matched by name prefix.
loglikelihood <- function(par, path_l, path_n, tobs, cobs){
  f1 <- par[grepl("f1",names(par))]           # initial factor values
  Zc <- par[grepl("Zc",names(par))]           # logit factor loadings
  # Zm: macro loading matrix; first column fixed at 1 with one free entry
  Zm <- cbind(matrix(data = 1, nrow = ncol(path_n), ncol = 1), matrix(data = 0, nrow = ncol(path_n), ncol = 1))
  Zm[2,1] <- par[grepl("Zm",names(par))]
  w <- par[grepl("w",names(par))]             # GAS intercept
  # abs() constrains the score loadings A to be non-negative
  A <- abs(diag(par[grepl("A",names(par))], length(f1), length(f1)))
  B <- diag(par[grepl("B",names(par))], length(f1), length(f1))
  ssq <- abs(par[grepl("Sig",names(par))])    # macro measurement variances
  Sig <- matrix(data = 0, ncol = ncol(path_n), nrow = ncol(path_n))
  diag(Sig) <- ssq
  Siginv <- diag(1/ssq, ncol = ncol(path_n), nrow = ncol(path_n))
  # Common containers
  score_ <- matrix(data = NA, nrow = length(tobs), ncol = 2)
  f_ <- matrix(data = NA, nrow = length(tobs)+1, ncol = 2)
  loglike <- matrix(data = NA, nrow = length(tobs), ncol = 1)
  # Logit containers
  p_ <- matrix(data = NA, nrow = length(tobs), ncol = 1)
  score_l_ <- matrix(data = NA, nrow = length(tobs), ncol = 2)
  # Normal containers
  score_n_ <- matrix(data = NA, nrow = length(tobs), ncol = 2)
  # Initialize GAS component
  f_[1,] <- f1
  # Compute likelihood and other elements at every t
  for(i in 1:length(tobs)){
    # Dynamic probability for logit component
    # NOTE(review): 1/(1+exp(x)) is the logistic of -Zc %*% f, not the usual
    # exp(x)/(1+exp(x)) — confirm the intended sign convention.
    p_[i] <- 1/(1 + exp(Zc%*%f_[i,]))
    # Score of the two measurement densities w.r.t. the factors
    score_l_[i,] <- cobs[i]*p_[i]%*%Zc - cobs[i]*path_l[i]%*%Zc
    # NOTE(review): path_n[i] indexes the T-by-2 matrix linearly
    # (column-major), returning a scalar; path_n[i,] was likely intended —
    # verify against the model derivation.
    score_n_[i,] <- Zm%*%Siginv%*%(path_n[i] - Zm%*%f_[i,])
    score_[i,] <- score_l_[i,] + score_n_[i,]
    # Log-likelihood contributions (binomial logit + multivariate normal)
    loglike_l <- -cobs[i]*path_l[i]%*%Zc%*%f_[i,] - cobs[i]*log(1 + exp(-Zc%*%f_[i,]))
    loglike_n <- -0.5*ncol(path_n)*log(2*pi) - 0.5*log(det(Sig)) - 0.5 * t((path_n[i] - Zm%*%f_[i,])) %*% Siginv %*% (path_n[i] - Zm%*%f_[i,])
    loglike[i] <- loglike_n + loglike_l
    # GAS recursion for the next-period factors
    f_[i+1,] <- w + A%*%score_[i,] + B%*%f_[i,]
  }
  # Total log-likelihood; return its negative for minimization by optim()
  loglike <- sum(loglike)
  return(-loglike)
}

# Initialization of the parameter vector (names drive the grepl() matching)
parameters <- c(Zm1 = 0.5, Zc1 = 0.001, Zc2 = 0.001, B1 = 0.5, B2 = 0.5, A1 = 0.5, A2 = 0.5, Sig1 = 0.7, Sig2 = 0.1, f1m = 0, f1c = 0 , w1 = 0, w2 = 0)

# Estimation by BFGS, keeping the Hessian for standard errors
fit <- optim(par = parameters, fn = loglikelihood, method = "BFGS" , path_l = path_l, path_n = path_n, cobs = cobs, tobs = tobs, control=list(trace = 1, REPORT=10, maxit = 300), hessian = TRUE)

# Transform parameters to their restricted counterpart
# (abs() mirrors the constraints applied inside loglikelihood)
par <- c(fit$par[1], fit$par[2], fit$par[3], fit$par[4], fit$par[5], abs(fit$par[6]), abs(fit$par[7]), abs(fit$par[8]), abs(fit$par[9]), fit$par[10], fit$par[11], fit$par[12], fit$par[13])

# Comparison of fitted default rate with observed default rate:
# NOTE(review): p_, score_ and f_ are local to loglikelihood() and do not
# exist in the global environment after optim() — these plots only work if
# the function body was previously run line-by-line interactively.
plot(p_, type = "l")
plot(path_l, type = "l")

# Standard Errors
# NOTE(review): optim minimized the NEGATIVE log-likelihood, so fit$hessian
# is already positive definite at the optimum; solve(-fit$hessian) yields
# negative variances and NaN standard errors — confirm the intended sign.
hi <- solve(-fit$hessian)
se <- sqrt(diag(hi))

# P-values (two-sided Wald tests)
b <- fit$par
zscore <- b / se
p_value <- 2*(1 - pnorm(abs(zscore)))

# Unconditional mean of Factor: E[f] = w (I - B)^-1
Ef <- c(fit$par["w1"], fit$par["w2"])%*%solve((1 - diag(c(fit$par["B1"], fit$par["B2"]) , 2, 2)))
plot(score_[,1], type = "l")
plot(f_[,2], type = "l")

# Conclusions:
#1. The calibration results are extremely sensitive to the parameter constraints.
#1.1 The parameter constraint on B to be within the unit interval generates a terrible p_ series. Although, the factors
#    and the scores are centered at their theoretical values.
#1.2 By only imposing the restriction on A, the generated p_ is very similar to the observed default rate.
#1.3 By not imposing restrictions on either A or B, the results are also terrible.
#1.4 By setting the constraint w = 0 on the constant of the GAS process, the generated default rates seem to improve.
#    However, the factors are not centred on their unconditional mean. The score is centred around zero.
#    The factors look more stable by using this constraint.
#1.5 By imposing all three restrictions, the results are again terrible.
#1.6 The constraint on the diagonal of the factor loading matrix Zm has also shown to be essential for the maximisation to
#    generate sensible results.
/Mix-Measurement GAS/Empirical Analysis/Logit_2Normal_1Fn_1Fl.R
permissive
felixjff/GAS-Models
R
false
false
4,656
r
#### Purpose: Calibrate the logit-normal GAS model with a pool of loans, 2 macro variables, one macro factor and one frailty factor #Time-series tobs <- unique(DM_1C[DateQtr > 2003]$DateQtr) #Cross-section cobs <- matrix(data = 0, nrow = length(tobs), ncol = 1) for(i in 1:length(tobs)){ cobs[i] <- nrow(DM_1C[DateQtr == tobs[i]]) } #Macro path_n <- matrix(data = 0, nrow = length(tobs), ncol = 2) for(i in 1:length(tobs)){ path_n[i,] <- unique(DM_1C[DateQtr == tobs[i], get(c("HPI", "UR"))]) } #Default rate path_l <- matrix(data = 0, nrow = length(tobs), ncol = 1) for(i in 1:length(tobs)){ path_l[i] <- sum(DM_1C[DateQtr == tobs[i], Default])/cobs[i] } #Likelihood function loglikelihood <- function(par, path_l, path_n, tobs, cobs){ f1 <- par[grepl("f1",names(par))] Zc <- par[grepl("Zc",names(par))] Zm <- cbind(matrix(data = 1, nrow = ncol(path_n), ncol = 1), matrix(data = 0, nrow = ncol(path_n), ncol = 1)) Zm[2,1] <- par[grepl("Zm",names(par))] w <- par[grepl("w",names(par))] A <- abs(diag(par[grepl("A",names(par))], length(f1), length(f1))) B <- diag(par[grepl("B",names(par))], length(f1), length(f1)) ssq <- abs(par[grepl("Sig",names(par))]) Sig <- matrix(data = 0, ncol = ncol(path_n), nrow = ncol(path_n)) diag(Sig) <- ssq Siginv <- diag(1/ssq, ncol = ncol(path_n), nrow = ncol(path_n)) #Common score_ <- matrix(data = NA, nrow = length(tobs), ncol = 2) f_ <- matrix(data = NA, nrow = length(tobs)+1, ncol = 2) loglike <- matrix(data = NA, nrow = length(tobs), ncol = 1) #Logit p_ <- matrix(data = NA, nrow = length(tobs), ncol = 1) score_l_ <- matrix(data = NA, nrow = length(tobs), ncol = 2) #Normal score_n_ <- matrix(data = NA, nrow = length(tobs), ncol = 2) #Initialize GAS component f_[1,] <- f1 #compute likelihood and other elements at every t for(i in 1:length(tobs)){ #Dynamic probability for logit component p_[i] <- 1/(1 + exp(Zc%*%f_[i,])) #Score score_l_[i,] <- cobs[i]*p_[i]%*%Zc - cobs[i]*path_l[i]%*%Zc score_n_[i,] <- Zm%*%Siginv%*%(path_n[i] - Zm%*%f_[i,]) 
score_[i,] <- score_l_[i,] + score_n_[i,] #Log-likelihood loglike_l <- -cobs[i]*path_l[i]%*%Zc%*%f_[i,] - cobs[i]*log(1 + exp(-Zc%*%f_[i,])) loglike_n <- -0.5*ncol(path_n)*log(2*pi) - 0.5*log(det(Sig)) - 0.5 * t((path_n[i] - Zm%*%f_[i,])) %*% Siginv %*% (path_n[i] - Zm%*%f_[i,]) loglike[i] <- loglike_n + loglike_l f_[i+1,] <- w + A%*%score_[i,] + B%*%f_[i,] } #compute log-likelihood loglike <- sum(loglike) return(-loglike) } #Initialization parameters <- c(Zm1 = 0.5, Zc1 = 0.001, Zc2 = 0.001, B1 = 0.5, B2 = 0.5, A1 = 0.5, A2 = 0.5, Sig1 = 0.7, Sig2 = 0.1, f1m = 0, f1c = 0 , w1 = 0, w2 = 0) #Estimation fit <- optim(par = parameters, fn = loglikelihood, method = "BFGS" , path_l = path_l, path_n = path_n, cobs = cobs, tobs = tobs, control=list(trace = 1, REPORT=10, maxit = 300), hessian = TRUE) #Transform parameters to their restricted counter part par <- c(fit$par[1], fit$par[2], fit$par[3], fit$par[4], fit$par[5], abs(fit$par[6]), abs(fit$par[7]), abs(fit$par[8]), abs(fit$par[9]), fit$par[10], fit$par[11], fit$par[12], fit$par[13]) #Comparison of fitted default rate with observed default rate: plot(p_, type = "l") plot(path_l, type = "l") #Standard Errors hi <- solve(-fit$hessian) se <- sqrt(diag(hi)) #P-values b <- fit$par zscore <- b / se p_value <- 2*(1 - pnorm(abs(zscore))) #Unconditional mean of Factor Ef <- c(fit$par["w1"], fit$par["w2"])%*%solve((1 - diag(c(fit$par["B1"], fit$par["B2"]) , 2, 2))) plot(score_[,1], type = "l") plot(f_[,2], type = "l") #Conclusions: #1. The calibration results are extremely sensitive to the parameter constraints. #1.1 The parameter contrains on B to be within the unit interval generates a terrible p_ series. Although, the factors # and the scores are centered at their theoretical values. #1.2 By only imposing the restriction on A, the generated p_ is very similar to the observed default rate. #1.3 By not imposing restrictions nor on A or B, the results are also terrible. 
#1.4 By setting the constraint w = 0 on the constant of the GAS process, the generated default rates seem to improve. # However, the factors are not centred on their unconditional mean. The score is centred around zero. # The factors look more stable by using this constraint. #1.5 By imposing all three restrictions, the results are again terrible. #1.6 The contraint on the diagonal of the factor loading matrix Zm has also shown to be essential for the maximisation to # generate sensitive results.
#' Retrieve a data frame of all active surveys on Qualtrics
#'
#' @seealso See \url{https://api.qualtrics.com/docs/} for documentation on the
#' Qualtrics API.
#' @importFrom dplyr bind_rows
#' @export
#' @examples
#' \dontrun{
#' # Register your Qualtrics credentials if you haven't already
#' qualtrics_api_credentials(
#'   api_key = "<YOUR-API-KEY>",
#'   base_url = "<YOUR-BASE-URL>"
#' )
#'
#' # Retrieve a list of all surveys
#' surveys <- all_surveys()
#'
#' # Retrieve a single survey
#' mysurvey <- fetch_survey(surveyID = surveys$id[6])
#'
#' mysurvey <- fetch_survey(
#'   surveyID = surveys$id[6],
#'   save_dir = tempdir(),
#'   start_date = "2018-01-01",
#'   end_date = "2018-01-31",
#'   limit = 100,
#'   label = TRUE,
#'   unanswer_recode = "UNANS",
#'   verbose = TRUE
#' )
#' }
#'
all_surveys <- function() {
  # Validate stored credentials before touching the API
  assert_base_url()
  assert_api_key()

  # Endpoint for the survey listing
  next_url <- create_surveys_url(Sys.getenv("QUALTRICS_BASE_URL"))

  # Walk the paginated listing: each response carries its elements plus a
  # nextPage URL, which is NULL once the final page has been reached.
  collected <- list()
  repeat {
    resp <- qualtrics_api_request("GET", next_url)
    collected <- append(collected, resp$result$elements)
    next_url <- resp$result$nextPage
    if (is.null(next_url)) {
      break
    }
  }

  # Collapse all pages into a single data frame
  bind_rows(collected)
}
/R/all_surveys.R
permissive
shleeneu/qualtRics
R
false
false
1,643
r
#' Retrieve a data frame of all active surveys on Qualtrics #' #' @seealso See \url{https://api.qualtrics.com/docs/} for documentation on the #' Qualtrics API. #' @importFrom dplyr bind_rows #' @export #' @examples #' \dontrun{ #' # Register your Qualtrics credentials if you haven't already #' qualtrics_api_credentials( #' api_key = "<YOUR-API-KEY>", #' base_url = "<YOUR-BASE-URL>" #' ) #' #' # Retrieve a list of all surveys #' surveys <- all_surveys() #' #' # Retrieve a single survey #' mysurvey <- fetch_survey(surveyID = surveys$id[6]) #' #' mysurvey <- fetch_survey( #' surveyID = surveys$id[6], #' save_dir = tempdir(), #' start_date = "2018-01-01", #' end_date = "2018-01-31", #' limit = 100, #' label = TRUE, #' unanswer_recode = "UNANS", #' verbose = TRUE #' ) #' } #' all_surveys <- function() { # CHECK PARAMS AND PREP QUERY ---- # Check params assert_base_url() assert_api_key() # Function-specific API stuff surveys_url <- create_surveys_url(Sys.getenv("QUALTRICS_BASE_URL")) # SEND REQUEST TO QUALTRICS ---- # Send GET request to list all surveys resp <- qualtrics_api_request("GET", surveys_url) # Put results in list master <- list() # Append results master <- append(master, resp$result$elements) # If nextPage != null, keep calling while (!is.null(resp$result$nextPage)) { # Send GET request to list all surveys resp <- qualtrics_api_request("GET", resp$result$nextPage) # Append results master <- append(master, resp$result$elements) } # WRAP-UP AND RETURN ---- # Bind to one large data frame & return d <- bind_rows(master) return(d) }
# IMPORTANT: The household_power_consumption.txt file must be in your working directory.
p1 <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
library(datasets)

# Keep only the two target days: 2007-02-01 and 2007-02-02
p1s <- p1[as.Date(p1$Date, "%d/%m/%Y") %in% as.Date(c('2007-02-01', '2007-02-02')), ]

# Combine date and time into a single POSIXct timestamp
p1s$newtime <- as.POSIXct(paste(p1s$Date, p1s$Time), format = "%d/%m/%Y %H:%M:%S")

png("plot3.png", width = 480, height = 480)

# BUGFIX: the original re-ran as.POSIXct(newtime, '%Y/%m/%d %H:%M:%S') on an
# already-POSIXct column; the second positional argument of as.POSIXct() is
# `tz`, so the format string was being passed as a timezone. newtime needs
# no further conversion and is used directly here.
with(p1s, plot(newtime, as.numeric(as.character(Sub_metering_1)),
               type = "l", xlab = '', ylab = 'Energy sub metering'))
# lines() ignores xlab/ylab, so they are omitted for the overlays
lines(p1s$newtime, as.numeric(as.character(p1s$Sub_metering_2)),
      type = "l", col = "red")
lines(p1s$newtime, as.numeric(as.character(p1s$Sub_metering_3)),
      type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))

dev.off()
/plot3.R
no_license
rbroderson/ExData_Plotting1
R
false
false
946
r
#IMPORTANT: The household_power_consumption.txt file must be in your working directory. p1 <- read.table("household_power_consumption.txt", sep=";", header=TRUE) library(datasets) p1s <- p1[as.Date(p1$Date, "%d/%m/%Y") %in% as.Date(c('2007-02-01', '2007-02-02')),] p1s$newtime <- as.POSIXct(paste(p1s$Date, p1s$Time), format="%d/%m/%Y %H:%M:%S") png("plot3.png", width=480, height=480) with (p1s, plot(as.POSIXct(newtime, '%Y/%m/%d %H:%M:%S'), as.numeric(as.character(Sub_metering_1)), type="l", xlab='', ylab='Energy sub metering')) lines(as.POSIXct(p1s$newtime, '%Y/%m/%d %H:%M:%S'), as.numeric(as.character(p1s$Sub_metering_2)), type="l", xlab='', ylab='', col="red") lines(as.POSIXct(p1s$newtime, '%Y/%m/%d %H:%M:%S'), as.numeric(as.character(p1s$Sub_metering_3)), type="l", xlab='', ylab='', col="blue") legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1), col=c("black","red","blue")) dev.off()
#' @title Net
#'
#'
#' @description Net model from Migrating_Pytorch
#' @return model
#'
#' @examples
#'
#' \dontrun{
#'
#' Net()
#'
#' }
#'
#' @export
Net <- function() {
  migrating_pytorch$Net()
}


#' @title Train_loader
#'
#' @description Data loader. Combines a dataset and a sampler, and provides an iterable over
#'
#' @details the given dataset. The :class:`~torch.utils.data.DataLoader` supports both map-style and
#' iterable-style datasets with single- or multi-process loading, customizing
#' loading order and optional automatic batching (collation) and memory pinning.
#'
#' @return loader
#' @export
train_loader <- function() {
  # Returned invisibly so the loader object is not auto-printed at the console
  invisible(migrating_pytorch$train_loader)
}


#' @title Test_loader
#'
#' @description Data loader. Combines a dataset and a sampler, and provides an iterable over
#'
#' @details the given dataset. The :class:`~torch.utils.data.DataLoader` supports both map-style and
#' iterable-style datasets with single- or multi-process loading, customizing
#' loading order and optional automatic batching (collation) and memory pinning. See :py:mod:`torch.utils.data` documentation page for more details.
#'
#' @return loader
#' @export
test_loader <- function() {
  migrating_pytorch$test_loader
}


#' @title Get data loaders
#'
#'
#' @param train_batch_size train dataset batch size
#' @param val_batch_size validation dataset batch size
#' @return None
#' @export
get_data_loaders <- function(train_batch_size, val_batch_size) {
  # Python expects integers; coerce R numerics before crossing the bridge
  train_bs <- as.integer(train_batch_size)
  val_bs <- as.integer(val_batch_size)
  migrating_ignite$get_data_loaders(
    train_batch_size = train_bs,
    val_batch_size = val_bs
  )
}


#' @title Lit Model
#'
#' @return model
#' @export
LitModel <- function() {
  migrating_lightning$LitModel()
}


#' @title Loaders
#' @description a loader from Catalyst
#' @return None
#'
#' @examples
#'
#' \dontrun{
#'
#' # trigger download
#' loaders()
#'
#' }
#'
#' @export
loaders <- function() {
  catalyst$loaders()
}


#' @title Catalyst model
#'
#' @return model
#' @export
catalyst_model <- function() {
  catalyst$model
}
/R/custom_modules.R
permissive
han-tun/fastai
R
false
false
2,058
r
#' @title Net #' #' #' @description Net model from Migrating_Pytorch #' @return model #' #' @examples #' #' \dontrun{ #' #' Net() #' #' } #' #' @export Net = function() { migrating_pytorch$Net() } #' @title Train_loader #' #' @description Data loader. Combines a dataset and a sampler, and provides an iterable over #' #' @details the given dataset. The :class:`~torch.utils.data.DataLoader` supports both map-style and #' iterable-style datasets with single- or multi-process loading, customizing #' loading order and optional automatic batching (collation) and memory pinning. #' #' @return loader #' @export train_loader = function() { invisible(migrating_pytorch$train_loader) } #' @title Test_loader #' #' @description Data loader. Combines a dataset and a sampler, and provides an iterable over #' #' @details the given dataset. The :class:`~torch.utils.data.DataLoader` supports both map-style and #' iterable-style datasets with single- or multi-process loading, customizing #' loading order and optional automatic batching (collation) and memory pinning. See :py:mod:`torch.utils.data` documentation page for more details. 
#' #' @return loader #' @export test_loader = function() { migrating_pytorch$test_loader } #' @title Get data loaders #' #' #' @param train_batch_size train dataset batch size #' @param val_batch_size validation dataset batch size #' @return None #' @export get_data_loaders <- function(train_batch_size, val_batch_size) { migrating_ignite$get_data_loaders( train_batch_size = as.integer(train_batch_size), val_batch_size = as.integer(val_batch_size) ) } #' @title Lit Model #' #' @return model #' @export LitModel = function() { migrating_lightning$LitModel() } #' @title Loaders #' @description a loader from Catalyst #' @return None #' #' @examples #' #' \dontrun{ #' #' # trigger download #' loaders() #' #' } #' #' @export loaders = function() { catalyst$loaders() } #' @title Catalyst model #' #' @return model #' @export catalyst_model = function() { catalyst$model }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gimme-data.R \docType{data} \name{ts2} \alias{ts2} \title{Example heterogeneous data, 2} \format{A data frame with 50 observations on 3 variables.} \usage{ ts2 } \description{ This data contains simulated time-series data for a single individual with 50 time points and 3 variables, or regions of interest. } \keyword{datasets}
/gimme/man/ts2.Rd
no_license
stlane/gimme
R
false
true
407
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gimme-data.R \docType{data} \name{ts2} \alias{ts2} \title{Example heterogeneous data, 2} \format{A data frame with 50 observations on 3 variables.} \usage{ ts2 } \description{ This data contains simulated time-series data for a single individual with 50 time points and 3 variables, or regions of interest. } \keyword{datasets}
require(BayesTree) setwd("~/Documents/Research/Genomics") source("bart_fns.R") source("bart_fns2.R") ##Set-up set.seed(20) ##TF settings tf.size2=c(10,20,30,40) ##script is set to work with this right now ##Observation settings n50=50 n300=300 mean.tf=0 #1.4862e-05 ##generate the X matrix which will be fixed- sl sd.tf=1 #.4778817-not using just to keep larger numbers at play ##Generate Design Matrices tf.exp.300=sapply(rep(n300,max(tf.size2)),rnorm,mean=mean.tf,sd=sd.tf) #gives full matrix tf.exp.50=tf.exp.300[1:n50,] tf.exp.100=tf.exp.300[1:n100,] ##Beta settings tf.beta.1=c(1,rep(0,times=max(tf.size2-1))) #tf.beta.2=c(2,rep(0,times=max(tf.size2-1))) not really using here ##Function params n.tree.vec=c(10,20) factor.vec2=c(1,2,3,4,5) ##work with this right now burn=2000 post=5000 n.genes=100 ##generate list of genes gene.list2=get_gene_data_list(n.obs=50,tf.exp.50,factor.vec2,tf.beta.1,reps=n.genes) ##Run Algos bart.sig.50.prior=bart_sig_prior(tf.exp.50,gene.list2,n.obs=50,reps=n.genes,factor.vec2,n.tree.vec,tf.vec=40, sigest=NA,burn_size=burn,post_size=post, sig_df=3,sig_quant=.9) ls.sig.est=numeric(length(bart.sig.50.prior)) for(i in 1:length(bart.sig.50.prior)){ ls.sig.est[i]=bart.sig.50.prior[[i]][[3]] } prior.sig.vec=numeric(length(bart.sig.50.prior)) for(i in 1:length(bart.sig.50.prior)){ prior.sig.vec[i]=bart.sig.50.prior[[i]][[2]][4] } prior.sig.vec ls.sig.est ##how many times in total number of reps was right TF selected prop.true.prior=numeric(length(bart.sig.50.prior)) for(i in 1:length(bart.sig.50.prior)){ prop.true.prior[i]=bart.sig.50.prior[[i]][[4]] } prop.true.prior ##Avg. 
variable usage proportion prop.selected.prior=numeric(length(bart.sig.50.prior)) for(i in 1:length(bart.sig.50.prior)){ prop.selected.prior[i]=bart.sig.50.prior[[i]][[5]] } prop.selected.prior ##use large df and center at median bart.sig.50.post=bart_sig_post(tf.exp.50,gene.list2,n50,reps=n.genes,factor.vec2,n.tree.vec,tf.vec=40, sigest=prior.sig.vec,burn_size=burn,post_size=post, sig_df=200,sig_quant=.5) post.sig.vec=numeric(length(bart.sig.50.post)) for(i in 1:length(bart.sig.50.post)){ post.sig.vec[i]=bart.sig.50.post[[i]][[2]][4] } post.sig.vec prop.true.post=numeric(length(bart.sig.50.post)) for(i in 1:length(bart.sig.50.post)){ prop.true.post[i]=bart.sig.50.post[[i]][[4]] } prop.true.post prop.selected.post=numeric(length(bart.sig.50.post)) for(i in 1:length(bart.sig.50.post)){ prop.selected.post[i]=bart.sig.50.post[[i]][[5]] } prop.selected.post ##Default settings for df and quantile, just using estimated sigma bart.sig.50.post.def=bart_sig_post(tf.exp.50,gene.list2,n50,reps=n.genes,factor.vec2,n.tree.vec,tf.vec=40, sigest=prior.sig.vec,burn_size=burn,post_size=post, sig_df=3,sig_quant=.9) post.sig.vec.def=numeric(length(bart.sig.50.post.def)) for(i in 1:length(bart.sig.50.post.def)){ post.sig.vec.def[i]=bart.sig.50.post.def[[i]][[2]][4] } post.sig.vec.def prop.true.post.def=numeric(length(bart.sig.50.post.def)) for(i in 1:length(bart.sig.50.post.def)){ prop.true.post.def[i]=bart.sig.50.post.def[[i]][[4]] } prop.true.post.def prop.selected.post.def=numeric(length(bart.sig.50.post.def)) for(i in 1:length(bart.sig.50.post.def)){ prop.selected.post.def[i]=bart.sig.50.post.def[[i]][[5]] } prop.selected.post.def ##Bind together to see desired outcomes ##Generate column and row names cnames=c("Prior_Prop. 
","Post_Prop._Fixed ", "Post_Prop._Default") ##rownames rnames=character(length(n.tree.vec)*length(factor.vec2)) count=1 for(i in 1:length(n.tree.vec)){ for(j in 1:length(factor.vec2)){ rnames[count]=paste(n.tree.vec[i]," Trees ", factor.vec2[j], "x N/S", sep="") count=count+1 } } ##Proportion of n=rep simulations where true TF was selected propmat=cbind(prop.true.prior,prop.true.post,prop.true.post.def) colnames(propmat)=cnames rownames(propmat)=rnames propmat ##Avg. selection proportion for True TF across simulations selmat=cbind(prop.selected.prior,prop.selected.post,prop.selected.post.def) colnames(selmat)=cnames rownames(selmat)=rnames round(selmat,3) ##Compare 3 variances sig.mat=round(cbind(ls.sig.est,prior.sig.vec,post.sig.vec,post.sig.vec.def),2) colnames(sig.mat)=c("LS_Est. ","Prior_Est. ","Post_Est._Fixed ","Post_Est._Default") rownames(sig.mat)=rnames sig.mat par(mfrow=c(1,2)) plot_sig_full(bart.sig.50.post.def[[2]][[1]],n_burn=1500,subtitle="Post_Default") plot_sig_full(bart.sig.50.post[[2]][[1]],n_burn=1500,subtitle="Posterior_Fixed") mean(bart.sig.50.prior[[1]][[1]]$sigma) bart.sig.50.prior[[2]][[1]]$sigest mean(bart.sig.50.prior[[2]][[1]]$sigma) bart.sig.50.post[[2]][[1]]$sigma[1:5] plot(bart.sig.50.prior[[2]][[1]]) bart.sig.50.prior[[2]][[1]]$first.sigma bart.sig.50.prior[[1]][[2]][1] bart_sig_summary(bart.sig.50.prior) bart_sig_summary(bart.sig.50.post) save(bart.sig.50.prior,file="prior50.R") save(bart.sig.50.post,file="post50.R") load("prior50.R") load("post50.R") ##Functions ##1. ##Creates y-vectors- first index in list corresponds to noise factor right now. ##Can change to additional element to serve as legend or hashmap it. get_gene_data_list=function(n.obs,tf.exp,factor.vec, tf.beta,reps){ signal.temp=sum(abs(tf.exp%*%tf.beta))/n.obs out=list() for(i in 1:length(factor.vec)){ out[[i]]=list() sigma=signal.temp*factor.vec[i] for(j in 1:reps){ out[[i]][[j]]=as.numeric(tf.exp%*%tf.beta+rnorm(n.obs,mean=0,sd=sigma)) } } return(out) } ##2. 
bart_sig_prior=function(tf.exp,gene_list,n.obs,reps,factor.vec,tree.vec, tf.vec,sigest=NA,burn_size,post_size,sig_df=3,sig_quant=.9,thin.size=2){ ##Second item in the list has the following order: #(tree size, number of tfs, noise factor, posterior sigma estimate) big.list=list() bart.sig=sigest count=1 for(i in 1:length(tree.vec)){ for(j in 1:length(factor.vec)){ for(k in 1:length(tf.vec)){ sig.holder=numeric(reps) prior.sig=numeric(reps) true.tf=numeric(reps) sel.prop=numeric(reps) ##vector for proportion of time selected ##general loop-creates sigma, then response, then runs BART for(rep in 1:reps){ gene.exp=gene_list[[j]][[rep]] #print(gene.exp[1:4]) print(var(gene.exp)) train.exp=tf.exp[1:n.obs,1:tf.vec[k]] bart.mod = bart(x.train=train.exp, y.train=gene.exp, ntree=n.tree.vec[k], sigest=bart.sig, sigdf=sig_df, sigquant=sig_quant, nskip=burn_size, ndpost=post_size, keepevery=thin.size, verbose=F) ##Suppress printing print(rep) sig.holder[rep]=mean(bart.mod$sigma) ##hold the mean of the posterior-estimate of sigma prior.sig[rep]=bart.mod$sigest ##hold the priors for comparison ##add component to check how many times it was correct sums=sum_calc(bart.mod) true.tf[rep]=which.max(sums)==1 sel.prop[rep]=sums[1]/sum(sums) } ##printing print(mean(sig.holder)) print(bart.mod$sigest) print(true.tf) big.list[[count]]=list() big.list[[count]][[1]]=bart.mod big.list[[count]][[2]]=c(tree.vec[i],tf.vec[k],factor.vec[j],mean(sig.holder)) big.list[[count]][[3]]=mean(prior.sig) big.list[[count]][[4]]=mean(true.tf) big.list[[count]][[5]]=mean(sel.prop) print(count) count=count+1 } } } return(big.list) } ##3. 
bart_sig_post=function(tf.exp,gene_list,n.obs,reps,factor.vec,tree.vec, tf.vec,sigest.vec,burn_size,post_size,sig_df=3,sig_quant=.90,thin.size=2){ ##Second item in the list has the following order: #(tree size, number of tfs, noise factor, posterior sigma estimate) ##Need to make more robust to handle different orderings-right now just copies above big.list=list() count=1 for(i in 1:length(tree.vec)){ for(j in 1:length(factor.vec)){ for(k in 1:length(tf.vec)){ sig.holder=numeric(reps) ##posterior var vector true.tf=numeric(reps) ##vector for correct proportion sel.prop=numeric(reps) ##vector for proportion of time selected for(rep in 1:reps){ gene.exp=gene_list[[j]][[rep]] #print(gene.exp[1:4]) print(var(gene.exp)) train.exp=tf.exp[1:n.obs,1:tf.vec[k]] bart.mod = bart(x.train=train.exp, y.train=gene.exp, ntree=n.tree.vec[k], sigest=sigest.vec[count], sigdf=sig_df, sigquant=sig_quant, nskip=burn_size, ndpost=post_size, keepevery=thin.size, verbose=F) ##Suppress printing print(rep) sig.holder[rep]=mean(bart.mod$sigma) sums=sum_calc(bart.mod) true.tf[rep]=which.max(sums)==1 sel.prop[rep]=sums[1]/sum(sums) } ##printing to check print(mean(sig.holder)) print(bart.mod$sigest) print(true.tf) big.list[[count]]=list() big.list[[count]][[1]]=bart.mod big.list[[count]][[2]]=c(tree.vec[i],tf.vec[k],factor.vec[j],mean(sig.holder)) big.list[[count]][[3]]=NA big.list[[count]][[4]]=mean(true.tf) big.list[[count]][[5]]=mean(sel.prop) print(count) count=count+1 } } } return(big.list) } ##4. plot_sig_full=function(bart.list,n_burn,subtitle){ sig=c(bart.list$first.sigma,bart.list$sigma) burn.idx=1:n_burn burn=sig[burn.idx] post=sig[-burn.idx] plot(burn,main=paste("Convergence Plot for Sigma\n",subtitle), xlab="Sample Number", ylab="Sigma", xlim=c(0,length(sig)), ylim=c(min(sig),max(sig)), col="red" ) points((length(burn.idx)+1):length(sig),post) } ##5. 
bart_sig_summary=function(bart.mod){ output=data.frame() count=1 for(i in 1:length(bart.mod)){ output[count,1]=bart.mod[[i]][[2]][1] ##Trees output[count,2]=bart.mod[[i]][[2]][2] ##TF output[count,3]=bart.mod[[i]][[2]][3] ##Noise sums=sum_calc(bart.mod[[i]][[1]]) props=prop_calc(bart.mod[[i]][[1]]) sorted.sums=sort(sums,decreasing=T) sorted.props=sort(props,decreasing=T) output[count,4]=which.max(sums)==1 ##Does our appear most? output[count,5]=sorted.props[1] ##Top three TFs output[count,6]=which(props==sorted.props[1])[1] ##How to deal with ties? output[count,7]=sorted.props[2] output[count,8]=which(props==sorted.props[2])[1] output[count,9]=sorted.props[3] output[count,10]=which(props==sorted.props[3])[1] output[count,11]=sums[1] output[count,12]=sum(sums) ##Total splits output[count,13]=mean(bart.mod[[i]][[1]]$sigma) output[count,14]=props[1] print(count) count=count+1 } colnames(output)=c("Num_Trees","Num_TFs","Noise/Signal","True_Most_Common?","1st","Name-1st", "2nd","Name-2nd","3rd","Name-3rd","True_TF_Sum","Tot_Splits","Sigma_Estimate","True_TF_Prop") return(output) } ##simulations with chisquare library(geoR) d=150 x=seq(.5,3,.01) dens=dinvchisq(x,df=d,scale=1) plot(x,dens) n=rinvchisq(10000,d,scale=1) mean(n) quantile(n,c(.25,.5,.75)) plot(n) var(n)
/Code_Objects/bart_sig_est_v2.R
no_license
jbleich89/bart_gene
R
false
false
11,746
r
## Simulation driver: how does fixing BART's sigma prior (vs. letting BART
## estimate it) affect recovery of the one true TF among many noise TFs?
library(BayesTree)
setwd("~/Documents/Research/Genomics")
source("bart_fns.R")   ## provides sum_calc / prop_calc etc. -- TODO confirm
source("bart_fns2.R")

## Set-up
set.seed(20)

## TF settings
tf.size2 = c(10, 20, 30, 40)  ## script is set to work with this right now

## Observation settings
n50 = 50
n300 = 300
## BUG FIX: n100 was used below but never defined, which made the script stop
## with an error at tf.exp.100.
n100 = 100
mean.tf = 0  #1.4862e-05 ## generate the X matrix which will be fixed - sl
sd.tf = 1    #.4778817 - not using, just to keep larger numbers at play

## Generate design matrices: 300 x max(tf.size2) iid normal TF expressions;
## smaller designs are row subsets so they share the same draws.
tf.exp.300 = sapply(rep(n300, max(tf.size2)), rnorm, mean = mean.tf, sd = sd.tf)
tf.exp.50 = tf.exp.300[1:n50, ]
tf.exp.100 = tf.exp.300[1:n100, ]

## Beta settings: only TF 1 carries signal.
## (max(tf.size2 - 1) == max(tf.size2) - 1 here, so the vector length is right.)
tf.beta.1 = c(1, rep(0, times = max(tf.size2 - 1)))
#tf.beta.2=c(2,rep(0,times=max(tf.size2-1))) not really using here

## Function params
n.tree.vec = c(10, 20)
factor.vec2 = c(1, 2, 3, 4, 5)  ## work with this right now
burn = 2000
post = 5000
n.genes = 100

## NOTE(review): get_gene_data_list / bart_sig_prior are defined at the bottom
## of this file; this script is assumed to be run after sourcing or
## interactively -- confirm the intended execution order.

## generate list of genes
gene.list2 = get_gene_data_list(n.obs = 50, tf.exp.50, factor.vec2, tf.beta.1,
                                reps = n.genes)

## Run algos: prior-estimated sigma first
bart.sig.50.prior = bart_sig_prior(tf.exp.50, gene.list2, n.obs = 50,
                                   reps = n.genes, factor.vec2, n.tree.vec,
                                   tf.vec = 40, sigest = NA,
                                   burn_size = burn, post_size = post,
                                   sig_df = 3, sig_quant = .9)

## Extract per-setting statistics (vapply replaces the original index loops).
ls.sig.est = vapply(bart.sig.50.prior, function(res) res[[3]], numeric(1))
prior.sig.vec = vapply(bart.sig.50.prior, function(res) res[[2]][4], numeric(1))
prior.sig.vec
ls.sig.est

## how many times in total number of reps was the right TF selected
prop.true.prior = vapply(bart.sig.50.prior, function(res) res[[4]], numeric(1))
prop.true.prior

## Avg. variable usage proportion
prop.selected.prior = vapply(bart.sig.50.prior, function(res) res[[5]],
                             numeric(1))
prop.selected.prior

## use large df and center at median: fixes sigma near the prior-run posterior
bart.sig.50.post = bart_sig_post(tf.exp.50, gene.list2, n50, reps = n.genes,
                                 factor.vec2, n.tree.vec, tf.vec = 40,
                                 sigest = prior.sig.vec,
                                 burn_size = burn, post_size = post,
                                 sig_df = 200, sig_quant = .5)
post.sig.vec = vapply(bart.sig.50.post, function(res) res[[2]][4], numeric(1))
post.sig.vec
prop.true.post = vapply(bart.sig.50.post, function(res) res[[4]], numeric(1))
prop.true.post
prop.selected.post = vapply(bart.sig.50.post, function(res) res[[5]],
                            numeric(1))
prop.selected.post

## Default settings for df and quantile, just using estimated sigma
bart.sig.50.post.def = bart_sig_post(tf.exp.50, gene.list2, n50,
                                     reps = n.genes, factor.vec2, n.tree.vec,
                                     tf.vec = 40, sigest = prior.sig.vec,
                                     burn_size = burn, post_size = post,
                                     sig_df = 3, sig_quant = .9)
post.sig.vec.def = vapply(bart.sig.50.post.def, function(res) res[[2]][4],
                          numeric(1))
post.sig.vec.def
prop.true.post.def = vapply(bart.sig.50.post.def, function(res) res[[4]],
                            numeric(1))
prop.true.post.def
prop.selected.post.def = vapply(bart.sig.50.post.def, function(res) res[[5]],
                                numeric(1))
prop.selected.post.def

## Bind together to see desired outcomes
## Generate column and row names
cnames = c("Prior_Prop. ", "Post_Prop._Fixed ", "Post_Prop._Default")
## rownames: one per (tree size, noise factor) combination
rnames = character(length(n.tree.vec) * length(factor.vec2))
count = 1
for (i in 1:length(n.tree.vec)) {
  for (j in 1:length(factor.vec2)) {
    rnames[count] = paste(n.tree.vec[i], " Trees ", factor.vec2[j], "x N/S",
                          sep = "")
    count = count + 1
  }
}

## Proportion of n=rep simulations where true TF was selected
propmat = cbind(prop.true.prior, prop.true.post, prop.true.post.def)
colnames(propmat) = cnames
rownames(propmat) = rnames
propmat

## Avg. selection proportion for true TF across simulations
selmat = cbind(prop.selected.prior, prop.selected.post, prop.selected.post.def)
colnames(selmat) = cnames
rownames(selmat) = rnames
round(selmat, 3)

## Compare 3 variances
sig.mat = round(cbind(ls.sig.est, prior.sig.vec, post.sig.vec,
                      post.sig.vec.def), 2)
colnames(sig.mat) = c("LS_Est. ", "Prior_Est. ", "Post_Est._Fixed ",
                      "Post_Est._Default")
rownames(sig.mat) = rnames
sig.mat

## side-by-side convergence plots
par(mfrow = c(1, 2))
plot_sig_full(bart.sig.50.post.def[[2]][[1]], n_burn = 1500,
              subtitle = "Post_Default")
plot_sig_full(bart.sig.50.post[[2]][[1]], n_burn = 1500,
              subtitle = "Posterior_Fixed")

## ad-hoc inspection of individual fits
mean(bart.sig.50.prior[[1]][[1]]$sigma)
bart.sig.50.prior[[2]][[1]]$sigest
mean(bart.sig.50.prior[[2]][[1]]$sigma)
bart.sig.50.post[[2]][[1]]$sigma[1:5]
plot(bart.sig.50.prior[[2]][[1]])
bart.sig.50.prior[[2]][[1]]$first.sigma
bart.sig.50.prior[[1]][[2]][1]
bart_sig_summary(bart.sig.50.prior)
bart_sig_summary(bart.sig.50.post)

## persist / restore the simulation results
save(bart.sig.50.prior, file = "prior50.R")
save(bart.sig.50.post, file = "post50.R")
load("prior50.R")
load("post50.R")

## Functions
##1.
## get_gene_data_list: create the simulated responses. First index of the
## returned list corresponds to the noise factor; second to the replicate.
## Can change to additional element to serve as legend or hashmap it.
## The noise sd is the mean absolute signal scaled by the noise factor.
get_gene_data_list = function(n.obs, tf.exp, factor.vec, tf.beta, reps) {
  signal.temp = sum(abs(tf.exp %*% tf.beta)) / n.obs
  out = list()
  for (i in 1:length(factor.vec)) {
    out[[i]] = list()
    sigma = signal.temp * factor.vec[i]
    for (j in 1:reps) {
      out[[i]][[j]] = as.numeric(tf.exp %*% tf.beta +
                                   rnorm(n.obs, mean = 0, sd = sigma))
    }
  }
  return(out)
}

##2.
## bart_sig_prior: run BART over the settings grid letting BART derive its own
## prior sigma estimate (sigest = NA -> estimated internally) and record both
## the prior estimate and the posterior mean of sigma.
## Second item in the list has the following order:
##   (tree size, number of tfs, noise factor, posterior sigma estimate)
bart_sig_prior = function(tf.exp, gene_list, n.obs, reps, factor.vec, tree.vec,
                          tf.vec, sigest = NA, burn_size, post_size,
                          sig_df = 3, sig_quant = .9, thin.size = 2) {
  big.list = list()
  bart.sig = sigest
  count = 1
  for (i in 1:length(tree.vec)) {
    for (j in 1:length(factor.vec)) {
      for (k in 1:length(tf.vec)) {
        sig.holder = numeric(reps)  ## posterior sigma mean per rep
        prior.sig = numeric(reps)   ## prior sigma estimates for comparison
        true.tf = numeric(reps)     ## 1 if true TF got the most splits
        sel.prop = numeric(reps)    ## proportion of splits on the true TF
        ## general loop - pulls the response, then runs BART
        for (rep in 1:reps) {
          gene.exp = gene_list[[j]][[rep]]
          #print(gene.exp[1:4])
          print(var(gene.exp))
          train.exp = tf.exp[1:n.obs, 1:tf.vec[k]]
          ## BUG FIX: ntree was n.tree.vec[k] -- a global with the wrong index
          ## (k indexes tf.vec); use this grid cell's tree size instead.
          bart.mod = bart(x.train = train.exp,
                          y.train = gene.exp,
                          ntree = tree.vec[i],
                          sigest = bart.sig,
                          sigdf = sig_df,
                          sigquant = sig_quant,
                          nskip = burn_size,
                          ndpost = post_size,
                          keepevery = thin.size,
                          verbose = F)  ## suppress printing
          print(rep)
          sig.holder[rep] = mean(bart.mod$sigma)
          prior.sig[rep] = bart.mod$sigest
          sums = sum_calc(bart.mod)
          true.tf[rep] = which.max(sums) == 1
          sel.prop[rep] = sums[1] / sum(sums)
        }
        ## printing
        print(mean(sig.holder))
        print(bart.mod$sigest)
        print(true.tf)
        big.list[[count]] = list()
        big.list[[count]][[1]] = bart.mod
        big.list[[count]][[2]] = c(tree.vec[i], tf.vec[k], factor.vec[j],
                                   mean(sig.holder))
        big.list[[count]][[3]] = mean(prior.sig)
        big.list[[count]][[4]] = mean(true.tf)
        big.list[[count]][[5]] = mean(sel.prop)
        print(count)
        count = count + 1
      }
    }
  }
  return(big.list)
}

##3.
##3.
## bart_sig_post: run BART across a grid of (tree size x noise factor x number
## of TFs) settings, supplying a *fixed* prior sigma estimate per setting
## (sigest.vec) instead of letting BART estimate it from the data.
##
## Second item in each result list has the following order:
##   (tree size, number of tfs, noise factor, posterior sigma estimate)
## TODO: make more robust to handle different orderings - right now this just
## copies bart_sig_prior.
##
## Args:
##   tf.exp     - TF expression design matrix (rows = obs, cols = TFs)
##   gene_list  - list (by noise factor) of lists (by rep) of response vectors
##   n.obs      - number of observations of tf.exp to use
##   reps       - number of simulated genes per grid setting
##   factor.vec - noise/signal factors (first index into gene_list)
##   tree.vec   - BART ensemble sizes to try
##   tf.vec     - numbers of candidate TFs to include as predictors
##   sigest.vec - one prior sigma estimate per grid setting (indexed by count)
##   burn_size / post_size / thin.size - MCMC burn-in, posterior draws, thinning
##   sig_df / sig_quant - sigma-prior degrees of freedom and quantile
## Returns: list with one entry per setting:
##   [[1]] last bart fit, [[2]] c(trees, tfs, noise, mean posterior sigma),
##   [[3]] NA (prior-estimate slot, unused here), [[4]] proportion of reps where
##   the true TF (column 1) had the most splits, [[5]] mean selection proportion.
bart_sig_post = function(tf.exp, gene_list, n.obs, reps, factor.vec, tree.vec,
                         tf.vec, sigest.vec, burn_size, post_size,
                         sig_df = 3, sig_quant = .90, thin.size = 2) {
  big.list = list()
  count = 1
  for (i in 1:length(tree.vec)) {
    for (j in 1:length(factor.vec)) {
      for (k in 1:length(tf.vec)) {
        sig.holder = numeric(reps)  ## posterior sigma mean per rep
        true.tf = numeric(reps)     ## 1 if true TF got the most splits
        sel.prop = numeric(reps)    ## proportion of splits on the true TF
        for (rep in 1:reps) {
          gene.exp = gene_list[[j]][[rep]]
          #print(gene.exp[1:4])
          print(var(gene.exp))
          train.exp = tf.exp[1:n.obs, 1:tf.vec[k]]
          ## BUG FIX: ntree was n.tree.vec[k] -- a global with the wrong index
          ## (k indexes tf.vec, not tree sizes), so every grid cell silently ran
          ## with the same ensemble size. Use this cell's tree size, matching
          ## what is recorded in big.list[[count]][[2]].
          bart.mod = bart(x.train = train.exp,
                          y.train = gene.exp,
                          ntree = tree.vec[i],
                          sigest = sigest.vec[count],
                          sigdf = sig_df,
                          sigquant = sig_quant,
                          nskip = burn_size,
                          ndpost = post_size,
                          keepevery = thin.size,
                          verbose = F)  ## suppress printing
          print(rep)
          sig.holder[rep] = mean(bart.mod$sigma)
          sums = sum_calc(bart.mod)
          true.tf[rep] = which.max(sums) == 1
          sel.prop[rep] = sums[1] / sum(sums)
        }
        ## printing to check
        print(mean(sig.holder))
        print(bart.mod$sigest)
        print(true.tf)
        big.list[[count]] = list()
        big.list[[count]][[1]] = bart.mod
        big.list[[count]][[2]] = c(tree.vec[i], tf.vec[k], factor.vec[j],
                                   mean(sig.holder))
        big.list[[count]][[3]] = NA
        big.list[[count]][[4]] = mean(true.tf)
        big.list[[count]][[5]] = mean(sel.prop)
        print(count)
        count = count + 1
      }
    }
  }
  return(big.list)
}

##4.
## plot_sig_full: convergence plot of BART's sigma draws -- burn-in samples in
## red, posterior samples in black.
## Args:
##   bart.list - a bart fit (uses its $first.sigma burn-in draws and $sigma)
##   n_burn    - number of leading draws to colour as burn-in
##   subtitle  - text appended to the plot title
plot_sig_full = function(bart.list, n_burn, subtitle) {
  sig = c(bart.list$first.sigma, bart.list$sigma)
  burn.idx = 1:n_burn
  burn = sig[burn.idx]
  post = sig[-burn.idx]
  plot(burn,
       main = paste("Convergence Plot for Sigma\n", subtitle),
       xlab = "Sample Number",
       ylab = "Sigma",
       xlim = c(0, length(sig)),
       ylim = c(min(sig), max(sig)),
       col = "red")
  points((length(burn.idx) + 1):length(sig), post)
}

##5.
## bart_sig_summary: flatten a bart_sig_prior / bart_sig_post result list into
## a data frame with one row per simulation setting.
## Columns: tree count, TF count, noise/signal factor, whether the true TF
## (column 1) got the most splits, the top-3 TFs by split proportion (value and
## first matching index -- ties resolved by first index), the true TF's split
## count, total splits, posterior sigma estimate, and the true TF's proportion.
## Depends on sum_calc() / prop_calc() defined elsewhere in this project.
bart_sig_summary = function(bart.mod) {
  output = data.frame()
  ## Improvements over the original: redundant 'count' counter replaced by the
  ## loop index, and the unused 'sorted.sums' local removed.
  for (i in seq_along(bart.mod)) {
    output[i, 1] = bart.mod[[i]][[2]][1]  ## Trees
    output[i, 2] = bart.mod[[i]][[2]][2]  ## TFs
    output[i, 3] = bart.mod[[i]][[2]][3]  ## Noise factor
    sums = sum_calc(bart.mod[[i]][[1]])
    props = prop_calc(bart.mod[[i]][[1]])
    sorted.props = sort(props, decreasing = TRUE)
    output[i, 4] = which.max(sums) == 1   ## Does ours appear most?
    output[i, 5] = sorted.props[1]        ## Top three TFs
    output[i, 6] = which(props == sorted.props[1])[1]  ## ties: first index
    output[i, 7] = sorted.props[2]
    output[i, 8] = which(props == sorted.props[2])[1]
    output[i, 9] = sorted.props[3]
    output[i, 10] = which(props == sorted.props[3])[1]
    output[i, 11] = sums[1]               ## True TF's split count
    output[i, 12] = sum(sums)             ## Total splits
    output[i, 13] = mean(bart.mod[[i]][[1]]$sigma)
    output[i, 14] = props[1]
    print(i)
  }
  colnames(output) = c("Num_Trees", "Num_TFs", "Noise/Signal",
                       "True_Most_Common?", "1st", "Name-1st",
                       "2nd", "Name-2nd", "3rd", "Name-3rd",
                       "True_TF_Sum", "Tot_Splits",
                       "Sigma_Estimate", "True_TF_Prop")
  return(output)
}

## simulations with (scaled) inverse chi-square: visual sanity check of the
## sigma prior's concentration when the degrees of freedom are large.
library(geoR)
d = 150
x = seq(.5, 3, .01)
dens = dinvchisq(x, df = d, scale = 1)
plot(x, dens)
n = rinvchisq(10000, d, scale = 1)
mean(n)
quantile(n, c(.25, .5, .75))
plot(n)
var(n)
#Data reading & formatting
# household_power_consumption.txt is ';'-separated and encodes missing values
# as '?'.
household_power_consumption <- read.csv("~/Downloads/household_power_consumption.txt",
                                        sep = ";", na.strings = "?",
                                        stringsAsFactors = FALSE)
# Keep only the two target days (Date is still a d/m/Y string here)
npc_short <- subset(household_power_consumption,
                    Date == "1/2/2007" | Date == "2/2/2007")
npc_short$newDate <- as.Date(npc_short$Date, format = "%d/%m/%Y")
npc_short$datetime <- as.POSIXct(paste(npc_short$newDate, npc_short$Time))

#New device
# BUG FIX: dev.new() has no 'length' argument -- 'height' was intended.
# NOTE(review): dev.new() sizes are in inches, not pixels; the png copy below
# defaults to a 480x480 px file regardless -- confirm the intended window size.
dev.new(width = 480, height = 480)

#Plot
with(npc_short, hist(Global_active_power, col = "red",
                     xlab = "Global Active Power (kilowatts)",
                     main = "Global Active Power"))

#Print: copy the on-screen histogram to plot1.png, then close the device
dev.copy(png, "plot1.png")
dev.off()
/plot1.R
no_license
kpgergely/kpgergely_assignment_1
R
false
false
650
r
#Data reading & formatting
# household_power_consumption.txt is ';'-separated and encodes missing values
# as '?'.
household_power_consumption <- read.csv("~/Downloads/household_power_consumption.txt",
                                        sep = ";", na.strings = "?",
                                        stringsAsFactors = FALSE)
# Keep only the two target days (Date is still a d/m/Y string here)
npc_short <- subset(household_power_consumption,
                    Date == "1/2/2007" | Date == "2/2/2007")
npc_short$newDate <- as.Date(npc_short$Date, format = "%d/%m/%Y")
npc_short$datetime <- as.POSIXct(paste(npc_short$newDate, npc_short$Time))

#New device
# BUG FIX: dev.new() has no 'length' argument -- 'height' was intended.
# NOTE(review): dev.new() sizes are in inches, not pixels; the png copy below
# defaults to a 480x480 px file regardless -- confirm the intended window size.
dev.new(width = 480, height = 480)

#Plot
with(npc_short, hist(Global_active_power, col = "red",
                     xlab = "Global Active Power (kilowatts)",
                     main = "Global Active Power"))

#Print: copy the on-screen histogram to plot1.png, then close the device
dev.copy(png, "plot1.png")
dev.off()
##Plot all 4 graphs
# Assumes 'finalData' already exists in the workspace with a time column
# 'SetTime' and the household power consumption columns -- TODO confirm the
# upstream script that builds it.
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
# 2x2 grid of panels
par(mfrow=c(2,2))
# Panel 1 (top-left): global active power over time
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
# Panel 2 (top-right): voltage over time
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
# Panel 3 (bottom-left): the three sub-metering series overlaid; the first is
# drawn by plot() in the default colour (black), the other two added as lines
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
# borderless legend matching the three series colours
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
# Panel 4 (bottom-right): global reactive power over time
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
/plot4.R
no_license
lacey-loftin/ExData_Plotting1
R
false
false
790
r
##Plot all 4 graphs
# Assumes 'finalData' already exists in the workspace with a time column
# 'SetTime' and the household power consumption columns -- TODO confirm the
# upstream script that builds it.
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
# 2x2 grid of panels
par(mfrow=c(2,2))
# Panel 1 (top-left): global active power over time
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
# Panel 2 (top-right): voltage over time
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
# Panel 3 (bottom-left): the three sub-metering series overlaid; the first is
# drawn by plot() in the default colour (black), the other two added as lines
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
# borderless legend matching the three series colours
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
# Panel 4 (bottom-right): global reactive power over time
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
################################################################################
#
# Vladimir Zhurov, vzhurov2@uwo.ca, Fri Oct 20 21:43:31 2017
#
# This is workflow for analysis of RNA-Seq data
#
# Experiment: GENOMITE, mite on tomato data
# NOTE(review): header says "tomato" but the dataset name and paths below are
# strawberry (F. vesca) -- confirm which is correct.
#
# Interactive analysis using voom/limma
#
# Input: htseq-count results from tophat/bowtie2 or star
#
# Output: expression data tables
#
################################################################################

# Start -------------------------------------------------------------------

# load libraries
library("edgeR")
library("limma")

# specify data set to process
dataset <- "genomite_strawberry"
# specify mapper used: "star" or "tophat"
mapper <- "star"

# parameters of decideTests / topTable
adjust.method <- "BH"  # default is BH or fdr
p <- 0.05
lfc <- 1

# set main working path
mainDir <- "/home/sobczm/popgen/rnaseq/genomite/htseq_out/strawberry_htseq_merged/merged"
dataDir <- "/home/sobczm/popgen/rnaseq/genomite/htseq_out/strawberry_htseq_merged/merged"
annoDir <- paste0(mainDir, "/annotation")
edgerDir <- paste0(mainDir, "/edger")
voomDir <- paste0(mainDir, "/voom")
limmaDir <- voomDir
cpmDir <- paste0(mainDir, "/cpm")
geneListsDir <- paste0(mainDir, "/genelists")
gsaDir <- paste0(mainDir, "/gsa")
graphsDir <- paste0(mainDir, "/graphs")
topGODir <- paste0(mainDir, "/topGO")
heatmapDir <- paste0(mainDir, "/heatmap")
pcaDir <- paste0(mainDir, "/pca")

# load annotation ---------------------------------------------------------
setwd(annoDir)
annotation.length <- read.delim("vesca11_transcript_length-current.txt", header = TRUE)
# BUG FIX: 'annotation' is merged into every results table below, but its load
# was commented out, so the script stopped with an error at the first merge().
annotation <- read.delim("vesca11_annotation-current.txt", header = TRUE)

# helper functions ---------------------------------------------------------

# write a tab-delimited results table into limmaDir
write_results <- function(tbl, filename) {
  setwd(limmaDir)
  write.table(x = tbl, file = filename, row.names = FALSE, sep = "\t",
              quote = FALSE)
}

# write a bare gene-ID list into geneListsDir
write_gene_list <- function(ids, filename) {
  setwd(geneListsDir)
  write.table(x = ids, file = filename, quote = FALSE,
              col.names = FALSE, row.names = FALSE)
}

# prepend the rownames as an ID column and attach the functional annotation
annotate_table <- function(tbl) {
  tbl <- cbind(ID = rownames(tbl), tbl)
  merge(tbl, annotation, by = "ID", all.x = TRUE)
}

# Save every output for one fitted contrast set:
#   - DEGs significant in any individual contrast (decideTests filter)
#   - DEGs by overall moderated F p-value
#   - per-contrast tables and gene lists, plus Up/Down splits
save_deg_outputs <- function(eb.fit, fit, cont, suffix, p, lfc) {
  ### topTable over all contrasts, filtered with decideTests
  topTable.complete <- topTable(eb.fit, number = Inf, sort.by = "none",
                                adjust.method = adjust.method)
  topTable.complete <- cbind(ID = rownames(topTable.complete), topTable.complete)
  dt <- decideTests(eb.fit, lfc = lfc, p = p, adjust.method = adjust.method,
                    method = "separate")
  rs <- rowSums(abs(dt))
  # BUG FIX: the original merged with 'annotation' (merge() sorts rows by the
  # 'by' column) *before* subsetting with 'rs', so the filter mask no longer
  # lined up with the rows. Filter first, in fit order, then annotate.
  topTable.filtered <- topTable.complete[rs != 0, ]
  topTable.filtered <- merge(topTable.filtered, annotation, by = "ID",
                             all.x = TRUE)
  write_results(topTable.filtered,
                paste0("results_", dataset, suffix,
                       "_all-individual_p_based.txt"))
  write_gene_list(topTable.filtered$ID,
                  paste0("list_", dataset, suffix,
                         "_all-individual_p_based.txt"))

  ### DEG table based on overall p-value
  topTable.overall <- topTable(eb.fit, number = Inf, p = p, lfc = lfc,
                               adjust.method = adjust.method)
  topTable.overall <- annotate_table(topTable.overall)
  write_results(topTable.overall,
                paste0("results_", dataset, suffix,
                       "_all-overall_p_based.txt"))
  write_gene_list(topTable.overall$ID,
                  paste0("list_", dataset, suffix,
                         "_all-overall_p_based.txt"))

  ### individual results and gene lists per contrast
  cont.names <- colnames(cont)
  for (i in seq_len(ncol(cont))) {
    topTable.i <- topTable(eb.fit, coef = i, genelist = fit$genes, n = Inf,
                           p = p, lfc = lfc, adjust.method = adjust.method)
    topTable.i <- annotate_table(topTable.i)
    write_results(topTable.i,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], ".txt"))
    write_gene_list(topTable.i$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], ".txt"))
    # up- and down-regulated subsets
    # (consistency fix: every table is now written with quote = FALSE; the
    # original wrote only the Up/Down tables with quoting enabled)
    topTable.up <- topTable.i[topTable.i$logFC > 0, ]
    write_results(topTable.up,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], "-Up.txt"))
    write_gene_list(topTable.up$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], "-Up.txt"))
    topTable.down <- topTable.i[topTable.i$logFC < 0, ]
    write_results(topTable.down,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], "-Down.txt"))
    write_gene_list(topTable.down$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], "-Down.txt"))
  }
}

# voom analysis of differential gene expression --------------------------

# load count data: Targets.txt lists the htseq-count files, groups, labels
setwd(dataDir)
targets <- readTargets()
dge <- readDGE(files = targets$File, header = FALSE)
# calculate library size normalization factors
dge <- calcNormFactors(dge, method = "TMM")
# keep loci expressed at > 1 CPM in at least 3 samples
isexpr <- rowSums(cpm(dge) > 1) >= 3
dge <- dge[isexpr, keep.lib.sizes = FALSE]

# Constitutive differences between mite populations -----------------------

# design: additive batch correction + adaptation effect
batch <- factor(targets$Batch)
treatment <- factor(targets$Mite, levels = c("nonadapted", "adapted"))
design <- model.matrix(~ batch + treatment)
colnames(design) <- c("Intercept", "batch.2", "batch.3", "adapt")

# voom transformation with sample quality weights, then linear model fit
# dge.voom <- voom(dge, design, plot = FALSE)
dge.voom <- voomWithQualityWeights(dge, design, plot = FALSE)
fit <- lmFit(dge.voom, design)

# suffix for output files encodes the analysis and cut-offs
lfc_text <- round(2^lfc, digits = 1)
suffix <- paste("_voomW_Mite", mapper, lfc_text, p, adjust.method, sep = "_")

# create and fit contrasts
cont <- makeContrasts(
  Mite.Adapt.v.Nonadapt = adapt,
  levels = design
)
c.fit <- contrasts.fit(fit, cont)
eb.fit <- eBayes(c.fit)

save_deg_outputs(eb.fit, fit, cont, suffix, p, lfc)

# Constitutive difference between drought and no stress --------------------

# design: additive batch correction + plant treatment
batch <- factor(targets$Batch)
treatment <- factor(targets$Plant, levels = c("cntrl", "nostress", "drought"))
design <- model.matrix(~ batch + treatment)
colnames(design) <- c("Intercept", "batch.2", "batch.3", "nodrought", "drought")

# voom transformation with sample quality weights, then linear model fit
# dge.voom <- voom(dge, design, plot = FALSE)
dge.voom <- voomWithQualityWeights(dge, design, plot = FALSE)
fit <- lmFit(dge.voom, design)

# set LFC and FDR cut-offs for this analysis
p <- 0.05
lfc <- 0.58  # absolute fold change of 1.5, 50% increase or decrease

# suffix for output files
lfc_text <- round(2^lfc, digits = 1)
suffix <- paste("_voomW_Drought", mapper, lfc_text, p, adjust.method, sep = "_")

# create and fit contrasts
cont <- makeContrasts(
  Nodrought.Cntrl = nodrought,
  Drought.Cntrl = drought,
  Drought.Nodrought = drought - nodrought,
  levels = design
)
c.fit <- contrasts.fit(fit, cont)
eb.fit <- eBayes(c.fit)

save_deg_outputs(eb.fit, fit, cont, suffix, p, lfc)

# End ---------------------------------------------------------------------
/rnaseq/2_strawberry_dge.R
no_license
sunnycqcn/popgen
R
false
false
11,438
r
################################################################################
#
# Vladimir Zhurov, vzhurov2@uwo.ca, Fri Oct 20 21:43:31 2017
#
# This is workflow for analysis of RNA-Seq data
#
# Experiment: GENOMITE, mite on tomato data
# NOTE(review): header says "tomato" but the dataset name and paths below are
# strawberry (F. vesca) -- confirm which is correct.
#
# Interactive analysis using voom/limma
#
# Input: htseq-count results from tophat/bowtie2 or star
#
# Output: expression data tables
#
################################################################################

# Start -------------------------------------------------------------------

# load libraries
library("edgeR")
library("limma")

# specify data set to process
dataset <- "genomite_strawberry"
# specify mapper used: "star" or "tophat"
mapper <- "star"

# parameters of decideTests / topTable
adjust.method <- "BH"  # default is BH or fdr
p <- 0.05
lfc <- 1

# set main working path
mainDir <- "/home/sobczm/popgen/rnaseq/genomite/htseq_out/strawberry_htseq_merged/merged"
dataDir <- "/home/sobczm/popgen/rnaseq/genomite/htseq_out/strawberry_htseq_merged/merged"
annoDir <- paste0(mainDir, "/annotation")
edgerDir <- paste0(mainDir, "/edger")
voomDir <- paste0(mainDir, "/voom")
limmaDir <- voomDir
cpmDir <- paste0(mainDir, "/cpm")
geneListsDir <- paste0(mainDir, "/genelists")
gsaDir <- paste0(mainDir, "/gsa")
graphsDir <- paste0(mainDir, "/graphs")
topGODir <- paste0(mainDir, "/topGO")
heatmapDir <- paste0(mainDir, "/heatmap")
pcaDir <- paste0(mainDir, "/pca")

# load annotation ---------------------------------------------------------
setwd(annoDir)
annotation.length <- read.delim("vesca11_transcript_length-current.txt", header = TRUE)
# BUG FIX: 'annotation' is merged into every results table below, but its load
# was commented out, so the script stopped with an error at the first merge().
annotation <- read.delim("vesca11_annotation-current.txt", header = TRUE)

# helper functions ---------------------------------------------------------

# write a tab-delimited results table into limmaDir
write_results <- function(tbl, filename) {
  setwd(limmaDir)
  write.table(x = tbl, file = filename, row.names = FALSE, sep = "\t",
              quote = FALSE)
}

# write a bare gene-ID list into geneListsDir
write_gene_list <- function(ids, filename) {
  setwd(geneListsDir)
  write.table(x = ids, file = filename, quote = FALSE,
              col.names = FALSE, row.names = FALSE)
}

# prepend the rownames as an ID column and attach the functional annotation
annotate_table <- function(tbl) {
  tbl <- cbind(ID = rownames(tbl), tbl)
  merge(tbl, annotation, by = "ID", all.x = TRUE)
}

# Save every output for one fitted contrast set:
#   - DEGs significant in any individual contrast (decideTests filter)
#   - DEGs by overall moderated F p-value
#   - per-contrast tables and gene lists, plus Up/Down splits
save_deg_outputs <- function(eb.fit, fit, cont, suffix, p, lfc) {
  ### topTable over all contrasts, filtered with decideTests
  topTable.complete <- topTable(eb.fit, number = Inf, sort.by = "none",
                                adjust.method = adjust.method)
  topTable.complete <- cbind(ID = rownames(topTable.complete), topTable.complete)
  dt <- decideTests(eb.fit, lfc = lfc, p = p, adjust.method = adjust.method,
                    method = "separate")
  rs <- rowSums(abs(dt))
  # BUG FIX: the original merged with 'annotation' (merge() sorts rows by the
  # 'by' column) *before* subsetting with 'rs', so the filter mask no longer
  # lined up with the rows. Filter first, in fit order, then annotate.
  topTable.filtered <- topTable.complete[rs != 0, ]
  topTable.filtered <- merge(topTable.filtered, annotation, by = "ID",
                             all.x = TRUE)
  write_results(topTable.filtered,
                paste0("results_", dataset, suffix,
                       "_all-individual_p_based.txt"))
  write_gene_list(topTable.filtered$ID,
                  paste0("list_", dataset, suffix,
                         "_all-individual_p_based.txt"))

  ### DEG table based on overall p-value
  topTable.overall <- topTable(eb.fit, number = Inf, p = p, lfc = lfc,
                               adjust.method = adjust.method)
  topTable.overall <- annotate_table(topTable.overall)
  write_results(topTable.overall,
                paste0("results_", dataset, suffix,
                       "_all-overall_p_based.txt"))
  write_gene_list(topTable.overall$ID,
                  paste0("list_", dataset, suffix,
                         "_all-overall_p_based.txt"))

  ### individual results and gene lists per contrast
  cont.names <- colnames(cont)
  for (i in seq_len(ncol(cont))) {
    topTable.i <- topTable(eb.fit, coef = i, genelist = fit$genes, n = Inf,
                           p = p, lfc = lfc, adjust.method = adjust.method)
    topTable.i <- annotate_table(topTable.i)
    write_results(topTable.i,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], ".txt"))
    write_gene_list(topTable.i$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], ".txt"))
    # up- and down-regulated subsets
    # (consistency fix: every table is now written with quote = FALSE; the
    # original wrote only the Up/Down tables with quoting enabled)
    topTable.up <- topTable.i[topTable.i$logFC > 0, ]
    write_results(topTable.up,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], "-Up.txt"))
    write_gene_list(topTable.up$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], "-Up.txt"))
    topTable.down <- topTable.i[topTable.i$logFC < 0, ]
    write_results(topTable.down,
                  paste0("results_", dataset, suffix, "_",
                         cont.names[i], "-Down.txt"))
    write_gene_list(topTable.down$ID,
                    paste0("list_", dataset, suffix, "_",
                           cont.names[i], "-Down.txt"))
  }
}

# voom analysis of differential gene expression --------------------------

# load count data: Targets.txt lists the htseq-count files, groups, labels
setwd(dataDir)
targets <- readTargets()
dge <- readDGE(files = targets$File, header = FALSE)
# calculate library size normalization factors
dge <- calcNormFactors(dge, method = "TMM")
# keep loci expressed at > 1 CPM in at least 3 samples
isexpr <- rowSums(cpm(dge) > 1) >= 3
dge <- dge[isexpr, keep.lib.sizes = FALSE]

# Constitutive differences between mite populations -----------------------

# design: additive batch correction + adaptation effect
batch <- factor(targets$Batch)
treatment <- factor(targets$Mite, levels = c("nonadapted", "adapted"))
design <- model.matrix(~ batch + treatment)
colnames(design) <- c("Intercept", "batch.2", "batch.3", "adapt")

# voom transformation with sample quality weights, then linear model fit
# dge.voom <- voom(dge, design, plot = FALSE)
dge.voom <- voomWithQualityWeights(dge, design, plot = FALSE)
fit <- lmFit(dge.voom, design)

# suffix for output files encodes the analysis and cut-offs
lfc_text <- round(2^lfc, digits = 1)
suffix <- paste("_voomW_Mite", mapper, lfc_text, p, adjust.method, sep = "_")

# create and fit contrasts
cont <- makeContrasts(
  Mite.Adapt.v.Nonadapt = adapt,
  levels = design
)
c.fit <- contrasts.fit(fit, cont)
eb.fit <- eBayes(c.fit)

save_deg_outputs(eb.fit, fit, cont, suffix, p, lfc)

# Constitutive difference between drought and no stress --------------------

# design: additive batch correction + plant treatment
batch <- factor(targets$Batch)
treatment <- factor(targets$Plant, levels = c("cntrl", "nostress", "drought"))
design <- model.matrix(~ batch + treatment)
colnames(design) <- c("Intercept", "batch.2", "batch.3", "nodrought", "drought")

# voom transformation with sample quality weights, then linear model fit
# dge.voom <- voom(dge, design, plot = FALSE)
dge.voom <- voomWithQualityWeights(dge, design, plot = FALSE)
fit <- lmFit(dge.voom, design)

# set LFC and FDR cut-offs for this analysis
p <- 0.05
lfc <- 0.58  # absolute fold change of 1.5, 50% increase or decrease

# suffix for output files
lfc_text <- round(2^lfc, digits = 1)
suffix <- paste("_voomW_Drought", mapper, lfc_text, p, adjust.method, sep = "_")

# create and fit contrasts
cont <- makeContrasts(
  Nodrought.Cntrl = nodrought,
  Drought.Cntrl = drought,
  Drought.Nodrought = drought - nodrought,
  levels = design
)
c.fit <- contrasts.fit(fit, cont)
eb.fit <- eBayes(c.fit)

save_deg_outputs(eb.fit, fit, cont, suffix, p, lfc)

# End ---------------------------------------------------------------------
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/knitr_helper.R \name{sub_ext} \alias{sub_ext} \title{substitute extension of file x with ext} \usage{ sub_ext(x, ext) } \arguments{ \item{x}{vector with original file names} \item{ext}{new extension to be added} } \value{ file names with new extensions } \description{ x is assumed to be a vector of characters which stand for filenames. The extension of the file name is the suffix that occurs after the last dot (.) in every component of x. The substitution is done in two steps. First the existing extension of x is removed using function \code{sans_ext}. Then the new extension is added with a dot (.) as separator. }
/man/sub_ext.Rd
no_license
charlotte-ngs/rcoursetools
R
false
true
702
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/knitr_helper.R \name{sub_ext} \alias{sub_ext} \title{substitute extension of file x with ext} \usage{ sub_ext(x, ext) } \arguments{ \item{x}{vector with original file names} \item{ext}{new extension to be added} } \value{ file names with new extensions } \description{ x is assumed to be a vector of characters which stand for filenames. The extension of the file name is the suffix that occurs after the last dot (.) in every component of x. The substitution is done in two steps. First the existing extension of x is removed using function \code{sans_ext}. Then the new extension is added with a dot (.) as separator. }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.evsi.R
\name{summary.evsi}
\alias{summary.evsi}
\title{Summarises the output of an evsi object}
\usage{
\method{summary}{evsi}(obj.evsi, wtp = NULL, N = NULL, CI = NULL)
}
\arguments{
\item{obj.evsi}{An evsi object.}

\item{wtp}{A fixed willingness-to-pay for which we would like to summarise the EVSI. If NULL then the willingness-to-pay associated with the optimal decision uncertainty is chosen.}

\item{N}{The sample size for which the EVSI is to be summarised. If NULL then the middle value for N is chosen.}
}
\value{
Prints a summary table with information on the Value of Information analysis.
}
\description{
Summarises the output of an evsi object.
}
\author{
Anna Heath and Gianluca Baio
}
/man/summary.evsi.Rd
no_license
annaheath/EVSI
R
false
true
781
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.evsi.R \name{summary.evsi} \alias{summary.evsi} \title{Summarises the output of an evsi object} \usage{ \method{summary}{evsi}(obj.evsi, wtp = NULL, N = NULL, CI = NULL) } \arguments{ \item{obj.evsi}{An evsi object.} \item{wtp}{A fixed willingness-to-pay for which we would like to summarise the EVSI. If NULL then the willingness-to-pay assocaited with the optimal decision uncertainty is chosen.} \item{N}{The sample size for which the EVSI is to be summerised. If NULL then chose the middle value for N.} } \value{ Prints a summary table with information on the Value of Information analysis } \description{ Summarises the output of an evsi object } \author{ Anna Heath and Gianluca Baio }
# Regression test for plot.mgcv.smooth: render the first 1-D smooth of a
# fitted GAM under every combination of the residuals/scale/se/rug flags.
# There are no explicit expectations; the test passes if no combination
# errors while building or arranging the plots.
context("plot.mgcv.smooth ")
test_that("plot.mgcv.smooth", {
  library(mgcViz); library(plyr); library(gridExtra)
  set.seed(2)
  ## simulate some data...
  dat <- gamSim(1, n = 200, dist = "normal", scale = 2)
  x <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
  # All 16 TRUE/FALSE combinations of the four plotting flags.
  # TRUE/FALSE spelled out (T/F are ordinary variables and can be rebound).
  combs <- as.matrix(expand.grid(c(FALSE, TRUE), c(FALSE, TRUE),
                                 c(FALSE, TRUE), c(FALSE, TRUE)))
  # One plot object per flag combination; draw = FALSE defers rendering.
  plts <- alply(combs, 1, function(.inp)
    plot(x, residuals = .inp[1], scale = .inp[2], se = .inp[3],
         rug = .inp[4], select = 1, draw = FALSE)[[1]])
  # Rendering all panels at once exercises the actual drawing code.
  do.call(grid.arrange, plts)
})
/inst/tests/test-plot_mgcv_smooth.R
no_license
dill/mgcViz
R
false
false
495
r
context("plot.mgcv.smooth ") test_that("plot.mgcv.smooth", { library(mgcViz); library(plyr); library(gridExtra) set.seed(2) ## simulate some data... dat <- gamSim(1,n=200,dist="normal",scale=2) x <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),data=dat) combs <- as.matrix(expand.grid(c(F,T),c(F,T),c(F,T),c(F,T))) plts <- alply(combs, 1, function(.inp) plot(x, residuals = .inp[1], scale = .inp[2], se = .inp[3], rug = .inp[4], select = 1, draw = F)[[1]]) do.call(grid.arrange, plts) })
library("aroma.light")

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Example 2: Two-enzyme fragment-length normalization of 5 arrays
# (I <- 5 below; simulates Nsp/Sty-style restriction-enzyme signals,
# normalizes them with aroma.light::normalizeFragmentLength(), then
# plots raw vs normalized data and log-ratio densities)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
set.seed(0xbeef)  # reproducible simulation

# Number samples (arrays)
I <- 5

# Number of loci (units)
J <- 3000

# Fragment lengths (two enzymes); one column per enzyme
fl <- matrix(0, nrow=J, ncol=2)
fl[,1] <- seq(from=100, to=1000, length.out=J)
fl[,2] <- seq(from=1000, to=100, length.out=J)

# Let 1/2 of the units be on both enzymes
# (every 4th unit lacks enzyme 1; the offset-by-one 4th lacks enzyme 2)
fl[seq(from=1, to=J, by=4),1] <- NA_real_
fl[seq(from=2, to=J, by=4),2] <- NA_real_

# Let some have unknown fragment lengths (NA for both enzymes)
hasUnknownFL <- seq(from=1, to=J, by=15)
fl[hasUnknownFL,] <- NA_real_

# Sty/Nsp mixing proportions:
rho <- rep(1, I)
rho[1] <- 1/3; # Less Sty in 1st sample
rho[3] <- 3/2; # More Sty in 3rd sample

# Simulate data: z[j, e, i] = log2 signal of unit j, enzyme e, sample i
z <- array(0, dim=c(J,2,I))
maxLog2Theta <- 12
for (ii in 1:I) {
  # Common effect for both enzymes: signal decays with fragment length;
  # a random exponent k makes the decay curve differ per sample
  mu <- function(fl) {
    k <- runif(n=1, min=3, max=5)
    mu <- rep(maxLog2Theta, length(fl))
    ok <- is.finite(fl)
    mu[ok] <- mu[ok] - fl[ok]^{1/k}
    mu
  }
  # Calculate the effect for each data point
  for (ee in 1:2) {
    z[,ee,ii] <- mu(fl[,ee])
  }
  # Update the Sty/Nsp mixing proportions (scale the enzyme-2 signal)
  ee <- 2
  z[,ee,ii] <- rho[ii]*z[,ee,ii]
  # Add random errors
  for (ee in 1:2) {
    eps <- rnorm(J, mean=0, sd=1/sqrt(2))
    z[,ee,ii] <- z[,ee,ii] + eps
  }
}

# Partition units by which enzymes have a known fragment length
hasFl <- is.finite(fl)
unitSets <- list(
  nsp = which( hasFl[,1] & !hasFl[,2]),   # enzyme 1 only
  sty = which(!hasFl[,1] & hasFl[,2]),    # enzyme 2 only
  both = which( hasFl[,1] & hasFl[,2]),   # both enzymes
  none = which(!hasFl[,1] & !hasFl[,2])   # unknown lengths
)

# The observed data is a mix of two enzymes
theta <- matrix(NA_real_, nrow=J, ncol=I)

# Single-enzyme units: intensity from that enzyme alone
for (ee in 1:2) {
  uu <- unitSets[[ee]]
  theta[uu,] <- 2^z[uu,ee,]
}

# Both-enzyme units (sum on intensity scale, averaged)
uu <- unitSets$both
theta[uu,] <- (2^z[uu,1,]+2^z[uu,2,])/2

# Missing units (sample from the others)
uu <- unitSets$none
theta[uu,] <- apply(theta, MARGIN=2, sample, size=length(uu))

# Calculate target array: per-unit mean over samples = normalization baseline
thetaT <- rowMeans(theta, na.rm=TRUE)

# Per-enzyme target functions: lowess fit of log2 baseline vs fragment length
# NOTE(review): these closures resolve `fit` in the global environment at
# call time, so later reassignments of `fit` (e.g. in the plotting loop
# below) change what they predict with — confirm this is intended.
targetFcns <- list()
for (ee in 1:2) {
  uu <- unitSets[[ee]]
  fit <- lowess(fl[uu,ee], log2(thetaT[uu]))
  class(fit) <- "lowess"
  targetFcns[[ee]] <- function(fl, ...) {
    predict(fit, newdata=fl)
  }
}

# Fit model only to a subset of the data (every 10th unit is held out)
subsetToFit <- setdiff(1:J, seq(from=1, to=J, by=10))

# Normalize data (to a target baseline); .returnFit=TRUE attaches the
# per-enzyme model fits as an attribute so they can be plotted later
thetaN <- matrix(NA_real_, nrow=J, ncol=I)
fits <- vector("list", I)
for (ii in 1:I) {
  lthetaNi <- normalizeFragmentLength(log2(theta[,ii]), targetFcns=targetFcns,
       fragmentLengths=fl, onMissing="median",
       subsetToFit=subsetToFit, .returnFit=TRUE)
  fits[[ii]] <- attr(lthetaNi, "modelFit")
  thetaN[,ii] <- 2^lthetaNi
}

# Plot raw data (shared axis limits and labels for all panels)
xlim <- c(0, max(fl, na.rm=TRUE))
ylim <- c(0, max(log2(theta), na.rm=TRUE))
Mlim <- c(-1,1)*4
xlab <- "Fragment length"
ylab <- expression(log2(theta))
Mlab <- expression(M==log[2](theta/theta[R]))

# 3 rows of panels (raw, normalized, density) x I sample columns
layout(matrix(1:(3*I), ncol=I, byrow=TRUE))
for (ii in 1:I) {
  plot(NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, main="raw")
  # Single-enzyme units
  for (ee in 1:2) {
    # The raw data
    uu <- unitSets[[ee]]
    points(fl[uu,ee], log2(theta[uu,ii]), col=ee+1)
  }
  # Both-enzyme units (use fragment-length for enzyme #1)
  uu <- unitSets$both
  points(fl[uu,1], log2(theta[uu,ii]), col=3+1)
  for (ee in 1:2) {
    # The true effects
    uu <- unitSets[[ee]]
    lines(lowess(fl[uu,ee], log2(theta[uu,ii])), col="black", lwd=4, lty=3)
    # The estimated effects
    fit <- fits[[ii]][[ee]]$fit
    lines(fit, col="orange", lwd=3)
    muT <- targetFcns[[ee]](fl[uu,ee])
    lines(fl[uu,ee], muT, col="cyan", lwd=1)
  }
}

# Calculate log-ratios relative to the row-mean reference array
thetaR <- rowMeans(thetaN, na.rm=TRUE)
M <- log2(thetaN/thetaR)

# Plot normalized data
for (ii in 1:I) {
  plot(NA, xlim=xlim, ylim=Mlim, xlab=xlab, ylab=Mlab, main="normalized")
  # Single-enzyme units
  for (ee in 1:2) {
    # The normalized data
    uu <- unitSets[[ee]]
    points(fl[uu,ee], M[uu,ii], col=ee+1)
  }
  # Both-enzyme units (use fragment-length for enzyme #1)
  uu <- unitSets$both
  points(fl[uu,1], M[uu,ii], col=3+1)
}

# Density of the log-ratios per unit set (aroma.light::plotDensity)
ylim <- c(0,1.5)
for (ii in 1:I) {
  data <- list()
  for (ee in 1:2) {
    # The normalized data
    uu <- unitSets[[ee]]
    data[[ee]] <- M[uu,ii]
  }
  uu <- unitSets$both
  if (length(uu) > 0)
    data[[3]] <- M[uu,ii]
  uu <- unitSets$none
  if (length(uu) > 0)
    data[[4]] <- M[uu,ii]
  cols <- seq_along(data)+1
  plotDensity(data, col=cols, xlim=Mlim, xlab=Mlab, main="normalized")
  abline(v=0, lty=2)
}
/tests/normalizeFragmentLength-ex2.R
no_license
HenrikBengtsson/aroma.light
R
false
false
4,659
r
library("aroma.light") # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Example 2: Two-enzyme fragment-length normalization of 6 arrays # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - set.seed(0xbeef) # Number samples I <- 5 # Number of loci J <- 3000 # Fragment lengths (two enzymes) fl <- matrix(0, nrow=J, ncol=2) fl[,1] <- seq(from=100, to=1000, length.out=J) fl[,2] <- seq(from=1000, to=100, length.out=J) # Let 1/2 of the units be on both enzymes fl[seq(from=1, to=J, by=4),1] <- NA_real_ fl[seq(from=2, to=J, by=4),2] <- NA_real_ # Let some have unknown fragment lengths hasUnknownFL <- seq(from=1, to=J, by=15) fl[hasUnknownFL,] <- NA_real_ # Sty/Nsp mixing proportions: rho <- rep(1, I) rho[1] <- 1/3; # Less Sty in 1st sample rho[3] <- 3/2; # More Sty in 3rd sample # Simulate data z <- array(0, dim=c(J,2,I)) maxLog2Theta <- 12 for (ii in 1:I) { # Common effect for both enzymes mu <- function(fl) { k <- runif(n=1, min=3, max=5) mu <- rep(maxLog2Theta, length(fl)) ok <- is.finite(fl) mu[ok] <- mu[ok] - fl[ok]^{1/k} mu } # Calculate the effect for each data point for (ee in 1:2) { z[,ee,ii] <- mu(fl[,ee]) } # Update the Sty/Nsp mixing proportions ee <- 2 z[,ee,ii] <- rho[ii]*z[,ee,ii] # Add random errors for (ee in 1:2) { eps <- rnorm(J, mean=0, sd=1/sqrt(2)) z[,ee,ii] <- z[,ee,ii] + eps } } hasFl <- is.finite(fl) unitSets <- list( nsp = which( hasFl[,1] & !hasFl[,2]), sty = which(!hasFl[,1] & hasFl[,2]), both = which( hasFl[,1] & hasFl[,2]), none = which(!hasFl[,1] & !hasFl[,2]) ) # The observed data is a mix of two enzymes theta <- matrix(NA_real_, nrow=J, ncol=I) # Single-enzyme units for (ee in 1:2) { uu <- unitSets[[ee]] theta[uu,] <- 2^z[uu,ee,] } # Both-enzyme units (sum on intensity scale) uu <- unitSets$both theta[uu,] <- (2^z[uu,1,]+2^z[uu,2,])/2 # Missing units (sample from the others) uu <- unitSets$none theta[uu,] <- apply(theta, MARGIN=2, sample, size=length(uu)) # Calculate target array thetaT <- 
rowMeans(theta, na.rm=TRUE) targetFcns <- list() for (ee in 1:2) { uu <- unitSets[[ee]] fit <- lowess(fl[uu,ee], log2(thetaT[uu])) class(fit) <- "lowess" targetFcns[[ee]] <- function(fl, ...) { predict(fit, newdata=fl) } } # Fit model only to a subset of the data subsetToFit <- setdiff(1:J, seq(from=1, to=J, by=10)) # Normalize data (to a target baseline) thetaN <- matrix(NA_real_, nrow=J, ncol=I) fits <- vector("list", I) for (ii in 1:I) { lthetaNi <- normalizeFragmentLength(log2(theta[,ii]), targetFcns=targetFcns, fragmentLengths=fl, onMissing="median", subsetToFit=subsetToFit, .returnFit=TRUE) fits[[ii]] <- attr(lthetaNi, "modelFit") thetaN[,ii] <- 2^lthetaNi } # Plot raw data xlim <- c(0, max(fl, na.rm=TRUE)) ylim <- c(0, max(log2(theta), na.rm=TRUE)) Mlim <- c(-1,1)*4 xlab <- "Fragment length" ylab <- expression(log2(theta)) Mlab <- expression(M==log[2](theta/theta[R])) layout(matrix(1:(3*I), ncol=I, byrow=TRUE)) for (ii in 1:I) { plot(NA, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, main="raw") # Single-enzyme units for (ee in 1:2) { # The raw data uu <- unitSets[[ee]] points(fl[uu,ee], log2(theta[uu,ii]), col=ee+1) } # Both-enzyme units (use fragment-length for enzyme #1) uu <- unitSets$both points(fl[uu,1], log2(theta[uu,ii]), col=3+1) for (ee in 1:2) { # The true effects uu <- unitSets[[ee]] lines(lowess(fl[uu,ee], log2(theta[uu,ii])), col="black", lwd=4, lty=3) # The estimated effects fit <- fits[[ii]][[ee]]$fit lines(fit, col="orange", lwd=3) muT <- targetFcns[[ee]](fl[uu,ee]) lines(fl[uu,ee], muT, col="cyan", lwd=1) } } # Calculate log-ratios thetaR <- rowMeans(thetaN, na.rm=TRUE) M <- log2(thetaN/thetaR) # Plot normalized data for (ii in 1:I) { plot(NA, xlim=xlim, ylim=Mlim, xlab=xlab, ylab=Mlab, main="normalized") # Single-enzyme units for (ee in 1:2) { # The normalized data uu <- unitSets[[ee]] points(fl[uu,ee], M[uu,ii], col=ee+1) } # Both-enzyme units (use fragment-length for enzyme #1) uu <- unitSets$both points(fl[uu,1], M[uu,ii], col=3+1) } ylim 
<- c(0,1.5) for (ii in 1:I) { data <- list() for (ee in 1:2) { # The normalized data uu <- unitSets[[ee]] data[[ee]] <- M[uu,ii] } uu <- unitSets$both if (length(uu) > 0) data[[3]] <- M[uu,ii] uu <- unitSets$none if (length(uu) > 0) data[[4]] <- M[uu,ii] cols <- seq_along(data)+1 plotDensity(data, col=cols, xlim=Mlim, xlab=Mlab, main="normalized") abline(v=0, lty=2) }
# Visualise 2014 stroke incidence among adults (>= 18) for Illinois cities
# using the CDC "500 Cities: Local Data for Better Health" data set.
library(tidyverse)
library("dplyr")  # redundant after tidyverse, kept so attach order is unchanged

# Raw data: one row per city/measure/year observation.
bill <- read.csv("500_Cities__Local_Data_for_Better_Health.csv")
print(bill)

# Per-city stroke summary for Illinois, Health Outcomes, 2014.
State_description <- bill %>%
  select(Year, StateAbbr, Category, CityName, Measure, PopulationCount,
         Data_Value, Low_Confidence_Limit, High_Confidence_Limit) %>%
  # Combined filters are equivalent to the original chained filter() calls.
  filter(StateAbbr == "IL",
         Category == "Health Outcomes",
         Measure == "Stroke among adults aged >=18 Years",
         Year == 2014) %>%
  # Data_Value is a percentage, so estimated case count = pct * pop / 100.
  # (The original passed a bare no-op `PopulationCount` argument here.)
  mutate(PopIncidence = Data_Value * PopulationCount * 0.01) %>%
  group_by(CityName, Measure, Year) %>%
  # FIX: na.rm = TRUE added to the denominator to match the numerator, so
  # a city with a partially missing PopulationCount does not collapse to NA.
  # NOTE(review): "IncidenceRate" is a summed case count and "Incidence" is
  # a rate per 100 — names look swapped, but are kept for compatibility.
  summarise(IncidenceRate = sum(PopIncidence, na.rm = TRUE),
            Incidence = 100 * IncidenceRate / sum(PopulationCount, na.rm = TRUE))

View(State_description)
View(bill)

# Horizontal bar chart of incidence per city.
s <- ggplot(State_description, aes(x = CityName, y = Incidence, fill = CityName))
s + ggtitle("Stroke among adults aged >=18 Years") +
  coord_flip() +
  geom_col() +
  # NOTE(review): colours are positional (18 values) — verify the number of
  # cities in the filtered data still matches this vector.
  scale_fill_manual(values = c("orange", "orange", "black", "orange", "black",
                               "orange", "orange", "black", "orange", "orange",
                               "orange", "orange", "orange", "black", "black",
                               "orange", "black", "orange"))
/Visualize Your Data6.R
no_license
Ory-Data-Science/final-project-sam-and-stacey
R
false
false
1,050
r
library(tidyverse) library("dplyr") bill <- read.csv("500_Cities__Local_Data_for_Better_Health.csv") print(bill) State_description <- bill %>% select(Year, StateAbbr, Category, CityName, Measure, PopulationCount, Data_Value, Low_Confidence_Limit,High_Confidence_Limit)%>% filter(StateAbbr =="IL")%>% filter(Category =="Health Outcomes")%>% filter(Measure =="Stroke among adults aged >=18 Years")%>% filter(Year == 2014)%>% mutate(PopulationCount, PopIncidence = Data_Value * PopulationCount * 0.01)%>% group_by(CityName, Measure, Year)%>% summarise(IncidenceRate=sum(PopIncidence, na.rm = TRUE), Incidence = 100*IncidenceRate/sum(PopulationCount)) View(State_description) View(bill) s <- ggplot(State_description,aes(x=CityName,y=Incidence,fill=CityName)) s + ggtitle("Stroke among adults aged >=18 Years")+ coord_flip() + geom_col()+ scale_fill_manual(values=c("orange", "orange","black","orange","black","orange","orange","black","orange","orange","orange","orange","orange","black","black","orange","black","orange"))
# Subject : Data Programming HW 4
# Author : Yim Yonghwan
# Final update : 2018.11.08
#
# Solutions to probability-distribution exercises using base R's
# d/p/q functions (density, CDF, quantile). All assignments use `<-`
# (the original mixed in `=`), and distribution arguments are spelled
# out in full to avoid partial argument matching.
########################################################################################
# Ex 1-1. Binomial: P(X = 0), X ~ Bin(30, 0.05)
dbinom(0, size = 30, prob = 0.05)
# Ex 1-2. P(X >= 4) = 1 - P(X <= 3)
prob1 <- 1 - pbinom(3, size = 30, prob = 0.05); prob1
########################################################################################
# Ex 2-1. Poisson approximation: P(X = 0), lambda = 30 * 0.05
dpois(0, lambda = 1.5)
# Ex 2-2. P(X >= 4)
prob2 <- 1 - ppois(3, lambda = 1.5); prob2
########################################################################################
# Ex 3-1. Negative binomial: P(9 failures before the 1st success)
dnbinom(x = 9, size = 1, prob = 0.7)
dbinom(1, size = 10, prob = 0.7)
# Ex 3-2. P(2 failures before the 3rd success)
dnbinom(x = 2, size = 3, prob = 0.7)
########################################################################################
# Ex 4-1. Normal: P(X > 180), X ~ N(170, 5^2)
prob4 <- 1 - pnorm(180, mean = 170, sd = 5); prob4
# Ex 4-2. 70th percentile
qnorm(0.7, mean = 170, sd = 5)
########################################################################################
# Ex 5. t distribution: 30th percentile with 24 df, rescaled to sd 15, mean 66
t1 <- qt(0.3, 24); t1
t1 * 15 + 66
########################################################################################
# Ex 6-1. Exponential: P(X <= 1), rate = 1/2
pexp(1, 1/2)
# Ex 6-2. P(X > 3)
p1 <- 1 - pexp(3, 1/2); p1
########################################################################################
# Ex 7-1. Chi-square 95th percentile, 12 df
qchisq(0.95, 12)
# Ex 7-2. t 90th percentile, 27 df
qt(0.90, 27)
# Ex 7-3. F 97.5th percentile, df1 = 12, df2 = 50
qf(0.975, 12, 50)
########################################################################################
# Ex 8-1. NOTE(review): this is the expected value n*p of Bin(15, 1/6),
# not a probability, despite the p1 name — confirm against the exercise.
p1 <- 15 * (1/6); p1
# Ex 8-2. P(X <= 5), X ~ Bin(15, 1/2)
p2 <- pbinom(5, 15, 1/2); p2
# Ex 8-3. P(X = 10), X ~ Bin(15, 2/3)
p3 <- dbinom(10, 15, 2/3); p3
########################################################################################
# Ex 9. Poisson with lambda = 650 * 0.00035
lambda1 <- 650 * 0.00035; lambda1
ppois(5, lambda1)
########################################################################################
# Ex 10. Exponential: rate per minute, P(X <= 0.5)
m1 <- 2/5; m1 # per min
p1 <- pexp(0.5, m1); p1
########################################################################################
# Ex 11-1. P(|Z| <= 3) built up from the standard normal CDF
p1 <- pnorm(3, mean = 0, sd = 1); p1
p2 <- p1 - 0.5; p2
p3 <- p2 * 2; p3
# Ex 11-2. P(|Z| > 1.64)
p1 <- 1 - pnorm(1.64, mean = 0, sd = 1); p1
p2 <- p1 * 2; p2
# Ex 11-3. 90th percentile of the standard normal
qnorm(0.9, mean = 0, sd = 1)
# Ex 11-4. P(2 < X < 5) for X ~ N(3, 2^2), via standardisation
mu1 <- 3; mu1
sd1 <- 2; sd1
z1 <- (2 - mu1) / sd1; z1
z2 <- (5 - mu1) / sd1; z2
p1 <- pnorm(z2, mean = 0, sd = 1); p1
p2 <- pnorm(z1, mean = 0, sd = 1); p2
p3 <- p1 - p2; p3
########################################################################################
# Ex 12. NOTE(review): exp(mean(x)) collapses to a single scalar over the
# whole x grid inside curve(); exp(x) may have been intended — confirm.
f <- function(x) cos(2*pi*x) * exp(mean(x))
curve(f, -2, 2)
/DP_HW_4.R
no_license
YonghwanYim/Data_Programming
R
false
false
2,659
r
# Subject : Data Programming HW 4 # Author : Yim Yonghwan # Final update : 2018.11.08 ######################################################################################## # Ex 1-1.Binomial dbinom(0, size = 30, prob = 0.05) # Ex 1-2. prob1 <- 1 - pbinom(3, size = 30, prob = 0.05); prob1 ######################################################################################## # Ex 2-1. Poisson dpois(0, lambda = 1.5) # lambda = 30 * 0.05 # Ex 2-2. prob2 <- 1 - ppois(3, lambda = 1.5); prob2 ######################################################################################## # Ex 3-1. Negative binomial dnbinom(x = 9, 1, p = 0.7) dbinom(1, size = 10, prob = 0.7) # Ex 3-2. dnbinom(x = 2, 3, p = 0.7) ######################################################################################## # Ex 4-1. Normal prob4 <- 1 - pnorm(180, mean = 170, sd = 5); prob4 # Ex 4-2. qnorm(0.7, mean = 170, sd = 5) ######################################################################################## # Ex 5. t t1 <- qt(0.3, 24); t1 t1 * 15 + 66 ######################################################################################## # Ex 6-1. Exponential pexp(1, 1/2) # Ex 6-2. p1 <- 1 - pexp(3, 1/2); p1 ######################################################################################## # Ex 7-1. Chi-square qchisq(0.95, 12) # Ex 7-2. t qt(0.90, 27) # Ex 7-3. F qf(0.975, 12, 50) ######################################################################################## # Ex 8-1. p1 <- 15 * (1/6); p1 # Ex 8-2. p2 <- pbinom(5, 15, 1/2); p2 # Ex 8-3. p3 <- dbinom(10, 15, 2/3); p3 ######################################################################################## # Ex 9. lambda1 <- 650 * 0.00035; lambda1 ppois(5, lambda1) ######################################################################################## # Ex 10. m1 <- 2/5; m1 # per min p1 <- pexp(0.5, m1); p1 ######################################################################################## # Ex 11-1. 
p1 <- pnorm(3, mean = 0, sd = 1); p1 p2 <- p1 - 0.5; p2 p3 <- p2 * 2; p3 # Ex 11-2. p1 <- 1 - pnorm(1.64, mean = 0, sd = 1); p1 p2 <- p1 * 2; p2 # Ex 11-3. qnorm(0.9, mean = 0, sd = 1) # Ex 11-4. mu1 <- 3; mu1 sd1 <- 2; sd1 z1 = (2 - mu1) / sd1; z1 z2 = (5 - mu1) / sd1; z2 p1 <- pnorm(z2, mean = 0, sd = 1); p1 p2 <- pnorm(z1, mean = 0, sd = 1); p2 p3 <- p1 - p2; p3 ######################################################################################## # Ex 12. f = function(x) cos(2*pi*x) * exp(mean(x)) curve(f, -2, 2)
# Descriptive statistics for observed rabbit arrival lateness (minutes):
# range, binned frequency table, two histogram views, and the usual
# centre measures (mean, median, mode).

# Observed lateness values, in minutes
rabbitLateness <- c(40, 30, 20, 45, 60, 120, 35, 40, 55, 55, 53, 22, 27,
                    48, 62, 33, 35, 40, 45)

# Smallest and largest observation
range(rabbitLateness)

# Bin edges: 10-minute-wide intervals from 20 up to 130
bins <- seq(20, 130, by = 10)

# Assign each observation to its interval; right = FALSE makes intervals
# closed on the left, i.e. [20,30), [30,40), ...
intervals <- cut(rabbitLateness, bins, right = FALSE)
interval_freq <- table(intervals)
interval_freq

# Histogram drawn two ways: from the frequency table, and directly
plot(interval_freq, type = "h", main = "Rabbit Arrival Histogram",
     xlab = "Intervals", ylab = "Frequency")
hist(rabbitLateness, breaks = bins)

# Centre measures
mean(rabbitLateness)
median(rabbitLateness)

# Mode: the most frequent value (first entry of the count table sorted
# descending); the printed name is the value, the number is its count
value_counts <- table(rabbitLateness)
sort(value_counts, decreasing = TRUE)[1]
/R2_range_freq_mean.R
no_license
evahegnar/R
R
false
false
592
r
# Create vector of values rabbitLateness <- c(40, 30, 20, 45, 60, 120, 35, 40, 55, 55, 53, 22, 27, 48, 62, 33, 35, 40, 45) # Range range(rabbitLateness) # Create intervals bins <- seq(20, 130, by = 10) # Frequency table intervals <- cut(rabbitLateness, bins, right = FALSE) table(intervals) # Histogram plot(table(intervals), type = "h", main = "Rabbit Arrival Histogram", xlab = "Intervals", ylab = "Frequency") hist(rabbitLateness, breaks = bins) # Mean mean(rabbitLateness) # Median median(rabbitLateness) # Mode sort(table(rabbitLateness), decreasing = TRUE)[1]
plot4 <- function() {
  ## Build "plot4.png": a 2x2 panel of household power-consumption graphs
  ## for 2007-02-01 and 2007-02-02 (UCI data set).
  ## Side effects: downloads and unzips the data into the working
  ## directory, then writes plot4.png there. No return value.

  ## define zip_url
  zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"

  ## download .zip file to working directory
  ## FIX: mode = "wb" — without it the binary zip is corrupted on Windows
  download.file(zip_url, destfile = "project_data.zip", mode = "wb")

  ## unzip data file to "project" folder in working directory
  unzip("project_data.zip", exdir = "project")

  ## create column class types for the semicolon-separated file
  types <- c("character", "character", "numeric", "numeric", "numeric",
             "numeric", "numeric", "numeric", "numeric")

  ## read full file into R ("?" marks missing values)
  ## NOTE(review): assumes the zip contains exactly one file — confirm
  data <- read.table(file.path(getwd(), "project", list.files("./project")),
                     header = TRUE, sep = ";", na.strings = c("?"),
                     colClasses = types)

  ## create subset for the two target dates
  sub <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")

  ## order subset by Date and Time
  sub <- sub[order(sub$Date, sub$Time), ]

  ## convert Date column to Date class
  sub$Date <- as.Date(sub$Date, "%d/%m/%Y")

  ## x-axis labels: abbreviated weekday names for the two days plus the
  ## day after (the right-hand axis boundary)
  days <- unique(sub$Date)
  days <- c(days, days[2] + 1)
  x <- weekdays(days, abbreviate = TRUE)

  ## tick positions: start of data, first 2007-02-02 record, end of data
  dt <- sub$Date == "2007-02-02"
  x_tick <- c(0, min(which(dt)), nrow(sub))

  ## open the png device; on.exit guarantees it is closed even if a
  ## plotting call below fails (replaces the trailing dev.off())
  png(file = "plot4.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)

  ## set up 2x2 panel for multiple plots
  par(mfrow = c(2, 2))

  ## top-left: Global Active Power as a line graph
  plot(sub$Global_active_power, type = "l",
       ylab = "Global Active Power", xlab = NA, xaxt = "n")
  axis(side = 1, at = x_tick, labels = x, tck = -.04)

  ## top-right: Voltage as a line graph
  plot(sub$Voltage, type = "l", ylab = "Voltage",
       xlab = "datetime", xaxt = "n")
  axis(side = 1, at = x_tick, labels = x, tck = -.04)

  ## bottom-left: three sub-metering series on one empty frame
  plot(sub$Sub_metering_1, type = "n",
       ylab = "Energy sub metering", xlab = NA, xaxt = "n")
  lines(sub$Sub_metering_1)
  lines(sub$Sub_metering_2, col = "orangered3")
  lines(sub$Sub_metering_3, col = "mediumblue")
  legend("topright", lty = c(1, 1, 1),
         col = c("black", "orangered3", "mediumblue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty = "n")
  axis(side = 1, at = x_tick, labels = x, tck = -.04)

  ## bottom-right: Global Reactive Power as a line graph
  with(sub, plot(Global_reactive_power, type = "l",
                 xlab = "datetime", xaxt = "n"))
  axis(side = 1, at = x_tick, labels = x, tck = -.04)
}
/plot4.R
no_license
marktgross/ExData_Plotting1
R
false
false
3,417
r
plot4 <- function() { ##define zip_url zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" ##download .zip file to working directory download.file(zip_url, destfile = "project_data.zip") ##unzip data file to "project" folder in working directory unzip("project_data.zip", exdir = "project") ##create column class types types <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric") ##read full file into R data <- read.table(paste(getwd(), "project", list.files("./project"), sep = "/"), header = TRUE, sep = ";", na.strings = c("?"), colClasses = types) ##create subset for specific dates sub <- subset(data, Date == "1/2/2007"|Date == "2/2/2007") ##order subset by Date and Time sub <- sub[order(sub$Date, sub$Time), ] ##convert Date column to date format sub$Date <- as.Date(sub$Date, "%d/%m/%Y") ##create x-axis labels (abbreviated weekdays) ##find days included days <- unique(sub$Date) days <- c(days, days[2] + 1) ##create abbreviated weekdays vector x <- weekdays(days, abbreviate = TRUE) ##figure out where tick marks should be ##create logical vector to determine where 2007-02-02 records are in the data set dt <- sub$Date == "2007-02-02" ##create vector of x-axis positions ##first position = 0 ##second position = first occurrence of 2007-02-02 ##third position = number of rows in the subset x_tick <- c(0, min(which(dt == TRUE)), nrow(sub)) ##set up png file png(file = "plot4.png", width = 480, height = 480) ##set up 2x2 panel for multiple plots par(mfrow = c(2, 2)) ##create plot for top left quadrant ##plot Global Active Power data as a line graph plot(sub$Global_active_power, type = "l", ylab = "Global Active Power", xlab = NA, xaxt = "n") ##add x-axis tick marks and labels axis(side = 1, at = x_tick, labels = x, tck = -.04) ##create plot for top right quadrant ##plot Voltage data as a line graph plot(sub$Voltage, type = "l", ylab = "Voltage", xlab = "datetime", xaxt = "n") 
##add x-axis tick marks and labels axis(side = 1, at = x_tick, labels = x, tck = -.04) ##create plot for bottom left quadrant ##create empty plot for sub metering plot(sub$Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = NA, xaxt = "n") ##add sub metering lines to empty graph with appropriate colors lines(sub$Sub_metering_1) lines(sub$Sub_metering_2, col = "orangered3") lines(sub$Sub_metering_3, col = "mediumblue") ##add legend to the graph legend("topright", lty = c(1, 1, 1), col = c("black", "orangered3", "mediumblue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n") ##add x-axis tick marks and labels axis(side = 1, at = x_tick, labels = x, tck = -.04) ##create plot for bottom right quadrant ##plot Global Reactive Power as line graph with(sub, plot(Global_reactive_power, type = "l", xlab = "datetime", xaxt = "n")) ##add x-axis tick marks and labels axis(side = 1, at = x_tick, labels = x, tck = -.04) ##run dev.off() to close the png file and save it to working directory dev.off() }
library(plyr) library(ggplot2) require(reshape2) library(nlme) library(gridExtra) require(scales) #mytheme <-theme_bw(base_size=30) + theme(plot.title = element_text(vjust=2), panel.margin= unit(0.75, "lines"), axis.title.y = element_text(vjust=0), #plot.margin=unit(c(1,1,1.5,1.2),"cm"), panel.border = element_rect(fill = NA, colour = "grey", linetype=1, size = 1)) mytheme <-theme_bw(base_size=30) + theme(plot.title = element_text(vjust=2), panel.margin= unit(0.75, "lines"), axis.title.y = element_text(vjust=0), plot.margin=unit(c(1,1,1.5,1.2),"cm"), panel.border = element_rect(fill = NA, colour = "grey", linetype=1, size = 1)) + theme(strip.background = element_rect(fill = 'white')) ### Function to change facet label Facet_label <- function(var, value){ value <- as.character(value) if (var=="Treatment") { value[value=="large"] <- "Large Prey" value[value=="small"] <- "Small Prey" } else if (var=="Instar") { value[value=="Sub1"] <- "Subadult 1" value[value=="Sub2"] <- "Subadult 2" } return(value) } give.n <- function(x){ return(c(y = mean(x), label = length(x) )) } #### Box trails graphs. Code importing and manipulating the data is in BoxTrialsData.R source("G:/PhDWork/EclipseWorkspace/R/EcuRCode/BoxTrials/BoxTrialsData.R") ################### Histograms ###################################################### ##Looking at the total time eating in box. Need to remove maybe all records time < 1hour? 
BoxSubset<-subset(BoxCombo, TimeOfDay == "morn") BoxEating <- aggregate(BoxSubset, by = list(BoxSubset$TrialID), FUN = mean) ggplot(BoxEating, aes(x= x)) + geom_histogram(binwidth = 15) BoxWeight <- unique(subset(BoxCombo, select = c(Weight.1, Instar))) ggplot(BoxWeight, aes(x=(Weight.1)) ) + geom_histogram() + facet_wrap(~Instar) ######### Barplot Capture vs eating ################################################### ##################################################################################### CapVsEat <-subset(BoxCombo, select = c("FeedIndPos", "CaptureIndPos", "Treatment", "Instar", "LogHunger") ) CapVsEat <-na.omit(CapVsEat) CapVsEat$FeedIndPos <- factor(CapVsEat$FeedIndPos, levels =c("y", "n")) CapVsEat$FeedAndCap <- paste("Cap", CapVsEat$CaptureIndPos, "Feed", CapVsEat$FeedIndPos) pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/CaptureVsFeed.pdf", width = 10, height = 10) ##separate bars; sub 1 and 2 combined ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill", colour = "black") + xlab("Participated in Prey Capture") + ylab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme ##just bottom bar ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill") + xlab("Participated in Prey Capture") + ylab("No. 
of Individuals That Fed") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme + scale_y_continuous(labels = percent)+ scale_fill_manual("FeedIndPos", values = c("darkblue", "white")) + theme(legend.position = "none") ##comparing the proportion of eaters and captures by TREATMENT ## no top bar ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill") + xlab("Participated in Prey Capture")+ ylab("Percentage of Individuals That Fed") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme + scale_y_continuous(labels = percent) + facet_grid(.~Treatment, labeller = Facet_label) + scale_fill_manual("FeedIndPos", values = c("darkblue", "white")) + theme(legend.position = "none") ggplot(data=CapVsEat, aes(x=FeedIndPos, fill = CaptureIndPos)) + geom_bar(stat="bin", position="fill", colour = "black") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not Feed")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Involved with\nprey capture?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(~Instar) ggplot(data=subset(CapVsEat, FeedIndPos == "y"), aes(x= FeedAndCap, y = LogHunger)) + geom_boxplot() + facet_wrap(~Instar + Treatment) dev.off() ######################################################################################### ########graph of number of individuals and total duration vs prey size and instar######## ####removing evening feeds as no or little feeding observations ##Counting the number of individuals eating in each trial 
pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/NoAndDurationFeeding.pdf", onefile = "TRUE") ##graph total number of individuals feeding vs prey size ggplot(AveByTrial, aes(x=Treatment, y=noFeed)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Log of Total number of spiders fed on prey against prey size") + xlab("Prey Size") + ylab("Total number of spiders feeding on prey") + scale_y_log10() + stat_summary(fun.data = give.n, geom = "text") #Number feeding vs treatment ggplot(AveByTrial, aes(x=Treatment, y=noFeed)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Total number of spiders fed on prey against prey size") + xlab("Prey Size") + ylab("Total number of spiders feeding on prey by instar") + facet_wrap(~Instar) + scale_y_log10() ##### total box time eating vs prey ggplot(AveByTrial, aes(x=Treatment, y=feedDur)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Log of Total amount of time feeding on prey per box") + ylab("Total time feeding (mins)") + xlab("Prey Size") + scale_y_log10() #total box time eating vs treatment by instar ggplot(AveByTrial, aes(x=Treatment, y=feedDur)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + facet_wrap(~Instar) + ggtitle("Log of Total amount of time feeding on prey per box by instar") + ylab("Total time feeding (mins)") + xlab("Prey Size") + scale_y_log10() ################################ Feeding fraction ##################################### ##Feed Fraction histograms ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(ASFeedFrac)) + geom_histogram() #asin makes it more normal ####graph of individual feeding fractionvs prey size and instar (graph looks pretty much the same # with zeros included compared to no zeros included ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(x=Treatment, y=ASFeedFrac)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + 
ggtitle("Fraction of time feeding by individual (no eaters removed)") + ylab("Fraction of time spent eating prey by each individual") + xlab("Prey Size") # feeding fraction by instar ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(x=Treatment, y=ASFeedFrac)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Fraction of time feeding by individual (zero eaters removed)") + ylab("Fraction of time spent eating prey by each individual") + xlab("Prey Size") + facet_wrap(~Instar) ggplot(subset(BoxComboMorn, FeedFraction > 0), aes(x= Hunger, y = ASFeedFrac, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Feeding Fraction against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") dev.off() ########################################################################################## ##Feeding duration (rank?) vs weight rank ##need to combine small trials ...... 
####remove evening trials and moulted individuals pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/InitalWeights.pdf", onefile = "TRUE") # weight rank vs rank eating duration by treatment ggplot(BoxComboAve, aes(x = Rank.Weights, y = RankEatDur.Mean)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Weight ranked within box vs time eating ranked within box ") + ylab("Rank of Time Eating") + xlab("Weight rank within box") + facet_wrap(~Treatment) # leg length rank vs rank of eating duration ggplot(BoxComboAve, aes(x = Rank.Legs, y = RankEatDur.Mean, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Rank of leg eating vs rank of time eating") + ylab("Rank of time eating") + xlab("Rank of leg length") # hunger by sum of time eating ggplot(BoxComboAve, aes(x= Hunger, y = SumIndEat, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") # hunger by sum of time eating by instar ggplot(subset(BoxComboAve, SumIndEat>0), aes(x= Hunger, y = SumIndEat, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 2, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") dev.off() pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/FeedingAndHunger.pdf", width= 16, height =8.5) # hunger boxplot by ate or didn't ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = LogHunger)) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Hunger") + xlab("")+ scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboMorn, IndFeed == 
"y"), aes(x= LogHunger, y = TimeEatingLog1, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level- zeros removed") + facet_wrap(Treatment~Instar, scales = "free_x") ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = log10(1/Hunger))) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Condition") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboMorn, IndCapture != "NA") , aes(x = IndCapture, y = log10(1/Hunger))) + geom_boxplot(aes(fill = IndCapture)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Condition") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Capture", "Did Not\nCapture")) + coord_flip() + guides(fill = FALSE) ## boxplot with rank and instar combined ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = Rank.Cond)) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(~Treatment ) + mytheme + ylab("Condition Rank") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboAve, IndCapture != "NA") , aes(x = IndCapture, y = Rank.Cond)) + geom_boxplot(aes(fill = IndCapture)) + facet_grid(~Treatment ) + mytheme + ylab("Condition Rank") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Capture", "Did Not\nCapture")) + coord_flip() + guides(fill = FALSE) ## Logistic regression ggplot(subset(BoxComboMorn, IndFeed != "NA"), aes(x = Cond.Scal, y = IndFeedNum, colour = Treatment)) + geom_point() + stat_smooth(method="glm", family="binomial", se=FALSE) + mytheme #+ facet_wrap(~Instar, scales = "free" ) ggplot(subset(BoxComboMorn, IndFeed != "NA"), aes(x = Cond.Scal, y = IndCapNum, colour = Treatment)) + geom_point() + 
stat_smooth(method="glm", family="binomial", se=FALSE) + mytheme #ggplot(Weights, aes(x = Cond.Scal, fill = Instar)) + geom_histogram() dev.off() ##################################################################################### #### Behavior vs physiological things # Poke rating vs inital weight by instar ggplot(Weights, aes(x= AvePokeRating, y = Weight.1)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + facet_wrap(~Instar, scales = "free_y") # Boldness vs inital weight by instar ggplot(Weights, aes(x= AveBoldness, y = Weight.1)) + geom_point() + facet_wrap(~Instar, scales = "free_y") + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) # Boldness vs hunger by instar ggplot(Weights, aes(x= AveBoldness, y = Hunger)) + geom_point() + facet_wrap(~Instar, scales = "free_y") + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) # Boldness vs leg length by instar ggplot(Weights, aes(x= AveBoldness, y = LegLen.mm)) + geom_point() + facet_wrap(~Instar) + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) ####################################################################################### ####### Behaviours over time ######################### # Poke time1 vs time2 barchart although NA's are included ANNOYING!! 
ggplot(data=Weights, aes(x=Poke.1, fill = as.factor(Poke.2))) + geom_bar(stat="bin", position="fill", colour = "black") pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/BehaviourOverTime.pdf") # Poke1 vs Poke2 ggplot(data=Weights, aes(x= PokeRating.1, y = PokeRating.2)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) + ggtitle("Poke rating of same spider at different times (jittered points)") # Boldness1 vs Boldness2 ggplot(data = Weights, aes(x = BoldnessRank.1, y = BoldnessRank.2)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) + ggtitle("Boldness rank of same spider at different times (jittered points)") dev.off() #Bar chart of cup drop1 vs cup drop2 ggplot(data= Weights, aes(x = as.factor(CupDrop.2), fill = as.factor(CupDrop.1))) + geom_bar(stat="bin", position="fill", colour = "black") # Bar chart of box drop1 vs box drop 2 ggplot(data= Weights, aes(x = as.factor(DropBox.2), fill = as.factor(BoxDrop.1))) + geom_bar(stat="bin", position="fill", colour = "black") ##################################################################################### ##Behaviour vs feeding and capture pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/Behaviour.pdf") # Capture vs boldness by instar ggplot(BoxComboAve, aes(x= AveCap, y = AveBoldness)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 1 , raw = TRUE), se = TRUE) + facet_wrap(~Instar) + ggtitle("Ave capture vs average boldness rating") # Capture vs poke by instar ggplot(BoxComboAve, aes(x= AveCap, y = AvePokeRating)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 1 , raw = TRUE), se = TRUE)+ facet_wrap(~Instar) + ggtitle("Ave capture vs average poke rating") # Move at all vs capture ggplot(subset(BoxComboAve, Cap != "NA"), 
aes(x=Move, fill = Cap)) + geom_bar(stat="bin", position="fill", colour = "black") + ggtitle("Move at all during boldness test vs particitpated in capture") + scale_x_discrete(breaks=c("y", "n"), labels=c("Moved", "Did Not Move")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Involved with\nprey capture?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(Instar~Treatment) #Move at all vs feed ggplot(subset(BoxComboAve, Feed != "NA"), aes(x=Move, fill = Feed)) + geom_bar(stat="bin", position="fill", colour = "black") + ggtitle("Move at all during boldness test with eat at all") + scale_x_discrete(breaks=c("y", "n"), labels=c("Moved", "Did Not Move")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Ate Food?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(Instar~Treatment) + geom_text(mean(x)) ##Boldness against instar ggplot(subset(BoxComboAve), aes(x=Instar, fill = Move)) + geom_bar(stat="bin", position="fill", colour = "black") dev.off() ggplot() ############################################################################ #Difference in weights.. need percentage change in weight? # (1) Feeding time vs weight change ggplot(BoxComboAve, aes(x=SumIndEat, y = WeightDiffPer)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) ggplot(AveByTrial, aes(x = Treatment, y = SimpAlt )) + geom_boxplot() + facet_wrap(~Instar) ##################################################################################### ##### Pielou's J graphs #histogram ggplot(AveByTrial, aes(AsinPJEven)) + geom_histogram() pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/PJEven.pdf", width = 12, height =10) SubsetAveByTrial<- subset(AveByTrial)#, PJEven > -1) # not sure why the - 1 is there. 
#(SubsetAveByTrial, aes(x= Treatment, y =AsinPJEven)) + geom_boxplot() + mytheme + ylab("asin of box evenness") + xlab("Prey Size") ggplot(SubsetAveByTrial, aes(x= Treatment, y =PJEven)) + geom_boxplot() + mytheme + ylab("Intragroup Evenness") + xlab("Prey Size")# + #stat_summary(fun.y = "mean" , label = length(x), geom = "text") #ggplot(SubsetAveByTrial, aes(x= Treatment, y =AsinPJEven)) + geom_boxplot() + facet_wrap(~Instar) + mytheme + ylab("asin of box evenness") + xlab("Prey Size") ggplot(SubsetAveByTrial, aes(x= Treatment, y =PJEven)) + geom_boxplot() + facet_grid(.~Instar, labeller = Instr_label ) + mytheme + ylab("Intragroup Evenness") + xlab("Prey Size") #+ theme(strip.background = element_rect(fill = 'white')) #+ stat_summary(fun.data = give.n, geom = "text") dev.off() ################################################################################# ##histogram of hunger ggplot(BoxComboMorn, aes((Hunger))) + geom_histogram() + facet_wrap(~Instar) ggplot(BoxComboMorn, aes(x = Treatment, y= Hunger)) + geom_boxplot() + facet_wrap(~Instar) BoxEatGraph <-subset(BoxComboMorn, FeedIndPos =="y" & BoxComboMorn$CaptureIndPos != "NA") ## if having eaten weight with capture pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/HavingEaten-Capture.pdf", width = 11, height =7.5) ggplot(BoxMornFeedOnly, aes(x=Cap, y = LogCond)) + geom_boxplot() + facet_grid(~ Treatment, labeller = Facet_label) + ylab("Log Condition") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) ggplot(BoxMornFeedOnly, aes(x=Cap, y = LogCond)) + geom_boxplot() + facet_wrap(~Treatment + Instar) + ylab("Log Condition") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) # stat_summary(fun.data = give.n, geom = "text") this adds sample sizes ggplot(BoxMornFeedOnly, aes(x=CaptureIndPos, y = Rank.Cond)) + geom_boxplot() + facet_grid(~Treatment, labeller = Facet_label) + ylab("Condition Rank in Box") + xlab("Captured 
Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) ggplot(BoxMornFeedOnly, aes(x=CaptureIndPos, y = Rank.Cond)) + geom_boxplot() + facet_wrap(~Treatment+ Instar)+ ylab("Condition Rank in Box") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) #stat_summary(fun.data = give.n, geom = "text") ggplot(BoxMornFeedOnly, aes(x=CapAndFeed, y = Cond.Scal)) + stat_boxplot(geom ='errorbar') + geom_boxplot() + facet_wrap(~Treatment) + coord_flip() + ylab("Scaled Condition") + xlab("") + mytheme + theme(axis.text.y=element_text(angle=45)) + scale_x_discrete(breaks = c("NC+E", "C+NE", "C+E"),labels = c("Cheater", "No Eat, Capture", "Cooperator")) dev.off() ### Numbers of each within box ggplot(FdCapByTrial, aes(x = Treatment, y = value)) + geom_boxplot() + facet_grid(Cap ~ Eat) + mytheme + ylab("Number of spiders") + xlab("Prey Size")
/EcuRCode/BoxTrials/OldCode/BoxTrialsGraphs.R
no_license
ruthubc/ruthubc
R
false
false
20,981
r
library(plyr) library(ggplot2) require(reshape2) library(nlme) library(gridExtra) require(scales) #mytheme <-theme_bw(base_size=30) + theme(plot.title = element_text(vjust=2), panel.margin= unit(0.75, "lines"), axis.title.y = element_text(vjust=0), #plot.margin=unit(c(1,1,1.5,1.2),"cm"), panel.border = element_rect(fill = NA, colour = "grey", linetype=1, size = 1)) mytheme <-theme_bw(base_size=30) + theme(plot.title = element_text(vjust=2), panel.margin= unit(0.75, "lines"), axis.title.y = element_text(vjust=0), plot.margin=unit(c(1,1,1.5,1.2),"cm"), panel.border = element_rect(fill = NA, colour = "grey", linetype=1, size = 1)) + theme(strip.background = element_rect(fill = 'white')) ### Function to change facet label Facet_label <- function(var, value){ value <- as.character(value) if (var=="Treatment") { value[value=="large"] <- "Large Prey" value[value=="small"] <- "Small Prey" } else if (var=="Instar") { value[value=="Sub1"] <- "Subadult 1" value[value=="Sub2"] <- "Subadult 2" } return(value) } give.n <- function(x){ return(c(y = mean(x), label = length(x) )) } #### Box trails graphs. Code importing and manipulating the data is in BoxTrialsData.R source("G:/PhDWork/EclipseWorkspace/R/EcuRCode/BoxTrials/BoxTrialsData.R") ################### Histograms ###################################################### ##Looking at the total time eating in box. Need to remove maybe all records time < 1hour? 
BoxSubset<-subset(BoxCombo, TimeOfDay == "morn") BoxEating <- aggregate(BoxSubset, by = list(BoxSubset$TrialID), FUN = mean) ggplot(BoxEating, aes(x= x)) + geom_histogram(binwidth = 15) BoxWeight <- unique(subset(BoxCombo, select = c(Weight.1, Instar))) ggplot(BoxWeight, aes(x=(Weight.1)) ) + geom_histogram() + facet_wrap(~Instar) ######### Barplot Capture vs eating ################################################### ##################################################################################### CapVsEat <-subset(BoxCombo, select = c("FeedIndPos", "CaptureIndPos", "Treatment", "Instar", "LogHunger") ) CapVsEat <-na.omit(CapVsEat) CapVsEat$FeedIndPos <- factor(CapVsEat$FeedIndPos, levels =c("y", "n")) CapVsEat$FeedAndCap <- paste("Cap", CapVsEat$CaptureIndPos, "Feed", CapVsEat$FeedIndPos) pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/CaptureVsFeed.pdf", width = 10, height = 10) ##separate bars; sub 1 and 2 combined ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill", colour = "black") + xlab("Participated in Prey Capture") + ylab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme ##just bottom bar ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill") + xlab("Participated in Prey Capture") + ylab("No. 
of Individuals That Fed") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme + scale_y_continuous(labels = percent)+ scale_fill_manual("FeedIndPos", values = c("darkblue", "white")) + theme(legend.position = "none") ##comparing the proportion of eaters and captures by TREATMENT ## no top bar ggplot(data=CapVsEat, aes(x=CaptureIndPos, fill = FeedIndPos)) + geom_bar(stat="bin", position="fill") + xlab("Participated in Prey Capture")+ ylab("Percentage of Individuals That Fed") + scale_x_discrete(breaks=c("y", "n"), labels=c("Yes", "No")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Fed?", breaks = c("y", "n"), labels = c("Yes", "No")) + mytheme + scale_y_continuous(labels = percent) + facet_grid(.~Treatment, labeller = Facet_label) + scale_fill_manual("FeedIndPos", values = c("darkblue", "white")) + theme(legend.position = "none") ggplot(data=CapVsEat, aes(x=FeedIndPos, fill = CaptureIndPos)) + geom_bar(stat="bin", position="fill", colour = "black") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not Feed")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Involved with\nprey capture?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(~Instar) ggplot(data=subset(CapVsEat, FeedIndPos == "y"), aes(x= FeedAndCap, y = LogHunger)) + geom_boxplot() + facet_wrap(~Instar + Treatment) dev.off() ######################################################################################### ########graph of number of individuals and total duration vs prey size and instar######## ####removing evening feeds as no or little feeding observations ##Counting the number of individuals eating in each trial 
pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/NoAndDurationFeeding.pdf", onefile = "TRUE") ##graph total number of individuals feeding vs prey size ggplot(AveByTrial, aes(x=Treatment, y=noFeed)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Log of Total number of spiders fed on prey against prey size") + xlab("Prey Size") + ylab("Total number of spiders feeding on prey") + scale_y_log10() + stat_summary(fun.data = give.n, geom = "text") #Number feeding vs treatment ggplot(AveByTrial, aes(x=Treatment, y=noFeed)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Total number of spiders fed on prey against prey size") + xlab("Prey Size") + ylab("Total number of spiders feeding on prey by instar") + facet_wrap(~Instar) + scale_y_log10() ##### total box time eating vs prey ggplot(AveByTrial, aes(x=Treatment, y=feedDur)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Log of Total amount of time feeding on prey per box") + ylab("Total time feeding (mins)") + xlab("Prey Size") + scale_y_log10() #total box time eating vs treatment by instar ggplot(AveByTrial, aes(x=Treatment, y=feedDur)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + facet_wrap(~Instar) + ggtitle("Log of Total amount of time feeding on prey per box by instar") + ylab("Total time feeding (mins)") + xlab("Prey Size") + scale_y_log10() ################################ Feeding fraction ##################################### ##Feed Fraction histograms ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(ASFeedFrac)) + geom_histogram() #asin makes it more normal ####graph of individual feeding fractionvs prey size and instar (graph looks pretty much the same # with zeros included compared to no zeros included ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(x=Treatment, y=ASFeedFrac)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + 
ggtitle("Fraction of time feeding by individual (no eaters removed)") + ylab("Fraction of time spent eating prey by each individual") + xlab("Prey Size") # feeding fraction by instar ggplot((subset(BoxComboMorn, FeedFraction > 0)), aes(x=Treatment, y=ASFeedFrac)) + geom_boxplot() + stat_summary(fun.y=mean, geom="point", shape=5, size=4) + ggtitle("Fraction of time feeding by individual (zero eaters removed)") + ylab("Fraction of time spent eating prey by each individual") + xlab("Prey Size") + facet_wrap(~Instar) ggplot(subset(BoxComboMorn, FeedFraction > 0), aes(x= Hunger, y = ASFeedFrac, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Feeding Fraction against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") dev.off() ########################################################################################## ##Feeding duration (rank?) vs weight rank ##need to combine small trials ...... 
####remove evening trials and moulted individuals pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/InitalWeights.pdf", onefile = "TRUE") # weight rank vs rank eating duration by treatment ggplot(BoxComboAve, aes(x = Rank.Weights, y = RankEatDur.Mean)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Weight ranked within box vs time eating ranked within box ") + ylab("Rank of Time Eating") + xlab("Weight rank within box") + facet_wrap(~Treatment) # leg length rank vs rank of eating duration ggplot(BoxComboAve, aes(x = Rank.Legs, y = RankEatDur.Mean, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Rank of leg eating vs rank of time eating") + ylab("Rank of time eating") + xlab("Rank of leg length") # hunger by sum of time eating ggplot(BoxComboAve, aes(x= Hunger, y = SumIndEat, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") # hunger by sum of time eating by instar ggplot(subset(BoxComboAve, SumIndEat>0), aes(x= Hunger, y = SumIndEat, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 2, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level (head length / weight") + facet_wrap(Treatment~Instar, scales = "free_x") dev.off() pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/FeedingAndHunger.pdf", width= 16, height =8.5) # hunger boxplot by ate or didn't ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = LogHunger)) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Hunger") + xlab("")+ scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboMorn, IndFeed == 
"y"), aes(x= LogHunger, y = TimeEatingLog1, colour = Treatment)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + ggtitle("Total Time Eating against hunger level- zeros removed") + facet_wrap(Treatment~Instar, scales = "free_x") ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = log10(1/Hunger))) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Condition") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboMorn, IndCapture != "NA") , aes(x = IndCapture, y = log10(1/Hunger))) + geom_boxplot(aes(fill = IndCapture)) + facet_grid(Instar ~ Treatment, labeller = Facet_label ) + mytheme + ylab("Log Condition") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Capture", "Did Not\nCapture")) + coord_flip() + guides(fill = FALSE) ## boxplot with rank and instar combined ggplot(subset(BoxComboMorn, IndFeed != "NA") , aes(x = IndFeed, y = Rank.Cond)) + geom_boxplot(aes(fill = IndFeed)) + facet_grid(~Treatment ) + mytheme + ylab("Condition Rank") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Fed", "Did Not\nFeed")) + coord_flip() + guides(fill = FALSE) ggplot(subset(BoxComboAve, IndCapture != "NA") , aes(x = IndCapture, y = Rank.Cond)) + geom_boxplot(aes(fill = IndCapture)) + facet_grid(~Treatment ) + mytheme + ylab("Condition Rank") + xlab("") + scale_x_discrete(breaks=c("y", "n"), labels=c("Capture", "Did Not\nCapture")) + coord_flip() + guides(fill = FALSE) ## Logistic regression ggplot(subset(BoxComboMorn, IndFeed != "NA"), aes(x = Cond.Scal, y = IndFeedNum, colour = Treatment)) + geom_point() + stat_smooth(method="glm", family="binomial", se=FALSE) + mytheme #+ facet_wrap(~Instar, scales = "free" ) ggplot(subset(BoxComboMorn, IndFeed != "NA"), aes(x = Cond.Scal, y = IndCapNum, colour = Treatment)) + geom_point() + 
stat_smooth(method="glm", family="binomial", se=FALSE) + mytheme #ggplot(Weights, aes(x = Cond.Scal, fill = Instar)) + geom_histogram() dev.off() ##################################################################################### #### Behavior vs physiological things # Poke rating vs inital weight by instar ggplot(Weights, aes(x= AvePokeRating, y = Weight.1)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) + facet_wrap(~Instar, scales = "free_y") # Boldness vs inital weight by instar ggplot(Weights, aes(x= AveBoldness, y = Weight.1)) + geom_point() + facet_wrap(~Instar, scales = "free_y") + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) # Boldness vs hunger by instar ggplot(Weights, aes(x= AveBoldness, y = Hunger)) + geom_point() + facet_wrap(~Instar, scales = "free_y") + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) # Boldness vs leg length by instar ggplot(Weights, aes(x= AveBoldness, y = LegLen.mm)) + geom_point() + facet_wrap(~Instar) + geom_smooth(method = "lm", formula =y ~ poly(x, 1, raw = TRUE), se = TRUE) ####################################################################################### ####### Behaviours over time ######################### # Poke time1 vs time2 barchart although NA's are included ANNOYING!! 
ggplot(data=Weights, aes(x=Poke.1, fill = as.factor(Poke.2))) + geom_bar(stat="bin", position="fill", colour = "black") pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/BehaviourOverTime.pdf") # Poke1 vs Poke2 ggplot(data=Weights, aes(x= PokeRating.1, y = PokeRating.2)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) + ggtitle("Poke rating of same spider at different times (jittered points)") # Boldness1 vs Boldness2 ggplot(data = Weights, aes(x = BoldnessRank.1, y = BoldnessRank.2)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) + ggtitle("Boldness rank of same spider at different times (jittered points)") dev.off() #Bar chart of cup drop1 vs cup drop2 ggplot(data= Weights, aes(x = as.factor(CupDrop.2), fill = as.factor(CupDrop.1))) + geom_bar(stat="bin", position="fill", colour = "black") # Bar chart of box drop1 vs box drop 2 ggplot(data= Weights, aes(x = as.factor(DropBox.2), fill = as.factor(BoxDrop.1))) + geom_bar(stat="bin", position="fill", colour = "black") ##################################################################################### ##Behaviour vs feeding and capture pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/Behaviour.pdf") # Capture vs boldness by instar ggplot(BoxComboAve, aes(x= AveCap, y = AveBoldness)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 1 , raw = TRUE), se = TRUE) + facet_wrap(~Instar) + ggtitle("Ave capture vs average boldness rating") # Capture vs poke by instar ggplot(BoxComboAve, aes(x= AveCap, y = AvePokeRating)) + geom_jitter(position = position_jitter(w = 0.1, h = 0.1)) + geom_smooth(method = "lm", formula =y ~ poly(x, 1 , raw = TRUE), se = TRUE)+ facet_wrap(~Instar) + ggtitle("Ave capture vs average poke rating") # Move at all vs capture ggplot(subset(BoxComboAve, Cap != "NA"), 
aes(x=Move, fill = Cap)) + geom_bar(stat="bin", position="fill", colour = "black") + ggtitle("Move at all during boldness test vs particitpated in capture") + scale_x_discrete(breaks=c("y", "n"), labels=c("Moved", "Did Not Move")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Involved with\nprey capture?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(Instar~Treatment) #Move at all vs feed ggplot(subset(BoxComboAve, Feed != "NA"), aes(x=Move, fill = Feed)) + geom_bar(stat="bin", position="fill", colour = "black") + ggtitle("Move at all during boldness test with eat at all") + scale_x_discrete(breaks=c("y", "n"), labels=c("Moved", "Did Not Move")) + theme(axis.text=element_text(colour="black"), axis.title = element_blank()) + scale_fill_discrete(name = "Ate Food?", breaks = c("n", "y"), labels = c("No", "Yes")) + facet_wrap(Instar~Treatment) + geom_text(mean(x)) ##Boldness against instar ggplot(subset(BoxComboAve), aes(x=Instar, fill = Move)) + geom_bar(stat="bin", position="fill", colour = "black") dev.off() ggplot() ############################################################################ #Difference in weights.. need percentage change in weight? # (1) Feeding time vs weight change ggplot(BoxComboAve, aes(x=SumIndEat, y = WeightDiffPer)) + geom_point() + geom_smooth(method = "lm", formula =y ~ poly(x, 2 , raw = TRUE), se = TRUE) ggplot(AveByTrial, aes(x = Treatment, y = SimpAlt )) + geom_boxplot() + facet_wrap(~Instar) ##################################################################################### ##### Pielou's J graphs #histogram ggplot(AveByTrial, aes(AsinPJEven)) + geom_histogram() pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/PJEven.pdf", width = 12, height =10) SubsetAveByTrial<- subset(AveByTrial)#, PJEven > -1) # not sure why the - 1 is there. 
#(SubsetAveByTrial, aes(x= Treatment, y =AsinPJEven)) + geom_boxplot() + mytheme + ylab("asin of box evenness") + xlab("Prey Size") ggplot(SubsetAveByTrial, aes(x= Treatment, y =PJEven)) + geom_boxplot() + mytheme + ylab("Intragroup Evenness") + xlab("Prey Size")# + #stat_summary(fun.y = "mean" , label = length(x), geom = "text") #ggplot(SubsetAveByTrial, aes(x= Treatment, y =AsinPJEven)) + geom_boxplot() + facet_wrap(~Instar) + mytheme + ylab("asin of box evenness") + xlab("Prey Size") ggplot(SubsetAveByTrial, aes(x= Treatment, y =PJEven)) + geom_boxplot() + facet_grid(.~Instar, labeller = Instr_label ) + mytheme + ylab("Intragroup Evenness") + xlab("Prey Size") #+ theme(strip.background = element_rect(fill = 'white')) #+ stat_summary(fun.data = give.n, geom = "text") dev.off() ################################################################################# ##histogram of hunger ggplot(BoxComboMorn, aes((Hunger))) + geom_histogram() + facet_wrap(~Instar) ggplot(BoxComboMorn, aes(x = Treatment, y= Hunger)) + geom_boxplot() + facet_wrap(~Instar) BoxEatGraph <-subset(BoxComboMorn, FeedIndPos =="y" & BoxComboMorn$CaptureIndPos != "NA") ## if having eaten weight with capture pdf("RuthEcuador2013/BoxFeedingTrials/Graphs/HavingEaten-Capture.pdf", width = 11, height =7.5) ggplot(BoxMornFeedOnly, aes(x=Cap, y = LogCond)) + geom_boxplot() + facet_grid(~ Treatment, labeller = Facet_label) + ylab("Log Condition") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) ggplot(BoxMornFeedOnly, aes(x=Cap, y = LogCond)) + geom_boxplot() + facet_wrap(~Treatment + Instar) + ylab("Log Condition") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) # stat_summary(fun.data = give.n, geom = "text") this adds sample sizes ggplot(BoxMornFeedOnly, aes(x=CaptureIndPos, y = Rank.Cond)) + geom_boxplot() + facet_grid(~Treatment, labeller = Facet_label) + ylab("Condition Rank in Box") + xlab("Captured 
Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) ggplot(BoxMornFeedOnly, aes(x=CaptureIndPos, y = Rank.Cond)) + geom_boxplot() + facet_wrap(~Treatment+ Instar)+ ylab("Condition Rank in Box") + xlab("Captured Prey?") + mytheme + scale_x_discrete(breaks = c("y", "n"),labels = c("Yes", "No")) #stat_summary(fun.data = give.n, geom = "text") ggplot(BoxMornFeedOnly, aes(x=CapAndFeed, y = Cond.Scal)) + stat_boxplot(geom ='errorbar') + geom_boxplot() + facet_wrap(~Treatment) + coord_flip() + ylab("Scaled Condition") + xlab("") + mytheme + theme(axis.text.y=element_text(angle=45)) + scale_x_discrete(breaks = c("NC+E", "C+NE", "C+E"),labels = c("Cheater", "No Eat, Capture", "Cooperator")) dev.off() ### Numbers of each within box ggplot(FdCapByTrial, aes(x = Treatment, y = value)) + geom_boxplot() + facet_grid(Cap ~ Eat) + mytheme + ylab("Number of spiders") + xlab("Prey Size")
# Copyright 2020 Observational Health Data Sciences and Informatics # # This file is part of OhdsiSharing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Format and check code: OhdsiRTools::formatRFolder() OhdsiRTools::checkUsagePackage("OhdsiSharing") OhdsiRTools::updateCopyrightYearFolder() devtools::spell_check() # Create manual and vignettes: unlink("extras/OhdsiSharing.pdf") shell("R CMD Rd2pdf ./ --output=extras/OhdsiSharing.pdf") pkgdown::build_site()
/extras/PackageMaintenance.R
permissive
rfherrerac/OhdsiSharing
R
false
false
969
r
# Copyright 2020 Observational Health Data Sciences and Informatics # # This file is part of OhdsiSharing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Format and check code: OhdsiRTools::formatRFolder() OhdsiRTools::checkUsagePackage("OhdsiSharing") OhdsiRTools::updateCopyrightYearFolder() devtools::spell_check() # Create manual and vignettes: unlink("extras/OhdsiSharing.pdf") shell("R CMD Rd2pdf ./ --output=extras/OhdsiSharing.pdf") pkgdown::build_site()
Sys.setlocale("LC_TIME", "C")
/setlocale.R
no_license
nickleeh/r_proj
R
false
false
31
r
Sys.setlocale("LC_TIME", "C")
/fejezet8.R
no_license
klajosw/r_prg
R
false
false
1,620
r
/workout03/binomial/R/binomial.R
no_license
stat133-sp19/hw-stat133-gobearscn
R
false
false
5,772
r