blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18457705cdd0841bedb141428e94ba2aa772de25
|
bb139658ab79133499b95b97f8e3ede66da56b26
|
/r/functions.R
|
8d1c086b480be9271b1fdb320b7f079024e5c018
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
abiener/MEPS-summary-tables
|
96fd68e6b98c7cb8574abd4a915d29e1257a041e
|
4f8b7acda89e384b645c78f53441f85c88ef65a9
|
refs/heads/master
| 2021-05-10T10:02:48.506480
| 2018-01-25T20:25:31
| 2018-01-25T20:25:31
| 118,946,214
| 0
| 0
| null | 2018-01-25T17:40:07
| 2018-01-25T17:40:07
| null |
UTF-8
|
R
| false
| false
| 23,885
|
r
|
functions.R
|
# Run functions -----------------------------------------------------------------
# USE AND EXP, PMED, COND
# Standardize one statistic's table into the common layout
# (rowGrp, colGrp, rowLevels, colLevels, <stat>, <stat>_se).
#
# df     : data frame of estimates for a single statistic
# stat   : statistic name; its estimate/SE columns are <stat> and <stat>_se
# rowGrp : name of the row grouping variable (string)
# colGrp : name of the column grouping variable (string)
# gather : if TRUE, wide tables (> 4 columns) are gathered first
#
# NOTE(review): mutate_() is deprecated dplyr NSE; kept as-is to preserve
# behavior. Relies on gather_wide() defined below.
standardize <- function(df, stat, rowGrp, colGrp, gather = T){
out <- df %>% select(-contains("FALSE"))
# expected names for the point estimate and its standard error
key <- c(stat, paste0(stat, "_se"))
if(ncol(out) > 4 & gather) {
out <- out %>% gather_wide(rowGrp, colGrp, altsub = "insurance_v2X")
}
# any column that is not an id/grouping column must be the estimate or SE
names(out)[!names(out) %in% c("ind", "sop", "event", rowGrp, colGrp)] <- key
out <- out %>%
mutate(ind = "Total") %>%
mutate(rowGrp = rowGrp, colGrp = colGrp)
# copy the grouping columns into generic rowLevels/colLevels when present
if(rowGrp %in% names(out)) out <- out %>% mutate_(rowLevels = rowGrp)
if(colGrp %in% names(out)) out <- out %>% mutate_(colLevels = colGrp)
out %>% select(rowGrp, colGrp, one_of(c("rowLevels", "colLevels", key)))
}
# Gather a wide estimates table (one column per group level) into long form,
# then spread back out so there is one 'stat' and one 'se' column.
#
# df      : wide data frame of estimates
# row/col : row and column grouping variable names
# altsub  : extra substring to strip from gathered column names
#
# NOTE(review): uses superseded gather_/spread and deprecated select_; the
# exact column-name munging order matters, so the code is left unchanged.
gather_wide <- function(df, row, col, altsub = ""){
grps <- c(row, col)
# the grouping variable that is spread across columns (not a real column)
spr_grp <- grps[!grps %in% names(df)]
if(length(spr_grp) == 0) spr_grp = ""
df <- df %>%
select(-contains("FALSE")) %>%
gather_("group", "coef", setdiff(names(.), c(grps,"ind"))) %>%
mutate(group = gsub(" > 0TRUE","",group)) %>%
mutate(group = gsub(spr_grp,"",group)) %>%
mutate(group = gsub(altsub,"",group)) %>%
# "stat.level" -> stat + grp; names with no dot are the stat itself
separate(group, c("stat", "grp"), sep="\\.",fill="left") %>%
mutate(stat = replace(stat,is.na(stat),"stat")) %>%
mutate(grp = factor(grp, levels = unique(grp))) %>%
spread(stat, coef)
# restore the spread grouping variable as a real column
repl = grps[!grps %in% names(df)]
df[,repl] = df$grp
df %>% select_(row, col, "stat", "se")
}
# Append `add` to dir/file as CSV, writing the header row only when the file
# does not yet exist anywhere under `dir`.
update.csv <- function(add, file, dir){
  is_new <- !(file %in% list.files(dir, recursive = TRUE))
  path <- gsub("//", "/", sprintf("%s/%s", dir, file))
  write.table(add, file = path, append = !is_new, sep = ",",
              col.names = is_new, row.names = FALSE)
}
# Check whether results matching the given key=value filters already exist
# in dir/outfile. Returns TRUE (and prints 'skipping') when at least one row
# of the CSV matches every filter; FALSE when the file is absent or no row
# matches.
#
# outfile : CSV file name (searched recursively under dir)
# ...     : named filters, e.g. done("out.csv", year = 2016, stat = "totEXP")
# dir     : directory to search (default "/")
#
# Fixes vs. original: seq_along() instead of 1:length() (which iterated once
# on an empty filter list), and base subsetting instead of the deprecated,
# injection-prone filter_(sprintf(...)).
done <- function(outfile, ..., dir = "/"){
  if(!outfile %in% list.files(dir, recursive = TRUE)) return(FALSE)
  df <- read.csv(paste0(dir, "/", outfile))
  chk <- list(...)
  for(i in seq_along(chk)){
    name <- names(chk)[i]
    value <- chk[[i]]
    # which() drops NA comparisons, matching dplyr::filter() semantics
    df <- df[which(df[[name]] == value), , drop = FALSE]
  }
  is.done <- (nrow(df) > 0)
  if(is.done) print('skipping')
  return(is.done)
}
pop <- function(vec, ...) vec[!vec %in% unlist(list(...))]
add_v2X <- function(names) names %>% append(c('agegrps_v2X', 'insurance_v2X'))
add_v3X <- function(names) names %>% append(c('agegrps_v2X', 'agegrps_v3X'))
# Return the entries of `keys` that occur in `nm` (each key is treated as a
# regex pattern); NA when none match. May return more than one match.
findKey <- function(nm, keys) {
  keys <- as.character(keys)
  is_hit <- sapply(keys, function(pattern) grepl(pattern, nm))
  matched <- keys[which(is_hit)]
  if(length(matched) == 0) {
    return(NA)
  }
  matched
}
# Merge functions -----------------------------------------------------------------
# Add event and SOP labels
# Replace coded values in the given columns of df with human-readable labels
# looked up from `dictionary`.
#
# df         : data frame to relabel
# dictionary : lookup table with the key column (`key`) and a 'values' column
# key        : name of the dictionary's key column (default "ind")
# vars       : columns of df to relabel (silently skipped when absent)
#
# Values without a dictionary entry are kept unchanged (coalesce).
# NOTE(review): mutate_() is deprecated dplyr NSE; kept as-is.
add_labels <- function(df, dictionary, key="ind",vars=c("rowLevels","colLevels")){
dictionary <- dictionary %>% mutate_if(is.factor, as.character)
vars <- vars[vars %in% colnames(df)]
for(var in vars){
df <- df %>%
mutate_(temp = var) %>%
left_join(dictionary,by = c("temp" = key)) %>%
mutate(temp = coalesce(values, temp))
df[,var] = df$temp
df <- df %>% select(-temp, -values)
}
return(df)
}
# Strip the "_v2X" / "_v3X" version suffixes from the rowGrp and colGrp
# columns of df.
rm_v2 <- function(df){
  strip_version <- function(x) gsub("_v3X", "", gsub("_v2X", "", x))
  df$rowGrp <- strip_version(df$rowGrp)
  df$colGrp <- strip_version(df$colGrp)
  df
}
# Drop NA elements from a vector, keeping everything else in order.
rm_na <- function(vec) {
  keep <- !is.na(vec)
  vec[keep]
}
# Read an entire source file as a single string, stripping carriage returns
# so Windows CRLF files come back with plain "\n" line endings.
# The `...` argument is accepted for interface compatibility but unused.
readSource <- function(file, ..., dir = ".") {
  path <- gsub("//", "/", sprintf("%s/%s", dir, file))
  code <- readChar(path, file.info(path)$size)
  gsub("\r", "", code)
}
# Evaluate an R code string in the global environment, optionally echoing it
# first. NOTE(review): eval(parse(text = ...)) is intentional here (the app
# re-runs stored code snippets); only feed it trusted sources.
run <- function(codeString,verbose=T){
if(verbose) writeLines(codeString)
eval(parse(text=codeString),envir=.GlobalEnv)
}
# Swap the row and column grouping variables (and their levels) so the same
# table can be presented transposed.
switch_labels <- function(df){
df %>%
mutate(g1=rowGrp,g2=colGrp,l1=rowLevels,l2=colLevels) %>%
mutate(rowGrp=g2,colGrp=g1,rowLevels=l2,colLevels=l1) %>%
select(-g1,-g2,-l1,-l2)
}
# Build "total" rows for grouping variable `grp` from the overall ("ind")
# rows of df, labelled with `label`, then transpose so grp becomes the row
# variable.
get_totals <- function(grp,df,label="All persons"){
totals <- df %>% filter(rowGrp=="ind",colGrp!=grp)
totals %>%
mutate(rowGrp=grp,rowLevels=label) %>%
switch_labels
}
# Prepend overall-total rows ("All persons", or "Any event" for the event
# group) for every grouping variable on the given side, copied from the
# 'ind' (overall) rows of df.
#
# df  : formatted statistics table with <var>Grp / <var>Levels column pairs
# var : which side to total over, 'row' or 'col'
#
# Relies on pop() defined above; 'sop' is excluded from totals.
add_totals <- function(df, var = 'row') {
# working copies of the side's Grp/Levels columns
df$var = df[,paste0(var,"Grp")]
df$lev = df[,paste0(var,"Levels")]
totals <- df %>% filter(var == "ind")
all_grps <- df$var %>% unique %>% pop('ind')
totals_list <- list()
for(grp in all_grps %>% pop("sop")) {
label = ifelse(grp == "event", "Any event", "All persons")
totals_list[[grp]] <- totals %>% mutate(var = grp, lev = label)
}
all_totals <- bind_rows(totals_list)
all_totals[,paste0(var,"Grp")] = all_totals$var
all_totals[,paste0(var,"Levels")] = all_totals$lev
return(bind_rows(all_totals, df) %>% select(-var, -lev))
}
reverse <- function(df) df[nrow(df):1,]
# Drop duplicate (Year, stat, rowGrp, colGrp, rowLevels, colLevels) rows,
# keeping the LAST occurrence: reverse the table, keep the first of each
# duplicate set, reverse back.
dedup <- function(df){
df %>%
reverse %>%
distinct(Year,stat,rowGrp,colGrp,rowLevels,colLevels,.keep_all=TRUE) %>%
reverse
}
# Substitute named placeholders inside `string`.
#   type = 'r'   : placeholders look like .name.
#   type = 'sas' : placeholders look like &name.
# Named values are supplied via `...`, e.g. rsub("x = .a.", a = "1").
rsub <- function(string, ..., type = 'r') {
  template <- switch(type,
    'r' = '\\.%s\\.',
    'sas' = '&%s\\.')
  subs <- unlist(list(...))
  for(key in names(subs)) {
    pattern <- sprintf(template, key)
    string <- gsub(pattern, subs[key], string)
  }
  return(string)
}
# Human-readable wording for a denominator adjustment factor.
#
# D : denominator the estimates were divided by (NULL, 1, 10^-2, 10^3, ...)
#
# Returns "" when no wording applies. Fix vs. original: unrecognized values
# previously fell off the end and returned NULL, which breaks the
# sapply(adj$denom, adjust_text) caller in data_toJSON().
adjust_text <- function(D) {
  if(is.null(D)) return("")
  if(D == 10^3) return("in thousands")
  if(D == 10^6) return("in millions")
  if(D == 10^9) return("in billions")
  ""
}
# Override level numbers (levNum) for rows whose 'levels' value appears in
# `new_levels`. The replacement numbers are named after the caller's
# variable name, e.g. passing age_levels yields age_levels0, age_levels1, ...
# NOTE(review): substitute(new_levels) captures the caller's argument
# expression, so this only works when a named vector variable is passed.
adjust_levels <- function(df, new_levels) {
nm = substitute(new_levels)
new_levels <- new_levels %>%
setNames(paste0(nm, 0:(length(new_levels)-1))) %>%
stack %>% mutate_all(as.character)
left_join(df, new_levels, by = c("levels" = "values")) %>%
mutate(levNum = coalesce(ind, levNum)) %>%
select(-ind)
}
# Reorder df by its 'levels' column so that the values in `new_levels` come
# last (in the given order), after any levels not mentioned.
# NOTE(review): called inside a grouped pipeline in data_toJSON();
# dplyr::arrange() ignores groups unless .by_group = TRUE — confirm the
# global sort here is the intended behavior.
reorder_levels <- function(df,new_levels){
orig_l1 = unique(df$levels)
new_l1 = c(orig_l1[!orig_l1 %in% new_levels],new_levels)
df %>%
mutate(levels = factor(levels,levels=new_l1)) %>%
arrange(levels) %>%
mutate(levels = as.character(levels))
}
# Format numbers for display: round non-NA values of x to d decimal places
# (d may be a vector, elementwise) and add thousands separators. NA values
# pass through; the result is a character vector.
formatNum <- function(x, d) {
  present <- !is.na(x)
  fmt_spec <- paste0("%.", d[present], "f")
  rounded <- sprintf(fmt_spec, x[present])
  x[present] <- prettyNum(rounded, big.mark = ",", preserve.width = "none")
  x
}
# Format a statistics table for display: compute suppression and starring
# flags from reliability rules, scale/round estimates, apply app-specific
# relabeling, and drop subgroups whose sample size is too small.
#
# df     : joined table with coef, se, n, n_exp, denom, digits, se_digits, ...
# appKey : application key ('care', 'pmed', ...) for app-specific relabeling
#
# Rules visible below: suppress when sample_size < 60, RSE > 0.5, or se == 0,
# except small percentages with a tight enough CI; star (*) when RSE > 0.3
# and not suppressed.
format_tbl <- function(df, appKey) {
fmt_tbl <- df %>%
# NOTE(review): 'coef %in% c("meanEXP","medEXP")' looks like it should be
# 'stat %in% ...' (coef is numeric), so n_exp is never selected here —
# confirm intended.
mutate(sample_size = ifelse(coef %in% c("meanEXP","medEXP"), n_exp, n)) %>%
mutate(RSE = se/coef,
is.pct = (stat %>% startsWith("pct")),
special_pct = (is.pct & (coef < 0.1) & (RSE < (0.1/coef-1)/1.96)),
suppress = (sample_size < 60 | RSE > 0.5) | (se == 0),
suppress = replace(suppress, special_pct, FALSE),
star = (RSE > 0.3 & !suppress)) %>%
# default scaling/rounding when the adjustment table had no entry
mutate(denom = replace(denom, is.na(denom), 1),
digits = replace(digits, is.na(digits), 1),
se_digits = replace(se_digits, is.na(se_digits), 1),
coef = ifelse(suppress, NA, coef/denom),
se = ifelse(suppress, NA, se/denom)) %>%
mutate(se = formatNum(se, d = se_digits),
coef = formatNum(coef, d = digits),
coef = ifelse(star, paste0(coef,"*"), coef)) %>%
select(Year, rowGrp, colGrp, rowLevels, colLevels, stat, coef, se, sample_size)
# app-specific label cleanup
if(appKey == "care") {
fmt_tbl <- fmt_tbl %>%
mutate(
colLevels = as.character(colLevels),
colLevels = replace(colLevels, startsWith(colLevels,"afford"), "Couldn't afford"),
colLevels = replace(colLevels, startsWith(colLevels,"insure"), "Insurance related"),
colLevels = replace(colLevels, startsWith(colLevels,"other"), "Other"))
}
if(appKey == "pmed") {
fmt_tbl <- fmt_tbl %>%
mutate(rowLevels = str_to_title(rowLevels))
}
# Remove rows with too small n
fmt_tbl <- fmt_tbl %>%
group_by(rowLevels) %>%
mutate(max_n = max(sample_size, na.rm=T)) %>%
filter(max_n >= 60) %>%
ungroup(rowLevels) %>%
as.data.frame %>%
select(-max_n, -sample_size)
return(fmt_tbl)
}
# Convert to JSON -------------------------------------------------------------
# Apply every label dictionary to df in sequence.
# NOTE(review): depends on global objects sp_keys, sop_dictionary, evnt_use,
# evnt_keys, event_dictionary and delay_dictionary defined elsewhere.
add_all_labels <- function(df) {
df %>%
add_labels(sp_keys) %>%
add_labels(sop_dictionary) %>%
add_labels(evnt_use) %>%
add_labels(evnt_keys) %>%
add_labels(event_dictionary) %>%
add_labels(delay_dictionary)
}
# Load all per-year statistic CSVs for one app, join sample sizes and
# adjustment factors, format, and return one combined display table.
#
# appKey : app name; tables are read from ../tables/<appKey>/<year>/<stat>.csv
# stats  : statistic names to load
# years  : years to load; defaults to every subdirectory of the app's dir
# adj    : adjustment table (stat, denom, digits, ...) joined before formatting
load_years <- function(appKey, stats, years, adj) {
dir <- sprintf("../tables/%s", appKey)
if(missing(years)) years <- list.files(dir)
has_nexp <- any(grepl("n_exp.csv", list.files(dir,recursive = T)))
tbs <- n_df <- n_exp <- list()
for(year in years) { cat(year,"..")
# year-suffix token (e.g. "16X") stripped from level names
yrX <- paste0(substr(year, 3, 4), "X")
for(stat in stats) {
tb_stat <-
read.csv(sprintf("%s/%s/%s.csv", dir, year, stat), stringsAsFactors = F) %>%
mutate(stat = stat, Year = year) %>%
mutate(
colLevels = gsub(yrX,"",colLevels),
rowLevels = gsub(yrX,"",rowLevels))
# rename the statistic's estimate/SE columns to the common coef/se pair
colnames(tb_stat)[colnames(tb_stat) %in% c(stat, paste0(stat,"_se"))] <- c('coef', 'se')
tbs[[paste0(stat,year)]] <- tb_stat
}
}
# per-year sample sizes (n.csv; n_exp.csv only when present)
n_df <- lapply(years, function(x)
read.csv(sprintf("%s/%s/n.csv", dir, x), stringsAsFactors = F) %>% mutate(Year = x)) %>%
bind_rows %>% rm_v2 %>% dedup %>% add_all_labels
if(has_nexp){
n_exp <- lapply(years, function(x)
read.csv(sprintf("%s/%s/n_exp.csv", dir, x), stringsAsFactors = F) %>% mutate(Year = x)) %>%
bind_rows %>% rm_v2 %>% dedup %>% add_all_labels
}
# the 'use' app is symmetric in rows/columns, so add the transposed rows
# NOTE(review): this branch assumes 'use' always has n_exp.csv; otherwise
# n_exp is still an empty list here — confirm.
if(appKey == 'use'){
n_df <- bind_rows(n_df, switch_labels(n_df)) %>% dedup
n_exp <- bind_rows(n_exp, switch_labels(n_exp)) %>% dedup
}
full_tbls <- bind_rows(tbs) %>% rm_v2 %>% dedup %>% add_all_labels %>% left_join(n_df)
if(has_nexp) full_tbls <- full_tbls %>% left_join(n_exp)
full_tbls <- full_tbls %>%
left_join(adj) %>%
format_tbl(appKey = appKey) %>%
filter(!rowLevels %in% c("Missing", "Inapplicable")) %>%
filter(!colLevels %in% c("Missing", "Inapplicable")) %>%
add_totals('row') %>%
add_totals('col')
# drop (row,col) cells where every year's estimate is suppressed/missing
full_tbls <- full_tbls %>%
group_by(rowGrp, colGrp, rowLevels, colLevels) %>%
mutate(n_miss = mean(is.na(coef))) %>%
filter(n_miss < 1) %>%
ungroup %>%
select(-n_miss)
return(full_tbls)
}
# Build the app's JSON data files: one <stat>__<rowGrp>__<colGrp>.json table
# per statistic and grouping pair, plus an init.js with column definitions,
# initial level selections and adjustment wording for the DataTables UI.
#
# appKey : app name; output goes under ../mepstrends/hc_<appKey>/json/
# years  : years to load (passed through to load_years)
# adj    : adjustment table (stat, denom, ...)
# pivot  : if TRUE, build the pivot (trend) layout keyed by Year
#
# NOTE(review): depends on globals statList, age_levels, freq_levels,
# racesex_levels, exclude_initial and subLevels, and on jsonlite::toJSON and
# stringr::str_split.
data_toJSON <- function(appKey, years, adj, pivot = F) {
dir <- sprintf("../mepstrends/hc_%s", appKey)
# delete data folder and create new one
unlink(sprintf('%s/json/data', dir), recursive = T)
dir.create(sprintf('%s/json/data', dir))
stats <- statList[[appKey]] %>% unlist(use.names = F)
all_stats <- load_years(appKey = appKey, stats = stats, years = years, adj = adj)
# check for zeros after rounding
zeros <- all_stats %>% filter(coef %in% c("0", "0.0", "0.00") | se %in% c("0", "0.0", "0.00"))
write.csv(zeros, file = paste0("zeros_",appKey,".csv"))
# Factor levels ----------------------------
# canonical display order + levNum codes (grpA, grpB, ...) for every level
rowFactors <- all_stats %>% select(rowGrp, rowLevels) %>% setNames(c("grp", "levels"))
colFactors <- all_stats %>% select(colGrp, colLevels) %>% setNames(c("grp", "levels"))
if(pivot) rowFactors = NULL
factors <- bind_rows(rowFactors, colFactors) %>%
unique %>%
arrange(grp) %>%
group_by(grp) %>%
reorder_levels(age_levels) %>%
reorder_levels(freq_levels) %>%
reorder_levels(racesex_levels) %>%
mutate(levNum = paste0(grp, LETTERS[row_number()])) %>%
ungroup
# -----------------------------------------
max_ncol <- 0
# the 'use' app is row/column symmetric; add transposed rows
if(appKey == "use")
all_stats <- rbind(all_stats, all_stats %>% switch_labels) %>% dedup
for(st in stats){ cat("\n",st,":")
for(col in unique(all_stats$colGrp)) { cat(col,", ")
# long table for this stat/column-group, ordered by level codes
sub_tbl <- all_stats %>%
filter(stat == st, colGrp == col) %>% tbl_df %>%
gather(class, value, -rowGrp, -colGrp, -rowLevels, -colLevels, -Year, -stat) %>%
left_join(factors, by = c("colGrp" = "grp", "colLevels" = "levels")) %>%
arrange(levNum) %>%
unite(key1, colLevels, levNum, sep = "__") %>%
mutate(key1 = factor(key1, levels = unique(key1))) %>%
arrange(key1, Year, rowGrp, rowLevels, stat, class) %>%
select(-colGrp)
# pivot layout folds Year into the column key
if(pivot){
pre_wide <- sub_tbl %>% unite(key, key1, Year, stat, class, sep = "__") %>% mutate(Year = "All")
} else {
pre_wide <- sub_tbl %>% unite(key, key1, stat, class, sep = "__")
}
app_wide <- pre_wide %>%
left_join(factors, by = c("rowGrp" = "grp", "rowLevels" = "levels")) %>%
rename(rowLevNum = levNum) %>%
mutate(rowLevels = ifelse(rowGrp == 'ind', Year, rowLevels)) %>%
mutate(key = factor(key, levels = unique(key))) %>%
spread(key, value)
app_wide <- app_wide %>%
mutate(selected = 0) %>%
select(Year, rowGrp, rowLevels, rowLevNum, selected, one_of(colnames(app_wide))) %>%
arrange(rowLevNum)
# track the widest table so init.js defines enough columns
if(!pivot){
app_wide <- app_wide %>% arrange(-Year)
max_ncol <- max(max_ncol, ncol(app_wide))
} else {
ncol_trend <- sum(grepl("ind", colnames(app_wide))) + 4
max_ncol <- max(max_ncol, ncol_trend)
}
# display names: first key segment, or the year for "Total" columns
classes <- colnames(app_wide)
cnames <- array()
for(i in 1:length(classes)){
sp = str_split(classes[i],"__")[[1]]
cnames[i] = sp[1]
if(sp[1] == "Total") cnames[i] = sp[3]
}
jsonClasses <- toJSON(classes, dataframe = "values", na = "null")
jsonNames <- toJSON(cnames, dataframe = "values", na = "null")
# one file per row group (skip same-group pairs except ind x ind)
for(row in unique(app_wide$rowGrp)){ #print(row)
if(row == col & row != 'ind') next
row_wide <- app_wide %>% filter(rowGrp == row)
jsonData <- toJSON(row_wide, dataframe = "values", na = "null")
json_OUT <- sprintf( '{"data": %s, "classes": %s, "names": %s}', jsonData, jsonClasses, jsonNames)
filename <- sprintf("%s/json/data/%s__%s__%s.json", dir, st, row, col)
write(json_OUT, file = filename)
} # row loop
} # col loop
} # stat loop
# Initialize column classes
coefCols <- rep('
{title: "", className: "coef", searchable: false, render: coefDisplay}', max_ncol)
seCols <- rep('
{title: "", className: "se", searchable: false, render: seDisplay}', max_ncol)
statCols <- c(rbind(coefCols,seCols)) %>% paste0(collapse=",\n")
initCols <- sprintf('[
{ title: "Year", className: "sub", "visible": false},
{ title: "rowGrp", className: "sub", "visible": false},
{ title: "rowLevels" , className: "main"},
{ title: "rowLevNum" , className: "sub", "visible": false},
{ title: "selected", className: "sub", "visible" : false},
%s]', statCols)
# per-stat adjustment wording (e.g. "in millions") for the UI
adj$text = sapply(adj$denom, adjust_text)
adjustment <- sprintf("%s: '%s'", adj$stat, adj$text) %>% paste(collapse = ", ")
# Initial level selection for groups
factors = factors %>%
filter(grp != 'ind') %>% # ind screws up pivot tables
filter(!levels %in% exclude_initial)
initLevels = list()
for(gp in unique(factors$grp)) {
sub = factors %>% filter(grp == gp)
initLevels[[gp]] = as.list(sub$levels) %>% setNames(sub$levNum)
}
init_levels <- toJSON(initLevels, auto_unbox = T)
sub_levels <- toJSON(subLevels, auto_unbox = T)
isPivot <- ifelse(pivot, 'true', 'false')
json_INIT <- sprintf(
"var isPivot = %s; var initCols = %s; var initLevels = %s; var subLevels = %s; var adjustStat = {%s};",
isPivot, initCols, init_levels, sub_levels, adjustment)
write(json_INIT, file = sprintf("%s/json/init.js", dir))
}
# Write the app's code.js: JavaScript variable declarations holding the R/SAS
# code snippets, PUF file names and notes shown in the app's "code" tab.
#
# appKey : app name; output goes to ../mepstrends/hc_<appKey>/json/code.js
# years  : years whose PUF names to include
#
# NOTE(review): depends on globals demo_grps, grpCode, careCaption, loadPkgs,
# loadFYC, loadCode, dsgnCode, statCode, byVars, notes and get_puf_names.
code_toJSON <- function(appKey, years) {
dir <- sprintf("../mepstrends/hc_%s", appKey)
# Code snippets
subgrps <- demo_grps %>% unlist(use.names = F)
pufNames <- lapply(years, get_puf_names, web = F) %>% setNames(years)
# event/sop grouping code only applies to the 'use' app
if(appKey != 'use') grpCode <- grpCode[!names(grpCode) %in% c("event", "sop", "event_sop")]
appKeyJ <- sprintf("var appKey = '%s';", appKey)
careCaptionJ <- sprintf("var careCaption = %s;", toJSON(careCaption, auto_unbox = T))
loadPkgsJ <- sprintf("var loadPkgs = %s;", toJSON(loadPkgs, auto_unbox = T))
loadFYCJ <- sprintf("var loadFYC = %s;", toJSON(loadFYC[[appKey]], auto_unbox = T))
loadCodeJ <- sprintf("var loadCode = %s;", toJSON(loadCode[[appKey]], auto_unbox = T))
grpCodeJ <- sprintf("var grpCode = %s;", toJSON(grpCode, auto_unbox = T))
dsgnCodeJ <- sprintf("var dsgnCode = %s;", toJSON(dsgnCode[[appKey]], auto_unbox = T))
statCodeJ <- sprintf("var statCode = %s;", toJSON(statCode[[appKey]], auto_unbox = T))
subgrpsJ <- sprintf("var subgrps = %s;", toJSON(subgrps, auto_unbox = T))
byVarsJ <- sprintf("var byVars = %s;", toJSON(byVars[[appKey]], auto_unbox = T))
pufNamesJ <- sprintf("var pufNames = %s;", toJSON(pufNames, auto_unbox = T))
# Notes
mepsNotesJ <- sprintf("var mepsNotes = %s;", toJSON(notes, auto_unbox = T))
code_JSON <- paste(
c(appKeyJ, careCaptionJ, loadPkgsJ, loadFYCJ, loadCodeJ, grpCodeJ, dsgnCodeJ, statCodeJ,
subgrpsJ, byVarsJ, pufNamesJ, mepsNotesJ), collapse = "\n\n")
write(code_JSON, file = sprintf("%s/json/code.js", dir))
}
# 508 form functions and html builder ------------------------------------------------------------------------
# Build a Bootstrap tab <li>: an anchor that toggles the '#<id>-tab' pane,
# with id '<id>-pill' and a span carrying the tab title.
tab_li <- function(id, label, class = "") {
tags$li(class = class,
tags$a('data-toggle' = 'tab', href = sprintf('#%s-tab',id), id = sprintf('%s-pill',id),
tags$span(class = sprintf("tab-title %s-tab",id), label)))
}
# Empty live-region span ('#<id>-caption') that screen readers announce when
# the caption text is filled in client-side.
caption <- function(id) {
tags$span(id = sprintf('%s-caption',id), role = 'region', 'aria-live' = 'polite', class = 'caption')
}
# 508-compliant action button with USWDS styling, modeled on
# shiny::actionButton.
# NOTE(review): `class` is passed twice to tags$button (action-button class
# and usa-button class) — confirm htmltools renders both attributes as
# intended in the output HTML.
actionButton508 <- function (inputId, label, usaStyle = NULL, class="", icon = NULL, width = NULL, ...){
value <- restoreInput(id = inputId, default = NULL)  # bookmarked state, if any
tags$button(
id = inputId,
type = "button",
class = sprintf("action-button %s",class),
class = paste(c("usa-button", usaStyle),collapse="-"),
`data-val` = value,
list(icon, label), ...)
}
# 508-compliant <select> input with an explicit <label for=...>, modeled on
# shiny::selectInput without the selectize wrapper.
# Defaults `selected` to the first (possibly nested) choice.
selectInput508 <- function (inputId, choices = "", selected = NULL, label=NULL, width = NULL, size = NULL){
choices <- choicesWithNames(choices)
if(is.null(selected)) {
selected <- firstChoice(choices)
}else{
selected <- as.character(selected)
}
selectTag <- tags$select(id = inputId, size = size, selectOptions(choices, selected))
labelTag <- if(!is.null(label)) tags$label(label, 'for'=inputId)
tagList(labelTag, selectTag)
}
# 508-compliant single checkbox with an explicit <label for=...>.
# value = TRUE pre-checks the box; inline = TRUE puts input and label on one
# line via inline styles.
checkboxInput508 <- function(inputId, label, value = FALSE, inline=FALSE, class=""){
inputTag <- tags$input(id = inputId, type = "checkbox", name=inputId, value=inputId,class=class)
if (!is.null(value) && value) inputTag$attribs$checked <- "checked"
labelTag <- tags$label('for'=inputId,label)
if(inline){
inputTag$attribs$style = 'display: inline;'
labelTag$attribs$style = 'display: inline;'
}
tagList(inputTag,labelTag)
}
# 508-compliant checkbox group rendered as a USWDS fieldset, modeled on
# shiny::checkboxGroupInput (the shiny-input-checkboxgroup class is required
# for Shiny's input binding).
#
# Fix vs. original: the guard copied from shiny's source referenced
# choiceNames/choiceValues, which are not parameters of this function and
# would be undefined if evaluated; the NULL-choices guard now uses only
# `choices`.
checkboxGroupInput508 <- function (inputId, choices = "", label=NULL, selected = NULL, inline=FALSE) {
  if (is.null(choices)) choices <- character(0)
  choices <- choicesWithNames(choices)
  if(!is.null(selected)) selected <- as.character(selected)
  options <- generateOptions508(inputId, choices, selected, inline)
  labelTag <- ""
  if(!is.null(label)) labelTag <- tags$label(label)
  # screen-reader-only legend repeats the label for assistive technology
  legendTag <- tags$legend(label,class="usa-sr-only")
  tags$fieldset(id=inputId,
  class="usa-fieldset-inputs usa-sans shiny-input-checkboxgroup", ## !important shiny class
  labelTag,
  legendTag,
  tags$ul(class="usa-unstyled-list",options)
  )
}
# 508-compliant radio button group rendered as a USWDS fieldset, modeled on
# shiny::radioButtons (the shiny-input-radiogroup class is required for
# Shiny's input binding). Defaults `selected` to the first choice.
radioButtons508 <- function(inputId, label, choices, selected = NULL, inline = FALSE, width = NULL,class="") {
choices <- choicesWithNames(choices)
selected <- if(is.null(selected)){
choices[[1]]
}else {
as.character(selected)
}
if(length(selected) > 1) stop("The 'selected' argument must be of length 1")
options <- generateOptions508(inputId, choices, selected, inline, type = "radio")
legendTag <- tags$legend(label,class="em-legend")
tags$fieldset(
id=inputId,
class= paste("usa-fieldset-inputs usa-sans shiny-input-radiogroup",class), ## !important shiny class
legendTag,
tags$ul(class="usa-unstyled-list",options)
)
}
# Build the <li><input><label> list items for a checkbox or radio group.
# Each input id is "<inputId>-<value>" so identical choice values can coexist
# across different inputs on the same page.
generateOptions508 <- function (inputId, choices, selected, inline=FALSE, type = "checkbox"){
options <- mapply(
choices, names(choices),
FUN = function(value,name) {
unique_id = paste(inputId,value,sep="-") ## need this in case using same choices across namespaces
inputTag <- tags$input(id = unique_id, type = type, name = inputId, value = value)
if(value %in% selected) inputTag$attribs$checked <- "checked"
labelTag <- tags$label('for'=unique_id, name)
listTag <- tags$li(inputTag,labelTag)
if(inline) listTag$attribs$style="display: inline-block; padding-right: 30px;"
listTag
}, SIMPLIFY = FALSE, USE.NAMES = FALSE)
div(class="shiny-options-group",options) ## need shiny-options-group class to replace, not append, new choices
}
# 508-compliant download button: an anchor with a tooltip title and a
# screen-reader-only span carrying the label text.
downloadButton508 <- function (id, label = "Download"){
tags$a(id = id, title = "", 'data-original-title' = label,
tabindex = 0,
class = 'em-tooltip usa-button download-button',
tags$span(class = 'usa-sr-only', label))
}
# Labeled text input used as a search box.
# NOTE(review): the label's 'for' attribute is hard-coded to 'search' while
# the input's id is `id` — the association is broken unless id == "search";
# confirm intended.
searchBox508 <- function(id, label = "Search") {
div(class = 'inline',
div(
tags$label('for' = 'search', label),
tags$input(id = id, value = "", class = "form-control", type = 'text')
))
}
# Bootstrap dropdown wrapper: a toggle button plus a dropdown-menu <ul>
# whose contents are supplied via `...`.
dropdown508 <- function(inputId,label="",...){
div(class="dropdown black-text", id = inputId,
tags$button(type="button",
class="usa-accordion-button dropdown-toggle shiny-bound-input arrow-button",
'data-toggle'="dropdown",
'aria-expanded'="false", label),
tags$ul(class="dropdown-menu dropdown-menu-form", 'aria-labelledby'=inputId,...)
)
}
# From Shiny -- re-written in case shiny updated ---------------------------------
# Return the first leaf choice, descending into nested choice lists.
# Returns NULL for an empty choices container.
firstChoice <- function(choices) {
  while (length(choices) > 0L) {
    choices <- choices[[1]]
    if (!is.list(choices)) {
      return(choices)
    }
  }
  NULL
}
# Render <option> (and nested <optgroup>) HTML for a choices list, marking
# entries in `selected` with the selected attribute.
# NOTE(review): htmlEscape/HTML come from htmltools (re-exported by shiny).
selectOptions <- function (choices, selected = NULL) {
html <- mapply(choices, names(choices), FUN = function(choice, label) {
if (is.list(choice)) {
# a sub-list becomes an <optgroup> rendered recursively
sprintf("<optgroup label=\"%s\">\n%s\n</optgroup>",
htmlEscape(label, TRUE), selectOptions(choice, selected))
}
else {
sprintf("<option value=\"%s\"%s>%s</option>", htmlEscape(choice, TRUE),
if (choice %in% selected) " selected" else "", htmlEscape(label))
}
})
HTML(paste(html, collapse = "\n"))
}
# Normalize a choices vector/list into a fully named list (shiny-compatible):
# scalars become character, sub-vectors become named lists, and any element
# left unnamed is named after its own value. Named sub-lists are normalized
# recursively; unnamed sub-lists are an error.
choicesWithNames <- function (choices) {
  ensure_names <- function(x) {
    if (is.null(names(x)))
      names(x) <- character(length(x))
    x
  }
  normalize <- function(obj) {
    out <- lapply(obj, function(el) {
      if (is.list(el)) {
        normalize(el)
      } else if (length(el) == 1 && is.null(names(el))) {
        as.character(el)
      } else {
        ensure_names(as.list(el))
      }
    })
    ensure_names(out)
  }
  choices <- normalize(choices)
  if (length(choices) == 0) return(choices)
  choices <- mapply(choices, names(choices), FUN = function(choice, nm) {
    if (!is.list(choice))
      return(choice)
    if (nm == "")
      stop("All sub-lists in \"choices\" must be named.")
    choicesWithNames(choice)
  }, SIMPLIFY = FALSE)
  unnamed <- names(choices) == ""
  names(choices)[unnamed] <- as.character(choices)[unnamed]
  choices
}
|
f4826440fb36b049a00b6ab6802031d6d57c3ecc
|
58ce5212730d664ae19f2e29abadb5517473866a
|
/man/plotVarImpBase.Rd
|
37c9ff4d10a70aa3206a53742da867407213c3fb
|
[] |
no_license
|
wtcooper/vizrd
|
0430980693fa57d83210e63756253be9c1dc1a2a
|
3850bb1ec6a2576a6d674a828c9e9359b6a9b681
|
refs/heads/master
| 2021-06-06T21:16:26.266916
| 2017-08-30T18:46:09
| 2017-08-30T18:46:09
| 41,006,164
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
plotVarImpBase.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotsModel.r
\name{plotVarImpBase}
\alias{plotVarImpBase}
\title{Bar plot of variable importances, passing just a dataframe
of the name, importance, and response (target levels, faceted
if multinomial). Note: user must sort and set up the data frame
properly depending on the model output.}
\usage{
plotVarImpBase(df, xlabel)
}
\arguments{
\item{df}{data frame}

\item{xlabel}{label for the plot's x-axis}
}
\description{
Bar plot of variable importances, passing just a dataframe
of the name, importance, and response (target levels, faceted
if multinomial). Note: user must sort and set up the data frame
properly depending on the model output.
}
|
08fd71c65ed4978412eaa6c73f68d16926ef40cf
|
96e6210bf4b9c0573621772bf42790e7b675c37c
|
/Data Analysis/YaoZhang_Project1ExerciseA.R
|
7ffdeae966713410856e6ad6c80bd23c6969c85d
|
[] |
no_license
|
ohana1128/HW
|
ede2dd666df37c2a22b4523c21f95c6b23586c2a
|
2aadc47bb54be650cf34536b88add39ced1caacd
|
refs/heads/master
| 2021-01-19T04:15:37.060084
| 2016-09-13T20:19:35
| 2016-09-13T20:19:35
| 45,416,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,999
|
r
|
YaoZhang_Project1ExerciseA.R
|
#Exercise A.1 Data Analysis
# Exploratory analysis of the UCI forest-fires dataset (reads the CSV from
# the working directory).
df <- read.csv("forestfires.csv")
head(df)
#1. How many observations are there in the dataset? 517 observations
nrow(df)
#2. How many observations are there with a fire (i.e. area>0)? 270 observations
nrow(subset(df, area>0))
#3. How many observations are there with rain (i.e. rain>0)? 8 observations
nrow(subset(df, rain>0))
#4. How many observations are there with both a fire and rain? 2 observations
# NOTE(review): the TRUE column index selects all columns; nrow() only needs
# the row filter.
nrow(df[df$area>0 & df$rain>0,TRUE])
#5. Write an R code to show the columns month, day, area of all the observations.
# Columns 3, 4, 13 are month, day, area.
df[,c(3,4,13)]
#6. Write an R code to show the columns month, day, area of the observations with a fire.
subset(df,area>0,select = c(month,day,area))
#7. How large are the five largest fires (i.e. having largest area)? Top five burned areas are 200.94, 212.88, 278.53, 746.28, 1090.84
tail(sort(df$area),5)
#8. What are the corresponding month, temp, RH, wind, rain, area?
# NOTE(review): comparing the numeric area column against strings relies on
# implicit coercion; numeric values would be safer.
subset(df,df$area %in% c("200.94","212.88","278.53","746.28","1090.84"), select = c(month,temp,RH,rain,area))
#9. Reorder factor levels of month to be from Jan to Dec.
df$month <- factor(df$month, levels = c('jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'))
df[order(df$month), ]
#10. Add one column to the data indicating whether a fire occurred for each observation ('TRUE' for area>0 and 'FALSE' for area==0).
df$fire <- factor(df$area>0)
#11. What is the mean area, wind, temp and RH per month?
tapply(df$area, df$month, mean)
tapply(df$wind, df$month, mean)
tapply(df$temp, df$month, mean)
tapply(df$RH, df$month, mean)
#12. How many observations are there in each month?
table(df$month)
#13. How many observations are there with a fire in each month?
table(df[df$area>0, ]$month)
#Exercise A.2: Tests
#Write the analysis as if you are statistical consultant and you are writing a report for people with limited statistical knowledge. You should state what the p-value is and list the assumptions of the study. You should decide which tests to use for questions 1, 2 and 4.
#1. Test whether the average burned area changes with respect to the day of the week. Only use observations with area>0.
newdf <- subset(df, area>0)
df1 <- newdf[,c(4,13)]  # keep day and area columns only
#F-test (one-way ANOVA of area by day of week)
summary(aov(area~as.factor(day),data=newdf))
#P-value is 0.442 > 0.05, fail to reject the null hypothesis: the average burned area doesn't change with respect to the day of the week
#check normality by using qqplot and durbin-watson test
library(ggplot2)
library(car)
par(mfrow=c(3,3))
for(days in levels(df1$day)){tempGroup <- subset(df1,day==days,drop=TRUE); qqnorm(tempGroup$area, main=paste(days,durbinWatsonTest(tempGroup$area))); qqline(tempGroup$area);}
ggplot(df1,aes(x=factor(day),y=area))+geom_boxplot()
#check equal variance by using bartlett-test and levene-test
bartlett.test(area~day,data = df1)
library(car)
leveneTest(area~day,data=df1)
#2. Similarly, test if the average burned area is the same in each month. Again, only use observations with area>0.
df <- read.csv("forestfires.csv")
newdf <- subset(df, area>0)
dfmonth <- newdf[,c(3,13)]  # keep month and area columns only
summary(aov(area~as.factor(month),data=dfmonth))
#P-value is 0.996 > 0.05, fail to reject the null hypothesis: the average burned area doesn't change with respect to the month
#check normality by using qqplot and durbin-watson test
library(ggplot2)
library(car)
par(mar=c(3,3,3,3))
par(mfrow=c(4,3))
for(months in c("apr","aug","dec","feb","jul","jun","mar","may","oct","sep")){tempGroup1 <- subset(dfmonth,month==months,drop=TRUE); qqnorm(tempGroup1$area, main=paste(months,durbinWatsonTest(tempGroup1$area))); qqline(tempGroup1$area);}
#note: the area values for "nov" and "jan" are 0, so I only plot the area>0 months as requested
ggplot(dfmonth,aes(x=factor(month),y=area))+geom_boxplot()
#check equal variance by using levene-test
library(car)
leveneTest(area~month,data=newdf)
df <- read.csv("forestfires.csv")
newdf <- subset(df, area>0)
#3. Using bootstrap, obtain a 95% confidence interval for the correlation of each variable pair (FFMC, DMC, DC, ISI, temp, RH, wind, rain, area). Also compute p-values for H0: p = 0, H1: p != 0, where p is the Pearson correlation coefficient. Summarize the p-values in a matrix (e.g. row 2 column 3 will contain the p-value for testing rho_DMC,DC = 0).
df2 <- df[,c(5:13)]  # the nine numeric variables
#bootstrap t times
t <- 5000
# boots: one column per variable pair (36 = 9 choose 2), one row per resample
boots <- matrix(rep(0,36*t),t,36)
k <- 1
for(j in 1:8){
for(h in (j+1):9){
dfjh <- df2[,c(j,h)]
for(i in 1:t){
# resample rows with replacement and record the pair's correlation
df2Sample <- dfjh[sample(1:517,size = 517,replace = TRUE),]
boots[i,k] <- cor(df2Sample)[1,2]
}
k <- k+1
}
}
#calculate confidence interval (basic bootstrap interval around the sample correlation)
boots.ci.output <- matrix(rep(0,72),36,2)
df2Cor <- cor(df2)
l <- 1
for(m in 1:8){
for(n in (m+1):9){
boots.ci.output[l,1] <- as.matrix((df2Cor[m,n]+quantile(boots[,l]-df2Cor[m,n],c(0.025,0.975))))[1,1]
boots.ci.output[l,2] <- as.matrix((df2Cor[m,n]+quantile(boots[,l]-df2Cor[m,n],c(0.025,0.975))))[2,1]
l <- l+1
}
}
colnames(boots.ci.output) <- c("2.5%","97.5%")
boots.ci.output
###t-test for computing the p-value for the hypothesis that the Pearson correlation coefficient is zero.
# NOTE(review): pt(colMeans/colSds, 99) uses 99 degrees of freedom and a
# one-sided tail; confirm this matches the intended test.
library(plyr)
colMeans <- colMeans(boots)
colSds <- apply(boots,2,sd)
pvalue <-pt(colMeans/colSds,99)
###store p-values in a matrix (upper triangle, rows/cols ordered as df2)
pvalue.output <- matrix(rep(0,81),9,9)
h <- 1
for(i in 1:8){
for(j in (i+1):9){
pvalue.output[i,j] <- as.matrix(pvalue <-pt(colMeans/colSds,99))[h,1]
h <- h+1
}
}
colnames(pvalue.output) <- c("FFMC","DMC","DC","ISI","temp","RH","wind","rain","area")
rownames(pvalue.output) <- c("FFMC","DMC","DC","ISI","temp","RH","wind","rain","area")
pvalue.output
#4. Test if the distribution of the area variable in September is the same as the distribution of area in August. Only use observations with area>0.
sep <- subset(newdf,month=="sep")
sep <- as.numeric(sep$area)
aug <- subset(newdf,month=="aug")
aug <- as.numeric(aug$area)
# two-sample Kolmogorov-Smirnov test
ks.test(sep,aug)
#p-value is 0.09754 > 0.05, so we cannot reject that the area distributions are the same.
|
5624f20e5d5aa932ac7d4318961296e02d3205d8
|
46d4fa34bf4ab67e1c9e2c40935f007c1ce7d279
|
/t-SNE/solucion.R
|
49e9d6162aad2139df0a8cb0d8788106b3391784
|
[] |
no_license
|
huan-lui/unsupervised-learning
|
62ca07317126ebcb6ab56f2ca8d7e14dd69d6977
|
0f555c5055a6691896825b5c0b0036d708a07533
|
refs/heads/master
| 2023-03-16T23:27:40.936593
| 2019-07-13T07:55:28
| 2019-07-13T07:55:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,812
|
r
|
solucion.R
|
#---------------------------------------------------------------------------------------------------#
#    ___           _          _             _                 ____           _   _           _____
#   |_ _|  _ __   (_)   ___  (_)   ___     (_)  | |_         / ___|         | \ | |         | ____|
#    | |  | '_ \  | |  / __| | |  / _ \    (_)  | __|        \___ \  _____  |  \| |  _____  |  _|
#    | |  | | | | | | | (__  | | | (_) |    _   | |_          ___) |        | |\  |         | |___
#   |___| |_| |_| |_|  \___| |_|  \___/    (_)   \__|        |____/         |_| \_|         |_____|
#---------------------------------------------------------------------------------------------------#
# This code is used as a worked example for understanding the
# t-distributed Stochastic Neighbor Embedding (t-SNE) algorithm.
# NOTE(review): this script assumes data.table (fread), dplyr (sample_frac),
# Rtsne, plotly, RColorBrewer (brewer.pal) and factoextra (fviz_cluster) are
# already attached -- no library() calls appear here; confirm before running.
#---------------------------------------------------------------------------------------------------#
# NOTE(review): rm(list=ls()) wipes the user's workspace; discouraged in shared scripts.
rm(list=ls())
# Read the MNIST data (handwritten digits); first column V1 is the digit label.
my_data <- as.data.frame(fread("./data_in/zip.train"))
# Keep only 1/3 of the rows, purely to reduce computational cost.
my_data <- sample_frac(my_data,1/3)
# Format the data and keep a copy of the labels.
my_labels<-my_data$V1
my_data$V1<-as.factor(my_labels)
# The following lines prepare the plot: one color per digit class,
# indexed by class label for lookup below.
colors <- rainbow(length(unique(my_data$V1))) #use ?rainbow
names(colors) <- unique(my_data$V1)
# Run t-SNE on the MNIST data, dropping the label column.
tsne <- Rtsne(my_data[,-1], dims = 2, verbose=TRUE, max_iter = 500)
# [,-1] removes the label column extracted above.
# Draw an "empty" plot (t='n'); the points are added as colored text below.
plot(tsne$Y, t='n', main="2D t-distributed Stochastic Neighbor Embedding para el MNIST dataset",
     xlab = "t-SNE coordenada 1",ylab="t-SNE coordenada 2")
# Add each digit label at its embedded position, colored by class.
text(tsne$Y, labels=my_labels, col=colors[my_data$V1])
# What happens if we do the same but in 3 dimensions?
tsne3D <- Rtsne(my_data[,-1], dims = 3, verbose=TRUE, max_iter = 500)
# Format the embedding for plotting.
data_to_plot<-as.data.frame(tsne3D$Y)
# 3D plot using the plotly library.
# color: how many colors to use, taken from the labels coerced to factor.
# colors: (defined above) distinct colors taken from the rainbow() palette.
plot_ly(data_to_plot, x=~V1, y=~V2, z=~V3, color= as.factor(my_labels), colors = colors) %>%
  add_markers() %>%
  plotly::layout(scene = list(xaxis = list(title = 'Coordenada 1 t-SNE'),
                              yaxis = list(title = 'Coordenada 2 t-SNE'),
                              zaxis = list(title = 'Coordenada 3 t-SNE')))
# Do not run the next lines unless you want to render the raw digit images.
row<-2
COLORS <- c("white", "black")
CUSTOM_COLORS <- colorRampPalette(colors = COLORS)
CUSTOM_COLORS_PLOT <- colorRampPalette(brewer.pal(10, "Set3"))
# Each observation is a 16x16 pixel image stored as a flat row vector.
z <- array(as.vector(as.matrix(my_data[row, -1])), dim = c(16, 16))
z <- z[, 16:1] ##right side up
par(mfrow = c(1, 1), pty = "s", mar = c(1, 1, 1, 1), xaxt = "n", yaxt = "n")
image(1:16, 1:16, z, main = my_data[row, 1], col = CUSTOM_COLORS(256))
# Task: run a clustering algorithm that groups the points as in the figure.
# Scale the 2D embedding before k-means so both axes weigh equally.
tsne_scaled<-scale(tsne$Y)
head(tsne$Y)
head(tsne_scaled)
my_kmeans<-kmeans(tsne_scaled,centers = 10,nstart = 10)
my_kmeans$cluster
fviz_cluster(my_kmeans, data = tsne_scaled)
#----------------------------------------------------------------------#
#    _____   _                 _              ____           _   _           _____
#   |  ___| (_)               (_)  | |_      / ___|         | \ | |         | ____|
#   | |_    | |  _ __         (_)  | __|     \___ \  _____  |  \| |  _____  |  _|
#   |  _|   | | | | | |        _   | |_       ___) |        | |\  |         | |___
#   |_|     |_| |_| |_|       (_)   \__|     |____/         |_| \_|         |_____|
#----------------------------------------------------------------------#
|
5bf45f60d8377f85bca406bf229d9cadef34d8c8
|
171e5018d6c684ad34c833c2dd09f27d02451cf1
|
/survey data cleaning.R
|
fc6c3f63798772f1449c8500a1678fe72a154adc
|
[
"MIT"
] |
permissive
|
JessieZ32/2020_US_election
|
f4dcd19f2c79d4a4d721cfb98e99a1a7aec40bf7
|
1b14118c9f92c8f33e0ef283a39e3f3be4d1d6f8
|
refs/heads/main
| 2023-01-03T00:19:53.794253
| 2020-11-03T04:57:13
| 2020-11-03T04:57:13
| 309,255,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,108
|
r
|
survey data cleaning.R
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded
# from Democracy Fund + UCLA Nationscape at
# https://www.voterstudygroup.org/publication/nationscape-data-set
# Author: Yijie Zhao, Yifan Xu, Yuyan Xu and Yuze Kang
# Date: 1 November 2020
# Contact: yijie.zhao@mail.utoronto.ca
# License: MIT
# To save the downloaded data in inputs/data
# To gitignore it.
#### Workspace setup ####
# NOTE(review): install.packages() runs on every execution of the script;
# normally these lines would be run once interactively instead.
install.packages("haven")
install.packages("tidyverse")
install.packages("labelled")
library(haven)
library(tidyverse)
library(labelled)
#### Load data and choose variables ####
# Read the raw data (Stata .dta file from the June 25, 2020 Nationscape wave).
survey_raw_data <- read_dta("ns20200625.dta")
# Convert labelled columns to factors so category names are readable.
survey_raw_data <- labelled::to_factor(survey_raw_data)
# Keep only the variables needed for modeling.
reduced_survey_data <- survey_raw_data %>%
  select(vote_2020,
         age,
         gender,
         race_ethnicity,
         education,
         household_income,
         state,
         vote_intention
         )
#### Clean data ####
# I. Delete NA
# data - 6479 to 6101 observations
survey_data <- na.omit(reduced_survey_data)
# II. Drop "not voting"
# Drop data of observations who have great possibility
# to not vote any candidates in 2020 election
# as our model analysis only cares about potential voters
survey_data %>% group_by(vote_intention) %>% summarise(count=n())
# Out of 4 groups of vote_intention:
# We keep "Yes, I will vote" and "Not sure"
# We drop "No, I will not vote but I am eligible"
# and "No, I am not eligible to vote"
# data - 6101 to 5394 observations
survey_data <- survey_data[!(
  survey_data$vote_intention=="No, I will not vote but I am eligible" |
    survey_data$vote_intention=="No, I am not eligible to vote"),]
# III. Keep potential voters for Trump and Biden only
# We intend to have a prediction on the competition between Trump and Biden
# two candidates with great probability to win
# We only cares about potential voters for them
survey_data %>% group_by(vote_2020) %>% summarise(count=n())
# Out of 5 groups of vote_2020
# We keep "Donald Trump" (2212) and "Joe Biden" (2397)
# We drop "Someone else" (205), "I would not vote" (84)
# and "I am not sure/don't know" (496)
# data - 5394 to 4609 observations
survey_data <- survey_data[!(
  survey_data$vote_2020=="Someone else" |
    survey_data$vote_2020=="I would not vote" |
    survey_data$vote_2020=="I am not sure/don't know"),]
#### Adjust variables with ACS_2018 data ####
# The recodings below align survey categories with the ACS 2018 post-
# stratification data so the two datasets share category definitions.
# I. vote_2020
# Use vote_2020 to create a binary variable - vote_Trump as response variable
survey_data <- survey_data %>%
  mutate(vote_Trump=ifelse(vote_2020=="Donald Trump",1,0))
# II. age
# Use age to create a categorical variable - age_group
# Refer to Weighting Targets of Nationscape User Guide
survey_data <- survey_data %>%
  mutate(age_group=case_when(age>=18 & age<=23 ~ "18-23",
                             age>=24 & age<=29 ~ "24-29",
                             age>=30 & age<=39 ~ "30-39",
                             age>=40 & age<=49 ~ "40-49",
                             age>=50 & age<=59 ~ "50-59",
                             age>=60 & age<=69 ~ "60-69",
                             age>=70 ~ "70+"))
# III. gender
# Use sex instead of gender as ACS_2018 (rename column, lowercase levels)
survey_data <- rename(survey_data, sex=gender)
survey_data <- survey_data %>%
  mutate(sex=case_when(sex=="Female" ~ "female",
                       sex=="Male" ~ "male"))
# IV. race_ethnicity
# Create new variable race instead of race_ethnicity
# Combine categories of race_ethnicity
# Refer to Weighting Targets of Nationscape User Guide
survey_data <- survey_data %>%
  mutate(race=case_when(
    race_ethnicity=="White" ~ "White",
    race_ethnicity=="Black, or African American" ~ "Black",
    race_ethnicity=="American Indian or Alaska Native" |
      race_ethnicity=="Asian (Asian Indian)" |
      race_ethnicity=="Asian (Chinese)" |
      race_ethnicity=="Asian (Filipino)" |
      race_ethnicity=="Asian (Japanese)" |
      race_ethnicity=="Asian (Korean)" |
      race_ethnicity=="Asian (Vietnamese)" |
      race_ethnicity=="Asian (Other)" |
      race_ethnicity=="Pacific Islander (Native Hawaiian)" |
      race_ethnicity=="Pacific Islander (Guamanian)" |
      race_ethnicity=="Pacific Islander (Samoan)" |
      race_ethnicity=="Pacific Islander (Other)" ~ "AAPI",
    race_ethnicity=="Some other race" ~ "other races"))
# V. education
# Combine categories of education
# Refer to Weighting Targets of Nationscape User Guide
survey_data <- survey_data %>%
  mutate(education=case_when(
    education=="3rd Grade or less" |
      education=="Middle School - Grades 4 - 8" |
      education=="Completed some high school" ~ "No high school diploma",
    education=="High school graduate" ~ "High school diploma",
    education=="Other post high school vocational training" |
      education=="Completed some college, but no degree" ~ "Some college",
    education=="Associate Degree" ~ "Associate's Degree",
    education=="College Degree (such as B.A., B.S.)" |
      education=="Completed some graduate, but no degree" ~ "Bachelor's degree",
    education=="Masters degree" |
      education=="Doctorate degree" ~ "Graduate degree"))
# VI. household_income
# Combine categories of household_income
# Refer to Weighting Targets of Nationscape User Guide
survey_data <- survey_data %>%
  mutate(household_income=case_when(
    household_income=="Less than $14,999" |
      household_income=="$15,000 to $19,999" ~ "$19,999 or less",
    household_income=="$20,000 to $24,999" |
      household_income=="$25,000 to $29,999" |
      household_income=="$30,000 to $34,999" ~ "$20k-$34,999",
    household_income=="$35,000 to $39,999" |
      household_income=="$40,000 to $44,999" |
      household_income=="$45,000 to $49,999" ~ "$35k-$49,999",
    household_income=="$50,000 to $54,999" |
      household_income=="$55,000 to $59,999" |
      household_income=="$60,000 to $64,999" ~ "$50k-$64,999",
    household_income=="$65,000 to $69,999" |
      household_income=="$70,000 to $74,999" |
      household_income=="$75,000 to $79,999" ~ "$65k-$79,999",
    household_income=="$80,000 to $84,999" |
      household_income=="$85,000 to $89,999" |
      household_income=="$90,000 to $94,999" |
      household_income=="$95,000 to $99,999" ~ "$80k-$99,999",
    household_income=="$100,000 to $124,999" ~ "$100k-$124,999",
    household_income=="$125,000 to $149,999" |
      household_income=="$150,000 to $174,999" |
      household_income=="$175,000 to $199,999" ~ "$125k-$199,999",
    household_income=="$200,000 to $249,999" |
      household_income=="$250,000 and above" ~ "$200k or more"))
# VII. state
# Change state from character to factor
survey_data$state <- as.factor(survey_data$state)
# Only select useful variables for modeling after adjustment
survey_data <- survey_data %>%
  select(vote_Trump,
         age_group,
         sex,
         race,
         education,
         household_income,
         state
         )
# Export the dataset - survey_data
write.csv(survey_data,
          file="/cloud/project/survey_data.csv", row.names=F)
|
ce77e0e52a000755581e77bd69ae7503e1c9c86e
|
a300a631ba269f2528caec69157a9591102206ba
|
/scrape-links.R
|
c87ea8e5eb29d0755658085c10c7722e4ce3ad18
|
[] |
no_license
|
MilosVeres/Web-Scraping
|
e55435f011100d9a07f17e1a81bdbcf9fc3a1ecb
|
64d54f4397120025bcf3e3dd1ff2dc6484563d91
|
refs/heads/master
| 2020-08-02T01:33:51.740889
| 2019-09-26T23:28:47
| 2019-09-26T23:28:47
| 211,193,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
scrape-links.R
|
# Extract links from the parsed page `ppage` (rvest object created elsewhere).
# Select the <a> anchors nested inside <h3> headings and pull their hrefs.
a<-html_nodes(ppage,'h3 a')
links<-html_attr(a,'href')
# NOTE(review): positions 18 and 23 are dropped by hard-coded index; this is
# fragile -- it silently removes the wrong links if the page layout changes.
links<-links[-c(18,23)]
#-----------------------------------------
# Turn site-relative "/node/<id>" paths into absolute URLs.
links<-str_replace(links,'^(/node/\\d+)','https://www.mercyhurst.edu\\1')
|
cf9420008f13db13da044c2ee52c0426364e024d
|
7f026bc3deee32e4732c13cd318cb32119c7dd69
|
/R/zlag.R
|
e68d04ef62135ff59c7bcddcc8bc5a9b0b1db24f
|
[] |
no_license
|
cran/TSA
|
109803777566ded77104af3a01e288c749daa97b
|
5050db06a645f31f2a37ac81a90fc5d2c590a25c
|
refs/heads/master
| 2022-07-28T07:23:53.254418
| 2022-07-05T10:36:22
| 2022-07-05T10:36:22
| 17,693,886
| 1
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
zlag.R
|
#' Lag a series by `d` positions.
#'
#' Shifts `x` forward by `d` places, padding the front with `d` missing
#' values and dropping the last `d` elements, so the result has length
#' `max(length(x), d)` elements (all `NA` when `d >= length(x)`).
#'
#' @param x a vector (typically a time series of observations).
#' @param d a non-negative integer lag; `d = 0` returns `x` unchanged.
#' @return `x` shifted by `d` positions with leading `NA`s.
#' @export
zlag <- function(x, d = 1) {
  if (d != as.integer(d) || d < 0) {
    stop("d must be a non-negative integer")
  }
  if (d == 0) {
    return(x)
  }
  # head(x, -d) drops the trailing d elements (empty when d >= length(x)),
  # matching the original rev(rev(x)[-(1:d)]) construction.
  c(rep(NA, d), head(x, -d))
}
|
fb07f72370a447eb002a99c0f6532bcdb25b9abd
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ddalpha/R/ddalphaf.test.r
|
f87cc9133977e7b8d55c4aa526c40cda5f580063
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,235
|
r
|
ddalphaf.test.r
|
## Train a functional classifier on (learn, learnlabels), classify `test`,
## and score the predictions against `testlabels`.
##
## Args:
##   learn, test:             functional observations (project-defined type)
##   learnlabels, testlabels: class labels of the two sets
##   disc.type:               "LS"   -> ddalphaf.train / ddalphaf.classify
##                            "comp" -> compclassf.train / compclassf.classify
##   ...:                     forwarded to the chosen training function
##
## Returns: list(error, correct, incorrect, total, ignored, n, time), where
##   `total` excludes observations the classifier ignored (NA predictions),
##   or NA when training fails or no prediction could be compared.
##
## Fixes vs. original: TRUE/FALSE instead of reassignable T/F, `<-` instead
## of `=` for assignment, removed the empty `finally` block.
ddalphaf.test <- function(learn, learnlabels, test, testlabels, disc.type = c("LS", "comp"), ...){
  # Suppress warnings for the duration of the call; restored on exit.
  ops <- options(warn = -1)
  on.exit(options(ops))
  disc.type <- match.arg(disc.type)
  ftrain <- switch(disc.type,
                   "LS" = ddalphaf.train,
                   "comp" = compclassf.train)
  fclassify <- switch(disc.type,
                      "LS" = ddalphaf.classify,
                      "comp" = compclassf.classify)
  tryCatch({
    # Time the training step only (classification is not timed).
    time <- system.time(
      ddalpha <- ftrain(learn, learnlabels, ...)
    )
    cc <- fclassify(objectsf = test, ddalphaf = ddalpha)
    if (is.numeric(testlabels[[1]])){
      # Numeric ground truth but factor/character predictions: coerce and
      # map the classifier's "Ignored" marker to NA before comparing.
      if (is.factor(cc[[1]]) || is.character(cc[[1]])){
        cc <- unlist(lapply(cc, as.character))
        cc[cc == "Ignored"] <- NA
      }
      equal <- (cc == testlabels)
    } else {
      cc <- unlist(lapply(cc, as.character))
      equal <- (cc == as.character(testlabels))
    }
    # No comparable predictions at all (everything NA): report failure.
    if (!(TRUE %in% equal) && !(FALSE %in% equal)){
      return(NA)
    }
    error <- sum(!equal, na.rm = TRUE) /
      (sum(!equal, na.rm = TRUE) + sum(equal, na.rm = TRUE))
    return(list(error = error,
                correct = sum(equal, na.rm = TRUE),
                incorrect = sum(!equal, na.rm = TRUE),
                total = length(cc) - sum(is.na(equal)),
                ignored = sum(is.na(equal)),
                n = length(cc),
                time = time[1]))
  }, error = function(e) {
    # Best-effort diagnostics; execution then falls through to return NA.
    print ("ERROR T")
    print (e)
  })
  return(NA)
}
## Estimate the classification error rate by cross-validation with
## interleaved folds: fold i holds observations i, i+numchunks,
## i+2*numchunks, ... Each fold is held out once and scored with
## ddalphaf.test (defined in this file).
##
## Args:
##   dataf, labels: functional observations and their class labels
##   numchunks:     number of CV folds (capped at length(dataf))
##   disc.type:     classifier family, passed through to ddalphaf.test
##   ...:           forwarded to the training function
## Returns: list(errors = pooled error rate over all successful folds,
##               time / time_sd = mean and sd of per-fold training times)
ddalphaf.getErrorRateCV <- function(dataf, labels, numchunks = 10, disc.type = c("LS", "comp"), ...){
  n = length(dataf)
  numchunks = min(n, numchunks)
  chunksize = ceiling(n/numchunks)
  # Indices of the first interleaved fold: 1, 1+numchunks, 1+2*numchunks, ...
  sample = seq(from = 1, by = numchunks, length.out = chunksize)
  errors = 0
  total = 0
  times = c()
  for (i in 1:numchunks){
    # Later folds may be shorter; drop indices past n.
    sample = sample[sample<=n]
    learn = dataf[-sample]
    test = dataf[sample]
    learnlabels = labels[-sample]
    testlabels = labels[sample]
    el = ddalphaf.test(learn, learnlabels, test, testlabels, disc.type, ...)
    # ddalphaf.test returns NA (not a list) on failure; accumulate only
    # folds that succeeded.
    if(is.list(el)){
      errors = errors + el$incorrect
      total = total + el$total
      times = c(times,el$time)
    }
    # Shift every index by one to form the next interleaved fold.
    sample = sample+1
  }
  return (list(errors = errors/total, time = mean(times), time_sd = sd(times)))
}
## Estimate the classification error rate by repeated random train/test
## splits (Monte Carlo cross-validation): `times` rounds, each holding out
## a random subset of `size` observations, scored with ddalphaf.test
## (defined in this file).
##
## Args:
##   dataf, labels: functional observations and their class labels
##   size:          held-out set size; a fraction (0,1) of length(dataf)
##                  or an absolute count (must stay below length(dataf))
##   times:         number of random repetitions
##   disc.type:     classifier family, passed through to ddalphaf.test
##   ...:           forwarded to the training function
## Returns: list(errors = mean per-round error rate, errors_sd, errors_vec,
##               time / time_sd = mean and sd of per-round training times)
ddalphaf.getErrorRatePart <- function(dataf, labels, size = 0.3, times = 10, disc.type = c("LS", "comp"), ...){
  if (!is.numeric(size) || size <=0 || size >= length(dataf)) stop("Wrong size of excluded sequences")
  # Fractional sizes are converted to a count; at least one observation
  # is always held out.
  if(size < 1)
    size = max(1, size*length(dataf)) # at least 1 point
  size = as.integer(size)
  indexes = 1:length(dataf)
  errors = c()
  total = 0
  time = c()
  for (i in 1:times){
    samp = sample(indexes, size)
    learn = dataf[-samp]
    test = dataf[samp]
    learnlabels = labels[-samp]
    testlabels = labels[samp]
    el = ddalphaf.test(learn, learnlabels, test, testlabels, disc.type, ...)
    # ddalphaf.test returns NA (not a list) on failure; record only rounds
    # that succeeded.
    if(is.list(el)){
      errors = c(errors,el$incorrect/el$total)
      time = c(time,el$time)
    }
  }
  return (list(errors = mean(errors), errors_sd = sd(errors), errors_vec = errors, time = mean(time), time_sd = sd(time)))
}
|
a4cee075d7d3cf8e448b7910be0dce8720c34624
|
9fbfd13af6074f3b04819722edb29f7ec07031ee
|
/man/nemenyiTest.Rd
|
1c3f5f216c4a31f026b621f331024c67869709c0
|
[] |
no_license
|
dedenistiawan/scmamp
|
3cdd03ccb6b1b2cd843e7f99fc4ddc00a04b8f85
|
e435f9d48078f93ab49b23a19fdb6ef6e12ea5f9
|
refs/heads/master
| 2023-06-03T06:53:11.183762
| 2021-06-16T15:14:23
| 2021-06-16T15:14:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,069
|
rd
|
nemenyiTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tests.R
\name{nemenyiTest}
\alias{nemenyiTest}
\title{Nemenyi test}
\usage{
nemenyiTest(data, alpha = 0.05)
}
\arguments{
\item{data}{Matrix or data frame where each algorithm is in a column}
\item{alpha}{Significance level}
}
\value{
A list with class "htest" containing the following components: \code{statistic}, the value of the statistic used in the test; \code{method}, a character string indicating what type of test was performed; \code{data.name}, a character string giving the name of the data and \code{diff.matrix}, a matrix with all the pairwise differences of average rankings
}
\description{
This function performs the Nemenyi test
}
\details{
The test has been implemented according to the version in Demsar (2006), page 7
}
\examples{
data(data_gh_2008)
res <- nemenyiTest(data.gh.2008, alpha = 0.1)
res
res$diff.matrix
}
\references{
Demsar, J. (2006) Statistical Comparisons of Classifiers over Multiple Data Sets. \emph{Journal of Machine Learning Research}, 7, 1-30.
}
|
2c9ab1c8736b644debf3182253c87634b7f7c56d
|
4344ff77c3206403c9743d4db78082b6a97a007f
|
/cachematrix.R
|
c8fa93ca8be265589033f2a0704e38b4f9046c49
|
[] |
no_license
|
tcortes/ProgrammingAssignment2
|
ede9d7ba4eda15593150075e0ce7ee69c33c62d2
|
dbaf23fd4832fa91a1f9d54438903304c700144c
|
refs/heads/master
| 2021-01-18T03:36:38.128791
| 2014-07-25T08:57:47
| 2014-07-25T08:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
cachematrix.R
|
## Build a "cache matrix": a matrix wrapper that can memoize its inverse.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)       - replace the stored matrix and invalidate the cache
##   get()        - return the stored matrix
##   setsolve(m)  - store a computed inverse in the cache
##   getsolve()   - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # NULL marks "inverse not computed yet".
  cached_inverse <- NULL
  list(
    set = function(y) {
      # New matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setsolve = function(inv) cached_inverse <<- inv,
    getsolve = function() cached_inverse
  )
}
## Return the inverse of a cache matrix created by makeCacheMatrix.
##
## The inverse is computed with solve() only on the first call; subsequent
## calls return the memoized result (announced via a message).
##
## Args:
##   x:   an object produced by makeCacheMatrix
##   ...: additional arguments forwarded to solve()
## Returns: the matrix inverse of x's stored matrix.
##
## Example:
##   m1 <- matrix(c(1,0,0,0,2,0,0,0,3), nrow=3, ncol=3)
##   mAux <- makeCacheMatrix(m1)
##   m2 <- cacheSolve(mAux)
##   round(m1 %*% m2, 3)
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  # Cache hit: skip the computation entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, store for next time, and return.
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
|
299b2f089598158fc117beef9b9ad9d91b24bb1c
|
a7b3339c9e0d6a6871e37bdf7b23a12b7f664cae
|
/man/vlr.Rd
|
8d6215fc6dd466306c9e8902fe5f4cbea405c684
|
[
"MIT"
] |
permissive
|
llrs/propr
|
d5fabc1da6c69cee48295d40a20a828e120a7c12
|
857f9626780833d58e818fad357c6308d8a8c940
|
refs/heads/master
| 2020-03-30T03:52:39.911853
| 2018-09-28T09:38:08
| 2018-09-28T09:38:08
| 150,712,043
| 0
| 0
| null | 2018-09-28T08:47:57
| 2018-09-28T08:47:57
| null |
UTF-8
|
R
| false
| true
| 996
|
rd
|
vlr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/propr-functions.R
\name{vlr}
\alias{vlr}
\title{Variance of logratios}
\usage{
vlr(X, check = FALSE)
}
\arguments{
\item{X}{A matrix or dataframe of positive numeric values}
\item{check}{A logical scalar}
}
\value{
The symmetric matrix
\eqn{\mathrm{Var}{\log(X_i/X_j)}}{Var(log(X_i/X_j))} where \eqn{X_i} and \eqn{X_j}
denote \emph{columns} \eqn{i} and \eqn{j} of \eqn{X}.
}
\description{
\code{vlr} returns a matrix where element (i,j) is
the variance (over rows) of the log of the ratios of column i and j.
}
\details{
If \code{check} is \code{TRUE} then this function will stop if
there are any negative or \code{NA} values in \code{X}.
}
\examples{
N <- 10 # Number of observations
# Make a data frame with columns a and b roughly proportional
# and columns c and d roughly proportional
X <- data.frame(a=(1:N), b=(1:N) * rnorm(N, 10, 0.1),
c=(N:1), d=(N:1) * rnorm(N, 10, 1.0))
round(vlr(X),2)
}
|
e99d91847a721e72a543bf861f4eefd10987711e
|
f84a5a44bce130c3143ef2cfa8d9560d54c5cb09
|
/Trait Fits/TraitFits_bc.R
|
d13b282795f8c1646f937936eedc1f59c247020f
|
[
"MIT"
] |
permissive
|
mshocket/Six-Viruses-Temp
|
d0b42c506a03f0ac195806e7a2e91a6bbc927977
|
e4a5e7172847ba0b6e780a018c310745a00ed2ad
|
refs/heads/master
| 2022-12-16T08:43:39.148251
| 2020-09-08T05:20:24
| 2020-09-08T05:20:24
| 285,398,456
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76,308
|
r
|
TraitFits_bc.R
|
## Marta Shocket, Stanford University / UCLA, marta.shocket@gmail.com
## Started Jan 2018, Updated August 2020
##
## Purpose: Use Bayesian Inference (JAGS) to fit temperature-dependent functions for vector competence tarits (bc, b, c) for six arboviruses in many Culex and Aedes species
##
## Contents: 1) Set-up,load packages, get data, etc.
## 2) JAGS models
## 3) Shared settings for all models
## 4) Fit bc thermal responses with uniform priors
## 5) Fit bc thermal responses for priors
## 6) Fit gamma distributions to bc prior thermal responses
## 7) Fit bc thermal responses with data-informed priors
## 8) Calculate treatment averages for plotting
##########
###### 1. Set up workspace, load packages, get data, etc.
##########
# Set working directory
setwd("~/Fitting Traits")
# Load libraties for fitting traits
library('R2jags')
library('mcmcplots')
# Load Data
data.bc <- read.csv("TraitData_bc.csv") # Data from database for most traits (except below)
unique(data.bc$joint.code)
# Subset Data
data.c.CpipWNV <- subset(data.bc, joint.code == "CpipWNV" & trait.name == "c")
data.bc.CpipWNV <- subset(data.bc, joint.code == "CpipWNV" & trait.name == "bc")
data.b.CtarWNV <- subset(data.bc, joint.code == "CtarWNV" & trait.name == "b")
data.bc.CuniWNV <- subset(data.bc, joint.code == "CuniWNV")
data.bc.CtarWEEV <- subset(data.bc, joint.code == "CtarWEEV" & trait.name == "bc")
data.c.CtarWEEV <- subset(data.bc, joint.code == "CtarWEEV" & trait.name == "c")
data.b.CtarWEEV <- subset(data.bc, joint.code == "CtarWEEV" & trait.name == "b")
data.c.CtarSLEV <- subset(data.bc, joint.code == "CtarSLEV" & trait.name == "c")
data.b.CtarSLEV <- subset(data.bc, joint.code == "CtarSLEV" & trait.name == "b")
data.c.AtaeSINV <- subset(data.bc, joint.code == "AtaeSINV")
data.c.CpipSINV <- subset(data.bc, joint.code == "CpipSINV")
data.bc.AtaeRVFV <- subset(data.bc, joint.code == "AtaeRVFV")
data.bc.AtriEEEV <- subset(data.bc, joint.code == "AtriEEEV")
data.bc.only <- subset(data.bc, trait.name == "bc")
data.b.only <- subset(data.bc, trait.name == "b")
data.c.only <- subset(data.bc, trait.name == "c")
par(mfrow = c(1,1))
plot(trait ~ T, xlim = c(5, 45), data = data.bc.only, ylab = "bc", xlab = "Temperature")
points(trait ~ T, data = data.bc.CpipWNV, col = "grey")
points(trait ~ T, data = data.bc.CuniWNV, col = "orange")
points(trait ~ T, data = data.bc.CtarWEEV, col = "blue")
points(trait ~ T, data = data.bc.AtriEEEV, col = "violet")
points(trait ~ T, data = data.bc.AtaeRVFV, col = "green")
plot(trait ~ T, xlim = c(5, 45), data = data.c.only, ylab = "b", xlab = "Temperature")
points(trait ~ T, data = data.c.CpipWNV, col = "grey")
points(trait ~ T, data = data.c.CtarWEEV, col = "dodgerblue")
points(trait ~ T, data = data.c.CtarSLEV, col = "navyblue")
points(trait ~ T, data = data.c.CpipSINV, col = "grey30")
points(trait ~ T, data = data.c.AtaeSINV, col = "darkgreen")
plot(trait ~ T, xlim = c(5, 45), data = data.b.only, ylab = "b", xlab = "Temperature")
points(trait ~ T, data = data.b.CtarWNV, col = "blue")
points(trait ~ T, data = data.b.CtarWEEV, col = "dodgerblue")
points(trait ~ T, data = data.b.CtarSLEV, col = "navyblue")
##########
###### 2. JAGS Models
##########
############## Quadratic Model with uniform priors
sink("quad.txt")
cat("
model{
## Priors
cf.q ~ dunif(0, 1)
cf.T0 ~ dunif(0, 24)
cf.Tm ~ dunif(26, 50)
cf.sigma ~ dunif(0, 1000)
cf.tau <- 1 / (cf.sigma * cf.sigma)
## Likelihood
for(i in 1:N.obs){
trait.mu[i] <- -1 * cf.q * (temp[i] - cf.T0) * (temp[i] - cf.Tm) * (cf.Tm > temp[i]) * (cf.T0 < temp[i])
trait[i] ~ dnorm(trait.mu[i], cf.tau)
}
## Derived Quantities and Predictions
for(i in 1:N.Temp.xs){
z.trait.mu.pred[i] <- -1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) * (cf.Tm > Temp.xs[i]) * (cf.T0 < Temp.xs[i])
}
} # close model
",fill=T)
sink()
############## Quadratic Model with uniform priors - derived quantities always =< 1 (i.e., for probabilities)
sink("quadprob.txt")
cat("
model{
## Priors
cf.q ~ dunif(0, 1)
cf.T0 ~ dunif(0, 24)
cf.Tm ~ dunif(26, 50)
cf.sigma ~ dunif(0, 1000)
cf.tau <- 1 / (cf.sigma * cf.sigma)
## Likelihood
for(i in 1:N.obs){
trait.mu[i] <- -1 * cf.q * (temp[i] - cf.T0) * (temp[i] - cf.Tm) * (cf.Tm > temp[i]) * (cf.T0 < temp[i])
trait[i] ~ dnorm(trait.mu[i], cf.tau)
}
## Derived Quantities and Predictions
for(i in 1:N.Temp.xs){
z.trait.mu.pred[i] <- (-1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) * (cf.Tm > Temp.xs[i]) * (cf.T0 < Temp.xs[i])) * (-1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) < 1) + (-1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) > 1)
}
} # close model
",fill=T)
sink()
############## Quadratic Model with gamma priors (except sigma) - truncated for probabilities
sink("quadprob_inf.txt")
cat("
model{
## Priors
cf.q ~ dgamma(hypers[1,1], hypers[2,1])
cf.T0 ~ dgamma(hypers[1,2], hypers[2,2])
cf.Tm ~ dgamma(hypers[1,3], hypers[2,3])
cf.sigma ~ dunif(0, 1000)
cf.tau <- 1 / (cf.sigma * cf.sigma)
## Likelihood
for(i in 1:N.obs){
trait.mu[i] <- -1 * cf.q * (temp[i] - cf.T0) * (temp[i] - cf.Tm) * (cf.Tm > temp[i]) * (cf.T0 < temp[i])
trait[i] ~ dnorm(trait.mu[i], cf.tau)
}
## Derived Quantities and Predictions
for(i in 1:N.Temp.xs){
z.trait.mu.pred[i] <- -1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) * (cf.Tm > Temp.xs[i]) * (cf.T0 < Temp.xs[i]) * (-1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) < 1) + (-1 * cf.q * (Temp.xs[i] - cf.T0) * (Temp.xs[i] - cf.Tm) > 1)
}
} # close model
",fill=T)
sink()
##########
###### 3. Shared settings for all models
##########
##### inits Function
inits<-function(){list(
cf.q = 0.01,
cf.Tm = 35,
cf.T0 = 5,
cf.sigma = rlnorm(1))}
##### Parameters to Estimate
parameters <- c("cf.q", "cf.T0", "cf.Tm","cf.sigma", "z.trait.mu.pred")
##### MCMC Settings
# Number of posterior dist elements = [(ni - nb) / nt ] * nc = [ (25000 - 5000) / 8 ] * 3 = 7500
ni <- 25000 # number of iterations in each chain
nb <- 5000 # number of 'burn in' iterations to discard
nt <- 8 # thinning rate - jags saves every nt iterations in each chain
nc <- 3 # number of chains
##### Temp sequence for derived quantity calculations
# For actual fits
Temp.xs <- seq(1, 45, 0.1)
N.Temp.xs <-length(Temp.xs)
# For priors - fewer temps for derived calculations makes it go faster
Temp.xs <- seq(5, 45, 0.5)
N.Temp.xs <-length(Temp.xs)
##########
###### 4. Fit bc thermal responses with uniform priors
##########
###################################### b for WNV Cx tarsalis - quadratic
##### Set data
data <- data.b.CtarWNV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
b.CtarWNV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarWNV.out$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarWNV.out)
save(b.CtarWNV.out, file = "jagsout_b_CtarWNV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarWNV, ylab = "b for WNV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### bc for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.bc.CtarWEEV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
bc.CtarWEEV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.CtarWEEV.out$BUGSoutput$summary[1:5,]
mcmcplot(bc.CtarWEEV.out)
save(bc.CtarWEEV.out, file = "jagsout_bc_CtarWEEV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CtarWEEV, ylab = "bc for WEEV in Cx tarsalis", xlab = "Temperature")
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### c for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.c.CtarWEEV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
c.CtarWEEV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quadprob.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CtarWEEV.out$BUGSoutput$summary[1:5,]
mcmcplot(c.CtarWEEV.out)
save(c.CtarWEEV.out, file = "jagsout_c_CtarWEEV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarWEEV, ylab = "c for WEEV in Cx tarsalis", xlab = "Temperature")
lines(c.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### b for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.b.CtarWEEV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
b.CtarWEEV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarWEEV.out$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarWEEV.out)
save(b.CtarWEEV.out, file = "jagsout_b_CtarWEEV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarWEEV, ylab = "b for WEEV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### c for SLEV Cx tarsalis - quadratic
##### Set data
data <- data.c.CtarSLEV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
c.CtarSLEV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CtarSLEV.out$BUGSoutput$summary[1:5,]
mcmcplot(c.CtarSLEV.out)
save(c.CtarSLEV.out, file = "jagsout_c_CtarSLEV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarSLEV, ylab = "c for SLEV in Cx tarsalis", xlab = "Temperature")
lines(c.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### b for SLEV Cx tarsalis - quadratic
##### Set data
data <- data.b.CtarSLEV
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
##### Run JAGS
b.CtarSLEV.out <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarSLEV.out$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarSLEV.out)
save(b.CtarSLEV.out, file = "jagsout_c_CtarSLEV.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarSLEV, ylab = "b for SLEV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
###################################### Transmission-trait fits (vague priors)
# The seven stanzas below were identical copy-paste except for the data set,
# the JAGS model file, the y-axis label, and the save file; the shared
# boilerplate is factored into one helper.
#
# Globals used (defined earlier in the script): inits, parameters, nt, nc,
# nb, ni (MCMC settings) and Temp.xs, N.Temp.xs (prediction grid).
#
# data       : data frame with columns `trait` and `T` (temperature)
# model.file : JAGS model ("quad.txt" or the probability-scale "quadprob.txt")
# ylab       : y-axis label for the diagnostic plot
#
# Returns the rjags fit; side effects: prints the head of the summary table,
# opens mcmcplot diagnostics (output directory is now named after `out`
# rather than the trait-specific object — cosmetic only), and draws the data
# with the posterior mean (solid) and 95% CI (dashed) over the Temp.xs grid.
fit.trait.response <- function(data, model.file, ylab) {
  trait <- data$trait
  jag.data <- list(trait = trait, N.obs = length(trait), temp = data$T,
                   Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
  out <- jags(data = jag.data, inits = inits, parameters.to.save = parameters,
              model.file = model.file, n.thin = nt, n.chains = nc,
              n.burnin = nb, n.iter = ni, DIC = TRUE,
              working.directory = getwd())
  print(out$BUGSoutput$summary[1:5, ])  # quick convergence / parameter check
  mcmcplot(out)
  plot(trait ~ T, xlim = c(5, 45), ylim = c(0, 1), data = data,
       ylab = ylab, xlab = "Temperature")
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
  out
}

###################################### bc for WNV in Cx. pipiens - quadratic
bc.CpipWNV.out <- fit.trait.response(data.bc.CpipWNV, "quadprob.txt",
                                     "bc for WNV in Cx. pipiens")
save(bc.CpipWNV.out, file = "jagsout_bc_CpipWNV.Rdata")

###################################### c for WNV in Cx. pipiens - quadratic
c.CpipWNV.out <- fit.trait.response(data.c.CpipWNV, "quad.txt",
                                    "c for WNV Cx pipiens")
save(c.CpipWNV.out, file = "jagsout_c_CpipWNV.Rdata")

###################################### bc for WNV in Cx. univittatus - quadratic
bc.CuniWNV.out <- fit.trait.response(data.bc.CuniWNV, "quadprob.txt",
                                     "bc for WNV in Cx. univittatus")
save(bc.CuniWNV.out, file = "jagsout_bc_CuniWNV.Rdata")

###################################### bc for RVFV in Ae. taeniorhynchus - quadratic
# (header comment previously said "Cx. pipiens" — the data are Ae. taeniorhynchus)
bc.AtaeRVFV.out <- fit.trait.response(data.bc.AtaeRVFV, "quadprob.txt",
                                      "bc for RVFV Ae taeniorynchus")
# NOTE(review): file name says "Cpip" but the object is the Ae. taeniorhynchus
# fit — kept byte-identical in case downstream load() calls expect this name;
# confirm before renaming.
save(bc.AtaeRVFV.out, file = "jagsout_bc_CpipRVFV.Rdata")

###################################### c for SINV in Ae. taeniorhynchus - quadratic
# (header comment previously said "bc" — this is the c fit)
c.AtaeSINV.out <- fit.trait.response(data.c.AtaeSINV, "quad.txt",
                                     "c for SINV in Ae. taeniorhynchus")
# NOTE(review): file name says "bc" but the object is the c fit — confirm
# downstream consumers before renaming.
save(c.AtaeSINV.out, file = "jagsout_bc_AtaeSINV.Rdata")

###################################### c for SINV in Cx. pipiens - quadratic
c.CpipSINV.out <- fit.trait.response(data.c.CpipSINV, "quad.txt",
                                     "c for SINV in Cx. pipiens")
# NOTE(review): file name says "bc" but the object is the c fit — confirm
# downstream consumers before renaming.
save(c.CpipSINV.out, file = "jagsout_bc_CpipSINV.Rdata")

###################################### bc for EEEV in Ae. triseriatus - quadratic
# NOTE(review): this bc fit uses "quad.txt" while the other bc fits use
# "quadprob.txt" — kept as in the original; confirm this is intentional.
bc.AtriEEEV.out <- fit.trait.response(data.bc.AtriEEEV, "quad.txt",
                                      "bc for EEEV in Ae. triseriatus")
save(bc.AtriEEEV.out, file = "jagsout_bc_AtriEEEV.Rdata")
##########
###### 5. Fit thermal responses for bc priors using a leave-one-out approach
##########
# Split the combined infection-trait data by trait type (bc, b, c).
# Explicit which()-based row indexing is used instead of subset(); both drop
# rows where the condition is NA or FALSE.
data.bc.only <- data.bc[which(data.bc$trait.name == "bc"), ]
data.b.only <- data.bc[which(data.bc$trait.name == "b"), ]
data.c.only <- data.bc[which(data.bc$trait.name == "c"), ]
# Leave-one-out sets: each drops the focal vector/virus combination so the
# informative prior for that combination is built only from the others.
data.c.CpipWNV.prior <- data.c.only[which(data.c.only$joint.code != "CpipWNV"), ]
data.bc.CpipWNV.prior <- data.bc.only[which(data.bc.only$joint.code != "CpipWNV"), ]
data.b.CtarWNV.prior <- data.b.only[which(data.b.only$joint.code != "CtarWNV"), ]
data.bc.CuniWNV.prior <- data.bc.only[which(data.bc.only$joint.code != "CuniWNV"), ]
data.bc.CtarWEEV.prior <- data.bc.only[which(data.bc.only$joint.code != "CtarWEEV"), ]
data.c.CtarWEEV.prior <- data.c.only[which(data.c.only$joint.code != "CtarWEEV"), ]
data.b.CtarWEEV.prior <- data.b.only[which(data.b.only$joint.code != "CtarWEEV"), ]
data.c.CtarSLEV.prior <- data.c.only[which(data.c.only$joint.code != "CtarSLEV"), ]
data.b.CtarSLEV.prior <- data.b.only[which(data.b.only$joint.code != "CtarSLEV"), ]
data.c.AtaeSINV.prior <- data.c.only[which(data.c.only$joint.code != "AtaeSINV"), ]
data.c.CpipSINV.prior <- data.c.only[which(data.c.only$joint.code != "CpipSINV"), ]
data.bc.AtaeRVFV.prior <- data.bc.only[which(data.bc.only$joint.code != "AtaeRVFV"), ]
data.bc.AtriEEEV.prior <- data.bc.only[which(data.bc.only$joint.code != "AtriEEEV"), ]
###################################### Leave-one-out prior fits
# The thirteen stanzas below were identical copy-paste except for the data
# set and the plot label: every fit uses the vague-prior quadratic model
# ("quad.txt") and the same MCMC settings, and none is saved to disk. The
# shared boilerplate is factored into one helper.
#
# Globals used (defined earlier in the script): inits, parameters, nt, nc,
# nb, ni (MCMC settings) and Temp.xs, N.Temp.xs (prediction grid).
#
# data : leave-one-out data frame with columns `trait` and `T` (temperature)
# ylab : y-axis label for the diagnostic plot
#
# Returns the rjags fit; side effects: prints the head of the summary table,
# opens mcmcplot diagnostics (output directory is now named after `out`
# rather than the trait-specific object — cosmetic only), and draws the data
# with the posterior mean (solid) and 95% CI (dashed) over the Temp.xs grid.
fit.prior.response <- function(data, ylab) {
  trait <- data$trait
  jag.data <- list(trait = trait, N.obs = length(trait), temp = data$T,
                   Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs)
  out <- jags(data = jag.data, inits = inits, parameters.to.save = parameters,
              model.file = "quad.txt", n.thin = nt, n.chains = nc,
              n.burnin = nb, n.iter = ni, DIC = TRUE,
              working.directory = getwd())
  print(out$BUGSoutput$summary[1:5, ])  # quick convergence / parameter check
  mcmcplot(out)
  plot(trait ~ T, xlim = c(5, 45), ylim = c(0, 1), data = data,
       ylab = ylab, xlab = "Temperature")
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
  lines(out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
  out
}

# One fit per leave-one-out prior data set, in the original order.
bc.CpipWNV.prior.out <- fit.prior.response(data.bc.CpipWNV.prior, "bc for WNV in Cpip prior")
c.CpipWNV.prior.out <- fit.prior.response(data.c.CpipWNV.prior, "c for WNV in Cpip prior")
b.CtarWNV.prior.out <- fit.prior.response(data.b.CtarWNV.prior, "b for WNV in Ctar prior")
bc.CuniWNV.prior.out <- fit.prior.response(data.bc.CuniWNV.prior, "bc for WNV in Cuni prior")
bc.CtarWEEV.prior.out <- fit.prior.response(data.bc.CtarWEEV.prior, "bc for WEEV in Ctar prior")
c.CtarWEEV.prior.out <- fit.prior.response(data.c.CtarWEEV.prior, "c for WEEV in Ctar prior")
b.CtarWEEV.prior.out <- fit.prior.response(data.b.CtarWEEV.prior, "b for WEEV in Ctar prior")
c.CtarSLEV.prior.out <- fit.prior.response(data.c.CtarSLEV.prior, "c for SLEV in Ctar prior")
b.CtarSLEV.prior.out <- fit.prior.response(data.b.CtarSLEV.prior, "b for SLEV in Ctar prior")
c.CpipSINV.prior.out <- fit.prior.response(data.c.CpipSINV.prior, "c for SINV in Cpip prior")
c.AtaeSINV.prior.out <- fit.prior.response(data.c.AtaeSINV.prior, "c for SINV in Atae prior")
bc.AtaeRVFV.prior.out <- fit.prior.response(data.bc.AtaeRVFV.prior, "bc for RVFV in Atae prior")
bc.AtriEEEV.prior.out <- fit.prior.response(data.bc.AtriEEEV.prior, "bc for EEEV in Atri prior")
##########
###### 6. Fit gamma distributions to bc prior thermal responses
##########
# The thirteen stanzas below were identical copy-paste; factored into two
# helpers. For each leave-one-out fit we (1) collect the posterior draws of
# the three quadratic parameters (q, T0, Tm — sigma excluded) and (2) fit a
# gamma distribution to each parameter's draws to use as an informative
# prior in section 7.

# Pull cf.q / cf.T0 / cf.Tm posterior draws from a JAGS fit into a data frame
# with one column per parameter.
get.cf.dists <- function(jags.out) {
  sims <- jags.out$BUGSoutput$sims.list
  data.frame(q = as.vector(sims$cf.q),
             T0 = as.vector(sims$cf.T0),
             Tm = as.vector(sims$cf.Tm))
}

# Fit a gamma distribution (via MASS::fitdistr) to each column of posterior
# draws; returns the matrix of shape/rate estimates, one column per parameter.
fit.gamma.hypers <- function(cf.dists) {
  apply(cf.dists, 2, function(x) fitdistr(x, "gamma")$estimate)
}

bc.CpipWNV.prior.cf.dists <- get.cf.dists(bc.CpipWNV.prior.out)
bc.CpipWNV.prior.gamma.fits <- fit.gamma.hypers(bc.CpipWNV.prior.cf.dists)

c.CpipWNV.prior.cf.dists <- get.cf.dists(c.CpipWNV.prior.out)
c.CpipWNV.prior.gamma.fits <- fit.gamma.hypers(c.CpipWNV.prior.cf.dists)

b.CtarWNV.prior.cf.dists <- get.cf.dists(b.CtarWNV.prior.out)
b.CtarWNV.prior.gamma.fits <- fit.gamma.hypers(b.CtarWNV.prior.cf.dists)

bc.CuniWNV.prior.cf.dists <- get.cf.dists(bc.CuniWNV.prior.out)
bc.CuniWNV.prior.gamma.fits <- fit.gamma.hypers(bc.CuniWNV.prior.cf.dists)

bc.CtarWEEV.prior.cf.dists <- get.cf.dists(bc.CtarWEEV.prior.out)
bc.CtarWEEV.prior.gamma.fits <- fit.gamma.hypers(bc.CtarWEEV.prior.cf.dists)

c.CtarWEEV.prior.cf.dists <- get.cf.dists(c.CtarWEEV.prior.out)
c.CtarWEEV.prior.gamma.fits <- fit.gamma.hypers(c.CtarWEEV.prior.cf.dists)

b.CtarWEEV.prior.cf.dists <- get.cf.dists(b.CtarWEEV.prior.out)
b.CtarWEEV.prior.gamma.fits <- fit.gamma.hypers(b.CtarWEEV.prior.cf.dists)

c.CtarSLEV.prior.cf.dists <- get.cf.dists(c.CtarSLEV.prior.out)
c.CtarSLEV.prior.gamma.fits <- fit.gamma.hypers(c.CtarSLEV.prior.cf.dists)

b.CtarSLEV.prior.cf.dists <- get.cf.dists(b.CtarSLEV.prior.out)
b.CtarSLEV.prior.gamma.fits <- fit.gamma.hypers(b.CtarSLEV.prior.cf.dists)

c.CpipSINV.prior.cf.dists <- get.cf.dists(c.CpipSINV.prior.out)
c.CpipSINV.prior.gamma.fits <- fit.gamma.hypers(c.CpipSINV.prior.cf.dists)

c.AtaeSINV.prior.cf.dists <- get.cf.dists(c.AtaeSINV.prior.out)
c.AtaeSINV.prior.gamma.fits <- fit.gamma.hypers(c.AtaeSINV.prior.cf.dists)

bc.AtaeRVFV.prior.cf.dists <- get.cf.dists(bc.AtaeRVFV.prior.out)
bc.AtaeRVFV.prior.gamma.fits <- fit.gamma.hypers(bc.AtaeRVFV.prior.cf.dists)

bc.AtriEEEV.prior.cf.dists <- get.cf.dists(bc.AtriEEEV.prior.out)
bc.AtriEEEV.prior.gamma.fits <- fit.gamma.hypers(bc.AtriEEEV.prior.cf.dists)

# Bundle the hyperparameter fits for section 7. The ORDER of this list is a
# contract: the unpacking after load("bchypers.Rsave") indexes it positionally.
bc.hypers <- list(c.CpipWNV.prior.gamma.fits, bc.CpipWNV.prior.gamma.fits,
                  b.CtarWNV.prior.gamma.fits, bc.CuniWNV.prior.gamma.fits,
                  c.CtarWEEV.prior.gamma.fits, b.CtarWEEV.prior.gamma.fits, bc.CtarWEEV.prior.gamma.fits,
                  c.CtarSLEV.prior.gamma.fits, b.CtarSLEV.prior.gamma.fits,
                  bc.AtriEEEV.prior.gamma.fits, bc.AtaeRVFV.prior.gamma.fits,
                  c.AtaeSINV.prior.gamma.fits, c.CpipSINV.prior.gamma.fits)
save(bc.hypers, file = "bchypers.Rsave")
##########
###### 7. Fit bc thermal responses with data-informed priors
##########
# Reload the gamma hyperparameter fits and unpack them into the individual
# *.prior.gamma.fits globals. The name vector below mirrors the positional
# order of the bc.hypers list built in section 6.
load("bchypers.Rsave")
prior.fit.names <- c("c.CpipWNV", "bc.CpipWNV", "b.CtarWNV", "bc.CuniWNV",
                     "c.CtarWEEV", "b.CtarWEEV", "bc.CtarWEEV",
                     "c.CtarSLEV", "b.CtarSLEV", "bc.AtriEEEV",
                     "bc.AtaeRVFV", "c.AtaeSINV", "c.CpipSINV")
for (i in seq_along(prior.fit.names)) {
  assign(paste0(prior.fit.names[i], ".prior.gamma.fits"), bc.hypers[[i]])
}
###################################### b for WNV in Cx tarsalis - quadratic
# NOTE(review): every section below follows the same template, differing only
# in the trait data set, the hyperparameter scaling factor (which weakens or
# strengthens the informative prior), and the JAGS model file ("quad_inf.txt"
# vs. "quadprob_inf.txt"): (1) select data, (2) scale prior hyperparameters,
# (3) bundle inputs, (4) fit with jags(), (5) save and plot the fit,
# (6) report the temperature at which the posterior trait curve peaks.
# Rows 6:(6 + N.Temp.xs - 1) of the BUGS summary hold the fitted curve
# evaluated at the prediction temperatures Temp.xs.
# DIC=T relies on the reassignable shorthand T for TRUE (left unchanged).
##### Set data
data <- data.b.CtarWNV
hypers <- b.CtarWNV.prior.gamma.fits * 1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
b.CtarWNV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarWNV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarWNV.out.inf)
save(b.CtarWNV.out.inf, file = "jagsout_b_CtarWNV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarWNV, ylab = "b for WNV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "A", bty = "n", adj = 1, cex = 1.2)
# Get optimum for b: 26.4 C
Temp.xs[which.max(as.vector(b.CtarWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### bc for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.bc.CtarWEEV
hypers <- bc.CtarWEEV.prior.gamma.fits * 0.5
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
bc.CtarWEEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.CtarWEEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(bc.CtarWEEV.out.inf)
save(bc.CtarWEEV.out.inf, file = "jagsout_bc_CtarWEEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CtarWEEV, ylab = "bc for WEEV in Cx tarsalis", xlab = "Temperature")
lines(bc.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "D", bty = "n", adj = 1, cex = 1.2)
# Get optimum for bc: 21.4 C
Temp.xs[which.max(as.vector(bc.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### c for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.c.CtarWEEV
hypers <- c.CtarWEEV.prior.gamma.fits * .01
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS (quadprob_inf.txt: quadratic fit constrained to the probability
##### scale -- TODO confirm against the model file)
c.CtarWEEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quadprob_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CtarWEEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(c.CtarWEEV.out.inf)
save(c.CtarWEEV.out.inf, file = "jagsout_c_CtarWEEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarWEEV, ylab = "c for WEEV in Cx tarsalis", xlab = "Temperature")
lines(c.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "C", bty = "n", adj = 1, cex = 1.2)
# Get optimum for c: 20.6 C
Temp.xs[which.max(as.vector(c.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"]))] # mean captures it better than median because it collapses at 1
###################################### b for WEEV Cx tarsalis - quadratic
##### Set data
data <- data.b.CtarWEEV
hypers <- b.CtarWEEV.prior.gamma.fits * 0.1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
b.CtarWEEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarWEEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarWEEV.out.inf)
save(b.CtarWEEV.out.inf, file = "jagsout_b_CtarWEEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarWEEV, ylab = "b for WEEV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "B", bty = "n", adj = 1, cex = 1.2)
# Get optimum for b: 21.0 C
Temp.xs[which.max(as.vector(b.CtarWEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### c for SLEV Cx tarsalis - quadratic
##### Set data
data <- data.c.CtarSLEV
hypers <- c.CtarSLEV.prior.gamma.fits * 0.01
# Third hyperparameter column gets a different (weaker) scaling than the rest.
hypers[,3] <- c.CtarSLEV.prior.gamma.fits[,3] * 0.1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
c.CtarSLEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CtarSLEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(c.CtarSLEV.out.inf)
save(c.CtarSLEV.out.inf, file = "jagsout_c_CtarSLEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarSLEV, ylab = "c for SLEV in Cx tarsalis", xlab = "Temperature")
lines(c.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "E", bty = "n", adj = 1, cex = 1.2)
# Get optimum for c: 26.2 C
Temp.xs[which.max(as.vector(c.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### b for SLEV Cx tarsalis - quadratic
##### Set data
data <- data.b.CtarSLEV
hypers <- b.CtarSLEV.prior.gamma.fits * 0.5
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
b.CtarSLEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
b.CtarSLEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(b.CtarSLEV.out.inf)
save(b.CtarSLEV.out.inf, file = "jagsout_b_CtarSLEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarSLEV, ylab = "b for SLEV in Cx tarsalis", xlab = "Temperature")
lines(b.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(b.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "F", bty = "n", adj = 1, cex = 1.2)
mtext(text = expression(paste("Temperature (",degree,"C)")), side = 1, line = 3, cex = 0.9)
# Get optimum for b: 26.2 C
Temp.xs[which.max(as.vector(b.CtarSLEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### bc for WNV in Cx. pipiens - quadratic
##### Set data
data <- data.bc.CpipWNV
hypers <- bc.CpipWNV.prior.gamma.fits * .5
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
bc.CpipWNV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.CpipWNV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(bc.CpipWNV.out.inf)
save(bc.CpipWNV.out.inf, file = "jagsout_bc_CpipWNV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CpipWNV, ylab = "bc for WNV in Cx. pipiens", xlab = "Temperature")
lines(bc.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "B", bty = "n", adj = 1, cex = 1.2)
# Get optimum for bc: 27.9 C
Temp.xs[which.max(as.vector(bc.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### c for WNV in Cx. pipiens - quadratic
##### Set data
data <- data.c.CpipWNV
hypers <- c.CpipWNV.prior.gamma.fits * 1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
c.CpipWNV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CpipWNV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(c.CpipWNV.out.inf)
save(c.CpipWNV.out.inf, file = "jagsout_c_CpipWNV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CpipWNV, ylab = "c for WNV Cx pipiens", xlab = "Temperature")
lines(c.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "A", bty = "n", adj = 1, cex = 1.2)
# Get optimum for c: 33.6 C
Temp.xs[which.max(as.vector(c.CpipWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### bc for WNV in Cx. univittatus - quadratic
##### Set data
data <- data.bc.CuniWNV
hypers <- bc.CuniWNV.prior.gamma.fits * .01
# BUG FIX: this row previously scaled c.CtarSLEV.prior.gamma.fits[,3] -- a
# copy-paste left over from the SLEV section above -- so the Cx. univittatus
# fit was run with a prior column from a different vector/virus pair. It now
# scales this trait's own prior fits. NOTE(review): confirm against the
# original analysis before re-running.
hypers[,3] <- bc.CuniWNV.prior.gamma.fits[,3] * 0.1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS (quadprob_inf.txt: quadratic model on the probability scale)
bc.CuniWNV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quadprob_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.CuniWNV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(bc.CuniWNV.out.inf)
save(bc.CuniWNV.out.inf, file = "jagsout_bc_CuniWNV_inf.Rdata")
# Plot data + fit (rows 6:(6 + N.Temp.xs - 1) hold the fitted curve at Temp.xs)
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CuniWNV, ylab = "bc for WNV in Cx. univittatus", xlab = "Temperature")
lines(bc.CuniWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.CuniWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.CuniWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "C", bty = "n", adj = 1, cex = 1.2)
# Get optimum for bc: 24.6 C
Temp.xs[which.max(as.vector(bc.CuniWNV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### bc for RVFV in Ae. taeniorhynchus - quadratic
# NOTE(review): same fit template as the sections above (select data, scale
# prior hyperparameters, bundle for JAGS, fit, save/plot, report optimum).
##### Set data
data <- data.bc.AtaeRVFV
hypers <- bc.AtaeRVFV.prior.gamma.fits * 2
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
bc.AtaeRVFV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quadprob_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.AtaeRVFV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(bc.AtaeRVFV.out.inf)
save(bc.AtaeRVFV.out.inf, file = "jagsout_bc_AtaeRVFV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.AtaeRVFV, ylab = "bc for RVFV Ae taeniorynchus", xlab = "Temperature")
lines(bc.AtaeRVFV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.AtaeRVFV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.AtaeRVFV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "G", bty = "n", adj = 1, cex = 1.2)
mtext(text = expression(paste("Temperature (",degree,"C)")), side = 1, line = 3, cex = 0.9)
# Get optimum for bc: 24.7 C
Temp.xs[which.max(as.vector(bc.AtaeRVFV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### c for SINV in Ae. taeniorhynchus - quadratic
##### Set data
data <- data.c.AtaeSINV
hypers <- c.AtaeSINV.prior.gamma.fits * .1
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
c.AtaeSINV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.AtaeSINV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(c.AtaeSINV.out.inf)
save(c.AtaeSINV.out.inf, file = "jagsout_c_AtaeSINV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.AtaeSINV, ylab = "c for SINV in Ae. taeniorhynchus", xlab = "Temperature")
lines(c.AtaeSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.AtaeSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.AtaeSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "F", bty = "n", adj = 1, cex = 1.2)
# Get optimum for c: 25.2 C
Temp.xs[which.max(as.vector(c.AtaeSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### c for SINV in Cx. pipiens - quadratic
##### Set data
data <- data.c.CpipSINV
hypers <- c.CpipSINV.prior.gamma.fits * 0.01
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
c.CpipSINV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
c.CpipSINV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(c.CpipSINV.out.inf)
save(c.CpipSINV.out.inf, file = "jagsout_c_CpipSINV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CpipSINV, ylab = "c for SINV in Cx. pipiens", xlab = "Temperature")
lines(c.CpipSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(c.CpipSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(c.CpipSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
#lines(c.CpipSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"] ~ Temp.xs, col = "red")
legend("topleft", legend = "E", bty = "n", adj = 1, cex = 1.2)
# Get optimum for c: 17.2 C
Temp.xs[which.max(as.vector(c.CpipSINV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
###################################### bc for EEEV in Ae. triseriatus - quadratic
##### Set data
data <- data.bc.AtriEEEV
hypers <- bc.AtriEEEV.prior.gamma.fits * 3
# Third hyperparameter column gets a much weaker scaling than the rest.
hypers[,3] <- bc.AtriEEEV.prior.gamma.fits[,3] * .01
##### Organize Data for JAGS
trait <- data$trait
N.obs <- length(trait)
temp <- data$T
##### Bundle Data
jag.data<-list(trait = trait, N.obs = N.obs, temp = temp, Temp.xs = Temp.xs, N.Temp.xs = N.Temp.xs, hypers = hypers)
##### Run JAGS
bc.AtriEEEV.out.inf <- jags(data=jag.data, inits=inits, parameters.to.save=parameters, model.file="quad_inf.txt",
n.thin=nt, n.chains=nc, n.burnin=nb, n.iter=ni, DIC=T, working.directory=getwd())
##### Examine Output
bc.AtriEEEV.out.inf$BUGSoutput$summary[1:5,]
mcmcplot(bc.AtriEEEV.out.inf)
save(bc.AtriEEEV.out.inf, file = "jagsout_bc_AtriEEEV_inf.Rdata")
# Plot data + fit
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.AtriEEEV, ylab = "bc for EEEV in Ae. triseriatus", xlab = "Temperature")
lines(bc.AtriEEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.AtriEEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.AtriEEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
legend("topleft", legend = "D", bty = "n", adj = 1, cex = 1.2)
# Get optimum for bc: 28.8 C
Temp.xs[which.max(as.vector(bc.AtriEEEV.out.inf$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "50%"]))]
##########
###### 8. Calculate Treatment averages for plotting
##########
# Function to trait averages for plotting only (JAGS models fit to raw data)
# Per-temperature summary statistics of a trait data set (for plotting only;
# the JAGS models are fit to the raw data).
#
# Args:
#   data.set: data frame with a temperature column `T` and a `trait` column.
# Returns:
#   Data frame with one row per unique temperature (in order of first
#   appearance) and columns Temp, mean, sd, SE, n, upper, lower, where
#   upper/lower are mean +/- one standard error. For a temperature with a
#   single observation, sd/SE/upper/lower are NA (sd of one value is NA).
CalcTraitAvg <- function(data.set) {
  temp.list <- unique(data.set$T)
  k <- length(temp.list)
  out <- data.frame(Temp = numeric(k), mean = numeric(k), sd = numeric(k),
                    SE = numeric(k), n = numeric(k),
                    upper = numeric(k), lower = numeric(k))
  for (i in seq_len(k)) {
    vals <- data.set$trait[data.set$T == temp.list[i]]
    # Compute each statistic once and reuse it (the original recomputed
    # mean/sd/n for every derived column).
    m <- mean(vals)
    s <- sd(vals)
    n.obs <- length(vals)
    se <- s / sqrt(n.obs)
    out$Temp[i] <- temp.list[i]
    out$mean[i] <- m
    out$sd[i] <- s
    out$n[i] <- n.obs
    out$SE[i] <- se
    out$lower[i] <- m - se
    out$upper[i] <- m + se
  }
  out
}
# Treatment-level averages of each trait data set (for plotting only).
CpipWNV.c.avg <- CalcTraitAvg(data.c.CpipWNV)
CpipWNV.bc.avg <- CalcTraitAvg(data.bc.CpipWNV)
# FIX: CtarWNV.c.avg was plotted below but never computed here; every other
# plotted average has a matching CalcTraitAvg() call, so add the missing one.
# (data.c.CtarWNV is created earlier in the script -- it is plotted directly
# in the figure section below -- TODO confirm.)
CtarWNV.c.avg <- CalcTraitAvg(data.c.CtarWNV)
CtarWNV.b.avg <- CalcTraitAvg(data.b.CtarWNV)
CuniWNV.bc.avg <- CalcTraitAvg(data.bc.CuniWNV)
CtarWEEV.c.avg <- CalcTraitAvg(data.c.CtarWEEV)
CtarWEEV.b.avg <- CalcTraitAvg(data.b.CtarWEEV)
CtarWEEV.bc.avg <- CalcTraitAvg(data.bc.CtarWEEV)
CtarSLEV.c.avg <- CalcTraitAvg(data.c.CtarSLEV)
CtarSLEV.b.avg <- CalcTraitAvg(data.b.CtarSLEV)
CpipSINV.c.avg <- CalcTraitAvg(data.c.CpipSINV)
AtaeSINV.c.avg <- CalcTraitAvg(data.c.AtaeSINV)
AtaeRVFV.bc.avg <- CalcTraitAvg(data.bc.AtaeRVFV)
AtriEEEV.bc.avg <- CalcTraitAvg(data.bc.AtriEEEV)
# Quick-look plots of the averaged traits.
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CpipWNV.c.avg, ylab = "c for WNV in Cpip", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CpipWNV.bc.avg, ylab = "bc for WNv in Cpip", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarWNV.c.avg, ylab = "c for WNV in Cpip", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarWNV.b.avg, ylab = "b for WNV in Cpip", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CuniWNV.bc.avg, ylab = "bc for WNV in Cuni", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarWEEV.c.avg, ylab = "c for WEEV in Ctar", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarWEEV.b.avg, ylab = "b for WEEV in Ctar", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarWEEV.bc.avg, ylab = "bc for WEEV in Ctar", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarSLEV.c.avg, ylab = "c for SLEV in Ctar", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CtarSLEV.b.avg, ylab = "b for SLEV in Ctar", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = CpipSINV.c.avg, ylab = "c for SINV in Cpip", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = AtaeSINV.c.avg, ylab = "c for SINV in Atae", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = AtaeRVFV.bc.avg, ylab = "bc for RVFV in Atae", xlab = "Temperature")
plot(mean ~ Temp, xlim = c(5, 45), ylim = c(0,1), data = AtriEEEV.bc.avg, ylab = "bc for EEEV in Atri", xlab = "Temperature")
##########
###### 9. Plot Preliminary Figures
##########
# Overlay of bc data across Culex species/strains with selected posterior fits.
# NOTE(review): the fitted lines below come from the non-informative `.out`
# objects (assumed to be created earlier in the file -- TODO confirm), and the
# line colours do not correspond one-to-one with the point colours above;
# verify the intended pairing before publishing this figure.
plot(trait ~ T, xlim = c(5, 45), data = data.bc, ylab = "bc for Cx. mosquitoes", xlab = "Temperature")
points(trait ~ T, data = data.bc.Cpipmol, col = "blue")
points(trait ~ T, data = data.bc.Cpippip, col = "dodgerblue")
points(trait ~ T, data = data.bc.Cqui, col = "red")
points(trait ~ T, data = data.bc.Ctar, col = "darkgreen")
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "dodgerblue")
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "dodgerblue")
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, col = "dodgerblue")
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "blue")
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "blue")
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, col = "blue")
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs)
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red")
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red")
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, col = "red")
lines(bc.CquiWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "darkgreen")
lines(bc.CquiWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "darkgreen")
lines(bc.CquiWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, col = "darkgreen")
################### Figures with original data points
# 3x3 panel figure of raw trait data overlaid with (non-informative) posterior
# fits. FIXES: (1) every panel's legend letter was "A" (copy-paste); letters
# now run A-I in plotting order, matching the sequential lettering used in the
# earlier figure sections. (2) The RVFV panel title named Cx. pipiens although
# the plotted data are for Ae. taeniorhynchus; the title now matches the data.
par(mfrow = c(3,3), mar = c(3, 4.5, 2, 1), oma = c(2, 0, 0, 0))
##### c for WNV in Cx. pipiens
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CpipWNV, xaxt = "n", pch = 19,
ylab = "Infection Probability (c)", xlab = "", main = expression(paste("WNV in ",italic(Cx.)," ",italic(pipiens))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(c.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(c.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(c.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "A", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### b for WNV in Cx. pipiens
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CpipWNV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (b)", xlab = "", main = expression(paste("WNV in ",italic(Cx.)," ",italic(pipiens))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(b.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(b.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(b.CpipWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "B", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### c for WNV in Cx. tarsalis
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.c.CtarWNV, xaxt = "n", pch = 19,
ylab = "Infection Probability (c)", xlab = "", main = expression(paste("WNV in ",italic(Cx.)," ",italic(tarsalis))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(c.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "C", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### b for WNV in Cx. tarsalis
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.b.CtarWNV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (b)", xlab = "", main = expression(paste("WNV in ",italic(Cx.)," ",italic(tarsalis))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(b.CtarWNV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "D", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### bc for WEEV in Cx. tarsalis
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CtarWEEV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (bc)", xlab = "", main = expression(paste("WEEV in ",italic(Cx.)," ",italic(tarsalis))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.CtarWEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "E", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### bc for SLEV in Cx. tarsalis
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CtarSLEV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (bc)", xlab = "", main = expression(paste("SLEV in ",italic(Cx.)," ",italic(tarsalis))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.CtarSLEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "F", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### bc for RVFV in Ae. taeniorhynchus
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.AtaeRVFV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (bc)", xlab = "", main = expression(paste("RVFV in ",italic(Ae.)," ",italic(taeniorhynchus))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(bc.AtaeRVFV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.AtaeRVFV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.AtaeRVFV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "G", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### bc for EEEV in Ae. triseriatus
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.AtriEEEV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (bc)", xlab = "", main = expression(paste("EEEV in ",italic(Ae.)," ",italic(triseriatus))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
lines(bc.AtriEEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "2.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.AtriEEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "97.5%"] ~ Temp.xs, lty = 2, col = "red", lwd = 1.5)
lines(bc.AtriEEEV.out$BUGSoutput$summary[6:(6 + N.Temp.xs - 1), "mean"] ~ Temp.xs, lwd = 1.5)
legend("topleft", legend = "H", bty= "n", cex = 1.6, adj = c(1.5, 0))
##### bc for SINV in Cx. pipiens
# NOTE(review): this panel plots data only (no fitted lines) -- presumably no
# model was fit for this trait; confirm data.bc.CpipSINV is defined upstream.
plot(trait ~ T, xlim = c(5, 45), ylim = c(0,1), data = data.bc.CpipSINV, xaxt = "n", pch = 19,
ylab = "Transmission Probability (bc)", xlab = "", main = expression(paste("SINV in ",italic(Cx.)," ",italic(pipiens))), cex.lab = 1.15)
axis(1, at = seq(5, 45, 5))
legend("topleft", legend = "I", bty= "n", cex = 1.6, adj = c(1.5, 0))
mtext(expression(paste("Temperature (",degree,"C)")), side = 1, line = 3, las = 1, cex = 0.9)
|
27f95e5ea5decf42f10b8b34cb3178af15fd3819
|
73252de538ec2f706a9bd6d9ec798dfc398226fe
|
/model/website_model.r
|
4adbe17d3a407e4024c72e7d395b99bb9b3cffeb
|
[] |
no_license
|
fergusKe/infoHero_heatmap
|
e3354d2f2928f4f3773e3adcb3c8de84fbb59400
|
b59f026e42073ae2fcb31b2062273e780789a557
|
refs/heads/master
| 2020-12-24T09:23:58.575470
| 2017-03-06T18:02:41
| 2017-03-06T18:02:41
| 73,292,897
| 0
| 1
| null | 2017-01-10T11:13:57
| 2016-11-09T14:49:38
|
JavaScript
|
UTF-8
|
R
| false
| false
| 2,762
|
r
|
website_model.r
|
#!/usr/bin/env Rscript
# Prediction script: loads pre-trained random-forest / decision-tree models
# and lookup tables, reads an uploaded CSV, preprocesses it to match the
# training features, and writes out per-row risk predictions.
library(rpart)
library(randomForest)
library(randomForestSRC)
# Command-line arguments: [1] file name (without .csv), [2] script directory.
args <- commandArgs(trailingOnly=TRUE)
if( length(args) == 0 ){
stop( "必須輸入處理的檔案名稱!", call.=FALSE )
}
filename <- args[1]
# "/Users/brianpan/Desktop/infoHero_heatmap/model/"
current_path <- args[2]
# random forest: feature list and two fitted models (randomForest + SRC)
rf_feature_rdata <- paste(current_path, "rdata/rf_features.RData", sep="")
load(rf_feature_rdata)
rf_model_rdata <- paste(current_path, "rdata/rf_model.RData", sep="")
load(rf_model_rdata)
rf_src_model_rdata <- paste(current_path, "rdata/rf_src_model.RData", sep="")
load(rf_src_model_rdata)
# decision tree (loaded but apparently unused below -- TODO confirm)
dt_model_rdata <- paste(current_path, "rdata/dt_model.RData", sep="")
load(dt_model_rdata)
# data preprocess
# test
target_file <- paste(current_path, "../uploads/", filename, ".csv", sep="")
dataframe <- read.csv(target_file)
# Keep an untouched copy so predictions can be appended to the original rows.
original <- dataframe
# preprocess: derive indicator columns and a summed feature, mirroring training
dataframe <- transform( dataframe, OCCUPATION.無工作=(OCCUPATION == "無工作") )
dataframe <- transform( dataframe, OCCUPATION.不詳=(OCCUPATION == "不詳") )
dataframe <- transform( dataframe, X1.4.5.6=(X1+X4+X5+X6))
# edu hash: label -> numeric code lookup table (defines edu_hash)
edu_hash_file <- paste(current_path, "rdata/edu.RData", sep="")
load(edu_hash_file)
# MAIMED hash: label -> numeric code lookup table (defines maimed_hash)
maimed_hash_file <- paste(current_path, "rdata/maimed.RData", sep="")
load(maimed_hash_file)
# Map a raw education label to its numeric code via the loaded edu_hash
# lookup table; an empty label or "不詳" (unknown) maps to NA.
edu_match <- function(x){
  if (x == "" || x == "不詳") {
    return(NA)
  }
  edu_hash[[x]]
}
# Map a raw disability label to its numeric code via the loaded maimed_hash
# lookup table; an empty label maps to NA.
maimed_match <- function(x){
  if (x == "") {
    return(NA)
  }
  maimed_hash[[x]]
}
# Rebuild the training feature frame so its factor levels can be combined
# with the uploaded data below.
train_data <- read.csv( paste(current_path, "sample.csv", sep="") )
train_data <- na.omit(train_data)
train_data <- transform(train_data, OCCUPATION.無工作=(OCCUPATION=="無工作"))
train_data <- transform(train_data, OCCUPATION.不詳=(OCCUPATION=="不詳"))
train_data$EDUCATION <- factor(train_data$EDUCATION)
train_data$MAIMED <- factor(train_data$MAIMED)
train_data <- subset(train_data, select=rf_predictors)
# Recode the uploaded data's EDUCATION/MAIMED labels to the numeric codes
# used at training time (unknown/empty labels become NA).
levels(dataframe$EDUCATION) <- sapply( levels(dataframe$EDUCATION), edu_match )
dataframe$EDUCATION <- factor(dataframe$EDUCATION)
levels(dataframe$MAIMED) <- sapply( levels(dataframe$MAIMED), maimed_match )
dataframe$MAIMED <- factor(dataframe$MAIMED)
dataframe <- subset(dataframe, select=rf_predictors)
# output result
# Merge test data with training data first so every factor level seen in
# training also exists in the prediction frame.
new <- rbind( dataframe, train_data )
full_result <- predict( model_rf, new )
# Keep only the predictions for the actual uploaded rows (they come first).
test_dim <- dim(dataframe)[1]
# NOTE(review): rf_test_predict is computed but never written to the output;
# only the randomForestSRC predictions are saved -- confirm this is intended.
rf_test_predict <- round( full_result[1:test_dim] )
rf_src_predict <- round( predict(model_rf_src, new)$predicted )
rf_src_predict <- rf_src_predict[1:test_dim]
# Append the rounded SRC risk score to the untouched original rows.
original$風險指數 <- rf_src_predict
# save
dist_file <- paste( current_path, "../outputs/", filename, "-predicted", ".csv", sep="" )
write.csv(original, dist_file)
|
551d96f230398f1988e4f440d5674d4668584b25
|
3f57260050ef3f71e931f30781f8cf3168ec8c75
|
/Archive/analysis_main.R
|
442affdac4682b9b81fccae86518e541e521441f
|
[] |
no_license
|
LeonardoViotti/pmrj
|
f83271c05ffd931816d571a9d3315d0d6f6caa17
|
b5d2610d0c29397cdac0066a32893a74ba9085a7
|
refs/heads/master
| 2022-07-14T09:49:53.826005
| 2020-11-18T15:01:16
| 2020-11-18T15:01:16
| 184,453,181
| 0
| 0
| null | 2019-07-31T03:16:34
| 2019-05-01T17:21:39
|
Stata
|
UTF-8
|
R
| false
| false
| 16,485
|
r
|
analysis_main.R
|
#------------------------------------------------------------------------------#
# SIM - Main Regressions
#------------------------------------------------------------------------------#
# These are all defined in MASTER.R, only use to explicitly overwrite master.
# NOTE(review): uses the reassignable shorthand F for FALSE and `=` for
# top-level assignment (house style elsewhere in the repo is <- / FALSE).
OVERWRITE_MASTER_SWITCHES = F
if(OVERWRITE_MASTER_SWITCHES){
EXPORT_data = F
EXPORT_plots = F
EXPORT_tables = F
}
#------------------------------------------------------------------------------#
#### Load data ####
# Loading data into a new object to be processed
sr <- final_data
# Keep same sample for all models, i.e from 2010 onwards because of IV
# (sem_year is assumed to encode semester-year with 100 ~ 2010 -- TODO confirm)
sr <- sr[sem_year > 100,]
### OLS formulas ####
# right hand side without FE
rFormula <- paste(indepVars, collapse = " + ")
rFormula_iv <- paste(indepVars[-1], collapse = " + ")
# Add FE, cluster and instruments
# clusterVars = c("latitude", "longitude" )
#clusterVars = c("aisp" )
clusterVars= "0"
clusterVars_form <- paste(clusterVars, collapse = " + ")
FeForumala1 <- paste(FEVars[1:3], collapse = " + ")
config1 <- paste("|", FeForumala1, "| 0 | ", clusterVars_form )
FeForumala2 <- paste(FEVars, collapse = " + ") # with cmd FE
config2 <- paste("|", FeForumala2, "| 0 | ", clusterVars_form)
# IV formula
first_stage_left <- "on_target"
first_stage_right <- paste(ZVars, collapse = " + ")
formula_1st <- paste("(", first_stage_left, " ~ ", first_stage_right, " )")
config_iv <- paste("|", FeForumala2, "|" , formula_1st, "| ", clusterVars_form)
#### Final formulas
Formulas01_str <- paste(depVars, paste(rFormula, config1), sep = " ~ ")
Formulas02_str <- paste(depVars, paste(rFormula, config2), sep = " ~ ")
FormulasIV_str <- paste(depVars, paste(rFormula_iv, config_iv), sep = " ~ ")
# So it's easier to refernce to elements
names(Formulas01_str) <- depVars
names(Formulas02_str) <- depVars
names(FormulasIV_str) <- depVars
#rFormulaFE <- paste0("factor(",FEVars,")")
# rFormula1 <- paste(c(indepVars, rFormulaFE[1:2]), collapse = " + ")
# rFormula2 <- paste(c(indepVars, rFormulaFE), collapse = " + ")
#------------------------------------------------------------------------------#
#### Poisson formulas ####
form1 <- vehicle_robbery ~ on_target +
factor(year) + factor( month) + factor(aisp) +factor(id_cmt) +
policemen_aisp + policemen_upp + n_precinct+ offset(log(population))
# Remove max prize for some reason
poisson_indepVars <- indepVars[!(names(indepVars) %in% c("max_prize", "population"))]
# Fixed effects
sFormulaFE_poi <- paste(paste0("factor(",FEVars,")"), collapse = " + ")
# Construct right hand sied fo eq.
rFormula_poi_0 <- paste(poisson_indepVars, collapse = " + ")
# Add FEs
rFormula_poi_1 <- paste(rFormula_poi_0, "+", sFormulaFE_poi)
# Add Exposure variable
# Exposure Variable
exposure_variable <- "population"
paste0(" offset(log(", exposure_variable, ")")
rFormula_poi <- paste(rFormula_poi_1,
"+",
paste0(" offset(log(",
exposure_variable,
"))"
)
)
# Final formula
Formulas_poi_str <- paste(depVars, rFormula_poi, sep = " ~ ")
names(Formulas_poi_str) <- depVars
#------------------------------------------------------------------------------#
#### OLS models ####
# Original regressions and Consley SEs
## Fit one fixed-effects (felm) regression from a formula string.
##
## Args:
##   form: character string in lfe::felm multi-part syntax
##         "y ~ x | FE | IV | cluster".
##   data: data set to fit on; defaults to the global analysis panel `sr`.
##
## Returns: the fitted felm object. keepCX = TRUE keeps the centered design
## matrix, which the downstream regData() helper relies on.
feRegSim <- function(form, data = sr){
  form <- as.formula(form)
  #model <- felm(form, data = sr[year_month > 200906 & year_month < 201501,], keepCX = T)
  model <- felm(form, data = data, keepCX = T)
  # For 2SLS fits felm stores the instrumented regressor under a back-ticked
  # "fitted" name (e.g. "`on_target(fit)`"); rename it back to "on_target"
  # in coefficients/beta/cX so OLS and IV columns line up in export tables.
  # NOTE(review): assumes the grep pattern "`on_" matches only the
  # endogenous regressor — confirm if more endogenous variables are added.
  if (!is.null(model$endovars)){
    rownames(model$coefficients)[grep("`on_", rownames(model$coefficients))] <- "on_target"
    rownames(model$beta)[grep("`on_", rownames(model$beta))] <- "on_target"
    colnames(model$cX)[grep("`on_", colnames(model$cX))] <- "on_target"
  }
  # Return regression object
  return(model)
}
### Model 1 whithout cmnd FE
# Tabble 2
r_vd_01 <- feRegSim(Formulas01_str["violent_death_sim"])
r_vd_01_data <- regData(r_vd_01, regdf = sr)
r_vr_01 <- feRegSim(Formulas01_str["vehicle_robbery"])
r_vr_01_data <- regData(r_vr_01, regdf = sr)
r_rr_01 <- feRegSim(Formulas01_str["street_robbery"])
r_rr_01_data <- regData(r_rr_01, regdf = sr)
r_hm_01 <- feRegSim(Formulas01_str["homicide"])
r_hm_01_data <- regData(r_hm_01, regdf = sr)
r_pk_01 <- feRegSim(Formulas01_str["dpolice_killing"])
r_pk_01_data <- regData(r_pk_01, regdf = sr)
# Table 3 - Gaming
g_cf_01 <- feRegSim(Formulas01_str["dbody_found"])
g_cf_01_data <- regData(g_cf_01, regdf = sr)
g_vt_01 <- feRegSim(Formulas01_str["vehicle_theft"])
g_vt_01_data <- regData(g_vt_01, regdf = sr)
g_st_01 <- feRegSim(Formulas01_str["street_theft"])
g_st_01_data <- regData(g_st_01, regdf = sr)
# Table 4 - Spillovers
s_or_01 <- feRegSim(Formulas01_str["other_robberies"])
s_or_01_data <- regData(s_or_01, regdf = sr)
s_cr_01 <- feRegSim(Formulas01_str["cargo_robbery"])
s_cr_01_data <- regData(s_cr_01, regdf = sr)
s_bu_01 <- feRegSim(Formulas01_str["burglary"])
s_bu_01_data <- regData(s_bu_01, regdf = sr)
s_sr_01 <- feRegSim(Formulas01_str["store_robbery"])
s_sr_01_data <- regData(s_sr_01, regdf = sr)
### Model 2 whith cmnd FE
# Tabble 2
r_vd_02 <- feRegSim(Formulas02_str["violent_death_sim"])
r_vd_02_data <- regData(r_vd_02, regdf = sr)
r_vr_02 <- feRegSim(Formulas02_str["vehicle_robbery"])
r_vr_02_data <- regData(r_vr_02, regdf = sr)
r_rr_02 <- feRegSim(Formulas02_str["street_robbery"])
r_rr_02_data <- regData(r_rr_02, regdf = sr)
r_hm_02 <- feRegSim(Formulas02_str["homicide"])
r_hm_02_data <- regData(r_hm_02, regdf = sr)
r_pk_02 <- feRegSim(Formulas02_str["dpolice_killing"])
r_pk_02_data <- regData(r_pk_02, regdf = sr)
# Table 3 - Gaming
g_cf_02 <- feRegSim(Formulas02_str["dbody_found"])
g_cf_02_data <- regData(g_cf_02, regdf = sr)
g_vt_02 <- feRegSim(Formulas02_str["vehicle_theft"])
g_vt_02_data <- regData(g_vt_02, regdf = sr)
g_st_02 <- feRegSim(Formulas02_str["street_theft"])
g_st_02_data <- regData(g_st_02, regdf = sr)
# Table 4 - Spillovers
s_or_02 <- feRegSim(Formulas02_str["other_robberies"])
s_or_02_data <- regData(s_or_02, regdf = sr)
s_cr_02 <- feRegSim(Formulas02_str["cargo_robbery"])
s_cr_02_data <- regData(s_cr_02, regdf = sr)
s_bu_02 <- feRegSim(Formulas02_str["burglary"])
s_bu_02_data <- regData(s_bu_02, regdf = sr)
s_sr_02 <- feRegSim(Formulas02_str["store_robbery"])
s_sr_02_data <- regData(s_sr_02, regdf = sr)
#### Model 3 2SLS
# Tabble 2
r_vd_IV <- feRegSim(FormulasIV_str["violent_death_sim"])
r_vd_IV_data <- regData(r_vd_IV, regdf = sr)
r_vr_IV <- feRegSim(FormulasIV_str["vehicle_robbery"])
r_vr_IV_data <- regData(r_vr_IV, regdf = sr)
r_rr_IV <- feRegSim(FormulasIV_str["street_robbery"])
r_rr_IV_data <- regData(r_rr_IV, regdf = sr)
r_hm_IV <- feRegSim(FormulasIV_str["homicide"])
r_hm_IV_data <- regData(r_hm_IV, regdf = sr)
r_pk_IV <- feRegSim(FormulasIV_str["dpolice_killing"])
r_pk_IV_data <- regData(r_pk_IV, regdf = sr)
# Table 3 - Gaming
g_cf_IV <- feRegSim(FormulasIV_str["dbody_found"])
g_cf_IV_data <- regData(g_cf_IV, regdf = sr)
g_vt_IV <- feRegSim(FormulasIV_str["vehicle_theft"])
g_vt_IV_data <- regData(g_vt_IV, regdf = sr)
g_st_IV <- feRegSim(FormulasIV_str["street_theft"])
g_st_IV_data <- regData(g_st_IV, regdf = sr)
# Table 4 - Spillovers
s_or_IV <- feRegSim(FormulasIV_str["other_robberies"])
s_or_IV_data <- regData(s_or_IV, regdf = sr)
s_cr_IV <- feRegSim(FormulasIV_str["cargo_robbery"])
s_cr_IV_data <- regData(s_cr_IV, regdf = sr)
s_bu_IV <- feRegSim(FormulasIV_str["burglary"])
s_bu_IV_data <- regData(s_bu_IV, regdf = sr)
s_sr_IV <- feRegSim(FormulasIV_str["store_robbery"])
s_sr_IV1_data <- regData(s_sr_IV, regdf = sr)
#------------------------------------------------------------------------------#
#### Poisson models ####
# Fit a Poisson GLM for one formula string on the analysis panel `sr`
# (global; assumed to be in scope at call time). Returns the fitted glm.
RegPoisson <- function(form){
  fitted_model <- glm(formula = as.formula(form),
                      family = poisson,
                      data = sr)
  fitted_model
}
p_vd <- RegPoisson(Formulas_poi_str["violent_death_sim"])
p_vd_data <- regData(p_vd, regdf = sr)
p_vr <- RegPoisson(Formulas_poi_str["vehicle_robbery"])
p_vr_data <- regData(p_vr, regdf = sr)
p_rr <- RegPoisson(Formulas_poi_str["street_robbery"])
p_rr_data <- regData(p_rr, regdf = sr)
#------------------------------------------------------------------------------#
##### Export ####
#### Define commun elements
n_aisp_line_3 <- c("Number of aisp", rep("39", 3))
n_aisp_line_9 <- c("Number of aisp", rep("39", 9))
n_aisp_line_12 <- c("Number of aisp", rep("39", 12))
chifeFE_line_3 <- c("Chief FE", rep(c( "No", "Yes", "Yes"), 1))
chifeFE_line_9 <- c("Chief FE", rep(c( "No", "Yes", "Yes"), 3))
chifeFE_line_12 <- c("Chief FE", rep(c( "No", "Yes", "Yes"), 4))
indepVar_label <- "On target"
col_labels_9 <- rep(c("OLS", "OLS", "2SLS"), 3)
col_labels_12 <- rep(c("OLS", "OLS", "2SLS"), 4)
#### Define formatting functions
# Function to find dep var means of regressions
# Mean of the dependent variable over the estimation sample of model `x`.
# Uses the project helpers regData()/regDepVars() and the global panel `sr`.
Ymean <- function(x){
  est_sample <- regData(x, sr)
  dep_var <- regDepVars(x)
  mean(est_sample[[dep_var]])
}
# Function to create the row for regression tables
# Build the "Y mean" add-on row for the stargazer tables: the row label
# followed by the rounded dependent-variable mean of each regression.
Ymean_row <- function(list){
  means <- vapply(list, Ymean, numeric(1))
  c("Y mean", round(means, 2))
}
# Export function
# Export a list of regressions as a stargazer HTML table, keeping only the
# "on_target" coefficient.
#
# Args:
#   reg_list:       list of fitted model objects (felm/glm).
#   add_lines_list: list of extra table rows (chief FE, Y mean, N aisp, ...).
#   title:          table title.
#   dep_var_labels: labels for the dependent-variable header.
#   outPath:        path of the HTML file to write.
#   col_labels:     per-column estimator labels. Defaults to the global
#                   col_labels_9 (OLS/OLS/2SLS x 3) for backward
#                   compatibility; pass col_labels_12 for 12-column tables
#                   (previously those silently reused the 9-column labels).
createTable <- function(reg_list,
                        add_lines_list,
                        title,
                        dep_var_labels,
                        outPath,
                        col_labels = col_labels_9){
  stargazer(reg_list,
            keep = ("on_target"),
            covariate.labels = "On target",
            dep.var.labels = dep_var_labels,
            title = title,
            dep.var.caption = "Number of occurrences",
            column.labels = col_labels,
            add.lines = add_lines_list,
            digits = 3,
            omit.stat = c("rsq","ser", "f"),
            out = outPath,
            type = "html"
  )
}
# Table 2
tab2_regs <-
list(r_vd_01,
r_vd_02,
r_vd_IV,
r_vr_01,
r_vr_02,
r_vr_IV,
r_rr_01,
r_rr_02,
r_rr_IV)
tab2_addLines <- list(chifeFE_line_9,
Ymean_row(tab2_regs),
n_aisp_line_9)
createTable(reg_list = tab2_regs,
add_lines_list = tab2_addLines,
dep_var_labels = c("Violent deaths",
"Vehicle robbery (Carjacking)",
"Street robbery"),
title = "Table 2 – Effect of expectancy of receiving bonuses on crime rates",
outPath = file.path(OUTPUTS_final, "tab2.html"))
# Table 3
tab3_regs <-
list(g_cf_01,
g_cf_02,
g_cf_IV,
g_vt_01,
g_vt_02,
g_vt_IV,
g_st_01,
g_st_02,
g_st_IV)
tab3_addLines <- list(chifeFE_line_9,
Ymean_row(tab3_regs),
n_aisp_line_9)
createTable(reg_list = tab3_regs,
add_lines_list = tab3_addLines,
dep_var_labels = c("Cadavers Found (dummy)",
"Car theft",
"Street theft"),
title = "Table A1 – Expectancy of receiving bonuses and gaming",
outPath = file.path(OUTPUTS_final, "tabA1.html"))
# Table 4
tab4_regs <-
list(s_or_01,
s_or_02,
s_or_IV,
s_cr_01,
s_cr_02,
s_cr_IV,
s_bu_01,
s_bu_02,
s_bu_IV,
s_sr_01,
s_sr_02,
s_sr_IV)
tab4_addLines <- list(chifeFE_line_12,
Ymean_row(tab4_regs),
n_aisp_line_12)
createTable(reg_list = tab4_regs,
add_lines_list = tab4_addLines,
dep_var_labels = c("Robberies not included in the target",
"Cargo robbery ",
"Burglary",
"Robbery of commercial stores"),
title = "Table 3 – Expectancy of receiving bonuses and positive spill overs on other crimes",
outPath = file.path(OUTPUTS_final, "tab3.html")) # Order changed in paper
# Poisson model (Table B4). Exported once via createTable(); the previous
# additional raw stargazer() call wrote the same outfile and was immediately
# overwritten by the createTable() call below, so it has been removed.
tab5_regs <-
  list(p_vd,
       p_vr,
       p_rr)

tab5_addLines <- list(c("Chief FE", "Yes", "Yes", "Yes"),
                      Ymean_row(tab5_regs),
                      c("Number of aisp", rep("39", 3)))

createTable(reg_list = tab5_regs,
            add_lines_list = tab5_addLines,
            dep_var_labels = c("Violent deaths",
                               "Vehicle robbery (Carjacking)",
                               "Street robbery"),
            title = "Table B4 – Robustness: Poisson Regressions",
            outPath = file.path(OUTPUTS_final, "tabB4.html"))
#------------------------------------------------------------------------------#
#### Monthly coef graphs ####
cpsr <- sr %>% subset(year > 2009 & year < 2016)
#### Create Variables
# Create month order dummys
cpsr$m2 <- ifelse(cpsr$month %in% c(2,8), 1, 0)
cpsr$m3 <- ifelse(cpsr$month %in% c(3,9), 1, 0)
cpsr$m4 <- ifelse(cpsr$month %in% c(4,10), 1, 0)
cpsr$m5 <- ifelse(cpsr$month %in% c(5,11), 1, 0)
cpsr$m6 <- ifelse(cpsr$month %in% c(6,12), 1, 0)
# Create on_target X mN interaction
cpsr$month2 <- cpsr$m2*cpsr$on_target
cpsr$month3 <- cpsr$m3*cpsr$on_target
cpsr$month4 <- cpsr$m4*cpsr$on_target
cpsr$month5 <- cpsr$m5*cpsr$on_target
cpsr$month6 <- cpsr$m6*cpsr$on_target
#### Construct monthly regression formulas
month_dummies <- c("month2",
"month3",
"month4",
"month5",
"month6")
rFormula_plot <- paste(c(month_dummies,
# Remove on_targer as it is already in the interactions
indepVars[-1]),
collapse = " + ")
Formulas02_plot_str <- paste(depVars, paste(rFormula_plot, config2), sep = " ~ ")
names(Formulas02_plot_str) <- depVars
#### Monthly regression
rplot_vd <- feRegSim(Formulas02_plot_str["violent_death_sim"], data = cpsr)
rplot_vr <- feRegSim(Formulas02_plot_str["vehicle_robbery"], data = cpsr)
rplot_rr <- feRegSim(Formulas02_plot_str["street_robbery"], data = cpsr)
#### Actual plots
monthCoefPlot <- function(model,
                          vars){
  # Coefficient plot for the month-of-semester interaction terms of a fitted
  # felm model: point estimates with +/- 1 SE error bars and a red zero line.
  #
  # Args:
  #   model: fitted felm object; coefficients and robust SEs (model$rse)
  #          must contain every name in `vars`.
  #   vars:  character vector of interaction coefficient names, ordered by
  #          month; the first element is plotted at month 2 (month 1 is the
  #          omitted baseline).
  #
  # Returns: a ggplot object.

  # Select only month coeffs
  coefs_df <- data.frame(coef = model$coefficients[vars,],
                         month = vars,
                         se = model$rse[vars])

  # X-axis positions: derived from `vars` (baseline month omitted) instead
  # of the previous hard-coded c(2:6), so other interaction sets also work.
  # Identical result (2:6) for the current five month dummies.
  coefs_df$month <- seq_along(vars) + 1

  plot <-
    ggplot(data = coefs_df,
           aes(y = coef,
               x = month)) +
    geom_point(col = "dodgerblue4", size = 2)+
    geom_errorbar(aes(ymin=coef-se,
                      ymax=coef+se),
                  col = "dodgerblue4",
                  size = .5,
                  width=.1)+
    geom_hline(yintercept=0,
               color = "red")+
    xlab("Month") +
    ylab("")+
    theme_minimal()

  return(plot)
}
coefPlot_vd <-
monthCoefPlot(model = rplot_vd,
vars = month_dummies)
coefPlot_vr <-
monthCoefPlot(model = rplot_vr,
vars = month_dummies)
coefPlot_rr <-
monthCoefPlot(model = rplot_rr,
vars = month_dummies)
# Export plots
if(EXPORT_plots){
coefPlot_vd +
ggsave(filename = file.path(OUTPUTS_final, "coef_plot_violent_death_sim.png"),
width = 6,
height = 4)
coefPlot_rr +
ggsave(filename = file.path(OUTPUTS_final, "coef_plot_street_robbery.png"),
width = 6,
height = 4)
coefPlot_vr +
ggsave(filename = file.path(OUTPUTS_final, "coef_plot_vehicle_robbery.png"),
width = 6,
height = 4)
}
|
31d51fe5c1e17ac590a82e531221a2704b663476
|
407631f4bfd859a0d93aab969b683906ccfdc0be
|
/CHC-analysis-abs-Barplots_ST_20200826.R
|
ea10d45da9ed0c998e597a56e20a4d3d79ae4115
|
[] |
no_license
|
SandraTretter/Pc
|
bab7296d31c4aa36e5c982a6328e853b4e4cadda
|
04ab8ad4cea90250c4e2e9b1a712919786307771
|
refs/heads/master
| 2022-12-09T23:13:36.817979
| 2020-09-02T08:01:03
| 2020-09-02T08:01:03
| 291,988,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,672
|
r
|
CHC-analysis-abs-Barplots_ST_20200826.R
|
# Sandra Tretter
# 26.08.2020
#### Pogonomyrmex californicus, CHC Analyse
#---------------------------------------------------------------------
# content: 0. required packages/libraries
# 1. read in and transform the data
# 2. CHC classes
# 3. Barplots
# 3.1 for alkanes
# 3.2 for alkenes
# 3.3 for monomethylbranched CHC
# 3.4 for dimethylbranched CHC
# 3.5 for trimethylbranched CHC
# 3.6 for tetramethylbranched CHC
# 3.7 for dienes
#---------------------------------------------------------------------
#### 0. required packages/libraries ####
library(permute)
library(lattice)
library(vegan)
library(MASS)
library(gtools)
library(shape)
library(ggplot2)
library(viridis)
library(export)
#---------------------------------------------------------------------
#### 1. read in and transform the data ####
setwd("C:/Users/trett/Google Drive/AG Gadau/GC-MS/Pogonomyrmex/CHC analysis/Daten")
dataset <- read.csv("/Users/trett/Google Drive/AG Gadau/GC-MS/Pogonomyrmex/CHC analysis/Daten/BatchTable_CSV_ST_20200814.csv" ,header=T,dec=",",sep=";",check.names=FALSE,row.names = 1)
# Replace missing peak areas with 0 (compound absent in that sample).
dataset[is.na(dataset)] <- 0

# Standardize each sample (row) against its internal C12 standard (first
# column) to compensate for deviations caused by injection volume.
# Vectorized row-wise division via sweep() replaces the original per-row
# loop; values are identical since each row was scaled by its own original
# standard before being overwritten.
dataset <- sweep(dataset, 1, dataset[[1]], "/") * 22.5

# Drop the internal-standard column (now a constant 22.5).
dataset <- subset(dataset, select = -c(1))
#-----------------------------------------------------------------------
#### 2. CHC classes ####
nm <- colnames(dataset)

# Extract all compounds of one CHC class (columns whose name matches
# `pattern`), reshape to long format and tag with `group_label`.
# Sample IDs are expected to follow the pattern "Pc_<caste>_<lineage>_...";
# caste and lineage are parsed from the row names for plotting.
extract_chc_class <- function(pattern, group_label) {
  keep <- nm %in% grep(pattern, nm, value = TRUE)
  cls <- subset(dataset, select = keep)
  cls$sample <- row.names(cls)  # sample names for the x axis of the barplots
  cls$caste <- gsub("Pc_(.*?)_(.*?)_.*", "\\1", cls$sample, perl = TRUE)
  cls$lineage <- gsub("Pc_(.*?)_(.*?)_.*", "\\2", cls$sample, perl = TRUE)
  long <- tidyr::gather(cls, key = "chc", value = "val",
                        -sample, -caste, -lineage)
  long$group <- group_label
  long
}

# One long data frame per CHC class (replaces seven copy-pasted blocks).
dfAlkanes <- extract_chc_class("n-", "alkanes")
dfAlkenes <- extract_chc_class("alkene", "alkenes")
dfMe <- extract_chc_class("-Me", "monomethyl-alkanes")
dfDi <- extract_chc_class("-Di", "dimethyl-alkanes")
dfTri <- extract_chc_class("-Tri", "trimethyl-alkanes")
dfTetra <- extract_chc_class("-Tetra", "tetramethyl-alkanes")
dfDiene <- extract_chc_class("diene", "dienes")

# combine classes
dfall <- rbind(dfAlkanes, dfAlkenes, dfMe, dfDi, dfTri, dfTetra, dfDiene)
dfall$group <- factor(dfall$group,
                      levels = c("alkanes","alkenes","dienes","monomethyl-alkanes","dimethyl-alkanes","trimethyl-alkanes","tetramethyl-alkanes")) # sort group
# Stacked barplot of absolute CHC abundance per caste, coloured by compound
# class and facetted by lineage.
p <- ggplot(dfall, aes(fill=group, y=val, x=caste)) +
  geom_bar(stat="identity") +
  theme_classic(base_size = 22) + # white background
  theme(axis.title.x = element_blank(), axis.title.y = element_text(color = "black", size=14, face="bold")) +
  theme(legend.title =element_blank(), legend.text = element_text(color = "black", size = 16)) +
  scale_x_discrete(guide= guide_axis(n.dodge = 2)) + # stagger x labels over two rows
  NULL +
  scale_fill_manual(values = c("goldenrod1", "darkorange2", "chartreuse2", "slategray1", "skyblue2", "steelblue3", "steelblue4")) +
  facet_grid(.~lineage, scales = "free")
p
# export graph (via the `export` package) as PowerPoint and PNG
graph2ppt(p, file="Barplots-abs3_ST_20200826",width=15,height=11)
graph2png(p, file="Barplots-abs3_ST_20200826",width=15,height=11, dpi=600)
|
1fd900295604413bcb0faed17ab77a6bd9ea42d2
|
3e9d42c74e22a89a4735f074215ef5e4f2190d9c
|
/R/calc_loglik.R
|
631509401662e0de07211623ec7628d15619899a
|
[] |
no_license
|
cran/SAVER
|
49c1e9992af6dd78f5693fd75bff92d723427167
|
521ca66faf26cfcc659ca249ca1fc5086c4804a6
|
refs/heads/master
| 2020-04-01T07:42:04.173678
| 2019-11-13T18:30:03
| 2019-11-13T18:30:03
| 153,000,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,015
|
r
|
calc_loglik.R
|
#' Calculates marginal likelihood
#'
#' Calculates the marginal likelihood given the prediction under constant
#' coefficient of variation (a), Fano factor (b), and variance (k).
#'
#' \code{calc.loglik.a} returns the shifted negative log-likelihood under
#' constant coefficient of variation.
#' \code{calc.loglik.b} returns the shifted negative log-likelihood under
#' constant Fano factor.
#' \code{calc.loglik.k} returns the shifted negative log-likelihood under
#' constant variance.
#'
#' @param a,b,k Prior parameter.
#'
#' @param y A vector of observed gene counts.
#'
#' @param mu A vector of predictions from \code{\link{expr.predict}}.
#'
#' @param sf Vector of normalized size factors.
#'
#' @return A shifted negative marginal log-likelihood.
#'
#'
#' @rdname calc_loglik
#' @export
calc.loglik.a <- function(a, y, mu, sf) {
  # Negative marginal log-likelihood (shifted) under a constant coefficient
  # of variation `a`, for observed counts `y`, predictions `mu` and size
  # factors `sf`. Scalar mu/sf are recycled to the length of y.
  n <- length(y)
  if (length(mu) == 1) {
    mu <- rep(mu, n)
  }
  if (length(sf) == 1) {
    sf <- rep(sf, n)
  }
  inv_a <- 1 / a
  loglik <- n * inv_a * log(inv_a) -
    inv_a * sum(log(mu)) -
    n * lgamma(inv_a) +
    sum(lgamma(y + inv_a)) -
    sum((y + inv_a) * log(sf + inv_a / mu))
  -loglik
}
#' @rdname calc_loglik
#' @export
calc.loglik.b <- function(b, y, mu, sf) {
  # Negative marginal log-likelihood (shifted) under a constant Fano
  # factor `b`, for observed counts `y`, predictions `mu` and size
  # factors `sf`. Scalar mu/sf are recycled to the length of y.
  n <- length(y)
  if (length(mu) == 1) {
    mu <- rep(mu, n)
  }
  if (length(sf) == 1) {
    sf <- rep(sf, n)
  }
  shape <- mu / b
  loglik <- sum(shape * log(1 / b)) -
    sum(lgamma(shape)) +
    sum(lgamma(y + shape)) -
    sum((y + shape) * log(sf + 1 / b))
  -loglik
}
#' @rdname calc_loglik
#' @export
calc.loglik.k <- function(k, y, mu, sf) {
  # Negative marginal log-likelihood (shifted) under constant variance `k`,
  # for observed counts `y`, predictions `mu` and size factors `sf`.
  # Scalar mu/sf are recycled to the length of y.
  n <- length(y)
  if (length(mu) == 1) {
    mu <- rep(mu, n)
  }
  if (length(sf) == 1) {
    sf <- rep(sf, n)
  }
  shape <- mu^2 / k
  loglik <- sum(shape * log(mu)) -
    log(k) * sum(shape) -
    sum(lgamma(shape)) +
    sum(lgamma(y + shape)) -
    sum((y + shape) * log(sf + mu / k))
  -loglik
}
|
550139ef299f40e92f3c6d9855744d4b3701bb5d
|
a29832f97abdaafd1490ec4e7a38e85505ed3790
|
/tests/testthat/testsmoothSpline.R
|
f543fc7af347655912a878c0acc5065e70c05626
|
[] |
no_license
|
cran/growthPheno
|
edfd87b031ff311e8c0bd47da986a7f2ddd2bded
|
6bb455e5dac33deb46536a3162238da06e30a508
|
refs/heads/master
| 2023-08-31T22:58:01.201522
| 2023-08-22T16:00:02
| 2023-08-22T18:31:00
| 196,967,533
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,922
|
r
|
testsmoothSpline.R
|
cat("#### Test smoothSpline using NCSS with leaf data when there are missing values\n")
test_that("leaf_smoothSpline", {
  skip_if_not_installed("growthPheno")
  skip_on_cran()
  library(dae)
  library(ggplot2)
  library(growthPheno)

  # A small subset of Exp 270 leaf tracking data
  data(testSpline)

  ## Test omit in smoothSpline - Length.3, with AGR/RGR derivatives.
  ## `nrows` holds the expected number of prediction rows per cart after
  ## NA omission.
  leaf.dat <- test
  carts <- levels(leaf.dat$Snapshot.ID.Tag)
  nrows <- list(6,4,5,3,2,6,1,1)
  names(nrows) <- carts
  fit <- list()
  for (cart in carts)
  {
    fit[[cart]] <- smoothSpline(subset(leaf.dat, Snapshot.ID.Tag == cart),
                                response = "Length.3", response.smoothed = "sLength.3",
                                x="xDays",
                                df = 4, na.x.action = "omit", na.y.action = "omit",
                                rates = c("AGR", "RGR"),
                                suffices.rates = c("AGRdv", "RGRdv"))
    testthat::expect_equal(nrows[[cart]], nrow(fit[[cart]]$predictions))
    testthat::expect_true(all(unlist(c("xDays", "sLength.3", "sLength.3.AGRdv", "sLength.3.RGRdv") %in%
                                       names(fit[[cart]]$predictions))))
  }

  ## Test omit in smoothSpline - Length.2, including a cart with a
  ## zero-length prediction data.frame.
  ## Fixed: na.x.action was misspelled "omi" (it previously worked only via
  ## partial value matching inside smoothSpline).
  nrows <- list(11,12,12,12,9,12,0,9)
  names(nrows) <- carts
  fit <- list()
  for (cart in carts)
  {
    fit[[cart]] <- smoothSpline(subset(leaf.dat, Snapshot.ID.Tag == cart),
                                response = "Length.2", response.smoothed = "sLength.2",
                                x="xDays",
                                df = 4, na.x.action = "omit", na.y.action = "omit",
                                rates = c("AGR", "RGR"),
                                suffices.rates = c("AGRdv", "RGRdv"))
    testthat::expect_equal(nrows[[cart]], nrow(fit[[cart]]$predictions))
    testthat::expect_equal(ncol(fit[[cart]]$predictions), 4)
    testthat::expect_true(all(unlist(c("xDays", "sLength.2", "sLength.2.AGRdv", "sLength.2.RGRdv") %in%
                                       names(fit[[cart]]$predictions))))
  }

  ## Smoothing without derivatives: correctBoundaries = FALSE ...
  leaf.dat <- test
  carts <- levels(leaf.dat$Snapshot.ID.Tag)
  nrows <- list(6,4,5,3,2,6,1,1)
  names(nrows) <- carts
  fit <- list()
  for (cart in carts)
  {
    fit[[cart]] <- smoothSpline(subset(leaf.dat, Snapshot.ID.Tag == cart),
                                response = "Length.3", response.smoothed = "sLength.3",
                                x="xDays", correctBoundaries = FALSE,
                                df = 4, na.x.action = "omit", na.y.action = "omit")
    testthat::expect_equal(ncol(fit[[cart]]$predictions), 2)
    testthat::expect_equal(nrow(fit[[cart]]$predictions), nrows[[cart]])
    testthat::expect_true(all(unlist(c("xDays", "sLength.3") %in% names(fit[[cart]]$predictions))))
  }

  ## ... versus correctBoundaries = TRUE: boundary correction must change
  ## the smooth noticeably for one cart while leaving another essentially
  ## unchanged.
  nrows <- list(6,4,5,3,2,6,1,1)
  names(nrows) <- carts
  fitC <- list()
  for (cart in carts)
  {
    fitC[[cart]] <- smoothSpline(subset(leaf.dat, Snapshot.ID.Tag == cart),
                                 response = "Length.3",
                                 x="xDays", correctBoundaries = TRUE,
                                 df = 4, na.x.action = "omit", na.y.action = "omit")
    testthat::expect_equal(ncol(fitC[[cart]]$predictions), 2)
    testthat::expect_equal(nrow(fitC[[cart]]$predictions), nrows[[cart]])
    testthat::expect_true(all(unlist(c("xDays", "sLength.3") %in% names(fit[[cart]]$predictions))))
  }
  testthat::expect_true(all(abs(fit[["047162-C"]]$sLength.3 -
                                  fitC[["047162-C"]]$sLength.3) > 0.01))
  testthat::expect_true(all(abs(fit[["047164-S"]]$sLength.3 -
                                  fitC[["047164-S"]]$sLength.3) < 1e-05))
})
|
5b93c14e58a33d9d5c19772d9da18c5db8070c87
|
0b724301700e023b057d9bd14cf992aa8379b874
|
/R/phillips_1996_viz.R
|
57e4acf9d442091386f169a181067ffe2f8634ae
|
[
"MIT"
] |
permissive
|
dindiarto/HIV-Model-Phillips-1996
|
4e275d3dcc3b52c3f8b0f90142c68a9ba4df5010
|
877bd5ec2d04983e5b574634f34aedc6acc086bf
|
refs/heads/master
| 2022-12-04T02:35:21.430000
| 2020-08-13T23:04:07
| 2020-08-13T23:04:07
| 282,621,475
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
phillips_1996_viz.R
|
# Visualisation of the Phillips (1996) within-host HIV model solution.
# Expects `s_CD4_HIV_dynamics_solution` (an ODE-solver result — presumably
# deSolve output; confirm upstream) to be in scope; builds three ggplot
# objects: Vplot, LEplot, Rplot.

# convert solution to a dataframe
s_CD4_HIV_dynamics_solution <- as.data.frame(s_CD4_HIV_dynamics_solution)

# Number of virus particles in the bloodstream (log10 y axis)
Vplot <- ggplot(s_CD4_HIV_dynamics_solution) +
  geom_line(aes(time, V), color = "red") +
  theme_classic() +
  scale_y_continuous(trans = "log10") +
  annotation_logticks(sides = "l") +  # log tick marks on the left axis
  scale_x_continuous(breaks = scales::pretty_breaks(n = 7)) +
  xlab("Days from infection") +
  ylab("Number of virus particles (V)") +
  coord_cartesian(ylim = c(0.1, 1e4))  # zoom without dropping data

# Latently (L) and actively (E) infected cells on a shared log10 axis
LEplot <- ggplot(s_CD4_HIV_dynamics_solution) +
  geom_line(aes(time, L, color = "L" )) +
  geom_line(aes(time, E, color = "E")) +
  theme_classic() +
  scale_y_continuous(trans = "log10") +
  coord_cartesian(ylim = c(0.1, 100)) +
  scale_x_continuous(breaks = scales::pretty_breaks(n = 7)) +
  annotation_logticks(sides = "l") +
  labs(x = "Days from infection",
       y = "L and E",
       color = NULL) +
  scale_color_manual(values = c("E" = "darkorange", "L" = "blue"))+
  theme(legend.position = c(0.8, 0.8), legend.key.size = unit(2,"line"))  # legend inside panel

# Number of susceptible target cells R (linear axis)
Rplot <- ggplot(s_CD4_HIV_dynamics_solution) +
  geom_line(aes(time, R), color = "darkgreen") +
  theme_classic()+
  scale_x_continuous(breaks = scales::pretty_breaks(n = 7)) +
  labs(x = "Days from infection",
       y = "Number of susceptible cells (R)")
|
97c3df41c3c21cf0ee7009301504cd7d936ddc15
|
4edb5a7010aca41383da8ac7f36c7c5613104e6c
|
/deepdrone/R/unet_shallow.R
|
52a206d363a2bfb82bca9f5af9c5a7de89d61b1a
|
[
"MIT"
] |
permissive
|
sholtkamp/Master_Thesis
|
081ed34cbc8aa5297fd233778a149042f5a9bdfa
|
b26b4c20e30d89d4a21ebbf5971217ce87bc222f
|
refs/heads/master
| 2021-01-05T17:49:14.054047
| 2020-12-07T08:51:25
| 2020-12-07T08:51:25
| 241,094,556
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,696
|
r
|
unet_shallow.R
|
build_unet_shallow <- function(input_shape = c(128, 128, 3), num_classes = 2){
  # Build a shallow U-Net-style Keras segmentation model with a single
  # down-sampling / up-sampling level.
  #
  # Args:
  #   input_shape: input image dimensions (height, width, channels).
  #   num_classes: number of output channels; each gets a sigmoid-activated
  #                1x1 conv score per pixel.
  #
  # Returns: an uncompiled keras_model.
  # NOTE(review): layer names ("down_7", "center_1", ...) appear to follow
  # the deeper U-Net variants in this project — presumably kept for weight
  # transfer; confirm before renaming.

  #---Input-------------------------------------------------------------------------------------
  inputs <- layer_input(name = "input_1", shape = input_shape)

  # Encoder: two 3x3 conv + ReLU blocks at full resolution
  down4 <- inputs %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), name = "down_7", padding = "same") %>%
    layer_activation("relu") %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), name = "down_8", padding = "same") %>%
    layer_activation("relu")

  # Halve the spatial resolution before the bottleneck
  down4_pool <- down4 %>%
    layer_max_pooling_2d(pool_size = c(2, 2), strides = c(2, 2))

  #---Center-----------------------------------------------------------------------------------
  # Bottleneck with spatial dropout for regularisation
  center <- down4_pool %>%
    layer_spatial_dropout_2d(rate = 0.5) %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), name = "center_1", padding = "same") %>%
    layer_activation("relu") %>%
    layer_conv_2d(filters = 128, kernel_size = c(3, 3), name = "center_2", padding = "same") %>%
    layer_activation("relu")

  #---Upsampling--------------------------------------------------------------------------------
  # Upsample back to input resolution; the braced magrittr block feeds the
  # pipe value (`.`) into layer_concatenate together with the skip
  # connection from `down4`, concatenating along the channel axis (axis 3).
  up4 <- center %>%
    layer_upsampling_2d(size = c(2, 2)) %>%
    {layer_concatenate(inputs = list(down4, .), axis = 3)} %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), name = "up_8", padding = "same") %>%
    layer_activation("relu") %>%
    layer_conv_2d(filters = 64, kernel_size = c(3, 3), name = "up_7", padding = "same") %>%
    layer_activation("relu")

  # 1x1 conv head producing per-pixel class scores
  classify <- layer_conv_2d(up4, filters = num_classes, kernel_size = c(1, 1), activation = "sigmoid")

  # Build specified model and assign it to variable
  model <- keras_model(
    inputs = inputs,
    outputs = classify
  )

  return(model)
}
|
6a53a3d5bb033579b7e933b00eaa05d6d08bd45c
|
cbd0b3ff8c7bb3bb5aac96f006229ccff11be95b
|
/man/baytsDDSpatial.Rd
|
e2fa65dd5aa869ce9bd997ac17bbdac22717be33
|
[
"CC-BY-4.0"
] |
permissive
|
rbavery/bayts
|
5d2cf68bb6e40ddeef96d1845d276710a872e3f5
|
e0ae2d98357738d0b9aa6a61c5634ab7cddcd40d
|
refs/heads/master
| 2023-03-29T02:54:45.541461
| 2021-04-02T21:23:47
| 2021-04-02T21:23:47
| 342,713,122
| 0
| 0
|
NOASSERTION
| 2021-02-26T22:15:24
| 2021-02-26T22:07:20
|
R
|
UTF-8
|
R
| false
| false
| 3,346
|
rd
|
baytsDDSpatial.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/baytsDDSpatial.R
\name{baytsDDSpatial}
\alias{baytsDDSpatial}
\title{Function to run baytsDD on (mulitple) raster bricks}
\usage{
baytsDDSpatial(bL = list(), datesL = list(), pdfsdL = list(),
distNFL = list(), modL = list(), formulaL = list(), orderL = list(),
mask = NULL, start_history = NULL, end_history = NULL, start,
end = NULL, chi = 0.9, PNFmin = 0.5, bwf = c(0.1, 0.9),
mc.cores = 1, out_file = NULL)
}
\arguments{
\item{bL}{list of raster bricks. Raster bricks need to have the same extent and spatial resolution.}
\item{datesL}{list of time vector of the format: "2014-10-07".}
\item{pdfsdL}{list of pdfsd object(s), each giving the parameters that modulate the F and NF distributions: sd(F), mean(NF) and sd(NF) (e.g. pdfsd = c(2,-4,2))}
\item{modL}{list of modL - modulation of the time series observations. default=NULL}
\item{formulaL}{list of formula for the regression model. The default is response ~ trend + harmon, i.e., a linear trend and a harmonic season component. Other specifications are possible using all terms set up by bfastpp, i.e., season (seasonal pattern with dummy variables), lag (autoregressive terms), slag (seasonal autoregressive terms), or xreg (further covariates). See bfastpp for details.}
\item{orderL}{list of numeric. Order of the harmonic term, defaulting to 3.}
\item{mask}{(raster) mask at which method is applied; default = NULL (method is applied to all pixel)}
\item{start_history}{Start date of history period used to model the seasonality and derive F and NF PDFs. Default=NULL (start of input time series)}
\item{end_history}{End date of history period used to model the seasonality and derive F and NF PDFs. Default=NULL (Start of the monitoring period is used)}
\item{start}{start date of monitoring period. Default=NULL (start of input time series).}
\item{end}{end date of monitoring period. Default=NULL (end of input time series)}
\item{chi}{threshold of Pchange at which the change is confirmed; Default=0.5}
\item{PNFmin}{threshold of pNF above which the first observation is flagged; Default=0.5}
\item{bwf}{block weighting function to truncate the NF probability; Default=c(0.1,0.9); (c(0,1) = no truncation)}
\item{mc.cores}{numeric. number of cores to be used for the job. See \code{\link{mc.calc}} for more details (default = 1)}
\item{distNFL}{list of "distNF" object(s) describing the mean and sd of the NF distribution in case no data driven way to derive the NF distribution is wanted; default = list()}
\item{out_file}{output file; default = NULL}
}
\value{
A rasterBrick with 5 layers:
(1) flag: time at which unconfirmed change got flagged;
(2) change.flagged: time at which confirmed change got flagged;
(3) change.confirmed: time at which change is confirmed;
(4) Pflag: Probabilty of change for unconfirmed flagged changes;
(5) Pchange.confirmed: Probabilty of change for confirmed changes.
}
\description{
Implements baytsDD function on (multiple) time series rasterBrick object(s).
}
\examples{
#TBD
}
\author{
Johannes Reiche (Wageningen University)
}
\references{
\href{http://www.mdpi.com/2072-4292/7/5/4973}{Reiche et al. (2015): A Bayesian Approach to Combine Landsat and ALOS PALSAR Time Series for Near Real-Time Deforestation Detection. Remote Sensing. 7(5), 4973-4996; doi:10.3390/rs70504973}
}
|
90fc80cecd263d661e2a0f037c8485d57976e615
|
b2f0f100905477dffcc9bfb2cce3f37e1e218fa2
|
/debug.R
|
d1837c5973b3d8c7c2937df382186c9ad6d3a58b
|
[] |
no_license
|
MurisonWardell/GAGA
|
70ec2a610c3c7f20c80ad5d02193d34dbd6bfc29
|
e165786afae46ccc0c5c7d496659415d1b03e49a
|
refs/heads/master
| 2020-12-24T17:44:39.057916
| 2014-01-17T19:56:09
| 2014-01-17T19:56:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,090
|
r
|
debug.R
|
## Testing that gaga works
## NOTE(review): scratch/debug script. It hard-codes machine-specific paths via
## setwd() and will only run as-is on the original author's machines.
setwd("C:/Users/cwardell/Desktop/temp")
## Load library
library(GAGA)
## Load synthetic data set
#data("gaga_synthetic_data","gaga_synthetic_data_annotation")
data("hidden_gaga")
data("gaga_simple_data")
data("gaga_synthetic_data")
data("gaga_synthetic_data_jittered")
data("BYB1_G07_pruned")
## Execute gaga - good data, don't mess with it...
#x=gaga(gaga_synthetic_data, gaga_synthetic_data_annotation, number_of_clones=6, iterations=20, contamination=0)
#y=gaga(gaga_synthetic_data, gaga_synthetic_data_annotation, number_of_clones=6, iterations=10, contamination=0)
# Fit the clean and jittered synthetic datasets, plus the yeast dataset both
# without and with the contamination clone enabled.
gdata=gaga(gaga_synthetic_data, number_of_clones=6, iterations=3000,nroot=1)
gdataj=gaga(gaga_synthetic_data_jittered, number_of_clones=6, iterations=3000)
gagaYeast=gaga(BYB1_G07_pruned, number_of_clones=6, iterations=1000)
gagaYeastC=gaga(BYB1_G07_pruned, number_of_clones=6, iterations=1000,contamination=1)
## Correct solution for
# NOTE(review): the second assignment overwrites the first; presumably only the
# faster nroot=1 run is intended to be kept.
simpleDataSolution=gaga(gaga_simple_data, number_of_clones=4, nroot=0,iterations=3000)
simpleDataSolution=gaga(gaga_simple_data, number_of_clones=4, nroot=1,iterations=500) # will hit correct solution faster
# Write full reports for each fitted object.
gagaReport(gaga_synthetic_data,gdata,output_file_prefix="gaga_synthetic_data")
gagaReport(gaga_synthetic_data_jittered,gdataj,output_file_prefix="gaga_synthetic_data_jittered")
gagaReport(BYB1_G07_pruned,gagaYeast,output_file_prefix="gagaYeast")
gagaReport(BYB1_G07_pruned,gagaYeastC,output_file_prefix="gagaYeastC")
#x=BYB1_G07_pruned
#y=gagaYeast
#y=gagaYeastC
# NOTE(review): x and y are only defined if the commented assignments above are
# uncommented, and z is never assigned anywhere in this script — the calls
# below that use x, y or z will fail as written.
gagaReport(x,y,outType="fitness")
gagaReport(x,y,outType="heatmap")
gagaReport(x,y,outType="proportion")
gagaReport(x,y,outType="phylogeny")
gagaReport(x,y)
gagaReport(gaga_synthetic_data,x,outType="complete")
gagaReport(gaga_synthetic_data,gdata,outType="fitness")
gagaReport(gaga_synthetic_data,z,outType="heatmap")
gagaReport(gaga_synthetic_data,z,outType="proportion")
gagaReport(gaga_synthetic_data,z,outType="phylogeny")
setwd("N:/MORGAN/ChrisW/documents/projects/131213_gaga/GAGA")
## The horrifying building vignettes section:
library(devtools)
build_vignettes()
|
def150a6c6988189438a423a332bdf4b8d245d42
|
a238180d0e91513c7d3ca5c8283f8711453427a1
|
/PCA and Clustering Analysis.R
|
4ed96b40a65a5516094925f3ccff5e811b98ba75
|
[] |
no_license
|
hingu-parth/airbnb-host-analysis-for-newyork
|
fdbfade770be36462d1bb95b8c70f455381696bd
|
e3be747460489a8a1d08ab46ce78fc9eb4578e85
|
refs/heads/master
| 2022-03-08T21:51:17.498445
| 2019-11-08T01:16:58
| 2019-11-08T01:16:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,799
|
r
|
PCA and Clustering Analysis.R
|
# Airbnb NYC host data: cleaning, per-borough EDA, PCA, and k-means clustering.
# Author: Parth Hingu (refactored: the five copy-pasted per-borough PCA
# sections are now one helper; an accidental duplicate Bronx pairs() plot was
# removed; prcomp results no longer shadow their input data frames).

## Libraries ----
library(data.table)
library(ggplot2)   # tidyverse data visualization package
library(stringr)
library(tmap)      # for static and interactive maps
library(leaflet)   # for interactive maps
library(mapview)   # for interactive maps
library(shiny)     # for web applications
library(car)
library(dplyr)

## Data loading and cleaning ----
# NOTE(review): hard-coded local path; parameterize before reuse.
airbnbOriginalDF <- read.csv("C:/Users/yadav/Desktop/MVA proj/airbnb/airbnb_1/Airbnb Host Data For Newyork City.csv")
setDT(airbnbOriginalDF)

# Inspect missingness; reviews_per_month is the only column carrying NAs.
table(is.na(airbnbOriginalDF))
table(is.na(airbnbOriginalDF$reviews_per_month))

# Keep only rows with a known reviews_per_month, then re-check.
airbnbNoNADT <- airbnbOriginalDF[airbnbOriginalDF$reviews_per_month != 'NA']
table(is.na(airbnbNoNADT))
table(is.na(airbnbNoNADT$reviews_per_month))

# Parse last_review as a proper Date.
airbnbNoNADT[, last_review := as.Date(last_review, '%m/%d/%Y')]
str(airbnbNoNADT)

# Factor the low-cardinality categorical columns.
unique(airbnbNoNADT$neighbourhood_group)
airbnbNoNADT[, neighbourhood_group := factor(neighbourhood_group)]
unique(airbnbNoNADT$neighbourhood)
# Normalise neighbourhood names (lower case + trimmed) so each name is unique.
airbnbNoNADT[, neighbourhood := tolower(neighbourhood)]
airbnbNoNADT[, neighbourhood := trimws(neighbourhood)]
unique(airbnbNoNADT$room_type)
airbnbNoNADT[, room_type := factor(room_type)]

## Outlier removal ----
# Thresholds chosen from the earlier EDA; airbnbCleaned is the working table.
airbnbCleaned <- airbnbNoNADT[price < 2500 & number_of_reviews < 400 & reviews_per_month < 10]

## Per-borough subsets ----
airbnbManhattan <- airbnbCleaned[neighbourhood_group == 'Manhattan']
nrow(airbnbManhattan)
airbnbQueens <- airbnbCleaned[neighbourhood_group == 'Queens']
nrow(airbnbQueens)
airbnbBrooklyn <- airbnbCleaned[neighbourhood_group == 'Brooklyn']
nrow(airbnbBrooklyn)
airbnbBronx <- airbnbCleaned[neighbourhood_group == 'Bronx']
nrow(airbnbBronx)
airbnbStatenIsland <- airbnbCleaned[neighbourhood_group == 'Staten Island']
nrow(airbnbStatenIsland)

## Scatterplot matrices per borough ----
diagnolcol <- c("price", "minimum_nights", "reviews/month", "numberOfReviews", "availabilityFor365")

# Draw the pairwise scatterplot matrix of the five numeric measures.
plot_borough_pairs <- function(borough) {
  pairs(data.table(borough$price,
                   borough$minimum_nights,
                   borough$reviews_per_month,
                   borough$number_of_reviews,
                   borough$availability_365),
        labels = diagnolcol)
}

plot_borough_pairs(airbnbManhattan)
plot_borough_pairs(airbnbBrooklyn)
plot_borough_pairs(airbnbQueens)
plot_borough_pairs(airbnbStatenIsland)
plot_borough_pairs(airbnbBronx)
# (The original script drew the Bronx matrix twice; the duplicate was dropped.)

## PCA per borough ----
# Build the PCA input: ids + room type kept for reference, plus the five
# numeric columns whose variance is analysed.
build_pca_input <- function(borough) {
  data.table(
    id = borough$id,
    host_id = borough$host_id,
    room_type = borough$room_type,
    price = borough$price,
    minimum_nights = borough$minimum_nights,
    number_of_reviews = borough$number_of_reviews,
    reviews_per_month = borough$reviews_per_month,
    availability_365 = borough$availability_365)
}

# Run PCA on the five numeric columns, print the usual diagnostics, and draw
# a bar-chart and line scree plot of percent variance explained.
# Returns the prcomp object invisibly.
run_borough_pca <- function(borough, label) {
  pca_input <- build_pca_input(borough)
  print(head(pca_input, 5))
  # Drop id/host_id/room_type; scale = TRUE so every variable contributes
  # equally regardless of its units.
  pc <- prcomp(pca_input[, -1:-3], scale = TRUE)
  print(pc)
  # Eigenvalues (squared sdev); their share of the total is the percentage of
  # variance each principal component explains.
  eigenvalues <- pc$sdev^2
  names(eigenvalues) <- paste("PC", seq_along(eigenvalues), sep = "")
  print(eigenvalues)
  pct_variance <- eigenvalues / sum(eigenvalues) * 100
  barplot(pct_variance, main = paste("Scree Plot -", label),
          xlab = "Principal Component", ylab = "Percent Variation")
  plot(pct_variance, xlab = "Component number", ylab = "Component variance",
       type = "l", main = paste("Scree diagram -", label))
  invisible(pc)
}

airbnbPC             <- run_borough_pca(airbnbManhattan, "Manhattan")
airbnbQueensPC       <- run_borough_pca(airbnbQueens, "Queens")
airbnbBrooklynPC     <- run_borough_pca(airbnbBrooklyn, "Brooklyn")
airbnbBronxPC        <- run_borough_pca(airbnbBronx, "Bronx")
airbnbStatenIslandPC <- run_borough_pca(airbnbStatenIsland, "Staten Island")
# In every borough roughly 80% of the variance needs PC1-PC3, so reducing to
# only two components would lose information.

## K-means clustering (Manhattan) ----
library(cluster)

airbnbManhattanClust <- data.frame(
  airbnbManhattan$price,
  airbnbManhattan$number_of_reviews,
  airbnbManhattan$reviews_per_month)
# Row names carry the listing id so clusters can be mapped back to properties.
rownames(airbnbManhattanClust) <- airbnbManhattan$id
# Standardise so price does not dominate the Euclidean distance.
scaleManhattan <- scale(airbnbManhattanClust[, 1:ncol(airbnbManhattanClust)])
head(scaleManhattan)

# Try k = 2, 3, 4 (10 random starts each) and compare the percentage of
# variance left unexplained (within-SS / total-SS).
(kmeans2.Manhattan <- kmeans(scaleManhattan, 2, nstart = 10))
perc_var_kmeans2 <- round(100 * (1 - kmeans2.Manhattan$betweenss / kmeans2.Manhattan$totss), 1)
names(perc_var_kmeans2) <- "Perc. 2 clus"
perc_var_kmeans2

(kmeans3.Manhattan <- kmeans(scaleManhattan, 3, nstart = 10))
perc_var_kmeans3 <- round(100 * (1 - kmeans3.Manhattan$betweenss / kmeans3.Manhattan$totss), 1)
names(perc_var_kmeans3) <- "Perc. 3 clus"
perc_var_kmeans3

(kmeans4.Manhattan <- kmeans(scaleManhattan, 4, nstart = 10))
perc_var_kmeans4 <- round(100 * (1 - kmeans4.Manhattan$betweenss / kmeans4.Manhattan$totss), 1)
names(perc_var_kmeans4) <- "Perc. 4 clus"
perc_var_kmeans4

# k = 3 gives the best trade-off; extract the listing ids of each cluster as
# a one-column matrix (format kept from the original script).
cluster_ids <- function(km, k) {
  ids <- names(km$cluster[km$cluster == k])
  m <- matrix(ids, ncol = 1, nrow = length(ids))
  colnames(m) <- paste("Cluster", k)
  m
}
clus1 <- cluster_ids(kmeans3.Manhattan, 1)
clus2 <- cluster_ids(kmeans3.Manhattan, 2)
clus3 <- cluster_ids(kmeans3.Manhattan, 3)
list(clus1, clus2, clus3)
head(clus1, 5)
head(clus2, 5)
head(clus3, 5)

# Visualise the three clusters in discriminant coordinates.
library(fpc)
plotcluster(airbnbManhattanClust, kmeans3.Manhattan$cluster)

# Per-cluster subsets of the full Manhattan table.
airbnbManhattanCluster1 <- subset(airbnbManhattan, airbnbManhattan$id %in% clus1)
airbnbManhattanCluster2 <- subset(airbnbManhattan, airbnbManhattan$id %in% clus2)
airbnbManhattanCluster3 <- subset(airbnbManhattan, airbnbManhattan$id %in% clus3)

# Check for geographic separation: each cluster spans nearly all
# neighbourhoods, so location does not distinguish the clusters.
length(unique(airbnbManhattanCluster1$neighbourhood))
length(unique(airbnbManhattanCluster2$neighbourhood))
length(unique(airbnbManhattanCluster3$neighbourhood))

# Cluster centers (in scaled units).
kmeans3.Manhattan$centers

# Average price and review activity per cluster, to identify the most
# recommendable group of properties.
mean(airbnbManhattanCluster1$price)
mean(airbnbManhattanCluster1$number_of_reviews)
mean(airbnbManhattanCluster1$reviews_per_month)
mean(airbnbManhattanCluster2$price)
mean(airbnbManhattanCluster2$number_of_reviews)
mean(airbnbManhattanCluster2$reviews_per_month)
mean(airbnbManhattanCluster3$price)
mean(airbnbManhattanCluster3$number_of_reviews)
mean(airbnbManhattanCluster3$reviews_per_month)
# Cluster 2 combines a moderate average price with the highest review volume,
# so its properties are the most recommendable for visitors to Manhattan.

## Composition of the recommended cluster ----
setDT(airbnbManhattanCluster2)
# Room-type breakdown: Entire home/apt and Private room dominate.
nrow(airbnbManhattanCluster2[airbnbManhattanCluster2$room_type == 'Entire home/apt'])
nrow(airbnbManhattanCluster2[airbnbManhattanCluster2$room_type == 'Private room'])
nrow(airbnbManhattanCluster2[airbnbManhattanCluster2$room_type == 'Shared room'])
ggplot(airbnbManhattanCluster2, aes(x = room_type)) +
  geom_bar(fill = 'purple') +
  theme_minimal()
# Spatial spread of cluster-2 properties: no single concentrated area.
ggplot(airbnbManhattanCluster2, aes(x = longitude, y = latitude)) +
  geom_point(size = 0.1, color = 'dark blue')
|
fe371301daa2d9e64101ea121f8cf22348ebf8ef
|
93f20ab83047f2541ecf31b3ead1a5a8235379d5
|
/man/BinaryMatrix.Rd
|
a2a254a26bacc663ff0009f106c7d103a763a9af
|
[] |
no_license
|
mbeccuti/PGS
|
9f5081bd7d4d7c0cb6b4a3178b68e89857c6bc5d
|
949821cce0045c24d3241d97186bcbd31c6a76f8
|
refs/heads/master
| 2021-01-21T15:27:11.200024
| 2017-05-25T13:46:32
| 2017-05-25T13:46:32
| 91,844,674
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,756
|
rd
|
BinaryMatrix.Rd
|
\name{BinaryMatrix}
\alias{BinaryMatrix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
BinaryMatrix
}
\description{
BinaryMatrix takes the file 'Good_Predictors' created by the Predictive_Power function and creates three files: the final signature and the two per-class lists of peculiar genes.
}
\usage{
BinaryMatrix(binary_matrix_file, thr_low, thr_high, classification_vector_file)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{binary_matrix_file}{ the 'Good_Predictors' file created by Predictive_Power function
}
\item{thr_low}{The threshold identifying the most misclassified subjects in class 0}
\item{thr_high}{The threshold identifying the most misclassified subjects in class 1}
\item{classification_vector_file}{A file containing the true classification labels of the samples}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
BinaryMatrix creates three files:
- 'Final_Signature' containing all the peculiar genes
- 'Signature_High' containing the list of peculiar genes for class 1
- 'Signature_Low' containing the list of peculiar genes for class 0.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Federica Martina
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## The function is currently defined as
BinaryMatrix('Good_Predictors', 20, 165, 'classification_vector')
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
a329759c3db94ea72bf9f956ecf3ec15970215d8
|
fb6a38938893dfb929e9739bc3fb8a406bbba5fa
|
/evaluation/usage/inspection.r
|
e3da52380ee5eb767e89c3eb2e5ac4df73ea2b24
|
[] |
no_license
|
maenu/papers-nullability-emse2018-data
|
dd8f2faab46696b9037323770d5360a78a60af2a
|
3d2226726f3f679e86e0b8fecc4e608c62413c3f
|
refs/heads/master
| 2020-03-12T14:48:23.946716
| 2018-04-23T10:02:47
| 2018-04-23T10:02:47
| 130,676,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,036
|
r
|
inspection.r
|
library(tidyr)
library(dplyr)
library(readr)
library(stringr)
library(eulerr)
library(ggplot2)
reduce.annotation <- function(nullness) {
  # Collapse several JAIF annotations for one element into a single nullness.
  # Uninformative entries ('Unknown', empty, NA) are discarded first; the
  # survivors are deduplicated. (FALSE, not NA, results for NA elements
  # because `!is.na(x)` is FALSE there, so they are dropped as in the
  # original dplyr filter.)
  informative <- unique(nullness[nullness != 'Unknown' &
                                   nullness != '' &
                                   !is.na(nullness)])
  # Exactly one informative annotation: it wins outright.
  if (length(informative) == 1) {
    return(as.character(informative[1]))
  }
  # Conflicting annotations: Nullable is the stronger claim than NonNull.
  if ('Nullable' %in% informative) {
    return('Nullable')
  }
  # Nothing informative (or an unresolvable mix): stay Unknown.
  return('Unknown')
}
reduce.nullness <- function(nullness) {
  # Reduce the nullness of overridden method return types: among the
  # duplicates, only one value is expected to be something other than
  # 'UNKNOWN'. Drop uninformative entries ('UNKNOWN', empty, NA) and
  # deduplicate what remains.
  known <- unique(nullness[nullness != 'UNKNOWN' &
                             nullness != '' &
                             !is.na(nullness)])
  # A single known value is the reduced nullness.
  if (length(known) == 1) {
    return(as.character(known[1]))
  }
  # Zero or several conflicting values: fall back to UNKNOWN.
  return('UNKNOWN')
}
read.jar <- function(artifact) {
  # Load the jar-derived element table (methods/fields with class, type and
  # visibility info) for one artifact from the ground-truth directory.
  # The same method can appear with several return types, so the signature is
  # truncated after ')' and unique() collapses the resulting duplicates.
  # Fix: the original additionally computed an 'overriddes' (sic) column
  # (class != rootClass) that was immediately discarded by select(); that
  # dead computation has been removed.
  return(
    read.csv(file = paste0(
      'evaluation/ground-truth/', artifact, '-jar.csv'
    )) %>%
      # Keep the name only up to the closing parenthesis of the parameter
      # list (drops any return-type suffix).
      mutate(name = str_replace(name, '\\).*$', ')')) %>%
      select(
        class,
        attribute,
        name,
        index,
        type,
        primitive,
        rootClass,
        abstract,
        classVisibility,
        visibility
      ) %>%
      unique()
  )
}
read.jaif <- function(artifact) {
  # Load the JAIF annotation table for one artifact. Methods duplicated with
  # different return types are collapsed by trimming the signature after ')'
  # and deduplicating.
  path <- paste0('evaluation/ground-truth/', artifact, '-jaif.csv')
  jaif <- read.csv(file = path)
  # Normalise the method signature (drop anything after the parameter list).
  jaif <- mutate(jaif, name = str_replace(name, '\\).*$', ')'))
  jaif <- select(jaif, class, attribute, name, index, nullness)
  return(unique(jaif))
}
read.jaif.integrity.disagree <- function(artifact) {
  # Load the raw table of elements whose JAIF annotations disagreed during
  # the integrity check, unmodified.
  path <- paste0('evaluation/ground-truth/',
                 artifact,
                 '-jaif-integrity-disagree.csv')
  return(read.csv(file = path))
}
read.definition <- function(artifact) {
  # Load the nullness derived from bytecode definitions for one artifact.
  # An element key (class, name, index) can appear several times — e.g.
  # overridden method return types — and at most one occurrence should be
  # non-UNKNOWN; such duplicates are collapsed via reduce.nullness().
  .definition <-
    read.csv(file = paste0('evaluation/definition/', artifact, '-data.csv')) %>%
    # Truncate the signature after the parameter list so overloads that
    # differ only in return type collapse to one name.
    mutate(name = str_replace(name, '\\).*$', ')')) %>%
    select(class,
           attribute,
           name,
           index,
           nullness)
  return(
    .definition %>%
      # Step 1: find element keys occurring more than once.
      group_by(class,
               name,
               index) %>%
      summarize(o = n()) %>%
      ungroup() %>%
      filter(o > 1) %>%
      # Step 2: for those keys, reduce the several nullness values to one.
      inner_join(.definition, by = c('class', 'name', 'index')) %>%
      group_by(class, name, index) %>%
      summarize(nullness = reduce.nullness(nullness)) %>%
      # Step 3: join the reduced values back onto the full table; keys with
      # no reduced value (non-duplicates) keep their original nullness.
      right_join(
        .definition,
        by = c('class', 'name', 'index'),
        suffix = c('.reduced', '.original')
      ) %>%
      mutate(nullness = ifelse(
        is.na(nullness.reduced),
        as.character(nullness.original),
        as.character(nullness.reduced)
      )) %>%
      select(-nullness.reduced, -nullness.original) %>%
      unique() %>%
      filter(nullness != '') %>%
      ungroup() %>%
      # Normalise column types so later joins against other sources work.
      mutate(
        class = as.character(class),
        name = as.character(name),
        index = as.numeric(index),
        attribute = as.character(attribute),
        nullness = as.character(nullness)
      )
  )
}
read.usage <- function(artifact) {
  # Load the per-call-site usage observations for one artifact and aggregate
  # the observation counts. The grouping also removes duplicates, which
  # should not exist in the first place.
  usage <- read.csv(file = paste0('evaluation/usage/', artifact, '-data.csv'))
  # Normalise the method signature (drop anything after the parameter list).
  usage <- mutate(usage, name = str_replace(name, '\\).*$', ')'))
  # Sum observation counts per distinct (element, context, nullness) key.
  usage <- group_by(usage,
                    class,
                    attribute,
                    visibility,
                    name,
                    getter,
                    index,
                    internal,
                    nullness)
  usage <- summarize(usage, n = sum(n))
  return(ungroup(usage))
}
combine.coverage <-
  function (jar,
            jaif,
            jaif.integrity.disagree,
            definition,
            usage) {
    # Full outer join of the five data sources into one coverage table keyed
    # on (class, name, index). Each source contributes indicator columns
    # whose value is either the set label (e.g. 'jar.public') or NA, a shape
    # that feeds directly into the Euler-diagram code downstream.
    # Primitive-typed elements are dropped at the end, since nullness is
    # meaningless for them.
    return(
      jar %>%
        mutate(
          # index == -1 marks a method return; anything else is a parameter.
          use = ifelse(index == -1, 'return', 'parameter'),
          jar = 'jar',
          # Part of the public API: public class and non-private member.
          jar.public = ifelse(
            classVisibility == 'public' &
              visibility != 'private',
            'jar.public',
            NA
          ),
          # Declared on its root class, i.e. not inherited/overridden.
          jar.original = ifelse(
            as.character(class) == as.character(rootClass),
            'jar.original',
            NA
          )
        ) %>%
        select(
          rootClass,
          abstract,
          class,
          attribute,
          name,
          use,
          index,
          primitive,
          jar,
          jar.public,
          jar.original
        ) %>%
        unique() %>%
        # Source 2: JAIF annotations; jaif.known marks informative entries.
        full_join(
          jaif %>%
            rename(nullness.jaif = nullness) %>%
            mutate(
              jaif = 'jaif',
              jaif.known = ifelse(nullness.jaif != 'Unknown', 'jaif.known', NA)
            ) %>%
            select(class, name, index, nullness.jaif, jaif, jaif.known) %>%
            unique(),
          by = c('class', 'name', 'index')
        ) %>%
        # Source 3: elements with disagreeing JAIF annotations; the key
        # columns are coerced to match the types of the left-hand table.
        full_join(
          jaif.integrity.disagree %>%
            mutate(jaif.integrity.disagree = 'jaif.integrity.disagree') %>%
            select(class, name, index, jaif.integrity.disagree) %>%
            mutate(
              class = as.character(class),
              name = as.character(name),
              index = as.numeric(index)
            ) %>%
            unique(),
          by = c('class', 'name', 'index')
        ) %>%
        # Source 4: bytecode-definition nullness; definition.known marks
        # entries that resolved to something other than UNKNOWN.
        full_join(
          definition %>%
            mutate(
              definition = 'definition',
              definition.known = ifelse(nullness == 'UNKNOWN', NA, 'definition.known')
            ) %>%
            rename(nullness.definition = nullness) %>%
            select(
              class,
              name,
              index,
              definition,
              definition.known,
              nullness.definition
            ) %>%
            unique(),
          by = c('class', 'name', 'index')
        ) %>%
        # Source 5: usage observations, reduced per element to flags for
        # internal and external use.
        full_join(
          usage %>%
            mutate(internal = 'true' == internal,
                   getter = 'true' == getter) %>%
            group_by(class, name, index) %>%
            summarize(
              # NOTE(review): getter is computed as any(internal), not
              # any(getter), even though getter was just parsed above —
              # looks like a copy-paste bug; confirm intent before relying
              # on the 'getter' flag.
              getter = any(internal),
              usage.internal = any(internal),
              usage.external = !all(internal)
            ) %>%
            ungroup() %>%
            mutate(
              getter = ifelse(getter, 'getter', NA),
              usage = 'usage',
              usage.internal = ifelse(usage.internal, 'usage.internal', NA),
              usage.external = ifelse(usage.external, 'usage.external', NA)
            ) %>%
            unique(),
          by = c('class', 'name', 'index')
        ) %>%
        filter(primitive == 'false')
    )
  }
combine.coverage.filter <-
  function(coverage, attribute_, use_, sets) {
    # Restrict the coverage table to one attribute ('method'/'field') and one
    # use ('return'/'parameter'), then count elements per combination of the
    # indicator columns named in `sets`. The result is a named integer vector
    # in euler()'s set-combination notation ('a&b' = in both a and b).
    # Fix: group_by_(.dots = sets) is deprecated since dplyr 0.7; replaced
    # with the equivalent tidy-eval splice, consistent with the
    # rlang::syms() usage already present below.
    return(
      coverage %>%
        filter(attribute == attribute_,
               use == use_) %>%
        group_by(!!!rlang::syms(sets)) %>%
        summarize(n = n()) %>%
        # Build the 'a&b&c' group label, then strip the NA placeholders that
        # stand for sets the element is not in.
        mutate(group = paste(!!!rlang::syms(sets), sep = '&')) %>%
        ungroup() %>%
        select(group, n) %>%
        mutate(group = str_replace_all(group, 'NA&|&NA', '')) %>%
        filter(group != 'NA') %>%
        # One column per combination, flattened into a named vector.
        spread(group, n) %>%
        c() %>%
        unlist()
    )
  }
write.coverage <- function(artifact, coverage) {
.sets <- c('jar',
'jaif.known',
'jaif.integrity.disagree',
'definition.known',
'usage')
pdf(paste0(
'evaluation/usage/',
artifact,
'-coverage-method-return.pdf'
))
print(plot(
euler(
combine.coverage.filter(coverage, 'method', 'return', .sets),
shape = 'ellipse'
),
main = 'coverage-method-return',
quantities = list()
))
dev.off()
pdf(paste0(
'evaluation/usage/',
artifact,
'-coverage-method-parameter.pdf'
))
print(plot(
euler(
combine.coverage.filter(coverage, 'method', 'parameter', .sets),
shape = 'ellipse'
),
main = 'coverage-method-parameter',
quantities = list()
))
dev.off()
return()
}
combine.inference <-
function(jar,
jaif,
jaif.integrity.disagree,
definition,
usage) {
return(
jar %>%
filter(attribute == 'method',
primitive == 'false') %>%
select(rootClass,
abstract,
class,
name,
index) %>%
inner_join(
jaif %>%
select(class,
name,
index,
nullness),
by = c('class', 'name', 'index')
) %>%
# remove disagreed methods
anti_join(
jaif.integrity.disagree %>%
select(class,
name,
index) %>%
mutate(
class = as.character(class),
name = as.character(name),
index = as.numeric(index)
) %>%
unique(),
by = c('class', 'name', 'index')
) %>%
left_join(
# only method
definition %>%
filter(attribute == 'method') %>%
select(class,
name,
index,
nullness) %>%
rename(nullness.definition = nullness) %>%
mutate(
nullness.definition = ifelse(
nullness.definition == 'NON_NULL',
'NonNull',
ifelse(nullness.definition == 'NULLABLE',
'Nullable',
'Unknown')
)
),
by = c('class', 'name', 'index')
) %>%
inner_join(
usage %>%
filter(attribute == 'method') %>%
select(class,
name,
getter,
index,
internal,
nullness,
n) %>%
mutate(getter = getter == 'true') %>%
rename(n.usage = n,
internal.usage = internal),
by = c('class', 'name', 'index'),
suffix = c('.jaif', '.usage')
) %>%
group_by(
rootClass,
class,
name,
getter,
index,
nullness.jaif,
nullness.definition,
internal.usage
) %>%
summarize(
nullness.usage.null = sum(n.usage[nullness.usage == 'NULL']),
nullness.usage.non.null = sum(n.usage[nullness.usage == 'NON_NULL']),
nullness.usage.unknown = sum(n.usage[nullness.usage == 'UNKNOWN']),
n.usage = sum(n.usage)
) %>%
ungroup() %>%
mutate(use = ifelse(index == -1, 'return', 'parameter')) %>%
mutate(
nullability.evidence = ifelse(
use == 'parameter',
nullness.usage.null,
nullness.usage.non.null
),
non.nullability.evidence = ifelse(
use == 'return',
nullness.usage.non.null,
nullness.usage.null
),
nullability = nullability.evidence / n.usage,
non.nullability = non.nullability.evidence / n.usage
)
)
}
merge.inference <- function(inference) {
return(
inference %>%
group_by(
rootClass,
class,
name,
getter,
use,
index,
nullness.jaif,
nullness.definition
) %>%
summarize(
nullness.usage.null = sum(nullness.usage.null),
nullness.usage.non.null = sum(nullness.usage.non.null),
nullness.usage.unknown = sum(nullness.usage.unknown),
n.usage = sum(n.usage)
) %>%
ungroup() %>%
mutate(
nullability.evidence = ifelse(
use == 'parameter',
nullness.usage.null,
nullness.usage.non.null
),
non.nullability.evidence = ifelse(
use == 'parameter',
nullness.usage.non.null,
nullness.usage.null
),
nullability = nullability.evidence / n.usage,
non.nullability = non.nullability.evidence / n.usage
)
)
}
filter.inference.jaif <- function(inference) {
return(
inference %>%
filter(!is.na(nullness.jaif),
nullness.jaif != 'Unknown') %>%
rename(nullness.actual = nullness.jaif)
)
}
filter.inference.definition <- function(inference) {
return(
inference %>%
filter(
!is.na(nullness.definition),
nullness.definition != 'Unknown'
) %>%
rename(nullness.actual = nullness.definition)
)
}
infer.nullness <- function(inference, min.n.usage) {
return(inference %>%
mutate(nullness.usage = ifelse(
n.usage >= min.n.usage,
ifelse(nullability > 0,
'Nullable',
'NonNull'),
'Unknown'
)))
}
as.confusion <-
function(nullness.inferred,
nullness.actual_,
nullness.other_) {
return(
nullness.inferred %>%
mutate(
nullness.usage = ifelse(
nullness.usage == nullness.actual_,
nullness.usage,
nullness.other_
),
classification = ifelse(
nullness.actual == nullness.actual_,
ifelse(nullness.usage == nullness.actual, 'tp', 'fn'),
ifelse(nullness.usage == nullness.actual, 'tn', 'fp')
)
) %>%
group_by(classification) %>%
summarize(n = n()) %>%
ungroup() %>%
spread(classification, n) %>%
mutate(
tp = ifelse('tp' %in% names(.), tp, 0),
fn = ifelse('fn' %in% names(.), fn, 0),
tn = ifelse('tn' %in% names(.), tn, 0),
fp = ifelse('fp' %in% names(.), fp, 0)
) %>%
unlist()
)
}
precision.recall <- function(confusion) {
tp <- confusion['tp'] %>% first()
fp <- confusion['fp'] %>% first()
fn <- confusion['fn'] %>% first()
tn <- confusion['tn'] %>% first()
precision <- tp / (tp + fp)
recall <- tp / (tp + fn)
n <- sum(confusion)
return(data.frame(
precision = ifelse(is.nan(precision), 0, precision),
recall = ifelse(is.nan(recall), 0, recall),
n = n,
tp = tp,
fn = fn,
tn = tn,
fp = fp
))
}
evaluate.perfomance <- function(inference) {
.r <- data.frame()
for (x in c(1, 3, 10, 30, 100, 300, 1000, 3000, 10000)) {
nullness.inferred <- infer.nullness(inference, x)
row <-
precision.recall(as.confusion(nullness.inferred, 'Nullable', 'NonNull'))
row['nullness'] <- 'Nullable'
row['x'] <- x
.r <- bind_rows(.r, row)
row <-
precision.recall(as.confusion(nullness.inferred, 'NonNull', 'Nullable'))
row['nullness'] <- 'NonNull'
row['x'] <- x
.r <- bind_rows(.r, row)
}
.r <- .r %>%
mutate_at(vars(-nullness), as.numeric)
return(.r)
}
evaluate.perfomance.stats <-
function(inference) {
return(
inference %>%
group_by(nullness.actual) %>%
summarize(n = n()) %>%
spread(nullness.actual, n) %>%
mutate(
Nullable = ifelse('Nullable' %in% names(.), Nullable, 0),
NonNull = ifelse('NonNull' %in% names(.), NonNull, 0)
) %>%
slice(1) %>%
unlist()
)
}
plot.performance <- function (performance, performance.stats) {
return(
ggplot(performance, aes(x = x)) +
geom_step(aes(y = precision, color = 'precision')) +
geom_step(aes(y = recall, color = 'recall')) +
theme_minimal() +
labs(
caption = paste0(
'N = ',
sum(performance.stats),
', Nullable = ',
performance.stats['Nullable'],
', NonNull = ',
performance.stats['NonNull']
),
color = NULL,
x = 'minimal support (log10)',
y = 'precision / recall'
) +
ylim(c(0, 1)) +
scale_x_log10(
breaks = c(1, 3, 10, 30, 100, 300, 1000, 3000, 10000),
minor_breaks = NULL
)
)
}
write.performance <-
function(artifact,
performance,
performance.stats,
ground.truth,
use_,
nullness_) {
h_ <- performance.stats[nullness_] / sum(performance.stats)
plot.performance(performance %>% filter(nullness == nullness_),
performance.stats) +
labs(title = paste0(use_, ' ', nullness_, ' inference vs. ', ground.truth)) +
geom_hline(data = h_, yintercept = h_) +
geom_text(
x = 0,
y = h_,
label = 'constant precision',
vjust = -1,
hjust = -2
) +
ggsave(
paste0(
'evaluation/usage/',
artifact,
'-performance-',
ground.truth,
'-',
use_,
'-',
nullness_,
'.pdf'
),
width = 14,
height = 10,
units = 'cm'
)
}
evaluate.performance.use <-
function(artifact, inference, ground.truth, use_) {
.d <- inference %>% filter(use == use_)
performance <- evaluate.perfomance(.d)
performance.stats <- evaluate.perfomance.stats(.d)
write.performance(artifact,
performance,
performance.stats,
ground.truth,
use_,
'Nullable')
write.performance(artifact,
performance,
performance.stats,
ground.truth,
use_,
'NonNull')
}
evaluate.performance.ground.truth <-
function(artifact, inference, ground.truth) {
evaluate.performance.use(artifact, inference, ground.truth, 'parameter')
evaluate.performance.use(artifact, inference, ground.truth, 'return')
}
evaluate.performance.ground.truth.use <-
function(artifact, inference) {
if (artifact != 'jre') {
evaluate.performance.ground.truth(artifact,
filter.inference.definition(inference),
'definition')
}
evaluate.performance.ground.truth(artifact, filter.inference.jaif(inference), 'jaif')
}
write.usage.clean <- function(artifact, usage) {
write.csv(usage, file = paste0('evaluation/usage/', artifact, '-clean.csv'))
}
infer.nullness.parameter <- function(inference, min.n.usage) {
return(inference %>% mutate(nullness.usage = ifelse(
use == 'parameter',
ifelse(
nullability > 0,
# once passed null
'Nullable',
ifelse(n.usage < min.n.usage,
# low support
'Unknown',
'NonNull')
),
nullness.usage
)))
}
infer.nullness.return <- function(inference, min.n.usage) {
return(inference %>% mutate(nullness.usage = ifelse(
use == 'return',
ifelse(
nullability < 0.05,
# low confidence in return null
'NonNull',
ifelse(n.usage < min.n.usage,
# low support
'Unknown',
'Nullable')
),
nullness.usage
)))
}
infer.nullness <- function(inference, min.n.usage) {
return(
inference %>%
mutate(nullness.usage = NA) %>%
infer.nullness.parameter(min.n.usage) %>%
infer.nullness.return(min.n.usage)
)
}
infer.nullness.multi <- function(inference, s) {
.r <- data.frame()
for (x in s) {
nullness.inferred <-
inference %>% infer.nullness(x) %>% mutate(min.n.usage = x)
.r <- .r %>% bind_rows(nullness.inferred)
}
return(.r)
}
plot.performance.overview <- function(inference) {
return(
infer.nullness.multi(
inference %>%
filter.inference.jaif() %>%
left_join(
jar %>%
select(class,
name,
index,
type),
by = c('class', 'name', 'index')
),
c(1, 10, 100, 1000)
) %>%
group_by(use) %>%
mutate(use.label = paste0(use, ', N = ', n())) %>%
ungroup() %>%
group_by(min.n.usage,
use,
use.label,
nullness.actual,
nullness.usage) %>%
summarize(n = n()) %>%
ungroup() %>%
mutate(
predicted.actual = factor(
paste(nullness.usage, nullness.actual, sep = '.'),
levels = c(
'NonNull.NonNull',
'Unknown.NonNull',
'Nullable.NonNull',
'NonNull.Nullable',
'Unknown.Nullable',
'Nullable.Nullable'
)
),
min.n.usage = as.factor(min.n.usage)
) %>%
ggplot(aes(
x = min.n.usage,
y = n,
fill = predicted.actual,
label = n
)) +
facet_grid(. ~ use.label) +
geom_bar(stat = 'identity') +
geom_text(position = position_stack(vjust = 0.5), size = 3) +
scale_fill_manual(
name = 'predicted.actual',
breaks = c(
'NonNull.NonNull',
'Unknown.NonNull',
'Nullable.NonNull',
'NonNull.Nullable',
'Unknown.Nullable',
'Nullable.Nullable'
),
labels = c(
'NonNull.NonNull',
'Unknown.NonNull',
'Nullable.NonNull',
'NonNull.Nullable',
'Unknown.Nullable',
'Nullable.Nullable'
),
values = c(
'#339065',
'#5fc999',
'#9f51c3',
'#b576d1',
'#c25e48',
'#79392b'
)
) +
theme_minimal() +
labs(x = 'min usage', y = 'n') +
theme(plot.title = element_text(hjust = 0.5))
)
}
plot.performance.compatibility <- function(inference) {
.x <- infer.nullness.multi(
inference %>%
filter.inference.jaif() %>%
left_join(
jar %>%
select(class,
name,
index,
type),
by = c('class', 'name', 'index')
),
c(1, 10, 100, 1000)
) %>%
group_by(use) %>%
mutate(use.label = paste0(use, ', N = ', n())) %>%
ungroup()
.y <- .x %>%
group_by(min.n.usage,
use,
use.label,
nullness.actual,
nullness.usage) %>%
summarize(n = n()) %>%
ungroup() %>%
mutate(
compatible = ifelse(
use == 'parameter',
nullness.usage == nullness.actual |
nullness.actual == 'Nullable',
nullness.usage == nullness.actual |
nullness.actual == 'NonNull'
),
min.n.usage = as.factor(min.n.usage)
) %>%
group_by(min.n.usage,
use,
use.label) %>%
summarize(
precision.exact = sum(n[nullness.usage == nullness.actual &
nullness.usage != 'Unknown']) / (sum(n[nullness.usage == nullness.actual |
(nullness.usage != 'Unknown' &
nullness.usage != nullness.actual)])),
recall.exact = sum(n[nullness.usage == nullness.actual &
nullness.usage != 'Unknown']) / (sum(n[nullness.usage == nullness.actual |
nullness.usage == 'Unknown'])),
r.compatible = sum(n[compatible &
nullness.usage != 'Unknown']) / sum(n),
r.unknown = sum(n[nullness.usage == 'Unknown']) / sum(n),
r.incompatible = sum(n[!compatible &
nullness.usage != 'Unknown']) / sum(n)
) %>%
ungroup() %>%
gather(
key = 'type',
value = 'value',
precision.exact,
recall.exact,
r.compatible,
r.unknown,
r.incompatible
)
h_ <- .x %>%
filter(min.n.usage == 1) %>%
group_by(use,
use.label, nullness.actual) %>%
summarize(n = n()) %>%
ungroup() %>%
group_by(use) %>%
mutate(precision = n / sum(n)) %>%
arrange(desc(precision)) %>%
slice(1) %>%
ungroup()
return(
ggplot(.y, aes(x = min.n.usage, group = type)) +
facet_grid(. ~ use.label) +
geom_hline(data = h_, aes(yintercept = precision)) +
geom_text(
data = h_,
group = 1,
x = 0,
vjust = -1,
hjust = -3,
size = 3,
aes(y = precision,
label = nullness.actual)
) +
geom_bar(
data = .y %>%
filter(type %in% c(
'r.compatible',
'r.unknown',
'r.incompatible'
)) %>%
mutate(type = factor(
type,
levels = c('r.incompatible',
'r.unknown',
'r.compatible'),
labels = c('incompatible',
'unknown',
'compatible')
)),
aes(y = value, fill = type),
stat = 'identity',
alpha = 0.5
) +
geom_step(
data = .y %>%
filter(type == 'recall.exact'),
aes(y = value, color = 'recall')
) +
geom_step(
data = .y %>%
filter(type == 'precision.exact'),
aes(y = value, color = 'precision')
) +
scale_color_manual(
name = 'precision / recall',
breaks = c('precision',
'recall'),
labels = c('precision',
'recall'),
values = c('#713EA1',
'#AD413F')
) +
scale_fill_manual(
name = 'compatibility',
breaks = c('incompatible',
'unknown',
'compatible'),
labels = c('incompatible',
'unknown',
'compatible'),
values = c('#1B4D45',
'#9A9A9A',
'#349A8A')
) +
theme_minimal() +
labs(x = 'min usage', y = 'ratio') +
theme(plot.title = element_text(hjust = 0.5))
)
}
write.performance.overview <-
function(artifact, performance.overview) {
performance.overview +
labs(title = paste0('prediction, ', artifact)) +
ggsave(
paste0('evaluation/usage/',
artifact,
'-performance-overview.pdf'),
width = 14,
height = 10,
units = 'cm'
)
}
write.performance.compatibility <-
function(artifact, performance.compatibility) {
performance.compatibility +
labs(title = paste0('compatibility, ', artifact)) +
ggsave(
paste0(
'evaluation/usage/',
artifact,
'-performance-compatibility.pdf'
),
width = 14,
height = 10,
units = 'cm'
)
}
process <- function(artifact) {
jar <- read.jar(artifact)
jaif <- read.jaif(artifact)
jaif.integrity.disagree <- read.jaif.integrity.disagree(artifact)
definition <- read.definition(artifact)
usage <- read.usage(artifact)
coverage <-
combine.coverage(jar, jaif, jaif.integrity.disagree, definition, usage)
write.coverage(artifact, coverage)
inference <-
combine.inference(jar, jaif, jaif.integrity.disagree, definition, usage)
inference.merged <- merge.inference(inference)
#evaluate.performance.ground.truth.use(artifact, inference.merged)
write.usage.clean(artifact, usage)
write.performance.overview(artifact, plot.performance.overview(inference.merged))
write.performance.compatibility(artifact,
plot.performance.compatibility(inference.merged))
return()
}
process('guava')
process('commons-io')
process('jre')
|
6dbde8433b5e714f2d26a9b0a613588f722c44cc
|
18347ef9bc1f489e63e83cf03338b7211d21b7c8
|
/tests/testthat/test-extract_variable.R
|
28958edc974647f61f1129e24625bdef9accfac7
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
stan-dev/posterior
|
cd1e0778f5b930b7ef97b9c1f09167f162fb9d7e
|
55e92336c2984be1a2487cdd489552a07e273d70
|
refs/heads/master
| 2023-08-18T07:53:15.023052
| 2023-08-07T08:13:36
| 2023-08-07T08:13:36
| 212,145,446
| 105
| 20
|
NOASSERTION
| 2023-08-07T08:13:37
| 2019-10-01T16:30:28
|
R
|
UTF-8
|
R
| false
| false
| 1,467
|
r
|
test-extract_variable.R
|
test_that("extract_variable works the same for different formats", {
draws_array <- as_draws_array(example_draws())
mu_array <- extract_variable(draws_array, "mu")
draws_df <- as_draws_df(example_draws())
mu_df <- extract_variable(draws_df, "mu")
expect_equal(mu_df, mu_array)
draws_list <- as_draws_list(example_draws())
mu_list <- extract_variable(draws_list, "mu")
expect_equal(mu_list, mu_array)
draws_matrix <- as_draws_matrix(example_draws())
mu_matrix <- extract_variable(draws_matrix, "mu")
expect_equal(as.vector(mu_matrix), as.vector(mu_array))
draws_rvars <- as_draws_rvars(example_draws())
mu_matrix <- extract_variable(draws_rvars, "mu")
expect_equal(as.vector(mu_matrix), as.vector(mu_array))
})
test_that("extract_variable works for draws_rvars on an indexed variable", {
draws_array <- as_draws_array(example_draws())
theta1_array <- extract_variable(draws_array, "theta[1]")
draws_rvars <- as_draws_rvars(example_draws())
theta1_matrix <- extract_variable(draws_rvars, "theta[1]")
expect_equal(as.vector(theta1_matrix), as.vector(theta1_array))
expect_error(extract_variable(draws_rvars, "theta"), "Cannot extract non-scalar value")
})
test_that("extract_variable default method works", {
# it should convert matrix to draws object
x <- matrix(1:20, nrow = 10, ncol = 2)
colnames(x) <- c("A", "B")
expect_equal(extract_variable(x, "A"), 1:10)
expect_equal(extract_variable(x, "B"), 11:20)
})
|
7d7c926d62850ccfc4f275e736c2fbae72d60eb9
|
749a5921cc2353d0bc2c630cf5df17c53cc36b76
|
/datastructures/4_Lists.R
|
055abcd6c7dfb39b26dd603200f75918cb6a4fc3
|
[] |
no_license
|
julmue/rlang_mnl_datastructures
|
60bc59a4eb27bd2300c4c4ef5c02b71ed4d50313
|
2acaa713725fab141ce3f9b7c26871fb12371f08
|
refs/heads/master
| 2020-04-14T21:24:38.011919
| 2019-01-04T16:03:24
| 2019-01-04T16:03:24
| 164,127,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,786
|
r
|
4_Lists.R
|
# -----------------------------------------------------------------------------
# Lists
# In contrast to homogenous vectors (same mode for every element)
# lists are heterogenous (differnt modes for different elements possible)
# Lists are central to R and form the basis to Data-Frames and OOP.
# Creation of lists
# "Normal" Vectors: Atomic Vectors - their components cannot be broken down in smaller elements
# Lists: Recursive Vectors
# List Creation
## Creating lists with the list() function
list("Joe", 55000, TRUE)
## List elements can be named (names are called "tags" in R)
# Whenever possible use names instead of numeric indices
list(name = "Joe", salary = 55000, union = TRUE)
# since lists are vectors they can be created using vecor()
l <- vector(mode = "list" )
l[["tag"]] <- 666
l$tag
#> 666
# List Access
l <- list(tag1 = TRUE, tag2 = "two", tag3 = 3)
## List access via indexing
l[[1]]
#> TRUE
mode(l[[1]])
#> logical
## List access via tags and double brackets return in the datatype of the element
l$tag1
#> TRUE
mode(l$tag1)
#> logical
l[["tag1"]]
#> TRUE
mode(l[["tag1"]])
#> logical
## List access via index or tags and single brackts return a sublist
l[1]
#> TRUE
mode(l[1])
#> list
l["tag1"]
#> TRUE
mode(l["tag1"])
#> list
# returns a sublist
l[2:3]
l[c("tag1","tag2")]
## Adding list elements
l <- vector(mode = "list")
## via indices
l[3:5] <- c(FALSE, TRUE, TRUE)
## via tags
l$a <- "sailing"
l$b <- 42
## Deleting list elements
# List elements get deleted by setting them to NULL
l[3:5] <- NULL
l$a <- NULL
## List concatenation
c(list("XXX", 1, TRUE), list(TRUE, 42))
## size of a list
l <- list(1,2,3,4,5)
length(l)
#> 5
## Accessing list components
# if list elements have names they can be accessed via the names() function
l <- list(name = "Joe", salary = 55000, union = TRUE)
names(l)
#> "name" salary" "union"
# obtain values via unlist()
# this function casts to the highest common base type -> strings as the highest denominator
l <- list(name = "Joe", salary = 55000, union = TRUE)
unlist(l)
#> name salary union
#> "Joe" "55000" "TRUE"
l <- list(tag1 = 1, tag2 = 2, tag3 = 3)
unlist(l)
## Applying functions to a list
# * lapply(): list apply
# * lapply():
# lapply(): call a function for each element of a list
l <- lapply(list(1:3, 13:27), median)
#> [[1]]
#> [1] 2
#> [[2]]
#> [1] 20
mode(l)
#> "list"
# sapply(): call a function for each element of a list and simplify the result to a vector or matrix
l <- sapply(list(1:3, 13:27), median)
#> 2 20
mode(l)
#> "numeric"
# Recursive lists
# lists can be recursive
a <- list("abc", 1:3)
b <- list(TRUE, 42)
c <- list(a,b) # list of two lists
# lists can be flattenend
d <- c(a,b, recursive = TRUE)
mode(d)
#> "list"
length(d)
#> 5
|
022f0f7953df9f1fa5525c7d69bbda7e4907383d
|
df7f969a5b69d7d1065f9237bfa86ef166a94509
|
/man/predict.MclustDA.Rd
|
56f1d2a789f9b6013754673e87129bacd2dbdca9
|
[] |
no_license
|
cran/mclust
|
81468701065ce54422acf16830bd7fa50bc5f3e4
|
30ee549b91dfb4e60afbf90fad27ca80213b9c82
|
refs/heads/master
| 2022-11-13T09:50:33.457389
| 2022-10-31T09:57:37
| 2022-10-31T09:57:37
| 17,697,361
| 19
| 23
| null | 2016-10-27T17:16:38
| 2014-03-13T05:17:48
|
HTML
|
UTF-8
|
R
| false
| false
| 1,581
|
rd
|
predict.MclustDA.Rd
|
\name{predict.MclustDA}
\alias{predict.MclustDA}
\title{Classify multivariate observations by Gaussian finite mixture modeling}
\description{Classify multivariate observations based on Gaussian finite mixture models estimated by \code{\link{MclustDA}}.}
\usage{
\method{predict}{MclustDA}(object, newdata, prop = object$prop, \dots)
}
\arguments{
\item{object}{an object of class \code{'MclustDA'} resulting from a call to \code{\link{MclustDA}}.}
\item{newdata}{a data frame or matrix giving the data. If missing the train data obtained from the call to \code{\link{MclustDA}} are classified.}
\item{prop}{the class proportions or prior class probabilities to belong to each class; by default, this is set at the class proportions in the training data.}
\item{\dots}{further arguments passed to or from other methods.}
}
% \details{}
\value{
Returns a list of with the following components:
\item{classification}{a factor of predicted class labels for \code{newdata}.}
\item{z}{a matrix whose \emph{[i,k]}th entry is the probability that
observation \emph{i} in \code{newdata} belongs to the \emph{k}th class.}
}
\author{Luca Scrucca}
% \note{}
\seealso{\code{\link{MclustDA}}.}
\examples{
\donttest{
odd <- seq(from = 1, to = nrow(iris), by = 2)
even <- odd + 1
X.train <- iris[odd,-5]
Class.train <- iris[odd,5]
X.test <- iris[even,-5]
Class.test <- iris[even,5]
irisMclustDA <- MclustDA(X.train, Class.train)
predTrain <- predict(irisMclustDA)
predTrain
predTest <- predict(irisMclustDA, X.test)
predTest
}
}
\keyword{multivariate}
|
4842aa7eb7b2132ad4d985756a74c11c0b605b38
|
4bbf0e5ea2b5ebde88acda15522aac478126e79a
|
/R/get_completed_data_moments.R
|
d91c1b2829b3785148a902686a0eff768853a3fc
|
[
"MIT"
] |
permissive
|
rasel-biswas/miLRT
|
c74c18bce113726f7d626754b87c7c6de0fc6a1f
|
d4ea261b92bc8db2cb1f0a00e453bf8bcce2c5e3
|
refs/heads/main
| 2023-04-06T16:20:41.436230
| 2021-04-20T13:32:08
| 2021-04-20T13:32:08
| 353,214,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 824
|
r
|
get_completed_data_moments.R
|
#' This function computes completed-data moments.
#'
#' (This is an internal function).
#'
#' @return A list containing the completed-data estimates and
#' their variance-covariance matrices.
#'
#' @import mice
#' @export
get_completed_data_moments <- function(model, null_model, sandwich = FALSE){
names_1 <- names(coef(model$analyses[[1]]))
names_0 <- names(coef(null_model$analyses[[1]]))
theta_names <- setdiff(names_1, names_0)
theta_hat <- sapply(model$analyses, coef)
theta_hat <- theta_hat[theta_names, ]
p <- length(names_1)
i <- which(names_1 %in% theta_names)
U_hat <- vapply(model$analyses,
ifelse(sandwich == FALSE, vcov, sandwich::vcovHC),
FUN.VALUE = matrix(0, p, p))
U_hat <- U_hat[i, i,]
out <- list(theta_hat = theta_hat, U_hat = U_hat)
out
}
|
f77c49286c56fcbbac04037df2553473382881d9
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH12/EX12.12.4/Ex12_12_4.R
|
a0810766740b4ecf8bba76672f59cbe809cf2612
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 223
|
r
|
Ex12_12_4.R
|
#PAGE=250
k=6
o=c(25,17,15,23,24,16)
f=20
s=sum((o-f)**2)/f
s
x=0.95
x=qchisq(x,df=k-1)
x=round(x,digits = 1)
x
if (s<x) l<-TRUE
l
x=0.05
x=qchisq(x,df=k-1)
x=round(x,digits = 2)
x
if (s>x) l<-FALSE
l
|
e96636e0ecae6bc2f007f85bc2dd4dd7d48cda93
|
e8bd1221d5edf301183e222ae215afa7f3a4c166
|
/man/interpolate.sst.Rd
|
3470c728dd4990c32e883771e84d416a3d422990
|
[] |
no_license
|
dill/inlabru
|
1b9a581ae5b56246fcd748db8df051ae4ff8bfa8
|
e2c38a34d591f712b57cbe430c24bb0a82f03ae4
|
refs/heads/master
| 2021-01-22T22:53:21.963501
| 2017-03-18T09:30:08
| 2017-03-18T09:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 493
|
rd
|
interpolate.sst.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sst.R
\name{interpolate.sst}
\alias{interpolate.sst}
\title{Interpolate sea surface temperature (SST)}
\usage{
interpolate.sst(sst, loc)
}
\arguments{
\item{sst}{Spatial SST estimate or list of such}
\item{loc}{Locations to interpolate SST at}
}
\value{
sst.at.loc SST at the provided locations
}
\description{
Interpolate sea surface temperature (SST)
}
\author{
Fabian E. Bachl <\email{f.e.bachl@bath.ac.uk}>
}
|
35d49f88734de64b9c5eaba8324ac5547a0be63d
|
81f518e29b4cac7cd61ea8e2c895d4f7edfd209b
|
/R/cor_test_gamma.R
|
ef604c7ff6d475aa6795aff5377abc3c4714534c
|
[] |
no_license
|
cran/correlation
|
5e2d691df07edb5aa69aba35f3780e19ffefcf57
|
db4fd0ce345a0dcee08de2a9c1810f79b5575b63
|
refs/heads/master
| 2023-04-13T04:06:37.689083
| 2023-04-06T08:23:26
| 2023-04-06T08:23:26
| 247,916,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 743
|
r
|
cor_test_gamma.R
|
#' @keywords internal
.cor_test_gamma <- function(data, x, y, ci = 0.95, ...) {
var_x <- .complete_variable_x(data, x, y)
var_y <- .complete_variable_y(data, x, y)
# Get r value
Rx <- outer(var_x, var_x, function(u, v) sign(u - v))
Ry <- outer(var_y, var_y, function(u, v) sign(u - v))
S1 <- Rx * Ry
r <- sum(S1) / sum(abs(S1))
# t-value approximation
p <- cor_to_p(r, n = length(var_x))
ci_vals <- cor_to_ci(r, n = length(var_x), ci = ci)
data.frame(
Parameter1 = x,
Parameter2 = y,
r = r,
t = p$statistic,
df_error = length(var_x) - 2,
p = p$p,
CI_low = ci_vals$CI_low,
CI_high = ci_vals$CI_high,
Method = "Gamma",
stringsAsFactors = FALSE
)
}
|
63b9df073c10cd5088fefe4f9219b524bf01241c
|
3fe2e31a49590c6b0c3965c12cb87c8e3e784146
|
/tests/testthat/test-functions.R.R
|
20e27db5f8a70e642f962ce78e8b27813d37437b
|
[] |
no_license
|
MarkPados/Earthquakes.capstone.project
|
5e2bd947d4b603e3e30da0fd33acccd6411cf78c
|
442a422d9a0860db79a08442ad398079d0df6c84
|
refs/heads/master
| 2020-03-25T02:23:44.116024
| 2018-08-16T10:59:39
| 2018-08-16T10:59:39
| 143,287,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
test-functions.R.R
|
context("testthat.R")
test_that("geom_timeline returns a ggplot", {
library(dplyr)
raw_data <- Earthquakes.capstone.project::eq_read_data()
clean_data <- Earthquakes.capstone.project::eq_clean_data(raw_data)
eq_chart <- clean_data %>%
filter(COUNTRY %in% c("HUNGARY", "SLOVAKIA", "CZECH REPUBLIC", "CROATIA", "POLAND"), YEAR >= 1500) %>%
ggplot(aes(x = DATE, y = COUNTRY)) +
Earthquakes.capstone.project::geom_timeline()
expect_is(eq_chart, "ggplot")
})
test_that("geom_timeline_label returns also a ggplot", {
library(dplyr)
raw_data = Earthquakes.capstone.project::eq_read_data()
clean_data = Earthquakes.capstone.project::eq_clean_data(raw_data)
eq_chart <- clean_data %>%
filter(COUNTRY %in% c("JAPAN"), YEAR >= 2000) %>%
ggplot(aes(x = DATE)) +
geom_timeline()+
geom_timeline_label(aes(label = LOCATION_NAME, size = EQ_PRIMARY, nmax = 2))
expect_is(eq_chart, "ggplot")
})
test_that("ep_map works", {
library(dplyr)
raw_data = Earthquakes.capstone.project::eq_read_data()
clean_data = Earthquakes.capstone.project::eq_clean_data(raw_data)
eq_chart <- clean_data %>%
dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(DATE) >= 2000) %>%
eq_map(annot_col = "DATE")
expect_is(eq_chart, "leaflet")
expect_is(eq_chart, "htmlwidget")
})
test_that("ep_map_label works", {
library(dplyr)
raw_data = Earthquakes.capstone.project::eq_read_data()
clean_data = Earthquakes.capstone.project::eq_clean_data(raw_data)
eq_chart <- clean_data %>%
dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(DATE) >= 2000) %>%
dplyr::mutate(popup_text = eq_create_label(.)) %>%
eq_map(annot_col = "popup_text")
expect_is(eq_chart, "leaflet")
expect_is(eq_chart, "htmlwidget")
})
|
f5b3be24d9e2900ab063ce4ee43a45cb73d23849
|
4e1db2cc0f8b612510e9ddb94f5ec16964985cff
|
/cachematrix.R
|
2b27653380779228c790f8426ce030ae92fa7c67
|
[] |
no_license
|
orsleyk/ProgrammingAssignment2
|
8f9c61bc18fab0d78707f0ec5b04d3f54b7e2f5c
|
e8043f9c93995bb032ed3d740e1c4ab6cdd4d7e2
|
refs/heads/master
| 2020-03-21T10:47:41.331200
| 2018-06-24T10:54:17
| 2018-06-24T10:54:17
| 138,471,410
| 0
| 0
| null | 2018-06-24T10:03:50
| 2018-06-24T10:03:49
| null |
UTF-8
|
R
| false
| false
| 635
|
r
|
cachematrix.R
|
##Inverts a given matrix
makeCacheMatrix <- function(X = matrix()) {
Z = NULL
set_val <- function(A){
X <<- A
Z <<- NULL
}
get_val <-function() X
set_inverse <- function(inverse) Z <<- inverse
get_inverse <- function() Z
list(set_val = set_val,
get_val = get_val,
set_inverse = set_inverse,
get_inverse= get_inverse)
}
## Retreives unchaged matrix inverse value from cache
cacheSolve <- function(X, ...){
Z <-X$get_inverse()
matrix <- X$get_val()
Z <- solve(matrix, ...)
X$set_inverse(Z)
Z
## Returns a matrix that is the inverse of X
}
|
92dda624ab48213d14342f72ce3b0163cf895612
|
b2692cad2f83c97518acade33ef2e03b74b6e0df
|
/R/sub2ind.R
|
a4e607029816d578004bc27a08283f8f1fbc7858
|
[] |
no_license
|
neuroimaginador/ni.quantification
|
03e55e6f1eaaacebaa008ca3b045a307b78b9cfb
|
5a3495c1b685eb573aa3f14d5c52129d3be69003
|
refs/heads/master
| 2020-04-04T19:14:51.144154
| 2018-10-22T09:01:01
| 2018-10-22T09:01:01
| 156,198,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
sub2ind.R
|
#' Convert multi-dimensional indexing to linear indexing
#' @title Linear Index from Multi-dimensional Subscripts
#'
#' @author Brain Dynamics
#'
#' @param dims The dimensions of the array.
#' @param subs Multi-dimensional subscript to convert to linear indices.
#' @param offset (optional, default = 1) The offset for the linear indices. In R, linear indices start at 1, whileas in other languages, the usual offset is 0.
#'
#' @return Linear indices corresponding to the multi-dimensional subscripts for an array of given dimensions.
#'
sub2ind <- function(dims, subs, offset = 1L) {
p <- cumprod(c(1, dims))
nd <- length(dims)
idx <- vector(mode = "integer", length = nrow(subs))
for (i in 1:nd) {
idx <- idx + as.integer((subs[ , i] - 1) * p[i])
}
idx <- idx + offset
return(idx)
}
|
d0dc32f8c6322293bf7f04fb00295b756868656a
|
1ccc905832d7035cd95272e634fc3b4ca9e90579
|
/codes/lec21VECppp.R
|
7677beec0e9aab1048a7f95fa5d3fb28ad27c247
|
[] |
no_license
|
jduras/TTU-ECO5316
|
8c3219e8598d6c395789b2e67b301c33ea96a2cd
|
ee02656e439a334a97001952709097a1f9355130
|
refs/heads/master
| 2020-04-10T22:27:38.549633
| 2019-05-20T01:42:22
| 2019-05-20T01:42:22
| 161,325,258
| 3
| 10
| null | 2019-05-20T16:09:04
| 2018-12-11T11:42:02
|
R
|
UTF-8
|
R
| false
| false
| 2,534
|
r
|
lec21VECppp.R
|
# Lecture script: testing purchasing power parity (PPP) with Johansen
# cointegration tests and a rank-1 VEC model, one model per country.
library(magrittr)
library(readxl)
library(tidyquant)
library(timetk)
library(urca)
library(vars)
library(ggfortify)

# Global ggplot theme: plain background, left-aligned facet strip labels.
theme_set(theme_bw() +
              theme(strip.background = element_blank(),
                    strip.text.x = element_text(hjust = 0)))

# import data on price indices and exchange rates
# NOTE(review): read_csv() is from readr, not attached explicitly here -
# presumably available via tidyquant's tidyverse dependencies; confirm.
ppp_raw_tbl <- read_csv("data/coint_ppp.csv")
ppp_raw_tbl
glimpse(ppp_raw_tbl)

# Tidy up: parse ENTRY ("YYYY:MM:00") into a yearmon index, rename the CPI
# and exchange-rate series, and coerce character columns to numeric.
ppp_tbl <-
    ppp_raw_tbl %>%
    mutate(yearm = as.yearmon(ENTRY, format = "%Y:%m:00")) %>%
    dplyr::select(yearm, everything(), -ENTRY) %>%
    rename(cpi_usa = USCPI,
           cpi_can = CANCPI,
           cpi_jap = JAPANCPI,
           cpi_swe = SWCPI,
           exr_can = CANEX,
           exr_jap = JAPANEX,
           exr_swe = SWEX) %>%
    mutate_if(is.character, as.numeric)

glimpse(ppp_tbl)
head(ppp_tbl, 12)
tail(ppp_tbl, 12)
summary(ppp_tbl)

# Quick visual check of all series as monthly time series.
ppp_tbl %>%
    tk_ts(select = -yearm, start = year(.$yearm[1]), frequency = 12) %>%
    autoplot()

# rows with some missing values
ppp_tbl %>%
    filter_all(any_vars(is.na(.)))

# Reshape to one nested data frame per country, with each series logged and
# normalised to its first observation.
ppp_tbl_by_country <-
    ppp_tbl %>%
    gather(variable, level, -c(yearm, cpi_usa)) %>%
    separate(variable, c("measure", "country")) %>%
    spread(measure, level) %>%
    arrange(country, yearm) %>%
    # mutate_at(vars(-yearm, - country), log)
    # mutate_if(is.numeric, log)
    mutate(lcpi_usa = log(cpi_usa / cpi_usa[1]),
           lcpi = log(cpi / cpi[1]),
           lexr = log(exr / exr[1])) %>%
    group_by(country) %>%
    nest()

# Peek at the nested data for Canada.
ppp_tbl_by_country %>%
    filter(country == "can") %>%
    pull(data)

# Per country: build full-sample and pre-1999 ts objects, run the Johansen
# eigenvalue test (4 lags, transitory spec, monthly seasonal dummies) on the
# pre-1999 sample, and fit the VEC model with cointegration rank r = 1.
ppp_ca_by_country <-
    ppp_tbl_by_country %>%
    mutate(data_ts = map(data, ~.x %>%
                             tk_ts(select = c(lcpi_usa, lcpi, lexr), start = year(.$yearm[1]), frequency = 12)),
           data_ts_pre1998 = map(data, ~.x %>%
                                     filter(yearm <= as.yearmon("Dec 1998")) %>%
                                     tk_ts(select = c(lcpi_usa, lcpi, lexr), start = year(.$yearm[1]), frequency = 12)),
           ca = map(data_ts_pre1998, ~ca.jo(.x, ecdet = "none", type = "eigen", K = 4, spec = "transitory", season = 12)),
           vec = map(ca, ~cajorls(.x, r = 1)))

# Johansen test summaries for each country.
ppp_ca_by_country %$%
    map(ca, summary)

# Summaries of the restricted VEC regressions (the "rlm" component).
ppp_ca_by_country %$%
    map(vec, ~ .x %>% pluck("rlm") %>% summary())

# plot residuals and their ACF and PACF
# ppp_ca_by_country %$%
#     map(ca, plotres)

# test for restricted constant in cointegration relationship rather than as a drift
lt_test <- ppp_ca_by_country %$%
    map(ca, ~lttest(.x, r = 1))
lt_test
|
384075fb368dd39f8ac48a85d681c49262905c5a
|
f01bcca93ea70435b1d30e36ed65a58a548ba29d
|
/man/compareGroups-internal.Rd
|
ece83707729db2a986a5eade3561be28130116f7
|
[] |
no_license
|
isubirana/compareGroups
|
e250a6b517ade341488f7682ca59c47ce8bab11d
|
103523e7bd547bd3be7070034a5dc85d5b117127
|
refs/heads/master
| 2023-07-09T09:05:27.205884
| 2023-06-28T15:03:16
| 2023-06-28T15:03:16
| 100,396,090
| 32
| 16
| null | 2022-10-21T08:56:25
| 2017-08-15T16:17:40
|
HTML
|
UTF-8
|
R
| false
| false
| 3,730
|
rd
|
compareGroups-internal.Rd
|
% --- compareGroups-internal.Rd ---
\name{compareGroups-internal}
\alias{compareGroups-internal}
\alias{chisq.test2}
\alias{combn2}
\alias{compare.i}
\alias{descrip}
\alias{descripSurv}
\alias{flip}
\alias{format2}
\alias{logrank.pval}
\alias{signifdec.i}
\alias{signifdec}
\alias{summ.i}
\alias{table.i}
\alias{update.formula2}
\alias{KMg.plot}
\alias{Cox.plot}
\alias{bar2.plot}
\alias{box.plot}
\alias{KM.plot}
\alias{bar.plot}
\alias{norm.plot}
\alias{prepare}
\alias{trim}
\title{Internal compareGroups functions}
\description{Internal compareGroups functions}
\usage{
chisq.test2(obj, chisq.test.perm, chisq.test.B, chisq.test.seed)
combn2(x)
compareGroups.fit(X, y, Xext, selec, method, timemax, alpha, min.dis, max.ylev, max.xlev,
include.label, Q1, Q3, simplify, ref, ref.no, fact.ratio, ref.y, p.corrected,
compute.ratio, include.miss, oddsratio.method, chisq.test.perm, byrow,
chisq.test.B, chisq.test.seed, Date.format, var.equal, conf.level, surv,
riskratio, riskratio.method, compute.prop, lab.missing)
compare.i(x, y, selec.i, method.i, timemax.i, alpha, min.dis, max.xlev, varname, Q1, Q3,
groups, simplify, Xext, ref, fact.ratio, ref.y, p.corrected, compute.ratio,
include.miss, oddsratio.method, chisq.test.perm, byrow,chisq.test.B,
chisq.test.seed, Date.format, var.equal, conf.level, surv, riskratio,
riskratio.method, compute.prop, lab.missing)
descripSurv(x, y, timemax, surv)
descrip(x, y, method, Q1, Q3, conf.level)
confinterval(x, method, conf.level)
flip(x)
format2(x, digits = NULL, ...)
\method{formula}{compareGroups}(x, ...)
logrank.pval(x,y)
signifdec.i(x, digits)
signifdec(x, digits)
summ.i(x)
table.i(x, hide.i, digits, digits.ratio, type, varname, hide.i.no, digits.p, sd.type,
q.type, spchar, show.ci)
\method{update}{formula2}(object, new, ...)
KMg.plot(x, y, file, var.label.x, var.label.y, ...)
Cox.plot(x, y, file, var.label.x, var.label.y, ...)
bar2.plot(x, y, file, var.label.x, var.label.y, perc, byrow, ...)
box.plot(x, y, file, var.label.x, var.label.y, ...)
KM.plot(x, file, var.label.x, ...)
bar.plot(x, file, var.label.x, perc, ...)
norm.plot(x, file, var.label.x, z, n.breaks, ...)
prepare(x, nmax, header.labels)
snpQC(X, sep, verbose)
export2mdcbind(x, which.table, nmax, header.labels, caption, strip, first.strip,
background, width, size, landscape, format, header.background, header.color,
position, ...)
export2mdword(x, which.table, nmax, header.labels, caption, strip,
first.strip, background, size, header.background, header.color)
export2mdwordcbind(x, which.table, nmax, header.labels, caption, strip,
first.strip, background, size, header.background, header.color)
trim(x)
oddsratio(x, method = c("midp", "fisher", "wald", "small"), conf.level = 0.95)
riskratio(x, method = c("wald", "small", "boot"), conf.level = 0.95)
epitable(..., ncol = 2, byrow = TRUE, rev = c("neither", "rows", "columns", "both"))
table.margins(x)
or.midp(x, conf.level = 0.95, byrow = TRUE, interval = c(0,1000))
tab2by2.test(x)
ormidp.test(a1, a0, b1, b0, or = 1)
oddsratio.midp(x, conf.level = 0.95)
oddsratio.fisher(x, conf.level = 0.95)
oddsratio.wald(x, conf.level = 0.95)
oddsratio.small(x, conf.level = 0.95)
riskratio.small(x, conf.level = 0.95)
riskratio.wald(x, conf.level = 0.95)
riskratio.boot(x, conf.level = 0.95)
setupSNP2(data, colSNPs, sep)
snp2(x, sep = "/", name.genotypes, reorder = "common",
remove.spaces = TRUE, allow.partial.missing = FALSE)
reorder.snp2(x, ref = "common", ...)
\method{summary}{snp2}(object, ...)
SNPHWE2(x)
}
\details{These are not to be called by the user}
\keyword{internal}
|
eda46f8734cca356e0e3fa40797d6d914f92b0a3
|
307b0f73161701e48e24192aea10713c4c76db13
|
/man/insert_row.Rd
|
f69ecdee0d23f5ef2639678330b099474863dedc
|
[] |
no_license
|
spgarbet/tangram
|
aef70355a5aa28cc39015bb270a7a5fd9ab4333c
|
bd3fc4b47018ba47982f2cfbe25b0b93d1023d4f
|
refs/heads/master
| 2023-02-21T03:07:43.695509
| 2023-02-09T17:47:22
| 2023-02-09T17:47:22
| 65,498,245
| 58
| 3
| null | 2020-03-24T15:28:05
| 2016-08-11T20:07:01
|
R
|
UTF-8
|
R
| false
| true
| 666
|
rd
|
insert_row.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compile-post.R
\name{insert_row}
\alias{insert_row}
\title{Insert a row into a tangram table}
\usage{
insert_row(table, after, ..., class = NULL)
}
\arguments{
\item{table}{the table to modify}
\item{after}{numeric; The row to position the new row after. Can be zero for inserting a new first row.}
\item{...}{Table cells to insert. Cannot be larger than existing table.}
\item{class}{character; Classes to apply as directives to renderers}
}
\value{
the modified table
}
\description{
Insert a row into a tangram table. Will fill with empty cells is not enough cells are specified.
}
|
39640778667f24e338a3d26f5bf10e1b6b43a70a
|
4297befdc82667a621f3a0879cae94369576525a
|
/TexasCounty.R
|
fef966f116f9d7cf738e7f29379ed63f09fd0835
|
[] |
no_license
|
Soulstealer07/test
|
6aa9e2bf24384cab44a8ce589b5126a09b2ab92e
|
9cc29e881a01391ec31dfe19a5e1662b9107de78
|
refs/heads/master
| 2021-05-20T14:06:14.296841
| 2020-04-26T03:23:11
| 2020-04-26T03:23:11
| 252,326,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,539
|
r
|
TexasCounty.R
|
# Scrape Texas county population/area data from Wikipedia, pull county
# location data from the Texas open-data API, and draw a googleVis geo chart.
library(ggmap)
library(ggplot2)
library(dplyr)
library(rjson)
library(jsonlite)
library(RCurl)
library(googleVis)
library(rvest)

# Scrape the list-of-counties table (second table on the page).
TexasCounty<-'https://en.wikipedia.org/wiki/List_of_counties_in_Texas'
TexasCounty_pop<-read_html(TexasCounty)
TexasCounty_pop <- TexasCounty_pop %>%
  html_nodes(xpath='//*[@id="mw-content-text"]/div/table[2]') %>%
  html_table()
TexasCounty_pop <- as.data.frame(TexasCounty_pop)
# Drop columns not needed downstream (keeps county name, population, area).
TexasCounty_pop <- TexasCounty_pop[-c(3,4,5,6,9)]
head(TexasCounty_pop)
str(TexasCounty_pop)
#This table has relevant population and size data

# County outlines for Texas from the maps package (plotted as a side effect).
library(maps)
counties<-map_data("county")
Texas <- map('county','texas',fill=TRUE, col=palette())

#This API has relevant location data latitude and longitude for each county in Texas
base_url <- "https://data.texas.gov/resource/ups3-9e8m.json"
County_Gov<-fromJSON(base_url)
str(County_Gov)
# Drop unused columns from the API result.
County_Gov<- County_Gov[-c(5,6,7,8)]
str(County_Gov)

# NOTE(review): cbind assumes both tables have the same number of rows in
# the same county order - confirm before relying on the combined data.
County_combine <- cbind(TexasCounty_pop,County_Gov)
str(County_combine)
#Combine Wikipedia Table and Json Data
# NOTE(review): hard-coded, user-specific output path.
County_csv <- write.csv(County_combine,"C:/Users/micha/OneDrive/Documents/Test/TexasCounties/County.csv")

# NOTE(review): TexasCountyMerger_10 is never created in this script -
# presumably built interactively in an earlier session; confirm before
# running this file end-to-end.
TexasCountyMerger_10$LatLong <- paste(TexasCountyMerger_10$y_long,TexasCountyMerger_10$x_lat,sep=":")
# Marker map of Texas counties: colour = population, size = area.
G6 <- gvisGeoChart(TexasCountyMerger_10,locationvar = "LatLong",colorvar='Population',sizevar = "AreaSQMILES",hovervar = "County",options=list(region="US-TX",displayMode="Markers",resolution="provinces",width=600,height=400))
plot(G6)
str(TexasCountyMerger_10)
|
51251f07d07b7ece7baad0986647837c7f354cf5
|
d31492b02b5d4a249cfa3a731054be8ee02a5c2e
|
/man/Lagged2d-class.Rd
|
4a2b6f2b221ea326cd9ec1522fce81de401b59f5
|
[] |
no_license
|
GeoBosh/lagged
|
6fc59d2f8ad26a05f3454b07e158d82ab651040a
|
2697b9e5faf7fcf88f5b18da94c34de248a54be7
|
refs/heads/master
| 2022-05-02T03:37:40.976743
| 2022-04-04T21:33:54
| 2022-04-04T21:33:54
| 92,064,916
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,399
|
rd
|
Lagged2d-class.Rd
|
\name{Lagged2d-class}
\Rdversion{1.1}
\docType{class}
\alias{Lagged2d-class}
\title{Class Lagged2d}
\description{Class Lagged2d.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{Lagged(m)} or
\code{new("Lagged2d", data = m)}, where \code{m} is a matrix.
\code{new("Lagged2d", ...)} also works.
%% ~~ describe objects here ~~
}
\section{Slots}{
\describe{
\item{\code{data}:}{Object of class \code{"matrix"} ~~ }
}
}
\section{Extends}{
Class \code{"\linkS4class{Lagged}"}, directly.
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "Lagged2d", i = "numeric", j = "missing", drop = "logical")}: ... }
\item{[}{\code{signature(x = "Lagged2d", i = "numeric", j = "missing", drop = "missing")}: ... }
\item{[<-}{\code{signature(x = "Lagged2d", i = "numeric")}: ... }
\item{show}{\code{signature(object = "Lagged2d")}: ... }
\item{whichLagged}{\code{signature(x = "Lagged2d", y = "missing")}: ... }
}
}
%\references{
%%% ~~put references to the literature/web site here~~
%}
\author{Georgi N. Boshnakov}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{Lagged}},
\code{\linkS4class{Lagged1d}},
\code{\linkS4class{Lagged3d}}
}
\examples{
powers <- Lagged(outer(1:6, 0:6, `^`))
powers[[0]]
powers[[1]]
powers[[2]]
}
\keyword{classes}
|
6ef1adacfbfc455b1a3db7bc03096ad1f6b894c9
|
3b599ef6ea2d989552101c2a5efc418b0b7f9094
|
/man/getFileinfo.Rd
|
832789fd7408b7e6f8340be201489df4a7264542
|
[] |
permissive
|
adamjdeacon/RNMImport
|
f4e5ea443904e8c7a8fc13ffbadf741d5a145849
|
6076cb1c0000de4a19d29dcbf3ec6c3d0fce453b
|
refs/heads/master
| 2020-03-25T02:00:10.901861
| 2018-08-02T08:35:19
| 2018-08-02T08:35:19
| 143,268,708
| 0
| 0
|
BSD-3-Clause-Clear
| 2018-08-02T08:55:26
| 2018-08-02T08:55:26
| null |
UTF-8
|
R
| false
| false
| 2,202
|
rd
|
getFileinfo.Rd
|
\name{MiscExtractors}
\alias{getFileinfo}
\alias{getControlStatements}
\alias{getControltext}
\alias{getReporttext}
\alias{getNmVersion}
\alias{getSimInfo}
\title{Various miscellaneous "getter"/extractor functions}
\description{\code{getFileinfo} gets information about the report and control files,
\code{getControlStatements} extracts parsed control file statements as a list, \code{getControltext} extracts raw control file text,
\code{getReporttext} extracts raw report file text, \code{getNmVersion} returns information about the NONMEM version used to create a run or problem.
\code{getSimInfo} retrieves simulation information (number of simulations, seeds) from a simulation problem.}
\usage{
getFileinfo(run)
getControlStatements(obj, ...)
getControltext(run)
getReporttext(run)
getNmVersion(obj)
getSimInfo(obj, problemNum = 1, addRawInfo = TRUE)
}
\arguments{
\item{run}{An object of class NMRun}
\item{obj}{An object of class NMRun, or one that extends NMProblem (for getSimInfo, should be a problem with simulation step)}
\item{problemNum}{[N,1] Number of the problem if obj is a run}
\item{addRawInfo}{[L,1] Should the raw contents of the $SIM statement be added to the returned information?}
\item{...}{If used on an NMRun object, should be \code{problemNum}}
}
\value{\code{getFileInfo}: A data.frame holding information about the source file of the NONMEM run, and the
report. \code{getControlStatements}: The parsed control file statements corresponding to a particular
NONMEM problem. \code{getControltext}, \code{getReporttext}: The raw text of a control file that generated a run,
or the report file that was produced respectively. \code{getNmVersion}: Named character vector with NONMEM major and minor versions.
\code{getSimInfo} A numeric vector with 3 entries : numSimulations, seed1, and seed2. These are self-explanatory.
If addRawInfo is TRUE, the returned result will have an attribute named "rawStatement" that will have the text
of the control stream $SIM field. }
\author{Mango Solutions <support@mango-solutions.com>}
\keyword{methods}
\keyword{utilities}
|
e7ba29fc835c7e673dc439ecd1363f1e403f6e94
|
07bca83a42e515c3b9fa1b9d3c38102899491652
|
/Thesis codes/knn.R
|
a67d809bf695163006a86e149a0febd84f766ed8
|
[] |
no_license
|
toktok911/Master_Thesis
|
7f6afa36914fae69d4e1a12f9bf0b2ebbb034666
|
d5d64109844b3a09e6e44568e3bf274e4539b428
|
refs/heads/master
| 2020-04-19T17:52:02.948426
| 2019-02-02T20:44:01
| 2019-02-02T20:44:01
| 168,346,980
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
knn.R
|
# k-NN classification of app permission data, evaluated with a cross table.
# Fixes over the original: malformed string literal (""permission_test.csv),
# crossTable()/perdict typos (should be CrossTable()/the predicted vector),
# and `False` (undefined) instead of FALSE.
per_train <- read.csv("permission_train.csv")
per_test <- read.csv("permission_test.csv")

# Column 331 holds the class labels.
train_label <- per_train[, 331]
test_label <- per_test[, 331]

library(class)
# Classify each test row by its 20 nearest training neighbours.
pred <- knn(train = per_train, test = per_test, cl = train_label, k = 20)

library(gmodels)
# Predicted vs. actual labels, without chi-square contributions per cell.
CrossTable(x = test_label, y = pred, prop.chisq = FALSE)
|
7205887fa7f3b3c6699b6fb94035a10673c96f90
|
4e323cfe39ed9e3719c1444e6a6ef6c89bd3afa0
|
/R/plot-kobe.R
|
5436ee7e4e9293b67b4d3e0e875a5e9ac315d268
|
[] |
no_license
|
laurieKell/FLBRP
|
49568f831f59a46e16d4f7ca3ad64730abfce67e
|
cd3dbb929571ed979f6b1a0a225309ffe53e8861
|
refs/heads/master
| 2021-01-18T08:43:26.547014
| 2014-11-28T09:58:52
| 2014-11-28T09:58:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,204
|
r
|
plot-kobe.R
|
setGeneric('kobe', function(object,method,...) standardGeneric('kobe'))

# Assemble kobe phase-plot summaries from a data frame `object` that has
# columns year, stock and harvest (status ratios relative to reference
# points). `what` selects which summaries to build:
#   "trks" - percentile tracks of stock/harvest by year (percentiles `prob`)
#   "pts"  - the rows for a single `year`
#   "smry" - annual medians and kobe-quadrant proportions
#   "wrms" - `nwrms` randomly sampled iterations ("worms")
#   "sims" - the raw input data
# Returns the single summary when length(what) == 1, otherwise a named list.
kobeFn=function(object,what=c("sims","trks","pts","smry","wrms")[1],
                prob=c(0.75,0.5,.25),year=NULL,nwrms=10){
   trks. =NULL
   pts.  =NULL
   smry. =NULL
   wrms. =NULL
   sims. =NULL

   ## trks
   if ("trks" %in% what){
      trks.=rbind(ddply(object,.(year), function(x) data.frame(quantity="stock",  pctl=prob,value=quantile(x$stock,   prob, na.rm=TRUE))),
                  ddply(object,.(year), function(x) data.frame(quantity="harvest",pctl=prob,value=quantile(x$harvest, prob, na.rm=TRUE))))
      # Format percentile labels, e.g. 0.5 -> "50%" (ac() presumably
      # abbreviates as.character - defined elsewhere in the package).
      trks.=transform(trks.,pctl=paste(substr(ac(signif(pctl,2)),3,nchar(ac(signif(pctl,2)))),ifelse(nchar(ac(trks.$pctl))==3,"0",""),"%",sep=""))
      trks.=cast(trks.,year+pctl~quantity,value="value")
      }

   if ("pts" %in% what & !is.null(year))
      pts. =object[object$year==year,]

   if ("smry" %in% what)
      # NOTE(review): this branch references `sims` (undefined here) and
      # calls stock()/harvest() on a plain data frame - it looks unfinished;
      # confirm before relying on "smry" output.
      smry. =ddply(kobeP(sims),  .(year), function(x) data.frame(stock      =median(stock(object),   na.rm=TRUE),
                                                                 harvest    =median(harvest(object), na.rm=TRUE),
                                                                 red        =mean(  x$red,         na.rm=TRUE),
                                                                 yellow     =mean(  x$yellow,      na.rm=TRUE),
                                                                 green      =mean(  x$green,       na.rm=TRUE),
                                                                 overFished =mean(  x$overFished,  na.rm=TRUE),
                                                                 overFishing=mean(  x$overFishing, na.rm=TRUE)))

   if ("wrms" %in% what){
      # NOTE(review): `res` and `sims` are not defined at this point in the
      # function; this branch presumably meant `object` - confirm.
      wrms =sample(unique(res$iter),nwrms)
      wrms.=sims[sims$iter %in% wrms,]
      }

   if ("sims" %in% what)
      sims. =object

   res=list(trks=trks.,pts=pts.,smry=smry.,wrms=wrms.,sims=sims.)

   # Return the single element when only one summary type was requested.
   if (length(what)==1) res[[what]] else res[what]}
# Kobe summaries for an FLBRP: observed SSB and F are scaled by the chosen
# reference point ("msy" by default) to give stock/harvest status ratios.
# Fixes over the original: leftover debug print() calls (print(1),
# print(head(dat)), print(head(res))) and a dead commented-out selection
# block removed - kobeFn already selects res[[what]] when length(what) == 1.
setMethod('kobe', signature(object="FLBRP",method="missing"),
   function(object,proxy="msy",what=c("sims","trks","pts","smry","wrms")[1],prob=c(0.75,0.5,.25),year=NULL,nwrms=10){

   # Default to the final year of the object's range when none is given.
   if (is.null(year)) year=range(object)["maxyear"]

   # Status ratios as a plain data frame: stock = SSB/SSB_ref,
   # harvest = F/F_ref.
   dat=model.frame(mcf(FLQuants(stock  =ssb.obs( object)%/%refpts(object)[proxy,"ssb"],
                                harvest=fbar.obs(object)%/%refpts(object)[proxy,"harvest"])),drop=TRUE)

   kobeFn(dat,what=what,prob=prob,year=year,nwrms=nwrms)
   })
# Kobe summaries for a list of FLBRP objects: apply the FLBRP method to each
# element, then row-bind the per-element results by summary type.
setMethod('kobe', signature(object="FLBRPs",method="missing"),
   function(object,proxy="msy",what=c("sims","trks","pts","smry","wrms")[1],prob=c(0.75,0.5,.25),year=NULL,nwrms=10){

   res=llply(object,function(x,proxy,what,prob,year,nwrms)
                 kobe(x,proxy=proxy,what=what,prob=prob,year=year,nwrms=nwrms)
                 ,proxy,what=what,prob=prob,year=year,nwrms=nwrms)

   # NOTE(review): kobe() on each element already returns the selected
   # summary (not a list) when length(what) == 1, so the x$trks accesses
   # below assume length(what) > 1 - confirm.
   res=list(trks=ldply(res, function(x) x$trks),
            pts =ldply(res, function(x) x$pts),
            smry=ldply(res, function(x) x$smry),
            wrms=ldply(res, function(x) x$wrms),
            sims=ldply(res, function(x) x$sims))

   if (length(what)==1)
      return(res[[what]])
   else
      return(res[what])})
# Kobe summaries for an FLStock, scaled by an FLBRP's reference points
# (stock = SSB/SSB_ref, harvest = F/F_ref).
setMethod('kobe', signature(object="FLBRP",method="FLStock"),
   function(object,method,proxy="msy",
            what=c("sims","trks","pts","smry","wrms")[1],
            prob=c(0.75,0.5,.25),
            year=NULL,
            nwrms=10){

   # Default to the stock's final year.
   if (is.null(year)) year=range(method)["maxyear"]

   # Status ratios as a plain data frame.
   dat=model.frame(mcf(FLQuants(stock  =ssb( method)%/%refpts(object)[proxy,"ssb"],
                                harvest=fbar(method)%/%refpts(object)[proxy,"harvest"])),drop=T)

   res=kobeFn(dat,what=what,prob=prob,year=year,nwrms=nwrms)

   # NOTE(review): kobeFn already selects res[[what]] when length(what) == 1,
   # so both branches return the same object in that case.
   if (length(what)==1)
      return(res)
   else
      return(res[what])})
# Kobe summaries for each FLStock in an FLStocks list against one FLBRP;
# per-stock results are row-bound by summary type.
setMethod('kobe', signature(object="FLBRP",method="FLStocks"),
   function(object,method,proxy="msy",what=c("sims","trks","pts","smry","wrms")[1],prob=c(0.75,0.5,.25),year=NULL,nwrms=10){

   # NOTE(review): the extra positional `proxy` passed to mlply() binds to
   # the helper's second formal `object`, leaving its `proxy` formal missing
   # and shadowing the method's FLBRP - this method looks broken; confirm.
   res=mlply(method,function(x,object,proxy,what,prob,year,nwrms)
                 kobe(object,x,proxy=proxy,what=what,prob=prob,year=year,nwrms=nwrms)
                 ,proxy,what=what,prob=prob,year=year,nwrms=nwrms)

   res=list(trks=ldply(res, function(x) x$trks),
            pts =ldply(res, function(x) x$pts),
            smry=ldply(res, function(x) x$smry),
            wrms=ldply(res, function(x) x$wrms),
            sims=ldply(res, function(x) x$sims))

   if (length(what)==1)
      return(res[[what]])
   else
      return(res[what])})
|
3cf97dbcd88997c5e1b2edc3075c2f14c1c601c6
|
fa336caa57ba22cb3f649d9b73f056e9a3167b5d
|
/man/plot.ggthemr.swatch.Rd
|
78e32f1128fbd9026e019e161b768430765157d9
|
[] |
no_license
|
damonzon/ggthemr
|
29522271240916bd6c5acf6dbc1dea6706f0dc24
|
60756ec8dcd80347d13e3158a0d418462610d9dd
|
refs/heads/master
| 2020-12-11T03:49:00.233408
| 2015-04-20T00:15:27
| 2015-04-20T00:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
rd
|
plot.ggthemr.swatch.Rd
|
\name{plot.ggthemr.swatch}
\alias{plot.ggthemr.swatch}
\title{Create colour plot of ggthemr swatch}
\usage{
\method{plot}{ggthemr.swatch} (x)
}
\arguments{
\item{x}{ggthemr swatch object.}
}
\description{
Create colour plot of ggthemr swatch
}
|
06b9c7f6804e56172f853a558b3184a441874691
|
1ed12913fb9b98db702389871ea761c46fdee116
|
/man/td.lilliefors.Rd
|
849862995aa3ecc8e9fdae64782419135520d95b
|
[] |
no_license
|
ghuiber/teradataR
|
9053adf62e0151b320da4f9ca840d056adcdcad2
|
d097a9484f8cf53803f1ba26181970042bd146bb
|
refs/heads/master
| 2021-01-22T01:33:57.288324
| 2014-09-12T20:43:55
| 2014-09-12T20:43:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
rd
|
td.lilliefors.Rd
|
\name{td.lilliefors}
\alias{td.lilliefors}
\title{
Lilliefors Test
}
\description{
Function to use lilliefors test on table and column.
}
\usage{
td.lilliefors(tdf, col, thresh = 0.05)
}
\arguments{
\item{tdf}{
td data frame.
}
\item{col}{
column name to test.
}
\item{thresh}{
threshold value. Defaults to 0.05.
}
}
\details{
Use statistical tables to generate the lilliefors test statistic for this
td data frame and column.
}
\value{
data frame with one row of results.
}
\note{
Several statistical functions like this one use the Teradata Warehouse Miner
statistical tables and they need to be installed to use this function. You can
also set up tdMetadataDB once you know where they are installed otherwise they
will be searched for.
}
\examples{
\dontrun{
td.lilliefors(tdf,"age")
}
}
|
b1aae275cccc4495e309039e34b207a218d90945
|
83ae358d90cb1c54c8be380bc7bd628a2f6ed530
|
/R/bplot.R
|
7c4f0ceb55067e1de8a72ff003db9c7a64390ad7
|
[] |
no_license
|
cran/Rlab
|
c7963e1210e2140fc6d397ff6a2cf289f0dd3bd2
|
c72e630626f6df15cf75ffd8b9ee7c85322aeda8
|
refs/heads/master
| 2022-05-28T16:35:40.306539
| 2022-05-04T22:10:02
| 2022-05-04T22:10:02
| 17,693,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
bplot.R
|
"bplot" <-
function (x, by,style = "tukey", outlier = TRUE, plot = TRUE, ...)
{
obj <- stats.bplot(x, style = style, outlier = outlier, by=by)
if (plot) {
bplot.obj(obj, ...)
}
else {
return(obj)
}
invisible()
}
|
7a5b739b1c7257893e952509a0d8fe9f0d20dd7c
|
2a6ed6e2ac43699bc3c687662662e2089063098a
|
/R/MoM_2var.R
|
a66e83e4af2ea1579599273b44bc9afe6637fdb6
|
[] |
no_license
|
mxcai/iGREX
|
4392fab1dac46bc5d5a2cc32f3e04437d1090b27
|
94c65136dd83c3857881ea2e6cb76437faf3c892
|
refs/heads/master
| 2021-12-24T00:57:24.255825
| 2021-12-17T08:03:02
| 2021-12-17T08:03:02
| 152,763,918
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,200
|
r
|
MoM_2var.R
|
#' Method-of-moments estimation of a two-variance-component model
#'
#' Solves the 2x2 moment equations for the kernel variance sb2 and residual
#' variance se2, given an n x n kernel matrix K and response y, with optional
#' covariates Z (an intercept is always included). Also returns a sandwich
#' covariance for the estimates and a delta-method standard error for the
#' heritability-type ratio h = sb2*tr(K) / (sb2*tr(K) + se2*(n - q)).
#'
#' Fix over the original: the function ended in `ret <- list(...)`, so its
#' value was returned invisibly; the list is now the (visible) last
#' expression. The local previously named `c` (shadowing base::c) is renamed.
#'
#' @param K n x n kernel (e.g. relatedness) matrix.
#' @param y Numeric response vector of length n.
#' @param Z Optional n x p covariate matrix (intercept added automatically).
#' @return A list with components sb2, se2, K, covSig, h and se_h.
MoM_2var <- function(K, y, Z = NULL) {
  n <- length(y)

  if (is.null(Z)) {
    # Intercept-only model: centre y and use the centering projector M.
    Z <- matrix(1, n, 1)
    M <- diag(n) - matrix(1 / n, n, n)
    y <- y - mean(y)
    # NOTE(review): the original uses K directly here rather than M %*% K;
    # preserved as-is (the centered-y derivation).
    MK <- K
  } else {
    # Project out the covariates (with intercept) from both y and K.
    Z <- cbind(1, Z)
    M <- diag(n) - Z %*% solve(t(Z) %*% Z) %*% t(Z)
    y <- M %*% y
    MK <- M %*% K
  }

  q <- ncol(Z)
  trK <- sum(diag(MK))
  trK2 <- sum(MK^2)

  # Moment equations S %*% sigma = rhs for sigma = (sb2, se2).
  S <- matrix(c(trK2, trK, trK, n - q), 2, 2)
  rhs <- c(t(y) %*% MK %*% y, sum(y^2))
  invS <- solve(S)
  sigma <- invS %*% rhs

  # Covariance of the moment conditions (factor 2 from quadratic forms).
  covB <- matrix(0, 2, 2)
  if (q == 1) {
    Sigma <- sigma[1] * K + sigma[2] * M
    KS <- K %*% Sigma
    covB[1, 1] <- sum(KS^2) * 2
    covB[2, 2] <- sum(Sigma^2) * 2
    covB[1, 2] <- covB[2, 1] <- sum(KS * Sigma) * 2
  } else {
    MS <- sigma[1] * MK %*% M + sigma[2] * M
    MKMS <- MK %*% MS
    covB[1, 1] <- sum(MKMS^2) * 2
    covB[2, 2] <- sum(MS^2) * 2
    covB[1, 2] <- covB[2, 1] <- sum(MKMS * MS) * 2
  }

  # Sandwich estimator of the covariance of (sb2, se2).
  covSig <- invS %*% covB %*% invS

  sb2 <- sigma[1]
  se2 <- sigma[2]
  var_total <- sb2 * trK + se2 * (n - q)
  h <- sb2 * trK / var_total

  # Delta method: gradient of h with respect to (sb2, se2).
  gh <- c(se2 * (n - q) * trK / var_total^2, -sb2 * (n - q) * trK / var_total^2)
  se_h <- sqrt(t(gh) %*% covSig %*% gh)

  list(sb2 = sb2, se2 = se2, K = K, covSig = covSig, h = h, se_h = se_h)
}
|
01250dd419b60af03cb82ca84099fe7b199fadd8
|
55c728f9da02b4f7e35111708fe997bf78caaba6
|
/man/AmoLeads.Rd
|
36a332864da742b9db32a079d264d7d4fc344552
|
[] |
no_license
|
grkhr/amocrm
|
c06d37ca06723c50518ede1df63bf1941f732695
|
3e35636f5783174f19bebc85a5c744f829ed1f49
|
refs/heads/master
| 2021-06-22T13:22:50.730351
| 2021-05-18T10:05:25
| 2021-05-18T10:05:25
| 225,080,539
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,291
|
rd
|
AmoLeads.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AmoLeads.R
\name{AmoLeads}
\alias{AmoLeads}
\title{Leads}
\usage{
AmoLeads(
email = NULL,
apikey = NULL,
domain = NULL,
auth_list = NULL,
limit = 500,
flatten = F,
id = NULL,
query = NULL,
responsible_user_id = NULL,
with_with = "is_price_modified_by_robot,loss_reason_name",
status = NULL,
date_create_from = NULL,
date_create_to = NULL,
date_modify_from = NULL,
date_modify_to = NULL,
tasks = NULL,
active = NULL
)
}
\arguments{
\item{email}{Email}
\item{apikey}{Your api key from settings in interface}
\item{domain}{Your domain in AmoCRM (xxx in xxx.amocrm.ru)}
\item{auth_list}{List with auth data, you can build from AmoAuthList}
\item{limit}{Batch limit, sometimes AmoCRM's API doesn't work properly, you can reduce the value and have a chance to load your data}
\item{flatten}{Set \code{TRUE} if you want to join all the output dataframes You'll have a not tidy-dataframe with left-joining all dataframes}
\item{id}{Filter. Pass id or vector of ids of leads.}
\item{query}{Filter. Searching for all fields of leads. String.}
\item{responsible_user_id}{Filter. Pass id or vector of ids of responsible user ids. You can get ids from AmoUsers().}
\item{with_with}{Additional data. Default to \code{'is_price_modified_by_robot,loss_reason_name'}.}
\item{status}{Filter. Single status id or vector of ids. You can get ids from AmoPipelinesStatuses().}
\item{date_create_from}{Filter. Date create of lead. You can pass like \code{'2019-01-01'} or like \code{'2019-01-01 12:30:00'}.}
\item{date_create_to}{Filter. Date create of lead. You can pass like \code{'2019-01-01'} or like \code{'2019-01-01 12:30:00'}.}
\item{date_modify_from}{Filter. Date modify of lead. You can pass like \code{'2019-01-01'} or like \code{'2019-01-01 12:30:00'}.}
\item{date_modify_to}{Filter. Date modify of lead. You can pass like \code{'2019-01-01'} or like \code{'2019-01-01 12:30:00'}.}
\item{tasks}{Filter. Pass \code{1} if you need leads without tasks, pass \code{2} if you need leads with undone tasks.}
\item{active}{Filter. Pass \code{1} if you need only active leads.}
}
\value{
If flatten is \code{FALSE} (default) you'll get a list of 4 tidy-dataframes which you can join by id. You can access it using list_name$dataframe_name.
leads - all leads with unnested parameters.
linked_custom_fields — linked custom fields with all parameters.
linked_tags — linked tags with all parameters.
linked_contacts — linked contacts with all parameters.
}
\description{
Function to get leads.
}
\examples{
\dontrun{
# simple
library(dplyr)
leads <- AmoLeads(auth_list = auth_list)
leads_with_cf <- leads$leads \%>\%
left_join(leads$linked_custom_fields, by = 'id') # not tidy
# filters
leads <- AmoLeads(auth_list = auth_list,
date_create_from = '2019-02-01 05:00:00',
date_create_to = '2019-02-20 17:00:00',
active = 1)
}
}
\references{
Please \strong{READ} this:
\href{https://github.com/grkhr/amocrm/blob/master/md/AmoLeads.md}{Function documentation in Russian on GitHub}
Also nice to read:
\href{https://www.amocrm.ru/developers/content/api/leads}{AmoCRM official documentation}
}
|
2b6f934900581682fa578f8bd04be703edc361f8
|
f638bca9604b1aba725ef7ea1083aae681f008bd
|
/foodbanksHamilton.Rcheck/00_pkg_src/foodbanksHamilton/man/modes_less40k.Rd
|
d71c497258671afa6d9a123fad51ca26867b7370
|
[] |
no_license
|
jfhawkin/Accessibility-Food-Banks-Hamilton
|
38af1b1e6230dc3c11e836fbea75be056ce54a10
|
b7caf97d91dd5a3cb08fda21812391a9af94c48e
|
refs/heads/main
| 2023-08-15T11:18:53.810563
| 2021-10-13T23:32:52
| 2021-10-13T23:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,117
|
rd
|
modes_less40k.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/foodbanksHamilton.R
\docType{data}
\name{modes_less40k}
\alias{modes_less40k}
\title{Transportation for low income population by Traffic Analysis Zone.}
\format{
A simple features data frame with 234 rows and 5 variables:
\describe{
\item{TAZUID}{Unique identifier of traffic analysis zone, according to the GTA06 zoning system of the Transportation Tomorrow Survey}
\item{Transit}{Number of trips by Transit (excluding GO rail)}
\item{Walk}{Number of trips by walking}
\item{Driver}{Number of trips by Auto (driver)}
\item{geometry}{Geometry of points}
}
}
\source{
Transportation Tomorrow Survey (http://dmg.utoronto.ca/transportation-tomorrow-survey/tts-introduction)
}
\usage{
data(modes_less40k)
}
\description{
Modes of transportation by traffic analysis zones for population in households with income less than $40,000. The source data is a cross-tabulation of primary mode of travel by household income by traffic analysis zone of the household.
}
\keyword{datasets}
\keyword{land}
\keyword{transportation}
\keyword{use}
|
bf223b022d1a2482a254155b4ca6d4280504a929
|
493d13873d4f285e9c5927e3e1f235bf3af1104a
|
/nucleR_2.12.1_AH_edited_asy/man/syntheticNucMap.Rd
|
9d9bcd3a3ce944474a88207a33f89ca1a5197c6d
|
[
"MIT"
] |
permissive
|
elifesciences-publications/HTa_Histone_analog
|
d0d0cc2ea1c00ef7b5126a459688a8786d5cf53b
|
2e67014a2b0c85002a8268178410d616da7e6244
|
refs/heads/master
| 2020-09-15T05:40:39.307201
| 2019-11-22T08:39:23
| 2019-11-22T08:39:23
| 223,359,603
| 0
| 0
|
MIT
| 2019-11-22T08:37:05
| 2019-11-22T08:37:04
| null |
UTF-8
|
R
| false
| true
| 4,424
|
rd
|
syntheticNucMap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/syntheticNucMap.R
\name{syntheticNucMap}
\alias{syntheticNucMap}
\title{Generates a synthetic nucleosome map}
\usage{
syntheticNucMap(wp.num = 100, wp.del = 10, wp.var = 20, fuz.num = 50,
fuz.var = 50, max.cover = 20, nuc.len = 147, lin.len = 20,
rnd.seed = NULL, as.ratio = FALSE, show.plot = FALSE)
}
\arguments{
\item{wp.num}{Number of well-positioned (non overlapped) nucleosomes. They
are placed uniformly every \code{nuc.len+lin.len} basepairs.}
\item{wp.del}{Number of well-positioned nucleosomes (the ones generated by
\code{wp.num}) to remove. This will create an uncovered region.}
\item{wp.var}{Maximum variance in basepairs of the well-positioned
nucleosomes. This will create some variation in the position of the reads
describing the same nucleosome.}
\item{fuz.num}{Number of fuzzy nucleosomes. They are distributed randomly
over all the region. They could be overlapped with other well-positioned
or fuzzy nucleosomes.}
\item{fuz.var}{Maximum variance of the fuzzy nucleosomes. This allow to set
different variance in well-positioned and fuzzy nucleosome reads (using
\code{wp.var} and \code{fuz.var}).}
\item{max.cover}{Maximum coverage of a nucleosome, i.e., how many times a
nucleosome read can be repeated. The final coverage probably will be
higher by the addition of overlapping nucleosomes.}
\item{nuc.len}{Nucleosome length. It is not recommended to change the default
147 bp value.}
\item{lin.len}{Linker DNA length. Usually around 20 bp.}
\item{rnd.seed}{As this model uses random distributions for the placement,
by setting the rnd.seed to a known value allows to reproduce maps in
different executions or computers. If you don't need this, just left it in
default value.}
\item{as.ratio}{If \code{as.ratio=TRUE} this will create and return a synthetic
naked DNA control map and the ratio between it and the nucleosome
coverage. This can be used to simulate hybridization ratio data, like the
one in Tiling Arrays.}
\item{show.plot}{If \code{TRUE}, will plot the output coverage map, with the
nucleosome calls and optionally the calculated ratio.}
}
\value{
A list with the following elements:
\itemize{
\item wp.starts Start points of well-positioned nucleosomes
\item wp.nreads Number of repetitions of each well positioned read
\item wp.reads Well positioned nucleosome reads (\code{IRanges} format),
containing the repetitions
\item fuz.starts Start points of the fuzzy nucleosomes
\item fuz.nreads Number of repetitions of each fuzzy nucleosome read
\item fuz.reads Fuzzy nucleosome reads (\code{IRanges} format), containing all
the repetitions
\item syn.reads All synthetic nucleosome reads together (\code{IRanges} format)
}
The following elements will be only returned if \code{as.ratio=TRUE}:
\itemize{
\item ctr.reads The pseudo-naked DNA (control) reads (\code{IRanges} format)
\item syn.ratio The calculated ratio nucleosomal/control (\code{Rle} format)
}
}
\description{
This function generates a synthetic nucleosome map using the parameters
given by the user and returns the coverage (like NGS experiments) or a
pseudo-hybridization ratio (like Tiling Arrays) together with the perfect
information about the well positioned and fuzzy nucleosome positions.
}
\examples{
# Generate a synthetic map with 50wp + 20fuzzy nucleosomes using fixed
# random seed=1
res <- syntheticNucMap(wp.num=50, fuz.num=20, show.plot=TRUE, rnd.seed=1)
# Increase the fuzzyness
res <- syntheticNucMap(
wp.num=50, fuz.num=20, wp.var=70, fuz.var=150, show.plot=TRUE,
rnd.seed=1
)
# Calculate also a random map and get the ratio between random and
# nucleosomal
res <- syntheticNucMap(
wp.num=50, wp.del=0, fuz.num=20, as.ratio=TRUE, show.plot=TRUE,
rnd.seed=1
)
print(res)
# Different reads can be accessed separately from results
# Let's use this to plot the nucleosomal + the random map
library(ggplot2)
as <- as.vector(coverage.rpm(res$syn.reads))
bs <- as.vector(coverage.rpm(res$ctr.reads))
cs <- as.vector(res$syn.ratio)
plot_data <- rbind(
data.frame(x=seq_along(as), y=as, lab="nucleosomal"),
data.frame(x=seq_along(bs), y=bs, lab="random"),
data.frame(x=seq_along(cs), y=cs, lab="ratio")
)
qplot(x=x, y=y, data=plot_data, geom="area", xlab="position", ylab="") +
facet_grid(lab~., scales="free_y")
}
\author{
Oscar Flores \email{oflores@mmb.pcb.ub.es}
}
\keyword{datagen}
|
3892723c9406cf48803a0d311609adca248600b4
|
c8caa0d8d7f62b2f9394e4712cf078e050d5213d
|
/server.r
|
93631bda283815a7b52780b4f1e642bff59cd41e
|
[] |
no_license
|
ClaireMcKayBowen/patients
|
5542a00d4b826528ff8d561e135923196085bead
|
aef29196b7e375db04984dcfd0351ddb5f479c55
|
refs/heads/master
| 2021-01-11T22:21:14.461122
| 2017-01-14T17:02:21
| 2017-01-14T17:02:21
| 78,951,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,829
|
r
|
server.r
|
library(shiny)
library(xtable)

# Server logic: echoes the slider settings and builds a table of posterior
# probabilities that the true adverse-event (AE) rate exceeds a chosen level.
# Fixes over the original: downloadHandler() was called with `file =`, which
# only worked through partial argument matching against `filename`; loop
# bounds use seq_len() instead of 1:n; dead commented-out code removed.
shinyServer(function(input, output) {

  # Reactive data frame echoing the current input values for display.
  sliderValues <- reactive({
    data.frame(
      Name = c("Number of Patients",
               "Adverse Event Percentage Level",
               "Percentage Significant Figures"),
      Value = as.character(c(input$patients,
                             input$percent,
                             input$sig)),
      stringsAsFactors = FALSE)
  })

  # Upper-triangular table: for y AEs out of n patients (y <= n), the
  # posterior probability P(rate > input$percent) under a
  # Beta(y + 1/3, n - y + 1/3) posterior.
  data.m <- reactive({
    m <- matrix(ncol = input$patients, nrow = input$patients)
    for (n in seq_len(input$patients)) {
      for (y in seq_len(input$patients)) {
        m[y, n] <- 1 - pbeta(input$percent, y + 1/3, n - y + 1/3)
        if (y == n) break
      }
    }
    m <- round(m, digits = input$sig)

    # Probabilities that round to 1 are shown as ">0.99..." instead
    # (this coerces the matrix to character, as in the original).
    if (input$sig == 2) {
      m[m > .99] <- ">0.99"
    }
    if (input$sig == 3) {
      m[m > .999] <- ">0.999"
    }
    if (input$sig == 4) {
      m[m > .9999] <- ">0.9999"
    }
    if (input$sig == 5) {
      m[m > .99999] <- ">0.99999"
    }

    # Leading column labels the number of patients with an AE.
    w <- matrix(seq_len(input$patients), ncol = 1)
    m <- cbind(w, m)
    colnames(m) <- c("# of Patients with AE", paste("", seq_len(input$patients)))
    as.data.frame(m)
  })

  # CSV download of the probability table.
  output$downloadData <- downloadHandler(
    filename = "medical_data.csv",
    content = function(file) {
      write.csv(data.m(), file)
    }
  )

  # Render the slider summary and the probability table as HTML tables.
  output$values <- renderTable({
    sliderValues()
  })
  output$data.m <- renderTable({
    data.m()
  }, include.rownames = FALSE, include.colnames = TRUE, digits = 3)
})
|
549425da0a18f0441d11afa8e1444f88b172b55c
|
1ed761a15da505c2286a0f374c8e81b074e1eb27
|
/getaccession.R
|
b5d8fcb06ee7dbefe454678ea6db777fcf5bb948
|
[] |
no_license
|
Rphillips1995/SRAFunctions2015
|
9f6a77370e4b3e3b4f82e013f015e3a3501da3ba
|
d1e9d8033793c9979ce52a839a28201ea76cbba4
|
refs/heads/master
| 2021-01-10T15:32:13.619891
| 2015-07-28T14:17:59
| 2015-07-28T14:17:59
| 36,888,021
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,378
|
r
|
getaccession.R
|
#' Returns all accession numbers based on a search of cell line.
#'
#' This function creates a list of all accession numbers specific to the cell_line. This allows the user to further search
#' studies that used this cell line.
#'
#'
#' @param cell_line_name: Cell line for which you wish to find the accession number. THE FUNCTION DOES NOT SUPPORT - OR SPACES
#' BETWEEN THE LETTERS. WRITE THE CELL LINE AS ONE WORD WITH NO SPACES.
#'
#' @return All: A list, separated by type of accession number, of all the accession numbers.
#'
#' @examples
#' getaccession('MCF7')
getaccession <- function(cell_line_name){
Subset <- as.data.frame(subset(metadata,
subset=(cell_line==toupper(cell_line_name))))
{
runaccession <- unique(Subset[,1])
sampleaccession <- unique(Subset[,2])
expaccession <- unique(Subset[,3])
studyaccession <- unique(Subset[,4])
subaccession <- unique(Subset[,5])
}
{
All<-list(runaccession,sampleaccession,expaccession,studyaccession,subaccession)
}
{
if(length(All[[1]])==0) {
stop('cell line not found')
}}
{
names(All) <- c('run_accession',
'sample_accession',
'experiment_accession',
'study_accession',
'submission_accession')
}
{
return(All)
}
}
|
040e00043f6010e17620296f64b9afb0a43bc942
|
97305ae3c5572098b402ed2d7d49bbc8229c4804
|
/script/load_forecast.R
|
4194f2f61e570e0e700f29b348dbc65b9ca2f1d3
|
[] |
no_license
|
Akinlabiajelabi/JoslaOS
|
7fffa6112af29c07ec3032d13479823b5ccb9f4d
|
67c5fe847d5676c3b323b8d03b859148297132ab
|
refs/heads/master
| 2021-01-18T05:09:17.985876
| 2016-09-06T18:45:28
| 2016-09-06T18:45:28
| 66,694,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,164
|
r
|
load_forecast.R
|
###### Load Forecast for Electric Utility Supply using supervised learning algorithm #######
## Set working directory to latest development
#setwd("0")
## Load toolbox
library(dplyr)
library(plyr)
library(ggplot2)
library(tidyr)
## Get data
data <- read.csv("load_data.csv")
## Prepare data
str(data)
# Code to sample data based on system-level or node-level analysis
dataSystem <- subset(data, feeder == "IPP-1")
#dataNode <- subset(data, feeder != "IPP-1")
# Code to add time factor columns
dataSystem <- mutate(dataSystem,
year=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%Y"), levels=c("2015", "2016")),
month=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%b"), levels=c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")),
week=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%W"), levels=c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53")),
day=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%d"), levels=c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31")),
weekday=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%a"), levels=c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")),
hour=factor(strftime(as.POSIXlt(Timestamp, origin="1970-01-01", tz="UTC"), format="%H"), levels=c("00","01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"))
)
# Code to add privacy to mask private data usage
dataSystem$feeder <- mapvalues(dataSystem$feeder,
from = c("IPP-1", "Breaker 1-4", "Breaker 5-8", "Lagos House", "LTV & Co.", "Multi-Agency"),
to = c("utility", "zone1", "zone2", "zone3", "zone4", "zone5")
)
# Code to convert current to numeric data type
dataSystem$current <- as.numeric(levels(dataSystem$current))[dataSystem$current]
# Code to compute load (KW) from current column; Given that voltage and power factor are constant across the utility at 11kV and 0.9 respectively
# Also handles missing value by filling with zero
dataSystem <- ddply(dataSystem, .(Timestamp), transform, load = ifelse(is.na(current)==TRUE, 0, (11000 * current * 0.9 * 1.732)/1000))
# Code to column by name
dataSystem <- dataSystem[c("Timestamp", "year", "month", "week", "weekday", "day", "hour", "feeder", "current", "load")]
## Explore data
summary(dataSystem)
# Code to sample data based on time factor
dataSystemYear <- subset(dataSystem, year == "2015")
dataSystemMonth <- subset(subset(dataSystem, year == "2015"), month == "Jul")
dataSystemWeek <- subset(subset(subset(dataSystem, year == "2015"), month == "Jul"), week == "28")
dataSystemWeekday <- subset(subset(subset(dataSystem, year == "2015"), month == "Jul"), weekday == "Tue")
dataSystemDay <- subset(subset(subset(subset(dataSystem, year == "2015"), month == "Jul"), week == "28"), weekday == "Thu" & day == "16")
dataSystemHour <- subset(subset(subset(subset(subset(dataSystem, year == "2015"), month == "Jul"), week == "28"), weekday == "Thu" & day == "16"), hour == "12")
## Train KNN model without time factor
# Code to forecast next hour load
knn <- function(data, k) {
# get load data
load <- data$load
n <- length(load)
# normalise load data
load <- (load - min(load)) / (max(load) - min(load))
# vector to hold distance between k nearest neighours
dist <- rep(0, k)
for(i in 1:k) {
dist[i] <- sqrt((load[n] - load[n-k+i])^2)
}
# vector to hold predictor weight of k nearest neighbours
alpha <- rep(0, k)
for(j in 1:k) {
alpha[j] <- (dist[k] - dist[j]) / (dist[k] - dist[1])
}
# vector to hold forecast load
forecast <- load
for(f in 1:k) {
forecast[f] <- alpha[f] * load[f+1]
}
forecast[n] <- (1/sum(alpha))*sum(forecast)
return(gather(data.frame(n=seq(1, length(load), 1), load, forecast), condition, measurement, load:forecast))
}
dataTrain <- knn(dataSystemDay[1:(0.6*nrow(dataSystemDay)), ], ceiling(sqrt(nrow(dataSystemDay))))
## Evaluate the model
# Code to create test set from day data
## Communicate result
# Code to summaries load and forecast
ddply(dataTrain, .(as.factor(condition)), summarise,
blackout = sum(abs(measurement - 0) < 1e-6),
sum = (sum(measurement) * (max(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")])- min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]))) + min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]),
mean = (mean(measurement) * (max(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")])- min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]))) + min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]),
max = (max(measurement) * (max(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")])- min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]))) + min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]),
min = (min(measurement) * (max(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")])- min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]))) + min(dataSystemDay[1:(0.6*nrow(dataSystemDay)), c("load")]),
sd = sd(measurement),
se = sd / sqrt(length(measurement))
)
# Code to visualise trend based on time factor
ggplot(dataTrain, aes(n, measurement, group=condition, colour=condition)) + geom_line()
# Code to visualise distribution on time factor
ggplot(dataTrain, aes(x=measurement, fill=condition)) + geom_histogram(binwidth=.5, alpha=.5, position="identity")
|
d96b7bb00dcb06706f1e9eac5f521ababd8f844e
|
22c47b42f7307736dc1d8e7694c1baa1a6a27258
|
/ASCAT/R/ascat.metrics.R
|
071f9d41917482ef0bc06f84fd978bd6bd9f9717
|
[] |
no_license
|
jdemeul/ascat
|
b4aa6039f52fa0f852f9dd264503b2e63c60d37e
|
db25fb16c39cf8ed1c5696ad568f3b5f4bdfc723
|
refs/heads/master
| 2023-08-31T21:59:58.967803
| 2023-07-13T07:49:44
| 2023-07-13T07:49:44
| 87,942,160
| 1
| 0
| null | 2018-09-16T21:33:10
| 2017-04-11T14:12:18
|
R
|
UTF-8
|
R
| false
| false
| 9,849
|
r
|
ascat.metrics.R
|
#' Function to extract different metrics from ASCAT profiles.
#'
#' @param ASCAT_input_object R object generated by the ascat.aspcf function and given to the ascat.runAscat function.
#' @param ASCAT_output_object R object generated by the ascat.runAscat function.
#'
#' @return A dataframe (one sample per line) with the following metrics (as columns):\cr
#' sex - Sex information as provided.\cr
#' tumour_mapd - Median Absolute Pairwise Difference (MAPD) in tumour logR track.\cr
#' normal_mapd - Median Absolute Pairwise Difference (MAPD) in normal logR track (should be NA without matched normals and 0 for sequencing data).\cr
#' GC_correction_before - logR/GC correlation before correction.\cr
#' GC_correction_after - logR/GC correlation after correction.\cr
#' RT_correction_before - logR/RT correlation before correction.\cr
#' RT_correction_after - logR/RT correlation after correction.\cr
#' n_het_SNP - Number of heterozygous SNPs.\cr
#' n_segs_logR - Number of segments in the logR track.\cr
#' n_segs_BAF - Number of segments in the BAF track.\cr
#' n_segs_logRBAF_diff - Difference between number of segments in the logR versus BAF track.\cr
#' frac_homo - Fraction of homozygous (<0.1 | >0.9) probes in tumour.\cr
#' purity - Purity estimate.\cr
#' ploidy - Ploidy estimate.\cr
#' goodness_of_fit - Goodness of fit.\cr
#' size_intermediate_segments - Total size of (unrounded) segments in the X.45-X.55 range.\cr
#' size_odd_segments - Total size of segments with an odd (1/3/5/+) CN (either nMajor or nMinor).\cr
#' n_segs - Number of copy-number segments.\cr
#' segs_size - Total size of all segments.\cr
#' n_segs_1kSNP - Number of segments per 1k heterozygous SNPs.\cr
#' homdel_segs - Number of segments with homozygous deletion.\cr
#' homdel_largest - largest segment with homozygous deletion.\cr
#' homdel_size - Total size of segments with homozygous deletion.\cr
#' homdel_fraction - Fraction of the genome with homozygous deletion.\cr
#' LOH - Fraction of the genome with LOH (ignoring sex chromosomes).\cr
#' mode_minA - Mode of the minor allele (ignoring sex chromosomes).\cr
#' mode_majA - Mode of the major allele (ignoring sex chromosomes).\cr
#' WGD - Whole genome doubling event (ignoring sex chromosomes).\cr
#' GI - Genomic instability score (ignoring sex chromosomes).\cr
#'
#' @author tl
#' @export
ascat.metrics = function(ASCAT_input_object,ASCAT_output_object) {
METRICS=do.call(rbind,lapply(1:length(ASCAT_input_object$samples), function(nSAMPLE) {
SAMPLE=ASCAT_input_object$samples[nSAMPLE]
sex=ASCAT_input_object$gender[nSAMPLE]
tumour_mapd=round(median(abs(diff(na.omit(ASCAT_input_object$Tumor_LogR[,SAMPLE])))),4)
if (!is.null(ASCAT_input_object$Germline_LogR) && any(SAMPLE %in% colnames(ASCAT_input_object$Germline_LogR))) {
normal_mapd=round(median(abs(diff(na.omit(ASCAT_input_object$Germline_LogR[,SAMPLE])))),4)
} else {
normal_mapd=NA
}
if ('GC_correction_before' %in% names(ASCAT_input_object)) {GC_correction_before=ASCAT_input_object$GC_correction_before[SAMPLE]} else {GC_correction_before=NA}
if ('GC_correction_after' %in% names(ASCAT_input_object)) {GC_correction_after=ASCAT_input_object$GC_correction_after[SAMPLE]} else {GC_correction_after=NA}
if ('RT_correction_before' %in% names(ASCAT_input_object)) {RT_correction_before=ASCAT_input_object$RT_correction_before[SAMPLE]} else {RT_correction_before=NA}
if ('RT_correction_after' %in% names(ASCAT_input_object)) {RT_correction_after=ASCAT_input_object$RT_correction_after[SAMPLE]} else {RT_correction_after=NA}
if (!is.null(ASCAT_input_object$Tumor_LogR_segmented) && !is.null(ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]])) {
n_het_SNP=length(ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]])
n_segs_logR=length(rle(paste0(as.character(ASCAT_input_object$SNPpos[names(ASCAT_input_object$Tumor_LogR_segmented[,SAMPLE]),1]),'_',ASCAT_input_object$Tumor_LogR_segmented[,SAMPLE]))$values)
n_segs_BAF=length(rle(paste0(as.character(ASCAT_input_object$SNPpos[names(ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]][,1]),1]),'_',ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]][,1]))$values)
n_segs_logRBAF_diff=abs(n_segs_logR-n_segs_BAF)
segm_baf=ASCAT_input_object$Tumor_BAF[rownames(ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]]),SAMPLE]
frac_homo=round(length(which(segm_baf<0.1 | segm_baf>0.9))/length(segm_baf),4)
rm(segm_baf)
} else {
n_het_SNP=NA
n_segs_logR=NA
n_segs_BAF=NA
n_segs_logRBAF_diff=NA
frac_homo=NA
}
if (!is.null(ASCAT_output_object$segments) && SAMPLE %in% ASCAT_output_object$segments$sample) {
purity=round(as.numeric(ASCAT_output_object$purity[SAMPLE]),4)
ploidy=round(as.numeric(ASCAT_output_object$ploidy[SAMPLE]),4)
goodness_of_fit=round(ASCAT_output_object$goodnessOfFit[SAMPLE],4)
unroundedprofile=ASCAT_output_object$segments_raw[ASCAT_output_object$segments_raw$sample==SAMPLE,]
unroundedprofile$nAraw_adjusted=unroundedprofile$nAraw-floor(unroundedprofile$nAraw)
unroundedprofile$nBraw_adjusted=unroundedprofile$nBraw-floor(unroundedprofile$nBraw)
stopifnot(all(c(unroundedprofile$nAraw_adjusted,unroundedprofile$nBraw_adjusted)>=0 & c(unroundedprofile$nAraw_adjusted,unroundedprofile$nBraw_adjusted)<=1))
unroundedprofile=unroundedprofile[which((unroundedprofile$nAraw_adjusted>=0.45 & unroundedprofile$nAraw_adjusted<=0.55) | (unroundedprofile$nBraw_adjusted>=0.45 & unroundedprofile$nBraw_adjusted<=0.55)),]
if (nrow(unroundedprofile)==0) {
size_intermediate_segments=0
} else {
size_intermediate_segments=sum(unroundedprofile$endpos-unroundedprofile$startpos+1)
}
rm(unroundedprofile)
profile=ASCAT_output_object$segments[ASCAT_output_object$segments$sample==SAMPLE,]
profile$size=profile$endpos-profile$startpos+1
size_odd_segments=sum(profile$size[which(profile$nMajor %in% seq(1,max(c(profile$nMajor,profile$nMinor,1)),2) | profile$nMinor %in% seq(1,max(c(profile$nMajor,profile$nMinor,1)),2))])
n_segs=nrow(profile)
segs_size=sum(profile$size)
n_segs_1kSNP=round(n_segs/(length(ASCAT_input_object$Tumor_BAF_segmented[[nSAMPLE]])/1e3),4)
INDEX_HD=which(profile$nMajor==0 & profile$nMinor==0)
if (length(INDEX_HD)>0) {
homdel_segs=length(INDEX_HD)
homdel_largest=max(profile$size[INDEX_HD])
homdel_size=sum(profile$size[INDEX_HD])
homdel_fraction=round(homdel_size/sum(profile$size),4)
} else {
homdel_segs=homdel_largest=homdel_size=homdel_fraction=0
}
rm(INDEX_HD)
profile=profile[which(profile$chr %in% setdiff(ASCAT_input_object$chrs,ASCAT_input_object$sexchromosomes)),] # do not consider sex chromosomes for the next metrics
LOH=round(sum(profile$size[which(profile$nMinor==0)])/sum(profile$size),4)
mode_minA=modeAllele(profile,'nMinor')
mode_majA=modeAllele(profile,'nMajor')
if (mode_majA==0 || !(mode_majA %in% 1:5)) {
WGD=NA
GI=NA
} else {
if (mode_majA==1) {
WGD=0
GI=computeGIscore(WGD,profile)
} else if (mode_majA==2) {
WGD=1
GI=computeGIscore(WGD,profile)
} else if (mode_majA %in% 3:5) {
WGD='1+'
GI=computeGIscore(1,profile)
}
}
rm(profile)
} else {
purity=NA
ploidy=NA
goodness_of_fit=NA
size_intermediate_segments=NA
size_odd_segments=NA
n_segs=NA
segs_size=NA
n_segs_1kSNP=NA
homdel_segs=NA
homdel_largest=NA
homdel_size=NA
homdel_fraction=NA
LOH=NA
mode_minA=NA
mode_majA=NA
WGD=NA
GI=NA
}
OUT=data.frame(sex=sex,
tumour_mapd=tumour_mapd,
normal_mapd=normal_mapd,
GC_correction_before=GC_correction_before,
GC_correction_after=GC_correction_after,
RT_correction_before=RT_correction_before,
RT_correction_after=RT_correction_after,
n_het_SNP=n_het_SNP,
n_segs_logR=n_segs_logR,
n_segs_BAF=n_segs_BAF,
n_segs_logRBAF_diff=n_segs_logRBAF_diff,
frac_homo=frac_homo,
purity=purity,
ploidy=ploidy,
goodness_of_fit=goodness_of_fit,
size_intermediate_segments=size_intermediate_segments,
size_odd_segments=size_odd_segments,
n_segs=n_segs,
segs_size=segs_size,
n_segs_1kSNP=n_segs_1kSNP,
homdel_segs=homdel_segs,
homdel_largest=homdel_largest,
homdel_size=homdel_size,
homdel_fraction=homdel_fraction,
LOH=LOH,
mode_minA=mode_minA,
mode_majA=mode_majA,
WGD=WGD,
GI=GI,
stringsAsFactors=F)
rownames(OUT)=SAMPLE
return(OUT)
}))
return(METRICS)
}
#' Function to get mode of the allele (either minor or major)
#' @noRd
modeAllele=function(cn,col) {
y=round(cn[,col])
y[y>5]=5
y=tapply(1:nrow(cn),y,function(z) sum((cn[z,'endpos']-cn[z,'startpos'])/1e6))
ord=order(y,decreasing=T)
y=y[ord]
return(as.numeric(names(y)[which.max(y)]))
}
#' Function to compute GI score based on WGD information
#' @noRd
computeGIscore=function(WGD,profile) {
stopifnot(WGD %in% 0:2)
if (WGD==0) {
baseline=1
} else if (WGD==1) {
baseline=2
} else if (WGD==2) {
baseline=4
}
return(round(1-sum(profile$size[which(profile$nMajor==baseline & profile$nMinor==baseline)])/sum(profile$size),4))
}
|
56c2ea10ceda77a54e44e50eb2ed2ce646d89763
|
a8e215d5f5cc9b424fc66fd9f56cc54cc3aab161
|
/shiny/ui.R
|
7ad298fee01dda27c770b63ec5ccd8ac67dbe9f2
|
[] |
no_license
|
chendaniely/USMassShootings
|
8eeebd3ea8979e7e69c732733d2b55bc423e1ed9
|
4acb3ddc0a8e938ea40062699d603e6c6d389de4
|
refs/heads/master
| 2021-01-17T15:55:38.410244
| 2015-10-05T02:12:11
| 2015-10-05T02:12:11
| 43,650,729
| 0
| 0
| null | 2015-10-04T20:02:55
| 2015-10-04T20:02:54
| null |
UTF-8
|
R
| false
| false
| 943
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("US Shootings"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
helpText("Create map of victims for each state or for the entire US."),
selectInput("var",
label = "Choose a variable to display",
choices = unique(DT$State),
selected = "Percent White"),
sliderInput("years",
label = "Range of Years:",
min = min(DT$year), max = max(DT$year),
value = c(2013,2015), step =1 )
),
# Show a plot of the generated distribution
mainPanel(
#plotOutput("plot1"),
#plotOutput("plot2")
)
)
))
|
1eaa25e038c0447513145ebae69fb9136ba9ef85
|
640f730eafc8be8dc2fccd823fd60692bdf4d82a
|
/R/sobolset.r
|
35544e640d04173e4a0edc5e173630cd520eda77
|
[] |
no_license
|
hazbib/SBMLR_v
|
d163430264092913cfdc40e1dc42cd76936b0110
|
a8ff5284a7df6998db943f34c9e68466e3ad5eb7
|
refs/heads/master
| 2021-05-16T05:11:05.909337
| 2016-11-07T21:57:58
| 2016-11-07T21:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
r
|
sobolset.r
|
# Wrapper function for Reading in the sobol points and creating sobol data file
#
# Author: vishak
#Arguments
# param_path - path on disk containing the sobol sets
# parameter_count - Optional parameter if only a few parameters are being changed
# no_of_sets - Optional parameter if you want to only simulate the first 10 sets in a file containing 100 sets
"sobolset" <- function(param_path, parameter_count=0, no_of_sets=0) {
sets <- read.table(param_path, header=TRUE)
if(no_of_sets==0) no_of_sets <- nrow(sets)
if(parameter_count==0) parameter_count <- ncol(sets)
save(file=paste(getwd(),'sobol.dat',sep='/'), sets)
sets
}
|
06b4b949925bb2ac0425c8558a5dceef1e205452
|
cdeaba4963ad21b279e5d6850c5f3de6a7697161
|
/R_Programming/specdata.R
|
f5bcdee25d4a071e0a89b2882d954c7b6f69bdf2
|
[] |
no_license
|
skonmeme/datasciencecoursera
|
9865a37093c1839dfff360fb4fb8ab9b78a32489
|
cbd8d19c134708dc5f4e1063b968096d807a6eb4
|
refs/heads/master
| 2021-01-22T23:00:42.546376
| 2017-07-04T06:59:58
| 2017-07-04T06:59:58
| 92,793,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
specdata.R
|
readSpecdata <- function(directory, id = 1:332) {
specdata <- data.frame()
for (i in id) {
specdata <- rbind(specdata, read.csv(file.path(directory, paste(formatC(i, width = 3, flag = "0"), ".csv", sep = "")), colClasses = c("Date", "numeric", "numeric", "integer")))
}
specdata
}
pollutantmean <- function(directory, pollutant, id = 1:332) {
specdata <- readSpecdata(directory, id)
mean(specdata[, pollutant], na.rm = TRUE)
}
complete <- function(directory, id = 1:332) {
specdata <- readSpecdata(directory, id)
com <- as.data.frame(table(specdata[is.finite(specdata[, "sulfate"]) & is.finite(specdata[, "nitrate"]), "ID"]))
colnames(com) <- c("id", "nobs")
rownames(com) <- paste("row", com[, "id"], sep = "")
com <- com[paste("row", id, sep = ""), ]
rownames(com) <- NULL
com
}
computeCorrelation <- function(data, threshold = 0) {
com <- data[is.finite(data[, "sulfate"]) & is.finite(data[, "nitrate"]), ]
ifelse(nrow(com) > threshold, cor(com[, "sulfate"], com[, "nitrate"]), NA)
}
corr <- function(directory, threshold = 0) {
specdata <- list()
for (i in 1:332) {
specdata[[i]] <- read.csv(file.path(directory, paste(formatC(i, width = 3, flag = "0"), ".csv", sep = "")), colClasses = c("Date", "numeric", "numeric", "integer"))
}
cors <- sapply(specdata, computeCorrelation, threshold)
cors[is.finite(cors)]
}
|
c6a1dc4f667d8c4bd81b4df0750adf49fa4f85ef
|
6a052691f8db472846d1f68ae6f3f43ac5bccb55
|
/clase4/Anotaciones4/clase4.R
|
8905909eeb2b6bb4fdbef607b5a5d2761a3b7baf
|
[] |
no_license
|
Jesus-Angel-Condor/CursoR
|
6592c57864f13152d2f8e000a2e8c2ab9334a206
|
a94c9db5eb92b7c82afa9b7a063f291197aaf846
|
refs/heads/master
| 2021-01-23T21:16:58.937582
| 2017-12-10T21:42:20
| 2017-12-10T21:42:20
| 102,890,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
r
|
clase4.R
|
help("expand.grid")
expand.grid(S1 =1:2, S2=1:3, S3=1:4)
#combinaciones
help("choose")
choose(6,4)
#generacion de combianciones
help("combn")
combn(1:5,2)
#genearmos numeros aleatorios
#runif
#rnorm
#rgamma, investigar
#
#Generamos una muetra aleatoria
help("sample")
#secreto del profe
airquality
sample(airquality$Wind,10)
sample(c("TRUE","FALSE"),replace = TRUE)
#estructura de control
#if-else
#ifelse
#for
#solucion 5
n<-100
pasos<-1
while(n!=1){
if(n %% 2 == 0 ){
n <- n/2
}
else{
n<-3*n + 1
}
pasos <- pasos+1
}
print(pasos)
#Basta con uan interaccion en el rango y guardar las
#respuestas en una variables, la solucion tiene un paradigma
#fuerxza bruta
numero <- 1
longitud <- 1000000000000
for(i in 100:200){
indicador <-i
len<-1
if(indicador %%2==0){
indicador<-indicador/2
}
else{
indicador<-3*indicador+1
}
len<-len+1
if(len<longitud){
numro<-i
longitud<-len
}
}
sprintf("El numero con menor longitud de secuencia es %d",numero)
print("secuencia:")
while(numero!=1){
print(numero)
if(numero %%2 ==0){
numero<-numero/2
}
else{
numero<-3*numero+1
}
}
|
4075d392f04095631ee142da83b21eff2e99b877
|
aa2a0b9c08c3e0e3ee8f5384700727db08653eed
|
/SRC/install-packages-and-libraries.R
|
6f0fb1017ed6108028011ba1348d5868e8699f68
|
[
"BSD-2-Clause"
] |
permissive
|
BiodiversityDataScienceCorp/milkfli-mapping
|
757e68860a7cb02263f3fd03cfd49e7648a68950
|
4eacd452f60e958b5bfcc8c4d6389eaa06b5bcc5
|
refs/heads/main
| 2023-04-07T12:42:59.297337
| 2022-04-22T22:05:07
| 2022-04-22T22:05:07
| 459,738,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
install-packages-and-libraries.R
|
######################### Installing necessary packages for map-making and data cleaning #########################
# Maxine Cruz, Deanna Sunnergren, and Caelan Wilkie-Rogers
# Spring 2022
######################### INSTALL PACKAGES AND LOAD LIBRARIES #########################
# List all the packages required to run this code
# Storing them in one place (required <-) makes it easier to install them in one go
required <- c("raster", "sp", "dismo", "maptools", "spocc", "rgdal", "sf", "tidyverse",
"maps", "ggplot2", "rnaturalearth", "rnaturalearthdata")
# Install packages
install.packages(required)
# Load packages
# If these are not run, nothing in this script will either
library("raster")
library("sp")
library("dismo")
library("maptools")
library("spocc")
library("rgdal")
library("sf")
library("tidyverse")
library("maps")
library("ggplot2")
library("rnaturalearth")
library("rnaturalearthdata")
|
cf66062e9ec61bbe67836a06814bcb84f68d1474
|
a72fb18744addb14531743f8067b44dfd01022e8
|
/assets/code/Yamnaya_Heights.R
|
4e14937e963f52a4a866070527c2eac563ee96f3
|
[
"MIT"
] |
permissive
|
mathii/mathii.github.com
|
0ed57f89edfa2ff58d14b9c2cd688926deb92344
|
0d718db11b789f920abdd65c5a733f74dbb6fb07
|
refs/heads/master
| 2022-03-08T00:54:36.232278
| 2022-02-18T18:37:02
| 2022-02-18T18:37:02
| 30,841,538
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,473
|
r
|
Yamnaya_Heights.R
|
# GET EQUATION AND R-SQUARED AS STRING
# SOURCE: http://goo.gl/K4yh
lm.eqn <- function(m){
eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(R)^2~"="~r2,
list(a = format(coef(m)[1], digits = 2),
b = format(coef(m)[2], digits = 2),
r2 = format(summary(m)$r.squared, digits = 2)))
as.character(as.expression(eq));
as.expression(eq);
}
data <- read.table("Yamnaya_heights.txt", header=TRUE)
plot(data$Yamnaya_PC, data$Height, pch=16, xlab="Percent Yamnaya ancestry (Haak et al 2015)", ylab="Average adult male height (cm)", bty="n", ylim=c(170,182))
xx=data$Yamnaya_PC
yy=data$Height
pp <- rep(4,NROW(data))
pp[data$Region=="Finland"]<-3
pp[data$Region=="Frnace"]<-3
pp[data$Region=="Tuscany"]<-3
pp[data$Region=="Belarussia"]<-1
pp[data$Region=="England"]<-1
pp[data$Region=="Hungary"]<-3
pp[data$Region=="Lithuania"]<-3
pp[data$Region=="Czech"]<-2
text(xx, yy, data$Region, pos=pp, cex=0.5)
## Regression
lm1<-lm(data$Height~data$Yamnaya_PC)
abline(lm1$coefficients, col="blue", lwd=2)
text(0, lm1$coefficients[1], lm.eqn(lm1), col="blue", pos=4, cex=0.75)
## Remove shortest
data2 <- data[!(data$Region %in% c("Sicily", "Sardinia", "Malta")),]
lm2<-lm(data2$Height~data2$Yamnaya_PC)
lines(c(15,100), lm2$coefficients[1]+c(15,100)*lm2$coefficients[2], col="red", lwd=2 )
text(-2, lm2$coefficients[1]+15*lm2$coefficients[2]-0.3, lm.eqn(lm2), col="red", pos=4, cex=0.75)
|
78461f7ac1f9966f8c09a8c92e1087be4eaf0cd4
|
10f2b26a285eee200316578e56eb03c3f858d0ff
|
/Part4/sec3_ggplot2.R
|
f78ca9727c368f5be12905175d5c2c10c00290e7
|
[] |
no_license
|
ckiekim/R-Lecture
|
fe27a143c04b972a947fe4a8b7ce40bdca39e814
|
3badfe3d29102a4dea87586be4fed0f9cf013dc2
|
refs/heads/master
| 2020-05-29T13:08:18.383370
| 2019-06-18T23:58:07
| 2019-06-18T23:58:07
| 189,150,220
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,039
|
r
|
sec3_ggplot2.R
|
# ggplot2
install.packages('ggplot2')
library(ggplot2)
install.packages("gridExtra")
library(gridExtra)
setwd("d:/Workspace/R_Data_Analysis/Part4")
korean <- read.table("data/학생별국어성적_new.txt", header=T, sep=",")
korean
ggplot(korean, aes(x=이름, y=점수)) + # positional argument
geom_point()
ggplot(mapping=aes(x=이름, y=점수), data=korean) + # keyword argument
geom_point()
ggplot(korean, aes(x=이름, y=점수)) +
geom_bar(stat="identity")
ggplot(korean, aes(x=이름, y=점수)) +
geom_bar(stat="identity", fill="green", colour="red")
ggplot(korean, aes(x=이름, y=점수)) +
geom_bar(stat="identity", fill="green", colour="red") +
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1,
color="blue", size=8))
score_kem <- read.csv("data/학생별과목별성적_국영수_new.csv")
score_kem
library(dplyr)
sort_kem <- arrange(score_kem, 이름, 과목)
sort_kem
# sort_kem2 <- ddply(sort_kem,"이름",transform,누적합계=cumsum(점수))
sort_kem2 <- sort_kem %>%
group_by(이름) %>%
mutate(누적합계=cumsum(점수))
sort_kem2
# sort_kem3 <- ddply(sort_kem2,"이름",transform,누적합계=cumsum(점수),
# label=cumsum(점수)-0.5*점수)
sort_kem3 <- sort_kem2 %>%
group_by(이름) %>%
mutate(label=cumsum(점수)-0.5*점수)
sort_kem3
sort_kem4 <- sort_kem %>%
group_by(이름) %>%
mutate(누적합계=cumsum(점수)) %>%
mutate(label=cumsum(점수)-0.5*점수)
sort_kem4
sort_kem5 <- sort_kem %>%
group_by(이름) %>%
mutate(누적합계=cumsum(점수), label=cumsum(점수)-0.5*점수)
sort_kem5
ggplot(sort_kem5, aes(x=이름, y=점수, fill=과목)) +
geom_bar(stat="identity") +
geom_text(aes(y=label, label=paste(점수,'점')), colour="black",
size=4)
ggplot(sort_kem5, aes(x=이름, y=점수, fill=과목)) +
geom_bar(stat="identity") +
geom_text(aes(y=label, label=paste(점수,'점')), colour="black",
size=4) +
# guides(fill=guide_legend(reverse=T)) +
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1,
colour="black", size=8))
score <- read.table("data/학생별전체성적_new.txt",header=T,sep=",")
score
score_eng <- score[, c('이름','영어')]
ggplot(score, aes(x=영어, y=reorder(이름,영어))) +
geom_point(size=4) +
theme_classic() +
theme(panel.grid.major.x=element_blank( ) ,
panel.grid.minor.x=element_blank( ) ,
panel.grid.major.y=element_line(color="red",
linetype="dashed"))
ggplot(score, aes(x=영어, y=reorder(이름,영어))) +
geom_segment(aes(yend=이름), xend=0, color="blue") +
geom_point(size=6, color="green") +
theme_bw() +
theme(panel.grid.major.y=element_blank())
mtcars
str(mtcars)
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point()
ggplot(mtcars, aes(x=hp , y=disp)) +
geom_point()
ggplot(mtcars, aes(x=hp , y=disp)) +
geom_point(colour='blue')
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(color=factor(am)))
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(color=factor(am), size=5))
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(color=factor(am), size=wt))
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(shape=factor(am), size=wt))
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(shape=factor(am), color=wt))
# 종류별로 크기, 모양, 색상 지정하기
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(shape=factor(am), color=factor(am), size = wt)) +
scale_color_manual(values=c("red","green"))
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(color='red') +
geom_line(colour='blue')
par(oma=c(12, 10, 12, 10)) # 여백 조정
ggplot(mtcars, aes(x=hp , y=mpg)) +
geom_point(aes(shape=factor(am), color=factor(am), size = wt)) +
scale_color_manual(values=c("red","green")) +
labs(x="마력" , y="연비(mile/gallon)")
three <- read.csv("data/학생별과목별성적_3기_3명.csv")
three
sort_score <- arrange(three, 이름, 과목)
ggplot(sort_score, aes(x=과목, y=점수, color=이름, group=이름)) +
geom_line()
ggplot(sort_score, aes(x=과목, y=점수, color=이름, group=이름)) +
geom_line() +
geom_point(size=3)
ggplot(sort_score, aes(x=과목, y=점수, color=이름, group=이름,
fill=이름)) +
geom_line() +
geom_point(size=3, shape=22)
ggplot(sort_score, aes(x=과목, y=점수, color=이름, group=이름)) +
geom_line() +
geom_point(size=3) +
ggtitle("학생별 과목별 성적")
dis <- read.csv("data/1군전염병발병현황_년도별.csv",
stringsAsFactors=F)
str(dis)
ggplot(dis, aes(x=년도별, y=장티푸스, group=1)) +
geom_line()
ggplot(dis, aes(x=년도별, y=장티푸스, group=1)) +
geom_area()
ggplot(dis, aes(x=년도별, y=장티푸스, group=1)) +
geom_area(color="red", fill="cyan", alpha=0.4)
ggplot(dis, aes(x=년도별, y=장티푸스, group=1)) +
geom_area(fill="cyan", alpha=0.4) +
geom_line(color='blue')
|
5d1d4e6ad177777892462aee0cbd9cf42a40d6b5
|
ab9254521cd0a26433d5ad7dd21a6638ec47653d
|
/exercises/exercise3/market.R
|
7a048d119800b0a1670fd9a47c4e15d6318e8a71
|
[] |
no_license
|
Aubrey9922/SDS-323
|
49c7874dc607362d9aeafd497759fbcd7ab05d5f
|
fbb3fad6021d2bfdc5324883d7ac7bc39bd1620f
|
refs/heads/master
| 2022-04-20T00:39:22.548068
| 2020-04-13T14:02:17
| 2020-04-13T14:02:17
| 256,038,866
| 0
| 0
| null | 2020-04-15T21:17:28
| 2020-04-15T21:17:27
| null |
UTF-8
|
R
| false
| false
| 1,679
|
r
|
market.R
|
library(mosaic)
library(tidyverse)
library(ggplot2)
library(LICORS) # for kmeans++
library(foreach)
library(reshape2)
mkt = read.csv("social_marketing.csv")
# try hierarchical clustering
# convert integer variables to numeric to use scale() function
# mkt[2:37] <- lapply(mkt[2:37], as.numeric)
# mkt = mkt[-1] %>% mutate_if(is.numeric, scale(mkt, center=TRUE, scale=TRUE))
# Form a pairwise distance matrix using the dist function
mkt_distance_matrix = dist(mkt[-1], method='euclidean')
# Now run hierarchical clustering
hier_mkt = hclust(mkt_distance_matrix, method='complete')
# Plot the dendrogram
# plot(hier_mkt, cex=0.8)
cluster1 = cutree(hier_mkt, k=5)
summary(factor(cluster1))
# try K-means++ clustering
# mkt <- subset(mkt, select = -c(X)) # remove the anonymous identifier
# Center and scale the data
# NOT SURE WE NEED TO DO THIS IF EVERYTHING IS A COUNT
mkt = scale(mkt[-1], center=TRUE, scale=TRUE)
# Extract the centers and scales from the rescaled data (which are named attributes)
mu = attr(mkt,"scaled:center")
sigma = attr(mkt,"scaled:scale")
mkt_long <- melt(mkt) # convert matrix to long dataframe
mkt <- spread(mkt_long, Var2, value)# convert long dataframe to wide
# Run k-means plus plus.
clust2 = kmeanspp(mkt[-1], k=6, nstart=25)
clust2$center[1,]*sigma + mu
clust2$center[2,]*sigma + mu
clust2$center[4,]*sigma + mu
# A few plots with cluster membership shown
ggplot(data = mkt,
aes(x = travel, y = food, color = factor(clust2$cluster))) +
geom_point()
ggplot(data = mkt,
aes(x = current_events, y = politics, color = factor(clust2$cluster))) +
geom_point()
|
80009afb913721db9d022d6575d0e54a42af479d
|
a3498335d4980221f257ab76b8e88e479d153927
|
/run_analysis.R
|
0c053a849f7ea4c1ae12898f1781de425a889791
|
[] |
no_license
|
roysumit2468/getting_and_cleaning_data
|
7a7a16c496854700286f8e1659d513a726716a87
|
751d69e08758100c983ebd966bb89585a0d4584a
|
refs/heads/master
| 2020-04-05T23:05:04.300862
| 2015-01-25T07:21:39
| 2015-01-25T07:21:39
| 29,804,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
r
|
run_analysis.R
|
# run_analysis.R -- tidies the UCI HAR (Samsung accelerometer) dataset:
# merges the test/train sets, keeps only mean()/std() variables, labels
# activities, and writes per-subject/per-activity averages to a file.
#read the test and training data
test<-read.table("test/x_test.txt")
subject_test<-read.table("test/subject_test.txt")
y_test<-read.table("test/y_test.txt")
train<-read.table("train/x_train.txt")
subject_train<-read.table("train/subject_train.txt")
y_train<-read.table("train/y_train.txt")
#to merge the training and test datasets
merged_data<-rbind(train,test)
merged_subject<-rbind(subject_train,subject_test)
merged_activity<-rbind(y_train,y_test)
# to identify the mean and standard deviation variables among others
features<-read.table("features.txt",stringsAsFactors = FALSE)
# Feature names look like "tBodyAcc-mean()-X": after splitting on "-",
# the second piece tells us whether the column is a mean()/std() measure.
split<-strsplit(features[,2],"-")
logic<-c()
for(i in 1:length(split)){
  # TRUE for the columns we want to keep (mean or std measurements)
  logic<-c(logic,split[[i]][2] %in% c("mean()","std()"))
}
#to assign proper activity names to the corresponding indicators
activity_names<-read.table("activity_labels.txt")
library(plyr)
# plyr::join keeps the original row order, so activity labels stay
# aligned with the measurement rows.
join<-join(merged_activity,activity_names,by="V1")
join$V1<-NULL
colnames(join)<-"ACTIVITY"
# Organising the data into a tidy dataset
colnames(merged_data)<-features[,2]
merged_data<-merged_data[,logic]
colnames(merged_subject)<-"subject.id"
merged_data<-cbind(merged_subject,join,merged_data)
merged_data<-merged_data[order(merged_data[,1],merged_data[,2]),]
#creating the subjectwise and activitywise mean values for all the variables
names<-names(merged_data)
library(reshape2)
# Melt to long form, then average every variable within each
# subject/activity combination.
data_melt<-melt(merged_data,id.vars=names[1:2],measure.vars=names[3:length(names)])
data<-dcast(data_melt,subject.id + ACTIVITY~variable,mean)
# Prefix the averaged columns with "Avg-"; keep id columns readable.
a<-paste("Avg",colnames(data),sep = "-")
a[1]<-"subject_id"
a[2]<-"activity"
colnames(data)<-a
write.table(data,"run_analysis.txt",row.names = FALSE)
|
82d6eb010450b78958632574ce785dfb219b917c
|
c4585033a739e54b26616a94cbd44ec95d0746be
|
/R/detect_separation.R
|
bac039ed919c1e284a887008a51d790100dce13a
|
[] |
no_license
|
guhjy/brglm2
|
e6b87a33a540bd24edd35b636a7a85b5c4279dec
|
b3fb50c7109648e6a0b435ee679af1f14c1ae27b
|
refs/heads/master
| 2020-03-07T17:01:12.894994
| 2018-02-28T00:12:39
| 2018-02-28T00:12:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,372
|
r
|
detect_separation.R
|
# Copyright (C) 2017 Ioannis Kosmidis
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License
# (at your option).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#' Method for \code{\link{glm}} that tests for data separation and
#' finds which parameters have infinite maximum likelihood estimates
#' in generalized linear models with binomial responses
#'
#' \code{\link{detect_separation}} is a method for \code{\link{glm}}
#' that tests for the occurrence of complete or quasi-complete
#' separation in datasets for binomial response generalized linear
#' models, and finds which of the parameters will have infinite
#' maximum likelihood estimates. \code{\link{detect_separation}}
#' relies on the linear programming methods developed in Konis (2007).
#'
#' @inheritParams stats::glm.fit
#'
#' @aliases detectSeparation print.detect_separation
#'
#' @param x \code{x} is a design matrix of dimension \code{n * p},
#' @param y \code{y} is a vector of observations of length \code{n}
#' @param control a list of parameters controlling separation
#' detection. See \code{\link{detect_separation_control}} for
#' details.
#' @param start currently not used
#' @param mustart currently not used
#' @param etastart currently not used
#' @param singular.ok logical. If \code{FALSE}, a singular model is an
#' error.
#' @param ... arguments to be used to form the default 'control'
#' argument if it is not supplied directly.
#'
#' @details
#'
#' For the definition of complete and quasi-complete separation, see
#' Albert and Anderson (1984).
#'
#' \code{\link{detect_separation}} is a wrapper to the \code{separator}
#' function from the **safeBinaryRegression** R package, that can be
#' passed directly as a method to the \code{\link{glm}} function. See,
#' examples.
#'
#' The interface to \code{separator} was designed by Ioannis Kosmidis
#' after correspondence with Kjell Konis, and a port of
#' \code{separator} has been included in **brglm2** under the
#' permission of Kjell Konis.
#'
#' \code{detectSeparation} is an alias for \code{detect_separation}.
#'
#' @author Ioannis Kosmidis [aut, cre] \email{ioannis.kosmidis@warwick.ac.uk}, Kjell Konis [ctb] \email{kjell.konis@me.com}
#'
#' @seealso \code{\link{brglmFit}}, \code{\link{glm.fit}} and \code{\link{glm}}
#'
#' @references
#'
#' Kjell Konis (2007). *Linear Programming Algorithms for Detecting
#' Separated Data in Binary Logistic Regression
#' Models*. DPhil. University of Oxford.
#' \url{https://ora.ox.ac.uk/objects/uuid:8f9ee0d0-d78e-4101-9ab4-f9cbceed2a2a}
#'
#' Kjell Konis (2013). safeBinaryRegression: Safe Binary Regression. R
#' package version 0.1-3.
#' \url{https://CRAN.R-project.org/package=safeBinaryRegression}
#'
#' @examples
#'
#' ## endometrial data from Heinze \& Schemper (2002) (see ?endometrial)
#' data("endometrial", package = "brglm2")
#' endometrial_sep <- glm(HG ~ NV + PI + EH, data = endometrial,
#' family = binomial("logit"),
#' method = "detect_separation")
#' endometrial_sep
#' ## The maximum likelihood estimate for NV is infinite
#' summary(update(endometrial_sep, method = "glm.fit"))
#'
#' \dontrun{
#' ## Example inspired by unpublished microeconometrics lecture notes by
#' ## Achim Zeileis https://eeecon.uibk.ac.at/~zeileis/
#' ## The maximum likelihood estimate of sourhernyes is infinite
#' data("MurderRates", package = "AER")
#' murder_sep <- glm(I(executions > 0) ~ time + income +
#' noncauc + lfp + southern, data = MurderRates,
#' family = binomial(), method = "detect_separation")
#' murder_sep
#' ## which is also evident by the large estimated standard error for NV
#' murder_glm <- update(murder_sep, method = "glm.fit")
#' summary(murder_glm)
#' ## and is also reveal by the divergence of the NV column of the
#' ## result from the more computationally intensive check
#' check_infinite_estimates(murder_glm)
#' ## Mean bias reduction via adjusted scores results in finite estimates
#' update(murder_glm, method = "brglm_fit")
#' }
#' @export
detect_separation <- function (x, y, weights = rep(1, nobs),
                               start = NULL, etastart = NULL, mustart = NULL,
                               offset = rep(0, nobs), family = gaussian(),
                               control = list(), intercept = TRUE, singular.ok = TRUE) {
    ## glm "method" function: mirrors the glm.fit signature so it can be
    ## supplied via glm(..., method = "detect_separation"). Instead of
    ## fitting, it runs a linear program to decide whether the data are
    ## (quasi-)separated and which coefficients are infinite.
    if (family$family != "binomial") {
        warning("detect_separation has been developed for use with binomial-response models")
    }
    ## Fill in defaults for any control options the caller omitted.
    control <- do.call("detect_separation_control", control)
    ## ensure x is a matrix
    x <- as.matrix(x)
    betas_names <- dimnames(x)[[2L]]
    ##
    nobs <- NROW(y)
    nvars <- ncol(x)
    EMPTY <- nvars == 0
    if (is.null(weights)) {
        weights <- rep.int(1, nobs)
    }
    if (missingOffset <- is.null(offset)) {
        offset <- rep.int(0, nobs)
    }
    ## Initialize as prescribed in family
    ## (normalises y/weights the same way glm.fit would, e.g. for factor
    ## or two-column binomial responses)
    eval(family$initialize)
    if (EMPTY) {
        ## No covariates: separation is impossible by definition.
        out <- list(separation = FALSE)
    }
    else {
        ## as in brglmFit
        boundary <- converged <- FALSE
        ## Detect aliasing
        qrx <- qr(x)
        rank <- qrx$rank
        is_full_rank <- rank == nvars
        if (!singular.ok && !is_full_rank) {
            stop("singular fit encountered")
        }
        if (!isTRUE(is_full_rank)) {
            ## Drop aliased columns; their estimates stay NA in betas_all.
            ## NOTE(review): X_all is assigned but never used afterwards.
            aliased <- qrx$pivot[seq.int(qrx$rank + 1, nvars)]
            X_all <- x
            x <- x[, -aliased]
            nvars_all <- nvars
            nvars <- ncol(x)
            betas_names_all <- betas_names
            betas_names <- betas_names[-aliased]
        }
        else {
            nvars_all <- nvars
            betas_names_all <- betas_names
        }
        ## Result vector over ALL columns (aliased ones remain NA).
        betas_all <- structure(rep(NA_real_, nvars_all), .Names = betas_names_all)
        ## Observations with zero weight do not enter calculations so ignore
        keep <- weights > 0
        x <- x[keep, ]
        y <- y[keep]
        ## Reshape data set: keep 0 and 1, and replace anything in (0,
        ## 1) with one zero and one 1
        ones <- y == 1
        zeros <- y == 0
        non_boundary <- !(ones | zeros)
        x <- x[c(which(ones), which(zeros), rep(which(non_boundary), 2)), ]
        y <- c(y[ones], y[zeros], rep(c(0., 1.), each = sum(non_boundary)))
        ## Run linear program
        ## (separator is the port of safeBinaryRegression's routine,
        ## defined elsewhere in this package)
        out <- separator(x = x, y = y, linear_program = control$linear_program, purpose = control$purpose, beta_tolerance = control$beta_tolerance)
        if (is.null(out$beta)) {
            betas_all <- NULL
        }
        else {
            ## Coefficients within beta_tolerance of zero are finite (0);
            ## anything larger is declared infinite, keeping its sign
            ## (Inf * beta preserves the sign of beta).
            betas <- out$beta
            names(betas) <- betas_names
            inds <- abs(betas) < control$beta_tolerance
            betas <- Inf * betas
            betas[inds] <- 0
            betas_all[betas_names] <- betas
        }
        out <- list(x = x, y = y, betas = betas_all, separation = out$separation)
    }
    out$linear_program <- control$linear_program
    out$purpose <- control$purpose
    ## NOTE(review): the object is classed "detect_separation_core" while
    ## out$class records "detect_separation" -- presumably the glm wrapper
    ## consumes $class when rebuilding the returned fit; confirm before
    ## changing either assignment.
    out$class <- "detect_separation"
    class(out) <- "detect_separation_core"
    return(out)
}
#' Auxiliary function for the \code{\link{glm}} interface when
#' \code{method} is \code{\link{detect_separation}}.
#'
#' Typically only used internally by \code{\link{detect_separation}}
#' but may be used to construct a \code{control} argument.
#'
#' @aliases detectSeparationControl
#' @param linear_program should \code{\link{detect_separation}} solve
#' the \code{"primal"} or \code{"dual"} linear program for
#' separation detection?
#' @param purpose should \code{\link{detect_separation}} simply
#' \code{"test"} for separation or also \code{"find"} which
#' parameters are infinite?
#' @param beta_tolerance maximum absolute variable value from the
#' linear program, before separation is declared
#'
#' @export
detect_separation_control <- function(linear_program = c("primal", "dual"),
                                      purpose = c("find", "test"),
                                      beta_tolerance = sqrt(.Machine$double.eps)) {
  ## Validate the enumerated choices (match.arg picks the first default
  ## when the argument was not supplied) and bundle the settings into the
  ## control list consumed by detect_separation().
  settings <- list(
    linear_program = match.arg(linear_program),
    purpose = match.arg(purpose),
    beta_tolerance = beta_tolerance
  )
  settings
}
#' @method print detect_separation
#' @export
print.detect_separation <- function(x, digits = max(5L, getOption("digits") - 3L), ...) {
  ## First line: whether separation was detected at all.
  cat("Separation:", x$separation, "\n")
  betas <- x$betas
  ## When coefficient information is available (purpose = "find"), list
  ## which estimates are finite (0) versus +/- infinite.
  if (!is.null(betas)) {
    cat("Existence of maximum likelihood estimates\n")
    print(betas)
    cat("0: finite value, Inf: infinity, -Inf: -infinity\n")
  }
}
print.detect_separation_core <- function(x, digits = max(5L, getOption("digits") - 3L), ...) {
    ## The core object prints exactly like a "detect_separation" object;
    ## delegate instead of duplicating the body (the two methods were
    ## byte-for-byte copies, a maintenance hazard).
    print.detect_separation(x, digits = digits, ...)
}
|
4614c20d0b9561e09ef56d1b9e3b8354083c68a9
|
dcc8417dd0dc34901e04235725b9529c8c48a8f9
|
/plot_prod.R
|
e19fad7dab27447e49d9432a6b1e2cbbe4d3cbbd
|
[] |
no_license
|
kuanb/ExData_Plotting1
|
bf5fc6c235db124ccb01ba58483e6ff23d1f21fc
|
da30b0d61021d2547cfe037dc1e3af2fe76186fa
|
refs/heads/master
| 2021-01-21T22:05:34.580642
| 2014-09-08T14:53:05
| 2014-09-08T14:53:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
plot_prod.R
|
# plot_prod.R -- load the UCI household power consumption data for
# 1-2 Feb 2007 and prepare date-time and numeric columns for plotting.
library(lubridate)
library(sqldf)  # FIX: read.csv.sql() below comes from sqldf and was never loaded
# Set working dir
# NOTE(review): machine-specific absolute path; prefer running the
# script from the project directory instead of hard-coding setwd().
setwd("/Users/kuanbutts/Documents/MIT/Summer2014/Coursera/Course 4/ExData_Plotting1")
# Read txt file in as a table, filtering to the two target dates in SQL
# so the full file never has to be held in memory at once.
# OLD METHOD: baseDF <- read.table("household_power_consumption.txt", header= TRUE, sep = ";")
baseDF <- read.csv.sql( file='household_power_consumption.txt',
                        sep=";",
                        sql="select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
                        header=TRUE)
# Make date col. as.Date (input format is day/month/year)
baseDF$Date <- as.Date(baseDF$Date, format="%d/%m/%Y")
# Could not figure out time without getting Date involved, except with lubridate package
baseDF$difDate <- ymd(baseDF$Date)
baseDF$difTime <- hms(baseDF$Time)
# Combine date and time-of-day into a single date-time column.
baseDF$difDT <- baseDF$difDate + baseDF$difTime
# Convert Global_active_power to numeric for plotting
baseDF$Global_active_power <- as.numeric(baseDF$Global_active_power)
|
2b485e4179902db0d59589250ca14b7dd4aee7dd
|
4e01acf5a07af95846300ed1016edf601fdbb6cc
|
/Rprogramming/assignment1/complete.R
|
249b0764945a81fe3f61cd4eff5c13cc788a276c
|
[] |
no_license
|
carolcoder/datasciencecoursera
|
5b5c8e9ca270ba961061c4ae4b5dcacfdcf1bab5
|
d80a4ac780506179ab1e25cf559256f2f9de4a31
|
refs/heads/master
| 2021-01-23T02:49:10.301308
| 2015-08-07T20:06:33
| 2015-08-07T20:06:33
| 30,250,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 720
|
r
|
complete.R
|
# complete.R - Assignment 2
# Set wd to C:/Users/sao-carolinamo/Documents/Carolina/R/coursera/Rprogramming/assignment1
# complete(): for each requested monitor id, count the rows of that
# monitor's CSV data that have no missing values.
#
# Args:
#   directory: path (relative or absolute) containing the monitor CSV
#              files, each with an ID column.
#   id:        integer vector of monitor ids to summarise (default 1:332).
# Returns:
#   data.frame with columns `id` and `nobs` (complete-case row counts).
complete <- function(directory, id = 1:332) {
  # FIX: `pattern` is a regular expression, not a glob -- "*.csv" was
  # malformed; match a literal ".csv" suffix instead.
  files <- list.files(directory, pattern = "\\.csv$")
  # FIX: build paths with file.path() so absolute directories work too
  # (the original prefixed getwd(), which broke absolute paths).
  paths <- file.path(directory, files)
  # Read every file once and stack them, then keep only NA-free rows.
  all_data <- do.call(rbind, lapply(paths, read.csv))
  complete_data <- all_data[complete.cases(all_data), ]
  # Count complete rows per requested monitor id.
  nobs <- vapply(id, function(x) sum(complete_data$ID == x), integer(1))
  # FIX: make the summary the visible return value; the original ended
  # on an assignment, which returns the value only invisibly.
  data.frame(id = id, nobs = nobs)
}
|
ee951ea085c3023b785e547154514c635d90dea7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mldr/examples/evmetrics-av.Rd.R
|
94400e825e2de0679179498aa0c1effe65e918a5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 737
|
r
|
evmetrics-av.Rd.R
|
library(mldr)
### Name: Averaged metrics
### Title: Multi-label averaged evaluation metrics
### Aliases: 'Averaged metrics' accuracy precision micro_precision
###   macro_precision recall micro_recall macro_recall fmeasure
###   micro_fmeasure macro_fmeasure
### ** Examples
# Ground-truth label assignments: one row per instance, one column per label.
true_labels <- rbind(
  c(1, 1, 1),
  c(0, 0, 0),
  c(1, 0, 0),
  c(1, 1, 1),
  c(0, 0, 0),
  c(1, 0, 0)
)
# Classifier predictions for the same instances and labels.
predicted_labels <- rbind(
  c(1, 1, 1),
  c(0, 0, 0),
  c(1, 0, 0),
  c(1, 1, 0),
  c(1, 0, 0),
  c(0, 1, 0)
)
# "diagnose" mode for handling undefined per-label values.
precision(true_labels, predicted_labels, undefined_value = "diagnose")
# Undefined per-label recalls count as 0 in the macro average.
macro_recall(true_labels, predicted_labels, undefined_value = 0)
# Custom rule for undefined F-measures, computed from the confusion counts.
macro_fmeasure(
  true_labels, predicted_labels,
  undefined_value = function(tp, fp, tn, fn) as.numeric(fp == 0 && fn == 0)
)
|
452c8995007924b620ba243a0a637ad6dd4a8eee
|
8c0abf72e19bd097be33e4451e960be968ae3005
|
/R/series3.R
|
3a5fe24f9a65be45d46ef402b883f28017348c16
|
[] |
no_license
|
cran/PBIBD
|
286aace68dda110f5b65616d7e396b3e62f1bc52
|
4358637800ab96f362a4d2d8bc97d3db6434f0d5
|
refs/heads/master
| 2020-09-23T18:23:53.758487
| 2017-12-21T13:23:29
| 2017-12-21T13:23:29
| 66,554,503
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,470
|
r
|
series3.R
|
# Generate a Series-3 Partially Balanced Incomplete Block (PBIB) design
# for a given n: v = b = 5n treatments/blocks, r = k = 2(n+1), with five
# associate-class lambda values. Prints the treatment array M, the
# initial block matrix B, the design parameters, and the developed
# blocks. Called for its printed output; the return value is not useful.
series3 <-
function(n){
# Design parameters.
v<-5*n
b<-5*n
r<-2*(n+1)
k<-2*(n+1)
# Lambda values for the five associate classes.
l<-c((n+2),(n+2),3,2,2*n)
B<-matrix(nrow=n,ncol=k)
M<-matrix(nrow=5,ncol=n)
C<-matrix(nrow=n,ncol=k)
# M[i,j] numbers the 5n treatments in a 5 x n array, column by column.
for(i in 1:5){
for(j in 1:n){
M[i,j]<-i+5*(j-1)
}
}
# Build the n initial blocks: columns 1-4 take the first four treatments
# of column i of M; the remaining columns take the row-2 and row-3
# treatments of every OTHER column of M.
for(i in 1:n){
for(j in 1:4){
B[i,j]<-M[j,i]
}
jj=5
kk<-n+4
for(p in 1:n)
{
if(i!=p){
B[i,jj]<-M[2,p]
B[i,kk]<-M[3,p]
jj<-jj+1
kk<-kk+1
}
}
}
print(M)
print(B)
# C interleaves the two halves of B's tail columns; it supplies the tail
# of the first developed block of each initial block below.
cont<-1
cnt<-1
for(j in 1:(n-1)){
C[,4+cont]<-B[,4+j]
cont<-cont+2
C[,5+cnt]<-B[,n+3+j]
cnt<-cnt+2
}
cat("The Parameters of the design are:","\n")
cat("v = ",v,"b = ",b,"r = ",r,"k = ",k,"\n")
for(i in 1:5)
cat("lambda[",i,"] = ",l[i],"\t")
cat("\n")
cat("The developed blocks are:","\n")
# Develop each initial block cyclically and print five blocks per
# initial block.
for(i in 1:n)
{
matt<-matrix(nrow=5,ncol=k)
for(j in 1:5)
{
matt[j,]<-B[i,]+(j-1)
{
# Wrap the first four entries back into treatment group i
# (cyclic development modulo the group of 5 treatments).
for(ii in 1:4)
{
diff<-0
if(matt[j,ii]>5*i)
{
diff<-matt[j,ii]-5*i
matt[j,ii]<-diff+(i-1)*5
}
}
if(j==1)
{
matt[j,5:k]<-C[i,5:k]
cat("(",matt[j,],")")
cat("\n")
}
if(j==2 || j==3)
{
matt[j,5:k]<-matt[(j-1),5:k]+1
cat("(",matt[j,],")")
cat("\n")
}
count<-1
cnt<-1
# For rows 4 and 5 the tail develops the two interleaved halves
# separately: one half steps by +1 while the other wraps by -4,
# and the roles swap between j==4 and j==5.
if(j==4)
{
for(ij in 1:(n-1)){
matt[j,4+count]<-matt[j-1,4+count]+1
count<-count+2
matt[j,5+cnt]<-matt[j-1,5+cnt]-4
cnt<-cnt+2
}
#print(matt[j,])
cat("(",matt[j,],")")
cat("\n")
}
if(j==5)
{
for(ij in 1:(n-1)){
matt[j,4+count]<-matt[j-1,4+count]-4
count<-count+2
matt[j,5+cnt]<-matt[j-1,5+cnt]+1
cnt<-cnt+2
}
cat("(",matt[j,],")")
cat("\n")
}
}
}
}
}
|
e43789ce9cc063af3288dd6f84b52aa3ba4486f4
|
b938190e775654f7c0af94b10bb1555c7fddd37e
|
/tests/testthat/test-findOverlaps-methods.R
|
818e52ae756e1682a07009752728d4a758a77a70
|
[] |
no_license
|
mcieslik-mctp/GenomicTuples
|
ea621ad66454d70f1f6536ec169a4cf662efc302
|
c29f4b2a199f7bd15968bc8f7a5fbdf99422687b
|
refs/heads/master
| 2021-01-18T00:16:22.854256
| 2014-09-03T15:30:08
| 2014-09-03T15:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,551
|
r
|
test-findOverlaps-methods.R
|
# NB: Several objects used in testing are defined in
# tests/testthat/helper-make-test-data.R
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### findOverlaps
###
context("GTuples findOverlaps method")
test_that("GTuples,GTuples overlaps", {
  ## empty
  # gt0 (from the helper file) is expected to be empty: no hits at all.
  hits <- findOverlaps(gt0, gt0)
  expect_true(inherits(hits, "Hits"))
  expect_equal(length(hits), 0)
  ## identical 1
  # Self-overlap of gt1 (expected to hold 10 tuples, per the helper)
  # should produce exactly the 10 diagonal query/subject pairs.
  hits <- findOverlaps(gt1, gt1)
  expect_true(inherits(hits, "Hits"))
  expect_equal(length(hits), 10)
  expect_equal(hits@queryHits, 1:10)
  expect_equal(hits@queryHits, hits@subjectHits)
  ## identical 2
  # Two 2-tuples on the same chromosome and strand: type = "equal"
  # matches only identical tuples, while type = "any" also pairs
  # overlapping-but-unequal tuples (so all four combinations hit).
  tmp2 <- GTuples(seqnames = c("chr1", "chr1"),
                  tuples = matrix(c(1L,2L,3L,4L), nrow=2),
                  strand = "+")
  hits = findOverlaps(tmp2, tmp2, type="equal")
  expect_equal(length(hits), 2)
  expect_equal(hits@queryHits, 1:2)
  hits = findOverlaps(tmp2, tmp2, type="any")
  expect_equal(length(hits), 4)
  expect_equal(hits@queryHits, c(1,1,2,2))
  expect_equal(hits@subjectHits, c(1,2,1,2))
  ## identical 3
  # Same pattern, but with 3-tuples.
  tmp3 <- GTuples(seqnames = c("chr1", "chr1"),
                  tuples = matrix(c(1L,2L,3L,4L,5L,6L), nrow=2),
                  strand = "+")
  hits = findOverlaps(tmp3, tmp3, type="equal")
  expect_equal(length(hits), 2)
  expect_equal(hits@queryHits, 1:2)
  hits = findOverlaps(tmp3, tmp3, type="any")
  expect_equal(length(hits), 4)
  expect_equal(hits@queryHits, c(1,1,2,2))
  expect_equal(hits@subjectHits, c(1,2,1,2))
})
|
7081c7f6d977573ec1e366d65f40bdadeee072f4
|
2259405ed20f5876d5f4cc14796f5e482a416617
|
/sequence.R
|
e6cb3771df2771f9f4b8ebc6d7420d495361504c
|
[] |
no_license
|
pkcodesat/E-commerce-product-recommendation-system-using-rule-and-sequence-mining
|
ed135e916ee2489fcf7218b29416d4584f8c1ae9
|
9ad7e67002bb644fb012d1076a9ee4456dfc5075
|
refs/heads/master
| 2022-02-17T06:23:47.283707
| 2018-12-12T15:40:13
| 2018-12-12T15:40:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
r
|
sequence.R
|
##########################################
# simple demo of building sequence rules
##########################################
# Mines frequent event sequences with cSPADE, then induces sequence
# rules from them.
library(arulesSequences)
# Basket-format input: each transaction line carries sequenceID,
# eventID, and SIZE fields before the items.
# NOTE(review): absolute Windows path -- adjust for other machines.
data <- read_baskets(con = "C:/Users/91979/Desktop/web_analytics/a.txt", info = c("sequenceID","eventID","SIZE"))
as(head(data), "data.frame") # view first few rows of the data
# Frequent sequences with at least 10% support (verbose progress on).
seqs <- cspade(data, parameter = list(support = 0.1), control = list(verbose = TRUE))
as(seqs,"data.frame") # view the sequences
# Rules induced from the frequent sequences at >= 50% confidence.
rules <- ruleInduction(seqs, confidence = 0.5,control = list(verbose = TRUE))
as(rules,"data.frame") # view the rules
|
9d5e74ac0b4dcadfb32d546f777314a27c062346
|
4caeaa501d9497ddddbf033904e019974e0e1b7e
|
/assets/3.among_clade_variation.R
|
32702c6e77115d5e4a97cd11d52cf6ac4a82c90f
|
[] |
no_license
|
macroevolution/workshop-OSU
|
b545e8612cb8d0185ec78e711c5e23482be04b09
|
a43b40a6868a4a2730f86eac1ea9470763d1517a
|
refs/heads/master
| 2021-01-11T14:01:22.563366
| 2017-09-05T16:59:19
| 2017-09-05T16:59:19
| 94,931,335
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,184
|
r
|
3.among_clade_variation.R
|
# These sourced helper files define simulateTree() and the tip-rate /
# shift-tree utilities used throughout this script.
source("supporting/diversification_functions1.R")
source("supporting/traitDependent_functions.R")
source("supporting/simulate_shift_trees.R")
#----------------------------------------------
# Exercise 5: Chance!
# How much variation in species richness can you get from
# the same diversification process?
lambda <- 0.2
mu <- 0
max.t <- 25
REPS <- 1000
taxon_count <- numeric(REPS)
pars <- c(lambda = lambda, mu = mu)
simulateTree(pars = pars , max.t = max.t )
# Simulate REPS trees under identical settings and record how many
# tips each one ends up with.
for (i in 1:REPS){
  cat(i, '\n')
  tree <- simulateTree(c(0.2, 0), max.t=25)
  taxon_count[i] <- length(tree$tip.label)
}
#hist: plots a histogram
hist(taxon_count, breaks=100)
mean(taxon_count)
#
#----------------------------------------------
# Exercise 5b: Colless index ** Bonus exercise! **
# Implement function to compute colless index
# Apply it to a real dataset
# Assess significance by simulation
# Here we define the colless imbalance statistic
# Colless imbalance statistic: sum over internal nodes of the absolute
# difference in size between the node's two daughter clades.
# (A normalised variant, 2*C/((N-1)(N-2)), was present but commented out.)
colless <- function(x) {
  daughters <- balance(x)  # two-column matrix of daughter-clade sizes
  sum(abs(daughters[, 1] - daughters[, 2]))
}
# Empirical tree of 216 skink species.
# NOTE(review): read.tree is from ape -- presumably attached by one of
# the sourced helper files; confirm.
skinks <- read.tree("data/skinks/skinks216.tre")
# this is a tree with 216 tips and is >95% complete at
# the species level
# Now to assess significance:
# Simulate trees under constant-rate model
# compute colless for each
# store value
# This procedure gives us a null distribution that
# we can compare to the observed
source("supporting/diversification_functions1.R")
simulateTree(c(1,0), max.taxa = 216)
null_colless <- rep(NA, 1000)
# Null distribution: Colless index of 1000 constant-rate trees with the
# same number of tips as the skink tree.
for (ii in 1:1000){
  cat(ii, "\n")
  tree <- simulateTree(c(1,0), max.taxa = 216)
  null_colless[ii] <- colless(tree)
}
# Plot the null distribution
hist(null_colless, breaks=50)
# Visualize: how imbalanced is the skink tree
# relative to the simulations under constant-rate model?
obs <- colless(skinks)
lines(x=c(obs, obs), y = c(0, 100), lwd=3, col="red")
# and the pvalue, two-tailed:
2* sum(null_colless > obs) / (1 + length(null_colless))
#----------------------------------------------
# Exercise 6: Tip-specific rates!
# NOTE(review): rm(list = ls()) wipes the workspace mid-script, and this
# source() call drops the "supporting/" prefix used above -- confirm
# both are intentional.
rm(list = ls())
source("traitDependent_functions.R")
skinks <- read.tree("data/skinks/skinks216.tre")
# Equal-splits tip speciation rate for each species.
t_rates <- getEqualSplitsSpeciation(skinks)
# make a function that interpolates a set of colors
fx <- colorRampPalette(c("blue", "red"))
colset <- fx(100)
#plot(x = 1:100, y = 1:100, pch = 19, col = colset)
# Map each tip rate to a color, truncating at the interquartile range.
colvec <- colorFunction(t_rates, min = quantile(t_rates, 0.25), maxx = quantile(t_rates, 0.75), colset)
plot(skinks, type = "fan", show.tip.label=F)
tiplabels(pch=21, bg=colvec, cex=1.5)
#----------------------------------------------
# Exercise 7: simulate shift trees and compute tip-specific rates
source("simulate_shift_trees.R")
library(BAMMtools)
# these are exponential distributions with means of 0.15 and 0.05
lamfx <- function() return(rexp(1, 1/0.15))
mufx <- function() return(rexp(1, 1/0.05))
# rate at which events occur along the phylogeny
trate <- 0.006
tt <- simulateShiftTree(35.5, trate, lamfx, mufx, seed=8)
ed <- getEventData(phy = tt$phy, eventdata = tt$events)
z <- plot.bammdata(ed, lwd=2, breaksmethod = "linear")
addBAMMlegend(z)
# Compare the true (simulated) tip rates against the equal-splits
# estimates for the same tree.
true_lambda <- getTipRates(ed)$lambda.avg
rates <- getEqualSplitsSpeciation(tt$phy)
plot( rates ~ true_lambda, xlim=c(0,1), ylim=c(0,1))
abline(0,1)
cor.test(rates, true_lambda, method = "spear")
#----------------------------------------------
# Exercise 8: simulate
# a batch of shift trees and compute tip-specific rates
# and check correlation with true rates!
REPS <- 50
# cormat columns: 1 = correlation of true vs estimated rates,
# 2 = number of tips, 3 = tx[2], the tip count of the second-most
# common tip state -- TODO confirm interpretation of column 3.
cormat <- matrix(NA, nrow = REPS, ncol = 3)
seedvec <- 1:REPS + 100
for (i in 1:REPS){
  cat(i, "\n")
  tt <- simulateShiftTree(35.5, trate, lamfx, mufx, seed=seedvec[i])
  # Skip replicates where the simulation failed to return a tree.
  if (!is.na(tt$phy)[1]){
    ed <- getEventData(phy = tt$phy, eventdata = tt$events)
    true_lambda <- getTipRates(ed)$lambda.avg
    rates <- getEqualSplitsSpeciation(as.phylo(ed))
    cc <- cor.test(true_lambda, rates )
    cormat[i, 1] <- cc$estimate
    tx <- sort(table(ed$tipStates), decreasing=T)
    cormat[i,2] <- length(ed$tip.label)
    cormat[i,3] <- tx[2]
  }
}
|
6fbb26adb6cea1c803c2625776808a576397668d
|
7f514dd5d3c23c5a8e08e19f25d4126344a4d75b
|
/ARIMA time series.R
|
e95759c3defe51067165f8cc4a5bebcb58bd4356
|
[] |
no_license
|
Yujieisme/R-code
|
831ab28d0b3d0a64da4fcf036762328db4d951ac
|
7ea86972e4d8e5fd947cf6e71fa6ac13ea5566f9
|
refs/heads/master
| 2020-08-28T04:26:12.429246
| 2019-10-25T21:12:49
| 2019-10-25T21:12:49
| 217,589,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,968
|
r
|
ARIMA time series.R
|
# ARIMA time-series exercises: trend estimation, variance stabilisation,
# smoothing, and AR(2) ACF/PACF exploration.
#plot the data
# NOTE(review): installing packages inside a script is intrusive; these
# lines are kept as in the original but are usually commented out.
install.packages("astsa")
install.packages("forecast")
install.packages("TSA")
library(astsa)
library(forecast)
library(TSA)
set.seed(1000)
# Interactively choose the data file.
data1 <- read.table(file.choose())
ts_data1 <- ts(data1)
plot(ts_data1)
#do fitted line
summary(fit <- lm(ts_data1~time(ts_data1)))
plot(ts_data1)
abline(fit)
#do variance stabilization
BoxCox.lambda(ts_data1)
plot(log(ts_data1))
#estimate the trend (linear trend on the log scale)
fit = lm(log(ts_data1)~time(ts_data1))
beta1 = fit$coefficients[1]
beta2 = fit$coefficients[2]
mu = beta1 + beta2*time(ts_data1)
plot(log(ts_data1))
lines(mu,lty=2)
legend("bottomright","estimated trend",lty=2)
# Detrended (residual) series, set up as quarterly from 1960.
x=resid(fit)
x = ts(x, frequency=4, 1960)
plot(x, type='l',main="Detrended data1")
Dlogts_data1 = diff(log(ts_data1))
plot(Dlogts_data1,main="Differenced data1")
# Compare sample ACFs of the logged, detrended, and differenced series.
par ( mfrow =c(3 ,1) , mar=c(3 ,3 ,1 ,1) , mgp=c (1.6 ,.6 ,0) )
acf(as.numeric(log(ts_data1)),main="ACF of data1")
acf(as.numeric(x),main="ACF of data1")
acf(as.numeric(Dlogts_data1),main="ACF of differenced data1")
#do smoothing with centred moving averages of width 5 and 53
par(mfrow=c(1,1))
ma5 = stats::filter(ts_data1, sides=2, rep(1,5)/5)
ma53 = stats::filter(ts_data1, sides=2, rep(1,53)/53)
plot(ts_data1, type="p")
lines(ma5,col="red"); lines(ma53,col="blue")
legend("topright",c("MA5","MA53"),lty=1,col=c("red", "blue"))
#build AR(2) process
library(astsa)
library(forecast)
library(TSA)
polyroot(c(1,-1.3,0.4))
ARMAacf(ar=c(1.3, -0.4),lag.max=5)
# FIX: ar2_01/ar2_02/ar2_03 were used below but never created; simulate
# them the same way ar2_302 is built at the bottom of the script.
ar2_01 <- arima.sim(n=200, list(ar=c(1.3, -0.4)))
acf(ar2_01)
ARMAacf(ar=c(1.3, -0.4),lag.max=5,pacf=T)
pacf(ar2_01)
polyroot(c(1,-0.8,0.5))
ARMAacf(ar=c(0.8, -0.5),lag.max=5)
ar2_02 <- arima.sim(n=200, list(ar=c(0.8, -0.5)))
acf(ar2_02)
ARMAacf(ar=c(0.8, -0.5),lag.max=5,pacf=T)
pacf(ar2_02)
polyroot(c(1,1.6,0.64))
ARMAacf(ar=c(-1.6, -0.64),lag.max=5)
ar2_03 <- arima.sim(n=200, list(ar=c(-1.6, -0.64)))
acf(ar2_03)
ARMAacf(ar=c(-1.6, -0.64),lag.max=5,pacf=T)
pacf(ar2_03)
#for the AR(2) model given by Xt = -0.9Xt-2 +wt
polyroot(c(1,0,0.9))
ARMAacf(ar=c(0,-0.9),lag.max=4)
ar2_302 <- arima.sim(n=200,list(ar=c(0,-0.9)))
acf(ar2_302)
pacf(ar2_302)
|
f8fe39b8694f68abf843becf0f8bce2afac44de0
|
d5e4d8cc13151bf546727528ccf6849e2b43dc80
|
/Chapter 7/iCDA.Ch7.AccidentData.R
|
cec0a7fa226e9b4abb32541479c22d59157d54c6
|
[] |
no_license
|
jason2133/categorical_data_analysis
|
d369aeaee5b72cbfeb7da036ce7e413790d9c308
|
2e8bcf90aff80634fcd9ed30674c13f205058396
|
refs/heads/master
| 2022-01-19T11:43:36.241734
| 2022-01-15T19:02:47
| 2022-01-15T19:02:47
| 175,217,150
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,221
|
r
|
iCDA.Ch7.AccidentData.R
|
############
# Chapter 7
############
###Automobile Accidents and Seat Belts
# Loglinear models for the 2^4 contingency table of gender (G),
# location (L), seatbelt use (S), and injury (I).
Accidents <- read.table("http://www.stat.ufl.edu/~aa/cat/data/Accidents2.dat", header=TRUE)
Accidents # 16 cell counts in the contingency table
G <- Accidents$gender; L <- Accidents$location
S <- Accidents$seatbelt; I <- Accidents$injury
# Model with the three-factor G*L*S term plus all two-factor
# associations involving injury.
fit <- glm(count ~ G*L*S + G*I + L*I + S*I, family=poisson, data=Accidents)
summary(fit) # e.g. G*I represents G + I + G:I
deviance(fit) #G2
fitted(fit) #expected freq
sum(abs(Accidents$count - fitted(fit)))/(2*sum(Accidents$count)) #Dissimilarity index
# Homogeneous-association model: all six pairwise interactions only.
fit2 <- glm(count ~ G*L+G*S+L*S+G*I+L*I+S*I,family=poisson, data=Accidents)
summary(fit2)
deviance(fit2) #G2
fitted(fit2) #expected freq
sum(abs(Accidents$count - fitted(fit2)))/(2*sum(Accidents$count)) #Dissimilarity index
#Logit model, treating Injury as a response and the others as predictor variables
Injury <- read.table("http://www.stat.ufl.edu/~aa/cat/data/Injury_binom.dat", header=TRUE) # Injury_binom data file at text website
Injury
G <- Injury$gender; L <- Injury$location; S <- Injury$seatbelt
# NOTE(review): this reassigns fit2 (and G/L/S), clobbering the
# loglinear fit above -- consider a distinct name if both are needed.
fit2 <- glm(yes/(no+yes) ~ G + L + S, family=binomial, weights=no+yes, data=Injury)
summary(fit2)
|
37ab095fae2a0ac5bf022981576506458fd1e353
|
dfeba897e6b4e506a7acf4170f22ad44940db83d
|
/statistical power simulation 2/power_simulation_effect_size.R
|
9425342a694244f0b56aa596dfb4fcacf4ce0c64
|
[] |
no_license
|
yadevi/statistical-power-simulation-study
|
ad5e27d0f5459e466eebf1dd6b435610f8e214a4
|
0d6980196f13a4d0e9300f0bd261b2e56e4e5f15
|
refs/heads/master
| 2022-11-15T22:30:45.815410
| 2020-07-15T07:45:47
| 2020-07-15T07:45:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 964
|
r
|
power_simulation_effect_size.R
|
# Statistical power as a function of effect size, estimated by Monte
# Carlo simulation. single_simulation() is defined in the sourced
# utility files.
library(ggplot2)
source("utilities_effect_size1.R")
power <- c()
# Candidate effect sizes to sweep over.
e_pos <- seq(0,0.1,0.001)
for (effectsize in e_pos ) {
  # For each effect size, run N_sim trials and count how many come out
  # significant (single_simulation() appears to return 0/1 -- TODO
  # confirm against utilities_effect_size1.R).
  N_sim <- 1000
  sig_count <- 0
  e_cont <- 0.1
  e_neg <- 0.1
  for (sim in 1:N_sim) {
    result <- single_simulation(N = 150, e_cont, effectsize, e_neg)
    sig_count <- sig_count + result
  }
  # Empirical power = proportion of significant simulations.
  power<- append(power, sig_count/N_sim)
}
# Second utility version: one large-N run per effect size, producing the
# total-effect values plotted on the x-axis (semantics differ from the
# first version -- TODO confirm against utilities_effect_size2.R).
source("utilities_effect_size2.R")
effect_total <- c()
e_cont <- 0.1
e_pos <- seq(0,0.1,0.001)
e_neg <- 0.1
for (effectsize in e_pos ) {
  result <- single_simulation(N = 9999, e_cont, effectsize, e_neg)
  effect_total <- append(effect_total, result)
}
#create dataframe
powerdf<- data.frame (effect_total,power)
#create plot
ggplot (powerdf, aes( x = effect_total, y = power)) + geom_point (size=1) + labs ( x = "Effect Size", y = "Statistical Power") + theme_classic() + scale_y_continuous(breaks = seq(0,1,0.2)) + scale_x_continuous(breaks= seq(0,0.16,0.02))
|
46cc8224a56e5f126de4aa92986bde3df7e44d61
|
44c12bf5db12471edba464b652f9b2133a38e80e
|
/R/pseudoBulkDGE.R
|
f05ffd313e150ec93a76638627e28617770b45e5
|
[] |
no_license
|
MarioniLab/scran
|
af4d01246208a12d40fc01b4d7d49df6a5f59b9f
|
f238890d5642dfb8062cf0254e0257fd28c5f28d
|
refs/heads/master
| 2023-08-10T08:58:35.499754
| 2023-08-04T23:19:40
| 2023-08-04T23:30:29
| 100,610,090
| 43
| 31
| null | 2023-04-09T15:14:31
| 2017-08-17T14:06:03
|
R
|
UTF-8
|
R
| false
| false
| 15,769
|
r
|
pseudoBulkDGE.R
|
#' Quickly perform pseudo-bulk DE analyses
#'
#' A wrapper function around \pkg{edgeR}'s quasi-likelihood methods
#' to conveniently perform differential expression analyses on pseudo-bulk profiles,
#' allowing detection of cell type-specific changes between conditions in replicated studies.
#'
#' @param x A numeric matrix of counts where rows are genes and columns are pseudo-bulk profiles.
#' Alternatively, a SummarizedExperiment object containing such a matrix in its assays.
#' @param col.data A data.frame or \linkS4class{DataFrame} containing metadata for each column of \code{x}.
#' @param label A vector of factor of length equal to \code{ncol(x)},
#' specifying the cluster or cell type assignment for each column of \code{x}.
#' @param design A formula to be used to construct a design matrix from variables in \code{col.data}.
#' Alternatively, a function that accepts a data.frame with the same fields as \code{col.data} and returns a design matrix.
#' @param condition A vector or factor of length equal to \code{ncol(x)},
#' specifying the experimental condition for each column of \code{x}.
#' Only used for abundance-based filtering of genes.
#' @param coef String or character vector containing the coefficients to drop from the design matrix to form the null hypothesis.
#' Can also be an integer scalar or vector specifying the indices of the relevant columns.
#' @param contrast Numeric vector or matrix containing the contrast of interest.
#' Alternatively, a character vector to be passed to \code{\link{makeContrasts}} to create this numeric vector/matrix.
#' If specified, this takes precedence over \code{coef}.
#' @param lfc Numeric scalar specifying the log-fold change threshold to use in \code{\link{glmTreat}} or \code{\link{treat}}.
#' @param assay.type String or integer scalar specifying the assay to use from \code{x}.
#' @param include.intermediates Logical scalar indicating whether the intermediate \pkg{edgeR} objects should be returned.
#' @param row.data A \linkS4class{DataFrame} containing additional row metadata for each gene in \code{x},
#' to be included in each of the output DataFrames.
#' This should have the same number and order of rows as \code{x}.
#' @param ... For the generic, additional arguments to pass to individual methods.
#'
#' For the SummarizedExperiment method, additional arguments to pass to the ANY method.
#' @param method String specifying the DE analysis framework to use.
#' @param robust Logical scalar indicating whether robust empirical Bayes shrinkage should be performed.
#' @param qualities Logical scalar indicating whether quality weighting should be used when \code{method="voom"},
#' see \code{\link{voomWithQualityWeights}} for more details.
#' @param sorted Logical scalar indicating whether the output tables should be sorted by p-value.
#' @param sample Deprecated.
#'
#' @return
#' A \linkS4class{List} with one \linkS4class{DataFrame} of DE results per unique (non-failed) level of \code{cluster}.
#' This contains columns from \code{\link{topTags}} if \code{method="edgeR"} or \code{\link{topTable}} if \code{method="voom"}.
#' All DataFrames have row names equal to \code{rownames(x)}.
#'
#' The \code{\link{metadata}} of the List contains \code{failed},
#' a character vector with the names of the labels for which the comparison could not be performed - see Details.
#'
#' The \code{\link{metadata}} of the individual DataFrames contains \code{design}, the final design matrix for that label.
#' If \code{include.intermediates}, the \code{\link{metadata}} will also contain
#' \code{y}, the DGEList used for the analysis; and \code{fit}, the DGEGLM object after GLM fitting.
#'
#' @details
#' In replicated multi-condition scRNA-seq experiments,
#' we often have clusters comprised of cells from different samples of different experimental conditions.
#' It is often desirable to check for differential expression between conditions within each cluster,
#' allowing us to identify cell-type-specific responses to the experimental perturbation.
#'
#' Given a set of pseudo-bulk profiles (usually generated by \code{\link{sumCountsAcrossCells}}),
#' this function loops over the labels and uses \pkg{edgeR} or \code{\link{voom}} to detect DE genes between conditions.
#' The DE analysis for each label is largely the same as a standard analysis for bulk RNA-seq data,
#' using \code{design} and \code{coef} or \code{contrast} as described in the \pkg{edgeR} or \pkg{limma} user guides.
#' Generally speaking, \pkg{edgeR} handles low counts better via its count-based model
#' but \code{method="voom"} supports variable sample precision when \code{quality=TRUE}.
#'
#' Performing pseudo-bulk DGE enables us to reuse well-tested methods developed for bulk RNA-seq data analysis.
#' Each pseudo-bulk profile can be treated as an \emph{in silico} mimicry of a real bulk RNA-seq sample
#' (though in practice, it tends to be much more variable due to the lower numbers of cells).
#' This also models the relevant variability between experimental replicates (i.e., across samples)
#' rather than that between cells in the same sample, without resorting to expensive mixed-effects models.
#'
#' The DE analysis for each label is independent of that for any other label.
#' This aims to minimize problems due to differences in abundance and variance between labels,
#' at the cost of losing the ability to share information across labels.
#'
#' In some cases, it will be impossible to perform a DE analysis for a label.
#' The most obvious reason is if there are no residual degrees of freedom;
#' other explanations include impossible contrasts or a failure to construct an appropriate design matrix
#' (e.g., if a cell type only exists in one condition).
#'
#' Note that we assume that \code{x} has already been filtered to remove unstable pseudo-bulk profiles generated from few cells.
#'
#' @section Comments on abundance filtering:
#' For each label, abundance filtering is performed using \code{\link{filterByExpr}} prior to further analysis.
#' Genes that are filtered out will still show up in the DataFrame for that label, but with all statistics set to \code{NA}.
#' As this is done separately for each label, a different set of genes may be filtered out for each label,
#' which is largely to be expected if there is any label-specific expression.
#'
#' By default, the minimum group size for \code{filterByExpr} is determined using the design matrix.
#' However, this may not be optimal if the design matrix contains additional terms (e.g., blocking factors)
#' in which case it is not easy to determine the minimum size of the groups relevant to the comparison of interest.
#' To overcome this, users can supply \code{condition} to indicate the group to which each sample belongs,
#' which is used by \code{filterByExpr} to obtain a more appropriate minimum group size.
#'
#' @author Aaron Lun
#'
#' @references
#' Tung P-Y et al. (2017).
#' Batch effects and the effective design of single-cell gene expression studies.
#' \emph{Sci. Rep.} 7, 39921
#'
#' Lun ATL and Marioni JC (2017).
#' Overcoming confounding plate effects in differential expression analyses of single-cell RNA-seq data.
#' \emph{Biostatistics} 18, 451-464
#'
#' Crowell HL et al. (2019).
#' On the discovery of population-specific state transitions from multi-sample multi-condition single-cell RNA sequencing data.
#' \emph{biorXiv}
#'
#' @examples
#' set.seed(10000)
#' library(scuttle)
#' sce <- mockSCE(ncells=1000)
#' sce$samples <- gl(8, 125) # Pretending we have 8 samples.
#'
#' # Making up some clusters.
#' sce <- logNormCounts(sce)
#' clusters <- kmeans(t(logcounts(sce)), centers=3)$cluster
#'
#' # Creating a set of pseudo-bulk profiles:
#' info <- DataFrame(sample=sce$samples, cluster=clusters)
#' pseudo <- sumCountsAcrossCells(sce, info)
#'
#' # Making up an experimental design for our 8 samples.
#' pseudo$DRUG <- gl(2,4)[pseudo$sample]
#'
#' # DGE analysis:
#' out <- pseudoBulkDGE(pseudo,
#' label=pseudo$cluster,
#' condition=pseudo$DRUG,
#' design=~DRUG,
#' coef="DRUG2"
#' )
#' out[[1]]
#' metadata(out[[1]])$design
#' @seealso
#' \code{\link{sumCountsAcrossCells}}, to easily generate the pseudo-bulk count matrix.
#'
#' \code{\link{decideTestsPerLabel}}, to generate a summary of the DE results across all labels.
#'
#' \code{\link{pseudoBulkSpecific}}, to look for label-specific DE genes.
#'
#' \code{pbDS} from the \pkg{muscat} package, which uses a similar approach.
#' @name pseudoBulkDGE
NULL
# Shared entry point for the pseudoBulkDGE methods. Rejects deprecated or
# defunct arguments and then forwards everything to the internal worker,
# .pseudo_bulk_dge, with the DE framework resolved via match.arg().
.pseudo_bulk_master <- function(x, col.data, label, design, coef, contrast=NULL,
    condition=NULL, lfc=0, include.intermediates=TRUE, row.data=NULL, sorted=FALSE,
    method=c("edgeR", "voom"), qualities=TRUE, robust=TRUE, sample=NULL)
{
    # 'sample=' was accepted historically; warn and ignore it.
    if (!is.null(sample)) {
        .Deprecated(msg="'sample=' is deprecated and will be ignored")
    }

    # Pre-built design matrices are no longer supported at all.
    if (is.matrix(design)) {
        .Defunct(msg="matrix 'design=' is defunct, use a formula or function instead")
    }

    chosen.method <- match.arg(method)
    .pseudo_bulk_dge(
        x=x,
        col.data=col.data,
        label=label,
        condition=condition,
        design=design,
        coef=coef,
        contrast=contrast,
        lfc=lfc,
        row.data=row.data,
        sorted=sorted,
        include.intermediates=include.intermediates,
        method=chosen.method,
        qualities=qualities,
        robust=robust
    )
}
#' @importFrom edgeR DGEList
#' @importFrom S4Vectors DataFrame SimpleList metadata metadata<-
# Internal worker: runs one independent DE analysis per label, dispatching to
# the edgeR or voom backend. Labels whose design matrix cannot be built, or
# whose backend returns NULL (e.g. no residual d.f.), are skipped and their
# names recorded in the 'failed' metadata field of the returned List.
.pseudo_bulk_dge <- function(x, col.data, label, design, coef, contrast=NULL,
    condition=NULL, lfc=0, null.lfc.list=NULL, row.data=NULL, sorted=FALSE, include.intermediates=FALSE,
    method=c("edgeR", "voom"), qualities=TRUE, robust=TRUE)
{
    de.results <- list()
    failed <- character(0)
    label <- as.character(label)
    method <- match.arg(method)

    # Avoid requiring 'coef' if 'contrast' is specified.
    if (!is.null(contrast)) {
        coef <- NULL
    }

    for (i in sort(unique(label))) {
        # Subset the pseudo-bulk profiles and their metadata to this label.
        chosen <- i==label
        curx <- x[,chosen,drop=FALSE]
        curdata <- col.data[chosen,,drop=FALSE]
        y <- DGEList(curx, samples=as.data.frame(curdata))
        curcond <- condition[chosen]

        # Build the per-label design matrix; 'design' may be a formula or a
        # function of the column metadata. A failure marks this label as failed.
        curdesign <- try({
            if (is.function(design)) {
                design(curdata)
            } else {
                model.matrix(design, data=curdata)
            }
        }, silent=TRUE)

        if (is(curdesign, "try-error")) {
            failed <- c(failed, i)
            next
        } else {
            args <- list(y, row.names=rownames(x), curdesign=curdesign, curcond=curcond,
                coef=coef, contrast=contrast, lfc=lfc, null.lfc=null.lfc.list[[i]],
                robust=robust, include.intermediates=include.intermediates)

            # The two backends name their p-value column differently.
            if (method=="edgeR") {
                stuff <- do.call(.pseudo_bulk_edgeR, args)
                pval.field <- "PValue"
            } else {
                stuff <- do.call(.pseudo_bulk_voom, c(args, list(qualities=qualities)))
                pval.field <- "p.value"
            }

            # NULL signals that the backend could not perform the comparison.
            if (is.null(stuff)) {
                failed <- c(failed, i)
                next
            }
        }

        if (!is.null(row.data)) {
            # Adding stuff[,0] to preserve the DF's metadata and row names.
            stuff <- cbind(stuff[,0], row.data, stuff)
        }

        if (sorted) {
            o <- order(stuff[,pval.field])
            stuff <- stuff[o,,drop=FALSE]
        }

        de.results[[i]] <- stuff
    }

    output <- SimpleList(de.results)
    metadata(output)$failed <- failed
    output
}
#' @importFrom S4Vectors DataFrame metadata metadata<-
#' @importFrom edgeR estimateDisp glmQLFit glmQLFTest getOffset scaleOffset
#' calcNormFactors filterByExpr topTags glmLRT glmFit glmTreat
#' @importFrom limma makeContrasts
# edgeR quasi-likelihood backend for one label. Returns a DataFrame with one
# row per gene of the input (abundance-filtered genes padded with NA rows),
# or NULL when the design has no residual d.f. or is rank-deficient.
.pseudo_bulk_edgeR <- function(y, row.names, curdesign, curcond, coef, contrast,
    lfc, null.lfc, include.intermediates, robust=TRUE)
{
    ngenes <- nrow(y)

    # Abundance filtering; 'curcond' supplies the groups used by filterByExpr
    # to determine the minimum group size.
    gkeep <- filterByExpr(y, design=curdesign, group=curcond)
    y <- y[gkeep,]
    y <- calcNormFactors(y)

    # Bail out if there are no residual d.f. (rank == number of samples)
    # or the design matrix is not of full column rank.
    rank <- qr(curdesign)$rank
    if (rank == nrow(curdesign) || rank < ncol(curdesign)) {
        return(NULL)
    }

    if (is.character(contrast)) {
        contrast <- makeContrasts(contrasts=contrast, levels=curdesign)
    }

    # Optional per-gene null log-fold changes are absorbed into the offsets.
    # Division by log2(exp(1)) converts log2 values to the natural-log scale
    # expected by edgeR's offsets.
    lfc.out <- .compute_offsets_by_lfc(design=curdesign, coef=coef,
        contrast=contrast, filtered=gkeep, null.lfc=null.lfc)
    if (!is.null(lfc.out)) {
        offsets <- t(t(lfc.out)/log2(exp(1)) + getOffset(y))
        y <- scaleOffset(y, offsets)
    }

    y <- estimateDisp(y, curdesign)
    fit <- glmQLFit(y, curdesign, robust=robust)

    # glmTreat tests against a log-fold-change threshold when lfc != 0.
    if (lfc==0) {
        res <- glmQLFTest(fit, coef=coef, contrast=contrast)
    } else {
        res <- glmTreat(fit, lfc=lfc, coef=coef, contrast=contrast)
    }

    tab <- topTags(res, n=Inf, sort.by="none")

    # Expand back to the full gene set; filtered-out genes become NA rows.
    expander <- match(seq_len(ngenes), which(gkeep))
    tab <- DataFrame(tab$table[expander,,drop=FALSE])
    rownames(tab) <- row.names

    metadata(tab)$design <- curdesign
    if (include.intermediates) {
        metadata(tab)$y <- y
        metadata(tab)$fit <- fit
    }
    tab
}
#' @importFrom limma contrastAsCoef
# Builds a genes-by-samples matrix of log2 offsets encoding per-gene null
# log-fold changes, or NULL when no non-zero null LFCs were supplied.
.compute_offsets_by_lfc <- function(design, coef, contrast, filtered, null.lfc) {
    # Nothing to do when no (non-zero) per-gene null log-fold changes exist.
    if (is.null(null.lfc) || all(null.lfc == 0)) {
        return(NULL)
    }

    # A contrast is reparametrized into a single coefficient of a new design.
    if (!is.null(contrast)) {
        reparam <- contrastAsCoef(design, contrast)
        design <- reparam$design
        coef <- reparam$coef
    }
    stopifnot(length(coef) == 1)

    # Recycle to one value per input gene, then retain only those genes that
    # survived abundance filtering.
    per.gene <- rep(null.lfc, length.out=length(filtered))[filtered]

    # Offset matrix: retained genes in rows, samples in columns.
    outer(per.gene, design[, coef])
}
#' @importFrom S4Vectors DataFrame metadata metadata<-
#' @importFrom edgeR calcNormFactors filterByExpr
#' @importFrom limma voom voomWithQualityWeights lmFit
#' contrasts.fit eBayes treat topTable makeContrasts
# limma-voom backend for one label; mirrors .pseudo_bulk_edgeR but models the
# mean-variance trend with precision weights. Returns a DataFrame with one
# row per input gene (filtered genes padded with NA), or NULL when the design
# has no residual d.f. or is rank-deficient.
.pseudo_bulk_voom <- function(y, row.names, curdesign, curcond, coef, contrast,
    lfc, null.lfc, include.intermediates, qualities=TRUE, robust=TRUE)
{
    ngenes <- nrow(y)

    # Abundance filtering and library-size normalization, as for edgeR.
    gkeep <- filterByExpr(y, design=curdesign, group=curcond)
    y <- y[gkeep,]
    y <- calcNormFactors(y)

    # Bail out on designs without residual d.f. or of deficient rank.
    rank <- qr(curdesign)$rank
    if (rank == nrow(curdesign) || rank < ncol(curdesign)) {
        return(NULL)
    }

    # Optionally estimate per-sample quality weights alongside voom weights.
    if (qualities) {
        v <- voomWithQualityWeights(y, curdesign)
    } else {
        v <- voom(y, curdesign)
    }

    if (is.character(contrast)) {
        contrast <- makeContrasts(contrasts=contrast, levels=curdesign)
    }

    # Per-gene null log-fold changes are subtracted directly from the
    # log-expression values (voom's E slot is on the log2 scale).
    lfc.out <- .compute_offsets_by_lfc(design=curdesign, coef=coef,
        contrast=contrast, filtered=gkeep, null.lfc=null.lfc)
    if (!is.null(lfc.out)) {
        v$E <- v$E - lfc.out
    }

    fit <- lmFit(v)
    if (!is.null(contrast)) {
        # After contrasts.fit, the contrast is the (only) coefficient to test.
        fit <- contrasts.fit(fit, contrast)
        coef <- 1
    }

    # treat() tests against a log-fold-change threshold when lfc != 0.
    if (lfc==0) {
        res <- eBayes(fit, robust=robust)
    } else {
        res <- treat(fit, lfc=lfc, robust=robust)
    }

    tab <- topTable(res, coef=coef, number=Inf, sort.by="none")

    # Expand back to all genes; filtered-out genes become NA rows.
    expander <- match(seq_len(ngenes), which(gkeep))
    tab <- DataFrame(tab[expander,,drop=FALSE])
    rownames(tab) <- row.names

    metadata(tab)$design <- curdesign
    if (include.intermediates) {
        metadata(tab)$y <- y
        metadata(tab)$v <- v
        metadata(tab)$fit <- fit
    }
    tab
}
#' @export
#' @rdname pseudoBulkDGE
setGeneric("pseudoBulkDGE", function(x, ...) standardGeneric("pseudoBulkDGE"))

#' @export
#' @rdname pseudoBulkDGE
# Matrix-like input: dispatch straight to the shared worker.
setMethod("pseudoBulkDGE", "ANY", .pseudo_bulk_master)

#' @export
#' @rdname pseudoBulkDGE
#' @importFrom SummarizedExperiment assay colData
# SummarizedExperiment input: extract the requested assay and (by default)
# the object's own column metadata, then defer to the shared worker.
setMethod("pseudoBulkDGE", "SummarizedExperiment", function(x, col.data=colData(x), ..., assay.type=1) {
    .pseudo_bulk_master(assay(x, assay.type), col.data=col.data, ...)
})
|
f831b82f43e67bd7c528177e3b8bad0def448db5
|
549e1b6041559223051cb0baedc56f2650ad0eb6
|
/Web-приложения/продажи_зерна/global.R
|
0745557e3d6ad64417c72f2184c6d2be3dd148d6
|
[] |
no_license
|
nickolas-black/R
|
170e09d976c0268863c52b90ab032620199db12f
|
31cdbe2403747a30b66faecc8aa0139b4cb9ee43
|
refs/heads/main
| 2023-09-03T19:24:59.328415
| 2021-10-31T19:43:38
| 2021-10-31T19:43:38
| 418,191,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
global.R
|
library(shiny)
library(shinydashboard)
library(tidyverse)
library(readxl)
library(plotly)
library(DT)
library(rhandsontable)
library(shinyalert)
positions <- read_excel("D:/данные/разработка/R/приложения/web_приложение/les8/Grain_data.xlsx", sheet = "Positions")
costs <- read_excel("D:/данные/разработка/R/приложения/web_приложение/les8/Grain_data.xlsx", sheet = "SiloCosts")
prices <- positions %>% group_by(Commodity) %>% summarise(Price_for_MT = mean(Price_for_MT))
# Helper for showing an informational popup dialog with less boilerplate.
#
# title    - dialog title (default kept in Russian, as in the original UI).
# text     - dialog body text.
# callback - function invoked when the dialog is closed; receives the dialog's
#            return value. Previously this argument was accepted but silently
#            ignored; it is now forwarded via shinyalert's `callbackR`.
infoDialog <- function (title = "Информация", text = "Текст информации", callback = function(x) { message(x) }) {
  shinyalert(
    title = title,
    text = text,
    closeOnEsc = TRUE,
    closeOnClickOutside = FALSE,
    html = FALSE,
    type = "info",
    showConfirmButton = TRUE,
    showCancelButton = FALSE,
    confirmButtonText = "OK",
    confirmButtonCol = "#AEDEF4",
    timer = 0,
    imageUrl = "",
    animation = TRUE,
    callbackR = callback  # fix: the callback was never wired up before
  )
}
|
a34614f204a079a33740fe1ef7bfd9f56f6c8ff6
|
cd62802bd89f36565e2e53c924a06590bc2c50ff
|
/RBAsicComm.R
|
135bd09c9af605b5659b47be456a5150db9e197d
|
[] |
no_license
|
gogiraj/MUIT
|
5f194aac22eba85e14808bf50af0372a735b41fd
|
35b60adfdd07ef99ebb491fd61c67c94e4ff77b1
|
refs/heads/master
| 2021-01-24T09:18:19.639511
| 2016-09-30T10:03:52
| 2016-09-30T10:03:52
| 69,446,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,147
|
r
|
RBAsicComm.R
|
a = c(1,2,3,4)
a
nrows = c(2)
nrows
x = c(1,2,3)
x
x = c(1,2,3,4)
x
a <- c(1:20)
a
a[3]
b = c("a","b","C","d")
b
b[4]
d <- c(TRUE,FALSE,T,F,FALSE)
d
d[3]
## Basic
e <- c(1:6, rnum=2,rcol=3)
e
e <- matrix(1:6, rnum=2,rcol=3)
e
e <- matrix(c(1:6), rnum=2,rcol=3)
e
e <- matrix(c(1,2,3,4,5,6), rnum=2,rcol=3)
e
e <- matrix(c(1,2,3,4,5,6), nrow=2, ncol=3)
e
f <- matrix(c(1:6), nrow=2, ncol=3)
f
e[2,3]
f[,2]
# dimnames(e) = list(
# c("row1", "row2"), # row names
# c("col1", "col2", "col3")) # column names
dimnames(e) = list( c("row1", "row2"), c("col1", "col2", "col3"))
e
library <<- c(1,2,3)
col1 <- c("Dhiraj","Gagan","Nakul","Preety")
col2 <- c(10,15,20,25)
col3 = c("M","M","M","F")
mydata = data.frame(col1,col2,col3)
mydata
mydata
dim01=c("A1","A2")
dim02=c("B1","B2","B3")
dim03=c("C1","C2","C3","C4")
myarray = array(1:24, c(2,3,4), dimnames = list(dim01,dim02,dim03)
)
myarray
a+b
a+b
a-b
a+b
a+b
a
a <- 1
a
b <- 2
b
a-b
a+b
y = c(2,4,6)
z = c(4,6,8)
y+z
y-z
y*z
fx = matrix(c(6:12),nrow=2,ncol=3)
fx
fx = matrix(c(7:12),nrow=2,ncol=3)
fx
f <- matrix(c(1:6), nrow=2, ncol=3)
f
f+fx
f-fx
f*fx
li <- list(c(1,2,3),c("a","b"),c(TRUE,F,T))
li
mydata
Name <- c("Dhiraj","Gagan","Nakul","Preety")
Marks <- c(10,15,20,25)
Gender = c("M","M","M","F")
mydata = data.frame(Name,Marks,Gender)
mydata
Name <- c("Dhiraj","Gagan","Nakul","Preety")
Marks <- c(50,45,40,25)
Gender = c("M","M","M","F")
mydata = data.frame(Name,Marks,Gender)
mydata
x <- rnorm(5)
x
rnorm(5) -> opp
opp
# You can also assign a value opposite but its not recommended
rnorm(5) -> opp
opp
age <- c(1,3,5,2,11,9,3,9,12,3)
weight <- c(4.4,5.3,7.2,5.2,8.5,7.3,6.0,10.4,10.2,6.1)
# to take the mean
mean(weight)
age
sd(weight)
# Co-realtion
cor(age,weight)
plot(age,weight)
q()
sd(weight)
mydata1 = data.frame(age,weight)
mydata1
demo(graphics)
demo(Hershey)
plot(age,weight)
age <- c(1,3,5,2,11,9,3,9,12,3)
weight <- c(4.4,5.3,7.2,5.2,8.5,7.3,6.0,10.4,10.2,6.1)
plot(age,weight)
cor(age,weight)
getwd() ##List the current working directory.
setwd("mydirectory") ## Change the current working directory to mydirectory.
ls() ##List the objects in the current workspace.
rm(objectlist) ##Remove (delete) one or more objects.
help(options) ##Learn about available options.
options() ##View or set current options.
history(#) ##Display your last # commands (default = 25).
savehistory("myfile") ##Save the commands history to myfile ( default .Rhistory).
loadhistory("myfile") ##Reload a command's history (default = .Rhistory).
save.image("myfile") #Save the workspace to myfile (default = .RData).
save(objectlist,file="myfile") ## Save specific objects to a file.
load("myfile") ##Load a workspace into the current session (default=.RData).
patientID <- c(1, 2, 3, 4)
age <- c(25, 34, 28, 52)
diabetes <- c("Type1", "Type2", "Type1", "Type1")
status <- c("Poor", "Improved", "Excellent", "Poor")
diabetes <- factor(diabetes)
status <- factor(status, order=TRUE)
patientdata <- data.frame(patientID, age, diabetes, status)
dim(patientdata)
names(patientdata)
str(patientdata)
## Statistics Calculation
## Example 1
e1 <- c(200,208,190,210,320,120,180)
e1
mad(e1) ## Median Absolute Deviation
range(e1)
rang1 <- max(e1)-min(e1)
rang1
cvrange = (max(e1)-min(e1))/(max(e1) + min(e1)) ## Coefficient of range
cvrange
##example 2 . Creating a frequency table from data faithful
faithful
names(faithful)
head(faithful)
duration = faithful$eruptions
duration
range(duration)
seq(1.5,5.5,by=0.5)
breaks = seq(1.5,5.5,by=0.5)
durations.cut=cut(duration,breaks,right=FALSE)
duration.freq = table(durations.cut)
duration.freq
cbind(duration.freq)
plot(duration.freq)
hist(duration.freq)
pie(duration.freq)
boxplot(duration,names = "Test001")
summary(duration)
round(runif(20,1,10)) ## Random number genaration
## Example 3: quartiles, IQR, and the coefficient of quartile deviation
e3 <- c(200,210,208,160,220,250,300)
e3
quantile(e3)                  # 0/25/50/75/100% quantiles
quantile(e3,c(.32,.57,.98))   # arbitrary percentiles
quantile(e3,c(.25,.75))       # first and third quartiles
IQR(e3)                       # interquartile range
## Compute the IQR and its coefficient manually from Q1 and Q3
## (fix: a stray ")" on its own line was a syntax error here)
Q1 <- quantile(e3,c(.25))
Q3 <- quantile(e3,c(.75))
Q3 - Q1                       # IQR computed by hand
(Q3-Q1)/(Q3+Q1)               # coefficient of quartile deviation
                              # (fix: Q3-Q1/Q3+Q1 had wrong precedence)
## example 4
e4x <- c(10,11,12,13,14)
ef4 <- c(3,12,18,12,3)
e4 <- data.frame(e4x,ef4)
cummax(e4)
e4x <- c(rep(10,3),rep(11,12)) ## this is repeate the number
e4x
e4a <- c(10,10,10,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14)
table(e4a)
quantile(e4a,c(.25,.75))
IQR(e4a)
Q1 <- quantile(e4a,c(.25))
Q3 <- quantile(e4a,c(.75))
Q1
Q3
SemiQD = (Q3-Q1)/2
SemiQD
CoelQD = (Q3-Q1)/(Q3+Q1)
CoelQD
## Example 8
e8 <- c(100,150,80,90,160,200,140)
e8
fivenum(e8)
summary(e8)
mean(e8)
median(e8)
mad(e8) # Mean deviation about mean
sd(e8)
sdval <- sd(e8)
sdval
# To calculate Varience
Varience <- sdval * sdval
Varience
## Install package lsr to run mean devaition about median
# Mean deviation about Median
aad(e8)
|
0d7fc8e154417055db841bd04f12ce83dcc70ba7
|
8ba38d86bedfa5dd21e029d734716db892ff5e91
|
/Introduction to Monte Carlo Methods/Metropolis-Hastings Algorithm.r
|
2e05bb342750dc9c0a37df5ff253b028227d77a1
|
[] |
no_license
|
spradh/Financial-Computing-and-Analytics
|
6f0bf03bb5b7d4564d2b69420eaf1bb8380ca9de
|
050d35826a06dfa131535000fd71cf4a756b0e46
|
refs/heads/master
| 2020-07-06T08:52:46.046844
| 2017-05-19T18:19:25
| 2017-05-19T18:19:25
| 80,750,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,173
|
r
|
Metropolis-Hastings Algorithm.r
|
#############################
#############################
#Exercise 6.2
# Random walk: x[t+1] = x[t] + N(0,1). NOTE(review): this is a random walk,
# not a stationary N(0,1) sampler, so the histogram is not expected to match
# the overlaid dnorm curve -- presumably the point of the exercise; confirm
# against the textbook.
x=c(rnorm(1))
for(t in 1:10^4){
  x[t+1]=x[t]+rnorm(1)
}
hist(x,breaks=50,probability =TRUE)
lines(seq(-10,10,length.out=10000),dnorm(seq(-10,10,length.out=10000),
                                         0,1),ty='l')
# Same walk, 10^6 steps.
x=c(rnorm(1))
for(t in 1:10^6){
  x[t+1]=x[t]+rnorm(1)
}
hist(x,breaks=50,probability =TRUE)
lines(seq(-10,10,length.out=10000),dnorm(seq(-10,10,length.out=10000),
                                         0,1),ty='l')
#############################
#############################
#Exercise 6.4
#c. Metropolis{Hastings algorithm to generate 5000 G(4.85, 1) random variables.
# Candidates are drawn as Gamma(4, 4/4.85), independent of the current state.
# NOTE(review): rho below uses only the target-density ratio; an independence
# MH acceptance probability also involves the candidate-density ratio
# q(x)/q(y) -- TODO confirm against the exercise statement.
a=4; b=4/4.85
Nsim=5000
X=rep(rgamma(1,shape=a,rate=b),Nsim)
for (i in 2:Nsim){
  Y=rgamma(1,shape=a,rate=b)
  rho=dgamma(Y,shape=4.85,rate=1)/dgamma(X[i-1],shape=4.85,rate=1)
  # Accept Y with probability min(1, rho); otherwise keep the previous state.
  X[i]=X[i-1]+(Y-X[i-1])*(runif(1)<rho)
}
hist(X, breaks=100,probability = TRUE,main='Metropolis-Hastings')
# NOTE(review): f() is not defined in this file -- presumably the target
# density defined in an earlier section; define it before running this line.
lines(seq(0,20,length.out=1000),f(seq(0,20,length.out=1000)),col=2,lwd=2)
# d. Compare the algorithms using (i) their acceptance rates and (ii) the estimates
# of the mean and variance of the G(4:85; 1) along with their errors.
# (Hint:Examine the correlation in both samples.)
#Accept Reject
#(i)
# NOTE(review): 'y' is not defined in this file, and 'x' here is the random
# walk from Exercise 6.2 -- these lines depend on objects from another session.
length(x)/length(y)
#(ii)
mean(x)
var(x)
acf(x,main='Accept-Reject')#less coorelation
#Metropolis Hastings
#(i)
# Acceptance rate: fraction of iterations in which the chain moved.
# NOTE(review): 1:Nsim-1 parses as (1:Nsim)-1, i.e. 0:4999; harmless here
# because the i=0 comparison contributes nothing, but 1:(Nsim-1) was intended.
a=c()
for(i in 1:Nsim-1){
  a=c(a, X[i]!=X[i+1])
}
sum(a)/5000
#(ii)
mean(X)
var(X)
acf(X,main='Metropolis-Hastings')#more coorelation
par(mfrow=c(1,1))
# Trace plot of the final 301 states of the chain.
plot(4700:Nsim,X[4700:Nsim],ty='l')
#############################
#############################
#Exercise 6.10
#a
# Independence MH targeting t(4) with N(0,1) candidates.
# NOTE(review): as in 6.4c, the candidate-density ratio is omitted from rho.
Nsim=5000
X=rep(rnorm(1),Nsim)
for (i in 2:Nsim){
  Y=rnorm(1)
  rho=dt(Y,df=4)/dt(X[i-1],df=4)
  X[i]=X[i-1]+(Y-X[i-1])*(runif(1)<rho)
}
hist(X, breaks=100,probability = TRUE)
lines(seq(-3,3,length.out=1000),dt(seq(-3,3,length.out=1000),df=4),col=2,lwd=2)
#b
# Same target, heavier-tailed t(2) candidates.
Nsim=5000
X=rep(rt(1,df=2),Nsim)
for (i in 2:Nsim){
  Y=rt(1,df=2)
  rho=dt(Y,df=4)/dt(X[i-1],df=4)
  X[i]=X[i-1]+(Y-X[i-1])*(runif(1)<rho)
}
hist(X, breaks=100,probability = TRUE)
lines(seq(-3,3,length.out=1000),dt(seq(-3,3,length.out=1000),df=4),col=2,lwd=2)
|
a8d1c1146d4941081fff2812f1be4dd72db36e2c
|
9753d94f00a9db2bb5dea5a711bf3a976fa19fb7
|
/8. Machine Learning/17. Matrices.R
|
5baab38da58e5a8f2a094e01cf418f57ae5f9a38
|
[] |
no_license
|
praveen556/R_Practice
|
a3131068011685fd1a945bf75758297a993357a7
|
0fc21f69c0027b16e0bce17ad074eeaa182170bf
|
refs/heads/master
| 2023-02-18T09:11:49.982980
| 2021-01-17T16:07:28
| 2021-01-17T16:07:28
| 293,621,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
17. Matrices.R
|
library(tidyverse)
library(dslabs)
# Load the MNIST digit data only once per session (read_mnist() is slow).
if(!exists("mnist")) mnist <- read_mnist()
class(mnist$train$images)
# Work with the first 1000 training digits: pixel matrix and labels.
x <- mnist$train$images[1:1000,]
y <- mnist$train$labels[1:1000]
|
72356aff73901449be1a5bdf954e59cca0547b93
|
a01984c90baa149120fe852ea44888a23d9f6007
|
/R/updateStrata.r
|
3d69d7d23411e9e73de4f1e5ee467ee1a5f364a8
|
[] |
no_license
|
cran/SamplingStrata
|
b724c3b41d35582f96d9b67afbd907211ce973ea
|
9b1b6084fd9c9f55313ccfbf558e6e834579c80d
|
refs/heads/master
| 2022-11-24T04:14:05.098505
| 2022-11-15T20:50:06
| 2022-11-15T20:50:06
| 17,693,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,970
|
r
|
updateStrata.r
|
# ----------------------------------------------------
# Function for assigning new labels to initial strata
# and to report the structure of resulting
# aggregated strata
# Author: Giulio Barcaroli
# Date: 4 January 2012
# ----------------------------------------------------
# Takes the initial 'strata' frame and an optimization 'solution' whose first
# element is the per-row vector of aggregated stratum labels. Returns 'strata'
# augmented with AGGR_STRATUM, a LABEL column, and a STRATUM string built from
# the X variables; optionally writes newstrata.txt and strata_aggregation.txt.
# NOTE(review): the eval(parse(text=...)) blocks construct column selections
# for a variable number of X variables; fragile, but kept as-is here.
updateStrata <- function (strata, solution, writeFiles = FALSE)
{
    # if (writeFiles == TRUE) {
    # dire <- getwd()
    # direnew <- paste(dire,"/output",sep="")
    # if(!dir.exists(direnew)) dir.create(direnew)
    # setwd(direnew)
    # }
    colnames(strata) <- toupper(colnames(strata))
    newstrata <- strata
    # First element of 'solution' holds the aggregated stratum label per row.
    newstrata$AGGR_STRATUM <- solution[[1]]
    ndom <- length(levels(as.factor(strata$DOM1)))
    # Auxiliary variables are counted as columns whose (uppercased) names
    # contain an "X". NOTE(review): this also matches any other name with an
    # X in it -- presumed safe given the package's column conventions.
    nvarX <- length(grep("X", names(strata)))
    matstrata <- NULL
    # Dynamically assemble a DOM1/AGGR_STRATUM/X1..Xn summary frame and its
    # ordering when there is more than one X variable.
    stmt <- "matstrata <- as.data.frame(cbind(newstrata$DOM1,newstrata$AGGR_STRATUM,"
    stmt2 <- "colnames(matstrata) <- c('DOM1','AGGR_STRATUM',"
    stmt3 <- NULL
    if (nvarX > 1) {
        for (i in 1:(nvarX - 1)) {
            stmt <- paste(stmt, "newstrata$X", i, ",", sep = "")
            stmt2 <- paste(stmt2, "'X", i, "',", sep = "")
            stmt3 <- paste(stmt3, "matstrata$X", i, ",", sep = "")
        }
        stmt <- paste(stmt, "newstrata$X", nvarX, "), stringsAsFactors = TRUE)", sep = "")
        eval(parse(text = stmt))
        stmt2 <- paste(stmt2, "'X", nvarX, "')", sep = "")
        eval(parse(text = stmt2))
        stmt3 <- paste(stmt3, "matstrata$X", nvarX, sep = "")
        statement <- paste("matstrord <- matstrata[order(matstrata$DOM1,matstrata$AGGR_STRATUM,",
            stmt3, "),]", sep = "")
        eval(parse(text = statement))
    }
    # Single-X case handled directly, without dynamic code.
    if (nvarX == 1) {
        matstrata <- as.data.frame(cbind(newstrata$DOM1,newstrata$AGGR_STRATUM,newstrata$X1),stringsAsFactors = TRUE)
        colnames(matstrata) <- c('DOM1','AGGR_STRATUM','X1')
        matstrord <- matstrata[order(matstrata$DOM1, matstrata$AGGR_STRATUM,
            matstrata$X1), ]
    }
    # STRATUM is the X1 value itself, or the "*"-joined X values for nvarX > 1.
    if (nvarX == 1)
        newstrata$STRATUM <- newstrata$X1
    if (nvarX > 1) {
        stmt <- NULL
        stmt <- "newstrata$STRATUM <- paste("
        for (i in 1:(nvarX - 1)) {
            if (i > 0)
                stmt <- paste(stmt, "newstrata$X", i, ",", sep = "")
        }
        stmt <- paste(stmt, "newstrata$X", nvarX, ",sep='*')",
            sep = "")
        eval(parse(text = stmt))
    }
    # Rename the second-to-last column (the pre-existing stratum id) to LABEL.
    colnames(newstrata)[ncol(newstrata) - 1] <- c("LABEL")
    colnames(newstrata) <- toupper(colnames(newstrata))
    if (writeFiles == TRUE)
        write.table(newstrata, file = "newstrata.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
    if (writeFiles == TRUE)
        write.table(matstrord, file = "strata_aggregation.txt",
            sep = "\t", row.names = FALSE, col.names = TRUE,
            quote = FALSE)
    # if (writeFiles == TRUE) {
    # setwd(dire)
    # }
    return(newstrata)
}
|
225741f14679803a796a783dab32bcbfcb7024b4
|
3db305c9b6f9f791d2668f88e9f42c0cbfbaf4cf
|
/argosTrack/R/simTrack.R
|
169bc80f0b331bde8299c76ac3b42b52f655abbc
|
[] |
no_license
|
calbertsen/argosTrack
|
4789f170f0b53cf2afa83195c55d57c25d3bd591
|
d09d54082bcf03c555f3553ff444bb5dc2246b34
|
refs/heads/master
| 2022-09-02T05:37:29.760935
| 2020-11-25T12:59:55
| 2020-11-25T12:59:55
| 24,145,844
| 10
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,120
|
r
|
simTrack.R
|
##' Generic function to simulate tracks
##'
##' @param object Object to simulate from
##' @param n Number of replications
##' @param ... other parameters
##' @author Christoffer Moesgaard Albertsen
##' @seealso \code{\link{simTrack,Animal-method}}, \code{\link{simTrack,Measurement-method}}, \code{\link{simTrack,Movement-method}}
#' @export
setGeneric("simTrack",
function(object,n, ...)
standardGeneric("simTrack")
)
##' Simulate from a movement model
##'
##' @param object Movement reference class object implementing the model to simulate from
##' @param n Number of replications
##' @param x0 Initial values
##' @param ... not used
##' @return A 2 x number of time steps in object x n array of simulated values
##' @author Christoffer Moesgaard Albertsen
##' @seealso \code{\link{simTrack}}, \code{\link{simTrack,Animal-method}}, \code{\link{simTrack,Measurement-method}}
setMethod("simTrack", "Movement",
function(object,
n = 1,
x0 = object$mu[,1],
...){
X <- replicate(n,object$simulate(x0 = x0))
return(X)
}
)
##' Simulate from a measurement model with observation information
##'
##' @param object Measurement reference class object to simulate from
##' @param n Number of replications
##' @param observation Observation reference class object with time points and Argos location class information to use in the simulation
##' @param ... Not used
##' @return A 2 x number of time steps in observation x n array of simulated values
##' @author Christoffer Moesgaard Albertsen
##' @seealso \code{\link{simTrack}}, \code{\link{simTrack,Animal-method}}, \code{\link{simTrack,Movement-method}}
setMethod("simTrack", c("Measurement"),
function(object,
n = 1,
observation,
...){
if(!class(observation) == "Observation")
stop("Observation must be an Observation object.")
X <- replicate(n,object$simulate(observation))
return(X)
}
)
##' Simulate from an Animal state-space model
##'
##' @param object Animal reference class describing the state-space model to simulate from
##' @param n Number of replications
##' @param newObject Should a new Animal object be added to the returned matrix?
##' @param ... Not used
##' @return A (2 + newObject) x n matrix where the first row (X) contains lists with a matrix of simulated movement tracks, the second row (Y) contains lists with a matrix of simulated observations, and the third row (Animal - if present) contains lists with a new Animal object based on the simulated values.
##' @author Christoffer Moesgaard Albertsen
##' @seealso \code{\link{simTrack}}, \code{\link{simTrack,Measurement-method}}, \code{\link{simTrack,Movement-method}}
setMethod("simTrack", "Animal",
function(object,
n = 1,
newObject = TRUE,
...){
X <- replicate(n,object$simulate(newObject))
return(X)
}
)
|
c8c7633f9816d14204c902e05f4bc0bf7d174e78
|
ffd09e41f3309a0ac3eaf2adca34674e0920d1b8
|
/man/VennThemes.Rd
|
5a5b04e13fbe28121650232e16a24d03c1214b6b
|
[] |
no_license
|
leipzig/Vennerable
|
04b3329c21623176730232f08ffdd79f4515cdc2
|
43b1d2b4f84f85f9578337b92311bc7c35da7952
|
refs/heads/master
| 2021-01-17T23:15:48.982954
| 2015-12-15T17:22:06
| 2015-12-15T17:22:06
| 51,934,923
| 1
| 0
| null | 2016-02-17T16:00:07
| 2016-02-17T16:00:06
| null |
UTF-8
|
R
| false
| false
| 2,917
|
rd
|
VennThemes.Rd
|
\name{VennThemes}
\Rdversion{1.1}
\alias{VennThemes}
\title{
Create lists of graphical parameters for Venn diagrams
}
\description{
Given a \code{VennDrawing} object, which it consults to find the names of each of the sets and faces
in the drawing, returns a list suitable as the \code{gp} argument in a subsequent call
to the \code{VennDrawing} method for \code{plot}.
}
\usage{
VennThemes(drawing, colourAlgorithm, increasingLineWidth)
}
\arguments{
\item{drawing}{
An object of class \code{VennDrawing}
}
\item{colourAlgorithm}{
Missing or one of \code{signature},\code{binary},\code{sequential}.
}
\item{increasingLineWidth}{
Logical, defaul \code{FALSE}
}
}
\details{
Set boundary colours are taken from the \code{Set1} palette provided by the \code{\link[RColorBrewer]{RColorBrewer}} package.
If \code{colourAlgorithm="signature"}, face fill colours are taken from the \code{RColorBrewer} \code{YlOrRd}
palette based on the number of sets represented in the face, so eg all the faces corresponding to membership of a single set are pale yellow while the face corresponding to all the intersections is dark red.
If \code{colourAlgorithm="binary"}, faces are blue if they correspond to an odd number of intersections and white otherwise.
If \code{colourAlgorithm="sequential"}, each face is given a different colour from the
\code{RColorBrewer} \code{Set3} palette, although this is repeated if necessary if there are more faces than the length of this palette (9).
Different faces with the same signature will be given the same colour.
If not specified, \code{sequential} is used if there are less than 9 faces, otherwise
\code{signature}.
If \code{increasingLineWidth=TRUE}, each Set is given a different linewidth, with the last to be plotted given the thinnest width, to help in
visualising nonsimple Venn diagrams.
}
\value{
A list with four elements
\item{Face }{Named list of graphical parameters which will be applied to faces with corresponding names }
\item{FaceText }{Named list of graphical parameters which will be applied to annotation in faces with corresponding names }
\item{Set }{Named list of graphical parameters which will be applied to sets with corresponding names }
\item{SetText }{Named list of graphical parameters which will be applied to annotation in sets with corresponding names }
These are graphical parameters in the sense of the \code{grid} package.
}
\author{
Jonathan Swinton (jonathan@swintons.net)
}
\seealso{
See also \code{\link[RColorBrewer]{RColorBrewer}}
}
\examples{
# change the name of one set to red text and enlarge the other
C2 <- compute.Venn(Venn(n=2))
gp <- VennThemes(C2)
gp[["SetText"]][["Set1"]]$col <- "red";
gp[["SetText"]][["Set2"]]$cex <- 2;
plot(C2,gp=gp)
# use highlevel arguments
gp <- VennThemes(C2,colourAlgorithm="binary")
plot(C2,gp=gp)
gp <- VennThemes(C2,increasingLineWidth=TRUE)
plot(C2,gp=gp)
}
\keyword{ graphs}%
|
a57ffe003e62191761e73ea2e2ed0589620f9249
|
6770136ac69d0d12635b21bf3f9344a99fc0fb7e
|
/R/RandomForest/LNKN.R
|
c839b2c3e8cbbd731b98221c1ab7b3e0e1190940
|
[] |
no_license
|
christopherohit/Machine_Learning_From_Basic
|
7750513155aff0ccf9d5e770d463942074b40d50
|
b9549ea04328a9db48166e2f921850e747962461
|
refs/heads/main
| 2023-05-05T07:13:00.871139
| 2021-05-24T16:01:02
| 2021-05-24T16:01:02
| 368,719,556
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
LNKN.R
|
library(tidyverse)
library(foreign)  # fix: was require(); library() fails loudly when missing

# NOTE(review): this URL points to an ARFF file; read.csv() will mis-parse
# the ARFF header. foreign::read.arff() looks like the intended reader --
# confirm before switching, since downstream code may rely on this parsing.
df <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00277/ThoraricSurgery.arff")

# Assign Vietnamese column names (17 columns expected).
# fix: original used name(df) = ..., but no name() function exists in base R;
# names(df) <- ... is the correct form.
names(df) <- c("Chuẩn Đoán", "Dung_tích_thở",
               "Thê_tích_thở"
               ,"Zuborg", "Chỉ số đau", "Ho ra Máu", "Khó thở", "Ho", "Cơ thể yếu"
               ,"Giai đoạn khối u", "Phân suất tống máu", "Nhồi máu cơ tim",
               "bệnh động mạch ngoại vi", "Sử dụng thuốc lá", "Bệnh hen xuyễn"
               ,"Tuổi tác", "Sự sống còn")

# Set the outcome label. fix: after the rename above there is no 'Survival'
# column, so df$Survival returned NULL; recode the renamed survival column.
df$`Sự sống còn` <- df$`Sự sống còn` %>% recode_factor(.,'F' = "Die" , 'T' = "Survived")

# Derived ratio of the two respiratory-volume columns.
df$Benh_Phoi <- df$Thê_tích_thở/df$Dung_tích_thở

library(caret)
set.seed(123)
|
a306fe20f8448f0a8982f08c2d9625f8c39d2f0b
|
e699a2e61465544979109ae9660386d5157172cf
|
/ui.R
|
0fd54aec67ba80a93dd306b62145190000a25080
|
[] |
no_license
|
everxjj/myShinyProgram
|
5ab86dbb08f57817470fed80310536dcd1093387
|
1a59c74ee6730d2ea9f01375391fedba1807ba94
|
refs/heads/master
| 2021-01-10T12:08:42.503105
| 2016-04-02T12:17:26
| 2016-04-02T12:17:26
| 55,292,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 307
|
r
|
ui.R
|
# Shiny UI: a slider choosing how many rows of the iris data to view, plus
# a table ("mytable") and a plot ("irisPlot") rendered by the server.
library(shiny)

# Build the two panels separately, then assemble the page.
controls <- sidebarPanel(
  sliderInput("row", "Row number of data to view", min = 1, max = 150, value = 5)
)
content <- mainPanel(
  tableOutput("mytable"),
  plotOutput("irisPlot")
)

shinyUI(fluidPage(
  titlePanel("myApp1-iris data"),  # Application title
  sidebarLayout(controls, content)
))
|
be280b3f78f51102ea2dc6db5ec6be0eb7747002
|
8fb0510fa0c561c80d90b97feff4223a371b7959
|
/R/extract-vclMatrix.R
|
04661e8e017e44367c1a07af3f6ef6e898c557cf
|
[] |
no_license
|
cran/gpuR
|
081c395da159797082a8521fb565edaaa3fe6088
|
189286b0cdbd898ffd1de13f4c749232a1d17953
|
refs/heads/master
| 2021-05-04T11:23:09.158681
| 2019-05-29T20:10:12
| 2019-05-29T20:10:12
| 48,081,147
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,278
|
r
|
extract-vclMatrix.R
|
#' @rdname extract-methods
#' @export
setMethod("[",
          signature(x = "vclMatrix", i = "missing", j = "missing", drop = "missing"),
          function(x, i, j, drop) {
            # x[] with no indices: copy the entire device (ViennaCL) matrix
            # back to host memory as a base R matrix.  The integer code passed
            # down selects the C++ template instantiation for the element type.
            Rmat <- switch(typeof(x),
                           "integer" = VCLtoMatSEXP(x@address, 4L),
                           "float" = VCLtoMatSEXP(x@address, 6L),
                           "double" = VCLtoMatSEXP(x@address, 8L),
                           "fcomplex" = VCLtoMatSEXP(x@address, 10L),
                           "dcomplex" = VCLtoMatSEXP(x@address, 12L),
                           stop("unsupported matrix type")
            )
            return(Rmat)
          })
#' @rdname extract-methods
#' @export
setMethod("[",
          signature(x = "vclMatrix", i = "missing", j = "numeric", drop="missing"),
          function(x, i, j, drop) {
            # x[, j]: copy the requested column(s) from the device.  Complex
            # element types are not supported for column extraction.
            type <- switch(typeof(x),
                           "integer" = 4L,
                           "float" = 6L,
                           "double" = 8L,
                           stop("type not recognized")
            )
            if(length(j) > 1){
              # Multiple columns: one device round-trip per column into a
              # preallocated host matrix.
              out <- matrix(nrow= nrow(x), ncol = length(j))
              for(c in seq_along(j)){
                # .context_index is 1-based in R, 0-based on the C++ side.
                out[,c] <- vclGetCol(x@address, j[c], type, x@.context_index - 1)
              }
              return(out)
            }else{
              return(vclGetCol(x@address, j, type, x@.context_index - 1))
            }
          })
#' @rdname extract-methods
#' @export
setMethod("[",
          signature(x = "vclMatrix", i = "numeric", j = "missing", drop="missing"),
          function(x, i, j, ..., drop) {
            # x[i, ] (row extraction) or x[i] (linear, column-major indexing).
            # nargs() distinguishes the two call forms: x[i, ] passes an empty
            # argument for j (nargs() == 3), x[i] does not.
            #
            # Bug fix: the original bounds check used tail(i, 1) and therefore
            # missed out-of-bounds indices whenever `i` was not sorted in
            # ascending order; check the maximum index instead.
            if(max(i) > length(x)){
              stop("Index out of bounds")
            }
            type <- switch(typeof(x),
                           "integer" = 4L,
                           "float" = 6L,
                           "double" = 8L,
                           stop("type not recognized")
            )
            if(nargs() == 3){
              # x[i, ]: fetch whole rows from the device.
              if(length(i) > 1){
                out <- matrix(nrow = length(i), ncol = ncol(x))
                for(r in seq_along(i)){
                  # .context_index is 1-based in R, 0-based on the C++ side.
                  out[r,] <- vclGetRow(x@address, i[r], type, x@.context_index - 1)
                }
                return(out)
              }else{
                return(vclGetRow(x@address, i, type, x@.context_index - 1))
              }
            }else{
              # x[i]: translate each linear (column-major) index into a
              # (row, column) pair and fetch single elements.
              #
              # Bug fix: the original carried col_idx over between iterations
              # and only recomputed it when i[elem] > nrow(x), so a small
              # index following a large one read from the wrong column.
              output <- vector(if(type == 4L) "integer" else "numeric", length(i))
              nr <- nrow(x)
              for(elem in seq_along(i)){
                col_idx <- ceiling(i[elem] / nr)
                row_idx <- i[elem] - (nr * (col_idx - 1))
                output[elem] <- vclGetElement(x@address, row_idx, col_idx, type)
              }
              return(output)
            }
          })
#' @rdname extract-methods
#' @export
setMethod("[",
          signature(x = "vclMatrix", i = "numeric", j = "numeric", drop="missing"),
          function(x, i, j, drop) {
            # x[i, j]: fetch the cartesian product of row indices i and column
            # indices j from the device, element by element.
            type <- switch(typeof(x),
                           "integer" = 4L,
                           "float" = 6L,
                           "double" = 8L,
                           stop("type not recognized")
            )
            if(length(i) > 1 || length(j) > 1){
              # Submatrix: one device round-trip per element (simple, but slow
              # for large selections).
              out <- matrix(nrow = length(i), ncol = length(j))
              for(r in seq_along(i)){
                for(c in seq_along(j)){
                  out[r,c] <- vclGetElement(x@address, i[r], j[c], type)
                }
              }
              return(out)
            }else{
              return(vclGetElement(x@address, i, j, type))
            }
          })
#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "numeric", value = "numeric"),
          function(x, i, j, value) {
            # x[, j] <- value: replace a device column in place.  A
            # full-length vector replaces the column element-wise; a single
            # scalar fills the whole column on the device.
            if(j > ncol(x)){
              stop("column index exceeds number of columns")
            }
            if(length(value) > 1){
              if(length(value) != nrow(x)){
                stop("number of items to replace is not a multiple of replacement length")
              }
              switch(typeof(x),
                     "float" = vclSetCol(x@address, j, value, 6L),
                     "double" = vclSetCol(x@address, j, value, 8L),
                     stop("unsupported matrix type")
              )
            }else{
              # NOTE(review): vclFillCol receives the 1-based context index
              # (no `- 1`), unlike the getter methods -- confirm this
              # asymmetry is intentional on the C++ side.
              switch(typeof(x),
                     "float" = vclFillCol(x@address, j, value, x@.context_index, 6L),
                     "double" = vclFillCol(x@address, j, value, x@.context_index, 8L),
                     stop("unsupported matrix type")
              )
            }
            # The modification happens on the device; x is returned to satisfy
            # R's replacement-function contract.
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "ivclMatrix", i = "missing", j = "numeric", value = "integer"),
          function(x, i, j, value) {
            # Integer-matrix variant of x[, j] <- value.  No scalar-fill path
            # here: value must match the column length exactly.
            if(length(value) != nrow(x)){
              stop("number of items to replace is not a multiple of replacement length")
            }
            if(j > ncol(x)){
              stop("column index exceeds number of columns")
            }
            switch(typeof(x),
                   "integer" = vclSetCol(x@address, j, value, 4L),
                   stop("unsupported matrix type")
            )
            return(x)
          })
#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "numeric", j = "missing", value = "numeric"),
          function(x, i, j, ..., value) {
            # x[i, ] <- value (row replacement) or x[i] <- value (linear,
            # column-major assignment); nargs() distinguishes the call forms
            # (x[i, ] <- v passes an empty j argument, giving nargs() == 4).
            type <- switch(typeof(x),
                           "integer" = 4L,
                           "float" = 6L,
                           "double" = 8L,
                           stop("type not recognized")
            )
            if(nargs() == 4){
              # x[i, ] <- value: replace a whole row on the device.
              assert_all_are_in_closed_range(i, lower = 1, upper = nrow(x))
              if(length(value) != ncol(x)){
                stop("number of items to replace is not a multiple of replacement length")
              }
              vclSetRow(x@address, i, value, type)
            }else{
              # x[i] <- value: element-wise assignment by linear index.
              # Bug fix: the original asserted i <= nrow(x) before branching,
              # which rejected every linear index beyond the first column and
              # made the column-translation code below unreachable; linear
              # indices range over the whole matrix.
              assert_all_are_in_closed_range(i, lower = 1, upper = length(x))
              if(length(value) != length(i)){
                if(length(value) == 1){
                  # Scalar value is recycled across all target positions.
                  value <- rep(value, length(i))
                }else{
                  stop("number of items to replace is not a multiple of replacement length")
                }
              }
              nr <- nrow(x)
              for(elem in seq_along(i)){
                # Bug fix: the original carried col_idx over between
                # iterations and only recomputed it when i[elem] > nr, so a
                # small index following a large one wrote the wrong column.
                col_idx <- ceiling(i[elem] / nr)
                row_idx <- i[elem] - (nr * (col_idx - 1))
                vclSetElement(x@address, row_idx, col_idx, value[elem], type)
              }
            }
            return(x)
          })
#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "ivclMatrix", i = "numeric", j = "missing", value = "integer"),
          function(x, i, j, value) {
            # x[i, ] <- value for integer matrices: replace one device row.
            if(length(value) != ncol(x)){
              stop("number of items to replace is not a multiple of replacement length")
            }
            if(i > nrow(x)){
              stop("row index exceeds number of rows")
            }
            switch(typeof(x),
                   "integer" = vclSetRow(x@address, i, value, 4L),
                   stop("unsupported matrix type")
            )
            return(x)
          })
#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "numeric", j = "numeric", value = "numeric"),
          function(x, i, j, value) {
            # x[i, j] <- value: single-element assignment on the device.
            # assertive-style checks: scalar value, indices within bounds.
            assert_all_are_in_closed_range(i, lower = 1, upper=nrow(x))
            assert_all_are_in_closed_range(j, lower = 1, upper=ncol(x))
            assert_is_scalar(value)
            switch(typeof(x),
                   "float" = vclSetElement(x@address, i, j, value, 6L),
                   "double" = vclSetElement(x@address, i, j, value, 8L),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "ivclMatrix", i = "numeric", j = "numeric", value = "integer"),
          function(x, i, j, value) {
            # Integer-matrix variant of the single-element assignment above.
            assert_all_are_in_closed_range(i, lower = 1, upper=nrow(x))
            assert_all_are_in_closed_range(j, lower = 1, upper=ncol(x))
            assert_is_scalar(value)
            switch(typeof(x),
                   "integer" = vclSetElement(x@address, i, j, value, 4L),
                   stop("unsupported matrix type")
            )
            return(x)
          })
#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "missing", value = "matrix"),
          function(x, i, j, value) {
            # x[] <- value: overwrite the whole device matrix from an R matrix.
            assert_is_matrix(value)
            switch(typeof(x),
                   "integer" = vclSetMatrix(x@address, value, 4L, x@.context_index - 1),
                   "float" = vclSetMatrix(x@address, value, 6L, x@.context_index - 1),
                   "double" = vclSetMatrix(x@address, value, 8L, x@.context_index - 1),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "missing", value = "vclMatrix"),
          function(x, i, j, value) {
            # x[] <- value: device-to-device copy from another vclMatrix.
            switch(typeof(x),
                   "integer" = vclSetVCLMatrix(x@address, value@address, 4L, x@.context_index - 1),
                   "float" = vclSetVCLMatrix(x@address, value@address, 6L, x@.context_index - 1),
                   "double" = vclSetVCLMatrix(x@address, value@address, 8L, x@.context_index - 1),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "numeric", value = "vclMatrix"),
          function(x, i, j, value) {
            # x[, j] <- value: device-to-device copy into a block of columns.
            # NOTE(review): only head(j, 1) and tail(j, 1) are used, so `j` is
            # implicitly assumed to be a contiguous ascending range; gaps or
            # unsorted indices are silently treated as head..tail -- confirm.
            start = head(j, 1) - 1
            end = tail(j, 1)
            switch(typeof(x),
                   "integer" = vclMatSetVCLCols(x@address, value@address, start, end, 4L, x@.context_index - 1),
                   "float" = vclMatSetVCLCols(x@address, value@address, start, end, 6L, x@.context_index - 1),
                   "double" = vclMatSetVCLCols(x@address, value@address, start, end, 8L, x@.context_index - 1),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "missing", value = "numeric"),
          function(x, i, j, value) {
            # x[] <- scalar: fill every element of the device matrix.
            assert_is_scalar(value)
            switch(typeof(x),
                   "integer" = vclFillVCLMatrix(x@address, value, 4L, x@.context_index - 1),
                   "float" = vclFillVCLMatrix(x@address, value, 6L, x@.context_index - 1),
                   "double" = vclFillVCLMatrix(x@address, value, 8L, x@.context_index - 1),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "missing", value = "vclVector"),
          function(x, i, j, value) {
            # x[] <- vclVector: assign a device vector across the matrix.
            switch(typeof(x),
                   "integer" = assignVectorToMat(x@address, value@address, 4L),
                   "float" = assignVectorToMat(x@address, value@address, 6L),
                   "double" = assignVectorToMat(x@address, value@address, 8L),
                   stop("unsupported matrix type")
            )
            return(x)
          })

#' @rdname extract-methods
#' @export
setMethod("[<-",
          signature(x = "vclMatrix", i = "missing", j = "numeric", value = "vclVector"),
          function(x, i, j, value) {
            # x[, j] <- vclVector: assign a device vector into one column.
            # The column index is converted to 0-based for the C++ side.
            switch(typeof(x),
                   "integer" = assignVectorToCol(x@address, value@address, j-1, 4L),
                   "float" = assignVectorToCol(x@address, value@address, j-1, 6L),
                   "double" = assignVectorToCol(x@address, value@address, j-1, 8L),
                   stop("unsupported matrix type")
            )
            return(x)
          })
|
e3e70854a90744b360888eaab10c8fea1bdac007
|
d915d4e95357a49ebc32392c87f652b408e46e91
|
/Figure6/Figure6_v3.R
|
f73775e2354b215e81b1e3bfa85cd8ac602ffb81
|
[] |
no_license
|
DEST-bio/data-paper
|
ae1ef142ea9f29a1d03db986d5cb3dd6f7026142
|
919da1cb9e95f9fa1f0d613774ac2cb97a316155
|
refs/heads/main
| 2023-08-20T15:55:23.947717
| 2021-10-12T07:45:43
| 2021-10-12T07:45:43
| 325,566,988
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,770
|
r
|
Figure6_v3.R
|
# Figure 6 (script header said "Figure8") - Margot
# Per-continent boxplots of population-genetic statistics (nucleotide
# diversity, Watterson's theta, Tajima's D, pN/pS) for the PoolSNP and
# SNAPE SNP-calling pipelines, assembled into one multi-panel PDF.
library(ggplot2)
library("gridExtra")

dat <- read.table(file="/Users/martinkapun/Documents/GitHub/data-paper/Figure6/Figure6_data.txt",
                  header=TRUE, fill=TRUE, sep="\t")
dat_PoolSNP <- dat[dat$FILE=="PoolSNP",]
dat_SNAPE <- dat[dat$FILE=="SNAPE",]

# PoolSNP plots (tags A-D).  The stray leading comma in the original
# geom_boxplot(, ...) calls was dropped; it only passed an empty mapping.
plot1 <- ggplot(dat_PoolSNP, aes(x=Continent, y=Pi)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Nucleotide diversity", tag = "A") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("Africa","North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.0037, 0.007),breaks=seq(0.004, 0.007, 0.001))
plot2 <- ggplot(dat_PoolSNP, aes(x=Continent, y=Watterson)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Theta Watterson", tag = "C") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("Africa","North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.0026, 0.0068),breaks=seq(0.003, 0.006, 0.001))
plot3 <- ggplot(dat_PoolSNP, aes(x=Continent, y=Tajima_D)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Tajima's D", tag = "B") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("Africa","North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(-0.285, 1.65),breaks=seq(0, 1.5, 0.5))
plot4 <- ggplot(dat_PoolSNP, aes(x=Continent, y=pn_ps)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y =expression('p'[N]*'/p'[S]), tag = "D") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("Africa","North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.23, 0.53),breaks=seq(0.3, 0.5, 0.1))

# SNAPE plots (no Africa samples for this pipeline).
plot5 <- ggplot(dat_SNAPE, aes(x=Continent, y=Pi)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Nucleotide diversity", tag = "") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.0037, 0.007),breaks=seq(0.004, 0.007, 0.001))
plot6 <- ggplot(dat_SNAPE, aes(x=Continent, y=Watterson)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Theta Watterson", tag = "") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.0026, 0.0068),breaks=seq(0.003, 0.006, 0.001))
plot7 <- ggplot(dat_SNAPE, aes(x=Continent, y=Tajima_D)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y = "Tajima's D", tag = "") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(-0.285, 1.65),breaks=seq(0, 1.5, 0.5))
plot8 <- ggplot(dat_SNAPE, aes(x=Continent, y=pn_ps)) +
  geom_boxplot(show.legend = FALSE) +
  labs(y =expression('p'[N]*'/p'[S]), tag = "") +
  theme_bw() + theme(axis.title.x=element_blank()) + facet_grid(~ FILE) +
  scale_x_discrete(limits=c("North America","Europe")) +
  scale_y_continuous(labels=function(x){sprintf("%.3f", x)},limits=c(0.23, 0.53),breaks=seq(0.3, 0.5, 0.1))

# Open the output device.  Bug fix: the original spelled the argument `widt`;
# it only worked through R's partial argument matching.
pdf("/Users/martinkapun/Documents/GitHub/data-paper/Figure6/Figure6_v3.pdf", width=12, height=6)
# make final figure
# NOTE(review): plot4 and plot8 (pN/pS) are built but never placed in the
# layout below -- confirm whether they were meant to be included.
grid.arrange(plot1,
             plot5,
             plot3,
             plot7,
             plot2,
             plot6,
             layout_matrix = rbind(c(1,2,3,4),
                                   c(1,2,5,6)))
dev.off()
# Bug fix: removed a stray leftover line of grid.arrange() arguments
# ("ncol=2, nrow=2, widths=..., heights=...") that followed dev.off() and
# made the original script fail to parse.
|
490d602254a61f6e01d99584127565a043f83ab3
|
e4d207511ced01b6c295e32e904ebb060064aee3
|
/portfolio_return_v01.r
|
4db690137b2f3d28fc8846b627eb1b4f6f6c7380
|
[
"MIT"
] |
permissive
|
dhjelmar/Retirement
|
f5332eb6e4b99951e999f0d47515286a924f0fa3
|
2f844025e72d89c241aac5a6bd14780c48bb8dbb
|
refs/heads/main
| 2023-09-06T00:35:11.345731
| 2021-11-11T01:19:43
| 2021-11-11T01:19:43
| 399,638,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,393
|
r
|
portfolio_return_v01.r
|
## https://israeldi.github.io/bookdown/_book/monte-carlo-simulation-of-stock-portfolio-in-r-matlab-and-python.html
## setup.r defines readall() (used below) and loads tidyverse helpers --
## see the author's R-setup repository.
source('/home/dlhjel/GitHub_repos/R-setup/setup.r')

##-----------------------------------------------------------------------------
## Determine mean for each asset and covariance matrix
## Ten days of closing prices for three assets, embedded as a whitespace-
## delimited string and parsed by readall().
data <- '
Date AAPL GOOG FB
11/15/17 166.5791 1020.91 177.95
11/16/17 168.5693 1032.50 179.59
11/17/17 167.6333 1019.09 179.00
11/20/17 167.4658 1018.38 178.74
11/21/17 170.5791 1034.49 181.86
11/22/17 172.3721 1035.96 180.87
11/24/17 172.3820 1040.61 182.78
11/27/17 171.5150 1054.21 183.03
11/28/17 170.5101 1047.41 182.42
11/29/17 166.9732 1021.66 175.13
'
history <- as_tibble(readall(data))
# Bug fix: the dates use two-digit years ("17"), so the format must be %y;
# %Y parsed them as year 0017.  Only date *differences* were used downstream,
# which masked the error.
history$Date <- as.Date(history$Date, "%m/%d/%y")
# Average spacing between observations; used later to extend the simulated
# dates at the same cadence.  NOTE(review): assumes roughly evenly spaced rows.
increment <- (max(history$Date) - min(history$Date)) / nrow(history)
# alternately, I could grab the most common increment
# increment <- mode(diff(history$Date))
# Price columns only (drop the Date column).
price <- history[2:ncol(history)]
returns = function(df){
rows <- nrow(df)
return <- df[2:rows, ] / df[1:rows-1, ] - 1
}
# Get the asset returns (period-over-period fractional returns).
return <- returns(price)

## Mean return for each asset.
## NOTE(review): `return` as a variable name shadows the base function name;
## it works, but is easy to misread.
means <- colMeans(return)

## Variance-covariance matrix of the asset returns.
pairs(return)  # side effect: scatterplot matrix of pairwise returns
coVarMat <- cov(return)
print(coVarMat)

## Lower triangular matrix from the Choleski factorization, L where
## L %*% t(L) == coVarMat; needed for R_i = mean_i + L_ij * Z_ji.
L <- t(chol(coVarMat))
print(L)

##-----------------------------------------------------------------------------
## SET PARAMETERS FOR MONTE CARLO SIMULATIONS
# Number of Monte Carlo simulations
mc_rep <- 1000
# Number of time steps (days) per simulation
sim_days <- 30
# Simulation start date
sim_start <- Sys.Date()
# Simulation end date, extending at the same cadence as the input data
sim_end <- sim_start + increment * sim_days
# Date vector for the simulation (length sim_days + 1)
Date <- seq(sim_start, sim_end, increment)

# Invest the money evenly among the three assets.
weights <- c(1/3, 1/3, 1/3)
print(weights)

##-----------------------------------------------------------------------------
## START MONTE CARLO SIMULATION
## cum_sim_m holds cumulative portfolio growth paths:
## one row per simulated day, one column per Monte Carlo replicate.
cum_sim_m <- matrix(0, sim_days, mc_rep)

## Extend the means vector to a matrix: one row per asset,
## repeated across one column per simulated day.
means_matrix <- matrix(rep(means, sim_days), nrow = ncol(return))

## Fixed seed so runs can be repeated exactly.
set.seed(200)

for (i in seq_len(mc_rep)) {
  # Independent standard-normal draws: one row per asset, one column per day.
  Z <- matrix(rnorm(ncol(return) * sim_days), ncol = sim_days)
  # Correlated simulated returns via the Choleski factor.
  sim_return <- means_matrix + L %*% Z
  # Cumulative portfolio growth path for this replicate.
  cum_sim_i <- cumprod(weights %*% sim_return + 1)
  # Store it in the Monte Carlo matrix.
  cum_sim_m[, i] <- cum_sim_i
}

# Collect results in a data frame (one column per replicate) ...
cum_sim_df <- as_tibble(as.data.frame(cum_sim_m))
# ... and prepend the starting value (1) so the paths line up with `Date`.
ones <- rep(1, ncol(cum_sim_df))
cum_sim_df <- rbind(ones, cum_sim_df)

##-----------------------------------------------------------------------------
## DISPLAY RESULTS
# Establish the plot area first (type = 'n' draws nothing yet).
ylim <- range(cum_sim_df)
plot(Date, cum_sim_df$V1, type='n',
     ylab='Simulation Returns',
     ylim=ylim)
# Bug fix: the original looped over 2:ncol(...), silently omitting the first
# simulated path (column V1) from the plot.
for (i in seq_len(ncol(cum_sim_df))) {
  lines(Date, t(cum_sim_df[i]), type='l')
}

# Portfolio return statistics at the end of the simulation horizon.
cum_final <- as.numeric(cum_sim_df[nrow(cum_sim_df), ])
cum_final_stats <- data.frame(mean = mean(cum_final),
                              median = median(cum_final),
                              sd = sd(cum_final))
print(cum_final_stats)
## Construct confidence intervals for the simulated returns
## first define a small helper
ci <- function(df, conf) {
  # Row-wise quantile of df at probability `conf`.
  # Note: `conf` = 1 - alpha, i.e. pass 0.95 for the 95% upper limit.
  apply(df, 1, quantile, probs = conf)
}
## create dataframe of confidence intervals
## Columns run from the widest upper limit down to the widest lower limit,
## with the median path in the middle; one row per simulated date.
df <- cum_sim_df
cis <- as_tibble( data.frame(Date,
                conf_99.9_upper_percent = ci(df, 0.999),
                conf_99.0_upper_percent = ci(df, 0.99),
                conf_95.0_upper_percent = ci(df, 0.95),
                conf_50.0_percent       = ci(df, 0.5),
                conf_95.0_lower_percent = ci(df, 0.05),
                conf_99.0_lower_percent = ci(df, 0.01),
                conf_99.9_lower_percent = ci(df, 0.001)) )

## plot confidence intervals on simulation
## (dashed red = extreme 99.9% band, solid red = median path)
lines(cis$Date, cis$conf_99.9_upper_percent, lwd=4, lty=2, col='red')
lines(cis$Date, cis$conf_50.0_percent      , lwd=4, col='red')
lines(cis$Date, cis$conf_99.9_lower_percent, lwd=4, lty=2, col='red')
|
ae363232cea1cae17d85b4f9340f369fe5c607f1
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RoughSets/man/A.Introduction-RoughSets.Rd
|
bd7a5d1dac778d3581a801fc74775ec258dfa28d
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,996
|
rd
|
A.Introduction-RoughSets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RoughSets-introduction.R
\docType{package}
\name{A.Introduction-RoughSets}
\alias{A.Introduction-RoughSets}
\alias{RoughSets-intro}
\alias{A.Introduction-RoughSets-package}
\title{Introduction to Rough Set Theory}
\description{
This part attempts to introduce rough set theory (RST) and its application to data analysis.
While the classical RST proposed by Pawlak in 1982 is explained in detail in this section,
some recent advancements will be treated in the documentation of the related functions.
}
\details{
In RST, a data set is represented as a table called an information system \eqn{\mathcal{A} = (U, A)}, where
\eqn{U} is a non-empty set of finite objects known as the universe of discourse (note: it refers to all instances/rows
in datasets) and \eqn{A} is a non-empty finite set of attributes, such that \eqn{a : U \to V_{a}} for every \eqn{a \in A}.
The set \eqn{V_{a}} is the set of values that attribute \eqn{a} may take. Information systems that involve a decision attribute,
containing classes for each object, are called decision systems or decision tables. More formally, it is a pair \eqn{\mathcal{A} = (U, A \cup \{d\})},
where \eqn{d \notin A} is the decision attribute. The elements of \eqn{A} are called conditional attributes. The information system
representing all data in a particular system may contain redundant parts. It could happen because there are the same
or indiscernible objects or some superfluous attributes. The indiscernibility relation is a binary relation showing the relation between two objects.
This relation is an equivalence relation.
Let \eqn{\mathcal{A} = (U, A)} be an information system, then for any \eqn{B \subseteq A} there is an equivalence
relation \eqn{R_B(x,y)}:
\eqn{R_B(x,y)= \{(x,y) \in U^2 | \forall a \in B, a(x) = a(y)\}}
If \eqn{(x,y) \in R_B(x,y)}, then \eqn{x} and \eqn{y} are indiscernible by attributes from \eqn{B}. The equivalence
classes of the \eqn{B}-indiscernibility relation are denoted \eqn{[x]_{B}}. The indiscernibility relation will be further used to define basic concepts of rough
set theory which are lower and upper approximations.
Let \eqn{B \subseteq A} and \eqn{X \subseteq U},
\eqn{X} can be approximated using the information contained within \eqn{B} by constructing
the \eqn{B}-lower and \eqn{B}-upper approximations of \eqn{X}:
\eqn{R_B \downarrow X = \{ x \in U | [x]_{B} \subseteq X \}}
\eqn{R_B \uparrow X = \{ x \in U | [x]_{B} \cap X \not= \emptyset \}}
The tuple \eqn{\langle R_B \downarrow X, R_B \uparrow X \rangle} is called a rough set.
The objects in \eqn{R_B \downarrow X} mean that they can be with certainty classified as members of \eqn{X} on the basis of knowledge in \eqn{B}, while
the objects in \eqn{R_B \uparrow X} can be only classified as possible members of \eqn{X} on the basis of knowledge in \eqn{B}.
In a decision system, for \eqn{X} we use decision concepts (equivalence classes of decision attribute) \eqn{[x]_d}.
We can define \eqn{B}-lower and \eqn{B}-upper approximations as follows.
\eqn{R_B \downarrow [x]_d = \{ x \in U | [x]_{B} \subseteq [x]_d \}}
\eqn{R_B \uparrow [x]_d = \{ x \in U | [x]_{B} \cap [x]_d \not= \emptyset \}}
The positive, negative and boundary regions of \eqn{B} can be defined as:
\eqn{POS_{B} = \bigcup_{x \in U } R_B \downarrow [x]_d}
The boundary region, \eqn{BND_{B}}, is the set of objects that can possibly, but not certainly, be classified.
\eqn{BND_{B} = \bigcup_{x \in U} R_B \uparrow [x]_d - \bigcup_{x \in U} R_B \downarrow [x]_d}
Furthermore, we can calculate the degree of dependency of the decision on a set of attributes. The decision attribute \eqn{d}
depends totally on a set of attributes \eqn{B}, denoted \eqn{B \Rightarrow d},
if all attribute values from \eqn{d} are uniquely determined by values of attributes from \eqn{B}. It can be defined as follows.
For \eqn{B \subseteq A}, it is said that \eqn{d} depends on \eqn{B} in a degree of dependency \eqn{\gamma_{B} = \frac{|POS_{B}|}{|U|}}.
A decision reduct is a set \eqn{B \subseteq A} such that \eqn{\gamma_{B} = \gamma_{A}} and \eqn{\gamma_{B'} < \gamma_{B}} for every \eqn{B' \subset B}.
One algorithm to determine all reducts is by constructing the decision-relative discernibility matrix.
The discernibility matrix \eqn{M(\mathcal{A})} is an \eqn{n \times n} matrix \eqn{(c_{ij})} where
\eqn{c_{ij} = \{a \in A: a(x_i) \neq a(x_j) \}} if \eqn{d(x_i) \neq d(x_j)} and
\eqn{c_{ij} = \oslash} otherwise
The discernibility function \eqn{f_{\mathcal{A}}} for a decision system \eqn{\mathcal{A}} is a boolean function of \eqn{m} boolean variables \eqn{\bar{a}_1, \ldots, \bar{a}_m}
corresponding to the attributes \eqn{a_1, \ldots, a_m} respectively, and defined by
\eqn{f_{\mathcal{A}}(\bar{a_1}, \ldots, \bar{a_m}) = \wedge \{\vee \bar{c}_{ij}: 1 \le j < i \le n, c_{ij} \neq \oslash \}}
where \eqn{\bar{c}_{ij}= \{ \bar{a}: a \in c_{ij}\}}. The decision reducts of \eqn{A} are then the prime implicants of the function \eqn{f_{\mathcal{A}}}.
The complete explanation of the algorithm can be seen in (Skowron and Rauszer, 1992).
The implementations of the RST concepts can be seen in \code{\link{BC.IND.relation.RST}},
\code{\link{BC.LU.approximation.RST}}, \code{\link{BC.positive.reg.RST}}, and
\code{\link{BC.discernibility.mat.RST}}.
}
\references{
A. Skowron and C. Rauszer,
"The Discernibility Matrices and Functions in Information Systems",
in: R. Slowinski (Ed.), Intelligent Decision Support: Handbook of Applications and
Advances of Rough Sets Theory, Kluwer Academic Publishers, Dordrecht, Netherland,
p. 331 - 362 (1992).
Z. Pawlak, "Rough Sets",
International Journal of Computer and Information System,
vol. 11, no.5, p. 341 - 356 (1982).
Z. Pawlak, "Rough Sets: Theoretical Aspects of Reasoning about Data, System Theory, Knowledge Engineering and Problem Solving",
vol. 9, Kluwer Academic Publishers, Dordrecht, Netherlands (1991).
}
|
b78794598dacf71be87824e37ea7c86dd1017b87
|
665d8885b10d64a66d4a32da2aeb7e5e9cadb689
|
/R/import.R
|
8e11c4c9171eb2abd022567c787ea8e913ff3a6c
|
[] |
no_license
|
bioinfo-pf-curie/HiTC
|
47c75570bbedf760bec6e01085ffc20dc7ba3cb6
|
ed92101162b5c47b9d22a788d275bf4332d133a2
|
refs/heads/master
| 2020-12-03T03:58:52.468381
| 2018-03-28T15:18:24
| 2018-03-28T15:18:24
| 95,797,525
| 3
| 6
| null | 2017-06-29T16:31:25
| 2017-06-29T16:31:25
| null |
UTF-8
|
R
| false
| false
| 6,980
|
r
|
import.R
|
###################################
## importC
##
## Import HTCexp object to standard format
## rows/col/counts
##
## file = name of intput file to read
###################################
importC <- function(con, xgi.bed, ygi.bed=NULL, allPairwise=FALSE, rm.trans=FALSE, lazyload=FALSE){
    # Import interaction counts stored in the triplet 'C' format
    # (row interval id / column interval id / count), together with BED
    # files describing the genomic intervals, and build HTCexp object(s).
    stopifnot(!missing(con))
    # NOTE(review): xgi.bed has no default, so a *missing* xgi.bed errors on
    # this is.null() test instead of producing the message below -- confirm.
    if(is.null(xgi.bed) && is.null(ygi.bed))
        stop("BED files of x/y intervals are required")

    message("Loading Genomic intervals ...")
    xgi <- rtracklayer::import(xgi.bed, format="bed")
    xgi <- sortSeqlevels(xgi)
    names(xgi) <- id(xgi)
    if (!is.null(ygi.bed)){
        ygi <- rtracklayer::import(ygi.bed, format="bed")
        ygi <- sortSeqlevels(ygi)
        names(ygi) <- id(ygi)
    }else{
        # Symmetric map: reuse the x intervals for the y axis.
        ygi <- xgi
    }

    message("Reading file ...")
    cdata <- read.table(con,comment.char = "#", colClasses=c("character","character","numeric"), check.names=FALSE)
    stopifnot(ncol(cdata)==3)

    # Map interval ids from the triplet file onto positions within the
    # BED-derived interval sets; counts become a sparse matrix with
    # rows = ygi intervals and columns = xgi intervals.
    id1 <- cdata[,1]
    id2 <- cdata[,2]
    pos1 <- match(id1, id(ygi))
    pos2 <- match(id2, id(xgi))
    ## -1 is performed in the sparseMatrix function
    bigMat <- Matrix::sparseMatrix(i=pos1, j=pos2, x=cdata[,3], dims=c(length(ygi), length(xgi)), dimnames=list(id(ygi), id(xgi)))
    rm(cdata)

    message("Convert 'C' file in HTCexp object(s)")
    # NOTE(review): the assignment makes the returned HTClist invisible at
    # the top level -- confirm this is intentional.
    x <- splitCombinedContacts(bigMat, xgi, ygi, allPairwise, rm.trans, lazyload)
}##importC
###################################
## import.my5C
##
## Create a HTCexp object from my5C's data files (matrix data file).
## Intervals not define in the BED files are not taken into account
## If multiple chromosomes are available in the intervals files, several HTCexp object are created, one per chromosome pair.
##
## my5C.datafile: data file at the matrix format
##
##################################
import.my5C <- function(file, allPairwise=FALSE, rm.trans=FALSE, lazyload=FALSE){
    ## Read data (my5C matrix-format file: first column holds row names).
    stopifnot(!missing(file))
    message("Reading file ...")
    my5Cdata <- read.table(file,comment.char = "#", check.names=FALSE, header=TRUE, row.names=1)

    message("Convert my5C matrix file in HTCexp object(s)")
    # Dense count matrix; dimnames carry the my5C interval descriptors.
    my5CdataM <- as(as.matrix(my5Cdata),"Matrix")
    rownames(my5CdataM) <- rownames(my5Cdata)
    colnames(my5CdataM) <- colnames(my5Cdata)

    ## Create xgi and ygi object
    # Parse the row/column names (split on '|', ':' and '-') into
    # name/org/chr/start/end fields and build GRanges interval sets.
    gr <- dimnames2gr(my5Cdata, pattern="\\||\\:|\\-", feat.names=c("name","org","chr","start", "end"))
    ygi <- gr[[1]]
    xgi <- gr[[2]]

    ## Create HTClist object from my5Cdata
    # Replace the raw descriptors with the clean interval ids.
    rownames(my5CdataM) <- id(ygi)
    colnames(my5CdataM) <- id(xgi)

    ## For multiple maps in one file
    # Several chromosomes present: split the combined matrix into one
    # HTCexp per chromosome pair; otherwise wrap the single map directly.
    if (length(seqlevels(xgi)) > 1 || length(seqlevels(ygi)) > 1){
        obj <- splitCombinedContacts(my5CdataM, xgi, ygi, allPairwise, rm.trans, lazyload)
    }else{
        obj <- HTClist(HTCexp(my5CdataM, xgi, ygi, lazyload = lazyload))
    }
    # Drop NULL entries (pairs with no intervals) before returning.
    return(HTClist(unlist(obj[which(!unlist(lapply(obj, is.null)))])))
}##import.my5C
###################################
##
## INTERNAL FUNCTION
## Split my5C row/colnames matrix to create intervals objects
## Generalized function of my5C2gr - now deprecated
##
## x: Matrix data
## pattern: regular expression to split the colnames/rownames of x
## feat.names: features names associated with the regular expression
##
##################################
dimnames2gr <- function(x, pattern="\\||\\:|\\-", feat.names=c("name","chr","start", "end")){
    # Split the rownames of x on `pattern` into the fields listed in
    # feat.names, then build a GRanges from the chr/start/end fields with
    # the name field as element names.
    rdata <- strsplit(rownames(x), split=pattern)
    dr <- do.call(rbind.data.frame, rdata)
    # Each dimname must split into exactly length(feat.names) fields.
    stopifnot(dim(dr)[2]==length(feat.names))
    colnames(dr)<-feat.names
    rgr <- GRanges(seqnames=dr$chr, ranges = IRanges(start=as.numeric(as.character(dr$start)), end=as.numeric(as.character(dr$end)), names=as.character(dr$name)))

    # Column names are parsed the same way, but only when they differ from
    # the rownames; otherwise the row GRanges is reused (symmetric map).
    if (length(setdiff(colnames(x),rownames(x)))>0){
        cdata <- strsplit(colnames(x), pattern)
        cr <- do.call(rbind.data.frame, cdata)
        colnames(cr)<-feat.names
        cgr <- GRanges(seqnames=cr$chr, ranges = IRanges(start=as.numeric(as.character(cr$start)), end=as.numeric(as.character(cr$end)), names=as.character(cr$name)))
    }else{
        cgr <- rgr
    }
    # Returns list(row GRanges, column GRanges).
    list(rgr, cgr)
}##dimnames2gr
###################################
## splitCombinedContacts
## INTERNAL FUNCTION
## Split a genome-wide Matrix into HTClist
## Selection is done by ids from xgi and ygi objects
##
## x: Matrix data
## xgi: GenomicRanges of x_intervals
## ygi: GenomicRanges of y_intervals
## allPairwise: see pair.chrom
## lazyload: see HTCexp
##
##################################
splitCombinedContacts <- function(x, xgi, ygi, allPairwise=TRUE, rm.trans=FALSE, lazyload=FALSE){
    # Split a genome-wide contact Matrix into per-chromosome-pair HTCexp
    # objects, selecting submatrices by interval ids from xgi/ygi.
    chromPair <- pair.chrom(sortSeqlevels(c(seqlevels(xgi), seqlevels(ygi))), use.order = allPairwise, rm.trans=rm.trans)
    obj <- mclapply(chromPair, function(chr) {
        # chr[1] = y (row) chromosome, chr[2] = x (column) chromosome.
        ygi.subset <- ygi[which(seqnames(ygi) == chr[1]),]
        seqlevels(ygi.subset) <- as.character(unique(seqnames(ygi.subset)))
        xgi.subset <- xgi[which(seqnames(xgi) == chr[2]),]
        seqlevels(xgi.subset) <- as.character(unique(seqnames(xgi.subset)))

        if (length(xgi.subset) > 0 && length(ygi.subset) > 0) {
            message("Creating ", chr[2], "-", chr[1], " Contact Map ...")
            # A single-row/column selection drops to a vector, so rebuild the
            # Matrix with explicit dimensions in that case.
            if (length(ygi.subset)==1 || length(xgi.subset)==1){
                intdata <- Matrix(x[id(ygi.subset), id(xgi.subset)], nrow=length(ygi.subset), ncol=length(xgi.subset))
            }else{
                intdata <- x[id(ygi.subset), id(xgi.subset)]
            }
            colnames(intdata) <- id(xgi.subset)
            rownames(intdata) <- id(ygi.subset)

            ##Put as NA rows/columns with only 0s
            ## back to matrix to speed up implementation ...
            #cl <- class(intdata)
            #intdata <- as.matrix(intdata)
            #intdata[which(rowSums(intdata, na.rm=TRUE)==0),] <- NA
            #intdata[,which(colSums(intdata, na.rm=TRUE)==0)] <- NA
            #intdata <- as(intdata, cl)

            HTCexp(intdata, xgi.subset, ygi.subset, lazyload = lazyload)
        }
        # Pairs with no intervals yield NULL; they are dropped by unlist()
        # below (and by the callers).
    })
    ##obj
    HTClist(unlist(obj))
}##splitCombinedContacts
###################################
## pair.chrom
## INTERNAL FUNCTION
## Compute chromsome pairwise combinaison
##
## chrom: character i.e vector with chromosome names
## use.order: if TRUE, all the pairwise combinaison are returned (i.e. chr1chr2 AND chr2chr1)
##
##################################
pair.chrom <- function(chrom, use.order=TRUE, rm.cis=FALSE, rm.trans=FALSE){
  ## Compute chromosome pairwise combinations.
  ##
  ## chrom:    character vector of chromosome names (duplicates allowed).
  ## use.order: if TRUE, return all ordered pairs (chr1chr2 AND chr2chr1);
  ##            otherwise only the upper triangle (cis pairs included).
  ## rm.cis:   drop pairs where both chromosomes are identical.
  ## rm.trans: drop pairs where the chromosomes differ.
  ##
  ## Returns a named list; each element is a length-2 character vector and its
  ## name is the two chromosome names concatenated (e.g. "chr1chr2").
  v <- unique(chrom)
  ## seq_along() instead of 1:length(v): safe when v is empty, and lapply()
  ## instead of sapply() avoids the matrix/list return-type ambiguity.
  if (use.order) {
    z <- unlist(lapply(seq_along(v), function(i) paste(v[i], v)))
  } else {
    z <- unlist(lapply(seq_along(v), function(i) paste(v[i], v[i:length(v)])))
  }
  lz <- strsplit(z, " ")
  names(lz) <- gsub(" ", "", z)
  ## A pair is cis exactly when its two chromosome names are identical.
  if (rm.cis) {
    lz <- lz[!vapply(lz, function(p) anyDuplicated(p) > 0, logical(1))]
  }
  if (rm.trans) {
    lz <- lz[vapply(lz, function(p) anyDuplicated(p) > 0, logical(1))]
  }
  lz
}##pair.chrom
|
e21ca22fad20709a88e9de7c48718ceee2915b8a
|
47bd2a6abda2e0927147935c83d926576a0830c3
|
/R/neuroCombatData/inst/scripts/simulateData.R
|
f63fbe93a8280d1aa853a808b7014a955fe47f34
|
[
"MIT",
"Artistic-2.0",
"Python-2.0"
] |
permissive
|
ThomasHMAC/ComBatHarmonization
|
32ec940381788a10fef0377e1c7fc5aae392c142
|
215596ffa6826c24f3e765d03a6c1e8cf729d51c
|
refs/heads/master
| 2023-04-30T05:38:03.220147
| 2021-05-29T23:21:01
| 2021-05-29T23:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,455
|
r
|
simulateData.R
|
## Simulate a de-identified cortical-thickness SummarizedExperiment from the
## ADNI FreeSurfer data and save it as the package data set `seCorticalThickness`.
## Jitter is added to age and to every feature so the released values are not
## the original ADNI measurements.
library(SummarizedExperiment)

load("temp/seAdniFs.rda")
se <- seAdniFs

## Keep one scan per subject and only sites with at least 25 subjects.
se <- se[, !duplicated(se$RID)]
sites <- table(se$site)
sites <- names(sites[sites >= 25])
se <- se[, se$site %in% sites]

## Anonymize site names as Site1, Site2, ...
site <- as.factor(colData(se)$site)
levels(site) <- paste0("Site", seq_along(levels(site)))
colData(se)$site <- as.character(site)

## Adding jitter to age:
colData(se)$age <- jitter(colData(se)$age, 200)

## Only keeping relevant columns:
colData(se) <- colData(se)[, c("ID", "age", "gender", "site")]

## Rename scans as <site>_Scan<k>, ordered by site.
se <- se[, order(se$site)]
ids <- split(colData(se)$site, f = colData(se)$site)
ids <- lapply(ids, function(x) {
  paste0(x, "_Scan", seq_along(x))
})
se$ID <- do.call(c, ids)
colnames(se) <- se$ID

## Jitter each feature by a quarter of its MAD.
Y <- assays(se)[[1]]
for (i in seq_len(nrow(Y))) {
  y <- Y[i, ]
  sd <- mad(y, na.rm = TRUE)
  y <- jitter(y, amount = sd / 4)
  Y[i, ] <- y
}
assays(se)[[1]] <- Y

## Simplify the feature annotation to an ID/Description table.
ann <- rowData(se)
ann <- ann[, c("FLDNAME", "TEXT")]
colnames(ann) <- c("ID", "Description")
rowData(se) <- ann
metadata(se) <- list(NULL)

seCorticalThickness <- se
save(seCorticalThickness, file = "../../data/seCorticalThickness.rda")

## Exploratory check: plot the feature most negatively correlated with age,
## colored by site.
age <- colData(se)$age
col <- as.integer(as.factor(colData(se)$site))
tab <- table(col)
sites <- names(tab)
good <- col %in% sites  # NOTE(review): `good` is computed but never used below
Y <- assays(se)[[1]]
cors <- sapply(seq_len(nrow(Y)), function(i) {
  cor(age, Y[i, ], use = "p")
})
wh <- which.min(cors)
# wh <- order(cors)[1:10][8]
plot(age, Y[wh, ], col = col)
|
93e374c2cce07df522efa5998d9d7f5a79a33bb6
|
d478e26f1bf5c097014ca28de2f54195f0b00de0
|
/Figure4_Validation_final8_big_labels.R
|
6bf27db4e1ab4d5900053889417f39dedd68c432
|
[] |
no_license
|
MWhite-InstitutPasteur/Pvivax_sero_dx
|
9d88f53ee7050223d6a3cd443c11edb861e12acf
|
73009bac1b0f924532464cfa746009e871b951a9
|
refs/heads/master
| 2020-11-26T22:54:17.522863
| 2020-01-07T16:37:37
| 2020-01-07T16:37:37
| 229,223,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,964
|
r
|
Figure4_Validation_final8_big_labels.R
|
###########################
###########################
## ##
## # ## ##### ###### ##
## ## ## ## ## ##
## ### ## #### ## ##
## ## ### ## ## ##
## ## ## ##### ## ##
## ##
###########################
###########################
library(igraph)
## Load the LDA antigen-combination search results.
## NOTE(review): N_ant and ant_names_short are used below but only defined later
## in this script; they presumably come from LDA_search.RData here — confirm.
load("C:\\U\\GHIT\\NatMed_Paper_OrigBrazData\\Figure4_analysis\\LDA_search.RData")
## Rank the 4-antigen combinations by their score (column 5), best first.
top_4ant = LDA_4ant[order(LDA_4ant[,5], decreasing=TRUE),]
## VV_4 accumulates, per antigen, the sum of the ranks of all combinations it
## appears in; MM_4 accumulates the same per antigen pair.
VV_4 = rep(0, N_ant)
MM_4 = matrix(0, nrow=N_ant, ncol=N_ant)
for(i in 1:nrow(top_4ant))
{
VV_4[which(ant_names_short == top_4ant[i,1])] = VV_4[which(ant_names_short == top_4ant[i,1])] + i
VV_4[which(ant_names_short == top_4ant[i,2])] = VV_4[which(ant_names_short == top_4ant[i,2])] + i
VV_4[which(ant_names_short == top_4ant[i,3])] = VV_4[which(ant_names_short == top_4ant[i,3])] + i
VV_4[which(ant_names_short == top_4ant[i,4])] = VV_4[which(ant_names_short == top_4ant[i,4])] + i
MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,2])] = MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,2])] + i
MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,3])] = MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,3])] + i
MM_4[which(ant_names_short == top_4ant[i,2]),which(ant_names_short == top_4ant[i,3])] = MM_4[which(ant_names_short == top_4ant[i,2]),which(ant_names_short == top_4ant[i,3])] + i
MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,4])] = MM_4[which(ant_names_short == top_4ant[i,1]),which(ant_names_short == top_4ant[i,4])] + i
MM_4[which(ant_names_short == top_4ant[i,2]),which(ant_names_short == top_4ant[i,4])] = MM_4[which(ant_names_short == top_4ant[i,2]),which(ant_names_short == top_4ant[i,4])] + i
MM_4[which(ant_names_short == top_4ant[i,3]),which(ant_names_short == top_4ant[i,4])] = MM_4[which(ant_names_short == top_4ant[i,3]),which(ant_names_short == top_4ant[i,4])] + i
}
## Symmetrize, then map rank sums to (0,1] weights: low rank sum (frequently
## top-ranked) -> weight near 1. Both are normalized to mean 1.
MM_4 = MM_4 + t(MM_4)
MM_4 = exp(-MM_4/quantile(MM_4, prob=0.3))
diag(MM_4) = 0
MM_4 = MM_4/mean(MM_4)
VV_4 = exp(-VV_4/quantile(VV_4, prob=0.5))
VV_4 = VV_4/mean(VV_4)
## Build the weighted undirected antigen co-selection network.
NET_4 = graph.adjacency(MM_4, mode="undirected", weighted=TRUE, diag=FALSE)
summary(NET_4)
## Edge weights rescaled to [0,1] (used for plotting colors).
color_scale = (E(NET_4)$weight - min(E(NET_4)$weight))/(max(E(NET_4)$weight) - min(E(NET_4)$weight))
###################################
###################################
## ##
## #### #### ###### #### ##
## ## ## ## ## ## ## ## ##
## ## ## ###### ## ###### ##
## ## ## ## ## ## ## ## ##
## #### ## ## ## ## ## ##
## ##
###################################
###################################
## Read the antibody + epidemiology tables: three endemic cohorts and the
## pooled negative-control panels.
thailand_data = read.csv("C:\\U\\GHIT\\NatMed_Paper\\Data\\proc\\thailand_ab_epi_data.csv")
brazil_data = read.csv("C:\\U\\GHIT\\NatMed_Paper\\Data\\proc\\brazil_ab_epi_data.csv")
solomon_data = read.csv("C:\\U\\GHIT\\NatMed_Paper\\Data\\proc\\solomon_ab_epi_data.csv")
control_data = read.csv("C:\\U\\GHIT\\NatMed_Paper\\Data\\proc\\control_ab_epi_data.csv")
###################################
###################################
## ##
## CATEGORISATION ##
## ##
###################################
###################################
###################################
## Infection-recency categories.
##
## Column 10 of each cohort table holds days since the last detected Pv
## infection:
##   0        -> "<prefix>_current"
##   (0, 270] -> "<prefix>_recent"  (9 months = 9*30 days)
##   > 270    -> "<prefix>_old"
##   NA       -> default "<prefix>_never" (which() drops NAs)
## Extracted into one helper to replace three copy-pasted blocks.
classify_infection <- function(days_since, prefix) {
  cat <- rep(paste(prefix, "never", sep = "_"), length(days_since))
  cat[which(days_since == 0)] <- paste(prefix, "current", sep = "_")
  cat[intersect(which(days_since > 0), which(days_since <= 9 * 30))] <- paste(prefix, "recent", sep = "_")
  cat[which(days_since > 9 * 30)] <- paste(prefix, "old", sep = "_")
  cat
}

thailand_cat <- classify_infection(thailand_data[, 10], "thai")
brazil_cat   <- classify_infection(brazil_data[, 10], "braz")
solomon_cat  <- classify_infection(solomon_data[, 10], "sol")
###################################
## Controls: label each negative-control sample by the 3-letter prefix of its
## sample ID (TRC / ARC / BRC red-cross panels); anything else is VBDR.
## Vectorized substr()/ifelse() replaces the original element-wise loop.
control_prefix <- substr(as.character(control_data[, 1]), 1, 3)
control_cat <- ifelse(control_prefix %in% c("TRC", "ARC", "BRC"), control_prefix, "VBDR")
####################################
## Put together Thai and control data
## Columns 17:81 are antibody measurements (presumably titres — verify against
## the source CSV headers); stack all cohorts and log-transform.
AB <- rbind( thailand_data[,17:81], brazil_data[,17:81], solomon_data[,17:81], control_data[,17:81] )
AB <- log(AB)
## Drop antigens (columns) with more than 250 missing measurements overall.
## colSums(is.na(.)) replaces the original per-column loop, and the length
## guard fixes a latent bug: when no column qualified, `AB[,-ant_drop]` with an
## empty index would not leave AB unchanged.
ant_drop <- which(colSums(is.na(AB)) > 250)
if (length(ant_drop) > 0) {
  AB <- AB[, -ant_drop]
}
N_ant <- ncol(AB)
ant_names <- colnames(AB)
############################################
## Create shortened antibody names
## NOTE(review): assumes exactly 60 antigens remain after the NA filter above,
## in stable column order — verify if the input panel changes.
ant_names_short = c("W01", "W02", "W03", "W04", "W05", "W06", "W07", "W08", "W09", "W10",
"W11", "W12", "W13", "W14", "W15", "W16", "W17", "W18", "W19", "W20",
"W21", "W22", "W23", "W24", "W25", "W26", "W27", "W28", "W29", "W30",
"W31", "W32", "W33", "W34", "W35", "W36", "W37", "W38", "W39", "W40",
"W41", "W42", "W43", "W44", "W45", "W46", "W47", "W48", "W49", "W50",
"W51", "W52", "W53", "W54", "W55", "W56", "W57", "W58", "W59", "W60")
## Combined infection-status labels for all participants, in the same row
## order as AB (Thailand, Brazil, Solomons, controls).
inf_cat <- c( thailand_cat, brazil_cat, solomon_cat, control_cat )
N_part <- length(inf_cat)
###################################
## Binary category: "new" = infected within the last 9 months (current or
## recent in any endemic cohort), "old" = everything else (vectorized %in%
## replaces six repeated assignments).
recent_cats <- c("thai_current", "thai_recent",
                 "braz_current", "braz_recent",
                 "sol_current",  "sol_recent")
bin_cat <- ifelse(inf_cat %in% recent_cats, "new", "old")
##############################
##############################
## ##
## ##### ## ## ## ## ##
## ## ## ## ## ## ## ##
## ##### ## ## ### ##
## ## ### ## ## ##
## ## # ## ## ##
## ##
##############################
##############################
## Protein annotation: map each antigen ID (column 1 of Table2) to its PVX
## protein name (column 3).
PVX_read <- read.csv("C:\\U\\GHIT\\NatMed_Paper\\Data\\protein_info\\Table2_protein.csv")
## match() replaces the element-wise which() loop; an antigen missing from the
## annotation table now yields NA instead of a zero-length-replacement error.
PVX_names <- as.character(PVX_read[match(ant_names, PVX_read[, 1]), 3])
################################################################################
################################################################################
## ##
## #### #### ##### ##### ##### ## #### ###### #### #### # ## ##
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
## ## ## ## ##### ##### #### ## ###### ## ## ## ## ### ## ##
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ### ##
## #### #### ## ## ## ## ##### ##### ## ## ## #### #### ## ## ##
## ##
################################################################################
################################################################################
library(fields)
## Pairwise Spearman correlation between all antigen titres. Each pair is
## restricted to rows where both antigens are observed, and the ranks are
## recomputed on that pairwise-complete subset. (Deliberately NOT replaced by
## cor(AB, use = "pairwise.complete.obs", method = "spearman"), which ranks
## each full column once and can differ when missingness patterns differ.)
AB_cor <- matrix(NA, nrow = N_ant, ncol = N_ant)
for (i in seq_len(N_ant)) {
  for (j in seq_len(N_ant)) {
    ## Logical mask replaces intersect(which(), which()) — same rows, clearer.
    ok <- !is.na(AB[, i]) & !is.na(AB[, j])
    AB_cor[i, j] <- cor(AB[ok, i], AB[ok, j], method = "spearman")
  }
}
####################################
####################################
##                                ##
##  ONE AT A TIME                 ##
##                                ##
####################################
####################################
## ROC analysis for each antigen individually: sweep N_SS thresholds over the
## log-titre range and record sensitivity/specificity for classifying "new"
## (recently infected) vs "old".
N_SS <- 1000
SS_cut <- seq(from=-11, to=-3.5, length=N_SS)
sens_mat <- matrix(NA, nrow=N_ant, ncol=N_SS)
spec_mat <- matrix(NA, nrow=N_ant, ncol=N_SS)
for(i in 1:N_ant)
{
## Drop participants with a missing titre for this antigen.
AB_i = AB[which(is.na(AB[,i])==FALSE),i]
bin_cat_i = bin_cat[which(is.na(AB[,i])==FALSE)]
for(j in 1:N_SS)
{
## Predict "new" when the titre exceeds the threshold.
bin_pred <- rep( "old", length(bin_cat_i) )
bin_pred[ which(AB_i > SS_cut[j]) ] <- "new"
Cmat <- table( bin_cat_i, bin_pred )
## Pad the confusion matrix when all predictions fall in one class, so the
## column order below is always (new, old).
if( ncol(Cmat)==1 )
{
if( colnames(Cmat)=="new" )
{
Cmat <- cbind( Cmat, c(0,0) )
}else{
if( colnames(Cmat)=="old" )
{
Cmat = cbind( c(0,0), Cmat )
}
}
}
## Rows of Cmat are (new, old) true classes; columns (new, old) predictions.
sens_mat[i,j] <- Cmat[1,1]/(Cmat[1,1]+Cmat[1,2])
spec_mat[i,j] <- Cmat[2,2]/(Cmat[2,1]+Cmat[2,2])
}
}
##sens_mat = cbind( rep(1,N_SS), sens_mat, rep(0,N_SS) )
##spec_mat = cbind( rep(0,N_SS), spec_mat, rep(1,N_SS) )
######################################
## Calculate Area Under Curve (AUC) by trapezoidal integration over thresholds.
AUC_one_ant <- rep(NA, N_ant)
for(i in 1:N_ant)
{
AUC_one_ant[i] <- sum( (sens_mat[i,1:(N_SS-1)] - sens_mat[i,2:N_SS])*
0.5*(spec_mat[i,1:(N_SS-1)] + spec_mat[i,2:N_SS]) )
}
##################################
##################################
##                              ##
##  PLOT: top-8 antigen panel   ##
##                              ##
##################################
##################################
## Hand-picked positions into the VV_4 ranking (c(1,3,4,7,9,5,8,14)) select
## the final 8 antigens. NOTE(review): this hard-coded selection is tied to the
## loaded LDA_search.RData — verify if that file is regenerated.
top_8_final = order( VV_4, decreasing=TRUE )[c(1,3,4,7,9,5,8,14)]
top_8_names = ant_names[top_8_final] ## ant_names_PVX[top_8_final]
top_8_PVX = PVX_names[top_8_final] ## ant_names_PVX[top_8_final]
## Plot styling: one color per antigen, panel letters, one color per region.
top_8_cols = rainbow(8)
alpha_seq = c("A", "B", "C", "D", "E", "F", "G", "H")
region_cols = c("magenta", "green3", "yellow2", "dodgerblue")
## Long labels ("<antigen ID>: <protein name>") for the 8 selected antigens.
## paste() is vectorized, so the element-wise loop is unnecessary.
top_8_names_long <- paste(ant_names[top_8_final], ": ", PVX_names[top_8_final], sep = "")
## Open the output device and define the multi-panel layout:
## rows 1-2 = eight boxplot panels, row 3 = legend strip,
## rows 4-5 = ROC panel (10), correlation heatmap + its color bar (11/12),
## scatter panel (13).
tiff(file="Figure4_Validation_final8_big_labels.tif", width=40, height=30, units="cm", res=500)
lay.mat <- rbind( c( 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4 ),
c( 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8 ),
c( 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 ),
c(10,10,10,10,11,11,11,11,13,13,13,13 ),
c(10,10,10,10,12,12,12,12,13,13,13,13 ) )
layout(lay.mat, heights=c(15, 15, 3, (4/3)*13, (4/3)*2))
layout.show(13)
par(mar=c(3,5,2.5,1))
par(mgp=c(3, 0.65, 0))
## Shared plotting sizes used by all panels below.
point.size = 0.75
lab.size = 2
axis.size = 2
main.size = 2
#####################################
#####################################
##                                 ##
##  PANELS 1-8                     ##
##  Boxplots for Ab distributions  ##
##                                 ##
#####################################
#####################################
## One boxplot panel per selected antigen: 16 groups = 4 infection-recency
## categories for each of the 3 endemic cohorts, plus the 4 control panels.
for(n in 1:8)
{
boxplot( AB[which(inf_cat=="thai_current"),top_8_final[n]],
AB[which(inf_cat=="thai_recent"),top_8_final[n]],
AB[which(inf_cat=="thai_old"),top_8_final[n]],
AB[which(inf_cat=="thai_never"),top_8_final[n]],
AB[which(inf_cat=="braz_current"),top_8_final[n]],
AB[which(inf_cat=="braz_recent"),top_8_final[n]],
AB[which(inf_cat=="braz_old"),top_8_final[n]],
AB[which(inf_cat=="braz_never"),top_8_final[n]],
AB[which(inf_cat=="sol_current"),top_8_final[n]],
AB[which(inf_cat=="sol_recent"),top_8_final[n]],
AB[which(inf_cat=="sol_old"),top_8_final[n]],
AB[which(inf_cat=="sol_never"),top_8_final[n]],
AB[which(inf_cat=="TRC"),top_8_final[n]],
AB[which(inf_cat=="BRC"),top_8_final[n]],
AB[which(inf_cat=="ARC"),top_8_final[n]],
AB[which(inf_cat=="VBDR"),top_8_final[n]],
pch=19, yaxt='n', xaxt='n',
ylim=log(c(1e-5, 0.03)),
col=c("darkgrey", "red", "orange" ,"green",
"darkgrey", "red", "orange" ,"green",
"darkgrey", "red", "orange" ,"green",
"royalblue", "cornflowerblue", "dodgerblue", "cyan"),
ylab="relative antibody unit",
main=paste("(", alpha_seq[n], ") anti-", top_8_PVX[n], " antibodies", sep=""),
cex.lab=lab.size, cex.axis=axis.size, cex.main=main.size)
## Dashed lines mark the assay's lower/upper quantification bounds
## (presumably — the same constants recur in panel K; verify).
points(x=c(-1e6, 1e6), y=rep(log(1.95e-5),2), type='l', lty="dashed")
points(x=c(-1e6, 1e6), y=rep(log(0.02),2), type='l', lty="dashed")
axis(1, at=c(2.5, 7, 10.5, 14.5), label=c("Thailand", "Brazil", "Solomons", "Controls"), cex.axis=1.3 )
axis(2, at=log(c(0.00001, 0.0001, 0.001, 0.01)), label=c("0.00001", "0.0001", "0.001", "0.01"), las=2, cex.axis=0.9 )
}
#####################################
#####################################
##                                 ##
##  PANEL 9                        ##
##  Legend                         ##
##                                 ##
#####################################
#####################################
## Full-width legend strip describing the 16 boxplot groups above.
par(mar = c(0,0,0,0))
plot.new()
legend(x='center',
legend = c("infected", "negative controls: Thai RC",
"infected 1-9 months", "negative controls: Brazil RC",
"infected 9-12 months", "negative controls: Aus RC",
"no detected infection", "negative controls: VBDR"),
fill = c("darkgrey", "royalblue",
"red", "cornflowerblue",
"orange", "dodgerblue",
"green", "cyan"),
border = c("darkgrey", "royalblue",
"red", "cornflowerblue",
"orange", "dodgerblue",
"green", "cyan"),
ncol=4, cex=2.1, bty="n" )
#########################################
#########################################
##                                     ##
##  PANEL 10                           ##
##  Binary classification (all sites)  ##
##                                     ##
#########################################
#########################################
## ROC curves for all antigens (grey), with the 8 selected antigens overlaid
## in color; diagonal = no-discrimination line.
line_seq <- c(0.2, 0.4, 0.6, 0.8)
par(mar=c(4.5,5,2.5,1.5))
par(mgp=c(3.25, 0.6,0))
plot(x=c(0,1), y=c(0,1), type='l', lty="dashed",
xlim=c(0,1.002), ylim=c(0,1.002),
xaxs='i', yaxs='i', xaxt='n', yaxt='n',
xlab="1 - specificity", ylab="sensitivity",
main="(I) Classification of recent infections",
cex.lab=1.2*lab.size, cex.axis=axis.size, cex.main=1.1*main.size)
## Background grid.
for(i in 1:4)
{
points(x=c(0,1), y=rep(line_seq[i],2), type='l', col="grey", lty="dashed")
points(x=rep(line_seq[i],2), y=c(0,1), type='l', col="grey", lty="dashed")
}
for(i in 1:N_ant)
{
points( x=1-spec_mat[i,], y=sens_mat[i,],
type='S', lwd=1, col="grey" )
}
for(i in 1:N_ant)
{
if( i %in% top_8_final )
{
points( x=1-spec_mat[i,], y=sens_mat[i,],
type='s', lwd=2, col=top_8_cols[which(top_8_final==i)] )
}
}
##points(x=0.2, y=0.8, pch=17, cex=2, col="red")
##points(x=0.02, y=0.5, pch=17, cex=2, col="green")
##points(x=0.5, y=0.98, pch=17, cex=2, col="blue")
legend(x="bottomright",
cex=1.1,
bg="white", box.col="white",
fill = top_8_cols,
border = top_8_cols,
legend = top_8_names_long )
axis(1, at=c(0.0, 0.2, 0.4, 0.6, 0.8, 1),
labels=c("0%", "20%", "40%", "60%", "80%", "100%"), cex.axis=1.5 )
axis(2, at=c(0.0, 0.2, 0.4, 0.6, 0.8, 1),
labels=c("0%", "20%", "40%", "60%", "80%", "100%"), cex.axis=1.5, las=2 )
#########################################
#########################################
##                                     ##
##  PANEL 11                           ##
##  Correlation heatmap                ##
##                                     ##
#########################################
#########################################
## Heatmap of the pairwise Spearman correlation matrix AB_cor, on a 10-step
## green-to-red color scale.
par(mar=c(2,3,2.5,1))
par(mgp=c(1.5,0.75,0))
##N_cor_steps <- 100
##
##cor_cols <- rev(heat.colors(N_cor_steps))
cor_cols = c("springgreen4", "springgreen", "palegreen", "yellowgreen", "yellow",
"gold", "orange", "orangered", "firebrick1", "red3" )
N_cor_steps = length(cor_cols)
plot(x=100, y=100,
xlim=c(0,N_ant), ylim=c(0,N_ant),
xlab="", ylab="",
main="(J) Correlation between antibody titres",
xaxt='n', yaxt='n', xaxs='i', yaxs='i',
cex.lab=lab.size, cex.axis=axis.size, cex.main=1.1*main.size)
## One colored cell per antigen pair.
## NOTE(review): the color index N_cor_steps*AB_cor[i,j] assumes correlations
## in (0,1]; a zero or negative correlation would index out of range — verify.
for(i in 1:N_ant)
{
for(j in 1:N_ant)
{
polygon(x=c(i-1,i,i,i-1), y=c(j-1,j-1,j,j),
border=NA, col=cor_cols[N_cor_steps*AB_cor[i,j]])
}
}
axis(1, at=seq(from=0.5, by=1, length=N_ant), label=ant_names_short, las=2, cex.axis=0.5)
axis(2, at=seq(from=0.5, by=1, length=N_ant), label=ant_names_short, las=2, cex.axis=0.5)
#############
## LEGEND
## Horizontal color bar for the heatmap (0-100% correlation).
par(mar=c(2,1,1,1))
plot(x=100, y=100,
xlim=c(0,100), ylim=c(0,1),
xlab="", ylab="",
main="",
xaxt='n', yaxt='n', xaxs='i', yaxs='i', bty='n')
for(i in 1:length(cor_cols))
{
polygon(y=c(0,1,1,0), x=c(i-1,i-1,i,i)*100/N_cor_steps,
border=NA, col=cor_cols[i])
}
axis(1, at=100*c(0,0.25,0.5,0.75,1), label=c("0%", "25%", "50%", "75%", "100%"), cex.axis=1.25)
#####################################
#####################################
##                                 ##
##  PANELS 12                      ##
##  Multi-variate data view        ##
##                                 ##
#####################################
#####################################
## Scatter of the first two selected antigens, colored by study region.
par(mar=c(4.5,5,2.5,1))
par(mgp=c(3.25, 0.65, 0))
#####################################
## Colouring by infection status
plot(x=1e10, y=1e10,
pch=19, yaxt='n', xaxt='n',
xlim=log(c(1e-5, 0.03)), ylim=log(c(1e-5, 0.03)),
xlab=paste("anti-", PVX_names[top_8_final[1]], " antibody titre", sep=""),
ylab=paste("anti-", PVX_names[top_8_final[2]], " antibody titre", sep=""),
main="(K) Distribution of antibody titres",
cex.lab=lab.size, cex.axis=axis.size, cex.main=1.1*main.size)
points( x=AB[ which(inf_cat%in%c("thai_never", "thai_old", "thai_recent", "thai_current")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("thai_never", "thai_old", "thai_recent", "thai_current")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[1] )
points( x=AB[ which(inf_cat%in%c("braz_never", "braz_old", "braz_recent", "braz_current")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("braz_never", "braz_old", "braz_recent", "braz_current")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[2] )
points( x=AB[ which(inf_cat%in%c("sol_never", "sol_old", "sol_recent", "sol_current")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("sol_never", "sol_old", "sol_recent", "sol_current")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[3] )
points( x=AB[ which(inf_cat%in%c("TRC")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("TRC")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[4] )
points( x=AB[ which(inf_cat%in%c("BRC")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("BRC")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[4] )
points( x=AB[ which(inf_cat%in%c("ARC")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("ARC")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[4] )
points( x=AB[ which(inf_cat%in%c("VBDR")), top_8_final[1]],
y=AB[ which(inf_cat%in%c("VBDR")), top_8_final[2]],
pch=19, cex=point.size, col=region_cols[4] )
## Quantification bounds on both axes (same constants as the boxplot panels).
points(x=c(-1e6, 1e6), y=rep(log(1.95e-5),2), type='l', lty="dashed")
points(x=c(-1e6, 1e6), y=rep(log(0.02),2), type='l', lty="dashed")
points(y=c(-1e6, 1e6), x=rep(log(1.95e-5),2), type='l', lty="dashed")
points(y=c(-1e6, 1e6), x=rep(log(0.02),2), type='l', lty="dashed")
legend(x="bottomright",
fill = region_cols,
border = region_cols,
bg="white", box.col="white",
legend = c("Thailand", "Brazil", "Solomons", "Controls"),
cex=1.25 )
axis(1, at=log(c(0.00001, 0.0001, 0.001, 0.01)), label=c("0.00001", "0.0001", "0.001", "0.01") )
axis(2, at=log(c(0.00001, 0.0001, 0.001, 0.01)), label=c("0.00001", "0.0001", "0.001", "0.01"), las=2, cex.axis=0.9 )
dev.off()
|
cce72864f0331fa137174c7ada98a6a48cf69776
|
cff1be6031223b07c693bdd043ee70af6202fef1
|
/man/AUCell_plot.Rd
|
bd6f290ca2256496ed0c08b2c5dc463145c5a9d7
|
[] |
no_license
|
qibaotu/AUCell
|
9a879f7237c1c06b32fa31481473bf0d1579f157
|
10cc963026610bfb862567daf8a7f047fb9905b4
|
refs/heads/master
| 2021-07-06T08:40:39.103669
| 2017-10-03T13:58:32
| 2017-10-03T13:58:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,913
|
rd
|
AUCell_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aux_AUCell_plot.R
\name{AUCell_plot}
\alias{AUCell_plot}
\title{Plot AUC histogram}
\usage{
AUCell_plot(cellsAUC, aucThr = max(cellsAUC), nBreaks = 100, ...)
}
\arguments{
\item{cellsAUC}{Subset of the object returned by \code{\link{AUCell_calcAUC}}
(i.e. including only the gene-sets to plot)}
\item{aucThr}{AUC value planned to use as threshold
(to make sure the X axis includes it), if any.
Otherwise, the X axis extends to cover only the AUC values plotted.}
\item{nBreaks}{Number of 'bars' to plot (breaks argument for hist function).}
\item{...}{Other arguments to pass to \code{\link{hist}} function.}
}
\value{
List of histogram objects (invisible).
}
\description{
Plots the distribution of AUC across the cells
(for each gene-set) as an histogram.
}
\examples{
# This example is run using a fake expression matrix.
# Therefore, the output will be meaningless.
############# Fake expression matrix #############
set.seed(123)
exprMatrix <- matrix(data=sample(c(rep(0, 5000), sample(1:3, 5000, replace=TRUE))),
nrow=20)
rownames(exprMatrix) <- paste("Gene", 1:20, sep="")
colnames(exprMatrix) <- paste("Cell", 1:500, sep="")
dim(exprMatrix)
##################################################
############# Beginning of the workflow ###########
# Step 1.
cells_rankings <- AUCell_buildRankings(exprMatrix, plotStats=FALSE)
# Step 2.
# (Gene set: 10 random genes)
genes <- sample(rownames(exprMatrix), 10)
geneSets <- list(geneSet1=genes)
# (aucMaxRank=5 to run with this fake example, it will return 'high' AUC values)
cells_AUC <- AUCell_calcAUC(geneSets, cells_rankings, aucMaxRank=5)
##################################################
# Plot histogram:
AUCell_plot(cells_AUC["geneSet1",], nBreaks=10)
}
\seealso{
See the package vignette for examples and more details:
\code{vignette("AUCell")}
}
|
d4937dc1f1ded8cb38b22c771ea07717ff112b08
|
151326bb8bb8252ae4524b30d31df14cef65d0c0
|
/programmingr/corr.R
|
45f77186620f7241df609c8ecef5c5e84aa308c0
|
[] |
no_license
|
sergiosennder/datasciencecoursera
|
bb4df5b42e98112656a9a2eb8baf08137a3cda30
|
747c7255073ea6b02cb2fd47764f9e9ec06ef649
|
refs/heads/master
| 2021-01-10T04:14:57.077573
| 2015-12-27T10:36:02
| 2015-12-27T10:36:02
| 43,112,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
corr.R
|
corr <- function(directory, threshold = 0) {
    ## Correlation between sulfate and nitrate measurements for every monitor
    ## CSV file in `directory` whose number of completely-observed rows is at
    ## least `threshold`.
    ##
    ## Args:
    ##   directory: path containing the per-monitor CSV files.
    ##   threshold: minimum number of complete cases a file must have for its
    ##     correlation to be included (default 0).
    ##
    ## Returns a numeric vector of correlations (empty when no file qualifies).
    files <- list.files(path=directory, pattern="*.csv", full.names=TRUE)
    debugPrint(paste("Threshold = ", threshold, sep=""), "info")
    debugPrint(files, "debug")
    ## Preallocate; `index` counts how many files passed the threshold.
    correlations <- vector("numeric", length(files))
    index <- 0
    for (cur_file in files) {
        file_data <- read.csv(cur_file)
        file_data <- file_data[complete.cases(file_data),]
        num_complete <- nrow(file_data)
        debugPrint(paste("Number of complete cases = ", num_complete, sep=""),
                   "info")
        if (num_complete >= threshold) {
            ## Base-R equivalent of dplyr::select(matches(...)), removing the
            ## implicit (and unloaded) dplyr dependency.
            nitrate_data <- file_data[, grepl("nitrate", names(file_data)), drop = FALSE]
            debugPrint("Nitrate data", "debug")
            debugPrint(nitrate_data, "debug")
            sulfate_data <- file_data[, grepl("sulfate", names(file_data)), drop = FALSE]
            debugPrint("Sulfate data", "debug")
            debugPrint(sulfate_data, "debug")
            index <- index + 1
            correlations[index] <- cor(nitrate_data[[1]], sulfate_data[[1]])
            ## Bug fix: the original logged `correlations[index]` before that
            ## vector had ever been assigned; it is now assigned first.
            debugPrint(paste("Correlation = ", correlations[index], sep=""),
                       "info")
        }
    }
    ## Bug fix: the original `tmp_correlations[1:(index-1)]` ran inside the
    ## loop and, when no file passed, `1:0` wrongly returned the first element.
    ## seq_len(index) yields numeric(0) in that case.
    return (correlations[seq_len(index)])
}
|
352d1b1e44c8f1877220f95965007aded9eac6ab
|
c0876f8573dd87b8032bbd9ce815865367c4b6a0
|
/Teorema del Limite Central.R
|
9b0e5819a1ad292d5198a3837804a804ed4e87a9
|
[] |
no_license
|
celia26/Programaci-n-Actuarial-III
|
1ad9756f3d9131653cce0bae8eac4a548ec437bd
|
d7666641739b26a28e7382722d194304cfdb2d8d
|
refs/heads/master
| 2021-01-17T06:38:15.990569
| 2016-06-10T03:27:55
| 2016-06-10T03:27:55
| 50,951,002
| 0
| 2
| null | 2016-02-06T21:42:36
| 2016-02-02T20:51:28
|
HTML
|
WINDOWS-1250
|
R
| false
| false
| 787
|
r
|
Teorema del Limite Central.R
|
## Central Limit Theorem demo: histograms of single distributions at growing
## sample sizes, then sums and means of many draws.
a <- runif(10,50,100);hist(a)
a <- runif(100,50,100);hist(a)
a <- runif(1000,50,100);hist(a)
a <- runif(10000,50,100);hist(a)
a <- runif(100000,50,100);hist(a)
a <- runif(1000000,50,100);hist(a)
hist(rnorm(10,100,10))
hist(rnorm(100,100,10))
hist(rnorm(1000,100,10))
hist(rnorm(10000,100,10))
hist(rnorm(100000,100,10))
hist(rnorm(1000000,100,10))
hist(rexp(1000000,1))
hist(rexp(1000000,.011))
hist(rgamma(1000000,5,0.5))
## Sums of n exponential draws, repeated n times.
n<-10000
sumas <- vector("numeric",n)
for (i in 1:n){
## NOTE(review): sum(rexp(n),1) adds the literal 1 to the sum of rate-1
## exponentials; rexp(n, 1) was possibly intended — confirm before changing.
sumas[i]<-sum(rexp(n),1)
}
hist(sumas)
# The sum of many random variables produces a normal distribution
n<-1000 # variables per trial
m<-1000 # number of trials
promedio<- sapply(lapply(rep(n,m),runif),mean)
promedio<- sapply(lapply(rep(n,m),runif,max=100,min=50),mean)
hist(promedio)
|
c5cf2c51621f5a2bc3ea3b0087e703f62fa93158
|
999f6296b3102c5374af78e8f19f783db7ae0f22
|
/R/helper_fun.R
|
7488462dab0202a5f9ab7555669b11bcdcc8db0a
|
[] |
no_license
|
felix28dls/ddCt_QPCR_Analysis
|
b052a4beb308e7650cda03f864829e6266c1ca20
|
0539f31edb2b3b506de0a9c2db90a01190fb0e3c
|
refs/heads/master
| 2020-06-16T20:55:48.974964
| 2019-07-07T22:28:42
| 2019-07-07T22:28:42
| 195,700,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,017
|
r
|
helper_fun.R
|
#' Average values by a variable
#'
#' Uses a group_by statement to average values in a data.frame by a variable
#'
#' @param df A data.frame
#' @param group_var A vector whose length equals the number of rows of df
#' @param amount A vector whose length equals the number of rows of df
#' @param tidy A logical, default FALSE. When TRUE returns a tidy data.frame
#'
#' @details Used to average ct or input amounts (or their averages) by a
#' grouping variable; group_var for experimental groups or amount for serial
#' dilutions.
#'
#' @return A data.frame with a column for the grouping variable; group_var or
#' amount and one for each of the original columns. When tidy is TRUE the
#' returns a tidy data.frame with 3 columns; group/amount, gene and average.
#'
#' @examples
#' # using a group_var variabale
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate averages
#' .pcr_average(ct1, group_var = group_var)
#'
#' # calculate averages and return a tidy data.frame
#' .pcr_average(ct1, group_var = group_var, tidy = TRUE)
#'
#' # using a amount variable
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' # add amount variable
#' amount <- amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate averages
#' .pcr_average(ct3, amount = amount)
#'
#' # calculate averages and return a tidy data.frame
#' .pcr_average(ct3, amount = amount, tidy = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate group_by summarise_all
#' @importFrom tidyr gather
#'
#' @keywords internal
.pcr_average <- function(df, group_var, amount, tidy = FALSE) {
  # Fail early with a clear message: previously, calling with neither
  # grouping input fell through to mutate(df, amount = amount) and died
  # with a cryptic "argument 'amount' is missing" error.
  if (missing(group_var) && missing(amount)) {
    stop("One of 'group_var' or 'amount' must be provided.", call. = FALSE)
  }
  if (!missing(group_var)) {
    # group by the experimental-group variable and average every column
    ave <- mutate(df, group = group_var) %>%
      group_by(group) %>%
      summarise_all(mean)
    # when tidy is TRUE return a tidy data.frame
    if (tidy == TRUE) {
      ave <- ave %>%
        gather(gene, average, -group)
    }
  } else {
    # group by the input amount/dilution and average every column
    ave <- mutate(df, amount = amount) %>%
      group_by(amount) %>%
      summarise_all(mean)
    # when tidy is TRUE return a tidy data.frame
    if (tidy == TRUE) {
      ave <- ave %>%
        gather(gene, average, -amount)
    }
  }
  return(ave)
}
#' Normalize values by a column
#'
#' Uses subtraction or division to normalize values in all columns to a certain
#' specified column
#'
#' @inheritParams .pcr_average
#' @param reference_gene A character string of the name of the column
#' corresponding to the reference gene
#' @param mode A character string of the normalization mode to be used. Default
#' is 'subtract'. Other possible modes include 'divide'
#'
#' @details Used to normalize ct or input amounts (or their averages) by a
#' a reference_gene/column
#'
#' @return A data.frame with a column for each of the original columns after
#' subtraction or division to a reference_gene/column which is dropped.
#' The function ignores non numeric columns. When tidy is TRUE it
#' returns a tidy data.frame with the columns: gene and average as
#' well as any non numeric columns such as a grouping variable group/amount.
#'
#' @examples
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # normalize the ct values
#' .pcr_normalize(ct1, reference_gene = 'GAPDH')
#'
#' # normalize by division
#' .pcr_normalize(ct1, reference_gene = 'GAPDH', mode = 'divide')
#'
#' # add grouping variable and average first
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#' ave <- .pcr_average(ct1, group_var = group_var)
#'
#' # normalize by subtraction
#' .pcr_normalize(ave, 'GAPDH')
#'
#' # normalize by division
#' .pcr_normalize(ave, 'GAPDH', mode = 'divide')
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr select mutate_if starts_with
#'
#' @keywords internal
.pcr_normalize <- function(df, reference_gene, mode = 'subtract',
                           tidy = FALSE) {
  # Validate mode up front: an unknown value used to leave `norm`
  # undefined and fail later with "object 'norm' not found".
  if (!mode %in% c('subtract', 'divide')) {
    stop("`mode` must be either 'subtract' or 'divide'.", call. = FALSE)
  }
  # get the reference_gene column and unlist to a plain vector
  ref <- select(df, reference_gene) %>% unlist(use.names = FALSE)
  # NOTE(review): starts_with() drops every column sharing the prefix
  # (e.g. 'GAPDH2' when reference_gene = 'GAPDH') -- confirm intended.
  if (mode == 'subtract') {
    # drop the reference_gene column(s); ignore non numeric columns;
    # subtract the reference values from all remaining columns
    norm <- select(df, -starts_with(reference_gene)) %>%
      mutate_if(is.numeric, function(x) x - ref)
  } else {
    # same, but divide the remaining columns by the reference values
    norm <- select(df, -starts_with(reference_gene)) %>%
      mutate_if(is.numeric, function(x) x / ref)
  }
  # return a tidy data.frame when tidy == TRUE (expects a `group` column)
  if (tidy == TRUE) {
    norm <- gather(norm, gene, normalized, -group)
  }
  return(norm)
}
#' Calibrate values by a row
#'
Uses subtraction or division to calibrate values in all rows to a specified
#' row
#'
#' @inheritParams .pcr_average
#' @inheritParams .pcr_normalize
#' @param reference_group A character string of the the entery in the rows of a
#' grouping variable
#'
#' @details Used to calibrate average ct or input amounts by a reference_group/row
#'
#' @return A data.frame of the same dimensions after subtracting or dividing by
#' a reference_group/row. The function ignores non numeric columns.
#' When tidy is TRUE returns a tidy data.frame with the columns: gene and
#' calibrated as well as any non numeric columns such as a grouping variable
#' group/amount.
#'
#' @examples
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate averages
#' ave <- .pcr_average(ct1, group_var = group_var)
#'
#' # calculate delta ct
#' dct <- .pcr_normalize(ave, 'GAPDH')
#'
#' # calculate delta delta ct
#' .pcr_calibrate(dct, 'brain', tidy = TRUE)
#'
#' # calculate delta delta ct and return a tidy data.frame
#' .pcr_calibrate(dct, 'brain', tidy = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr filter select mutate_if
#' @importFrom tidyr gather
#'
#' @keywords internal
.pcr_calibrate <- function(df, reference_group, mode = 'subtract',
                           tidy = FALSE) {
  # Validate mode up front: an unknown value used to leave `calib`
  # undefined and fail later with "object 'calib' not found".
  if (!mode %in% c('subtract', 'divide')) {
    stop("`mode` must be either 'subtract' or 'divide'.", call. = FALSE)
  }
  # get the row index of the reference group
  ind <- which(df$group == reference_group)
  # A missing (or duplicated) reference group would previously produce a
  # zero-length or recycled x[ind] inside mutate_if and fail obscurely
  # (or silently recycle); fail loudly instead.
  if (length(ind) != 1) {
    stop("`reference_group` must match exactly one entry of df$group.",
         call. = FALSE)
  }
  if (mode == 'subtract') {
    # ignore non numeric columns; subtract the reference row from all rows
    calib <- mutate_if(df, is.numeric, function(x) x - x[ind])
  } else {
    # ignore non numeric columns; divide all rows by the reference row
    calib <- mutate_if(df, is.numeric, function(x) x / x[ind])
  }
  # return a tidy data.frame when tidy == TRUE
  if (tidy == TRUE) {
    calib <- calib %>%
      gather(gene, calibrated, -group)
  }
  return(calib)
}
#' Calculate standard deviation
#'
#' Uses a group_by statement to calculate the standard deviations of values in
a data.frame grouped by a variable
#'
#' @inheritParams .pcr_average
#'
#' @details Used to calculate the standard devaitions of ct after grouping by a
#' group_var for the experimental groups
#'
#' @return A data.frame with a column for the grouping variable; group_var and
#' one for each of the original columns. When tidy is TRUE the
#' returns a tidy data.frame with 3 columns; group/amount, gene and error.
#'
#' @examples
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate standard deviations
#' .pcr_sd(ct1, group_var = group_var)
#'
#' # calculate standard deviations and return a tidy data.frame
#' .pcr_sd(ct1, group_var = group_var, tidy = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate group_by summarise_all
#' @importFrom tidyr gather
#' @importFrom stats sd
#'
#' @keywords internal
.pcr_sd <- function(df, group_var, tidy = FALSE) {
  # Attach the grouping variable, then compute the per-group standard
  # deviation of every remaining column.
  out <- df %>%
    mutate(group = group_var) %>%
    group_by(group) %>%
    summarise_all(sd)
  # Reshape to long format (group / gene / error) on request.
  if (tidy == TRUE) {
    out <- gather(out, gene, error, -group)
  }
  return(out)
}
#' Calculate error terms
#'
#' Uses a specified column as a reference to calculate the error between it
#' and another column.
#'
#' @inheritParams .pcr_normalize
#' @inheritParams .pcr_average
#'
#' @details Used to sum the error of a gene and a reference_gene/column
#'
#' @return A data.frame with a column for each of the original columns after
#' taking the square root of the squared standard deviations of a target gene
#' and a reference_gene/column which is dropped. The function ignores
#' non numeric columns. When tidy is TRUE the returns a tidy data.frame with
#' the columns: gene and error as well as any non numeric columns such as a
#' grouping variable group/amount.
#'
#' @examples
#' # locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate standard deviations
#' sds <- .pcr_sd(ct1, group_var = group_var)
#'
#' # calculate errors
#' .pcr_error(sds, reference_gene = 'GAPDH')
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr select starts_with mutate_if
#' @importFrom tidyr gather
#'
#' @keywords internal
.pcr_error <- function(df, reference_gene, tidy = FALSE) {
  # Pull the reference gene's standard deviations out as a plain vector.
  ref_sd <- unlist(select(df, reference_gene), use.names = FALSE)
  # Drop the reference column, then combine each remaining gene's error
  # with the reference error as sqrt(sd_gene^2 + sd_ref^2); non numeric
  # columns pass through untouched.
  combined <- df %>%
    select(-starts_with(reference_gene)) %>%
    mutate_if(is.numeric, function(s) sqrt(s^2 + ref_sd^2))
  # Reshape to long format (group / gene / error) on request.
  if (tidy == TRUE) {
    combined <- gather(combined, gene, error, -group)
  }
  return(combined)
}
#' Calculates the coefficient of variation
#'
Calculates the coefficient of variation of a gene by dividing standard
#' deviations of each group by their averages
#'
#' @param amounts A data.frame of the calculated input amounts returned by
#' \code{\link{.pcr_amount}}
#' @inheritParams .pcr_average
#'
#' @details Used to calculate the coefficient of variation of the input amounts
#' after grouping by a grouping variable; group_var for experimental groups.
#'
#' @return A data.frame with a column for the grouping variable; group_var and
#' one for each of the original columns. When tidy is TRUE the
#' returns a tidy data.frame with 3 columns; group/amount, gene and error
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # calculate amounts
#' input_amounts <- .pcr_amount(ct1,
#' intercept = intercept,
#' slope = slope)
#'
#' # make grouping variable
#' group <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate cv errors
#' .pcr_cv(input_amounts,
#' group_var = group)
#'
#' @importFrom tidyr gather spread
#' @importFrom dplyr group_by summarise ungroup
#'
#' @keywords internal
.pcr_cv <- function(amounts, group_var, tidy = FALSE) {
  # Coefficient of variation per group: sd / mean of every column.
  coef_var <- amounts %>%
    mutate(group = group_var) %>%
    group_by(group) %>%
    summarise_all(function(v) sd(v) / mean(v))
  # Reshape to long format (group / gene / error) on request.
  if (tidy == TRUE) {
    coef_var <- coef_var %>%
      gather(gene, error, -group)
  }
  return(coef_var)
}
#' Calculate PCR RNA amounts
#'
#'
#' @inheritParams .pcr_average
#' @param intercept A numeric vector of length equals the number of columns of df,
#' one for each gene such as the output of \link{pcr_assess}
#' @param slope A numeric vector of length equals the number of columns of df,
#' one for each gene such as the output of \link{pcr_assess}
#'
#' @details Used to calculate the amount of RNA in a PCR experimental sample using the
#' information provided by the standard curve, namely the slope and the
#' intercept calculated in advance for each gene in a similar experiment.
#'
#' @return A data.frame with the same dimensions as df containing the
#' amount of RNA in each sample in each gene
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr data_frame full_join group_by mutate bind_cols
#' @importFrom tidyr gather
#'
#' @examples
#' # locate and read data
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # calculate amounts
#' .pcr_amount(ct1,
#' intercept = intercept,
#' slope = slope)
#'
#' @keywords internal
.pcr_amount <- function(df, intercept, slope) {
  # make a data.frame of the standard-curve parameters, one row per gene
  curve <- data_frame(intercept,
                      slope,
                      gene = names(df))
  # tidy the ct values: one (gene, ct) pair per row
  ct <- df %>%
    gather(gene, ct)
  # join the curve parameters onto the ct values; the join key is made
  # explicit instead of relying on the implicit natural join (and its
  # "Joining, by = ..." message)
  amounts <- full_join(ct, curve, by = 'gene') %>%
    group_by(gene) %>%
    mutate(amount = 10 ^ ((ct - intercept)/slope))
  # reshape back to one column per gene; pinning the factor levels keeps
  # the original column order of df (a bare split() would otherwise sort
  # the genes alphabetically and reorder the output columns)
  with(amounts, split(amount, factor(gene, levels = names(df)))) %>%
    bind_cols()
}
#' Calculate the linear trend
#'
#' Calculates the linear trend; intercept and slope between two variables
#'
#' @param df A data.frame of raw ct values or the delta ct values calculated
#' by \link{.pcr_normalize}
#' @param amount A numeric vector input amounts/dilutions of legnth equals the
#' number of the rows of df.
#'
#' @details Used to calculate the linear trend; intercept and slope for a line
#' between each column of ct or delta ct values and the log10 input amount
#'
#' @return A data.frame of 4 columns
#' \itemize{
#' \item gene The column names of df
#' \item intercept The intercept of the line
#' \item slope The slope of the line
#' \item r_squared The squared correlation
#' }
#'
#'
#' @importFrom purrr map
#' @importFrom stats cor lm coefficients
#' @importFrom dplyr data_frame bind_rows
#'
#' @examples
#' # locate and read file
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' # make amount/dilution variable
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate trend
#' .pcr_trend(ct3, amount = amount)
#'
#' @keywords internal
.pcr_trend <- function(df, amount) {
  # log-transform once; previously log10(amount) was recomputed twice
  # (cor and lm) for every gene/column
  log_amount <- log10(amount)
  # fit a straight line of ct (or delta ct) against log10(amount) for
  # each column/gene of df
  trend_line <- map(df, function(x) {
    # squared correlation between the observed values and the predictor
    r_squared <- cor(x, log_amount)^2
    # linear model and its coefficients
    ll <- lm(x ~ log_amount)
    coeff <- coefficients(ll)
    # one row of intercept, slope and r squared for this gene
    data_frame(
      intercept = coeff[1],
      slope = coeff[2],
      r_squared = r_squared
    )
  })
  # bind the per-gene rows into one data.frame with a `gene` id column
  trend_line <- bind_rows(trend_line, .id = 'gene')
  return(trend_line)
}
|
0d6941faf87b0d44d11db6ac3887c07b28fb172e
|
a9c77a01da86a3dc3654db0d360419ca0cc86e46
|
/inst/scripts/howto.R
|
21b8dddf3cc39fd5491e7c6c1f1aa6d14b49aaca
|
[] |
no_license
|
federicogiorgi/vulcan
|
268a7cedb7caf638dab9976dfb470ef484f08271
|
6fd15659b041a632f815681770f195c3d5bbcca8
|
refs/heads/master
| 2020-05-24T01:46:13.586290
| 2017-09-27T18:01:17
| 2017-09-27T18:01:17
| 92,324,712
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,931
|
r
|
howto.R
|
# Package maintenance script: regenerate documentation with roxygen2,
# then append import directives to NAMESPACE by hand.
# Building manuals and namespace using roxygen2
library(devtools)
document()
# Remove .Rhistory
unlink(".Rhistory")
unlink("R/.Rhistory")
# # # Force the formatR style
# library(formatR)
# getOption("width",80)
# for(rfile in dir("R",full.names=TRUE)){
#     tidy_source(source=rfile,file=rfile,width.cutoff=40,indent=4)
# }
# Full-package imports.
# NOTE(review): document() typically regenerates NAMESPACE, so the
# appends below re-add these directives after every run -- confirm this
# script is always run end-to-end.
cat("\nimport(zoo)\n",file="NAMESPACE",append=TRUE)
cat("import(ChIPpeakAnno)\n",file="NAMESPACE",append=TRUE)
cat("import(locfit)\n",file="NAMESPACE",append=TRUE)
cat("import(TxDb.Hsapiens.UCSC.hg19.knownGene)\n",file="NAMESPACE",append=TRUE)
# Selective importFrom() directives, one package per line.
cat("importFrom('GenomicRanges','GRanges')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('S4Vectors','Rle')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('gplots','colorpanel')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('caTools','runmean')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('csaw','correlateReads')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('wordcloud','wordlayout')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('DESeq','newCountDataSet','estimateSizeFactors',\n    'estimateDispersions','varianceStabilizingTransformation')\n",
    file="NAMESPACE",append=TRUE)
cat("importFrom('Biobase','exprs')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('viper','rowTtest','msviper','msviperAnnot','ttestNull')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('DiffBind','dba','dba.count')\n",file="NAMESPACE",append=TRUE)
cat("importFrom('graphics', 'abline', 'grid', 'layout', 'legend', 'lines',\n    'par', 'plot', 'points', 'rect', 'text')\n",
    file="NAMESPACE",append=TRUE)
cat("importFrom('stats', 'ks.test', 'pchisq', 'pnorm', 'pt', 'qnorm', 'qt',\n    'rnorm', 'setNames', 'quantile', 'var')\n",
    file="NAMESPACE",append=TRUE)
cat("importFrom('utils', 'read.csv', 'setTxtProgressBar', 'txtProgressBar', 'relist')\n",
    file="NAMESPACE",append=TRUE)
|
c8d52addc78ea17116abba4c95a520949ace697c
|
bfd31a2b282d56ce10ed40f0388174e6782f1498
|
/RScripts_QualityScore/RScript02_Histogram_perFrag.R
|
1887dbde12fdcc81ddfba66c735a8d257189bcf8
|
[] |
no_license
|
snandi/Project_QualityScore
|
72a540663938d6dbc1243181ae2717ad06abe1ad
|
f6542f53d1d770cc5cbd162caf4f146f0b28b488
|
refs/heads/master
| 2020-12-24T15:50:48.725482
| 2016-03-06T02:17:49
| 2016-03-06T02:17:49
| 40,999,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,089
|
r
|
RScript02_Histogram_perFrag.R
|
rm(list=ls(all.names=TRUE))
rm(list=objects(all.names=TRUE))
#dev.off()
########################################################################
## This script reads in the pixel intensity values produced by Chengyue's
## python script and plots histograms. There will be three levels of pixel
## intensity values, one pixel, two pixels and three pixels around the
## backbone of the molecules, after leaving out +/-2 pixels all around
########################################################################
########################################################################
## Load header files and source functions
########################################################################
RegistrationPath <- '~/R_Packages/Registration/R/'
Files <- list.files(path=RegistrationPath, pattern=".R")
for(Script in Files) source(paste(RegistrationPath, Script, sep=''))
PackagesLoaded <- loadPackages()
Packages <- PackagesLoaded$Packages
Packages_Par <- PackagesLoaded$Packages_Par
RScriptPath <- '~/Project_QualityScore/RScripts_QualityScore/'
source(paste(RScriptPath, 'fn_Library_CurveReg.R', sep=''))
source(paste(RScriptPath, 'fn_Library_GC_Content.R', sep=''))
source(paste(RScriptPath, 'fn_Library_Mflorum.R', sep=''))
source(paste(RScriptPath, 'fn_Library_QualityScore.R', sep=''))
RPlotPath <- '~/Project_QualityScore/Plots/'
ProjectPath <- '~/Project_QualityScore/'
RDataPath <- '~/Project_QualityScore/RData/'
DataPath <- '~/Project_QualityScore/Data/'
DataPath.mf <- '/z/Proj/newtongroup/snandi/MF_cap348/'
########################################################################
## Defining some constants and important variables
########################################################################
ConversionFactor <- 209
BasePairInterval <- ConversionFactor
BackbonePixels <- 1
DataPath.mf_Intensities <- paste(DataPath.mf, 'intensities_inca34_', BackbonePixels, 'pixel/', sep='')
DataPath.mf_Quality <- paste(DataPath.mf, 'Project_QualityScore/', sep='')
bp.loc <- fn_load_MF_bploc(ConversionFactor=ConversionFactor)
Filename.Alchunk <- paste(DataPath.mf_Intensities, 'MF_cap348_inca34_cf209_minSize50_minFrag5_alignmentChunks.RData', sep='')
load(Filename.Alchunk)
########################################################################
## Get MoleculeIDs for a fragIndex
########################################################################
FragIndex <- 9
## Get only those molecules that have punctates both, at the beginning and end of the interval
AlChunk.Frag <- subset(AlChunk, refStartIndex == FragIndex & refEndIndex == (FragIndex + 1))
AlChunk.Frag$molID <- as.factor(AlChunk.Frag$molID)
str(AlChunk.Frag)
MoleculeID.Table <- table(AlChunk.Frag$molID)
## Discard moleculeIDs that have more than one fragment aligned to the same reference fragment
MoleculeID.Table[MoleculeID.Table > 1]
MoleculeIDs_MultipleFrag <- names(MoleculeID.Table[MoleculeID.Table > 1])
MoleculeID.Table <- MoleculeID.Table[MoleculeID.Table == 1]
MoleculeIDs <- names(MoleculeID.Table)
########################################################################
## Read in data for a groupNum, frameNum & MoleculeID
########################################################################
# groupNum <- '2433096'
# frameNum <- 25
# MoleculeID <- 99
Count <- 0
CountZero <- 0
MoleculesZero <- c()
AllPixelData <- c()
for(i in 1:length(MoleculeIDs)){
#for(i in 1:5){
MoleculeID <- MoleculeIDs[i]
groupNum <- substr(MoleculeID, start = 1, stop = 7)
MoleculeNum <- as.numeric(substr(MoleculeID, start = 13, stop = 19)) %% (ConversionFactor * 10000)
Folderpath_Quality <- paste(DataPath.mf_Quality, 'refFrag_', FragIndex, '/group1-', groupNum,
'-inca34-outputs/', sep = '')
MoleculeFiles <- try(list.files(path = Folderpath_Quality, pattern = paste('molecule', MoleculeNum, sep='')))
if(length(MoleculeFiles) == 1){
Count <- Count + 1
# print(groupNum)
# print(MoleculeFiles)
Filename <- paste(Folderpath_Quality, MoleculeFiles, sep = '')
Data <- read.table(Filename, sep=' ', header=T, stringsAsFactors=F)
Data <- Data[,1:3]
Xlim <- range(Data[Data>0])
Pixel1 <- subset(Data, intensity1 > 0)[,'intensity1']
Pixel1_Norm <- Pixel1/median(Pixel1)
PixelData <- as.data.frame(cbind(Pixel1=Pixel1, Pixel1_Norm=Pixel1_Norm, MoleculeID=MoleculeID))
PixelData <- within(data=PixelData,{
Pixel1 <- as.numeric(as.vector(Pixel1))
Pixel1_Norm <- as.numeric(as.vector(Pixel1_Norm))
})
AllPixelData <- rbind(AllPixelData, PixelData)
}
if(length(MoleculeFiles) == 0){
CountZero <- CountZero + 1
MoleculesZero <- c(MoleculesZero, MoleculeID)
}
}
Count
CountZero
CountMultipleFrames <- nrow(AlChunk.Frag) - Count
CountMultipleFrames/Count
L <- split(x=AllPixelData, f=AllPixelData$MoleculeID)
# Fit a gamma distribution to one molecule's normalized pixel intensities
# and summarise the fit.
# Args:
#   DataToFit: data.frame for a single molecule; must contain the column
#              named by `Colname` and a MoleculeID column.
#   Colname:   name of the numeric column to fit (default 'Pixel1_Norm').
# Returns c(MoleculeID, shape, rate, KS p-value, max, 95th percentile).
# NOTE: because MoleculeID is character, c() coerces the whole return
# vector to character -- downstream code converts the numeric fields
# back with as.numeric(), so do not change this return type casually.
fn_returnGammaPar <- function(DataToFit, Colname='Pixel1_Norm'){
  DataVectorToFit <- DataToFit[,Colname]
  # first (and, by construction, only) molecule ID in this subset
  MoleculeID <- as.vector(DataToFit$MoleculeID)[1]
  # maximum-likelihood gamma fit (MASS::fitdistr)
  DistFit <- fitdistr(x=DataVectorToFit, densfun='gamma')
  Shape <- DistFit$estimate[['shape']]
  Rate <- DistFit$estimate[['rate']]
  # Kolmogorov-Smirnov goodness-of-fit test against the fitted gamma;
  # rate/shape are forwarded to pgamma via ...
  KSTest <- ks.test(x=DataVectorToFit, y='pgamma', rate=Rate, shape=Shape)
  pValue <- KSTest$p.value
  Max <- max(DataVectorToFit)
  # 95th percentile of the intensities (named vector from quantile())
  q95 <- quantile(x=DataVectorToFit, probs=0.95)
  #return(list(MoleculeID=MoleculeID, Shape=Shape, Rate=Rate))
  return(c(MoleculeID, Shape, Rate, pValue, Max, q95))
}
GammaParameters <- as.data.frame(do.call(what=rbind, lapply(X=L, FUN=fn_returnGammaPar, Colname='Pixel1_Norm')),
stringsAsFactors=FALSE)
colnames(GammaParameters) <- c('MoleculeID', 'Shape', 'Rate', 'pValue', 'Max', 'q95')
GammaParameters <- within(data=GammaParameters,{
MoleculeID <- factor(MoleculeID)
Shape <- as.numeric(Shape)
Rate <- as.numeric(Rate)
pValue <- round(as.numeric(pValue), 12)
Max <- round(as.numeric(Max), 2)
q95 <- round(as.numeric(q95), 2)
})
str(GammaParameters)
# ggplot() + geom_point(aes(x = Shape, y = Rate), data = GammaParameters)
#
# ggplot() + geom_histogram(aes(x = Shape), data = GammaParameters)
#
# ggplot() + geom_point(aes(x = Max, y = pValue), data = GammaParameters)
#
# ggplot() + geom_histogram(aes(x = q95), data = GammaParameters) +
# geom_vline(xintercept=quantile(x=GammaParameters$q95, probs=0.95), col='royalblue1')
#
# ggplot() + geom_point(aes(x = q95, y = pValue), data = GammaParameters, size=3) +
# geom_vline(xintercept=quantile(x=GammaParameters$q95, probs=0.95), col='royalblue1')
MaxPixel1_Norm <- max(subset(GammaParameters, pValue >= 0.1)[,'Max'])
MaxPixel1_Norm
Cuttoff <- min(quantile(x=GammaParameters$q95, probs=0.95), MaxPixel1_Norm)
nrow(subset(GammaParameters, Max<=Cuttoff))
PP <- L[[10]]$Pixel1_Norm
DistFit <- fitdistr(x=PP, densfun='gamma')
pdf.ke <- pdfCluster::kepdf(PP)
Molecule <- as.vector(L[[10]]$MoleculeID[1])
Shape <- subset(GammaParameters, MoleculeID==Molecule)[,'Shape']
Rate <- subset(GammaParameters, MoleculeID==Molecule)[,'Rate']
Discard <- ifelse(test=subset(GammaParameters, MoleculeID==Molecule)[,'Max'] < Cuttoff, yes='Keep', no='Discard')
Maintitle <- paste('Reference Fragment', FragIndex, 'Molecule', Molecule, Discard)
# Diagnostic plot for one molecule: histogram of normalized intensities
# with the empirical epanechnikov density (gray), the fitted gamma (red)
# and the pdfCluster kernel estimate (blue).
# Fix: the original chain ended with a dangling `+` after ggtitle(),
# which pulled the not-yet-defined `Hist1_Dens1` into the expression and
# errored; the `+` is removed so the plot is built and then printed.
Hist1_Dens1 <- ggplot(data = subset(AllPixelData, MoleculeID==Molecule), aes(x = Pixel1_Norm)) +
  geom_histogram(fill='gray60') +
  geom_density(kernel = 'epanechnikov', col = 'gray20', lwd=1) +
  stat_function(fun = dgamma, args=c(shape=Shape, rate=Rate), col='red', size=1) +
  geom_line(aes(x = pdf.ke@x, y = pdf.ke@estimate), col = 'royalblue1', size = 1) +
  ggtitle(label=Maintitle)
Hist1_Dens1
MoleculeIDs.Final <- unique(as.vector(AllPixelData$MoleculeID))
Molecule <- MoleculeIDs.Final[23]
# Plot a histogram + density overlays of one molecule's normalized pixel
# intensities: empirical epanechnikov density (gray), fitted gamma (red)
# and kernel estimate from pdfCluster (blue).
# Args:
#   AllPixelData:    data.frame of all molecules' intensities with
#                    columns MoleculeID and Pixel1_Norm.
#   Molecule:        molecule ID to plot.
#   GammaParameters: data.frame with per-molecule Shape, Rate and Max.
#   Cuttoff:         threshold on Max deciding 'Keep' vs 'Discard'.
# Returns the ggplot object (not printed).
# NOTE(review): relies on the global `FragIndex` for the plot title --
# consider passing it in as an argument.
fn_plotDensities <- function(AllPixelData, Molecule, GammaParameters, Cuttoff){
  PixelData <- as.data.frame(subset(AllPixelData, MoleculeID == Molecule))
  PP <- PixelData$Pixel1_Norm
  # kernel density estimate used for the blue overlay
  pdf.ke <- pdfCluster::kepdf(PP)
  # look up this molecule's fitted gamma parameters once (the original
  # repeated the subset three times; the unused fitdistr() refit is gone)
  MolPars <- subset(GammaParameters, MoleculeID == Molecule)
  Shape <- MolPars[,'Shape']
  Rate <- MolPars[,'Rate']
  Discard <- ifelse(test=MolPars[,'Max'] < Cuttoff, yes='Keep', no='Discard')
  Maintitle <- paste('Reference Fragment', FragIndex, 'Molecule', Molecule, Discard)
  Hist_Dens <- ggplot(data = PixelData, aes(x = Pixel1_Norm)) +
    geom_histogram(fill='gray60') +
    geom_density(kernel = 'epanechnikov', col = 'gray20', lwd=1) +
    stat_function(fun = dgamma, args=c(shape=Shape, rate=Rate), col='red', size=1) +
    geom_line(aes(x = pdf.ke@x, y = pdf.ke@estimate), col = 'royalblue1', size = 1) +
    ggtitle(label=Maintitle) + ylab(label='') + xlab(label='Normalized Pixel intensities')
  return(Hist_Dens)
}
# Write one density plot per molecule to a multi-page PDF.
# NOTE(review): the loop body below duplicates fn_plotDensities() inline
# (the call to it is left commented out) -- consider calling the
# function instead so the two copies cannot drift apart.
Filename.plot <- paste(RPlotPath, 'refFrag', FragIndex, '_DensityPlots.pdf', sep='')
pdf(file=Filename.plot, onefile=TRUE)
for(Molecule in MoleculeIDs.Final){
  # Plot <- fn_plotDensities(AllPixelData=AllPixelData, Molecule=Molecule,
  #                          GammaParameters=GammaParameters, Cuttoff=Cuttoff)
  # try(print(Plot))
  # pixel intensities for this molecule only
  PixelData <- as.data.frame(subset(AllPixelData, MoleculeID == Molecule))
  PP <- PixelData$Pixel1_Norm
  # gamma fit recomputed here but unused (Shape/Rate below come from
  # GammaParameters)
  DistFit <- fitdistr(x=PP, densfun='gamma')
  pdf.ke <- pdfCluster::kepdf(PP)
  Shape <- subset(GammaParameters, MoleculeID == Molecule)[,'Shape']
  Rate <- subset(GammaParameters, MoleculeID == Molecule)[,'Rate']
  Discard <- ifelse(test=subset(GammaParameters, MoleculeID==Molecule)[,'Max'] < Cuttoff, yes='Keep', no='Discard')
  Maintitle <- paste('Reference Fragment', FragIndex, 'Molecule', Molecule, Discard)
  Hist_Dens <- ggplot(data = PixelData, aes(x = Pixel1_Norm)) +
    geom_histogram(fill='gray60') +
    geom_density(kernel = 'epanechnikov', col = 'gray20', lwd=1) +
    stat_function(fun = dgamma, args=c(shape=Shape, rate=Rate), col='red', size=1) +
    geom_line(aes(x = pdf.ke@x, y = pdf.ke@estimate), col = 'royalblue1', size = 1) +
    ggtitle(label=Maintitle) + ylab(label='') + xlab(label='Normalized Pixel intensities')
  # try() so one failing molecule does not abort the whole PDF
  try(print(Hist_Dens))
}
dev.off()
|
3b7f3e9ffad1d2394bd04a4c6bba186a354a44a0
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/gtfs2gps/man/filter_day_period.Rd
|
34bc26a49562574ab3e38e7d037f3dc7780fd44f
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 951
|
rd
|
filter_day_period.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_day_period.R
\name{filter_day_period}
\alias{filter_day_period}
\title{Filter GTFS data within a period of the day}
\usage{
filter_day_period(gtfs, period_start = "00:00:01", period_end = "23:59:59")
}
\arguments{
\item{gtfs}{A GTFS data.}
\item{period_start}{A string of type "hh:mm" indicating start of the period (defaults to "00:00:01")}
\item{period_end}{A string of type "hh:mm" indicating the end of the period (defaults to "23:59:59")}
}
\value{
A filtered GTFS data.
}
\description{
Updates a GTFS feed filtering only the routes, shapes, trips, stops,
agencies and services that are active within a given period of the day.
}
\examples{
# read gtfs data
poa <- read_gtfs(system.file("extdata/poa.zip", package = "gtfs2gps"))
# filter gtfs data
poa_f <- filter_day_period(poa, period_start = "10:00", period_end = "10:20")
}
|
70fee1e1fea77b6101034d7f944ad631360aece3
|
c9dcad8a10c0e8f7571dda9460fa535e105919b9
|
/Experiments/Tibshirani2013/SGL/R/zzPathCalc.r
|
c0c5b0c4196482a59ba31158aea6f1a17b44ed18
|
[] |
no_license
|
adityagc/MS-Research
|
f0fd57420768ac270dd6007fa3ed015b5878e17c
|
5f148103a6205092fb3ae114c8613f7b6e849a84
|
refs/heads/master
| 2020-03-18T16:34:47.512135
| 2018-07-02T19:24:50
| 2018-07-02T19:24:50
| 134,974,224
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,960
|
r
|
zzPathCalc.r
|
## Compute the sequence of regularization parameters (lambdas) for a
## sparse-group lasso (SGL) path.
##
## Arguments:
##   data     - a list: $x (design matrix) and, depending on `type`, $y
##              (response) or $time/$status (survival outcome).
##   index    - group membership vector, one entry per column of data$x.
##   alpha    - mixing parameter between lasso (alpha = 1) and group-lasso
##              (alpha = 0) penalties.
##   min.frac - smallest lambda as a fraction of the largest lambda.
##   nlam     - number of lambdas on the path.
##   type     - "linear", "logit", or "cox"; selects how the working
##              residual `resp` is formed before the lambda search.
##
## Returns: a decreasing, log-spaced vector of nlam lambdas, scaled by
## 1/nrow(X).
betterPathCalc <- function(data, index, alpha = 0.95, min.frac = 0.05, nlam = 20, type = "linear"){
## NOTE(review): the assignments below (reset, step, gamma, inner.iter,
## outer.iter, thresh, outer.thresh) are never used later in this function;
## they look copied from the companion fitting routine — confirm before
## removing.
reset <- 10
step <- 1
gamma <- 0.8
inner.iter <- 1000
outer.iter <- 1000
thresh = 10^(-3)
outer.thresh = thresh
n <- nrow(data$x)
## Linear regression: the working residual is simply the response y.
if(type == "linear"){
X <- data$x
resp <- data$y
n <- nrow(X)
p <- ncol(X)
## Setting up group lasso stuff ##
## Reorder columns so that members of each group are contiguous.
ord <- order(index)
index <- index[ord]
X <- X[,ord]
## unOrd maps reordered columns back to original positions.
## NOTE(review): unOrd is not used below — verify before removing.
unOrd <- match(1:length(ord),ord)
## Coming up with other C++ info ##
## range.group.ind holds the 0-based start index of each group, with the
## total column count appended; group.length is each group's size.
groups <- unique(index)
num.groups <- length(groups)
range.group.ind <- rep(0,(num.groups+1))
for(i in 1:num.groups){
range.group.ind[i] <- min(which(index == groups[i])) - 1
}
range.group.ind[num.groups + 1] <- ncol(X)
group.length <- diff(range.group.ind)
}
## Logistic regression: build a working residual at the null model
## (intercept-only fit at the mean of y).
if(type == "logit"){
X <- data$x
y <- data$y
n <- nrow(X)
p <- ncol(X)
## Setting up group lasso stuff ##
## Same group-contiguous reordering as the linear branch.
ord <- order(index)
index <- index[ord]
X <- X[,ord]
unOrd <- match(1:length(ord),ord)
## Coming up with other C++ info ##
groups <- unique(index)
num.groups <- length(groups)
range.group.ind <- rep(0,(num.groups+1))
for(i in 1:num.groups){
range.group.ind[i] <- min(which(index == groups[i])) - 1
}
range.group.ind[num.groups + 1] <- ncol(X)
group.length <- diff(range.group.ind)
## NOTE(review): beta.naught/beta/beta.is.zero/beta.old/betas/eta/intercepts
## are set up here but not consumed by the lambda computation below —
## likely vestigial from the fitting routine; confirm.
beta.naught <- rep(0,ncol(X))
beta <- beta.naught
beta.is.zero <- rep(1, num.groups)
beta.old <- rep(0, ncol(X))
betas <- matrix(0, nrow = ncol(X), ncol = nlam)
eta <- rep(0,n)
intercepts <- mean(y)
eta = eta + intercepts
m.y <- mean(y)
## Working residual at the null model.
## NOTE(review): the leading term is m.y*m.y*(1-m.y); the usual logistic
## IRLS weight is m.y*(1-m.y) — verify this against the fitting code.
resp <- m.y*m.y*(1-m.y) - (y-m.y)
}
## Cox proportional hazards: order observations by time, build risk sets,
## then obtain the null-model working residual from the compiled "Cox"
## routine.
if(type == "cox"){
covariates <- data$x
n <- nrow(covariates)
p <- ncol(covariates)
time <- data$time
status <- data$status
## Ordering Response and Removing any Censored obs before first death ##
death.order <- order(time)
ordered.time <- sort(time)
X <- covariates[death.order,]
ordered.status <- status[death.order]
## first.blood = position of the first event; earlier censored rows carry
## no information and are dropped.
first.blood <- min(which(ordered.status == 1))
X <- X[first.blood:n,]
ordered.status <- ordered.status[first.blood:n]
ordered.time <- ordered.time[first.blood:n]
death.order <- death.order[first.blood:n]
n <- n-first.blood+1
death.times <- unique(ordered.time[which(ordered.status == 1)]) ## Increasing list of times when someone died (censored ends not included) ##
## Calculating Risk Sets ##
## risk.set[i] = index of the latest death time at or before obs i's time.
risk.set <- rep(0,n)
for(i in 1:n){
risk.set[i] <- max(which(death.times <= ordered.time[i]))
}
## Calculating risk set beginning/ending indices ##
## risk.set.ind[k] = first observation still at risk at death time k; the
## final entry is one past the last observation (sentinel).
risk.set.ind <- rep(0,(length(death.times)+1))
for(i in 1:length(death.times)){
risk.set.ind[i] <- min(which(ordered.time >= death.times[i]))
}
risk.set.ind[length(risk.set.ind)] <- length(ordered.time) + 1
## Calculating number of deaths at each death time ##
## num.deaths counts ties at each distinct death time.
num.deaths <- rep(0,length(death.times))
for(i in 1:length(ordered.time)){
if(ordered.status[i] == 1){
num.deaths[which(death.times == ordered.time[i])] <- num.deaths[which(death.times == ordered.time[i])] + 1
}
}
## Finding death indices and number of deaths ##
death.index <- which(ordered.status == 1)
total.deaths <- length(death.index)
## Setting up group lasso stuff ##
## Same group-contiguous reordering as the other branches.
ord <- order(index)
index <- index[ord]
X <- X[,ord]
unOrd <- match(1:length(ord),ord)
## Coming up with other C++ info ##
groups <- unique(index)
num.groups <- length(groups)
range.group.ind <- rep(0,(num.groups+1))
for(i in 1:num.groups){
range.group.ind[i] <- min(which(index == groups[i])) - 1
}
range.group.ind[num.groups + 1] <- ncol(X)
group.length <- diff(range.group.ind)
## NOTE(review): as in the logit branch, the beta/eta scaffolding below is
## not consumed by the lambda computation — confirm before removing. Note
## `beta` and `beta.is.zero` are even assigned twice.
beta.naught <- rep(0,ncol(X))
beta <- beta.naught
beta.is.zero <- rep(1, num.groups)
beta.old <- rep(0, ncol(X))
beta <- array(0, c(ncol(X),nlam,nlam))
beta.is.zero <- rep(1, num.groups)
eta <- rep(0,n)
## DONE SETTING UP COX MODEL STUFF
## Call the compiled "Cox" routine (beta = 0, eta = 0) to obtain the
## null-model working response (y) and weights; their product is the
## working residual used for the lambda search.
junk1 <- .C("Cox", riskSetInd = as.integer(risk.set.ind), riskSet = as.integer(risk.set), numDeath = as.integer(num.deaths), status = as.integer(ordered.status), ndeath = as.integer(length(death.times)), nrow = as.integer(n), ncol = as.integer(p), beta = as.double(rep(0,p)), eta = as.double(rep(0,n)), y = as.double(rep(0,n)), weights = as.double(rep(0,n)))
resp <- junk1$y * junk1$weights
}
## For each group, find the smallest lambda at which the whole group enters
## the model; the overall path starts at the largest such value.
lambda.max <- rep(0,num.groups)
for(i in 1:num.groups){
ind <- groups[i]
X.fit <- X[,which(index == ind)]
## Absolute correlations of this group's columns with the working
## residual, sorted in decreasing order.
cors <- t(X.fit) %*% resp
ord.cors <- sort(abs(cors), decreasing = TRUE)
if(length(ord.cors) > 1){
## norms[j] = Euclidean norm of the soft-thresholded correlation vector
## when the threshold sits at the (j+1)-th largest correlation; used to
## bracket which correlations are "active" at the entry point.
norms <- rep(0,length(cors)-1)
lam <- ord.cors/alpha
for(j in 1:(length(ord.cors)-1)){
norms[j] <- sqrt(sum((ord.cors[1:j]-ord.cors[j+1])^2))
}
## Decide how many of the top correlations survive soft-thresholding at
## the entry lambda, and the lambda interval (our.range) to search.
if(norms[1] > lam[2] * (1-alpha)*sqrt(group.length[i])){
our.cors <- ord.cors[1]
our.range <- c(ord.cors[2], ord.cors[1])/alpha
}else{
if(norms[length(ord.cors)-1] <= lam[length(ord.cors)] * (1-alpha)*sqrt(group.length[i])){
our.cors <- ord.cors
our.range <- c(0, ord.cors[length(ord.cors)])/alpha
} else{
my.ind <- max(which(norms[-length(norms)] <= lam[2:(length(norms))] * (1-alpha) * sqrt(group.length[i]))) + 1
our.cors <- ord.cors[1:my.ind]
our.range <- c(ord.cors[my.ind+1], ord.cors[my.ind])/alpha
}
}
## Solve the quadratic (in lambda) group-entry condition
## A*lambda^2 + B*lambda + C = 0 and keep the root inside our.range.
nn <- length(our.cors)
A.term <- nn*alpha^2 - (1 - alpha)^2*group.length[i]
B.term <- - 2 * alpha * sum(our.cors)
C.term <- sum(our.cors^2)
lams <- c((-B.term + sqrt(B.term^2 - 4 * A.term * C.term))/(2*A.term), (-B.term - sqrt(B.term^2 - 4 * A.term * C.term))/(2*A.term))
lambda.max[i] <- min(subset(lams, lams > our.range[1] & lams < our.range[2]))
}
## Single-column group: the entry lambda is just the correlation itself.
if(length(ord.cors) == 1){
lambda.max[i] <- ord.cors
}
}
## Log-spaced path from the largest entry lambda down to min.frac of it,
## scaled by the (post-filtering) sample size.
max.lam <- max(lambda.max)
min.lam <- min.frac*max.lam
lambdas <- exp(seq(log(max.lam),log(min.lam), (log(min.lam) - log(max.lam))/(nlam-1)))
return(lambdas/nrow(X))
}
|
211e9d778e0c52d6d5744e1eeb81dea4f2e6c192
|
67f566943ef74373bef603f2a6b0f3ebe914be2b
|
/man/predict.iMqr.Rd
|
0a8404e747ad6f581d61e4ab2a8c89d6eafa1f6c
|
[] |
no_license
|
cran/Mqrcm
|
78aac457f7fa191e73c7af81cac49c1ca1cd1e89
|
7776ed3d279c94cf2cc40dd1a72c540ad6184d6c
|
refs/heads/master
| 2021-06-16T23:40:08.883883
| 2021-02-02T02:10:06
| 2021-02-02T02:10:06
| 145,909,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,907
|
rd
|
predict.iMqr.Rd
|
\name{predict.iMqr}
\alias{predict.iMqr}
\title{
Prediction After M-Quantile Regression Coefficients Modeling
}
\description{
Predictions from an object of class \dQuote{\code{iMqr}}.
}
\usage{
\method{predict}{iMqr}(object, type = c("beta", "CDF", "QF", "sim"), newdata, p, se = TRUE, \ldots)
}
\arguments{
\item{object}{
an object of class \dQuote{\code{iMqr}}, the result of a call to \code{\link{iMqr}}.
}
\item{type}{
a character string specifying the type of prediction. See \sQuote{Details}.
}
\item{newdata}{
an optional data frame in which to look for variables with which to predict.
If omitted, the data are used. For \kbd{type = "CDF"}, it must include the response variable.
Ignored if \kbd{type = "beta"}.
}
\item{p}{
a numeric vector indicating the order(s) of the quantile to predict. Only used if
\kbd{type = "beta"} or \kbd{type = "QF"}.
}
\item{se}{
logical. If \kbd{TRUE} (the default), standard errors of the prediction will be computed. Only used if \kbd{type = "beta"} or \kbd{type = "QF"}.
}
\item{\ldots}{for future methods.}
}
\details{
Using \code{\link{iMqr}}, M-quantile regression coefficients
\eqn{\beta(p)} are modeled as parametric functions of \eqn{p}, the order of the quantile.
This implies that the model parameter is \emph{not} \eqn{\beta(p)} itself.
The function \command{predict.iMqr} permits computing \eqn{\beta(p)} and other
quantities of interest, as detailed below.
\itemize{
\item if \kbd{type = "beta"} (the default), \eqn{\beta(p)} is returned at
the supplied value(s) of \kbd{p}. If \kbd{p} is missing, a default \kbd{p = (0.01, ..., 0.99)} is used.
\item if \kbd{type = "CDF"}, the value of the fitted \acronym{CDF} (cumulative distribution function)
and \acronym{PDF} (probability density function) are computed. The \acronym{CDF} value should be
interpreted as the order of the M-quantile that corresponds to the observed \code{y} values,
while the \acronym{PDF} is just the first derivative of the \acronym{CDF}.
\item if \kbd{type = "QF"}, the fitted values \eqn{x'\beta(p)}, corresponding to the
conditional M-quantile function, are computed at the supplied values of \kbd{p}.
\item if \kbd{type = "sim"}, data are simulated from the fitted model.
To simulate the data, the fitted conditional M-quantile function is computed
at randomly generated \kbd{p} following a Uniform(0,1) distribution. CAUTION: this generates
data assuming that the model describes the \emph{quantile} function, while in practice
it describes M-quantiles.
}
}
\value{
\itemize{
\item if \kbd{type = "beta"} a list with one item for each covariate in the model.
Each element of the list is a data frame with columns (\kbd{p, beta, se, low, up}) reporting \eqn{\beta(p)}, its estimated standard error, and the corresponding 95\% confidence interval. If \kbd{se = FALSE}, the last three columns are not computed.
\item if \kbd{type = "CDF"}, a two-columns data frame \kbd{(CDF,PDF)}.
\item if \kbd{type = "QF"} and \kbd{se = FALSE}, a data frame with one row
for each observation, and one column for each value of \kbd{p}. If \kbd{se = TRUE},
a list of two data frames, \kbd{fit} (predictions) and \kbd{se.fit} (standard errors).
\item if \kbd{type = "sim"}, a vector of simulated data.
}}
\author{
Paolo Frumento \email{paolo.frumento@unipi.it}
}
\note{
Prediction may generate quantile crossing
if the support of the new covariates values supplied in \code{newdata}
is different from that of the observed data.
}
\seealso{
\code{\link{iMqr}}, for model fitting; \code{\link{summary.iMqr}} and \code{\link{plot.iMqr}},
for summarizing and plotting \code{iMqr} objects.
}
\examples{
# using simulated data
n <- 250
x <- runif(n)
y <- rlogis(n, 1 + x, 1 + x)
# true quantile function: Q(p | x) = beta0(p) + beta1(p)*x, with
# beta0(p) = beta1(p) = 1 + log(p/(1 - p))
model <- iMqr(y ~ x, formula.p = ~ I(log(p)) + I(log(1 - p)))
# (fit asymmetric logistic distribution)
# predict beta(0.25), beta(0.5), beta(0.75)
predict(model, type = "beta", p = c(0.25,0.5, 0.75))
# predict the CDF and the PDF at new values of x and y
predict(model, type = "CDF", newdata = data.frame(x = c(.1,.2,.3), y = c(1,2,3)))
# computes the quantile function at new x, for p = (0.25,0.5,0.75)
predict(model, type = "QF", p = c(0.25,0.5,0.75), newdata = data.frame(x = c(.1,.2,.3)))
# simulate data from the fitted model
ysim <- predict(model, type = "sim") # 'newdata' can be supplied
# NOTE: data are generated using the fitted M-quantile function as if
# it was a quantile function. This means that the simulated data will
# have quantiles (and not M-quantiles) described by the fitted model.
# There is no easy way to generate data with a desired M-quantile function.
}
\keyword{methods}
|
9a857b2de27f38a1bc38d55ec3cb9e1c672322f9
|
f5fe3cd40d9448d390b07254bb0d6f65729fe2f3
|
/R/qmerge.R
|
d64609daede8c9f9b640e2f8466a60deed682bc3
|
[] |
no_license
|
tsufz/RMassScreening
|
da664d80cbd6995cde847e7de96d45f1cfd67ceb
|
de766ed6faa59a3a4e10f7416f6090990cf5eeca
|
refs/heads/master
| 2022-12-01T16:53:07.069549
| 2018-04-23T15:52:39
| 2018-04-23T15:52:39
| 78,196,650
| 0
| 0
| null | 2017-01-06T10:16:09
| 2017-01-06T10:16:08
| null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
qmerge.R
|
# Merge function which deals with simple tables (i.e. they have 0-1 y datasets
# per x dataset and only one column to merge by.)
#
#' Fast single-key merge for simple tables
#'
#' A lightweight alternative to \code{merge()} for the case where each key in
#' \code{x} matches at most one row in \code{y} and the join uses exactly one
#' column. Unlike \code{merge()}, the row order of \code{x} is preserved.
#'
#' @param x Left data frame; all of its columns are kept.
#' @param y Right data frame. If a key occurs more than once in \code{y},
#'   only its first occurrence is used (\code{match()} semantics).
#' @param by Name of the key column shared by both tables.
#' @param by.x,by.y Key column names in \code{x} and \code{y}, if they differ.
#' @param all.x If \code{TRUE}, rows of \code{x} without a match in \code{y}
#'   are kept (their \code{y} columns become \code{NA}); if \code{FALSE}
#'   (default), such rows are dropped.
#' @param suffixes Length-2 character vector appended to column names present
#'   in both tables; the key column of \code{x} always keeps its name.
#' @return A data frame with the columns of \code{x} followed by the matched,
#'   non-key columns of \code{y}.
#' @export
qmerge <- function(x, y, by = NA, by.x = by, by.y = by, all.x = FALSE, suffixes = c(".x",".y"))
{
  # For each row of x, the index of its (first) matching row in y; NA if none.
  x.in.y <- match(x[, by.x], y[, by.y])
  # Locate the key columns before any renaming happens.
  x.cols <- colnames(x)
  y.cols <- colnames(y)
  x.indexcol <- match(by.x, x.cols)
  y.indexcol <- match(by.y, y.cols)
  # Suffix every column name shared between the two tables ...
  x.cols.edit <- which(x.cols %in% y.cols)
  y.cols.edit <- which(y.cols %in% x.cols)
  x.cols[x.cols.edit] <- paste0(x.cols[x.cols.edit], suffixes[[1]])
  y.cols[y.cols.edit] <- paste0(y.cols[y.cols.edit], suffixes[[2]])
  # ... but restore the original name of x's key column.
  x.cols[x.indexcol] <- by.x
  colnames(x) <- x.cols
  colnames(y) <- y.cols
  # Pull the matched y rows (unmatched x rows yield all-NA y rows), then drop
  # y's key column. drop = FALSE is essential on the column removal: without
  # it a single remaining y column collapses to a bare vector and cbind()
  # below names it "y.matched" instead of the real column name (bug fix).
  y.matched <- y[x.in.y, , drop = FALSE]
  y.matched <- y.matched[, -y.indexcol, drop = FALSE]
  # Bind and, unless all.x, discard the rows of x that had no match.
  x <- cbind(x, y.matched)
  if (!all.x)
    x <- x[!is.na(x.in.y), , drop = FALSE]
  x
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.