blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2855376f043d8d8e1e27ad3bfd48c6b8b4ce64bd
|
04a5bb75750cb0da7be0234bb23225e7a6f5f63b
|
/pheatmap-conting-table.R
|
00ab0261eec820fcfa9adb2ac09371843915015b
|
[] |
no_license
|
BioRRW/Random-R-Scripts-heatmaps-and-clustering
|
1b353f76217116cea0d2b8b0f7820be4a9c9cd84
|
f6858006e33dd0a73d46e554a3c21be984638495
|
refs/heads/main
| 2023-01-13T21:30:35.064561
| 2020-11-17T18:09:21
| 2020-11-17T18:09:21
| 313,686,270
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
pheatmap-conting-table.R
|
### Reed Woyda
# reedwoyda@gmail.com
# 11/17/2020
# Script to make pheatmaps with metadata
#
# Reads a gene presence/absence contingency table and an isolate metadata
# table, then draws a pheatmap with per-isolate row annotations.
library(pheatmap)
library(viridis)
# data must be in the format: rownames = isolate, colnames = genes
# metadata must be in the format: rownames = isolates (*have to be in same order as data rownames), colnames = metadata categories
data <- as.matrix(read.csv("example_contingency_table.csv", row.names = 1))
metadata <- as.data.frame(read.csv("metadata_example.csv", row.names = 1))
# --- Leftover exploratory code (kept commented-out for reference): ---
# converts raw cell values to 0/1 presence/absence...
# for(i in 1:length(colnames(data))){
# for(j in 1:length(rownames(data))){
# if(data[j,i] == ""){
# data[j,i] <- 0
# }
# else{
# data[j,i] <- 1
# }
#
# }
# }
#data <- as.data.frame(data)
# str(data)
# sapply(data[1:794, 1:252], as.integer)
# str(data)
# class(data) <- "numeric"
# ...and drops columns whose values are all identical:
# data.t.copy <- as.data.frame(data.t)
# allzero = c()
# j=1
# for(i in 5260:1){
# if(dim(table(data.t[,i])) != 1)
# {
# print(i)
# allzero[j] <- i
# j = j + 1
# data.t.copy = subset(data.t.copy, select = i)
# }
#
# }
#rownames(data.t) <- rownames(metadata)
# Rename the 3rd metadata column for a nicer annotation legend title.
# NOTE(review): assumes the metadata file has at least 3 columns -- confirm.
colnames(metadata)[3] <- "Isolation Source"
#colnames(metadata)[2] <- "Combined Plasmids"
### can uncomment to get an idea of what the heatmap will look like
#heatmap(data)
#pheatmap(data)
# change legend breaks as you need. ie if you have continuous variables, you may want to change, legend_labels = c("0", "1"), to include more values
# Draw the annotated heatmap; legend breaks/labels assume 0/1 data values.
pheatmap(
mat = data,
color = inferno(10),
border_color = NA,
show_colnames = FALSE,
show_rownames = FALSE,
drop_levels = FALSE,
fontsize = 14,
annotation_row = metadata,
main = "Virulence Finder Database Presence/Absence Heatmap",
legend_breaks = c(0,1),
legend_labels = c("0", "1")
)
|
105f566395a6f0d75884ca7f3c2c109ca485952d
|
0a4cc2bafe6fb3396ac9c07dc1e382a8a897a2d5
|
/misc/SW_WA_2021_2024/bunbury_busselton/analysis/jatwc_to_inundation/make_vrt.R
|
0af2f9eb84de03673392f23a1b764dc2c27ab824
|
[
"BSD-3-Clause"
] |
permissive
|
GeoscienceAustralia/ptha
|
240e360ff9c33cbdfa6033115841035c39e7a85f
|
124d0caa76ed143d87fa0dfe51434d9500268a5a
|
refs/heads/master
| 2023-08-31T12:00:57.055692
| 2023-08-31T06:05:18
| 2023-08-31T06:05:18
| 39,749,535
| 26
| 8
|
BSD-3-Clause
| 2023-08-29T04:13:20
| 2015-07-27T01:44:11
|
R
|
UTF-8
|
R
| false
| false
| 248
|
r
|
make_vrt.R
|
make_vrt <- function(all_files){
    # Build a GDAL virtual raster (VRT) mosaicking all input rasters at the
    # highest available resolution.
    #
    # all_files: character vector of raster file paths readable by GDAL.
    # Returns:   path to the temporary .vrt file that was created.
    # Requires the 'gdalbuildvrt' command-line tool to be on the PATH.
    temp_file = paste0(tempfile(), '.vrt')
    # BUG FIX: paths were interpolated unquoted into the shell command, so
    # any filename containing spaces or shell metacharacters would break or
    # alter the command. Quote every path with shQuote().
    gdal_command = paste('gdalbuildvrt -resolution highest',
        shQuote(temp_file),
        paste(shQuote(all_files), collapse = " "))
    system(gdal_command, intern = TRUE)
    return(temp_file)
}
|
ef2ce5c7256092a9c5725cb00a90c37d400705c5
|
56679529fa4da13ada1ae93dd4979ae6dc46f40c
|
/FIG_2/summary_plot.R
|
be23368126bd60ccb04d320eb63f0c426f468356
|
[] |
no_license
|
marzuf/MANUSCRIPT_FIGURES
|
da3c1ef2d6b435923e4eae8687ae25dcaa902861
|
80bf0ce87f54c10e3cbd18fd10e9791f34b5174a
|
refs/heads/master
| 2023-05-05T21:09:19.238311
| 2021-05-15T14:45:43
| 2021-05-15T14:45:43
| 259,663,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,688
|
r
|
summary_plot.R
|
# Rscript summary_plot.R
#
# Builds per-dataset counts of significant TADs and how many of them contain
# significant / top-ranked DE genes (manuscript Fig. 2D / Supp. Fig. 2C).
# Relies on objects loaded from ../settings.R: runFolder, geneSignifThresh,
# tadSignifThresh, hicds_names, exprds_names, my_box_theme, ...
require(doMC)
require(foreach)
require(ggplot2)
require(ggsci)
require(colorRamps)
require(reshape2)
require(ggpubr)
setDir <- ""
registerDoMC(50)
plotType <- "svg"
source("../../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
source("../settings.R")
settingFolder <- file.path(runFolder, "PIPELINE", "INPUT_FILES")
myWidthGG <- 9
myHeightGG <- 9
# geneSignifThresh and tadSignifThresh loaded from settings.R
nTopGenes <- 100
# TAD-level results: one row per TAD with its combined adjusted p-value.
all_dt2 <- get(load(file.path(runFolder, "CREATE_FINAL_TABLE", "all_result_dt.Rdata")))
signif_dt2 <- all_dt2[all_dt2$adjPvalComb <= tadSignifThresh,]
signif_dt2$tad_id <- file.path(signif_dt2$hicds, signif_dt2$exprds, signif_dt2$region)
signif_dt2$dataset <- file.path(signif_dt2$hicds, signif_dt2$exprds)
signif_dt2 <- unique(signif_dt2)
# nSignifTADs = number of significant TADs per dataset.
nSignif_dt <- aggregate(tad_id~dataset, data=signif_dt2, FUN=length)
colnames(nSignif_dt)[colnames(nSignif_dt) == "tad_id"] <- "nSignifTADs"
# Gene-level results: one row per gene with its TAD p-value and gene stats.
all_dt <- get(load(file.path(runFolder, "GENE_RANK_TAD_RANK", "all_gene_tad_signif_dt.Rdata")))
all_dt$tad_id <- file.path(all_dt$hicds, all_dt$exprds, all_dt$region)
all_dt$dataset <- file.path(all_dt$hicds, all_dt$exprds)
tadSignif_dt <- all_dt[all_dt$tad_adjCombPval <= tadSignifThresh,]
# Significant TADs that contain at least one significant gene.
tadSignif_geneSignif_dt <- all_dt[all_dt$tad_adjCombPval <= tadSignifThresh & all_dt$adj.P.Val <= geneSignifThresh,]
geneSignif_nSignif_dt <- aggregate(tad_id~dataset, data=tadSignif_geneSignif_dt, FUN=function(x)length(unique(x)))
colnames(geneSignif_nSignif_dt)[colnames(geneSignif_nSignif_dt) == "tad_id"] <- "nSignifTADs_withSignifGenes"
# Significant TADs that contain at least one top-nTopGenes-ranked gene.
tadSignif_geneTop_dt <- all_dt[all_dt$tad_adjCombPval <= tadSignifThresh & all_dt$gene_rank <= nTopGenes,]
geneTop_nSignif_dt <- aggregate(tad_id~dataset, data=tadSignif_geneTop_dt, FUN=function(x)length(unique(x)))
colnames(geneTop_nSignif_dt)[colnames(geneTop_nSignif_dt) == "tad_id"] <- "nSignifTADs_withTopGenes"
# One row per dataset with all counts; datasets absent from a table get 0.
count_dt <- merge(nSignif_dt, merge(geneSignif_nSignif_dt, geneTop_nSignif_dt, by="dataset", all=T),by="dataset", all=T)
count_dt[is.na(count_dt)] <- 0
count_dt$nSignifTADs_noSignifGenes <- count_dt$nSignifTADs-count_dt$nSignifTADs_withSignifGenes
count_dt$nSignifTADs_noTopGenes <- count_dt$nSignifTADs-count_dt$nSignifTADs_withTopGenes
# Both input tables must agree on which TADs are significant.
stopifnot(setequal(signif_dt2$tad_id, tadSignif_dt$tad_id))
outFolder <- "SUMMARY_PLOT"
dir.create(outFolder, recursive = TRUE)
buildData <- FALSE
# Order datasets by total # significant TADs -- this sets the bar order.
count_dt <- count_dt[order(count_dt$nSignifTADs),]
ds_order <- as.character(count_dt$dataset)
count_dt_s <-count_dt
########3
# Sanity check: recompute every per-dataset count independently, directly
# from all_dt with by(), and verify the result matches count_dt built above.
check_dt <- do.call(rbind, by(all_dt, all_dt$dataset, function(x) {
nSignifTADs <- length(unique(x$region[x$tad_adjCombPval<=tadSignifThresh] ))
nSignifTADs_withSignifGenes <- length(unique(x$region[x$tad_adjCombPval<=tadSignifThresh & x$adj.P.Val <= geneSignifThresh] ))
nSignifTADs_withTopGenes <- length(unique(x$region[x$tad_adjCombPval<=tadSignifThresh & x$gene_rank <= nTopGenes] ))
nSignifTADs_noTopGenes <- length(setdiff(x$region[x$tad_adjCombPval<=tadSignifThresh],
x$region[x$tad_adjCombPval<=tadSignifThresh & x$gene_rank <= nTopGenes]))
nSignifTADs_noSignifGenes<- length(setdiff(x$region[x$tad_adjCombPval<=tadSignifThresh],
x$region[x$tad_adjCombPval<=tadSignifThresh & x$adj.P.Val <= geneSignifThresh]))
data.frame(
dataset=unique(x$dataset),
nSignifTADs=nSignifTADs ,
nSignifTADs_withSignifGenes=nSignifTADs_withSignifGenes,
nSignifTADs_withTopGenes=nSignifTADs_withTopGenes,
nSignifTADs_noSignifGenes=nSignifTADs_noSignifGenes,
nSignifTADs_noTopGenes=nSignifTADs_noTopGenes,
stringsAsFactors = FALSE
)
}))
rownames(check_dt) <- NULL
# Align row order of both tables before comparing them.
check_dt <- check_dt[order(as.character(check_dt$dataset)),]
count_dt <- count_dt[order(as.character(count_dt$dataset)),]
stopifnot(all.equal(check_dt, count_dt))
########
# Restore the nSignifTADs-based ordering saved before the check.
count_dt <-count_dt_s
# Map dataset folder names to display names (hicds_names / exprds_names come
# from settings.R).
count_dt$dataset_name <- file.path(hicds_names[dirname(count_dt$dataset)],
                                   exprds_names[basename(count_dt$dataset)])
# Keep the display order (datasets were sorted by # significant TADs above).
dsName_order <- as.character(count_dt$dataset_name)
count_dt$dataset <- NULL
# Long format for the ggplot stacked bars.
m_count_dt <- melt(count_dt, id="dataset_name")
m_count_dt$dataset_name <- factor(m_count_dt$dataset_name, levels = dsName_order)
stopifnot(!is.na(m_count_dt$dataset_name))
max_y <- max(count_dt$nSignifTADs)
# Bar colours (hard-coded from the nord::victory_bonds palette).
# Cleaned up: the original assigned these twice (once with a stray trailing
# space in the hex code) and defined an unused palette name.
withsignif <- "#E19600FF"
nosignif <- "#193264FF"
plot_names <- c("nSignifTADs_withSignifGenes", "nSignifTADs_noSignifGenes")
my_cols <- setNames(c(withsignif, nosignif), plot_names)
my_cols_names <- setNames(c("with signif. genes", "without any signif. genes"), plot_names)
plotTit <- paste0("Differentially activated TADs and signif. genes")
subTit <- paste0("gene adj. p-val <= ", geneSignifThresh, "; TAD adj. p-val <= ", tadSignifThresh)
# Axis layout constants, shared with add_axes().
break_step <- 5
samp_y_start <- 70
samp_axis_offset <- 30
axis_lim <- 65
# One row per dataset with its sample sizes, read from the pipeline setting
# files (each setting file defines sample1_file/sample2_file/cond1/cond2).
text_label_dt <- do.call(rbind, lapply(ds_order, function(x) {
  settingFile <- file.path(settingFolder, dirname(x), paste0("run_settings_", basename(x), ".R"))
  stopifnot(file.exists(settingFile))
  source(settingFile)
  samp1 <- get(load(file.path(setDir, sample1_file)))
  samp2 <- get(load(file.path(setDir, sample2_file)))
  data.frame(
    dataset=x,
    n_samp1=length(samp1),
    n_samp2=length(samp2),
    cond1=cond1,
    cond2=cond2,
    stringsAsFactors = FALSE
  )
}))
text_label_dt$dataset <- factor(text_label_dt$dataset, levels=ds_order)
stopifnot(!is.na(text_label_dt$dataset))
# Position of the "(n1 vs. n2)" sample-size labels (the original assigned
# y_pos twice; only the samp_y_start value was ever used).
text_label_dt$y_pos <- samp_y_start
text_label_dt$x_pos <- as.numeric(text_label_dt$dataset)
text_label_dt$samp_lab <- paste0("(", text_label_dt$n_samp1, " vs. ", text_label_dt$n_samp2, ")")
# Add the per-bar "(n1 vs. n2)" sample-size labels and the customised y axis
# to a stacked barplot p. Relies on globals defined in this script:
# text_label_dt, break_step, samp_y_start, axis_lim, max_y, samp_axis_offset
# and with_samp_theme (defined after this function, but before it is called).
# Because the plots use coord_flip(), "y" here is the count axis.
add_axes <- function(p) {
return(
p + geom_text(data=text_label_dt, aes(x=x_pos, y= y_pos, label=samp_lab), inherit.aes = FALSE, hjust=0) +
scale_y_continuous(
breaks=seq(from=break_step, to=samp_y_start+break_step, by=break_step),
# numeric tick labels up to axis_lim, then a header for the sample column
labels = c(seq(from=break_step, to=axis_lim, by=break_step),"", "\t\t(# samp.)"),
# breaks=seq(from=5, to=axis_lim, by=5),
expand=c(0,0),
limits=c(0, max_y+ samp_axis_offset)) +
with_samp_theme +
# geom_hline(yintercept = seq(from=break_step, axis_lim, by=break_step), color="darkgrey") +
# manually drawn axis segment stopping at axis_lim (real axis line blanked)
geom_segment(x= 0.5, xend=0.5, yend=axis_lim, y=0, color="darkgrey")
)
}
# Theme for the base plot: keep the count-axis grid, drop the dataset-axis grid.
no_samp_theme <- theme(
plot.subtitle = element_text(hjust=0.5, face="italic"),
panel.grid.major.y =element_blank(),
panel.grid.major.x =element_line(color="darkgrey"),
panel.grid.minor.x =element_line(color="darkgrey"),
legend.position = "top",
legend.text = element_text(size=14),
axis.title.x = element_text(),
axis.text.y = element_text(hjust=1, size=8),
axis.line.x = element_line(colour="darkgrey"),
axis.title.y = element_blank()
)
# Panel 1: significant TADs split by whether they contain significant genes.
p_signif <- ggplot(m_count_dt[m_count_dt$variable %in% c(plot_names),],
# aes(x=dataset, y = value, fill=variable))+
aes(x=dataset_name, y = value, fill=variable))+
geom_hline(yintercept = seq(from=break_step, axis_lim, by=break_step), color="darkgrey") +
ggtitle(plotTit, subtitle = paste0(subTit))+
geom_bar(stat="identity", position="stack") +
scale_fill_manual(values=my_cols, labels=my_cols_names)+
# labs(fill = "Signif. TADs:", x = "", y="# of TADs") +
labs(fill = "", x = "", y="# of TADs") +
# scale_x_discrete(labels=function(x) gsub("/", "\n", x)) +
scale_x_discrete(labels=function(x) gsub("/", " - ", x)) +
# scale_y_continuous(breaks=seq(from=10, 70, by=5), expand=c(0,0))+
coord_flip()+
my_box_theme +
no_samp_theme
# Overrides applied by add_axes() once the axis is drawn manually.
with_samp_theme <- theme(
plot.subtitle = element_text(hjust=0.5, face="italic"),
axis.line.x =element_blank(),
panel.grid.major.y =element_blank(),
panel.grid.major.x =element_blank(),
panel.grid.minor.x =element_blank()
)
p_signif2 <- add_axes(p_signif)
# + geom_text(data=text_label_dt, aes(x=x_pos, y= y_pos, label=samp_lab), inherit.aes = FALSE, hjust=0) +
# scale_y_continuous(breaks=seq(from=5, 65, by=5), expand=c(0,0),
# limits=c(0, max_y + samp_axis_offset)) +
# with_samp_theme +
# geom_hline(yintercept = seq(from=5, 65, by=5), color="darkgrey")
# p_signif2 <- p_signif2+geom_segment(x= 0.5, xend=0.5, yend=65, y=0, color="darkgrey")
outFile <- file.path(outFolder, paste0("summary_plot_signifTADs_signifGenes.", plotType))
ggsave(p_signif2, filename = outFile, height=myHeightGG, width=myWidthGG)
cat(paste0("... written: ", outFile, "\n"))
##############################################################
##############################################################
# Panel 2: same layout, but significant TADs split by whether they contain
# top-nTopGenes-ranked DE genes.
plot_names <- c("nSignifTADs_withTopGenes", "nSignifTADs_noTopGenes")
my_cols <- setNames(c(withsignif, nosignif), plot_names)
my_cols_names <- setNames(c("with top genes", "without any top genes"), plot_names)
plotTit <- ""
subTit <- ""
plotTit <- paste0("Differentially activated TADs and top DE genes")
subTit <- paste0("gene rank <= ", nTopGenes, "; TAD adj. p-val <= ", tadSignifThresh)
p_signif <- ggplot(m_count_dt[m_count_dt$variable %in% c(plot_names),],
# aes(x=dataset, y = value, fill=variable))+
aes(x=dataset_name, y = value, fill=variable))+
geom_hline(yintercept = seq(from=break_step, axis_lim, by=break_step), color="darkgrey") +
ggtitle(plotTit, subtitle = paste0(subTit))+
geom_bar(stat="identity", position="stack") +
scale_fill_manual(values=my_cols, labels=my_cols_names)+
# labs(fill = "Signif. TADs:", x = "", y="# of TADs") +
labs(fill = "", x = "", y="# of TADs") +
# scale_x_discrete(labels=function(x) gsub("/", "\n", x)) +
scale_x_discrete(labels=function(x) gsub("/", " - ", x)) +
# scale_y_continuous(breaks=seq(from=10, 70, by=5), expand=c(0,0))+
coord_flip()+
my_box_theme +
no_samp_theme
p_signif2 <- add_axes(p_signif)
outFile <- file.path(outFolder, paste0("summary_plot_signifTADs_topGenes.", plotType))
ggsave(p_signif2, filename = outFile, height=myHeightGG, width=myWidthGG)
cat(paste0("... written: ", outFile, "\n"))
#####################
# Save the long-format count table used for both panels.
signifTADs_signifGenes_topGenes_dt <- m_count_dt
saveFile <- file.path(outFolder, "fig2D_supp_fig2C_signifTADs_signifGenes_topGenes_dt.Rdata")
save(signifTADs_signifGenes_topGenes_dt, file=saveFile, version=2)
cat(paste0("... written:" , saveFile, "\n"))
#> wide_dt =reshape(signifTADs_signifGenes_topGenes_dt, direction="wide", timevar="variable", idvar="dataset_name")
#> save(wide_dt, file="SUMMARY_PLOT/wide_dt_fig2D_supp_fig2C_signifTADs_signifGenes_topGenes_dt.Rdata", version=2
#> stopifnot(wide_dt$nSignifTADs == wide_dt$nSignifTADs_withSignifGenes + wide_dt$nSignifTADs_noSignifGenes)
#> range(wide_dt$nSignifTADs_withTopGenes/wide_dt$nSignifTADs)
#[1] 0.0 0.6
#> mean(wide_dt$nSignifTADs_withTopGenes/wide_dt$nSignifTADs)
#[1] 0.2278946
|
edf9db1ca4beb7819d3c29ceff2095806b12756f
|
1945b47177455e900baae351c1179197e0e4078d
|
/man/NEONMICROBE_DIR_SOIL.Rd
|
75e50c39ef3eb306761552b3dd22b4d81a791a33
|
[] |
no_license
|
naithanilab/neonMicrobe
|
19711934b281d12adef4fd5ba85b517b3f99e344
|
359c2a3947b9151f370e889785e5bfe4fa207121
|
refs/heads/master
| 2023-04-12T18:36:26.916925
| 2021-04-30T17:04:49
| 2021-04-30T17:04:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 443
|
rd
|
NEONMICROBE_DIR_SOIL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dirs.R
\name{NEONMICROBE_DIR_SOIL}
\alias{NEONMICROBE_DIR_SOIL}
\title{Dynamic Directory Name for Soil Data}
\usage{
NEONMICROBE_DIR_SOIL()
}
\value{
Directory path (character).
}
\description{
For NEON soil data DP1.10086.001:
"Soil physical and chemical properties, periodic",
tables sls_soilCoreCollection, sls_soilMoisture, sls_soilpH, and sls_soilChemistry.
}
|
e4ffff9c8956d63d4c0b40f2e27324f1821f1aab
|
c0f4ec09bf6ecb259fb9e26dec331f8804f2237d
|
/man/plotDrugData.Rd
|
f65b526e0c4753f7dd6f3babe64a3e8a976d8a11
|
[
"MIT"
] |
permissive
|
Lthura/mpnstXenoModeling
|
bc52ff0455b18870c3a878ca209bc6ac9b208153
|
437ac9e87a3ccadbefd071674e9b246d0f7661ce
|
refs/heads/master
| 2022-11-29T23:42:33.039456
| 2020-08-11T14:23:24
| 2020-08-11T14:23:24
| 285,438,039
| 0
| 0
|
MIT
| 2020-08-06T00:54:14
| 2020-08-06T00:54:14
| null |
UTF-8
|
R
| false
| true
| 245
|
rd
|
plotDrugData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDataSets.R
\name{plotDrugData}
\alias{plotDrugData}
\title{Plot a histogram of drug data}
\usage{
plotDrugData(drugData)
}
\description{
Plot a histogram of drug data
}
|
4424a7bf65b331c921a71804e650b3f17a71c2d9
|
731be3ddee70944aae00c72748ed837c4c00c82e
|
/tSNE/tsne.R
|
6fbaee20b13f6580e603c599ab77360060d6813f
|
[] |
no_license
|
beherasan/RScripts
|
24c6f6872b0af9b74320600b2feed5b20e2aadac
|
87228c8275d3dbf2ab99e4d74927c02ac2a8cf88
|
refs/heads/master
| 2023-07-05T09:34:39.109622
| 2021-08-26T06:29:55
| 2021-08-26T06:29:55
| 130,580,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
tsne.R
|
## t-SNE visualisation of a labelled CSV dataset.
## Expected input (chosen interactively): a CSV whose first two columns are
## "label" (group) and "Age"; the remaining columns are the features.
library('Rtsne')
## Read the comma-separated file with the "label" column for group, here it contains additional label as "Age"
train<- read.csv(file.choose())
Labels<-train$label
train$label<-as.factor(train$label)
# One colour per group level.
colors = rainbow(length(unique(train$label)))
names(colors) = unique(train$label)
## Select perplexity based on the row number (perplexity*3 < rownumber-1)
# Run t-SNE once, capturing both the embedding and its run time.
# BUG FIX: the original called Rtsne() twice -- once for the result and once
# inside system.time() -- doubling the cost and timing a different run than
# the one actually plotted.
exeTimeTsne <- system.time(
tsne <- Rtsne(train[,c(-1,-2)], dims = 2, perplexity=11, verbose=TRUE, max_iter = 1000)
)
## Plot type 1
#tsne$Y
#plot(tsne$Y, t='n', main="tSNE")
#text(tsne$Y, labels=train$Age, col=colors[train$label])
#train$Age
## Plot type 2
plot(tsne$Y,main="tSNE",pch=19,col=colors[train$label],cex=2.0)
text(tsne$Y, labels=train$Age,pos=1)
|
8bf6bd61c20e1956705758c4e0e57997558dad48
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tadaatoolbox/examples/ord_gamma.Rd.R
|
d1ac9d675441c058d2873bce9c518c3c0e01ab43
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 249
|
r
|
ord_gamma.Rd.R
|
# Auto-extracted example code for tadaatoolbox::ord_gamma()
# (Goodman-Kruskal gamma, an ordinal association measure).
library(tadaatoolbox)
### Name: ord_gamma
### Title: Gamma
### Aliases: ord_gamma
### ** Examples
# Simulated ordinal ratings (1-5) for three groups A/B/C.
df <- data.frame(rating = round(runif(50, 1, 5)),
group = sample(c("A", "B", "C"), 50, TRUE))
# Cross-tabulate rating x group, then compute gamma on the contingency table.
tbl <- table(df)
ord_gamma(tbl)
|
49690d725640857147b7ea6788224a712f6ed6c0
|
2c05957ccb67c479652453a10965b7b261a2376d
|
/R/las_read_mnemonics_df.R
|
33b8bbd5766442e8da46056d99a2b5094bbb5d3b
|
[] |
no_license
|
mgmdick/lastools
|
f9effdc6ea04841a1356da8e270c70c07f10a398
|
54cad63bdda3fb9dab11112b1f0b12426bda2b95
|
refs/heads/master
| 2020-05-17T05:13:54.114146
| 2019-10-09T10:01:56
| 2019-10-09T10:01:56
| 183,527,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
las_read_mnemonics_df.R
|
#' @title Read las curve mnemonics data to data frame
#'
#' @description This function extracts las curve mnemonics from all las files in a directory
#' @param dir The Target Directory containing the .las files (required)
#' @return A one-column data frame named \code{mnem} holding the unique
#'   curve mnemonics found across all files.
#' @export
#' @examples
#' read_las_mnemonics_df(dir)
read_las_mnemonics_df <- function(dir)
{
	# Extract the curve mnemonics of a single las file as a one-column
	# data frame named "mnem".
	get_mnemonics <- function(x)
	{
		las <- lastools::read_las(x)
		# BUG FIX: the original immediately overwrote this with
		# las$CURVE$DESCRIPTION, so the returned "mnem" column actually
		# held curve descriptions, not mnemonics.
		mnem.df <- as.data.frame(las$CURVE$MNEM)
		names(mnem.df) <- c("mnem")
		return(mnem.df)
	}
	# All .las files anywhere under 'dir'.
	list.of.files <- list.files(dir, pattern = "\\.las$", recursive = TRUE, full.names = TRUE)
	mnem <- lapply(list.of.files, get_mnemonics)
	# Stack the per-file results and drop duplicate mnemonics.
	mnem <- unique(as.data.frame(do.call(rbind, mnem)))
	return(mnem)
}
|
4fb64257d8846e6e9fb84cc9b1693c7659bbf24e
|
23da05ef70a4cffe9c2ee2a58982378dd575327c
|
/URA_Ch18.R
|
1d289c2919f97f320317b9d9450ca806a9dc64cc
|
[] |
no_license
|
rmuhumuza/URA-Rcode
|
3fd74ee000363743ba8a67dbf886c1f085757609
|
98bf00df9877cee416c28af33b1322becf0d79e5
|
refs/heads/master
| 2022-04-14T00:59:34.256271
| 2020-04-09T03:11:22
| 2020-04-09T03:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,587
|
r
|
URA_Ch18.R
|
## R code for URA Chapter 18: regression trees on the charity tax data.
## NOTE(review): add.loess() is a helper defined outside this file; it must
## be sourced before the LOESS overlays (Figures 18.5 and 18.8) will run.

## Figure 18.1
# BUG FIX: the data URL was split over two source lines, embedding a literal
# newline plus indentation inside the string, so read.csv() could never
# resolve it. Build the URL as a single unbroken string instead.
charity = read.csv(paste0("https://raw.githubusercontent.com/andrea2719/",
                          "URA-DataSets/master/charitytax.csv"))
attach(charity)   # book style: exposes CHARITY, INCOME, DEPS, AGE, MS
Y2 = subset(CHARITY, DEPS ==2)
hist(Y2, breaks=10, freq=F, main="", xlab="Log Charitable Contributions When DEPS = 2")
rug(Y2)
legend("topleft", c(paste("mean =", round(mean(Y2),2)), paste("sd =", round(sd(Y2),2))))

## Figure 18.2
Y.LO.I = subset(CHARITY, INCOME <= 10.533)
Y.HI.I = subset(CHARITY, INCOME > 10.533)
hist(Y.LO.I, breaks=10, freq=F, main="", lty="blank",
     col="gray", xlab="Log Charitable Contributions", xlim = c(0,12))
hist(Y.HI.I, breaks=seq(1.9, 10.9,.5), freq=F, add=TRUE)

## Section 18.1
library(rpart)
fit1 = rpart(CHARITY ~ INCOME, maxdepth=1)   # single-split tree
summary(fit1)

## Figure 18.3
Y.Lower = subset(CHARITY, INCOME < 10.90845)   # 10.90845 is fit1's split point
Y.Upper = subset(CHARITY, INCOME >= 10.90845)
hist(Y.Lower, freq=F, lty="blank", xlim = c(2, 10.2), breaks=10,
     ylim = c(0, .5), xlab = "Logged Charitable Contributions",
     main = "Estimated Conditional Distributions", col="gray")
hist(Y.Upper, freq=F, lty=1, add=T, breaks=seq(1.9, 10.9,.5))

## Figure 18.4
library(rpart.plot)
rpart.plot(fit1, extra=1, digits=4)

## Figure 18.5 (requires add.loess function)
plot(INCOME, CHARITY, col="gray", pch="+")
points(c(8.5,10.9), c(6.34, 6.34), type="l", lwd=2)
points(c(10.9,12.0), c(7.35, 7.35), type="l", lwd=2)
points(c(10.9, 10.9), c(6.34,7.35), type = "l", col="gray", lwd=2)
abline(lsfit(INCOME, CHARITY), lty=2, lwd=2)
add.loess(INCOME, CHARITY, lty=3, lwd=2)
legend("topleft", c("Tree", "OLS", "LOESS"), lty=1:3, lwd=c(2,2,2))

## Figure 18.6
# R-squared of a one-split regression at every candidate split point.
# Renamed the original 'unique' variable: it masked base::unique().
uniq.inc = sort(unique(INCOME))
n.unique = length(uniq.inc)
midpoints = (uniq.inc[1:(n.unique-1)] + uniq.inc[2:n.unique])/2
n.midpoints = n.unique - 1
rsq = numeric(n.midpoints)
for (i in 1:n.midpoints) {
  Indicator = INCOME > midpoints[i]
  rsq[i] = summary(lm(CHARITY~Indicator))$r.squared
}
plot(midpoints, rsq, xlab="INCOME Split Point", ylab="R Squared")
abline(v=10.90845)   # the split rpart actually chose

## Figure 18.7
# Re-read so this section runs standalone (same URL BUG FIX as above).
charity = read.csv(paste0("https://raw.githubusercontent.com/andrea2719/",
                          "URA-DataSets/master/charitytax.csv"))
attach(charity)   # re-attaching triggers a masking warning; harmless here
fit2 = rpart(CHARITY ~ INCOME)
summary(fit2)
rpart.plot(fit2, extra=1, digits=3)
mean(CHARITY[INCOME >= 10.83595 & INCOME < 10.90845])
length(CHARITY[INCOME >= 10.83595 & INCOME < 10.90845])

## Figure 18.8
Income.plot = seq(8.5, 12, .001)
Income.pred = data.frame(Income.plot)
names(Income.pred) = c("INCOME")
Yhat2 = predict(fit2, Income.pred)
plot(INCOME, CHARITY, pch = "+", col="gray")
abline(lsfit(INCOME, CHARITY), lwd=2, lty=2)
points(Income.plot, Yhat2, type="l", lwd=2)
add.loess(INCOME, CHARITY, lwd=2, lty=3)
legend("topleft", c("Tree", "OLS", "LOESS"), lty=1:3, lwd=c(2,2,2))

## Figure 18.9
fit3 = rpart(CHARITY ~ INCOME + DEPS, maxdepth=2)
rpart.plot(fit3, extra=1, digits=3)

## Figure 18.10
# 21 income values x 7 dependent counts form the wireframe surface grid.
inc.plot = seq(min(INCOME), max(INCOME), (max(INCOME)-min(INCOME))/20 )
inc.plot = rep(inc.plot,7)
deps.plot = seq(0,6,1)
deps.plot = rep(deps.plot, each=21)
all.pred = data.frame(inc.plot, deps.plot)
names(all.pred) = c("INCOME", "DEPS")
fit2 = rpart(CHARITY ~ INCOME + DEPS, maxdepth=2)   # overwrites Fig 18.7's fit2, as in the book
yhat.plot = predict(fit2, all.pred)
library(lattice)
wireframe(yhat.plot ~ inc.plot*deps.plot,
          xlab = "INC", ylab = "DEPS", zlab = "CHAR",
          main = "Tree Regression Function",
          drape = TRUE,
          colorkey = FALSE,
          scales = list(arrows=FALSE,cex=.8,tick.number = 5),
          screen = list(z = -20, x = -60))

## Figure 18.11
fit3 = rpart(CHARITY ~ INCOME + DEPS + AGE + MS)
rpart.plot(fit3, extra=1, digits=3)
|
1058d50decd71ab202edeec948e390ccdb79c91a
|
928788e6b9b0247c7a06adcf5e75451c6743e247
|
/man/get_lon180.Rd
|
f8f869a26351a6763051b88b39b5305e33ed063e
|
[] |
no_license
|
C3S-Data-Rescue-Lot1-WP3/stlocationqc
|
dade8a33fafb94bd7d2b124bf0f03631435ae452
|
ed3b0410e4a179445912df67740b132471677182
|
refs/heads/master
| 2020-04-10T05:07:21.388641
| 2018-12-28T16:04:51
| 2018-12-28T16:04:51
| 160,818,158
| 0
| 1
| null | 2018-12-28T16:04:52
| 2018-12-07T12:00:08
|
R
|
UTF-8
|
R
| false
| true
| 2,314
|
rd
|
get_lon180.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_get_lon180.R
\name{get_lon180}
\alias{get_lon180}
\title{Tests the Geographic Coordinates and Transforms the Longitude from (0, 360)
to (-180, +180).}
\usage{
## If the coordinates are supplied as a data frame
get_lon180(coords)
##
## If the coordinates are read from a text file
get_lon180(coords = NULL)
}
\arguments{
\item{coords}{data frame with the geographic coordinates in decimal degrees,
the longitude in the first column, the latitude in the second column and
the column names: \strong{lon | lat} (other columns can exist, however are
unnecessary for this function).}
}
\description{
Given a list of geographic coordinates, first tests the coordinates for out
of bounds values and missing values. Then transforms the longitude from (0,
360) to (-180, +180) degrees, keeping 6 significant digits.
}
\details{
\strong{Input:}
\itemize{
\item A \strong{text file} without header with the geographic coordinates in
decimal degrees: the longitude in the first column and the latitude in the
second column, separated by tabs (other columns could exist, however are
unnecessary for this function). Missing values must be codified as 'NA' in
all fields.
\item Or a \strong{data frame} with that same format and the column names:
\strong{lon | lat}.
}
\strong{Output:}
\itemize{
\item A text file named 'coords_lon180' with three columns id | lon | lat,
where: 'id' is the row identifier for the coordinates in the original list of
coordinates, 'lon' is the longitude in the range (-180, +180) and 'lat' is
the latitude. Both coordinates are in decimal degrees.
\item If there are errors, the function writes a text file with the
coordinate pairs containing at least one coordinate out of bounds.
\item If there are missing coordinates, the function writes a text file with
the coordinate pairs containing at least one missing coordinate.
\item A .RData file with the output data frame(s) that has/have the column
names: \strong{id | lon | lat}. The data frame \strong{'coords_lon180'} can
be used as input parameter of \code{\link{get_country}} to determine the
country names. If erroneous and/or missing coordinates are found, the data
frame \strong{'excl_coords'} containing them is also created.
}
}
\examples{
\dontrun{
get_lon180(coords = ispd)
}
}
|
8305819d7efcd8c33df0639751a55f1bee4a315b
|
36b14b336e0efdda255fa3f163af65127e88105f
|
/man/Problem8.31.Rd
|
b925be8cd44190a0a110fc8f9da3899dced54399
|
[] |
no_license
|
ehassler/MontgomeryDAE
|
31fcc5b46ae165255446e13beee9540ab51d98b3
|
43a750f092410208b6d1694367633a104726bc83
|
refs/heads/master
| 2021-06-24T13:46:19.817322
| 2021-03-11T16:36:37
| 2021-03-11T17:13:18
| 199,803,056
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
rd
|
Problem8.31.Rd
|
\name{Problem8.31}
\alias{Problem8.31}
\docType{data}
\title{Exercise 8.31}
\usage{data("Problem8.31")}
\format{A data frame with 8 observations on the following variable(s).\describe{
\item{\code{AcidStrength}}{a numeric vector}
\item{\code{ReactionTime}}{a numeric vector}
\item{\code{AmountOfAcid}}{a numeric vector}
\item{\code{ReactionTemperature}}{a numeric vector}
\item{\code{Yield}}{a numeric vector}
}}
\references{Montgomery, D.C.(2017, 10th ed.) \emph{Design and Analysis of Experiments}, Wiley, New York.}
\examples{data(Problem8.31)}
\keyword{datasets}
|
1c8ef6175c6e17fcd7a57258e731598b6089f38e
|
948b78fc214a1b9981790c83abb6284758dbfa89
|
/r-library/R/zzz.R
|
54da89b24f5dc6577f7aa54aeef19d167798d9e3
|
[
"MIT"
] |
permissive
|
terminological/jepidemic
|
4ea81235273649b21cf11108c5e78dd7612fdf6e
|
f73cc26b0d0c431ecc31fcb03838e83d925bce7a
|
refs/heads/main
| 2023-04-14T10:13:56.372983
| 2022-05-24T22:07:10
| 2022-05-24T22:07:10
| 309,675,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
zzz.R
|
# Generated by r6-generator-maven-plugin: do not edit by hand
# Runs when the library is loaded. The precise effect is a little uncertain:
# it depends on whether rJava has already been initialised and on what other
# libraries are using it.
.onLoad <- function(libname, pkgname) {
  # Force the JVM max heap to 4 GiB while keeping every other user-supplied
  # java option; any pre-existing -Xmx setting is dropped first.
  current <- getOption("java.parameters")
  kept <- current[!grepl(pattern = "-Xmx.*", x = current)]
  options(java.parameters = c("-Xmx4096M", kept))
}
|
e93e95e8847bb3d90dd226f7897b47ee0d4a5a86
|
2035e29e335ae2ffd834832a4b31a0c554006689
|
/hws/hw04/resources/MCMC.r
|
59cc6b8405c448bcc787aad9cb9596c8c20c337d
|
[] |
no_license
|
tonyelhabr/isye-6420
|
a3941c4c7595f4da8b6b2e081d944c55c8d35299
|
ff46637c080c63f4b171d3ec831033771ff766f5
|
refs/heads/master
| 2022-03-17T09:57:47.559390
| 2019-12-15T18:37:40
| 2019-12-15T18:37:40
| 201,238,357
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
MCMC.r
|
# Gibbs sampler for a conjugate Normal model with unknown mean (mu) and
# precision (tau):
#   y_i ~ N(mu, 1/tau),  mu ~ N(mu0, 1/tau0),  tau ~ Gamma(a, rate = b)
n <- 100
set.seed(2)
y <- 2 * rnorm(100) + 1   # simulated data, roughly N(1, sd = 2)
#------------------------------------------
NN <- 10000               # number of Gibbs iterations
# Preallocate the chains: the original grew them with c() inside the loop,
# which is O(NN^2). Same RNG stream, so the sampled values are unchanged.
mus <- numeric(NN)
taus <- numeric(NN)
sumdata <- sum(y)
# hyperparameters
mu0 <- 0.5
tau0 <- 1/100
a <- 1/2
b <- 2
# initial value (only tau is needed: mu is drawn first each iteration, so the
# original's 'mu' variable was never read and has been removed)
tau <- 0.5
for (i in seq_len(NN)) {
  # mu | tau, y  ~  Normal (conjugate update)
  newmu <- rnorm(1, (tau * sumdata + tau0 * mu0) / (tau0 + n * tau),
                 sqrt(1 / (tau0 + n * tau)))
  # tau | mu, y  ~  Gamma (conjugate update)
  rat <- b + 1/2 * sum((y - newmu)^2)
  newtau <- rgamma(1, shape = a + n/2, rate = rat)
  mus[i] <- newmu
  taus[i] <- newtau
  tau <- newtau
}
# Discard burn-in (note: element 'burn' itself is kept, as in the original).
burn <- 200
mus <- mus[burn:NN]
taus <- taus[burn:NN]
mean(mus)   # posterior mean estimate of mu
mean(taus)  # posterior mean estimate of tau
par(mfrow=c(1,2))
hist(mus, 40)
hist(taus, 40)
|
bf9126fb61122c4ad520cceccc822f4481f644b7
|
8cd2d5cd7bdcf74a72c04e98946189d2146a8ac2
|
/ecocrop/jjv.salinity.r
|
f4d2c2d931a80d76577089c73c83a242406ab96f
|
[] |
no_license
|
LaurenHodgson/Projects
|
fe7383c9d054fccb964ee54fc8a495b3de0812db
|
8935bc6f81bd173ed679552282cc7b32568c5fea
|
refs/heads/master
| 2016-09-06T02:38:54.899728
| 2012-12-13T05:00:13
| 2012-12-13T05:00:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,065
|
r
|
jjv.salinity.r
|
# Build a per-cell soil-salinity grid from the HWSD soil database:
# map each base-grid cell to its HWSD soil mapping unit, aggregate the
# top/sub-soil salinity (ECE) per mapping unit, then gap-fill missing cells
# with the mean of surrounding cells.
library(rgdal); library(SDMTools) #load the library
soil.dir = '/home/22/jc148322/flatdata/soil/'
wd = '/homes/31/jc165798/tmp/soils/'; setwd(wd) #define and set the working directory
load("bil.indata.RData") #bil.data = readGDAL('hwsd.bil') #read in the data
soil.asc = asc.from.sp(bil.data) #convert to an ascii grid format
baseasc = read.asc('base.asc') #read in the asc file
pos = read.csv('base.pos.csv',as.is=TRUE) #read in the base positions
pos$soil_ID = extract.data(cbind(pos$lon,pos$lat),soil.asc) #extract the soil id
tt = baseasc; tt[cbind(pos$row,pos$col)] = pos$soil_ID #get the soil id
png(); image(tt); dev.off() #quickly map the soil ID values
soildata = read.csv('hwsd_data.csv',as.is=TRUE) #read in the soil data
soildata = soildata[,c("MU_GLOBAL","SHARE","T_ECE","S_ECE")] #keep only the salinity-related columns
#start aggregating the soil data
row.agg = function(x) { #combine top-soil (T_ECE) and sub-soil (S_ECE) salinity of one row
if (is.finite(sum(x))) { #no missing data
return( sum(x*c(0.3,0.7)) ) #30/70 split of top and bottom soil
} else if (is.finite(x[1])) { #sal of top soil
return( x[1] )
} else if (is.finite(x[2])) { #sal of bottom soil
return( x[2] )
} else { return( NA ) }
}
soildata$sal = apply(soildata[,c("T_ECE","S_ECE")],1,row.agg) #get the top and bottom proportions
soildata = soildata[which(is.finite(soildata$sal)),] #get rid of NA data
soildata = soildata[,-which(names(soildata) %in% c("T_ECE","S_ECE"))] #remove extra columns
soildata$sal = (soildata$SHARE / 100) * soildata$sal #make the sal a proportion of the 'share'
soils = aggregate(soildata,by=list(soil_ID=soildata$MU_GLOBAL),sum) #get the sum of the data
soils$sal = soils$sal / (soils$SHARE / 100) #correct for incorrect addition of shares
soils = soils[,c('soil_ID','sal')] #keep only columns of interest
pos = merge(pos,soils,all.x=TRUE) #merge the soils into the pos object
for (ii in which(is.na(pos$sal))) { cat('.') #cycle through and replace missing sal data with the average of surrounding cells
x = pos$col[ii]; y = pos$row[ii] #get the row/col
buff = 0 #set the default buffer size
tval= NULL #object to hold sal values
while (is.null(tval)) { #continue looping through this until we have at least 5 sal values for the average
buff = buff+2 #add 2 pixel buffer distances
for (xx in (-buff:buff)+x) { #cycle through the possible x values
for (yy in (-buff:buff)+y) { #cycle through the possible y values
tt = which(pos$row==yy & pos$col==xx) #check to see if the xy combination is a valid location
if (length(tt) > 0) tval = c(tval,pos$sal[tt]) #if valid location, extract the sal data
}
}
if (length(na.omit(tval))<5) tval = NULL #if not at least 5 finite sal values, set to null and start loop again with larger buffer
}
pos$sal[ii] = mean(tval,na.rm=TRUE) #gap-fill with the neighbourhood mean
}
tt = baseasc; tt[cbind(pos$row,pos$col)] = pos$sal #write the salinity values onto the base grid
png('sal.png'); image(tt); dev.off() #quickly map the salinity values
write.asc.gz(tt,paste(soil.dir,'soil.sal.asc',sep='')
pos.sal = pos
save(pos.sal, file=paste(soil.dir,'soil.sal.rData',sep=''))
|
4a937a495c02858e63365ac424aa8c218972e51e
|
ba65d8b42dfce42e1a4594d5a58a815194082112
|
/man/readVcfFiles.Rd
|
bb179fa703d7dc95af7d393c232de30fccbeef16
|
[
"MIT"
] |
permissive
|
acc-bioinfo/TMBleR
|
b4ac594173ecc2ead98fd19696136f0d235065a3
|
f3ded88b111b8db0867222aaa8be4bcd9fe8e80d
|
refs/heads/main
| 2023-06-19T22:21:49.508537
| 2021-07-16T16:42:20
| 2021-07-16T16:42:20
| 378,995,779
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,242
|
rd
|
readVcfFiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readVcfFiles.R
\name{readVcfFiles}
\alias{readVcfFiles}
\title{Read VCF files}
\usage{
readVcfFiles(vcfFiles, assembly)
}
\arguments{
\item{vcfFiles}{list of one or more \code{character} strings corresponding to
the vcf file names. Each element of the list should be named with the sample name.
If no name is provided, the .vcf file name will be used.}
\item{assembly}{human genome assembly: hg19 or hg38}
}
\value{
a (list of) \code{CollapsedVCF} object(s) containing variants
}
\description{
This function takes in input a (list of) VCF file name(s), reads and process
them by setting UCSC style and allowed chromosome names.
}
\examples{
# Read in input name of vcf files (provide them as a list even if you only have
# one file)
vcf_files <- list(Horizon5="Horizon5_ExamplePanel.vcf",
HorizonFFPEmild="HorizonFFPEmild_ExamplePanel.vcf")
# For each vcf file, get the absolute path
vcf_files <- lapply(vcf_files,
function(x) system.file("extdata", x, package = "TMBleR", mustWork = TRUE))
# Read in the files
vcfs <- readVcfFiles(vcfFiles = vcf_files, assembly = "hg19")
}
\author{
Laura Fancello
}
|
bdb5a8db20d926887a06c69ceaff67c29e0f6408
|
ab60dd90b4b0f1ac5b113e9a48373d7a780ebd6e
|
/testpull.r
|
c891ae068864cb74619b02b994558a575a50afd6
|
[] |
no_license
|
adamsb0713/new-GGPlot-
|
7a880346588d279053d1e3b9a870c531d587ca0e
|
b43821c213b6db1a12af79b2f4f6cc985d64e7ef
|
refs/heads/master
| 2020-03-18T08:12:41.346983
| 2018-06-12T02:39:21
| 2018-06-12T02:39:21
| 134,496,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44
|
r
|
testpull.r
|
##test script
x=1
y=2
a=y/8
l=x*25
b=y*3
|
5be2220510ad82703fb440d39d00d90bd96a4200
|
f93b8f713c8d1eb03fd918c556526fdeb097df69
|
/Project Script.R
|
92a8c0fa1761779fa93dcb0ae978d9fe8b2ffece
|
[] |
no_license
|
jjrwrenn/Math421Assignment
|
6456b27c531f35e7001793c41bc5272b8439fa4d
|
66176a32e62b5eb06833a63b62d0fef5c7f38af1
|
refs/heads/master
| 2020-04-06T08:07:56.768286
| 2018-12-20T06:30:30
| 2018-12-20T06:30:30
| 157,296,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,116
|
r
|
Project Script.R
|
# Clear environment
rm(list = ls())
set.seed(12192018)
# Read in data file
data = read.csv(file = "C:\\Users\\student\\Documents\\Applied Data Mining\\Final Project\\default of credit card clients.csv", header = TRUE)
# Remove this NULLs variable, is ID only
data$X<-NULL
# Replace names
names(data) <- c('credit', 'gender', 'education', 'maritalStatus', 'age', 'sepPaymentStatus', 'augPaymentStatus', 'julPaymentStatus', 'junPaymentStatus', 'mayPaymentStatus', 'aprPaymentStatus', 'sepBillStatement', 'augBillStatement', 'julBillStatement', 'junBillStatement', 'mayBillStatement', 'aprBillStatement', 'sepPrevBillStatement', 'augPrevBillStatement', 'julPrevBillStatement', 'junPrevBillStatement', 'mayPrevBillStatement', 'aprPrevBillStatement', 'target')
# Make odd values missing
data$education[data$education == 6]<-NA
data$education[data$education == 5]<-NA
data$education[data$education == 0]<-NA
data$maritalStatus[data$maritalStatus == 0]<-NA
data$sepPaymentStatus[data$sepPaymentStatus == -2]<-NA
data$augPaymentStatus[data$augPaymentStatus == -2]<-NA
data$julPaymentStatus[data$julPaymentStatus == -2]<-NA
data$junPaymentStatus[data$junPaymentStatus == -2]<-NA
data$mayPaymentStatus[data$mayPaymentStatus == -2]<-NA
data$aprPaymentStatus[data$aprPaymentStatus == -2]<-NA
data$sepBillStatement[data$sepBillStatement < 0]<-NA
data$augBillStatement[data$augBillStatement < 0]<-NA
data$julBillStatement[data$julBillStatement < 0]<-NA
data$junBillStatement[data$junBillStatement < 0]<-NA
data$mayBillStatement[data$mayBillStatement < 0]<-NA
data$aprBillStatement[data$aprBillStatement < 0]<-NA
data$sepPrevBillStatement[data$sepPrevBillStatement < 0]<-NA
data$augPrevBillStatement[data$augPrevBillStatement < 0]<-NA
data$julPrevBillStatement[data$julPrevBillStatement < 0]<-NA
data$junPrevBillStatement[data$junPrevBillStatement < 0]<-NA
data$mayPrevBillStatement[data$mayPrevBillStatement < 0]<-NA
data$aprPrevBillStatement[data$aprPrevBillStatement < 0]<-NA
# Make some ints factors
target.f <- factor(data$target, labels = c("No", "Yes"))
gender.f <- factor(data$gender, labels = c("M", "F"))
edu.f <- factor(data$education, labels = c("GradSchool", "University", "HighSchool", "Other"))
marital.f <- factor(data$maritalStatus, labels = c("Married", "Single", "Other"))
data$target <- target.f
data$gender <- gender.f
data$education <- edu.f
data$maritalStatus <- marital.f
# Under sample, will make later computations faster and will balance the data
data.no = data[data$target == 'No',]
data.yes = data[data$target == 'Yes',]
data.no.under = data.no[sample(1:nrow(data.no), nrow(data.yes)),]
data.under <- rbind(data.yes, data.no.under)
# Do imputations before binning
data.under$education[is.na(data.under$education)]<-sort(names(table(data.under$education)))[1]
data.under$maritalStatus[is.na(data.under$maritalStatus)]<-sort(names(table(data.under$maritalStatus)))[1]
# Save for later use
data.bin <- data.under
data.dummy <- data.under
# Complete imputing
data.median<-predict(preProcess(data.under, method='medianImpute'), newdata = data.under)
data.knn<-predict(preProcess(data.under, method='knnImpute'), newdata = data.under)
# Change to complete.cases() soon
data.complete<-data.under[complete.cases(data.under),]
# Model Creation: Median Impute
# Quick Clean
median.splitIndex <- createDataPartition(data.median$target, p = 0.70, list = FALSE, times = 1)
median.train <- data.median[ median.splitIndex,]
median.test <- data.median[-median.splitIndex,]
median.train.list <- c(median.train$credit, median.train$gender, median.train$education, median.train$maritalStatus, median.train$age, median.train$sepPaymentStatus, median.train$augPaymentStatus, median.train$julPaymentStatus, median.train$junPaymentStatus, median.train$mayPaymentStatus, median.train$aprPaymentStatus, median.train$sepBillStatement, median.train$augBillStatement, median.train$julBillStatement, median.train$junBillStatement, median.train$mayBillStatement, median.train$aprBillStatement, median.train$sepPrevBillStatement, median.train$augPrevBillStatement, median.train$julPrevBillStatement, median.train$junBillStatement, median.train$mayPrevBillStatement, median.train$aprPrevBillStatement)
median.train.matrix <- matrix(median.train.list, nrow = length(median.train.list)/23, ncol = 23)
median.test.list <- c(median.test$credit, median.test$gender, median.test$education, median.test$maritalStatus, median.test$age, median.test$sepPaymentStatus, median.test$augPaymentStatus, median.test$julPaymentStatus, median.test$junPaymentStatus, median.test$mayPaymentStatus, median.test$aprPaymentStatus, median.test$sepBillStatement, median.test$augBillStatement, median.test$julBillStatement, median.test$junBillStatement, median.test$mayBillStatement, median.test$aprBillStatement, median.test$sepPrevBillStatement, median.test$augPrevBillStatement, median.test$julPrevBillStatement, median.test$junBillStatement, median.test$mayPrevBillStatement, median.test$aprPrevBillStatement)
median.test.matrix <- matrix(median.test.list, nrow = length(median.test.list)/23, ncol = 23)
# Glmnet model
fit.median = glmnet(x = median.train.matrix, y = as.double(data.median[median.splitIndex,]$target), family = "binomial")
fit.median.predict <- predict(fit.median, newx = median.test.matrix)
# C5.0 model
C5.0.tree.median <- C5.0(x = median.train[.1:(ncol(median.train)-1)], y = median.train$target)
C5.0.tree.median.predict <- predict(C5.0.tree.median, newdata = median.test)
# rpart model
median.rpart <- rpart(target ~ ., data = median.train, method = "class")
median.pred <- predict(median.rpart, median.test, type = "class")
median.cm = confusionMatrix(data = median.pred, reference = (median.test$target), positive = "Yes")
# ranger model
myGrid7median = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
model7median <- train(target~.,data = median.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = myGrid7median)
# Model Creation: Knn Impute
# Quick Clean
knn.splitIndex <- createDataPartition(data.knn$target, p = 0.70, list = FALSE, times = 1)
knn.train <- data.knn[ knn.splitIndex,]
knn.test <- data.knn[-knn.splitIndex,]
knn.train.list <- c(knn.train$credit, knn.train$gender, knn.train$education, knn.train$maritalStatus, knn.train$age, knn.train$sepPaymentStatus, knn.train$augPaymentStatus, knn.train$julPaymentStatus, knn.train$junPaymentStatus, knn.train$mayPaymentStatus, knn.train$aprPaymentStatus, knn.train$sepBillStatement, knn.train$augBillStatement, knn.train$julBillStatement, knn.train$junBillStatement, knn.train$mayBillStatement, knn.train$aprBillStatement, knn.train$sepPrevBillStatement, knn.train$augPrevBillStatement, knn.train$julPrevBillStatement, knn.train$junBillStatement, knn.train$mayPrevBillStatement, knn.train$aprPrevBillStatement)
knn.train.matrix <- matrix(knn.train.list, nrow = length(knn.train.list)/23, ncol = 23)
knn.test.list <- c(knn.test$credit, knn.test$gender, knn.test$education, knn.test$maritalStatus, knn.test$age, knn.test$sepPaymentStatus, knn.test$augPaymentStatus, knn.test$julPaymentStatus, knn.test$junPaymentStatus, knn.test$mayPaymentStatus, knn.test$aprPaymentStatus, knn.test$sepBillStatement, knn.test$augBillStatement, knn.test$julBillStatement, knn.test$junBillStatement, knn.test$mayBillStatement, knn.test$aprBillStatement, knn.test$sepPrevBillStatement, knn.test$augPrevBillStatement, knn.test$julPrevBillStatement, knn.test$junBillStatement, knn.test$mayPrevBillStatement, knn.test$aprPrevBillStatement)
knn.test.matrix <- matrix(knn.test.list, nrow = length(knn.test.list)/23, ncol = 23)
# Glmnet model
fit.knn = glmnet(x = knn.train.matrix, y = as.double(data.knn[knn.splitIndex,]$target), family = "binomial")
fit.knn.predict <- predict(fit.knn, newx = knn.test.matrix)
# C5.0 model
C5.0.tree.knn <- C5.0(x = knn.train[.1:(ncol(knn.train)-1)], y = knn.train$target)
C5.0.tree.knn.predict <- predict(C5.0.tree.knn, newdata = knn.test)
# rpart model
knn.rpart <- rpart(target ~ ., data = knn.train, method = "class")
knn.pred <- predict(knn.rpart, knn.test, type = "class")
knn.cm = confusionMatrix(data = knn.pred, reference = (knn.test$target), positive = "Yes")
# ranger model
myGrid7knn = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
model7knn <- train(target~.,data = knn.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = myGrid7knn)
# Model Creation: complete Impute
# Quick Clean
complete.splitIndex <- createDataPartition(data.complete$target, p = 0.70, list = FALSE, times = 1)
complete.train <- data.complete[ complete.splitIndex,]
complete.test <- data.complete[-complete.splitIndex,]
complete.train.list <- c(complete.train$credit, complete.train$gender, complete.train$education, complete.train$maritalStatus, complete.train$age, complete.train$sepPaymentStatus, complete.train$augPaymentStatus, complete.train$julPaymentStatus, complete.train$junPaymentStatus, complete.train$mayPaymentStatus, complete.train$aprPaymentStatus, complete.train$sepBillStatement, complete.train$augBillStatement, complete.train$julBillStatement, complete.train$junBillStatement, complete.train$mayBillStatement, complete.train$aprBillStatement, complete.train$sepPrevBillStatement, complete.train$augPrevBillStatement, complete.train$julPrevBillStatement, complete.train$junBillStatement, complete.train$mayPrevBillStatement, complete.train$aprPrevBillStatement)
complete.train.matrix <- matrix(complete.train.list, nrow = length(complete.train.list)/23, ncol = 23)
complete.test.list <- c(complete.test$credit, complete.test$gender, complete.test$education, complete.test$maritalStatus, complete.test$age, complete.test$sepPaymentStatus, complete.test$augPaymentStatus, complete.test$julPaymentStatus, complete.test$junPaymentStatus, complete.test$mayPaymentStatus, complete.test$aprPaymentStatus, complete.test$sepBillStatement, complete.test$augBillStatement, complete.test$julBillStatement, complete.test$junBillStatement, complete.test$mayBillStatement, complete.test$aprBillStatement, complete.test$sepPrevBillStatement, complete.test$augPrevBillStatement, complete.test$julPrevBillStatement, complete.test$junBillStatement, complete.test$mayPrevBillStatement, complete.test$aprPrevBillStatement)
complete.test.matrix <- matrix(complete.test.list, nrow = length(complete.test.list)/23, ncol = 23)
# Glmnet model
fit.complete = glmnet(x = complete.train.matrix, y = as.double(data.complete[complete.splitIndex,]$target), family = "binomial")
fit.complete.predict <- predict(fit.complete, newx = complete.test.matrix)
# C5.0 model
C5.0.tree.complete <- C5.0(x = complete.train[.1:(ncol(complete.train)-1)], y = complete.train$target)
C5.0.tree.complete.predict <- predict(C5.0.tree.complete, newdata = complete.test)
# rpart model
complete.rpart <- rpart(target ~ ., data = complete.train, method = "class")
complete.pred <- predict(complete.rpart, complete.test, type = "class")
complete.cm = confusionMatrix(data = complete.pred, reference = (complete.test$target), positive = "Yes")
# ranger model
myGrid7complete = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
model7complete <- train(target~.,data = complete.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = myGrid7complete)
# Binning cause I'm lazy
data.bin$sepPaymentStatus[is.na(data.bin$sepPaymentStatus)]<-0
data.bin$augPaymentStatus[is.na(data.bin$augPaymentStatus)]<-0
data.bin$julPaymentStatus[is.na(data.bin$julPaymentStatus)]<-0
data.bin$junPaymentStatus[is.na(data.bin$junPaymentStatus)]<-0
data.bin$mayPaymentStatus[is.na(data.bin$mayPaymentStatus)]<-0
data.bin$aprPaymentStatus[is.na(data.bin$aprPaymentStatus)]<-0
# Make on time or little delay (0) a 0 value, all else a 1 (late, will make a factor later)
data.bin$sepPaymentStatus[data.bin$sepPaymentStatus <= 0]<-0
data.bin$augPaymentStatus[data.bin$augPaymentStatus <= 0]<-0
data.bin$julPaymentStatus[data.bin$julPaymentStatus <= 0]<-0
data.bin$junPaymentStatus[data.bin$junPaymentStatus <= 0]<-0
data.bin$mayPaymentStatus[data.bin$mayPaymentStatus <= 0]<-0
data.bin$aprPaymentStatus[data.bin$aprPaymentStatus <= 0]<-0
data.bin$sepPaymentStatus[data.bin$sepPaymentStatus > 0]<-1
data.bin$augPaymentStatus[data.bin$augPaymentStatus > 0]<-1
data.bin$julPaymentStatus[data.bin$julPaymentStatus > 0]<-1
data.bin$junPaymentStatus[data.bin$junPaymentStatus > 0]<-1
data.bin$mayPaymentStatus[data.bin$mayPaymentStatus > 0]<-1
data.bin$aprPaymentStatus[data.bin$aprPaymentStatus > 0]<-1
# Complete imputing
data.bin.median<-predict(preProcess(data.bin, method='medianImpute'), newdata = data.bin)
data.bin.knn<-predict(preProcess(data.bin, method='knnImpute'), newdata = data.bin)
data.bin.complete<-data.bin[complete.cases(data.bin),]
# Model Creation: Median Impute
# Quick Clean
bin.median.splitIndex <- createDataPartition(data.bin.median$target, p = 0.70, list = FALSE, times = 1)
bin.median.train <- data.bin.median[ bin.median.splitIndex,]
bin.median.test <- data.bin.median[-bin.median.splitIndex,]
bin.median.train.list <- c(bin.median.train$credit, bin.median.train$gender, bin.median.train$education, bin.median.train$maritalStatus, bin.median.train$age, bin.median.train$sepPaymentStatus, bin.median.train$augPaymentStatus, bin.median.train$julPaymentStatus, bin.median.train$junPaymentStatus, bin.median.train$mayPaymentStatus, bin.median.train$aprPaymentStatus, bin.median.train$sepBillStatement, bin.median.train$augBillStatement, bin.median.train$julBillStatement, bin.median.train$junBillStatement, bin.median.train$mayBillStatement, bin.median.train$aprBillStatement, bin.median.train$sepPrevBillStatement, bin.median.train$augPrevBillStatement, bin.median.train$julPrevBillStatement, bin.median.train$junBillStatement, bin.median.train$mayPrevBillStatement, bin.median.train$aprPrevBillStatement)
bin.median.train.matrix <- matrix(bin.median.train.list, nrow = length(bin.median.train.list)/23, ncol = 23)
bin.median.test.list <- c(bin.median.test$credit, bin.median.test$gender, bin.median.test$education, bin.median.test$maritalStatus, bin.median.test$age, bin.median.test$sepPaymentStatus, bin.median.test$augPaymentStatus, bin.median.test$julPaymentStatus, bin.median.test$junPaymentStatus, bin.median.test$mayPaymentStatus, bin.median.test$aprPaymentStatus, bin.median.test$sepBillStatement, bin.median.test$augBillStatement, bin.median.test$julBillStatement, bin.median.test$junBillStatement, bin.median.test$mayBillStatement, bin.median.test$aprBillStatement, bin.median.test$sepPrevBillStatement, bin.median.test$augPrevBillStatement, bin.median.test$julPrevBillStatement, bin.median.test$junBillStatement, bin.median.test$mayPrevBillStatement, bin.median.test$aprPrevBillStatement)
bin.median.test.matrix <- matrix(bin.median.test.list, nrow = length(bin.median.test.list)/23, ncol = 23)
# Glmnet model
fit.bin.median = glmnet(x = bin.median.train.matrix, y = as.double(data.bin.median[bin.median.splitIndex,]$target), family = "binomial")
fit.bin.median.predict <- predict(fit.bin.median, newx = bin.median.test.matrix)
# C5.0 model
bin.C5.0.tree.median <- C5.0(x = bin.median.train[.1:(ncol(bin.median.train)-1)], y = bin.median.train$target)
bin.C5.0.tree.median.predict <- predict(C5.0.tree.median, newdata = bin.median.test)
# rpart model
bin.median.rpart <- rpart(target ~ ., data = bin.median.train, method = "class")
bin.median.pred <- predict(bin.median.rpart, bin.median.test, type = "class")
bin.median.cm = confusionMatrix(data = bin.median.pred, reference = (bin.median.test$target), positive = "Yes")
# ranger model
bin.myGrid7median = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
bin.model7median <- train(target~.,data = bin.median.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = bin.myGrid7median)
# Model Creation: Knn Impute
# Quick Clean
bin.knn.splitIndex <- createDataPartition(data.bin.knn$target, p = 0.70, list = FALSE, times = 1)
bin.knn.train <- data.bin.knn[ bin.knn.splitIndex,]
bin.knn.test <- data.bin.knn[-bin.knn.splitIndex,]
bin.knn.train.list <- c(bin.knn.train$credit, bin.knn.train$gender, bin.knn.train$education, bin.knn.train$maritalStatus, bin.knn.train$age, bin.knn.train$sepPaymentStatus, bin.knn.train$augPaymentStatus, bin.knn.train$julPaymentStatus, bin.knn.train$junPaymentStatus, bin.knn.train$mayPaymentStatus, bin.knn.train$aprPaymentStatus, bin.knn.train$sepBillStatement, bin.knn.train$augBillStatement, bin.knn.train$julBillStatement, bin.knn.train$junBillStatement, bin.knn.train$mayBillStatement, bin.knn.train$aprBillStatement, bin.knn.train$sepPrevBillStatement, bin.knn.train$augPrevBillStatement, bin.knn.train$julPrevBillStatement, bin.knn.train$junBillStatement, bin.knn.train$mayPrevBillStatement, bin.knn.train$aprPrevBillStatement)
bin.knn.train.matrix <- matrix(bin.knn.train.list, nrow = length(bin.knn.train.list)/23, ncol = 23)
bin.knn.test.list <- c(bin.knn.test$credit, bin.knn.test$gender, bin.knn.test$education, bin.knn.test$maritalStatus, bin.knn.test$age, bin.knn.test$sepPaymentStatus, bin.knn.test$augPaymentStatus, bin.knn.test$julPaymentStatus, bin.knn.test$junPaymentStatus, bin.knn.test$mayPaymentStatus, bin.knn.test$aprPaymentStatus, bin.knn.test$sepBillStatement, bin.knn.test$augBillStatement, bin.knn.test$julBillStatement, bin.knn.test$junBillStatement, bin.knn.test$mayBillStatement, bin.knn.test$aprBillStatement, bin.knn.test$sepPrevBillStatement, bin.knn.test$augPrevBillStatement, bin.knn.test$julPrevBillStatement, bin.knn.test$junBillStatement, bin.knn.test$mayPrevBillStatement, bin.knn.test$aprPrevBillStatement)
bin.knn.test.matrix <- matrix(bin.knn.test.list, nrow = length(bin.knn.test.list)/23, ncol = 23)
# Glmnet model
bin.fit.knn = glmnet(x = bin.knn.train.matrix, y = as.double(data.knn[bin.knn.splitIndex,]$target), family = "binomial")
bin.fit.knn.predict <- predict(bin.fit.knn, newx = bin.knn.test.matrix)
# C5.0 model
bin.C5.0.tree.knn <- C5.0(x = bin.knn.train[.1:(ncol(bin.knn.train)-1)], y = bin.knn.train$target)
bin.C5.0.tree.knn.predict <- predict(bin.C5.0.tree.knn, newdata = bin.knn.test)
# rpart model
bin.knn.rpart <- rpart(target ~ ., data = bin.knn.train, method = "class")
bin.knn.pred <- predict(bin.knn.rpart, bin.knn.test, type = "class")
bin.knn.cm = confusionMatrix(data = bin.knn.pred, reference = (bin.knn.test$target), positive = "Yes")
# ranger model
bin.myGrid7knn = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
bin.model7knn <- train(target~.,data = bin.knn.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = bin.myGrid7knn)
# Model Creation: Complete Cases
# Quick Clean
bin.complete.splitIndex <- createDataPartition(data.bin.complete$target, p = 0.70, list = FALSE, times = 1)
bin.complete.train <- data.bin.complete[ bin.complete.splitIndex,]
bin.complete.test <- data.bin.complete[-bin.complete.splitIndex,]
bin.complete.train.list <- c(bin.complete.train$credit, bin.complete.train$gender, bin.complete.train$education, bin.complete.train$maritalStatus, bin.complete.train$age, bin.complete.train$sepPaymentStatus, bin.complete.train$augPaymentStatus, bin.complete.train$julPaymentStatus, bin.complete.train$junPaymentStatus, bin.complete.train$mayPaymentStatus, bin.complete.train$aprPaymentStatus, bin.complete.train$sepBillStatement, bin.complete.train$augBillStatement, bin.complete.train$julBillStatement, bin.complete.train$junBillStatement, bin.complete.train$mayBillStatement, bin.complete.train$aprBillStatement, bin.complete.train$sepPrevBillStatement, bin.complete.train$augPrevBillStatement, bin.complete.train$julPrevBillStatement, bin.complete.train$junBillStatement, bin.complete.train$mayPrevBillStatement, bin.complete.train$aprPrevBillStatement)
bin.complete.train.matrix <- matrix(bin.complete.train.list, nrow = length(bin.complete.train.list)/23, ncol = 23)
bin.complete.test.list <- c(bin.complete.test$credit, bin.complete.test$gender, bin.complete.test$education, bin.complete.test$maritalStatus, bin.complete.test$age, bin.complete.test$sepPaymentStatus, bin.complete.test$augPaymentStatus, bin.complete.test$julPaymentStatus, bin.complete.test$junPaymentStatus, bin.complete.test$mayPaymentStatus, bin.complete.test$aprPaymentStatus, bin.complete.test$sepBillStatement, bin.complete.test$augBillStatement, bin.complete.test$julBillStatement, bin.complete.test$junBillStatement, bin.complete.test$mayBillStatement, bin.complete.test$aprBillStatement, bin.complete.test$sepPrevBillStatement, bin.complete.test$augPrevBillStatement, bin.complete.test$julPrevBillStatement, bin.complete.test$junBillStatement, bin.complete.test$mayPrevBillStatement, bin.complete.test$aprPrevBillStatement)
bin.complete.test.matrix <- matrix(bin.complete.test.list, nrow = length(bin.complete.test.list)/23, ncol = 23)
# Glmnet model
bin.fit.complete = glmnet(x = bin.complete.train.matrix, y = as.double(data.bin.complete[bin.complete.splitIndex,]$target), family = "binomial")
bin.fit.complete.predict <- predict(bin.fit.complete, newx = bin.complete.test.matrix)
# C5.0 model
bin.C5.0.tree.complete <- C5.0(x = bin.complete.train[.1:(ncol(bin.complete.train)-1)], y = bin.complete.train$target)
bin.C5.0.tree.complete.predict <- predict(bin.C5.0.tree.complete, newdata = bin.complete.test)
# rpart model
bin.complete.rpart <- rpart(target ~ ., data = bin.complete.train, method = "class")
bin.complete.pred <- predict(bin.complete.rpart, bin.complete.test, type = "class")
bin.complete.cm = confusionMatrix(data = bin.complete.pred, reference = (bin.complete.test$target), positive = "Yes")
# ranger model
bin.myGrid7complete = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
bin.model7complete <- train(target~.,data = bin.complete.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = bin.myGrid7complete)
# Dummy
dummies_model <- dummyVars(target~., data = data.dummy)
dummies.train <- predict(dummies_model, newdata = data.dummy)
dummies.data <- data.frame(dummies.train)
dummies.data$target <- data.dummy$target
dummies.data <- dummies.data[complete.cases(dummies.data),]
dummy.splitIndex <- createDataPartition(dummies.data$target, p = 0.70, list = FALSE, times = 1)
dummy.train <- dummies.data[ dummy.splitIndex,]
dummy.test <- dummies.data[-dummy.splitIndex,]
dummy.train.list <- c(dummy.train$credit, dummy.train$gender.M, dummy.train$gender.F, dummy.train$education.GradSchool, dummy.train$education.University, dummy.train$education.HighSchool, dummy.train$education.Other, dummy.train$maritalStatus.Married, dummy.train$maritalStatus.Single, dummy.train$maritalStatus.Single, dummy.train$age, dummy.train$sepPaymentStatus, dummy.train$augPaymentStatus, dummy.train$julPaymentStatus, dummy.train$junPaymentStatus, dummy.train$mayPaymentStatus, dummy.train$aprPaymentStatus, dummy.train$sepBillStatement, dummy.train$augBillStatement, dummy.train$julBillStatement, dummy.train$junBillStatement, dummy.train$mayBillStatement, dummy.train$aprBillStatement, dummy.train$sepPrevBillStatement, dummy.train$augPrevBillStatement, dummy.train$julPrevBillStatement, dummy.train$junBillStatement, dummy.train$mayPrevBillStatement, dummy.train$aprPrevBillStatement)
dummy.train.matrix <- matrix(dummy.train.list, nrow = length(dummy.train.list)/29, ncol = 29)
dummy.test.list <- c(dummy.test$credit, dummy.test$gender.M, dummy.test$gender.F, dummy.test$education.GradSchool, dummy.test$education.University, dummy.test$education.HighSchool, dummy.test$education.Other, dummy.test$maritalStatus.Married, dummy.test$maritalStatus.Single, dummy.test$maritalStatus.Other, dummy.test$age, dummy.test$sepPaymentStatus, dummy.test$augPaymentStatus, dummy.test$julPaymentStatus, dummy.test$junPaymentStatus, dummy.test$mayPaymentStatus, dummy.test$aprPaymentStatus, dummy.test$sepBillStatement, dummy.test$augBillStatement, dummy.test$julBillStatement, dummy.test$junBillStatement, dummy.test$mayBillStatement, dummy.test$aprBillStatement, dummy.test$sepPrevBillStatement, dummy.test$augPrevBillStatement, dummy.test$julPrevBillStatement, dummy.test$junBillStatement, dummy.test$mayPrevBillStatement, dummy.test$aprPrevBillStatement)
dummy.test.matrix <- matrix(dummy.test.list, nrow = length(dummy.test.list)/29, ncol = 29)
# Glmnet model
fit.dummy = glmnet(x = dummy.train.matrix, y = as.double(dummies.data[dummy.splitIndex,]$target), family = "binomial")
fit.dummy.predict <- predict(fit.dummy, newx = dummy.test.matrix)
# C5.0 model
C5.0.tree.dummy <- C5.0(x = dummy.train[.1:(ncol(dummy.train)-1)], y = dummy.train$target)
C5.0.tree.dummy.predict <- predict(C5.0.tree.dummy, newdata = dummy.test)
# rpart model
dummy.rpart <- rpart(target ~ ., data = dummy.train, method = "class")
dummy.pred <- predict(dummy.rpart, dummy.test, type = "class")
dummy.cm = confusionMatrix(data = dummy.pred, reference = (dummy.test$target), positive = "Yes")
# ranger model
myGrid7dummy = expand.grid(mtry = 2, splitrule = ("gini"),
min.node.size = c(1:3))
model7dummy <- train(target~.,data = dummy.train, method = "ranger",
trControl = trainControl(method ="cv", number = 7, verboseIter = TRUE),
tuneGrid = myGrid7dummy)
reportResults = function() {
plot(fit.median.predict)
plot(fit.knn.predict)
plot(fit.complete.predict)
plot(fit.bin.median.predict)
plot(bin.fit.knn.predict)
plot(bin.fit.complete.predict)
plot(fit.dummy.predict)
median.cm
knn.cm
complete.cm
bin.median.cm
bin.knn.cm
bin.complete.cm
dummy.cm
summary(C5.0.tree.median)
plot(C5.0.tree.median)
summary(C5.0.tree.knn)
plot(C5.0.tree.knn)
summary(C5.0.tree.complete)
plot(C5.0.tree.complete)
summary(bin.C5.0.tree.median)
plot(bin.C5.0.tree.median)
summary(bin.C5.0.tree.knn)
plot(bin.C5.0.tree.knn)
summary(bin.C5.0.tree.complete)
plot(bin.C5.0.tree.complete)
summary(C5.0.tree.dummy)
plot(C5.0.tree.dummy)
model7median[[4]][c(2:4,6)]
model7knn[[4]][c(2:4,6)]
model7complete[[4]][c(2:4,6)]
bin.model7median[[4]][c(2:4,6)]
bin.model7knn[[4]][c(2:4,6)]
bin.model7complete[[4]][c(2:4,6)]
model7dummy[[4]][c(2:4,6)]
}
# Tuning models ---------------------------------------------------------------
# Elastic-net (glmnet) fits on the tuning split for every imputation variant
# (median / knn / complete-case, each also in a binned version) plus the
# dummy-encoded data.
# FIX(review): several of the original calls were copy-paste slips that
# reused the knn/median objects; corrections are marked "FIX:" below.
# NOTE(review): the complete-case objects (complete.train.matrix,
# complete.splitIndex, data.complete, data.bin.complete,
# bin.complete.train.matrix) are assumed to be created upstream, matching
# the *.test.matrix objects already referenced here — confirm.
fit.median.tune = glmnet(x = median.train.matrix, y = as.double(data.median[median.splitIndex,]$target), family = "binomial", standardize = FALSE)
fit.median.tune.predict <- predict(fit.median.tune, newx = median.test.matrix)
plot(fit.median.tune.predict)
plot(fit.median.tune)
coef(fit.median.tune, s = 0.01)
fit.knn.tune = glmnet(x = knn.train.matrix, y = as.double(data.knn[knn.splitIndex,]$target), family = "binomial", standardize = FALSE)
fit.knn.tune.predict <- predict(fit.knn.tune, newx = knn.test.matrix)
plot(fit.knn.tune.predict)
plot(fit.knn.tune)
coef(fit.knn.tune, s = 0.01)
# FIX: the complete-case model was fitted on the knn design matrix and
# predicted with the knn fit; use the complete-case objects throughout.
fit.complete.tune = glmnet(x = complete.train.matrix, y = as.double(data.complete[complete.splitIndex,]$target), family = "binomial", standardize = FALSE)
fit.complete.tune.predict <- predict(fit.complete.tune, newx = complete.test.matrix)
plot(fit.complete.tune.predict)
plot(fit.complete.tune)
coef(fit.complete.tune, s = 0.01)
bin.fit.median.tune = glmnet(x = bin.median.train.matrix, y = as.double(data.bin.median[median.splitIndex,]$target), family = "binomial", standardize = FALSE)
bin.fit.median.tune.predict <- predict(bin.fit.median.tune, newx = bin.median.test.matrix)
# FIX: plotted the non-binned median predictions before.
plot(bin.fit.median.tune.predict)
plot(bin.fit.median.tune)
coef(bin.fit.median.tune, s = 0.01)
# FIX: was fitted on the non-binned knn design matrix.
bin.fit.knn.tune = glmnet(x = bin.knn.train.matrix, y = as.double(data.bin.knn[knn.splitIndex,]$target), family = "binomial", standardize = FALSE)
bin.fit.knn.tune.predict <- predict(bin.fit.knn.tune, newx = bin.knn.test.matrix)
plot(bin.fit.knn.tune.predict)
plot(bin.fit.knn.tune)
coef(bin.fit.knn.tune, s = 0.01)
# FIX: the binned complete-case model reused the binned knn objects.
bin.fit.complete.tune = glmnet(x = bin.complete.train.matrix, y = as.double(data.bin.complete[complete.splitIndex,]$target), family = "binomial", standardize = FALSE)
bin.fit.complete.predict.tune <- predict(bin.fit.complete.tune, newx = bin.complete.test.matrix)
plot(bin.fit.complete.predict.tune)
plot(bin.fit.complete.tune)
coef(bin.fit.complete.tune, s = 0.01)
fit.dummy.tune = glmnet(x = dummy.train.matrix, y = as.double(dummies.data[dummy.splitIndex,]$target), family = "binomial", standardize = FALSE)
fit.dummy.predict.tune <- predict(fit.dummy.tune, newx = dummy.test.matrix)
plot(fit.dummy.predict.tune)
plot(fit.dummy.tune)
# FIX: inspected the wrong model's coefficients (bin.fit.complete.tune).
coef(fit.dummy.tune, s = 0.01)
# Boosted C5.0 decision trees (trials = 10) for each data variant; the last
# column of every *.train data frame is the `target` outcome, so predictors
# are columns 1:(ncol - 1).
# FIX: every original call indexed with `.1:` (i.e. 0.1:n, whose fractional
# indices truncate to 0:(n-1) and silently DROP the last predictor column)
# instead of `1:`; corrected throughout.
C5.0.tree.median.tune <- C5.0(x = median.train[1:(ncol(median.train)-1)], y = median.train$target, trials = 10)
C5.0.tree.median.predict.tune <- predict(C5.0.tree.median.tune, newdata = median.test, type = "class")
C5.0.tree.knn.tune <- C5.0(x = knn.train[1:(ncol(knn.train)-1)], y = knn.train$target, trials = 10)
C5.0.tree.knn.predict.tune <- predict(C5.0.tree.knn.tune, newdata = knn.test, type = "class")
C5.0.tree.complete.tune <- C5.0(x = complete.train[1:(ncol(complete.train)-1)], y = complete.train$target, trials = 10)
C5.0.tree.complete.predict.tune <- predict(C5.0.tree.complete.tune, newdata = complete.test, type = "class")
bin.C5.0.tree.median.tune <- C5.0(x = bin.median.train[1:(ncol(bin.median.train)-1)], y = bin.median.train$target, trials = 10)
bin.C5.0.tree.median.predict.tune <- predict(bin.C5.0.tree.median.tune, newdata = bin.median.test, type = "class")
# FIX: column count was taken from knn.train instead of bin.knn.train.
bin.C5.0.tree.knn.tune <- C5.0(x = bin.knn.train[1:(ncol(bin.knn.train)-1)], y = bin.knn.train$target, trials = 10)
bin.C5.0.tree.knn.predict.tune <- predict(bin.C5.0.tree.knn.tune, newdata = bin.knn.test, type = "class")
bin.C5.0.tree.complete.tune <- C5.0(x = bin.complete.train[1:(ncol(bin.complete.train)-1)], y = bin.complete.train$target, trials = 10)
bin.C5.0.tree.complete.predict.tune <- predict(bin.C5.0.tree.complete.tune, newdata = bin.complete.test, type = "class")
C5.0.tree.dummy.tune <- C5.0(x = dummy.train[1:(ncol(dummy.train)-1)], y = dummy.train$target, trials = 10)
C5.0.tree.dummy.predict.tune <- predict(C5.0.tree.dummy.tune, newdata = dummy.test, type = "class")
# CART (rpart) classification trees with 20-fold internal cross-validation,
# evaluated via caret confusion matrices ("Yes" is the positive class).
median.rpart.tune <- rpart(target ~ ., data = median.train, method = "class", control = rpart.control(xval = 20))
median.pred.tune <- predict(median.rpart.tune, median.test, type = "class")
median.cm.tune = confusionMatrix(data = median.pred.tune, reference = (median.test$target), positive = "Yes")
# FIX(consistency): every sibling call uses rpart.control(xval = 20); the
# knn call was the only one missing the control argument.
knn.rpart.tune <- rpart(target ~ ., data = knn.train, method = "class", control = rpart.control(xval = 20))
knn.pred.tune <- predict(knn.rpart.tune, knn.test, type = "class")
knn.cm.tune = confusionMatrix(data = knn.pred.tune, reference = (knn.test$target), positive = "Yes")
complete.rpart.tune <- rpart(target ~ ., data = complete.train, method = "class", control = rpart.control(xval = 20))
complete.pred.tune <- predict(complete.rpart.tune, complete.test, type = "class")
complete.cm.tune = confusionMatrix(data = complete.pred.tune, reference = (complete.test$target), positive = "Yes")
bin.median.rpart.tune <- rpart(target ~ ., data = bin.median.train, method = "class", control = rpart.control(xval = 20))
bin.median.pred.tune <- predict(bin.median.rpart.tune, bin.median.test, type = "class")
bin.median.cm.tune = confusionMatrix(data = bin.median.pred.tune, reference = (bin.median.test$target), positive = "Yes")
bin.knn.rpart.tune <- rpart(target ~ ., data = bin.knn.train, method = "class", control = rpart.control(xval = 20))
bin.knn.pred.tune <- predict(bin.knn.rpart.tune, bin.knn.test, type = "class")
bin.knn.cm.tune = confusionMatrix(data = bin.knn.pred.tune, reference = (bin.knn.test$target), positive = "Yes")
bin.complete.rpart.tune <- rpart(target ~ ., data = bin.complete.train, method = "class", control = rpart.control(xval = 20))
bin.complete.pred.tune <- predict(bin.complete.rpart.tune, bin.complete.test, type = "class")
bin.complete.cm.tune = confusionMatrix(data = bin.complete.pred.tune, reference = (bin.complete.test$target), positive = "Yes")
dummy.rpart.tune <- rpart(target ~ ., data = dummy.train, method = "class", control = rpart.control(xval = 20))
dummy.pred.tune <- predict(dummy.rpart.tune, dummy.test, type = "class")
dummy.cm.tune = confusionMatrix(data = dummy.pred.tune, reference = (dummy.test$target), positive = "Yes")
# Random forests (ranger backend via caret::train) with 10-fold CV for each
# data variant.  Each model gets its own tuning grid: mtry fixed at 2,
# split rule gini vs. extratrees, minimum node size 1-3.  All grids are
# currently identical but kept as separate named objects.
myGrid7median.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                 min.node.size = c(1:3))
model7median.tune <- train(target~.,data = median.train, method = "ranger",
                           trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                           tuneGrid = myGrid7median.tune)
myGrid7knn.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                              min.node.size = c(1:3))
model7knn.tune <- train(target~.,data = knn.train, method = "ranger",
                        trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                        tuneGrid = myGrid7knn.tune)
myGrid7complete.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                   min.node.size = c(1:3))
model7complete.tune <- train(target~.,data = complete.train, method = "ranger",
                             trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                             tuneGrid = myGrid7complete.tune)
bin.myGrid7median.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                     min.node.size = c(1:3))
bin.model7median.tune <- train(target~.,data = bin.median.train, method = "ranger",
                               trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                               tuneGrid = bin.myGrid7median.tune)
bin.myGrid7knn.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                  min.node.size = c(1:3))
bin.model7knn.tune <- train(target~.,data = bin.knn.train, method = "ranger",
                            trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                            tuneGrid = bin.myGrid7knn.tune)
bin.myGrid7complete.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                       min.node.size = c(1:3))
bin.model7complete.tune <- train(target~.,data = bin.complete.train, method = "ranger",
                                 trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                                 tuneGrid = bin.myGrid7complete.tune)
myGrid7dummy.tune = expand.grid(mtry = 2, splitrule = c("gini", "extratrees"),
                                min.node.size = c(1:3))
model7dummy.tune <- train(target~.,data = dummy.train, method = "ranger",
                          trControl = trainControl(method ="cv", number = 10, verboseIter = TRUE),
                          tuneGrid = myGrid7dummy.tune)
# Print the evaluation artefacts of all TUNED models fitted above; mirrors
# reportResults() for the *.tune objects.
# FIX: same defect as reportResults() — bare expressions inside a function
# are not auto-printed, so the confusion matrices, summaries and results
# tables were silently discarded.  They are now wrapped in print().
reportResultsTune = function() {
  plot(fit.median.tune.predict)
  plot(fit.knn.tune.predict)
  plot(fit.complete.tune.predict)
  plot(bin.fit.median.tune.predict)
  plot(bin.fit.knn.tune.predict)
  plot(bin.fit.complete.predict.tune)
  plot(fit.dummy.predict.tune)
  print(median.cm.tune)
  print(knn.cm.tune)
  print(complete.cm.tune)
  print(bin.median.cm.tune)
  print(bin.knn.cm.tune)
  print(bin.complete.cm.tune)
  print(dummy.cm.tune)
  print(summary(C5.0.tree.median.tune))
  plot(C5.0.tree.median.tune)
  print(summary(C5.0.tree.knn.tune))
  plot(C5.0.tree.knn.tune)
  print(summary(C5.0.tree.complete.tune))
  plot(C5.0.tree.complete.tune)
  print(summary(bin.C5.0.tree.median.tune))
  plot(bin.C5.0.tree.median.tune)
  print(summary(bin.C5.0.tree.knn.tune))
  plot(bin.C5.0.tree.knn.tune)
  print(summary(bin.C5.0.tree.complete.tune))
  plot(bin.C5.0.tree.complete.tune)
  print(summary(C5.0.tree.dummy.tune))
  plot(C5.0.tree.dummy.tune)
  print(model7median.tune[[4]][c(2:4,6)])
  print(model7knn.tune[[4]][c(2:4,6)])
  print(model7complete.tune[[4]][c(2:4,6)])
  print(bin.model7median.tune[[4]][c(2:4,6)])
  print(bin.model7knn.tune[[4]][c(2:4,6)])
  print(bin.model7complete.tune[[4]][c(2:4,6)])
  print(model7dummy.tune[[4]][c(2:4,6)])
}
|
60f1e194bb8268a3e54988d19063768a5771cd3f
|
6da4fd73af6ac5b3ab3ac8c4ed6c4816a4582813
|
/output_prep_MATLAB.R
|
88f7143e4b0f93d4124350d7467fd309bb6729ff
|
[] |
no_license
|
yierge/eBass
|
13562c8d3641ce368dcef4386b16cd274dfa95b2
|
316d35c537e5a3345fbc74afc12915b9b2af30fb
|
refs/heads/master
| 2023-02-08T16:08:51.440820
| 2020-12-22T08:24:12
| 2020-12-22T08:24:12
| 288,008,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
output_prep_MATLAB.R
|
# inputdata is the result from primarythres.R
#
# Build an eBass summary table: for each row of `testresult`, take the
# primary threshold (column 6) and the FDR implied by column 2, count the
# p-values in `pdata` passing the threshold, and derive the FDR and the
# inflated cluster counts (inflation factor in column 4).
#
# Bug fixes vs. original:
#  * `finalthres` and `fdr_cluster` were never initialised, so the function
#    errored on first use (or silently reused stale globals).
#  * `pdata` was read from the global environment; it is now an explicit
#    argument whose default preserves the old global lookup, so existing
#    callers `PrepeBass(testresult)` keep working.
#
# Args:
#   testresult  data frame/matrix from primarythres.R (cols 2, 4, 6 used).
#   pdata       numeric vector of p-values to threshold.
# Returns:
#   data.frame with PrimaryThres, FDRcluster, Infcluster (counts ceiling'ed).
PrepeBass<-function(testresult, pdata = get("pdata", envir = globalenv()))
{
  n <- length(testresult[,2])
  finalthres<-fdr_rate<-fdr_cluster<-inflate_cluster<-numeric(n)
  for (j in seq_len(n))
  {
    finalthres[j]<-testresult[j,6]
    fdr_rate[j]<-1-testresult[j,2]
    # Number of p-values passing the threshold, scaled by the FDR
    fdr_cluster[j]<-length(which(pdata<=finalthres[j]))*fdr_rate[j]
    inflate_cluster[j]<-fdr_cluster[j]*testresult[j,4]
  }
  results<-as.data.frame(cbind(finalthres, ceiling(fdr_cluster), ceiling(inflate_cluster)))
  names(results)<-c("PrimaryThres","FDRcluster","Infcluster")
  return(results)
}
|
f24c245c65bbffdd06296450886d14800e5755aa
|
8d67857ba1f7781ca8bb0500f2b4d724a4f46e5a
|
/R/helpers.R
|
8a05501ddaec14f1a402d6391f0b36ca912e1285
|
[] |
no_license
|
PhilipPallmann/jocre
|
b9eae5cc676886e127f969f6317022033dd82dff
|
4e07f565a1cb32dc9dcc17730acc781aee33472d
|
refs/heads/master
| 2020-05-29T08:52:09.854986
| 2017-05-12T14:43:52
| 2017-05-12T14:43:52
| 69,365,878
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,526
|
r
|
helpers.R
|
# S3 print method for JOC objects: writes a header describing either the
# simultaneous confidence intervals (interval-based methods) or the
# projected boundaries of the p-dimensional confidence region, then prints
# the estimate/lower/upper table (rows named after the data columns).
print.JOC <- function(x, digits=max(3, getOption("digits") - 4), ...){
  pct <- 100 * (1 - x$alpha)
  interval_based <- x$method %in% c("expanded", "fix.seq", "tost")
  if(interval_based){
    header <- paste("Parameter estimates and ", pct,
                    "% simultaneous confidence intervals:\n\n", sep="")
  }else{
    header <- paste("Parameter estimates and projected boundaries of the ",
                    x$p, "-dimensional\n", pct,
                    "% simultaneous confidence region:\n\n", sep="")
  }
  cat(header)
  tbl <- cbind(round(x$est, digits), round(x$ci, digits))
  dimnames(tbl) <- list(colnames(x$dat), c("Estimate", "Lower", "Upper"))
  print(tbl)
}
# S3 summary method for JOC objects; identical output to print.JOC (header
# plus estimate/lower/upper table with rows named after the data columns).
summary.JOC <- function(object, digits=max(3, getOption("digits") - 4), ...){
  pct <- 100 * (1 - object$alpha)
  if(object$method %in% c("expanded", "fix.seq", "tost")){
    cat(paste("Parameter estimates and ", pct,
              "% simultaneous confidence intervals:\n\n", sep=""))
  }else{
    cat(paste("Parameter estimates and projected boundaries of the ",
              object$p, "-dimensional\n", pct,
              "% simultaneous confidence region:\n\n", sep=""))
  }
  tbl <- cbind(round(object$est, digits), round(object$ci, digits))
  dimnames(tbl) <- list(colnames(object$dat), c("Estimate", "Lower", "Upper"))
  print(tbl)
}
# S3 plot method for JOC objects (2-dimensional only).
#
# Draws the joint confidence region — or crossed confidence intervals for
# the interval-based methods — on a log-ratio scale, optionally shading a
# rectangular equivalence zone.  For the limacon methods the (possibly
# non-convex) boundary is traced by solving a travelling-salesman tour over
# the region points (requires the TSP package) unless convexify = TRUE, in
# which case the convex hull is drawn instead.
#
# Args:
#   x          JOC object (uses $ci, $cr, $est, $method, $dat).
#   equi       length-2 equivalence bounds for the shaded box, or NULL.
#   axnames    optional axis labels; defaults to colnames(x$dat).
#   main       plot title.
#   xlim/ylim  axis limits (log scale).
#   col        colour of the region boundary / interval segments.
#   convexify  if TRUE, draw the convex hull instead of the TSP trace.
plot.JOC <- function(x, equi=log(c(0.8, 1.25)), axnames=NULL, main=NULL, xlim=log(c(0.77, 1.3)),
                     ylim=log(c(0.77, 1.3)), col="black", convexify=FALSE, ...){
  if(nrow(x$ci)!=2){
    stop("Plotting only allowed for regions or intervals in 2 dimensions.")
  }
  if(is.null(axnames)==TRUE){
    axisnames <- colnames(x$dat)
  }else{
    axisnames <- axnames
  }
  par(mar=c(5, 5, 4, 2))
  # Empty canvas; region/intervals are layered on afterwards.
  plot(0, xlim=xlim, ylim=ylim, las=1, xlab=axisnames[1], ylab=axisnames[2],
       cex.main=2.5, cex.axis=1.5, cex.lab=1.7, main=main, type='n', ...)
  if(is.null(equi)==FALSE){
    if(length(equi)!=2){
      stop("Length of equi must be 2.")
    }
    # Shaded equivalence box drawn first so it sits behind the region.
    rect(equi[1], equi[1], equi[2], equi[2], col="gray95", border=NA)
  }
  if(x$method %in% c("limacon.asy", "limacon.fin")){
    if(convexify==FALSE){
      #points(x$cr, pch=20, col=col, cex=0.5)
      # Order the boundary points via a TSP tour (handles non-convex shapes).
      tsp <- TSP(dist(x$cr))
      tour <- solve_TSP(tsp, method='farthest')
      polygon(x$cr[tour, ], col=NULL, border=col, lwd=2)
    }else{
      polygon(x$cr[chull(x$cr), -3], col=NULL, border=col, lwd=2)
    }
  }
  if(x$method %in% c("boot.kern", "emp.bayes", "hotelling", "standard.cor", "standard.ind", "tseng", "tseng.brown")){
    # These regions are convex: the hull suffices.
    polygon(x$cr[chull(x$cr), -3], col=NULL, border=col, lwd=2)
  }
  if(x$method %in% c("expanded", "fix.seq", "tost")){
    # Interval-based methods: draw a horizontal and a vertical CI segment
    # crossing at the point estimate.
    segments(x0=x$ci[1], x1=x$ci[3], y0=x$est[2], y1=x$est[2], lwd=2, col=col)
    segments(y0=x$ci[2], y1=x$ci[4], x0=x$est[1], x1=x$est[1], lwd=2, col=col)
  }
  points(x$est[1], x$est[2], pch=19, col="black")
  # Origin marker (equal ratios).
  points(0, 0, pch="+", col="black", cex=2)
  # Restore default margins.
  par(mar=c(5, 4, 4, 2))
}
# S3 print method for JOCMV objects: prints a header for the 2-dimensional
# simultaneous confidence region, then the estimate/lower/upper table.
print.JOCMV <- function(x, digits=max(3, getOption("digits") - 4), ...){
  conf_pct <- 100 * (1 - x$alpha)
  cat(paste("Parameter estimate and projected boundaries of the 2-dimensional\n",
            conf_pct, "% simultaneous confidence region:\n\n", sep=""))
  out <- cbind(round(x$est, digits), round(x$ci, digits))
  colnames(out) <- c("Estimate", "Lower", "Upper")
  print(out)
}
# S3 summary method for JOCMV objects; identical output to print.JOCMV.
summary.JOCMV <- function(object, digits=max(3, getOption("digits") - 4), ...){
  conf_pct <- 100 * (1 - object$alpha)
  cat(paste("Parameter estimate and projected boundaries of the 2-dimensional\n",
            conf_pct, "% simultaneous confidence region:\n\n", sep=""))
  out <- cbind(round(object$est, digits), round(object$ci, digits))
  colnames(out) <- c("Estimate", "Lower", "Upper")
  print(out)
}
# S3 plot method for JOCMV objects: draws the convex hull of the joint
# mean/variance (or mean/SD, depending on x$scale) confidence region and
# marks the point estimate.
#
# Args:
#   x          JOCMV object (uses $cr, $est, $s, $scale).
#   axnames    optional axis labels; defaults depend on x$scale.
#   main       plot title.
#   xlim/ylim  axis limits; default to the range of the region points.
#   col        colour of the region boundary.
plot.JOCMV <- function(x, axnames=NULL, main=NULL, xlim=NULL, ylim=NULL, col="black", ...){
  if(is.null(xlim)==FALSE){
    xlims <- xlim
  }else{
    xlims <- range(x$cr[, 1])
  }
  if(is.null(ylim)==FALSE){
    ylims <- ylim
  }else{
    ylims <- range(x$cr[, 2])
  }
  if(is.null(axnames)==TRUE){
    # Default axis labels depend on whether the region is on the variance
    # or the standard-deviation scale.
    if(x$scale=="var"){
      axisnames <- c("Mean", "Variance")
    }else{
      axisnames <- c("Mean", "SD")
    }
  }else{
    axisnames <- axnames
  }
  par(mar=c(5, 5, 4, 2))
  plot(0, xlim=xlims, ylim=ylims, las=1, xlab=axisnames[1], ylab=axisnames[2],
       cex.main=2.5, cex.axis=1.5, cex.lab=1.7, main=main, type='n', ...)
  # Convex hull of the region points.
  polygon(x$cr[chull(x$cr[, ]), ], col=NULL, border=col, lwd=2)
  # Point estimate: (mean, variance) or (mean, SD) to match the y-axis.
  if(x$scale=="var"){
    points(x$est, x$s^2, pch=19, col="black")
  }else{
    points(x$est, x$s, pch=19, col="black")
  }
  # Restore default margins.
  par(mar=c(5, 4, 4, 2))
}
|
1baffdf9b30213d745f8411600b0fc885b01b86f
|
76d33f5007d50fc7cc3a22a1aa7878afde234950
|
/aeltere-Versionen/3_suf_wsi-brb-2015_befragungsdaten-aufbereiten.R
|
0a9e3dc8f2ffbbd5d91c54464f6c1eec64ad0b6b
|
[] |
no_license
|
helge-baumann/suf_brb_2016
|
19e235d8479c7227f5fec428672977e4ad37ede2
|
ada5645756b17c04d07d1c85ae9b9a43cdf6fc22
|
refs/heads/master
| 2021-03-30T05:52:40.130402
| 2020-03-19T14:44:22
| 2020-03-19T14:44:22
| 248,023,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,166
|
r
|
3_suf_wsi-brb-2015_befragungsdaten-aufbereiten.R
|
# Prepare survey data
# Merge survey weights ----------------------------------------------------------
gew <- daten_input[["gew"]][,c("lfd", "gewbr_k", "gewbr_l")]
dat <- merge(daten_input[["dat"]], gew, by="lfd")
# Append variable labels and value labels from the weights data set
dat.varlabels.alle <- c(varlabels[["dat"]], varlabels[["gew"]])
dat.valuelabels.alle <- append(valuelabels[["dat"]], valuelabels[["gew"]])
rm(list=c("gew"))
# Share (percentage) variables --------------------------------------------------
# Documentation of generated variables
Doku_gen <- list(NA)
# For every variable whose label contains "anteil" (German for "share"),
# derive a generated "<name>_gen" share variable:
#  * a preceding yes/no "presence" variable maps "nein" (no) to a share of 0,
#  * a preceding absolute count is converted to a percentage of the
#    establishment size D1,
#  * labelled missing-value codes are carried over from both sources,
#  * shares > 100% are recoded to a new "implausible value" code.
# The consumed source columns are dropped from `dat` as they are processed.
for(b in names(dat)) {
varlabel <- dat.varlabels.alle[b]
valuelabel <- dat.valuelabels.alle[[b]]
if (grepl("anteil", varlabel, ignore.case=T)) {
Doku_gen[[paste0(b, "_gen")]] <- matrix(NA)
Doku_gen[[paste0(b, "_gen")]][1] <- b
# Create the main (share) variable
z <- dat[,b]
# Carry over the presence (yes/no) variable two columns earlier
if (grepl(
"vorhandensei",
varlabels[["dat"]][which(names(dat.varlabels.alle)==b)-2],
ignore.case=T)) {
Doku_gen[[paste0(b, "_gen")]][2] <- names(dat)[which(names(dat)==b)-2]
x <- dat[,which(names(dat)==b)-2]
# "nein" (no): share = 0
z[tolower(x) == "nein"] <- 0
# Carry over labelled missing-value codes
z[tolower(x) %in%
tolower(names(valuelabel))] <-
valuelabel[
as.character(
x[tolower(x) %in%
tolower(names(valuelabel))
])]
# Drop the consumed source column
dat <- dat[,-(which(names(dat)==b)-2)]
}
# Carry over the absolute count variable one column earlier
y <- dat[,which(names(dat)==b)-1]
y.name <- names(dat)[which(names(dat)==b)-1]
# Compute the share (in %) from the establishment size D1
z[
!(y %in% valuelabels[["dat"]][[y.name]]) &
!(dat$D1 %in% valuelabels[["dat"]][["D1"]]) &
is.na(y) == F & is.na(dat$D1)==F] <-
(y[
!(y %in% valuelabels[["dat"]][[y.name]]) &
!(dat$D1 %in% valuelabels[["dat"]][["D1"]]) &
is.na(y) == F & is.na(dat$D1)==F]/
dat$D1[
!(y %in% valuelabels[["dat"]][[y.name]]) &
!(dat$D1 %in% valuelabels[["dat"]][["D1"]]) &
is.na(y) == F & is.na(dat$D1)==F]
)*100
# Carry over labelled missing-value codes
# a) from the absolute counts
labtable.y <- valuelabels[["dat"]][[y.name]]
varunique.y <- na.omit(unique(y))
names(varunique.y) <- as.character(varunique.y)
gen.lab.y <- sort(c(varunique.y[!varunique.y %in% labtable.y], labtable.y))
y.fac <- factor(y, levels=gen.lab.y, labels=names(gen.lab.y))
labtable.d1 <- valuelabels[["dat"]][["D1"]]
varunique.d1 <- na.omit(unique(dat$D1))
names(varunique.d1) <- as.character(varunique.d1)
gen.lab.d1 <- sort(c(varunique.d1[!varunique.d1 %in% labtable.d1], labtable.d1))
d1.fac <- factor(dat$D1, levels=gen.lab.d1, labels=names(gen.lab.d1))
z[
tolower(y.fac) %in%
tolower(names(valuelabel))] <-
valuelabel[
as.character(
y.fac[
tolower(y.fac) %in%
tolower(names(valuelabel))])]
z[
tolower(d1.fac) %in%
tolower(names(valuelabel))==T &
is.na(z)==T & is.na(d1.fac)==F
] <-
valuelabel[
as.character(
d1.fac[tolower(d1.fac) %in%
tolower(names(valuelabel))==T &
is.na(z)==T & is.na(d1.fac)==F
])]
# Flag implausible values (share > 100%) with a new code one above the
# highest existing missing-value code
z[z > 100 & !(z %in% valuelabel) & is.na(z)==F] <-
valuelabel[length(valuelabel)]+1
Doku_gen[[paste0(b, "_gen")]][3] <- names(dat)[which(names(dat)==b)-1]
Doku_gen[[paste0(b, "_gen")]][4] <- "D1"
# Drop column n-1 (the consumed count column)
dat <- dat[,-(which(names(dat)==b)-1)]
dat[,b] <- z
names(dat)[which(names(dat)==b)] <- paste0(b, "_gen")
dat.valuelabels.alle[[paste0(b, "_gen")]] <- valuelabel
dat.valuelabels.alle[[paste0(b, "_gen")]][length(valuelabel)+1] <-
dat.valuelabels.alle[[paste0(b, "_gen")]][length(valuelabel)]+1
names(dat.valuelabels.alle[[paste0(b, "_gen")]])[length(valuelabel)+1] <-
"unplausibler Wert (Anteil > 1)"
dat.varlabels.alle[paste0(b, "_gen")] <-
paste0(dat.varlabels.alle[b], " (generierte Variable)")
}
}
# Build the internal WSI data set
# Merge sampling-frame characteristics -------------------------------------------
dat <- merge(dat, daten_input[["sti"]], by="lfd")
# Append variable labels and value labels from the sampling-frame data
dat.varlabels.alle <- c(dat.varlabels.alle, varlabels[["sti"]])
dat.valuelabels.alle <- append(dat.valuelabels.alle, valuelabels[["sti"]])
Datensatz_Intern <- dat
# Classifies industries (WZ 2008) and adds branche6/8/10 factor columns
source("./R_Funk/systematisierung_wz.R", encoding="UTF-8")
Datensatz_Intern <- Datensatz_Intern[,!(names(dat) %in% "w08_abschnitt")]
# Variable labels for the generated industry groupings (labels kept in
# German — they are data, not comments)
branche.varlabels <- NA
branche.varlabels["branche6"] <-
"Branche nach WZ 2008 (6er-Vergröberung)"
branche.varlabels["branche8"] <-
"Branche nach WZ 2008 (8er-Vergröberung)"
branche.varlabels["branche10"] <-
"Branche nach WZ 2008 (10er-Vergröberung)"
# Value labels: integer codes mapped to the factor levels
branche.valuelabels <- list(NA)
branche.valuelabels[["branche6"]] <-
1:6
names(branche.valuelabels[["branche6"]]) <-
levels(Datensatz_Intern$branche6)
branche.valuelabels[["branche8"]] <-
1:8
names(branche.valuelabels[["branche8"]]) <-
levels(Datensatz_Intern$branche8)
branche.valuelabels[["branche10"]] <-
1:10
names(branche.valuelabels[["branche10"]]) <-
levels(Datensatz_Intern$branche10)
# Append variable labels and value labels for the industry groupings
dat.varlabels.alle <- c(dat.varlabels.alle, branche.varlabels)
dat.valuelabels.alle <- append(dat.valuelabels.alle, branche.valuelabels)
rm(list=c("branche.varlabels", "branche.valuelabels"))
# Restrict the label collections to columns actually present in the data set
Datensatz.varlabels <- dat.varlabels.alle[
names(dat.varlabels.alle) %in% names(Datensatz_Intern)]
Datensatz.varlabels <- Datensatz.varlabels[unique(
names(Datensatz.varlabels))]
Datensatz.valuelabels <- dat.valuelabels.alle[
names(dat.valuelabels.alle) %in% names(Datensatz_Intern)]
Datensatz.valuelabels <- Datensatz.valuelabels[unique(
names(Datensatz.valuelabels))]
attr(Datensatz_Intern, "label.table") <- Datensatz.valuelabels
attr(Datensatz_Intern, "var.labels") <- Datensatz.varlabels # NOTE(review): names possibly mis-sorted
dir.create("./Output/Befragungsdaten intern/")
dir.create("./Output/Befragungsdaten intern/")
# für Paket haven
# Convert every column to haven-labelled doubles (for the haven package):
# factors are mapped back to their original numeric codes, value labels are
# attached via val_label(), and the German missing-value labels
# ("verweigert" = refused, "weiß nicht" = don't know, implausible value)
# are declared as user-defined NA codes.  These label strings are data and
# must stay in German.
for(b in names(Datensatz_Intern)) {
if(class(Datensatz_Intern[[b]]) == "factor") {
Datensatz_Intern[[b]] <-
round(as.double(get.origin.codes(
Datensatz_Intern[[b]],
label.table=attr(Datensatz_Intern,
"label.table")[[b]])), digits=0)
for(i in attr(Datensatz_Intern,
"label.table")[[b]]) {
val_label(Datensatz_Intern[[b]], i) <-
names(attr(Datensatz_Intern,
"label.table")[[b]][attr(Datensatz_Intern,
"label.table")[[b]]==i])
na_values(Datensatz_Intern[[b]]) <-
round(as.double(unname(attr(Datensatz_Intern, "label.table")[[b]][
names(attr(Datensatz_Intern,
"label.table")[[b]]) %in% c("verweigert", "weiß nicht",
"unplausibler Wert (Anteil > 1)")])),
digits=0)
}
#Datensatz_Intern[[b]] <- as_factor(Datensatz_Intern[[b]])
#attr(Datensatz_Intern[[b]], "labels") <- attr(Datensatz_Intern, "label.table")[[b]]
} else { # numeric variables
Datensatz_Intern[[b]] <- as.double(Datensatz_Intern[[b]])
for(i in attr(
Datensatz_Intern,
"label.table")[[b]]
) {
val_label(Datensatz_Intern[[b]], i) <-
names(
attr(
Datensatz_Intern,
"label.table")[[b]][
attr(
Datensatz_Intern,
"label.table")[[b]]==i]
)
# For numeric columns every labelled value is treated as a missing code
na_values(Datensatz_Intern[[b]]) <-
as.double(unname(attr(Datensatz_Intern,
"label.table")[[b]]))
}
}
var_label(Datensatz_Intern[b]) <-
attr(Datensatz_Intern, "var.labels")[b]
}
# Export the internal data set as SPSS .sav, date-stamped
write_sav(Datensatz_Intern,
paste0(
"./Output/Befragungsdaten intern/",
"WSI-BRB-2015_Befragungsdaten-intern",
format(Sys.time(), "%Y-%m-%d"), ".sav"
)
)
# Stata support is still missing.
# Problem: Stata cannot assign value labels to non-integer values.
# However, this only applies to the labelled values themselves
# see https://github.com/tidyverse/haven/commit/5010f44ebf24797a75d4acc0f4ddde873c624465
# and https://github.com/tidyverse/haven/issues/343
Fertige_Exportdaten[["Befragungsdaten_intern"]] <- Datensatz_Intern
#rm(list=c("Datensatz_Intern", "Datensatz.varlabels", "Datensatz.valuelabels",
#"dat.varlabels", "dat.valuelabels",
#"d1.fac", "gen.lab.d1", "gen.lab.y",
#"labtable.d1", "labtable.y",
#"valuelabel", "varlabel",
#"varunique.d1", "varunique.y",
#"x", "y", "z",
#"y.fac", "y.name", "b"))
|
297959200dcc0bb4ca65d4de64cc95820070a728
|
7d95ef134deeba5f74fe238fdaa21bfadc915e7d
|
/plot3.R
|
34495eb2b93c40cfb48201602b682c6681483fb3
|
[] |
no_license
|
acrung/ExData_Plotting1
|
db565d6e392c26f8d3980c0aa9ba57b7f2c071b9
|
5e2ec4c5ab0509d75092e6910d19aa671c5a4d07
|
refs/heads/master
| 2021-01-20T18:09:49.489887
| 2015-05-06T15:45:00
| 2015-05-06T15:45:00
| 35,055,813
| 0
| 0
| null | 2015-05-04T19:50:07
| 2015-05-04T19:50:07
| null |
UTF-8
|
R
| false
| false
| 2,039
|
r
|
plot3.R
|
# Plot 3 of the Exploratory Data Analysis assignment: energy sub-metering
# time series for 2007-02-01 and 2007-02-02 written to plot3.png.
Sys.setlocale("LC_TIME", "English")
con<-file("household_power_consumption.txt")
open(con)
#Getting only good rows
#2007-02-01 # first lign 66638
#and 2007-02-02 #last lign 69517
# 69517-66638 = 2879
myColNames <- c("date","time","globalactivepower","globalreactivepower", "voltage",
                "globalintensity","submetering1","submetering2","submetering3")
# FIX: there are 9 columns but only 8 classes were supplied; read.table
# recycles colClasses, which silently made submetering3 a character column.
myColClasses <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
power <- read.table(con,skip=66637,nrow=2880, sep = ";", col.names = myColNames, na.strings="?",
                    colClasses=myColClasses)
# FIX: close the user-opened connection (read.table leaves it open).
close(con)
#changing 1st column to date
power[,1] <- as.Date(power[,1], format="%d/%m/%Y")
#creating a special Time column (like a timestamp)
power$Time <- paste(power[,1], power[,2], sep="_")
power$Time <- strptime(power$Time, format="%Y-%m-%d_%H:%M:%S")
# PLOT 1
# hist(power$globalactivepower,
#      xlab="Global Active Power (kilowatts)",
#      ylab="Frequency",
#      col="red",
#      main="Global Active Power"
#      )
# dev.copy(png, file = "plot1.png", width=480, height=480)
# dev.off()
# creating a special weekday column (monday, tuesday...)
# power$Day <- format(power$Time, format="%A")
# PLOT 2
# plot(x=power$Time, y=power$globalactivepower, type="l",
#      xlab="", ylab="Global Active Power (killowatts)")
# dev.copy(png, file = "plot2.png", width=480, height=480)
# dev.off()
#PLOT 3
png(file = "plot3.png", width=480, height=480)
plot(x=power$Time, y=power$submetering1, type="l",
     xlab="", ylab="Energy sub metering"
)
lines(x=power$Time, y=power$submetering2, type="l", col="red")
lines(x=power$Time, y=power$submetering3, type="l", col="blue")
# FIX: legend colours must match the plotted series (1 = black, 2 = red,
# 3 = blue); the original listed c("black","blue","red"), mislabelling
# sub_metering 2 and 3.  The trailing comma (an empty argument passed
# into `...`) is also removed.
legend("topright",
       lty=c(1,1,1), # gives the legend appropriate symbols (lines)
       lwd=c(2.5,2.5,2.5),col=c("black","red","blue"), # legend line colours matching the plotted series
       legend =c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
)
# dev.copy(png, file = "plot3.png", width=480, height=480)
dev.off()
|
a99b182d19a9455617748c3b5a5b5476459af751
|
daa0947ca5fbad70bdb9b6545b14b44562804407
|
/Tasks/Task_03/Patterns_Imports_2019.R
|
993672b19d2d16b9856a717e3db53e252ca3568b
|
[
"MIT"
] |
permissive
|
ankurRangi/Data-Analysis-CEEW
|
f8d8c9d810f40878523d1187757d5ab50cef7008
|
cb654152ba2ae10bdd50729fa48dfdf4c576f01c
|
refs/heads/main
| 2023-04-15T12:24:06.674176
| 2021-05-03T10:20:14
| 2021-05-03T10:20:14
| 361,378,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,963
|
r
|
Patterns_Imports_2019.R
|
# Trade-pattern preparation for 2019 ASEAN import data.
# NOTE(review): this script appears to be an interactive session transcript;
# several objects it references (hslist_growth, indo, sing, ...) are not
# defined in this file and will error in a fresh session — see the notes
# below.
library(readxl)
library(stringr)
# Read sheet 3 of each country's workbook
indo_2019 <- read_xlsx("D:/CEEW/Data/Task_03/indonesia.xlsx", 3)
sing_2019 <- read_xlsx("D:/CEEW/Data/Task_03/singapore.xlsx", 3)
mala_2019 <- read_xlsx("D:/CEEW/Data/Task_03/malaysia.xlsx", 3)
thai_2019 <- read_xlsx("D:/CEEW/Data/Task_03/thailand.xlsx", 3)
viet_2019 <- read_xlsx("D:/CEEW/Data/Task_03/vietnam.xlsx", 3)
phil_2019 <- read_xlsx("D:/CEEW/Data/Task_03/philippines.xlsx", 3)
brun_2019 <- read_xlsx("D:/CEEW/Data/Task_03/brunie.xlsx", 3)
laos_2019 <- read_xlsx("D:/CEEW/Data/Task_03/laos.xlsx", 3)
camb_2019 <- read_xlsx("D:/CEEW/Data/Task_03/cambodia.xlsx", 3)
myan_2019 <- read_xlsx("D:/CEEW/Data/Task_03/myanmar.xlsx", 3)
# Replace NA values with 0 in every table
indo_2019[is.na(indo_2019)] <- 0
sing_2019[is.na(sing_2019)] <- 0
mala_2019[is.na(mala_2019)] <- 0
thai_2019 [is.na(thai_2019 )] <- 0
viet_2019 [is.na(viet_2019 )] <- 0
phil_2019[is.na(phil_2019)] <- 0
brun_2019[is.na(brun_2019)] <- 0
laos_2019[is.na(laos_2019)] <- 0
camb_2019[is.na(camb_2019)] <- 0
myan_2019[is.na(myan_2019)] <- 0
# Truncate the Philippines HS codes to a common granularity: 8-digit codes
# keep their first 6 digits, 7-digit codes their first 5, others unchanged.
HSCode <- c()
for( i in 1:NROW(phil_2019)){
  spl <- as.numeric(strsplit(as.character(phil_2019$HSCode[i]), "")[[1]])
  if(length(spl) == 8){
    HSCode[i] = as.numeric(paste(spl[1:6], collapse = ""))
  }else if(length(spl) == 7){
    HSCode[i] = as.numeric(paste(spl[1:5], collapse = ""))
  }else{
    HSCode[i] = as.numeric(paste(spl, collapse = ""))
  }
}
#HSCodev2 <- str_pad(HSCode, width=6, side="left", pad="0")
#View(HSCode)
phil_2019$HSCode_2 <- HSCode
phil_2019 <- phil_2019[, c(1,2,7,3,4,5,6)]
# Pool all countries' truncated HS codes
# NOTE(review): hscode_list is built here but never used below —
# presumably hslist_growth (used later) was derived from it interactively.
hscode_list <- c(indo_2019$HSCode_2, sing_2019$HSCode_2, mala_2019$HSCode_2, thai_2019$HSCode_2,
                 viet_2019$HSCode_2, phil_2019$HSCode_2, brun_2019$HSCode_2, laos_2019$HSCode_2,
                 camb_2019$HSCode_2, myan_2019$HSCode_2)
# Keep the HS code where imports grew between column 5 and column 6
temp <- c()
for(i in 1:NROW(myan_2019)){
  if((myan_2019[i,6] - myan_2019[i,5]) > 0 ){
    temp[i] <- as.numeric(myan_2019$HSCode_2[i])
  }else{
    temp[i] <- 0
  }
}
# NOTE(review): hslist_growth is used before it is defined anywhere in this
# file — this line errors in a fresh session; it was presumably accumulated
# across per-country runs of the loop above.
hslist_growth <- c(hslist_growth, temp)
hslist_growth[hslist_growth == 0] <- NA
hslist_growth <- hslist_growth[complete.cases(hslist_growth)]
View(hslist_growth)
updated_dataset <- data.frame(c(hslist_growth))
# NOTE(review): bare object names (auto-print in an interactive session);
# these error unless indo, sing, ... exist — likely leftovers.
indo
sing
mala
thai
viet
phil
brun
laos
camb
myan
# Look up each growth HS code's 2019-20 import value for the Philippines
year_2019 <- c()
for(i in 1:NROW(hslist_growth)){
  if(length(phil_2019$`2019-2020(Apr-Feb(P))`[phil_2019$HSCode_2 == hslist_growth[i]]) != 0 ){
    year_2019[i] <- phil_2019$`2019-2020(Apr-Feb(P))`[phil_2019$HSCode_2 == hslist_growth[i]]
  }else{
    year_2019[i] <- NA
  }
}
updated_dataset$phil_2019 <- year_2019
View(year_2019)
View(updated_dataset)
# Derive the 2-digit HS chapter from the 6-digit code
twodigit <- updated_dataset$c.hslist_growth./10000
twodigit <- floor(twodigit)
View(twodigit)
updated_dataset$twodigit <- twodigit
updated_dataset <- updated_dataset[, c(1,2,3,4,5,6,7,12,8,9,10, 11)]
write.csv(updated_dataset, "D:/CEEW/Data/Task_03/UPDATED_dataset_2019_v2.csv", row.names = TRUE)
|
dccb81ac87aba9fb571d81881bd05071c0f0e49f
|
c7953ff0d490aca333db79d5275f5a339e6bf2ec
|
/man/meta_tissue.Rd
|
baafc4f6f5543b040c340ec668f85e138101e4b5
|
[] |
no_license
|
brandonjew/mcLMM
|
339717e1e2f38315c94218a9cb35567845ad1fad
|
ca0d16e083fc25be278750df6a2e9096e3b54c1a
|
refs/heads/master
| 2023-07-01T06:53:56.055349
| 2021-07-09T16:59:46
| 2021-07-09T16:59:46
| 253,665,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,386
|
rd
|
meta_tissue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcLMM.R
\name{meta_tissue}
\alias{meta_tissue}
\title{Ultra-fast meta-tissue algorithm}
\usage{
meta_tissue(
expr,
geno,
covs = NULL,
heuristic = FALSE,
newRE = TRUE,
force.iter = FALSE,
verbose = TRUE
)
}
\arguments{
\item{expr}{Matrix with individuals as rows and tissues as columns. Missing
gene expression must be NA}
\item{geno}{Vector of genotypes for each individual}
\item{covs}{Matrix with individuals as rows and covariates as columns.}
\item{heuristic}{Boolean. Uses heuristic for scaling standard errors.
increases sensitivity at the cost of higher FPR.}
\item{newRE}{Boolean. Use new random effects model to perform meta
analyses as discussed in Sul et al.}
\item{force.iter}{Boolean. If TRUE, force iterative method even when there
is no missing data. This is included for testing purposes
only. The optimal non-iterative method for no missing data
is exact and way faster.}
\item{verbose}{Boolean. Output logging info.}
}
\value{
List of estimated coefficients \code{beta}, coefficient correlation
\code{corr}, and \code{sigma_g}.
}
\description{
Provides identical results as meta-tissue in linear time with respect to
the number of samples rather than cubic. Slight differences may be
due to different optimization algorithms of the same likelihood function.
}
|
33cc8b164b6685cc9b456ae95fb2cfd36c70c562
|
3b0f4a825fefab15559d472a9b1d69049c5c9ada
|
/cachematrix.R
|
bcefd037fe940cb510a3ca75398ad6f00d25af7d
|
[] |
no_license
|
spillerwill/ProgrammingAssignment2
|
31c984c7f18bf1f109dab44161303740bde39e18
|
942205c427ef616d967f7262fe142594f91e5b76
|
refs/heads/master
| 2020-04-18T07:19:11.873279
| 2019-01-24T13:26:27
| 2019-01-24T13:26:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,671
|
r
|
cachematrix.R
|
## Coursera - Johns Hopkins - R Programming - Week 3 - Assignment 2
## This pair of functions computes the inverse of a matrix and creates an object
## to associate the matrix with its inverse in the cache. The computation
## function checks for a pre-existing computed inverse to avoid unneccessary
## repeated computation.
## makeCacheMatrix creates a 'matrix' object that can cache its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()){       # Defaults to 1x1 NA matrix
    inverse <- NULL                              # Will hold matrix inverse
    set <- function(y){
        x <<- y                                  # Set matrix in parent environment
        # FIX: must use <<- so the cached inverse in the enclosing
        # environment is invalidated when the matrix changes; the original
        # plain <- only created a local variable and left a STALE cache,
        # so cacheSolve could return the inverse of the old matrix.
        inverse <<- NULL
    }
    get <- function(){x}                         # Returns the stored matrix
    setinverse <- function(z){
        inverse <<- z                            # Set inverse in parent environment
    }
    getinverse <- function(){inverse}            # Returns the cached inverse (or NULL)
    list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve computes the inverse of the 'matrix' created by makeCacheMatrix.
## If the inverse of a given matrix has already been computed, it retrieves
## this from the cache rather than re-computing it.
cacheSolve <- function(x, ...){
    # FIX: the original read `x$getinverse` WITHOUT parentheses, so
    # `inverse` held the accessor function itself (never NULL); the
    # is.null() guard was therefore always TRUE and the function returned
    # the closure instead of a matrix.
    inverse <- x$getinverse()
    if(!is.null(inverse)){                 # Checks for a preexisting inverse
        message("getting cached data")
        return(inverse)                    # Returns the cached inverse
    }
    nocache <- x$get()
    inverse <- solve(nocache, ...)         # If no cached inverse, solve
    x$setinverse(inverse)                  # Cache the result for next time
    inverse
}
|
3cc0bf56a96249bd9294a4f5b061160fc08ac39f
|
dd25f95d4444b4a1f7445a5c2fb8209418b000b9
|
/inst/examples/shiny_example_01/app.R
|
c1dddd00ee1b9fd2a9bb6fe6d1b57069952f5b51
|
[
"MIT"
] |
permissive
|
harveyl888/barRating
|
fb276e9ba76b57146fcab680ae6458ceab3e1234
|
45ea330775d6afc433b2e0c346276da88e201056
|
refs/heads/master
| 2021-01-20T18:24:03.180233
| 2016-07-21T12:43:55
| 2016-07-21T12:43:55
| 60,821,983
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,655
|
r
|
app.R
|
## Example
library(shiny)
library(barRating)
# Shiny server: renders six barRating widgets in different themes and wires
# up the demo controls (set value / clear / read-only) that target bar1.
server <- function(input, output, session) {
  # 1-10 bar theme; allows an empty selection and shows the chosen rating
  output$bar1 <- renderBarRating(barRating(choices = as.character(seq(1:10)),
                                           theme = 'bars-1to10',
                                           selected = '7',
                                           includeEmpty = TRUE,
                                           showSelectedRating = TRUE))
  # Movie-rating labels
  output$bar2 <- renderBarRating(barRating(choices = c('Bad', 'Mediocre', 'Good', 'Awesome'),
                                           theme = 'bars-movie',
                                           selected = 'Good'))
  # Square theme with the values printed on the bars
  output$bar3 <- renderBarRating(barRating(choices = as.character(seq(1:5)),
                                           theme = 'bars-square',
                                           selected = '4',
                                           showValues = TRUE,
                                           showSelectedRating = FALSE))
  # Pill theme with letter choices
  output$bar4 <- renderBarRating(barRating(choices = LETTERS[1:5],
                                           theme = 'bars-pill',
                                           selected = 'C',
                                           showValues = TRUE,
                                           showSelectedRating = FALSE))
  # Likert-style agreement scale, rendered in reverse order
  output$bar5 <- renderBarRating(barRating(choices = c('Strongly Agree', 'Agree', 'Neither Agree or Disagree', 'Disagree', 'Strongly Disagree'),
                                           theme = 'bars-reversed',
                                           selected = 'Disagree',
                                           reverse = TRUE,
                                           showValues = FALSE,
                                           showSelectedRating = TRUE))
  # Horizontal bars counting down from 5
  output$bar6 <- renderBarRating(barRating(choices = as.character(seq(5,0)),
                                           selected = '3',
                                           reverse = TRUE,
                                           theme = 'bars-horizontal',
                                           showSelectedRating = TRUE))
  # Echo the current value of every widget (inputs are exposed as
  # <outputId>_value by the barRating binding)
  output$txt <- renderText({
    paste0('bar 1 = ', input$bar1_value, '\n',
           'bar 2 = ', input$bar2_value, '\n',
           'bar 3 = ', input$bar3_value, '\n',
           'bar 4 = ', input$bar4_value, '\n',
           'bar 5 = ', input$bar5_value, '\n',
           'bar 6 = ', input$bar6_value, '\n')
  })
  # Demo controls acting on bar1
  observeEvent(input$butChangeValue, {
    barRatingUpdate('bar1', 5, session)
  })
  observeEvent(input$butClear, {
    barRatingClear('bar1', session)
  })
  # Toggle read-only state whenever the checkbox changes
  observe({
    barRatingReadOnly('bar1', input$chkReadOnly, session)
  })
}
# Shiny UI: two rows of three barRating widgets plus the demo controls
# (update / clear / read-only checkbox) and a text echo of all values.
ui <- fluidPage(
  h1('barRating widget'),
  h4('For further information check out ',
     a("http://antenna.io/demo/jquery-bar-rating/examples/", href="http://antenna.io/demo/jquery-bar-rating/examples/", target="_blank"), ' and ',
     a("http://github.com/antennaio/jquery-bar-rating", href="http://github.com/antennaio/jquery-bar-rating", target="_blank")),
  hr(),
  fluidRow(
    column(4, barRatingOutput('bar1')),
    column(4, barRatingOutput('bar2')),
    column(4, barRatingOutput('bar3'))
  ),
  br(),
  fluidRow(
    column(4, barRatingOutput('bar4')),
    column(4, barRatingOutput('bar5')),
    column(4, barRatingOutput('bar6'))
  ),
  br(),
  fluidRow(
    column(3,
           actionButton('butChangeValue', 'set value to 5'),
           actionButton('butClear', 'clear the value'),
           checkboxInput('chkReadOnly', 'read only state', value = FALSE)
    ),
    column(3,
           verbatimTextOutput('txt')
    )
  )
)
# Launch the demo application
shinyApp(ui = ui, server = server)
|
dac51b9525b7ba1e69b7374a4b1184f46b9f60ab
|
3f650f820a147adbc121a3e666dc531fd13f9c8b
|
/SupFigTable.R
|
cdfcd501fc2df25ed727852b315c727443cbf3df
|
[] |
no_license
|
solocell/BiologicalProcessActivity
|
42f05cfa13498034fd8d9f06cd7b950d971793d7
|
346c927954f87789cee78e3068e34059f3dd38d6
|
refs/heads/master
| 2022-02-17T19:03:25.715268
| 2019-09-28T00:02:47
| 2019-09-28T00:02:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,201
|
r
|
SupFigTable.R
|
# SupFig1: copy the pre-rendered batch-correction figure
system("cp ./Benchmark/batchCorrect.pdf ./SupFig1.pdf")
# SupFig2: drop-out characteristics (expression rate vs. mean expression)
# for GTEx bulk, Chromium immune clusters, and E-MTAB-3929 embryo stages.
# NOTE(review): the original comment here said "SupFig1", but the code
# below writes SupFig2.pdf.
load("./Dropout/GTEx/GO.rda")
load("./Immune/Chromium/GO.rda")
load("./Immune/Chromium/Chromium.rda")
load("./Embryo/E-MTAB-3929/GO.rda")
pdf("SupFig2.pdf", width = 9, height = 6)
par(mfcol = c(2, 3))
plot(rowMeans(rpkm_drop[, 1:20]), rowSums(rpkm_drop[, 1:20] > 0)/20, xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "Esophagus")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(rpkm_drop[, 1:20] == 0)/length(rpkm_drop[, 1:20])*100, 4)), bty = "n", border = NA)
plot(rowMeans(rpkm_drop[, 21:40]), rowSums(rpkm_drop[, 21:40] > 0)/20, xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "Lung")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(rpkm_drop[, 21:40] == 0)/length(rpkm_drop[, 21:40])*100, 4)), bty = "n", border = NA)
plot(rowMeans(tpm[, dbscan$cluster == 1]), rowSums(tpm[, dbscan$cluster == 1] > 0)/sum(dbscan$cluster == 1),
     xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "Chromium B-Cell")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(tpm[, dbscan$cluster == 1] == 0)/length(tpm[, dbscan$cluster == 1])*100, 4)), bty = "n", border = NA)
plot(rowMeans(tpm[, dbscan$cluster == 2]), rowSums(tpm[, dbscan$cluster == 2] > 0)/sum(dbscan$cluster == 2),
     xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "Chromium T-Cell")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(tpm[, dbscan$cluster == 2] == 0)/length(tpm[, dbscan$cluster == 2])*100, 4)), bty = "n", border = NA)
plot(rowMeans(rpkm[, colnames(rpkm) == "E6"]), rowSums(rpkm[, colnames(rpkm) == "E6"] > 0)/sum(colnames(rpkm) == "E6"),
     xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "E-MTAB-3929 E6")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(rpkm[, colnames(rpkm) == "E6"] == 0)/length(rpkm[, colnames(rpkm) == "E6"])*100, 4)), bty = "n", border = NA)
plot(rowMeans(rpkm[, colnames(rpkm) == "E7"]), rowSums(rpkm[, colnames(rpkm) == "E7"] > 0)/sum(colnames(rpkm) == "E7"),
xlab = "Average Expression", ylab = "Expression Rate", pch = 16, main = "E-MTAB-3929 E7")
legend("bottomright", legend = paste("Overall Drop-out(%)", signif(sum(rpkm[, colnames(rpkm) == "E7"] == 0)/length(rpkm[, colnames(rpkm) == "E7"])*100, 4)), bty = "n", border = NA)
dev.off()
#SupFig2
source("./ColorGradient.R")
pdf("SupFig3.pdf", width = 12, height = 9)
par(mfrow = c(3, 4))
load("./Immune/Chromium/GO.rda")
load("./Immune/Chromium/Chromium.rda")
plot(mds_nes, col = c(1, 4, 5, 6)[dbscan$cluster+1], pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "Chromium")
legend("topright", legend = c("Outlier", "B", "T", "Mono"), fill = c(1, 4, 5, 6), bty = "n", border = NA)
plot(mds_nes, col = expColor("CD3E", tpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "T-cell, CD3E")
plot(mds_nes, col = expColor("CD14", tpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "Monocyte, CD14")
plot(mds_nes, col = expColor("MS4A1", tpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "B-cell, CD20")
#Chromium
load("./Immune/QuakeMCA/GO.rda")
c1 <- get(load("./Immune/QuakeMCA/Spleen_cpm.rda"))
c2 <- get(load("./Immune/QuakeMCA/Thymus_cpm.rda"))
cpm <- cbind(c1, c2)
plot(mds_nes, col = c(1, 4, 5, rep(1, 5))[dbscan$cluster+1], pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "Tabula Muris")
legend("bottomright", legend = c("Outlier", "B", "T"), fill = c(1, 4, 5), bty = "n", border = NA)
plot(mds_nes, col = expColor("Cd3e", cpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "T-cell, Cd3e")
plot(mds_nes, col = expColor("Cd14", cpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "Monocyte, Cd14")
plot(mds_nes, col = expColor("Ms4a1", cpm), pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", main = "B-cell, Cd20")
#QuakeMCA
load("./Immune/GSE94820/GO.rda")
anno <- sapply(strsplit(colnames(nes), split = "_"), function(x) x[1])
plot(mds_nes, col = getcol(21)[as.factor(anno)], pch = 16, cex = 0.75, xlab = "Dim1", ylab = "Dim2", xlim = c(-0.3, 0.3), ylim = c(-0.2, 0.2), main = "GSE94820")
legend("bottomleft", legend = levels(as.factor(anno)), fill = getcol(21), bty = "n", border = NA)
#GSE94820
dev.off()
#SupFig3
# --- Supplementary Figure 4: principal-curve pseudotime on expression vs activity. ---
# NOTE(review): princurve::principal.curve is deprecated in princurve >= 2.0
# in favor of principal_curve; kept as-is to preserve behavior.
library(princurve)
load("./Embryo/E-MTAB-3929/GO.rda")
pdf("SupFig4.pdf", width = 12, height = 4)
par(mfcol = c(1, 3), mar = c(5, 5, 5, 1))
# Fit a principal curve through the expression-based t-SNE embedding.
pc_rpkm <- princurve::principal.curve(tsne_rpkm, smoother = "lowess")
plot(tsne_rpkm, col = rainbow(5)[as.factor(colnames(nes))], cex = 0.75, pch = 16, xlab = "Dim1", ylab = "Dim2", cex.lab = 2, main = "Expression", cex.main = 2)
lines(pc_rpkm, lwd = 2)
# Same for the activity (NES) based embedding.
pc_nes <- princurve::principal.curve(tsne_nes, smoother = "lowess")
plot(tsne_nes, col = rainbow(5)[as.factor(colnames(nes))], cex = 0.75, pch = 16, xlab = "Dim1", ylab = "Dim2", cex.lab = 2, main = "Activity", cex.main = 2)
lines(pc_nes, lwd = 2)
# Compare the two pseudotime orderings (lambda = arc-length position).
plot(pc_rpkm$lambda, pc_nes$lambda, xlab = "Expression", ylab = "Activity", cex.lab = 2, pch = 16, cex = 0.75, col = rainbow(5)[as.factor(colnames(nes))], main = "Lineage", cex.main = 2)
legend("bottomright", legend = levels(as.factor(colnames(nes))), fill = rainbow(5), cex = 1, bty = "n", border = NA)
dev.off()
#SupFig4
# --- Supplementary Figure 5: dynamic time warping of human vs mouse ES stages. ---
library(dtw)
load("./ES/GO.rda")
# Restrict both species' NES matrices to shared gene-set rows, then correlate.
nes <- cbind(nes_Hs[intersect(rownames(nes_Hs), rownames(nes_Mm)), ],
             nes_Mm[intersect(rownames(nes_Hs), rownames(nes_Mm)), ])
dist <- cor(nes)
# Keep only the human-vs-mouse block of the correlation matrix.
dist <- dist[1:ncol(nes_Hs), (ncol(nes_Hs)+1):ncol(nes)]
# DTW on 1 - correlation as the local cost matrix.
dtw <- dtw(1 - dist)
pdf("SupFig5.pdf")
par(mar = c(6, 6, 6, 2))
plot(dtw$index1, dtw$index2, axes = F, xlab = "", ylab = "", main = "Dynamic Time Warping", cex.main = 2)
# Hand-written stage labels; counts must match the column order of each matrix.
axis(side = 1, tick = F, las = 2, at = 1:28, col.axis = 1,
     labels = c(rep("oocyte", 3), rep("pronuclear", 3), rep("zygote", 2),
                rep("2cell", 3), rep("4cell", 4), rep("8cell", 10), rep("morula", 3)))
axis(side = 2, tick = F, las = 2, at = 1:17, col.axis = 4,
     labels = c(rep("oocyte", 2), rep("pronuclear", 3), rep("2cell", 3),
                rep("4cell", 3), rep("8cell", 3), rep("morula", 3)))
legend("bottomright", legend = c("Hs", "Mm"), text.col = c(1, 4), bty = "n", cex = 2)
dev.off()
#SupFig5
# --- Remaining supplementary figures/tables: copy pre-rendered outputs. ---
system("cp ./Immune/Chromium-GSE94820-QuakeMCA/hclust.pdf ./SupFig6.pdf")
#SupFig6
system("cp ./Embryo/E-MTAB-3929-GSE66688-GSE65525/E-MTAB-3929-GSE66688-GSE65525.pdf ./SupFig7.pdf")
#SupFig7
system("cp ./Benchmark/seurat.pdf ./SupFig8.pdf")
#SupFig8
system("cp ./Benchmark/GSE116272/bulk.pdf ./SupFig9.pdf")
#SupFig9
system("cp ./Benchmark/gsetFilter.pdf ./SupFig10.pdf")
#SupFig10
system("cp ./Benchmark/gsetSize.pdf ./SupFig11.pdf")
#SupFig11
system("cp ./Benchmark/normalization.pdf ./SupFig12.pdf")
#SupFig12
system("cp ./Benchmark/shadow.pdf ./SupFig13.pdf")
#SupFig13
system("cp ./Benchmark/gsetRandom.pdf ./SupFig14.pdf")
#SupFig14
system("cp ./Benchmark/variableTerms.pdf ./SupFig15.pdf")
#SupFig15
system("cp ./Immune/Chromium/stat.csv ./SupTable1.csv")
#SupTable1
system("cp ./Immune/QuakeMCA/stat.csv ./SupTable2.csv")
#SupTable2
system("cp ./Immune/GSE94820/stat.csv ./SupTable3.csv")
#SupTable3
|
70fab33021a137550d7a1e2175fb3448e8c9e98c
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/coxinterval/R/maximalint.R
|
28989aaee558f46ff32025dd429a6b2a8e189516
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,305
|
r
|
maximalint.R
|
### maximal intersections from an interval-type Surv object
# Compute the maximal intersections (innermost intervals) of a set of
# censoring intervals, as used for interval-censored survival estimation.
#
# x   : matrix of intervals. With 2 columns: (left, right), NA right = Inf
#       (right-censored). With >2 columns: column 3 is a status code where
#       0 = right-censored (right set to Inf) and 1 = exact (right set to left).
# eps : small offset used to break ties between identical endpoints and to
#       open the left endpoint of exact observations.
#
# Returns list(int, ind):
#   int : matrix with columns "left", "right.untied", "right" — one row per
#         maximal intersection.
#   ind : 0/1 matrix, one row per observation, one column per maximal
#         intersection, flagging which intersections lie inside each
#         observation's censoring interval.
maximalint <- function(x, eps = 1e-7)
{
  # Promote a single interval (plain vector) to a 1-row matrix.
  if (is.null(nrow(x))) x <- matrix(x, nrow = 1)
  if (ncol(x) == 2) x[is.na(x[, 2]), 2] <- Inf
  else if (ncol(x) > 2) {
    ## right-censored
    x[x[, 3] == 0, 2] <- Inf
    ## exact
    x[x[, 3] == 1, 2] <- x[x[, 3] == 1, 1]
  }
  # ind flags degenerate (exact, left == right) observations.
  ind <- x[, 1] == x[, 2]
  ## break ties
  # For proper intervals: nudge any right endpoint that coincides with some
  # left endpoint down by eps so endpoint ordering is unambiguous.
  if (sum(!ind)) {
    l <- unique(x[!ind, 1])
    r <- r.minus <- unique(x[!ind, 2])
    r.minus[r %in% l] <- r[r %in% l] - eps
  }
  else l <- r <- r.minus <- NULL
  if (sum(ind)) {
    ## open left-endpoint
    # Exact observations become tiny intervals (t - eps, t].
    x[ind, 1] <- x[ind, 2] - eps
    l <- c(l, unique(x[ind, 1]))
    r.minus <- c(r.minus, unique(x[ind, 2]))
    r <- c(r, unique(x[ind, 2]))
  }
  # Stack all endpoints as rows (value, original value, type) where type
  # 0 = left endpoint and 1 = right endpoint, then sort by position.
  s <- rbind(cbind(l, l, 0), cbind(r.minus, r, 1))
  s <- s[order(s[, 1]), ]
  # Pair each endpoint with its successor; a maximal intersection is a left
  # endpoint (type 0) immediately followed by a right endpoint (type 1).
  s <- cbind(s, rbind(s[-1, ], NA))
  ## maximal intersection left endpoint, right endpoint without and with ties
  s <- s[s[, 3] == 0 & s[, 6] == 1, c(1, 4, 5)]
  if (is.null(nrow(s))) s <- matrix(s, nrow = 1)
  colnames(s) <- c("left", "right.untied", "right")
  ## maximal intersection overlap with censoring interval indicator matrix
  if (nrow(s) < 2) i <- matrix(1)
  else
    i <-
      t(apply(x[, 1:2], 1, function(x) 1 * (s[, 1] >= x[1] & s[, 2] <= x[2])))
  list(int = s, ind = i)
}
|
f8a71189dff7a71dc0ae0b89fb266991b5a7596c
|
b57bd24e098c7a94241dbb96c95b004a4b5fedab
|
/4-Analysis/seed-production-threshold-simulation-functions.R
|
36fc904c8c3424d6d2031ab879d38a9296a930fb
|
[] |
no_license
|
hnguyen19/matrix-prospective
|
40dcc4d3625ddd2dab30d30dca2369fb4d3ef68e
|
2d38316ed3f124f5ac5c151ee913077810170cb5
|
refs/heads/master
| 2023-06-28T19:38:30.795602
| 2023-06-16T16:01:22
| 2023-06-16T16:01:22
| 407,684,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,315
|
r
|
seed-production-threshold-simulation-functions.R
|
################################################ 2-year rotation ################################################
###### Lambda calculation #######
# One full cycle (corn year then soybean year) of the 2-year rotation under
# conventional weed management. Per-capita fecundities for seed-matrix
# columns 3-5 are redrawn from lognormal distributions on every call; the
# returned vector is the weed seedbank/plant state after the soybean year.
rot_2year_conv_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                  prt_S, em_S, sv_S, seed_S, poh_S, ow_S){
  # Stochastic fecundity draws (same draw order as before, so the RNG
  # stream is unchanged): columns 3-4 share one distribution, column 5
  # has its own.
  seed_C[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_C[1, 5]   <- rlnorm(1, 4.22, 0.65)
  seed_S[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_S[1, 5]   <- rlnorm(1, 4.22, 0.65)
  # Apply one crop phase: overwinter %*% post-harvest %*% seed production
  # %*% survival %*% emergence %*% pre-tillage, applied to the state vector.
  phase <- function(state, prt, em, sv, seed, poh, ow) {
    ow %*% poh %*% seed %*% sv %*% em %*% prt %*% state
  }
  state_after_corn <- phase(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C)
  phase(state_after_corn, prt_S, em_S, sv_S, seed_S, poh_S, ow_S)
}
# One full cycle (corn then soybean) of the 2-year rotation under
# low-herbicide weed management. All redrawn fecundities (columns 3-5 of
# both seed matrices) share a single lognormal distribution.
rot_2year_low_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                 prt_S, em_S, sv_S, seed_S, poh_S, ow_S){
  # Vectorized draws consume the RNG stream exactly like the former six
  # individual rlnorm(1, ...) calls.
  seed_C[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  # One crop phase of the matrix projection model.
  phase <- function(state, prt, em, sv, seed, poh, ow) {
    ow %*% poh %*% seed %*% sv %*% em %*% prt %*% state
  }
  state_after_corn <- phase(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C)
  phase(state_after_corn, prt_S, em_S, sv_S, seed_S, poh_S, ow_S)
}
###### Mature plant density output #######
### given manipulated per-capita seed production that make annualized lambda = 1
# Mature-plant densities for each phase of the 2-year rotation under
# conventional weed management, given fecundities manipulated so that the
# annualized lambda equals 1. Returns list(C2 = corn-year plant densities,
# S2 = soybean-year plant densities), each restricted to entries 3-8 of the
# state vector.
rot_2year_conv_plant_density_fixed <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                               prt_S, em_S, sv_S, seed_S, poh_S, ow_S){
  # Same stochastic fecundity draws (and draw order) as rot_2year_conv_lambda.
  seed_C[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_C[1, 5]   <- rlnorm(1, 4.22, 0.65)
  seed_S[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_S[1, 5]   <- rlnorm(1, 4.22, 0.65)
  # Mature plants established within a phase (before seed set).
  established <- function(prt, em, sv, state) sv %*% em %*% prt %*% state
  # Full corn-phase transition feeds the soybean phase.
  state_after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  plants_corn <- established(prt_C, em_C, sv_C, vec)
  plants_soy  <- established(prt_S, em_S, sv_S, state_after_corn)
  list(C2 = plants_corn[3:8], S2 = plants_soy[3:8])
}
# Mature-plant densities for each phase of the 2-year rotation under
# low-herbicide weed management (fecundities fixed so annualized lambda = 1).
# Returns list(C2, S2) of plant densities (state entries 3-8) for the corn
# and soybean years respectively.
rot_2year_low_plant_density_fixed <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                              prt_S, em_S, sv_S, seed_S, poh_S, ow_S){
  # Same draws and draw order as rot_2year_low_lambda.
  seed_C[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  # Plants established within a phase (survival x emergence x pre-tillage).
  established <- function(prt, em, sv, state) sv %*% em %*% prt %*% state
  state_after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  plants_corn <- established(prt_C, em_C, sv_C, vec)
  plants_soy  <- established(prt_S, em_S, sv_S, state_after_corn)
  list(C2 = plants_corn[3:8], S2 = plants_soy[3:8])
}
###### Seed production, manipulated #######
# Per-capita seed production (fecundity row, columns 3-8) at harvest for the
# corn and soybean phases of the 2-year rotation under conventional weed
# management. Columns 3-5 are redrawn stochastically; columns 6-8 keep the
# values already present in the input matrices.
rot_2year_conv_seed_production_per_cap <- function(seed_C, seed_S){
  # Draw order matches the previous implementation, so the RNG stream is
  # unchanged.
  seed_C[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_C[1, 5]   <- rlnorm(1, 4.22, 0.65)
  seed_S[1, 3:4] <- rlnorm(2, 4.52, 0.61)
  seed_S[1, 5]   <- rlnorm(1, 4.22, 0.65)
  # Report the fecundity row for each crop phase.
  list(C2 = seed_C[1, 3:8], S2 = seed_S[1, 3:8])
}
# Per-capita seed production (fecundity row, columns 3-8) at harvest for the
# corn and soybean phases of the 2-year rotation under low-herbicide weed
# management. Columns 3-5 are redrawn from a common lognormal; columns 6-8
# pass through from the inputs.
rot_2year_low_seed_production_per_cap <- function(seed_C, seed_S){
  # Vectorized draws are RNG-stream-identical to the former per-cell calls.
  seed_C[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  list(C2 = seed_C[1, 3:8], S2 = seed_S[1, 3:8])
}
################################################ 3-year rotation ################################################
###### Lambda calculation #######
# One full cycle (corn -> soybean -> oat) of the 3-year rotation under
# conventional weed management. Fecundities for seed-matrix columns 3-7
# (corn) and 3-8 (soybean) are redrawn each call; column index c maps to
# weed cohort c-2 (see the cohort comments in the 4-year functions).
# Returns the state vector after the oat year. Code left byte-identical:
# the rlnorm call order determines the RNG stream.
rot_3year_conv_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                  prt_S, em_S, sv_S, seed_S, poh_S, ow_S,
                                  prt_O, em_O, sv_O, seed_O, poh_O, ow_O){
  # Stochastic per-capita fecundity draws (corn phase, cohorts 1-5).
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,7] <- rlnorm(1, 2.66, 0.89)
  # seed_C[1,8] <- rlnorm(1, 2.66, 0.89) cohort 6 original seed production is already lower
  # Soybean phase, cohorts 1-6.
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,7] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,8] <- rlnorm(1, 2.66, 0.89)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  after_oat
}
### low herbicide weed management
# One full cycle (corn -> soybean -> oat) of the 3-year rotation under
# low-herbicide weed management. Only columns 3-5 of each seed matrix are
# redrawn; note the distinct distributions for column 5 (corn vs soybean).
# Returns the state vector after the oat year.
rot_3year_low_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                 prt_S, em_S, sv_S, seed_S , poh_S, ow_S,
                                 prt_O, em_O, sv_O, seed_O, poh_O, ow_O ){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 4.22, 0.65)
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 6.94, 0.43)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  after_oat
}
###### Mature plant density output #######
### given manipulated per-capita seed production that make annualized lambda = 1
# Mature-plant densities for each phase of the 3-year rotation under
# conventional weed management, with fecundities manipulated so annualized
# lambda = 1. Fecundity draws mirror rot_3year_conv_lambda. Returns
# list(C3, S3, O3): plant densities (state entries 3-8) after establishment
# in the corn, soybean and oat years.
rot_3year_conv_plant_density_fixed <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                               prt_S, em_S, sv_S, seed_S , poh_S, ow_S,
                                               prt_O, em_O, sv_O, seed_O, poh_O, ow_O ){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,7] <- rlnorm(1, 2.66, 0.89)
  # seed_C[1,8] <- rlnorm(1, 2.66, 0.89) cohort 6 original seed production is already lower
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,7] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,8] <- rlnorm(1, 2.66, 0.89)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # Plants established during the corn year (before seed set).
  pl_dens_corn <- sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  pl_dens_soy <- sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  pl_dens_oat <- sv_O %*% em_O %*% prt_O %*% after_soy
  # Collect phase plant densities (entries 3-8 of each state vector).
  l <- list(pl_dens_corn[3:8], pl_dens_soy[3:8], pl_dens_oat[3:8])
  names(l) <- c("C3", "S3", "O3")
  l
}
# Mature-plant densities for each phase of the 3-year rotation under
# low-herbicide weed management (fecundities fixed so annualized lambda = 1).
# Draws mirror rot_3year_low_lambda. Returns list(C3, S3, O3) of plant
# densities (state entries 3-8) for corn, soybean and oat years.
rot_3year_low_plant_density_fixed <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                              prt_S, em_S, sv_S, seed_S , poh_S, ow_S,
                                              prt_O, em_O, sv_O, seed_O, poh_O, ow_O ){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 4.22, 0.65)
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 6.94, 0.43)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  pl_dens_corn <- sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  pl_dens_soy <- sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  pl_dens_oat <- sv_O %*% em_O %*% prt_O %*% after_soy
  l <- list(pl_dens_corn[3:8], pl_dens_soy[3:8], pl_dens_oat[3:8])
  names(l) <- c("C3", "S3", "O3")
  l
}
###### Seed production, manipulated #######
# Per-capita seed production (fecundity row, columns 3-8) at harvest for the
# corn, soybean and oat phases of the 3-year rotation under conventional
# weed management.
#
# FIX: the fecundity draws must mirror rot_3year_conv_lambda and
# rot_3year_conv_plant_density_fixed, which perturb corn columns 3-7
# (cohorts 1-5) and soybean columns 3-8 (cohorts 1-6). The previous
# version only perturbed corn columns 3-5, so the fecundities it reported
# disagreed with the dynamics model used elsewhere in this rotation.
rot_3year_conv_seed_production_per_cap <- function(seed_C, seed_S, seed_O){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,7] <- rlnorm(1, 2.66, 0.89)
  # seed_C[1,8] kept at its input value (cohort 6 fecundity already lower).
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,7] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,8] <- rlnorm(1, 2.66, 0.89)
  seed_production_count_corn <- seed_C[1, 3:8]
  seed_production_count_soy <- seed_S[1, 3:8]
  # Oat fecundities are not manipulated: pass through the input matrix.
  seed_production_count_oat <- seed_O[1, 3:8]
  # seed at harvest
  l <- list(seed_production_count_corn,
            seed_production_count_soy,
            seed_production_count_oat)
  names(l) <- c("C3", "S3", "O3")
  l
}
# Per-capita seed production (fecundity row, columns 3-8) at harvest for the
# corn, soybean and oat phases of the 3-year rotation under low-herbicide
# weed management. Only columns 3-5 are redrawn (distinct distributions for
# column 5 in corn vs soybean); oat fecundities pass through unchanged.
rot_3year_low_seed_production_per_cap <- function(seed_C, seed_S, seed_O){
  # Draw order preserved so the RNG stream matches the previous version.
  seed_C[1, 3:4] <- rlnorm(2, 2.66, 0.89)
  seed_C[1, 5]   <- rlnorm(1, 4.22, 0.65)
  seed_S[1, 3:4] <- rlnorm(2, 2.66, 0.89)
  seed_S[1, 5]   <- rlnorm(1, 6.94, 0.43)
  list(C3 = seed_C[1, 3:8], S3 = seed_S[1, 3:8], O3 = seed_O[1, 3:8])
}
################################################ 4-year rotation ################################################
###### Lambda calculation #######
### conventional weed management
# One full cycle (corn -> soybean -> oat -> alfalfa) of the 4-year rotation
# under conventional weed management. Corn columns 3-5 and soybean columns
# 3-6 are redrawn; column c maps to weed cohort c-2. Returns the state
# vector after the alfalfa year.
rot_4year_conv_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                  prt_S, em_S, sv_S, seed_S , poh_S, ow_S,
                                  prt_O, em_O, sv_O, seed_O, poh_O, ow_O,
                                  prt_A, em_A, sv_A, seed_A,poh_A, ow_A){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  #fecundity was much lower after cohort 3, so focus on suppressing plant size in soybean
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 7.34, 0.44)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  # alfalfa phase dynamics
  after_alfalfa <- ow_A %*% poh_A %*% seed_A %*% sv_A %*% em_A %*% prt_A %*% after_oat
  after_alfalfa
}
### low herbicide weed management
# One full cycle (corn -> soybean -> oat -> alfalfa) of the 4-year rotation
# under low-herbicide weed management. Corn columns 3-5 and soybean columns
# 3-7 are redrawn; later cohorts keep their (already lower) input
# fecundities. Returns the state vector after the alfalfa year.
rot_4year_low_lambda <- function(vec, prt_C, em_C, sv_C, seed_C, poh_C, ow_C,
                                 prt_S, em_S, sv_S, seed_S , poh_S, ow_S,
                                 prt_O, em_O, sv_O, seed_O, poh_O, ow_O,
                                 prt_A, em_A, sv_A, seed_A,poh_A, ow_A){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  # seed_C[1,6] <- rlnorm(1, 2.66, 0.89) #cohort 4 original seed production is already lower
  # seed_C[1,7] <- rlnorm(1, 2.66, 0.89) #cohort 5 original seed production is already lower
  # seed_C[1,8] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,7] <- rlnorm(1, 2.66, 0.89)
  # seed_S[1,8] <- rlnorm(1, 2.66, 0.89)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  # alfalfa phase dynamics
  after_alfalfa <- ow_A %*% poh_A %*% seed_A %*% sv_A %*% em_A %*% prt_A %*% after_oat
  after_alfalfa
}
###### Mature plant density output #######
### given manipulated per-capita seed production that make annualized lambda = 1
# Mature-plant densities for each phase of the 4-year rotation under
# conventional weed management (fecundities fixed so annualized lambda = 1).
# Draws mirror rot_4year_conv_lambda. Returns list(C4, S4, O4, A4) of plant
# densities (state entries 3-8) for corn, soybean, oat and alfalfa years.
# NOTE(review): the argument order here (poh, ow, prt, em, sv, seed) differs
# from the rot_4year_*_lambda functions (prt, em, sv, seed, poh, ow) — kept
# as-is since callers rely on it, but verify call sites pass matrices in
# this order.
rot_4year_conv_plant_density_fixed <- function(vec, poh_C, ow_C, prt_C, em_C, sv_C, seed_C,
                                               poh_S, ow_S, prt_S, em_S, sv_S, seed_S,
                                               poh_O, ow_O, prt_O, em_O, sv_O, seed_O,
                                               poh_A, ow_A, prt_A, em_A, sv_A, seed_A){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  #fecundity was much lower after cohort 3, so focus on suppressing plant size in soybean
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 7.34, 0.44)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  # Plants established during the corn year (before seed set).
  pl_dens_corn <- sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  pl_dens_soy <- sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  pl_dens_oat <- sv_O %*% em_O %*% prt_O %*% after_soy
  # alfalfa phase dynamics
  after_alfalfa <- ow_A %*% poh_A %*% seed_A %*% sv_A %*% em_A %*% prt_A %*% after_oat
  pl_dens_alfalfa <- sv_A %*% em_A %*% prt_A %*% after_oat
  # Collect phase-end mature plant density
  l <- list(pl_dens_corn[3:8], pl_dens_soy[3:8], pl_dens_oat[3:8], pl_dens_alfalfa[3:8])
  names(l) <- c("C4", "S4", "O4", "A4")
  l
}
# Mature-plant densities for each phase of the 4-year rotation under
# low-herbicide weed management (fecundities fixed so annualized lambda = 1).
# Draws mirror rot_4year_low_lambda. Returns list(C4, S4, O4, A4) of plant
# densities (state entries 3-8).
# NOTE(review): argument order (poh, ow, prt, em, sv, seed) differs from the
# rot_4year_*_lambda functions — verify call sites.
rot_4year_low_plant_density_fixed <- function(vec, poh_C, ow_C, prt_C, em_C, sv_C, seed_C,
                                              poh_S, ow_S, prt_S, em_S, sv_S, seed_S,
                                              poh_O, ow_O, prt_O, em_O, sv_O, seed_O,
                                              poh_A, ow_A, prt_A, em_A, sv_A, seed_A){
  seed_C[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_C[1,5] <- rlnorm(1, 2.66, 0.89)
  # seed_C[1,6] <- rlnorm(1, 2.66, 0.89) #cohort 4 original seed production is already lower
  # seed_C[1,7] <- rlnorm(1, 2.66, 0.89) #cohort 5 original seed production is already lower
  # seed_C[1,8] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,3] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,4] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,5] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,6] <- rlnorm(1, 2.66, 0.89)
  seed_S[1,7] <- rlnorm(1, 2.66, 0.89)
  # seed_S[1,8] <- rlnorm(1, 2.66, 0.89)
  # corn phase dynamics
  after_corn <- ow_C %*% poh_C %*% seed_C %*% sv_C %*% em_C %*% prt_C %*% vec
  pl_dens_corn <- sv_C %*% em_C %*% prt_C %*% vec
  # soybean phase dynamics
  after_soy <- ow_S %*% poh_S %*% seed_S %*% sv_S %*% em_S %*% prt_S %*% after_corn
  pl_dens_soy <- sv_S %*% em_S %*% prt_S %*% after_corn
  # oat phase dynamics
  after_oat <- ow_O %*% poh_O %*% seed_O %*% sv_O %*% em_O %*% prt_O %*% after_soy
  pl_dens_oat <- sv_O %*% em_O %*% prt_O %*% after_soy
  # alfalfa phase dynamics
  after_alfalfa <- ow_A %*% poh_A %*% seed_A %*% sv_A %*% em_A %*% prt_A %*% after_oat
  pl_dens_alfalfa <- sv_A %*% em_A %*% prt_A %*% after_oat
  # Collect phase-end mature plant density
  l <- list(pl_dens_corn[3:8], pl_dens_soy[3:8], pl_dens_oat[3:8], pl_dens_alfalfa[3:8])
  names(l) <- c("C4", "S4", "O4", "A4")
  l
}
###### Seed production, manipulated #######
# Per-capita seed production (fecundity row, columns 3-8) at harvest for
# each phase of the 4-year rotation under conventional weed management.
# Corn columns 3-5 and soybean columns 3-6 are redrawn; oat and alfalfa
# fecundities pass through unchanged.
rot_4year_conv_seed_production_per_cap <- function(seed_C, seed_S, seed_O, seed_A){
  # Vectorized draws preserve the RNG stream of the former per-cell calls.
  seed_C[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 6]   <- rlnorm(1, 7.34, 0.44)
  list(C4 = seed_C[1, 3:8], S4 = seed_S[1, 3:8],
       O4 = seed_O[1, 3:8], A4 = seed_A[1, 3:8])
}
# Per-capita seed production (fecundity row, columns 3-8) at harvest for
# each phase of the 4-year rotation under low-herbicide weed management.
# Corn columns 3-5 and soybean columns 3-7 are redrawn from a common
# lognormal; remaining columns and the oat/alfalfa matrices pass through.
rot_4year_low_seed_production_per_cap <- function(seed_C, seed_S, seed_O, seed_A){
  # Vectorized draws preserve the RNG stream of the former per-cell calls.
  seed_C[1, 3:5] <- rlnorm(3, 2.66, 0.89)
  seed_S[1, 3:7] <- rlnorm(5, 2.66, 0.89)
  list(C4 = seed_C[1, 3:8], S4 = seed_S[1, 3:8],
       O4 = seed_O[1, 3:8], A4 = seed_A[1, 3:8])
}
|
7c512aef070fea9af7d15e673386a77c8871a003
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/lle/R/find_nn_k.R
|
3dd31cee20c919f08f3200dcc91bf866a2cda91d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
find_nn_k.R
|
# Find the k nearest neighbours of each row of X.
#
# X    : numeric matrix of observations (rows = points).
# k    : number of neighbours to select per point.
# iLLE : if TRUE, apply the improved-LLE reweighting of distances by the
#        local reconstruction norms before re-selecting neighbours.
#
# Returns an N x N logical matrix; entry [i, j] is TRUE when j is among the
# k nearest neighbours of i (the point itself, rank 1, is excluded).
#
# FIX: in the iLLE branch the per-point norm Mi was recomputed inside the
# inner j loop even though it depends only on i (and Mj only on j), making
# the norm computation O(N^2) instead of O(N). The norms are now computed
# once per point and the rescaling is done with outer(); the arithmetic and
# the progress messages are unchanged.
find_nn_k <-
function(X,k,iLLE=FALSE){
  #calculate distance-matrix
  nns <- as.matrix(dist(X))
  #get ranks of all entries (rank 1 is the point itself, distance 0)
  nns <- t(apply(nns,1,rank))
  #choose the k+1 largest entries without the first (the data point itself)
  nns <- ( nns<=k+1 & nns>1 )
  #optional: improved LLE
  if( iLLE ){
    N <- dim(X)[1]
    n <- dim(X)[2]
    nns2 <- nns
    nns <- as.matrix(dist(X))
    # Hoisted per-point norms: M[i] was previously recomputed for every j.
    M <- numeric(N)
    for( i in seq_len(N) ){
      if( i%%100==0 ) cat(i,"von",N,"\n")
      M[i] <- sqrt( sum( rowSums( matrix( c(t(X[i,])) - c(t(X[nns2[i,],])), nrow=k, ncol=n, byrow=TRUE)^2 )^2 ) )
    }
    # Rescale every pairwise distance by sqrt(M[i] * M[j]) * k
    # (elementwise identical to the former double loop).
    nns <- nns / (sqrt(outer(M, M)) * k)
    nns <- t(apply(nns,1,rank))
    nns <- ( nns<=k+1 & nns>1 )
  }
  return (nns)
}
|
f03ae7f1b8ebb883d3ff592c100e17412e434233
|
2f3ea7ef177cbe890cfa48af195d04cc385155ff
|
/BIEN.R
|
dd4a6fabc868f6b1394bf0f1a2977fbf0c90d6ab
|
[] |
no_license
|
stdupas/BIEN
|
57bf60b19db89c31c68e73531c66d8b92bffa865
|
7c28725990e85e72daa3d5857b7eae8866686a03
|
refs/heads/master
| 2020-03-22T20:56:30.392974
| 2019-04-02T20:08:28
| 2019-04-02T20:08:28
| 140,644,320
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99,438
|
r
|
BIEN.R
|
# NOTE(review): hard-coded absolute working directory (machine-specific);
# a second setwd() appears further down — consider project-relative paths.
setwd("C:/Users/steph/OneDrive/Documents/GitHub/BIEN")
#library(raster)
#edata
#idata
#
# New version 15-12-2018
# ecolearn model
# variables = vector of variable names
# data = array of ecosystem dynamics including indicator data and interacting ecosystem variables
#       in dim 1 is time,
#       in dim 2 are ecosystem variables,
#       in dim 3 are repetitions
# parameter = inherits from "list" of "numeric", with other slots defining attributes of
#       parameters in the list: "eco", "indic", "iscoeno", "ecodelay"
# prior = "list" of prior functions
# edge = list of (son="integer", indiclength="integer", ecolength="integer",
#       parentfull="list of function", delayfull="integer",
#       parenteco="integer", delayeco="integer", dfullfun="list of function",
#       rifun="list of function", defun="list of function")
# where
#  - son are the dependent variables index at each time step
#  - parent are independent variables index
#  - delay for parent time dependence
#  - func is the function to be used for each parent
#  - residual is the residual to be used for each son
#
# Proof of concept
#
# Set up the data array for true-data simulation using climatic data.
# Eco variables: temperature, precipitation, parasite, plant, pesticide;
# indicator variables are the corresponding sampled observations (i-prefix).
ecoVar=c("Tx","Pr","Pa","Pl","Pe")
indicVar=c("iTx","iPr","iPa","iPl","iPe")
variables = c(ecoVar,indicVar)
# climdata.RData must define `climdata` (time x 2 climate vars x repetitions).
load(file = "climdata.RData")
data = array(NA,dim=c(dim(climdata)[1],10,dim(climdata)[3]),dimnames=list(dimnames(climdata)[[1]],c(indicVar,ecoVar),dimnames(climdata)[[3]]))
# Fill observed climate indicators from the loaded climate data.
data[,c("iTx","iPr"),] <- climdata
data[1,"iPl",]<-0;data[2,"iPl",]<-0.01 # planting occurs in february
data[1:2,"iPa",]<-0 # there is no parasite before planting
data[1:2,"iPe",]<-0 # there is no pesticide before planting
# Inspect the first repetition (interactive check).
data[,,1]
#
# Definition of true parameters
#
# The model: a discrete ecosystem model represented as a temporal dynamic
# graph where the value of each variable depends on variables at earlier
# times — not necessarily one time step before, but from zero up to a number
# of time steps before.
# We distinguish "eco" and "indic" variables. "eco" variables are those
# involved in the ecological temporal effects; "indic" variables are those
# determined by the sampling of ecological variables.
#
# Definition of the graph:
# nodes = eco and indicator variables, each given an integer index from 1 to n
# edges = information of the temporal dynamic graph (qualitative links and
#         quantitative functions and parameters)
# edges = list of son="integer",
#         indiclength="integer",
#         ecolength="integer",
#         parentfun="list of function", # index of the parents for each son,
#         delay="integer", # delay of the parent for each son as a list of integer and probability,
#         parenteco="integer",
#         delayeco="integer",
#         dfullfun="list of function",
#         rifun="list of function", defun="list of function"
# where
# - son are the dependent variables at each time step, represented by their integer index
# - parent are independent variables index
# - delay for parent time dependence
# - defun is the function calculating the probability of a son knowing the parents in the ecosystem model
# - rifun is the function sampling an ecosystem variable knowing the indicator
#
#"defun","delayeco","delayfull","dfullfun","ecolength","indiclentgh","parenteco","parentfull","rifun","son"
#"defun","delayeco","delayfull","dfullfun","ecolength","indiclentgh","parenteco","parentfull","rifun","son"
# NOTE(review): second hard-coded setwd in the same script (Linux path this
# time) — only one of the two setwd calls can be valid on a given machine.
setwd("/home/dupas/BIEN/")
# "parameter": list of ecosystem dynamics parameters, with slots
# eco = indices of ecological dynamics parameters,
# indic = indices of parameters linking indicator variables to ecological variables,
# iscoeno = indices of parameters linking scenopoetic indicator variables to
#         scenopoetic ecological variables. Scenopoetic ecological variables
#         are ecological variables that do not depend on other ecological
#         variables, as in environmental niche modelling, as opposed to
#         bionomic ecological variables.
# ecodelay = indices of parameters that determine the time delay in the ecosystem model
# Validity function for the "parameter" class.
# Checks that (1) every element of the parameter list is numeric and
# (2) the @eco and @indic slots together index every parameter exactly once.
# Returns TRUE when valid (so it can be plugged into setClass(validity=));
# stops with an error otherwise.
#
# FIX: the original test order(c(eco, indic)) == 1:length(object) is not a
# test that the union of the index sets equals 1:n — order() compares the
# *sorted permutation*, which only matches 1:n when c(eco, indic) happens to
# be already sorted, so valid interleaved index sets were wrongly rejected.
validParam <- function(object){
  if (!all(vapply(object, is.numeric, logical(1))))
    stop("error in parameter construction : should be a list of numeric")
  idx <- sort(c(object@eco, object@indic))
  if (length(idx) != length(object) || !all(idx == seq_along(object)))
    stop("error in ecological and indicator parameters identity. The union of @eco and @indic is not 1:length(parameter)")
  TRUE
}
# S4 class "parameter": a named list of numeric parameter values plus index
# slots classifying them:
#   @eco      indices of ecosystem-dynamics parameters
#   @indic    indices of indicator-linking parameters
#   @iscoeno  indices of scenopoietic indicator parameters (subset of @indic)
#   @ecodelay indices of the time-delay parameter(s)
# NOTE(review): the argument name `slot` only partially matches setClass's
# `slots` — it works via partial matching but is fragile; validity is
# currently disabled.
setClass("parameter",
         contains="list",
         slot = c(eco="integer",indic="integer",iscoeno="integer",ecodelay="integer")
         #validity= validParam
)
# Example "parameter" object with the model's working values.
# Indices 1:16 are ecosystem-dynamics parameters (@eco), 17:22 link
# indicators to ecosystem variables (@indic), 17:18 concern the
# scenopoietic (independent) variables (@iscoeno), and parameter 7
# (Pa_delay) drives the ecological time delay (@ecodelay).
x=p = new("parameter",
          list(
            PaPa_rmax=10,
            PaPa_K=20,
            TxPa_min=15,
            TxPa_max=30,
            PrPa_min=3,
            PlPa_r=1,
            Pa_delay=2,
            PrPl_rperPr=.5,
            TxPl_min=10,
            TxPl_max=30,
            PaPl_r=-0.03,
            PlPl_r=2.5,
            PlPl_K=2,
            PlPl_var_r=0.03^2,
            PePa_r=0.1,
            PaPe=1.5,
            T_sd=0.3,
            Pr_var_r=0.05^2,
            Pa_sample_r=0.05,
            Pl_var_r=0.05^2,
            Pe_pDet=.8,
            Pe_pFalseDet=.005
          ),
          eco = 1:16,
          indic=17:22,
          iscoeno=17:18,
          ecodelay=as.integer(7))
# subset
# to extract subset of parameters se
# x = parameter class variable
# type = type of subset,
# - if "index" uses the argument index to identify parameter index to extract
# - if "eco" extracts the ecological dynamics parameters
# - if "indic" extracts the parameters linking indicators variables to ecological variables
# - if "iscoeno" extracts the parameters linking sceonopoietic indicators to independent ecological variables
# - if "ecodelay" extracts the the ecological dynamics parameters defining time delay in the ecological responses
# index = the index of the parameter to extract. Index is used if type = "index"
# S4 subset() method for "parameter" objects.
# type = "eco" | "indic" | "iscoeno" | "ecodelay" | "index" (default):
#  - "eco": ecological-dynamics parameters
#  - "indic": indicator-linking parameters (iscoeno renumbered within them)
#  - "iscoeno": scenopoietic indicator-linking parameters only
#  - "ecodelay": time-delay parameter(s) of the ecosystem model
#  - "index": arbitrary subset given by `index`, slots renumbered accordingly
# Fix vs original: the "ecodelay" branch built pEcodelay but never passed it
# to new(), and set @eco to the *original* delay index rather than indices
# within the returned subset.
setMethod("subset",
          signature="parameter",
          definition= function(x,type="index",index=integer(0)){
            switch(type,
                   eco={pEco<-lapply(x@eco,function(i){x[[i]]})
                   names(pEco) <- names(x)[x@eco]
                   new("parameter",pEco,eco=1:length(x@eco),indic=integer(0),iscoeno=integer(0),ecodelay=x@ecodelay)
                   },
                   indic={pIndic=lapply(x@indic,function(i){x[[i]]})
                   names(pIndic)=names(x)[x@indic]
                   new("parameter",pIndic,eco=integer(0),
                       indic=1:length(x@indic),iscoeno=1:length(x@iscoeno),
                       ecodelay=integer(0))},
                   iscoeno={pIscoeno=lapply(x@iscoeno,function(i){x[[i]]})
                   names(pIscoeno)=names(x)[x@iscoeno]
                   new("parameter",pIscoeno,eco=integer(0),indic=1:length(x@iscoeno),
                       iscoeno=1:length(x@iscoeno),ecodelay=integer(0))},
                   ecodelay={pEcodelay=lapply(x@ecodelay,function(i){x[[i]]})
                   names(pEcodelay)=names(x)[x@ecodelay]
                   # pass the extracted list and renumber slots within the subset
                   new("parameter",pEcodelay,eco=1:length(pEcodelay),indic=integer(0),
                       iscoeno=integer(0),ecodelay=1:length(pEcodelay))},
                   index={pIndex=lapply(index, function(i){x[[i]]})
                   Indic=which(index%in%x@indic)
                   Eco=which(index%in%x@eco)
                   Iscoeno=which(index%in%x@iscoeno)
                   Ecodelay=which(index%in%x@ecodelay)
                   names(pIndex)=names(x)[index]
                   new("parameter",pIndex,eco=Eco,indic=Indic,iscoeno=Iscoeno,ecodelay=Ecodelay)
                   }
            )}
)
# Quick interactive checks of the subset() method on the example parameters.
a=subset(x=p,type="eco");a
b=subset(x=p,type="iscoeno");b
c=subset(x=p,type="indic");c
d=subset(x=p,type="ecodelay");d
# fix: print e (the freshly created subset), not d again
e=subset(x=p,index=c(1:5,7,16:17,18));e
names(a)
names(b)
names(c)
names(d)
# TRUE iff every element of `object` is a function
# (vacuously TRUE for an empty list).
is.list.of.function <- function(object){
  all(vapply(object, is.function, logical(1)))
}
# TRUE iff every element of `object` has class "numeric" (a double vector).
# Fix vs original: the body referenced `objet` (typo), so every call failed
# with "object 'objet' not found".
is.list.of.numeric <- function(object){
  all(vapply(object, function(el) identical(class(el), "numeric"), logical(1)))
}
# edge
# to define the graph model
# ecoVar = names of the ecological variables
# indicVar = names of the indicator variables
# p = parameters of the model, class parameter
# adjacency = matrix of adjacency among indicator and ecological variables set
# adjacency = 0, no link
# adjacency = 1, ecologial link between t-f(t) in line and t in columns (ecological simulation or probability)
# adjacency = 2|3, link between indicator in line to ecological in column at time t (ecological simulation from indicators)
# adjacency = 3, link between scoenopoietic indicator in line to scoenopoietic ecological in column at time t (ecological simulation from indicators)
# adjacency = 4|5, link between ecological to indicator at time t (indicator simulation from ecological simulation)
# adjacency = 5, link between scoenopoietic ecological to scoenopoietic indicator at time t (indicator simulation from ecological simulation)
#
# dteco = list of probability function of the time delays for the ecosystem dependent variables
# deco = list of probability functions for the ecosystem dependent variables
# dEco2Indic = list of probability function of the indicator variables depending on ecological variables
# dIndic2Eco = list of probability function of the ecosystem variables depending on the indicator variables
# dscoenoEco2Indic = list of probability function of the scoenopoietic ecosystem variables ('independent' ecosystem variables)
# depending on the scoenopoietic indicator variables
# pscoenoIndic2Eco = list of probability function of the scoenopoietic indicator variables
# depending on the scoenopoietic ecosystem variables ('independent' ecosystem variables)
# reco = list of sampling probability functions for the ecosystem dependent variables
# rEco2Indic = list of sampling probability function of the indicator variables depending on ecological variables
# rIndic2Eco = list of sampling probability function of the ecosystem variables depending on the indicator variables
# rscoenoEco2Indic = list of sampling probability function of the scoenopoietic ecosystem variables ('independent' ecosystem variables)
# depending on the scoenopoietic indicator variables
# rscoenoIndic2Eco = list of sampling probability function of the scoenopoietic indicator variables
# depending on the scoenopoietic ecosystem variables ('independent' ecosystem variables)
#
#
#
#
# "listOfun": a list whose elements must all be functions.
# NOTE(review): an S4 validity function should return TRUE or a character
# message; is.list.of.function returns FALSE on failure — confirm
# validObject() reports this usefully.
setClass("listOfun",
         contains="list",
         validity=is.list.of.function)
#
# edge model
#
# First draft of the "edge" validity function.
# NOTE(review): superseded immediately by the redefinition below; this
# version accesses slots with list-style object[["name"]] indexing although
# "edge" is a slot-based S4 class, so it would not work as written.
validEdge = function(object){
  Names = c("ecoVar","indicVar","p","adjacency","dteco","deco","reco","dindic2eco","rindic2eco","deco2indic","reco2indic","rscoenoEco2Indic","rscoenoIndic2Eco","dscoenoEco2Indic","dscoenoIndic2Eco")
  if (!all(names(object)[order(names(object))] == Names[order(Names)])) stop("edge class slots should be among 'ecoVar','indicVar','p','adjacency','dteco','deco','reco','dindic2eco','rindic2eco','deco2indic','reco2indic','rscoenoEco2Indic','rscoenoIndic2Eco','dscoenoEco2Indic','dscoenoIndic2Eco'")
  if (!is.list.of.function(object[["deco"]])) stop("deco should be a list of functions")
  if (!is.list.of.function(object[["reco"]])) stop("reco should be a list of functions")
  if (!is.list.of.function(object[["deco2indic"]])) stop("deco2indic should be a list of functions")
  if (!is.list.of.function(object[["rindic2eco"]])) stop("rindic2eco should be a list of functions")
  if (!is.list.of.function(object[["reco2indic"]])) stop("reco2indic should be a list of functions")
  if (!is.list.of.function(object[["rscoenoEco2Indic"]])) stop("rscoenoEco2Indic should be a list of functions")
  if (!is.list.of.function(object[["rscoenoIndic2Eco"]])) stop("rscoenoIndic2Eco should be a list of functions")
  if (!is.list.of.function(object[["dscoenoEco2Indic"]])) stop("dscoenoEco2Indic should be a list of functions")
  if (!is.list.of.function(object[["dscoenoIndic2Eco"]])) stop("dscoenoIndic2Eco should be a list of functions")
}
# Validity function for the "edge" class: every probability/sampling slot
# must be a list of functions. Returns TRUE on success so it can be used
# directly as an S4 validity function (the original returned NULL, which is
# not a valid validity result).
# NOTE(review): the original omitted dindic2eco from the checks; it is
# included here — the slot is typed "listOfun" anyway, so this only adds an
# explicit message.
validEdge = function(object){
  funSlots <- c("deco", "reco", "deco2indic", "dindic2eco", "rindic2eco",
                "reco2indic", "rscoenoEco2Indic", "rscoenoIndic2Eco",
                "dscoenoEco2Indic", "dscoenoIndic2Eco")
  for (nm in funSlots) {
    if (!is.list.of.function(slot(object, nm)))
      stop(nm, " should be a list of functions")
  }
  TRUE
}
# S4 class "edge": the temporal dynamic graph model — variable names,
# parameters, adjacency matrix, time-delay distributions (dteco) and the
# density (d*) / sampling (r*) function lists for each kind of link.
setClass("edge",
         slots=list(ecoVar="character",indicVar="character",p="parameter",adjacency="matrix",dteco="list",
                    deco="listOfun",reco="listOfun",dindic2eco="listOfun",rindic2eco="listOfun",
                    deco2indic="listOfun",reco2indic="listOfun", dscoenoEco2Indic="listOfun",
                    rscoenoEco2Indic="listOfun",dscoenoIndic2Eco="listOfun",rscoenoIndic2Eco="listOfun"),
         validity=validEdge
)
# names() on an "edge" object lists its slot names.
setMethod(names, signature = "edge",
          definition = function(x) slotNames(x))
# EdgeModel: concrete "edge" (graph) model for the toy ecosystem.
# Variables: Tx temperature, Pr precipitation, Pa parasite, Pl plant,
# Pe pesticide; i* are the corresponding indicators. Adjacency codes follow
# the legend above (1 eco->eco, 2|3 indicator->eco, 4|5 eco->indicator;
# 3 and 5 mark scenopoietic links).
# NOTE(review): several deco/reco functions reference p$PrPl_min, which the
# parameter object p does not define (it has PrPl_rperPr) — TODO confirm
# the intended parameter name.
# NOTE(review): the Pe functions use dlogis() without the y/x argument
# pattern one would expect, and reco[[3]] calls dlogis (a density) where a
# sampler (rlogis) seems intended — verify before use.
EdgeModel = new("edge",
                ecoVar=c("Tx","Pr","Pa","Pl","Pe"),
                indicVar=c("iTx","iPr","iPa","iPl","iPe"),
                p=p,
                adjacency = t(matrix(as.integer(c(0,0,0,0,0,3,0,0,0,0,
                                                  0,0,0,0,0,0,3,0,0,0,
                                                  0,0,0,0,0,0,0,2,0,0,
                                                  0,0,0,0,0,0,0,0,2,0,
                                                  0,0,0,0,0,0,0,0,0,2,
                                                  5,0,0,0,0,0,0,1,1,0,
                                                  0,5,0,0,0,0,0,1,1,0,
                                                  0,0,4,0,0,0,0,1,1,1,
                                                  0,0,0,4,0,0,0,1,0,0,
                                                  0,0,0,0,4,0,0,1,0,0)),nrow=10,dimnames=list(c("iTx","iPr","iPa","iPl","iPe","Tx","Pr","Pa","Pl","Pe"),
                                                                                              c("iTx","iPr","iPa","iPl","iPe","Tx","Pr","Pa","Pl","Pe")))),
                dteco = list(list(as.integer(p$Pa_delay-2*(p$Pa_delay/2)^.5):as.integer(p$Pa_delay+2*(p$Pa_delay/2)^.5),dgamma(x=as.integer(p$Pa_delay-2*(p$Pa_delay/2)^.5):as.integer(p$Pa_delay+2*(p$Pa_delay/2)^.5),shape = p$Pa_delay*2,rate=2)),
                             1,
                             1),
                deco = new("listOfun",list(
                  function(p,x,y){
                    a = (x["Pa"]==0)+
                      (!x["Pe"]+x["Pe"]*p$PePa_r)*x["Pa"]*p$PaPa_rmax*(x["Tx"]>p$TxPa_min)*(x["Tx"]<p$TxPa_max)*
                      (x["Tx"]-p$TxPa_min)/(p$TxPa_max-p$TxPa_min)*(x["Pr"]>p$PrPa_min)*(x["Pl"]*p$PlPa_r)
                    dpois(y[1],lambda = a*(p$PaPa_K-a)/p$PaPa_K)},
                  function(p,x,y){
                    dgamma(y,
                           shape=x["Pl"]+{if ((x["Pr"]>p$PrPl_min)*(x["Pr"]<4)*(x["Tx"]>p$TxPl_min)*(x["Tx"]<p$TxPl_max)) {
                             p$PlPl_r*4*(x["Tx"]-p$TxPl_min)*(p$TxPl_max-x["Tx"])/(p$TxPl_max+p$TxPl_min)*
                               ((.5+.5*(x[3]-p$PrPl_min)/(4-p$PrPl_min))+(x["Pr"]>=4))*
                               (1-x["Pl"]/p$PlPl_K)*x[4]} else 0}/p$PlPl_var_r,
                           scale=p$PlPl_var_r)},
                  function(p,x,y){
                    dlogis(y,location=p$PaPe)})),
                reco = new("listOfun",list(
                  function(p,x){
                    a = (x["Pa"]==0)+
                      (!x["Pe"]+x["Pe"]*p$PePa_r)*x["Pa"]*p$PaPa_rmax*(x["Tx"]>p$TxPa_min)*(x["Tx"]<p$TxPa_max)*
                      (x["Tx"]-p$TxPa_min)/(p$TxPa_max-p$TxPa_min)*(x["Pr"]>p$PrPa_min)*(x["Pl"]*p$PlPa_r)
                    rpois(1,lambda = a*(p$PaPa_K-a)/p$PaPa_K)},
                  function(p,x){
                    rgamma(1,
                           shape=x["Pl"]+{if ((x["Pr"]>p$PrPl_min)*(x["Pr"]<4)*(x["Tx"]>p$TxPl_min)*(x["Tx"]<p$TxPl_max)) {
                             p$PlPl_r*4*(x["Tx"]-p$TxPl_min)*(p$TxPl_max-x["Tx"])/(p$TxPl_max+p$TxPl_min)*
                               ((.5+.5*(x[3]-p$PrPl_min)/(4-p$PrPl_min))+(x["Pr"]>=4))*
                               (1-x["Pl"]/p$PlPl_K)*x[4]} else 0}/p$PlPl_var_r,
                           scale=p$PlPl_var_r)},
                  function(p,x){
                    dlogis(1,location=p$PaPe)})),
                dindic2eco = new("listOfun",list(function(p,x,y) {dnorm(y[1],mean=x[1],sd = p$T_sd)},
                                                 function(p,x,y) {dgamma(y[1],shape =x[1]/p$Pr_var_r,scale=p$Pr_var_r)},
                                                 function(p,x,y) {dpois(y,x[1]/p$Pa_sample_r)},
                                                 function(p,x,y) {dgamma(y,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)},
                                                 function(p,x,y) {dgamma(y,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)})),
                rindic2eco = new("listOfun",list(function(p,x) {rnorm(1,mean=x[1],sd = p$T_sd)},
                                                 function(p,x) {rgamma(1,shape =x[1]/p$Pr_var_r,scale=p$Pr_var_r)},
                                                 function(p,x) {rpois(1,x[1]/p$Pa_sample_r)},
                                                 function(p,x) {rgamma(1,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)},
                                                 function(p,x) {rgamma(1,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)})),
                deco2indic = new("listOfun",list(function(p,x,y) {dnorm(y[1],mean=x[1],sd = p$T_sd)},
                                                 function(p,x,y) {dgamma(y[1],shape =x[1]/p$Pr_var_r,scale=p$Pr_var_r)},
                                                 function(p,x,y) {dpois(y,x[1]/p$Pa_sample_r)},
                                                 function(p,x,y) {dgamma(y,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)},
                                                 function(p,x,y) {dgamma(y,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)})),
                reco2indic = new("listOfun",list(function(p,x) {rnorm(1,mean=x[1],sd = p$T_sd)},
                                                 function(p,x) {rgamma(1,shape =x[1]/p$Pr_var_r,scale=p$Pr_var_r)},
                                                 function(p,x) {rpois(1,x[1]/p$Pa_sample_r)},
                                                 function(p,x) {rgamma(1,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)},
                                                 function(p,x) {rgamma(1,shape=x[1]/p$Pl_var_r,scale=p$Pl_var_r)})),
                dscoenoEco2Indic = new("listOfun",list(function(p,x,y) {pnorm(y,mean=x["Tx"],sd = p$T_sd)},
                                                       function(p,x,y) {pgamma(y,shape =x["Pr"]/p$Pr_var_r,scale=p$Pr_var_r)})),
                rscoenoEco2Indic = new("listOfun",list(function(p,x) {rnorm(1,mean=x["Tx"],sd = p$T_sd)},
                                                       function(p,x) {rgamma(1,shape =x["Pr"]/p$Pr_var_r,scale=p$Pr_var_r)})),
                dscoenoIndic2Eco = new("listOfun",list(function(p,x,y) {pnorm(y,mean=x["iTx"],sd = p$T_sd)},
                                                       function(p,x,y) {pgamma(y,shape =x["iPr"]/p$Pr_var_r,scale=p$Pr_var_r)})),
                rscoenoIndic2Eco = new("listOfun",list(function(p,x) {rnorm(1,mean=x["iTx"],sd = p$T_sd)},
                                                       function(p,x) {rgamma(1,shape =x["iPr"]/p$Pr_var_r,scale=p$Pr_var_r)}))
)
# S4 class "prior": a list of zero-argument sampling functions, one per
# parameter, with the same index slots as "parameter" so that a draw from
# the prior maps one-to-one onto a "parameter" object.
# NOTE(review): as with "parameter", `slot` partially matches `slots`.
setClass("prior",
         contains = "list",
         slot = c(eco="integer",indic="integer", iscoeno="integer",ecodelay="integer"),
         validity=function(object){
           if (!all(lapply(object,class)=="function")) stop("error in prior construction : should be a list of functions")
           if (!all(order(c(object@eco,object@indic))==1:length(object))) stop("error in identification of ecological and indicator priors; their union is not 1:length(prior)")
         }
)
# gamma : mean = shape/rate, var = shape/rate^2
# eco = indices of ecological dynamics parameters,
# indic= indices of parameters linking indicators variables to ecological variables,
# iscoeno= indices of parameters linking scoenopoietic indicators variables to scoenopoietic ecological variables.
# Scenopoietic ecological variables are ecological variables that do not depend on other ecological variables,
# as in environmental niche modelling, as opposed to bionomic ecological variables.
# ecodelay= indices of the parameters that determine the time delay in the ecosystem model
# Prior distributions for every parameter (mostly uniform or log-uniform on
# plausible ranges; variance-like parameters are fixed constants).
# NOTE(review): PaPa_rmax's upper bound is log(10) here but log(15) in
# samplePrior() below — confirm which range is intended.
pr<-new("prior",list(PaPa_rmax=function() {exp(runif(n=1,min = log(5),max = log(10)))},
                     PaPa_K=function() {exp(runif(n=1,min = log(15),max = log(25)))},
                     TxPa_min=function() {runif(n=1,min = 10,max = 20)},
                     TxPa_max=function() {runif(n=1,min = 25,max = 35)},
                     PrPa_min=function() {runif(n=1,min = 2,max = 4)},
                     PlPa_r=function() {runif(1,10,50)},
                     Pa_delay=function() {rgamma(1,shape = 2,rate = 1)},
                     PrPl_rperPr=function() {runif(1,.3,.7)},
                     TxPl_min=function() {runif(1,5,14)},
                     TxPl_max=function() {runif(1,26,32)},
                     PaPl_r=function() {-exp(runif(1,log(0.001),log(0.045)))},
                     PlPl_r=function() {exp(runif(1,log(1.8),log(3.5)))},
                     PlPl_K=function() {exp(runif(1,log(1.5),log(3)))},
                     PlPl_var_r=function() {9e-04},
                     PePa_r=function() {exp(runif(1,log(.06),log(.15)))},
                     PaPe=function() {runif(1,1.3,1.9)},
                     T_sd=function() {runif(1,.2,.5)},
                     Pr_var_r=function() {0.0025},
                     Pa_sample_r=function() {0.05},
                     Pl_var_r=function() {(exp(runif(1,log(.03),log(.07))))^2},
                     Pe_pDet=function() {runif(1,.6,.99)},
                     Pe_pFalseDet=function() {exp(runif(1,log(.001),log(.02)))}),
    eco = 1:16,
    indic=17:22,
    iscoeno=17:18,
    ecodelay=as.integer(7))
names(pr)==names(p)  # sanity check: prior names line up with parameter names
pr[[1]]()            # one draw from the first prior (PaPa_rmax)
sample               # NOTE(review): bare symbol — just prints base::sample; probably a leftover
# sample() method for "prior": draws one value from each element's sampling
# function and wraps the result as a "parameter" object, copying the index
# slots from the prior.
# NOTE(review): the `size` argument is accepted but ignored — a single
# "parameter" is always returned; also the formals differ from base
# sample(x, size, replace, prob), which setMethod may warn about.
setMethod("sample",
          signature = "prior",
          definition = function(x,size=1){
            new("parameter",lapply(1:length(x),function(i) {x[[i]]()}),eco=x@eco,indic=x@indic,iscoeno=x@iscoeno,ecodelay=x@ecodelay)
          }
)
pr@eco       # inspect the ecological-parameter indices of the prior
sample(pr)   # draw one full parameter set from the prior
#
# Data
# data should be a 3 dim array, dim[1] is time, dim[2] indicator and ecosystem variables, dim[3] locality or repetition
#
#
#
# NOTE(review): load() then immediately save() of the same file is
# redundant — presumably left over from an interactive session.
load(file="data/climdata.RData")
save(climdata,file = "data/climdata.RData")
climdata[,,1]
# Build the maize data array: 5 indicator columns followed by 5 ecosystem
# columns; only the climatic indicators (iTx, iPr) are filled from climdata.
dataMaize=array(NA,dim=c(dim(climdata)[1],10,dim(climdata)[3]),dimnames=list(dimnames(climdata)[[1]],c("iTx","iPr","iPa","iPl","iPe","Tx","Pr","Pa","Pl","Pe"),dimnames(climdata)[[3]]))
dataMaize[,1:2,]=climdata
dataMaize[,,1]
# drop the last (incomplete) time step
dataMaize<-dataMaize[-dim(dataMaize)[1],,]
# Validity for "Data": a 3-D array (time x variables x locality/repetition)
# whose columns are the indicator variables followed by the ecosystem
# variables, in that order.
# NOTE(review): this class is redefined further down (around the yield.data
# section) *without* these slots — the later definition wins.
validity_Data=function(object){
  if (length(dim(object))!=3) stop("data should be a 3 dim array, dim[1] is time, dim[2] indicator and ecosystem variables, dim[3] locality or repetition")
  if (any(colnames(object)!=c(object@indicVar,object@ecoVar))) stop("names Data class of columns should be 'indicVar' followed by 'ecoVar' slots")
}
setClass("Data",
         contains="array",
         slots= c(indicVar="character",ecoVar="character",scoenoVarIndex="integer",bionoVarIndex="integer",timeStep="numeric"),
         validity = validity_Data)
# Wrap the maize array as a "Data" object, recording which columns are
# indicators vs ecosystem variables, which variables are scenopoietic
# (1:2 = Tx, Pr) vs bionomic (3:5), and the time step in seconds (~1 month).
object=new("Data",dataMaize,indicVar=c("iTx","iPr","iPa","iPl","iPe"),ecoVar=c("Tx","Pr","Pa","Pl","Pe"),scoenoVarIndex=1:2,bionoVarIndex=3:5,timeStep=365*24*3600/12)
#slot(object,"scoenoVarIndex")<-as.integer(c(1,2))
dataMaize=object
# "data_model" couples a Data array with the edge (graph) model used to
# simulate and evaluate it.
# Fix vs original: a trailing comma was left before the closing parenthesis
# when the validity argument was commented out, passing an empty argument
# to setClass().
setClass("data_model",
         contains="Data",
         slots=c(edge="edge")
         # validity = validity_data_model
)
# Intended validity for "data_model"; all checks are currently commented
# out, so the function always returns NULL (i.e. it is a no-op).
validity_data_model=function(object){
  #  if ((object@edge@p@indic)!=object@indicVar) stop("when creating data_model object,
  #  the slot @indic of Data should be identical to the @indicVar slot")
}
# Build the working data_model from the maize data and the edge model.
object=DM <- new("data_model",dataMaize,edge=EdgeModel)
object=DM
# simulate() for "data_model": fills ecological columns of the data array
# from the indicator columns using the edge model's sampling functions.
# Fix vs original: the "fromIndep" switch branch was never closed (a "}" and
# "," were missing before `eco =`), leaving the whole method unparseable;
# "eco" is now a proper second switch branch. Everything else is unchanged.
# NOTE(review): the "eco" branch still looks unfinished — the lapply() block
# references t/repet/Var from outside its own scope, and the final loops
# apply the bionomic reco functions with the scenopoietic parameter subset;
# verify before relying on this branch.
setMethod("simulate",
          signature = c(object="data_model"),
          definition = function(object,option="fromIndep"){
            switch(option,
                   fromIndep = {
                     p = subset(object@edge@p,type="iscoeno")
                     a=system.time({
                       for (month in 1:dim(object@.Data)[1]){
                         object@.Data[month,,] <- (
                           sapply(1:dim(object@.Data)[3],function(repet){
                             sapply(object@scoenoVarIndex,function(Var){
                               object@edge@rscoenoIndic2Eco[[Var]](p,object@.Data[month,object@indicVar[object@scoenoVarIndex],repet])
                             })
                           })
                         )}
                     }
                     )
                   },
                   eco = {
                     p = subset(object@edge@p,type="iscoeno")
                     a=system.time({
                       for (month in 1:dim(object@.Data)[1]){
                         object@.Data[month,,] <- (
                           sapply(1:dim(object@.Data)[3],function(repet){
                             sapply(object@scoenoVarIndex,function(Var){
                               object@edge@rscoenoIndic2Eco[[Var]](p,object@.Data[month,object@indicVar[object@scoenoVarIndex],repet])
                             })
                           })
                         )}
                     }
                     )
                     for (month in 1:dim(object@.Data)[1]){
                       object@.Data[month,,] <- (
                         sapply(1:dim(object@.Data)[3],function(repet){
                           sapply(object@scoenoVarIndex,function(Var){
                             object@edge@rscoenoIndic2Eco[[Var]](p,object@.Data[month,object@indicVar[object@scoenoVarIndex],repet])
                           })
                         })
                       )
                     }
                     #
                     lapply(1:dim(object@.Data)[3],function(x){
                       lapply(1:dim(object@.Data)[3],function(y){
                         lapply(object@scoenoVarIndex,function(z){
                           x = object@.Data[t,object@indicVar[object@scoenoVarIndex],repet]
                           Fun = object@edge@rscoenoIndic2Eco[[Var]]
                           object@.Data[t,object@ecoVar[Var],repet] <- Fun(p,x)
                         })})})
                     for (repet in 1:dim(object@.Data)[3]){
                       for (t in 1:dim(object@.Data)[1]){
                         for (Var in 1:length(object@bionoVarIndex)){
                           x = object@.Data[t,object@ecoVar[object@bionoVarIndex],repet]
                           Fun = object@edge@reco[[Var]]
                           object@.Data[t,object@ecoVar[Var],repet] <- Fun(p,x)
                         }
                       }
                     }
                   })
          }
)
# Placeholder class for posterior samples; no slots defined yet.
# NOTE(review): the trailing comma passes an empty (missing) argument to
# setClass(); harmless for a closure but worth cleaning up.
setClass("posterior",
)
# Container tying together the edge model, data, parameters, prior and
# posterior samples plus MCMC bookkeeping (thinning / burn-in).
# Fixes vs original:
#  - slot type "pior" corrected to "prior" (class "pior" does not exist);
#  - partially-matched argument name `slot` spelled out as `slots`;
#  - the validity function now returns TRUE (an S4 validity function must
#    return TRUE or a character message; the original returned NULL).
# NOTE(review): slot name "thining" (sic, "thinning") is kept to preserve
# the external interface.
setClass("bayesSet",
         contains = "edge",
         slots = c(data = "array", parameters = "parameter", prior = "prior",
                   posterior = "array", thining = "integer", burnin = "integer"),
         validity = function(object){
           TRUE
         })
#
# in this first example indicators are very good
# (Tx = iTx and Pr = iPr, Pa = iPa, Pl = iPl, Pe=iPe )
#
# First example: indicators are perfect, so ecosystem columns are copied
# straight from the indicator columns.
# NOTE(review): `data` is not defined in this section of the file —
# presumably a Data-like 3-D array from an earlier session; confirm before
# running (base::data is a function, so this errors as-is).
data[,c("Tx","Pr","Pa","Pl","Pe"),] <- data[,c("iTx","iPr","iPa","iPl","iPe"),]
data[,,1]
# EXAMPLE OF LEARNING ECOSYSTEM
# simulate ecosystem
# this is a an annual plant pathogen interaction system
# start from the simulation of ecosystem history and indicator data from
# a true model then assume we have access to indicator data
# to infer the model using prior.
# The full ecosystem data is simulated from indicator and prior
# The likelihood of is estimated from ecosystem history
# The posterior is calculated and sampled using metropolis algorithm
# NOTE(review): hard-coded, machine-specific working directories (Linux and
# Windows); at most one can succeed — consider removing setwd() calls.
setwd("/home/dupas/PlaNet/")
setwd("C:/Users/steph/OneDrive/Documents/GitHub/PlaNet")
# set true parameters
#true parameters
# True parameter values used to simulate the synthetic ecosystem and
# indicator data (the inference target).
p0=list(PaPa_rmax=10,PaPa_K=20,
        TxPa_min=15,TxPa_max=30,
        PrPa_min=3,
        PlPa_r=20,
        PrPl_min=3,
        TxPl_min=10,TxPl_max=30,
        PaPl_r=-0.03,
        PlPl_r=2.5,
        PlPl_K=2,
        PlPl_var_r=0.03^2,
        PePa_r=0.1,
        PaPe=1.5,
        Pl_var_r=0.05^2,
        T_sd=0.3,
        Pe_pDet=.8,
        Pe_pFalseDet=.005)
# set parameters to true parameters
p_PaPa_rmax=p0["PaPa_rmax"]
p_PaPa_K=p0["PaPa_K"]
p_TxPa_min=p0["TxPa_min"]
p_TxPa_max=p0["TxPa_max"]
p_PrPa_min=p0["PrPa_min"]
p_PlPa_r=p0["PlPa_r"]
p_PrPl_min=p0["PrPl_min"]
p_TxPl_min=p0["TxPl_min"]
p_TxPl_max=p0["TxPl_max"]
p_PaPl_r=p0["PaPl_r"]
p_PlPl_r=p0["PlPl_r"]
p_PlPl_K=p0["PlPl_K"]
p_PlPl_var_r=p0["PlPl_var_r"]
p_PePa_r=p0["PePa_r"]
p_PaPe=p0["PaPe"]
#ecoindic
p_Pl_var_r=p0["Pl_var_r"]
p_T_sd=p0["T_sd"]
p_Pe_pDet=p0["Pe_pDet"]
p_Pe_pFalseDet=p0["Pe_pFalseDet"]
#
# set time series
#
setwd("/home/dupas/PlaNet/")  # NOTE(review): hard-coded path, see above
ecoVar=c("Tx","Pr","Pa","Pl","Pe") # T: temperature, Pr: precipitation, Pl : plant density; Pa: parasite density; Pe : pesticide application
# NOTE(review): "iT" here vs "iTx" used everywhere else — confirm which
# spelling is intended.
indicVar=c("iT","iPr","iPa","iPl","iPe")
# NOTE(review): the same RData file is loaded twice (second load is
# redundant); it is expected to define `dataf`.
load("yield.data.RData")
dataf[,,1]
load(file = "yield.data.RData")
# NOTE(review): this redefines the "Data" class from above *without* the
# indicVar/ecoVar/scoenoVarIndex/... slots, replacing the earlier, richer
# definition; also the dimension legend in the error message disagrees with
# the earlier one (variables/population/time vs time/variables/locality).
setClass("Data",
         contains="array",
         validity=function(object){
           if (length(dim(object))!=3) stop("data should be a 3 dim array, dim[1] is indicator and ecosystem variables, dim[2] is population, dim[3] is time")
         }
)
# Build the indicator (idata) and ecosystem (edata) arrays from the yield
# data: 8 time steps, columns picked from dataf and renamed Tx/Pr/Pa/Pl/Pe.
# NOTE(review): columns 2,2,2 of dataf are reused for Pa/Pl/Pe — confirm
# this placeholder choice.
idata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(idata) <- list(dimnames(idata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(idata)[[3]])
edata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(edata) <- list(dimnames(edata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(edata)[[3]])
# unknown parasite densities to be simulated; fixed initial conditions:
edata[,3,]<-NA
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
# NOTE(review): duplicated line (same assignment as above)
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
edata[1:2,3,]<-0 # there is no parasite before planting
edata[1:2,5,]<-0 # there is no pesticide before planting
edata[,,1:3]
# simulate true edata
#
# Pe si rpois(Pa) > PaPe
# Pl Pl*r where r= 1+exp(edata[i-1,2,k]-p$PrPl_min/(14-p$PrPl_min))+exp((edata[i-1,1,k]-p$TxPl_min)/(p$TxPl_max-p$TxPl_min))
# Pl Pl*r where r= 1+p$PlPl_r*4*(edata[i-1,1,k]-p$TxPl_min)*(p$TxPl_max-edata[i-1,1,k])/(p$TxPl_max+p$TxPl_min)*(edata[i-1,1,k]>p$TxPl_min)*(edata[i-1,1,k]<p$TxPl_max)
# ((.5+.5*(edata[i-1,2,k]-p$PrPl_min)/(4-p$PrPl_min))*(edata[i-1,2,k]>p$PrPl_min)*(edata[i-1,2,k]<4)+(edata[i-1,2,k]>=4))
# (1-edata[,4,]/p$PlPl_K)
#
e0=edata
simulTrueEdata <- function(p,edata){
for (k in 1:dim(edata)[3]){
for (i in 3:dim(edata)[1]){
#Pe pesticide application
edata[i,5,k]={if (edata[i-1,3,k]!=0) (rpois(1,edata[i-1,3,k]) > p$PaPe) else FALSE}
#Pl plant density
edata[i,4,k]=rgamma(1,shape=(edata[i-1,4,k]+{if ((edata[i-1,2,k]>p$PrPl_min)*(edata[i-1,2,k]<4)*(edata[i-1,1,k]>p$TxPl_min)*(edata[i-1,1,k]<p$TxPl_max)) {p$PlPl_r*4*(edata[i-1,1,k]-p$TxPl_min)*(p$TxPl_max-edata[i-1,1,k])/(p$TxPl_max+p$TxPl_min)*
((.5+.5*(edata[i-1,2,k]-p$PrPl_min)/(4-p$PrPl_min))+(edata[i-1,2,k]>=4))*
(1-edata[i-1,4,k]/p$PlPl_K)*edata[i-1,4,k]} else 0})/p$PlPl_var_r,scale=p$PlPl_var_r)
#Pa parasite density
edata[i,3,k]={a = (edata[i-1,3,k]==0)+(!Pe+Pe*p$PePa_r)*edata[i-1,3,k]*p$PaPa_rmax*(edata[i-1,1,k]>p$TxPa_min)*(edata[i-1,1,k]<p$TxPa_max)*(edata[i-1,1,k]-p$TxPa_min)/(p$TxPa_max-p$TxPa_min)*(edata[i-2,2,k]>p$PrPa_min)*(edata[i-1,4,k]*p$PlPa_r);
rpois(1,a*(a<p$PaPa_K)+p$PaPa_K*(a>=p$PaPa_K))}
}
}
edata
}
edataTrue=simulTrueEdata(p0,edata)
# i;k
# NOTE(review): the scratch calls below call rpois(lambda) without the `n`
# argument (signature is rpois(n, lambda)) and reference `p` rather than
# `p0`; the final array's dim does not match the number of values produced
# (3:T yields T-2 time slices). They error or recycle if run as-is.
lapply(1:dim(edata)[3], function(k) {lapply(3:dim(edata)[1], function (i) {
  {if (edata[i-1,3,k]!=0) (rpois(edata[i-1,3,k]) > p$PaPe) else FALSE}})})
lapply(1:dim(edata)[3], function(k) {lapply(3:dim(edata)[1], function (i) {
  edata[i,3,k]})})
b=array(unlist(lapply(1:dim(edata)[3], function(k) {lapply(3:dim(edata)[1], function (i) {
  {i-1}})})),dim=c(dim(edata)[c(1,3)]))
# Second (vectorised, lapply/aperm) draft of simulTrueEdata.
# NOTE(review): superseded by the third definition below. Known problems:
# rpois(edata[i-1,3,k]) is missing the `n` argument; iterating 3:dim[1]
# produces dim[1]-2 time slices while the output array is dimensioned for
# dim[1]; Pl is computed without the rgamma noise used elsewhere. Left
# byte-identical; comments only.
simulTrueEdata <- function(p,edata){
  aperm(array(unlist(lapply(1:dim(edata)[3], function(k) {lapply(3:dim(edata)[1], function (i) {
    Pe={if (edata[i-1,3,k]!=0) (rpois(edata[i-1,3,k]) > p$PaPe) else FALSE}
    c(Pe=Pe,
      Pl=edata[i-1,4,k]+{if ((edata[i-1,2,k]>p$PrPl_min)*(edata[i-1,2,k]<4)*(edata[i-1,1,k]>p$TxPl_min)*(edata[i-1,1,k]<p$TxPl_max)) {p$PlPl_r*4*(edata[i-1,1,k]-p$TxPl_min)*(p$TxPl_max-edata[i-1,1,k])/(p$TxPl_max+p$TxPl_min)*
        ((.5+.5*(edata[i-1,2,k]-p$PrPl_min)/(4-p$PrPl_min))+(edata[i-1,2,k]>=4))*
        (1-edata[i-1,4,k]/p$PlPl_K)*edata[i-1,4,k]} else 0},
      Pa= {a = (edata[i-1,3,k]==0)+(!Pe+Pe*p$PePa_r)*edata[i-1,3,k]*p$PaPa_rmax*(edata[i-1,1,k]>p$TxPa_min)*(edata[i-1,1,k]<p$TxPa_max)*(edata[i-1,1,k]-p$TxPa_min)/(p$TxPa_max-p$TxPa_min)*(edata[i-2,2,k]>p$PrPa_min)*(edata[i-1,4,k]*p$PlPa_r);
      rpois(1,a*(a<p$PaPa_K)+p$PaPa_K*(a>=p$PaPa_K))})
  }
  )})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
}
edataTrue=simulTrueEdata(p0,edata)
# Third (final) draft of simulTrueEdata, used below.
# NOTE(review): still broken as written — rpois(edata[i-1,3,k]) is missing
# the `n` argument; the time loop starts at i = 1, so edata[i-1,...] and
# edata[i-2,...] index row 0/-1 (zero-length subset -> error in `if`); the
# chained updates also ignore the values computed for earlier i (each slice
# reads the *input* edata). The working version is the loop-based one above.
# Left byte-identical; comments only.
simulTrueEdata <- function(p,edata){
  aperm(array(unlist(lapply(1:dim(edata)[3], function(k) {lapply(1:dim(edata)[1], function (i) {
    Pe={if (edata[i-1,3,k]!=0) (rpois(edata[i-1,3,k]) > p$PaPe) else FALSE}
    c(Pe=Pe,
      Pl=rgamma(1,shape=1+(exp(edata[i-1,2,k]-p$PrPl_min/(14-p$PrPl_min))+exp((edata[i-1,1,k]-p$TxPl_min)/(p$TxPl_max-p$TxPl_min)))*p$PlPl_var_r,rate = p$PlPl_var_r),
      Pa= {a = (edata[i-1,3,k]==0)+(!Pe+Pe*p$PePa_r)*edata[i-1,3,k]*p$PaPa_rmax*(edata[i-1,1,k]>p$TxPa_min)*(edata[i-1,1,k]<p$TxPa_max)*(edata[i-1,1,k]-p$TxPa_min)/(p$TxPa_max-p$TxPa_min)*(edata[i-2,2,k]>p$PrPa_min)*(edata[i-1,4,k]*p$PlPa_r);
      rpois(1,a*(a<p$PaPa_K)+p$PaPa_K*(a>=p$PaPa_K))})})})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
}
# Pe si rpois(Pa) > PaPe
# Pl Pl*r where r= 1+exp(edata[i-1,2,k]-p$PrPl_min/(14-p$PrPl_min))+exp((edata[i-1,1,k]-p$TxPl_min)/(p$TxPl_max-p$TxPl_min))
# Pl Pl*r where r= 1+p$PlPl_r*4*(edata[i-1,1,k]-p$TxPl_min)*(p$TxPl_max-edata[i-1,1,k])/(p$TxPl_max+p$TxPl_min)*(edata[i-1,1,k]>p$TxPl_min)*(edata[i-1,1,k]<p$TxPl_max)
# ((.5+.5*(edata[i-1,2,k]-p$PrPl_min)/(4-p$PrPl_min))*(edata[i-1,2,k]>p$PrPl_min)*(edata[i-1,2,k]<4)+(edata[i-1,2,k]>=4))
# (1-edata[,4,]/p$PlPl_K)
# NOTE(review): duplicated call — the second assignment overwrites the
# first with a fresh simulation.
edataTrue=simulTrueEdata(p0,edata)
edataTrue <- simulTrueEdata(p0,edata)
edataTrue[,,1]
# simulate true idata
# Simulate observed indicators from the true ecosystem history: Gaussian
# noise on temperature, Poisson sampling of precipitation and parasites,
# gamma noise on plant density, and imperfect binomial detection of
# pesticide use.
# NOTE(review): iterates 3:dim[1] but dimensions the result for all dim[1]
# time steps (length mismatch, silent recycling) — TODO confirm intent.
# NOTE(review): uses p$Pl_sd_r, which p0 does not define (it has Pl_var_r);
# rgamma is also called with positional shape/rate — verify the intended
# parameterisation.
simulTrueIdata <- function(p,edata){
  idata=aperm(array(unlist(lapply(1:dim(edata)[3], function(k){lapply(3:dim(edata)[1], function (i) {
    Pl=rgamma(1,edata[i,4,k],edata[i,4,k]*p$Pl_sd_r)
    #if (Pl<0) Pl=0
    c(T=rnorm(1,edata[i,1,k],p$T_sd),Pr=rpois(1,edata[i,2,k]),Pa=rpois(1,edata[i,3,k]),Pl=Pl,Pe=rbinom(1,edata[i,5,k],p$Pe_pDet)+rbinom(1,!edata[i,5,k],p$Pe_pFalseDet))
  })})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
  idata
}
# for the inference, simulate edata from idata
# Intended to simulate a plausible ecosystem history from indicator data
# (for the inference step).
# NOTE(review): several apparent defects, left byte-identical:
#  - n = dim(idata)[2] is the number of variables, but rnorm/rgamma are
#    applied to whole time x repetition slices (length mismatch / recycling);
#  - rpois(1, ...) draws a single value and assigns it to the entire slice;
#  - the Pe slice is overwritten with a scalar log-likelihood (p0+p1), not
#    with simulated pesticide states;
#  - the Pe computation reads the global `edata`, not the `idata` argument.
simulEdataFromIdata <- function(p,idata){
  n=dim(idata)[2]
  idata[,1,] = rnorm(n=n,mean=round(idata[,1,]),sd=p$T_sd)
  idata[,2,] = rpois(1,idata[,2,])
  idata[,3,] = rpois(1,idata[,3,])
  idata[,4,] = rgamma(n, shape=idata[,4,], scale = p$Pl_var_r)
  idata[,5,] ={
    p1 <- which1 <- which(as.logical(edata[,5,]))
    p1 <- sum(dbinom(idata[,5,][which1],1,p$Pe_pDet,log=TRUE))
    p0 <- which0 <- which(!as.logical(edata[,5,]))
    p0 <- sum(dbinom(idata[,5,][which0],1,p$Pe_pFalseDet,log=TRUE))
    p0+p1
  }
  idata
}
# NOTE(review): the first assignment's result is immediately discarded by
# the second (idataTrue <- idata); also simulEdataFromIdata is called with
# edata as its idata argument — confirm which initialisation is intended.
idataTrue <- simulEdataFromIdata(p0,edata)
# save true idata
idataTrue <- idata
# Empty posterior trace: one row per accepted MCMC iteration (the dummy
# row 0 is created then removed to fix the column names/types).
Posterior <- data.frame(runi=0,PaPa_rmax=0,TxPa_min=0,PrPa_min=0,PlPa_r=0,PrPl_rperPr=0,TxPl_min=0,
                        PaPl_r=0,PlPl_r=0,PePa_r=0,PaPe=0,Pl_sd_r=0,T_sd=0,Pe_pDet=0,Pe_pFalseDet=0,prior=0,posterior=0)
Posterior=Posterior[-1,]
runi=1
# Sampling algorithm
# Draw one parameter set from the prior (uniform or log-uniform on the
# ranges below). Returns a named list when option == "prior" and an
# invisible NULL otherwise — identical in distribution (and in RNG
# consumption order) to the original: each log-uniform draw still consumes
# exactly one runif() call.
samplePrior <- function(option = "prior") {
  if (!identical(option, "prior")) {
    return(invisible(NULL))
  }
  # one draw from a log-uniform distribution on [lo, hi]
  log_unif <- function(lo, hi) exp(runif(1, log(lo), log(hi)))
  list(
    PaPa_rmax    = log_unif(5, 15),
    PaPa_K       = log_unif(15, 25),
    TxPa_min     = runif(1, 10, 20),
    TxPa_max    = runif(1, 25, 35),
    PrPa_min     = runif(1, 1.5, 4),
    PlPa_r       = runif(1, 10, 50),
    PrPl_rperPr  = runif(1, .3, .7),
    TxPl_min     = runif(1, 5, 14),
    TxPl_max     = runif(1, 26, 32),
    PaPl_r       = -log_unif(0.001, 0.045),
    PlPl_r       = log_unif(1.8, 3.5),
    PlPl_K       = log_unif(1.5, 3),
    PlPl_sd      = runif(1, .07, .15),
    PePa_r       = log_unif(.06, .15),
    PaPe         = runif(1, 1.3, 1.9),
    # ecosystem-to-indicator parameters
    Pl_var_r     = log_unif(.03, .07)^2,
    T_sd         = runif(1, .2, .5),
    Pe_pDet      = runif(1, .6, .99),
    Pe_pFalseDet = log_unif(.001, .02)
  )
}
# Metropolis proposal: perturb each parameter by +/- rate * (prior range),
# reflecting/clamping at the prior bounds.
# NOTE(review): left byte-identical; apparent defects to verify:
#  - the PaPa_K line reads `a` inside the braces before it is assigned
#    ({rbinom(...); if(a==0) ...}) -> "object 'a' not found" on a fresh call,
#    and its clamp `if (p$PaPa_K>15) p$PaPa_K=25` looks like it should test >25;
#  - TxPa_min's lower clamp `if (a<10) a=15` looks like it should set a=10;
#  - Pl_var_r mixes log(sd) and 2*log(sd) scales between the two rbinom arms.
getPprime <- function(p,rate=1/20){
  list(PaPa_rmax=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p$PaPa_rmax+(15-5)*rate) else log(p$PaPa_rmax-(15-5)*rate)};{ if (a<log(5)) log(5) else if (a>log(15)) log(15) else a}}),
       PaPa_K={a={rbinom(1,1,1/2);if(a==0) a=-1;a}*(log(25)-log(15))*rate;if (p$PaPa_K<15) p$PaPa_K=15;if (p$PaPa_K>15) p$PaPa_K=25;p$PaPa_K},
       TxPa_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(20-10)*rate;a=p$TxPa_min+b;if (a<10) a=15;if (a>20) a=20;a},
       TxPa_max={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(35-25)*rate;a=p$TxPa_max+b;if (a<25) a=25;if (a>35) a=35;a},
       PrPa_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(4-1.5)*rate;a=p$PrPa_min+b;if (a<1.5) a=1.5;if (a>4) a=4;a},
       PlPa_r={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(1.5-.7)*rate;a=p$PlPa_r+b;if (a<.7) a=.7;if (a>1.5) a=1.5;a},
       PrPl_rperPr={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.7-.3)*rate;a=p$PrPl_rperPr+b;if (a<.3) a=.3;if (a>.7) a=.7;a},
       TxPl_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(14-5)*rate;a=p$TxPl_min+b;if (a<5) a=5;if (a>14) a=14;a},
       TxPl_max={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(32-26)*rate;a=p$TxPl_max+b;if (a<26) a=26;if (a>32) a=32;a},
       PaPl_r=-exp({a={if(as.logical(rbinom(1,1,1/2))) log(-p$PaPl_r-(.01-.045)*rate) else log(-p$PaPl_r+(.01-.045)*rate)};{ if (a<log(.01)) log(.01) else if (a>log(.045)) log(.045) else a}}),
       PlPl_r=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p$PlPl_r+(3.5-1.8)*rate) else log(p$PlPl_r-(3.5-1.8)*rate)};{ if (a<log(1.8)) log(1.8) else if (a>log(3.5)) log(3.5) else a}}),
       PlPl_K=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p$PlPl_K+(3-1.5)*rate) else log(p$PlPl_K-(3-1.5)*rate)};{ if (a<log(1.5)) log(1.5) else if (a>log(3)) log(3) else a}}),
       PlPl_sd={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.15-.07)*rate;a=p$PlPl_sd+b;if (a<.07) a=.07;if (a>.15) a=.15;a},
       PePa_r={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.15-.06)*rate;a=p$PePa_r+b;if (a<.06) a=.06;if (a>.15) a=.15;a},
       PaPe={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(1.9-1.3)*rate;a=p$PaPe+b;if (a<1.3) a=1.3;if (a>1.9) a=1.9;a},
       Pl_var_r=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p$Pl_var_r^.5+(.07-.03)*rate) else 2*log(p$Pl_var_r^.5-(.07-.03)*rate)};
       { if (a<2*log(.03)) 2*log(.07) else if (a>2*log(.07)) 2*log(.07) else a}}),
       T_sd={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.5-.2)*rate;a=p$T_sd+b;if (a<.2) a=.2;if (a>.5) a=.5;a},
       Pe_pDet={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.99-.6)*rate;a=p$Pe_pDet+b;if (a<.6) a=.6;if (a>.99) a=.99;a},
       Pe_pFalseDet={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.02-.001)*rate;a=p$Pe_pFalseDet+b;if (a<.001) a=.001;if (a>.02) a=.02;a}
  )}
# Sanity checks of the gamma parameterisation used below:
# shape = 2, scale = 1/3 -> mean = shape*scale = 2/3;
# shape = 2, rate  = 3   -> var  = shape/rate^2 = 2*(1/3)^2.
# Fix vs original: `mean(...) 2/3` juxtaposed two expressions on one line,
# which is a parse error (it broke parsing of the whole file); the expected
# value is now a comment.
mean(rgamma(1000, 2, scale = 1/3))  # ~ 2/3
var(rgamma(100000, 2, 3))           # ~ 2*(1/3)^2
# shape = k
# scale = theta
# mean = k theta
# var = k theta^2
# k= 9 theta = 0.5 mean = 4.5 var =
# gamma
# mean=shape/rate=shape*scale
# var=shape/(rate^2)=shape*scale^2
# shape=mean^2/var
# scale = var/mean
# rate = 1/scale
# shape * scale = mean => shape = mean * rate
# shape = mean*rate = mean*mean/var
# scale=(var/shape)^.5
# shape=shape^.5*mean/var^.5
# shape^.5=mean/var^.5
# scale=var^1.5/mean
# p_pl_sd_r = var.5/mean = 1/shape^.5
#
# rate = shape/
# rate = p_pl_sd_r^2 = p_pl_var_r
#
# Initialisation of learning
# Sample the first state from the prior (or set it to the true parameter
# values, shown in the trailing comments, to start the chain in the right
# place).
p_PaPa_rmax=exp(runif(1,log(5),log(15)))#p_PaPa_rmax=10
p_PaPa_K=exp(runif(1,log(15),log(25)))#p_PaPa_K=20
p_TxPa_min=runif(1,10,20)#p_TxPa_min=15
p_TxPa_max=runif(1,25,35)#p_TxPa_max=30
p_PrPa_min=runif(1,1.5,4)#p_PrPa_min=3
p_PlPa_r=runif(1,.7,1.5)#p_PlPa_r=1
p_PrPl_rperPr=runif(1,.3,.7)#p_PrPl_rperPr=.5
p_TxPl_min=runif(1,5,14)#p_TxPl_min=10
p_TxPl_max=runif(1,26,32)#p_TxPl_max=30
p_PaPl_r=-exp(runif(1,log(0.001),log(0.045)))#p_PaPl_r=-0.03
p_PlPl_r=exp(runif(1,log(1.8),log(3.5)))#p_PlPl_r=2.5
p_PlPl_K=exp(runif(1,log(1.5),log(3)))#p_PlPl_K=2
p_PlPl_sd=runif(1,.07,.15)#p_PlPl_sd=.1
p_PePa_r=exp(runif(1,log(.06),log(.15)))#p_PePa_r=0.1
p_PaPe=runif(1,1.3,1.9)#p_PaPe=1.5
#ecoindic
p_Pl_sd_r=exp(runif(1,log(.03),log(.07)))#p_Pl_sd_r=0.05
p_T_sd=runif(1,.2,.5)#p_T_sd=0.3
p_Pe_pDet=runif(1,.6,.99)#p_Pe_pDet=.8
p_Pe_pFalseDet=exp(runif(1,log(.001),log(.02)))#p_Pe_pFalseDet=.005
#
# Learning loop
#
# Metropolis-style sampler: per iteration, (1) simulate the ecosystem array
# `edata` (dims: period x variable x population) forward from the current
# parameter draw, (2) score the observed indicator array `idata` against the
# simulation (pIndic, a log-likelihood), (3) record the draw, (4) propose a
# random-walk move on every parameter, clamped to its prior support.
# NOTE(review): `edata`, `idata` and `pprime` must exist before this runs --
# none of them is defined in this part of the file.  `Posterior` is filled at
# the bottom of the loop but only created AFTER the loop (see below); it must
# be created first.
for (runi in 2:100){# sample prior
print(runi)
# simulate edata from prior sample
for (k in 1:dim(edata)[3]){
for (i in 3:dim(edata)[1]){
#Pe
edata[i,5,k]=edata[i-1,3,k]>p_PaPe
#Pl
# NOTE(review): the `T` in the temperature term below is R's TRUE unless a
# global T has been assigned elsewhere (it is, further down this file);
# `edata[i-1,1,k]` was probably intended -- confirm.
a=((edata[i-1,1,k]>p_TxPl_min)*0.1)+edata[i-1,4,k]*p_PlPl_r*((((1+edata[i-1,2,k])*p_PrPl_rperPr)*(edata[i-1,1,k]>p_TxPl_min))*
(1+(T-p_TxPl_min)/(p_TxPl_max-p_TxPl_min))*(edata[i-1,1,k]<p_TxPl_max))+edata[i-1,3,k]*(p_PaPl_r)
if(a>p_PlPl_K){a=2}
if(a<0) {a=0}
edata[i,4,k]=a
#Pa
a=(edata[i-1,3,k]==0)+((!edata[i,5,k])+edata[i,5,k]*p_PePa_r)*edata[i-1,3,k]*p_PaPa_rmax*((edata[i-1,1,k]>p_TxPa_min)*(edata[i-1,1,k]<p_TxPa_max))*(edata[i-1,1,k]-p_TxPa_min)/(p_TxPa_max-p_TxPa_min)*(edata[i-2,2,k]>p_PrPa_min)*(edata[i-1,4,k]*p_PlPa_r)
edata[i,3,k]=rpois(1,a*(a<p_PaPa_K)+p_PaPa_K*(a>=p_PaPa_K))
}
}
# Calculate idata probability from edata history
# Log-likelihood of the indicators: Gaussian temperature, Poisson counts,
# Bernoulli detection of pests (with detection / false-detection rates).
pIndic <- sum(c(pT = sum(log(dnorm(round(idata[,1,]),edata[,1,],p_T_sd))),
pH = sum(log(dpois(round(idata[,2,]),edata[,2,]))),
pPa = {
p1 <- which1 <- which(as.logical(edata[,3,]))
p1 <- sum(dbinom(idata[,5,][which1],1,p_Pe_pDet,log=TRUE))
p1*length(idata[,5,])/length(which1)
},
sdPl={sdPl=edata[,4,]*p_Pl_sd_r
sdPl[sdPl<=0.1]=.1
sum(log(dnorm(x=idata[,4,],mean=edata[,4,],sd=sdPl)))},
pPe={
p1 <- which1 <- which(as.logical(edata[,5,]))
p1 <- sum(dbinom(idata[,5,][which1],1,p_Pe_pDet,log=TRUE))
p0 <- which0 <- which(!as.logical(edata[,5,]))
p0 <- sum(dbinom(idata[,5,][which0],1,p_Pe_pFalseDet,log=TRUE))
p0+p1
}
))
# calculate posterior
# posterior = likelihood * prior
# Log prior density of the current draw (uniform/log-uniform supports).
prior = log(prod(c(p_PaPa_rmax=dunif(log(p_PaPa_rmax),log(5),log(15))/(log(15)-log(5)),
p_PaPa_K=1/(log(25)-log(15)),
p_TxPa_min=1/(-10+20),
p_TxPa_max=1/(-25+35),
p_PrPa_min=1/(-1.5+4),
p_PlPa_r=1/(-.7+1.5),
p_PrPl_rperPr=1/(-.3+.7),
p_TxPl_min=1/(5+14),
p_TxPl_max=1/(26+32),
p_PaPl_r=1/(-log(0.015)+log(0.045)),
p_PlPl_r=1/(-log(1.8)+log(3.5)),
p_PlPl_K=1/(-log(1.5)+log(3)),
p_PlPl_sd=1/(-.07+.15),
p_PePa_r=1/(-log(.06)+log(.15)),
p_PaPe=1/(-1.3+1.9),
#ecoindic
p_Pl_sd_r=1/(-log(.03)+log(.07)),
p_T_sd=1/(-.2+.5),
# NOTE(review): p_Pe_pFalseDet is sampled on (.001,.02) log-scale above, but
# its width here is (-.7+.99) -- the two do not match; confirm intent.
p_Pe_pDet=1/(-.6+.99),
p_Pe_pFalseDet=1/(-.7+.99))))
# since sampled from uniforms props = constant
# we use only likelihood to sample
posterior = pIndic + prior
# Record the draw (17 values: iteration index + 15 parameters + prior/posterior).
Posterior[runi,] <- c(runi,p_PaPa_rmax,p_TxPa_min,p_PrPa_min,p_PlPa_r,p_PrPl_rperPr,p_TxPl_min,p_PaPl_r,p_PlPl_r,p_PePa_r,p_PaPe,p_Pl_sd_r,p_T_sd,p_Pe_pDet,p_Pe_pFalseDet,prior,posterior)
# metropolis move
# Each parameter takes a +/- (range/20) step, clamped to its prior support.
p_PaPa_rmax=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p_PaPa_rmax+(15-5)/20) else log(p_PaPa_rmax-(15-5)/20)};{ if (a<log(5)) log(5) else if (a>log(15)) log(15) else a}})#exp(runif(1,log(5),log(15)))
#p_PaPa_K={a={rbinom(1,1,1/2);if(a==0) a=-1;a}*(log(25)-log(15))/20;if (p_PaPa_K<15) p_PaPa_K=15;if (p_PaPa_K>15) p_PaPa_K=25;p_PaPa_K}#exp(runif(1,log(15),log(25)))
# NOTE(review): lower clamp below assigns a=15 when a<10 -- looks like it
# should be a=10; confirm.
p_TxPa_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(20-10)/20;a=p_TxPa_min+b;if (a<10) a=15;if (a>20) a=20;a}#runif(1,10,20)
p_TxPa_max={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(35-25)/20;a=p_TxPa_max+b;if (a<25) a=25;if (a>35) a=35;a}#runif(1,25,35)
p_PrPa_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(4-1.5)/20;a=p_PrPa_min+b;if (a<1.5) a=1.5;if (a>4) a=4;a}#runif(1,1.5,4)
p_PlPa_r={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(1.5-.7)/20;a=p_PlPa_r+b;if (a<.7) a=.7;if (a>1.5) a=1.5;a}#runif(1,.7,1.5)
p_PrPl_rperPr={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.7-.3)/20;a=p_PrPl_rperPr+b;if (a<.3) a=.3;if (a>.7) a=.7;a}#runif(1,.3,.7)
p_TxPl_min={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(14-5)/20;a=p_TxPl_min+b;if (a<5) a=5;if (a>14) a=14;a}#runif(1,5,14)
p_TxPl_max={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(32-26)/20;a=p_TxPl_max+b;if (a<26) a=26;if (a>32) a=32;a}#runif(1,26,32)
p_PaPl_r=-exp({a={if(as.logical(rbinom(1,1,1/2))) log(-p_PaPl_r-(.01-.045)/20) else log(-p_PaPl_r+(.01-.045)/20)};{ if (a<log(.01)) log(.01) else if (a>log(.045)) log(.045) else a}})#-exp(runif(1,log(0.015),log(0.045)))
p_PlPl_r=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p_PlPl_r+(3.5-1.8)/20) else log(p_PlPl_r-(3.5-1.8)/20)};{ if (a<log(1.8)) log(1.8) else if (a>log(3.5)) log(3.5) else a}})#exp(runif(1,log(1.8),log(3.5)))
p_PlPl_K=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p_PlPl_K+(3-1.5)/20) else log(p_PlPl_K-(3-1.5)/20)};{ if (a<log(1.5)) log(1.5) else if (a>log(3)) log(3) else a}})#exp(runif(1,log(1.5),log(3)))
p_PlPl_sd={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.15-.07)/20;a=p_PlPl_sd+b;if (a<.07) a=.07;if (a>.15) a=.15;a}#runif(1,.07,.15)
p_PePa_r={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.15-.06)/20;a=p_PePa_r+b;if (a<.06) a=.06;if (a>.15) a=.15;a}#exp(runif(1,log(.06),log(.15)))
p_PaPe={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(1.9-1.3)/20;a=p_PaPe+b;if (a<1.3) a=1.3;if (a>1.9) a=1.9;a}#runif(1,1.3,1.9)
#ecoindic
# NOTE(review): the lower clamp below returns log(.07) where log(.03) was
# almost certainly intended, and the following line recomputes p_Pl_sd_r but
# discards the result -- confirm which move is meant to stand.
p_Pl_sd_r=exp({a={if(as.logical(rbinom(1,1,1/2))) log(p_Pl_sd_r+(.07-.03)/20) else log(p_Pl_sd_r-(.07-.03)/20)};{ if (a<log(.03)) log(.07) else if (a>log(.07)) log(.07) else a}})#exp(runif(1,log(.03),log(.07)))
{b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.7-.03)/20;a=p_Pl_sd_r+b;if (a<.03) a=.03;if (a>.7) a=.7;a}
p_T_sd={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.5-.2)/20;a=p_T_sd+b;if (a<.2) a=.2;if (a>.5) a=.5;a}#runif(1,.2,.5)
p_Pe_pDet={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.99-.6)/20;a=p_Pe_pDet+b;if (a<.6) a=.6;if (a>.99) a=.99;a}#runif(1,.6,.99)
p_Pe_pFalseDet={b={b=rbinom(1,1,1/2);if(b==0) b=-1;b}*(.02-.001)/20;a=p_Pe_pFalseDet+b;if (a<.001) a=.001;if (a>.02) a=.02;a}#exp(runif(1,log(.001),log(.02)))
# NOTE(review): the assignments below overwrite every proposal just computed
# with values from `pprime`, which is never defined in this file -- one of the
# two mechanisms (inline moves vs. pprime) should be removed.
p_PaPa_rmax=pprime["p_PaPa_rmax"]
p_TxPa_min=pprime["p_TxPa_min"]
p_PrPa_min=pprime["p_PrPa_min"]
p_PlPa_r=pprime["p_PlPa_r"]
p_PrPl_rperPr=pprime["p_PrPl_rperPr"]
p_TxPl_min=pprime["p_TxPl_min"]
p_PaPl_r=pprime["p_PaPl_r"]
p_PlPl_r=pprime["p_PlPl_r"]
p_PlPl_K=pprime["p_PlPl_K"]
p_PlPl_sd=pprime["p_PlPl_sd"]
p_PePa_r=pprime["p_PePa_r"]
p_PaPe=pprime["p_PaPe"]
#ecoindic
p_Pl_sd_r=pprime["p_Pl_sd_r"]
p_T_sd=pprime["p_T_sd"]
p_Pe_pDet=pprime["p_Pe_pDet"]
p_Pe_pFalseDet=pprime["p_Pe_pFalseDet"]
}
# Trace of the Metropolis sampler: one row per iteration, holding the
# iteration index, the 15 sampled parameters, and the prior / posterior
# log-densities.
# FIX: the loop above assigns 17 values per row
# (c(runi, 15 parameters, prior, posterior)); the original frame had only 16
# columns (no `runi`), so every row assignment would have failed.
# NOTE(review): this frame is read by the learning loop but defined after it;
# it must be created before the loop is executed.
Posterior <- data.frame(runi=0,p_PaPa_rmax=0,p_TxPa_min=0,p_PrPa_min=0,p_PlPa_r=0,p_PrPl_rperPr=0,p_TxPl_min=0,
                        p_PaPl_r=0,p_PlPl_r=0,p_PePa_r=0,p_PaPe=0,p_Pl_sd_r=0,p_T_sd=0,p_Pe_pDet=0,p_Pe_pFalseDet=0,prior=0,posterior=0)
#
# parDisFun: a parameterised distribution function f(x, p) carrying metadata
# about the variables (x) and parameter groups (p) it expects.
setClass(
  "parDisFun",
  contains = "function",
  slots = c(
    lengthx = "integer",    # number of input variables expected in x
    lengthp = "integer",    # number of parameter groups expected in p
    xname   = "character",  # names of the input variables
    pnames  = "character"   # names of the parameter groups
  )
  # prototype = list(function(x,p) rpois(x*p[[1]]),lengthx=1:1,lengthp=1:1,xname="Tx",pnames="TxPa"),
)
# parDisFun are the parametrized distribution functions ]linking ecosystem variables
# parFunList: a list constrained to contain only parDisFun objects.  The
# prototype wires a Poisson kernel (temperature) and a Gaussian kernel (plant).
# NOTE(review): the validity function never returns TRUE on success (S4
# validity functions should return TRUE or a character vector), and lengthx /
# lengthp use `1:1` where the scalar 1L is meant.
setClass("parFunList",
contains = "list",
prototype = list(new("parDisFun",function(x,p) rpois(1,p[[1]]),lengthx=1:1,lengthp=1:1,xname="Tx",pnames="TxPa"),
new("parDisFun",function(x,p) rnorm(1,p[["PlPl"]][1]+x[["Pl"]]*p[["PlPl"]][2],p[["PlPl"]][3]),lengthx=1:1,lengthp=1:1,xname=c("Pl"),pnames=c("PlPl"))
),
validity = function(object){
if (any(lapply(object,class)!="parDisFun")) stop("trying to create a funList where not all list components are parDisFun")}
)
# EcoModel: the ecosystem data array (inherits from "Data", defined elsewhere
# in this file/project) plus the kernel list, the variable-link adjacency
# matrix, and the per-link time delays.
setClass("EcoModel",
contains="Data",
slots=c(funs="parFunList",ecoLink="matrix",delays="matrix")
)
# First sketch of a "prior" class: a sampler function plus its parameters.
# NOTE(review): redefined with a different layout a few lines below; only one
# definition should survive.
setClass("prior",
contains = "function",
slots = c(priorParam="numeric")
)
pr1=new("prior",runif,priorParam=c(0,10))
# NOTE(review): base sample() on a function has no useful meaning here -- a
# sample method for "prior" is presumably intended but not defined yet.
sample(pr1)
pr1=new("prior",rnorm,priorParam=c(0,1))
pr2=new("prior",rnorm,priorParam=c(10,5))
# Second sketch of "prior": a list (min/max bounds + sampler).
# NOTE(review): the argument is `slot =` (singular) where setClass expects
# `slots =`, and the trailing comma leaves an empty argument -- this
# definition is broken scratch.
setClass("prior",
contains = "list",
slot = "FUN",
)
setClass("priorList",
contains = "list",
validity = function(object) {
# NOTE(review): no TRUE is returned for valid objects.
if (any(lapply(object,class)!="prior")) stop("priorList@funs did not contain list of prior")
}
)
# Build a per-parameter prior list from the true-parameter list `p`.
# NOTE(review): `p` is only defined further down in this file, so these lines
# fail if the file is run top to bottom.  The first construction uses the
# broken "prior" class; the plain-list version on the next line is the one
# actually used afterwards.
priorP <- new("priorList",lapply(p,function(p_i) {new("prior",list(PrMin=p_i*0.66,PrMax=p_i*1.5),FUN=runif)}))
priorP <- lapply(p,function(p_i) list(PrMin=p_i*0.66,PrMax=p_i*1.5,FUN=runif))
# PaPl is negative, so scaling by 0.66/1.5 inverts the bounds; swap them back.
mn=priorP$PaPl$PrMin
mx=priorP$PaPl$PrMax
priorP$PaPl$PrMin=mx
priorP$PaPl$PrMax=mn
names(priorP)=names(p)
# NOTE(review): `Papa` is a typo -- the list element is `PaPa` ($ partial
# matching hides the mistake by returning NULL).
priorP$Papa
x=priorP
# Draw one value from every (PrMin, PrMax, FUN) prior in a priorP-style list.
# NOTE(review): registering a "sample" method for signature "list" masks base
# sample() for ALL lists in the session -- a dedicated class (priorList) would
# be a safer dispatch target.
setMethod("sample",
signature = "list",
definition = function(x){
l <- lapply(x,function(x) x[[1]])
for (i in 1:length(l)){for (j in 1:length(l[[i]])) l[[i]][[j]]=x[[i]]$FUN(1,x[[i]][[1]][[j]],x[[i]][[2]][[j]]) }
l
})
p1=sample(priorP)
sample(pr2)
# Ecosystem state vector and true-parameter list used throughout the scratch
# code below (Pa=pest, Tx=temperature, Pl=plant, Pr=precipitation, Pe=pesticide).
x=c(Pa=1.5,Tx=20,Pl=0.5,Pr=2.5,Pe=0)
p=list(PaPa=c(rmax=10,K=20),TxPa=c(min=15,max=30),PrPa=c(min=3),PlPa=c(r=1),PrPl=c(rperPr=.5),TxPl=c(min=10,max=30),PaPl=c(r=-0.03),PlPl=c(r=2.5,K=2,sd=.1),PePa=c(r=0.1),PaPe=c(thr=1.5))
# NOTE(review): no "param" class is defined anywhere in view -- this line
# errors as written.
p=new("param",p)
# Pest-abundance kernel: Poisson draw whose rate combines pesticide reduction,
# temperature window, precipitation threshold, and plant availability; the
# rate is capped at the carrying capacity p[["PaPa"]]["K"].
Pafun <- new("parDisFun",function(x,p){
a=(x["Pa"]==0)+((!x["Pe"])+x["Pe"]*p[["PePa"]]["r"])*x["Pa"]*p[["PaPa"]]["rmax"]*((x["Tx"]>p[["TxPa"]]["min"])*(x["Tx"]<p[["TxPa"]]["max"]))*(x["Tx"]-p[["TxPa"]]["min"])/(p[["TxPa"]]["max"]-p[["TxPa"]]["min"])*(x["Pr"]>p[["PrPa"]]["min"])*(x["Pl"]*p[["PlPa"]])
rpois(1,a*(a<p[["PaPa"]]["K"])+p[["PaPa"]]["K"]*(a>=p[["PaPa"]]["K"]))
},lengthx=as.integer(5),lengthp=as.integer(5),xname=c("Tx","Pr","Pa","Pl","Pe"),pnames=c("TxPa","PrPa","PaPa","PlPa","PePa"))
#
#Plfun <- new("parDisFun",function(x,p){
#  a = ((x["Tx"]>p[["TxPl"]]["min"])*0.1)+x["Pl"]*p[["PlPl"]]["r"]*(((x["Pr"]>p[["PrPl"]])*(x["Tx"]>p[["TxPl"]]["min"]))*
#        (1+(T-p[["TxPl"]]["min"])/(p[["TxPl"]]["max"]-p[["TxPl"]]["min"]))*(x["Tx"]<p[["TxPl"]]["max"]))+x["Pa"]*(p[["PaPl"]]["r"])
#rnorm(1,a,p[["PlPl"]]["sd"]*(a>0))
# Plant-biomass kernel: growth driven by precipitation and a temperature
# window, reduced by pest pressure; the result is clamped to [0, 2].
# NOTE(review): `T` in the temperature term is R's TRUE (=1) unless a global
# T has been set (it is, to 20, by the scalar scratch further down the file)
# -- x["Tx"] was almost certainly intended; confirm.
# NOTE(review): when a exceeds the carrying capacity p[["PlPl"]]["K"] the
# value is hard-coded to 2 rather than to K itself.
Plfun <- new("parDisFun",function(x,p){
a=((x["Tx"]>p[["TxPl"]]["min"])*0.1)+x["Pl"]*p[["PlPl"]]["r"]*((((1+x["Pr"])*p[["PrPl"]])*(x["Tx"]>p[["TxPl"]]["min"]))*
(1+(T-p[["TxPl"]]["min"])/(p[["TxPl"]]["max"]-p[["TxPl"]]["min"]))*(x["Tx"]<p[["TxPl"]]["max"]))+x["Pa"]*(p[["PaPl"]]["r"])
if(a>p[["PlPl"]]["K"]) {a=2}
if(a<0) {a=0}
a
},lengthx=as.integer(4),lengthp=as.integer(4),xname=c("Tx","Pr","Pa","Pl"),pnames=c("TxPl","PrPl","PaPl","PlPl"))
# Pesticide-application kernel: deterministic trigger once pest abundance
# exceeds the threshold p[["PaPe"]] (the stochastic variant is kept commented).
Pefun <- new("parDisFun",function(x,p){
# runif(1,0,x["Pa"])>p[["PaPe"]]},lengthx=1:1,lengthp=1:1,xname=c("Pa"),pnames=c("PaPe")
x["Pa"]>p[["PaPe"]]},lengthx=1:1,lengthp=1:1,xname=c("Pa"),pnames=c("PaPe")
)
# Temperature observation kernel: the indicator is the true temperature Tx
# plus Gaussian noise with standard deviation p["sd"].
iTfun <- new("parDisFun",function(x,p){
# FIX: the xname slot declares the input variable as "Tx", but the body
# indexed x["iT"], which is always NA when the kernel is called with an x
# vector named after xname (as simulate() does elsewhere in this file).
rnorm(1,x["Tx"],p["sd"])
},lengthx=1:1,lengthp=1:1,xname=c("Tx"),pnames=c("sd"))
# Assemble the kernel list and the model object.
# NOTE(review): `edata`, `ecoLink` and `ecoLinkTime` must be defined elsewhere
# in the file for this to run.
modlfun =new("parFunList",list(Pe=Pefun,Pa=Pafun,Pl=Plfun))
modl=new("EcoModel",edata,funs=modlfun,ecoLink=ecoLink,delays=ecoLinkTime)
object=modl
modl@funs
names(modl@funs)
dimnames(modl@.Data[,,1])
p
modl
# NOTE(review): malformed -- setClass takes neither `signature` nor
# `definition` (those belong to setMethod), and a comma is missing after the
# signature line, so this does not even parse.  Probably a setGeneric/
# setMethod pair named "priorize" was intended.
setClass("priorize",
signature = c("EcoModel","prior")
definition = function(object){
}
)
# Generic for forward simulation of an EcoModel under a parameter list.
setGeneric(name="simulate",
def= function(object,p) { return(standardGeneric("simulate"))}
)
object=modl
# NOTE(review): setClass is the wrong verb here (setMethod is meant); this
# stray definition is superseded by the correct setMethod("simulate", ...)
# immediately below.
setClass("simulate",
signature = c(object="EcoModel",p="list"),
definition=function(object,p) object*unlist(p))
# Forward simulation: for every population (3rd dim) and every period after
# the longest delay, evaluate each kernel on its lagged input variables
# (selected via @ecoLink, lagged via @delays) and write the result back into
# the data array.  Returns the filled-in EcoModel.
setMethod("simulate",
signature=c(object="EcoModel",p="list"),
definition = function(object,p){
for (pop in 1:dim(object)[3]){
for (per in (max(object@delays[,names(object@funs)])+1):dim(object)[1]){
for (Funi in 1:length(object@funs)){
Fun <- object@funs[[Funi]]
nameFuni <- names(object@funs)[Funi]
# tmp=
# if (per>tmp) if (any(is.na(object@.Data[(per-tmp):(per-1),Fun@xname,pop]))) stop("data missing for simulation of data coordinates: per=",(per-tmp):(per-1)," dep.var=",Fun@xname," population=",pop) else {
# gather this kernel's inputs: one lagged value per linked variable
tmp2 <- unlist(lapply(which(object@ecoLink[,nameFuni]),FUN=function(i) object@.Data[per-object@delays[i,nameFuni],i,pop]))
# dati <- object@.Data[,,pop]
object@.Data[per,nameFuni,pop] <- object@funs[[Funi]](tmp2,p)
#print(paste(nameFuni,object@funs[[Funi]](tmp2,p)))
}
}
}
object
}
)
# NOTE(review): unfinished copy of the simulate method, presumably the start
# of a likelihood ("probability") computation.  As written it is broken:
# "probablity" is misspelled, no generic of that name exists, the signature
# uses the unquoted symbol `list`, and the body is passed as a bare for-loop
# instead of `definition = function(object, p) {...}` -- so this call fails.
setMethod("probablity",
signature=c(object="EcoModel",p=list),
for (pop in 1:dim(object)[3]){
for (per in (max(object@delays[,names(object@funs)])+1):dim(object)[1]){
for (Funi in 1:length(object@funs)){
Fun <- object@funs[[Funi]]
nameFuni <- names(object@funs)[Funi]
# tmp=
# if (per>tmp) if (any(is.na(object@.Data[(per-tmp):(per-1),Fun@xname,pop]))) stop("data missing for simulation of data coordinates: per=",(per-tmp):(per-1)," dep.var=",Fun@xname," population=",pop) else {
tmp2 <- unlist(lapply(which(object@ecoLink[,nameFuni]),FUN=function(i) object@.Data[per-object@delays[i,nameFuni],i,pop]))
# dati <- object@.Data[,,pop]
object@.Data[per,nameFuni,pop] <- object@funs[[Funi]](tmp2,p)
#print(paste(nameFuni,object@funs[[Funi]](tmp2,p)))
}
}
}
)
object=modl
sim <- list()
plist=list()
# Prior-predictive runs: draw a parameter set, simulate, save each pair to
# its own .RData file in the working directory.
for (i in 2:100){
plist <- sample(priorP)
sim=simulate(modl,plist)
plsim=list(p=plist,sim=sim)
save(plsim,file = paste("pAndEcosim",i,".RData",sep=""))
}
plsim[[1]]
# NOTE(review): everything from here to the closing brace below is abandoned
# scratch -- the function body contains bare symbols (`lapply`, `simulate`,
# `param`), references to loop variables (Funi, per, pop) that are never
# bound, is never closed properly, and `for ()` a few lines down is a parse
# error.  It should be deleted or rewritten.
simul <- function(object,p){
lapply
Fun <- object@funs[[Funi]]
nameFuni <- names(object@funs)[Funi]
# tmp=
# if (per>tmp) if (any(is.na(object@.Data[(per-tmp):(per-1),Fun@xname,pop]))) stop("data missing for simulation of data coordinates: per=",(per-tmp):(per-1)," dep.var=",Fun@xname," population=",pop) else {
tmp2 <- unlist(lapply(which(object@ecoLink[,nameFuni]),FUN=function(i) object@.Data[per-object@delays[i,nameFuni],i,pop]))
# dati <- object@.Data[,,pop]
object@.Data[per,nameFuni,pop] <- object@funs[[Funi]](tmp2,p)
simulate
param
class(p)
a= array(1:24,dim=c(2,3,4),dimnames=list(1:2,1:3,1:4))
for ()
fun= object@funs[[1]]
class(fun)
a=NULL;i=0
# Inspect each kernel of the model (prints in French: "the class is ...").
for (fun in object@funs){i=i+1
print(fun)
print(paste("LA CLASSE C'EST", class(fun)))
print(paste("Le name C'EST", names(fun)))
}
names(object@funs)
# Scalar (single-step) prototype of the ecosystem update, using plain globals.
# NOTE(review): this assigns T=20, masking R's TRUE shorthand for the rest of
# the session -- the kernels above that reference bare `T` are affected.
Pa=1.5;T=20;Pl=0.5;H=92;Pe=0
p_PaPa=c(rmax=10,K=20);p_TPa=c(min=15,max=30);p_HPa=c(min=90);p_PlPa=c(r=1)
p_HPl=c(min=50);p_TPl=c(min=10,max=30);p_PaPl=c(r=-0.03);p_PlPl=c(r=1.05,sd=.1)
p_PePa=c(r=.1)
#
# Pest update: rate shaped by pesticide, temperature window, humidity and
# plant availability, Poisson-sampled with a cap at carrying capacity.
a=((!Pe)+Pe*p_PePa)*Pa*p_PaPa[1]*((T>p_TPa[1])*(T<p_TPa[2]))*(T-p_TPa[1])/(p_TPa[2]-p_TPa[1])*(H>p_HPa[1])*(Pl*p_PlPa)
Pa = rpois(1,a*(a<p_PaPa[2])+p_PaPa[2]*(a>=p_PaPa[2]))
# Pa if no pesticide (orig: "Pa si pas de pesticide pas")
Pl = ((T>p_TPl[1])*0.1)+Pl*p_PlPl[1]*(((H>p_HPl)*(T>p_TPl[1]))*
(1+(T-p_TPl[1])/(p_TPl[2]-p_TPl[1]))*(T<p_TPl[2]))+Pa*(p_PaPl)
Pl = rnorm(1,Pl*(Pl>0),p_PlPl[2]*Pl*(Pl>0))
Pe = (Pa>=p_PePa)
Pe;Pa;Pl
# NOTE(review): broken placeholder -- `slot=` should be `slots=` with named
# types, and "" is not a valid slot specification.
setClass("model",
slot=c("parFunList","ecoLink",""))
Model <- function(param,linkH,linkHI,paramx0){
}
# Simulate climate data
# ---------------------------------------------------------------------------
# Builds Njours days of synthetic daily precipitation and mean temperature,
# first-year monthly aggregates, then a per-year sowing-date / crop-growth
# simulation driven by rainfall and temperature thresholds.
Njours=900
nbjour_mois <- c(31,28,31,30,31,30,31,31,30,31,30,31)
# cumulative day-of-year index at each month end (cumsum replaces the loop)
cumul_jour <- cumsum(nbjour_mois)
precmoyparmois <- c(54 ,46 ,50 ,44 ,58 ,56 ,53 ,51 ,56 ,57 ,58, 54)
# daily rainfall: Poisson with month-specific daily mean; the 365-day lambda
# vector is recycled over the Njours days
prec <- rpois(Njours,rep(precmoyparmois/nbjour_mois,nbjour_mois ))
# monthly rainfall totals of the first year
# FIX: months 2..12 previously started at cumul_jour[mois-1], double-counting
# the last day of the preceding month
precparmois <- sum(prec[1:cumul_jour[1]])
for (mois in 2:12) precparmois[mois] <- sum(prec[(cumul_jour[mois-1]+1):cumul_jour[mois]])
tmeanmoyparmois <- c(3.3, 4.2, 7.8, 10.8, 14.3, 17.5, 19.4, 19.1, 16.4, 11.6, 7.2, 4.2 )
tmeanmoyjours <- rep(tmeanmoyparmois,nbjour_mois)
# autocorrelated daily temperature: average of yesterday's value (plus noise)
# and the seasonal expectation for that day of year
tmean <- rnorm(1,tmeanmoyjours[1],3)
for (j in 2:Njours) tmean[j] <- (tmean[j-1]+ rnorm(1,0,3) + tmeanmoyjours[((j-1)%%365)+1])/2
tmeanparmois <- mean(tmean[1:cumul_jour[1]])
for (mois in 2:12) tmeanparmois[mois] <- mean(tmean[(cumul_jour[mois-1]+1):cumul_jour[mois]])
plantgrowth <- NULL
for (year in 0:2) {
  planted <- FALSE
  joursseuil=5    # temperature look-back window (days)
  tempseuil=9     # mean-temperature threshold for sowing (deg C)
  precseuil=5     # rainfall threshold over the look-back window
  joursseuilP=5   # rainfall look-back window (days)
  day=1
  # Find the sowing day: enough recent rain AND warm enough recent days.
  # Guarded so the indices never run past the simulated series.
  # FIX: the temperature window previously indexed tmean[(year*365):...],
  # which silently drops index 0 in year 0 and misaligns later years.
  while (!planted && (year*365+day) < Njours) {
    pwin <- (year*365+day-{if (day<=joursseuilP) day-1 else joursseuilP}):(year*365+day)
    twin <- (day-{if (day<=joursseuil) day-1 else joursseuil}):day
    planted <- (sum(prec[pwin])>precseuil) && (mean(tmean[year*365+twin])>tempseuil) && (day>joursseuil)
    day=day+1
  }
  # Accumulate growth on sufficiently wet days until the crop matures.
  # FIX: the original fused `growth=0 while (...)` on one line (a syntax
  # error), indexed tmean with () instead of [], and never advanced `day`,
  # which could loop forever; bounded and incremented here.
  growth=0
  while (growth<90 && (year*365+day)<=Njours) {
    pwin <- (year*365+day-{if (day<=joursseuilP) day-1 else joursseuilP}):(year*365+day)
    growth = growth + (sum(prec[pwin])>precseuil)*tmean[year*365+day]/30
    day=day+1
  }
  # record the day index at which maturity (or the end of the series) was reached
  plantgrowth[year+1] <- day
}
# Exploratory toy series: X2 is a seasonal sine plus noise; X3 integrates a
# 10-day-lagged copy of X2 and is reset to zero after day 100 of each year.
X2 <- sin((1:Njours)*2*pi/365)*12 + rnorm(Njours,14,5)
tX3<-tX2 <- 1:Njours
plot(tX2,X2)
X3=NULL
X3[1]=0
for(i in (2:Njours)) {
if (i>10) X3[i]=(X3[i-1]+(X2[i-10] + abs(rnorm(1,0,1)))/100) else X3[i]=X3[i-1] + abs(rnorm(1,0,1))/100
if ((i%%365)>100) X3[i]=0
}
par(mfrow=c(1,2))
plot(tX3,X3)
plot(tX2,X2)
a <- sapply(1:5,FUN=function(x) x+1)
# NOTE(review): assigning to `F` masks R's FALSE shorthand; the function also
# reads the globals X2/X3 rather than taking them as arguments.
F=function(x)
{
if (x>10) (X3[x-1]+(X2[x-10] + rnorm(1,0,4))/100)
else (X3[x-1] + rnorm(1,0,4)/100)
}
# NOTE(review): truncated line -- no assignment target and the matrix() call
# is unfinished; this is a parse error and must be completed or removed.
<- as.data.frame(matrix(runif(800,1,40),nrow=
### Start PlaNet
# continuous-time series
# cSTF: a continuous-time spatio-temporal frame -- a vector of observation
# instants (@times) paired row-wise with spatial coordinates (@coord).
cSTF <- setClass("cSTF",
slots=c(times=c("POSIXct"),coord="matrix"),
validity = function(object){
# one coordinate row per observation instant
if (length(object@times)!=nrow(object@coord)) {
return("length of @times vector differs from number of rows in @coord matrix")
}
# FIX: an S4 validity function must return TRUE when the object is valid
# (and a character message -- not stop() -- when it is not).
TRUE
}
)
# number of observation instants
setMethod("length",
signature="cSTF",
definition=function(x){length(x@times)})
# dimensions of the coordinate matrix (observations x spatial dimensions)
setMethod("dim",
signature="cSTF",
definition=function(x){dim(x@coord)})
# Typed continuous spatio-temporal series: a cSTF frame plus one observed
# value per time point -- one subclass per storage type.
intCSTS <- setClass("intCSTS",
contains="cSTF",
slots=c(values="integer"),
validity = function(object){
if (length(object@values)!=length(object)) return("length of cSTF vector differs from length of @values list")
# FIX: a validity function must return TRUE when the object is valid
TRUE
}
)
exiCSTS=new("intCSTS",new("cSTF",times=c(as.POSIXct("2018-10-13 14:28:22",tz="America/Bogota"),as.POSIXct("2018-10-13 14:28:22",tz="America/Bogota")),coord=as.matrix(data.frame(X=c(1.2,1.4),Y=c(3.6,-1)))),values=as.integer(c(4,5)))
charCSTS <- setClass("charCSTS",
contains="cSTF",
slots=c(values="character"),
validity = function(object){
if (length(object@values)!=length(object)) return("length of cSTF vector differs from length of @values list")
TRUE
}
)
# NOTE(review): the timezone appended inside the datetime strings below seems
# to be ignored by as.POSIXct (trailing text after the parsed format); prefer
# the explicit tz= argument used for exiCSTS -- confirm.
excCSTS=new("charCSTS",new("cSTF",times=c(as.POSIXct("2018-10-13 14:28:22 America/Bogota"),as.POSIXct("2018-10-13 14:28:22 America/Bogota")),coord=as.matrix(data.frame(X=c(1.2,1.4),Y=c(3.6,-1)))),values=c("e","r"))
logicCSTS <- setClass("logicCSTS",
contains="cSTF",
slots=c(values="logical"),
validity = function(object){
if (length(object@values)!=length(object)) return("length of cSTF vector differs from length of @values list")
TRUE
}
)
exlCSTS=new("logicCSTS",new("cSTF",times=c(as.POSIXct("2018-10-13 14:28:22 America/Bogota"),as.POSIXct("2018-10-13 14:28:22 America/Bogota")),coord=as.matrix(data.frame(X=c(1.2,1.4),Y=c(3.6,-1)))),values=c(T,F))
numCSTS <- setClass("numCSTS",
contains="cSTF",
slots=c(values="numeric"),
validity = function(object){
if (length(object@values)!=length(object)) return("length of cSTF vector differs from length of @values list")
TRUE
}
)
exnCSTS=new("numCSTS",new("cSTF",times=c(as.POSIXct("2018-10-13 14:28:22 America/Bogota"),as.POSIXct("2018-10-13 14:28:22 America/Bogota")),coord=as.matrix(data.frame(X=c(1.2,1.4),Y=c(3.6,-1)))),values=c(0.88,3.54))
# CSTS : list of Continuous-SpatioTemporal Series
# A heterogeneous collection of *CSTS series; the validity only enforces that
# all series share the same coordinate column names.
CSTS <- setClass("CSTS",
contains="list",
validity = function(object){
# if (!all(lapply(object,class)%in%c("numCSTS","intCSTS","logicCSTS","charCSTS"))) stop("trying to contrust a TimeSeries object with a list conaining objects other than numCSTS, or intCSTS, or charCSTS or logicCSTS")
# NOTE(review): no TRUE is returned for valid objects (S4 validity contract).
if (!all(sapply(lapply(object,function(x){colnames(x@coord)}),FUN=identical,colnames(object[[1]]@coord)))) stop("the coordinates colnames differ among time series in CSTS object contruction")
}
)
# User-facing constructor: wraps each raw element (a list of values / times /
# coords, or a matrix) into the matching typed *CSTS object.
CSTS <- function(X,name=NA){
#note if X is a matrix, col 1 contains values, col 2 times, and col 3.. coordinates
# if X is a list, fisrt element contains values, second elt contains times, and other elements coordinates
# NOTE(review): the matrix branch indexes X[i, 2] etc. on the OUTER list X,
# not on the matrix X[[i]] -- it cannot work as written; only the list branch
# is exercised by the example below.
if (any(is.null(names(X)))) stop("requires names for the list as variables names in CSTS constructor")
if (class(X)== "list") {
for (i in 1:length(X)) {
if (!(class(X[[i]])%in%c("numCSTS","intCSTS","logicCSTS","charCSTS"))) {
X[[i]] = switch(class(X[[i]]),
matrix = new("numCSTS",new("cSTF",times=X[i,2],coord=X[i,3:ncol(X)]),values=X[i,1]),
list = switch(class(X[[i]][[1]]),
character= new("charCSTS",new("cSTF",times=X[[i]][[2]],coord=(X[[i]][[3]])),values=X[[i]][[1]]),
numeric= new("numCSTS",new("cSTF",times=X[[i]][[2]],coord=(X[[i]][[3]])),values=X[[i]][[1]]),
integer=new("intCSTS",new("cSTF",times=X[[i]][[2]],coord=(X[[i]][[3]])),values=X[[i]][[1]]),
logical= new("logicCSTS",new("cSTF",times=X[[i]][[2]],coord=(X[[i]][[3]])),values=X[[i]][[1]]))
)
}
}
new("CSTS",X)
}
else stop("needs a list as argument")
}
c(as.POSIXct("2018-10-12 20:45:12"),as.POSIXct("2018-10-11 20:45:12"))
# Example collection: a character series (land type), a numeric series (mean
# temperature) and a logical series (presence), sharing the same coordinates.
x=object=exCSTS <- CSTS(list(landtype=list(values=c("farm","road","wood",'wood'),times=c(as.POSIXct("2018-10-13 14:28:22",tz="America/Bogota"),as.POSIXct("2018-10-12 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-10 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-13 14:25:22",tz="America/Bogota")),coord=as.matrix(data.frame(x1=c(1,2,.4,.5),x2=c(2,3,3.4,.5)))),
tmean=list(c(10,15,7,8),c(as.POSIXct("2018-10-13 14:28:22",tz="America/Bogota"),as.POSIXct("2018-10-12 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-10 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-13 14:25:22",tz="America/Bogota")),as.matrix(data.frame(x1=c(1,2,.4,.5),x2=c(2,3,3.4,.5)))),
present=list(c(T,F,F,T),c(as.POSIXct("2018-10-13 14:28:22",tz="America/Bogota"),as.POSIXct("2018-10-12 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-10 15:28:24",tz="America/Bogota"),as.POSIXct("2018-10-13 14:25:22",tz="America/Bogota")),as.matrix(data.frame(x1=c(1,2,.4,.5),x2=c(2,3,3.4,.5))))))
class(exCSTS)
min(exCSTS[[1]]@times)
# Earliest observation instant across every series of a CSTS collection.
# The values are round-tripped through as.character before comparison, as in
# the original (which normalises the POSIXct representations first).
setMethod(min,
          signature="CSTS",
          definition=function(x){
            first_times <- vapply(x, function(series) as.character(min(series@times)), character(1))
            min(as.POSIXct(first_times))
          })
min(exCSTS)
# Latest observation instant across every series of a CSTS collection.
setMethod(max,
          signature="CSTS",
          definition=function(x){
            last_times <- vapply(x, function(series) as.character(max(series@times)), character(1))
            max(as.POSIXct(last_times))
          })
max(exCSTS)
# NOTE(review): unfinished -- the final line passes the bare keyword
# `function` and the undefined symbols `type` / `...` to vapply, which is a
# parse error; the df built on the previous line is also discarded.  The
# method needs to be completed or removed.
setMethod("as.data.frame",
signature="CSTS",
definition=function(x){
df<-as.data.frame(lapply(x,FUN=function(x) x@values))
vapply(x, function, FUN.VALUE = type, ...)
})
# Discretise a CSTS collection onto a regular time grid (second/minute/hour/
# day/month/year): compute aligned start/end instants, build a timeFrame,
# cut it into intervals, then bucket each series' observations per interval.
# NOTE(review): this whole method is abandoned scratch.  No `discretize`
# generic is defined; `tf` is built as "timeFrame" but uses refTimeFrame
# slots; from the first `for (time in cutf)` onward the braces are
# unbalanced, switch() is malformed, `listOfList` vs `listOflist` differ in
# case, and the trailing lines after the method (referenceTime/timepoints)
# do not parse.  Kept verbatim for reference only.
setMethod(discretize,
signature="CSTS",
definition = function(x,unit="day",tZ=Sys.timezone()){
start = min(x)
finish= max(x)
# align the grid start downward to the chosen unit boundary
starting=as.POSIXct(switch(unit,
second = start,
minute= as.Date(start,tz=tZ)+period(hour(start),"hours")+period(minute(start),"minute"),
hour= as.Date(start,tz=tZ)+period(hour(start,tz=tZ),"hours"),
day=as.Date(start,tz=tZ)+period(1,"hours")-period(1,"hour"),
month=as.Date(start,tz=tZ)-day(start,tz=tZ)+period(1,'days'),
year=((as.Date(start,tz=tZ)-day(start,tz=tZ)+period(1,"days"))-period(month(start)-1,"month"))
),tz=tZ)
# align the grid end upward to the next unit boundary
finished=as.POSIXct(switch(unit,
second=finish+period(1,"second"),
minute=as.Date(finish)+period(hour(finish),"hours")+period(minute(finish)+1,"minute"),
hour=as.Date(finish)+period(hour(finish)+1,"hours"),
day=as.Date(finish)+1+period(1,"hours")-period(1,"hours"),
month=period(1,"month")+as.Date(finish)-day(finish)+period(1,'days'),
year=period(1,"year")+(as.Date(finish)-day(finish)+period(1,"days"))-period(month(finish)-1,"month")
),tz=tZ)
len <- as.integer(as.period(finished-starting,unit)/period(1,unit))
tf <- new("timeFrame",starting=starting,finished=as.POSIXct(finished),period=period(1,unit), length=len)
cutf <- cut(tf)
listOflist <- list()
listOflist<-lapply(cutf[-length(cutf)],function(x) {x=list(x);names(x)="times";x})
dataF <- as.data.frame(matrix(NA,ncol=length(x),nrow=len))
colnames(dataF) <- names(x)
# bucket each observation into the interval its timestamp falls in
for (vari in names(x)) {
listOflist[[vari]]<-NULL
for (element in 1:length(x[[vari]])){
listOflist[[sum(x[[vari]]@times[element]>cutf)]][[vari]]<-append(listOflist[[sum(x[[vari]]@times[element]>cutf)]][[vari]],x[[vari]]@values[element])
}
}
# derive output column names per series type (counts, levels, etc.)
columnsOfDataFrame <- NULL
for (vari in names(x)){
columnsOfDataFrame <- append(columnsOfDataFrame,switch(class(x[[vari]]),
intCSTS=c(paste(vari,".n",sep=""),paste(vari,".rep",sep="")),
charCSTS=paste(vari,levels(as.factor(x[[vari]]@values)),sep="."),
logicCSTS=c(paste(vari,".n",sep=""),paste(vari,".k",sep="")),
numCSTS=vari))
}
dataF <- data.frame(matrix("",ncol=length(columnsOfDataFrame)))
colnames(dataF)<- columnsOfDataFrame
for (time in cutf){
{
for(vari in names(x)){
for (col in grep(vari,columnsOfDataFrame,value=TRUE)){
}
}
switch(class(x[[vari]]),
intCSTS=,
charCSTS={for (state in levels(x[[vari]]@values))
},
){
}
for (time in cutf){
dataF[dataF$times==time,]
}
}
dataF[,vari]
}
for (col in colnames(dataF))
for (elt in 1:length(listOfList)){
df
}
}
referenceTime <- referenceTime - start
{(referenceTime-start)/period referenceTime <- referenceTime }referenceTime <- referenceTime - start
timepoints <-
if (referenceTime>=start) starting=referenceTime-ceiling(abs(referenceTime-start))
if (referenceTime<start) starting=referenceTime+floor(abs(referenceTime-start))
finished=starting+ceiling(finish-start)
})
x=exCSTS
# discetre-time series #
#######################
library(lubridate)
# timeFrame: an abstract regular time grid -- a lubridate Period step and the
# number of steps.
timeFrame <- setClass("timeFrame",
slots=c(period="Period", length="integer"),
# spell out `units` (the original partially matched it as `unit`)
prototype=prototype(period=period(num = 1,units = "months"),length=as.integer(6)))
# refTimeFrame: a timeFrame anchored in calendar time; @finished must equal
# @starting advanced by @length periods.
refTimeFrame <- setClass("refTimeFrame",
contains="timeFrame",
slots = c(starting=c("POSIXct"),finished=c("POSIXct")),
prototype = prototype(new("timeFrame",period=period(num = 1,units="seconds"),length=as.integer(6)),starting=as.POSIXct("2011-06-10 08:06:35"),finished=as.POSIXct("2011-06-10 08:06:41")),
validity = function(object){
if ((object@starting)+object@period*(object@length)!=object@finished) {
# FIX: a validity function must return the problem description (or TRUE)
# rather than call stop(); validObject() raises the error itself.
return(paste("incompatibility between starting, finished and period values","\n (object@starting)+object@period*(object@length) =",(object@starting)+object@period*(object@length),"\n object@finished =",object@finished))
}
TRUE
}
)
# Example frames: 6 seconds, 5 years, 6 years, 9 days.  In each case
# starting + period*length must equal finished or validity rejects it.
object=tf6s=new("refTimeFrame",starting=as.POSIXct("2011-06-10 08:06:35"),finished=as.POSIXct("2011-06-10 08:06:41"),period=period(num = 1,units="seconds"),length=as.integer(6))
# NOTE(review): tf6s is reassigned here to a 5-year frame -- the name no
# longer matches its content.
object=tf6s=new("refTimeFrame",starting=as.POSIXct("2012-06-10 08:06:35"),finished=as.POSIXct("2017-06-10 08:06:35"),period=period(num = 1,units="years"),length=as.integer(5))
object=tf6y=new("refTimeFrame",starting=as.POSIXct("2011-01-01"),finished=as.POSIXct("2017-01-01"),period=period(num = 1,units="years"),length=as.integer(6))
object=new("refTimeFrame",starting=ymd_hms("2011-06-10-08-06-35"),finished=ymd_hms("2011-06-19-08-06-35"),period=period(1,"day"),length=as.integer(9))
# Number of periods in a timeFrame.
setMethod(length,
          signature = "timeFrame",
          definition = function(x) x@length)
length(object)
# The anchoring instants of a refTimeFrame, as a named POSIXct pair.
setMethod(range,
          signature = "refTimeFrame",
          definition = function(x) c(starting = x@starting, finished = x@finished))
range(object)
# Break instants of a refTimeFrame: the start, then the start advanced by
# 1..length periods (built by successive appends, as in the original).
setMethod("cut",
          signature = "refTimeFrame",
          definition = function(x) {
            breaks <- range(x)[1]
            for (step in 1:length(x)) {
              breaks <- append(breaks, breaks[1] + x@period * step)
            }
            breaks
          })
cut(object)
# discrete time series DTS
# Load the maize training data set.
# NOTE(review): setwd() to an absolute personal path makes this script
# non-portable; prefer relative paths or a project-root helper.  `data` also
# masks utils::data().
setwd("/home/dupas/BIEN/")
data <- read.table("data/TrainingDataSet_Maize.txt")
head(data)
coln <- colnames(data)
# base variable names: strip the trailing "_<time>" suffix from column names
coln <- levels(as.factor(unlist(lapply(strsplit(colnames(data),"\\_"),function(x){x[[1]]}))))
?strsplit
# dataList: a list of data.frames that all share the same column names
# (one data.frame per observation unit, e.g. one per district x year).
dataList <- setClass("dataList",
                     contains="list",
                     validity = function(object){
                       if (!all(vapply(object, function(el) inherits(el, "data.frame"), logical(1)))) {
                         return("dataList constructor received something else than a list of data frames")
                       }
                       # FIX: the original compared against colnames(object)[[1]];
                       # colnames() of a plain list is NULL, so that subscript
                       # crashed for every non-empty dataList.  The reference
                       # column names live on the FIRST data frame.
                       if (length(object) > 0) {
                         ref <- colnames(object[[1]])
                         if (!all(vapply(object, function(el) identical(colnames(el), ref), logical(1)))) {
                           return("colnames of data.frames in a dataList should be identical")
                         }
                       }
                       TRUE
                     })
# Constructor turning a wide data.frame (one column per variable x time, e.g.
# "yield_3") into a dataList: one data.frame per row-tag, rows indexed by the
# time suffix, tag columns (listColTag) repeated on every row.  Optionally
# links each unit to its previous year's value of connectVar.
# NOTE(review): this function shadows the S4 generator `dataList` defined
# just above -- intentional here, but worth renaming one of the two.
dataList <- function(x,timeByCol=TRUE,sep="_",listColTag=c("yearHarvest","NUMD","IRR"),timeByRowNColInfo=NULL,connectivity=list(type="temporal",tempVar="yearHarvest",labelVar="NUMD",connectVar="yieldAnomaly",connectRow=9)){
# timeByRowNcolInfo = list(cols="yearHarvest",colsBy="year",rows="_x",rowsBy="month")
if (class(x)=="list") return(new("dataList",x))
if ((class(x)=="data.frame")&(!timeByCol)) return(new("dataList",list(x)))
# NOTE(review): this branch is empty -- timeByRowNColInfo is not implemented.
if ((class(x)=="data.frame")&(!is.null(timeByRowNColInfo))){
}
if ((class(x)=="data.frame")&(timeByCol)){
dataL=list()
colnlist = strsplit(colnames(x),sep)
coln <- levels(as.factor(unlist(lapply(strsplit(colnames(x),sep),function(x){x[[1]]}))))
lastElt = lapply(colnlist,FUN=function(x) x[[length(x)]])
# warnings suppressed while probing which suffixes are numeric time indices
options(warn=-1)
times=as.numeric(levels(as.factor(unlist(lastElt[which(!is.na(as.numeric(lastElt)))]))))
options(warn=0)
times=times[order(times)]
for (tag in 1:nrow(x)){
df0=x[tag,]
dfn=data.frame(matrix(ncol=length(coln),nrow=length(times),dimnames=list(times,coln)))
dfn[,coln[which(coln%in%listColTag)]] <- df0[,coln[which(coln%in%listColTag)]]
for (i in times){
for (j in coln[!(coln%in%listColTag)]){
if (paste(j,i,sep="_")%in%colnames(x)) dfn[i,j]=df0[,paste(j,i,sep="_")]
if (j%in%colnames(x)) dfn[i,j]=df0[i,j]
}
}
dataL[[paste(listColTag,x[tag,listColTag],sep="",collapse="_")]]=dfn
}
# NOTE(review): the field is created as connectivity$type but read here as
# connectivity$typ -- $typ is NULL, so if(NULL == "temporal") errors with
# "argument is of length zero"; fix the name before relying on this branch.
if (connectivity$typ=="temporal"){
for (i in 1:length(dataL)){
previousValue = unlist(lapply(dataL,function(dal){
if ((dal[1,connectivity$tempVar] == dataL[[i]][1,connectivity$tempVar]-1)&(dal[1,connectivity$labelVar] == dataL[[i]][1,connectivity$labelVar])) dal[connectivity$connectRow,connectivity$connectVar] else NULL}))
if (is.null(previousValue)) dataL[[i]][,paste("past",connectivity$connectVar,sep="_")]<-NA else dataL[[i]][,paste("past",connectivity$connectVar,sep="_")]<-previousValue
# NOTE(review): the three lines below are unfinished scratch -- `pv` and
# `ePreviousValue` are undefined and the assignment duplicates the one above.
fivePreviousValue=NULL
ePreviousValue=append(fivePreviousValue,pv)
if (is.null(previousValue)) dataL[[i]][,paste("past",connectivity$connectVar,sep="_")]<-NA else dataL[[i]][,paste("past",connectivity$connectVar,sep="_")]<-previousValue
}
}
}
return(new("dataList",dataL))
}
# NOTE(review): this closing brace is unbalanced relative to the function
# opening -- check the brace structure of the whole constructor.
}
# Build the dataList from the maize table and cache it to disk.
dl <- dataList(data,timeByCol=TRUE,sep="_",listColTag=c("yearHarvest","NUMD","IRR"))
save(dl,file = "data.list.RData")
load(file = "data.list.RData")
# Flatten to a 3-d array: period x variable x unit.
# NOTE(review): dim= is given a list where an integer vector is expected, and
# the month labels skip OCT ("...SEP","NOV") while "MAI" mixes French --
# confirm both before using this array.
df <- array(unlist(dl),dim=list(dim(dl[[1]])[1],dim(dl[[1]])[2],length(dl)),dimnames = list(c("JAN","FEB","MAR","APR","MAI","JUN","JUL","AUG","SEP","NOV"),colnames(dl[[1]]),names(dl)))
df[,,1]
?save
save(df,file="yield.data.RData")
# Small fixtures used while testing the constructor's non-wide branches.
x=data.frame(X1=c("a","b","c","d","e","f"),X2=c(1,2,3,4,5,6))
bycol=FALSE
obj=dataList(x=data.frame(X1=c("a","b","c","d","e","f"),X2=c(1,2,3,4,5,6)),timeByCol=FALSE)
# dataList accessors: a dataList is homogeneous by construction, so all
# metadata can be read off its first data frame.
setMethod("variable.names",
          signature = "dataList",
          definition = function(object) colnames(object[[1]]))
setMethod("nrow",
          signature = "dataList",
          definition = function(x) nrow(x[[1]]))
setMethod("dim",
          signature = "dataList",
          definition = function(x) dim(x[[1]]))
# Quick accessor checks on the maize dataList.
variable.names(dl)
nrow(dl)
dim(dl)
length(dl)
# One "month" step of 30d 9h 36m x 10 steps spans exactly Jan 1 -> Nov 1
# (304 days), matching starting/finished as refTimeFrame validity requires.
dl_TimeFramed <-new("refTimeFrame",starting=as.POSIXct("2018-01-01"),finished=as.POSIXct("2018-11-01"),period=period(36,"minutes")+period(9,"hours")+period(30,"days"),length=as.integer(10))
# DTS: a discrete time series -- a refTimeFrame plus a dataList of per-unit
# data frames and the names of the tracked variables.
DTS <- setClass("DTS",
contains = "refTimeFrame",
slots=c(variables="character",data="dataList"),
validity = function(object){
if (!all(object@variables%in%variable.names(object@data))) return("not all variables slot are in data slot colnames")
if (length(object)!=nrow(object@data)) return("timeFrame and number of row in data are different")
# FIX: a validity function must return TRUE when the object is valid
TRUE
}
)
# NOTE(review): the second construction passes a plain data.frame where the
# @data slot requires a dataList, so it is expected to fail; all three are
# kept as exploratory examples.
object=dTS <- new("DTS",tf6y,variables=c("X1","X2"),data=dataList(data.frame(X1=c("a","b","c","d","e","f"),X2=c(1,2,3,4,5,6))))
object=dTS <- new("DTS",tf6y,variables=c("X1","X2"),data=data.frame(X1=c("a","b","c","d","e","f"),X2=c(1,2,3,4,5,6)))
object=dTS <- new("DTS",dl_TimeFramed,variables=variable.names(dl),data=dl)
# listDTS: a plain list intended to collect DTS objects (no constraints yet).
listDTS <- setClass(
  "listDTS",
  contains = "list"
)
# names() of a DTS returns its variable names.
setMethod("names",
signature="DTS",
definition=function(x){
x@variables
})
# Attach the break instants as a `times` column.
# NOTE(review): x@data is a dataList (a list of data frames), so cbind()ing
# it with a single data.frame is unlikely to give the intended flat table --
# confirm against dfd/dfe below.
setMethod("as.data.frame",
signature="DTS",
definition=function(x){
cbind(x@data,data.frame(times=cut(x)[-length(cut(x))]))
})
dfd=as.data.frame(dTS)
# duplicate registration of the names() method (identical definition)
setMethod("names",signature="DTS",definition = function(x){x@variables})
names(dTS)
object=eTS <- new("DTS",tf6y,variables=c("E1","E2"),data=data.frame(E1=c(2.45,3.5,4.,6.8,7.0,6.9),E2=as.integer(c(5,5,3,12,67,0))))
names(eTS)
dfe=as.data.frame(eTS)
# Coerce a data.frame with a POSIXct column into a DTS: the POSIXct column
# becomes the time axis, the smallest observed gap the period.
# NOTE(review): the generic declares `..` (a plain symbol) instead of `...`;
# the method adds per/option arguments absent from the generic; `unit` used
# at the as.period() call is undefined; and the constructed "timeFrame" is
# given starting/finished slots that only refTimeFrame has.  This block
# needs rework before use.
setGeneric(name = "as.DTS",def = function(x,..){standardGeneric("as.DTS")})
setMethod("as.DTS",
signature="data.frame",
definition=function(x,per=NULL,option="bycol"){
t<-x[,which(lapply(as.list(x),FUN = function(x) class(x)[1])=="POSIXct")]
df<- x[,-which(lapply(as.list(x),FUN = function(x) class(x)[1])=="POSIXct")]
starting=min(t)
t=t[order(t)]
# default period: the minimum consecutive gap between sorted time stamps
if(is.null(per)) per = lapply(1:(length(t)-1),function(i){t[i+1]-t[i]})[[which(unlist(lapply(1:(length(t)-1),function(i){t[i+1]-t[i]})) == min(unlist(lapply(1:(length(t)-1),function(i){t[i+1]-t[i]}))))[1]]]
len = as.integer((max(t)-starting)/as.numeric((per))+1)
# a gap of 365 or 366 days is treated as a yearly period
if (per%in%c(difftime("2017-01-01","2016-01-01"),difftime("2018-01-01","2017-01-01"))) per=period(1,"year") else per = as.period(per,unit=unit)
finished=max(t)+per
new("DTS",new("timeFrame",starting=starting,finished=finished,period=per,length=len),data=df)
}
)
## Ecosystem model structure
# varFunction a character matrix linking each variable of the ecosystem containing XP functions
#
# NOTE(review): placeholder only -- `slots=list` passes the base function
# `list` itself, which setClass rejects at run time; supply named slot types
# (or `contains="matrix"`) before using this class.
extendedMatrix <- setClass("extendedMatrix",
slots=list)
# XPfun are all the functions with parameters names "x" and "p" where x is a variable , p is a parameter
xpFun <- setClass("xpFun",
contains="function",
validity = function(object){
if (any(formalArgs(object)!=c("x","p"))) return("XPFunction was created without 'p' and 'x' as arguments")
# FIX: a validity function must return TRUE when the object is valid
TRUE
}
)
proP <- function(x=0,p=0){x*p}
# FIX: the original wrapped proP(x=0, p=c(p1=0, p2=1)) -- i.e. the numeric
# RESULT of calling proP -- which cannot be the .Data of a "function"-based
# class and made the construction fail.  Wrap the function itself.
propXP <- new("xpFun",proP)
propXP(2,3)
pnorM <- new("xpFun",function(x=0,p=c(meaN=2,sigmA=1)){pnorm(x,p[1],p[2])})
# NOTE(review): `neaN` below looks like a typo for `meaN`; harmless since the
# values are used positionally.
pnorM(x=3,p=c(neaN=3,sigmA=4))
# varFunctions is a matrix of functions for the ecosystem graph
# (implemented as a squared list-of-lists of xpFun kernels)
varFunctions <- setClass("varFunctions",
contains="list",
validity=function(object){
if (any(lapply(object,class)!="list")) return("varFunctions should be a list of list")
if (any(lapply(object,length)!=length(object))) return("varFunctions should be squared")
for (subobject in object) {if (any(lapply(subobject,class)!="xpFun")) return("varFunctions should be squared list of xpFUN")}
# FIX: a validity function must return TRUE when the object is valid
TRUE
})
vF<-new("varFunctions",list(list(new("xpFun",function(x,p){p*x}),new("xpFun",function(x,p){p[1]*x+p[2]*x^2})),list(new("xpFun",function(x,p){0}),new("xpFun",function(x,p){1}))))
object=vF
vF
# dimensions of the function matrix
# NOTE(review): order is c(length(x[[1]]), length(x)) here but the dim()
# method for paramVecMat below uses the transposed convention -- unify.
setMethod("dim",
signature = "varFunctions",
definition = function(x){c(length(x[[1]]),length(x))}
)
dim(vF)
# S4 "logimat": a matrix restricted to logical values (used as a neighborhood
# / adjacency matrix, e.g. the Gamma slot of varFunctionParam).
logimat <- setClass("logimat",
                    contains="matrix",
                    validity=function(object){
                      # is.logical() checks the storage mode directly; the original
                      # lapply'd class() over every element and its error message
                      # described an unrelated condition ("lentgh should equal its
                      # @dim product"). Valid objects now return TRUE, as the
                      # methods package expects.
                      if (!is.logical(object)) {
                        return("logimat must be a matrix of logical values")
                      }
                      TRUE
                    }
)
object=G <- new("logimat",matrix(c(T,F,F,T),nr=2))
# S4 "paramVecMat": a square list-of-lists of numeric parameter vectors,
# mirroring the shape of varFunctions (one parameter vector per cell
# function). Indexed via the "[" and dim methods defined just below.
paramVecMat <- setClass("paramVecMat",
                        contains="list",
                        validity=function(object){
                          # every top-level element must be a list (a "row")
                          if (any(lapply(object,class)!="list")) stop("@p should be a list of list")
                          # square: each row as long as the number of rows
                          if (any(lapply(object,length)!=length(object))) stop("@p should be squared")
                          # each cell is a plain numeric parameter vector
                          for (subobject in object) {if (any(lapply(subobject,class)!="numeric")) stop("@p should be squared list of numeric vectors")}
                        }
)
setMethod("[","paramVecMat",
definition = function(x, i, j, ..., drop) {
x[[i]][[j]]
})
setMethod("dim","paramVecMat",
definition = function(x) {
c(length(x),length(x[[1]]))
})
p=new("paramVecMat",list(list(c(1,2),0),list(0,c(.5,4))))
p[1,2]
dim(p)
# Draw a single deviate from Uniform(min, max).
# Bug fixed: the original body called runif(1, mean, sd) -- `mean` and `sd`
# are not arguments of this function, so they resolved to the base functions
# of the same name and the call errored.
r1unif <- function(min=0,max=1){runif(1,min,max)}
# Draw a single deviate from Normal(mean, sd).
r1norm <- function(mean=0,sd=1){rnorm(1,mean,sd)}
prior <- setClass("prior",
contains="function",
slots=c(hyperParam="numeric"),
validity = function(object){if (any(names(formals(object))!=names(object@hyperParam))) stop("formals of prior and hyperParam slot names are different")}
)
priorList <- setClass("priorList",
contains="list",
validity = function(object){if (any(lapply(object,class)!="prior")) stop("priorList should be a list of prior objects")}
)
object=new("prior",r1unif,hyperparam=c(min=0,max=1))
object=new('priorList',list(a=object,b=new("prior",r1norm,hyperparam=c(mean=0,sd=1))))
names(object) <- c("b","c")
names(object)
# S4 "bayesParam": bundles an x/p function with its named parameter vector,
# the priors of those parameters, and the ecosystem model it belongs to.
# NOTE(review): the "ecoModel" slot class is only sketched later in this file;
# setClass will warn about the undefined slot class until it exists -- confirm.
# Fixes vs. the original: `funciton` typo (parse error), a duplicated line with
# an unbalanced parenthesis (parse error), and the validity function now
# returns TRUE for valid objects as the methods package expects.
bayesParam <- setClass("bayesParam",
                       slots=c(fun="xpFun",par="numeric",prior="priorList",ecoModel="ecoModel"),
                       validity = function(object){
                         # each parameter must have a matching, identically named prior
                         if (any(names(object@par)!=names(object@prior))) {
                           return("names of parameter vector and prior list do not coincide")
                         }
                         TRUE
                       })
# varFunctions is a matrix of functions with slots
# @p : parameter vector matrix (list of list) and
# @Gamma : neighborhood matrix, presented as a
varFunctionParam <- setClass("varFunctionParam",
contains="varFunctions",
slot=c(p="list",Gamma="logimat"),
validity=function(object){
if (any(lapply(object@p,class)!="list")) stop("@p should be a list of list")
if (any(lapply(object@p,length)!=length(object@p))) stop("@p should be squared")
if (any(dim(object@Gamma)!=dim(object))) stop ("neighborhood matrix and function matrix should be of the same size")
if (any(dim(object)!=c(length(object@p),length(object@p)))) stop("dimension of functions lists and dimentions of parameters lists cannot differ")
if (any(dim(object)!=c(length(object@p),length(object@p)))) stop("dimension of functions lists and dimentions of parameters lists cannot differ")
for (subobject in object@p) {if (any(lapply(subobject,class)!="numeric")) stop("@p should be squared list of numeric vectors")}
})
object= new("varFunctionParam",vF, p=new("paramVecMat",list(list(c(1,2),0),list(0,c(.5,4)))),Gamma=as.logical(c(1,0,0,1)))
timeFunctions <- setClass("timeFunctions",
contains="varFunctions")
timeFunctionParam <- setClass("timeFunctionParam",
contains="varFunctionParam")
object= new("timeFunctions",list(list(new("xpFun",function(x,p){p*x}),new("xpFun",function(x,p){p[1]*x+p[2]*x^2})),list(new("xpFun",function(x,p){0}),new("xpFun",function(x,p){1}))))
object= new("timeFunctionParameters",list(list(c(1,2),c(.5,4)),list(0,0)))
#
ecoModel <- setClass("ecoModel",
contains=c("ecosysTimeSeries"),
slots=c(timeFunctions="list",timeModel="list",varFunctions="list", varModel="list",parameters="list"),
validity = function(object){
if (!all(levels(as.factor(unlist(object@parents)))%in%names(object))) stop("some parents in ecoModel are not in the ecosystem variables")
if (!all(levels(as.factor(unlist(object@parents)))%in%names(object))) stop("some parents in ecoModel are not in the ecosystem variables")
if (any(lapply(object@parents,length)!=(lapply(object@timeModel,length)))) stop("some parents in ecoModel are not in the ecosystem variables")
}
)
# S4 "3DGamma": a three-dimensional array (presumably variable x variable x
# time -- confirm against intended use).
# Fixes vs. the original: an R identifier cannot start with a digit, so the
# generator assignment was a parse error (backticks keep the same symbol and
# class name); and `length(dim(object)!=3)` had a misplaced parenthesis --
# it took the length of a logical vector, which is always > 0.
`3DGamma` <- setClass("3DGamma",
                      contains="array",
                      validity=function(object){
                        if (length(dim(object)) != 3) {
                          return("3DGamma should be a 3D array")
                        }
                        TRUE
                      })
loG <- function(x=0,p=0)
linear <- new("XPFun",function(x=0,p=0){x*p})
varGammaFunctions <- setClass("varGammaFunctions",
slots = "list",
validity = function(object){
if (any(lapply(object,class)!="function")) stop("varGammaFunction is a list of functions, constructor received something else in the list")
}
)
ecoGammaFunction <- setClassGammaFunctions
Gamma <- setClass("Gamma",
slots=c(GammaFunctions="function")
timeModel <- setClass("timeModel",
slots=c(type="character",parameters="numeric"))
data.frame[i,gamma(i)]
variableModel <- setClass("variableModel",
slots=c(parents="character",time="list",fun="list")
ecoModelSample <- setClass("ecoModelSample",
contains="ecoModel",
slots=c(paramValues="numeric"))
setClass("timeModel",
contains="list",
validity = function(object){
lapply(object[["variable"]]
})
model <- setClass("model",
slots=c(variables="character",parents="character",parameters="list", timeModel = "timeModel", residualDistribution ="function"),
validity =
)
timeLinks <- setClass("timeLinks",
contains="list",
validity=function(object){(all(lapply(object,class)=="model"))})
a=c(as.formula(y~ a*x),as.formula(y~1),as.formula(y[1]~x[1]))
names(a)
new("timeLinks",a)
varLinks <- setClass("varLinks",
contains="list",
validity=function(object){if (!(all(lapply(object,class)=="formula"))) stop("varLinks constructor did not receive alist of formula")})
a=c(as.formula(y~ a*x),as.formula(y~1),as.formula(y[1]~x[1]))
new("varLinks",a)
prior <- setClass("prior",
contains = "list",
validity = function(object){})
ecoModelSimul <- setClass("ecoModelSimul",
contains="ecoModelSample",
slots=c(simulations="data.frame"),
validity = function(object){
if (colnames(simulations)!=)
}
)
prior <- setClass("pior",
)
proportional <- function(param=a,variable=x){param*variable}
exponential <- function(param=a,variable=x){exp(param*variable)}
functions <- setClass("functions",
slots=c(parameters="character",)
)
##
## Old stuff
##
## PlaNet
## Proof of concept
# ecolink
# T H Pa Pl Pe
# T x x
# H x x
# Pa x x x
# Pl x x
# Pe x
# indlink
# iT iH iPa iPl iPe
# iT x
# iH x
# iPa x
# iPl x
# iPe x
setwd("/home/dupas/PlaNet/")
ecoVar=c("Tx","Pr","Pa","Pl","Pe")
indicVar=c("iT","iPr","iPa","iPl","iPe")
library(raster)
ecoLink <- t(matrix(as.logical(c(0,0,1,1,0,
0,0,1,1,0,
0,0,1,1,1,
0,0,1,1,0,
0,0,1,0,0)),nrow=5, ncol =5,dimnames=list(ecoVar,ecoVar)))
dimnames(ecoLink)=list(ecoVar,ecoVar)
ecoLinkTime = t(matrix((c(0,0,1,1,0,
0,0,2,1,0,
0,0,1,1,1,
0,0,1,1,0,
0,0,0,0,0)),nrow=5,ncol=5,dimnames=list(ecoVar,ecoVar)))
dimnames(ecoLinkTime)=list(ecoVar,ecoVar)
indicLink <- as.logical(diag(5))
dimnames(indicLink)=list(ecoVar,ecoVar)
# data is a 3 dim array: dim[1] is indicator and ecosystem variables, dim[2] is population, dim[3] is time
setClass("Data",
contains="array",
validity=function(object){
if (length(dim(object))!=3) stop("data should be a 3 dim array, dim[1] is indicator and ecosystem variables, dim[2] is population, dim[3] is time")
}
)
#save(dataf,file = "yield.data.RData")
setwd()
load("yield.data.RData")
dataf[,,1]
load(file = "yield.data.RData")
idata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(idata) <- list(dimnames(idata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(idata)[[3]])
edata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(edata) <- list(dimnames(edata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(edata)[[3]])
edata[,3,]<-NA
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
edata[1:2,3,]<-0 # there is no parasite before planting
edata[1:2,5,]<-0 # there is no pesticide before planting
edata[,,1:3]
p=list(PaPa=c(rmax=10,K=20),TxPa=c(min=15,max=30),PrPa=c(min=3),PlPa=c(r=1),PrPl=c(rperPr=.5),TxPl=c(min=10,max=30),PaPl=c(r=-0.03),PlPl=c(r=2.5,K=2,sd=.1),PePa=c(r=0.1),PaPe=c(thr=1.5))
names(p)
#
# Simulating ecosystem history
#
p_PaPa_rmax=10;p_PaPa_K=20
p_TxPa_min=15;p_TxPa_max=30
p_PrPa_min=3
p_PlPa_r=1
p_PrPl_rperPr=.5
p_TxPl_min=10;p_TxPl_max=30
p_PaPl_r=-0.03
p_PlPl_r=2.5;p_PlPl_K=2;p_PlPl_sd=.1
p_PePa_r=0.1
p_PaPe=1.5
p_Pl_sd_r=0.05
p_T_sd=0.3
p_Pe_pDet=.8
# simulating ecosystem
# using lapply
tmp=aperm(array(unlist(lapply(1:dim(edata)[3], function(k){lapply(3:dim(edata)[1], function (i) {
Pl=((edata[i-1,1,k]>p_TxPl_min)*0.1)+edata[i-1,4,k]*p_PlPl_r*((((1+edata[i-1,2,k])*p_PrPl_rperPr)*(edata[i-1,1,k]>p_TxPl_min))*
(1+(T-p_TxPl_min)/(p_TxPl_max-p_TxPl_min))*(edata[i-1,1,k]<p_TxPl_max))+edata[i-1,3,k]*(p_PaPl_r)
if(Pl>p_PlPl_K){Pl=2}
if(Pl<0) {Pl=0}
Pa=(edata[i-1,3,k]==0)+((!edata[i,5,k])+edata[i,5,k]*p_PePa_r)*edata[i-1,3,k]*p_PaPa_rmax*((edata[i-1,1,k]>p_TxPa_min)*(edata[i-1,1,k]<p_TxPa_max))*(edata[i-1,1,k]-p_TxPa_min)/(p_TxPa_max-p_TxPa_min)*(edata[i-2,2,k]>p_PrPa_min)*(edata[i-1,4,k]*p_PlPa_r)
c(Pa=rpois(1,Pa*(Pa<p_PaPa_K)+p_PaPa_K*(Pa>=p_PaPa_K)),
Pl=Pl,Pe=(edata[i-1,3,k]>p_PaPe))
})})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
ed=edata
for(k in 1:dim(edata)[3]) for (i in 3:dim(edata)[1]){
Pl=((edata[i-1,1,k]>p_TxPl_min)*0.1)+edata[i-1,4,k]*p_PlPl_r*((((1+edata[i-1,2,k])*p_PrPl_rperPr)*(edata[i-1,1,k]>p_TxPl_min))*
(1+(T-p_TxPl_min)/(p_TxPl_max-p_TxPl_min))*(edata[i-1,1,k]<p_TxPl_max))+edata[i-1,3,k]*(p_PaPl_r)
if(Pl>p_PlPl_K){Pl=2}
if(Pl<0) {Pl=0}
Pa=(edata[i-1,3,k]==0)+((!edata[i,5,k])+edata[i,5,k]*p_PePa_r)*edata[i-1,3,k]*p_PaPa_rmax*((edata[i-1,1,k]>p_TxPa_min)*(edata[i-1,1,k]<p_TxPa_max))*(edata[i-1,1,k]-p_TxPa_min)/(p_TxPa_max-p_TxPa_min)*(edata[i-2,2,k]>p_PrPa_min)*(edata[i-1,4,k]*p_PlPa_r)
c(Pa=rpois(1,Pa*(Pa<p_PaPa_K)+p_PaPa_K*(Pa>=p_PaPa_K)),
Pl=Pl,Pe=(edata[i-1,3,k]>p_PaPe))
ed[i,3:5,k]=c(Pa,Pl,(edata[i-1,3,k]>p_PaPe))
}
Pl=((edata[i-1,1,k]>p_TxPl_min)*0.1)+edata[i-1,4,k]*p_PlPl_r*((((1+edata[i-1,2,k])*p_PrPl_rperPr)*(edata[i-1,1,k]>p_TxPl_min))*
(1+(T-p_TxPl_min)/(p_TxPl_max-p_TxPl_min))*(edata[i-1,1,k]<p_TxPl_max))+edata[i-1,3,k]*(p_PaPl_r)
if(Pl>p_PlPl_K){Pl=2}
if(Pl<0) {Pl=0}
Pa=(edata[i-1,3,k]==0)+((!edata[i,5,k])+edata[i,5,k]*p_PePa_r)*edata[i-1,3,k]*p_PaPa_rmax*((edata[i-1,1,k]>p_TxPa_min)*(edata[i-1,1,k]<p_TxPa_max))*(edata[i-1,1,k]-p_TxPa_min)/(p_TxPa_max-p_TxPa_min)*(edata[i-2,2,k]>p_PrPa_min)*(edata[i-1,4,k]*p_PlPa_r)
c(Pa=rpois(1,Pa*(Pa<p_PaPa_K)+p_PaPa_K*(Pa>=p_PaPa_K)),
Pl=Pl,Pe=(edata[i-1,3,k]>p_PaPe))
#
# simulating ecosystem good one
#
# Forward-simulate the ecosystem, population by population (k, 3rd dim) and
# time step by time step (i, 1st dim); rows 1-2 were initialised upstream
# (planting / no-parasite / no-pesticide), hence i starts at 3.
# Column layout of edata (from the dimnames assigned at load time):
#   1 = Tx (temperature), 2 = Pr, 3 = Pa (parasite), 4 = Pl (plant), 5 = Pe (pesticide).
for (k in 1:dim(edata)[3]){
  for (i in 3:dim(edata)[1]){
    # Pe: pesticide is applied when last step's parasite load exceeded p_PaPe.
    edata[i,5,k]=edata[i-1,3,k]>p_PaPe
    # Pl: plant growth driven by temperature window and precipitation, minus
    # parasite damage (p_PaPl_r is negative).
    # NOTE(review): `T` below is R's TRUE (numeric 1), not the temperature;
    # it was probably meant to be edata[i-1,1,k] -- confirm before reuse.
    a=((edata[i-1,1,k]>p_TxPl_min)*0.1)+edata[i-1,4,k]*p_PlPl_r*((((1+edata[i-1,2,k])*p_PrPl_rperPr)*(edata[i-1,1,k]>p_TxPl_min))*
                                                                  (1+(T-p_TxPl_min)/(p_TxPl_max-p_TxPl_min))*(edata[i-1,1,k]<p_TxPl_max))+edata[i-1,3,k]*(p_PaPl_r)
    # Clamp plant biomass to [0, 2]. NOTE(review): the hard-coded 2 happens to
    # equal p_PlPl_K here -- presumably the cap should be p_PlPl_K itself.
    if(a>p_PlPl_K){a=2}
    if(a<0) {a=0}
    edata[i,4,k]=a
    # Pa: expected parasite load -- reseeded with 1 when previously extinct,
    # suppressed by pesticide (factor p_PePa_r), gated by the temperature
    # window, the precipitation threshold two steps back, and host plant
    # availability -- then a Poisson draw capped at carrying capacity p_PaPa_K.
    a=(edata[i-1,3,k]==0)+((!edata[i,5,k])+edata[i,5,k]*p_PePa_r)*edata[i-1,3,k]*p_PaPa_rmax*((edata[i-1,1,k]>p_TxPa_min)*(edata[i-1,1,k]<p_TxPa_max))*(edata[i-1,1,k]-p_TxPa_min)/(p_TxPa_max-p_TxPa_min)*(edata[i-2,2,k]>p_PrPa_min)*(edata[i-1,4,k]*p_PlPa_r)
    edata[i,3,k]=rpois(1,a*(a<p_PaPa_K)+p_PaPa_K*(a>=p_PaPa_K))
  }
}
#
# Calculating probability of ecosystem model
#
dims=dim(edata)
for(k in 1:dims[3]) for (i in 3:dims[1]){
}
idata=aperm(array(unlist(lapply(1:dim(edata)[3], function(k){lapply(1:dim(edata)[1], function (i) {
Pl=pgamma(1,shape=edata[i,4,k],rate=p_Pl_var_r)
if (Pl<0) Pl=0
c(T=rnorm(1,edata[i,1,k],p_T_sd),Pr=rpois(1,edata[i,2,k]),Pa=rpois(1,edata[i,3,k]),Pl=Pl,Pe=rbinom(1,edata[i,5,k],p_Pe_pDet))
})})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
load(file = "edata.RData")
n=length(edata[,1,])
simulate idata from edata
#Pe
idata[,5,]=(edata[,5,])*rbinom(n,1,p_Pe_pDet)
#Pl
idata[,4,]=rgamma(n,edata[,4,],rate=p_Pl_var_r)
#Pa
idata[,3,]=rpois(n,edata[,3,])
#T
idata[,1,]=rnorm(n,edata[,1,],p_T_sd)
#Pr
idata[,2,]=rpois(n,edata[,2,])
#for (k in 1:dim(edata)[3]){
# for (i in 3:dim(edata)[1]){
#Pe
# if (edata[i,5,k]) idata[i,5,k]=rbinom(1,1,p_Pe_pDet)
#Pl
# idata[i,4,k]=rgamma(length(edata[,4,]),edata[,4,],rate=p_Pl_var_r)
#rnorm(1,edata[i,4,k],edata[i,4,k]*p_Pl_sd_r)
# if (idata[i,4,k]<0) idata[i,4,k]=0
#Pa
# idata[i,3,k]=rpois(1,edata[i,3,k])
#T
# idata[i,1,k]=rnorm(1,edata[i,1,k],p_T_sd)
#Pr
# idata[i,2,k]=rpois(1,edata[i,2,k])
# }
#}
#
# Simulation of indicator data from edata good one
#
idata=aperm(array(unlist(lapply(1:dim(edata)[3], function(k){lapply(1:dim(edata)[1], function (i) {
Pl=rnorm(1,edata[i,4,k],edata[i,4,k]*p_Pl_sd_r)
if (Pl<0) Pl=0
c(T=rnorm(1,edata[i,1,k],p_T_sd),Pr=rpois(1,edata[i,2,k]),Pa=rpois(1,edata[i,3,k]),Pl=Pl,Pe=rbinom(1,edata[i,5,k],p_Pe_pDet))
})})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
#
# Simulation of edata from idata
#
# p(i/e)=p(e/i)*p(i)/p(e)
# p(e/i)=p(i/e)*p(e)/p(i)
# p(H)=dgamma3(x,1,5.749)
# P(T)=dnorm(1,10,4)*.55+dnorm(1,21,3)*.45
# P(Pa)=dgamma(x+1,.2,3)
# P(Pl)=rnorm(1,edata[i,4,k],edata[i,4,k]*p_Pl_sd_r)
# plot(dnorm(1:40,10,4)*.6+dnorm(1:40,4)*.4)
# ?dnorm
library(FAdist)
.1*1.5^8
par(mfrow=c(1,2))
hist(idata[,4,])
plot((0:10)/5,dexp((1:11),1.3,1.5))
plot(-5:35,dnorm(-5:35,10,4)*.55+dnorm(-5:35,21,3)*.45)
hist(rpois(length(idata[,2,]),.1))
plot(1:20,dgamma3(1:20,1,2))
pgamma3(1,1,5.749)
sum(edata[,2,]<1)/length(edata[,2,])
pgamma3(2,1,5.749)
sum((edata[,2,]<2)&(edata[,2,]>1))/length(idata[,2,])
?rgamma3
dimnames(idata)[[2]]
idata=aperm(array(unlist(lapply(1:dim(edata)[3], function(k){lapply(1:dim(edata)[1], function (i) {
Pl=rnorm(1,edata[i,4,k],edata[i,4,k]*p_Pl_sd_r)
if (Pl<0) Pl=0
c(T=rnorm(1,edata[i,1,k],p_T_sd),Pr=rpois(1,edata[i,2,k]),Pa=rpois(1,edata[i,3,k]),Pl=Pl,Pe=rbinom(1,edata[i,5,k],p_Pe_pDet))
})})),dim=dim(edata)[c(2,1,3)],dimnames=lapply(c(2,1,3),function(i) dimnames(edata)[[i]])),c(2,1,3))
#
# Probability of indicator data
#
hist(edata[,4,])
idata
log(exp(1))
pT <- sum(dnorm(idata[,1,],edata[,1,],p_T_sd,log=TRUE))
pH <- sum(dpois(idata[,2,],edata[,2,],log=TRUE)))
pPa <- sum(dpois(idata[,3,],edata[,3,],log=TRUE))
sdPl=edata[,4,]*p_Pl_sd_r
sdPl[sdPl<=0.1]=.1
pPl <- sum(dnorm(x=idata[,4,],mean=edata[,4,],sd=sdPl,log=TRUE))
pPe <- dbinom(idata[,5,],1,prob=edata[,5,]*p_Pe_pDet,log=TRUE)
pIndic <- sum(c(pT = sum(log(dnorm(idata[,1,],edata[,1,],p_T_sd))),
pH = sum(log(dpois(idata[,2,],edata[,2,]))),
pPa = sum(log(dpois(idata[,3,],edata[,3,]))),
sdPl={sdPl=edata[,4,]*p_Pl_sd_r
sdPl[sdPl<=0.1]=.1
sum(log(dnorm(x=idata[,4,],mean=edata[,4,],sd=sdPl)))},
pPe=sum(dbinom(idata[,5,],1,prob=edata[,5,]*p_Pe_pDet,log=TRUE))
))
edata[1:8,,1]
idata[1:8,,1]
p_PaPa_rmax=10
p_PaPa_K=20
p_TxPa_min=15
p_TxPa_max=30
p_PrPa_min=3
p_PlPa_r=1
p_PrPl_rperPr=.5
p_TxPl_min=10
p_TxPl_max=30
p_PaPl_r=-0.03
p_PlPl_r=2.5
p_PlPl_K=2
p_PlPl_sd=.1
p_PePa_r=0.1
p_PaPe=1.5
p_Pl_sd_r=0.05
p_T_sd=0.3
p_Pe_pDet=.8
p_Pe_pFalseDet=.005
#ecosysHistory
p0 = c(p_PaPa_rmax=exp(runif(1,log(5),log(15))),p_PaPa_K=exp(runif(1,log(15),log(25))),
p_TxPa_min=runif(1,10,20),p_TxPa_max=runif(1,25,35),
p_PrPa_min=runif(1,1.5,4),p_PlPa_r=runif(1,.7,1.5),
p_PrPl_rperPr=runif(1,.3,.7),p_TxPl_min=runif(1,5,14),p_TxPl_max=runif(1,26,32),
p_PaPl_r=exp(runif(1,log(0.015),log(0.045))),
p_PlPl_r=exp(runif(1,log(1.8),log(3.5))),p_PlPl_K=exp(runif(1,log(1.5),log(3))),p_PlPl_sd=runif(1,.07,.15),
p_PePa_r=exp(runif(1,log(.06),log(.15))),
p_PaPe=runif(1,1.3,1.9),
#ecoindic
p_Pl_sd_r=exp(runif(1,log(.03),log(.07))),
p_T_sd=runif(1,.2,.5),
p_Pe_pDet=runif(1,.6,.99),
p_Pe_pFalseDet=exp(runif(1,log(.001),log(.02)))
)
#
# Algorithm
#
setwd("/home/dupas/PlaNet/")
ecoVar=c("Tx","Pr","Pa","Pl","Pe")
indicVar=c("iT","iPr","iPa","iPl","iPe")
load("yield.data.RData")
dataf[,,1]
load(file = "yield.data.RData")
setClass("Data",
contains="array",
validity=function(object){
if (length(dim(object))!=3) stop("data should be a 3 dim array, dim[1] is indicator and ecosystem variables, dim[2] is population, dim[3] is time")
}
)
idata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(idata) <- list(dimnames(idata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(idata)[[3]])
edata <- new("Data",dataf[1:8,c(8,4,2,2,2),])
dimnames(edata) <- list(dimnames(edata)[[1]],c("Tx","Pr","Pa","Pl","Pe"),dimnames(edata)[[3]])
edata[,3,]<-NA
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
edata[1,4,]<-0;edata[2,4,]<-0.01 # planting occurs in february
edata[1:2,3,]<-0 # there is no parasite before planting
edata[1:2,5,]<-0 # there is no pesticide before planting
edata[,,1:3]
|
12d730db833bd11b870b217f74942ec62757f669
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_query10_1344n/query05_query10_1344n.R
|
6ae98a2914ad472dd4bef398f10aaea4e53d7986
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
query05_query10_1344n.R
|
bf968afd3c7687316f4c8214c8ac3f6a query05_query10_1344n.qdimacs 568 1323
|
6374290c2fe382d47e602ace4e0f215935cc4d6d
|
cf4c963a4f0e25672c2d6b5323810486554fa4e7
|
/R/DANB_Coexpression.R
|
875205d0c6aa26b481392855cbe634e6e72a18f3
|
[] |
no_license
|
tallulandrews/M3Drop
|
1f5e96c1c3bd98c88889bae54cbd13e83d4a34bf
|
9128921ec4077bb442fdbfb83e7a6c9185483c97
|
refs/heads/master
| 2023-02-19T04:34:43.763543
| 2023-02-16T23:21:33
| 2023-02-16T23:21:33
| 63,236,849
| 29
| 9
| null | 2018-04-26T21:59:17
| 2016-07-13T10:22:59
|
R
|
UTF-8
|
R
| false
| false
| 2,310
|
r
|
DANB_Coexpression.R
|
# Pairwise co-expression test under the DANB dropout model.
#
# For every pair of genes in gene_list, compares the observed number of cells
# in which both genes are undetected ("off"), both detected ("on"), or either
# event ("both") against the expectation if the two genes drop out
# independently, and reports the deviation as a z-score.
#
# counts    : gene x cell count matrix; rownames are gene names.
# fit       : DANB fit; uses fit$vals$tjs (gene totals), fit$vals$tis (cell
#             totals), fit$vals$total (grand total) and fit$sizes (per-gene
#             NB size/dispersion).
# gene_list : genes to test; defaults to every gene in the fit.
# method    : which co-occurrence event to score (see above).
# Returns a symmetric genes x genes matrix of z-scores.
NBumiCoexpression <- function(counts, fit, gene_list=NULL, method=c("both", "on", "off")) {
	# Resolve method to a single string; the raw default is a length-3 vector
	# and comparing it with == below would raise a condition-length error.
	method <- match.arg(method)
	# Set up: per-gene probability of observing a zero in each cell.
	if (is.null(gene_list)) {
		gene_list <- names(fit$vals$tjs)
	}
	pd_gene <- matrix(-1, nrow=length(gene_list), ncol=ncol(counts));
	name_gene <- rep("", length(gene_list))
	for (i in seq_along(gene_list)) {
		gid <- which(names(fit$vals$tjs) == gene_list[i])
		if (length(gid) == 0) {next;}
		# Expected expression of this gene in each cell, then NB P(count == 0).
		mu_is <- fit$vals$tjs[gid]*fit$vals$tis/fit$vals$total
		p_is <- (1+mu_is/fit$sizes[gid])^(-fit$sizes[gid]);
		pd_gene[i,] <- p_is;
		name_gene[i] <- gene_list[i];
	}
	if (sum(name_gene == "") > 0) {
		warning(paste("Warning:", sum(name_gene == ""), "genes not found, check your gene list is correct."));
		exclude <- which(name_gene == "");
		# drop=FALSE keeps pd_gene a matrix even if only one gene survives.
		pd_gene <- pd_gene[-exclude, , drop=FALSE]
		name_gene <- name_gene[-exclude]
	}
	rownames(pd_gene) <- name_gene
	# Z_mat is gene x gene. (The original sized it with length(pd_gene), which
	# is nrow*ncol of a matrix, and labelled it with names(pd_gene), which is
	# NULL for a matrix -- both fixed here.)
	n_genes <- nrow(pd_gene)
	Z_mat <- matrix(-1, nrow=n_genes, ncol=n_genes);
	for (i in seq_len(n_genes)) {
	for (j in i:n_genes) {
		p_g1 <- pd_gene[i,];
		p_g2 <- pd_gene[j,];
		expr_g1 <- counts[rownames(counts)==rownames(pd_gene)[i],]
		expr_g2 <- counts[rownames(counts)==rownames(pd_gene)[j],]
		if (method == "off" | method=="both") {
			# Both zero: per-cell expectation and its binomial variance.
			expect_both_zero <- p_g1*p_g2
			expect_both_err <- expect_both_zero*(1-expect_both_zero)
			obs_both_zero <- sum(expr_g1==0 & expr_g2==0)
			Z <- (obs_both_zero - sum(expect_both_zero)) / sqrt(sum(expect_both_err))
			#p_val <- pnorm(-abs(Z))*2
		}
		if (method == "on" | method=="both") {
			# Both nonzero.
			obs_both_nonzero <- sum(expr_g1!=0 & expr_g2!=0)
			expect_both_nonzero <- (1-p_g1)*(1-p_g2)
			expect_non_err <- expect_both_nonzero*(1-expect_both_nonzero)
			Z <- (obs_both_nonzero - sum(expect_both_nonzero)) / sqrt(sum(expect_non_err))
			#p_val <- pnorm(-abs(Z))*2
		}
		if (method == "both") {
			# Either both zero or both nonzero (reuses the two counts above,
			# which are guaranteed to exist because "both" runs both branches).
			obs_either <- obs_both_zero+obs_both_nonzero
			expect_either <- expect_both_zero+expect_both_nonzero
			expect_err <- expect_either*(1-expect_either)
			Z <- (obs_either - sum(expect_either)) / sqrt(sum(expect_err))
			#p_val <- pnorm(-abs(Z))*2
		}
		Z_mat[i,j] <- Z_mat[j,i] <- Z;
	}
	}
	rownames(Z_mat) <- rownames(pd_gene);
	colnames(Z_mat) <- rownames(pd_gene);
	return(Z_mat);
}
|
8230258fbfa385c958b7f8100ba4bae84bacb8a9
|
a0269d7359c770f77345c69ef95c160680db75b1
|
/man/hugo_read_data.Rd
|
8737e97b45a78cde8aed449ff631cd67948ad93e
|
[] |
no_license
|
hugo4r/hugo
|
3898062a8f8a0ef4be3f7965ff299d4663771619
|
da660022bb69b819edfdce82e85638637c76165c
|
refs/heads/master
| 2020-03-12T02:34:24.268026
| 2018-06-17T20:56:41
| 2018-06-17T20:56:41
| 130,405,601
| 5
| 22
| null | 2018-06-17T20:56:42
| 2018-04-20T19:31:14
|
R
|
UTF-8
|
R
| false
| true
| 3,201
|
rd
|
hugo_read_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hugo_read_data.R
\name{hugo_read_data}
\alias{hugo_read_data}
\title{Reads data to R}
\usage{
hugo_read_data(path, file_extension = NA, header = NA, separator = NA,
decimal = NA, file_name_to_save = NULL)
}
\arguments{
\item{path}{the name of the path which the data are to be read from. Each row of the table appears as one line of the file. If it does not contain an absolute path, the path name is relative to the current working directory, \code{getwd()}. path can also be a complete URL.}
\item{file_extension}{the type of file which is loaded. Usually don't needed, because Hugo guesses it.}
\item{header}{a logical value indicating whether the file contains the names of the variables as its first line. If missing, the value is determined from the file format: header is set to TRUE if and only if the first row contains one fewer field than the number of columns.}
\item{separator}{the field separator character. Values on each line of the file are separated by this character. If sep = "" the separator is 'white space', that is one or more spaces, tabs, newlines or carriage returns.}
\item{decimal}{the character used in the file for decimal points.}
\item{file_name_to_save}{the name of the file where the data will be saved.}
}
\value{
Returns data.frame, tibble or a list.
}
\description{
Function unifies the most common data-reading functions. It guesses the file extension and picks the best function to load it.
Usually, specifying parameters such as the separator, decimal mark, or header is not necessary, but the function allows you to supply them by hand.
Supported extensions: "txt", "csv", "xlsx", "tsv", "rda", "rdata", "json".
}
\examples{
\dontrun{
### simple loading most common types of extensions
#loading csv
path <- "http://insight.dev.schoolwires.com/HelpAssets/C2Assets/C2Files/C2SectionRobotSample.csv"
data <- hugo_read_data(path)
head(data)
#loading rda
path <- system.file("extdata", "example.rda", package = "hugo")
data <- hugo_read_data(path)
head(data)
#loading json
path <- "https://raw.githubusercontent.com/corysimmons/colors.json/master/colors.json"
data <- hugo_read_data(path)
head(data)
### specifying our own parameters
#loading csv
path <- "http://insight.dev.schoolwires.com/HelpAssets/C2Assets/C2Files/C2SectionRobotSample.csv"
data <- hugo_read_data(path, separator = ",", decimal = ".", header = TRUE)
head(data)
### interaction with user
# loading file without extension
path <- system.file("extdata", "no_extension", package = "hugo")
# input "txt"
data <- hugo_read_data(path)
head(data)
# providing your own parameters
path <- system.file("extdata", "example2.txt", package = "hugo")
data <- hugo_read_data(path, decimal = ".")
# an error occurred, and you are informed to supply ALL parameters
data <- hugo_read_data(path, header = TRUE, separator = ",", decimal = ".")
head(data)
### more examples
# for more examples please put an extension in <extension> below
# and try other available sample files attached to the package
# path <- system.file("extdata", "example.<extension>", package = "hugo")
# data <- hugo_read_data(path)
# head(data)
}
}
\author{
Dariusz Komosinski
}
|
d7546ab90db1bd20249c349d6c9b17cb1c30ba5c
|
00c0db9350641b0f3c700836b3d10df5b3f78a65
|
/man/diversity.predict.Rd
|
3bf40f89dafd5c18653b8c015f1eaaee187d0f93
|
[] |
no_license
|
derek-corcoran-barrios/DiversityOccu
|
4f50546e7b9f966d735c8b2dde6256cc6540f98f
|
a9ab7cc477f873e8f01067af020f1de6987265b2
|
refs/heads/master
| 2020-12-29T02:32:38.350106
| 2019-11-07T23:11:11
| 2019-11-07T23:11:11
| 49,165,821
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,558
|
rd
|
diversity.predict.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiversityOccu.R
\name{diversity.predict}
\alias{diversity.predict}
\title{Makes a spatially explicit prediction of the occupancy of multiple species
and alpha diversity, and selects the area where both are high}
\usage{
diversity.predict(model, diverse, new.data, quantile.nth = 0.8, species,
kml = TRUE, name = "Priority_Area.kml")
}
\arguments{
\item{model}{A result from diversityoccu}
\item{diverse}{A result from the model.diversity function.}
\item{new.data}{a rasterstack, or a dataframe containing the same variables as
the siteCovs variable in diversityoccu or batchoccu}
\item{quantile.nth}{the nth quantile, over which is a goal to keep both diversity
and selected species. default = NULL}
\item{species}{a boolean vector of the species to take into account}
\item{kml}{if TRUE builds a kml file of the selected area and saves it in your
working directory}
\item{name}{the name of the kml file if kml is TRUE}
}
\value{
a data frame with predicted values, or a raster stack with predictions
for each species, a raster for diversity and a raster with the area meeting the
quantile criteria.
}
\description{
This function takes a diversityoccu object and predicts occupancy for all species
in new data, either a data.frame or a rasterstack. It can also return a subset
of the total area of a rasterstack, where diversity and occupancy/abundance are
higher than the nth quantile.
}
\examples{
\dontrun{
#Load the data
data("IslandBirds")
data("Daily_Cov")
data("siteCov")
data("Birdstack")
#Model the abundance for 5 bird species and calculate alpha diversity from that
BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,
obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time ~ Elev + Wetland + Upland)
#Select the best model that explains diversity using genetic algorithms
set.seed(123)
glm.Birdiversity <- model.diversity(BirdDiversity, method = "g")
# get the area where the first two bird species are most abundant
# and the diversity is high
library(rgdal)
Selected.area <- diversity.predict(model = BirdDiversity, diverse = glm.Birdiversity,
new.data = Birdstack, quantile.nth = 0.65, species =
c(TRUE, TRUE, FALSE, FALSE, FALSE))
Selected.area
}
}
\seealso{
\code{\link[DiversityOccupancy]{diversityoccu}}
\code{\link[DiversityOccupancy]{batchoccu}}
\code{\link[DiversityOccupancy]{model.diversity}}
}
\author{
Derek Corcoran <derek.corcoran.barrios@gmail.com>
}
|
ae1484a42365edd681926234cf3d913b0de767bc
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/GENLIB/man/gen.half.founder.Rd
|
adde8d9e94c77f08437fa81b6d91543c0c6a1967
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 807
|
rd
|
gen.half.founder.Rd
|
\name{gen.half.founder}
\alias{gen.half.founder}
\title{Get half-founder id numbers}
\description{Returns the id numbers of the half-founders. Half-founders are defined as the individuals with only one known parent in the genealogy (i.e., either mother id=0 or father id=0).}
\usage{gen.half.founder( gen, ...)}
\arguments{
\item{gen}{An object of class GLgen obtained with gen.genealogy, gen.lineages or gen.branching. Required.}
\item{...}{Option to pass additional arguments automatically between methods. Internal use only.}
}
\value{returns a vector of integer}
\seealso{
\code{\link{gen.genealogy}}
\code{\link{gen.pro}}
\code{\link{gen.founder}}
\code{\link{gen.parent}}
}
\examples{
data(geneaJi)
genJi<-gen.genealogy(geneaJi)
# There are 2 half-founders
gen.half.founder(genJi)
}
\keyword{manip}
|
3cbd4ef5f96feae27828a93a75acec6dd00e7e12
|
8a3f753e5af0f96b8b7b8f46bc0e9b7e618f31cf
|
/run/utils/plot_accuracy.r
|
c3a3ed4493f3a94141b1c6edd07eb057f472b726
|
[
"Apache-2.0"
] |
permissive
|
hxj5/csp-benchmark
|
6d02ad16576e359f8d872308a85b22427443fe85
|
5e7c54e1aa86526d14ab24a3b71d176ccce89a63
|
refs/heads/master
| 2023-05-12T11:16:38.398938
| 2021-06-10T03:17:52
| 2021-06-10T03:17:52
| 348,695,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,204
|
r
|
plot_accuracy.r
|
#!/usr/bin/env Rscript
#Aim: to visualize the results of comparing coverage matrices of two Apps.
#@abstract Parse mtx file to get mtx info.
#@param fn Name of mtx file [STR]
#@return A list with four elements if success, NULL otherwise [list]
# The four elements are:
# $nrow Number of rows [INT]
# $ncol Number of columns [INT]
# $nval Number of values [INT]
# $data The matrix data without header [tibble]
#@note The mtx file of general format has 3 columns: <row> <col> <value>.
# In this case, the 3 columns are: <snp> <cell> <value>
# Read a MatrixMarket-style triplet file.
#
# fn : path to the mtx file; "%"-prefixed lines are treated as comments.
# Returns NULL when the file is missing or has fewer than two rows; otherwise
# a list with $nrow, $ncol, $nval (from the size line) and $data, a tibble of
# <row> <col> <value> triplets.
parse_mtx <- function(fn) {
  if (!file.exists(fn)) {
    return(NULL)
  }
  tab <- read.table(fn, header = FALSE, comment.char = "%")
  if (nrow(tab) < 2) {
    return(NULL)
  }
  colnames(tab)[1:3] <- c("row", "col", "value")
  # First non-comment row is the size line; the remaining rows are records.
  list(
    nrow = tab[1, 1],
    ncol = tab[1, 2],
    nval = tab[1, 3],
    data = as_tibble(tab[-1, ])
  )
}
#@abstract Write the mtx data to the mtx file.
#@param mtx The mtx data [list]
#@param fn Name of mtx file [STR]
#@return Void.
# Serialize an mtx list (as produced by parse_mtx) back to MatrixMarket
# coordinate format: banner, "%" comment line, size line, then one
# tab-separated <row> <col> <value> triplet per line.
write_mtx <- function(mtx, fn) {
  header <- c(
    "%%MatrixMarket matrix coordinate integer general",
    "%",
    paste(c(mtx$nrow, mtx$ncol, mtx$nval), collapse = "\t")
  )
  writeLines(header, fn)
  write.table(mtx$data, fn, append = T, sep = "\t", row.names = F,
              col.names = F)
}
# Normalize a parsed mtx for comparison:
#  - drop zero-valued records so explicit zeros and absent entries compare equal,
#  - record snp/sample/record counts under stable names,
#  - if grp_col != 1, swap the row/col roles so grouping is done by sample.
preprocess_mtx <- function(mtx, grp_col = 1) {
  mtx$data <- mtx$data %>%
    as_tibble() %>%
    filter(value > 0)  # necessary! downstream joins treat absent records as 0
  mtx$nsnp <- mtx$nrow
  mtx$nsmp <- mtx$ncol
  mtx$nrec <- mtx$nval
  if (grp_col != 1) {
    # Transpose the axes: "row" becomes the sample, "col" the SNP.
    mtx$nrow <- mtx$nsmp
    mtx$ncol <- mtx$nsnp
    mtx$data <- mtx$data %>%
      rename(row = col, col = row)
  }
  return(mtx)
}
#@abstract Basic stat for two mtx.
#@param mtx1 The first mtx to be compared, returned by parse_mtx() [list]
#@param mtx2 The second mtx to be compared, returned by parse_mtx() [list]
#@return Void
# Compare the row sets of two parsed/preprocessed mtx objects.
#
# Side effects: prints summary counts to stdout; terminates the script with
# exit status 3 when the two matrices disagree on SNP/sample dimensions.
# Returns a list with:
#   $union       full outer join of both record sets (NAs filled with 0)
#   $uniq_x_idx  row ids present only in mtx1
#   $uniq_y_idx  row ids present only in mtx2
#   $overlap_idx row ids present in both
basic_stat <- function(mtx1, mtx2) {
  # Both apps must have produced matrices over the same SNPs and samples.
  if (mtx1$nsnp != mtx2$nsnp || mtx1$nsmp != mtx2$nsmp) {
    write("Error: invalid headers of mtx files!", file = stderr())
    quit("no", 3)
  }
  print(paste0("nsnp = ", mtx1$nsnp, "; nsmp = ", mtx1$nsmp, "; nrec1 = ",
               mtx1$nrec, "; nrec2 = ", mtx2$nrec))
  # Full outer join on (row, col); a record absent from one side means that
  # app observed 0 there, hence the NA -> 0 fill.
  union <- mtx1$data %>%
    full_join(mtx2$data, by = c("row", "col")) %>%
    mutate(value.x = ifelse(is.na(value.x), 0, value.x),
           value.y = ifelse(is.na(value.y), 0, value.y))
  print(paste0("uniq rows for union of mtx1 and mtx2 = ",
               nrow(union %>% select(row) %>% distinct())))
  # Row ids seen only by app 1.
  uniq_x_idx <- mtx1$data %>%
    select(row) %>%
    distinct() %>%
    anti_join(mtx2$data %>% select(row), by = "row")
  print(paste0("uniq rows only for mtx1 = ", nrow(uniq_x_idx)))
  # Row ids seen only by app 2.
  uniq_y_idx <- mtx2$data %>%
    select(row) %>%
    distinct() %>%
    anti_join(mtx1$data %>% select(row), by = "row")
  print(paste0("uniq rows only for mtx2 = ", nrow(uniq_y_idx)))
  # Row ids seen by both apps.
  overlap_idx <- mtx1$data %>%
    select(row) %>%
    distinct() %>%
    semi_join(mtx2$data %>% select(row), by = "row")
  print(paste0("uniq rows for overlap of mtx1 and mtx2 = ", nrow(overlap_idx)))
  return(list(
    union = union,
    uniq_x_idx = uniq_x_idx,
    uniq_y_idx = uniq_y_idx,
    overlap_idx = overlap_idx
  ))
}
#@abstract A safe version of cor()
#@param v1 Vector 1 [vector]
#@param v2 Vector 2 with the same length of vector 1 [vector]
#@return The correlation value [DOUBLE]
#@note When either vector's sd is 0, then return -2.
# Pearson correlation of two equal-length vectors, guarded against the
# zero-variance case where the denominator would be 0.
# Returns the module-level sentinel COR_OF_SD0 when either vector is constant.
safe_cor <- function(v1, v2) {
  s1 <- sd(v1)
  s2 <- sd(v2)
  if (s1 == 0 || s2 == 0) {
    return(COR_OF_SD0)
  }
  cov(v1, v2) / (s1 * s2)
}
# Mean absolute error of v1 vs v2, scaled by the pairwise mean -- a symmetric,
# percentage-style error measure.
safe_mae <- function(v1, v2) {
  denom <- (v1 + v2) / 2
  # denom == 0 implies v1 == v2 == 0 there, so the numerator is 0 too;
  # substituting 1 keeps that term at 0 instead of producing 0/0 = NaN.
  denom[denom == 0] <- 1
  mean(abs(v1 - v2) / denom)
}
#@abstract Compare each SNPs within two mtx.
#@param mmtx Merged two ref mtx or merged two alt mtx [tbl]
#@param lc Length of col elements of each SNP [INT]
#@param type Name of mtx type [STR]
#@return The stat results [tbl]
# Per-row comparison statistics between the two apps.
#
# mmtx : merged records for one mtx type (columns value.x / value.y), e.g.
#        basic_stat()$union, possibly restricted to the overlap rows.
# lc   : number of columns each row can have; every row is densified to this
#        length (missing columns filled with value 0) before comparing.
# type : label stored in the mtx_type column ("r" for ref, "a" for alt).
# Returns a long tibble with columns: row, st_type ("cor"/"mae"), st_value,
# mtx_type.
cmp_mtx <- function(mmtx, lc, type) {
  tb <- tibble(col = 1:lc)
  res <- mmtx %>%
    group_by(row) %>%
    group_modify(~ {
      .x %>%
        # Densify: make every column 1..lc present; absent entries become 0/0.
        right_join(tb, by = "col") %>%
        mutate(value.x = ifelse(is.na(value.x), 0, value.x),
               value.y = ifelse(is.na(value.y), 0, value.y)) %>%
        summarise(cor = safe_cor(value.x, value.y), mae = safe_mae(value.x, value.y))
    }) %>%
    ungroup() %>%
    # Wide (cor, mae) -> long (st_type, st_value).
    gather(st_type, st_value, -row) %>%
    mutate(mtx_type = type)
  return(res)
}
library(argparse)
# default settings
stat_types <- c("cor", "mae")
all_fig_types <- c("boxplot", "density")
def_fig_types <- paste(all_fig_types, collapse = ",")
all_range <- c("overlap", "union")
def_range <- "overlap"
def_grp_col <- 1
def_width <- 8
def_height <- 6
def_dpi <- 300
min_cor <- 0.9
max_mae <- 0.1
cor_unit_range <- 0.1
mae_unit_range <- 0.1
COR_OF_SD0 <- -2 # special correlation value when one of the vector has sd = 0
# parse command line args.
args <- commandArgs(trailingOnly = TRUE)
if (0 == length(args)) {
print("use -h or --help for help on argument.")
quit("no", 1)
}
parser <- ArgumentParser(
description = "",
formatter_class = "argparse.RawTextHelpFormatter"
)
parser$add_argument("--ref1", type = "character", help = "Ref matrix file of app 1.")
parser$add_argument("--alt1", type = "character", help = "Alt matrix file of app 1.")
parser$add_argument("--name1", type = "character", help = "Name of app 1.")
parser$add_argument("--ref2", type = "character", help = "Ref matrix file of app 2.")
parser$add_argument("--alt2", type = "character", help = "Alt matrix file of app 2.")
parser$add_argument("--name2", type = "character", help = "Name of app 2.")
parser$add_argument("-O", "--outdir", type = "character",
help = "Outdir for result summary files.")
parser$add_argument("-f", "--outfig", type = "character", default = def_fig_types,
help = paste0("Result figure file types: boxplot|density, separated by comma [", def_fig_types, "]"))
parser$add_argument("--groupcol", type = "integer", default = def_grp_col,
help = paste0("Col for grouping: 1|2 [", def_grp_col, "]"))
parser$add_argument("--range", type = "character", default = def_range,
help = paste0("Range of merged mtx: overlap|union [", def_range, "]"))
parser$add_argument("--title", help = "If set, will add title to plots.")
parser$add_argument("--width", type = "double", default = def_width,
help = paste0("Result file width [", def_width, "]"))
parser$add_argument("--height", type = "double", default = def_height,
help = paste0("Result file height [", def_height, "]"))
parser$add_argument("--dpi", type = "integer", default = def_dpi,
help = paste0("DPI [", def_dpi, "]"))
args <- parser$parse_args()
# check args.
if (! dir.exists(args$outdir)) {
dir.create(args$outdir)
}
fig_types <- args$outfig
if (is.null(args$outfig) || 0 == length(args$outfig) || "" == args$outfig) {
fig_types <- def_fig_types
}
fig_types <- strsplit(fig_types, ",")[[1]]
grp_col <- args$groupcol
library(stringr)
library(dplyr)
library(tidyr)
library(ggplot2)
# Schemas of the intermediate tibbles used throughout the script:
#ref1:tbl
#  row col value
#
#ref_bs:list
#  $union:tbl
#    row col value.x value.y
#  $uniq_x_idx:tbl
#    row
#  $uniq_y_idx:tbl
#    row
#  $overlap_idx:tbl
#    row
#
#mcmp:tbl
#  row st_type st_value mtx_type
#
#tags:tbl
#  row tag_type tag_value app
#
#aly_overlap:tbl
#  row st_type st_value mtx_type tag_type tag_value app
# load data
# parse_mtx / preprocess_mtx are helpers defined earlier in the file (outside
# this view); preprocess_mtx presumably groups rows by column grp_col -- TODO
# confirm against the helper definition.
print("loading data ...")
ref1 <- parse_mtx(args$ref1)
ref2 <- parse_mtx(args$ref2)
alt1 <- parse_mtx(args$alt1)
alt2 <- parse_mtx(args$alt2)
ref1 <- preprocess_mtx(ref1, grp_col)
ref2 <- preprocess_mtx(ref2, grp_col)
alt1 <- preprocess_mtx(alt1, grp_col)
alt2 <- preprocess_mtx(alt2, grp_col)
write("", file = stdout())
# basic statistics
# basic_stat() (defined earlier, outside this view) joins the two matrices and
# returns the $union / $uniq_*_idx / $overlap_idx tables described above.
print("basic stat for ref mtx files ...")
ref_bs <- basic_stat(ref1, ref2)
write("", file = stdout())
print("basic stat for alt mtx files ...")
alt_bs <- basic_stat(alt1, alt2)
write("", file = stdout())
print(paste0("Query range is '", args$range, "'"))
write("", file = stdout())
# compare two mtx.
# Per-row statistics (cor/mae) are computed either on rows present in both
# matrices ("overlap") or on all rows of the union ("union").  mtx_type tags
# the source: "r" = ref pair, "a" = alt pair.
print("Comparing ref & alt mtx files ...")
if (args$range == "overlap") {
  mcmp <- rbind(
    cmp_mtx(ref_bs$union %>%
      semi_join(ref_bs$overlap_idx, by = "row"), ref1$ncol, "r"),
    cmp_mtx(alt_bs$union %>%
      semi_join(alt_bs$overlap_idx, by = "row"), alt1$ncol, "a")
  )
} else if (args$range == "union") {
  mcmp <- rbind(
    cmp_mtx(ref_bs$union, ref1$ncol, "r"),
    cmp_mtx(alt_bs$union, alt1$ncol, "a")
  )
} else {
  write("Error: invalid range!", file = stderr())
  quit("no", 5)
}
mcmp  # top-level expression: auto-printed when run with Rscript
mcmp_file <- paste0(args$outdir, "/mcmp.tsv")
write.table(mcmp, mcmp_file, quote = F, sep = "\t", row.names = F)
print(paste0("The mcmp file is saved to ", mcmp_file))
write("", file = stdout())
print("Total uniq rows: ")
# Denominator for all ratio tables: number of distinct rows per matrix type.
total_uniq <- mcmp %>%
  group_by(mtx_type) %>%
  distinct(row) %>%
  summarise(total_rows = n()) %>%
  ungroup()
total_uniq
write("", file = stdout())

# Shared helper for the cor/mae threshold sweeps (the two pipelines were
# duplicated line-for-line apart from the statistic name and the comparison
# direction).
#   flt_std: tibble of (st_flt, mtx_type) threshold combinations
#   st:      statistic name in mcmp$st_type ("cor" or "mae")
#   pred:    function(st_value, threshold) -> logical, selects passing rows
# Returns a tibble (st_flt, mtx_type, nrows, total_rows, row_ratio) with the
# same columns/order as the original duplicated code.
range_ratio <- function(flt_std, st, pred) {
  flt_std %>%
    group_by(st_flt) %>%
    group_modify(~ {
      mcmp %>%
        group_by(mtx_type) %>%
        filter(st_type == st & pred(st_value, .y$st_flt[1])) %>%
        distinct(row) %>%
        summarise(nrows = n()) %>%
        ungroup()
    }) %>%
    ungroup() %>%
    right_join(flt_std, by = c("mtx_type", "st_flt")) %>%
    mutate(nrows = ifelse(is.na(nrows), 0, nrows)) %>%   # thresholds with no passing rows
    full_join(total_uniq, by = "mtx_type") %>%
    mutate(row_ratio = nrows / total_rows)
}

print("The ratio of uniq rows in each range for cor:")
cor_flt_std <- tibble(
  st_flt = rep(c(0.99, 0.95, 0.9, 0.8), 2),
  mtx_type = rep(c("r", "a"), each = 4)
)
# Correlation: a row "passes" when its cor is at or above the threshold.
cor_range_ratio <- range_ratio(cor_flt_std, "cor", `>=`)
cor_range_ratio
cor_range_ratio_file <- paste0(args$outdir, "/cor_range_ratio.tsv")
write.table(cor_range_ratio, cor_range_ratio_file, quote = F, sep = "\t", row.names = F)
print(paste0("The cor_range_ratio is saved to ", cor_range_ratio_file))
write("", file = stdout())

print("The ratio of uniq rows in each range for mae:")
mae_flt_std <- tibble(
  st_flt = rep(c(0.01, 0.05, 0.1, 0.2), 2),
  mtx_type = rep(c("r", "a"), each = 4)
)
# MAE: a row "passes" when its error is strictly below the threshold.
mae_range_ratio <- range_ratio(mae_flt_std, "mae", `<`)
mae_range_ratio
mae_range_ratio_file <- paste0(args$outdir, "/mae_range_ratio.tsv")
write.table(mae_range_ratio, mae_range_ratio_file, quote = F, sep = "\t", row.names = F)
print(paste0("The mae_range_ratio is saved to ", mae_range_ratio_file))
write("", file = stdout())
# Visualize the comparison result: one figure per (statistic, figure type) pair.
for (st in stat_types) {
  pdata <- mcmp %>% filter(st_type == st)
  # Scalar choice: plain if/else instead of ifelse() (which is for vectors).
  st_name <- if (st == "cor") "Correlation" else "Mean Absolute Error"
  for (ft in fig_types) {
    # NOTE: the extension is hard-coded to .tiff, so the LZW branch of the
    # ggsave() call below always applies; the plain branch is kept in case
    # the extension is ever parameterized.
    fig_path <- paste0(args$outdir, "/", paste0(ft, "_grpcol_", grp_col, "_", st,
                                     "_", args$range, ".tiff"))
    if ("boxplot" == ft) {
      p <- pdata %>%
        ggplot(aes(x = mtx_type, y = st_value)) +
        geom_boxplot() +
        scale_x_discrete(labels = c("r" = "Ref", "a" = "Alt"),
                         limits = c("a", "r")) +
        labs(x = "Mtx Type", y = st_name)
    } else if ("density" == ft) {
      p <- pdata %>%
        ggplot(aes(x = st_value, color = mtx_type)) +
        geom_density(fill = "transparent") +
        scale_colour_discrete(labels = c("r" = "Ref", "a" = "Alt"),
                              limits = c("a", "r")) +
        labs(x = st_name, y = "Density", color = "Mtx Type")
    } else {
      # Bug fix: an unknown figure type previously either crashed on the first
      # iteration (`p` not yet defined) or silently re-saved the previous
      # plot under the new file name.  Warn and skip instead.
      write(paste0("Warning: unknown figure type '", ft, "', skipped."),
            file = stderr())
      next
    }
    if (! is.null(args$title)) {
      p <- p +
        labs(title = paste0("Comparison between the output results of ", args$name1, " and ", args$name2))
    }
    if (grepl("tiff?$", fig_path, perl = TRUE, ignore.case = TRUE)) {
      ggsave(fig_path, p, width = args$width, height = args$height, dpi = args$dpi, compress="lzw")
    } else {
      ggsave(fig_path, p, width = args$width, height = args$height, dpi = args$dpi)
    }
    msg <- paste0("the result of stat:", st, "; fig:", ft, "; is saved to ", fig_path)
    print(msg)
  }
}
write("", file = stdout())
# the ratio of SD = 0
# Rows whose correlation equals the COR_OF_SD0 sentinel (defined earlier,
# outside this view) -- i.e. rows where correlation could not be computed,
# presumably because a vector had zero standard deviation.  Reported as a
# fraction of all unique rows per matrix type.
print("The ratio of SD = 0")
sd0_ratio <- mcmp %>%
  filter(st_type == "cor" & st_value == COR_OF_SD0) %>%
  group_by(mtx_type) %>%
  distinct(row) %>%
  summarise(nrows = n()) %>%
  ungroup() %>%
  full_join(total_uniq, by = "mtx_type") %>%
  mutate(nrows = ifelse(is.na(nrows), 0, nrows)) %>%  # mtx types with no SD-0 rows
  mutate(row_ratio = nrows / total_rows)
sd0_ratio
sd0_ratio_file <- paste0(args$outdir, "/sd0_ratio.tsv")
write.table(sd0_ratio, sd0_ratio_file, quote = F, sep = "\t", row.names = F)
print(paste0("The sd0_ratio is saved to ", sd0_ratio_file))
write("", file = stdout())
|
a95454945709e9cfef0ef1f6c913996b4e68533c
|
08c4885facb8a0a40a995eaf989c6252fb8350b0
|
/man/create_utilmod.Rd
|
8e0e2c323fbbe7ead20d39d59f935717fd95b220
|
[] |
no_license
|
InnovationValueInitiative/IVI-NSCLC
|
a1aeefb23113fe88b578e1023d21fbf0a6488254
|
bafd1faa8b1887b91cd42af00b43cd5b41ee53cf
|
refs/heads/master
| 2020-03-24T08:23:52.095148
| 2019-07-23T04:21:11
| 2019-07-23T04:21:11
| 142,594,493
| 14
| 14
| null | 2019-07-23T04:21:12
| 2018-07-27T15:29:57
|
R
|
UTF-8
|
R
| false
| true
| 2,090
|
rd
|
create_utilmod.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilmod.R
\name{create_utilmod}
\alias{create_utilmod}
\title{Create utility model}
\usage{
create_utilmod(n = 100, struct, patients, ae_probs,
params_utility = iviNSCLC::params_utility, ae_duration = c("month",
"progression"))
}
\arguments{
\item{n}{The number of random observations of the parameters to draw.}
\item{struct}{A \code{\link{model_structure}} object.}
\item{patients}{A data table returned from \code{\link{create_patients}}.}
\item{ae_probs}{An "ae_probs" object as returned by \code{\link{ae_probs}}.}
\item{params_utility}{Parameter estimates for health state utilities and
adverse event disutilities in the same format as \code{\link{params_utility}}.}
\item{ae_duration}{Duration of time over with disutility from adverse events
should accrue. If \code{"month"}, then disutility accrues over the first
month of treatment; if \code{"progression"} then disutility accrues until
disease progression (i.e., over the entire duration of time spent in
stable disease).}
}
\value{
An object of class "StateVals" from the
\href{https://hesim-dev.github.io/hesim/}{hesim} package.
}
\description{
Create a model for health state utility given values of utility by
health state, treatment, and time sampled from a probability distribution.
}
\examples{
# Treatment sequences
txseq1 <- txseq(first = "erlotinib",
second = c("osimertinib", "PBDC"),
second_plus = c("PBDC + bevacizumab", "PBDC + bevacizumab"))
txseq2 <- txseq(first = "gefitinib",
second = c("osimertinib", "PBDC"),
second_plus = c("PBDC + bevacizumab", "PBDC + bevacizumab"))
txseqs <- txseq_list(seq1 = txseq1, seq2 = txseq2)
# Patient population
pats <- create_patients(n = 2)
## Model structure
struct <- model_structure(txseqs, dist = "weibull")
## Utility model
n_samples <- 2
ae_probs <- ae_probs(n = n_samples, struct = struct)
utilmod <- create_utilmod(n = n_samples, struct = struct, patients = pats,
ae_probs = ae_probs)
}
|
a3da27f201afae2c10ca5efac0284035400b475a
|
bb2cfa25c58bf3f4563acfa703c538742c54a17c
|
/Subset_ca1.R
|
e5c2b31f071ee48b90aec043b6f1d267b541176c
|
[] |
no_license
|
hirjus/Galku
|
876c24ffc2d665de097b90d0fcf4dc97b620b21d
|
be94314810c3c171373a7f2d4139728e811b70c1
|
refs/heads/master
| 2021-01-25T14:39:28.748578
| 2020-11-14T08:14:31
| 2020-11-14T08:14:31
| 123,716,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,912
|
r
|
Subset_ca1.R
|
# Subset_ca1.R 7.9.20
#
# Subset correspondence analysis (CA) experiments on the "mother working"
# item (Q1b) crossed with country/sex/age groups (maaga).  Relies on objects
# created elsewhere (ISSP2012esim2.dat, maagaTab1) and on the `ca` package.
# (Original comments were in Finnish; translated to English.  Runtime plot
# titles/subtitles are left in Finnish unchanged.)
#
# Data: G1_4_Calaaj1.Rmd
#
# Data
#
# ISSP2012esim2.dat
# spCAmaaga1: maaga CA object (with supplementary country points)
# maagaTab1: table with the maaga rows and the countries as supplementary points
maagaTab1
# Code chunk subsetCA-1
X11()
maagaCA2sub1 <- ca(~maaga + Q1b,ISSP2012esim2.dat, subsetrow = 1:24)
plot(maagaCA2sub1, main = "Äiti töissä: ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta - rivit 1-24 (subset ca")
# Problem 1: how to get the country rows conveniently?  In this case like this:
# maagaTab1
# The last rows of the table contain the country profiles as frequencies;
# the maaga rows are in the same order: six female and six male age groups.
# ISSP2012esim2.dat %>% tableX(maaga, Q1b)
#
# BE 191 451 438 552 381
# BG 118 395 205 190 13
# DE 165 375 198 538 438
# DK 70 238 152 232 696
# FI 47 188 149 423 303
# HU 219 288 225 190 75
#
# BE 1-12, BG 13-24, DE 25-36, DK 37-48, FI 49-60, HU 61-72
# Problem 2: subsetrow -- solved by using a character variable (9.9.20)
# country points suprow = 73:78 work
str(maagaTab1)
typeof(maagaTab1)#integer
class(maagaTab1) # matrix
attributes(maagaTab1)
rownames(maagaTab1)
# Does the ca package automatically handle the "scaling" of supplementary
# points in subset CA?  The mean of the column points is at the origin, but
# the mean of the row subset is not, and this should be corrected.
maagaCA2sub2 <- ca(~maaga + Q1b,ISSP2012esim2.dat,subsetrow = 25:60)
plot(maagaCA2sub2, main = "Äiti töissä: ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta - rivit 25-60 (subset ca)")
# country points suprow = 73:78 work
spCAmaagasub1 <- ca(maagaTab1[,1:5], suprow = 73:78,subsetrow = 25:60 )
par(cex = 0.5)
plot(spCAmaagasub1, main = "Äiti töissä: ikäluokka ja sukupuoli maittain 2",
     sub = "symmetrinen kartta, maat täydentävinä pisteinä, cex=0.75"
     )
## can the subsetrow setting be given in two separate "pieces"?  WORKS!
# osajoukot1 <- c(13:14,61:72)
#spCAmaagasub2 <- ca(maagaTab1[,1:5], suprow = 73:78,subsetrow = osajoukot1)
#par(cex = 0.5)
#plot(spCAmaagasub2, main = "Äiti töissä: ikäluokka ja sukupuoli maittain 2",
#     sub = "symmetrinen kartta, osajoukko-ca, maat täydentävinä pisteinä, cex=0.05"
#     )
# One cannot (maybe?) directly add supplementary points from the same variable
# by which the data is restricted to a subset (8.9.20)
# BE 1-12, BG 13-24, DE 25-36, DK 37-48, FI 49-60, HU 61-72
# Row-index subsets per country (see the index map above).
BGHUsubset <- c(13:14,61:72)
BEDEDKFIsubset <- c(1:12, 25:36, 37:48, 49:60)
DEDKFIsubset <- c(25:36, 37:48, 49:60)
BEBGHUsubset <- c(1:12,13:14,61:72)
spCAmaagasub3 <- ca(maagaTab1[,1:5], subsetrow = BGHUsubset)
par(cex = 0.6)
plot(spCAmaagasub3, main = "Äiti töissä (Q1b): ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta, osajoukko-ca, cex=0.06"
     )
spCAmaagasub4 <- ca(maagaTab1[,1:5], subsetrow = BEDEDKFIsubset)
par(cex = 0.6)
plot(spCAmaagasub4,
     arrows = c(FALSE, TRUE),
     main = "Äiti töissä (Q1b): ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta, osajoukko-ca, cex=0.06"
     )
spCAmaagasub5 <- ca(maagaTab1[,1:5], subsetrow = DEDKFIsubset)
par(cex = 0.6)
plot(spCAmaagasub5, main = "Äiti töissä (Q1b): ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta, osajoukko-ca, cex=0.06"
     )
spCAmaagasub6 <- ca(maagaTab1[,1:5], subsetrow = BEBGHUsubset)
par(cex = 0.6)
plot(spCAmaagasub6, main = "Äiti töissä (Q1b): ikäluokka ja sukupuoli maittain",
     sub = "symmetrinen kartta, osajoukko-ca, cex=0.06"
     )
# This does not work (8.9.20)
spCAmaagasub7 <- ca(maagaTab1[,1:5], subsetrow = BEBGHUsubset)
par(cex = 0.7)
plot(spCAmaagasub7, map = "rowgreen",
     contrib = c("relative", "relative"),
     mass = c(TRUE,TRUE),
     arrows = c(FALSE, TRUE),
     main = "Äiti töissä (Q1b): ikäluokka ja sukupuoli maittain",
     sub = "rowgreen (relative), osajoukko-ca, cex=0.07"
     )
|
3fe76414305ef4e7841a469b2cb0154241906f2f
|
04947ec352b9f31ae8b58182a1ee585f23c1ee69
|
/Scripts/04_Plots.R
|
eb5334c0f64913c9607587c6b24abd26da52cd54
|
[] |
no_license
|
lessardlab/GlobalPolyMorp
|
464e4ae88d5212e651c08cf01e0ee42028a04d02
|
7f7d97c0f03ba347d6e5c6818a10b60a411049db
|
refs/heads/main
| 2023-04-10T20:15:17.693806
| 2021-08-26T22:10:39
| 2021-08-26T22:10:39
| 300,376,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,340
|
r
|
04_Plots.R
|
### GPM Figures
## Gabriel Muñoz _ dic 2020.
## Please note that functions to produce plots feed on the "occ" dataset, you need to have that one loaded first.
# source("GM_Scripts/1_PrepData.R") # run this line if you haven't gone through the 1. Script.
#source("4_Functions.R")
# Load the occurrence table with per-site density; keep complete rows only.
# FALSE spelled out (T/F are reassignable variables, not reserved words).
occ <- read.csv("Data/occ_withsitedensity.csv", stringsAsFactors = FALSE)
occ <- occ[complete.cases(occ), ]
# Palette reference:
# https://www.r-graph-gallery.com/38-rcolorbrewers-palettes.html
library(RColorBrewer)
par(mar = c(3, 4, 2, 2))
display.brewer.all()
# Global summary maps, one PDF per statistic.  makeWorldPlot() is defined in
# the sourced functions script (not visible here); its second argument selects
# the plotted statistic, colbin the number of color bins, then palette name,
# plot on/off and palette reversal.
# make the global proportion plot
pdf("GlobalMapProp.pdf", width = 6, height = 4, pointsize = 10)
par(mar = c(2,2,2,2))
makeWorldPlot(occ, "prop", colbin = 11, "RdYlBu", plot = T, rev = T)
dev.off()
pdf("GlobalMapDensity.pdf", width = 6, height = 4, pointsize = 15)
par(mar = c(2,2,2,2))
makeWorldPlot(occ, "density", colbin = 9, "YlOrRd", plot = T, rev = F)
dev.off()
pdf("GlobalMapPoly.pdf", width = 6, height = 4, pointsize = 15)
par(mar = c(2,2,2,2))
makeWorldPlot(occ, "poly", colbin = 11,"RdYlGn" , plot = T, rev = F)
dev.off()
pdf("GlobalMapRichness.pdf", width = 6, height = 4, pointsize = 15)
par(mar = c(2,2,2,2))
makeWorldPlot(occ, "richness", colbin = 9, "Greens", plot = T, rev = F)
dev.off()
pdf("GlobalMapEffort.pdf", width = 6, height = 4, pointsize = 15)
par(mar = c(2,2,2,2))
makeWorldPlot(occ, "effort", colbin = 11, "RdGy", plot = T, rev = F)
dev.off()
# Make the partial plots
## Myrmicinae
# NOTE(review): this first map filters occ on genus == "Camponotus" although
# the section header and output filename say Myrmicinae, and it plots "prop"
# while the Formicinae counterpart plots "ratio" -- confirm which subset and
# statistic are intended.
png("FiguresPaper/GlobalMapRatio_Myrm.png", width = 900, height = 450, pointsize = 10)
par(mar = c(2,2,2,2))
makeWorldPlot(droplevels(occ[occ$genus == "Camponotus",]), "prop", "Spectral", colbin = 9, plot = T, rev = T)
dev.off()
png("FiguresPaper/GlobalMapDensity_Myr.png", width = 900, height = 450, pointsize = 10)
par(mar = c(2,2,2,2))
makeWorldPlot(droplevels(occ[occ$subFamily == "Myrmicinae",]), "density", "YlOrRd", plot = T, rev = F)
dev.off()
## Formicinae
png("FiguresPaper/GlobalMapRatio_Form.png", width = 900, height = 450, pointsize = 10)
par(mar = c(2,2,2,2))
makeWorldPlot(droplevels(occ[occ$subFamily == "Formicinae",]), "ratio", "Spectral", plot = T, rev = T)
dev.off()
png("FiguresPaper/GlobalMapDensity_Form.png", width = 900, height = 450, pointsize = 10)
par(mar = c(2,2,2,2))
makeWorldPlot(droplevels(occ[occ$subFamily == "Formicinae",]), "density", "YlOrRd", plot = T, rev = F)
dev.off()
###############
##############################
## Plot the global distribution of ant polymorphism in climatic space
# dataframe with all species
# MAT1/MAP1 are aggregates built elsewhere -- presumably mean annual
# temperature/precipitation per grid cell (TODO confirm in the prep script).
# `f()` is the external color-binning helper used for point colors.
mt <- makeWorldPlot(occ, "prop", plot = F)
mt <- data.frame(mt,makeWorldPlot(occ, "density", plot = F)[c("dens", "PenDens")])
mt$Temp <- MAT1$x[match(rownames(mt), stringr::str_replace( MAT1$Group.1, "_", "."))]
mt$Prec <- MAP1$x[match(rownames(mt), stringr::str_replace( MAP1$Group.1, "_", "."))]
mt$MAT_round <- round(mt$MAT.point, 2)
mt$MAP_round <- round(mt$MAP.point,2)
# Median statistic per rounded (temperature, precipitation) bin.
agg_mt <- aggregate(mt$prop, list(mt$MAT_round, mt$MAP_round), median)
agg_mt_dens <- aggregate(mt$dens, list(mt$MAT_round, mt$MAP_round), median)
agg_mt_Pdens <- aggregate(mt$PenDens, list(mt$MAT_round, mt$MAP_round), median)
###############
par(las = 1, oma = c(2,2,2,2))
### proportion poly
# Background: all climate cells in grey; overlay: per-bin median proportion.
plot(MAP1$x~MAT1$x, cex = 4,
     xaxt = "n",
     yaxt = "n",
     col = "grey94", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
points(agg_mt$Group.2~ agg_mt$Group.1, cex = 4,
       col = scales::alpha(f(round(agg_mt$x), 11, "RdYlBu", F),1),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
# Axis labels back-transform the standardized climate axes to original units
# (offsets/scales hard-coded from the standardization done elsewhere).
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
### density
plot(MAP1$x~MAT1$x, cex = 5,
     xaxt = "n",
     yaxt = "n",
     col = "grey90", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
points(agg_mt_Pdens$Group.2[agg_mt$x>0]~ agg_mt_Pdens$Group.1[agg_mt$x>0], cex = 5,
       col = scales::alpha(f(log1p(agg_mt_Pdens$x[agg_mt_Pdens$x>0]),9, "YlOrRd", T),1),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
################
## Myrmicinae
#################
# dataframe with all species
# Same climatic-space construction as above, restricted to Myrmicinae.
mt_myr <- makeWorldPlot(occ[occ$subFamily == "Myrmicinae",], "ratio", "Spectral", plot = F, rev = T)
mt_myr <- data.frame(mt_myr,makeWorldPlot(occ[occ$subFamily == "Myrmicinae",], plotType = "density", plot = F, rev = T)[c("dens", "PenDens")])
mt_myr$Temp <- MAT1$x[match(rownames(mt_myr), stringr::str_replace( MAT1$Group.1, "_", "."))]
mt_myr$Prec <- MAP1$x[match(rownames(mt_myr), stringr::str_replace( MAP1$Group.1, "_", "."))]
mt_myr$MAT_round <- round(mt_myr$MAT.point, 2)
mt_myr$MAP_round <- round(mt_myr$MAP.point, 2)
agg_mt_myr <- aggregate(mt_myr$rat, list(mt_myr$MAT_round, mt_myr$MAP_round), median)
agg_mt_dens_myr <- aggregate(mt_myr$dens, list(mt_myr$MAT_round, mt_myr$MAP_round), median)
agg_mt_Pdens_myr <- aggregate(mt_myr$PenDens, list(mt_myr$MAT_round, mt_myr$MAP_round), median)
###############
par(las = 1, oma = c(2,2,2,2))
### ratio
plot(MAP1$x~MAT1$x, cex = 4,
     xaxt = "n",
     yaxt = "n",
     col = "grey90", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
points(agg_mt_myr$Group.2~ agg_mt_myr$Group.1, cex = 4,
       col = scales::alpha(f(log1p(round(agg_mt_myr$x)), 6, "Spectral", F),0.5),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
### density
plot(MAP1$x~MAT1$x, cex = 4,
     xaxt = "n",
     yaxt = "n",
     col = "grey90", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
# NOTE(review): the rows here are subset with agg_mt$x>0 (the ALL-species
# aggregate) while the colors use agg_mt_Pdens_myr$x -- the two aggregates
# need not have the same length/order, so this looks like an index mismatch;
# confirm whether the myr-specific aggregate was intended in both places.
points(agg_mt_Pdens_myr$Group.2[agg_mt$x>0]~ agg_mt_Pdens_myr$Group.1[agg_mt$x>0], cex = 3,
       col = scales::alpha(f((log1p(agg_mt_Pdens_myr$x[agg_mt$x>0])), 6, "YlOrRd", T),0.6),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
################
## Formycinae
#################
# dataframe with all species
# Same climatic-space construction as above, restricted to Formicinae.
mt_for <- makeWorldPlot(occ[occ$subFamily == "Formicinae",], "ratio", "Spectral", plot = F, rev = T)
mt_for <- data.frame(mt_for,makeWorldPlot(occ[occ$subFamily == "Formicinae",], plotType = "density", plot = F, rev = T)[c("dens", "PenDens")])
mt_for$Temp <- MAT1$x[match(rownames(mt_for), stringr::str_replace( MAT1$Group.1, "_", "."))]
mt_for$Prec <- MAP1$x[match(rownames(mt_for), stringr::str_replace( MAP1$Group.1, "_", "."))]
mt_for$MAT_round <- round(mt_for$MAT.point, 2)
mt_for$MAP_round <- round(mt_for$MAP.point, 2)
agg_mt_for <- aggregate(mt_for$rat, list(mt_for$MAT_round, mt_for$MAP_round), median)
agg_mt_dens_for <- aggregate(mt_for$dens, list(mt_for$MAT_round, mt_for$MAP_round), median)
agg_mt_Pdens_for <- aggregate(mt_for$PenDens, list(mt_for$MAT_round, mt_for$MAP_round), median)
###############
par(las = 1, oma = c(2,2,2,2))
### ratio
plot(MAP1$x~MAT1$x, cex = 4,
     xaxt = "n",
     yaxt = "n",
     col = "grey90", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
points(agg_mt_for$Group.2~ agg_mt_for$Group.1, cex = 4,
       col = scales::alpha(f(round(agg_mt_for$x), 6, "Spectral", F),0.5),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
### density
plot(MAP1$x~MAT1$x, cex = 4,
     xaxt = "n",
     yaxt = "n",
     col = "grey90", frame = F,
     xlab = "",
     ylab = "",
     pch = ".",
     ylim = c(-1,6))
# NOTE(review): as in the Myrmicinae block, the subset index agg_mt$x>0 comes
# from the all-species aggregate but is applied to the Formicinae aggregate --
# confirm this is intended (lengths/ordering may not match).
points(agg_mt_Pdens_for$Group.2[agg_mt$x>0]~ agg_mt_Pdens_for$Group.1[agg_mt$x>0], cex = 4,
       col = scales::alpha(f((log1p(agg_mt_Pdens_for$x[agg_mt$x>0])), 6, "YlOrRd", T),0.5),
       #col = ifelse(mt$rat >1, "yellow", "red"),
       pch = ".")
abline(h = 0, v = 0, col = "grey50", lty = 2)
axis(1,seq(-2,1.5, 0.5), labels = c(-3.82-12.07*4, -3.82-12.07*3,-3.82-12.07*2,-3.82-12.07,-3.82,-3.82+12.07,-3.82+(12.07*2),-3.82+(12.07*3)))
axis(2,seq(-1,6, 1), labels = c(0,550.05,550.05+652.48,550.05+(652.48*2),550.05+(652.48*3),550.05+(652.48*4),550.05+(652.48*5),550.05+(652.48*6)))
#####################
# Polymorphic vs monomorphic richness (log-log), one panel per group; the
# 1:1 line marks equal richness of both worker types.
head(mt)
par(mfrow = c(1,3), mar = c(4,4,2,2))
plot(log(mt$poly1),log(mt$poly0),
     col = f(mt$rat, 6, "Spectral"),
     xlim = c(0,7), ylim = c(0,7),
     pch =15, main = "All species",
     cex.lab = 1.5,
     ylab = "Monomorphic richness (Log)",
     xlab = "Polymorphic richness (Log)",
     frame = F)
abline(a=0,b=1)
plot(log(mt_myr$poly1),log(mt_myr$poly0),
     col = f(mt_myr$rat, 6, "Spectral"),
     xlim = c(0,7), ylim = c(0,7),
     cex.lab = 1.5,
     pch =15, main = "Myrmicinae",
     ylab = "Monomorphic richness (Log)",
     xlab = "Polymorphic richness (Log)",
     frame = F)
abline(a=0,b=1)
plot(log(mt_for$poly1),log(mt_for$poly0),
     col = f(mt_for$rat, 6, "Spectral"),
     xlim = c(0,7), ylim = c(0,7),
     cex.lab = 1.5,
     pch =15, main = "Formicinae",
     ylab = "Monomorphic richness (Log)",
     xlab = "Polymorphic richness (Log)",
     frame = F)
abline(a=0,b=1)
######## Richness vs environment tradeoffs
# PCA of the two climate rasters; the first two principal-component layers are
# used as map backgrounds with poly/monomorphic occurrence points on top.
library(RStoolbox)
EnvStack <- raster::stack(MAP, MAT)
PCAras <- RStoolbox::rasterPCA(EnvStack)
dev.off()
##########
# ALL SPECIES
##########
# 2x2 panels: PC2 background (top row) and PC1 background (bottom row), each
# split into polymorphic (rat>0) vs monomorphic (rat<0) cells; point size
# scales with log1p(PenDens).  NOTE(review): cells with rat == 0 appear in
# neither panel -- confirm that is intended.
png("FiguresPaper/EnvMapsALL.png", width = 1000, height = 1000, pointsize = 10)
par(mfrow = c(2,2), mar = c(1,1,1,1), oma = c(1,1,1,1))
plot(log(PCAras$map[[2]]+50), box=F, axes = F,
     legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt[mt$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(log(PCAras$map[[2]]+50), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt[mt$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt[mt$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt[mt$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
dev.off()
##########
# Myrmicinae
##########
# Same four panels for Myrmicinae only.
png("FiguresPaper/EnvMapsMyr.png", width = 1000, height = 1000, pointsize = 10)
par(mfrow = c(2,2), mar = c(1,1,1,1), oma = c(1,1,1,1))
plot(log(PCAras$map[[2]]+50), box=F, axes = F,
     legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt_myr[mt_myr$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(log(PCAras$map[[2]]+50), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt_myr[mt_myr$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt_myr[mt_myr$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt_myr[mt_myr$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
dev.off()
##########
# Formycinae
##########
# Same four panels for Formicinae only.
png("FiguresPaper/EnvMapsFor.png", width = 1000, height = 1000, pointsize = 10)
par(mfrow = c(2,2), mar = c(1,1,1,1), oma = c(1,1,1,1))
plot(log(PCAras$map[[2]]+50), box=F, axes = F,
     legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt_for[mt_for$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(log(PCAras$map[[2]]+50), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt_for[mt_for$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Polymorphic species")
points(X2~X1,
       data = mt_for[mt_for$rat>0,], pch = ".",
       cex = log1p(PenDens)*2)
plot(-log(PCAras$map[[1]]+1000), box=F, axes = F, legend = F, main = "Monomorphic species")
points(X2~X1,
       data = mt_for[mt_for$rat<0,], pch = ".",
       cex = log1p(PenDens)*2)
dev.off()
|
5c6d3ada9bb79daf5b0e871ce17aa5354b22836d
|
1c3daff070c084ca947dd383378b7b1d05e11b1d
|
/cachematrix.R
|
a2940ba69b7ffc917fc9fcd8625995e56e434a72
|
[] |
no_license
|
srijan-nayak/ProgrammingAssignment2
|
bf1e0d9d6e578c18517c99193fd74b1822d43e92
|
70e34b776489a5f4ebb9b57ea6df846ce5c5ae96
|
refs/heads/master
| 2022-11-13T06:22:31.081087
| 2020-07-04T07:42:35
| 2020-07-04T07:42:35
| 277,044,854
| 0
| 0
| null | 2020-07-04T07:42:37
| 2020-07-04T05:27:33
|
R
|
UTF-8
|
R
| false
| false
| 1,265
|
r
|
cachematrix.R
|
# makeCacheMatrix(x) wraps a matrix in a closure-based "cache matrix" object:
# a list of accessor functions sharing one environment that holds the matrix
# and its (lazily computed) inverse.  cacheSolve() uses this object to avoid
# recomputing the inverse.
#
# Returned accessors:
#   set(y)        -- replace the stored matrix and invalidate the cache
#   get()         -- return the stored matrix
#   setInv(inverse) -- store a computed inverse
#   getInv()      -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  list(
    set = function(y) {
      # <<- writes into the enclosing environment shared by all accessors;
      # any previously cached inverse is stale, so drop it.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInv = function(inverse) cached_inverse <<- inverse,
    getInv = function() cached_inverse
  )
}
# cacheSolve(x, ...) returns the inverse of the matrix wrapped in the cache
# matrix object `x` (as produced by makeCacheMatrix).  On a cache hit it
# emits "getting cached inverse" and returns the stored value; on a miss it
# computes the inverse with solve() (extra arguments forwarded), stores it in
# the object, and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, return.
    cached <- solve(x$get(), ...)
    x$setInv(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
|
a2f76fb7e9f6a7b3a9742d8af1bd4fb1171378b3
|
01088d692f73ceacd86f3bc63b0ae06faf60f75c
|
/man/bsspline2.Rd
|
b79ec4d73c2a2057eac01d6dc0a255eb6f72bf61
|
[] |
no_license
|
cran/ciuupi2
|
fc0c9b303e0a2808be68a9b36262acb6cfdde48b
|
192af8b6a6ba3b56f710af973b12afa636b3f7a1
|
refs/heads/master
| 2023-03-22T06:03:02.642259
| 2021-03-11T14:40:02
| 2021-03-11T14:40:02
| 346,926,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,598
|
rd
|
bsspline2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bsspline2.R
\name{bsspline2}
\alias{bsspline2}
\title{Evaluate the functions b and s at x}
\usage{
bsspline2(x, bsvec, alpha, m, natural = 1)
}
\arguments{
\item{x}{A value or vector of values at which the functions b and s are to be
evaluated}
\item{bsvec}{The vector \eqn{(b(d/6), b(2d/6), \dots, b(5d/6), s(0), s(d/6),
\dots, s(5d/6))} computed using \code{bsciuupi2}}
\item{alpha}{The minimum coverage probability is 1 - \code{alpha}}
\item{m}{Degrees of freedom \code{n - p}}
\item{natural}{Equal to 1 (default) if the b and s functions are evaluated by
natural cubic spline interpolation or 0 if evaluated by clamped cubic spline
interpolation. This parameter must take the same value as that used in
\code{bsciuupi2}}
}
\value{
A data frame containing \code{x} and the corresponding values of the
functions b and s.
}
\description{
Evaluate the functions b and s, as specified by the vector
(b(d/6),b(2d/6),...,b(5d/6),s(0),s(d/6),...,s(5d/6)) computed using
\code{bsciuupi2}, \code{alpha}, \code{m}
and \code{natural} at \code{x}.
}
\details{
The function b is an odd continuous function and the function s is an
even continuous function. In addition, b(x)=0 and s(x) is equal to the
\eqn{1 - \alpha/2} quantile of the \eqn{t} distribution with \code{m}
degrees of freedom for all |x| greater than or equal to d, where d is a
sufficiently large positive number (chosen by the function
\code{bsciuupi2}). The values of these functions in the interval
\eqn{[-d,d]} are specified by the vector \eqn{(b(d/6), b(2d/6), \dots,
b(5d/6), s(0), s(d/6), \dots, s(5d/6))} as follows. By assumption,
\eqn{b(0)=0} and \eqn{b(-i)=-b(i)} and \eqn{s(-i)=s(i)} for
\eqn{i=d/6,...,d}. The values of \eqn{b(x)} and \eqn{s(x)} for any \eqn{x}
in the interval \eqn{[-d,d]} are found using cubic spline interpolation for
the given values of \eqn{b(i)} and \eqn{s(i)} for
\eqn{i=-d,-5d/6,...,0,d/6,...,5d/6,d}. The choices of \eqn{d} for \eqn{m =
1, 2} and \eqn{>2} are \eqn{d=20, 10} and \eqn{6} respectively.
The vector \eqn{(b(d/6), b(2d/6), \dots, b(5d/6), s(0), s(d/6), \dots,
s(5d/6))} that specifies the Kabaila and Giri (2009) confidence interval that
utilizes uncertain prior information (CIUUPI), with minimum coverage
probability \code{1 - alpha}, is obtained using
\code{\link{bsciuupi2}}.
In the examples, we continue with the same 2 x 2 factorial example described
in the documentation for \code{\link{find_rho}}.
}
\examples{
alpha <- 0.05
m <- 8
# Find the vector (b(d/6),...,b(5d/6),s(0),...,s(5d/6)) that specifies the
# Kabaila & Giri (2009) CIUUPI for the first definition of the
# scaled expected length (default) (takes about 30 mins to run):
\donttest{
bsvec <- bsciuupi2(alpha, m, rho = -0.7071068)
}
# The result bsvec is (to 7 decimal places) the following:
bsvec <- c(-0.0287487, -0.2151595, -0.3430403, -0.3125889, -0.0852146,
1.9795390, 2.0665414, 2.3984471, 2.6460159, 2.6170066, 2.3925494)
# Graph the functions b and s
x <- seq(0, 8, by = 0.1)
splineval <- bsspline2(x, bsvec, alpha, m)
plot(x, splineval[, 2], type = "l", main = "b function",
ylab = " ", las = 1, lwd = 2, xaxs = "i", col = "blue")
plot(x, splineval[, 3], type = "l", main = "s function",
ylab = " ", las = 1, lwd = 2, xaxs = "i", col = "blue")
}
\references{
Kabaila, P. and Giri, R. (2009). Confidence intervals in
regression utilizing prior information. Journal of Statistical Planning and
Inference, 139, 3419-3429.
}
\seealso{
\code{\link{find_rho}}, \code{\link{bsciuupi2}}
}
|
04f6c6d720b98eb60a19c0a5e93d9df097b1fecc
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/rstudioserver_analysis/BM_all_merged/9-downstream_GLMPCA_BM_KeepMorePlates.R
|
ebc694af80803572c4af8a39fa143c87b5638af6
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,016
|
r
|
9-downstream_GLMPCA_BM_KeepMorePlates.R
|
# Jake Yeung
# Date of Creation: 2020-02-04
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/9-downstream_GLMPCA_BM_KeepMorePlates.R
# description
# Downstream analysis of GLMPCA output for bone-marrow sortChIC data
# ("KeepMorePlates" runs): per histone mark, loads the GLMPCA result and the
# matching LDA model and computes intrachromosomal variance (loop continues
# below this header).
jstart <- Sys.time()  # record wall-clock start, presumably to time the run
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(glmpca)
library(scchicFuncs)
library(topicmodels)
library(hash)
library(igraph)
library(umap)
# jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3"); names(jmarks) <- jmarks
# Histone marks processed in this run (H3K4me1 excluded here); named so that
# iteration keeps mark names attached to results.
jmarks <- c("H3K4me3", "H3K27me3", "H3K9me3"); names(jmarks) <- jmarks
# jmarks <- jmarks[[3]]
# jmark <- "H3K4me1"
# infs <- ""
jsuffix <- "KeepMorePlates"
# jdates <- c("2020-02-02", "2020-02-04", "2020-02-04", "2020-02-04")
# names(jdates) <- jmarks
# Single analysis date used to assemble input/output file names below.
jdate <- c("2020-02-04")
for (jmark in jmarks){
print(jmark)
niter <- 1000
topn <- 150
jbins.keep <- 500
# calculating var raw
binsize <- 50000
mergesize <- 1000
bigbinsize <- 50000 * mergesize
outdir <- "/home/jyeung/data/from_rstudioserver/scchic/rdata_robjs/GLMPCA_outputs"
pdfdir <- "/home/jyeung/data/from_rstudioserver/scchic/rdata_robjs/GLMPCA_outputs.downstream"
dir.create(pdfdir)
outbase <- paste0("PZ_", jmark, ".", jsuffix, ".GLMPCA_var_correction.150.", jdate, ".binskeep_1000.devmode")
# outbase <- paste0("PZ_", jmark, ".GLMPCA_var_correction.", topn, ".", Sys.Date(), ".binskeep_", jbins.keep, ".devmode")
outpdf <- file.path(pdfdir, paste0(outbase, ".pdf"))
# outf <- "/home/jyeung/data/from_rstudioserver/scchic/rdata_robjs/GLMPCA_outputs/PZ_H3K4me3.KeepMorePlates.GLMPCA_var_correction.150.2020-02-04.binskeep_1000.devmode.RData"
outf <- file.path(outdir, paste0(outbase, ".RData"))
assertthat::assert_that(file.exists(outf))
load(outf, v=T)
# inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-31.var_filt/lda_outputs.BM_", jmark, ".varcutoff_0.3.platesRemoved.SmoothBinSize_1000.AllMerged.K-30.binarize.FALSE/ldaOut.BM_", jmark, ".varcutoff_0.3.platesRemoved.SmoothBinSize_1000.AllMerged.K-30.Robj")
# inf <- paste0("/home/jyeung/data/from_rstudioserver/scchic/rdata_robjs/GLMPCA_outputs/PZ_", jmark, ".", jsuffix, ".GLMPCA_var_correction.150.2020-02-04.binskeep_1000.devmode.RData")
inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-31.var_filt_keepPlates/lda_outputs.BM_", jmark, ".varcutoff_0.3.KeepAllPlates.K-30.binarize.FALSE/ldaOut.BM_", jmark, ".varcutoff_0.3.KeepAllPlates.K-30.Robj")
assertthat::assert_that(file.exists(inf))
load(inf, v=T)
tm.result <- posterior(out.lda)
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19)), sep = "")
dat.var <- CalculateVarAll(dat.impute.log, jchromos)
# Plot output -------------------------------------------------------------
topics.mat <- glm.out$factors
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
umap.out <- umap(topics.mat, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
dat.umap.long <- DoLouvain(topics.mat, jsettings, dat.umap.long)
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
dat.umap.long <- dat.umap.long %>%
rowwise() %>%
mutate(plate = ClipLast(as.character(cell), jsep = "_")) %>%
left_join(., dat.var)
m.louv <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
m.louv.plate <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette) +
facet_wrap(~plate)
m.var <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1)
m.var.plate <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) +
facet_wrap(~plate)
pdf(outpdf,useDingbats = FALSE)
print(m.louv)
print(m.louv.plate)
print(m.var)
print(m.var.plate)
dev.off()
}
print(Sys.time() - jstart)
|
707d47249308875afccf608c128228aaac92e830
|
08172e1a0e8ed79c05c1cf704cacc00530732ef8
|
/thesis/plots/file_size.R
|
38f39020234d7f328e7015a6e1d63207083b10b3
|
[] |
no_license
|
olafurpg/thesis
|
84d53620d2a51287440e4cff33285d16434c852c
|
27672678f858ef1b936cc660cc902e75434eabfa
|
refs/heads/master
| 2020-12-29T02:38:27.231554
| 2017-09-22T13:10:58
| 2017-09-22T13:10:58
| 51,314,768
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
file_size.R
|
# Summarise the distribution of source-file sizes for the thesis.
# Reads the raw sizes from data/file_sizes.dat and prints the median,
# 75th, 90th and 99th percentiles (the script is run for this console
# output; xtable is configured for later LaTeX export).
library(xtable, lib.loc = "plots")

# xtable output options: no floating environment, no timestamp comment.
options(xtable.floating = FALSE)
options(xtable.timestamp = "")

file_sizes <- read.table("data/file_sizes.dat")
size_quantiles <- quantile(file_sizes$V1, c(0.50, 0.75, 0.90, 0.99))

# Top-level expression: auto-prints the selected quantiles.
size_quantiles
|
1a7e8d3aaa62ee2f724aee1b0f317f5bfc3523f2
|
63a8b105407d6d2a25b25df44637a730d63168e9
|
/R/utils/85pct_rule.R
|
b9db51b08ceb12382a298fcde5589365fde31e29
|
[] |
no_license
|
jmhewitt/ctds_dives_jabes
|
5857b0e37a01fd80e77b099bf46ef5cf63147b36
|
30de1a1d3b825cf2f07261f40db64b3558053976
|
refs/heads/main
| 2023-01-03T17:27:16.503272
| 2020-10-30T16:27:07
| 2020-10-30T16:27:07
| 308,685,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
85pct_rule.R
|
# create a vector to label dive stages
# Label each observation of a dive with its stage.
#
# Stage 1 (descent) covers observations before breaks[1], stage 2
# (bottom) covers breaks[1] .. breaks[2] - 1, and stage 3 (ascent)
# covers breaks[2] .. length.out.
#
# @param length.out Total number of observations to label.
# @param breaks Length-2 vector of 1-based indices at which stage 2 and
#   stage 3 begin, respectively.
# @return Integer vector of length `length.out` with values in 1:3.
stagevec = function (length.out, breaks) {
  n.descent <- breaks[1] - 1
  n.bottom <- breaks[2] - breaks[1]
  n.ascent <- length.out + 1 - breaks[2]
  rep(1:3, times = c(n.descent, n.bottom, n.ascent))
}
# use 85% max depth rule to determine time in stages
times.stages = function(dives.obs) {
do.call(rbind, lapply(dives.obs, function(d) {
# extract depths
depths = d$depth.bins$center[d$dive$depths]
# find max depth, and stage threshold
max.depth = max(depths)
stage.thresh = .85 * max.depth
# compute observed stage vector
bottom.range = range(which(depths >= stage.thresh))
if(length(unique(bottom.range))==1) {
bottom.range[2] = bottom.range[1] + 1
}
stages = stagevec(length.out = length(depths), breaks = bottom.range)
# linearly interpolate to find stage transition times
t.inds = which(diff(stages)==1)
t.stages = sapply(t.inds, function(ind) {
# get start and end times/depths
d0 = depths[ind]
t0 = d$dive$times[ind]
df = depths[ind+1]
tf = d$dive$times[ind+1]
# compute time at which stage.thresh is crossed
if(df==d0) {
mean(c(t0,tf))
} else {
(stage.thresh - d0)/(df-d0) * (tf-t0) + t0
}
})
# return results
data.frame(sub.time.min = t.stages[1]/60,
bottom.time.min = diff(t.stages)/60)
}))
}
|
bc8a529c95217185b8fade6a6670ba322b940249
|
9f85f5d13c66e79bd470806b054404242bbe58cc
|
/man/unzip_process.Rd
|
58eef4bd81af016152a0bfc062ca076c6538c205
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
r-lib/zip
|
f7749877eabdf9cf3844dbd0dacf4972c11cbc56
|
70f0b1518304652ed04a7e7a991396e923030bb7
|
refs/heads/main
| 2023-05-11T15:56:15.575775
| 2023-05-09T19:16:47
| 2023-05-09T19:16:47
| 87,673,925
| 58
| 22
|
NOASSERTION
| 2023-09-04T15:35:44
| 2017-04-09T01:06:13
|
C
|
UTF-8
|
R
| false
| true
| 1,747
|
rd
|
unzip_process.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{unzip_process}
\alias{unzip_process}
\title{Class for an external unzip process}
\usage{
unzip_process()
}
\value{
An \code{unzip_process} R6 class object, a subclass of
\link[processx:process]{processx::process}.
}
\description{
\code{unzip_process()} returns an R6 class that represents an unzip process.
It is implemented as a subclass of \link[processx:process]{processx::process}.
}
\section{Using the \code{unzip_process} class}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{up <- unzip_process()$new(zipfile, exdir = ".", poll_connection = TRUE,
stderr = tempfile(), ...)
}\if{html}{\out{</div>}}
See \link[processx:process]{processx::process} for the class methods.
Arguments:
\itemize{
\item \code{zipfile}: Path to the zip file to uncompress.
\item \code{exdir}: Directory to uncompress the archive to. If it does not
exist, it will be created.
\item \code{poll_connection}: passed to the \code{initialize} method of
\link[processx:process]{processx::process}, it allows using \code{\link[processx:poll]{processx::poll()}} or the
\code{poll_io()} method to poll for the completion of the process.
\item \code{stderr}: passed to the \code{initialize} method of \link[processx:process]{processx::process},
by default the standard error is written to a temporary file.
This file can be used to diagnose errors if the process failed.
\item \code{...} passed to the \code{initialize} method of \link[processx:process]{processx::process}.
}
}
\examples{
ex <- system.file("example.zip", package = "zip")
tmp <- tempfile()
up <- unzip_process()$new(ex, exdir = tmp)
up$wait()
up$get_exit_status()
dir(tmp)
}
|
6f1b126ae922a93851eb9075fe3d8371ae519ea8
|
94c16636d7d4c98c918fde5096cf3a4118c02415
|
/tests/testthat/test_dyadr.R
|
84cad7eee05604aac4d87cda78e4ab145e687dbb
|
[] |
no_license
|
RandiLGarcia/dyadr
|
66c87d6be3b3eb4e7bf37568dc43f6e037d34961
|
5d317dceb2e278887b9684e172bd79a0c12974af
|
refs/heads/master
| 2021-07-14T20:50:59.289227
| 2021-03-17T13:27:55
| 2021-03-17T13:27:55
| 61,908,363
| 17
| 14
| null | 2020-07-29T15:24:03
| 2016-06-24T19:43:21
|
R
|
UTF-8
|
R
| false
| false
| 1,744
|
r
|
test_dyadr.R
|
context("dyadr")

test_that("var_labels error works", {
  expect_error(var_labels(iris), "labels")
})

test_that("apim error works", {
  expect_message(apim("x"), "please enter a formula")
})

test_that("crsp output returns correct value", {
  # Skip (rather than silently pass) when nlme is unavailable; all nlme
  # calls are namespace-qualified so tests do not depend on an earlier
  # test having attached the package.
  skip_if_not_installed("nlme")
  # APIM with compound-symmetric within-couple error correlation.
  apimi <- nlme::gls(Satisfaction_A ~ Tension_A + SelfPos_P,
                     na.action = na.omit,
                     correlation = nlme::corCompSymm(form = ~1 | CoupleID),
                     data = acipair)
  # Empty (intercept-only) model supplies the baseline error SD.
  apimie <- summary(nlme::gls(Satisfaction_A ~ 1,
                              na.action = na.omit,
                              correlation = nlme::corCompSymm(form = ~1 | CoupleID),
                              data = acipair))
  # sd of errors for the model or esd
  esd <- as.numeric(apimi[6])
  # sd of errors for the empty model or esd0
  esd0 <- as.numeric(apimie[6])
  # the R squared, using the crsp function
  print(crsp(esd, esd0))
  expect_gt(crsp(esd, esd0), 0.3)
})

test_that("smallsummary returns output including Correlation", {
  skip_if_not_installed("nlme")
  apimi <- nlme::gls(Satisfaction_A ~ Tension_A + SelfPos_P,
                     na.action = na.omit,
                     correlation = nlme::corCompSymm(form = ~1 | CoupleID),
                     data = acipair)
  expect_output(smallsummary(apimi), "Correlation")
})

test_that("counts_labels error works", {
  expect_error(counts_labels(acipair, x = "SelfPos_P"),
               "'SelfPos_P' does not have value labels.")
})

test_that("lincomb works", {
  skip_if_not_installed("nlme")
  apimi <- nlme::gls(Satisfaction_A ~ Tension_A + SelfPos_P,
                     na.action = na.omit,
                     correlation = nlme::corCompSymm(form = ~1 | CoupleID),
                     data = acipair)
  expect_length(lincomb(apimi, 2, 3), 3)
})

test_that("variable_view has correct length", {
  expect_length(variable_view(dyadic_trade), 2)
})
|
b9405d65c95f5d5b46477c66a342f6bc42b76e76
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rorcid/examples/orcid_auth.Rd.R
|
484afa46537dbfdd767cd50b973c19f8329c769e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
orcid_auth.Rd.R
|
# Extracted example script for rorcid::orcid_auth() (ORCID authorization).
library(rorcid)
### Name: orcid_auth
### Title: ORCID authorization
### Aliases: orcid_auth rorcid-auth
### ** Examples
# The examples below are marked "Not run": they require interactive
# authentication against the ORCID API and so are not executed here.
## Not run: 
##D x <- orcid_auth()
##D orcid_auth(reauth = TRUE)
##D #orcid_auth(scope = "/read-public", reauth = TRUE)
## End(Not run)
|
8e1b5dcb979780d336db2a87691177019205948d
|
e8f045b7fb162d00c03f3bf26a9eaa9681c66997
|
/scripts/analysis/13_figures_SI.R
|
ccc54ab68106c53f193abb077eb944f51e9a450e
|
[] |
no_license
|
CamilleAnna/HamiltonRuleMicrobiome_gitRepos
|
f648d3f4997947cdd0d6aa9107bfee9325ab1891
|
3a28d0196df93e39dc46cd8e05b0a356e4baf1c0
|
refs/heads/master
| 2023-08-15T11:01:31.186998
| 2021-10-01T13:32:51
| 2021-10-01T13:32:51
| 260,581,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,919
|
r
|
13_figures_SI.R
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Simonet & McNally 2020 #
# SI file figures #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#local_project_dir='/path/to/where/repo/is/cloned'
setwd(paste0(local_project_dir, '/HamiltonRuleMicrobiome_gitRepos/'))
source('./scripts/analysis/sourced_ggthemes.R')
source('./scripts/analysis/sourced_packages.R')
library(kableExtra)
# GO TRAITS CONTRIBUTION HEAMAPS ----
# Quantify, for one species, how many protein-coding sequences (CDS) are
# annotated to a focal social behaviour.
#
# Data sources read from disk:
#   - Pannzer GO annotation output: output/pannzer/<species>.GO.out
#   - PATRIC feature table:         data/patric/features/<species>.features
# NOTE(review): relies on `local_project_dir` existing in the calling
# environment -- confirm it is set before sourcing this script.
#
# @param species Species identifier used to build the file paths above.
# @param focal_behaviour One behaviour category present in
#   social_go_list$behaviour.
# @param social_go_list data.frame of social GO terms; column 1 holds the
#   GO id and a 'behaviour' column assigns each term to a behaviour.
# @return list with two elements:
#   quant_go -- one-row data.frame (species, focal-behaviour CDS count,
#     total CDS, annotated CDS);
#   go_term_contribution -- per-GO-term hit counts for the focal
#     behaviour in this species.
quant_go_sociality_2<- function(species, focal_behaviour, social_go_list){
  social_gos<- social_go_list
  # Reading panzzer output (i.e. GO annotation of CDS), keep best hit only, do some housekeeping
  # Each peg can be present in the table up to three times: once for each of the three GO ontologies
  sp<- read.csv(paste0(local_project_dir, '/HamiltonRuleMicrobiome_gitRepos/output/pannzer/', species, '.GO.out'), header=TRUE, sep = '\t', colClasses = c(rep('character', 4), rep('numeric', 3)))
  sp<- sp[sp$ARGOT_rank == 1,]
  sp<- sp[,1:4]
  colnames(sp)<- c('peg', 'Ontology', 'GO_id', 'Description')
  sp$GO_id<- paste0('GO:', sp$GO_id)
  # open PATRIC features table of that species [theone that came along the fasta file feeding into Pannzer]
  sp_cds<- read.csv(paste0(local_project_dir, '/HamiltonRuleMicrobiome_gitRepos/data/patric/features/', species, '.features'), header = TRUE, sep = '\t')
  sp_cds<- sp_cds[sp_cds$feature_type == 'CDS', c('patric_id', 'product', 'go')]
  sp_cds$patric_id<- as.character(sp_cds$patric_id)
  sp_cds$product<- as.character(sp_cds$product)
  sp_cds$go<- as.character(sp_cds$go)
  colnames(sp_cds)<- c('peg', 'product_patric', 'go_patric')
  # Intersect with panzzer table to get for each peg, the GO assigned by panzzer
  # Peg ids are trimmed to the first two '|'-separated fields so the two
  # tables share a common key.
  sp$peg<- paste0(do.call('rbind', strsplit(sp$peg, '\\|'))[,1], '|', do.call('rbind', strsplit(sp$peg, '\\|'))[,2])
  # Check that all peg names in the panzzer output table are in the peg names of the PATRIC table
  checkpoint<- ifelse(length(which(sp$peg %in% sp_cds$peg == FALSE)) == 0, 'ok', 'not_ok')
  print(paste0('checkpoint 1 : ', checkpoint))
  sp_cds_annot<- full_join(sp_cds, sp, 'peg')
  # record proportion of pegs now annotated
  tmp<- sp_cds_annot[,c('peg', 'GO_id')] # take just pegs and GO annotation by panzzer
  tmp<- tmp[is.na(tmp$GO_id) == FALSE,] # remove all those with annotation
  # intersect with social GO list = quantify sociality
  sp_cds_annot$is_focal_behaviour <- sp_cds_annot$GO_id %in% social_gos[social_gos$behaviour == focal_behaviour,1]
  # Also record for each term the number of time it was hit in that species --> to check the contribution of each term to a behaviour quantification
  social_gos_focus<- social_gos[social_gos$behaviour == focal_behaviour, ]
  go_contribution<- as.data.frame(table(sp_cds_annot$GO_id)) #%>% rename(GO_id = Var1) %>% right_join(social_gos_focus, 'GO_id')
  colnames(go_contribution)<- c('GO_id', 'Freq')
  go_contribution<- right_join(go_contribution, social_gos_focus, 'GO_id')
  # Social GO terms never hit in this species get a zero count.
  go_contribution[is.na(go_contribution$Freq) == TRUE,'Freq']<- 0
  names(go_contribution)[2]<- species
  go_contribution<- go_contribution[,c(1,3,5,2)]
  sp_cds_annot2<- sp_cds_annot[,c('peg', 'Description', 'is_focal_behaviour')]
  sp_cds_annot2$is_annotated<- ifelse(is.na(sp_cds_annot2$Description) == TRUE, 0, 1)
  # Each peg can be present up to three times, because we retain the top hit GO match for all three ontologies
  # But when a peg is assigned to e.g. biofilm by both its BP and CC for example, we don't count it as twice biofilm
  # basically if either of the ontologies GO of a given peg falls in one social behaviour this peg is counted as being part of that social behaviour
  # The following thus converts those potential 'multiple hits' ACROSS THE 3 ONTOLOGIES into binary 0/1
  test<- sp_cds_annot2 %>% group_by(peg) %>% summarise(focal_behaviour_counts = sum(is_focal_behaviour),
                                                       annotated_counts = sum(is_annotated))
  test2<- data.frame(peg = test$peg, ifelse(test[,c('focal_behaviour_counts', 'annotated_counts')] > 0, 1, 0))
  quant_sociality<- data.frame(
    species = species,
    focal_behaviour = sum(test2$focal_behaviour_counts),
    total_cds = nrow(test2),
    annotated_cds = sum(test2$annotated_counts)
  )
  return(list(quant_go = quant_sociality, go_term_contribution = go_contribution))
}
# Curated list of social GO terms (GO id -> behaviour category).
social_go_list<- as.data.frame(read_excel(paste0(local_project_dir, '/HamiltonRuleMicrobiome_gitRepos/output/tables/social_go_list_final.xls')))
# Per-species mean relatedness estimates, reduced to one row per species.
dat<- read.csv(paste0(local_project_dir, '/HamiltonRuleMicrobiome_gitRepos/output/tables/relatedness.txt'), sep = '\t') %>%
  select(species_id, mean_relatedness) %>%
  unique()
colnames(dat)<- c('species', 'mean_relatedness')
# Code wrapper to apply the previous function and runs plotting commands on each species/cooperation category
#
# Runs quant_go_sociality_2() for every species in `dat` for one
# behaviour, collects the per-species counts and the per-GO-term hit
# matrix, and builds a species x GO-term heatmap of log(1 + hits).
# NOTE(review): reads `dat` and `social_go_list` from the enclosing
# environment, and a plot-name lookup table from disk.
#
# @param focal_behaviour Behaviour category passed to quant_go_sociality_2().
# @return list(contribution_heatmap = <ggplot heatmap>,
#              trait_quantification = <data.frame, one row per species>).
run_trait_quantification<- function(focal_behaviour){
  trait_quantification<- vector('list', length = nrow(dat))
  trait_go_terms_contribution<- vector('list', length = nrow(dat))
  for(i in 1:nrow(dat)){
    trait_quantification[[i]]<- quant_go_sociality_2(dat$species[i], focal_behaviour, social_go_list)[[1]]
    trait_go_terms_contribution[[i]]<- quant_go_sociality_2(dat$species[i], focal_behaviour, social_go_list)[[2]]
    print(i)
  }
  # Measure of the behaviour
  trait_quantification_df<- do.call('rbind', trait_quantification)
  # GO terms contribution
  # Bind the per-species count columns side by side (column 4 of each
  # result holds that species' counts).
  trait_go_terms_contribution_df<- trait_go_terms_contribution[[1]]
  for(i in 2:length(trait_go_terms_contribution)){
    trait_go_terms_contribution_df<- cbind(trait_go_terms_contribution_df, trait_go_terms_contribution[[i]][,4])
    # NOTE(review): `1:ncol(x)-1` parses as `(1:ncol(x)) - 1`, i.e. 0:(n-1);
    # the 0 index is dropped, so this selects columns 1..(n-1) as intended,
    # but `1:(ncol(x)-1)` would be clearer.
    colnames(trait_go_terms_contribution_df)<- c(colnames(trait_go_terms_contribution_df)[c(1:ncol(trait_go_terms_contribution_df)-1)], colnames(trait_go_terms_contribution[[i]])[4])
  }
  trait_go_terms_contribution_df<- trait_go_terms_contribution_df[,c(2, 4:ncol(trait_go_terms_contribution_df))]
  rownames(trait_go_terms_contribution_df)<- trait_go_terms_contribution_df$description
  trait_go_terms_contribution_df<- trait_go_terms_contribution_df[,-1]
  # Transpose to species x GO-term, then reshape to long format for ggplot.
  trait_go_terms_contribution_df <- as.data.frame(t(trait_go_terms_contribution_df))
  trait_go_terms_contribution_df$species<- rownames(trait_go_terms_contribution_df)
  trait_go_terms_contribution_df2<- gather(trait_go_terms_contribution_df, 'GO_id', 'hits', 1:(ncol(trait_go_terms_contribution_df)-1))
  plot_ids<- read.table("./data/species_info_files/species_plot_names.txt", header=TRUE, sep = '\t')
  trait_go_terms_contribution_df2<- left_join(trait_go_terms_contribution_df2, plot_ids, 'species')
  heatmap<- ggplot(trait_go_terms_contribution_df2, aes(x = GO_id, y = plot_names))+
    geom_tile(aes(fill = log(1+hits))) +
    scale_fill_gradient(low = "white", high = "darkred")+xlab('')+ylab('')+
    theme(#legend.position="none",
      legend.title = element_text(size = 6),
      legend.text = element_text(size = 6),
      legend.key.size = unit(0.7, "cm"),
      legend.key.width = unit(0.4,"cm") ,
      panel.border= element_blank(),
      axis.text.y = element_text(colour="black", size=5),
      axis.text.x = element_text(colour="black", face = "bold", size=5, angle = 45, vjust=1, hjust=1),
      axis.line.y = element_line(color="black", size = 0.3),
      axis.line.x = element_line(color="black", size = 0.3),
      axis.ticks.y = element_line(color="black", size = 0.3),
      axis.ticks.x = element_line(color="black", size = 0.3),
      plot.title = element_text(lineheight=.8, face="bold", hjust = 0.5))
  return(list('contribution_heatmap' = heatmap,
              'trait_quantification' = trait_quantification_df))
}
# Run the quantification for each of the five GO-based cooperation
# categories (one pass over all species per category).
qt_biofilm<- run_trait_quantification('biofilm')
qt_ab_degradation<- run_trait_quantification('antibiotic_degradation')
qt_quorum_sensing<- run_trait_quantification('quorum_sensing')
qt_siderophores<- run_trait_quantification('siderophores')
qt_secretion_system_no4<- run_trait_quantification('secretion_system')
# Write one GO-term-contribution heatmap (species x GO term) per category
# as an SI figure PDF.
pdf("./output/figures/SI_FIG_ContribBiofilm.pdf", width = 17.3/2.54, height = 25/2.54)
print(qt_biofilm$contribution_heatmap)
dev.off()
pdf("./output/figures/SI_FIG_ContribQuorumSensing.pdf", width = 17.3/2.54, height = 25/2.54)
print(qt_quorum_sensing$contribution_heatmap)
dev.off()
pdf("./output/figures/SI_FIG_ContribABdegradation.pdf", width = 17.3/2.54, height = 25/2.54)
print(qt_ab_degradation$contribution_heatmap)
dev.off()
pdf("./output/figures/SI_FIG_ContribSiderophores.pdf", width = 17.3/2.54, height = 25/2.54)
print(qt_siderophores$contribution_heatmap)
dev.off()
pdf("./output/figures/SI_FIG_ContribSecretionSyst.pdf", width = 17.3/2.54, height = 25/2.54)
print(qt_secretion_system_no4$contribution_heatmap)
dev.off()
# PER GENES ANNOTATION VENN DIAGRAMS ----
# Euler/Venn diagrams of the overlap between the secretome and the five
# GO-based cooperation categories, overall and per species.
per_gene<- read.table('./output/tables/per_gene_annotation.txt', header=TRUE)
test<- per_gene[!is.na(per_gene$secretome),] # remove the four species for which there is no secretome data
test<- test[test$sum_ANY != 0,] # focus on overlap among genes that are annotated
# Gene (peg) sets for each annotation category.
pegs.biofilm<- test[test$biofilm == 1, 'peg']
pegs.ab<- test[test$antibiotic_degradation == 1, 'peg']
pegs.qs<- test[test$quorum_sensing == 1, 'peg']
pegs.sid<- test[test$siderophores == 1, 'peg']
pegs.ss<- test[test$secretion_system == 1, 'peg']
pegs.secretome<- test[test$secretome == 1, 'peg']
# We know from SUM_GO that the GO do not overlap with each other at all
# So figure out the overlaps between secretome and the 5 GO categories
length(pegs.secretome)
length(pegs.biofilm)
length(pegs.ab)
length(pegs.qs)
length(pegs.sid)
length(pegs.ss)
length(intersect(pegs.secretome, pegs.biofilm))
length(intersect(pegs.secretome, pegs.ab))
length(intersect(pegs.secretome, pegs.qs))
length(intersect(pegs.secretome, pegs.sid))
length(intersect(pegs.secretome, pegs.ss))
# only Biofilm and AB overlap with secretome
# Use above to fill in a VennDiagram
# Do it this way and not with venn.diagram() because with >2 sets, this is the only way to have a scale diagram.
library(eulerr)
# OVERALL DIAGRAM
pdf(file = paste0("./output/figures/SI_Fig_VennsDiagrams.pdf"), width=(11.3/2.54), height=(6.5/2.54))
VennDiag <- euler(c("Secretome" = length(pegs.secretome),
                    "Biofilm" = length(pegs.biofilm),
                    "Antibiotic" = length(pegs.ab),
                    "Quorum-sensing" = length(pegs.qs),
                    "Siderophores" = length(pegs.sid),
                    "Sec.Syst." = length(pegs.ss),
                    "Secretome&Biofilm" = length(intersect(pegs.secretome, pegs.biofilm)),
                    "Secretome&Antibiotic" = length(intersect(pegs.secretome, pegs.ab))))
plot(VennDiag,
     counts = TRUE,
     edges = TRUE,#quantities = TRUE,
     font=2,
     labels = list(cex = .5),
     quantities = list(cex = .5),
     cex=2,
     alpha=0.5,
     fill=c("darkorchid", "forestgreen", "magenta", "yellow", "darkorange", "navy"))
dev.off()
# PER SPECIES DIAGRAM
test<- per_gene[!is.na(per_gene$secretome),] # remove the four species for which there is no secretome data
test<- test[test$sum_ANY != 0,] # focus on overlap among genes that are annotated
mysp = unique(test$species)
pdf(file = paste0("./output/figures/additional_figs/VennsDiagrams_per_species.pdf"), width=(11.3/2.54), height=(6.5/2.54))
# seq_along() (not 1:length) is safe if mysp is ever empty.
for(s in seq_along(mysp)){
  print(s)
  test.s = test[test$species == mysp[s],]
  pegs.biofilm<- test.s[test.s$biofilm == 1, 'peg']
  pegs.ab<- test.s[test.s$antibiotic_degradation == 1, 'peg']
  pegs.qs<- test.s[test.s$quorum_sensing == 1, 'peg']
  pegs.sid<- test.s[test.s$siderophores == 1, 'peg']
  pegs.ss<- test.s[test.s$secretion_system == 1, 'peg']
  pegs.secretome<- test.s[test.s$secretome == 1, 'peg']
  # we know from the overall dataframe that overlap always happen between secretome/biofilm or secretome/antibiotic, never between secretome and anything else, or the GO categories between each other
  # so we can simply re-apply this code looping over each species
  VennDiag <- euler(c("Secretome" = length(pegs.secretome),
                      "Biofilm" = length(pegs.biofilm),
                      "Antibiotic" = length(pegs.ab),
                      "Quorum-sensing" = length(pegs.qs),
                      "Siderophores" = length(pegs.sid),
                      "Sec.Syst." = length(pegs.ss),
                      "Secretome&Biofilm" = length(intersect(pegs.secretome, pegs.biofilm)),
                      "Secretome&Antibiotic" = length(intersect(pegs.secretome, pegs.ab))))
  print(plot(VennDiag,
             counts = TRUE,
             edges = TRUE,#quantities = TRUE,
             font=2,
             labels = list(cex = .5),
             quantities = list(cex = .5),
             cex=1,
             alpha=0.5,
             main = list(cex = 0.7, label = mysp[s], font = 2),
             fill=c("darkorchid", "forestgreen", "magenta", "yellow", "darkorange", "navy")))
}
dev.off()
# SUPPLEMENTARY TABLES ----
# Translate raw MCMCglmm effect names into human-readable labels for the
# single-response cooperation models (Table S1).
#
# @param string Character vector of raw effect names.
# @return Character vector with readable labels.
format_effects <- function(string) {
  # Literal (fixed = TRUE) substitutions, applied in the same order as
  # the original pipeline.
  out <- gsub("(Intercept)", "Intercept", string, fixed = TRUE)
  out <- gsub("mean_relatedness", "Mean relatedness", out, fixed = TRUE)
  out <- gsub("log(nb_cds_not_involved_in_response)", "Log(genome size)", out, fixed = TRUE)
  out <- gsub("gram_profilep", "Gram positive", out, fixed = TRUE)
  out <- gsub("species", "Phylogenetic", out, fixed = TRUE)
  out <- gsub("units", "Residual (non-phylogenetic)", out, fixed = TRUE)
  out <- gsub("mean_relative_abundance", "Mean relative abundance", out, fixed = TRUE)
  out <- gsub("sporulation_score", "Sporulation score", out, fixed = TRUE)
  out
}
# Translate raw MCMCglmm effect names into human-readable labels for the
# model-3 summaries (includes host, abundance and per-trait terms).
#
# @param string Character vector of raw effect names.
# @return Character vector with readable labels.
format_effects.model3 <- function(string) {
  # Ordered literal substitutions; order matters: "species.ide" must run
  # before its prefix "species", and "host" is capitalised before
  # "within_Host_relative_abundance" is matched.
  replacements <- c(
    "(Intercept)" = "Intercept",
    "mean_relatedness" = "Mean relatedness",
    "log(nb_cds_not_involved_in_response)" = "Log(genome size)",
    "gram_profilep" = "Gram positive",
    "species.ide" = "Species, non-phylogenetic",
    "species" = "Species, phylogenetic",
    "units" = "Residual",
    "mean_relative_abundance" = "Mean relative abundance",
    "sporulation_score" = "Sporulation score",
    "host" = "Host",
    "within_Host_relative_abundance" = "Within host relative abundance",
    "biofilm" = "Biofilm",
    "ab_degradation" = "Antibiotic degradation",
    "quorum_sensing" = "Quorum sensing",
    "siderophores" = "Siderophores",
    "secretion_system_no4" = "Secretion systems",
    "nb_extracellular" = "Secretome"
  )
  out <- string
  for (pat in names(replacements)) {
    out <- gsub(pat, replacements[[pat]], out, fixed = TRUE)
  }
  out
}
# Tidy the summary() of a single-response MCMCglmm model into one table
# block for the SI LaTeX tables: fixed effects first, then G-side and
# R-side variance components.
#
# @param model Fitted MCMCglmm model object.
# @param trait Label written in the 'Model' column (repeated values are
#   blanked so stacked blocks stay compact).
# @return data.frame with columns Model, Structure, Effect, posterior
#   mean, 95% CI bounds, effective sample size and pMCMC (fixed effects
#   only; variance components get an empty pMCMC string).
format_full_summary<- function(model, trait){
  # Fixed effects: pMCMC in scientific notation when < 0.01, else 3 d.p.
  fixed<- summary(model)$solutions %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(Structure = 'Fixed effect',
           pMCMC = ifelse(pMCMC<0.01,
                          formatC(pMCMC, digit = 2, format = 'e'),
                          formatC(pMCMC, digit = 3, format = 'f')))
  # G-side (random-effect) variance components.
  random<- summary(model)$Gcovariances %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(pMCMC = '') %>%
    mutate(Structure = '(Co)variance')
  # R-side (residual) variance components.
  unit<- summary(model)$Rcovariances %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(pMCMC = '') %>%
    mutate(Structure = '(Co)variance')
  # Stack, relabel effects, blank repeated labels and format numbers.
  tab<- rbind(fixed, random, unit) %>%
    mutate(Effect = format_effects(Effect),
           Model = trait) %>%
    mutate(Structure = ifelse(duplicated(Structure) == TRUE, '', Structure),
           Model = ifelse(duplicated(Model) == TRUE, '', Model)) %>%
    select(Model, Structure, Effect, post.mean,`l-95% CI`, `u-95% CI`, eff.samp, pMCMC) %>%
    rename(`Posterior\n mean` = post.mean,
           `CI95% lower` = `l-95% CI`,
           `CI95% upper` = `u-95% CI`,
           `Effective\n sampling` = eff.samp) %>%
    as.data.frame() %>%
    mutate(`Effective\n sampling` = formatC(`Effective\n sampling`, digit = 0, format = 'f')) %>%
    mutate_if(is.numeric, funs(formatC(., digit = 3, format = 'f')))
  return(tab)
}
# Translate MCMCglmm bivariate-model (model 2) term names into readable
# "<label>_<structure>" tags; the structure suffix after '_' is parsed
# off later by format_full_summary_model2().
#
# @param string Character vector of raw effect names.
# @return Character vector of tagged labels.
format_effects_model2 <- function(string) {
  # Ordered literal substitutions. Order matters: composite (co)variance
  # terms are matched before the shorter at.level() fragments they contain.
  replacements <- c(
    'at.level(trait, 2).species.ide:at.level(trait, 2).species.ide' =
      'Relatedness_species.ide',
    'at.level(first, "TRUE"):at.level(trait, 1).species.ide:at.level(trait, 2).species.ide' =
      'Relatedness,Trait_species.ide',
    'at.level(trait, 2).species.ide:at.level(first, "TRUE"):at.level(trait, 1).species.ide' =
      'Trait,Relatedness_species.ide',
    'at.level(first, "TRUE"):at.level(trait, 1).species.ide:at.level(first, "TRUE"):at.level(trait, 1).species.ide' =
      'Trait_species.ide',
    'at.level(trait, 2):at.level(trait, 2).species' =
      'Relatedness_species',
    'at.level(first, "TRUE"):at.level(trait, 1):at.level(trait, 2).species' =
      'Relatedness,Trait_species',
    'at.level(trait, 2):at.level(first, "TRUE"):at.level(trait, 1).species' =
      'Trait,Relatedness_species',
    'at.level(first, "TRUE"):at.level(trait, 1):at.level(first, "TRUE"):at.level(trait, 1).species' =
      'Trait_species',
    'at.level(trait, 2).units' =
      'Relatedness_units',
    'at.level(first, "FALSE"):at.level(trait, 1).units' =
      'construct',
    'at.level(trait, 1):at.level(first, "TRUE"):gram_profilen' =
      'Gram profile (negative)_fixed',
    'at.level(trait, 1):at.level(first, "TRUE"):log(nb_cds_not_involved_in_response)' =
      'Log(genome size)_fixed',
    'at.level(trait, 2)' =
      'Intercept relatedness_fixed',
    'at.level(trait, 1):at.level(first, "TRUE")' =
      'Intercept trait_fixed'
  )
  out <- string
  for (pat in names(replacements)) {
    out <- gsub(pat, replacements[[pat]], out, fixed = TRUE)
  }
  out
}
# Tidy the summary() of a bivariate (trait + relatedness) MCMCglmm model
# into one table block, adding MCMC p-values for the phylogenetic and
# non-phylogenetic covariance terms.
#
# @param model Fitted bivariate MCMCglmm model object (must carry the
#   posterior VCV samples in model$VCV).
# @param trait Label written in the 'Model' column.
# @return data.frame with columns Model, Structure, Effect, posterior
#   mean, 95% CI bounds, effective sample size and pMCMC.
format_full_summary_model2<- function(model, trait){
  # Fixed effects: pMCMC in scientific notation when < 0.01, else 3 d.p.
  fixed<- summary(model)$solutions %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(Structure = 'Fixed effect',
           pMCMC = ifelse(pMCMC<0.01,
                          formatC(pMCMC, digit = 2, format = 'e'),
                          formatC(pMCMC, digit = 3, format = 'f')))
  random<- summary(model)$Gcovariances %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(pMCMC = '') %>%
    mutate(Structure = 'G')
  # Add the pMCMC for the (phylogenetic) covariance term
  # (two-sided MCMC p-value: twice the posterior mass below zero).
  species.cov.post<- model$VCV[,which(colnames(model$VCV) == 'at.level(first, "TRUE"):at.level(trait, 1):at.level(trait, 2).species')]
  random$pMCMC[which(random$Effect == 'at.level(first, "TRUE"):at.level(trait, 1):at.level(trait, 2).species')]<- (2*sum(species.cov.post<0))/length(species.cov.post)
  unit<- summary(model)$Rcovariances %>%
    as.data.frame()%>%
    rownames_to_column('Effect') %>%
    mutate(pMCMC = '') %>%
    mutate(Structure = 'R')
  # Add the pMCMC for the (non-phylogenetic) covariance term
  ide.cov.post<- model$VCV[,which(colnames(model$VCV) == 'at.level(first, "TRUE"):at.level(trait, 1).species.ide:at.level(trait, 2).species.ide')]
  unit$pMCMC[which(unit$Effect == 'at.level(first, "TRUE"):at.level(trait, 1).species.ide:at.level(trait, 2).species.ide')]<- (2*sum(ide.cov.post<0))/length(ide.cov.post)
  # Stack, relabel via format_effects_model2() (labels carry a
  # "_<structure>" suffix), drop duplicate/construct rows, then pretty-print.
  tab<- rbind(fixed, random, unit) %>%
    mutate(Effect = format_effects_model2(Effect),
           Model = trait) %>%
    mutate(Structure = do.call('rbind', strsplit(Effect, '_'))[,2]) %>%
    filter(!duplicated(post.mean),
           Structure != 'construct') %>%
    mutate(Structure = gsub('fixed', 'Fixed effects', Structure)) %>%
    mutate(Structure = gsub('species.ide', 'Species (co)-variances', Structure)) %>%
    mutate(Structure = gsub('species', 'Phylogenetic (co)-variances', Structure)) %>%
    mutate(Structure = gsub('units', 'Residual variance', Structure)) %>%
    mutate(Structure = ifelse(duplicated(Structure), '', Structure)) %>%
    mutate(Effect = do.call('rbind', strsplit(Effect, '_'))[,1]) %>%
    select(Model, Structure, Effect, post.mean,`l-95% CI`, `u-95% CI`, eff.samp, pMCMC) %>%
    rename(`Posterior\n mean` = post.mean,
           `CI95% lower` = `l-95% CI`,
           `CI95% upper` = `u-95% CI`,
           `Effective\n sampling` = eff.samp) %>%
    as.data.frame() %>%
    mutate(`Effective\n sampling` = formatC(`Effective\n sampling`, digit = 0, format = 'f')) %>%
    mutate_if(is.numeric, funs(formatC(., digit = 3, format = 'f')))
  return(tab)
}
library(kableExtra)
# Load fitted model objects (one chain per model) produced by the analysis
# scripts. These .RData files supply the objects used below: mods.R,
# mods.R.UNCERTAINTY, m3, mods.R.RA.SPO, m5 -- presumably also the
# MA.MODELS_* objects used in the meta-analysis section; TODO confirm.
load("./output/analyses/MODEL1_CHAIN_1.RData")
load("./output/analyses/MODEL2_CHAIN_1.RData")
load("./output/analyses/MODEL3_CHAIN_1.RData")
load("./output/analyses/MODEL4_CHAIN_1.RData")
load("./output/analyses/MODEL5_CHAIN_1.RData")
# MODEL 1
# Stack the per-trait model summaries (one block per form of cooperation)
# into a single data frame; format_full_summary() is defined earlier in file.
tab<-rbind(
  format_full_summary(mods.R$siderophores, ''),
  format_full_summary(mods.R$biofilm, ''),
  format_full_summary(mods.R$ab_degradation, ''),
  format_full_summary(mods.R$secretome, ''),
  format_full_summary(mods.R$secretion_system_no4, ''),
  format_full_summary(mods.R$quorum_sensing, '')
)
# Render as a LaTeX booktabs table. pack_rows() row indices assume a fixed
# number of rows per trait block -- re-check if the model structure changes.
tab.s1<- kable(tab, "latex", booktabs = T, caption = 'Model summaries for the phylogenetic mixed models of cooperation') %>%
  footnote(c("CI95%: 95% credible interval of the posterior distribution",
             "pMCMC: taken as twice the posterior probability that the estimate is negative"),
           fixed_small_size = TRUE, general_title = "") %>%
  kable_styling() %>%
  pack_rows("Siderophores", 1, 5) %>%
  pack_rows("Biofilm", 6, 10) %>%
  pack_rows("Antibiotic degradation", 11, 15) %>%
  pack_rows("Secretome", 16, 21) %>%
  pack_rows("Secretion systems", 22, 26) %>%
  pack_rows("Quorum sensing", 27, 31)
# Write the LaTeX source for Table S1.
fileConn<-file("./output/figures/TABLE_S1.tex")
writeLines(tab.s1, fileConn)
close(fileConn)
# MODEL 2
# Same six traits, but from the models that account for uncertainty in the
# relatedness estimates (format_full_summary_model2, defined above).
tab.model2<-rbind(
  format_full_summary_model2(mods.R.UNCERTAINTY$siderophores, ''),
  format_full_summary_model2(mods.R.UNCERTAINTY$biofilm, ''),
  format_full_summary_model2(mods.R.UNCERTAINTY$ab_degradation, ''),
  format_full_summary_model2(mods.R.UNCERTAINTY$secretome, ''),
  format_full_summary_model2(mods.R.UNCERTAINTY$secretion_system_no4, ''),
  format_full_summary_model2(mods.R.UNCERTAINTY$quorum_sensing, '')
)
# Shorten column headers and the "Phylogenetic" label so the long table fits.
tab.model2 <- tab.model2 %>%
  rename(`Post. mean` = `Posterior\n mean`,
         `Eff. samp.` = `Effective\n sampling`) %>%
  mutate(Structure = gsub('Phylogenetic', 'Phyl.', Structure, fixed = TRUE))
# Long table (spans pages): header repeats, position held in place.
tab.s2<- kable(tab.model2, "latex", longtable = T, booktabs = T, caption = 'Model summaries for phylogenetic mixed models of cooperation, for the six forms of cooperation, when accounting for uncertainty in relatedness estimates. The total regression coefficient of the response trait over relatedness is the sum of the phylogenetic and non-phylogenetic (residual) covariances divided by the sum of the phylogenetic and non-phylogenetic (residual) variances') %>%
  kable_styling(latex_options = c("HOLD_position", "repeat_header")) %>%
  footnote(c("CI95%: 95% credible interval of the posterior distribution",
             "pMCMC: taken as twice the posterior probability that the estimate is negative"),
           fixed_small_size = TRUE, general_title = "") %>%
  pack_rows("Siderophores", 1, 10) %>%
  pack_rows("Biofilm", 11, 20) %>%
  pack_rows("Antibiotic degradation", 21, 30) %>%
  pack_rows("Secretome", 31, 41) %>%
  pack_rows("Secretion systems", 42, 51) %>%
  pack_rows("Quorum sensing", 52, 61)
# Write the LaTeX source for Table S2.
fileConn<-file("./output/figures/TABLE_S2.tex")
writeLines(tab.s2, fileConn)
close(fileConn)
# MODEL 3
# Summary table for the phylogenetic mixed model of relatedness (m3):
# fixed effects with formatted pMCMC, plus G-structure (random) and
# R-structure (residual) variances.
fixed <- summary(m3)$solutions %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(Structure = 'Fixed effects',
         # Scientific notation for very small pMCMC, fixed notation otherwise.
         # `digits` spelled out -- the original relied on partial matching of
         # `digit` against formatC(digits = ...).
         pMCMC = ifelse(pMCMC < 0.01,
                        formatC(pMCMC, digits = 2, format = 'e'),
                        formatC(pMCMC, digits = 3, format = 'f')))
random <- summary(m3)$Gcovariances %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(pMCMC = '') %>%
  mutate(Structure = 'Variances')
unit <- summary(m3)$Rcovariances %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(pMCMC = '') %>%
  mutate(Structure = 'Variances')
tab.model3 <- rbind(fixed, random, unit) %>%
  mutate(Effect = format_effects.model3(Effect)) %>%
  # Blank out repeated structure labels so each header appears only once.
  mutate(Structure = ifelse(duplicated(Structure), '', Structure)) %>%
  select(Structure, Effect, post.mean, `l-95% CI`, `u-95% CI`, eff.samp, pMCMC) %>%
  rename(`Posterior\n mean` = post.mean,
         `CI95% lower` = `l-95% CI`,
         `CI95% upper` = `u-95% CI`,
         `Effective\n sampling` = eff.samp) %>%
  as.data.frame() %>%
  mutate(`Effective\n sampling` = formatC(`Effective\n sampling`, digits = 0, format = 'f')) %>%
  # Purrr-style lambda replaces the deprecated dplyr::funs() wrapper.
  mutate_if(is.numeric, ~ formatC(.x, digits = 3, format = 'f'))
# Reorder rows into presentation order (swap rows 5 and 6).
tab.model3 <- tab.model3[c(1:4, 6, 5, 7), ]
tab.s3 <- kable(tab.model3, "latex", booktabs = T, caption = 'Model summary for the phylogenetic mixed model of relatedness (drivers of relatedness)', row.names = FALSE, linesep = "") %>%
  footnote(c("CI95%: 95% credible interval of the posterior distribution",
             "pMCMC: taken as twice the posterior probability that the estimate is negative"),
           fixed_small_size = TRUE, general_title = "")
# Write the LaTeX source for Table S3.
fileConn <- file("./output/figures/TABLE_S3.tex")
writeLines(tab.s3, fileConn)
close(fileConn)
# MODEL 4
# Cooperation models with sporulation scores and relative abundance added as
# fixed predictors (hence the extra rows per trait block vs Model 1).
tab.model4<-rbind(
  format_full_summary(mods.R.RA.SPO$siderophores, ''),
  format_full_summary(mods.R.RA.SPO$biofilm, ''),
  format_full_summary(mods.R.RA.SPO$ab_degradation, ''),
  format_full_summary(mods.R.RA.SPO$secretome, ''),
  format_full_summary(mods.R.RA.SPO$secretion_system_no4, ''),
  format_full_summary(mods.R.RA.SPO$quorum_sensing, '')
)
# Render and group rows by trait (indices assume 7-8 rows per block).
tab.s4<- kable(tab.model4, "latex", booktabs = T, caption = 'Model summaries for the phylogenetic mixed models of cooperation when including sporulation scores and relative abundance as predictors') %>%
  footnote(c("CI95%: 95% credible interval of the posterior distribution",
             "pMCMC: taken as twice the posterior probability that the estimate is negative"),
           fixed_small_size = TRUE, general_title = "")%>%
  kable_styling() %>%
  pack_rows("Siderophores", 1, 7) %>%
  pack_rows("Biofilm", 8, 14) %>%
  pack_rows("Antibiotic degradation", 15, 21) %>%
  pack_rows("Secretome", 22, 29) %>%
  pack_rows("Secretion systems", 30, 36) %>%
  pack_rows("Quorum sensing", 37, 43)
# Write the LaTeX source for Table S4.
fileConn<-file("./output/figures/TABLE_S4.tex")
writeLines(tab.s4, fileConn)
close(fileConn)
# MODEL 5
# Summary table for the relatedness model (m5) that additionally includes the
# six forms of cooperation as fixed predictors. Same layout as the Model 3
# table: fixed effects, then G- and R-structure variances.
fixed <- summary(m5)$solutions %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(Structure = 'Fixed effects',
         # Scientific notation for very small pMCMC, fixed notation otherwise.
         # `digits` spelled out -- the original relied on partial matching of
         # `digit` against formatC(digits = ...).
         pMCMC = ifelse(pMCMC < 0.01,
                        formatC(pMCMC, digits = 2, format = 'e'),
                        formatC(pMCMC, digits = 3, format = 'f')))
random <- summary(m5)$Gcovariances %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(pMCMC = '') %>%
  mutate(Structure = 'Variances')
unit <- summary(m5)$Rcovariances %>%
  as.data.frame() %>%
  rownames_to_column('Effect') %>%
  mutate(pMCMC = '') %>%
  mutate(Structure = 'Variances')
tab.model5 <- rbind(fixed, random, unit) %>%
  mutate(Effect = format_effects.model3(Effect)) %>%
  # Blank out repeated structure labels so each header appears only once.
  mutate(Structure = ifelse(duplicated(Structure), '', Structure)) %>%
  select(Structure, Effect, post.mean, `l-95% CI`, `u-95% CI`, eff.samp, pMCMC) %>%
  rename(`Posterior\n mean` = post.mean,
         `CI95% lower` = `l-95% CI`,
         `CI95% upper` = `u-95% CI`,
         `Effective\n sampling` = eff.samp) %>%
  as.data.frame() %>%
  mutate(`Effective\n sampling` = formatC(`Effective\n sampling`, digits = 0, format = 'f')) %>%
  # Purrr-style lambda replaces the deprecated dplyr::funs() wrapper.
  mutate_if(is.numeric, ~ formatC(.x, digits = 3, format = 'f'))
# Reorder rows into presentation order (swap rows 11 and 12).
tab.model5 <- tab.model5[c(1:9, 10, 12, 11, 13), ]
tab.s5 <- kable(tab.model5, "latex", booktabs = T, caption = 'Model summary for the phylogenetic mixed model of relatedness (drivers of relatedness) with cooperation (all six forms) included as fixed predictors', row.names = FALSE, linesep = "") %>%
  footnote(c("CI95%: 95% credible interval of the posterior distribution",
             "pMCMC: taken as twice the posterior probability that the estimate is negative"),
           fixed_small_size = TRUE, general_title = "")
# Write the LaTeX source for Table S5.
fileConn <- file("./output/figures/TABLE_S5.tex")
writeLines(tab.s5, fileConn)
close(fileConn)
# META-ANALYSES
# NOTE(review): the third object bound here is MA.MODELS_4, but the table
# footnote and pack_rows label it "Model 3" -- confirm this pairing is
# intentional.
tab.meta<- rbind(MA.MODELS_1, MA.MODELS_2, MA.MODELS_4) %>%
  as.data.frame() %>%
  select(predictor, estimate, se, ci.lower, ci.upper, z.value, p.value) %>%
  rename(Predictor = predictor,
         Estimate = estimate,
         `Std. Err.` = se,
         `CI95% lower` = ci.lower,
         `CI95% upper` = ci.upper,
         `z value` = z.value,
         `p value` = p.value) %>%
  mutate(Predictor = format_effects(Predictor)) %>%
  mutate(Predictor = gsub('Genome size', 'Log(genome size)', Predictor))
library(kableExtra)
tab.s6<- kable(tab.meta, "latex", booktabs = T, caption = 'Meta-analysis model summaries') %>%
  footnote(c("CI95%: 95% confidence intervals",
             "pMCMC: taken as twice the posterior probability that the estimate is negative",
             "Model 1: meta-analysis over the models of cooperation with mean relatedness as predictor",
             "Model 2: meta-analysis over the models of cooperation accounting for uncertainty in relatedness estimates",
             "Model 3: meta-analysis over the models of cooperation with mean relatedness and sporulation scores and relative abundance as predictors"), fixed_small_size = TRUE, general_title = "")%>%
  kable_styling() %>%
  pack_rows("Model 1", 1, 2) %>%
  pack_rows("Model 2", 3, 4) %>%
  pack_rows("Model 3", 5, 8)
# Write the LaTeX source for Table S6.
fileConn<-file("./output/figures/TABLE_S6.tex")
writeLines(tab.s6, fileConn)
close(fileConn)
|
466dfa279bd28f5009d5ad1b98321d1bef04db05
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/simband_local/peak_caller.R
|
9b0b433e5f1310553626ad669411121821240cb2
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061
| 2021-02-28T05:33:08
| 2021-02-28T05:33:08
| 32,551,526
| 7
| 1
| null | 2020-08-17T22:37:43
| 2015-03-19T23:25:01
|
OpenEdge ABL
|
UTF-8
|
R
| false
| false
| 1,766
|
r
|
peak_caller.R
|
# Objective function for peak finding: cubic-spline interpolation of the
# sample spectrum, evaluated at `x` given knot positions `q` and values `d`.
f <- function(x, q, d) {
  spline(q, d, xout = x)$y
}
# Peak calling on a (log-)spectrum by maximizing the spline objective `f`
# over overlapping frequency intervals.
# Assumes `sp` has $freq and $spec components (e.g. output of spectrum())
# -- TODO confirm against the calling script.
x <- sp$freq
y <- log(sp$spec)
nb <- 10 # number of interval breakpoints (gives nb - 1 intervals)
iv <- embed(seq(floor(min(x)), ceiling(max(x)), len = nb), 2)[,c(2,1)]
# make overlapping intervals to avoid problems if the peak is close to
# the ends of the intervals (two modes could be found in each interval)
iv[-1,1] <- iv[-nrow(iv),2] - 2
# The function "f" is maximized at each of these intervals
iv
#            [,1]      [,2]
#  [1,]  0.0000000 0.6666667
#  [2,] -1.3333333 1.3333333
#  [3,] -0.6666667 2.0000000
#  [4,]  0.0000000 2.6666667
#  [5,]  0.6666667 3.3333333
#  [6,]  1.3333333 4.0000000
#  [7,]  2.0000000 4.6666667
#  [8,]  2.6666667 5.3333333
#  [9,]  3.3333333 6.0000000
# Thresholds on the gradient and Hessian used to accept a candidate as a
# genuine local maximum (gradient near zero, curvature sufficiently negative
# in magnitude).
gr.thr <- 0.001
hes.thr <- 0.03
# NOTE(review): library() is preferred over require() for hard dependencies.
require("numDeriv")
# vals columns: 1 = location of maximum, 2 = accepted flag (1/NA), 3 = height.
vals <- matrix(nrow = nrow(iv), ncol = 3)
grd <- hes <- rep(NA, nrow(vals))
for (j in seq(1, nrow(iv)))
{
  # Maximize the spline objective within interval j.
  opt <- optimize(f = f, maximum = TRUE, interval = iv[j,], q = x, d = y)
  vals[j,1] <- opt$max
  vals[j,3] <- exp(opt$obj)  # back-transform from log scale
  # Numerical first/second derivatives at the candidate maximum.
  grd[j] <- grad(func = f, x = vals[j,1], q = x, d = y)
  hes[j] <- hessian(func = f, x = vals[j,1], q = x, d = y)
  if (abs(grd[j]) < gr.thr && abs(hes[j]) > hes.thr)
    vals[j,2] <- 1
}
# it is convenient to round the located peaks in order to avoid
# several local maxima that are essentially the same point
vals[,1] <- round(vals[,1], 2)
# Keep only accepted candidates (flag set), deduplicated after rounding.
if (anyNA(vals[,2])) {
  peaks <- unique(vals[-which(is.na(vals[,2])),1])
} else
  peaks <- unique(vals[,1])
|
818cb3ec2d11e9db35a8ea88316d743e700fc97e
|
1c7c8e7d87b3be88afe606f0d8d3935017658302
|
/school-boundary.R
|
415a0ec16ccf37308528daf971e8ce43b869f586
|
[] |
no_license
|
dobrowski/mapping
|
df2fa0f931cc29cb54bd1506e70dc5f1fc07fa57
|
183acea0b5c1b0024a90d019804c2e1444431402
|
refs/heads/master
| 2020-05-01T18:57:24.642538
| 2019-03-25T18:23:52
| 2019-03-25T18:23:52
| 177,635,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,684
|
r
|
school-boundary.R
|
### Establish local school shapefiles -------
# Read the 2015-16 School Attendance Boundary Survey shapefiles, restrict
# to California, and clip to Monterey County census tracts.
# Depends on `monterey_tracts` defined elsewhere in the project.
primary <- st_read("SABS_1516/SABS_1516_Primary.shp")
middle <- st_read("SABS_1516/SABS_1516_Middle.shp")
# California subsets, reprojected to NAD83 (EPSG:4269) to match the tracts.
primary.ca <- primary %>%
  filter(stAbbrev == "CA") %>%
  st_transform(4269)
middle.ca <- middle %>%
  filter(stAbbrev == "CA") %>%
  st_transform(4269)
# Clip each layer to the county tracts, then stack primary + middle together.
monterey_primary <- primary.ca %>%
  st_intersection(monterey_tracts)
monterey_middle <- middle.ca %>%
  st_intersection(monterey_tracts)
monterey_primary_middle <- monterey_primary %>% rbind(monterey_middle)
# Read a feeder-district CSV and attach its columns to the Monterey school
# boundary polygons, returning an sf data frame of non-district schools with
# renamed metric columns (K-5 / K-3 / 4-5 grade spans excluded).
# Relies on globals `monterey_schools` and `monterey_primary_middle`, and on
# a `%notin%` helper defined elsewhere in the project.
join.map <- function(file){
  # Feeder data keyed by CDSCode (coerced to character to match school table).
  feeder_tbl <- read_csv(file)
  feeder_tbl <- mutate(feeder_tbl, CDSCode = as.character(CDSCode))
  # Attach feeder columns to the school records, then to boundary polygons.
  school_tbl <- left_join(monterey_schools, feeder_tbl)
  result <- monterey_primary_middle %>%
    left_join(school_tbl) %>%
    filter(isdistrict == 0,
           !str_detect(SchoolName, "Boronda Elem")) %>%
    select(SchoolName,
           District,
           Grades = GSserved,
           Chronic = ChronicAbsenteeismRate,
           ELA,
           Math,
           Suspension = `Suspension Rate (Total)`,
           geometry) %>%
    mutate(District = fct_drop(District),
           Grades = fct_drop(Grades)) %>%
    filter(Grades %notin% c("K-5", "K-3", "4-5"))
}
# Build and save one interactive tmap HTML per metric (ELA, Math, Chronic,
# Suspension) for the school polygons produced by join.map().
# Output files: maps/map-<districtname>-<groupy>-<metric>.html
make.maps <- function(file, districtname, groupy, centerpoint){
  school_poly <- join.map(file)
  metrics <- c("ELA", "Math", "Chronic", "Suspension")
  for (metric in metrics) {
    metric_map <- tm_shape(school_poly) +
      tm_fill(metric, alpha = .5,
              popup.vars = c("District", "Grades", "ELA", "Math", "Chronic", "Suspension")) +
      tm_borders() +
      tm_text("SchoolName", auto.placement = TRUE) +
      tm_view(set.view = centerpoint)
    tmap_save(metric_map,
              here("maps", paste0("map-", districtname, "-", groupy, "-", metric, ".html")))
  }
}
### Join School data ------
joint2 <- join.map("Feeder Districts Salinas UnionALL.csv")
### Make some maps -------
# Interactive (leaflet) mode for all tmap plots below.
tmap_mode("view")
# Map loop for single var
map.ela <- tm_shape(joint2) +
  tm_fill("ELA", alpha = .5, popup.vars = c("District","Grades","ELA", "Math", "Chronic", "Suspension")) +
  tm_borders() +
  tm_text("SchoolName", auto.placement = TRUE) +
  tm_view(set.view = c(lat = 36.68 , lon = -121.65 , zoom = 13))
tmap_save(map.ela, "map-Salinas-ela.html")
# Make four maps (one per metric) for each data set provided.
make.maps(here("data", "Feeder Districts SMCJUHSD ALL.csv"), "SMCJUHSD" , "all" ,c(lat = 36 , lon = -121.32 , zoom = 9))
make.maps(here("data","Feeder Districts Salinas Union ALL.csv"), "SUHSD" ,"all" ,c(lat = 36.68 , lon = -121.65 , zoom = 13))
make.maps(here("data", "Feeder Districts SMCJUHSD EL.csv"), "SMCJUHSD" , "EL" ,c(lat = 36 , lon = -121.32 , zoom = 9))
make.maps(here("data","Feeder Districts Salinas Union EL.csv"), "SUHSD" ,"EL" ,c(lat = 36.68 , lon = -121.65 , zoom = 13))
make.maps(here("data", "Feeder Districts SMCJUHSD SWD.csv"), "SMCJUHSD" , "SWD" ,c(lat = 36 , lon = -121.32 , zoom = 9))
make.maps(here("data","Feeder Districts Salinas Union SWD.csv"), "SUHSD" ,"SWD" ,c(lat = 36.68 , lon = -121.65 , zoom = 13))
# Make maps for ELPI
# ELPI data uses different columns, so the join is done inline rather than
# via join.map().
feeders <- read_csv(here("data" ,"Feeder Districts Salinas Union ELPI.csv")) %>%
  mutate(CDSCode = as.character(CDSCode))
joint <- monterey_schools %>%
  left_join(feeders)
joint2 <- monterey_primary_middle %>%
  left_join(joint) %>%
  filter(!is.na(SchoolName)) %>%
  mutate(Grades = GSserved) %>%
  filter(Grades %notin% c("K-5", "K-3", "4-5") ) %>%
  select(-1:-2)
map.elpi <- tm_shape(joint2) +
  tm_fill("PL1_Pct", alpha = .5, popup.vars = c("District", "PL1_Pct" ,"PL2_Pct", "PL3_Pct", "PL4_Pct")) +
  tm_borders() +
  tm_text("School", auto.placement = TRUE) +
  tm_view(set.view = c(lat = 36.68 , lon = -121.65 , zoom = 13))
tmap_save(map.elpi, here("maps", paste0("map-SUHSD-ELPI.html")))
# Facet with two in-sync maps.
map.facet <- tm_shape(joint2 ) +
  tm_polygons(c("Chronic", "Suspension")) +
  tm_borders() +
  tm_text("NAME", auto.placement = TRUE) +
  tm_view(set.view = c(lat = 36.65 , lon = -121.6 , zoom = 10)) +
  tm_facets(sync = TRUE, ncol = 2)
# Map with lots of choosable layers (note: overwrites map.facet above).
map.facet <- tm_shape(joint2) +
  tm_polygons(c("District","Grades","ELA", "Math", "Chronic", "Suspension")) +
  tm_borders() +
  tm_text("SchoolName", auto.placement = TRUE) +
  tm_view(set.view = c(lat = 36.65 , lon = -121.6 , zoom = 10)) +
  tm_facets(sync = TRUE, as.layers = TRUE)
# This doesn't work.
# tmap_save(map.facet, "map-Salinas-layers.html")
### Experiment with mapview ----
# NOTE(review): franconia, breweries and poppendorf are mapview's built-in
# demo datasets; this section is exploratory scratch code.
mapview(joint2, xcol = "ELA")
m1 <- mapView(franconia, col.regions = "red")
m2 <- mapView(breweries)
test <- m1 + breweries + poppendorf[[4]]
mapshot(test, "test.html")
### SMCJUHSD Middle mapping ------
joint2<- join.map("Feeder Districts SMCJUHSDALL.csv")
# Map with lots of choosable layers
map.facet <- tm_shape(joint2) +
  tm_polygons(c("District","Grades","ELA", "Math", "Chronic", "Suspension")) +
  tm_borders() +
  tm_text("SchoolName", auto.placement = TRUE) +
  tm_view(set.view = c(lat = 36 , lon = -121.32 , zoom = 9)) +
  tm_facets(sync = TRUE, as.layers = TRUE)
map.facet
e0f7711e9865a3b68e5401d5730e10c79bd19aad
|
9172a35be3d169fe90e7c0b157b835a34f346be3
|
/ui.R
|
c05792ecc5afbe332f2f64e703b868b16ba61d6e
|
[] |
no_license
|
JagadeeshaKV/NewYorkAirQuality
|
1a40384118ec5e08e6ddf65c504112c043314d46
|
95eb0f9389f2d01c2e4bae68d8447af72fd17cf7
|
refs/heads/master
| 2020-03-31T12:00:50.559303
| 2018-10-09T07:52:48
| 2018-10-09T07:52:48
| 152,200,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinyjs)
# UI definition for the New York Air Quality Measurements dashboard:
# a sidebar (month selector, week range slider, plot checkboxes) and a main
# panel with four summary tiles plus the plot area rendered by server.R.
shinyUI(fluidPage(
  useShinyjs(),
  tags$head(
    # Custom stylesheet from www/style.css.
    tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
  ),
  # Application title
  tags$div( class="pageTitle",
            titlePanel("New York Air Quality Measurements"),
            h4("Daily air quality measurements in New York, May to September 1973"),
            div(class="doc",a("Documentation", href="help.html",
                              title="Click here for documentation"))
  ),
  # Placeholder shown while the app initializes (hidden by shinyjs in server).
  div(
    id = "loading-content",
    h2("Loading...")
  ),
  tags$div(
    class="content",
    sidebarLayout(
      sidebarPanel(
        class="side",
        #Choose month (numeric codes 5-9 map to May-September)
        selectInput("month", "Select Month : ", choices=c("May"=5, "Jun"=6, "Jul"=7, "Aug"=8, "Sep"=9),
                    multiple=FALSE, selected = "Jun"),
        #Choose week range within the selected month
        sliderInput("week", "Weeks :", min = 1, max = 5, value = c(2,4)),
        # Which of the four plots to draw.
        checkboxGroupInput("checkGroup", label = h3("Plot"), choices = list("Ozone" = 1,
                                                                           "Solar Radiation" = 2, "Weekly Wind Speed Comparison" = 3, "Ozone - Temperature" = 4), selected = c(1,2,3,4))
      ),
      # Main panel: four summary tiles and the generated plot(s).
      mainPanel(class="main",
                fluidRow(
                  column(width = 3, class="tile", htmlOutput("tempAvg")),
                  column(width = 3, class="tile", htmlOutput("windAvg")),
                  column(width = 3, class="tile", htmlOutput("solarAvg")),
                  column(width = 3, class="tile", htmlOutput("ozoneAvg"))
                ),
                plotOutput("distPlot")
      )
    )
  )
))
|
9a2d706597980dc2eece1e1cd6395f509ed02add
|
5a297c4677a198dfc1a30a265368b9fe05c4a3f9
|
/CRT_MEP_SICI_anlyzer_sub.R
|
e4076453536ae1a0568181327f9eb020bfa33205
|
[] |
no_license
|
naokit-dev/My-small-scripts
|
1c082c6bcb4e5b4a91a9b5a2516ea4ac57af5198
|
f89fddf29462c6d6b3e4572db0ed09f1e94f4acc
|
refs/heads/master
| 2022-10-22T20:18:33.827137
| 2020-06-16T02:39:22
| 2020-06-16T02:39:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,603
|
r
|
CRT_MEP_SICI_anlyzer_sub.R
|
# NOTE(review): setwd() in a script hampers portability -- consider here::here().
setwd("~/Documents/R/CRT_MEP_SICI")
library(dplyr)
library(ggplot2)
# Plot theme: classic look, 18 pt Helvetica, applied to all plots below.
theme_set(theme_classic(base_size = 18, base_family = "Helvetica"))
# Load the trial-level data.
x <- read.csv("CRT_MEP_SICI_grand.txt", header = TRUE)
head(x)
# Convert coded columns to labelled factors:
# TrialCond 0/1/2 -> rest/go/nogo; sppp 1/2 -> sp (single pulse)/pp (paired pulse).
x$Sub_ID <- factor(x$Sub_ID)
x$TrialCond <- factor(x$TrialCond, levels=c(0,1,2), labels=c("rest", "go", 'nogo'))
x$sppp <- factor(x$sppp, levels=c(1,2), labels=c("sp", 'pp'))
#x$TMSdelay <- factor(x$TMSdelay)
# Per-trial averages: mean/SD of APB and ADM amplitudes (raw, %rest, z-score)
# for each subject x pulse type x trial condition x TMS delay cell.
df <- x %>% group_by(Sub_ID, sppp, TrialCond, TMSdelay) %>%
  summarize(ampAPB.mean = mean(APBamp, na.rm=TRUE), ampAPB.sd = sd(APBamp, na.rm=TRUE),
            ampADM.mean = mean(ADMamp, na.rm=TRUE), ampADM.sd = sd(ADMamp, na.rm=TRUE),
            ampAPB.rest.mean = mean(APBamp_rest, na.rm=TRUE), ampAPB.rest.sd = sd(APBamp_rest, na.rm=TRUE),
            ampADM.rest.mean = mean(ADMamp_rest, na.rm=TRUE), ampADM.rest.sd = sd(ADMamp_rest, na.rm=TRUE),
            ampAPB.z = mean(APBamp_z, na.rm=TRUE),
            ampADM.z = mean(ADMamp_z, na.rm=TRUE))
head(df)
# Exclude the 200 ms TMS delay condition.
df <- df %>% filter(TMSdelay != 200)
# Mean reaction time per subject (used as a vertical marker in the plots).
meanRT.sub <- x %>% group_by(Sub_ID) %>% summarize(meanRT_sub = mean(RT, na.rm=TRUE))
meanRTsub.faster <- meanRT.sub %>% arrange(meanRT_sub)
# Subject labels and count.
lab.sub <- levels(df$Sub_ID)
n.sub <- length(lab.sub)
g <- ggplot()
# Per-subject APB analysis (raw amplitudes): one page per subject, with the
# subject's mean RT shown as a dashed vertical line.
pdf("~/Documents/R/CRT_MEP_SICI/sub_ampAPB.pdf")
for(i in 1:n.sub) {
  data.sub <- filter(df, Sub_ID==lab.sub[i])
  g[[i]] <- ggplot(data.sub,
                   aes(x = TMSdelay,
                       y = ampAPB.mean,
                       group = interaction(TrialCond,sppp),
                       colour = TrialCond,
                       linetype = sppp,
                       fill = sppp))
  g[[i]] <- g[[i]] +
    geom_line() +
    geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4) +
    geom_errorbar(aes(ymin = ampAPB.mean - ampAPB.sd,
                      ymax = ampAPB.mean + ampAPB.sd,
                      width = 3)) +
    geom_vline(xintercept=meanRT.sub$meanRT_sub[[i]], linetype="dashed", colour="gray")
  g[[i]] <- g[[i]] +
    theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
          axis.ticks=element_line(colour = "black"),
          legend.position = c(0, 1), legend.justification = c(0, 1)) +
    scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-100,320)) +
    labs(x="TMS delay (ms)", y="Mean amplitude (mV)", title=paste("ampAPB_sub",lab.sub[i], sep = "" ))
  plot(g[[i]])
}
dev.off()
# Per-subject APB analysis (amplitude as a proportion of rest): rest trials
# excluded, horizontal line marks the rest baseline (1.0).
pdf("~/Documents/R/CRT_MEP_SICI/sub_ampAPB_rest.pdf")
for(i in 1:n.sub) {
  data.sub.rest <- df %>% filter(Sub_ID==lab.sub[i],TrialCond=='go'|TrialCond=='nogo')
  g[[i]] <- ggplot(data.sub.rest,
                   aes(x = TMSdelay,
                       y = ampAPB.rest.mean,
                       group = interaction(TrialCond,sppp),
                       colour = TrialCond,
                       linetype = sppp))
  g[[i]] <- g[[i]] +
    geom_line() +
    geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4) +
    geom_errorbar(aes(ymin = ampAPB.rest.mean - ampAPB.rest.sd,
                      ymax = ampAPB.rest.mean + ampAPB.rest.sd,
                      width = 3)) +
    geom_vline(xintercept=meanRT.sub$meanRT_sub[[i]], linetype="dashed", colour="gray") +
    geom_hline(yintercept=1, linetype="solid", colour="gray")
  g[[i]] <- g[[i]] +
    theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
          axis.ticks=element_line(colour = "black"),
          legend.position = c(0, 1), legend.justification = c(0, 1)) +
    scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-100,320)) +
    labs(x="TMS delay (ms)", y="Amplitude (%rest)", title=paste("ampAPB_sub",lab.sub[i], sep = "" ))
  plot(g[[i]])
}
dev.off()
# Per-subject APB analysis (z-scores): no error bars, fixed y-axis range.
pdf("~/Documents/R/CRT_MEP_SICI/sub_ampAPB_z.pdf")
for(i in 1:n.sub) {
  data.sub.z <- filter(df, Sub_ID==lab.sub[i])
  g[[i]] <- ggplot(data.sub.z,
                   aes(x = TMSdelay,
                       y = ampAPB.z,
                       group = interaction(TrialCond,sppp),
                       colour = TrialCond,
                       linetype = sppp))
  g[[i]] <- g[[i]] +
    geom_line() +
    geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4) +
    geom_vline(xintercept=meanRT.sub$meanRT_sub[[i]], linetype="dashed", colour="gray")
  g[[i]] <- g[[i]] +
    theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
          axis.ticks=element_line(colour = "black"),
          legend.position = c(0, 1), legend.justification = c(0, 1)) +
    scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-100,320)) +
    scale_y_continuous(breaks=seq(-0.5,2,by=0.5),limits=c(-1,2.4)) +
    labs(x="TMS delay (ms)", y="Amplitude (z-score)", title=paste("ampAPB_sub",lab.sub[i], sep = "" ))
  plot(g[[i]])
}
dev.off()
# All-subjects z-score spaghetti plot (one line per subject x condition).
# NOTE(review): `lab.sub[i]` in the title uses the stale loop index from the
# loop above, so the title names only the last subject -- likely unintended.
pdf("~/Documents/R/CRT_MEP_SICI/sub_ampAPB_z2.pdf")
g <- ggplot( df, aes( x = TMSdelay, y = ampAPB.z, group = interaction(Sub_ID , TrialCond), colour = TrialCond ) )
g <- g + geom_line()
g <- g +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x="TMS delay (ms)", y="Amplitude (z-score)", title=paste("ampAPB_sub",lab.sub[i], sep = "" ))
plot(g)
dev.off()
# Grand averages across subjects (for plotting): mean/SD over subjects of the
# per-subject %rest and z-score means, per pulse type x condition x delay.
ga_df <- df %>%
  group_by(sppp, TrialCond, TMSdelay) %>%
  summarize(ampAPB.grandave.mean = mean(ampAPB.rest.mean, na.rm=TRUE),
            ampAPB.grandave.sd = sd(ampAPB.rest.mean, na.rm=TRUE),
            ampAPB.z.grandave.mean= mean(ampAPB.z, na.rm=TRUE),
            ampAPB.z.grandave.sd= sd(ampAPB.z, na.rm=TRUE),
            ampADM.grandave.mean = mean(ampADM.rest.mean, na.rm=TRUE),
            ampADM.grandave.sd = sd(ampADM.rest.mean, na.rm=TRUE),
            ampADM.z.grandave.mean= mean(ampADM.z, na.rm=TRUE),
            ampADM.z.grandave.sd= sd(ampADM.z, na.rm=TRUE))
head(ga_df)
# SICI as the paired-pulse / single-pulse amplitude ratio per cell.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider(); the select() calls retain grouping columns (Sub_ID,
# TrialCond) carried over from the grouped `df` -- verify the intended
# grouping survives the reshape.
SICI_df <- df %>% select(Sub_ID, sppp, TrialCond, TMSdelay, ampAPB.mean, ampADM.mean) %>% gather(emgloc, amplitude, -Sub_ID, -TrialCond, -TMSdelay, -sppp) %>%
  spread(sppp, amplitude) %>% mutate(spppratio = pp/sp) %>% select(TMSdelay, emgloc, spppratio) %>% spread(emgloc, spppratio)
# Grand-average SICI (mean/SD over subjects) per condition x delay.
SICI_ga_df <- SICI_df %>% group_by(TrialCond, TMSdelay) %>%
  summarize(siciAPB.grand.mean = mean(ampAPB.mean, na.rm=TRUE),
            siciAPB.grand.sd = sd(ampAPB.mean, na.rm=TRUE),
            siciADM.grand.mean = mean(ampADM.mean, na.rm=TRUE),
            siciADM.grand.sd = sd(ampADM.mean, na.rm=TRUE))
# Rest-condition SICI, drawn as the baseline band in the SICI plots below.
SICI_ga_df_rest <- SICI_ga_df %>% filter(TrialCond == "rest")
# Grand-average plots (% of rest): APB and ADM pages; error bars are SEM
# (SD / sqrt(n.sub)); horizontal gray line marks the rest baseline (1.0).
pdf("~/Documents/R/CRT_MEP_SICI/grand_amp_rest.pdf")
ga_df_trial <- ga_df %>% filter(TrialCond != "rest")
ga_df_rest <- ga_df %>% filter(TrialCond == "rest")
# APB
g <- ggplot(
  ga_df_trial,
  aes(
    x = TMSdelay,
    y = ampAPB.grandave.mean,
    group = interaction(TrialCond,sppp),
    colour = TrialCond,
    linetype = sppp))
g <- g +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_line() + geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4)
g <- g + geom_errorbar(
  aes(
    ymin = ampAPB.grandave.mean - ampAPB.grandave.sd/sqrt(n.sub),
    ymax = ampAPB.grandave.mean + ampAPB.grandave.sd/sqrt(n.sub),
    width = 3)) +
  geom_hline(yintercept=1, linetype="solid", colour="gray")
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-100,180)) +
  scale_y_continuous(breaks=seq(0,10,by=2),limits=c(-1,10)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "Mean amplitude (%rest)", title="ampAPB (%rest±SEM)" )
plot(g)
# ADM
g <- ggplot(
  ga_df_trial,
  aes(
    x = TMSdelay,
    y = ampADM.grandave.mean,
    group = interaction(TrialCond,sppp),
    colour = TrialCond,
    linetype = sppp))
g <- g +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_line() + geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4)
g <- g + geom_errorbar(
  aes(
    ymin = ampADM.grandave.mean - ampADM.grandave.sd/sqrt(n.sub),
    ymax = ampADM.grandave.mean + ampADM.grandave.sd/sqrt(n.sub),
    width = 3)) +
  geom_hline(yintercept=1, linetype="solid", colour="gray")
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-100,180)) +
  scale_y_continuous(breaks=seq(0,10,by=2),limits=c(-1,10)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "Mean amplitude (%rest)", title="ampADM (%rest±SEM)" )
plot(g)
dev.off()
# Grand-average plots (z-score): APB and ADM pages; error bars are SEM.
# Uses ga_df_trial defined in the %rest plotting section above.
pdf("~/Documents/R/CRT_MEP_SICI/grand_amp_z.pdf")
# APB
g <- ggplot(
  ga_df_trial,
  aes(
    x = TMSdelay,
    y = ampAPB.z.grandave.mean,
    group = interaction(TrialCond,sppp),
    colour = TrialCond,
    linetype = sppp, shape = TrialCond))
g <- g +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_line(size = 1) +
  geom_point(size = 3) +
  geom_errorbar(aes(
    ymin = ampAPB.z.grandave.mean - ampAPB.z.grandave.sd/sqrt(n.sub),
    ymax = ampAPB.z.grandave.mean + ampAPB.z.grandave.sd/sqrt(n.sub),
    width = 3))
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-50,180)) +
  scale_y_continuous(breaks=seq(-0.5,1.5,by=0.5),limits=c(-0.6,1.6)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "MEP amplitude (z-score)", title="ampAPB (z-score±SEM)" )
plot(g)
# ADM
g <- ggplot(
  ga_df_trial,
  aes(
    x = TMSdelay,
    y = ampADM.z.grandave.mean,
    group = interaction(TrialCond,sppp),
    colour = TrialCond,
    fill = sppp,
    linetype = sppp))
g <- g +
  geom_line() +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_point(aes(colour = TrialCond, shape = TrialCond, fill = sppp), size = 4) +
  geom_errorbar(aes(
    ymin = ampADM.z.grandave.mean - ampADM.z.grandave.sd/sqrt(n.sub),
    ymax = ampADM.z.grandave.mean + ampADM.z.grandave.sd/sqrt(n.sub),
    width = 3))
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-50,180)) +
  scale_y_continuous(breaks=seq(-0.5,1.5,by=0.5),limits=c(-0.6,1.6)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "MEP amplitude (z-score)", title="ampADM (z-score±SEM)" )
plot(g)
dev.off()
# Grand-average plots (SICI: PP/SP ratio): APB and ADM pages. The gray line
# and translucent rectangle show the rest-condition mean ± SEM as a baseline
# band; points are dodged horizontally to avoid overlap.
pdf("~/Documents/R/CRT_MEP_SICI/grand_sici.pdf")
SICI_ga_df <- SICI_ga_df %>% filter(TrialCond != "rest")
# APB
g <- ggplot(
  SICI_ga_df,
  aes(
    x = TMSdelay,
    y = siciAPB.grand.mean,
    group = TrialCond,
    colour = TrialCond
  ))
g <- g +
  geom_hline(yintercept = 1, color = 'black') +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_line(size = 1.2, position = position_dodge(10)) +
  geom_point(aes(colour = TrialCond, shape = TrialCond), size = 4, position = position_dodge(10)) +
  geom_errorbar(aes(
    ymin = siciAPB.grand.mean - siciAPB.grand.sd/sqrt(n.sub),
    ymax = siciAPB.grand.mean + siciAPB.grand.sd/sqrt(n.sub),
    width = 3), position = position_dodge(10)) +
  geom_hline(yintercept = SICI_ga_df_rest$siciAPB.grand.mean, color = 'gray') +
  annotate("rect", xmin=-Inf,xmax=Inf,
           ymin=SICI_ga_df_rest$siciAPB.grand.mean - SICI_ga_df_rest$siciAPB.grand.sd/sqrt(n.sub),
           ymax=SICI_ga_df_rest$siciAPB.grand.mean + SICI_ga_df_rest$siciAPB.grand.sd/sqrt(n.sub),
           fill="gray",alpha=0.2)
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-50,170)) +
  scale_y_continuous(breaks=seq(0.5,1.5,by=0.5),limits=c(0.5,1.5)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "PP/SP ratio", title="siciAPB" )
plot(g)
# ADM
g <- ggplot(
  SICI_ga_df,
  aes(
    x = TMSdelay,
    y = siciADM.grand.mean,
    group = TrialCond,
    colour = TrialCond
  ))
g <- g +
  geom_hline(yintercept = 1, color = 'black') +
  geom_vline(xintercept = 0, linetype="dashed", colour="gray") +
  geom_line(size = 1.2, position = position_dodge(10)) +
  geom_point(aes(colour = TrialCond, shape = TrialCond), size = 4, position = position_dodge(10)) +
  geom_errorbar(aes(
    ymin = siciADM.grand.mean - siciADM.grand.sd/sqrt(n.sub),
    ymax = siciADM.grand.mean + siciADM.grand.sd/sqrt(n.sub),
    width = 3), position = position_dodge(10)) +
  geom_hline(yintercept = SICI_ga_df_rest$siciADM.grand.mean, color = 'gray') +
  annotate("rect", xmin=-Inf,xmax=Inf,
           ymin=SICI_ga_df_rest$siciADM.grand.mean - SICI_ga_df_rest$siciADM.grand.sd/sqrt(n.sub),
           ymax=SICI_ga_df_rest$siciADM.grand.mean + SICI_ga_df_rest$siciADM.grand.sd/sqrt(n.sub),
           fill="gray",alpha=0.2)
g <- g +
  scale_x_continuous(breaks=seq(-40,160,by=40),limits=c(-50,170)) +
  scale_y_continuous(breaks=seq(0.5,1.5,by=0.5),limits=c(0.5,1.5)) +
  theme(axis.line=element_line(linetype = 'solid', colour = "black", size=1),
        axis.ticks=element_line(colour = "black"),
        legend.position = c(0, 1), legend.justification = c(0, 1)) +
  labs(x = "TMS delay (ms)", y = "PP/SP ratio", title="siciADM" )
plot(g)
dev.off()
#heatmap#
# Per-subject heat maps of z-scored MEP amplitude (ampAPB.z) as a function
# of TMS delay, drawn separately for go and nogo trials, plus a
# go-minus-nogo contrast map. Open circles mark each subject's mean RT.
df.go <- df %>% filter(TrialCond=='go')
df.nogo <- df %>% filter(TrialCond=='nogo')
# NOTE(review): the subtraction assumes df.go and df.nogo rows are aligned
# one-to-one (same subject/TMS-delay ordering in both frames) -- confirm
# that upstream sorting guarantees this before trusting the contrast.
df.go.nogo.subt <- transform(df.go, ampAPB.z.subt = df.go$ampAPB.z-df.nogo$ampAPB.z )
# Go trials: diverging blue-white-red scale clipped to z in [-2, 2];
# subjects ordered by RT via meanRTsub.faster.
h<-ggplot(df.go,aes(TMSdelay,Sub_ID)) +
  geom_tile(aes(fill=ampAPB.z)) +
  scale_fill_gradient2(limits=c(-2, 2), midpoint=0, low = "blue", mid = "white", high = "red") +
  scale_y_discrete(limits=c(paste(meanRTsub.faster$Sub_ID))) +
  geom_point(data = meanRT.sub, aes(meanRT_sub, Sub_ID), size = 4, shape=1) +
  scale_x_continuous(breaks=seq(60,400,by=40),limits=c(50,400)) +
  theme(legend.title=element_blank())
plot(h)
# Nogo trials: same scales as the go map for visual comparability.
h<-ggplot(df.nogo,aes(TMSdelay,Sub_ID)) +
  geom_tile(aes(fill=ampAPB.z)) +
  scale_fill_gradient2(limits=c(-2, 2), midpoint=0, low = "blue", mid = "white", high = "red") +
  scale_y_discrete(limits=c(paste(meanRTsub.faster$Sub_ID))) +
  geom_point(data = meanRT.sub, aes(meanRT_sub, Sub_ID), size = 4, shape=1) +
  scale_x_continuous(breaks=seq(60,400,by=40),limits=c(50,400)) +
  theme(legend.title=element_blank())
plot(h)
# Go-minus-nogo contrast: one-sided white-to-red scale centred at 0.5.
h<-ggplot(df.go.nogo.subt,aes(TMSdelay,Sub_ID)) +
  geom_tile(aes(fill=ampAPB.z.subt)) +
  scale_fill_gradient2(midpoint=0.5, low = "white", mid = "white", high = "red") +
  scale_y_discrete(limits=c(paste(meanRTsub.faster$Sub_ID))) +
  geom_point(data = meanRT.sub, aes(meanRT_sub, Sub_ID), size = 4, shape=1) +
  scale_x_continuous(breaks=seq(60,400,by=40),limits=c(50,400)) +
  theme(legend.title=element_blank())
plot(h)
dev.off()
|
407f7239924c390ba3a16d266af41471c48bc9f0
|
dec6bce6c646cbdaab39de26aa796ac6ec546fd6
|
/inst/shiny/ui.R
|
c52903551d1e00a40dca5ebb580f093c5c145d42
|
[] |
no_license
|
cran/soilcarbon
|
172c550423a07a83e6dd1d5ae700ef80a4746d6f
|
5c6947262cc36c2f56499ea9fe8cff823899c860
|
refs/heads/master
| 2021-01-20T08:37:38.351226
| 2017-08-04T02:17:31
| 2017-08-04T02:17:31
| 90,169,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,840
|
r
|
ui.R
|
library(soilcarbon)
library(ggplot2)

# UI for the Powell Center soilcarbon workbench: a "Database" tab exposing
# plotting controls plus a flattened-database download, and an
# "Add data to database" tab with template download and QC upload.
shinyUI(fluidPage(
  theme = "bootstrap_simplex.css",

  # Application title
  headerPanel("Powell Center soilcarbon workbench"),

  sidebarPanel(
    # Sidebar contents shown while the "Database" tab is active
    # (tabsetPanel id "conditionedPanels", tab value 1).
    conditionalPanel(condition="input.conditionedPanels==1",
                     h3("Visualize database"),
                     fluidRow(column(6, selectInput("y_var", "Y Variable:",
                                 list("None (histogram)" = "NULL",
                                      "Top of layer" = "layer_top",
                                      "Bottom of layer" = "layer_bot",
                                      "C14" = "X14c",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 ),
                                 selected = "layer_bot")),
                              # BUG FIX: the X-variable list contained
                              # "C14" = "X14c" twice, which rendered a
                              # duplicate entry in the drop-down; the second
                              # occurrence has been removed.
                              column(6, selectInput("x_var", "X Variable:",
                                 list("C14" = "X14c",
                                      "Top of layer" = "layer_top",
                                      "Bottom of layer" = "layer_bot",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 ),
                                 selected = "bd_tot"))),
                     fluidRow(column(7, selectInput("col_facet_var", "Panel Variable:",
                                 list("None" = "NULL",
                                      "Top of layer" = "layer_top",
                                      "Bottom of layer" = "layer_bot",
                                      "C14" = "X14c",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 ),
                                 selected = "map")),
                              column(5, textInput("col_facet_thresh", "threshold", value = "500"))),
                     fluidRow(column(7, selectInput("row_facet_var", "Panel Variable 2:",
                                 list("None" = "NULL",
                                      "Top of layer" = "layer_top",
                                      "Bottom of layer" = "layer_bot",
                                      "C14" = "X14c",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 ),
                                 selected = "mat")),
                              column(5, textInput("row_facet_thresh", "threshold", value = "5"))),
                     fluidRow(column(7, selectInput("col_var", "Color Variable:",
                                 list("None" = "NULL",
                                      "Top of layer" = "layer_top",
                                      "Bottom of layer" = "layer_bot",
                                      "C14" = "X14c",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 ),
                                 selected = "mat")),
                              column(5, sliderInput("alpha", "transparency", min = 0,
                                                    max = 1, value = 0.4))),
                     selectInput("size_var", "Size Variable:",
                                 list("None" = "NULL",
                                      "C14" = "X14c",
                                      "Total Carbon" = "c_tot",
                                      "Total Nitrogen" = "n_tot",
                                      "Bulk Density" = "bd_tot",
                                      "MAP" = "map",
                                      "MAT" = "mat"
                                 )),
                     h3("Download database"),
                     downloadButton("download_database", "Download Database (flattened)")),
    # Sidebar contents shown while the "Add data to database" tab is active
    # (tab value 2).
    conditionalPanel(condition="input.conditionedPanels==2",
                     h3("Template file"),
                     downloadButton("download_template", "Download soilcarbon Template file"),
                     h4("Tips for filling out template file:"),
                     HTML("<ul><li>The 'metadata', 'site', 'profile', 'layer', and 'fraction' tabs are required in order to upload file to database</li>
                          <li>Variables with red column names are required and cannot have missing values.</li>
                          <li>Values in the variables called 'dataset_name', 'site_name', 'profile_name', and 'layer_name' must match across tabs in which they are found.</li>
                          <li>Check the 'controlled vocabulary' tab for acceptable values for certain variables</li>
                          <li>Remove the first two description rows before submitting dataset</li></ul>")
    )
  ),

  mainPanel(tabsetPanel(
    # `value` ties each tab to the conditionalPanel conditions above.
    tabPanel("Database", plotOutput("plot"), value=1),
    tabPanel("Add data to database", value=2 , h3("Quality Control Check"),
             helpText("To add a dataset to the soilcarbon database, the data must pass a quality control check without any warning messages, for questions email Grey (greymonroe@gmail.com)"),
             fileInput("upload", label = "Upload data"),
             conditionalPanel(
               condition = "output.fileUploaded",
               downloadButton("download_dataqc", "download quality control report"),
               helpText("If the quality control report does not have any warning messages, you may submit the data by emailing it to Grey (greymonroe@gmail.com). Thanks!")
             )
    ),
    id = "conditionedPanels"
  )
  ),
  # JS handler so the server can reset the file-input widget after an upload.
  tags$script('
              Shiny.addCustomMessageHandler("resetFileInputHandler", function(x) {
              var el = $("#" + x);
              el.replaceWith(el = el.clone(true));
              var id = "#" + x + "_progress";
              $(id).css("visibility", "hidden");
              });
              ')
))
|
8d8908236771acc3f107bdd2646b7dad7a8173c4
|
83223e278bbb92d45c38b7de6648da1a5bb64bf7
|
/man/rmNAfromTable.Rd
|
c712f2154c7c89339b3fc0a8f53ef59ae07b5a89
|
[] |
no_license
|
gdario/coxph2table
|
804bb1abd6806092217b114e0c76e1f6d27d32b6
|
9edb5b29561911c49ee6f9a99718669820244d2d
|
refs/heads/master
| 2020-05-30T18:33:34.998129
| 2013-04-21T09:08:08
| 2013-04-21T09:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
rd
|
rmNAfromTable.Rd
|
\name{rmNAfromTable}
\alias{rmNAfromTable}
\title{Remove NAs from an output table}
\usage{
rmNAfromTable(xt)
}
\arguments{
\item{xt}{an output table (data frame or xtable)}
}
\value{
the same table (data frame or xtable) without the rows
where \code{NA} appeared.
}
\description{
Remove NAs from an output table
}
\details{
This accessory function removes from an output table the rows in which
\code{NA} appears, e.g. when \code{NA} has been used as an additional
level of a categorical variable.
}
\author{
Giovanni d'Ario
}
|
8064a4a002b3980e7a9a8b102c430e07bddfa304
|
f5e1eb18ef32b847556eed4f3707b1d5a9689247
|
/r_modules/trade_prevalidation/R/getAllReportersRaw.R
|
5359465b55992266775b8faad9a27d83eca046e5
|
[] |
no_license
|
mkao006/sws_r_api
|
05cfdd4d9d16ea01f72e01a235614cb0dcaf573d
|
040fb7f7b6af05ec35293dd5459ee131b31e5856
|
refs/heads/master
| 2021-01-10T20:14:54.710851
| 2015-07-06T08:03:59
| 2015-07-06T08:03:59
| 13,303,093
| 2
| 5
| null | 2015-06-30T07:57:41
| 2013-10-03T16:14:13
|
R
|
UTF-8
|
R
| false
| false
| 273
|
r
|
getAllReportersRaw.R
|
# Return the full code list of reporting countries (M49 classification)
# for the raw trade-flow dataset in the FAO Statistical Working System.
#
# @param dmn   SWS domain to query (default "trade").
# @param dtset SWS dataset within that domain (default "ct_raw_tf").
# @param dimen Dimension whose code list is requested
#              (default "reportingCountryM49").
# @return The result of faosws::GetCodeList for the requested dimension.
getAllReportersRaw <- function(dmn = "trade",
                               dtset = "ct_raw_tf",
                               dimen = "reportingCountryM49") {
    # Guard: the SWS API is only reachable from within an SWS session.
    if (!is.SWSEnvir()) {
        stop("No SWS environment detected.")
    }
    faosws::GetCodeList(dmn, dtset, dimen)
}
|
6914386222ecba835a6b8198dfebccae692eccd8
|
ce8d434798a232380d3a10eaa7279a0b7cda68dc
|
/Linear Regression/LR_Lasso.R
|
37e87d841984c05c58a595f95a5d0527deaf9857
|
[] |
no_license
|
HananGit/Prudential-Life-Insurance-Risk-Prediction
|
40d08db2502d3e4df71f6ee61ef5838e1f829359
|
aa46678a8278ec69dd15c75d007f98909c6dfd84
|
refs/heads/master
| 2021-02-08T21:07:53.397116
| 2020-03-06T22:15:01
| 2020-03-06T22:15:01
| 244,197,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,019
|
r
|
LR_Lasso.R
|
#Splitting dataset into train and test datasets
# Reproducible 80/20 row split of the cleaned Prudential data, persisted to
# CSV for downstream use.
set.seed(2) # we set the seed to make sure that the train and test data will not change every time we divide them by running the sample function
sample_index_2 = sample(1:nrow(Prudential_final_Data_2), nrow(Prudential_final_Data_2)*0.8) #length(sample_index) should be %80 of the dataset
Prudential_train_2 = Prudential_final_Data_2[sample_index_2,] #80% of dataset is train data
Prudential_test_2= Prudential_final_Data_2[-sample_index_2,] #20% of dataset is test data
# NOTE(review): absolute Windows paths make this script machine-specific;
# consider relative paths or file.path().
write.csv(Prudential_train_2, file = "C:/Users/Nauka Salot/Desktop/ADS/Assignments_Nauka/Mid-Term Project_2/Prudential_train_2.csv", row.names = FALSE)
write.csv(Prudential_test_2, file = "C:/Users/Nauka Salot/Desktop/ADS/Assignments_Nauka/Mid-Term Project_2/Prudential_test_2.csv", row.names = FALSE)
#Checking the dimensions of the training dataset
dim(Prudential_train_2)
dim(Prudential_test_2)
#Linear Regression
# Exploratory OLS fit on a hand-picked predictor subset (backtick-quoted
# names are dummy-coded factor levels created upstream).
linear_model <- lm(Response ~ Product_Info_4+
                     Medical_History_1+
                     Medical_Keyword_6+
                     Medical_Keyword_45+
                     `Product_Info_2 _ A1`+
                     `Product_Info_2 _ E1`+
                     `Product_Info_2 _ D4`+
                     `Product_Info_2 _ A6`+
                     `Product_Info_2 _ A5`+
                     `Product_Info_2 _ C4`+
                     `Product_Info_2 _ B2`+
                     `Product_Info_2 _ A4`+
                     `Product_Info_6 _ 1`+
                     `Employment_Info_3 _ 1`+
                     `Employment_Info_5 _ 3`+
                     `InsuredInfo_2 _ 2`+
                     `InsuredInfo_5 _ 1` +
                     `InsuredInfo_6 _ 2` +
                     `InsuredInfo_7 _ 1`+
                     `Insurance_History_1 _ 1`+
                     `Medical_History_11 _ 3`+
                     `Medical_History_11 _ 1`+
                     `Medical_History_14 _ 3`+
                     `Medical_History_22 _ 2`+
                     `Medical_History_35 _ 1`+
                     `Medical_History_38 _ 1`+
                     `Medical_History_39 _ 3`
                   , data = Prudential_train_2)
summary(linear_model)
# Baseline: full model using every available predictor.
linear_model_2 <- lm(Response ~., data = Prudential_train_2)
summary(linear_model_2)
# Larger hand-picked predictor set used for the final linear comparison.
linear_model_1 <- lm( Response ~ Product_Info_4+Ins_Age+Ht+Wt+Family_Hist_2+Family_Hist_4+
                        Medical_History_1+Medical_Keyword_3+Medical_Keyword_9+Medical_Keyword_11+
                        Medical_Keyword_12+Medical_Keyword_15+Medical_Keyword_16+Medical_Keyword_18+
                        Medical_Keyword_19+Medical_Keyword_25+Medical_Keyword_31+Medical_Keyword_33+
                        Medical_Keyword_34+Medical_Keyword_37+Medical_Keyword_38+`Product_Info_2 _ A1`+
                        `Product_Info_2 _ E1`+`Product_Info_2 _ D4`+`Product_Info_2 _ A7`+
                        `Product_Info_2 _ A6`+`Product_Info_2 _ A5`+`Product_Info_2 _ B2`+
                        `Product_Info_2 _ A4`+`Product_Info_6 _ 1`+`Employment_Info_3 _ 1`+
                        `Employment_Info_5 _ 3`+`InsuredInfo_2 _ 2`+`InsuredInfo_5 _ 1` +
                        `InsuredInfo_6 _ 2`+`InsuredInfo_7 _ 1`+`Insurance_History_3 _ 1`+
                        `Insurance_History_7 _ 3`+`Medical_History_4 _ 1`+`Medical_History_7 _ 1`+
                        `Medical_History_11 _ 3`+`Medical_History_22 _ 2`+`Medical_History_35 _ 1`+
                        `Medical_History_38 _ 1`+`Medical_History_39 _ 3`,data= Prudential_train_2)
summary(linear_model_1)
# Evaluate both linear models on the held-out 20% split.
# BUG FIX: the original computed rmse() against the TRAINING responses
# (Prudential_train_2$Response), whose length differs from the test
# predictions, so the result was not a valid test RMSE. The held-out
# responses are now saved before the column is dropped and used as the
# reference. library(Metrics) is also loaded BEFORE rmse() is first
# called (it was previously loaded only after the first use).
library(Metrics)  # rmse()
test_response_2 <- Prudential_test_2$Response
Prudential_test_2 <- subset(Prudential_test_2, select = -c(Response) )
pred_2 <- predict(linear_model, Prudential_test_2)
error_2 <- rmse(test_response_2, pred_2)
pred <- predict(linear_model_1, Prudential_test_2)
error <- rmse(test_response_2, pred)
dim(Prudential_train_2)
#Lasso Regression
install.packages("glmnet") # NOTE(review): installing packages inside a script is discouraged
library(glmnet)
# Build the design matrix for glmnet; column 65 is dropped -- presumably
# the position of the response/intercept in this frame, TODO confirm index.
x<- model.matrix(Response ~ ., Prudential_train_2)[,-65]
y<-Prudential_train_2$Response
# Regularisation path: lambda grid spanning 1e10 down to 1e-2.
grid = 10^seq(10,-2,length=100)
lasso.mode = glmnet(x,y,alpha = 1, lambda = grid) # alpha = 1 -> lasso penalty
summary(lasso.mode)
coeficient_lasso<-coef(lasso.mode)
lasso.mode$beta[,1] # coefficients at the first (largest) lambda value
# OLS refit restricted to the predictors retained by the lasso.
# BUG FIX: the original formula contained a stray token ("Ins_Age+ouj"),
# which referenced a nonexistent variable `ouj` and broke the call;
# the stray token has been removed.
fit.lasso <- lm(Response ~ Product_Info_4+
                  Ins_Age+
                  Ht+
                  Wt+
                  Family_Hist_4+
                  Medical_History_1+
                  Medical_Keyword_3+
                  Medical_Keyword_9+
                  Medical_Keyword_12+
                  Medical_Keyword_15+
                  Medical_Keyword_16+
                  Medical_Keyword_31+
                  Medical_Keyword_33+
                  Medical_Keyword_34+
                  Medical_Keyword_37+
                  Medical_Keyword_38+
                  `Product_Info_2 _ A1`+
                  `Product_Info_2 _ E1`+
                  `Product_Info_2 _ D4`+
                  `Product_Info_2 _ A7`+
                  `Product_Info_2 _ A6`+
                  `Product_Info_2 _ A5`+
                  `Product_Info_2 _ B2`+
                  `Product_Info_2 _ A4`+
                  `Employment_Info_3 _ 1`+
                  `Employment_Info_5 _ 3`+
                  `InsuredInfo_2 _ 2`+
                  `InsuredInfo_5 _ 1` +
                  `InsuredInfo_6 _ 2`+
                  `InsuredInfo_7 _ 1` +
                  `Insurance_History_7 _ 3`+
                  `Medical_History_4 _ 1`+
                  `Medical_History_7 _ 1`+
                  `Medical_History_11 _ 3`+
                  `Medical_History_22 _ 2`+
                  `Medical_History_35 _ 1`+
                  `Medical_History_38 _ 1`+
                  `Medical_History_39 _ 3`, data = Prudential_train_2)
summary(fit.lasso)
#Plotting the graph
plot(fit.lasso) # lm diagnostic plots (residuals vs fitted, QQ, etc.)
# Predict on the held-out set and round to the nearest integer class
# (Response appears to be an integer rating -- TODO confirm its range).
pred_lasso <- predict(fit.lasso, Prudential_test_2)
pred_lasso <- round(pred_lasso)
summary(pred_lasso)
plot(pred_lasso)
#To be executed
# Confusion table of predicted vs observed classes and overall accuracy (%).
# BUG FIX: the original read Prudential_test_2$response (lowercase); R is
# case sensitive and the column is named "Response".
# NOTE(review): Response is stripped from Prudential_test_2 earlier in this
# script, so these lines need the pre-drop test frame (or a saved copy of
# the held-out responses) to run -- TODO confirm intended data flow.
mat_lasso<-table(pred_lasso,Prudential_test_2$Response)
sum(diag(mat_lasso))/sum(mat_lasso) * 100 # accuracy as a percentage
rm_lasso <- sqrt(mean((Prudential_test_2$Response - pred_lasso)^2, na.rm = TRUE))
#backward elimination
#including the column
# Attach the rounded lasso predictions to the (Response-stripped) test set
# and persist the result for reporting.
predict_data <- data.frame(Prudential_test_2, pred_lasso)
write.csv(predict_data, file = "C:/Users/Nauka Salot/Desktop/ADS/Assignments_Nauka/Mid-Term Project_2/Prudential_test_2_results.csv", row.names = FALSE)
#RMSE Value
library(Metrics)
# NOTE(review): this compares TRAINING responses against TEST-set
# predictions; the vectors have different lengths, so the value is not a
# meaningful RMSE -- the held-out responses should be used instead.
error<-rmse(Prudential_train_2$Response, pred)
#Forecast
install.packages("forecast") # NOTE(review): installing inside a script is discouraged
library(forecast)
# NOTE(review): same train-vs-test length mismatch as the rmse() call above.
accuracy(pred_lasso, Prudential_train_2$Response)
|
a996255e8b68935837889d319f7a74aacbdb284d
|
3ad3ce5f38d636b649abd6e2e8741d482d8f6d72
|
/R/cleanabs.R
|
dd61671cd2a5cb974d169d4eda2f0ad2c750c427
|
[] |
no_license
|
cran/pubmed.mineR
|
279ce024df5b7913885ec96ad906226aa78633ce
|
7612a2d68f503794f8dee8479e8b388949e4b70d
|
refs/heads/master
| 2021-12-09T18:39:44.578225
| 2021-11-26T14:50:03
| 2021-11-26T14:50:03
| 17,919,888
| 5
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
cleanabs.R
|
# Generic + S4 method: drop Abstracts records whose abstract text is "NONE".
setGeneric("cleanabs", function(object) standardGeneric("cleanabs"));
# Keep only entries with a real abstract; the Journal / Abstract / PMID
# slots are subset with the same index vector so they stay aligned.
setMethod("cleanabs", "Abstracts", function(object) {
  keep <- which(object@Abstract != "NONE")
  new("Abstracts",
      Journal = object@Journal[keep],
      Abstract = object@Abstract[keep],
      PMID = object@PMID[keep])
})
|
5402151df21937247123f9f52c0d6a2f66172a85
|
95b8a3279797515372dd46b03d8d47be5b32bbb5
|
/HW7/hw7_code.R
|
9fe50e4c74672170cefcacad9b90af3e56f2a44b
|
[
"MIT"
] |
permissive
|
qzyu999/applied-time-series-analysis-winter-19
|
69f66d031b1c36a0e80426cf4c52843a28c8c267
|
af5bda8f738c4cdf8ace11ec4ed750790e0a62fc
|
refs/heads/master
| 2022-11-06T03:01:05.705830
| 2020-05-15T03:14:06
| 2020-05-15T03:14:06
| 264,046,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,908
|
r
|
hw7_code.R
|
# Load NASA global temperature anomalies, difference the series once, and
# compare the raw periodogram with three Daniell-smoothed versions.
library(xlsx); library(astsa) # Load libraries
global_temp <- read.xlsx('GlobalTemp_NASA.xlsx', sheetIndex = 1) # Load data
X_t <- diff(global_temp$Temp.Anomaly) # First differenced data
par(mfrow=c(2,2)) # 2x2 grid: raw + three smoothed periodograms
# plot periodogram of X_t
p_0 <- spec.pgram(X_t, log = 'no',
                  main = 'Periodogram of First Differenced Series', ylim = c(0,0.07))
# 3 smoothed periodogram (modified Daniell) spans 5, 15, 25
p_5 <- spec.pgram(x = X_t, spans = 5, log = 'no',
                  main = 'Smoothed Periodogram, span = 5', ylim = c(0,0.07))
p_15 <- spec.pgram(x = X_t, spans = 15, log = 'no',
                   main = 'Smoothed Periodogram, span = 15', ylim = c(0,0.07))
p_25 <- spec.pgram(x = X_t, spans = 25, log = 'no',
                   main = 'Smoothed Periodogram, span = 25', ylim = c(0,0.07))
dev.off()
### 1 b
# Select the optimal number of neighbours for a modified-Daniell spectral
# density estimate by minimising a penalised Frobenius distance between the
# raw periodogram and each smoothed version.
#
# Args:
#   y    - observed series.
#   kmax - largest number of neighbours k to consider (span = 2k + 1).
# Returns a list:
#   ctr  - criterion value for each k in 1..kmax.
#   kopt - the k at which the criterion is minimised.
specselect <- function(y, kmax) {
  raw <- spec.pgram(y, log = "no", plot = FALSE)$spec
  cc <- norm(as.matrix(raw), type = "F")^2
  ctr <- numeric(kmax)
  for (k in seq_len(kmax)) {
    smoothed <- spec.pgram(y, spans = 2 * k + 1, log = "no", plot = FALSE)$spec
    # distance to the raw periodogram plus a 1/(2k) roughness penalty
    ctr[k] <- norm(as.matrix(raw - smoothed), type = "F")^2 + (1 / (2 * k)) * cc
  }
  list(ctr = ctr, kopt = which.min(ctr))
}
# a = specselect(X_t, kmax = 12)
# plot(1:12, a$ctr, type = 'l')
### 1 c
# Fit an ARMA(2,2) to the differenced series and overlay its implied
# spectral density on a smoothed periodogram (span 2*7+1 = 15).
fit <- arima(X_t, order = c(2,0,2))
# NOTE(review): fit$coef[1:2] / fit$coef[3:4] assume the coefficient vector
# is ordered ar1, ar2, ma1, ma2 -- confirm against arima()'s output.
arma_fit <- arma.spec(ar = fit$coef[1:2], ma = fit$coef[3:4],
                      log = 'no', var.noise = fit$sigma2)
p_7 <- spec.pgram(x = X_t, spans = 2*7+1, log = 'no',
                  main = 'Smoothed Periodogram, span = 15', ylim = c(0,0.03))
lines(arma_fit$freq, arma_fit$spec)
### 2 c
# Frequency grid over [-1/2, 1/2] at which the spectral densities are drawn.
w = seq(from = -0.5, to = 0.5, by = 0.01)
# Spectral density f(w) = 2.98 + 2.8*cos(2*pi*w), vectorised over the
# frequency argument w (cycles per unit time).
sdf <- function(w) 2.98 + 2.8 * cos(2 * pi * w)
# Spectral density of the differenced process:
# f(w) = 3.16 - 0.36*cos(2*pi*w) - 2.8*cos(4*pi*w), vectorised over w.
sdf2 <- function(w) 3.16 - 0.36 * cos(2 * pi * w) - 2.8 * cos(4 * pi * w)
# Common y-limits covering both densities, padded by 0.1 on each side.
sdf_y_lim <- range(range(sdf(w)), range(sdf2(w)))
sdf_y_lim[1] <- sdf_y_lim[1] - 0.1
sdf_y_lim[2] <- sdf_y_lim[2] + 0.1
# Overlay the two spectral densities on one plot with a legend.
plot(w, sdf(w), type = 'l', ylim = sdf_y_lim,
     main = 'Spectral Density of X_t and Differenced X_t',
     xlab = 'Frequency', ylab = 'Spectrum')
lines(w, sdf2(w), type = 'l', col = 'red', lty = 2)
legend('bottomleft', legend = c('X_t', 'differenced X_t'),
       col = c('black', 'red'), lty = c(1,2))
### 3 b
# Spectral density of the filtered white-noise process:
# f(w) = (2/3) * (1 + 4*cos(2*pi*w) + 4*cos^2(2*pi*w)), vectorised over w.
sdf3 <- function(w) {
  c2 <- cos(2 * pi * w)
  (2/3) * (1 + 4 * c2 + 4 * c2^2)
}
# Compare sdf3 against the flat spectrum (height 2) of white noise with
# variance 2, drawn as a dashed red reference line.
plot(w, sdf3(w), type = 'l', main = 'Spectral Density of White Noise with Variance = 2',
     xlab = 'Frequency', ylab = 'Spectrum')
abline(h = 2, col = 'red', lty = 2)
legend('topleft', legend = c('Z_t', 'X_t'), lty = c(1,2), col = c('black', 'red'))
|
96f4fae8ed3f7475986da5a78315ea3e6c688a8a
|
91bfc625b23246e77b9f05ddbcac30b0b53f8201
|
/run_analysis.R
|
6f46408b0ffd1f4ea8240fc0423767df13528c44
|
[] |
no_license
|
Puranjay2406/getting-and-cleaning-data-project
|
3e7bb0dfd5bf51230192540acd2d178c27640ba6
|
d05d3bff3284a1969b7541936bfa44d7faa1b35c
|
refs/heads/master
| 2020-03-28T16:59:00.646955
| 2018-09-09T13:47:50
| 2018-09-09T13:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,519
|
r
|
run_analysis.R
|
################################ download file and unzip it####################
# Getting-and-Cleaning-Data course project: download the UCI HAR dataset,
# keep only mean/std measurements, merge train+test, and produce a tidy
# per-activity/per-subject average table.
library(dplyr)
if(!file.exists("./data")){dir.create("./data");dir.create("./data/unzipdt")}
dturl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(dturl,"./data/wearabledt.zip",mode="wb")
unzip(zipfile = "./data/wearabledt.zip",exdir = "./data/unzipdt")
############################## Load necessary data ########################################
activitylabels <- read.table("./data/unzipdt/UCI HAR Dataset/activity_labels.txt",
                             colClasses = c("numeric","character"))
measures <- read.table("./data/unzipdt/UCI HAR Dataset/features.txt",
                       colClasses = c("numeric","character"))
# Indices of feature names containing "mean" or "std".
# NOTE(review): this regex also matches meanFreq()/angle(...Mean) features --
# confirm whether those are intended to be included.
selectedmeasures <- grep(".*mean.*|.*std.*", measures[,2])
# Normalise the selected feature names: CamelCase mean/std, strip -(),
# fix the "BodyBody" typo, lowercase everything.
measuresnames <- {measuresnames <- measures[selectedmeasures,2];
measuresnames <- gsub('-mean', 'Mean', measuresnames);
measuresnames <- gsub('-std', 'Std', measuresnames)
measuresnames <- gsub('[-()]', '',measuresnames)
measuresnames <- gsub("BodyBody", "Body",measuresnames)
measuresnames <- tolower(measuresnames)
}
####### Extracts only the measurements on the mean and standard deviation for each measurement########
traindt <- read.table("./data/unzipdt/UCI HAR Dataset/train/X_train.txt")[selectedmeasures]
trainactivs <- read.table("./data/unzipdt/UCI HAR Dataset/train/Y_train.txt")
trainsubjects <- read.table("./data/unzipdt/UCI HAR Dataset/train/subject_train.txt")
testdt <- read.table("./data/unzipdt/UCI HAR Dataset/test/X_test.txt")[selectedmeasures]
testactivs <- read.table("./data/unzipdt/UCI HAR Dataset/test/Y_test.txt")
testsubjects <- read.table("./data/unzipdt/UCI HAR Dataset/test/subject_test.txt")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~merge datasets~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Column order after cbind: subject, activity, then the selected measures.
traindt <- cbind(trainsubjects, trainactivs, traindt)
testdt <- cbind(testsubjects, testactivs, testdt)
finaldata <- rbind(traindt, testdt)
colnames(finaldata) <- c("subject", "activity", measuresnames)
#### independent tidy data set with the average of each variable for each activity and each subject####
indepdata <- finaldata %>% group_by(activity,subject)%>%summarize_all(mean);{indepdata$activity <- factor(indepdata$activity,labels = activitylabels$V2)}
|
1c49f0469167827d2ba7d57978f403ae4e2c416d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmomco/examples/pp.f.Rd.R
|
07d80d9247512a163eb62ba5b8dc4989fbb3df29
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 307
|
r
|
pp.f.Rd.R
|
# Auto-extracted example script for lmomco::pp.f (quantile function of the
# ranks of plotting positions), generated from the package Rd file.
library(lmomco)
### Name: pp.f
### Title: Quantile Function of the Ranks of Plotting Positions
### Aliases: pp.f
### Keywords: plotting position rankit
### ** Examples
# Sorted random exponential sample; lower/upper quartile plotting positions.
X <- sort(rexp(10))
PPlo <- pp.f(0.25, X)
PPhi <- pp.f(0.75, X)
# Plot both quantile bands (NA inserts a line break between the two series).
plot(c(PPlo,NA,PPhi), c(X,NA,X))
points(pp(X), X) # Weibull i/(n+1)
|
e16f05cb56ce038132c4c81f9eacd9fe92642eee
|
230297d590fbc7156d408f0db2e4cb33a2c75fa5
|
/ui.R
|
db718cd73475862d7cf18068599d9b61c46d0f37
|
[] |
no_license
|
jadavs/LeagueOfLegends-DataAnalysis
|
3bc0acc705271b28c53dee13c688f8502902fcf8
|
146d0bc1590c66cee2705e385768cf6b1441b5c5
|
refs/heads/master
| 2021-08-24T01:31:28.875776
| 2017-12-07T13:16:23
| 2017-12-07T13:16:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,442
|
r
|
ui.R
|
library("shiny")
library("plotly")
library("shinythemes")

# UI definition for the League of Legends analysis Shiny app.
# NOTE(review): the object name shadows shiny::shinyUI; it works because
# sourcing ui.R returns the last value, but a neutral name such as `ui`
# would be clearer. Left unchanged to preserve the existing interface.
shinyUI <- fluidPage(title = "LoL Analysis", theme = shinytheme('sandstone'),
           tags$head(
             tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
           ),
  navbarPage("League of Legends",
             tabPanel("Project Overview",
                      fluidRow(
                        includeMarkdown("scripts/Overview.Rmd")
                      )
             ),
             tabPanel("Player Analysis",
                      sidebarLayout(
                        sidebarPanel(
                          # BUG FIX: `selected = "Middle"` was nested inside the
                          # c(...) choices vector, adding a spurious sixth
                          # choice (named "selected") and leaving the default
                          # selection unset; it is now passed to selectInput().
                          # `choice =` also relied on partial argument matching
                          # and is spelled out as `choices =`.
                          selectInput(inputId = "role",
                                      label = "Choose a role:",
                                      choices = c("Top", "Jungle","Middle", "ADC", "Support"),
                                      selected = "Middle"),
                          br(),
                          uiOutput("firstdropdown"),
                          br(),
                          uiOutput("seconddropdown")
                        ),
                        mainPanel(
                          h2("Overview"),
                          p("In this tab, we calculate the win-rates of all players who have played competitive league of legends games between
                            2015 and 2017. From the dropdown menus on the left, the user can select the two players whose win-rate he/she wants
                            to compare. Also, we compare the champions the selected players have played on the basis of the number
                            of games played on each champion."),
                          br(),
                          h2("Winrate Comparison"),
                          p("This plot compares the win-rates for the two selected players. Also, the use of a stacked bar plot enables us to compare
                            the win-rates for each player on the basis of the side they play on- blue or red."),
                          br(),
                          plotlyOutput("winrateplot"),
                          br(),
                          br(),
                          br(),
                          p("Generally, we can see a trend that the win-rate on the blue side is higher than the win-rate on the red side. This supports
                            the fact that most teams opt to play on the blue side when given the option."),
                          br(),
                          h2("Player's Champion Pools"),
                          p("The two plots below display the number of games each player has played on respective champions between 2015 and 2017."),
                          br(),
                          plotlyOutput("p1champplot"),
                          plotlyOutput("p2champplot")
                        )
                      )
             ),
             tabPanel("Champion Analysis",
                      sidebarLayout(
                        sidebarPanel(
                          # `choices =` spelled out here as well (was `choice =`).
                          selectInput(inputId = "role1",
                                      label = "Choose a role:",
                                      choices = c("Top", "Jungle","Middle", "ADC", "Support")),
                          br(),
                          uiOutput("thirddropdown"),
                          br(),
                          uiOutput("fourthdropdown")
                        ),
                        # Show a plot of the generated distribution
                        mainPanel(
                          h2("Overview"),
                          p("League of legends is game that is very much dependent on a player's skill, but at the
                            same time, the champions one picks matter ALOT. Most champions can play in one role(Top, Mid, Jungle, ADC and Support)
                            Meanwhile, some champions can be played in multiple roles. In this panel, we compare 2 champions with respect
                            to their win rates and ban rates. The champions picked will define the flow and pace of the game. Some champions
                            tend to play extremely aggressive and upclose whereas some tend to scale better and deal
                            damage from a distance. What makes professional esports and their champion selections interesting is the banning
                            champions phase at the beginning of the game. Each team gets to ban 5 champions and pick 5 champions. Studying the
                            ban rates and win rates gives us a general idea of which champions are currently very good or versatile"),
                          br(),
                          h2("Win Rates Comparison"),
                          p("The plot below compares the win rates of the 2 champions selected from the dropdown menu :"),
                          br(),
                          plotlyOutput("winplot"),
                          br(),
                          h2("Ban Rates Comparison"),
                          p("The plot below compares the win rates of the 2 champions selected from the dropdown menu :"),
                          br(),
                          plotlyOutput("banplot")
                        )
                      )
             ),
             tabPanel("How to win more games?",
                      fluidRow(
                        h2("Overview"),
                        p("We plan to find what objectives affect the outcome of the game the most. Also, we use correlation matrices to compare
                          the different correlations and try to know the difference between a competitive game and a non-competitive game."),
                        br(),
                        h2("Non-Competitive Games"),
                        plotOutput("noncompplot"),
                        p("From this plot, we can observe that there is decent correlation between winning the games and getting
                          objectives such as towers, and inhibitors. However, there is one interesting observation. The correlation
                          of winning a game and getting a baron is lower as compared to the correlation with getting Dragons. This
                          is indeed surprising because baron is the strongest objective in the game, and usually can turn the tide towards
                          a team."),
                        br(),
                        h2("Competitive Games"),
                        plotOutput("compplot"),
                        p("Just like the correlation matrix plot for the non-competitive games, we can observe a strong correlation between winning
                          games and getting objectives such as towers and inhibitors. However, there is one major difference which we can observe-
                          the correlation between winning and getting baron is much higher, and similar to getting dragons. This is more consistent
                          with the logic of the game. Moreover, in competitive games all teams and players play much more seriously and focus to get
                          objectives in the game to win. Once a team gets baron, which is the strongest objective in the game, it's less likely that
                          they will throw away the lead, thus the higher correlation."),
                        br(),
                        h2("Conclusion"),
                        p("From this analysis, we can conclude that non-competitive games mostly depend on getting objectives such as turrets and
                          inhibitors. Barons and dragons don't have as big an influence as these objectives have, so don't lose hope if you lose
                          a baron or drake! In order to win more games, you should concentrate in getting turrets, and pushing lanes in order to get
                          inhibitors!"),
                        br()
                      )),
             tabPanel("Sources/Contact Us",
                      fluidRow(
                        h2("Reach Us"),
                        p("We are here to answer any questions you might have about our League of Legends application.
                          Reach out to us and we'll respond as soon as we can!
                          Please use the contact information provided below to reach us."),
                        h2("Contact Information"),
                        h4(strong("E-mail")),
                        p("Aman Agarwal: aman28@uw.edu"),
                        p("Siddharth Jadav: jadavs@uw.edu"),
                        p("Mahir Bathija: mahirb@uw.edu"),
                        br(),
                        h4(strong("Mobile")),
                        p("Aman Agarwal: +1 (206) 565-7896"),
                        p("Siddharth Jadav: +1 (206) 245-3623"),
                        p("Mahir Bathija: +1 (206) 693-0757"),
                        br(),
                        h4(strong("Summoner ID")),
                        p("Aman Agarwal: seraastark"),
                        p("Siddharth Jadav: sjadav"),
                        h2("Resources"),
                        helpText(a("Competitive Games dataset", href="https://www.kaggle.com/chuckephron/leagueoflegends/data")),
                        helpText(a("Non-Competitive Games dataset", href="https://www.kaggle.com/datasnaek/league-of-legends/data"))
                      )
             )
  )
)
|
518f1b46fb58539493f12e3e3190b490511f9cf4
|
319f8ff8b0228260ecfe14dc32964955529c0eb6
|
/cpue/dorado_cpue_gam_standardization_month.R
|
e9bce5bc8bdf6e764ff90fe7c7e30cce054bcf36
|
[] |
no_license
|
imarpe/MDB
|
0228324b32838d948b9007b11f6bf0034a63bd9d
|
bad916d26d8647b7bc7894b0878bef02ebb10b05
|
refs/heads/master
| 2020-04-09T14:59:00.384111
| 2016-06-23T02:27:56
| 2016-06-23T02:27:56
| 41,062,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,998
|
r
|
dorado_cpue_gam_standardization_month.R
|
# Clean:
# NOTE(review): rm(list = ls()) / graphics.off() at the top of a script
# wipes the user's workspace and devices; discouraged outside interactive use.
rm(list = ls(all = TRUE)) #clear all;
graphics.off() # close all;
gc() # Clear memmory (residuals of operations?, cache? Not sure)
require(gam) # NOTE(review): library() is preferred over require() for hard dependencies
require(pgirmess)
# Non-base helpers used below (daysTrip, lag with a freq argument, pairsrp)
# are presumably defined in this sourced file -- confirm.
source("cpue_standardization_functions.R")
# Data pre-processing -----------------------------------------------------
# Dolphinfish ("perico") trip records; analysis restricted to years < 2015.
perico = read.csv("perico.csv")
perico = perico[perico$year < 2015, ]
names(perico)
perico$nTrips = rep(1, length=nrow(perico))
perico$daysTrip = daysTrip(perico$date_ini, perico$date_end)
environment = read.table("http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices",
header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
environment = environment[ ,c("YR","MON", "NINO1.2", "ANOM")]
colnames(environment) = c("year", "month", "nino12", "anom")
# enviroment = read.csv("enviroment.csv")
# mei = mei[, c("year", "mei")]
# mei$month = rep(1:12, length(nrow(mei)))
factor = 1e-3
names(perico)
pericoByMonth = aggregate(cbind(total, pericokg, holdCapacity, nHook, daysTrip, nHours, nTrips) ~ year + month, FUN = sum,
data = perico, na.action=na.pass)
newBase = expand.grid(year = unique(pericoByMonth$year), month = c(1:12))
pericoByMonth = merge(newBase, pericoByMonth, all = TRUE)
pericoByMonth = merge(pericoByMonth, environment, all.x = TRUE)
pericoByMonth$time = pericoByMonth$year + ((pericoByMonth$month)/12)
pericoByMonth = pericoByMonth[order(pericoByMonth$time), ]
pericoByMonth = pericoByMonth[pericoByMonth$year >= 1999 & pericoByMonth$year <= 2015, ]
pericoByMonth$semester = ifelse(pericoByMonth$month<=6, pericoByMonth$year+0, pericoByMonth$year+0.5)
pericoByMonth$quarter = pericoByMonth$year + rep(c(0, 0.25, 0.5, 0.75), each=3)
season = rep(c("summer", "fall", "winter", "spring"), each = 3, len = nrow(pericoByMonth) + 1)
# year effect
for(i in 0:11) pericoByMonth[, paste0("year", i)] = as.factor(lag(pericoByMonth$year, i))
# semester effect
for(i in 0:5) pericoByMonth[, paste0("semester", i)] = as.factor(lag(pericoByMonth$semester, i, freq=6))
# quarter effect
for(i in 0:2) pericoByMonth[, paste0("quarter", i)] = as.factor(lag(pericoByMonth$quarter, i, freq=3))
pericoByMonth$month = as.factor(pericoByMonth$month)
pericoByMonth$year = as.factor(pericoByMonth$year)
pericoByMonth$season1 = as.factor(season[-length(season)])
pericoByMonth$season2 = as.factor(season[-1])
pericoByMonth$yearSeason = pericoByMonth$year3
# CPUE: ----------------------------------------------------
pericoByMonth$cpue = (factor*pericoByMonth$pericokg)/pericoByMonth$nHook
pairsrp(pericoByMonth[, c("cpue", "holdCapacity", "nHook", "nHours", "nTrips")], meth = "pearson",
cex = 1.5, col = "grey50")
# year effect
tot.year = NULL
for(i in 0:11) {
fmla = sprintf("log(cpue) ~ year%d", i)
tot.year[[paste0("year", i)]] = gam(as.formula(fmla), data=pericoByMonth)
print(c(round(AIC(tot.year[[i+1]]),2), names(tot.year)[i+1]))
}
# semester effect
tot.sem = NULL
for(i in 0:5) {
fmla = sprintf("log(cpue) ~ semester%d", i)
tot.sem[[paste0("semester", i)]] = gam(as.formula(fmla), data=pericoByMonth)
print(c(round(AIC(tot.sem[[i+1]]),2), names(tot.sem)[i+1]))
}
# quarter effect
tot.quart = NULL
for(i in 0:2) {
fmla = sprintf("log(cpue) ~ quarter%d + month", i)
tot.quart[[paste0("quarter", i)]] = gam(as.formula(fmla), data=pericoByMonth)
print(c(round(AIC(tot.quart[[i+1]]),2), names(tot.quart)[i+1]))
}
# Candidate standardization models ------------------------------------------
# Fifteen GAMs for log(CPUE): a temporal index (year10 / semester4 / quarter0),
# optionally a season term, and optionally one smooth covariate
# (El Nino index, SST anomaly, or effort hours).
model = NULL
model[[1]]  = gam(log(cpue) ~ year10, data = pericoByMonth)
model[[2]]  = gam(log(cpue) ~ semester4, data = pericoByMonth)
model[[3]]  = gam(log(cpue) ~ quarter0, data = pericoByMonth)
model[[4]]  = gam(log(cpue) ~ year10 + season2, data = pericoByMonth)
model[[5]]  = gam(log(cpue) ~ semester4 + season2, data = pericoByMonth)
model[[6]]  = gam(log(cpue) ~ quarter0 + season2, data = pericoByMonth)
model[[7]]  = gam(log(cpue) ~ year10 + season2 + s(nino12), data = pericoByMonth)
model[[8]]  = gam(log(cpue) ~ semester4 + season2 + s(nino12), data = pericoByMonth)
model[[9]]  = gam(log(cpue) ~ quarter0 + season2 + s(nino12), data = pericoByMonth)
model[[10]] = gam(log(cpue) ~ year10 + season2 + s(anom), data = pericoByMonth)
model[[11]] = gam(log(cpue) ~ semester4 + season2 + s(anom), data = pericoByMonth)
model[[12]] = gam(log(cpue) ~ quarter0 + season2 + s(anom), data = pericoByMonth)
model[[13]] = gam(log(cpue) ~ year10 + season2 + s(nHours), data = pericoByMonth)
model[[14]] = gam(log(cpue) ~ semester4 + season2 + s(nHours), data = pericoByMonth)
model[[15]] = gam(log(cpue) ~ quarter0 + season2 + s(nHours), data = pericoByMonth)
# Compare by AIC and inspect each fit.
for(i in seq_along(model)){
  print(AIC(model[[i]]))
}
for(i in seq_along(model)){
  print(summary(model[[i]]))
}
# Back-transform each model's fitted values into columns cpueP1..cpueP15.
# (Replaces 15 copy-pasted assignment lines with a loop; columns are
# initialized to NA so rows with missing cpue stay NA, as before.)
has_cpue <- !is.na(pericoByMonth$cpue)
for (i in seq_along(model)) {
  col <- paste0("cpueP", i)
  pericoByMonth[[col]] <- NA_real_
  pericoByMonth[[col]][has_cpue] <- exp(predict(model[[i]]))
}
# Observed CPUE (grey) vs two candidate fits (models 9 and 15).
plot(pericoByMonth$time, pericoByMonth$cpue, type="b", col="gray", xlim=c(1999, 2015),
     ylab = "CPUE", xlab = "time")
lines(pericoByMonth$time, pericoByMonth$cpueP9, col="blue", lwd=2)
lines(pericoByMonth$time, pericoByMonth$cpueP15, col="red", lwd=2)
# Correlation of each fitted series with the observed CPUE.
print(cor(pericoByMonth[, grep(pat="cpue", names(pericoByMonth),value=TRUE)], use="complete")[-1,1])
##############
# Extract standardized abundance indices from the chosen models, scaled to
# the reference level `ref`.
ref = "2000"
MODELyear = 7
# Year-based index from model 7 (year10 + season2 + s(nino12)).
x = model[[MODELyear]]
xperico = complete.cases(pericoByMonth[, c("nHook")])
pT = predict(x, type="terms", se=TRUE)
pT[["index"]] = pericoByMonth$year10[xperico]
pT[["time"]] = pericoByMonth$year10[xperico]
split(pT$fit[, "year10"], f = pT$index)  # NOTE(review): result unused; likely a leftover inspection call
# One term estimate (and SE) per year level.
lyear = tapply(pT$fit[, "year10"], INDEX=pT$index, FUN=unique)
se = tapply(pT$se.fit[, "year10"], INDEX=pT$index, FUN=unique)
# Lognormal bias-corrected back-transform of the year effect.
year = exp(lyear + 0.5*se^2)
year.perico = year/year[ref]  # index relative to the 2000 level
year.perico = data.frame(time = as.numeric(names(lyear)),
                         lyear = lyear, se = se, year = year, ind = year.perico)
#par(mfrow=c(1,2), mar=c(3,3,1,1))
#par(mfrow=c(1,2))
# Plot, dropping the first three (lag-induced) levels.
plot(year.perico$time[-c(1:3)], year.perico$year[-c(1:3)], type = "b", lwd = 2, pch = 19,
     xlab = "Year", ylab = "Standardized catch rate", axes = FALSE, ylim = c(0, 2))
# NOTE(review): axis labels run 1998-2014 while the data were filtered to
# 1999-2015 — confirm the intended range.
axis(1, las = 2, seq(1998, 2014, 1))
axis(2, las = 1)
box()
title("year")
# Quarter-based index from model 15 (quarter0 + season2 + s(nHours)).
MODELquarter = 15
y = model[[MODELquarter]]
yperico = complete.cases(pericoByMonth[, c("nHook")])
pT = predict(y, type="terms", se=TRUE)
pT[["index"]] = pericoByMonth$quarter0[yperico]
pT[["time"]] = pericoByMonth$quarter0[yperico]
split(pT$fit[, "quarter0"], f = pT$index)  # NOTE(review): result unused
lyear = tapply(pT$fit[, "quarter0"], INDEX=pT$index, FUN=unique)
se = tapply(pT$se.fit[, "quarter0"], INDEX=pT$index, FUN=unique)
year = exp(lyear + 0.5*se^2)
quarter.perico = year/year[ref]
quarter.perico = data.frame(time=as.numeric(names(lyear)),
                            lyear=lyear, se=se, year=year, ind=quarter.perico)
plot(quarter.perico$time, quarter.perico$year, type = "b", lwd = 2, pch = 19,
     xlab = "Year", ylab = "Standardized catch rate", col = "red", axes = FALSE)
axis(1, las = 2, seq(1999, 2015, 1))
axis(2, las = 1)
box()
title("quarter")
|
08fff484f6ed6777dd804def0cf3d18cef47e0bb
|
8adbe657f2087fbec42acca33c047ba9926591f7
|
/Model Building for Booking cancellations.R
|
3994015b27753421e35242b1545b534323625917
|
[
"MIT"
] |
permissive
|
anuraglahon16/Final-Project-R
|
219b44888c09b10752ca3111ec6e87122cbd13f6
|
52ac47e93eb282fa0feb9534ae42325059de04a8
|
refs/heads/master
| 2022-06-30T23:56:03.804661
| 2020-05-11T23:01:52
| 2020-05-11T23:01:52
| 258,346,715
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,099
|
r
|
Model Building for Booking cancellations.R
|
# Load and clean the hotel bookings data --------------------------------------
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; kept for parity with the original workflow.
getwd()
setwd('C:/Users/anura/Downloads')
hotels = read.csv("hotel_bookings.csv")
########################## MODEL BUILDING for booking cancellations #########################
# NOTE(review): unconditional install.packages() calls are kept for parity;
# consider guarding them with requireNamespace() checks.
install.packages("tidyverse")
install.packages("Hmisc")
install.packages("DataExplorer")
install.packages("ggplot2")
library(funModeling)
library(tidyverse)
library(Hmisc)
library(DataExplorer)
library(ggplot2)
# Handling missing data.
# Fill missing `children` from the corresponding `babies` value
# (vectorized replacement of the original element-wise loop; same result).
missing_children <- is.na(hotels$children)
hotels$children[missing_children] <- hotels$babies[missing_children]
# BUG FIX: the original ran `hotels <- fix(hotels)`, which opens an
# interactive data editor and fails (or hangs) when the script is run
# non-interactively; the call has been removed.
# 'Undefined' and 'SC' both mean "no meal package", so collapse them.
hotels$meal <- replace(hotels$meal, hotels$meal == 'Undefined', 'SC')
hotels$meal <- factor(hotels$meal)
# Statistical mode of a vector (first most-frequent value on ties).
getmode <- function(v) {
  uniqv <- unique(v)
  uniqv[which.max(tabulate(match(v, uniqv)))]
}
# Replace 'Undefined' with the modal market segment.
modeM <- getmode(hotels$market_segment)
modeM
hotels$market_segment <- replace(hotels$market_segment, hotels$market_segment == 'Undefined', modeM)
hotels$market_segment <- factor(hotels$market_segment)
# Replace 'Undefined' with the modal distribution channel.
modeD <- getmode(hotels$distribution_channel)
modeD
hotels$distribution_channel <- replace(hotels$distribution_channel, hotels$distribution_channel == 'Undefined', modeD)
hotels$distribution_channel <- factor(hotels$distribution_channel)
# Drop rows whose country is the literal string 'NULL'.
hotels$country[hotels$country == 'NULL'] = NA
sum(is.na(hotels$country))
hotels = hotels[!is.na(hotels$country), ]
sum(is.na(hotels$country))
# Drop the `company` column (mostly missing in this data).
install.packages("dplyr")
library(dplyr)
hotels = select(hotels, -company)
# Inspect the cleaned dataset.
describe(hotels)
dim(hotels)
head(hotels)
hotels$reservation_status_date <- NULL  # drop the date column entirely
dim(hotels)
# ---- Train/test split (80/20) ----------------------------------------------
set.seed(0)
n <- nrow(hotels)
shuffled <- hotels[sample(n), ]     # random permutation of the rows
cutoff <- round(0.8 * n)
trainSet <- shuffled[seq_len(cutoff), ]
testSet <- shuffled[(cutoff + 1):n, ]
summary(trainSet)
summary(testSet)
# training rows
dim(trainSet)
# test rows
dim(testSet)
################################################### Logistic regression ######
# Baseline logistic model for cancellation probability.
model1 <- glm(is_canceled ~ hotel + lead_time + arrival_date_month + children +
                market_segment + is_repeated_guest + adults + babies +
                previous_cancellations +
                deposit_type + booking_changes +
                reserved_room_type + adr + days_in_waiting_list + customer_type +
                total_of_special_requests,
              data = trainSet, family = "binomial")
summary(model1)
# In-sample predicted probabilities.
train_pred <- predict(model1, trainSet, type = 'response')
library(knitr)
library(ROCR)
install.packages("verification")
library(verification)
# Choose the probability cutoff that maximizes training accuracy.
pred <- prediction(train_pred, trainSet$is_canceled)
perform <- performance(pred, "acc")
best_idx <- which.max(slot(perform, "y.values")[[1]])  # renamed from `max` to avoid shadowing base::max
prob <- slot(perform, "x.values")[[1]][best_idx]
prob
train_pred1 <- ifelse(train_pred > prob, 1, 0)
mean(trainSet$is_canceled == train_pred1)  # training accuracy
tble <- table(Actual = trainSet$is_canceled, Predicted = train_pred1); tble
# Apply the same cutoff to the holdout split (~80.5% accuracy).
test_pred <- predict(model1, testSet, type = 'response')
test_pred1 <- ifelse(test_pred > prob, 1, 0)
mean(testSet$is_canceled == test_pred1)
tble1 <- table(Actual = testSet$is_canceled, Predicted = test_pred1); tble1
# Confusion-matrix cells (rows = actual, columns = predicted).
TN <- tble1[1, 1]
FN <- tble1[2, 1]
FP <- tble1[1, 2]
TP <- tble1[2, 2]
# BUG FIX: row totals were taken from the *training* table `tble`;
# they must come from the test table `tble1`.
N <- sum(tble1[1, ])  # actual negatives
P <- sum(tble1[2, ])  # actual positives
# BUG FIX: specificity is TN/N (was FP/N, the false-positive rate) and
# sensitivity is TP/P (was TP/N).
Specificity <- TN/N
Sensitivity <- TP/P
df <- data.frame(Specificity, Sensitivity)
kable(df)
1 - sum(diag(tble1))/sum(tble1)  # misclassification rate
roc.plot(
  testSet$is_canceled,
  test_pred,
  threshold = seq(0, max(test_pred), 0.01)
)
# AUC (~0.835).
pred1 <- prediction(test_pred, testSet$is_canceled)
auc <- performance(pred1, "auc")
auc <- unlist(slot(auc, "y.values"))
auc
# Stepwise selection.
# BUG FIX: stepAIC() lives in MASS, which was never attached; use an
# explicit namespace so the call resolves.
stepHotel <- MASS::stepAIC(model1)
stepHotel$anova
# Diagnostics: multicollinearity, information criteria, residual plots.
install.packages("car")
library(car)
vif(model1)
AIC(model1)
BIC(model1)
plot(model1)
#residualPlots(model1)
durbinWatsonTest(model1)
##################################################### Random forest ##########
install.packages("randomForest")
library(randomForest)
# BUG FIX: caret provides confusionMatrix(); it was attached only *after*
# the first confusionMatrix() call below, which would error.
library(caret)
# Inspect column types before converting.
sapply(hotels, class)
str(hotels)
# BUG FIX: the original converted `reservation_status_date` to integer, but
# that column was deleted earlier (`hotels$reservation_status_date <- NULL`),
# so the assignment produced integer(0) and errored; the line is removed.
# Convert the label and low-cardinality predictors to factors.
hotels$total_of_special_requests <- as.factor(hotels$total_of_special_requests)
hotels$is_repeated_guest <- as.factor(hotels$is_repeated_guest)
hotels$arrival_date_year <- as.factor(hotels$arrival_date_year)
hotels$is_canceled <- as.factor(hotels$is_canceled)
# Drop two columns by position.
# NOTE(review): positional drops are fragile after the earlier column
# removals — confirm columns 24 and 14 are the intended ones (the original
# comment says "country").
hotels = hotels[-24]
hotels = hotels[-14]
# NOTE(review): the factor conversions above modify `hotels`, not
# trainSet/testSet, which were split earlier — confirm the split is
# recreated before fitting, otherwise the model sees the old columns.
model1 = randomForest(is_canceled ~ . -reservation_status -arrival_date_year -arrival_date_month, data = trainSet)
model1
# Prediction and confusion matrix for the training data.
modpredTrain = predict(model1, trainSet)
confusionMatrix(modpredTrain, trainSet$is_canceled)
# Prediction and confusion matrix for the testing data.
modpredTest = predict(model1, testSet)
confusionMatrix(modpredTest, testSet$is_canceled)
# OOB error rate by number of trees.
plot(model1)
# Number of terminal nodes per tree.
hist(treesize(model1), main = "No of nodes for the trees", col = "green")
# Variable importance.
varImpPlot(model1)
varUsed(model1)
#------------------------ Ridge / lasso on all features ----------------------
# cv.glmnet tunes the penalty lambda by cross-validation.
library(glmnet)
x_train = model.matrix(trainSet$is_canceled ~ ., trainSet)[, -1]
y_train = trainSet$is_canceled
x_test = model.matrix(testSet$is_canceled ~ ., testSet)[, -1]
y_test = testSet$is_canceled
# Ridge regression (alpha = 0 is ridge in glmnet).
cv_ridge = cv.glmnet(x_train, y_train, alpha = 0)
cv_ridge$lambda.min
# Coefficients at (approximately) the CV-optimal lambda.
predict(cv_ridge, type = "coefficients", s = 0.04829162)
dim(hotels)
lambda_min <- cv_ridge$lambda.min  # lambda minimizing CV error
lambda_1se <- cv_ridge$lambda.1se  # largest lambda within 1 SE of the minimum
coef(cv_ridge, s = lambda_1se)
# Training accuracy at a 0.5 threshold.
ridge_prob <- predict(cv_ridge, newx = x_train, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(trainSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == trainSet$is_canceled)
# Holdout accuracy.
ridge_prob <- predict(cv_ridge, newx = x_test, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(testSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == testSet$is_canceled)
# Lasso (alpha = 1).
cv_lasso = cv.glmnet(x_train, y_train, alpha = 1)
# BUG FIX: the best lambda for the lasso must come from cv_lasso, not
# from the ridge fit (was `cv_ridge$lambda.min`).
bestlam = cv_lasso$lambda.min
predict(cv_lasso, type = "coefficients", s = bestlam)
lambda_min <- cv_lasso$lambda.min
lambda_1se <- cv_lasso$lambda.1se
coef(cv_lasso, s = lambda_1se)
# Training accuracy.
lasso_prob <- predict(cv_lasso, newx = x_train, s = lambda_1se, type = "response")
lasso_predict <- rep("non_cancelled", nrow(trainSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == trainSet$is_canceled)
# Holdout accuracy.
lasso_prob <- predict(cv_lasso, newx = x_test, s = lambda_1se, type = "response")
# BUG FIX: the original reset `ridge_predict` here while thresholding into
# the stale, training-length `lasso_predict` vector; reinitialize
# `lasso_predict` to test length instead.
lasso_predict <- rep("non_cancelled", nrow(testSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == testSet$is_canceled)
#========== Design matrices for the selected-feature logistic/glmnet runs =====
# model.matrix() expands factors to dummy columns; [, -1] drops the intercept.
x_train1 = model.matrix(trainSet$is_canceled~hotel + lead_time + arrival_date_month +arrival_date_year+ children +meal+
                          market_segment + is_repeated_guest + adults + babies +
                          previous_cancellations +
                          deposit_type + booking_changes +
                          reserved_room_type + adr + days_in_waiting_list + customer_type +
                          total_of_special_requests, trainSet)[,-1]
y_train1 = trainSet$is_canceled
x_test1=model.matrix(testSet$is_canceled~hotel + lead_time + arrival_date_month +arrival_date_year+ children +meal+
                       market_segment + is_repeated_guest + adults + babies +
                       previous_cancellations +
                       deposit_type + booking_changes +
                       reserved_room_type + adr + days_in_waiting_list + customer_type +
                       total_of_special_requests, testSet)[,-1]
y_test1=testSet$is_canceled
#========== Ridge MSE run (disabled: slow) ====================================
#alpha0.fit <- cv.glmnet(x_train1,y_train1,type.measure = 'mse',alpha=0,family='gaussian')
#alpha0.predicted<-predict(alpha0.fit,s=alpha0.fit$lambda.1se,newx=x_test1)
#mean((y_test1-alpha0.predicted)^2)
#========== Lasso MSE run (disabled: slow) ====================================
#alpha1.fit <- cv.glmnet(x_train1,y_train1,type.measure = 'mse',alpha=1,family='gaussian')#
#alpha1.predicted<-predict(alpha1.fit,s=alpha1.fit$lambda.1se,newx=x_test1)
#mean((y_test1-alpha1.predicted)^2)
#========== Rebuild the matrices with a reduced feature set ===================
# NOTE(review): this overwrites x_train1/x_test1 above; only the reduced set
# below is actually used by the accuracy runs that follow.
x_train1 = model.matrix(trainSet$is_canceled~lead_time + arrival_date_year + arrival_date_month + arrival_date_week_number+
                          arrival_date_day_of_month + stays_in_weekend_nights + stays_in_week_nights+
                          adults + children + babies + meal + distribution_channel+is_repeated_guest +
                          previous_cancellations + reserved_room_type + assigned_room_type+
                          deposit_type + customer_type + adr , trainSet)[,-1]
y_train1 = trainSet$is_canceled
x_test1=model.matrix(testSet$is_canceled~lead_time + arrival_date_year + arrival_date_month + arrival_date_week_number+
                       arrival_date_day_of_month + stays_in_weekend_nights + stays_in_week_nights+
                       adults + children + babies + meal + distribution_channel+is_repeated_guest +
                       previous_cancellations + reserved_room_type + assigned_room_type+
                       deposit_type + customer_type + adr , testSet)[,-1]
y_test1=testSet$is_canceled
#------------------------- Lasso (alpha = 1) with accuracy metrics -----------
cv.out <- cv.glmnet(x_train1, y_train1, alpha = 1, family = "binomial", type.measure = "mse")
plot(cv.out)
lambda_min <- cv.out$lambda.min  # lambda minimizing CV error
lambda_1se <- cv.out$lambda.1se  # largest lambda within 1 SE of the minimum
coef(cv.out, s = lambda_1se)
# Training accuracy at a 0.5 threshold (~76.7% reported by the author).
lasso_prob <- predict(cv.out, newx = x_train1, s = lambda_1se, type = "response")
lasso_predict <- rep("non_cancelled", nrow(trainSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == trainSet$is_canceled)
# Holdout accuracy.
lasso_prob <- predict(cv.out, newx = x_test1, s = lambda_1se, type = "response")
lasso_predict <- rep("non_cancelled", nrow(testSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == testSet$is_canceled)
# Confusion matrix and derived rates (rows = actual, columns = predicted).
tble1 <- table(Actual = testSet$is_canceled, Predicted = lasso_predict); tble1
TN <- tble1[1, 1]
FN <- tble1[2, 1]
FP <- tble1[1, 2]
TP <- tble1[2, 2]
N <- sum(tble1[1, ])  # actual negatives
P <- sum(tble1[2, ])  # actual positives
# BUG FIX: specificity is TN/N (was FP/N) and sensitivity is TP/P (was TP/N).
Specificity <- TN/N
Sensitivity <- TP/P
df <- data.frame(Specificity, Sensitivity)
library(knitr)
library(ROCR)
library(verification)
kable(df)
1 - sum(diag(tble1))/sum(tble1)  # misclassification rate
roc.plot(
  testSet$is_canceled,
  lasso_predict,
  threshold = seq(0, max(lasso_predict), 0.01)
)
pred1 <- prediction(lasso_predict, testSet$is_canceled)
auc <- performance(pred1, "auc")
auc <- unlist(slot(auc, "y.values"))
auc
#--------------------- Ridge (alpha = 0) with accuracy metrics ---------------
cv.out <- cv.glmnet(x_train1, y_train1, alpha = 0, family = "binomial", type.measure = "mse")
plot(cv.out)
lambda_min <- cv.out$lambda.min
lambda_1se <- cv.out$lambda.1se
coef(cv.out, s = lambda_1se)
# Training accuracy at a 0.5 threshold (~75.8% reported by the author).
ridge_prob <- predict(cv.out, newx = x_train1, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(trainSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == trainSet$is_canceled)
# Holdout accuracy.
ridge_prob <- predict(cv.out, newx = x_test1, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(testSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == testSet$is_canceled)
# Confusion matrix and derived rates.
tble1 <- table(Actual = testSet$is_canceled, Predicted = ridge_predict); tble1
TN <- tble1[1, 1]
FN <- tble1[2, 1]
FP <- tble1[1, 2]
TP <- tble1[2, 2]
N <- sum(tble1[1, ])  # actual negatives
P <- sum(tble1[2, ])  # actual positives
# BUG FIX: specificity is TN/N (was FP/N) and sensitivity is TP/P (was TP/N).
Specificity <- TN/N
Sensitivity <- TP/P
df <- data.frame(Specificity, Sensitivity)
kable(df)
1 - sum(diag(tble1))/sum(tble1)  # misclassification rate
roc.plot(
  testSet$is_canceled,
  ridge_predict,
  threshold = seq(0, max(ridge_predict), 0.01)
)
pred1 <- prediction(ridge_predict, testSet$is_canceled)
auc <- performance(pred1, "auc")
auc <- unlist(slot(auc, "y.values"))
auc
#========== Second reduced feature set (~80.3% holdout accuracy reported) =====
x_train1 = model.matrix(trainSet$is_canceled~lead_time + country + deposit_type + adr + arrival_date_day_of_month +
                          total_of_special_requests + stays_in_weekend_nights + previous_cancellations+
                          arrival_date_year+ booking_changes + required_car_parking_spaces +
                          market_segment, trainSet)[,-1]
y_train1 = trainSet$is_canceled
x_test1=model.matrix(testSet$is_canceled~lead_time + country + deposit_type + adr + arrival_date_day_of_month +
                       total_of_special_requests + stays_in_weekend_nights + previous_cancellations+
                       arrival_date_year+ booking_changes + required_car_parking_spaces +
                       market_segment , testSet)[,-1]
y_test1=testSet$is_canceled
#------------------------- Lasso -----------------------
cv.out <- cv.glmnet(x_train1,y_train1,alpha=1,family="binomial",type.measure = "mse" )
# CV curve over the lambda path.
plot(cv.out)
lambda_min <- cv.out$lambda.min  # lambda minimizing CV error
lambda_1se <- cv.out$lambda.1se  # largest lambda within 1 SE of the minimum
coef(cv.out,s=lambda_1se)
# Training accuracy at a 0.5 threshold (~80.1% reported by the author).
lasso_prob <- predict(cv.out,newx = x_train1,s=lambda_1se,type="response")
lasso_predict <- rep("non_cancelled",nrow(trainSet))
lasso_predict[lasso_prob>.5] <- "canceled"
lasso_predict <- ifelse(lasso_predict=="canceled",1,0)
mean(lasso_predict==trainSet$is_canceled)
# Holdout accuracy.
lasso_prob <- predict(cv.out,newx = x_test1,s=lambda_1se,type="response")
lasso_predict <- rep("non_cancelled",nrow(testSet))
lasso_predict[lasso_prob>.5] <- "canceled"
lasso_predict <- ifelse(lasso_predict=="canceled",1,0)
mean(lasso_predict==testSet$is_canceled)
#--------------------- Ridge -----------------------------------------------
cv.out <- cv.glmnet(x_train1,y_train1,alpha=0,family="binomial",type.measure = "mse" )
plot(cv.out)
lambda_min <- cv.out$lambda.min
lambda_1se <- cv.out$lambda.1se
coef(cv.out,s=lambda_1se)
# Training accuracy (~79.7% reported by the author).
ridge_prob <- predict(cv.out,newx = x_train1,s=lambda_1se,type="response")
ridge_predict <- rep("non_cancelled",nrow(trainSet))
ridge_predict[ridge_prob>.5] <- "canceled"
ridge_predict <- ifelse(ridge_predict=="canceled",1,0)
mean(ridge_predict==trainSet$is_canceled)
# Holdout accuracy.
ridge_prob <- predict(cv.out,newx = x_test1,s=lambda_1se,type="response")
ridge_predict <- rep("non_cancelled",nrow(testSet))
ridge_predict[ridge_prob>.5] <- "canceled"
ridge_predict <- ifelse(ridge_predict=="canceled",1,0)
mean(ridge_predict==testSet$is_canceled)
#========== Third feature set: as above plus stays_in_week_nights ============
# (Best holdout accuracy reported by the author, ~80.6%.)
x_train1 = model.matrix(trainSet$is_canceled ~ lead_time + country + deposit_type + adr + arrival_date_day_of_month +
                          total_of_special_requests + stays_in_weekend_nights + stays_in_week_nights + previous_cancellations +
                          arrival_date_year + booking_changes + required_car_parking_spaces +
                          market_segment, trainSet)[, -1]
y_train1 = trainSet$is_canceled
x_test1 = model.matrix(testSet$is_canceled ~ lead_time + country + deposit_type + adr + arrival_date_day_of_month +
                         total_of_special_requests + stays_in_weekend_nights + stays_in_week_nights + previous_cancellations +
                         arrival_date_year + booking_changes + required_car_parking_spaces +
                         market_segment, testSet)[, -1]
y_test1 = testSet$is_canceled
#------------------------- Lasso -----------------------
cv.out <- cv.glmnet(x_train1, y_train1, alpha = 1, family = "binomial", type.measure = "mse")
cv.out
plot(cv.out)
lambda_min <- cv.out$lambda.min
lambda_1se <- cv.out$lambda.1se
coef(cv.out, s = lambda_1se)
# Training accuracy at a 0.5 threshold (~80.2% reported by the author).
lasso_prob <- predict(cv.out, newx = x_train1, s = lambda_1se, type = "response")
lasso_predict <- rep("non_cancelled", nrow(trainSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == trainSet$is_canceled)
# Holdout accuracy.
lasso_prob <- predict(cv.out, newx = x_test1, s = lambda_1se, type = "response")
lasso_predict <- rep("non_cancelled", nrow(testSet))
lasso_predict[lasso_prob > .5] <- "canceled"
lasso_predict <- ifelse(lasso_predict == "canceled", 1, 0)
mean(lasso_predict == testSet$is_canceled)
# Confusion matrix and derived rates.
table(pred = lasso_predict, true = testSet$is_canceled)
tble1 <- table(Actual = testSet$is_canceled, Predicted = lasso_predict); tble1
TN <- tble1[1, 1]
FN <- tble1[2, 1]
FP <- tble1[1, 2]
TP <- tble1[2, 2]
N <- sum(tble1[1, ])  # actual negatives
P <- sum(tble1[2, ])  # actual positives
# BUG FIX: specificity is TN/N (was FP/N) and sensitivity is TP/P (was TP/N).
Specificity <- TN/N
Sensitivity <- TP/P
df <- data.frame(Specificity, Sensitivity)
kable(df)
1 - sum(diag(tble1))/sum(tble1)  # misclassification rate
roc.plot(
  testSet$is_canceled,
  lasso_predict,
  threshold = seq(0, max(lasso_predict), 0.01)
)
pred1 <- prediction(lasso_predict, testSet$is_canceled)
auc <- performance(pred1, "auc")
auc <- unlist(slot(auc, "y.values"))
auc
#--------------------- Ridge on the third feature set ------------------------
cv.out <- cv.glmnet(x_train1, y_train1, alpha = 0, family = "binomial", type.measure = "mse")
plot(cv.out)
lambda_min <- cv.out$lambda.min
lambda_1se <- cv.out$lambda.1se
coef(cv.out, s = lambda_1se)
# Training accuracy at a 0.5 threshold (~79.8% reported by the author).
ridge_prob <- predict(cv.out, newx = x_train1, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(trainSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == trainSet$is_canceled)
# K-fold cross-validation on the training data (default lasso-type fit).
library(glmnet)
mod_cv <- cv.glmnet(x = x_train1, y = y_train1, family = 'binomial')
mod_cv$lambda.1se
coef(mod_cv, mod_cv$lambda.1se)
# Holdout accuracy for the ridge fit.
ridge_prob <- predict(cv.out, newx = x_test1, s = lambda_1se, type = "response")
ridge_predict <- rep("non_cancelled", nrow(testSet))
ridge_predict[ridge_prob > .5] <- "canceled"
ridge_predict <- ifelse(ridge_predict == "canceled", 1, 0)
mean(ridge_predict == testSet$is_canceled)
# Confusion matrix and derived rates.
tble1 <- table(Actual = testSet$is_canceled, Predicted = ridge_predict); tble1
TN <- tble1[1, 1]
FN <- tble1[2, 1]
FP <- tble1[1, 2]
TP <- tble1[2, 2]
N <- sum(tble1[1, ])  # actual negatives
P <- sum(tble1[2, ])  # actual positives
# BUG FIX: specificity is TN/N (was FP/N) and sensitivity is TP/P (was TP/N).
Specificity <- TN/N
Sensitivity <- TP/P
df <- data.frame(Specificity, Sensitivity)
kable(df)
1 - sum(diag(tble1))/sum(tble1)  # misclassification rate
roc.plot(
  testSet$is_canceled,
  ridge_predict,
  threshold = seq(0, max(ridge_predict), 0.01)
)
# BUG FIX: this is the ridge section, but the AUC was computed from
# `lasso_predict`; use the ridge predictions.
pred1 <- prediction(ridge_predict, testSet$is_canceled)
auc <- performance(pred1, "auc")
auc <- unlist(slot(auc, "y.values"))
auc
#==============================================================================
|
654195199a900011c97971556f2e5bb66af65c25
|
790b18693ac1ad05892bc84b9d2704ce02d144e5
|
/interpreting-xgboost.R
|
2953694373a6453057cabaf8bc0c0a6d9bf5c2aa
|
[] |
no_license
|
tom-beard/ml-experiments
|
f97b8cb4d20dbb2d942f17b622a4c73fc6a89215
|
1afe606089e14bafa1641b05104dab5527ed09b0
|
refs/heads/master
| 2022-09-21T23:30:22.655308
| 2022-09-18T08:38:45
| 2022-09-18T08:38:45
| 240,835,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,142
|
r
|
interpreting-xgboost.R
|
# from https://bgreenwell.github.io/pdp/articles/pdp-example-xgboost.html
# init --------------------------------------------------------------------
library(tidyverse)
library(xgboost)
library(pdp)
library(vip)
ames <- AmesHousing::make_ames()
# CV ----------------------------------------------------------------------
# Find the optimal number of boosting rounds using 5-fold CV.
set.seed(749) # for reproducibility
ames_xgb_cv <- xgb.cv(
  data = data.matrix(subset(ames, select = -Sale_Price)),
  label = ames$Sale_Price,
  # BUG FIX: "reg:linear" is deprecated in xgboost and triggers a warning;
  # "reg:squarederror" is the same objective under its current name.
  objective = "reg:squarederror",
  verbose = FALSE,
  nrounds = 1000,
  max_depth = 5,
  eta = 0.1, gamma = 0,
  nfold = 5,
  early_stopping_rounds = 30
)
ames_xgb_cv$best_iteration # optimal number of trees
# fit ---------------------------------------------------------------------
# Fit an XGBoost model to the Ames housing data using the tuned round count.
set.seed(804) # for reproducibility
ames_xgb <- xgboost::xgboost(
  data = data.matrix(subset(ames, select = -Sale_Price)),
  label = ames$Sale_Price,
  objective = "reg:squarederror",
  verbose = FALSE,
  nrounds = ames_xgb_cv$best_iteration,
  max_depth = 5,
  eta = 0.1, gamma = 0
)
# variable importance -----------------------------------------------------
vip(ames_xgb, num_features = 10) # 10 is the default
# c-ICE curves and PDPs for Overall_Qual and Gr_Liv_Area
x <- data.matrix(subset(ames, select = -Sale_Price)) # training features
p1 <- partial(ames_xgb, pred.var = "Overall_Qual",
              ice = TRUE, center = TRUE,
              plot = TRUE, rug = TRUE, alpha = 0.07,
              plot.engine = "ggplot2",
              train = x)
p2 <- partial(ames_xgb, pred.var = "Gr_Liv_Area",
              ice = TRUE, center = TRUE,
              plot = TRUE, rug = TRUE, alpha = 0.07,
              plot.engine = "ggplot2",
              train = x)
# look for heterogeneity in ICE curves, suggesting possible interactions
p3 <- partial(ames_xgb, pred.var = c("Overall_Qual", "Gr_Liv_Area"),
              plot = TRUE, chull = TRUE,
              plot.engine = "ggplot2",
              train = x)
# BUG FIX: grid.arrange() comes from gridExtra, which is never attached
# above; qualify the call so it resolves.
gridExtra::grid.arrange(p1, p2, p3, ncol = 3)
# to do: repeat using tidymodels!
|
f8866c8c0a6332550f2b96d44975a9ffb27d824b
|
87829b9b9e8d1d5b89af005578bd7e062cd07971
|
/crt_d_cov.R
|
fe8b49f29870c0ba84895f239d34506a0a0e1a5e
|
[] |
no_license
|
hedbergec/PowerWorkShopCode
|
a230010780359db2029e83b151d88edd00b4b368
|
e7e4ef347f75208d7945eda8e0503919dece6349
|
refs/heads/master
| 2020-05-18T04:23:30.862628
| 2019-04-30T01:42:37
| 2019-04-30T01:42:37
| 184,171,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
crt_d_cov.R
|
# Minimum detectable effect size (MDES) for a two-arm cluster-randomized
# trial with unit- and cluster-level covariates.

# Design inputs.
n <- 20              # units per cluster
m <- 5               # clusters per arm
alpha <- .05         # two-sided type-I error rate
power <- .8          # target power
rho.intra <- .1      # intraclass correlation
R2.unit <- .5        # variance explained by unit-level covariates
R2.cluster <- .75    # variance explained by cluster-level covariates
df <- 2*m - 2 - 1    # clusters minus arms minus one cluster covariate

# Covariate-adjusted design effect.
D <- 1 + (n - 1) * rho.intra -
  (R2.unit + (n * R2.cluster - R2.unit) * rho.intra)

# Critical t values for the significance test and the power requirement.
t.crit <- qt(1 - alpha/2, df)
t.beta <- qt(1 - power, df)

# MDES in standard-deviation units.
se.term <- sqrt((2 * D) / (m * n))
delta.m <- se.term * (t.crit - t.beta)
delta.m
|
8ba42777f886c2c36e0113b304f7a7fef4be546a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/regclass/examples/confusion_matrix.Rd.R
|
2bc8a4936e603ad44dd7ffe062f35e315f7186a2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
confusion_matrix.Rd.R
|
library(regclass)
### Name: confusion_matrix
### Title: Confusion matrix for logistic regression models
### Aliases: confusion.matrix confusion_matrix
### ** Examples
# Confusion matrix on the WINE data as a whole.
data(WINE)
M <- glm(Quality ~ ., data = WINE, family = binomial)
confusion_matrix(M)
# Calculate generalization error using a 70/30 training/holdout split.
set.seed(1010)
# BUG FIX: the original sampled with replace=TRUE, which duplicates training
# rows and leaves far more than 30% of the data in the holdout; a clean
# split samples row indices without replacement.
train.rows <- sample(nrow(WINE), 0.7 * nrow(WINE), replace = FALSE)
TRAIN <- WINE[train.rows, ]
HOLDOUT <- WINE[-train.rows, ]
M <- glm(Quality ~ ., data = TRAIN, family = binomial)
confusion_matrix(M, HOLDOUT)
# Predicting donation.
# A model predicting from recent average gift amount is significant, but its
# classifications are the same as the naive model (majority rules).
data(DONOR)
M.naive <- glm(Donate ~ 1, data = DONOR, family = binomial)
confusion_matrix(M.naive)
M <- glm(Donate ~ RECENT_AVG_GIFT_AMT, data = DONOR, family = binomial)
confusion_matrix(M)
|
038b36ad23dc68e5d50374b9aa6df50f1eb4d58e
|
344614177666758a75a235966d5adbc2232987b6
|
/R/cfb_ratings_sp_conference.R
|
23f8732cff119e140c76c44be940e289007348b9
|
[
"MIT"
] |
permissive
|
saiemgilani/cfbscrapR
|
61d677496bac5053c07fac708f34092ce70e141d
|
0a33650fb6e7e6768b2cc1318c8960dd291c73f4
|
refs/heads/master
| 2023-03-31T06:19:47.722785
| 2021-04-03T23:53:23
| 2021-04-03T23:53:23
| 274,903,057
| 28
| 8
|
NOASSERTION
| 2021-02-20T19:36:07
| 2020-06-25T11:49:29
|
R
|
UTF-8
|
R
| false
| false
| 5,247
|
r
|
cfb_ratings_sp_conference.R
|
#' Get conference-level S&P+ historical rating data
#'
#' @param year (*Integer* optional): Year, 4 digit format (*YYYY*)
#' @param conference (*String* optional): Conference abbreviation - S&P+ information by conference\cr
#' Conference abbreviations P5: ACC, B12, B1G, SEC, PAC\cr
#' Conference abbreviations G5 and FBS Independents: CUSA, MAC, MWC, Ind, SBC, AAC\cr
#'
#' @return A data frame with 25 variables:
#' \describe{
#' \item{`year`}{integer.}
#' \item{`conference`}{character.}
#' \item{`rating`}{double.}
#' \item{`second_order_wins`}{logical.}
#' \item{`sos`}{logical.}
#' \item{`offense_rating`}{double.}
#' \item{`offense_success`}{logical.}
#' \item{`offense_explosiveness`}{logical.}
#' \item{`offense_rushing`}{logical.}
#' \item{`offense_passing`}{logical.}
#' \item{`offense_standard_downs`}{logical.}
#' \item{`offense_passing_downs`}{logical.}
#' \item{`offense_run_rate`}{logical.}
#' \item{`offense_pace`}{logical.}
#' \item{`defense_rating`}{double.}
#' \item{`defense_success`}{logical.}
#' \item{`defense_explosiveness`}{logical.}
#' \item{`defense_rushing`}{logical.}
#' \item{`defense_passing`}{logical.}
#' \item{`defense_standard_downs`}{logical.}
#' \item{`defense_passing_downs`}{logical.}
#' \item{`defense_havoc_total`}{logical.}
#' \item{`defense_havoc_front_seven`}{logical.}
#' \item{`defense_havoc_db`}{logical.}
#' \item{`special_teams_rating`}{double.}
#' }
#' @source <https://api.collegefootballdata.com/ratings/sp/conferences>
#' @keywords SP+
#' @importFrom attempt stop_if_all
#' @importFrom jsonlite fromJSON
#' @importFrom httr GET
#' @importFrom utils URLencode
#' @importFrom assertthat assert_that
#' @importFrom glue glue
#' @importFrom dplyr rename
#' @export
#' @examples
#'
#' cfb_ratings_sp_conference(year = 2019)
#'
#' cfb_ratings_sp_conference(year = 2012, conference = 'SEC')
#'
#' cfb_ratings_sp_conference(year = 2016, conference = 'ACC')
#'
cfb_ratings_sp_conference <- function(year = NULL, conference = NULL){

  args <- list(year = year,
               conference = conference)

  # Check that at least one argument is not null
  attempt::stop_if_all(args, is.null,
              msg = "You need to specify at least one of two arguments:\n year, as a number (YYYY), or conference\nConference abbreviations P5: ACC, B12, B1G, SEC, PAC\nConference abbreviations G5 and Independents: CUSA, MAC, MWC, Ind, SBC, AAC")

  if(!is.null(year)){
    # check if year is numeric and correct length
    assertthat::assert_that(is.numeric(year) & nchar(year) == 4,
                msg = 'Enter valid year as a number in 4 digit format (YYYY)')
  }
  if(!is.null(conference)){
    # # Check conference parameter in conference abbreviations, if not NULL
    # assertthat::assert_that(conference %in% cfbscrapR::cfb_conf_types_df$abbreviation,
    #             msg = "Incorrect conference abbreviation, potential misspelling.\nConference abbreviations P5: ACC, B12, B1G, SEC, PAC\nConference abbreviations G5 and Independents: CUSA, MAC, MWC, Ind, SBC, AAC")
    # Encode conference parameter for URL, if not NULL
    conference = utils::URLencode(conference, reserved = TRUE)
  }

  base_url = 'https://api.collegefootballdata.com/ratings/sp/conferences?'

  # NULL arguments are dropped by paste0, leaving an empty query value.
  full_url = paste0(base_url,
                    "year=",year,
                    "&conference=",conference)

  # Check for internet
  check_internet()

  # Create the GET request and set response as res
  res <- httr::GET(full_url)

  # Check the result
  check_status(res)

  df <- data.frame()
  tryCatch(
    expr ={
      # Parse the body of the response we already fetched. (Previously the
      # endpoint was requested a second time via fromJSON(full_url).)
      df = jsonlite::fromJSON(httr::content(res, as = "text", encoding = "UTF-8"),
                              flatten = TRUE) %>%
        # Rename the flattened camelCase/dotted API columns to snake_case.
        dplyr::rename(
          second_order_wins = .data$secondOrderWins,
          offense_rating = .data$offense.rating,
          offense_success = .data$offense.success,
          offense_explosiveness = .data$offense.explosiveness,
          offense_rushing = .data$offense.rushing,
          offense_passing = .data$offense.passing,
          offense_standard_downs = .data$offense.standardDowns,
          offense_passing_downs = .data$offense.passingDowns,
          offense_run_rate = .data$offense.runRate,
          offense_pace = .data$offense.pace,
          defense_rating = .data$defense.rating,
          defense_success = .data$defense.success,
          defense_explosiveness = .data$defense.explosiveness,
          defense_rushing = .data$defense.rushing,
          defense_passing = .data$defense.passing,
          defense_standard_downs = .data$defense.standardDowns,
          defense_passing_downs = .data$defense.passingDowns,
          defense_havoc_total = .data$defense.havoc.total,
          defense_havoc_front_seven = .data$defense.havoc.frontSeven,
          defense_havoc_db = .data$defense.havoc.db,
          special_teams_rating = .data$specialTeams.rating) %>%
        as.data.frame()

      message(glue::glue("{Sys.time()}: Scraping conference-level S&P+ ratings data..."))
    },
    error = function(e) {
      message(glue::glue("{Sys.time()}: Invalid arguments or no conference-level S&P+ ratings data available!"))
    },
    warning = function(w) {
    },
    finally = {
    }
  )
  # Returns an empty data.frame when the request could not be parsed.
  return(df)
}
|
ad105386d304ba3afe5e4a06805bd146ce9b5d30
|
7454266c05d38cf3286778ba4c8b8a19c378da2b
|
/rmongo-try.R
|
75c1d9e2b7c709571088dc157604794cd67177bd
|
[] |
no_license
|
mmirolim/analyze-clicks
|
0e3d5555a917a8e73b71b73627e54111a55197c8
|
301828e2b43b046b9e15ac945b1b74cbf19d9489
|
refs/heads/master
| 2016-08-09T18:45:14.777316
| 2015-11-27T16:20:30
| 2015-11-27T16:20:30
| 46,752,445
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
rmongo-try.R
|
# Pull click events from MongoDB and plot daily click counts.
library(RMongo)
library(dplyr)
library(parsedate)

# NOTE(review): `today` is computed but never used below — kept for
# compatibility; confirm whether it was meant to parameterize the query.
today <- as.Date('2015-11-28', format = '%Y-%m-%d')

db <- mongoDbConnect("ltvdb")

# Fetch up to 100k events for users registered before the cutoff epoch.
output <- dbGetQueryForKeys(db, 'events', '{"regdate": {"$lte":1446249600}, "eventdate": {"$gte": {"$date": "2015-10-31T24:59:37.275Z"}}}', '{"_id":0, "profileid":1, "eventdate":1}', skip=0, limit = 100000)

# Bug fix: as.data.table() belongs to the data.table package, which is never
# loaded here, so this line errored at runtime. dplyr operates on plain data
# frames, so coerce with as.data.frame() instead.
data <- as.data.frame(output)

# Normalize event timestamps to calendar dates.
data$eventdate <- format(parse_date(data$eventdate), "%Y-%m-%d")

# Count clicks per day and plot them.
clicksByDay <- tally(group_by(data, eventdate), sort = FALSE)
barplot(clicksByDay$n, names.arg = clicksByDay$eventdate)
|
7cae39cf0f12765ac3fe1042b4e33edbe046a244
|
eac759ea418d8522b239cd420039c5047f34b546
|
/R/size.prop.confint.R
|
1f6dcf13369758ccf736811c16adf2cfb0c6b1c8
|
[] |
no_license
|
cran/OPDOE
|
1dac02018283dcbe1ad0cd8b85736baf930718f4
|
cc4f73c76e7a3655ddbc7fc8be069d150182f15d
|
refs/heads/master
| 2021-05-15T01:55:50.474710
| 2018-03-17T21:49:18
| 2018-03-17T21:49:18
| 17,681,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 177
|
r
|
size.prop.confint.R
|
# Sample size for a confidence interval of a proportion.
#
# p:     planning value for the proportion; NULL uses the conservative
#        worst case p = 1/2, i.e. p(1-p) = 1/4.
# delta: half-width of the desired confidence interval.
# alpha: 1 - confidence level (two-sided).
#
# Returns the required sample size, rounded up to an integer.
size.prop.confint <- function(p = NULL, delta, alpha) {
  # Two-sided standard-normal quantile for the confidence level.
  z <- qnorm(1 - alpha / 2)
  if (is.null(p)) {
    n <- z^2 / (4 * delta^2)
  } else {
    n <- p * (1 - p) * z^2 / delta^2
  }
  ceiling(n)
}
|
5a9b7b9d3bd4839303cc65a4ae17d810ce16abaa
|
9f56bef3c268b4d10f86817e15a64bc6ecd93a2f
|
/R/sim-helpers.R
|
be44178420d872df21e1df60c95dbe8110359904
|
[] |
no_license
|
Qian-Li/HFM
|
b6707322e41e53c0d61557c11a3da10684a2527f
|
d047c583095fcd614e05ad616e8207613555fff9
|
refs/heads/master
| 2021-04-26T22:11:02.759156
| 2018-05-04T06:42:30
| 2018-05-04T06:42:30
| 124,034,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,211
|
r
|
sim-helpers.R
|
## Simulation Helpers
##
#' Simulation Helper: quantile-based knots
#'
#' @param x A vector of observations.
#' @param num.knots An integer number of knots; when missing, a default is
#'   derived from the number of distinct observations.
#' @return A numeric vector of \code{num.knots} quantile-based knots for \code{x}.
#'
#'
#' @export
default.knots <- function(x, num.knots) {
  # Work with distinct values only.
  x <- unique(x)

  # Default knot count: roughly one knot per d distinct values, d >= 4.
  if (missing(num.knots)) {
    n <- length(x)
    d <- max(4, floor(n / 35))
    num.knots <- floor(n / d - 1)
  }

  x <- as.vector(x)

  # Drop missing values before computing quantiles.
  keep <- !is.na(x)
  x <- x[keep]

  # Interior knots at equally spaced probabilities (endpoints excluded).
  probs <- seq(0, 1, length = num.knots + 2)[-c(1, num.knots + 2)]
  unname(quantile(x, probs))
}
## Gaussian Process kernel:
#' Simulation Helper: GP-kernel
#'
#' Squared-exponential (RBF) covariance matrix between two input vectors,
#' used to simulate from a Gaussian process.
#'
#' @param X1 A vector of input
#' @param X2 A vector of input
#' @param l A number of kernel window size (length-scale)
#'
#' @export
calcSigma <- function(X1, X2, l = 1) {
  # Pairwise differences, then the squared-exponential transform.
  diffs <- outer(X1, X2, "-")
  exp(-1/2 * (abs(diffs) / l)^2)
}
# Importation of ourside functions
#' @importFrom stats quantile
#' @importFrom splines bs
NULL
|
e08613890cf95c4b73ffe9ddc38b724204b8f4d3
|
c570e275bebb8ff8ecb1439d135fc052dfecc47f
|
/R/modify.R
|
8b5fe9e1d73a1297a6d6e4860c6c59d784548e35
|
[] |
no_license
|
runehaubo/simr
|
d0e1ae35f14d3ff94b48c0bebbfb2b0941f5f919
|
aec34ad674ffb2f2173ca3da0f507fd6687aee6a
|
refs/heads/master
| 2021-03-19T11:26:32.261723
| 2018-04-20T10:46:22
| 2018-04-20T10:46:22
| 123,607,883
| 0
| 1
| null | 2018-03-02T17:13:19
| 2018-03-02T17:13:17
|
R
|
UTF-8
|
R
| false
| false
| 6,642
|
r
|
modify.R
|
#' Modifying model parameters.
#'
#' These functions can be used to change the size of a model's fixed effects,
#' its random effect variance/covariance matrices, or its residual variance.
#' This gives you more control over simulations from the model.
#'
#' @name modify
#' @rdname modify
#'
#' @param object a fitted model object.
#' @param value new parameter values.
#'
#' @details
#'
#' New values for \code{VarCorr} are interpreted as variances and covariances, not standard deviations and
#' correlations. New values for \code{sigma} and \code{scale} are interpreted on the standard deviation scale.
#' This means that both \code{VarCorr(object)<-VarCorr(object)} and \code{sigma(object)<-sigma(object)}
#' leave \code{object} unchanged, as you would expect.
#'
#' \code{sigma<-} will only change the residual standard deviation,
#' whereas \code{scale<-} will affect both \code{sigma} and \code{VarCorr}.
#'
#' These functions can be used to change the value of individual parameters, such as
#' a single fixed effect coefficient, using standard R subsetting commands.
#'
#' @examples
#' fm <- lmer(y ~ x + (1|g), data=simdata)
#' fixef(fm)
#' fixef(fm)["x"] <- -0.1
#' fixef(fm)
#'
#' @seealso \code{\link{getData}} if you want to modify the model's data.
#'
NULL
#' @rdname modify
#' @export
`fixef<-` <- function(object, value) {
  # Validate the replacement values against the model's current fixed
  # effects: names (if any) must match as a set and are reordered to the
  # model's own order; length must agree (see coefCheck).
  value <- coefCheck(fixef(object), value, "fixed effect")
  # merMod keeps its fixed-effect coefficients, unnamed, in the @beta slot.
  object @ beta <- unname(value)
  # Tag the object as deliberately modified so simr's "observed power"
  # warning is suppressed (see simrTag/observedPowerWarning).
  attr(object, "simrTag") <- TRUE
  return(object)
}
# Validate a replacement coefficient vector against an existing one.
# If `value` is named, its names must match those of `coef` exactly (as a
# set) and the result is returned in `coef`'s order; lengths must always
# agree. `thing` names the kind of coefficient in error messages.
coefCheck <- function(coef, value, thing = "coefficient") {
  expected <- names(coef)
  supplied <- names(value)

  if (!is.null(supplied)) {
    # Named replacement: reject any name not present in the model...
    if (!setequal(expected, supplied)) {
      stop(str_c(setdiff(supplied, expected)[[1]],
                 " is not the name of a ", thing, "."))
    }
    # ...and reorder to match the model's own coefficient order.
    value <- value[expected]
  }

  if (length(coef) != length(value)) {
    stop(str_c("Incorrect number of ", thing, "s."))
  }

  return(value)
}
#' @rdname modify
#' @export
`coef<-` <- function(object, value) UseMethod("coef<-", object)

#' @export
`coef<-.default` <- function(object, value) {
  # Validate/reorder the replacement values against the current coefficients.
  value <- coefCheck(coef(object), value)
  object $ coefficients <- value
  # Refresh fitted values so they stay consistent with the new coefficients.
  object $ fitted.values <- predict(object, type="response")
  # Mark the object as deliberately modified (see simrTag below).
  attr(object, "simrTag") <- TRUE
  return(object)
}

#' @export
`coef<-.glm` <- function(object, value) {
  value <- coefCheck(coef(object), value)
  object $ coefficients <- value
  # For a GLM, recompute the linear predictor first (via the lm method)...
  object $ linear.predictors <- predict.lm(object, type="response")
  # ...then map it through the inverse link to refresh the fitted values.
  object $ fitted.values <- family(object)$linkinv(object $ linear.predictors)
  attr(object, "simrTag") <- TRUE
  return(object)
}
# VarCorr -> theta for a single random-effects grouping factor.
# Returns the lower triangle (column-wise, diagonal included) of the
# Cholesky factor of V — pivoted for numerical safety, then un-pivoted —
# scaled by the residual SD `sigma`.
calcTheta1 <- function(V, sigma = 1) {
  R <- suppressWarnings(chol(V, pivot = TRUE))
  # Undo the pivoting so rows/columns return to their original order.
  ord <- order(attr(R, "pivot"))
  Lt <- t(R[ord, ord])
  Lt[lower.tri(Lt, diag = TRUE)] / sigma
}
# All the thetas: concatenate per-grouping-factor theta vectors.
# V may be a single matrix or a list of matrices; when sigma is missing it
# is taken from V's "sc" attribute (as attached by VarCorr), falling back
# to 1 when that attribute is absent.
calcTheta <- function(V, sigma) {
  if (missing(sigma)) {
    sigma <- attr(V, "sc")
  }
  if (is.null(sigma)) {
    sigma <- 1
  }
  if (!is.list(V)) {
    V <- list(V)
  }
  unname(unlist(llply(V, calcTheta1, sigma)))
}
#' @rdname modify
#' @export
`VarCorr<-` <- function(object, value) {
  # Does each side carry a residual scale parameter ("sc"/"useSc")?
  object.useSc <- isTRUE(attr(VarCorr(object), "useSc"))
  value.useSc <- isTRUE(attr(value, "useSc"))

  # Pick the residual SD `s` used to convert variances to theta: prefer the
  # new value's "sc" attribute (first case also updates the model's sigma
  # via the chained `sigma<-` assignment), fall back to the model's current
  # sigma, or 1 when neither side uses a scale parameter.
  if(object.useSc && value.useSc) s <- sigma(object) <- attr(value, "sc")
  if(object.useSc && !value.useSc) s <- sigma(object)
  if(!object.useSc && value.useSc) s <- attr(value, "sc")
  if(!object.useSc && !value.useSc) s <- 1

  # New values are interpreted as variances/covariances; calcTheta converts
  # them to the relative Cholesky parameterisation stored in @theta.
  newtheta <- calcTheta(value, s)
  if(length(newtheta) != length(object@theta)) stop("Incorrect number of variance parameters.")
  object@theta <- newtheta

  # Tag as deliberately modified (see simrTag).
  attr(object, "simrTag") <- TRUE
  return(object)
}
#' @rdname modify
#' @export
`sigma<-` <- function(object, value) UseMethod("sigma<-", object)

#' @export
`sigma<-.merMod` <- function(object, value) {
  # Model metadata: does it use a scale parameter, and was it fit by REML?
  useSc <- object@devcomp$dims[["useSc"]]
  REML <- object@devcomp$dims[["REML"]]

  # Models without a scale parameter only accept the no-op value 1.
  if(!useSc && !identical(value, 1)) stop("sigma is not applicable for this model.")

  V <- VarCorr(object)

  # The stored name of the residual SD depends on the fitting criterion.
  sigmaName <- if(REML) "sigmaREML" else "sigmaML"
  object@devcomp$cmp[[sigmaName]] <- value
  # Recompute theta from the (unchanged) VarCorr with the new sigma, so
  # only the residual SD changes while VarCorr is preserved.
  object@theta <- calcTheta(V, value)

  # Tag as deliberately modified (see simrTag).
  attr(object, "simrTag") <- TRUE
  return(object)
}
#' @export
`sigma<-.lm` <- function(object, value) {
  old.sigma <- sigma(object)
  new.sigma <- value

  if(is.null(old.sigma)) {
    # No residual SD defined for this model: setting NULL is a no-op,
    # anything else is an error.
    if(is.null(value)) return(object)
    stop("sigma is not applicable for this model.")
  }

  # Rescale the residuals so that summary(object)$sigma (read back by
  # sigma.lm below) reflects the new value.
  # NOTE(review): a NULL `value` with a non-NULL old sigma falls through to
  # this line and would yield zero-length residuals — confirm intended.
  object$residuals <- object$residuals * new.sigma / old.sigma

  # Tag as deliberately modified (see simrTag).
  attr(object, "simrTag") <- TRUE
  return(object)
}

# sigma() method for lm objects: the residual standard deviation as
# reported by summary().
#' @export
sigma.lm <- function(object, ...) summary(object)$sigma
#' @rdname modify
#' @export
`scale<-` <- function(object, value) {
  # Model metadata: does it use a scale parameter, and was it fit by REML?
  useSc <- object@devcomp$dims[["useSc"]]
  REML <- object@devcomp$dims[["REML"]]

  if(!useSc) stop("scale is not applicable for this model.")

  # The stored name of the residual SD depends on the fitting criterion.
  sigmaName <- if(REML) "sigmaREML" else "sigmaML"
  # Unlike `sigma<-`, theta is left untouched here, so both the residual SD
  # and the random-effect variances (which are relative to sigma) change.
  object@devcomp$cmp[[sigmaName]] <- value

  # Tag as deliberately modified (see simrTag).
  attr(object, "simrTag") <- TRUE
  return(object)
}
# Unmodified objects suggest post hoc power analysis.
# simrTag() reports whether an object has been tagged as deliberately
# modified by one of the setter functions in this file.
simrTag <- function(object) {
  tag <- attr(object, "simrTag")
  isTRUE(tag)
}
# Warn when a power analysis appears to be "observed power" (i.e. run on an
# unmodified fitted model). Returns TRUE when the warning was issued.
observedPowerWarning <- function(sim) {
  # No warning for objects explicitly modified via the setters above.
  if(simrTag(sim)) return(FALSE)
  # Functions and iterators generate simulations, so they are exempt too.
  if(is.function(sim)) return(FALSE)
  if(is(sim, "iter")) return(FALSE)
  # The warning can be disabled globally via simr's options.
  if(!getSimrOption("observedPowerWarning")) return(FALSE)
  warning("This appears to be an \"observed power\" calculation")
  return(TRUE)
}
#' @rdname modify
#' @export
`ranef<-` <- function(object, value) {

  re <- ranef(object)
  nm <- names(re)

  # The replacement must cover exactly the same grouping factors, in order.
  if(!identical(nm, names(value))) stop("Factor names don't match.")

  #
  # Old attempt: problems with large and/or singular Lambda.
  #
  # b <- unlist(value)
  # u <- solve(getME(object, "Lambda"), b)
  #

  #
  # New: use Tlist instead of Lambda. Use ginv to solve.
  #

  # Map the requested random effects b back to spherical effects u, one
  # grouping factor at a time, via the generalized inverse of that factor's
  # relative Cholesky template (ginv tolerates singular templates, e.g.
  # when elements of theta are zero).
  Tlist <- getME(object, "Tlist")

  u <- list()

  for(i in seq_along(Tlist)) {

    L <- Tlist[[i]]
    b <- as.matrix(value[[i]])
    # u0 <- as.matrix(re[[i]]) ### check that they conform?

    u[[i]] <- MASS::ginv(L) %*% t(b)
  }

  u <- unlist(u)

  #
  # Check if supplied b was valid (might not be if any elements of theta were zero).
  #
  # NOTE(review): bCheck is computed but never inspected (see the "#if(???)"
  # placeholder below) — the validity check was left unfinished.
  bCheck <- getME(object, "Lambda") %*% u
  #if(???)

  # Push the new spherical effects into the predictor module and the slot.
  object@pp$setDelu(u)
  object@u <- u

  # nb: should this be tagged to avoid observed power warning? These aren't "parameters".
  return(object)
}
|
246950343b8433620d7811760b61541290a25f71
|
829efd8da04ee3d78e848bf9926373e0d38caaf9
|
/session1.2-Robjects.R
|
dfbcfa67cb450e1ce93bdd4b9222a3b8722971a6
|
[] |
no_license
|
Dr-yang/git
|
fbab5100eadeb788b3f631bc622e6a12386c18f5
|
2ac3aff48869c73ce74b29b000f3738bf1fce228
|
refs/heads/master
| 2021-05-27T12:48:09.006450
| 2014-05-13T23:25:02
| 2014-05-13T23:25:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 980
|
r
|
session1.2-Robjects.R
|
## Session 1.2: R objects (course transcript — each expression prints its
## result when run interactively; requires gapminderDataFiveYear.txt in the
## working directory).
gDat <- read.delim("gapminderDataFiveYear.txt")
# Structure and quick looks at the data frame
str(gDat)
head(gDat)
head(gDat, n=10)
tail(gDat, n=10)
names(gDat)
dim(gDat)
nrow(gDat)
ncol(gDat)
# For a data frame, length() is the number of columns.
length(gDat)
summary(gDat)
# Basic scatterplots via the formula interface
plot(lifeExp ~ year, data=gDat)
plot(lifeExp ~ gdpPercap, data=gDat)
str(gDat)
# Extracting and summarising a single column
gDat$lifeExp
summary(gDat$lifeExp)
hist(gDat$gdpPercap)
# Factors and their character representation
gDat$continent
as.character(gDat$continent)
## subset
subset(gDat, subset=country=="Cambodia")
subset(gDat, subset=country %in% c("Cambodia","Japan","Spain"))
subset(gDat, subset=year < 1962)
subset(gDat, subset=lifeExp < 32)
# select= keeps only the named columns.
myData <- subset(gDat, subset=lifeExp < 32, select=c(country,lifeExp,pop))
mean(myData$lifeExp)
with(myData, mean(lifeExp))
# The data/subset arguments of plot() and lm()
plot(lifeExp ~ year, gDat, subset=country=="Spain")
lm(lifeExp ~ year, gDat, subset=country=="Canada")
with(gDat, mean(lifeExp))
with(subset(gDat, subset=country=="Canada"), mean(lifeExp))
##
x <- c(3,5)
class(x)
# Assigning a string into a numeric vector coerces the whole vector to
# character.
x[3] <- "charactor"
## matrix
# cbind() of a numeric and a character column yields a character matrix.
x <- cbind(1:5, c("a","b","c","d","e"))
|
62bc25e3cc6c50247396fe3b9f4df689dbebb782
|
ee1afa85744a7b63f894693b5aff1ed3edef943c
|
/cachematrix.R
|
0a9b362fdbdf5a7256f7567a79905a64836098e9
|
[] |
no_license
|
nhhikr/ProgrammingAssignment2
|
f2b4f8e440e0af290934ff549fb46c9a732396a9
|
5be3c7da2ec787b46a73a1e459122f6a3e1cb8f5
|
refs/heads/master
| 2021-01-09T06:27:44.547881
| 2015-01-24T13:04:01
| 2015-01-24T13:04:01
| 29,311,154
| 0
| 0
| null | 2015-01-15T18:18:29
| 2015-01-15T18:18:29
| null |
UTF-8
|
R
| false
| false
| 1,711
|
r
|
cachematrix.R
|
## This pair of functions caches the inverse of a matrix.
## First call makeCacheMatrix() to build a caching wrapper ("handle")
## around a matrix, then call cacheSolve() on that handle to compute —
## and cache — the inverse.

## makeCacheMatrix(x) returns a list of accessor functions closing over
## the matrix `x` and its lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores one.
  inv <- NULL

  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() x

  # Store an inverse computed elsewhere.
  setinv <- function(solved) inv <<- solved

  # Return the cached inverse, or NULL if not yet computed.
  getinv <- function() inv

  # The handle: the operations available on this cached matrix.
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve(x) returns the inverse of the matrix wrapped by the handle
## `x` (created by makeCacheMatrix), reusing the cached value when one is
## available and computing + storing it otherwise. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()

  # Cache hit: report and return immediately.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: fetch the matrix, invert it, and store the result in the
  # handle for next time.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinv(inv)
  inv
}
|
ecd2cd74b6b4b26a73787ec443e4797f21fb53f3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qsort/examples/print_cards.Rd.R
|
28c848cb2ba8ca2c31307ab841be535234710f33
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
print_cards.Rd.R
|
# Extracted example code for qsort::print_cards. Requires the qsort package
# and its bundled qset_aqs dataset.
library(qsort)
### Name: print_cards
### Title: print_cards
### Aliases: print_cards

### ** Examples

## No test:
# Writes printable card files to a temporary directory.
print_cards(qset_aqs, desc_col = "description", dir.print = tempdir())
## End(No test)
|
fed789b6ca32ffb765b56a0c55812cf8b99d12b8
|
5885524cbba3f812c1eddc1912ec33304b54c1d4
|
/man/as_dummy.Rd
|
902df57a2d73427e1be2c3be0feb0540e23fe24a
|
[
"MIT"
] |
permissive
|
jackobailey/jbmisc
|
a198ab9855800dfcd8bd2c9d639f6dc70413ddef
|
960ce7852fe3144359972500b2858a2976323dff
|
refs/heads/master
| 2021-12-14T19:01:18.073680
| 2021-12-09T20:29:04
| 2021-12-09T20:29:04
| 142,127,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 803
|
rd
|
as_dummy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_dummy.R
\name{as_dummy}
\alias{as_dummy}
\title{Convert Factor Variables to Dummies}
\usage{
as_dummy(x, ..., factor = T, labels = c("Off", "On"))
}
\arguments{
\item{x}{A vector of factor data to convert to a dummy.}
\item{...}{Terms to recode as 1.}
\item{factor}{Convert the dummy to a factor variable? Defaults to TRUE.}
\item{labels}{If factor = T, a character vector to specify the names of the resulting levels (e.g. c("Tails", "Heads")). Defaults to c("Off", "On").}
}
\value{
A vector of dummy data.
}
\description{
This function takes away the need to rely on ifelse() to create dummy variables.
}
\examples{
x <- sample(c("Coffee", "Tea", "Hot Chocolate"), replace = TRUE, size = 100)
as_dummy(x, Coffee)
}
|
da7d2ee6c53a4a340ac224f80142fa1f7313da56
|
e0cc4d8b9e96b90e53884c64b82329f6358e7bf3
|
/chapter_02.R
|
9500999f0d2242fc641ae8bbf01e8c5af84cd6ac
|
[] |
no_license
|
r-visualization/r-visualization-book
|
3912fd814c1644969481065c130d8841a3799cdd
|
3cf2764f8760b8b90c0761267cc71502084ac29a
|
refs/heads/master
| 2020-06-02T00:39:22.529009
| 2019-06-09T14:34:07
| 2019-06-09T14:34:07
| 190,981,055
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,295
|
r
|
chapter_02.R
|
# -----------------------------------
# R visualization - 소스코드
# 출판사: 도서출판 인사이트
# 저자: 유충현, 홍성학
# 챕터: 2장
# 파일명: chapter_02.R
# -----------------------------------
# ==================== Listing: 001 ====================
# Vector for the x-coordinates
x1 <- 1:5
# Vector for the y-coordinates
y1 <- x1^2
# Create a vector
z1 <- 5:1
# Create a matrix (outer parentheses print the result)
(mat1 <- cbind(x1, y1, z1))
# Split the graphics window into a 2-row x 3-column layout
op <- par(no.readonly = TRUE)
par(mfrow=c(2, 3))
# Univariate plot (index on the x-axis)
plot(y1, main="using index")
# Bivariate plot
plot(x=x1, y=y1, main="x^2")
# Bivariate plot (coordinates taken from a matrix)
plot(mat1, main="using matrix")
plot(x1, y1, type="l", main="line")
plot(x1, y1, type="h", main="high density")
plot(x1, y1, type="n", main="no plotting")
# Restore the original graphics layout (1 row x 1 column)
par(op)
# ==================== Listing: 002 ====================
# Demonstrate plotting symbols via the 'pch' argument.
# 5x5 grid of (x, y) positions for the 25 standard symbols.
x <- rep(1:5, rep(5, 5))
x
y <- rep(5:1, 5)
y
# Literal characters (including a multibyte Hangul character) as symbols.
pchs <- c("&", "z", "Z", "1", "가")
# Empty canvas, then draw pch 1-25, ASCII codes 65-69, and the literals,
# labelling each symbol with its code to its left.
plot(1:5, type = "n", xlim = c(0, 7.5), ylim = c(0.5, 5.5), main = "points by 'pch'")
points(x, y, pch = 1:25, cex = 1.5)
text(x - 0.4, y, labels = as.character(1:25), cex = 1.2)
points(rep(6, 5), 5:1, pch = 65:69, cex = 1.5)
text(rep(6, 5) - 0.4, y, labels = as.character(65:69), cex = 1.2)
points(rep(7, 5), 5:1, pch = pchs, cex = 1.5)
text(rep(7, 5) - 0.4, y, labels = paste("'", pchs, "'", sep = ""), cex = 1.2)
# ==================== 소스순번: 003 ====================
cars[1:4,]
z <- lm(dist ~ speed, data = cars)
is(z)
z$coef
plot(cars, main = "abline")
# horizontal
abline(h = 20)
abline(h = 30)
# vertical
abline(v = 20, col="blue")
# y = a + bx
abline(a = 40, b = 4, col="red")
# reg 인수
abline(z, lty = 2, lwd = 2, col="green")
# coef 인수
abline(z$coef, lty = 3, lwd = 2, col="red")
# ==================== 소스순번: 004 ====================
op <- par(no.readonly = TRUE)
par(mar=c(0, 2, 3, 2))
lty1 <- c("blank", "solid", "dashed", "dotted", "dotdash", "longdash","twodash")
lty2 <- c("33", "24", "F2", "2F","3313", "F252","FF29")
plot(0:6, 0:6, type="n", ylim=c(0,20), xlab="", ylab="", main="lines")
lines(c(1, 3), c(20, 20), lty = 1); text(4, 20, "1")
lines(c(1, 3), c(19, 19), lty = 2); text(4, 19, "2")
lines(c(1, 3), c(18, 18), lty = 3); text(4, 18, "3")
lines(c(1, 3), c(17, 17), lty = 4); text(4, 17, "4")
lines(c(1, 3), c(16, 16), lty = 5); text(4, 16, "5")
lines(c(1, 3), c(15, 15), lty = 6); text(4, 15, "6")
lines(c(1, 3), c(14, 14), lty = lty1[1]); text(4, 14, lty1[1])
lines(c(1, 3), c(13, 13), lty = lty1[2]); text(4, 13, lty1[2])
lines(c(1, 3), c(12, 12), lty = lty1[3]); text(4, 12, lty1[3])
lines(c(1, 3), c(11, 11), lty = lty1[4]); text(4, 11, lty1[4])
lines(c(1, 3), c(10, 10), lty = lty1[5]); text(4, 10, lty1[5])
lines(c(1, 3), c(9, 9), lty = lty1[6]); text(4, 9, lty1[6])
lines(c(1, 3), c(8, 8), lty = lty1[7]); text(4, 8, lty1[7])
lines(c(1, 3), c(7, 7), lty = lty2[1]); text(4, 7, lty2[1])
lines(c(1, 3), c(6, 6), lty = lty2[2]); text(4, 6, lty2[2])
lines(c(1, 3), c(5, 5), lty = lty2[3]); text(4, 5, lty2[3])
lines(c(1, 3), c(4, 4), lty = lty2[4]); text(4, 4, lty2[4])
lines(c(1, 3), c(3, 3), lty = lty2[5]); text(4, 3, lty2[5])
lines(c(1, 3), c(2, 2), lty = lty2[6]); text(4, 2, lty2[6])
lines(c(1, 3), c(1, 1), lty = lty2[7]); text(4, 1, lty2[7])
par(op)
# ==================== 소스순번: 005 ====================
op <- par(no.readonly = TRUE)
par(mar=c(0, 0, 2, 0))
plot(1:9, type = "n", axes = FALSE, xlab = "", ylab = "", main = "arrows")
arrows(1, 9, 4, 9, angle = 30, length = 0.25, code = 2)
text(4.5, 9, adj = 0, "angle = 30, length = 0.25, code = 2(default)")
arrows(1, 8, 4, 8, length = 0.5); text(4.5, 8, adj = 0, "length = 0.5")
arrows(1, 7, 4, 7, length = 0.1); text(4.5, 7, adj = 0, "length = 0.1")
arrows(1, 6, 4, 6, angle = 60); text(4.5, 6, adj = 0, "angle = 60")
arrows(1, 5, 4, 5, angle = 90); text(4.5, 5, adj = 0, "angle = 90")
arrows(1, 4, 4, 4, angle = 120); text(4.5, 4, adj = 0, "angle = 120")
arrows(1, 3, 4, 3, code = 0); text(4.5, 3, adj = 0, "code = 0")
arrows(1, 2, 4, 2, code = 1); text(4.5, 2, adj = 0, "code = 1")
arrows(1, 1, 4, 1, code = 3); text(4.5, 1, adj = 0, "code = 3")
par(op)
# ==================== 소스순번: 006 ====================
op <- par(no.readonly = TRUE)
par(mar=c(4, 4, 3, 2), mfrow = c(2, 1))
set.seed(3)
x <- runif(12)
set.seed(4)
y <- rnorm(12)
i <- order(x); x <- x[i]; y <- y[i]
plot(x, y, main = "2 segments by segments function")
s <- seq(length(x) - 1)
segments(x[s], y[s], x[s + 2], y[s + 2], lty = 1:2)
plot(x, y, main = "3 segments by segments function")
s <- seq(length(x) - 2)
segments(x[s], y[s], x[s + 3], y[s + 3], lty = 1:3)
box(which = "outer")
par(op)
# ==================== 소스순번: 007 ====================
par(mfrow = c(2, 1))
plot(x, y, main = "Example segments by 2 segment")
lines(x[seq(1, 12, 2)], y[seq(1, 12, 2)], lty = 1)
lines(x[seq(2, 12, 2)], y[seq(2, 12, 2)], lty = 2)
plot(x, y, main = "Example segments by 3 segment")
lines(x[seq(1, 12, 3)], y[seq(1, 12, 3)], lty = 1)
lines(x[seq(2, 12, 3)], y[seq(2, 12, 3)], lty = 2)
lines(x[seq(3, 12, 3)], y[seq(3, 12, 3)], lty = 3)
box(which = "outer")
par(mfrow=c(1, 1))
# ==================== 소스순번: 008 ====================
op <- par(no.readonly = TRUE)
# margin & outer margin
par(mar = c(2, 2, 2, 2), oma = c(2, 2, 2, 2))
set.seed(1)
hist(rnorm(50), axes = F, xlab = "", ylab = "", main = "box")
# 영역의 종류
whichs <- c("outer", "inner", "plot", "figure")
box(which = whichs[1], lty = 1, lwd = 1.2, col = "red")
box(which = whichs[2], lty = 2, lwd = 1.2, col = "black")
box(which = whichs[3], lty = 3, lwd = 1.2, col = "blue")
box(which = whichs[4], lty = 4, lwd = 1.2, col = "green")
legend(locator(1), legend = whichs, lwd = 1.2, lty = 1:4,
col = c("red", "black", "blue", "green"))
par(op)
# ==================== 소스순번: 009 ====================
op <- par(no.readonly = TRUE)
par(mar = c(0, 2, 2, 2))
plot(1:10, type = "n", main = "rect", xlab = "", ylab = "", axes = F)
rect(xleft = 1, ybottom = 7, xright = 3, ytop = 9)
text(2, 9.5, adj = 0.5, "defalut")
rect(1, 4, 3, 6, col = "gold")
text(2, 6.5, adj = 0.5, "col = \"gold\"")
rect(1, 1, 3, 3, border = "gold")
text(2, 3.5, adj = 0.5, "border = \"gold\"")
rect(4, 7, 6, 9, density = 10)
text(5, 9.5, adj = 0.5, "density = 10")
rect(4, 4, 6, 6, density = 10, angle = 315)
text(5, 6.5, adj = 0.5, "density=10, angle=315")
rect(4, 1, 6, 3, density = 25)
text(5, 3.5, adj = 0.5, "density = 25")
rect(7, 7, 9, 9, lwd = 2)
text(8, 9.5, adj = 0.5, "lwd = 2")
rect(7, 4, 9, 6, lty = 2)
text(8, 6.5, adj = 0.5, "lty = 2")
rect(7, 1, 9, 3, lty = 2, density = 10)
text(8, 3.5, adj = 0.5, "lty=2, density=10")
par(op)
# ==================== 소스순번: 010 ====================
op <- par(no.readonly = TRUE)
par(mar = c(0, 2, 2, 2))
# 원 모양을 만들기 위해 theta를 구함
theta <- seq(-pi, pi, length = 12)
x <- cos(theta)
y <- sin(theta)
plot(1:6, type = "n", main = "polygon", xlab = "", ylab = "", axes = F)
# 좌표 이동을 위한 작업
x1 <- x + 2
y1 <- y + 4.5
polygon(x1, y1)
x2 <- x + 2
y2 <- y + 2
polygon(x2, y2, col = "gold")
x3 <- x + 5
y3 <- y + 4.5
polygon(x3, y3, density = 10)
x4 <- x + 5
y4 <- y + 2
polygon(x4, y4, lty = 2, lwd = 2)
text(2, 5.7, adj = 0.5, "defalut")
text(2, 3.2, adj = 0.5, "col = \"gold\"")
text(5, 5.7, adj = 0.5, "density = 10")
text(5, 3.2, adj = 0.5, "lty = 2, lwd = 2")
par(op)
# ==================== 소스순번: 011 ====================
op <- par(no.readonly = TRUE)
par(mar = c(4, 4, 4, 4), oma = c(4, 0, 0, 0))
set.seed(2)
plot(rnorm(20), type = "o", xlab = "", ylab = "")
title(main = "Main title on line1", line = 1)
title(main = "Main title on line2", line = 2)
title(main = "Main title on line3", line = 3)
title(sub = "subtitle on line1", line = 1, outer = T)
title(sub = " subtitle on line2", line = 2, outer = T)
title(sub = " subtitle on line3", line = 3, outer = T)
title(xlab = "X lable on line1", line = 1)
title(xlab = "X lable on line2", line = 2)
title(xlab = "X lable on line3", line = 3)
title(ylab = "Y lable on line1", line = 1)
title(ylab = "Y lable on line2", line = 2)
title(ylab = "Y lable on line3", line = 3)
par(op)
# ==================== 소스순번: 012 ====================
op <- par(no.readonly = TRUE)
par(mar = c(0, 0, 2, 0))
plot(1:10, 1:10, type = "n", xlab = "", ylab = "", main = "text")
text(1.5, 9, adj = 0, labels = "피타고라스의 정리(定理)")
polygon(c(5, 3, 5), c(9, 7, 7))
polygon(c(5, 5, 4.8, 4.8), c(7, 7.2, 7.2, 7))
text(3.64, 8.36, adj = 0, labels = "c")
text(3.94, 6.67, adj = 0, labels = "a")
text(5.36, 7.95, adj = 0, labels = "b")
# Example expression labels
text(1.5, 8, adj = 0, labels = expression(c^2 == a^2 + b^2))
text(1.5, 6, adj = 0, labels = expression(cos(r^2) * e^{-r/6}))
text(2, 3, adj = 0.3, labels = expression(z[i] == sqrt(x[i]^2 + y[i]^2)))
text(9, 4, adj = 1, labels = expression(f(x) == frac(1, sqrt((2 *
pi)^n ~ ~det(Sigma[x]))) ~ ~exp * bgroup("(", -frac(1, 2) ~ ~(x -
mu)^T * Sigma[x]^-1 * (x - mu), ")")))
text(5, 5, adj = 0.5, labels = expression(y == bgroup("(", atop(a ~ ~b, c ~ ~d), ")")))
# Example position by pos
points(8, 8, pch = 16)
text(8, 8, "position1", pos = 1)
text(8, 8, "position2", pos = 2)
text(8, 8, "position3", pos = 3)
text(8, 8, "position4", pos = 4)
# Example offset
points(8, 6, pch = 16)
text(8, 6, "offset1", pos = 1, offset = 1)
text(8, 6, "offset2", pos = 2, offset = 1.5)
text(8, 6, "offset3", pos = 3, offset = 2)
text(8, 6, "offset4", pos = 4, offset = 2.5)
# Example adj by adj(x, y)
text(4, 2, "at(4, 2) left/top by adj = c(0, 0)", adj = c(0, 0))
text(4, 1.5, "at(4, 2) center/bottom by adj = c(0.5, 1)", adj = c(0.5, + 1))
text(8, 3, "at(8, 3) right/middle by adj = c(1, 0.5)", adj = c(1, 0.5))
par(op)
# ==================== 소스순번: 013 ====================
op <- par(no.readonly = TRUE)
par(mar = c(4, 4, 4, 4), oma = c(4, 0, 0, 0))
set.seed(5)
plot(rnorm(20), type = "o", xlab = "", ylab = "")
mtext("Position3 on line1", line = 1)
mtext("Position3 on line2", side = 3, line = 2)
mtext("Position3 on line3", side = 3, line = 3)
mtext("Outer position1 on line1", side = 1, line = 1, outer = T)
mtext("Outer position1 on line2", side = 1, line = 2, outer = T)
mtext("Outer position1 on line3", side = 1, line = 3, outer = T)
mtext("Position1 on line1", side = 1, line = 1, adj = 0)
mtext("Position1 on line2", side = 1, line = 2, adj = 0.5)
mtext("Position1 on line3", side = 1, line = 3, adj = 1)
mtext("Position2 on line1", side = 2, line = 1, adj = 0)
mtext("Position2 on line2", side = 2, line = 2, adj = 0.5)
mtext("Position2 on line3", side = 2, line = 3, adj = 1)
mtext("at 0, Posion4 on line1", side = 4, line = 1, at = 0)
mtext("at 0, adj 0, Position4 on line2", side = 4, line = 2, at = 0, adj = 0)
mtext("at 0, adj 1, Position4 on line3", side = 4, line = 3, at = 0, adj = 1)
par(op)
# ==================== 소스순번: 014 ====================
plot(1:10, type = "n", xlab = "", ylab = "", main = "legend")
legend("bottomright", "(x, y)", pch = 1, title = "bottomright")
legend("bottom", "(x, y)", pch = 1, title = "bottom")
legend("bottomleft", "(x, y)", pch = 1, title = "bottomleft")
legend("left", "(x, y)", pch = 1, title = "left")
legend("topleft", "(x, y)", pch = 1, title = "topleft")
legend("top", "(x, y)", pch = 1, title = "top")
legend("topright", "(x, y)", pch = 1, title = "topright")
legend("right", "(x, y)", pch = 1, title = "right")
legend("center", "(x, y)", pch = 1, title = "center")
legends <- c("Legend1", "Legend2")
legend(3, 8, legend = legends, pch = 1:2, col = 1:2)
legend(7, 8, legend = legends, pch = 1:2, col = 1:2, lty = 1:2)
legend(3, 4, legend = legends, fill = 1:2)
legend(7, 4, legend = legends, fill = 1:2, density = 30)
legend(locator(1), legend = "Locator", fill = 1)
# ==================== 소스순번: 015 ====================
op <- par(no.readonly = TRUE)
par(oma = c(0, 0, 2, 0))
plot(1:5, type = "l", main = " axis", axes = FALSE, xlab = "", ylab = "")
axis(side = 1, at = 1:5, labels = LETTERS[1:5], line = 2)
# since tick = F here, col.axis has no effect
axis(side = 2, tick = F, col.axis = "blue")
axis(side = 3, outer = T)
axis(side = 3, at = c(1, 3, 5), pos = 3, col = "blue", col.axis = "red")
axis(side = 4, lty = 2, lwd = 2)
par(op)
# ==================== 소스순번: 016 ====================
op <- par(no.readonly = TRUE)
par(mar = c(4, 4, 2, 2), mfrow = c(2, 1))
plot(iris$Sepal.Length, iris$Sepal.Width, pch = 16, col = as.integer(iris$Species))
# (1)
grid()
title("grid()")
plot(iris$Sepal.Length, iris$Sepal.Width, pch = 16, col = as.integer(iris$Species))
# (2)
grid(3, 4, lty = 1, lwd = 1.2, col = "blue")
title("grid(3, 4, lty = 1, lwd = 2, col = \"blue\")")
par(op)
# ==================== 소스순번: 017 ====================
op <- par(no.readonly = TRUE)
par(mar = c(4, 4, 2, 2), mfrow = c(2, 1))
plot(density(quakes$lat), main = "rug(lat)")
# (1)
rug(quakes$lat)
plot(density(quakes$long), main = "side=3, col='blue', ticksize=0.04")
# (2)
rug(quakes$long, side = 3, col = "blue", ticksize = 0.04)
par(op)
# ==================== 소스순번: 018 ====================
set.seed(1)
dot <- matrix(rnorm(200), ncol = 2)
plot(dot)
chull.data <- chull(dot)
polygon(dot[chull.data, ], angle = 45, density = 15, col = "red")
title(main = "Polygon by chull")
# ==================== 소스순번: 019 ====================
(m <- matrix(c(1, 1, 2, 3), ncol = 2, byrow = T))
layout(mat = m)
plot(cars, main = "scatter plot of cars data", pch = 19, col = 4)
hist(cars$speed)
hist(cars$dist)
# ==================== 소스순번: 020 ====================
# Demo: divide the device into regions with split.screen()/screen().
op <- par(no.readonly = TRUE)
# Set the background colour to white
par(bg = "white")
# Split the device into two screens, top and bottom
split.screen(fig = c(2, 1))
# Split screen 2 (the bottom one) into left and right halves
split.screen(c(1, 2), screen = 2)
# Select screen 3 (bottom left)
screen(n = 3)
hist(cars$speed)
# Select screen 1 (top)
screen(1)
plot(cars, main = "scatter plot of cars data by split.screen")
# Select screen 4 (bottom right)
screen(4)
hist(cars$dist)
# Paint screen 1 with the background colour (i.e. erase it)
erase.screen(n = 1)
# Select screen 1 (top) again
screen(1)
plot(cars, main = "scatter plot of cars data by split.screen", pch = 19,
     col = "blue")
# Finish the screen layout definition
close.screen(all = TRUE)
par(op)
# ==================== 소스순번: 021 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2))
par(fig = c(0, 1, 0.5, 1))
plot(cars, main = "scatter plot of cars data by fig")
par(fig = c(0, 0.5, 0, 0.5), new = T)
hist(cars$speed, main = "Histogram of cars$speed by fig")
par(fig = c(0.5, 1, 0, 0.5), new = T)
hist(cars$dist, main = "Histogram of cars$dist by fig")
par(op)
# ==================== 소스순번: 022 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2))
plot(1:10, type = "l", main = "plot")
par(new = F)
plot(10:1, type = "s", main = "plot by new = F")
plot(1:10, type = "l")
par(new = T)
plot(10:1, type = "s", main = "plot by new = T")
x <- rnorm(10)
plot(x)
par(new = T)
hist(x)
par(op)
# ==================== 소스순번: 023 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 3), bty = "l")
# C모양(1, 2, 3 영역)
plot(0:6, 0:6, bty = "c", main = " bty = \"c\" ")
# 출력하지 않음
plot(0:6, 0:6, bty = "n", main = " bty = \"n\" ")
# O모양(1, 2, 3, 4 영역)
plot(0:6, 0:6, bty = "o", main = " bty = \"o\" ")
# 7모양(3, 4 영역)
plot(0:6, 0:6, bty = "7", main = " bty = \"7\" ")
# U모양(1, 2, 4 영역)
plot(0:6, 0:6, bty = "u", main = " bty = \"u\" ")
# L영역(1, 2 영역)
plot(0:6, 0:6, main = " bty = \"l\" ")
par(op)
# ==================== 소스순번: 024 ====================
op <- par(no.readonly = TRUE)
theta <- seq(-pi, pi, length = 30)
x <- cos(theta)
y <- sin(theta)
par(mfrow = c(1, 2), pty = "s", bty = "o")
plot(x, y, type = "l", main = "pty = \"s\"")
par(pty = "m")
plot(x, y, type = "l", main = "pty = \"m\"")
par(op)
# ==================== 소스순번: 025 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 3), type = "n")
plot(0:6, 0:6, main = "default")
plot(0:6, 0:6, type = "b", main = "type = \"b\"")
plot(0:6, 0:6, type = "c", main = "type = \"c\"")
plot(0:6, 0:6, type = "o", main = "type = \"o\"")
plot(0:6, 0:6, type = "s", main = "type = \"s\"")
plot(0:6, 0:6, type = "S", main = "type = \"S\"")
par(op)
# ==================== 소스순번: 026 ====================
par("pch")
# ==================== 소스순번: 027 ====================
par("lty")
# ==================== 소스순번: 028 ====================
op <- par(no.readonly = TRUE)
x <- 0:4
set.seed(7)
(y <- dbinom(x, size = 4, prob = 0.5))
par(oma = c(0, 0, 2, 0), mfrow = c(2, 2))
plot(x, y, type = "h", main = "default")
plot(x, y, type = "h", ylim = c(0, max(y)), main = "ylim = (0, max(y))")
plot(x, y, type = "h", ylim = c(0.1, 0.3), main = "ylim = c(0.1, 0.3)")
plot(x, y, type = "h", xlim = c(1, 3), main = "xlim = c(1, 3)")
title(main = "binomial density", line = 0, outer = T)
par(op)
# ==================== 소스순번: 029 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0), cex = 1)
plot(0:6, 0:6, type = "n", main = "cex in text")
text(1:3, 1:3, labels = LETTERS[1:3], cex = 1:3)
plot(0:6, 0:6, type = "n", cex = 2, main = "cex in plot")
text(1:3, 1:3, labels = LETTERS[1:3], cex = 1:3)
par(cex = 1.2)
plot(0:6, 0:6, type = "n", main = "cex in par")
text(1:3, 1:3, labels = LETTERS[1:3], cex = 1:3)
plot(0:6, 0:6, type = "n", main = "cex in par")
text(1:3, 1:3, labels = c("가", "나", "다"), cex = 1:3)
points(3:5, 1:3, pch = 1:3, cex = 1:3)
title(main = "cex", line = 0, outer = T)
par(op)
# ==================== 소스순번: 030 ====================
par("srt")
op <- par(no.readonly = TRUE)
par(mar = c(2, 2, 2, 2))
plot(0:6, 0:6, type = "n", axes = F, xlab = "", ylab = "")
text(3, 5, "srt = 0", srt = 0, cex = 2)
text(1, 3, "srt = 90", srt = 90, cex = 2)
text(3, 1, "srt = 180", srt = 180, cex = 2)
text(5, 3, "srt = 270", srt = 270, cex = 2)
text(5, 5, "srt = -45", srt = -45, cex = 2)
text(1, 5, "srt = 45", srt = 45, cex = 2)
points(3, 3, pch = "A", srt = 45, cex = 2)
title("srt", srt = 45)
mtext(side = 2, "srt = 270", srt = 270, cex = 2)
axis(side = 1, srt = 45)
par(op)
# ==================== 소스순번: 031 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(3, 3), oma = c(0, 0, 2, 0), mar = c(2, 2, 2, 2))
plot(0:4, 0:4, tck = -0.2, main = "tck = -0.2")
plot(0:4, 0:4, tck = -0.1, main = "tck = -0.1")
plot(0:4, 0:4, tck = 0, main = "tck = 0")
plot(0:4, 0:4, tck = 0.3, main = "tck = 0.3")
plot(0:4, 0:4, tck = 0.5, main = "tck = 0.5")
plot(0:4, 0:4, tck = 0.7, main = "tck = 0.7")
plot(0:4, 0:4, tck = 1, main = "tck = 1")
par(tck = 0.2)
plot(0:4, 0:4, main = "tck defined in par")
plot(0:4, 0:4, tck = -0.1, main = "tck defined in both")
title(main = "tck", line = 0, outer = T)
par(op)
# ==================== 소스순번: 032 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2))
par("mar")
# Figure 1
par(mar = c(0, 0, 0, 0))
plot(0:4, 0:4)
title("mar = c(0, 0, 0, 0)")
# Figure 2
par(mar = c(2, 2, 2, 2))
plot(0:4, 0:4, main = "mar = c(2, 2, 2, 2)")
# Figure 3
par(mar = c(5, 5, 5, 5))
plot(0:4, 0:4, main = "mar = c(5, 5, 5, 5)")
# Figure 4
par(mar = c(1, 2, 3, 4))
plot(0:4, 0:4, main = "mar = c(1, 2, 3, 4)")
par(op)
# ==================== 소스순번: 033 ====================
op <- par(no.readonly = TRUE)
par(mar = c(2, 2, 2, 2))
plot(1:10, type = "n", main = "par(font)", axes = FALSE, ylab = "",
xlab = "")
lab <- "Written with font parameter "
for (i in 1:10) {
par(font = i)
text(5.5, 11 - i, labels = paste(lab, i), adj = 0.5, cex = 1.5)
}
box()
par(op)
# ==================== 소스순번: 034 ====================
# Demo: query and set the foreground (fg) / background (bg) colours.
op <- par(no.readonly = TRUE)
# Query the current defaults
unlist(par("fg", "bg"))
# BUG FIX: the next two lines were console OUTPUT pasted into the script
# (a syntax error when sourced). Kept here as a comment for reference:
#        fg            bg
#   "black" "transparent"
par(bg = "thistle", fg = "blue")
hist(rnorm(30), main = "bg = \"thistle\", fg = \"blue\"")
par(op)
# ==================== 소스순번: 035 ====================
# Draw a horizontal swatch table for a vector of colours.
#
# cols: colours to display, one swatch each
# main: optional plot title (defaults to "Color Table by <expr>")
# fg:   optional colour for the swatch labels; when NULL a readable
#       black/white label colour is picked per swatch from its brightness
col.table <- function(cols, main = NULL, fg = NULL) {
  k <- length(cols)
  plot(seq(k), rep(1, k), xlim = c(0, k), ylim = c(0, 1),
       type = "n", xlab = "", ylab = "", axes = F)
  if (is.null(main)) {
    title(main = paste("Color Table by", deparse(substitute(cols))))
  } else {
    title(main = main)
  }
  if (is.null(fg)) {
    # Light swatches (or explicit white) get black labels, dark ones white
    label.col <- unlist(lapply(cols, function(x)
      ifelse(mean(col2rgb(x)) > 127 | toupper(x) %in% c("WHITE", "#FFFFFF"),
             "black", "white")))
  } else {
    label.col <- rep(fg, k)
  }
  for (i in seq_len(k)) {
    polygon(c(i - 1, i - 1, i, i), c(0.05, 1, 1, 0.05), col = cols[i])
    text(mean(c(i - 1, i)), 0.52, labels = cols[i], srt = 90, adj = 0.5,
         col = label.col[i], cex = 1.5)
  }
}
op <- par(no.readonly = TRUE)
par(mfrow=c(2,1))
col.table(1:16)
col.table(5:20)
par(op)
# ==================== 소스순번: 036 ====================
cols <- colors()
length(cols)
cols[1:5]
grep("sky", cols, value=TRUE)
col2rgb(grep("sky", cols, value=TRUE))
op <- par(no.readonly = TRUE)
par(mfrow=c(2, 1), mar=c(1, 1, 3, 1))
col.table(grep("sky", cols, value=TRUE))
col.table(grep("red", cols, value=TRUE))
par(op)
# ==================== 소스순번: 037 ====================
# Tile the supplied colours into a roughly square grid of borderless
# rectangles (a compact colour map of the whole palette).
col.map <- function(cols = colors()) {
  total <- length(cols)
  side <- floor(sqrt(total))          # grid is side x side; extras are dropped
  plot.new()
  plot.window(xlim = c(0, side), ylim = c(0, side))
  for (i in seq_len(side)) {
    for (j in seq_len(side)) {
      rect(i - 1, j - 1, i, j, col = cols[(i - 1) * side + j], border = NA)
    }
  }
}
col.map(colors())
# ==================== 소스순번: 038 ====================
seqs <- seq(0, 255, length = 15)
hexs <- toupper(as.character.hexmode(seqs))
red <- paste("#", hexs, "0000", sep = "")
green <- paste("#00", hexs, "00", sep = "")
blue <- paste("#0000", hexs, sep = "")
mix1 <- paste("#", hexs, hexs, hexs, sep = "")
mix2 <- paste("#", rev(hexs), hexs, rev(hexs), sep = "")
# ==================== 소스순번: 039 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(5, 1), mar = c(0, 0, 2, 0))
col.table(rainbow(20))
col.table(heat.colors(20))
col.table(terrain.colors(20))
col.table(topo.colors(20))
col.table(cm.colors(20))
par(op)
# ==================== 소스순번: 040 ====================
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2), mar = c(0, 0, 2, 0))
col.map(rainbow(400, start = 0, end = 0.8))
col.map(heat.colors(400))
col.map(cm.colors(400))
col.map(topo.colors(400))
par(op)
# ==================== 소스순번: 041 ====================
seqs <- seq(0, 255, length = 20)
alpha <- toupper(as.character.hexmode(seqs))
op <- par(no.readonly = TRUE)
par(mfrow = c(5, 1), mar = c(0, 0, 2, 0))
col.table(paste("#FF0000", alpha, sep = ""), fg = 1)
col.table(paste("#00FF00", alpha, sep = ""), fg = 1)
col.table(paste("#0000FF", alpha, sep = ""), fg = 1)
col.table(rainbow(20), main = "Alpha Channel 사용 안함")
col.table(rainbow(20, alpha = seq(0, 1, length = 20)),
main = "Alpha Channel 사용", fg=1)
par(op)
# ==================== 소스순번: 042 ====================
op <- par(no.readonly = TRUE)
x <- c(1, 1.3, 1.6)
y <- c(1, 2, 1)
par(mar = c(4, 2, 3, 1), mfrow = c(1, 2))
plot(x, y, pch = 16, cex = 20, col = c("red", "green", "blue"),
xlim = c(0,3), ylim = c(-2, 5))
title(main = "col=c('red','green','blue')")
plot(x, y, pch = 16, cex = 20, col = c("#FF000077", "#00FF0077", "#0000FF77"),
xlim = c(0, 3), ylim = c(-2, 5))
title(main = "alpha channle by \"77\"")
par(op)
# ==================== 소스순번: 043 ====================
# Scatter random circles over a square region, then rain translucent
# "leaves" on the plot: a leaf is drawn red when its centre lands inside
# any circle, green otherwise; alpha blending makes overlap density visible.
#
# circle.counts: number of circles to scatter
# limits:        side length of the square region
# radius:        radius of each circle
# densitys:      multiplier for the number of leaves dropped
play.circle <- function(circle.counts = 100, limits = 3, radius = 0.2,
                        densitys = 1) {
  # Draw the outline of a circle of radius r centred at (x, y)
  circle <- function(x, y, r = 1, col = 1) {
    angle <- (0:360) * pi / 180
    pos.x <- r * cos(angle) + x
    pos.y <- r * sin(angle) + y
    lines(pos.x, pos.y, col = col)
  }
  # Drop one translucent leaf at a uniform random position; red if the
  # position lies within 'radius' of any circle centre, green otherwise
  leaf <- function(limits, xs, ys, radius, r = 1, alpha = "55") {
    isin <- function(x, y) {
      any(sqrt((xs - x)^2 + (ys - y)^2) <= radius)
    }
    x <- runif(1, 0, limits)
    y <- runif(1, 0, limits)
    angle <- (0:360) * pi / 180
    pos.x <- r * cos(angle) + x
    pos.y <- r * sin(angle) + y
    polygon(pos.x, pos.y,
            col = paste(ifelse(isin(x, y), "#FF0000", "#00FF00"),
                        alpha, sep = ""),
            border = NA)
  }
  xs <- runif(n = circle.counts, min = 0, max = limits)
  ys <- runif(n = circle.counts, min = 0, max = limits)
  # BUG FIX: the original call had mangled quotes (xlab=", ylab="), which
  # set xlab to the literal string ", ylab=". Use empty labels instead.
  plot(radius:(limits - radius), radius:(limits - radius), type = "n",
       axes = F, xlab = "", ylab = "")
  box()
  for (i in seq_len(circle.counts)) {
    circle(xs[i], ys[i], r = radius, col = "#FF000011")
  }
  for (i in 1:(circle.counts^2 * densitys)) {
    leaf(limits, xs, ys, radius, r = radius / 5)
  }
}
play.circle()
# ==================== 소스순번: 044 ====================
hsv(0.5, 0.5, 0.5)
hsv1 <- c(hsv(0.5, 0.5, 0.5), hsv(0.6, 0.5, 0.5), hsv(0.7, 0.5, 0.5),
hsv(0.8, 0.5, 0.5))
hsv2 <- c(hsv(0.5, 0.5, 0.5), hsv(0.5, 0.6, 0.5), hsv(0.5, 0.7, 0.5),
hsv(0.5, 0.8, 0.5))
hsv3 <- c(hsv(0.5, 0.5, 0.5), hsv(0.5, 0.5, 0.6), hsv(0.5, 0.5, 0.7),
hsv(0.5, 0.5, 0.8))
hsv4 <- c(hsv(0.5, 0.5, 0.5), hsv(0.6, 0.6, 0.6), hsv(0.7, 0.7, 0.7),
hsv(0.8, 0.8, 0.8))
op <- par(no.readonly = TRUE)
col.map(hsv1)
title("hsv1")
col.map(hsv2)
title("hsv2")
col.map(hsv3)
title("hsv3")
col.map(hsv4)
title("hsv4")
par(op)
# ==================== 소스순번: 045 ====================
reds1 <- rgb((0:15)/15, g = 0, b = 0)
reds2 <- rgb((0:15)/15, g = 0, b = 0, alpha = 0.5)
gray1 <- gray(0:8/8)
gray2 <- gray(0:8/8, alpha = 0.5)
op <- par(no.readonly = TRUE)
par(mfrow = c(2, 2), mar = c(1, 3, 1, 1))
col.map(reds1)
title("rgb((0:15)/15, g=0, b=0)")
col.map(reds2)
title("rgb((0:15)/15, g=0, b=0, alpha=0.5)")
col.map(gray1)
title("gray(0:8/8)")
col.map(gray2)
title("gray(0:8/8, alpha=0.5)")
par(op)
# ==================== 소스순번: 046 ====================
# Demo: draw a circle of radius 5 centred at (3, 4).
# BUG FIX: the original first line fused two console statements with the
# ">" prompt ('op <- par(no.readonly = TRUE) > par(pty = "s")'), turning
# them into a comparison. Restore them as two separate calls.
op <- par(no.readonly = TRUE)
par(pty = "s")
# Method 1: angles in degrees converted to radians
angle <- (0:360) * pi/180
# Method 2: equally spaced angles over [-pi, pi]
angle <- seq(-pi, pi, length = 361)
x <- 3 + 5 * cos(angle)
y <- 4 + 5 * sin(angle)
plot(x, y, type = "l", main = "circle with radius 5 and center (3, 4)")
par(op)
# ==================== 소스순번: 047 ====================
# Demo: draw regular polygons (triangle, square, pentagon, star) by
# sampling equally spaced angles on the unit circle.
op <- par(no.readonly = TRUE)
par(oma = c(0, 0, 2, 0), mar = c(4, 2, 2, 0), mfrow = c(2, 2), pty = "s")
# triangle
theta <- seq(pi/2, pi/2 + 2 * pi, by = 2 * pi/3)
tri.x <- cos(theta)
tri.y <- sin(theta)
plot(tri.x, tri.y, type = "l", xlim = c(-1, 1), ylim = c(-1, 1), main = "triangle")
# square
theta <- seq(pi/4, pi/4 + 2 * pi, by = 2 * pi/4)
sq.x <- cos(theta)
sq.y <- sin(theta)
plot(sq.x, sq.y, type = "l", xlim = c(-1, 1), ylim = c(-1, 1), main = "square")
# pentagon
theta <- seq(pi/2, pi/2 + 2 * pi, by = 2 * pi/5)
pent.x <- cos(theta)
pent.y <- sin(theta)
plot(pent.x, pent.y, type = "l", xlim = c(-1, 1), ylim = c(-1, 1),
     main = "pentagon")
# star: visit the pentagon's vertices in skipping order
s <- seq(length(pent.x))
# index vector that fixes the order the line segments are drawn in
s <- c(s[s%%2 == 1], s[s%%2 == 0])
# BUG FIX: the original call contained a stray console continuation "+"
# before 'main =' (", + main = ..."), which is a syntax error. Removed.
plot(pent.x, pent.y, type = "n", xlim = c(-1, 1), ylim = c(-1, 1),
     main = "star shape")
lines(pent.x[s], pent.y[s])
# main title
title(main = "drawing polygon", line = 0, outer = T)
par(op)
|
37781d63b62788b2e4e855898144f1c93e24cdc5
|
b18044c24c29c7a49a54930548a4567dd7cf519e
|
/man/generate_data.Rd
|
04ba815cea2655d1c6ed4f9470636a102c0b2c3d
|
[] |
no_license
|
ChongWu-Biostat/GLMaSPU
|
14bc3ecef928546e91a066479c8f7f1f167aaa6e
|
1523be583a355d8d7cb1a796bc8d5ef164ff3cd2
|
refs/heads/master
| 2021-01-17T17:39:01.712687
| 2017-08-08T18:45:53
| 2017-08-08T18:45:53
| 70,542,503
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,044
|
rd
|
generate_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_data.R
\name{generate_data}
\alias{generate_data}
\title{Generate data for generalized linear models in simulation.}
\usage{
generate_data(seed, n, p, beta, alpha)
}
\arguments{
\item{seed}{Random seed.}
\item{n}{Number of samples}
\item{p}{Dimension of variable of interest}
\item{beta}{Coefficients for covariates Z}
\item{alpha}{Coefficients for variable of interest X}
}
\value{
A list object
}
\description{
\code{generate_data} returns simulated data, including response Y, covariates Z, and variable of interest X.
}
\examples{
p = 100
n = 50
beta = c(1,3,3)
s = 0.15
signal.r = 0.02
non.zero = floor(p * s)
seed = 1
alpha = c(rep(signal.r,non.zero),rep(0,p-non.zero))
dat = generate_data(seed, n = n, p = p, beta = beta,alpha = alpha)
#X, Y, cov
#dat$X; dat$Y; dat$cov
}
\references{
Chong Wu, Gongjun Xu and Wei Pan, "An Adaptive test on high dimensional parameters in generalized linear models" (Submitted)
}
\author{
Chong Wu and Wei Pan
}
|
281661d66b7d3dd6075be78488ea82bee85aa8d9
|
f44c3ebd7e5af79fc4ba70169612377028418d08
|
/ui.R
|
90717293c008ee115d49f9031b477e47c978f778
|
[] |
no_license
|
mariasu11/DevelopingDataProducts-Course-Project
|
6b222bd263c118ec1a49f6c2372204d88cc5a5dd
|
ab744eb70cdec3ac675a244aff1acb05bf25dfc9
|
refs/heads/master
| 2021-01-10T07:14:59.838486
| 2015-05-22T15:04:49
| 2015-05-22T15:04:49
| 36,045,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,935
|
r
|
ui.R
|
# Shiny UI: takes the current price of a Maryland home and shows its
# historical (1990/2005/2010/2014) values and a 2016 forecast, plus a plot.
library(shiny)
shinyUI(fluidPage(
titlePanel("Maryland Home Prices Historical Comparision and Prediction for 2016"),
sidebarLayout(
# Input side: the user's current home price and a submit button
sidebarPanel(
h1("What is the price of your Maryland home?"),
numericInput(inputId="homePrice", label="Your home price", value= 0,min=0),
submitButton("Go!")
),
mainPanel(
tabsetPanel(
# Tab 1: the computed price for each reference year (server outputs
# costone..costfive)
tabPanel("Your Results Past and Future Home Prices",
h4("In 1990 your home cost"),
verbatimTextOutput("costone"),
h4("In 2005 your home cost"),
verbatimTextOutput("costtwo"),
h4("In 2010 your home cost"),
verbatimTextOutput("costthree"),
h4("In 2014 your home cost"),
verbatimTextOutput("costfour"),
h4("In 2016 your home WILL cost"),
verbatimTextOutput("costfive")
),
# Tab 2: interactive plot rendered by the server as HTML
tabPanel("Home Price Plot", htmlOutput(outputId = "main_plot")
),
# Tab 3: static usage/help text
tabPanel("How to use this app",
p("This app takes the current price of a home in Maryland as of May 2015 and provides the user with the price of that home in 1990, 2005, 2010, 2014, and 2016 (based on Zillow forecast).
The second tab plots these prices using GoogleVis to give the user an interactive visual on how the price of homes have increased since 1990.
The data for historical prices was based on Maryland Appreciation Rates found at http://www.neighborhoodscout.com/md/rates/
The forecast was based on a Zillow prediction of 2.6% which can be found at http://www.zillow.com/md/home-values/ ")
)
)
)
)
))
|
17f4f8cae18d9041a91761e4c7ccadac414dd196
|
77471bc2b7d12997a2702a0fa4719b2f71d46b00
|
/man/readhtml.Rd
|
706b2bc64bba702d041813e3342fcea832e9c0dd
|
[] |
no_license
|
songssssss/cleanbot
|
afeb72d810788176edf5e0b7189841d5f2e23926
|
ba7fe5a4745836b3b6da8ff68fbb138c462cbb17
|
refs/heads/main
| 2023-04-06T23:13:54.673563
| 2021-04-22T04:30:43
| 2021-04-22T04:30:43
| 353,277,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 293
|
rd
|
readhtml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readhtml.R
\name{readhtml}
\alias{readhtml}
\title{A function}
\usage{
readhtml(file)
}
\arguments{
\item{file}{A string}
\item{expr}{A variable in the dataframe}
}
\value{
A dataframe
}
\description{
A function
}
|
7c3f3d14c0eeab5fc3987e53fffbe1c1c7db29c8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/prabclus/examples/build.ext.nblist.Rd.R
|
1fcbf3a7bbda1cb90f0f64b7bd66ce83edc3fc91
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
build.ext.nblist.Rd.R
|
library(prabclus)
### Name: build.ext.nblist
### Title: Internal: generates neighborhood list for diploid loci
### Aliases: build.ext.nblist
### Keywords: cluster
### ** Examples
data(veronica)
vnb <- coord2dist(coordmatrix=veronica.coord[1:20,], cut=20,
file.format="decimal2",neighbors=TRUE)
build.ext.nblist(vnb$nblist)
|
c0324deaf0a50076be09be1a0b9411e63b31c8cf
|
9035f2a2fa85a704eed28018dec2667c031781f1
|
/FunciónGrafica.R
|
8a29401ba1f090ae68b432f1045333fede6070a5
|
[] |
no_license
|
AxlRG96/EvidenciaModulo2
|
174ddfb5fc9459fdaac7f4817e81fe938d0f8ca2
|
370754a15c6e7b56ad73cadaa8e00cb4c81d5d88
|
refs/heads/main
| 2023-04-10T04:58:20.982410
| 2021-04-16T16:09:51
| 2021-04-16T16:09:51
| 358,637,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
FunciónGrafica.R
|
library(httr)
library(jsonlite)
library("dplyr")
library(reshape2)
library(plotly)
# Query the reporteb API for the given date ranges / sites and plot the
# returned caudal/volumen/vola readings over time as an interactive chart.
urlS <- "http://localhost:4000/api/reporteb/hoja123"
bodyS <- toJSON(list(
  list("fecha1" = "2021-02-01",
       "fecha2" = "2021-03-20",
       "fechat1" = "2021-03-01",
       "fechat2" = "2021-03-20",
       "hora" = "17",
       "sitios1" = list(
         list("sitio" = "SA1-A-1")
       ),
       "sitios2" = list(
         list("sitio" = "SA1-A-1")
       )
  )
), auto_unbox = TRUE)
r <- POST(urlS, body = bodyS, content_type("application/json"))
dataS <- content(r)
lec <- dataS$h1[[1]]$lecturas
# BUG FIX / cleanup: the original seeded each vector from lec[[1]] and then
# looped over 2:length(lec), which (a) indexes lec[[2]] even when the list
# has a single element (2:1 counts backwards) and (b) grows vectors with
# append() inside a loop. Extract each field with a single vapply() instead.
# Assumes each reading has numeric caudal/volumen/vola and a character
# fechalectura — TODO confirm against the API response.
caudal  <- vapply(lec, function(l) l$caudal,  numeric(1))
volumen <- vapply(lec, function(l) l$volumen, numeric(1))
vola    <- vapply(lec, function(l) l$vola,    numeric(1))
flec    <- as.Date(vapply(lec, function(l) l$fechalectura, character(1)))
dataF <- data.frame(flec, volumen, vola, caudal)
# Long format for ggplot: one row per (date, variable) pair
test_data_long <- melt(dataF, id = "flec")
ggplotly(ggplot(data = test_data_long,
                aes(x = flec, y = value, colour = variable)) +
           geom_line(size = 1))
|
25c137f25373ea09a706f5c73f07e456287a6b43
|
432ecdbb1a40e1c5ba7bc1d55d4f068f1e97c2c5
|
/gene_pathway_membership_long_to_wide.R
|
e01cad8eae8b35b242ac64bcf1f47949c7a61871
|
[] |
no_license
|
joshuaburkhart/bio
|
a2b5a2ca638260d8478c7c34d4ee3d1a9039626d
|
7288bd3f089fa15dbeffd4a159946e7a899954c7
|
refs/heads/master
| 2021-01-23T19:38:59.635484
| 2017-10-23T17:20:18
| 2017-10-23T17:20:18
| 6,525,975
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,730
|
r
|
gene_pathway_membership_long_to_wide.R
|
# Gene <-> Pathway: Long to Wide R Script
# Author: Joshua Burkhart
# Date: 9/20/2016
# Libraries
library(openxlsx)
library(dplyr)
library(magrittr)
library(reshape2)
# Globals
DATA_DIR <- "/Users/joshuaburkhart/Research/DEET/biting/analysis/"
CONTIG_MAP_FILE <- "DEET_loci_annotation.csv"
CONTIG_MAP_PATH <- paste(DATA_DIR,CONTIG_MAP_FILE,sep="")
PATHWY_MAP_FILE <- "Genes to pathways.csv"
PATHWY_MAP_PATH <- paste(DATA_DIR,PATHWY_MAP_FILE,sep="")
OUT_FILE_1 <- "Genes to pathways.xlsx"
OUT_PATH_1 <- paste(DATA_DIR,OUT_FILE_1,sep="")
OUT_FILE_2 <- "Contigs and Singletons to pathways.xlsx"
OUT_PATH_2 <- paste(DATA_DIR,OUT_FILE_2,sep="")
# Read mapping files
contig.map <- read.delim(CONTIG_MAP_PATH,header=TRUE,sep=",",stringsAsFactors = FALSE) #first use $tail -n +2 DEET\ loci\ annotation.csv to remove first header
pathwy.map <- read.delim(PATHWY_MAP_PATH,header=FALSE,sep=",",stringsAsFactors = FALSE)
# Name pathwy.map columns, remove zero, add contig/singleton id column
colnames(pathwy.map) <- c("zero","gene","pathway")
pathwy.map <- pathwy.map %>% dplyr::select(gene,pathway) %>%
dplyr::left_join(contig.map,by=c('gene' = 'BLAST_Agam')) %>%
dplyr::select(gene,
DEET.output.using.expressin.profiles.from.3.biting.arrays,
pathway)
# Add pathway numbers as ids and reshape by gene to wide format
wide_gene_pathwy.map <- pathwy.map %>%
dplyr::select(gene,pathway) %>%
dplyr::filter(!(is.na(gene))) %>%
group_by(gene) %>% mutate(id = seq_len(n())) %>%
group_by(gene) %>% mutate(id = seq_along(pathway)) %>%
group_by(gene) %>% mutate(id = row_number()) %>%
as.data.frame() %>%
reshape(direction = 'wide',
idvar = 'gene',
timevar = 'id',
v.names = 'pathway',
sep = "_")
wide_gene_pathwy.map %>% openxlsx::write.xlsx(file=OUT_PATH_1)
# Add pathway numbers as ids and reshape by contig/singleton id to wide format
wide_contig_pathwy.map <- pathwy.map %>%
dplyr::select(DEET.output.using.expressin.profiles.from.3.biting.arrays,gene,pathway) %>%
dplyr::filter(!(is.na(DEET.output.using.expressin.profiles.from.3.biting.arrays))) %>%
group_by(DEET.output.using.expressin.profiles.from.3.biting.arrays) %>% mutate(id = seq_len(n())) %>%
group_by(DEET.output.using.expressin.profiles.from.3.biting.arrays) %>% mutate(id = seq_along(pathway)) %>%
group_by(DEET.output.using.expressin.profiles.from.3.biting.arrays) %>% mutate(id = row_number()) %>%
as.data.frame() %>%
reshape(direction = 'wide',
idvar = 'DEET.output.using.expressin.profiles.from.3.biting.arrays',
timevar = 'id',
v.names = 'pathway',
sep = "_")
wide_contig_pathwy.map %>% openxlsx::write.xlsx(file=OUT_PATH_2)
|
447b255e990cbb6c7bc48347fa822feb8c946ef0
|
f0388bbd23c406ea31b01fe81eb8d03ac3dbbf91
|
/R/namespace.R
|
a879e41fa4db594680fab01fa348a6f403557fef
|
[
"MIT"
] |
permissive
|
pre-processing-r/chk
|
78507709b12c3ec9361ddc87acfb7e748932564b
|
ddfa8294185c282dc833c7f8170de187f57c0ac0
|
refs/heads/master
| 2023-08-29T14:56:34.954841
| 2021-10-16T19:41:25
| 2021-10-16T20:03:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
namespace.R
|
#' @import rlang lifecycle
#' @import typed
NULL
|
0bca379113b36bc4aaa06f18bc42915bb937f313
|
bf777a173db8979cd63ce88f9be189c2ee29656f
|
/note_learning_all.r
|
a3f6dcbe2a1ec2e7609e67eda098fa632f8ea87d
|
[] |
no_license
|
vimlu/note_code
|
e1e3c426a06c224597fabb5c5e0980955f33cc42
|
4182cbe4844437935f9464745f4ec7defd09870c
|
refs/heads/master
| 2020-03-28T12:52:55.852472
| 2018-10-26T08:58:00
| 2018-10-26T08:58:00
| 148,343,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,774
|
r
|
note_learning_all.r
|
source("https://bioconductor.org/biocLite.R")
## or source("http://bioconductor.org/biocLite.R")
biocLite("ComplexHeatmap")
biocLite("RTCGA.clinical")
biocLite("ballgown")
install.packages("devtools")
devtools::install_github('alyssafrazee/RSkittleBrewer')
tx<-runif(100)
ty<-rnorm(100)+5*tx
tmodel<-lm(ty~tx)
attributes(tmodel)
mode(tmodel)
plot(kmfit2, col = c("red", "blue"))
points(kmfit2, col=c("red", "blue"), pch = "+")
dfNew <- data.frame(clinic=factor(c("1", "2"), levels=levels(as.factor(addicts$clinic))),
X=c(-2, -2),
prison=factor(c("0", "1"), levels=levels(as.factor(addicts$prison))))
d=data.frame(name=c("李明","张聪","王建"),age=c(30,35,28),height=c(180,162,175))
cat("TITLE extra line", "2 3 5 7", "", "11 13 17", file="ex.data", sep="\n")
###=====================
options(digits=2)
Student <- c("John Davis", "Angela Williams",
"Bullwinkle Moose", "David Jones",
"Janice Markhammer", "Cheryl Cushing",
"Reuven Ytzrhak", "Greg Knox", "Joel England",
"Mary Rayburn")
Math <- c(502, 600, 412, 358, 495, 512, 410, 625, 573, 522)
Science <- c(95, 99, 80, 82, 75, 85, 80, 95, 89, 86)
English <- c(25, 22, 18, 15, 20, 28, 15, 30, 27, 18)
roster <- data.frame(Student, Math, Science, English,
stringsAsFactors=FALSE)
z <- scale(roster[,2:4])
score <- apply(z, 1, mean)
roster <- cbind(roster, score)
y <- quantile(score, c(.8,.6,.4,.2))
roster$grade[score >= y[1]] <- "A"
roster$grade[score < y[1] & score >= y[2]] <- "B"
roster$grade[score < y[2] & score >= y[3]] <- "C"
roster$grade[score < y[3] & score >= y[4]] <- "D"
roster$grade[score < y[4]] <- "F"
###=================
feelings <- c("sad", "afraid")
for (i in feelings)
print(
switch(i,
happy = "I am glad you are happy",
afraid = "There is nothing to fear",
sad = "Cheer up",
angry = "Calm down now"
)
)
w <- c(75.0, 64.0, 47.4, 66.9, 62.2, 62.2, 58.7, 63.5,
66.6, 64.0, 57.0, 69.0, 56.9, 50.0, 72.0)
X<-c(159, 280, 101, 212, 224, 379, 179, 264,
222, 362, 168, 250, 149, 260, 485, 170)
t.test(X,mu=225,alternative="greater")
mouse<-data.frame(
X=c( 2, 4, 3, 2, 4, 7, 7, 2, 2, 5, 4, 5, 6, 8, 5, 10, 7,
12, 12, 6, 6, 7, 11, 6, 6, 7, 9, 5, 5, 10, 6, 3, 10),
A=factor(c(rep(1,11),rep(2,10), rep(3,12))))
mouse.aov<-aov(X ~ A, data=mouse)
summary(mouse.aov)
##regression analysis
x<-c(0.10,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.20,0.21,0.23)
y<-c(42.0,43.5,45.0,45.5,45.0,47.5,49.0,53.0,50.0,55.0,55.0,60.0)
lm.sol<-lm(y ~ 1+x)
##先从图形上大致判断是否具有线性
plot(x,y)
abline(lm.sol)
summary(lm.sol)
## pheatmap of a matrix pasted into the clipboard (e.g. for a phylogenetic tree)
# NOTE(review): library() is preferred over require() for mandatory loading.
require(pheatmap)
#read.table("clipboard", header = T, sep="\t")->P025
#P025[,1]->P025_rowname
#P025[,-1]->P025
#as.matrix(P025)->P025
#rownames(P025)<-P025_rowname
## read the data with first column as rowname!!
read.table("clipboard", header = T, row.names = 1, na.strings = 'NA')->P025
# White-to-dark-red palette, values printed in each cell, no row/col clustering.
pheatmap(P025, color = colorRampPalette( c("white", "red", "darkred"))(200),display_numbers = T, number_format = "%.2f", cluster_rows = F, cluster_cols = F)
# Register Times New Roman for subsequent base-graphics output (Windows only).
windowsFonts(Times=windowsFont("Times New Roman"))
par(mar=c(3,4,2,5), family="Times", ps=14)
##==========to perform chisq.test in batch============##
# Read a count table from the clipboard: first column "Gene" holds row names,
# the remaining columns hold the four counts of one 2x2 table per row.
test <- read.table('clipboard', header = T)
rownames(test) <- test$Gene
test <- as.matrix(test[, -1])
# Preallocate the p-value vector. (The original indexed into `pval` before it
# existed, which raises "object 'pval' not found" in a fresh session.)
pval <- numeric(nrow(test))
# seq_len() instead of 1:length(test[,1]) — safe when the table is empty.
for (i in seq_len(nrow(test))) {
  # Reshape the row's counts into a 2x2 contingency table and test independence.
  tmp_matrix <- matrix(test[i, ], ncol = 2, byrow = T)
  pval[i] <- chisq.test(tmp_matrix)$p.value
  # cat(pval[i])
}
names(pval) <- rownames(test)
# Keep genes significant at the 5% level.
pval_cand <- pval[pval < 0.05]
|
e92b45c8e1506347bcdbc99cc95d918acd87361f
|
8866b741411e2edfa61972369143de26fde5f821
|
/man/QueueingModel.i_MMInfKK.Rd
|
64ab10737e03aa9f39dc47f0a256faeab82941cb
|
[] |
no_license
|
cran/queueing
|
6232577c0eb67cae7c716ef1432cc54194fb26d4
|
7712782a0d82d73599f128f68e94536b0cf8d4e5
|
refs/heads/master
| 2020-12-25T17:36:19.513391
| 2019-12-08T21:10:02
| 2019-12-08T21:10:02
| 17,698,913
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 822
|
rd
|
QueueingModel.i_MMInfKK.Rd
|
% File man/QueueingModel.i_MMInfKK.Rd
\name{QueueingModel.i_MMInfKK}
\alias{QueueingModel.i_MMInfKK}
\title{Builds a M/M/Infinite/K/K queueing model}
\description{
Builds a M/M/Infinite/K/K queueing model
}
\usage{
\method{QueueingModel}{i_MMInfKK}(x, \dots)
}
\arguments{
\item{x}{a object of class i_MMInfKK}
  \item{\dots}{additional arguments}
}
\details{Build a M/M/Infinite/K/K queueing model. It also checks the input params calling the \link{CheckInput.i_MMInfKK}}
\references{
[Kleinrock1975] Leonard Kleinrock (1975).\cr
\emph{Queueing Systems Vol 1: Theory}.\cr
John Wiley & Sons.
}
\seealso{
\code{\link{CheckInput.i_MMInfKK}}
}
\examples{
## create input parameters
i_MMInfKK <- NewInput.MMInfKK(lambda=0.25, mu=4, k=4)
## Build the model
QueueingModel(i_MMInfKK)
}
\keyword{M/M/Infinite/K/K}
|
247dc185bb7cb8b3955057ece45a3310ed46f098
|
4dc66ca0241589deeadafd3c1d656ac08e87beaa
|
/Scripts_/03_Sensitivities.R
|
f58aea09f4d6d060d90485007f76cfa41eca51ad
|
[] |
no_license
|
erwanrh/SSE-model-Longevity
|
e1cece124531a9f58e71906681bd3a59c68373a1
|
32536e0e662d486a3ef9a2390d44de21715b6421
|
refs/heads/master
| 2023-02-05T09:27:46.560059
| 2020-12-29T18:58:46
| 2020-12-29T18:58:46
| 263,285,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,945
|
r
|
03_Sensitivities.R
|
######## SCRIPT MODEL SSE Sensitivities Analysis #######-
# Author: Erwan Rahis (erwan.rahis@axa.com), GRM Life, Longevity Team
# Version 1.0
# Last update: 12/05/2020
# Script to fit a Sums Of Smooth Exponential model according to the paper "Sum Of Smooth Exponentials to decompose complex series of counts"
# Camarda (2016)
####################################################################################-
#-------------------------- FUNCTIONS SENSITIVITES ----------------------------
####################################################################################-
# Crossed Sensitivities Males ---------------------------------------------------
# Fitted male mortality components (per age 0-110) where ONLY the Senescent
# coefficients are taken from period2; Infant and Hump stay at period1.
# Reads globals: SSE_males (basis matrices XX$X1..X3) and
# SSE_coeffcients_males_df (rows 1:2 Infant, 3:27 Senescent, 28:52 Hump).
# Returns list(df_base, df_sensis): unperturbed vs perturbed data frames,
# each with Infant/Senescent/Hump, their sum (FittedCurve), Age and a tag.
compute_fitted_crossedsensis_Senescent_M <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Senescent block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period1)])),
                                               exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period2)])),
                                               exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period1)])),
                                       exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period1)])),
                                       exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Same as compute_fitted_crossedsensis_Senescent_M but perturbs ONLY the
# Hump component (coefficient rows 28:52) to period2 coefficients.
# Returns list(df_base, df_sensis); see the Senescent variant for structure.
compute_fitted_crossedsensis_Hump_M <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Hump block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period1)])),
                                               exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period1)])),
                                               exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period2)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period1)])),
                                       exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period1)])),
                                       exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Same as compute_fitted_crossedsensis_Senescent_M but perturbs ONLY the
# Infant component (coefficient rows 1:2) to period2 coefficients.
# Returns list(df_base, df_sensis); see the Senescent variant for structure.
compute_fitted_crossedsensis_Infant_M <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Infant block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period2)])),
                                               exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period1)])),
                                               exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period1)])),
                                       exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period1)])),
                                       exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Male life expectancy at birth when each component (Hump / Senescent /
# Infant) is individually moved from period1 to period2 coefficients.
# Reads globals: SSE_deathrates_male_df and LE_period_Model() (defined
# elsewhere in the project).
# Returns a one-row cbind: base LE, the three per-component LEs, final LE.
compute_delta_Ex_crossedsensis_M <- function (period1 = 2000, period2= 2017){
  Fitted_sensis_listHump <- compute_fitted_crossedsensis_Hump_M(period1, period2)
  Fitted_sensis_listSenescent <- compute_fitted_crossedsensis_Senescent_M(period1, period2)
  Fitted_sensis_listInfant <- compute_fitted_crossedsensis_Infant_M(period1, period2)
  # NOTE(review): Fitted_DataFrame_base is assigned but never used below.
  Fitted_DataFrame_base <- Fitted_sensis_listHump[['df_base']]
  Fitted_DataFrame_SensisHump <- Fitted_sensis_listHump[['df_sensis']]
  Fitted_DataFrame_SensisSenescent <- Fitted_sensis_listSenescent[['df_sensis']]
  Fitted_DataFrame_SensisInfant <- Fitted_sensis_listInfant[['df_sensis']]
  # LE at age 0 under the base period, the target period, and each perturbation.
  LE_base <- LE_period_Model(QxModel = SSE_deathrates_male_df,Age = 0, Period = period1)
  LE_after <- LE_period_Model(QxModel = SSE_deathrates_male_df,Age = 0, Period = period2)
  LE_sensisHump <- LE_period_Model(QxModel = Fitted_DataFrame_SensisHump,Age = 0, Period = 'FittedCurve')
  LE_sensisSenescent <- LE_period_Model(QxModel = Fitted_DataFrame_SensisSenescent,Age = 0, Period = 'FittedCurve')
  LE_sensisInfant <- LE_period_Model(QxModel = Fitted_DataFrame_SensisInfant,Age = 0, Period = 'FittedCurve')
  LE_delta_crossed <- as.data.frame(rbind(LE_sensisHump, LE_sensisSenescent, LE_sensisInfant))
  cbind( LE_base, t(LE_delta_crossed), LE_after)
}
# Crossed Sensitivities Females ---------------------------------------------------
# Female counterpart of compute_fitted_crossedsensis_Senescent_M: perturbs
# ONLY the Senescent coefficients (rows 3:27) to period2.
# Reads globals SSE_females and SSE_coeffcients_females_df.
# Returns list(df_base, df_sensis) with per-age (0-110) component columns.
compute_fitted_crossedsensis_Senescent_F <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Senescent block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period1)])),
                                               exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period2)])),
                                               exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period1)])),
                                       exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period1)])),
                                       exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Female counterpart of compute_fitted_crossedsensis_Hump_M: perturbs ONLY
# the Hump coefficients (rows 28:52) to period2.
# Returns list(df_base, df_sensis); see the Senescent variant for structure.
compute_fitted_crossedsensis_Hump_F <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Hump block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period1)])),
                                               exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period1)])),
                                               exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period2)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period1)])),
                                       exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period1)])),
                                       exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Female counterpart of compute_fitted_crossedsensis_Infant_M: perturbs ONLY
# the Infant coefficients (rows 1:2) to period2.
# Returns list(df_base, df_sensis); see the Senescent variant for structure.
compute_fitted_crossedsensis_Infant_F <- function( period1 = 2000, period2 = 2017){
  # Perturbed frame: Infant block uses period2 coefficients.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period2)])),
                                               exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period1)])),
                                               exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: all three blocks at period1.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period1)])),
                                       exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period1)])),
                                       exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period1)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Female counterpart of compute_delta_Ex_crossedsensis_M: LE at birth when
# each component is individually moved from period1 to period2 coefficients.
# Reads globals SSE_deathrates_female_df and LE_period_Model().
# Returns a one-row cbind: base LE, the three per-component LEs, final LE.
compute_delta_Ex_crossedsensis_F <- function (period1 = 2000, period2= 2017){
  Fitted_sensis_listHump <- compute_fitted_crossedsensis_Hump_F(period1, period2)
  Fitted_sensis_listSenescent <- compute_fitted_crossedsensis_Senescent_F(period1, period2)
  Fitted_sensis_listInfant <- compute_fitted_crossedsensis_Infant_F(period1, period2)
  # NOTE(review): Fitted_DataFrame_base is assigned but never used below.
  Fitted_DataFrame_base <- Fitted_sensis_listHump[['df_base']]
  Fitted_DataFrame_SensisHump <- Fitted_sensis_listHump[['df_sensis']]
  Fitted_DataFrame_SensisSenescent <- Fitted_sensis_listSenescent[['df_sensis']]
  Fitted_DataFrame_SensisInfant <- Fitted_sensis_listInfant[['df_sensis']]
  # LE at age 0 under the base period, the target period, and each perturbation.
  LE_base <- LE_period_Model(QxModel = SSE_deathrates_female_df,Age = 0, Period = period1)
  LE_after <- LE_period_Model(QxModel = SSE_deathrates_female_df,Age = 0, Period = period2)
  LE_sensisHump <- LE_period_Model(QxModel = Fitted_DataFrame_SensisHump,Age = 0, Period = 'FittedCurve')
  LE_sensisSenescent <- LE_period_Model(QxModel = Fitted_DataFrame_SensisSenescent,Age = 0, Period = 'FittedCurve')
  LE_sensisInfant <- LE_period_Model(QxModel = Fitted_DataFrame_SensisInfant,Age = 0, Period = 'FittedCurve')
  LE_delta_crossed <- as.data.frame(rbind(LE_sensisHump, LE_sensisSenescent, LE_sensisInfant))
  cbind( LE_base, t(LE_delta_crossed), LE_after)
}
# Sensitivities Males -----------
# Fitted male components for `period` where each component's coefficient
# vector is scaled by (1 + sensis_*) BEFORE exponentiating, i.e. the shock
# is applied multiplicatively in log-mortality space.
# Returns list(df_base, df_sensis) with per-age (0-110) component columns.
compute_fitted_sensis_M <- function(sensis_infant=0, sensis_hump=0, sensis_senescent=0, period = 2017){
  # Perturbed frame: each block's coefficients scaled by its own shock.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period)])*(1+sensis_infant)),
                                               exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period)])*(1+sensis_senescent)),
                                               exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period)])*(1+sensis_hump))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: unshocked coefficients for the same period.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_males$XX$X1%*%(SSE_coeffcients_males_df[1:2, as.character(period)])),
                                       exp(SSE_males$XX$X2%*%(SSE_coeffcients_males_df[3:27, as.character(period)])),
                                       exp(SSE_males$XX$X3%*%(SSE_coeffcients_males_df[28:52, as.character(period)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# Sensitivities Females -----------
# Female counterpart of compute_fitted_sensis_M: coefficients scaled by
# (1 + sensis_*) in log-mortality space before exponentiating.
# Returns list(df_base, df_sensis) with per-age (0-110) component columns.
compute_fitted_sensis_F <- function(sensis_infant=0, sensis_hump=0, sensis_senescent=0, period = 2017){
  # Perturbed frame: each block's coefficients scaled by its own shock.
  Fitted_DataFrame_Sensis1 <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period)])*(1+sensis_infant)),
                                               exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period)])*(1+sensis_senescent)),
                                               exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period)])*(1+sensis_hump))))
  colnames(Fitted_DataFrame_Sensis1) <- c('Infant', 'Senescent', 'Hump')
  # Base frame: unshocked coefficients for the same period.
  Fitted_DataFrame <- data.frame(cbind(exp(SSE_females$XX$X1%*%(SSE_coeffcients_females_df[1:2, as.character(period)])),
                                       exp(SSE_females$XX$X2%*%(SSE_coeffcients_females_df[3:27, as.character(period)])),
                                       exp(SSE_females$XX$X3%*%(SSE_coeffcients_females_df[28:52, as.character(period)]))))
  colnames(Fitted_DataFrame) <- c('Infant', 'Senescent', 'Hump')
  Fitted_DataFrame_Sensis1$FittedCurve <- rowSums(Fitted_DataFrame_Sensis1)
  Fitted_DataFrame_Sensis1$Age <- 0:110
  Fitted_DataFrame_Sensis1$Sensis <- 'Sensis'
  Fitted_DataFrame$FittedCurve <- rowSums(Fitted_DataFrame)
  Fitted_DataFrame$Age <- 0:110
  Fitted_DataFrame$Sensis <- 'Base'
  list('df_base'= Fitted_DataFrame, 'df_sensis'= Fitted_DataFrame_Sensis1)
}
# PLOT ---------------------
# Year-on-year LE improvements decomposed by component, for both sexes.
# Walks consecutive year pairs in SSE_deathrates_male_df's columns, takes
# each component's contribution via the crossed-sensitivity functions, and
# converts year deltas to months (*12). Returns a long (melted) data frame
# with columns year / variable / value / sex. Requires reshape2::melt.
compute_Ex_by_comp <- function() {
  sensis_crossed_df_M<- data.frame()
  sensis_crossed_df_F <- data.frame()
  year_1 <- as.numeric(colnames(SSE_deathrates_male_df)[1])
  # NOTE(review): rbind inside the loop grows the frames row-block by
  # row-block; fine for a few decades of years, O(n^2) for long series.
  for (year in as.numeric(colnames(SSE_deathrates_male_df)[-1]))
  {
    sensis_crossed_df_M <- rbind(sensis_crossed_df_M,cbind(compute_delta_Ex_crossedsensis_M(period1 = year_1, period2 = year), year))
    sensis_crossed_df_F <- rbind(sensis_crossed_df_F,cbind(compute_delta_Ex_crossedsensis_F(period1 = year_1, period2 = year), year))
    year_1 <- year
  }
  # Per-component improvement = (perturbed LE - base LE), in months.
  sensis_crossed_var_df_M <- (sensis_crossed_df_M[, c('LE_sensisHump','LE_sensisSenescent','LE_sensisInfant')] - sensis_crossed_df_M$LE_base)*12
  sensis_crossed_var_df_M$year <- sensis_crossed_df_M$year
  sensis_crossed_var_df_M$LE_var <- (sensis_crossed_df_M$LE_after - sensis_crossed_df_M$LE_base)*12
  sensis_crossed_var_df_F <- (sensis_crossed_df_F[, c('LE_sensisHump','LE_sensisSenescent','LE_sensisInfant')] - sensis_crossed_df_F$LE_base)*12
  sensis_crossed_var_df_F$year <- sensis_crossed_df_F$year
  sensis_crossed_var_df_F$LE_var <- (sensis_crossed_df_F$LE_after - sensis_crossed_df_F$LE_base)*12
  # Long format for ggplot, tagged by sex.
  sensis_crossed_plot_m <- melt(sensis_crossed_var_df_M, id.vars = 'year' )
  sensis_crossed_plot_m$sex <- 'M'
  sensis_crossed_plot_f <- melt(sensis_crossed_var_df_F, id.vars = 'year' )
  sensis_crossed_plot_f$sex <- 'F'
  rbind(sensis_crossed_plot_m, sensis_crossed_plot_f)
}
# Line plot of yearly LE improvements per component (dashed line = total),
# faceted by sex. Returns the ggplot object.
plot_Ex_by_comp<- function(){
  sensis_crossed_plot <- compute_Ex_by_comp()
  p2 <- ggplot(subset(sensis_crossed_plot)) + geom_line(aes(x= year, y= value, group = variable, color= variable, linetype= variable)) + facet_wrap(~ sex, nrow=2)+
    ggtitle('Component Yearly Ex Improvements')+ ylab('LE improvement (months)') +
    scale_linetype_manual('LE Improvement',values = c(1,1,1,2), labels = c('Hump', 'Senescent', 'Infant', 'Total'))+
    scale_color_discrete('LE Improvement', labels = c('Hump', 'Senescent', 'Infant', 'Total'))
  p2
}
# "Stock" LE sensitivities per component: for each year 2000-2016, shock one
# component at a time (delta = 100) and record the LE change in months.
# Depends on compute_delta_Ex_sensis_M/F, defined elsewhere in the project.
# Returns a long data frame with columns year / variable / Ex / sex.
compute_Ex_stock <- function(){
  # Data frames of Ex (life-expectancy) sensitivities, one row per year/shock.
  sensis_ex_dfF <- data.frame()
  sensis_ex_dfM <- data.frame()
  for(delta in c(100)){
    for (year in 2000:2016){
      sensis_ex_dfF[as.character(paste(year,delta, sep = '_')), 'Infant'] <- compute_delta_Ex_sensis_F(sensis_infant = delta, period = year)*12
      sensis_ex_dfF[as.character(paste(year,delta, sep = '_')), 'Hump'] <- compute_delta_Ex_sensis_F(sensis_hump = delta, period = year)*12
      sensis_ex_dfF[as.character(paste(year,delta, sep = '_')), 'Senescent'] <- compute_delta_Ex_sensis_F(sensis_senescent = delta, period = year)*12
      sensis_ex_dfF[as.character(paste(year,delta, sep = '_')), 'variation'] <- delta
      sensis_ex_dfF[as.character(paste(year,delta, sep = '_')), 'year'] <- year
      sensis_ex_dfM[as.character(paste(year,delta, sep = '_')), 'Infant'] <- compute_delta_Ex_sensis_M(sensis_infant = delta, period = year)*12
      sensis_ex_dfM[as.character(paste(year,delta, sep = '_')), 'Hump'] <- compute_delta_Ex_sensis_M(sensis_hump = delta, period = year)*12
      sensis_ex_dfM[as.character(paste(year,delta, sep = '_')), 'Senescent'] <- compute_delta_Ex_sensis_M(sensis_senescent = delta, period = year)*12
      sensis_ex_dfM[as.character(paste(year,delta, sep = '_')), 'variation'] <- delta
      sensis_ex_dfM[as.character(paste(year,delta, sep = '_')), 'year'] <- year
    }
  }
  # Melt the Ex sensitivities to long format for plotting, for both sexes
  # (only Infant and Hump are kept as measure variables).
  sensis_ex_plotdf_m <- melt(sensis_ex_dfM, id.vars = 'year', measure.vars = c('Infant', 'Hump'), value.name = 'Ex')
  sensis_ex_plotdf_m$sex = 'M'
  sensis_ex_plotdf_f <- melt(sensis_ex_dfF, id.vars = 'year', measure.vars = c('Infant', 'Hump'), value.name = 'Ex')
  sensis_ex_plotdf_f$sex = 'F'
  rbind(sensis_ex_plotdf_f, sensis_ex_plotdf_m)
}
# Scatter + linear-trend plot of the stock LE by component, faceted by sex.
# (The title is in French: "life expectancy in stock by component".)
# Returns the ggplot object.
plot_Ex_stock <- function(){
  sensis_ex_plotdf <- compute_Ex_stock()
  ex_stock_plot <- ggplot(sensis_ex_plotdf, aes(x = year, y = Ex, color = variable), linetype = 1) +
    facet_wrap(~ sex, ncol = 1) +
    geom_point() +
    geom_smooth( method = 'lm', formula = y ~ x) +
    scale_x_continuous(breaks = c(seq(1960, 2015,5),2017)) +
    ggtitle('Espérance de vie en stock par composante') +
    scale_color_manual(name='Composante',values = c("#C39BD3", "#5FD69C"), )+
    ylab('Stock LE (months)')
  ex_stock_plot
}
# CORRELATIONS ---------------------
# Heatmap of the correlations between the per-component yearly LE
# improvements (Hump / Senescent / Infant), one panel per sex.
# Requires reshape2 (melt/dcast) and ggplot2. Returns the ggplot object.
plot_ExImp_corr <- function(){
  # Correlations between component improvements, stacked F then M (9 rows each).
  sensis_crossed_plot <- compute_Ex_by_comp()
  imp_corr <- rbind(melt(cor(dcast(subset(sensis_crossed_plot, sex == 'F'), formula = year ~ variable, value.var = 'value')[,c(2,3,4)])),
                    melt(cor(dcast(subset(sensis_crossed_plot, sex == 'M'), formula = year ~ variable, value.var = 'value')[,c(2,3,4)])))
  imp_corr[1:9, 'sex'] <- 'F'
  imp_corr[10:18, 'sex'] <- 'M'
  corr_F <- ggplot(data = imp_corr, aes(x=Var1, y=Var2, fill=value)) +
    geom_tile() +
    scale_fill_gradient2(midpoint = 0, low = "#52BE80", mid = "white",
                         high = "#03A9F4", space = "Lab" )+
    scale_y_discrete('',labels=c('Hump','Senescent','Infant'))+
    scale_x_discrete('',labels=c('Hump','Senescent','Infant'))+
    geom_text(aes(Var2, Var1, label = round(value,3)), color = "#5D6D7E", size = 5) +
    facet_wrap(~ sex, nrow = 2) + theme(rect = element_rect(fill = "transparent") # all rectangles
    )
  corr_F
}
|
6fdfeb5a48a26457db1540db5870c149fb567e41
|
4e6f18e6feb01502f2cd15772b6c0cc2f7c0629d
|
/man/treatments_by_policy.Rd
|
8059289f4095b79911a697dd124dda352441303c
|
[] |
no_license
|
cancerpolicy/bcimodel
|
8f76e9a64c79773c4e7058f1a71eb619a6496d8e
|
219df7c778ce8388b661a83de45845b2b0f57815
|
refs/heads/master
| 2021-01-13T02:55:45.874682
| 2019-06-27T19:50:48
| 2019-06-27T19:50:48
| 77,089,332
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,375
|
rd
|
treatments_by_policy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treatment.R
\name{treatments_by_policy}
\alias{treatments_by_policy}
\title{Use sim_treatment_by_subgroup to simulate treatments}
\usage{
treatments_by_policy(policies, treat_chars, stagegroups, map, pop_size,
nsim)
}
\arguments{
\item{policies}{A "scenarios" data frame containing an 'id' for the policies
and a 'pairnum' column indicating either NA or the paired policy ID, for strategies with early detection. See ex1$pol}
\item{treat_chars}{Data frame with "txSSno" column indicating treatment numbers and subsequent columns with treatment proportions WITHIN stage-subgroups. Each of these columns should correspond to a row in the "policies" data frame, with their names taken from policies$id. See ex1$tx}
\item{stagegroups}{List of stage-subgroup matrices, one for each policy/row in the "scenarios" data frame}
\item{map}{Stage-subgroup map indicating allowed stage-shifts. See ex1$map.}
\item{pop_size}{Population size (number of rows)}
\item{nsim}{Number of sims (number of columns)}
}
\value{
List of treatment matrices, one for each policy in the "scenarios" data frame. Each matrix contains treatment IDs corresponding to treat_chars$txSSno. Early detection scenarios will have NAs for advanced-stage cases who aren't stage-shifted.
}
\description{
Simulate treatments according to specified policy rules
}
\examples{
library(bcimodel)
data(ex1)
# ex1$nh shows that there are 4 stage-subgroups. Use a fake random distribution of groups 1:4 for the population before stage-shifting.
popdistr <- matrix(sample.int(4, size=40, replace=TRUE), nrow=20, ncol=2)
# Create stageshift indicator matrices for all 3 scenarios: no stage shifts for #1 and #2, but 30\% stageshift for #3. Use a small population of size 20, and 2 sims
stageshifts <- list(base=matrix(0, nrow=20, ncol=2),
tam=matrix(0, nrow=20, ncol=2),
tamshift=stageshift_indicator(0.85, 20, 2))
# Get the actual stages - only policy #3 has stage-shifting
stages <- lapply(stageshifts, shift_stages, original=popdistr, map=ex1$map)
lapply(stages, table)
t <- treatments_by_policy(policies=ex1$pol,
treat_chars=ex1$tx,
stagegroups=stages,
map=ex1$map,
pop_size=20, nsim=2)
}
|
c3b65c65c0028a21dbeb952079a352dbeddc08c5
|
96a3c9ff4b96b5493b1012fa5d60e144270c3952
|
/run_analysis.R
|
97dca96f8ba1b92b90ab86be948c2de1440980df
|
[] |
no_license
|
cnemri/DataCleaning
|
a9c9b18a6939eae93824b5ad012f97be4583c572
|
42d043c4b4bb243e5e53f0622901cadc4dd07236
|
refs/heads/master
| 2022-02-23T15:30:35.845051
| 2019-10-20T06:32:15
| 2019-10-20T06:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,529
|
r
|
run_analysis.R
|
# Getting and Cleaning Data Course Project
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; avoid in
# scripts that others may source interactively.
rm(list = ls())
# The script processes Activity Recognition Using Smartphones Dataset
# It has two outputs
# data table X : Dataset as per Step 4 of the Instructions
# data table X_tidy : Tidy Data set as requested in Step 5 of the instructions
if(!file.exists('./data')) {dir.create('./data')}
# Downloading the dataset (skipped when the zip is already present)
if (!file.exists('./data/Dataset.zip')) {
  fileUrl = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileUrl, './data/Dataset.zip', method = 'curl')
  unzip('./data/Dataset.zip', exdir = './data')
}
library(data.table)
# Load the feature names (column V2 holds the feature labels)
Features <- fread('./data/UCI HAR Dataset/features.txt', header = F, sep2 = '\t')
Features <- Features[,V2]
# Load Train set: y_train = activity codes, subject_train = subject ids
Y_train <- fread("./data/UCI HAR Dataset/train/y_train.txt", header = F)
id <- fread('./data/UCI HAR Dataset/train/subject_train.txt', header = F)
X_train <- fread('./data/UCI HAR Dataset/train/X_train.txt', header = F, sep2 = '\t')
# NOTE(review): Y_train and id are one-column data.tables, so these
# assignments store data.table columns rather than plain vectors — confirm
# the later `Activity_labels$V2[Activity]` indexing behaves as intended.
X_train$Activity <- Y_train
X_train$id <- id
names(X_train) <- c(Features, "Activity", 'id')
X_train <- X_train[, c(563, 1:562)]   # move id (col 563) to the front
# Load Test set (same structure as the train set)
Y_test <- fread('./data/UCI HAR Dataset/test/y_test.txt', header = F)
id <- fread('./data/UCI HAR Dataset/test/subject_test.txt', header = F)
X_test <- fread('./data/UCI HAR Dataset/test/X_test.txt', header = F, sep2 = '\t')
X_test$Activity <- Y_test
X_test$id <- id
names(X_test) <- c(Features, 'Activity', 'id')
X_test <- X_test[, c(563, 1:562)]
# Merging train and test datasets
X <- rbind(X_train, X_test)
# Keeping std and means features only
Features_logicals <- grepl('(.*)mean(.*)|(.*)std(.*)', Features)
Features_std_mean <- Features[Features_logicals]
X <- X[, c('id',Features_std_mean,'Activity'), with=FALSE]
# Replacing activity flags with descriptive activity labels
Activity_labels <- fread('./data/UCI HAR Dataset/activity_labels.txt', sep2 = '\t')
library(dplyr)
X <- mutate(X, Activity = Activity_labels$V2[Activity])
# Tidy dataset: one row per feature, mean value per activity (dcast)
library(reshape2)
Xmelt <- melt(X, id.vars = c('id', 'Activity'), measure.vars = Features_std_mean)
X_tidy <- dcast(Xmelt, variable ~ Activity, mean)
# Removing unnecessary variables from workspace
rm('X_train','X_test','Features','Features_logicals', 'Activity_labels', 'Y_train','Y_test','id','fileUrl','Features_std_mean','Xmelt')
write.table(X,"./data/UCI HAR Dataset/CleanData.txt",sep="\t",row.names=FALSE)
write.table(X_tidy,"./data/UCI HAR Dataset/TidyData.txt",sep="\t",row.names=FALSE)
|
158c405064845817f7514ce3960ecae2559132a7
|
0480eb0ebcc0ffcc5889b8f8674195816c9e57c5
|
/Lesson 3 - Functions, loops, ifelse/RNASeqResults.R
|
5a8fbed653422bf07c12437b3f934f6aaf3a7bcc
|
[] |
no_license
|
aharkey/RschoolScripts
|
d364f9eb2132a8e37378386734b022268d87f0ff
|
34bda28b8bbacc111d48db77195926cc7f2f1d37
|
refs/heads/master
| 2022-11-23T08:20:45.281138
| 2020-07-27T16:01:16
| 2020-07-27T16:01:16
| 268,577,891
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 461
|
r
|
RNASeqResults.R
|
# Return the element of a numeric vector with the largest absolute value.
# Ties between the maximum and |minimum| resolve to the minimum, matching
# the original behavior.
maxabs <- function(a) {
  hi <- max(a)
  lo <- min(a)
  # Plain if/else rather than ifelse(): the condition is a scalar, and
  # ifelse() is a vectorized function that is wasteful (and attribute-
  # stripping) for a single value.
  if (hi > abs(lo)) hi else lo
}
# Given a p-value and a log fold-change plus cutoffs for both, return a
# single character: "U" (up-regulated), "D" (down-regulated), or "n"
# (no significant change).
#
# pval:   p-value of the differential-expression test (scalar)
# lfc:    log fold-change (scalar)
# pcut:   p-value threshold, exclusive (default 0.05)
# lfccut: absolute log fold-change threshold, exclusive (default 0.5)
upordown <- function(pval, lfc, pcut = 0.05, lfccut = 0.5) {
  # && instead of &: this function operates on scalars, and the scalar
  # operator short-circuits and (R >= 4.3) errors on vector input instead
  # of silently recycling.
  if (pval < pcut && abs(lfc) > lfccut) {
    if (lfc > 0) "U" else "D"
  } else {
    "n"
  }
}
|
b3f0453b06b29598cfdd0e3d1bdde8acb9d74816
|
47522d9918014670e0a78bcb874f98147b27c9c1
|
/man/rsd.Rd
|
3ef9334ebebefda748bdef2516940142ac1010df
|
[] |
no_license
|
galangjs/galangjs
|
b764a043a3b4a78225758cb7a8d583641deb5be2
|
3d7e2e2f609eedd41b2a0d10e68712c776f31ec2
|
refs/heads/master
| 2021-01-18T13:49:22.298744
| 2015-09-12T03:26:23
| 2015-09-12T03:26:23
| 41,980,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
rd
|
rsd.Rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/rsd_et_al.r
\name{rsd}
\alias{rsd}
\title{Relative Standard Deviation}
\usage{
rsd(x, as_pct = FALSE)
}
\arguments{
\item{x}{a numeric vector}
\item{as_pct}{logical stating whether the result should be expressed as a
percentage}
}
\description{
This function calculates the relative standard deviation of a numeric vector.
}
\examples{
rsd(c(19, 17, 18))
rsd(17:19, as_pct = TRUE)
}
|
34c8778a33048d584bd5ab955b7d6f6aec06cb37
|
cfcd465ec5e08de08347c0ddb9b105d5b2ba1013
|
/src/prediction/prediction_randomforest.R
|
1160a85671a1e1d03a8b21f07a7916af7e7e3345
|
[] |
no_license
|
dewaldayres/titanic
|
a031001bd471ab76964a2e5e3c59892bb08d5df4
|
dc349be0030d861bc32c63c354ff52f3f90213a7
|
refs/heads/master
| 2020-03-11T10:03:03.015295
| 2019-01-09T17:20:47
| 2019-01-09T17:20:47
| 129,926,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
r
|
prediction_randomforest.R
|
#
# randomforest prediction
#
# Fit a random forest on the training subset of the global `passengers`
# data frame and write predicted survival back into `test` and `passengers`.
set.seed(415)   # fixed seed so the forest is reproducible
train <- passengers %>% filter(set=="train")
test <- passengers %>% filter(set=="test")
test <- within(test, rm(survived))   # drop the (empty) outcome column
# The trailing "# 80.38" records an accuracy score for this feature set.
fit <- randomForest(survived ~ passenger_class + gender + age + fare + family_size + title, # 80.38
                    data=train,
                    importance=TRUE,
                    ntree=2000)
# varImpPlot(fit)
prediction <- predict(fit, test)
test$survived <- prediction
# NOTE(review): this row-indexes `passengers` by passenger_id — assumes ids
# coincide with row positions; confirm.
passengers[test$passenger_id,]$survived <- prediction
# confusionMatrix(prediction, test$survived)
|
87425489843a604ebd357e2be9918eaa51bce589
|
1e8406ba4774f2717acb4d53b26589e5913e951a
|
/cor_gene_counts.R
|
7c4e810c907e37ac23cf5b481b70a8b53bba70fb
|
[] |
no_license
|
russ-dvm/cchfv-rnaseq
|
27e507242bd0c14fdf1545d82625ed088638ebbd
|
fd052396a6050a5feb4f44f8826a67fae0bd2885
|
refs/heads/master
| 2021-10-16T14:52:34.235047
| 2019-02-11T16:53:50
| 2019-02-11T16:53:50
| 104,894,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
cor_gene_counts.R
|
library(tidyverse)
library(ggpubr)
library(plyr)
# Gene-count matrices; library columns are named "Lib_<n>" (used by cor_fnc).
hep <- read.csv("~/Dropbox/temp/gene_count_matrix.csv", h = T)
huh <- read.csv("~/Dropbox/temp/huh_gene_counts.csv", h = T)
# Spearman correlations between sequencing libraries, taken three at a time.
#
# x:      data frame of gene counts whose library columns are named "Lib_<n>"
# offset: shift added to the loop index; a triplet (Lib_{n-2}, Lib_{n-1},
#         Lib_n) is processed whenever n = index + offset is a multiple of 3
#         (offset 18 for the hep matrix, 0 for huh — see calls below).
#
# Returns a list of c("<Lib_a-Lib_b>", "<spearman rho>") character pairs,
# one per within-triplet comparison, at the same list slots the original
# loop used (index-2, index-1, index).
cor_fnc <- function(x, offset){
  cor_list <- list()
  # seq_len() instead of 1:(ncol(x) - 1): the latter iterates c(1, 0) when
  # x has a single column.
  for (j in seq_len(ncol(x) - 1)) {
    n <- j + offset
    if (n %% 3 != 0) next   # only act once per complete triplet
    lib_names <- paste0("Lib_", (n - 2):n)
    # Exact-match the three library columns by name (anchored patterns).
    cols <- vapply(paste0("^", lib_names, "$"),
                   function(p) grep(p, colnames(x)),
                   integer(1))
    # The three pairwise comparisons within the triplet, in the original
    # order: (1,2) -> slot j-2, (1,3) -> slot j-1, (2,3) -> slot j.
    pairs <- list(c(1, 2), c(1, 3), c(2, 3))
    for (k in seq_along(pairs)) {
      a <- pairs[[k]][1]
      b <- pairs[[k]][2]
      rho <- cor(x[, cols[a]], x[, cols[b]], method = "spearman")
      # c() coerces rho to character, matching the original output type.
      cor_list[[j - 3 + k]] <- c(paste(lib_names[a], lib_names[b], sep = "-"), rho)
    }
  }
  return(cor_list)
}
# Correlations for the `hep` matrix: library columns start at Lib_19 (offset 18).
hepList <- cor_fnc(hep, 18)
hepDf <- ldply(hepList)   # list of (name, rho) pairs -> data frame (V1, V2)
hepDfRound <- hepDf
hepDfRound$V2 <- signif(as.numeric(hepDfRound$V2), digits = 3)
# Correlations for the `huh` matrix: library columns start at Lib_1 (offset 0).
huhList <- cor_fnc(huh, 0)
huhDf <- ldply(huhList)
huhDfRound <- huhDf
huhDfRound$V2 <- signif(as.numeric(huhDfRound$V2), digits = 3)
|
c78bd8250f5a5e3353e875ba6b5499ab84595e67
|
c109be0d330c4a0a06c055a2b1802ffc4b9c73d5
|
/Rstem/man/wordStem.Rd
|
392ee9efc7341ce3212a01bc9f2d5267301bd58a
|
[] |
no_license
|
bradleyboehmke/Air-Force-Little-Blue-Book
|
c228706934cf694a1af29ea916411fd9186cdb3b
|
3aa57b80d1add460d4c128f223f3c170bfc6f85f
|
refs/heads/master
| 2021-01-10T14:20:40.904660
| 2016-03-13T18:56:37
| 2016-03-13T18:56:37
| 48,626,590
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,838
|
rd
|
wordStem.Rd
|
\name{wordStem}
\alias{wordStem}
\title{Get the common root/stem of words}
\description{
This function computes the stems of each of the
given words in the vector.
This reduces a word to its base component,
making it easier to compare words
like win, winning, winner.
See \url{http://snowball.tartarus.org/} for
more information about the concept and algorithms
for stemming.
}
\usage{
wordStem(words, language = character(), warnTested = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{words}{a character vector of words whose stems are to be
computed.}
\item{language}{the name of a recognized language for the package.
This should either be a single string which is an element in the
vector returned by \code{\link{getStemLanguages}}, or
alternatively a character vector of length 3
giving the names of the routines for
creating and closing a Snowball \code{SN\_env} environment
and performing the stem (in that order).
See the example below.
}
\item{warnTested}{an option to control whether a warning is issued
about languages which have not been explicitly tested as part of the
unit testing of the code. For the most part, one can ignore these
warnings and so they are turned off. In the future, we might
consider controlling this with a global option, but for now
we suppress the warnings by default.
}
}
\details{
This uses Dr. Martin Porter's stemming algorithm
and the interface generated by Snowball
\url{http://snowball.tartarus.org/}.
}
\value{
A character vector with as many elements
as there are in the input vector
with the corresponding elements being the
stem of the word.
}
\references{
See \url{http://snowball.tartarus.org/}
}
\author{Duncan Temple Lang <duncan@wald.ucdavis.edu>}
\examples{
# Simple example
# "win" "win" "winner"
wordStem(c("win", "winning", 'winner'))
# test the supplied vocabulary.
testWords = readLines(system.file("words", "english", "voc.txt", package = "Rstem"))
validate = readLines(system.file("words", "english", "output.txt", package = "Rstem"))
\dontrun{
# Read the test words directly from the snowball site over the Web
testWords = readLines(url("http://snowball.tartarus.org/english/voc.txt"))
}
testOut = wordStem(testWords)
all(validate == testOut)
# Specify the language from one of the built-in languages.
testOut = wordStem(testWords, "english")
all(validate == testOut)
# To illustrate using the dynamic lookup of symbols that allows one
# to easily add new languages or create and close environment
# routines (for example, to manage pools if this were an efficiency
# issue!)
testOut = wordStem(testWords, c("testDynCreate", "testDynClose", "testDynStem"))
}
\keyword{IO}
\keyword{utilities}
|
2ab8f6fcf7ea1836b1d68281c45b46e53c015524
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/codeartifact_list_repositories.Rd
|
e39376d66724a1f6178a80495fa333214af27d51
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,175
|
rd
|
codeartifact_list_repositories.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeartifact_operations.R
\name{codeartifact_list_repositories}
\alias{codeartifact_list_repositories}
\title{Returns a list of RepositorySummary objects}
\usage{
codeartifact_list_repositories(
repositoryPrefix = NULL,
maxResults = NULL,
nextToken = NULL
)
}
\arguments{
\item{repositoryPrefix}{A prefix used to filter returned repositories. Only repositories with
names that start with \code{repositoryPrefix} are returned.}
\item{maxResults}{The maximum number of results to return per page.}
\item{nextToken}{The token for the next set of results. Use the value returned in the
previous response in the next request to retrieve the next set of
results.}
}
\description{
Returns a list of \href{https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_RepositorySummary.html}{RepositorySummary} objects. Each \code{RepositorySummary} contains information about a repository in the specified Amazon Web Services account and that matches the input parameters.
See \url{https://www.paws-r-sdk.com/docs/codeartifact_list_repositories/} for full documentation.
}
\keyword{internal}
|
7f8e50394fe6e3285e612513337d6402eaf2076d
|
22057bf4f2eb001f739761e53a5578de578e6920
|
/scripts_on_file1/compare_tracer_premodeling.R
|
1b0d819eef077c764fa296a772a1b62d290816bb
|
[] |
no_license
|
mrubayet/archived_codes_for_sfa_modeling
|
3e26d9732f75d9ea5e87d4d4a01974230e0d61da
|
f300fe8984d1f1366f32af865e7d8a5b62accb0d
|
refs/heads/master
| 2020-07-01T08:16:17.425365
| 2019-08-02T21:47:18
| 2019-08-02T21:47:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,979
|
r
|
compare_tracer_premodeling.R
|
# compare_tracer_premodeling.R -- configuration section
#
# Sets up the case list, plotting parameters and unit conversions used below
# to plot flux-averaged breakthrough curves from multiple pre-modeling
# simulation cases (one curve per case, one panel per monitoring well).
#
# NOTE(review): clearing the workspace is a side effect on whoever sources
# this script; kept only to preserve the script's original behavior.
rm(list = ls())

# Root directory holding one sub-directory per simulation case.
main_path <- '/files2/scratch/chenxy/'

# ---- Alternative case sets, kept for reference (uncomment to use) ----------
#comp_paths = c('MeanField/IFRC2/120x120x30','IFRC2_Tracer_MeanField_BoundaryTracer/Injected')
#comp_legends = c('Dirichlet','Zero_gradient')
#comp_col = c('red','blue')
#comp_paths = c('IFRC2_MR_0_Prior/MeanField','IFRC2_MR_1_Prior/MeanField','IFRC2_MR_0_Prior_highIC/MeanField')
#comp_legends = c('IFRC2_MR_0','IFRC2_MR_1','High_Inital_U')
#comp_col = c('red','blue','darkgreen')
#comp_paths = c('Premodel_Oct2011_150gpm','Premodel_Oct2011_150gpm_start191hr')
#comp_paths = c('Premodel_Oct2011_10gpm','Premodel_Oct2011_25gpm','Premodel_Oct2011_50gpm','Premodel_Oct2011_100gpm','Premodel_Oct2011_150gpm')

# Active comparison: five injection rates, all starting at 191.25 hr.
comp_paths <- c('Premodel_Oct2011_10gpm_start191hr','Premodel_Oct2011_25gpm_start191hr','Premodel_Oct2011_50gpm_start191hr','Premodel_Oct2011_100gpm_start191hr','Premodel_Oct2011_150gpm_start191hr')
comp_legends <- c('10gpm@191hr','25gpm','50gpm','100gpm','150gpm')
comp_col <- c('black','red','blue','darkgreen','mediumorchid')
#comp_legends = c('start@24hr','start@191hr')
#comp_col = c('red','blue')
#comp_paths = c('IFRC2_MR_0_Prior_highIC/MeanField','IFRC2_MR_0_Prior_highIC/MeanField_fine')
#comp_legends = c('High_Inital_U','High_Initial_U_finerVZ')
#comp_col = c('red','blue')
#comp_paths = c('IFRC2_Tracer_MeanField_BoundaryTracer/East','IFRC2_Tracer_MeanField_BoundaryTracer/West','IFRC2_Tracer_MeanField_BoundaryTracer/South','IFRC2_Tracer_MeanField_BoundaryTracer/North','IFRC2_Tracer_MeanField_BoundaryTracer/Injected')
#comp_legends = c('East','West','South','North','Initial')
#comp_col = c('red','blue','darkgreen','chocolate','plum')

testvariable <- 'Tracer_'    # observed quantity to plot ('Tracer_' or a 'UO2' variant)
BC <- 'IFRC2'                # boundary-condition label used in panel titles
npath <- length(comp_paths)  # number of cases compared
fields <- 1                  # random-field realization indices to plot
nfields <- length(fields)    # number of random fields

# Prefix and extension of the per-realization observation files.
prefix_file <- 'OBS_FluxAve'
ext_file <- '.dat'
DataSet <- 'Mar2011TT'
plot_true <- FALSE           # overlay observed ("true") curves?  (was: F)

# Plotting specifications.
linwd1 <- 1.5
linwd2 <- 1.5
pwidth <- 26
pheight <- 17
plotfile_name <- 'Compare_InjRate_Starting191_'
plotfile <- '_Oct2011.jpg'
x_lim <- c(0, 500)
NormTime_True <- 1.0         # normalization factor for time of true data
#time_start = 255.5 # for Oct2009 test
#time_start = 191.25 # for Mar2011 test
#time_start = c(24,191.25) # for Mar2011 test
#time_start = rep(24,npath)
time_start <- rep(191.25, npath)  # injection start time (hr) for each case
MeanData_exist <- 0          # whether results from mean field exist

# Variable-specific settings: column of the "true" data file holding the
# observation, concentration normalization, axis label and unit conversion.
if (length(grep('UO2', testvariable)) > 0)
{
  true_col <- 2
  NormConc_True <- 1.0
  y_label <- 'U(VI) [ug/L]'
  y_convert <- 238.0 * 1000000
}
if (length(grep('Tracer', testvariable)) > 0)
{
  true_col <- 3
  y_label <- 'Tracer C/C0'
  # norm_true = 180.0 #Oct 2009 test
  # y_convert = 1000.0/2.25 #Oct2009 test
  NormConc_True <- 210.0        # Mar2011 test
  y_convert <- 1000.0 / 5.9234  # Mar2011 test, injected tracer
  # y_convert = 1000.0 #for boundary tracer
}

# Monitoring wells to plot, one panel each.
wells_plot <- c('2-05','2-07','2-08','2-09','2-10','2-11','2-12','2-13','2-14','2-15','2-16','2-17','2-18','2-19','2-20','2-21',
'2-22','2-23','2-24','2-25','2-26','2-27','2-28','2-29','2-30','2-31','3-23','3-24','3-25','3-26','3-27','3-28',
'3-29','3-30','3-31','3-32','3-35','2-34','2-37')
nwell <- length(wells_plot)
# Main loop: one JPEG figure per random-field realization. Each figure is a
# 5x8 grid of per-well breakthrough curves overlaying all cases in comp_paths.
for(ifield in seq(1,nfields))
{
# --- Load the flux-averaged observation file for every case ------------------
for (ipath in 1:npath)
{
path_prefix = paste(main_path,'/',comp_paths[ipath],'/',sep='')
input_file = paste(path_prefix, prefix_file,'R',ifield,ext_file,sep='')
# Header line holds comma-separated variable names; first column is time.
a = readLines(input_file,n=1)
b = unlist(strsplit(a,','))
nvar = length(b)-1 #the first column is time
varnames = b[-1]
#find the columns needed
varcols = array(NA,nwell,1)
varcols_t1 = array(NA,nwell,1)
# Data column for each well: the header entry matching BOTH the well name and
# the test variable (+1 because column 1 of the file is time).
for (iw in 1:nwell)
varcols[iw] = intersect(grep(wells_plot[iw],varnames),grep(testvariable,varnames)) + 1 #the first column is time
#for tracer, high concentration was assigned to east boundary by mistake, subtracting Tracer1 concentration will correct it
if(length(grep('Tracer_',testvariable))>0)
{
for (iw in 1:nwell)
varcols_t1[iw] = intersect(grep(wells_plot[iw],varnames),grep('Tracer1',varnames)) + 1 #the first column is time
}
#read from files
input_data = read.table(input_file,skip=1) # the first line is skipped
# data0: column 1 = time since injection start; columns 2..nwell+1 = wells.
data0 = matrix(0,nrow(input_data),(nwell+1))
data0[,1] = input_data[,1]- time_start[ipath]
for (ivar in 1:nwell)
{
if(length(grep('Tracer_',testvariable))==0)
data0[,(ivar+1)] = input_data[,varcols[ivar]]
if(length(grep('Tracer_',testvariable))>0)
data0[,(ivar+1)] = input_data[,varcols[ivar]]-input_data[,varcols_t1[ivar]]
}
# Convert units using the factor chosen in the configuration section above.
data0[,2:(nwell+1)] = data0[,2:(nwell+1)] * y_convert
#store data from different cases in different names
# NOTE(review): eval(parse(...)) manufactures variables data1..dataN; a list
# indexed by ipath would be safer, but kept as-is to preserve behavior.
text1 = paste('data',ipath,'=data0',sep='')
eval(parse(text = text1))
}
# --- Plot: one JPEG per realization, 5x8 panel grid --------------------------
jpeg(paste(main_path,'/',plotfile_name,testvariable,'_R',ifield, plotfile, sep = ''), width = pwidth, height = pheight,units="in",res=150,quality=100)
par(mfrow=c(5,8))
for (iw in 1:nwell)
{
well_name = wells_plot[iw]
#find the maximum y among th cases
# Scan all cases for this well to get common y-axis limits.
ymin = 5000
ymax = 0
for (ipath in 1:npath)
{
text1 = paste('data0 = data',ipath,sep='')
eval(parse(text = text1))
ymax = max(max(data0[,(iw+1)],na.rm=T),ymax)
ymin = min(min(data0[,(iw+1)],na.rm=T),ymin)
}
# Optionally overlay the field-observed ("true") breakthrough curve.
if(plot_true)
{
true_t = matrix(NA,1,1)
true_BTC = matrix(NA,1,1)
#read in true data if it exists
true_file = paste('TrueData_',DataSet,'/Well_',well_name,'_',DataSet,'.txt',sep='')
true_exist = file.exists(true_file)
if(true_exist)
{
truedata = read.table(true_file,skip=1)
true_t = truedata[,1]/NormTime_True #time units converted to hours
true_BTC = truedata[,true_col]
# Subtract the pre-injection (t < 0) background before normalizing.
if(length(grep('Tracer',testvariable)))
true_BTC = true_BTC - mean(truedata[which(truedata[,1]<0),3])
true_BTC = true_BTC/NormConc_True
ymax = max(ymax,max(true_BTC,na.rm=T))
ymin = min(ymin,min(true_BTC,na.rm=T))
}
}
# Pad the y-range by 5%; ymax is then deliberately overridden to a fixed 1.1
# (C/C0 axis ceiling), so the computed padding only affects ymin.
yrange = ymax - ymin
ymax = ymax + 0.05*yrange
ymin = ymin - 0.05*yrange
ymax = 1.1
# First case drawn with plot(), remaining cases overlaid with points().
plot(data1[,1],data1[,(iw+1)], main=paste(BC,': ',wells_plot[iw],sep=''), xlab = b[1], ylab = y_label,xlim=x_lim, ylim = c(ymin, ymax), type = "l",lwd = linwd1, col = comp_col[1],lty = 'solid')
for(ipath in 2:npath)
{
text1 = paste('data0 = data',ipath,sep='')
eval(parse(text = text1))
points(data0[,1],data0[,iw+1],col=comp_col[ipath],type = 'l',lty='solid',lwd=linwd1)
}
if(plot_true)
points(true_t,true_BTC,col='black',type = 'b',lty='solid',lwd=linwd1,pch=1)
#points(data0[,1],data1[,(iw+1)]+data2[,(iw+1)]+data3[,(iw+1)]+data4[,(iw+1)]+data5[,(iw+1)],type='l',lty='dashed')
}
# Final panel: an empty plot used only to host the legend.
plot(c(1,2),c(2,2),ylim=c(0,1),xlab="",ylab="",bty="n",axes = F)
legend('topleft',comp_legends,lty=rep('solid',times = npath),col=comp_col,lwd=rep(linwd1,times=npath),bty='n')
dev.off()
}
|
2f11b137420cecf7d9a3f63222540567149ae968
|
4dcbd7bde2c131cb0d3c96990eb20df6e9ea22ed
|
/man/filter_noise.Rd
|
52068006951a390eb92adf5363997beb89bd1af2
|
[] |
no_license
|
LindaLuck/VoxR
|
e3428d0967a2d5b5b3470cdfe85df92d75e7dfb9
|
4ad74f91aa421881120fc387137861d57c92f790
|
refs/heads/master
| 2022-12-24T02:44:23.030784
| 2020-09-30T07:50:08
| 2020-09-30T07:50:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,723
|
rd
|
filter_noise.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_noise.R
\name{filter_noise}
\alias{filter_noise}
\title{Statistical filtering of a point cloud.}
\usage{
filter_noise(data, k, sigma, store_noise, message)
}
\arguments{
\item{data}{a data.frame or data.table containing the x, y, z, ... coordinates of a point cloud.}
\item{k}{numeric. The number of nearest neighbours to use. Default = 5.}
\item{sigma}{numeric. The multiplier of standard deviation to consider a point as noise. Default = 1.5.}
\item{store_noise}{logical. Should the noisy points be retained ? Default = FALSE.}
\item{message}{logical. If FALSE, messages are disabled. Default = TRUE.}
}
\value{
If \code{store_noise = TRUE} the input data is returned with an additional field ("Noise")
where points that are classified as noise points are labeled with 2 and the points not classified as noise are labeled as 1.
If \code{store_noise = FALSE} only the points that were not classified as noise are returned.
}
\description{
Implements the Statistical Outliers Removal (SOR) filter available in
\href{https://www.cloudcompare.org/doc/wiki/index.php?title=SOR_filter}{CloudCompare}.
Computes the distance of each point to its \code{k} nearest neighbours and considers
a point as noise if it is further than the average distance (for the entire point cloud)
plus \code{sigma} times the standard deviation away from other points.
}
\examples{
#- import tls data
tls=data.table::fread(system.file("extdata", "Tree_t0.asc", package="VoxR"))
#- run noise filter
clean=VoxR::filter_noise(tls,store_noise = TRUE)
#- plot the result (noise in red)
rgl::open3d()
rgl::plot3d(clean,col=clean$Noise,add=TRUE)
}
|
82d4f6e593974326668ce63c3d01890d1ff918ee
|
72a7ccc09560084e0a7465ff3a7c10c61e06f889
|
/man/plotDiffPathways.Rd
|
a0f7db9fd46a284d05b0312d1d7f62f2f06ec430
|
[] |
no_license
|
liuqivandy/scRNABatchQC
|
e2cbe87ce3be2b87139b0c8f1e5c9f806e99e02a
|
32689ef428eeff0a6e97e6089cb9594d1f7c7b3d
|
refs/heads/master
| 2021-06-11T20:26:40.934055
| 2021-03-20T17:39:39
| 2021-03-20T17:39:39
| 119,717,047
| 11
| 6
| null | 2020-09-08T11:54:23
| 2018-01-31T17:04:37
|
HTML
|
UTF-8
|
R
| false
| true
| 1,264
|
rd
|
plotDiffPathways.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFunctions.R
\name{plotDiffPathways}
\alias{plotDiffPathways}
\title{plot pathways enriched in differentially expressed genes}
\usage{
plotDiffPathways(
scesMerge,
margins = c(5, 5),
keysize = 1,
col = colorpanel(75, low = "white", high = "red"),
...
)
}
\arguments{
\item{scesMerge}{a SingleCellExperiment object; this object contains the combined datasets, pairwise comparison results and reduced dimensions using PCA and tSNE; (results from \code{\link{Combine_scRNAseq}})}
\item{margins}{margins for heatmap.2}
\item{keysize}{integer for heatmap.2}
\item{col}{color for heatmap.2}
\item{...}{parameters passing to heatmap.2}
}
\value{
a matrix containing the -log10 FDR of enriched pathways
}
\description{
plot the pathways enriched in the differentially expressed genes from each pairwise comparison
}
\examples{
library(scRNABatchQC)
sces<-Process_scRNAseq(inputfile=c("https://github.com/liuqivandy/scRNABatchQC/raw/master/bioplar1.csv.gz",
"https://github.com/liuqivandy/scRNABatchQC/raw/master/bioplar5.csv.gz"))
scesMerge<-Combine_scRNAseq(sces)
plotDiffPathways(scesMerge)
}
\seealso{
\code{\link{Process_scRNAseq}}, \code{\link{Combine_scRNAseq}}
}
|
8a909877b4b10af803b94abae1bf3278d4fa39ea
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/resize_max.Rd
|
62cf27daf2f3c71108614f083224a0115b4abd9a
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 450
|
rd
|
resize_max.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vision_core.R
\name{resize_max}
\alias{resize_max}
\title{Resize_max}
\usage{
resize_max(img, resample = 0, max_px = NULL, max_h = NULL, max_w = NULL)
}
\arguments{
\item{img}{image}
\item{resample}{resample value}
\item{max_px}{max px}
\item{max_h}{max height}
\item{max_w}{max width}
}
\value{
None
}
\description{
`resize` `img` to `max_px`, or `max_h`, or `max_w`
}
|
762e0b44f3b227793358d8540c4d2e3fc39204a4
|
8c2a46a573a0841d60392de018ab642c86682c35
|
/inst/tests/test-FLStock_cpp.R
|
0965c819c922acc77a739f10139268b556e4c1d0
|
[] |
no_license
|
drfinlayscott/FLRcppAdolc
|
aae91bc53f2998b83ee1e0ebf783d88fc27f2e78
|
9394549750aad754f15a521d57252a9d8c47185f
|
refs/heads/master
| 2021-01-18T18:16:34.118974
| 2014-08-25T11:43:38
| 2014-08-25T11:43:38
| 9,937,693
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,701
|
r
|
test-FLStock_cpp.R
|
# Unit tests for the C++ (Rcpp) implementation of FLStock: SEXP constructor,
# wrap/as converters, copy constructor and assignment operator.
# Modernized from the deprecated expect_that(x, is_identical_to(y)) style to
# expect_identical(x, y); assertions are unchanged.
context("CPP implementation of FLStock")

test_that("FLStock SEXP constructor", {
  data(ple4)
  # The C++ constructor must round-trip stock.n exactly.
  sn <- test_FLStock_sexp_constructor(ple4)
  expect_identical(sn, ple4@stock.n)
})

test_that("FLStock wrap and as", {
  data(ple4)
  fls <- test_FLStock_wrap(ple4)
  expect_identical(fls, ple4)
  flq <- test_FLStock_as(ple4)
  expect_identical(flq, ple4@stock.n)
  # Round trip through as<> then wrap; checks it completes without error.
  fls <- test_FLStock_as_wrap(ple4)
})

test_that("FLStock copy constructor works properly", {
  data(ple4)
  # Random 6-D index into stock.n (quant, year, unit, season, area, iter).
  indices <- round(runif(6, min = 1, max = dim(ple4@stock.n)))
  value_in <- rnorm(1)
  # Makes a copy of ple4@stock.n, changes a value, returns original and new
  # FLStock. Checks that the copy constructor makes a 'deep' copy, else
  # changing a value in the copy would also change the original.
  flss <- test_FLStock_copy_constructor(ple4, indices[1], indices[2], indices[3], indices[4], indices[5], indices[6], value_in)
  expect_identical(ple4, flss[["fls1"]])
  expect_identical(c(flss[["fls2"]]@stock.n[indices[1], indices[2], indices[3], indices[4], indices[5], indices[6]]), value_in)
})

test_that("FLStock assignment operator", {
  data(ple4)
  indices <- round(runif(6, min = 1, max = dim(ple4@stock.n)))
  value_in <- rnorm(1)
  # Assignment must also deep-copy: the original stays intact and only the
  # assigned-to object holds the new value.
  flss <- test_FLStock_assignment_operator(ple4, indices[1], indices[2], indices[3], indices[4], indices[5], indices[6], value_in)
  expect_identical(ple4, flss[["fls1"]])
  expect_identical(c(flss[["fls2"]]@stock.n[indices[1], indices[2], indices[3], indices[4], indices[5], indices[6]]), value_in)
})
|
46281f013ea7abb23d60d4ddab3d2e4bad984e7e
|
74bc48ba64859a63855d204f1efd31eca47a223f
|
/DSM/1003.Prav_CV_CrossValidation.R
|
1911177c7afb4377d1a0a32abe7d673f9dfc6dff
|
[] |
no_license
|
PraveenAdepu/kaggle_competitions
|
4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c
|
ed0111bcecbe5be4529a2a5be2ce4c6912729770
|
refs/heads/master
| 2020-09-02T15:29:51.885013
| 2020-04-09T01:50:55
| 2020-04-09T01:50:55
| 219,248,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,766
|
r
|
1003.Prav_CV_CrossValidation.R
|
###############################################################################################################################
# Workflow: restore the prepared DSM 2017 workspace, build a leak-based
# diabetes flag from pre-2015 dispense history, score it against the 2015
# validation set, then explore chronic-illness structure with k-means.
# Sys.time()
# save.image(file = "DSM2017_03.RData" , safe = TRUE)
Sys.time()
# Restores all_data_full and helpers (e.g. score, metric) prepared earlier.
# NOTE(review): assumes the .RData file lives in the working directory.
load("DSM2017_03.RData")
Sys.time()
###############################################################################################################################
# Build window = dispenses up to 2014; validation window = 2015 dispenses.
all_data_full_build <- subset(all_data_full, year(DispenseDate) <= 2014)
all_data_full_valid <- subset(all_data_full, year(DispenseDate) == 2015)
ValidationSet <- read_csv("./input/Prav_ValidationSet_2015.csv")
# Patients with a known Diabetes record inside the build window.
Build_DiabeticKnown <- subset(all_data_full_build[,c("Patient_ID","ChronicIllness")], ChronicIllness == "Diabetes")
Build_DiabeticKnown <- unique(Build_DiabeticKnown)
ValidationSetLeak <- inner_join(Build_DiabeticKnown,ValidationSet, by = "Patient_ID")
# head(ValidationSetLeak)
# Predict diabetic (Diabetes_build = 1) for those patients; all other
# validation patients default to 0 after the left join below.
ValidationSetLeak$Diabetes_build <- 1
ValidationSetLeak$Diabetes <- NULL
validation_Check <- left_join(ValidationSet, ValidationSetLeak,by="Patient_ID")
validation_Check$Diabetes_build[is.na(validation_Check$Diabetes_build)] <- 0
validation_Check$ChronicIllness <- NULL
validation_Check$Diabetes_build <- as.integer(validation_Check$Diabetes_build)
# head(validation_Check,25)
# Score only patients at or above this ID cut-off.
# NOTE(review): 279201 appears to be a fold/ID boundary from the build —
# confirm against the data-preparation script.
validation_Check <- subset(validation_Check, Patient_ID >= 279201)
cat("CV Fold- 2015 ", " ", metric, ": ", score(validation_Check$Diabetes_build, validation_Check$Diabetes, metric), "\n", sep = "")
# CV Fold- 2015 auc: 0.9231684
####################################################################################################################################
# Exploration: split build-window patients into diabetic / non-diabetic sets
# and inspect the chronic-illness labels in each.
unique(Build_DiabeticKnown$ChronicIllness)
Build_DiabeticKnown$ChronicIllness <- NULL
Build_Diabetes <- inner_join(all_data_full_build,Build_DiabeticKnown,by="Patient_ID")
unique(Build_Diabetes$ChronicIllness)
Build_NoDiabetic <- subset(all_data_full_build[,c("Patient_ID","ChronicIllness")], ChronicIllness != "Diabetes")
Build_NoDiabetic <- unique(Build_NoDiabetic)
unique(Build_NoDiabetic$ChronicIllness)
length(unique(all_data_full$ChronicIllness))
ggplot(all_data_full_valid, aes(year_of_birth, DispenseDate, color = ChronicIllness)) + geom_point()
####################################################################################################################################
# K-means on year_of_birth (k = 12) to see how age clusters align with
# chronic illness; seed fixed for reproducibility.
set.seed(20)
features <- c("year_of_birth")
Patient_Chronic_Clusters <- kmeans(all_data_full_build[, features], 12, nstart = 20)
Patient_Chronic_Clusters
table(Patient_Chronic_Clusters$cluster, all_data_full_build$ChronicIllness)
Patient_Chronic_Clusters$cluster <- as.factor(Patient_Chronic_Clusters$cluster)
ggplot(all_data_full_build, aes(year_of_birth, DispenseDate, color = Patient_Chronic_Clusters$cluster)) + geom_point()
names(all_data_full_build)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.