blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
320004f8633eb9ae713b40f2818dc9b147dfa00c
|
63b6a027aaab886b940025d821e994e69fa6ddf7
|
/Estimation.R
|
20935c6119effb825bdbf045816f909185a0ae54
|
[] |
no_license
|
lurui0421/CauseSel
|
6af4f17ca81e92aed047b867b0e045374cfe8196
|
29fba08b8e8e71ac578ef57f8571a517b2c15d86
|
refs/heads/master
| 2022-12-10T01:43:33.229305
| 2020-08-10T21:59:04
| 2020-08-10T21:59:04
| 227,627,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,961
|
r
|
Estimation.R
|
######################################################################################
## Treatment effect estimation ##
#######################################################################################
##: This is the treatment effect estimation based on various algorithms
##: These are embedded covariate selection method for causal inference
###: Author : Rui Lu
### 1: Propensity score matching
## Prerequisite functions and packages for estimation of the propensity score
install.packages("Matching")
library(Matching)
## Log-odds transform: maps a propensity score in (0, 1) onto the real line.
logit <- function(PS) {
  log(PS) - log(1 - PS)
}
## Estimate the ATE via propensity-score matching.
##
## Args:
##   df: data frame with covariate columns (names containing "X"), a binary
##       "Treat" indicator and an outcome "Y".
##   M: matches per unit (1 = 1:1 matching, 2 = 1:2, ...).
##   cov_sel: character vector of selected covariate names.
##   preprocessing: if TRUE, subset df to cov_sel plus Treat/Y first.
## Returns: scalar ATE estimate, the simple average of the ATT and ATC
##   estimates from two separate matched regressions.
est_ps_match <- function(df, M = 1, cov_sel, preprocessing = TRUE) {
  if (preprocessing == TRUE) {
    df <- df[, c(cov_sel, "Treat", "Y")]
  }
  ## Step 1: estimate the propensity score with boosted trees (twang).
  measure.var <- "Treat"
  sel.var <- names(df[, grep("X", names(df))])
  formula <- as.formula(paste(measure.var, paste(sel.var, collapse = " + "), sep = " ~ "))
  twang.fit <- twang::ps(formula = formula, data = df,
                         n.trees = 1000, interaction.depth = 2,
                         shrinkage = 0.01, stop.method = "es.mean",
                         estimand = "ATE")
  p.score <- twang.fit$ps
  names(p.score) <- NULL
  trt <- df$Treat
  ## Step 2: match without replacement on the logit of the propensity score,
  ## with a caliper of 0.2 SD.
  psmatch_ATT <- Match(Tr = trt, M = M, estimand = "ATT",
                       X = sapply(p.score, logit), replace = FALSE, caliper = .2)
  ## ATT = treatment coefficient of Y ~ Treat on the ATT-matched sample.
  matched_ATT <- df[unlist(psmatch_ATT[c("index.treated", "index.control")]), ]
  fit_ATT <- lm(Y ~ Treat, data = matched_ATT)
  ATT <- summary(fit_ATT)$coefficients[[2]]
  psmatch_ATC <- Match(Tr = trt, M = M, estimand = "ATC",
                       X = sapply(p.score, logit), replace = FALSE, caliper = .2)
  ## ATC = treatment coefficient on the ATC-matched sample.
  matched_ATC <- df[unlist(psmatch_ATC[c("index.treated", "index.control")]), ]
  fit_ATC <- lm(Y ~ Treat, data = matched_ATC)
  ATC <- summary(fit_ATC)$coefficients[[2]]
  ## BUG FIX: mean(ATC, ATT) passed ATT as mean()'s second argument `trim`,
  ## so the function simply returned ATC. Average the two estimates instead.
  ATE <- mean(c(ATT, ATC))
  return(ATE)
}
## Driver: apply propensity-score matching to each of 100 simulated data
## sets, using the covariates selected by the d-lasso procedure.
## NOTE(review): file.choose() opens an interactive picker, so this section
## can only run in an interactive session.
data_lin.100<-readRDS(file=file.choose())
cov_sel.dlasso.100<-readRDS(file=file.choose())
ps_100.lin.dlasso<-list()
for (i in 1:100)
{
  # data_lin.100[[i]][[1]] is presumably the i-th simulated data frame -- TODO confirm
  ps_100.lin.dlasso[[i]]<-est_ps_match(data_lin.100[[i]][[1]],cov_sel=cov_sel.dlasso.100[[i]])
}
### 2: Genetic Matching
## Prerequisite functions and packages for estimation of the propensity score
install.packages('rgenoud')
library(rgenoud)
install.packages("Matching")
library(Matching)
## Estimate the ATE via genetic matching (Matching::GenMatch).
##
## Args:
##   df: data frame with covariate columns (names containing "X"), a binary
##       "Treat" indicator and an outcome "Y".
##   M: number of matches per unit.
##   cov_sel: character vector of selected covariate names.
##   pre_processing: if TRUE, subset df to cov_sel plus Treat/Y first.
## Returns: scalar ATE estimate, the simple average of ATT and ATC.
est_gen_match <- function(df, M = 1, cov_sel, pre_processing = TRUE) {
  if (pre_processing == TRUE) {
    df <- df[, c(cov_sel, "Treat", "Y")]
  }
  ## Step 1: genetic matching searches for covariate weights optimizing balance.
  X <- df[, grep("X", names(df))]
  BalanceMat <- X
  Treat <- df$Treat
  Y <- df$Y
  ## Estimate ATC
  genout_ATC <- GenMatch(Tr = Treat, X = X, BalanceMatrix = BalanceMat, estimand = "ATC", M = M,
                         pop.size = 16, max.generations = 10, wait.generations = 1)
  mout_ATC <- Match(Y = Y, Tr = Treat, X = X, estimand = "ATC", Weight.matrix = genout_ATC)
  ATC <- as.vector(mout_ATC$est)
  ## Estimate ATT
  genout_ATT <- GenMatch(Tr = Treat, X = X, BalanceMatrix = BalanceMat, estimand = "ATT", M = M,
                         pop.size = 16, max.generations = 10, wait.generations = 1)
  mout_ATT <- Match(Y = Y, Tr = Treat, X = X, estimand = "ATT", Weight.matrix = genout_ATT)
  ATT <- as.vector(mout_ATT$est)
  ## Step 2: BUG FIX -- mean(ATT, ATC) passed ATC as mean()'s `trim` argument
  ## and returned ATT unchanged; average the two estimates instead.
  ATE <- mean(c(ATT, ATC))
  return(ATE)
}
## 3: BART : with propensity score justification
## Reference: Hill,2012
install.packages("devtools")
devtools::install_github("vdorie/bartCause")
library(bartCause)
## Full covariate name set for the simulated data (X1..X10). NOTE(review):
## est_bart() duplicates this vector in its default rather than using XY.
XY=c("X1","X2","X3","X4","X5","X6","X7","X8","X9","X10")
## Vanilla BART causal-effect estimation (Hill 2012) via bartCause::bartc.
##
## Args:
##   df: data frame with covariates (names containing "X"), "Treat" and "Y".
##   cov_sel: covariates to keep when pre_processing is TRUE.
##   method.rsp, method.trt: response/treatment model methods passed to bartc.
##   pre_processing: if TRUE, subset df to cov_sel plus Treat/Y first.
##   estimand: causal estimand requested from bartc ('ate' by default).
## Returns: the estimates table from summary(bartc(...)).
est_bart <- function(df, cov_sel = c("X1","X2","X3","X4","X5","X6","X7","X8","X9","X10"),
                     method.rsp = 'bart',
                     method.trt = 'none',
                     pre_processing = TRUE,
                     estimand = 'ate') {
  if (pre_processing == TRUE) {
    df <- df[, c(cov_sel, "Treat", "Y")]
  }
  cov_index <- grep("X", names(df))
  X <- as.matrix(df[, cov_index])
  Y <- df[, "Y"]
  trt <- df$Treat  # renamed from `T`, which shadows the TRUE shorthand
  ## BUG FIX: the `estimand` parameter was ignored -- the call hard-coded
  ## estimand = 'ate'. Pass it through instead.
  fit <- bartc(Y, trt, X, method.rsp = method.rsp, method.trt = method.trt,
               estimand = estimand)
  result <- summary(fit)
  return(result$estimates)
}
### 4: C-TMLE_scalable,Lasso-CTMLE
### : Reference : Ju et.al, 2018
install.packages('ctmle')
library(ctmle)
## C-TMLE estimation of the ATE (Ju et al., 2018): either lasso-based CTMLE
## (method == "lasso") or the scalable discrete, pre-ordered CTMLE otherwise.
## Returns the first element of summary(ctmle_fit) (the point estimate).
est_ctmle<-function(df,method="ctmle",cov_sel=c("X3","X4","X5","X6","X7","X8","X9","X10"),pre_processing=TRUE)
{ ### method = "lasso" uses CTMLE with lasso for propensity-score estimation;
  ### any other value uses scalable CTMLE (ctmleDiscrete)
  ## pre_processing: subset df to the selected covariates plus Treat/Y first
  ## cov_sel: the selected covariate set
  if(pre_processing==TRUE)
  {
    cov_set<-c(cov_sel,"Treat","Y")
    df<-df[,cov_set]}
  N<-dim(df)[1]
  ## Initial Q matrix: control-arm and treated-arm outcome means, one row per unit
  Q <- cbind(rep(mean(df$Y[df$Treat == 0]), N),
             rep(mean(df$Y[df$Treat == 1]), N))
  cov_index<-grep("X",names(df))
  ## W matrix of covariates only
  W<-as.matrix(df[,cov_index])
  Treat<-df$Treat
  Y<-df$Y
  ## NOTE(review): cv.glmnet comes from glmnet, which is not attached here --
  ## confirm the ctmle package loads it.
  if (method=="lasso")
  {glmnet_fit<-cv.glmnet(Treat,x=W,family='binomial',nlambda=20)
    ## lambda path restricted to values at or below lambda.min
    lambdas <- glmnet_fit$lambda[(which(glmnet_fit$lambda==glmnet_fit$lambda.min)):length(glmnet_fit$lambda)]
    ctmle_fit<- ctmleGlmnet(Y = Y,
                            A = Treat,
                            W = W,
                            Q = Q,
                            lambdas=lambdas,
                            ctmletype=1,
                            family="gaussian",
                            gbound=0.025,
                            V=5)}
  else {ctmle_fit<- ctmleDiscrete(Y = Y,
                                  A = Treat,
                                  W = data.frame(W),
                                  Q = Q,
                                  preOrder = TRUE,
                                  order = rev(1:length(cov_index)),
                                  detailed = TRUE)}
  result<-summary(ctmle_fit)
  ## first element of the summary holds the estimate
  return(result[[1]])
}
### 5: Super learning
install.packages("gam")
library(gam)
install.packages("tmle")
library(tmle)
install.packages("ranger")
library(ranger)
install.packages("dbarts")
library(dbarts)
## Estimate the ATE with TMLE, using Super Learner libraries for both the
## outcome model (Q) and the treatment model (g).
##
## Args:
##   df: data frame with covariates (names containing "X"), "Treat" and "Y".
##   cov_sel: covariates to keep when pre_processing is TRUE.
##   pre_processing: if TRUE, subset df to cov_sel plus Treat/Y first.
## Returns: the TMLE ATE point estimate (psi).
est_super_learn <- function(df, cov_sel = c("X3","X4","X5","X6","X7","X8","X9","X10"), pre_processing = TRUE) {
  if (pre_processing == TRUE) {
    df <- df[, c(cov_sel, "Treat", "Y")]
  }
  covariates <- df[, grep("X", names(df))]
  fit <- tmle(Y = df$Y, A = df$Treat, W = covariates,
              family = "gaussian",
              Q.SL.library = c("SL.glm",
                               "tmle.SL.dbarts2",
                               "SL.glmnet"),
              g.SL.library = c("SL.glm",
                               "tmle.SL.dbarts.k.5",
                               "SL.gam",
                               "SL.ranger"))
  fit$estimates$ATE$psi
}
|
6f93399f31ace34a084ac277a2a63a7913aa1194
|
6eb6be10dfb00975aa041b19b47ef2511808096d
|
/ExData_Plotting1-master/plot3.R
|
a9f028a54f28a5a4735ef4442b9df57e9f291806
|
[] |
no_license
|
yashika-sindhu/datasciencecoursera
|
5e72af030f83d7ba90433da32af1bc2940b50b54
|
971ceb4526935374250fa646d8722b7e94bb0ed6
|
refs/heads/master
| 2022-01-22T04:59:26.645366
| 2019-07-22T09:35:49
| 2019-07-22T09:35:49
| 116,703,715
| 0
| 1
| null | 2018-01-09T05:04:23
| 2018-01-08T16:57:01
| null |
UTF-8
|
R
| false
| false
| 1,321
|
r
|
plot3.R
|
## Download the household power consumption dataset (Coursera EDA course)
download.file(
  "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
  destfile="Electric_Power_dataset.zip"
)
## Unzip the data
unzip("Electric_Power_dataset.zip")
## Read only the two target dates; sqldf filters rows at read time
install.packages("sqldf")
library(sqldf)
my_data<-read.csv.sql(
  "household_power_consumption.txt",
  sql="select * from file where Date='1/2/2007' or Date='2/2/2007'",
  sep=";"
)
## Combine Date and Time columns into a single POSIXct Date column
my_data$Date<-as.POSIXct(paste(as.Date(my_data$Date,"%d/%m/%Y"),my_data$Time))
my_data$Time<-NULL
## Convert the datetime to its numeric (epoch-seconds) value for plotting
my_data$Date<-as.numeric(my_data$Date)
## Open the PNG graphics device
png(filename="plot3.png",width=480,height=480)
## Draw an empty frame for the sub-metering series (lines added below)
plot(
  my_data$Date,
  my_data$Sub_metering_1,
  type="n",
  ylab="Energy sub metering",
  xlab="",
  xaxt="n"
)
## NOTE(review): tick positions are hard-coded epoch seconds -- presumably
## day boundaries for this data's timezone; verify before reuse elsewhere.
axis(
  side=1,
  at=c(1170268200,1170354660,1170441060),
  labels=c("Thu","Fri","Sat")
)
## Overlay the three sub-metering series
lines(my_data$Date,my_data$Sub_metering_1,col="black")
lines(my_data$Date,my_data$Sub_metering_2,col="red")
lines(my_data$Date,my_data$Sub_metering_3,col="blue")
legend(
  "topright",
  legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  col=c("black","red","blue"),
  lty=1
)
## Close the device, writing plot3.png
dev.off()
|
1ef4e41c84bc21b658ae254b93148f3e7350a8b3
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/m2r/R/m2.R
|
116136ff31ec3cd86e60e0537b2480b807cc43ca
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,084
|
r
|
m2.R
|
#' Call and reset a Macaulay2 process
#'
#' Call and reset a Macaulay2 process
#'
#' @param port port for Macaulay2 socket
#' @param timeout number of seconds before aborting
#' @param attempts number of times to try to make connection
#' @param cloud use a cloud?
#' @param hostname the remote host to connect to; defaults to the Amazon EC2
#' instance
#' @param code Macaulay2 code
#' @param x formal argument for print method
#' @param ... ...
#' @return m2 return value
#' @name m2_call
#' @examples
#'
#' \dontrun{ requires Macaulay2
#'
#' m2("1 + 1")
#' m2.("1 + 1")
#'
#' m2("factor 32004")
#'
#' # run a chunk of m2 code, only pulling the end value back into R
#' m2("
#' R = QQ[a..d]
#' I = ideal(a^3-b^2*c, b*c^2-c*d^2, c^3)
#' G = gens gb I
#' ")
#'
#' # illustrate the persistent connection
#' m2("a = 1 + 1")
#' m2("a")
#' reset_m2()
#' m2("a")
#'
#'
#' # forcing a cloud start
#' if(has_m2_connection()) stop_m2()
#' start_m2(cloud = TRUE)
#' m2("1 + 1")
#' stop_m2()
#'
#'
#'
#' m2.("peek(QQ[x,y,z])")
#' m2("peek(QQ[x,y,z])")
#'
#' # m2 returns in its ext_str position the result of running
#' # toExternalString on the return value of the chunk of code
#' # you run. in principle, toExternalString provides the code
#' # needed to recreate the m2 object of interest. however,
#' # does not work for all objects representable in the m2 language.
#' # in particular, mutable objects are not supported.
#' # this is what happens when you look at those:
#' m2.("new MutableList from {1,2,3}")
#' m2("new MutableList from {1,2,3}")
#'
#' }
#' @export
#' @rdname m2_call
## Protocol version string used in the client/server handshake.
m2r_version_number <- function() "1.0.0"
#' @export
#' @rdname m2_call
## Hostname of the default cloud Macaulay2 instance (Amazon EC2).
m2r_cloud_url <- function() "ec2-52-10-66-241.us-west-2.compute.amazonaws.com"
#' @export
#' @rdname m2_call
has_m2_connection <- function() {
  ## TRUE when an M2 socket connection is registered in the m2r options.
  ## Idiom fix: the original if/else returning TRUE/FALSE collapses to a
  ## single negation with identical behavior.
  !is.null(get_m2_connection())
}
#' @export
#' @rdname m2_call
## Start an M2 process: locally when a Macaulay2 install is found on the
## path (and neither `cloud` nor an explicit `hostname` was given),
## otherwise on the cloud host. No-op when a connection already exists.
## Note the missing()-based dispatch: passing port/hostname explicitly
## changes behavior even when the values equal the defaults.
start_m2 <- function(
  port = 27436L, timeout = 10, attempts = 10,
  cloud = FALSE, hostname = m2r_cloud_url()
) {
  # don't increment port if supplied
  if (!missing(port)) attempts <- 1
  # if already running M2, break
  if (has_m2_connection()) return(invisible(0L))
  if(!is.null(get_m2_path()) && !cloud && missing(hostname)) { # m2 found locally
    if (do_start_m2_local(port = port, timeout = timeout, attempts = attempts) != 0L)
      stop("m2r unable to connect to local instance")
  } else { # default to cloud
    cloud_out <- do_start_m2_cloud(hostname)
    if(cloud_out == 1L) stop("m2r unable to connect to the cloud instance, are you online?")
  }
}
## Connect to an M2 instance hosted on the cloud broker at `hostname`:
## request a dedicated port from the broker, then connect to it.
## Returns invisibly: 0L on success, 1L on failure.
do_start_m2_cloud <- function(hostname = m2r_cloud_url()) {
  # if already running M2, break
  if (has_m2_connection()) return(invisible(0L))
  # launch M2 on cloud
  message("Connecting to M2 in the cloud... ", appendLF = FALSE)
  # request a per-session port from the broker (fixed broker port 27435)
  port <- request_port(hostname = hostname, port = 27435L)
  # connect to the M2 server on the assigned port
  out <- connect_to_m2_server(hostname = hostname, port = port)
  message("done.")
  # return
  invisible(out)
}
## Launch a local M2 server process on the first open port at or above
## `port` (probing up to `attempts` ports via netstat), then connect to it.
## Returns 0L on success, 1L on failure. Mac/unix only; errors on Windows.
do_start_m2_local <- function(port = 27436L, timeout = 10, attempts = 10) {
  # if already running M2, break
  if (has_m2_connection()) return(invisible(0L))
  # find the first open port by counting netstat lines that mention it
  openPortFound <- FALSE
  for (i in seq.int(0, attempts-1)) {
    out <- system(paste0("netstat -an | grep '[\\.:]", port, "' | wc -l"), intern = TRUE)
    if (as.integer(out) == 0) break()
    # openPortFound <- TRUE
    # tempservercon <- NULL
    # tryCatch(
    #   tempservercon <- suppressWarnings(
    #     socketConnection(
    #       port = port, blocking = FALSE,
    #       server = TRUE, open = "r+", timeout = 1
    #     )
    #   ),
    #   error = function(e) { openPortFound <- FALSE }
    # )
    # if (!is.null(tempservercon)) close(tempservercon)
    # if (openPortFound) break()
    if (i == attempts - 1) {
      message(sprintf("%s attempts made at finding an open port. Aborting start_m2", attempts))
      return(invisible(1L))
    } else {
      # message(sprintf("Unable to connect to M2 on port %s. Attempting to connect on port %s", port, port + 1))
      port <- port + 1
    }
  }
  # launch the M2 server script as a detached background process
  message("Starting M2... ", appendLF = FALSE)
  if(is.mac() || is.unix()) {
    system2(
      file.path2(get_m2_path(), "M2"), args = c("--script", system.file("server", "m2rserverscript.m2", package = "m2r"), toString(port)),
      stdout = NULL, stderr = NULL, stdin = "",
      wait = FALSE
    )
  } else if(is.win()) {
    stop("Running local instances of M2 is not yet supported.")
  }
  # connect to local server
  out <- connect_to_m2_server(port = port, timeout = timeout)
  message("done.")
  # record the server's process id in the m2r global options (used by stop_m2)
  if (out == 0) set_m2r_option(m2_procid = strtoi(m2("processID()")))
  # return
  out
}
## Ask the cloud broker at `hostname`:`port` for a dedicated M2 port.
## Returns the assigned port number as an integer; errors when the broker
## is unreachable or replies "0" (cloud full).
request_port <- function(
  hostname = m2r_cloud_url(),
  port = 27435L, timeout = 10
) {
  # initialize client socket, retrying for roughly `timeout` seconds
  con <- NULL
  for (i in 0:(20*timeout)) {
    tryCatch(
      con <- suppressWarnings(
        socketConnection(
          host = hostname, port = port,
          blocking = FALSE, server = FALSE,
          open = "r+", timeout = 60*60*24*7
        )
      ),
      error = function(e) { }
    )
    if (!is.null(con)) { break } else { Sys.sleep(0.05) }
  }
  if (is.null(con)) stop("m2r unable to connect to the cloud instance, are you online?")
  # poll for the broker's single-line reply holding the port number
  repeat {
    # read output info
    port_number <- readLines(con, 1)
    if (length(port_number) > 0) break
    i <- i + 1
    if (timeout > 0 && i >= timeout * 2000) {
      break
    } else {
      Sys.sleep(0.0005)
    }
  }
  close(con)
  if (length(port_number) == 0 || port_number == "0") {
    stop(sprintf("Macaulay2 cloud is full; please try again later."))
  }
  return(strtoi(port_number))
}
## Open a client socket to an m2r server, wait for its version handshake,
## and register the connection in the m2r options. Returns invisible(0L)
## on success, invisible(1L) when no connection could be made; errors on a
## client/server version mismatch.
connect_to_m2_server <- function(hostname = "localhost", port = 27436L, timeout = 10) {
  # initialize client socket, retrying for roughly `timeout` seconds
  con <- NULL
  for (i in 0:(20*timeout)) {
    tryCatch(
      con <- suppressWarnings(
        socketConnection(
          host = hostname, port = port,
          blocking = FALSE, server = FALSE,
          open = "r+", timeout = 60*60*24*7
        )
      ),
      error = function(e) { }
    )
    if (!is.null(con)) {
      break
    } else {
      Sys.sleep(0.05)
    }
  }
  if (is.null(con)) return(invisible(1L))
  # poll for the server's version announcement (a single line)
  repeat {
    # read output info
    server_version <- readLines(con, 1)
    if (length(server_version) > 0) break
    i <- i + 1
    if (timeout > 0 && i >= 2000*timeout) {
      break
    } else {
      Sys.sleep(0.0005)
    }
  }
  # refuse mismatched client/server protocol versions
  if (server_version != m2r_version_number()) {
    close(con)
    stop(sprintf("Internal error: server version is %s and client version is %s.",
                 server_version, m2r_version_number()))
  }
  # register the live connection in the m2r options
  set_m2r_option(
    m2_con = con,
    m2_port = port,
    m2_timeout = timeout
  )
  set_m2r_option(m2_procid = -1L)
  invisible(0L)
}
#' @export
#' @rdname m2_call
## Shut down the M2 session: send the empty-line kill code, close the
## socket, then force-kill the recorded process id as a safety net, and
## clear the registered connection/procid options.
stop_m2 <- function() {
  if (has_m2_connection()) { # for detaching when m2 never run
    # send kill code (an empty line tells the server to exit)
    writeLines("", get_m2_connection())
    close(get_m2_connection())
    # not elegant, but a necessary safety measure
    Sys.sleep(0.01)
    # procid is -1 for cloud sessions, so pskill only fires for local ones
    if (get_m2_procid() >= 0) tools::pskill(get_m2_procid())
    set_m2r_option(m2_con = NULL, m2_procid = NULL)
  }
}
#' @export
#' @rdname m2_call
## Restart the M2 session: stop any running process, then start a new one
## with the given settings.
reset_m2 <- function(
  port = 27436L, timeout = 10, attempts = 10,
  hostname = "ec2-52-10-66-241.us-west-2.compute.amazonaws.com"
) {
  stop_m2()
  ## BUG FIX: hostname was previously passed positionally, where it landed in
  ## start_m2()'s fourth parameter `cloud` (a logical), breaking the restart.
  ## Pass arguments by name, and forward hostname only when the caller gave
  ## one, because start_m2() uses missing(hostname) to choose local vs cloud.
  if (missing(hostname)) {
    start_m2(port = port, timeout = timeout, attempts = attempts)
  } else {
    start_m2(port = port, timeout = timeout, attempts = attempts, hostname = hostname)
  }
}
#' @export
#' @rdname m2_call
## Run M2 code and return only the toExternalString representation of the
## result (the "ext_str" metadata of the pointer returned by m2.()).
m2 <- function(code, timeout = -1) {
  # match.call forwards exactly the arguments the caller supplied, so
  # m2.()'s own defaults apply for anything omitted here
  m2_meta(do.call(m2., as.list(match.call())[-1]), "ext_str")
}
#' @export
#' @rdname m2_call
## Run M2 code over the persistent socket and return an m2_pointer object.
## Protocol: send the code, then poll for a one-line header of the form
## "<retcode> <numlines> <name> <class> <class-of-class>", then read
## `numlines` lines of toExternalString output.
m2. <- function(code, timeout = -1) {
  # ensure m2 is running
  start_m2()
  # preempt m2 kill code (an empty line would shut the server down)
  if (code == "") return("")
  # write to connection
  writeLines(code, get_m2_connection())
  i <- 0
  outinfo <- NULL
  # poll for the header line until it arrives or the timeout elapses
  repeat {
    # read output info
    outinfo <- readLines(get_m2_connection(), 1)
    if (length(outinfo) > 0) break
    i <- i + 1
    if (timeout > 0 && i >= timeout * 2000) {
      break
    } else {
      Sys.sleep(0.0005)
    }
  }
  if (length(outinfo) > 0) {
    # parse the space-separated header fields
    info <- strsplit(outinfo, " ", fixed = TRUE)[[1]]
    retcode <- strtoi(info[1])
    numlines <- strtoi(info[2])
    m2_name <- info[3]
    m2_class <- info[4]
    m2_class_class <- info[5]
  } else {
    # timed out: interrupt the running M2 command
    tools::pskill(get_m2_procid(), tools::SIGINT)
    Sys.sleep(0.01)
    retcode <- -1L
    numlines <- -1L
  }
  output <- paste(readLines(get_m2_connection(), numlines), collapse = "\n")
  if (retcode == -1L) {
    # timeout occurred, kill M2 instance and stop
    stop_m2()
    # start_m2(getOption("m2_port"), getOption("m2_timeout"))
    stop("Command timed out, M2 connection lost")
  } else if (retcode == 1L) {
    # user's code string errored, alert them
    stop(output, call. = FALSE)
  } else if (retcode == 2L) {
    # toExternalString failed, make ext_str NULL
    output <- NULL
  }
  # assemble pointer object and return
  m2_structure(
    m2_name = m2_name,
    m2_class = "m2_pointer",
    m2_meta = list(
      ext_str = output,
      m2_class = m2_class,
      m2_class_class = m2_class_class
    )
  )
}
#' @export
#' @rdname m2_call
## S3 print method for m2_pointer objects: shows the (possibly truncated)
## external string plus the M2-side name and class.
## NOTE(review): str_c/str_sub are stringr functions -- confirm the package
## imports stringr.
print.m2_pointer <- function (x, ...) {
  cat("M2 Pointer Object\n")
  if(is.null(m2_meta(x, "ext_str"))) m2_meta(x, "ext_str") <- ""
  # available width for the string: console width (capped at 80) minus label
  w <- min(c(options()$width, 80), na.rm = TRUE) - 19
  # truncate long external strings with a trailing ellipsis
  if(nchar(m2_meta(x, "ext_str")) > w) {
    ext_str <- str_c(str_sub(m2_meta(x, "ext_str"), 1, w-4), "...")
  } else {
    ext_str <- m2_meta(x, "ext_str")
  }
  cat(sprintf(" ExternalString : %s\n", ext_str))
  # cat(sprintf(" R Name : %s\n", deparse(substitute(x))))
  cat(sprintf(" M2 Name : %s\n", m2_name(x)))
  cat(sprintf(" M2 Class : %s (%s)\n", m2_meta(x, "m2_class"), m2_meta(x, "m2_class_class")))
  invisible(x)
}
|
cbaa348202ec817b6be595e341250f836d5d66e3
|
2a066a86ddbc4546381397171f7f7483251f9990
|
/R/reg.diff1M.R
|
7b00b6ffd33f1bd805d869576ae589266de98b91
|
[] |
no_license
|
cran/clogitLasso
|
1df71301a875bfb1474e28a35dbeb0761a7156c4
|
4d8f4af9c9f3fc76682012f15a7f3a919cfb9008
|
refs/heads/master
| 2020-02-26T14:48:46.306401
| 2018-06-27T20:34:49
| 2018-06-27T20:34:49
| 64,686,532
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,250
|
r
|
reg.diff1M.R
|
## L1-penalized conditional logistic regression for 1:M matched case-control
## data, fit by IRLS with lasso shooting over a path of penalty fractions.
## Each stratum must contain 1 case followed by its control(s); rows are
## differenced (case minus each control) before fitting.
## Returns a list with the coefficient path (beta), the penalty path
## (fraction), the per-fraction count of nonzero coefficients (nz), the last
## IRLS weights (W), the raw design (x_rec) and the call arguments (arg).
reg.diff1M = function(X,
                      y,
                      strata,
                      fraction = NULL,
                      nbfraction = 100,
                      nopenalize = NULL,
                      BACK = TRUE,
                      standardize = FALSE,
                      maxit = 100,
                      maxitB = 500,
                      thr = 1e-10,
                      tol = 1e-10,
                      epsilon = 0.0001,
                      trace = TRUE,
                      coefnuls = FALSE,
                      log = TRUE,
                      adaptive = FALSE,
                      separate = FALSE,
                      ols = FALSE,
                      p.fact = NULL,
                      remove = FALSE) {
  # IRLS + lasso-shooting algorithm
  #require(data.table)
  # keep raw inputs so they can be returned untouched
  x_rec = X
  y_rec = y
  strata_rec = strata
  if (length(y) != nrow(X))
    stop("Please ensure that each observation has predictors and response")
  if (missing(strata)) {
    stop("'strata' is missing")
  } else{
    # within each stratum, case response minus each control response must be 1
    y1 = aggregate(as.data.frame(y), by = list(strata), function(u)
      u[1] - u[-1])$y
    if (any(y1 != 1))
      stop("Response vector should be 1 case and 1 control in each strata, starting by the case")
  }
  ust <- unique(strata)
  # require(foreach)
  # difference each control row from its stratum's case row
  x <- foreach(i = ust, .combine = rbind) %do% apply(X[strata == i, ], 2, function(u)
    u[1] - u[-1])
  # M = number of controls per case (assumes it is constant across strata)
  M <- as.numeric(table(strata)[1] - 1)
  # CHANGE of strata: one label per differenced row
  strata <- sort(rep(ust, M))
  # strata index lookup: labels plus row indices per stratum
  strata_ust <- as.vector(ust)
  strata_freq <- list(c())
  for (i in strata_ust) {
    strata_freq[[i]] <- which(strata==i)
  }
  strata_table <- list(strata_ust, strata_freq)
  x <- as.matrix(x)
  d <- dim(x)
  n <- d[1]
  m <- d[2]
  N <- length(ust)
  # library(lassoshooting)
  # stabilize/standardize columns by their population standard deviation
  if (standardize) {
    sds = stand1M(x, n)
    x <- x / matrix(sds, nrow(x), ncol(x), byrow = T)
  }
  # compute the regularization path unless one was supplied
  if (is.null(fraction)) {
    if (missing(nbfraction)) {
      nbfraction = 100
    }
    fraction = frac1M(
      epsilon = epsilon,
      log = log,
      x = x,
      n = n,
      m = m,
      nbfraction = nbfraction,
      M = M
    )
  } else{
    nbfraction = length(fraction)
  }
  if (adaptive & is.null(p.fact)) {
    warning("p.fact required")
  }
  # estimate coefficients for each regularization parameter
  nb_coef_non_nuls <- c()
  betanew <- matrix(0, nbfraction, m)
  for (i in (1:nbfraction)) {
    if (trace) {
      if (mod1M(i, 40) == 0) {
        cat("fraction ", i, "\n")
      }
    }
    # warm start: reuse the previous fraction's solution
    if (i == 1) {
      betaold <- rep(0, m)
    } else {
      betaold <- betanew[(i - 1), ]
    }
    a = 0
    fold = likelihood.diff1M(x, betaold, strata_table)
    # IRLS loop
    while (a < maxit) {
      a <- a + 1
      z <- rep(0, n)
      # working response z and IRLS weights lambda
      x_local <- exp(-x %*% betaold)
      sumbys_local_T <- (1 + sumbys(x_local, strata_table, r = T))
      z <- x %*% betaold + sumbys_local_T
      lambda <- x_local / ( sumbys_local_T ^ 2)
      rm(x_local)
      # weighted design/response for the lasso-shooting subproblem
      lambda_vector_sqrt <- sqrt(as.vector(lambda))
      X <- lambda_vector_sqrt * x
      Y <- lambda_vector_sqrt * z
      rm(z, lambda_vector_sqrt)
      if (adaptive == TRUE) {
        # adaptive lasso: per-coefficient penalty weights p.fact
        gamma = (
          lassoshooting(
            X = X,
            y = Y,
            lambda = fraction[i],
            penaltyweight = p.fact,
            thr = thr,
            nopenalize = nopenalize
          )
        )$coefficient
        rm(X)
        rm(Y)
      } else {
        # plain lasso: pass the cross-product form to save memory
        XtX <- t(X) %*% X
        Xty <- t(X) %*% Y
        rm(X)
        rm(Y)
        gamma = (
          lassoshooting(
            XtX = XtX,
            Xty = Xty,
            lambda = fraction[i],
            thr = thr,
            nopenalize = nopenalize
          )
        )$coefficient
        rm(XtX)
        rm(Xty)
      }
      # backtracking line search on the conditional likelihood
      if (BACK) {
        step <- gamma - betaold
        t <- 1
        delta <- grad.diff1M(x, betaold, strata_table) %*% step
        likelihood_old <- likelihood.diff1M(x, betaold, strata_table)
        for (l in (1:maxitB)) {
          gamma <- betaold + t * step
          if (likelihood.diff1M(x, gamma, strata_table) <= ( likelihood_old + 0.3 * t * delta))
            break
          t <- 0.9 * t
        }
        betanew[i, ] <- (1 - t) * betaold + t * gamma
      } else{
        betanew <- gamma
      }
      # stop the IRLS loop on relative likelihood change below tol
      fnew <- likelihood.diff1M(x, betanew[i, ], strata_table)
      criteria3 <- abs(fnew - fold) / abs(fnew)
      if (criteria3 < tol)
        break
      betaold <- betanew[i, ]
      fold <- fnew
    }
    nb_coef_non_nuls[i] <- sum(betanew[i, ] != 0)
  }
  dimnames(betanew)[2] <- dimnames(x)[2]
  # undo standardization on the nonzero coefficients
  if (standardize) {
    for (i in seq(length(fraction)))
      betanew[i, betanew[i, ] != 0] <- betanew[i, betanew[i, ] != 0] / sds[betanew[i, ] != 0]
  }
  list(
    beta = betanew,
    fraction = fraction,
    nz = nb_coef_non_nuls,
    W = lambda,
    x_rec = x_rec,
    arg = list(
      y = y_rec,
      strata = strata_rec,
      standardize = standardize,
      fraction = fraction,
      nopenalize = nopenalize,
      adaptive = adaptive,
      separate = separate,
      ols = ols,
      maxit = maxit,
      maxitB = maxitB,
      thr = thr,
      tol = tol,
      epsilon = epsilon,
      log = log,
      trace = trace,
      p.fact = p.fact
    )
  )
}
#== Sum the entries of x within each stratum. When r = TRUE the per-stratum
#== sums are repeated so the result aligns row-for-row with x (assumes equal
#== stratum sizes); when r = FALSE a one-column matrix of sums is returned.
sumbys <- function(x, strata_table, r = TRUE) {
  labels <- strata_table[[1]]
  index_by_stratum <- strata_table[[2]]
  totals <- matrix(NA, nrow = length(labels), ncol = 1)
  for (k in seq_along(labels)) {
    totals[k, 1] <- sum(x[index_by_stratum[[labels[k]]]])
  }
  if (r) {
    totals <- rep(totals, each = length(x) / length(labels))
  }
  return(totals)
}
#=== Sigmoid under the conditional-logistic differencing:
#=== 1 / (1 + per-stratum sum of exp(-x %*% beta)).
sigmoid1M <- function(x, beta, strata_table, r = TRUE) {
  denom <- 1 + sumbys(exp(-x %*% beta), strata_table, r = r)
  1 / denom
}
#=== Negative conditional log-likelihood of the differenced model:
#=== sum over strata of log(1 + sum exp(-x %*% beta)).
likelihood.diff1M <- function(x, beta, strata_table) {
  eta <- exp(-x %*% beta)
  sum(log(1 + sumbys(eta, strata_table, r = FALSE)))
}
#=== Gradient of the conditional log-likelihood with respect to beta.
grad.diff1M <- function(x, beta, strata_table) {
  eta <- exp(-x %*% beta)
  u <- eta / (1 + sumbys(eta, strata_table))
  as.vector(-t(x) %*% u)
}
#=== Regularization path: nbfraction penalty values descending from fracmax
#=== (the max absolute score at beta = 0) to fracmax * epsilon, spaced on a
#=== log or linear scale. M is the number of controls per stratum.
frac1M <- function(epsilon, log, x, n, m, nbfraction, M) {
  fracmax <- max(abs(t(x) %*% rep(1 / (M + 1), n)))
  fracmin <- fracmax * epsilon
  if (log == TRUE) {
    exp(seq(from = base::log(fracmax), to = base::log(fracmin), length.out = nbfraction))
  } else {
    seq(from = fracmax, to = fracmin, length.out = nbfraction)
  }
}
#=== Column scaling factors: population (divide-by-n) standard deviations,
#=== with zero-variance columns mapped to 1 so dividing by them is a no-op.
stand1M <- function(x, n) {
  pop_var <- apply(x, 2, var) * (n - 1) / n
  pop_var[pop_var == 0] <- 1
  sqrt(pop_var)
}
## x modulo m. Idiom fix: the original floor-based computation
## (x - floor(x / m) * m) is exactly R's built-in %% operator, which also
## uses floored division, so results match for all sign combinations.
mod1M <- function(x, m) {
  x %% m
}
|
edb87ccfa6f669ae30735a8a7a295929e97ff725
|
50828550091c6c7cd646e28b07466cde80055d9d
|
/absences.r
|
1b9dc6421a9e9db1842574857a88fda0ca645fcf
|
[] |
no_license
|
DanielNery/summary-measures-two-dimensional-analysis-ifsp
|
390345d02df33f330c5d988c345b9c52c63c22f2
|
46a58c04a561fc0947a0f3bc03d03fa462a497d0
|
refs/heads/main
| 2023-02-15T04:29:50.529732
| 2020-12-30T00:18:21
| 2020-12-30T00:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,613
|
r
|
absences.r
|
# Read the student sample data file (semicolon-separated)
data_frame <- read.csv2('~/Documentos/mat_estudantes.csv', sep = ";")
# Number of absences per student
absences = data_frame$absences
# Draw a histogram of student absences (with percentage frequencies printed
# to the console) and a matching horizontal boxplot underneath, sharing one
# layout. Called for its plotting side effects.
ausencias = function (absences) {
  # Layout matrix: panel 1 (histogram) on top, panel 2 (boxplot) below.
  mat <- matrix(c(1, 2))
  print(mat)
  # Apply the layout; the histogram row is 2.5x taller than the boxplot row.
  layout(mat, c(1,1), c(2.5, 1))
  # Largest observed value, used as the shared x-axis limit.
  topox=ceiling(max(absences))
  print(topox)
  # Margins for the histogram panel.
  par(mar=c(0, 5, 2, 1))
  # Pre-compute the histogram (bins of width 2 from 1 to topox) without plotting.
  b <- hist (absences, breaks=c(0, seq(1, topox, 2)), include.lowest = TRUE, right = FALSE, plot= FALSE)
  # Tallest bin count, used for the y-axis ticks.
  topoy = max(c(b$counts))
  # Percentage frequency of each bin (printed, not plotted).
  porcent=round((c(b$counts/length(absences))*100), 2)
  print(porcent)
  # Draw the histogram of absences (labels kept in Portuguese on purpose).
  hist (absences,
        breaks=c(0, seq(1,topox, 1)),
        include.lowest = TRUE,
        right = FALSE,
        xlim = c(0, topox),
        ylab = "Frequência",
        main = "Abstenções",
        col = "green",
        axes = FALSE,
        density = 20)
  # Manually drawn axes.
  axis(1, at=seq(0,topox,by=5))
  axis(2, at=seq(0,topoy,by=15))
  # Margins for the boxplot panel.
  par(mar=c(0, 5, 0, 1))
  # Horizontal boxplot of the same variable, aligned with the histogram.
  c <-boxplot(absences, horizontal = TRUE,
              outline = FALSE,
              xlim =c(0, 2),
              ylim = c(0, topox),
              col = "green",
              axes=FALSE)
}
ausencias(absences)
|
7c1471360ad3ff916912551dbd8b09abf7805650
|
24126baa896ba65e54afc16e7ed5a002de9d1173
|
/R/user_mod_ui.R
|
673e6efced422a394886759fd278aed2bd5bae62
|
[] |
no_license
|
ashbaldry/reddit-analysis-app
|
9dd52c6dd1dba0a4fe471f7fa09cff8eac4a570e
|
d9a64de7e1b41f284f757ed68dff8ec169a7caca
|
refs/heads/master
| 2023-08-28T01:48:08.659491
| 2021-11-02T18:37:22
| 2021-11-02T18:37:22
| 348,464,225
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,748
|
r
|
user_mod_ui.R
|
## Shiny module UI for the Reddit user page: a Semantic UI grid holding a
## user card (icon, name, total karma, cake day), a subscribed-subreddits
## statistic, a four-way karma breakdown, and a karma time-series chart with
## a Post/Comment toggle. All input/output ids are namespaced via NS(id).
user_page_ui <- function(id) {
  ns <- NS(id)
  div(
    class = "ui container",
    div(
      class = "ui stackable grid padded-grid",
      # Row 1: user card + subreddit count (left), karma breakdown (right)
      div(
        class = "two column stretched row",
        div(
          class = "column",
          div(
            class = "ui horizontal fluid card",
            uiOutput(class = "image", ns("user_icon")),
            div(
              class = "content",
              # text outputs are rendered server-side into these ids
              div(class = "header shiny-text-output", id = ns("user_name")),
              div(
                class = "meta",
                div(
                  reddit_karma_icon("banner-karma-icon"),
                  span(class = "shiny-text-output", id = ns("total_karma")), "karma"
                ),
                div(
                  tags$i(class = "birthday cake icon cake-day-icon"),
                  span(class = "date shiny-text-output", id = ns("cake_day"))
                )
              )
            )
          ),
          reddit_segment(
            div(
              class = "ui one statistics",
              div(
                class = "statistic",
                div(id = ns("n_subreddit"), class = "shiny-text-output value"),
                div(class = "label", "Subreddits Subscribed To")
              )
            )
          )
        ),
        div(
          class = "column",
          reddit_segment(
            div(
              class = "ui two stackable statistics",
              div(
                class = "statistic",
                div(id = ns("post_karma"), class = "shiny-text-output value"),
                div(class = "label", "Submission Karma")
              ),
              div(
                class = "statistic",
                div(id = ns("comm_karma"), class = "shiny-text-output value"),
                div(class = "label", "Comment Karma")
              ),
              div(
                class = "statistic",
                div(id = ns("awarder_karma"), class = "shiny-text-output value"),
                div(class = "label", "Awarder Karma")
              ),
              div(
                class = "statistic",
                div(id = ns("awardee_karma"), class = "shiny-text-output value"),
                div(class = "label", "Awardee Karma")
              )
            )
          )
        )
      ),
      # Row 2: karma history chart with a Post/Comment toggle
      div(
        class = "one column row",
        div(
          class = "column",
          reddit_segment(
            span(
              tags$label("Post"),
              shiny.semantic::checkbox_input(ns("karma_toggle"), "Comment", is_marked = FALSE, type = "toggle")
            ),
            highcharter::highchartOutput(ns("karma_plt"), height = "335px")
          )
        )
      )
    )
  )
}
|
14c54616f15c22df9f02fd63e50b444eef4a0fb9
|
38e22dcf20dd6e9b2cd745c1871c318b238c27ca
|
/R/spml.R
|
b7f7af8bdaed9ff2d89fd6322e50595bd229d3a8
|
[] |
no_license
|
cran/splm
|
5f6d21bf534ea13dd96e371a41f8f2e698cb2ba0
|
62b73c011ed69da35c92d4cf713feeb8871bb9d1
|
refs/heads/master
| 2023-08-02T12:21:20.641346
| 2023-07-20T16:00:02
| 2023-07-20T17:31:05
| 17,700,055
| 10
| 9
| null | 2017-12-05T04:27:11
| 2014-03-13T06:31:08
|
R
|
UTF-8
|
R
| false
| false
| 3,018
|
r
|
spml.R
|
## Wrapper for all maximum-likelihood spatial panel estimators: dispatches
## to spfeml() for fixed-effects ("within") models and spreml() for random
## effects and pooling, mapping `lag`/`spatial.error`/`effect` onto the
## model codes those functions expect. Tags the result with class "splm_ML".
spml <- function(formula, data, index=NULL, listw, listw2=listw, na.action,
                 model=c("within","random","pooling"),
                 effect=c("individual","time","twoways"),
                 lag=FALSE, spatial.error=c("b","kkp","none"),
                 ...) {
  ## wrapper function for all ML models
  ## record call
  cl <- match.call()
  ## coerce a matrix weights argument to a listw; anything else errors
  ## NOTE(review): mat2listw comes from spdep -- confirm it is imported.
  checklw <- function(x) {
    if(!("listw" %in% class(x))) {
      x <- x
      if("matrix" %in% class(x)) {
        #require(spdep)
        x <- mat2listw(x)
      }
      else {
        stop("'listw' has to be either a 'listw' or a 'matrix' object")
      }}
    # }
    return(x)
  }
  listw <- checklw(listw)
  listw2 <- checklw(listw2)
  ## dimensions check is moved downstream
  ##added by gpiras on November 25, 2015 for consistency with the test bsk
  ## removed by the_sculler on Jan 8 2016 because bsktest() never calls spml()
  #if(model == 'pooling' && spatial.error == 'b' && lag ==FALSE){
  #
  #  res <- spfeml(formula=formula, data=data, index=index,
  #                listw=listw, listw2=listw2, na.action,
  #                model = 'error', effects = "pooling",
  #                cl=cl, ...)
  #}
  #else{
  switch(match.arg(model), within={
    ## fixed effects: choose sarar/lag/error depending on lag + error spec
    if(lag) {
      model <- switch(match.arg(spatial.error),
                      b="sarar",
                      kkp="sarar",
                      none="lag")
    } else {
      model <- switch(match.arg(spatial.error),
                      b="error",
                      kkp="error",
                      none="plm")
      if(model == "plm") stop("No spatial component, use plm instead")
      ## put call to plm() here, fetch results
      ## and suitably transform them for compliance
    }
    effects <- switch(match.arg(effect), individual="spfe",
                      time="tpfe", twoways="sptpfe")
    res <- spfeml(formula=formula, data=data, index=index,
                  listw=listw, listw2=listw2, na.action,
                  model=model, effects=effects,
                  cl=cl, ...)
  }, random={
    ## random effects: only individual effects are implemented
    switch(match.arg(effect),
           time={stop("time random effects not implemented")},
           twoways={stop("twoway random effects not implemented")},
           individual={
             errors <- switch(match.arg(spatial.error),
                              b="semre", kkp="sem2re", none="re")})
    res <- spreml(formula=formula, data=data, index=index,
                  w=listw2mat(listw), w2=listw2mat(listw2),
                  lag=lag, errors=errors, cl=cl, ...)
  }, pooling={
    errors <- switch(match.arg(spatial.error),
                     b="sem", kkp="sem", none="ols")
    res <- spreml(formula=formula, data=data, index=index,
                  w=listw2mat(listw), w2=listw2mat(listw2),
                  lag=lag, errors=errors, cl=cl, ...)
  })
  #}
  #print(class(res))
  ## tag the result so splm's ML-specific methods dispatch on it
  class(res) <- c(class(res), "splm_ML")
  #print(class(res))
  return(res)
}
|
019dd98d8de9bb6633306d44854c7f39cde672d0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TeachingSampling/examples/PikPPS.rd.R
|
b00bfb055bde3743b91217d666983d4a0728cedd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,392
|
r
|
PikPPS.rd.R
|
library(TeachingSampling)
### Name: PikPPS
### Title: Inclusion Probabilities in Proportional to Size Sampling Designs
### Aliases: PikPPS
### Keywords: survey
### ** Examples
############
## Example 1: naive pi-PS probabilities can exceed 1; PikPPS truncates them
############
x <- c(30,41,50,170,43,200)
n <- 3
# Two elements yield values bigger than one under the naive n*x/sum(x) rule
n*x/sum(x)
# With this function, all of the values are between zero and one
PikPPS(n,x)
# The sum of the inclusion probabilities equals the sample size n
sum(PikPPS(n,x))
############
## Example 2: a labelled population of size N = 5
############
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# The auxiliary (size) information
x <- c(52, 60, 75, 100, 50)
# Gives the inclusion probabilities for the population according to a
# proportional-to-size design without replacement of size n=4
pik <- PikPPS(4,x)
pik
# The inclusion probabilities sum to the sample size n = 4
sum(pik)
############
## Example 3: pi-PS probabilities for the Lucy population
############
# Uses the Lucy data to compute the vector of inclusion probabilities
# according to a pi-PS without-replacement design
data(Lucy)
attach(Lucy)
# The sample size
n=400
# The selection probability of each unit is proportional to the variable Income
pik <- PikPPS(n,Income)
# The inclusion probabilities of the units in the sample
pik
# The sum of the values in pik is equal to the sample size
sum(pik)
# According to the design some elements must be selected (pik == 1);
# they are called forced inclusion units
which(pik==1)
|
e71c84ab2a474ad7b4f97dc986e18bf86f31187d
|
0f104ea64886750d6c5f7051810b4ee39fa91ba9
|
/inst/test-data/specific-redcapr/read-oneshot/specify-fields-without-record-id.R
|
d01b1c6dfa2021b048883f83cb76040a1823d2aa
|
[
"MIT"
] |
permissive
|
OuhscBbmc/REDCapR
|
3ca0c106e93b14d55e2c3e678f7178f0e925a83a
|
34f2154852fb52fb99bccd8e8295df8171eb1c18
|
refs/heads/main
| 2023-07-24T02:44:12.211484
| 2023-07-15T23:03:31
| 2023-07-15T23:03:31
| 14,738,204
| 108
| 43
|
NOASSERTION
| 2023-09-04T23:07:30
| 2013-11-27T05:27:58
|
R
|
UTF-8
|
R
| false
| false
| 800
|
r
|
specify-fields-without-record-id.R
|
# Expected-value test fixture (REDCapR read-oneshot tests): a dput()-style
# literal of a readr spec_tbl_df holding three fields (name_first, address,
# interpreter_needed) for five records, with its col_spec attribute attached.
# NOTE(review): appears to be parsed as a single R expression by the test
# harness; comments here are ignored by the parser -- confirm if the harness
# compares raw file text instead.
structure(list(name_first = c("Nutmeg", "Tumtum", "Marcus", "Trudy",
"John Lee"), address = c("14 Rose Cottage St.\nKenning UK, 323232",
"14 Rose Cottage Blvd.\nKenning UK 34243", "243 Hill St.\nGuthrie OK 73402",
"342 Elm\nDuncanville TX, 75116", "Hotel Suite\nNew Orleans LA, 70115"
), interpreter_needed = c(0, 0, 1, NA, 0)), row.names = c(NA,
-5L), spec = structure(list(cols = list(name_first = structure(list(), class = c("collector_character",
"collector")), address = structure(list(), class = c("collector_character",
"collector")), interpreter_needed = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"))
|
1f0b35f546810a5a95a3172278d42fb02b32a2bd
|
9655ed9c073e16922159846964374adf934019ac
|
/plot3.R
|
3b3c98d6046c782b366b60078d680af2befb9cfe
|
[] |
no_license
|
Rub123/ExData_Plotting1
|
49fa294e040c2c6f1cb8551f4f2a089b10c77209
|
de131578b06b3fdd4deea553ba0598da6af7de96
|
refs/heads/master
| 2020-12-02T17:55:52.422727
| 2017-07-09T14:25:59
| 2017-07-09T14:25:59
| 96,449,470
| 0
| 0
| null | 2017-07-06T16:16:42
| 2017-07-06T16:16:42
| null |
UTF-8
|
R
| false
| false
| 1,542
|
r
|
plot3.R
|
library(tidyverse)

# Locations of the UCI household power consumption archive and its contents.
data_url  <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
data_file <- "household_power_consumption.txt"
zip_file  <- "exdata_data_household_power_consumption.zip"

# Make sure the raw text file is available: download the archive only if it
# is missing, then extract it.
if (!file.exists(data_file)) {
  if (!file.exists(zip_file)) {
    download.file(data_url, zip_file)
  }
  unzip(zip_file)
}

## Sys.setlocale("LC_TIME", "English") ## so that the days on the x-axis will be in English

# Read the full data set; "?" marks missing values.  First two columns are
# Date and Time strings, the remaining seven are numeric measurements.
power <- read.table(data_file, header = TRUE, sep = ";", na.strings = "?",
                    colClasses = c("character", "character", rep("numeric", 7)))

# Combine Date and Time into a single POSIXct timestamp, then keep only the
# two days of interest (2007-02-01 and 2007-02-02).
power$Date <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
power <- subset(power, Date >= "2007-02-01 00:00:00" & Date <= "2007-02-02 23:59:00")

# Draw the three sub-metering series into a 480x480 PNG.
png("plot3.png", width = 480, height = 480)
plot(power$Date, power$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
lines(power$Date, power$Sub_metering_1)                # default colour: black
lines(power$Date, power$Sub_metering_2, col = "red")
lines(power$Date, power$Sub_metering_3, col = "blue")
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
3f0cedbfd643caaadf39022407e93d847ced8aaf
|
6fa24cca5ba1dc15f9f206beb97c260786efe732
|
/script/old/Distance-1028.R
|
dbd0b53e366b8c94e081c1093c3bf6909cda7f71
|
[] |
no_license
|
wetinhsu/Macaca-population-trend
|
fe14e5dfa6290b4a982175feae290053c60446b9
|
a0e738ec817ae70b8ea042eeb7b64df4c9b5cb10
|
refs/heads/master
| 2023-07-19T12:38:45.198058
| 2023-07-11T01:13:59
| 2023-07-11T01:13:59
| 190,343,813
| 0
| 0
| null | 2019-06-05T07:07:49
| 2019-06-05T07:07:49
| null |
UTF-8
|
R
| false
| false
| 2,279
|
r
|
Distance-1028.R
|
library(Distance)
library(data.table)
library(magrittr)
library(ggplot2)
library(readxl)

# NOTE(review): hard-coded working directory retained from the original
# analysis script; prefer relative paths / here::here() for portability.
setwd("D:/R/test/Macaca-population-trend")

# Load the 2015-2019 survey records, keep detections with Distance < 20,
# re-base Year so the first survey year becomes 1, and map each county name
# to one of four regions (North/Center/South/East).
M.data <-
  read_xlsx("data/clean/data_for_analysis_1519.xlsx") %>%
  setDT %>%
  .[ Distance <20 , ] %>%
  .[, Year := as.numeric(Year)] %>%
  .[, Year.re := Year - min(Year) + 1] %>%
  .[County %in% list("宜蘭縣","基隆市","台北市","臺北市",
                     "新北市","台北縣","臺北縣",
                     "桃園縣","桃園市","新竹市",
                     "新竹縣","苗栗縣"), Region := "North"] %>%
  .[County %in% list("台中市","臺中市",
                     "台中縣","臺中縣",
                     "彰化縣","南投縣","南投市",
                     "雲林縣","嘉義縣","嘉義市"), Region := "Center"] %>%
  .[County %in% list("台南市","臺南市",
                     "台南縣","臺南縣",
                     "高雄縣","高雄市",
                     "屏東縣"), Region := "South"]%>%
  .[County %in% list("花蓮縣",
                     "台東縣","臺東縣"), Region := "East"]

# Two-band detection data: distance band "A" -> 25, "B" -> 100.
# BUG FIX: the original piped from `df`, which is undefined here and resolves
# to stats::df (the F density function), so this pipeline errored.  The
# parallel three-band block below builds from M.data; do the same here.
ttt <- M.data %>% setDT %>%
  .[Macaca_dist %in% c("A","B"),] %>%
  .[Time %in% c("A","B"),] %>%
  .[Macaca_sur == 1,] %>%
  .[Macaca_dist %in% "A", distance := 25] %>%
  .[Macaca_dist %in% "B", distance := 100] %>% setDF

# Point-transect detection function, intercept-only, no adjustment terms.
ds.ttt <- ds(ttt, transect = "point",
             formula = ~ 1 ,
             adjustment = NULL)
plot(ds.ttt, breaks =c(0,25,100), pl.col =2)
summary(ds.ttt )
gof_ds(ds.ttt)
hist(ttt$distance, breaks =c(0,25,100), probability = T, plot=T)
table(ttt$distance)
gof_ds(ds.ttt)
#==========================
# Three-band variant: adds band "C" -> 200 and a per-site/point identifier.
ttt2 <- M.data %>% setDT %>%
  .[Macaca_dist %in% c("A","B", "C"),] %>%
  .[Time %in% c("A","B"),] %>%
  .[Macaca_sur == 1,] %>%
  .[Macaca_dist %in% "A", distance := 25] %>%
  .[Macaca_dist %in% "B", distance := 100] %>%
  .[Macaca_dist %in% "C", distance := 200] %>%
  .[, Pointid := paste0(Site_N,"-",Point)] %>% setDF

# Same fit with an explicit half-normal key function.
ds.ttt2 <- ds(ttt2, transect = "point",
              formula = ~ 1 ,key = "hn",
              adjustment = NULL)
plot(ds.ttt2, breaks =c(0,25,100,200), pl.col =2)
summary(ds.ttt2 )
gof_ds(ds.ttt2)
hist(ttt2$distance, breaks =c(0,25,100,200), probability = T, plot=T)#
table(ttt2$distance)
|
721f2ac4047df1cc6b99881709ae16c65d7a6288
|
768bf50e03d36e04bcc6efd248917becea958cc3
|
/Rsuite/FudgeIO/ReadNC.R
|
60d6c23b761d8ecd2fc51b03cd311b5c111c1fed
|
[] |
no_license
|
cwhitlock-NOAA/FUDGE
|
62b3d3665cf8d979bf097cd6781f480068364c32
|
8c1ee4013a83ad044792a0066dcd97e35cdb9047
|
refs/heads/master
| 2021-01-23T18:52:24.149002
| 2015-04-02T14:20:56
| 2015-04-02T14:20:56
| 35,306,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,873
|
r
|
ReadNC.R
|
# Aparna Radhakrishnan 08/04/2014
ReadNC <- function(nc.object,var.name=NA,dstart=NA,dcount=NA,dim='none',verbose=FALSE, force_3_dimensions=FALSE) {
  #' Reads data from a variable of a netCDF file object.
  #' Returns the variable's data and, if dim != 'none', one or more dimensions
  #' of that netCDF object.
  #' -----Arguments-----
  #' @param nc.object A NetCDF object returned from nc_open of R's ncdf4 package.
  #' @param var.name The name of the variable within the NetCDF file.
  #' @param dstart A vector of indices indicating where to start reading (beginning at 1).
  #' @param dcount A vector of integers giving the count of values to read along
  #'   each dimension (order is X-Y-Z-T).
  #' @param dim Which dimensions to query: 'temporal' (T axis), 'spatial'
  #'   (X/Y/Z axes), 'ensemble' ("ensmem" dimension) or 'none'.  May be a vector
  #'   of several of these.  The choice also controls which auxiliary variables
  #'   are returned under $vars.
  #' @param verbose Currently unused; kept for interface compatibility.
  #' @param force_3_dimensions If TRUE and the variable has more than 3
  #'   dimensions, collapse degenerate interior dimensions to obtain (x, y, t).
  #' @return A list with $clim.in (data array, with 'units' attribute),
  #'   $dim (queried ncdim4 objects), $vars (auxiliary variables), $cfname,
  #'   $long_name and $units; plus 'calendar' and 'filename' attributes.
  #'   Side effect: nc.object is CLOSED before returning.
  #' @include ncdf4, ncdf4.helpers

  ## With var.name/dstart/dcount all left at their NA defaults, read the whole
  ## file.  all() guards against vector-valued dstart/dcount, for which the
  ## original bare `is.na(x) && ...` errors on R >= 4.3 (length > 1 condition).
  if (all(is.na(dstart)) && all(is.na(dcount)) && all(is.na(var.name))) {
    clim.in <- ncvar_get(nc.object)
  } else {
    message('obtaining variable slice')
    clim.in <- ncvar_get(nc.object,var.name,dstart,dcount,collapse_degen=FALSE)
    message('vars obtained')
  }

  ## FUDGE handles at most three dimensions, the first two spatial.  Collapse
  ## any OTHER degenerate dimensions present (not meant for ensemble dims).
  if (length(dim(clim.in)) > 3 && force_3_dimensions) {
    message("Adjusting dimensions to fit the FUDGE framework")
    ndims <- length(dim(clim.in))
    ## Last dim is always time; the first two are X and Y, so only the
    ## interior dims 3..(ndims-1) must be degenerate.
    ## (BUG FIX: the original `2:length(dim(clim.in))-1` indexed dims
    ## 1..(ndims-1) because `:` binds tighter than `-`.)
    dim.adjust <- dim(clim.in)[3:(ndims - 1)]
    if (sum(dim.adjust != 1) != 0) {
      ## Any non-degenerate interior dimension cannot be collapsed.
      stop(paste("Error in ReadNC: The file", attr(nc.object, "filename"), "had a var that could not be adjusted to an x,y,t dimension system",
                 "due to one or more non-degenerate dimensions. Please examine the input file and try again."))
    } else {
      ## Redimension as (x, y, t).
      ## (BUG FIX: the original took dim(clim.in[1:2]) -- the dim of a
      ## two-element subset, i.e. NULL -- and indexed the dim vector with
      ## length(clim.in) instead of the number of dimensions.)
      dim(clim.in) <- c(dim(clim.in)[1:2], dim(clim.in)[ndims])
    }
  }

  #### get standard name, long name and units if present ####
  ## Each is an ncatt_get() result: list(hasatt = <lgl>, value = <chr>).
  cfname    <- ncatt_get(nc.object, var.name, 'standard_name')
  long_name <- ncatt_get(nc.object, var.name, 'long_name')
  units     <- ncatt_get(nc.object, var.name, 'units')
  attr(clim.in, "units") <- units

  #######################################################
  ## Collect the requested dimension(s) and associated auxiliary variables.
  dim.list <- list(dim=list(), vars=list())
  for (d in seq_along(dim)) {
    temp.list <- switch(dim[d],
                        "spatial"  = get.space.vars(nc.object, var.name),
                        "temporal" = get.time.vars(nc.object, var.name),
                        "ensemble" = get.ens.dim(nc.object, var.name),
                        ## any other value (including "none"): no query
                        list("dim"=list("none"), 'vars'=list("none"))
    )
    dim.list$dim  <- c(dim.list$dim, temp.list$dim)
    dim.list$vars <- c(dim.list$vars, temp.list$vars)
    print(names(dim.list$dim))
  }
  #######################################################
  listout <- list("clim.in"=clim.in,"cfname"=cfname,"long_name"=long_name,"units"=units,
                  "dim"=dim.list$dim, 'vars'=dim.list$vars)
  ## Attributes used downstream for QC cross-checks between input files.
  attr(listout, "calendar") <- nc.object$dim$time$calendar
  attr(listout, "filename") <- nc.object$filename
  ## NOTE(review): closing the caller's handle here is a side effect callers
  ## appear to rely on; nc.object is unusable after ReadNC returns.
  nc_close(nc.object)
  return(listout)
}
get.space.vars <- function(nc.object, var){
  ## Collect the spatial (X/Y/Z) dimensions of `var`, plus every OTHER
  ## variable in the file that does NOT reference a dimension named "time"
  ## (e.g. lat_bnds, i/j offsets, height).
  ## Returns list(dim = <named list of ncdim4>, vars = <named list of arrays,
  ## each carrying units/longname/prec/dimids (and maybe comments) attrs>).
  message('getting spatial vars')
  axes <- c("X", "Y", "Z")
  file.axes <- nc.get.dim.axes(nc.object, var)
  if (is.null(file.axes)) {
    stop(paste("Error in ReadNC: File", nc.object$filename, "has no variable", var, "; please examine your inputs."))
  } else {
    ## (BUG FIX: the original print("Obtaining axis", ) carried an empty
    ## trailing argument.)
    print("Obtaining axis")
    spat.axes <- file.axes[file.axes%in%axes] #Here is where the extra Z dim check comes in
    spat.varnames <- names(file.axes[file.axes%in%axes])
  }

  ## One ncdim4 object per spatial axis found.
  spat.dims <- list()
  for (sd in seq_along(spat.varnames)) {
    ax <- spat.axes[[sd]]
    dname <- spat.varnames[[sd]]
    spat.dims[[dname]] <- nc.get.dim.for.axis(nc.object, var, ax)
    ## Keep the source file name so attributes can be cloned when writing.
    attr(spat.dims[[dname]], "filename") <- attr(nc.object, "filename")
  }

  ## Every variable other than `var` with no dimension named "time".
  ## seq_along() keeps the loop safe when no other variables exist
  ## (the original 1:length(...) iterated over c(1, 0) and errored).
  vars.present <- names(nc.object$var)[names(nc.object$var)!=var]
  spat.vars <- list()
  for (i in seq_along(vars.present)) {
    var.loop <- vars.present[i]
    if (! ("time"%in%lapply(nc.object$var[[var.loop]]$dim, obtain.ncvar.dimnames))) {
      spat.vars[[var.loop]] <- ncvar_get(nc.object, var.loop, collapse_degen=FALSE)
      ## Stash the pieces needed to rebuild the variable later as attributes.
      att.vector <- c(nc.object$var[[var.loop]]$units, nc.object$var[[var.loop]]$longname,
                      nc.object$var[[var.loop]]$prec)
      ## dimids are 0-based in ncdf4; +1 maps them to R's 1-based dim list.
      att.vector[4] <- paste(names(nc.object$dim)[(nc.object$var[[var.loop]]$dimids)+1], collapse=",")
      names(att.vector) <- c("units", "longname", "prec", "dimids")
      att.vector[att.vector=='int'] <- "integer"
      for (a in seq_along(att.vector)) {
        attr(spat.vars[[var.loop]], which=names(att.vector)[[a]]) <- att.vector[[a]]
      }
      ## The comments attribute matters for i and j offsets (not much else).
      comments <- ncatt_get(nc.object, var.loop, 'comments')
      if (comments$hasatt) {
        attr(spat.vars[[var.loop]], which='comments') <- comments$value
      }
    }
  }
  return(list("dim"=spat.dims, "vars"=spat.vars))
}
get.time.vars <- function(nc.object, var){
  ## Collect the time (T) dimension of `var`, plus every OTHER variable in
  ## the file that references a dimension named "time".
  ## Returns list(dim = <named list of ncdim4>, vars = <named list of arrays,
  ## each carrying units/longname/prec/dimids (and maybe comments) attrs>).
  message('getting time vars')
  axes<- c("T")
  file.axes <- nc.get.dim.axes(nc.object, var)
  if (is.null(file.axes)) {
    stop(paste("Error in ReadNC: File", nc.object$filename, "has no variable", var, "; please examine your inputs."))
  } else {
    time.axes <- file.axes[file.axes%in%axes]
    time.varnames <- names(file.axes[file.axes%in%axes])
  }

  ## One ncdim4 object per time axis (normally exactly one).
  time.dims <- list()
  for (td in seq_along(time.varnames)) {
    ax <- time.axes[[td]]
    dname <- time.varnames[[td]]
    time.dims[[dname]] <- nc.get.dim.for.axis(nc.object, var, ax)
    ## Keep the source file name so attributes can be cloned when writing.
    attr(time.dims[[dname]], "filename") <- attr(nc.object, "filename")
  }

  ## Initialize up front so the return value is always well-defined.
  ## (BUG FIX: the original created time.vars only inside the if branch, so
  ## the else branch's return() referenced an undefined object.  Its
  ## condition -- length(time.varnames > 1) -- also took the length of a
  ## logical vector, i.e. it really tested "at least one time axis found";
  ## that actual behavior is preserved explicitly below.)
  time.vars <- list()
  if (length(time.varnames) >= 1) {
    vars.present <- names(nc.object$var)[names(nc.object$var)!=var]
    ## seq_along() keeps the loop safe when no other variables exist.
    for (i in seq_along(vars.present)) {
      var.loop <- vars.present[i]
      ## Obtain all vars that have a dim named 'time'.
      if ("time"%in%lapply(nc.object$var[[var.loop]]$dim, obtain.ncvar.dimnames)) {
        time.vars[[var.loop]] <- ncvar_get(nc.object, var.loop, collapse_degen=FALSE)
        ## Stash the pieces needed to rebuild the variable later as attributes.
        att.vector <- c(nc.object$var[[var.loop]]$units, nc.object$var[[var.loop]]$longname,
                        nc.object$var[[var.loop]]$prec)
        ## dimids are 0-based in ncdf4; +1 maps them to R's 1-based dim list.
        att.vector[4] <- paste(names(nc.object$dim)[(nc.object$var[[var.loop]]$dimids)+1], collapse=",")
        names(att.vector) <- c("units", "longname", "prec", "dimids")
        att.vector[att.vector=='int'] <- "integer"
        for (a in seq_along(att.vector)) {
          attr(time.vars[[var.loop]], which=names(att.vector)[[a]]) <- att.vector[[a]]
        }
        ## The comments attribute matters for i and j offsets (not much else).
        comments <- ncatt_get(nc.object, var.loop, 'comments')
        if (comments$hasatt) {
          attr(time.vars[[var.loop]], which='comments') <- comments$value
        }
      }
    }
  } else {
    message("No variables but the main variable found using time dimension; continue on.")
  }
  return(list("dim"=time.dims, "vars"=time.vars))
}
# Extract the name of a single netCDF dimension object.  Used as an lapply()
# helper when checking which dimensions a variable references.
obtain.ncvar.dimnames <- function(nc.obj) {
  nc.obj[["name"]]
}
# Look up the ensemble dimension ("ensmem") on a netCDF object, if present.
# Returns list(dim = list(ensmem = <dim>), vars = list()) when the dimension
# exists, otherwise list(dim = list(), vars = list()).  var.name is accepted
# for signature parity with get.space.vars()/get.time.vars() but is unused.
get.ens.dim <- function(nc.obj, var.name) {
  message('getting ensemble dimension, if present')
  ens.dim.name <- "ensmem"
  if (!(ens.dim.name %in% names(nc.obj$dim))) {
    print("no dim named ensmem present")
    return(list(dim = list(), vars = list()))
  }
  print("ensmem present")
  ensmem <- nc.obj$dim$ensmem
  # Carry the source file name along for downstream attribute cloning.
  attr(ensmem, "filename") <- attr(nc.obj, "filename")
  list(dim = list(ensmem = ensmem), vars = list())
}
|
18219192a50b57d5a8e80e40be258aeffb2bb07c
|
dd5b4b21b5fd3e4a443f0a7bac5445d24439e841
|
/R/study/Linerar Algebra.R
|
a8b6aeb96a2cc59b5dbb5944d9c49bfddfd946a4
|
[] |
no_license
|
qkdrk777777/DUcj
|
466a6c519cfe296a1c6753b52c1f49f949c5a894
|
510c80e18dfaa4c8723b79a1aab286f418b3c901
|
refs/heads/master
| 2020-03-22T18:39:02.326444
| 2018-07-10T18:44:13
| 2018-07-10T18:44:13
| 117,794,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,546
|
r
|
Linerar Algebra.R
|
# Linear-algebra study notes: solving linear systems, determinants,
# inverses, eigen-decompositions, Markov chains and a Monte Carlo demo.
# NOTE(review): package() below is not base R -- presumably a user helper
# that installs and loads a package; confirm where it is defined.
#package('pracma');package('Matrix')
package('installr')
updateR()
# Solving a nonhomogeneous system of linear equations.
# When b = O the system is homogeneous; otherwise it is nonhomogeneous.
A<-matrix(c(2,-6,-8,3,-8,-14,4,-11,-17),ncol=3,byrow=T)
b<-matrix(c(10,8,15),nrow=3)
solve(A,b)
# Case with no solution (the system is inconsistent)
A<-matrix(c(2,1,1,1,-2,-7,4,3,5),nrow=3,byrow=T)
b<-matrix(c(7,-4,22),nrow=3)
solve(A,b)
# Case with infinitely many solutions
A<-matrix(c(2,1,1,1,-2,-7,4,3,5),nrow=3,byrow=T)
b<-matrix(c(7,-4,17),nrow=3)
solve(A,b)
# Homogeneous system of linear equations.
# The nontrivial solutions cannot be obtained this way.
A<-matrix(c(2,3,8,1,1,2,5,1,-6),nrow=3,byrow=T)
b=matrix(0,nrow=3,ncol=1)
det(A)
solve(A,b)
# Trivial solution
A<-matrix(c(3,-6,9,2,3,-4,4,-1,-2),nrow=3,byrow=T)
b=matrix(0,nrow=3,ncol=1)
det(A)
solve(A,b)
# Computing the inverse matrix
solve(A)
# If the coefficient matrix A of a linear system is square and invertible,
# the system has exactly one solution.
# Since AX = B and A is invertible,
# X = t(A) %*% B
# NOTE(review): the note above looks wrong -- the solution is
# solve(A) %*% B, not t(A) %*% B.
# Computing the determinant
A<-matrix(c(3,2,4,-2,0,-1,1,-2,5),nrow=3,byrow=T)
det(A)
# Swapping two rows (columns) of an n x n matrix multiplies its
# determinant by -1.
B<-rbind(A[2,],A[1,],A[3,])
det(B)
# If A, B and C agree except in row (column) i, and row i of C is the sum of
# row i of A and row i of B, then det(C) = det(A) + det(B).
A<-matrix(c(3,-2,5,1,2,4,2,-3,0),nrow=3,byrow=T)
B<-rbind(A[1,],A[2,],c(1,2,3))
C<-rbind(A[1,],A[2,],A[3,]+B[3,])
det(C)==det(A)+det(B)
# A square matrix A is invertible if and only if det(A) != 0.  # example
A<-matrix(c(1,2,3),nrow=3,ncol=3,byrow=T)
det(A)
solve(A)
# NOTE(review): tri() is not base R; likely from a package loaded elsewhere.
det(tri(A))
solve(tri(A))
# For square matrices A and B, det(A %*% B) == det(A) * det(B).
(A<-matrix(sample(9,9),nrow=3,ncol=3))
(B<-matrix(sample(9,9),nrow=3,ncol=3))
det(A%*%B)
det(A)*det(B)
# If a square matrix A is invertible, its inverse is
# cofactor(A, adj = T) / det(A)  (adjugate over determinant).
A<-matrix(c(2,-1,1,1,2,0,-3,-2,4),nrow=3,ncol=3)
det(A)
# det(A) != 0, so A is invertible
solve(A)
cofactor(A,adj=T)/det(A)
# Eigenvalues (eigenvalue) and eigenvectors (eigenvector).
# For a square matrix A and a nonzero vector x,
# if A %*% x = lambda * x holds for a scalar lambda,
# lambda is called an eigenvalue and x an eigenvector.
# Hence (A - lambda*I) %*% X = O, so
# (cf. the note near the top) if the coefficient matrix is square and
# invertible, the system has exactly one solution.
# If det(A - lambda*I) != 0 the matrix is invertible, so only the trivial
# solution exists -- contradicting the assumption that x is nonzero.
# If det(A - lambda*I) = 0 the matrix is non-invertible, so a nontrivial
# solution exists.
# Invertible case
A<-matrix(c(3,2,4,-5,-4,-7,5,6,9),nrow=3)
det(A)
b<-matrix(0,nrow=3,ncol=1)
solve(A,b)
# Non-invertible case
A<-matrix(c(2,1,5,3,1,1,8,2,-6),nrow=3)
det(A)
solve(A,b)
#det(A-lambda*I)=(A[1,1]-lambda)*(A[2,2]-lambda)-A[1,2]*A[2,1]=lambda^2-(A[1,1]+A[2,2])*lambda+(A[1,1]*A[2,2]-A[1,2]*A[2,1])
# lambda^2-(A[1,1]+A[2,2])*lambda+(A[1,1]*A[2,2]-A[1,2]*A[2,1]) is called
# the characteristic equation.
A<-matrix(c(1,1,3,1,1,-3,2,-1,0),nrow=3)
eigen(A)
# Eigenvectors corresponding to distinct eigenvalues are linearly independent.
A=matrix(c(4,3,2,5),nrow=2)
eigen<-eigen(A)
A
# Reconstruct A^2 from its eigendecomposition: V %*% D^2 %*% V^-1.
eigen$vectors%*%diag(eigen[[1]]^2)%*%solve(eigen$vectors)
A<-matrix(c(1,2,3),nrow=3,ncol=3)
A<-matrix(c(0,1,0,1,0,0,0,0,1),nrow=3)
(eigen<-eigen(A))
# Reconstruct A itself: V %*% D %*% V^-1.
eigen$vectors%*%diag(eigen[[1]])%*%solve(eigen$vectors)
# Markov chains
package('markovchain')
statesNames=c('a','b')
mcA<-new("markovchain",transitionMatrix=matrix(c(0.9,0.1,0.2,0.8),byrow=T,nrow=2,dimnames = list(statesNames,statesNames)))
# Two-step transition matrix.
mcA^2
statesNames=c("a","b","c","d")
matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3),
nrow=4, byrow=TRUE)
mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice)
mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE))
package('MonteCarlo')
#example(MonteCarlo)
# One replication of a two-sided t-test of H0: mean = 0 at the 5% level;
# returns whether H0 was rejected.
test_func<-function(n,loc,scale){
sample<-rnorm(n, loc, scale)
stat<-sqrt(n)*mean(sample)/sd(sample)
decision<-abs(stat)>1.96
return(list("decision"=decision))
}
# Example without parallelization: rejection rates over a parameter grid.
n_grid<-c(50,100,250,500)
loc_grid<-seq(0,1,0.2)
scale_grid<-c(1,2)
param_list=list("n"=n_grid, "loc"=loc_grid, "scale"=scale_grid)
erg<-MonteCarlo(func=test_func, nrep=250, param_list=param_list, ncpus=1)
summary(erg)
rows<-c("n")
cols<-c("loc","scale")
MakeTable(output=erg, rows=rows, cols=cols, digits=2)
|
18803124c8e452238d9e5475c968d91c00b3e249
|
178087fd666375abeb10fc4f9f23230d2438dc21
|
/R/boxformat.r
|
655f290bef8bfa6c77028840e9bdd2a93c971e89
|
[] |
no_license
|
cran/sdtoolkit
|
736adf447b9c59c9a795f7f4735d34985dfac0a2
|
8e9767f73b1266de8edf37744c7325a7e36c6497
|
refs/heads/master
| 2020-05-29T13:14:47.569985
| 2014-02-16T00:00:00
| 2014-02-16T00:00:00
| 17,699,521
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,543
|
r
|
boxformat.r
|
#' Format an sdtoolkit box definition for display.
#'
#' @param box 2 x d matrix: row 1 = lower bounds, row 2 = upper bounds.
#' @param dimlist list of logical vectors $lower, $upper, $either flagging
#'   which dimensions are restricted from below / above / at all.
#' @param morestats matrix of per-dimension statistics in ranking order;
#'   column 1 holds the (1-based) dimension index.
#' @param pvallist matrix with dimension index (col 1) and quasi-p-value (col 2).
#' @param style one of "ineq" (long table of active inequalities with stats),
#'   "absmat" (bounds of restricted dims only), "neatmat" (restricted dims
#'   with NA for inactive bounds) or "fullmat" (raw bounds matrix).
#' @return a matrix ("absmat"/"neatmat"/"fullmat") or data.frame ("ineq")
#'   describing the box.
`boxformat` <-
function(box, dimlist, morestats, pvallist, style = "ineq"){
  d <- ncol(box)
  mat <- t(box)
  colnames(mat) <- c("low","high")

  ## Re-order the p-values so they line up row-for-row with morestats.
  ## seq_len() keeps the loop safe when morestats has zero rows (the
  ## original 1:nrow(...) would have iterated over c(1, 0)).
  pv2 <- vector(length=nrow(morestats))
  for (i in seq_len(nrow(morestats))){
    cind <- which(pvallist[,1]==morestats[i,1])
    pv2[i] <- pvallist[cind,2]
  }

  if(style == "ineq"){
    ## Long format: one "dim > low" / "dim < high" row per possible bound.
    namev <- rep(rownames(mat),2)
    ineqsigns <- c(rep(" > ",d),rep(" < ",d))
    restricts <- c(mat[,1],mat[,2])
    tmat <- data.frame(dim = namev, rel=ineqsigns, bound=restricts)
    ## Interleave each ranked dimension's lower row (index i) with its upper
    ## row (index i + d) ...
    onlydims <- rbind(morestats[,1],(morestats[,1]+d))
    dinterleaved <- as.vector(onlydims)
    tmat <- tmat[dinterleaved,]
    ## ... then keep only the restrictions that are actually active.
    uplow <- rbind(dimlist$lower[morestats[,1]],dimlist$upper[morestats[,1]])
    loginterleaved <- as.vector(uplow)
    tmat <- tmat[loginterleaved,]
    ## Dimensions restricted on BOTH sides occupy two rows; offset the stats
    ## row positions so each stats row lands on its dimension's first row.
    both <- uplow[1,] & uplow[2,]
    totuniq <- nrow(morestats)
    direct <- seq_len(totuniq)
    needspc <- seq_len(nrow(morestats))[both]
    ## Don't add an offset after the last position.
    needspc <- needspc[needspc!=totuniq]
    if(length(needspc)!=0){
      for (i in needspc){
        direct[(i+1):totuniq] <- direct[(i+1):totuniq]+1
      }
    }
    newmat <- matrix(nrow=nrow(tmat),ncol=6)
    newmat[direct,] <- cbind(morestats,pv2,nrow(morestats):1)
    newmat <- cbind(tmat,newmat)
    ## Fill the NA stats holes (second row of two-sided dims) from the row
    ## directly above.
    newmat[is.na(newmat[,4]),4] <- newmat[,4][c(1:nrow(newmat))[is.na(newmat[,4])] - 1]
    colnames(newmat) <- c("dimension name","rel","bound","dimind"," density","coverage","support","qpval","rmv")
    mat <- newmat
    rownames(mat) <- NULL
  }
  else if(style == "absmat"){
    ## Bounds matrix restricted to dimensions with any active restriction.
    mat <- mat[dimlist$either,]
  }
  else if(style == "neatmat"){
    ## Like absmat, but inactive bounds are blanked out with NA.
    mat[!dimlist$lower,1] <- NA
    mat[!dimlist$upper,2] <- NA
    mat <- mat[dimlist$either,]
  }
  else if(style != "fullmat"){
    stop("Argument \'style\' must be set to \'ineq\', \'absmat\', \'neatmat\' or \'fullmat\'.")
  }
  return(mat)
}
|
04be5857f610a2dc51f1077046dcae5083299d34
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PeerPerformance/examples/alphaScreening.Rd.R
|
482b5f9b6ad6a51ec9a75460bfc6019750c5635f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
alphaScreening.Rd.R
|
library(PeerPerformance)
### Name: alphaScreening
### Title: Screening using the alpha outperformance ratio
### Aliases: alphaScreening
### Keywords: htest
### ** Examples
## Load the data (randomized data of monthly hedge fund returns);
## keep the first ten funds only
data("hfdata")
rets = hfdata[,1:10]
## Run alpha screening on a single core (nCore = 1)
ctr = list(nCore = 1)
alphaScreening(rets, control = ctr)
## Run alpha screening with HAC (heteroskedasticity- and
## autocorrelation-consistent) standard deviation
ctr = list(nCore = 1, hac = TRUE)
alphaScreening(rets, control = ctr)
|
8734652a292b7da26e0f9e7fc60f6de2cc7cf06a
|
2eafc112ae88a8a790ad585e8458718fe581ce78
|
/type.R
|
daee9c4120cc80fa47d4c432b0d23865b2119bba
|
[
"MIT"
] |
permissive
|
PRL-PRG/sle22-signatr-artifact
|
c5047d8ed2f01f6939cb767984a184aedeb65bc2
|
88982d670ac1c746b142fd2435acd1a0eeb7ea15
|
refs/heads/master
| 2023-04-15T03:15:31.667611
| 2022-11-22T14:53:05
| 2022-11-22T14:53:05
| 548,820,754
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
r
|
type.R
|
#!/usr/bin/env Rscript
# Batch-type one traces file: infer types for the traced function with
# signatr's "tastr" type system against data/cran_db, keep the
# (fun_name, id, signature) columns, and save them under data/types/.
traces_file <- commandArgs(trailingOnly = TRUE)[1]
# Each traces file is named after the function it traces.
fun_name <- basename(traces_file)
types <- signatr::traces_type(traces_file, signatr:::type_system_tastr, "data/cran_db")
types <- types[[fun_name]]
types <- subset(types, select=c(fun_name, id, signature))
# NOTE(review): length() of a data frame is its COLUMN count, so after the
# three-column subset this guard is TRUE for any data frame -- confirm
# whether nrow(types) > 0 was intended.
if (length(types) > 0) {
  qs::qsave(types, file.path("data/types", basename(traces_file)))
}
|
7c02214db533b23d9ea13cca66b856f68b1a5540
|
14305a42ea3fbd2791399aa7f100f2c1a935cf21
|
/combineAseReadCountFiles.r
|
865af81f2812fd009b2495d203ecdd6be8a699a1
|
[
"MIT"
] |
permissive
|
baoqingding/verta_jones_elife_2019
|
58fc55a0b21b27a4392daf444f48d63858f07f40
|
87a1bb2685260856c18bdff05bd5da608767f4cf
|
refs/heads/master
| 2023-03-19T20:05:07.027372
| 2019-05-24T11:56:53
| 2019-05-24T11:56:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,060
|
r
|
combineAseReadCountFiles.r
|
# load bed files
# define a unique set of positions
# parse individual bed file into a dataframe of all unique positions
# select lines where there are no NA's
#
# Combines per-individual ASE read-count files (F1 offspring plus parents of
# cross c172, some parents sequenced on two lanes) into wide tables keyed by
# "chrom:pos", then drops SNPs whose parental counts look heterozygous --
# parents are expected to be fully informative (near-fixed for one allele).
inds = c('c172_F1_20_F', 'c172_F1_20_M', 'c172_F1_04_F', 'c172_F1_04_M', 'c172_F1_10_F', 'c172_F1_10_M', 'c172_F1_13_F', 'c172_F1_13_M', 'c172_F1_01_F', 'c172_F1_01_M', 'c172_F1_05_F', 'c172_F1_05_M','c172_P_532_F_lane3', 'c172_P_533_M_lane3', 'c172_P_532_F_lane8', 'c172_P_533_M_lane8')
# One data.frame per individual, plus a running vector of "chrom:pos" keys.
cov = list()
pos = c()
for (i in inds){
print(i)
# Per the column renaming below: col 1 = contig, col 2 = position,
# col 6 = ref count, col 7 = alt count, col 8 = total count.
# NOTE(review): the '...' path prefix is kept from the original -- it is
# a placeholder, not a valid path; confirm the real location.
cov[[i]] = read.table(paste('.../SNPs/',i,'/STAR/UCSCgtf/genomicMask/',i,'_aseReadCounter_infSites_duprem.txt',sep=''),header=T,sep='\t') # use with parents!
pos = append(pos,paste(cov[[i]][,1],cov[[i]][,2],sep=':'))
}
#unique positions - rows in data.frame
uPos = unique(pos)
library(data.table)
#merge all coverage files letting NA's fill unmatched positions - use DATA TABLE !!!
# Wide table 1: one REFCOV and one ALTCOV column per individual.
Cov = data.frame(pos=uPos)
rownames(Cov) = Cov[,'pos']
Cov = setDT(Cov)
for (i in names(cov)){
bed = data.table(cov[[i]])
bed[,'pos'] = paste(cov[[i]][,1],cov[[i]][,2],sep=':')
colnames(bed)[6] = paste(i,'REFCOV',sep='_')
colnames(bed)[7] = paste(i,'ALTCOV',sep='_')
Cov = merge(Cov,bed[,c(paste(i,'REFCOV',sep='_'),paste(i,'ALTCOV',sep='_'),'pos'),with=F],all=T,by='pos')
}
# check that parents are indeed fully informative:
# allelic imbalance |ref - alt| / (ref + alt) should be ~1 (homozygous).
hist(abs(Cov$c172_P_532_F_lane8_REFCOV-Cov$c172_P_532_F_lane8_ALTCOV)/(Cov$c172_P_532_F_lane8_REFCOV+Cov$c172_P_532_F_lane8_ALTCOV))
hist(abs(Cov$c172_P_532_F_lane3_REFCOV-Cov$c172_P_532_F_lane3_ALTCOV)/(Cov$c172_P_532_F_lane3_REFCOV+Cov$c172_P_532_F_lane3_ALTCOV))
# filter out SNPs with heterozygous counts in the parents
# (imbalance below 0.99 in any parental library)
hetSNPs = which(abs(Cov$c172_P_532_F_lane3_REFCOV-Cov$c172_P_532_F_lane3_ALTCOV)/(Cov$c172_P_532_F_lane3_REFCOV+Cov$c172_P_532_F_lane3_ALTCOV)<0.99 |
abs(Cov$c172_P_532_F_lane8_REFCOV-Cov$c172_P_532_F_lane8_ALTCOV)/(Cov$c172_P_532_F_lane8_REFCOV+Cov$c172_P_532_F_lane8_ALTCOV)<0.99 |
abs(Cov$c172_P_533_M_lane3_REFCOV-Cov$c172_P_533_M_lane3_ALTCOV)/(Cov$c172_P_533_M_lane3_REFCOV+Cov$c172_P_533_M_lane3_ALTCOV)<0.99 |
abs(Cov$c172_P_533_M_lane8_REFCOV-Cov$c172_P_533_M_lane8_ALTCOV)/(Cov$c172_P_533_M_lane8_REFCOV+Cov$c172_P_533_M_lane8_ALTCOV)<0.99)
# data.table: drop the heterozygous-looking rows by index.
Cov = Cov[!hetSNPs]
# remove parent columns if necessary
#Cov = Cov[,1:25]
# save table
write.table(Cov,'aseReadCounts_c172_PallF1_infSites_STAR_duprem.txt')
#merge all coverage files letting NA's fill unmatched positions - use DATA TABLE !!!
# Wide table 2: one TOTCOV column per individual, filtered with the SAME
# hetSNPs index computed above (row order matches because both tables are
# built from uPos and merged by 'pos').
Cov = data.frame(pos=uPos)
rownames(Cov) = Cov[,'pos']
Cov = setDT(Cov)
for (i in names(cov)){
bed = data.table(cov[[i]])
bed[,'pos'] = paste(cov[[i]][,1],cov[[i]][,2],sep=':')
colnames(bed)[6] = paste(i,'REFCOV',sep='_')
colnames(bed)[7] = paste(i,'ALTCOV',sep='_')
colnames(bed)[8] = paste(i,'TOTCOV',sep='_')
Cov = merge(Cov,bed[,c(paste(i,'TOTCOV',sep='_'),'pos'),with=F],all=T,by='pos')
}
Cov = Cov[!hetSNPs]
# remove parent columns if necessary
#Cov = Cov[,1:13]
write.table(Cov,'aseReadCounts_c172_PallF1_infSites_totalCov_STAR_duprem.txt')
|
27bec4bc7f3435b77a8414443015aabc4965513d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/glm.predict/examples/basepredict.Rd.R
|
76a13b7d35d412c13d9088c5e3ddd878b1deeff3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
basepredict.Rd.R
|
# Auto-extracted example for glm.predict::basepredict (predicted values from a GLM).
library(glm.predict)
### Name: basepredict
### Title: predicted value
### Aliases: basepredict
### Keywords: models
### ** Examples
# Logistic regression: predict Sex from Height, Smoke and Pulse (MASS::survey data).
model1 = glm(Sex ~ Height + Smoke + Pulse, data=MASS::survey, family=binomial(link=logit))
summary(model1)
# comparing a person with the height 150cm to 151cm
# NOTE(review): only one covariate vector (height 150) is supplied below, so no
# 151cm comparison actually happens -- the comment may be stale; confirm.
basepredict(model1, c(1,150,1,0,0,mean(MASS::survey$Pulse,na.rm=TRUE)))
|
cb951a789414bda79b12186bbf100a909405013d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/POUMM/examples/plot.summary.POUMM.Rd.R
|
fe54d162bcc70febe9bd5f5da5382be3bb37616e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
plot.summary.POUMM.Rd.R
|
# Auto-extracted example for POUMM::plot.summary.POUMM. The demo itself is wrapped
# in "Not run" (##D lines) because it fits a long MCMC chain; only the library
# call executes.
library(POUMM)
### Name: plot.summary.POUMM
### Title: Plot a summary of a POUMM fit
### Aliases: plot.summary.POUMM
### ** Examples
## Not run:
##D library(POUMM)
##D 
##D set.seed(1)
##D 
##D N <- 1000
##D 
##D # create a random non-ultrametric tree of N tips
##D tree <- ape::rtree(N)
##D 
##D # Simulate the evolution of a trait along the tree
##D z <- rVNodesGivenTreePOUMM(
##D   tree, g0 = 8, alpha = 1, theta = 4, sigma = 1.2, sigmae = .8)
##D 
##D fit <- POUMM(z[1:N], tree, spec = list(nSamplesMCMC = 4e5))
##D 
##D # Summarize the results from the fit in a table:
##D summary(fit)
##D 
##D # Create plots for some of the inferred parameters/statistics:
##D pl <- plot(fit, stat = c("alpha", "theta", "sigma", "sigmae", "H2tMean"),
##D            doZoomIn = TRUE,
##D            zoomInFilter = paste("!(stat %in% c('alpha', 'sigma', 'sigmae')) |",
##D                                 "(value >= 0 & value <= 8)"),
##D            doPlot = FALSE)
##D 
##D pl$traceplot
##D pl$densplot
## End(Not run)
|
900dbc6d819d4d4b9704aed852af99508ff66e90
|
b50a1fa9d4c855c648709c6fe75c83d2ca5851cb
|
/R/learner.R
|
bae5c4531adb221f0d067f29d09a68d4a5239592
|
[] |
no_license
|
cran/boost
|
852ebe4469654e8af1adb2e10c4a944a42c78ef8
|
a6c8d51ba9ddb7b07b635ae1cc2bbc53b3c7ce13
|
refs/heads/master
| 2021-01-10T21:31:25.928552
| 2004-12-09T00:00:00
| 2004-12-09T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
learner.R
|
## Fit a (possibly bagged) weighted stump learner with rpart and return its
## predictions on the learning and test sets.
##
## Arguments:
##   y, w    - response and observation weights on the learning set
##   xlearn  - learning-set predictor matrix (rows = observations)
##   xtest   - test-set predictor matrix
##   method, args - currently UNUSED; only depth-1 stumps are supported (see below)
##   bag     - number of bootstrap replicates (1 = no bagging)
## Returns: list(learn =, test =) with predictions averaged over the bag.
##
## Implementation note: the predictor is always bound to a local variable
## named `bx` so the model formula (y~bx) matches at predict() time when
## newdata = data.frame(bx) is supplied. Renaming `bx` would break prediction.
learner <- function(y, w, xlearn, xtest, method, args, bag)
{
## Definitions
learn <- dim(xlearn)[1]
test <- dim(xtest)[1]
# one row of predictions per bootstrap replicate
blearn <- matrix(0, bag, learn)
btest <- matrix(0, bag, test)
## Currently only stumps as learners are supported, no choice of args!!!
# maxdepth = 1 forces a single split (a "stump"); weights are normalised
# to mean 1 to keep rpart's internal thresholds scale-free.
cntrl <- rpart.control(maxdepth = 1, minsplit = learn-1, #minbucket = 1,
maxsurrogate = 0, usesurrogate=0, maxcompete = 1,
cp = 0, xval = 0)
## Bagging stumps/trees
if (bag==1)
{
bx <- xlearn
fit <- rpart(y~bx, weights = w/mean(w), control = cntrl)
bx <- xtest
# predict(fit) with no newdata scores the stored training frame (xlearn);
# the reassigned bx only matters for the newdata call below.
blearn[1,] <- predict(fit)
btest[1,] <- predict(fit, newdata = data.frame(bx))
}
if (bag>1)
{
for (b in 1:bag)
{
# bootstrap resample of the learning set (with replacement)
indices <- sample(1:learn, learn, replace = TRUE)
by <- y[indices]
bw <- b
|
</source_numbered>
…(truncated)…

Wait — I should not fabricate; let me stop and re-emit the learner body faithfully from the SOURCE lines.
|
4d520c99b2e20bdccaacb9091a2360c7743a0653
|
6199b3da924058b8bc2ec245ab815feb25e5aac1
|
/R/execute_sort_read_pairs_from_stacks.R
|
3384865a20d94801268a105f7106b7ef0844c097
|
[] |
no_license
|
abshah/RADseqR
|
f3611803aaf07fbfd247d7aa49f89eb3317cc49e
|
629d31177d77a933c563f61a769172a9410b7d51
|
refs/heads/master
| 2020-12-31T02:14:11.459982
| 2016-04-11T22:41:11
| 2016-04-11T22:41:11
| 37,906,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
execute_sort_read_pairs_from_stacks.R
|
#' Execute sort read pairs from stacks
#'
#' Runs the stacks \code{sort_read_pairs.pl} utility (which must be on the
#' system PATH) to sort paired reads, creating the output directory first if
#' it does not already exist.
#'
#' @param stacks_output_files output files from stacks. default ="stacks/"
#' @param sample_files sample files directory. default="samples/"
#' @param whitelist_file which whitelist to use. default="whitelist.txt"
#' @param output_files where to store the output files. default="paired/"
#' @return \code{NULL}, invisibly. Called for its side effect of running
#'   \code{sort_read_pairs.pl}.
#' @export
#### Execute Sort Read Pairs from Stacks #####
execute_sort_read_pairs_from_stacks <- function(stacks_output_files = "stacks/",
                                                sample_files = "samples/",
                                                whitelist_file = "whitelist.txt",
                                                output_files = "paired/")
{
  # Create the output directory on demand (idiomatic: !dir.exists, not == FALSE).
  if (!dir.exists(output_files)) {
    dir.create(output_files)
  }
  sort_read_pairs <- Sys.which("sort_read_pairs.pl")
  # Sys.which() returns "" when the program is missing; fail fast with a clear
  # message instead of invoking system2() with an empty command.
  if (!nzchar(sort_read_pairs)) {
    stop("sort_read_pairs.pl not found on the system PATH", call. = FALSE)
  }
  system2(command = sort_read_pairs,
          args = c(" -p ", stacks_output_files, " -s ", sample_files,
                   " -o ", output_files, " -w ", whitelist_file))
  invisible(NULL)
}
|
460d6f3957fe5302039150b3f0bf3a5cdc10c047
|
8f2b6b2cd7876713e26794f84f5d41e1f01e2683
|
/man/get_sample_size.Rd
|
4118fc4f50931e1e4d73d01733eeb373abad0ff9
|
[
"MIT"
] |
permissive
|
Shicheng-Guo/catalogueR
|
909a3bf27a71a06a89f0cebbaec4c5bfdba98bd5
|
bdaf36272f54077f24de52c0b7b93851ed8bcd3e
|
refs/heads/master
| 2023-03-27T12:25:06.437278
| 2021-03-29T11:22:37
| 2021-03-29T11:22:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 478
|
rd
|
get_sample_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_sample_size}
\alias{get_sample_size}
\title{Infer (effective) sample size from summary stats}
\usage{
get_sample_size(subset_DT, sample_size = NULL, effective_ss = T, verbose = T)
}
\description{
Infer (effective) sample size from summary stats
}
\examples{
data("BST1")
finemap_DT <- BST1
subset_DT <- get_sample_size(subset_DT = finemap_DT)
}
\concept{general}
\keyword{internal}
|
5d2f09547345db6c14f258df9f550edf7ae16eb6
|
b88a9c576e7d59abfb3b0bd693d3e96ba54d156a
|
/Selection-genome-variable_v2.R
|
58b5f07d3e201de45c7c2c58db1ff1aa280744ae
|
[] |
no_license
|
SlimEKDev/Ececorum
|
dc9ec5f005562a1c43674858e4e100978cf14409
|
64f059d7fbb39a7e462e6e255e9b9ae887eb6466
|
refs/heads/master
| 2021-01-22T18:08:09.785789
| 2017-03-15T13:57:19
| 2017-03-15T13:57:19
| 85,061,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,793
|
r
|
Selection-genome-variable_v2.R
|
# Compare PATRIC pathway tables to find pathways specific to E. cecorum
# versus other Enterococcus species. (Comments translated from French.)
setwd("~/Ececorum")
# Load the E. cecorum pathway comparison table
GenEcec <- read.csv(file = "Ececorum_all_PATRIC_pathways.csv", header = TRUE, sep = "," , fill = TRUE)
summary(GenEcec)
# Keep pathways found 16 times (i.e. in every selected genome)
# = the core pathwome of E. cecorum; columns 1,2,5,8 only
CoreGenEcec <- GenEcec[ GenEcec[,5] == 16, c(1,2,5,8) ]
summary(CoreGenEcec)
# Load the pathway comparison table for E. spp
GenEspp <- read.csv(file = "Espp_41-gnm-cplt_PATRIC_pathways.csv", header = TRUE, sep = "," , fill = TRUE)
summary(GenEspp)
# Keep pathways found 41 times (i.e. in every selected genome)
# = the core pathwome of E. spp
CoreGenEspp <- GenEspp[ GenEspp[,5] == 41, c(1,2,5,8) ]
summary(CoreGenEspp)
# Select pathways unique to E. cecorum by removing overlaps between the two tables
library("dplyr", lib.loc="/usr/local/public/R-3.2.0/lib64/R/library")
GenSpeEcec <- anti_join(CoreGenEcec,CoreGenEspp, by = "Pathway.ID")
# Save the table as a .csv file
write.csv(GenSpeEcec, file = "Genome-specifique_E-cecorum2.csv")
# Load the pathway comparison table for E. spp excluding E. cecorum
# = pan-pathwome of E. spp without E. cecorum
GenEsppmEceco <- read.csv(file = "Espp_41-gnm-cplt_mEceco_PATRIC_pathways.csv", header = TRUE, sep = "," , fill = TRUE)
summary(GenEsppmEceco)
# Keep columns 1,2,5,8 of the pan-pathwome table (E. cecorum excluded)
PanGenEsppmEceco <- GenEsppmEceco[ , c(1,2,5,8) ]
summary(PanGenEsppmEceco)
# Keep only variable genes (present in fewer than 35 genomes)
VarGenEsppmEceco <- GenEsppmEceco[ GenEsppmEceco[,5] < 35, c(1,2,5,8) ]
summary(VarGenEsppmEceco)
# Pathways both variable in E. spp and core in E. cecorum
CommonGen <- semi_join(VarGenEsppmEceco, CoreGenEcec, by="Pathway.ID")
# Write the result to a .csv file
write.csv(CommonGen, file = "NB-occ_Pathwome-unique_E-cecorum2.csv")
|
02f50cce30034e911bfdb9da06e4818329299fff
|
d8b9a7ecd42d91c5bfac7bcb329aa618a8a849ca
|
/CRISPRScreenPlots/SyntheticLethality_GenesOfInterest.R
|
c70eb6b0c01d93a8459adaa7f0efca17497df482
|
[] |
no_license
|
MFMdeRooij/CRISPRscreen
|
3e77af211b0008591b68e90febb11711dfeff3d0
|
27e4d755d350d9c00b5a5fa928749b8cb6cbee75
|
refs/heads/master
| 2023-08-30T13:56:37.186502
| 2023-08-22T13:09:36
| 2023-08-22T13:09:36
| 226,073,806
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,784
|
r
|
SyntheticLethality_GenesOfInterest.R
|
# Use the CRISPRScreenAnalysis.R output files of a synthetic lethality screen, adjust the settings, and run the script in R studio.
# This script can normalize the median log2 fold change to the essential and non-essential genes of a synthetic lethality screen, and plots T1control/T0 against T1treated/T0.
# This normalization can improve the comparison treated - control if the treated arm did not have equal cell divisions, however the separation between the essentials and
# non-essentials will not be improved. Synthetic lethal genes will be located around the lower half of the vertical 0 axis.
# Author: M.F.M. de Rooij PhD, Amsterdam UMC, Spaargaren Lab, 2023, info: m.f.derooij@amsterdamumc.nl
##################################################################################################################################
## Install required packages once
#install.packages("devtools")
#devtools::install_github("JosephCrispell/basicPlotteR")
library("basicPlotteR")
##################################################################################################################################
# SETTINGS
# Put this script in the folder where the count tables are located
folder<- dirname(rstudioapi::getActiveDocumentContext()$path)
## Fill in workdirectory (folder in which the count tables are located, use always slash (/) instead of backslash)
#folder<- "H:/BioWin/CRISPRscreen/Namalwa/"
# Cell line:
cellID<- "Namalwa"
# Is there a T1drug/T1control comparison, 0: no, 1: yes
t2t1com<- 1
# Size graph (pdf width/height in inches)
size<- 7
# Genes to emphasize, 0: all significant (from T1drug/T1control comparison), 1: specific genes
allsignif<- 0
GenesOfInterest<- NULL
if (allsignif==1){
# If specific genes, Which ones?
GenesOfInterest<- c("BTK", "SYK", "PIK3R1")
}
# Show all gene symbols, 0: no, 1: yes
GeneSymbol<- 0
# Axes labels:
xlab<- "Control (Relative log2 median fold change)"
ylab<- "Ibrutinib (Relative log2 median fold change)"
# # BCR-controlled adhesion screens:
# xlab<- "PMA (Log2 median fold change)"
# ylab<- expression(alpha*"IgM (Log2 median fold change)")
# Normalize to essential and non-essentials (only for lethality), 0: no, 1: yes
NormalizeX<- 1
NormalizeY<- 1
#Axes limit, 0 = automatic, 1: custom
Axlim<- 0
# If automatic, Equal X and Y axes, 0 = no, 1: yes
XYequal<- 1
if (Axlim==1){
# Custom axes limits:
xmin<- -0.8
xmax<- 0.4
xticks<- 0.2
ymin<- -0.8
ymax<- 0.4
yticks<- 0.2
}
# Colors (all points / essentials / non-essentials / highlighted hits):
# NOTE(review): `call` shadows base::call -- works, but rename if refactoring.
call<- 'lightgray'
cpos<- 'red'
cneg<- 'blue'
chit<- 'black'
##################################################################################################################################
# ---- Load DESeq2 gene tables and normalize fold changes ----
setwd(folder)
Control<- read.csv("DESeq2 T0vsT1 Genes.csv", stringsAsFactors=F)
if (NormalizeX == 0){
Control$Nmfc<- log2(Control$MedianFoldChange)
}
if (NormalizeX == 1){
# Rescale so non-essential genes (Type=='n') sit at 0 and essentials
# (Type=='p') at -1 on the log2 median fold-change axis.
medianCP<- log2(median(Control$MedianFoldChange[Control$Type=='p']))
medianCN<- log2(median(Control$MedianFoldChange[Control$Type=='n']))
Control$Nmfc<- (log2(Control$MedianFoldChange)-medianCN)/abs(medianCP-medianCN)
}
ControlG<- Control[,c("GeneSymbol","Type","Nmfc")]
Treated <- read.csv("DESeq2 T0vsT2 Genes.csv", stringsAsFactors=F)
if (NormalizeY == 0){
Treated$Nmfc<- log2(Treated$MedianFoldChange)
}
if (NormalizeY == 1){
medianTP<- log2(median(Treated$MedianFoldChange[Treated$Type=='p']))
medianTN<- log2(median(Treated$MedianFoldChange[Treated$Type=='n']))
Treated$Nmfc<- (log2(Treated$MedianFoldChange)-medianTN)/abs(medianTP-medianTN)
}
TreatedG<- Treated[,c("GeneSymbol","Nmfc")]
if (t2t1com==1){
# Treated-vs-control comparison: per gene take the smaller of the
# depleted/enriched rho statistics and FDRs.
TC<- read.csv("DESeq2 T1vsT2 Genes.csv", stringsAsFactors=F)
TC$Stat<- apply(TC[,c("rhoDepleted","rhoEnriched")], 1, FUN=min)
TC$fdr<- apply(TC[,c("fdrDepleted","fdrEnriched")], 1, FUN=min)
TCG<- TC[,c("GeneSymbol","Stat", "fdr")]
if (allsignif==0){
GenesOfInterest<- TCG$GeneSymbol[TCG$fdr<0.1]
}
}
# ---- Combine tables and derive point groups ----
Combi<- merge(ControlG, TreatedG, by="GeneSymbol")
if (t2t1com==1){
Combi<- merge(Combi, TCG, by="GeneSymbol")
}
pos<- Combi[Combi$Type=="p",]
neg<- Combi[Combi$Type=="n",]
hit<- Combi[Combi$GeneSymbol %in% GenesOfInterest,]
if (Axlim==0){
# Calculate axis limits:
xmin<- round(min(Combi$Nmfc.x),2)-0.3
xmax<- round(max(Combi$Nmfc.x),2)+0.3
ymin<- round(min(Combi$Nmfc.y),2)-0.3
ymax<- round(max(Combi$Nmfc.y),2)+0.3
if (XYequal==1){
xmin<- min(xmin,ymin)
ymin<- min(xmin,ymin)
xmax<- max(xmax,ymax)
ymax<- max(xmax,ymax)
}
xticks<- round((xmax-xmin)/5.1,2)
yticks<- round((ymax-ymin)/5.1,2)
}
# ---- Main scatter plot (lower-left panel of the pdf) ----
pdf(paste0("CRISPR_SL_",cellID,"_R.pdf"),size,size)
par(mar=c(4,4,0,0))
par(fig=c(0.1,0.7,0.1,0.7))
# Point size scales with significance (smaller Stat => bigger point) when the
# T1vsT2 comparison is available; otherwise a fixed size is used.
plot(Combi$Nmfc.x, Combi$Nmfc.y, xlab=xlab, ylab=ylab, cex.lab=1, cex.axis=1,
xlim=c(xmin,xmax), ylim=c(ymin,ymax),
xaxp=c(xmin,xmin+xticks*floor((xmax-xmin)/xticks),floor((xmax-xmin)/xticks)),
yaxp=c(ymin,ymin+yticks*floor((ymax-ymin)/yticks),floor((ymax-ymin)/yticks)),
pch=16, col=call, cex=if(t2t1com==1){0.3+-0.1*log10(Combi$Stat)}else{0.5})
if (GeneSymbol == 1){
text(Combi$Nmfc.x, Combi$Nmfc.y, labels=Combi$GeneSymbol, cex=0.8, col="gray", adj = c(-0.2,0.5), srt=22.5)
}
points(pos$Nmfc.x, pos$Nmfc.y, pch=16, col=cpos, cex=if(t2t1com==1){0.3+-0.1*log10(pos$Stat)}else{0.5})
points(neg$Nmfc.x, neg$Nmfc.y, pch=16, col=cneg, cex=if(t2t1com==1){0.3+-0.1*log10(neg$Stat)}else{0.5})
if (length(GenesOfInterest)>0){
points(hit$Nmfc.x, hit$Nmfc.y, pch=16, col=chit, cex=if(t2t1com==1){0.3+-0.1*log10(hit$Stat)}else{0.5})
# non-overlapping labels via basicPlotteR
addTextLabels(hit$Nmfc.x, hit$Nmfc.y, hit$GeneSymbol, avoidPoints = TRUE,
keepLabelsInside = TRUE, col.label="black", cex.label=1)
}
# Reference lines: 0 = non-essential level, -1 = essential level, diagonal = no effect
abline(v=0, col=cneg, lty=3)
abline(h=0, col=cneg, lty=3)
if (NormalizeX == 1){
abline(v=-1, col=cpos, lty=3)
}
if (NormalizeY == 1){
abline(h=-1, col=cpos, lty=3)
}
abline(0,1, col="black", lty=2)
legend(xmin,ymax,legend=c("All genes", "Essentials","Non-essentials"), pch=16, cex=0.8, col=c(call,cpos,cneg))
# Density y axis (right-hand marginal panel; endpoints zeroed so polygons close)
denY_PC<- density(pos$Nmfc.y, from=ymin, to=ymax, na.rm=TRUE)
denY_PC$y[1]<- 0
denY_PC$y[length(denY_PC$y)]<- 0
denY_NC<- density(neg$Nmfc.y, from=ymin, to=ymax, na.rm=TRUE)
denY_NC$y[1]<- 0
denY_NC$y[length(denY_NC$y)]<- 0
denYMax <- max(c(denY_PC$y, denY_NC$y))
par(mar=c(4,0,0,4))
par(fig=c(0.7,0.9,0.1,0.7),new=TRUE)
plot(denY_PC$y, denY_PC$x, ylim=c(ymin,ymax), xlim=(c(0,denYMax)), type='l', axes=FALSE, col=2, xlab="", ylab="", lwd=2)
lines(denY_NC$y, denY_NC$x, col=4, lwd=2)
rgb.val<- col2rgb(cpos)
polygon(denY_PC$y, denY_PC$x, col=rgb(rgb.val[1]/255,rgb.val[2]/255,rgb.val[3]/255,alpha=0.3), lwd=0.1)
rgb.val<- col2rgb(cneg)
polygon(denY_NC$y, denY_NC$x, col=rgb(rgb.val[1]/255,rgb.val[2]/255,rgb.val[3]/255,alpha=0.3), lwd=0.1)
# Density x axis (top marginal panel)
denX_PC<- density(pos$Nmfc.x, from=xmin, to=xmax, na.rm=TRUE)
denX_PC$y[1]<- 0
denX_PC$y[length(denX_PC$y)]<- 0
denX_NC<- density(neg$Nmfc.x, from=xmin, to=xmax, na.rm=TRUE)
denX_NC$y[1]<- 0
denX_NC$y[length(denX_NC$y)]<- 0
denXMax <- max(c(denX_PC$y, denX_NC$y))
par(mar=c(0,4,4,0))
par(fig=c(0.1,0.7,0.7,0.9),new=TRUE)
plot(denX_PC$x, denX_PC$y, xlim=c(xmin,xmax), ylim=c(0,denXMax), type='l', axes=FALSE, col=2, xlab="", ylab="", lwd=2, main=cellID)
lines(denX_NC$x, denX_NC$y, col=4, lwd=2)
rgb.val<- col2rgb(cpos)
polygon(denX_PC, col=rgb(rgb.val[1]/255,rgb.val[2]/255,rgb.val[3]/255,alpha=0.3), lwd=0.1)
rgb.val<- col2rgb(cneg)
polygon(denX_NC, col=rgb(rgb.val[1]/255,rgb.val[2]/255,rgb.val[3]/255,alpha=0.3), lwd=0.1)
dev.off()
|
c066419b80bcc70d63b97106f7a668bc0f677478
|
8a736317e9732b939803d041f2448c125ff49e5f
|
/man/Input_output.Rd
|
b91d4ce07c15d4140855c668f1e761f16fcdc5ee
|
[] |
no_license
|
mbojan/isnar
|
f753c9d6a6c2623e7725c2f03035c1f9cc89ba85
|
56177701d509b267eff845e15514e4cf48e69675
|
refs/heads/master
| 2021-06-10T15:32:48.345605
| 2021-02-17T20:00:40
| 2021-02-17T20:00:40
| 19,937,772
| 8
| 3
| null | 2015-04-03T12:41:31
| 2014-05-19T10:28:23
|
R
|
UTF-8
|
R
| false
| true
| 384
|
rd
|
Input_output.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Input_output.R
\docType{data}
\name{Input_output}
\alias{Input_output}
\title{Input-output table for U.S. economy}
\format{22-by-22 numeric matrix with dimnames.}
\source{
TODO
}
\description{
Based on data from TODO
}
\references{
TODO: Bojanowski () "Industrial structure and interfirm collaboration".
}
|
4dc37ebd19ae2700f3d05bee1088d5f8006215c1
|
9f5ccb4b451aa7c7e91f21942e828c237d5e8c0f
|
/cachematrix.R
|
de48a247339e1ce93f4626635ff1e5e576e5c95c
|
[] |
no_license
|
jdpacheco/ProgrammingAssignment2
|
70b0de7f306833861c103be2d070654579cd9e89
|
8156b69fc92708187bec42f3389c81bd48eebfe5
|
refs/heads/master
| 2021-01-20T09:36:14.221357
| 2015-04-26T06:06:20
| 2015-04-26T06:06:20
| 34,598,817
| 0
| 0
| null | 2015-04-26T05:11:46
| 2015-04-26T05:11:46
| null |
UTF-8
|
R
| false
| false
| 888
|
r
|
cachematrix.R
|
## Due to the costly nature of the 'solve' function, and finding
## inverses of Matrices in general, to save some cycles, I have created
## a matrix which can cache its solve result
## This is the new "data structure", which is the matrix with a memory cache
## Constructor for a matrix wrapper that can cache its inverse.
##
## Returns a list of accessor closures sharing one enclosing environment:
##   set(y)      - replace the stored matrix and invalidate the cached inverse
##   get()       - return the stored matrix
##   setSolve(s) - store a computed inverse
##   getSolve()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    x <<- y
    # BUG FIX: was `i <<- null` (lowercase), which raised
    # "object 'null' not found" every time set() was called.
    i <<- NULL
  }
  get <- function() x
  setSolve <- function(solve) i <<- solve
  getSolve <- function() i
  list(set = set, get = get, setSolve = setSolve, getSolve = getSolve)
}
## This will check to see if the solve has been stored, returning that if so, calculating new otherwise.
## Return the inverse of a cache-matrix created by makeCacheMatrix().
## If the inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored via x$setSolve(), and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
6f787743ec33636b5ab97a8e4fd46e74578314d4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/AllPossibleSpellings/R/batch.possSpells.fnc.R
|
157f1ec9656f4c6475efb79b3e786804068cd17b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 382
|
r
|
batch.possSpells.fnc.R
|
## Batch-run possSpells.fnc() over saved .rda files.
##
## For each file in `fn` (default: every .rda in the working directory):
## log progress to "<stem>_log.txt", load the saved `words` object,
## generate all possible spellings, and write them to "<stem>.txt".
##
## Returns NULL invisibly; called for its file-writing side effects.
batch.possSpells.fnc <- function(fn = list.files(pattern = ".*\\.rda")) {
  for (file in fn) {
    stem <- gsub("(.*)\\.rda", "\\1", file)
    sink(file = paste(stem, "_log.txt", sep = ""), split = TRUE)
    # Guarantee the sink is closed even if load() or possSpells.fnc() fails;
    # otherwise all subsequent console output would stay silently redirected.
    tryCatch({
      cat("loading file", file, "\n")
      # Load into a private environment instead of clobbering this function's
      # frame; the .rda is expected to contain an object named `words`.
      e <- new.env(parent = emptyenv())
      load(file, envir = e)
      possible.spellings <- possSpells.fnc(words = e$words)
      write(possible.spellings, file = paste(stem, ".txt", sep = ""))
    }, finally = sink(file = NULL))
  }
  invisible(NULL)
}
|
abc07ea37d4260812c6f567260c8315fb16cc7bb
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/SciencesPo/R/stratified.R
|
5b5a423c94cc348b29a3c731beae1c86970e2cb7
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,121
|
r
|
stratified.R
|
#' @encoding UTF-8
#' @title Stratified Sampling
#'
#' @description A handy function for sampling row values of a data.frame conditional to some strata.
#'
#' @param .data The data.frame from which the sample is desired.
#' @param group The grouping factor, may be a list.
#' @param size The sample size. A single value < 1 is treated as a fraction of
#'   each stratum; a single value >= 1 as a fixed count per stratum; a vector
#'   (optionally named by stratum) as per-stratum counts.
#' @param select If sampling from a specific group or list of groups.
#' @param replace Should sampling be with replacement?
#' @param both.sets If TRUE, both `sample` and `.data` are returned.
#'
#' @keywords Manipulation
#'
#' @examples
#' # Generate a couple of sample data.frames to play with
#'
#' set.seed(51)
#' dat1 <- data.frame(ID = 1:100, A = sample(c("AA", "BB", "CC", "DD", "EE"),
#' 100, replace = TRUE), B = rnorm(100), C = abs(round(rnorm(100), digits = 1)),
#' D = sample(c("CA", "NY", "TX"), 100, replace = TRUE), E = sample(c("M","F"),
#' 100, replace = TRUE))
#'
#' # Let's take a 10% sample from all -A- groups in dat1
#' stratified(dat1, "A", 0.1)
#'
#' # Let's take a 10% sample from only 'AA' and 'BB' groups from -A- in dat1
#' stratified(dat1, "A", 0.1, select = list(A = c("AA", "BB")))
#'
#' # Let's take 5 samples from all -D- groups in dat1, specified by column
#' stratified(dat1, group = 5, size = 5)
#'
#' # Let's take a sample from all -A- groups in dat1, where we specify the
#' # number wanted from each group
#' stratified(dat1, "A", size = c(3, 5, 4, 5, 2))
#'
#' # Use a two-column strata (-E- and -D-) but only interested in cases where
#' # -E- == 'M'
#' stratified(dat1, c("E", "D"), 0.15, select = list(E = "M"))
#'
#' @export
`stratified` <- function(.data, group, size, select = NULL,
                        replace = FALSE, both.sets = FALSE) {
  # Optionally restrict to rows whose values match every entry of `select`
  # (a named list of column -> allowed values).
  if (is.null(select)) {
    .data <- .data
  } else {
    if (is.null(names(select))) stop("'select' must be a named list")
    if (!all(names(select) %in% names(.data)))
      stop("Please verify your 'select' argument")
    # one logical column per select entry; keep rows matching all of them
    temp <- sapply(names(select),
                   function(x) .data[[x]] %in% select[[x]])
    .data <- .data[rowSums(temp) == length(select), ]
  }
  # Build the strata: one factor level per observed combination of `group`
  .data.interaction <- interaction(.data[group], drop = TRUE)
  .data.table <- table(.data.interaction)
  .data.split <- split(.data, .data.interaction)
  # Resolve `size` into a named per-stratum sample-count vector `n`
  if (length(size) > 1) {
    if (length(size) != length(.data.split))
      stop("Number of groups is ", length(.data.split),
           " but number of sizes supplied is ", length(size))
    if (is.null(names(size))) {
      # unnamed vector: assume it is in stratum order and echo the mapping
      n <- stats::setNames(size, names(.data.split))
      message(sQuote("size"), " vector entered as:\n\nsize = structure(c(",
              paste(n, collapse = ", "), "),\n.Names = c(",
              paste(shQuote(names(n)), collapse = ", "), ")) \n\n")
    } else {
      ifelse(all(names(size) %in% names(.data.split)),
             n <- size[names(.data.split)],
             stop("Named vector supplied with names ",
                  paste(names(size), collapse = ", "),
                  "\n but the names for the group levels are ",
                  paste(names(.data.split), collapse = ", ")))
    }
  } else if (size < 1) {
    # fraction of each stratum
    n <- round(.data.table * size, digits = 0)
  } else if (size >= 1) {
    # fixed count; fall back to the whole stratum when it is too small
    # (unless sampling with replacement)
    if (all(.data.table >= size) || isTRUE(replace)) {
      n <- stats::setNames(rep(size, length.out = length(.data.split)),
                           names(.data.split))
    } else {
      message(
        "Some groups\n---",
        paste(names(.data.table[.data.table < size]), collapse = ", "),
        "---\ncontain fewer observations",
        " than desired number of samples.\n",
        "All observations have been returned from those groups.")
      n <- c(sapply(.data.table[.data.table >= size], function(x) x = size),
             .data.table[.data.table < size])
    }
  }
  # Draw the per-stratum row samples and bind them back together
  temp <- lapply(
    names(.data.split),
    function(x) .data.split[[x]][sample(.data.table[x],
                                        n[x], replace = replace), ])
  set1 <- do.call("rbind", temp)
  if (isTRUE(both.sets)) {
    # also return the unsampled remainder (matched by rowname)
    set2 <- .data[!rownames(.data) %in% rownames(set1), ]
    list(SET1 = set1, SET2 = set2)
  } else {
    set1
  }
}### end -- stratified function
NULL
|
218b00d9696a76627dcad60b75ca4cfd2a827e38
|
dc98c78d24a63b9d6420b3883a2d3d427c74b292
|
/man/box.scale.Rd
|
e07d265d69ade24de572cb8a170b3a04eae40e3c
|
[] |
no_license
|
vjcitn/parody2
|
1af69344a19ba841233cb2ddaf5efda1353190bb
|
190ccc2306197ca4ddd7eea13cda91ffd93c66bd
|
refs/heads/main
| 2023-02-19T11:12:29.859406
| 2021-01-23T10:21:28
| 2021-01-23T10:21:28
| 330,368,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
rd
|
box.scale.Rd
|
\name{box.scale}
% [1] "al" "box.scale" "calout.detect" "ckesd"
% [5] "gesdri" "hamp.scale.3" "hamp.scale.4" "hampor"
% [9] "hampoutinds" "lamtab" "logit" "prompt.default"
%[13] "rouor" "rououtinds" "shorth" "shorth.scale"
%[17] "skesd" "tukeyor" "tukeyorinds"
%
\alias{box.scale}
\alias{hamp.scale.3}
\alias{hamp.scale.4}
\alias{shorth.scale}
\title{ calibrated scaling inlier multiplier radius for various outlier detection approaches }
\description{ calibrated scaling inlier multiplier radius for various outlier detection approaches }
\usage{
box.scale(n, alpha=0.05)
}
\arguments{
\item{n}{ n}
\item{alpha}{ alpha}
}
\author{Vince Carey <stvjc@channing.harvard.edu>}
\examples{
box.scale(20)
}
\keyword{ models }
|
c3ba2c49e4f713e4e7ceafaa94d4f0c8233fbc04
|
8d9cc3035e8daf324a5a29f1908a1b33cc56938f
|
/scripts/01_data.R
|
339b76223fa7c1d136501efcf5e81e3c36901d4f
|
[] |
no_license
|
JClingo/data-science-methods-final
|
5973bad51224c4c548086906a1ebe988dc1a14f3
|
ebd0ebe00275976b8c0544548fdd08ba8edc0687
|
refs/heads/main
| 2023-01-30T02:06:47.556128
| 2020-12-18T00:37:42
| 2020-12-18T00:37:42
| 316,864,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,159
|
r
|
01_data.R
|
#' ---
#' title: Data processing of 'An exploration of cross-cultural research on bias against atheists'
#' author: "Joshua Clingo"
#' email: "jclingo@ucmerced.edu"
#'
#' output:
#'   rmarkdown::html_document:
#'   toc: false
#'   code_folding: "hide"
#' ---
## This script cleans the data and stores it for later analysis
#+ setup
library(tidyverse)
# Output/input directory, relative to the script location
data_dir = file.path('..', 'data')
if (!dir.exists(data_dir)) {
    dir.create(data_dir)
}
## Load data ----
dataf_raw = read.csv(file.path(data_dir, 'aggregate-data.csv'))
## Strip out incomplete records
#' Note: Error rates tend to vary wildly across countries so any score comparison
#' should either be done for a single country or should normalize first
#' Note: Political data is quite incomplete (more than 1400 records are missing this)
#' We will be excluding it from the analysis for this reason
#' Note: RelID sometimes came through a textual description but this was rare
#' enough that we'll just exclude those with the other malformed rows
#'
# NOTE(review): the filters compare against the string 'NA', not R's NA --
# presumably the CSV encodes missing values literally as "NA"; confirm.
dataf = dataf_raw %>%
  subset(select = -c(PoliticsC, X)) %>%
  filter(Error != 'NA',
         BiG100 != 'NA',
         GenFem != 'NA',
         Age != 'NA',
         Education != 'NA',
         SES != 'NA',
         as.integer(RelID) != 'NA')
dataf$RelID = as.integer(dataf$RelID)
#' Strip out non-standard religious affiliations
dataf = dataf %>%
  filter(RelID < 11)
#' Get scaled values for quantities
# z-scores ([,] drops the matrix attributes scale() returns)
dataf$BiG_z = scale(dataf$BiG100, scale=T, center=T)[,]
dataf$Age_z = scale(dataf$Age, scale=T, center=T)[,]
dataf$Education_z = scale(dataf$Education, scale=T, center=T)[,]
dataf$SES_z = scale(dataf$SES, scale=T, center=T)[,]
dataf$GenFem_z = scale(dataf$GenFem, scale=T, center=T)[,]
#' Check that we've manually dealt with missing data
#' dataf <- dataf[complete.cases(dataf),]
#' ^ Running this yields the same number of rows -- looks like we're good
#'
#'
## All in all, we've lost a several hundred rows and the results from China (mainland), which used nonstandard religious IDs
summary(dataf)
## Write output ----
saveRDS(dataf, file.path(data_dir, '01_data.Rds'))
|
b524592c2b6f01a34ea7948a1b678473bc77c4da
|
67337094711ea45a7734f825a36951b28f7ab7ba
|
/man/plot_comparison.Rd
|
b2f636f494613fc79f1d5aae71a4f2369e33ef5c
|
[] |
no_license
|
jashu/itrak
|
74ca4046df8862ec16ce037f496b5bb55e603ce8
|
6a57c4bd2f6deba8cefd6786406428e0350b601d
|
refs/heads/master
| 2021-01-18T23:54:58.656427
| 2020-04-30T23:01:01
| 2020-04-30T23:01:01
| 46,810,030
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,248
|
rd
|
plot_comparison.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_comparison.R
\name{plot_comparison}
\alias{plot_comparison}
\title{Plot Comparison of Time Series}
\usage{
plot_comparison(data, time, pre, post, trial = NULL)
}
\arguments{
\item{data}{Data frame containing both time series.}
\item{time}{Name of variable that gives time units.}
\item{pre}{Name of variable containing the pre-processed time series.}
\item{post}{Name of variable containing the post-processed time series.}
\item{trial}{If the data contains multiple time series, name of the variable
that identifies each time series. Will be plotted using \code{facet_wrap}.}
}
\description{
\code{plot_comparison} helps to visualize the transformation of your original
time series into a subsequent, cleaner version.
}
\details{
Use this plotting function to evaluate the performance of the
\code{\link{fix_artifacts}}, \code{\link{normalize}}, and/or
\code{\link{low_pass_filter}} functions on cleaning your time series. Note
that if your original and cleaned time series are at different scales (e.g.,
if you are comparing a pre-normed version to a post-normed version),
the original time series will be projected to the scale of
the cleaned time series.
}
|
b6b3e7d627fe577f6c84eca2c9aadd1acd8cc6ce
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MRMR/examples/CreateEvaluationDates.Rd.R
|
05918b21d0e77efd6f58af4934af35e4e0129cb8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
CreateEvaluationDates.Rd.R
|
# Auto-extracted example for MRMR::CreateEvaluationDates (triangle evaluation
# dates); the demo body is wrapped in "Not run" (##D lines).
library(MRMR)
### Name: CreateEvaluationDates
### Title: Create triangle evaluation dates
### Aliases: CreateEvaluationDates
### ** Examples
## Not run: 
##D OriginStart = c(mdy("1/1/2000"), mdy("1/1/2000"), mdy("1/1/2001"))
##D OriginEnd = c(mdy("12/31/2000"), mdy("12/31/2000"), mdy("12/31/2001"))
##D OriginPeriod = CreateOriginPeriods(OriginStart, OriginEnd)
##D DevelopmentLag = c(months(12), months(24), months(12))
##D 
##D EvaluationDates = CreateEvaluationDates(OriginPeriod, DevelopmentLag)
##D EvaluationDates
## End(Not run)
|
d28cafd282a89881f3d07eb8fa21ceb4dd582a09
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556933-test.R
|
aef77b100532824b52904b7b117352af0a6b8213
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 282
|
r
|
1610556933-test.R
|
# Fuzzer-generated regression input for biwavelet's internal
# rcpp_row_quantile(): a 3x4 matrix with extreme double values in row 1
# and zeros elsewhere, evaluated at quantile q = 0.
testlist <- list(data = structure(c(3.34809087500923e-115, 3.33821168147722e+151, 3.94604863549254e-114, 4.6343369826479e+252, 6.69422745814845e+223, 4.86113721284491e-63, 0, 0, 0, 0, 0, 0), .Dim = 3:4), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
a587e3df358ccb51c99b7c4a65d30812ab9ee49e
|
d603c9dfc5a7268c5bfda58327cfb0bfc61e6d74
|
/code/R/test_model.R
|
c946d96db0a4b2a64d6b3d857f27a70e0e69cd79
|
[
"MIT"
] |
permissive
|
sammorris81/extreme-decomp
|
ee60f2913eda7e664324ad4abccfa53c9f1fb334
|
a412a513f2b9cde075ecc8842729cc0f7ec32150
|
refs/heads/master
| 2021-01-20T19:00:10.370167
| 2018-02-22T07:03:36
| 2018-02-22T07:03:36
| 37,218,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178,565
|
r
|
test_model.R
|
# Gibbs-sampler sanity check for the GP regression coefficients (beta):
# simulate data from a known beta, then run updateGPBeta() and watch the
# traceplots recover the true values.
# NOTE(review): rX, getXBeta, updateGPBeta, openblas.set.num.threads come from
# the source()d helper files below -- not visible here.
rm(list=ls())
library(fields)
library(Rcpp)
library(emulator)
library(microbenchmark)
library(SpatialExtremes)
library(numDeriv)
library(fields)
#### testing beta ####
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# ns sites, nt replicates, np covariates
ns <- 400
nt <- 2
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)                # true coefficients to recover
phi <- 0.2                                # spatial range of the exponential cov
s <- cbind(runif(ns), runif(ns))          # site locations on the unit square
d <- rdist(s)
Sigma <- exp(-d / phi)
tau <- rgamma(nt, 0.5, 0.5)               # per-replicate precisions
Qb <- chol2inv(chol(Sigma))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
# simulate mu ~ GP(Xb, Sigma / tau[t]) for each replicate
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values
SS <- diag(quad.form(Qb, mu - Xb))
niters <- 10000
beta.keep <- matrix(0, niters, np)
beta <- rep(0, np)
set.seed(3366)  # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = 100, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # every 500 iterations, traceplot the last <=2000 draws against truth
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(2, np / 2))
    for (i in 1:6) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("Beta = ", round(beta.t[i], 3)))
    }
  }
}
#### testing phi ####
# Tests updateGPBW alone: Metropolis update of the spatial range parameter
# phi, conditional on the true latent surfaces, coefficients, and precisions.
# Fix: removed a duplicated dead assignment `phi <- 0.05` (it appeared both
# before and after the Qb/SS initialization; the second was redundant).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 10
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau <- rgamma(nt, 0.5, 0.5)
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
# Simulate the latent GP surfaces at the true range phi.t.
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values: start phi away from truth and rebuild Qb / SS from it.
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- diag(quad.form(Qb, mu - Xb))
niters <- 10000
phi.keep <- rep(0, niters)
# Metropolis bookkeeping: acceptance count, attempt count, proposal sd.
acc.phi <- att.phi <- MH.phi <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  # updateGPBW takes both latent surfaces; here mu is passed for both slots
  # since only one surface is being tested.
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  # Adapt the proposal sd toward a target acceptance rate.
  this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  MH.phi <- this.update$MH
  phi.keep[iter] <- phi
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    plot(phi.keep[start:iter], type = "l")
  }
}
#### testing tau ####
# Tests updateGPTau alone: conjugate gamma update of the per-day precision
# parameters, conditional on the true quadratic forms SS.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 0.5, 0.5)
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
# Latent GP surfaces simulated at the true precisions tau.t.
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)
# initialize values
niters <- 10000
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # One trace plot per day, labeled with the true tau.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(4, 3))
    for (t in 1:nt) {
      plot(tau.keep[start:iter, t], type = "l",
           main = paste("tau = ", round(tau.t[t], 3)))
    }
  }
}
#### testing tau, phi, and beta ####
# Joint test: sample beta, its hyper-sd, tau, and the spatial range phi
# together, conditional on the true latent surface mu.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# initialize values: all sampled parameters start away from truth.
beta <- rep(0, np)
Xb <- getXBeta(X = X, beta = beta)
tau <- rep(1, nt)
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)
niters <- 2000
burn <- 1500
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
phi.keep <- rep(0, niters)
acc.phi <- att.phi <- MH.phi <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # Hyperprior update for the sd of the beta prior.
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 0.1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Metropolis update for phi; mu is passed for both surface slots since
  # only one latent surface exists in this test.
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  phi.keep[iter] <- phi
  # Only adapt the proposal sd during the first half of burn-in so the
  # post-adaptation chain is a valid Markov chain.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
    acc.phi <- this.update$acc
    att.phi <- this.update$att
    MH.phi <- this.update$MH
  }
  if (iter %% 100 == 0) {
    par(mfrow = c(5, 3))
    for (i in 1:np) {
      plot(beta.keep[1:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    plot(beta.sd.keep[1:iter], type = "l", main = "beta sd")
    plot(phi.keep[1:iter], type = "l", main = paste("phi: ", phi.t))
    for(i in 1:7) {
      plot(tau.keep[1:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu ####
# Tests updateMuTest alone: Metropolis update of the GEV location surface mu,
# with all other parameters held at their true values.
# Fix: removed dead statement `Sigma <- solve(Qb.t * tau.t[t])` — it used the
# stale loop index `t`, performed an elementwise (not matrix) scaling, and
# `Sigma` was never used afterwards.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# GEV observations with log-scale 0 and shape xi.t at the true locations.
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: perturb the true surface and build current likelihood.
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
mu.keep <- array(0, dim = c(niters, ns, nt))
# Per-site, per-day Metropolis bookkeeping.
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau.t, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # Adapt proposal sds only in the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
  }
}
#### testing mu and tau ####
# Joint test: sample the location surface mu and the precisions tau together,
# everything else fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: perturbed surface, current log-likelihood per cell.
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 60000
burn <- 50000
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # mu update conditions on the *current* sampled tau.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # Conjugate tau update from the refreshed SS.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu, tau, and beta ####
# Joint test: sample beta, its hyper-sd, mu, and tau together.
# Fix: updateMuTest previously received `tau = tau.t` (the true precisions)
# while tau was simultaneously being sampled from SS(mu). That made the
# conditioning inconsistent; it now uses the current sampled `tau`, matching
# the "testing mu and tau" section above.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 20000
burn <- 15000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # Condition the mu update on the current sampled tau (was tau.t — bug).
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau, Xb = Xb,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### Verify gradients - no residual dependence ####
# Checks the analytic gradients of the mu and log-sigma log-posteriors against
# numerical gradients (numDeriv::grad). The ratio analytic/numeric should have
# mean ~1 and sd ~0 if the analytic gradient is correct.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# Small problem so numerical differentiation is cheap.
ns <- 10
nt <- 3
np <- 6
X1 <- rX(ns, nt, np)
X2 <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
# NOTE(review): all calls below use t = nt left over from the loop above.
lp.mu <- logpost.mu.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                         Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t)
# Ratio of numerical to analytic gradient for mu: expect mean 1, sd ~0.
mean(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
     logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                          xi = xi.t))
sd(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
   logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                        xi = xi.t))
lp.logsig <- logpost.logsig.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                                 tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                                 mu = mu.t[, t], xi = xi.t)
# Same ratio check for the log-sigma gradient.
mean(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
     logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                              tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                              mu = mu.t[, t], xi = xi.t))
sd(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
   logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                            tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                            mu = mu.t[, t], xi = xi.t))
#### testing logsig ####
# Tests updateLSTest alone: Metropolis update of the GEV log-scale surface,
# with mu, tau, xi, and the covariance fixed at truth (tau.t is correct here
# because tau is not being sampled in this section).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
# Two latent GP surfaces: mu (location) and ls (log-scale).
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: small perturbation of the true log-scale surface.
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateLSTest(ls = ls, tau = tau.t, Xb = Xb2.t, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  # Adapt proposal sds only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
  }
}
#### testing logsig, tau, and beta ####
# Joint test: sample beta2, its hyper-sd, the log-scale surface ls, and tau.
# Fix: updateLSTest previously received `tau = tau.t` (the true precisions)
# while tau was simultaneously being sampled from SS(ls); it now conditions
# on the current sampled `tau` for coherent Gibbs conditioning.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
ls.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = ls, X = X2.t, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb2 <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # Condition the ls update on the current sampled tau (was tau.t — bug).
  this.update <- updateLSTest(ls = ls, tau = tau, Xb = Xb2, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll,
                              acc = acc.ls, att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta2.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing basis bandwidth update ####
# Tests updateXBasisBW alone: Metropolis update of the kernel bandwidth that
# defines the basis-function columns of the design matrices X1 and X2.
# Fix: Xb2 was initialized with `beta = beta1.t` (copy-paste from the Xb1
# line) instead of `beta = beta2.t`, so SS2 and the bandwidth update were
# computed against an inconsistent Xb2. It now uses beta2.t.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
# Squared site-to-knot distances used by the kernel basis.
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates: intercept + centered time trend.
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values: start the bandwidth away from truth and rebuild the
# basis columns of X1/X2 at that bandwidth.
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1.t)
Xb2 <- getXBeta(X = X2, beta = beta2.t)  # was beta1.t (copy-paste bug)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
Xb1.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1.t, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2.t, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  Xb1.keep[iter, , ] <- Xb1
  # Adapt the proposal only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(2, 5))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:3) { for (j in 1:3) {
      plot(Xb1.keep[100:iter, i, j], type = "l",
           main = paste("Xb1: ", Xb1.t[i, j], sep = ""))
    }}
  }
}
#### testing basis bandwidth, beta1, beta2 ####
# Joint test: sample both coefficient vectors, their hyper-sds, and the
# kernel-basis bandwidth, conditional on the true latent surfaces.
# Fix: Xb2 was initialized with `beta = beta1` instead of `beta = beta2`
# (copy-paste from the Xb1 line). Benign only because both start at zero;
# corrected so the initialization matches intent.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates: intercept + centered time trend.
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
Xb2 <- getXBeta(X = X2, beta = beta2)  # was beta1 (copy-paste bug)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu.t, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls.t, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  # Bandwidth update conditions on the current sampled betas.
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### testing basis bandwidth, beta1, beta2, mu, and logsig ####
# Fullest joint test: sample both betas, their hyper-sds, the basis
# bandwidth, and both latent surfaces (mu, logsig).
# Fixes: (1) Xb2 was initialized with `beta = beta1` instead of `beta2`
# (copy-paste bug); (2) SS1/SS2 were initialized from the *true* surfaces
# mu.t / ls.t, but the chain starts from the perturbed surfaces mu / ls —
# they are now computed from the actual initial states, matching the
# "testing mu" section.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates: intercept + centered time trend.
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values: perturbed surfaces, zero betas, bandwidth off truth.
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
ls <- matrix(ls.t + rnorm(ns * nt), ns, nt)
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
Xb2 <- getXBeta(X = X2, beta = beta2)  # was beta1 (copy-paste bug)
# SS must reflect the current (perturbed) surfaces, not the truth.
SS1 <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  this.update <- updateMuTest(mu = mu, tau = tau1.t, Xb = Xb1, SS = SS1,
                              y = y.t, ls = ls, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS1 <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  this.update <- updateLSTest(ls = ls, tau = tau2.t, Xb = Xb2, SS = SS2,
                              y = y.t, mu = mu, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS2 <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  # Adapt all Metropolis proposals during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### Verify gradients - with residual dependence ####
# Smoke test: compare the analytic gradients of the mu and log(sigma) log
# posteriors against numerical gradients (numDeriv::grad). If the analytic
# forms are correct, the ratio numeric / analytic has mean ~1 and sd ~0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 10  # sites (small: each numerical gradient costs many evaluations)
nt <- 3   # days
np <- 6   # covariates
X1 <- rX(ns, nt, np)  # design matrix for the location surface
X2 <- rX(ns, nt, np)  # design matrix for the log-scale surface
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2  # spatial range of the exponential covariance
s <- cbind(runif(ns), runif(ns))  # site locations on the unit square
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)  # day-specific GP precisions
Qb.t <- chol2inv(chol(Sigma.t))  # spatial precision matrix
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
# Draw one GP realization per day for the location and log-scale surfaces.
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
nknots <- 4
theta.t <- matrix(abs(rnorm(ns * nt)), ns, nt)  # residual-dependence weights
alpha.t <- 0.4  # dependence parameter
thresh.t <- matrix(median(y.t), ns, nt)  # censoring threshold
# NOTE(review): xi.t is reset to 0 here, overwriting the 0.1 used to
# simulate y.t — presumably deliberate (check the Gumbel branch); confirm.
xi.t <- 0
# The checks below use day t, which equals nt after the loop above.
# Gradient check for mu.
lp.mu <- logpost.mu(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                    Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                    theta = theta.t[, t], thresh = thresh.t[, t],
                    alpha = alpha.t)
# Mean of numeric / analytic should be ~1 ...
mean(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
       logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                       Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                       xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                       alpha = alpha.t))
# ... and the sd should be ~0.
sd(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                     Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                     xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                     alpha = alpha.t))
# Same check for log(sigma).
lp.logsig <- logpost.logsig(ls = ls.t[, t], Xb = Xb2.t[, t], tau = tau.t[t],
                            Qb = Qb.t, y = y.t[, t], mu = mu.t[, t],
                            xi = xi.t, theta = theta.t[, t],
                            thresh = thresh.t[, t], alpha = alpha.t)
mean(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
       logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                           tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                           mu = mu.t[, t], xi = xi.t,
                           theta = theta.t[, t], thresh = thresh.t[, t],
                           alpha = alpha.t))
sd(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                         tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                         mu = mu.t[, t], xi = xi.t,
                         theta = theta.t[, t], thresh = thresh.t[, t],
                         alpha = alpha.t))
#### testing xi ####
# Standalone MCMC check for the GEV shape parameter xi (updateXi): simulate
# data at xi.t = -0.7 with all other parameters held at their true values,
# start the sampler at xi = 0.1, and watch the trace recover the truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 12   # days
np <- 6    # covariates
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2  # spatial range
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)  # day-specific GP precisions
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
# GP realizations for the true location and log-scale surfaces.
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- -0.7  # true shape (bounded upper tail)
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
xi <- 0.1  # deliberately wrong starting value
thresh.t <- matrix(-Inf, ns, nt)  # no censoring
theta.t <- matrix(1, ns, nt)      # no residual dependence
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu.t, ls = ls.t, xi = xi,
                 theta = theta.t, thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
xi.keep <- rep(0, niters)
acc.xi <- att.xi <- MH.xi <- 0.01  # Metropolis accept/attempt/step-size
set.seed(3366) # demo
for (iter in 1:niters) {
  # One Metropolis update of xi on [-2, 2] with a N(0, 0.5) prior.
  this.update <- updateXi(xi = xi, xi.min = -2, xi.max = 2,
                          xi.mn = 0, xi.sd = 0.5, y = y.t, mu = mu.t,
                          ls = ls.t, curll = curll, theta = theta.t,
                          thresh = thresh.t, alpha = alpha.t, acc = acc.xi,
                          att = att.xi, MH = MH.xi)
  xi <- this.update$xi
  curll <- this.update$curll
  acc.xi <- this.update$acc
  att.xi <- this.update$att
  xi.keep[iter] <- xi
  # Adapt the proposal sd only in the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.xi, att = att.xi, MH = MH.xi,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.xi <- this.update$acc
    att.xi <- this.update$att
    MH.xi <- this.update$MH
  }
  # Rolling trace plot; ylab shows acceptance rate, xlab the step size.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 20000)
    plot(xi.keep[start:iter], type = "l", main = paste("xi: ", xi.t),
         ylab = round(acc.xi / att.xi, 3),
         xlab = MH.xi)
  }
}
#### testing beta1 ####
# Standalone MCMC check for the site/day-level location coefficients
# (updateBeta1): each site/day has an intercept and a time-slope GP
# realization; starting from truth + noise, the sampler should recover
# both surfaces.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 600  # sites
nt <- 30   # days
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)   # precision of the intercept GP
tau1.time.t <- rgamma(1, 1, 1)  # precision of the time-slope GP
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt  # centered and scaled time covariate
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t [, t] * time[t]
}
ls.t <- matrix(0, ns, nt)  # log-scale fixed at 0 (unit scale)
xi.t <- 0                  # Gumbel case
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values at the truth plus N(0, 1) noise
beta1.int <- beta1.int.t + rnorm(ns * nt)
beta1.time <- beta1.time.t + rnorm(ns * nt)
mu <- matrix(0, ns, nt)
SS1.int <- SS1.time <- rep(0, nt)
# Rebuild mu and the GP quadratic forms from the perturbed starting values.
for (t in 1:nt) {
  mu[, t] <- beta1.int[, t] + beta1.time[, t] * time[t]
  SS1.int[t] <- quad.form(Qb.t, beta1.int[, t] - beta1.int.mn.t)
  SS1.time[t] <- quad.form(Qb.t, beta1.time[, t] - beta1.time.mn.t)
}
thresh.t <- matrix(-Inf, ns, nt)  # no censoring
theta.t <- matrix(1, ns, nt)      # no residual dependence
theta.xi.t <- theta.t^xi.t
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu, ls = ls.t, xi = xi.t,
                 theta = theta.t, theta.xi = theta.xi.t,
                 thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
keep.beta1.int <- keep.beta1.time <- array(0, dim = c(niters, ns, nt))
acc.beta1 <- att.beta1 <- MH.beta1 <- matrix(0.1, ns, nt)  # per site/day MH
set.seed(3366) # demo
for (iter in 1:niters) {
  # Joint Metropolis update of intercept and slope surfaces.
  this.update <- updateBeta1(beta.int = beta1.int, beta.int.mn = beta1.int.mn.t,
                             SS.int = SS1.int, tau.int = tau1.int.t,
                             beta.time = beta1.time,
                             beta.time.mn = beta1.time.mn.t,
                             SS.time = SS1.time, tau.time = tau1.time.t,
                             mu = mu, time = time,
                             y = y.t, theta = theta.t, theta.xi = theta.xi.t,
                             ls = ls.t, xi = xi.t, thresh = thresh.t,
                             alpha = alpha.t, Qb = Qb.t, curll = curll,
                             acc = acc.beta1, att = att.beta1, MH = MH.beta1)
  beta1.int <- this.update$beta.int
  SS1.int <- this.update$SS.int
  beta1.time <- this.update$beta.time
  SS1.time <- this.update$SS.time
  mu <- this.update$mu
  curll <- this.update$curll
  acc.beta1 <- this.update$acc
  att.beta1 <- this.update$att
  keep.beta1.int[iter, , ] <- beta1.int
  keep.beta1.time[iter, , ] <- beta1.time
  # Tune proposal sds only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.beta1, att = att.beta1, MH = MH.beta1,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.beta1 <- this.update$acc
    att.beta1 <- this.update$att
    MH.beta1 <- this.update$MH
  }
  # Trace plots for the first two sites on the first four days.
  if (iter %% 500 == 0) {
    par(mfrow = c(4, 4))
    acc.rate <- round(acc.beta1 / att.beta1, 3)
    for (i in 1:4) {
      plot(keep.beta1.int[1:iter, 1, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.time[1:iter, 1, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.int[1:iter, 2, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
      plot(keep.beta1.time[1:iter, 2, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
    }
  }
}
#### Verify gradients - on beta ####
# Smoke test: analytic-vs-numerical gradient checks for the four
# coefficient-level log posteriors (beta1 intercept/time for the location,
# beta2 intercept/time for the log scale), plus a consistency and timing
# comparison of the combined beta1 gradient against the separate pieces.
# As above: ratios numeric / analytic should have mean ~1, sd ~0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 10  # sites (small so the numerical gradients are cheap)
nt <- 3   # days
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)   # precision, location intercept GP
tau1.time.t <- rgamma(1, 1, 1)  # precision, location time-slope GP
tau2.int.t <- rgamma(1, 1, 1)   # precision, log-scale intercept GP
tau2.time.t <- rgamma(1, 1, 1)  # precision, log-scale time-slope GP
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
beta2.int.t <- beta2.time.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
  beta2.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.int.t[1])
  beta2.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
beta2.int.mn.t <- beta2.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
ls.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt  # centered and scaled time covariate
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t[, t] * time[t]
  ls.t[, t] <- beta2.int.t[, t] + beta2.time.t[, t] * time[t]
}
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
nknots <- 4
theta.t <- matrix(abs(rnorm(ns * nt)), ns, nt)  # residual-dependence weights
alpha.t <- 0.4
thresh.t <- matrix(median(y.t), ns, nt)  # censoring threshold
xi.t <- 0
# All checks below use day t = nt (left over from the simulation loop).
lp.beta1.int <- logpost.beta1.int(beta.int = beta1.int.t[, t],
                                  beta.int.mn = beta1.int.mn.t,
                                  tau = tau1.int.t, Qb = Qb.t,
                                  beta.time = beta1.time.t[, t], time = time[t],
                                  y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t)
mean(grad(func = logpost.beta1.int, x = beta1.int.t[, t],
          beta.int.mn = beta1.int.mn.t, tau = tau1.int.t, Qb = Qb.t,
          beta.time = beta1.time.t[, t], time = time[t],
          y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                              beta.int.mn = beta1.int.mn.t,
                              tau = tau1.int.t, Qb = Qb.t,
                              beta.time = beta1.time.t[, t], time = time[t],
                              y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                              theta = theta.t[, t], thresh = thresh.t[, t],
                              alpha = alpha.t))
sd(grad(func = logpost.beta1.int, x = beta1.int.t[, t],
        beta.int.mn = beta1.int.mn.t, tau = tau1.int.t, Qb = Qb.t,
        beta.time = beta1.time.t[, t], time = time[t],
        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                            beta.int.mn = beta1.int.mn.t,
                            tau = tau1.int.t, Qb = Qb.t,
                            beta.time = beta1.time.t[, t], time = time[t],
                            y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                            theta = theta.t[, t], thresh = thresh.t[, t],
                            alpha = alpha.t))
# NOTE(review): the time-slope checks below pass tau = tau1.int.t rather
# than tau1.time.t — looks like copy-paste; the gradient comparison is
# still internally consistent, but confirm the intended precision.
lp.beta1.time <- logpost.beta1.time(beta.time = beta1.time.t[, t],
                                    beta.time.mn = beta1.time.mn.t,
                                    time = time[t], tau = tau1.int.t, Qb = Qb.t,
                                    beta.int = beta1.int.t[, t],
                                    y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                    theta = theta.t[, t],
                                    thresh = thresh.t[, t], alpha = alpha.t)
mean(grad(func = logpost.beta1.time, x = beta1.time.t[, t],
          beta.time.mn = beta1.time.mn.t, time = time[t], tau = tau1.int.t,
          Qb = Qb.t, beta.int = beta1.int.t[, t],
          y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                               beta.time.mn = beta1.time.mn.t, time = time[t],
                               tau = tau1.int.t, Qb = Qb.t,
                               beta.int = beta1.int.t[, t],
                               y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                               theta = theta.t[, t],
                               thresh = thresh.t[, t], alpha = alpha.t))
sd(grad(func = logpost.beta1.time, x = beta1.time.t[, t],
        beta.time.mn = beta1.time.mn.t, time = time[t], tau = tau1.int.t,
        Qb = Qb.t, beta.int = beta1.int.t[, t],
        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                             beta.time.mn = beta1.time.mn.t, time = time[t],
                             tau = tau1.int.t, Qb = Qb.t,
                             beta.int = beta1.int.t[, t],
                             y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                             theta = theta.t[, t],
                             thresh = thresh.t[, t], alpha = alpha.t))
# Check that the combined beta1 gradient matches the two separate pieces,
# then compare their runtimes.
this.grad <- logpost.beta1.grad(beta.int = beta1.int.t[, t],
                                beta.int.mn = beta1.int.mn.t,
                                beta.time = beta1.time.t[, t],
                                beta.time.mn = beta1.time.mn.t,
                                time = time[t],
                                tau = tau1.int.t, Qb = Qb.t,
                                y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                theta = theta.t[, t], thresh = thresh.t[, t],
                                alpha = alpha.t)
logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                       beta.int.mn = beta1.int.mn.t,
                       tau = tau1.int.t, Qb = Qb.t,
                       beta.time = beta1.time.t[, t], time = time[t],
                       y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                       theta = theta.t[, t], thresh = thresh.t[, t],
                       alpha = alpha.t)
logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                        beta.time.mn = beta1.time.mn.t, time = time[t],
                        tau = tau1.int.t, Qb = Qb.t,
                        beta.int = beta1.int.t[, t],
                        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                        theta = theta.t[, t],
                        thresh = thresh.t[, t], alpha = alpha.t)
microbenchmark(logpost.beta1.grad(beta.int = beta1.int.t[, t],
                                  beta.int.mn = beta1.int.mn.t,
                                  beta.time = beta1.time.t[, t],
                                  beta.time.mn = beta1.time.mn.t,
                                  time = time[t],
                                  tau = tau1.int.t, Qb = Qb.t,
                                  y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t),
               logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                                      beta.int.mn = beta1.int.mn.t,
                                      tau = tau1.int.t, Qb = Qb.t,
                                      beta.time = beta1.time.t[, t], time = time[t],
                                      y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                      theta = theta.t[, t], thresh = thresh.t[, t],
                                      alpha = alpha.t),
               logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                                       beta.time.mn = beta1.time.mn.t, time = time[t],
                                       tau = tau1.int.t, Qb = Qb.t,
                                       beta.int = beta1.int.t[, t],
                                       y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                       theta = theta.t[, t],
                                       thresh = thresh.t[, t], alpha = alpha.t))
# NOTE(review): lp.beta2.int below is computed with beta.time =
# beta1.time.t, while the grad checks use beta2.time.t — the lp value is
# unused, but this looks like copy-paste; confirm.
lp.beta2.int <- logpost.beta2.int(beta.int = beta2.int.t[, t],
                                  beta.int.mn = beta2.int.mn.t,
                                  tau = tau2.int.t, Qb = Qb.t,
                                  beta.time = beta1.time.t[, t], time = time[t],
                                  y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t)
mean(grad(func = logpost.beta2.int, x = beta2.int.t[, t],
          beta.int.mn = beta2.int.mn.t, tau = tau2.int.t, Qb = Qb.t,
          beta.time = beta2.time.t[, t], time = time[t],
          y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta2.int.grad(beta.int = beta2.int.t[, t],
                              beta.int.mn = beta2.int.mn.t,
                              tau = tau2.int.t, Qb = Qb.t,
                              beta.time = beta2.time.t[, t], time = time[t],
                              y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                              theta = theta.t[, t], thresh = thresh.t[, t],
                              alpha = alpha.t))
sd(grad(func = logpost.beta2.int, x = beta2.int.t[, t],
        beta.int.mn = beta2.int.mn.t, tau = tau2.int.t, Qb = Qb.t,
        beta.time = beta2.time.t[, t], time = time[t],
        y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta2.int.grad(beta.int = beta2.int.t[, t],
                            beta.int.mn = beta2.int.mn.t,
                            tau = tau2.int.t, Qb = Qb.t,
                            beta.time = beta2.time.t[, t], time = time[t],
                            y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                            theta = theta.t[, t], thresh = thresh.t[, t],
                            alpha = alpha.t))
# NOTE(review): lp.beta2.time below passes tau = tau2.int.t and
# beta.int = beta1.int.t where tau2.time.t / beta2.int.t look intended;
# the lp value is unused, but confirm.
lp.beta2.time <- logpost.beta2.time(beta.time = beta2.time.t[, t],
                                    beta.time.mn = beta2.time.mn.t,
                                    time = time[t], tau = tau2.int.t, Qb = Qb.t,
                                    beta.int = beta1.int.t[, t],
                                    y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                                    theta = theta.t[, t], thresh = thresh.t[, t],
                                    alpha = alpha.t)
mean(grad(func = logpost.beta2.time, x = beta2.time.t[, t],
          beta.time.mn = beta2.time.mn.t, time = time[t], tau = tau2.int.t,
          Qb = Qb.t, beta.int = beta2.int.t[, t],
          y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta2.time.grad(beta.time = beta2.time.t[, t],
                               beta.time.mn = beta2.time.mn.t,
                               time = time[t], tau = tau2.int.t, Qb = Qb.t,
                               beta.int = beta2.int.t[, t],
                               y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                               theta = theta.t[, t], thresh = thresh.t[, t],
                               alpha = alpha.t))
sd(grad(func = logpost.beta2.time, x = beta2.time.t[, t],
        beta.time.mn = beta2.time.mn.t, time = time[t], tau = tau2.int.t,
        Qb = Qb.t, beta.int = beta2.int.t[, t],
        y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta2.time.grad(beta.time = beta2.time.t[, t],
                             beta.time.mn = beta2.time.mn.t,
                             time = time[t], tau = tau2.int.t, Qb = Qb.t,
                             beta.int = beta2.int.t[, t],
                             y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                             theta = theta.t[, t], thresh = thresh.t[, t],
                             alpha = alpha.t))
#### testing beta1 ####
# Profiled rerun of the beta1 test: same structure as the earlier
# "testing beta1" section, but with the byte-code JIT enabled and Rprof
# wrapped around the chain to find hot spots in updateBeta1.
rm(list=ls())
library(compiler)
enableJIT(3)  # byte-compile all functions for the profiling run
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
openblas.set.num.threads(3)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 600  # sites
nt <- 30   # days
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)   # precision of the intercept GP
tau1.time.t <- rgamma(1, 1, 1)  # precision of the time-slope GP
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt  # centered and scaled time covariate
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t [, t] * time[t]
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values at the truth plus N(0, 1) noise
beta1.int <- beta1.int.t + rnorm(ns * nt)
beta1.int.mn <- 0   # currently unused: updateBeta1 receives the true mean
beta1.time <- beta1.time.t + rnorm(ns * nt)
beta1.time.mn <- 0  # currently unused: updateBeta1 receives the true mean
mu <- matrix(0, ns, nt)
SS1.int <- SS1.time <- rep(0, nt)
# Rebuild mu and the GP quadratic forms from the perturbed starting values.
for (t in 1:nt) {
  mu[, t] <- beta1.int[, t] + beta1.time[, t] * time[t]
  SS1.int[t] <- quad.form(Qb.t, beta1.int[, t] - beta1.int.mn.t)
  SS1.time[t] <- quad.form(Qb.t, beta1.time[, t] - beta1.time.mn.t)
}
thresh.t <- matrix(-Inf, ns, nt)  # no censoring
theta.t <- matrix(1, ns, nt)      # no residual dependence
theta.xi.t <- theta.t^xi.t
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu, ls = ls.t, xi = xi.t,
                 theta = theta.t, theta.xi = theta.xi.t,
                 thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
keep.beta1.int <- keep.beta1.time <- array(0, dim = c(niters, ns, nt))
# Fixed: preallocate one slot per iteration (was rep(0), a length-1 vector).
keep.beta1.int.mn <- keep.beta1.time.mn <- rep(0, niters)
acc.beta1 <- att.beta1 <- MH.beta1 <- matrix(0.01, ns, nt)
set.seed(3366) # demo
Rprof(filename = "Rprof.out", line.profiling = TRUE)
for (iter in 1:niters) {
  # Joint Metropolis update of intercept and slope surfaces.
  this.update <- updateBeta1(beta.int = beta1.int, beta.int.mn = beta1.int.mn.t,
                             SS.int = SS1.int, tau.int = tau1.int.t,
                             beta.time = beta1.time,
                             beta.time.mn = beta1.time.mn.t,
                             SS.time = SS1.time, tau.time = tau1.time.t,
                             mu = mu, time = time,
                             y = y.t, theta = theta.t, theta.xi = theta.xi.t,
                             ls = ls.t, xi = xi.t, thresh = thresh.t,
                             alpha = alpha.t, Qb = Qb.t, curll = curll,
                             acc = acc.beta1, att = att.beta1, MH = MH.beta1)
  beta1.int <- this.update$beta.int
  SS1.int <- this.update$SS.int
  beta1.time <- this.update$beta.time
  SS1.time <- this.update$SS.time
  mu <- this.update$mu
  curll <- this.update$curll
  acc.beta1 <- this.update$acc
  att.beta1 <- this.update$att
  keep.beta1.int[iter, , ] <- beta1.int
  keep.beta1.time[iter, , ] <- beta1.time
  # Tune proposal sds only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.beta1, att = att.beta1, MH = MH.beta1,
                            target.min = 0.5, target.max = 0.8,
                            nattempts = 50)
    acc.beta1 <- this.update$acc
    att.beta1 <- this.update$att
    MH.beta1 <- this.update$MH
  }
  # Trace plots for the first two sites on the first four days.
  if (iter %% 100 == 0) {
    par(mfrow = c(4, 4))
    acc.rate <- round(acc.beta1 / att.beta1, 3)
    for (i in 1:4) {
      plot(keep.beta1.int[1:iter, 1, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.time[1:iter, 1, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.int[1:iter, 2, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
      plot(keep.beta1.time[1:iter, 2, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
    }
  }
}
Rprof(NULL)
summaryRprof(filename = "Rprof.out", lines = "show")
rm(list=ls())
library(fields)
library(Rcpp)
library(emulator)
library(microbenchmark)
library(SpatialExtremes)
library(numDeriv)
library(fields)
#### testing beta ####
# Standalone check for the conjugate Gibbs update of the GP regression
# coefficients (updateGPBeta): simulate GP surfaces with known beta.t,
# start beta at zero, and watch the traces recover the truth.
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 2    # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma <- exp(-d / phi)
tau <- rgamma(nt, 0.5, 0.5)  # day-specific precisions (held fixed)
Qb <- chol2inv(chol(Sigma))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {  # getXBeta returns a vector when nt == 1
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values
SS <- diag(quad.form(Qb, mu - Xb))  # per-day GP quadratic forms
niters <- 10000
beta.keep <- matrix(0, niters, np)
beta <- rep(0, np)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Conjugate draw of beta given mu (no Metropolis step needed).
  this.update <- updateGPBeta(beta.sd = 100, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # Rolling trace plots of all np coefficients.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(2, np / 2))
    for (i in 1:6) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("Beta = ", round(beta.t[i], 3)))
    }
  }
}
#### testing phi ####
# Standalone Metropolis check for the GP range parameter (updateGPBW):
# simulate GP surfaces at phi.t = 0.2, start the sampler at 0.05, and
# watch the trace recover the truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 10   # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2  # true spatial range
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau <- rgamma(nt, 0.5, 0.5)  # day-specific precisions (held fixed)
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {  # getXBeta returns a vector when nt == 1
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values: start phi well below the truth and rebuild the
# matching precision matrix and quadratic forms.
# (A duplicated second `phi <- 0.05` assignment was removed here.)
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- diag(quad.form(Qb, mu - Xb))
niters <- 10000
phi.keep <- rep(0, niters)
acc.phi <- att.phi <- MH.phi <- 0.1  # Metropolis bookkeeping
set.seed(3366) # demo
for (iter in 1:niters) {
  # updateGPBW expects both a location (mu) and log-scale (ls) surface;
  # here the same surface is passed twice so only one GP is really tested.
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  # NOTE(review): the proposal sd is tuned on every iteration here; other
  # sections gate this on iter < burn / 2. Kept as-is to preserve behavior.
  this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  MH.phi <- this.update$MH
  phi.keep[iter] <- phi
  # Rolling trace plot of the last 2000 draws.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    plot(phi.keep[start:iter], type = "l")
  }
}
#### testing tau ####
# Standalone check for the conjugate Gibbs update of the day-specific GP
# precisions (updateGPTau): with mu, beta, and phi fixed at their true
# values, the tau traces should recover tau.t.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 12   # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 0.5, 0.5)  # true day-specific precisions
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {  # getXBeta returns a vector when nt == 1
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)  # per-day quadratic forms
# initialize values
niters <- 10000
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Conjugate gamma draw of all nt precisions given SS.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Rolling trace plots for every day.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(4, 3))
    for (t in 1:nt) {
      plot(tau.keep[start:iter, t], type = "l",
           main = paste("tau = ", round(tau.t[t], 3)))
    }
  }
}
#### testing tau, phi, and beta ####
# Joint MCMC check: with the GP surfaces mu held fixed at the truth, sample
# beta (Gibbs), its prior sd, the day precisions tau (Gibbs), and the range
# phi (Metropolis) together and verify all traces recover the truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 12   # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# initialize values: beta at zero, tau at one, phi well below the truth.
beta <- rep(0, np)
Xb <- getXBeta(X = X, beta = beta)
tau <- rep(1, nt)
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)
niters <- 2000
burn <- 1500
beta.sd <- 100  # prior sd of beta (updated below)
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
phi.keep <- rep(0, niters)
acc.phi <- att.phi <- MH.phi <- 0.1  # Metropolis bookkeeping for phi
set.seed(3366) # demo
for (iter in 1:niters) {
  # (1) Conjugate draw of beta given mu.
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # (2) Update the prior sd of beta.
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 0.1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # (3) Conjugate draw of the day precisions.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # (4) Metropolis update of the range phi (mu passed as both surfaces).
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  phi.keep[iter] <- phi
  # Tune the phi proposal only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
    acc.phi <- this.update$acc
    att.phi <- this.update$att
    MH.phi <- this.update$MH
  }
  # Trace plots: beta, beta.sd, phi, and the first 7 taus.
  if (iter %% 100 == 0) {
    par(mfrow = c(5, 3))
    for (i in 1:np) {
      plot(beta.keep[1:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    plot(beta.sd.keep[1:iter], type = "l", main = "beta sd")
    plot(phi.keep[1:iter], type = "l", main = paste("phi: ", phi.t))
    for(i in 1:7) {
      plot(tau.keep[1:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu ####
# Standalone Metropolis check for the latent location surface mu
# (updateMuTest): with beta, tau, phi, and the log-scale fixed at truth,
# start mu at truth + N(0, 1) noise and watch the traces recover it.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 12   # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# (Removed a dead `Sigma <- solve(Qb.t * tau.t[t])` line that depended on
# the loop-leaked index t and was never used.)
# initialize values at the truth plus N(0, 1) noise
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
mu.keep <- array(0, dim = c(niters, ns, nt))
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)  # per site/day MH state
set.seed(3366) # demo
for (iter in 1:niters) {
  # Site/day-level Metropolis update of mu given everything else.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau.t, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # Tune proposal sds only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  # Rolling trace plots for the first 3 sites x 3 days.
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
  }
}
#### testing mu and tau ####
# Joint check: sample the latent surface mu (Metropolis, updateMuTest) and
# the day precisions tau (Gibbs, updateGPTau) together, with beta and phi
# fixed at their true values. Both sets of traces should recover the truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400  # sites
nt <- 12   # days
np <- 6    # covariates
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)  # log-scale fixed at 0
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: mu at truth + N(0, 1) noise, tau at one.
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 60000
burn <- 50000
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)  # per site/day MH state
set.seed(3366) # demo
for (iter in 1:niters) {
  # (1) Metropolis update of mu given the current tau.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # (2) Conjugate gamma draw of tau given the updated SS.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Tune the mu proposals only during the first half of burn-in.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  # Rolling trace plots: 3x3 block of mu plus the first 3 taus.
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu, tau, and beta ####
# Simulation check of the joint sampler for the GEV location surface mu, the
# GP precision tau, and the regression coefficients beta, with the log-scale
# and shape held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
# mu: GP around X %*% beta with day-specific precision tau.t[t].
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
# NOTE(review): SS is initialized with the TRUE Xb.t even though beta starts
# at 0; presumably updateGPBeta recomputes SS before it is used — confirm.
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 20000
burn <- 15000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Update beta | mu, tau (conjugate Gaussian draw).
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # Update mu | beta, tau.  BUGFIX: previously conditioned on the true
  # tau.t here while tau was being sampled (and used by the beta and tau
  # updates), making the Gibbs conditionals incoherent; use the current
  # sampled tau instead.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau, Xb = Xb,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # Update tau | mu, beta.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Tune MH proposal scales during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### Verify gradients - no residual dependence ####
# Numerically checks the analytic gradients of the log-posteriors for mu and
# log(sigma) against numDeriv::grad().  The ratio numeric / analytic should
# have mean ~ 1 and sd ~ 0 when the analytic gradient is correct.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# Small problem size: gradient checking only, not sampling.
ns <- 10
nt <- 3
np <- 6
X1 <- rX(ns, nt, np)
X2 <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
# NOTE(review): `t` below is the leftover loop index (t == nt), so all the
# checks are evaluated on the last day only.
lp.mu <- logpost.mu.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                         Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t)
# mu gradient check: mean of the ratio should be ~ 1.
mean(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
     logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                          xi = xi.t))
# ... and the sd of the ratio should be ~ 0.
sd(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
   logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                        xi = xi.t))
lp.logsig <- logpost.logsig.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                                 tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                                 mu = mu.t[, t], xi = xi.t)
# log(sigma) gradient check: same mean ~ 1 / sd ~ 0 criterion.
mean(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
     logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                              tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                              mu = mu.t[, t], xi = xi.t))
sd(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
   logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                            tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                            mu = mu.t[, t], xi = xi.t))
#### testing logsig ####
# Simulation check of the MH sampler for the GEV log-scale surface ls only;
# mu, tau, beta, and xi are all held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
# mu (location) and ls (log-scale) are independent GPs around their
# respective regression surfaces.
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
# Start close to the truth (small perturbation) since only ls is sampled.
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Update ls | everything else (MH step); tau.t is fixed at the truth here
  # because tau is not sampled in this section.
  this.update <- updateLSTest(ls = ls, tau = tau.t, Xb = Xb2.t, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  # Tune MH proposal scales during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
  }
}
#### testing logsig, tau, and beta ####
# Simulation check of the joint sampler for the GEV log-scale surface ls, the
# GP precision tau, and its regression coefficients beta, with mu and xi held
# at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
# NOTE(review): SS is initialized with the TRUE Xb2.t even though beta starts
# at 0; presumably updateGPBeta recomputes SS before it is used — confirm.
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
ls.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Update beta | ls, tau (conjugate Gaussian draw).
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = ls, X = X2.t, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb2 <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # Update ls | beta, tau.  BUGFIX: previously conditioned on the true
  # tau.t here while tau was being sampled (and used by the beta and tau
  # updates), making the Gibbs conditionals incoherent; use the current
  # sampled tau instead.
  this.update <- updateLSTest(ls = ls, tau = tau, Xb = Xb2, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll,
                              acc = acc.ls, att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  # Update tau | ls, beta.
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Tune MH proposal scales during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta2.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing basis bandwidth update ####
# Simulation check of the MH update for the kernel-basis bandwidth used in
# the covariate matrices; all other parameters (beta, mu, ls, tau, xi) are
# held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
# Space-filling knot design for the kernel basis functions.
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1.t)
# BUGFIX: Xb2 was computed with beta1.t (copy-paste error), leaving the
# cached Xb2/SS2 inconsistent with the beta2 = beta2.t passed to
# updateXBasisBW below; use beta2.t.
Xb2 <- getXBeta(X = X2, beta = beta2.t)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
Xb1.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  # MH update of the basis bandwidth; also refreshes the basis-dependent
  # covariates, regression surfaces, and prior quadratic forms.
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1.t, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2.t, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  Xb1.keep[iter, , ] <- Xb1
  # Tune the MH proposal during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 1000 == 0) {
    par(mfrow = c(2, 5))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for (i in 1:3) { for (j in 1:3) {
      plot(Xb1.keep[100:iter, i, j], type = "l",
           main = paste("Xb1: ", Xb1.t[i, j], sep = ""))
    }}
  }
}
#### testing basis bandwidth, beta1, beta2 ####
# Simulation check of the joint sampler for the basis bandwidth and both sets
# of regression coefficients (beta1 for mu, beta2 for log-scale); mu, ls,
# tau, and xi are held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
# BUGFIX: Xb2 was computed with beta1 (copy-paste error); numerically
# identical here only because both vectors start at zero — use beta2 so the
# initialization matches the sampler's state.
Xb2 <- getXBeta(X = X2, beta = beta2)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  # Update beta1 | mu (conjugate Gaussian draw) and its prior sd.
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu.t, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  # Update beta2 | ls (conjugate Gaussian draw) and its prior sd.
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls.t, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  # MH update of the basis bandwidth (refreshes X1/X2, Xb1/Xb2, SS1/SS2).
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  # Tune the MH proposal during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for (i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### testing basis bandwidth, beta1, beta2, mu, and logsig ####
# Simulation check of the joint sampler for the basis bandwidth, both sets of
# regression coefficients, and both GEV surfaces (mu and log-scale); tau and
# xi are held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
ls <- matrix(ls.t + rnorm(ns * nt), ns, nt)
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
# BUGFIX: Xb2 was computed with beta1 (copy-paste error); use beta2.
Xb2 <- getXBeta(X = X2, beta = beta2)
# BUGFIX: the initial prior quadratic forms were computed from the TRUE
# surfaces mu.t / ls.t, but the chain starts from the perturbed mu / ls
# above; initialize SS consistently with the sampler state (as the earlier
# sections do).
SS1 <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Update beta1 | mu and its prior sd.
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  # Update beta2 | ls and its prior sd.
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  # MH update of the basis bandwidth (refreshes X1/X2, Xb1/Xb2, SS1/SS2).
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  # MH update of mu | beta1, ls.
  this.update <- updateMuTest(mu = mu, tau = tau1.t, Xb = Xb1, SS = SS1,
                              y = y.t, ls = ls, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS1 <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # MH update of ls | beta2, mu.
  this.update <- updateLSTest(ls = ls, tau = tau2.t, Xb = Xb2, SS = SS2,
                              y = y.t, mu = mu, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS2 <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  # Tune all MH proposals during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  # Periodic trace plots against the true values.
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for (i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### Verify gradients - with residual dependence ####
# Numerically checks the analytic gradients of the mu and log(sigma)
# log-posteriors for the model WITH residual dependence (positive-stable
# random effects theta, dependence parameter alpha, and a censoring
# threshold) against numDeriv::grad().  The ratio numeric / analytic should
# have mean ~ 1 and sd ~ 0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# Small problem size: gradient checking only, not sampling.
ns <- 10
nt <- 3
np <- 6
X1 <- rX(ns, nt, np)
X2 <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
nknots <- 4
theta.t <- matrix(abs(rnorm(ns * nt)), ns, nt)
alpha.t <- 0.4
# Censoring threshold at the overall data median.
thresh.t <- matrix(median(y.t), ns, nt)
# NOTE(review): xi.t is re-assigned to 0 AFTER y.t was generated with 0.1 —
# presumably intentional, to evaluate the gradients at the Gumbel (xi = 0)
# branch; confirm.
xi.t <- 0
# `t` below is the leftover loop index (t == nt): checks run on the last day.
lp.mu <- logpost.mu(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                    Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                    theta = theta.t[, t], thresh = thresh.t[, t],
                    alpha = alpha.t)
# mu gradient check: mean of the ratio should be ~ 1, sd ~ 0.
mean(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                     Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                     xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                     alpha = alpha.t))
sd(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
   logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                   Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                   xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                   alpha = alpha.t))
lp.logsig <- logpost.logsig(ls = ls.t[, t], Xb = Xb2.t[, t], tau = tau.t[t],
                            Qb = Qb.t, y = y.t[, t], mu = mu.t[, t],
                            xi = xi.t, theta = theta.t[, t],
                            thresh = thresh.t[, t], alpha = alpha.t)
# log(sigma) gradient check: same mean ~ 1 / sd ~ 0 criterion.
mean(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                         tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                         mu = mu.t[, t], xi = xi.t,
                         theta = theta.t[, t], thresh = thresh.t[, t],
                         alpha = alpha.t))
sd(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
   logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                       tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                       mu = mu.t[, t], xi = xi.t,
                       theta = theta.t[, t], thresh = thresh.t[, t],
                       alpha = alpha.t))
#### testing xi ####
# Simulation check of the MH sampler for the GEV shape parameter xi only;
# mu, ls, and tau are held at their true values, with no residual
# dependence (theta = 1, alpha = 1, threshold = -Inf).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# Heavy lower-tail truth (bounded above); the chain starts at xi = 0.1.
# NOTE(review): starting at xi = 0.1 when the data were generated with
# xi = -0.7 may produce -Inf initial log-likelihoods at some points (support
# mismatch) — presumably loglike/updateXi handle this; confirm.
xi.t <- -0.7
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
xi <- 0.1
thresh.t <- matrix(-Inf, ns, nt)
theta.t <- matrix(1, ns, nt)
alpha.t <- 1
# NOTE(review): unlike the loglike() call in the next section, this one does
# not pass theta.xi — confirm the two signatures are compatible.
curll <- loglike(y = y.t, mu = mu.t, ls = ls.t, xi = xi,
                 theta = theta.t, thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
xi.keep <- rep(0, niters)
acc.xi <- att.xi <- MH.xi <- 0.01
set.seed(3366) # demo
for (iter in 1:niters) {
  # MH update of xi with a truncated-normal-style prior on [-2, 2].
  this.update <- updateXi(xi = xi, xi.min = -2, xi.max = 2,
                          xi.mn = 0, xi.sd = 0.5, y = y.t, mu = mu.t,
                          ls = ls.t, curll = curll, theta = theta.t,
                          thresh = thresh.t, alpha = alpha.t, acc = acc.xi,
                          att = att.xi, MH = MH.xi)
  xi <- this.update$xi
  curll <- this.update$curll
  acc.xi <- this.update$acc
  att.xi <- this.update$att
  xi.keep[iter] <- xi
  # Tune the MH proposal during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.xi, att = att.xi, MH = MH.xi,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.xi <- this.update$acc
    att.xi <- this.update$att
    MH.xi <- this.update$MH
  }
  # Periodic trace plot against the true value.
  if (iter %% 500 == 0) {
    start <- max(1, iter - 20000)
    plot(xi.keep[start:iter], type = "l", main = paste("xi: ", xi.t),
         ylab = round(acc.xi / att.xi, 3),
         xlab = MH.xi)
  }
}
#### testing beta1 ####
# Simulation check of the joint MH update for the site-level intercept and
# time-slope surfaces (beta1.int, beta1.time) that build the GEV location
# mu; their precisions, the log-scale, and xi are held at their true values.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 600
nt <- 30
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)
tau1.time.t <- rgamma(1, 1, 1)
# Independent GP draws per day for the intercept and slope surfaces.
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
# Centered, scaled time covariate.
time <- (1:nt - nt / 2) / nt
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t[, t] * time[t]
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
beta1.int <- beta1.int.t + rnorm(ns * nt)
beta1.time <- beta1.time.t + rnorm(ns * nt)
mu <- matrix(0, ns, nt)
# SS1.*: prior quadratic forms for the intercept and slope surfaces.
SS1.int <- SS1.time <- rep(0, nt)
for (t in 1:nt) {
  mu[, t] <- beta1.int[, t] + beta1.time[, t] * time[t]
  SS1.int[t] <- quad.form(Qb.t, beta1.int[, t] - beta1.int.mn.t)
  SS1.time[t] <- quad.form(Qb.t, beta1.time[, t] - beta1.time.mn.t)
}
# No censoring and no residual dependence in this test.
thresh.t <- matrix(-Inf, ns, nt)
theta.t <- matrix(1, ns, nt)
theta.xi.t <- theta.t^xi.t
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu, ls = ls.t, xi = xi.t,
                 theta = theta.t, theta.xi = theta.xi.t,
                 thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
keep.beta1.int <- keep.beta1.time <- array(0, dim = c(niters, ns, nt))
acc.beta1 <- att.beta1 <- MH.beta1 <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # Joint MH update of intercept and slope surfaces; returns the implied mu
  # and refreshed likelihood/prior caches.
  this.update <- updateBeta1(beta.int = beta1.int, beta.int.mn = beta1.int.mn.t,
                             SS.int = SS1.int, tau.int = tau1.int.t,
                             beta.time = beta1.time,
                             beta.time.mn = beta1.time.mn.t,
                             SS.time = SS1.time, tau.time = tau1.time.t,
                             mu = mu, time = time,
                             y = y.t, theta = theta.t, theta.xi = theta.xi.t,
                             ls = ls.t, xi = xi.t, thresh = thresh.t,
                             alpha = alpha.t, Qb = Qb.t, curll = curll,
                             acc = acc.beta1, att = att.beta1, MH = MH.beta1)
  beta1.int <- this.update$beta.int
  SS1.int <- this.update$SS.int
  beta1.time <- this.update$beta.time
  SS1.time <- this.update$SS.time
  mu <- this.update$mu
  curll <- this.update$curll
  acc.beta1 <- this.update$acc
  att.beta1 <- this.update$att
  keep.beta1.int[iter, , ] <- beta1.int
  keep.beta1.time[iter, , ] <- beta1.time
  # Tune MH proposal scales during the first half of burn-in only.
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.beta1, att = att.beta1, MH = MH.beta1,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.beta1 <- this.update$acc
    att.beta1 <- this.update$att
    MH.beta1 <- this.update$MH
  }
  # Periodic trace plots (sites 1 and 2, first four days) vs true values.
  if (iter %% 500 == 0) {
    par(mfrow = c(4, 4))
    acc.rate <- round(acc.beta1 / att.beta1, 3)
    for (i in 1:4) {
      plot(keep.beta1.int[1:iter, 1, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.time[1:iter, 1, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      # BUGFIX: these two panels plot site 2 but were labeled "site 1".
      plot(keep.beta1.int[1:iter, 2, i], type = "l",
           main = paste("intercept site 2, day ", i, " (",
                        round(beta1.int.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
      plot(keep.beta1.time[1:iter, 2, i], type = "l",
           main = paste("time site 2, day ", i, " (",
                        round(beta1.time.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
    }
  }
}
#### Verify gradients - on beta ####
# Numerically verify the analytic gradients of the log-posteriors for the
# GEV location coefficients (beta1: intercept + time slope) and log-scale
# coefficients (beta2), using a small simulated data set (ns = 10, nt = 3).
# Each check divides numDeriv::grad() (finite differences) by the hand-coded
# *.grad() function: mean(ratio) should be ~1 and sd(ratio) ~0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 10
nt <- 3
phi.t <- 0.2
# simulate spatial sites and an exponential-covariance GP precision matrix
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
# true precisions for the four coefficient surfaces
tau1.int.t <- rgamma(1, 1, 1)
tau1.time.t <- rgamma(1, 1, 1)
tau2.int.t <- rgamma(1, 1, 1)
tau2.time.t <- rgamma(1, 1, 1)
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
beta2.int.t <- beta2.time.t <- matrix(0, ns, nt)
# draw each day's coefficient surface as a GP realization
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
  beta2.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.int.t[1])
  beta2.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
beta2.int.mn.t <- beta2.time.mn.t <- 0
# GEV location (mu) and log-scale (ls) built from intercept + time trend
mu.t <- matrix(0, ns, nt)
ls.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t[, t] * time[t]
  ls.t[, t] <- beta2.int.t[, t] + beta2.time.t[, t] * time[t]
}
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# NOTE(review): nknots is set but never used in this section
nknots <- 4
theta.t <- matrix(abs(rnorm(ns * nt)), ns, nt)
alpha.t <- 0.4
thresh.t <- matrix(median(y.t), ns, nt)
# NOTE(review): xi.t is re-assigned 0 here (already 0 above) — harmless
xi.t <- 0
# NOTE(review): all calls below use the loop-leftover t (== nt after the
# simulation loops), i.e. gradients are checked for the last day only.
lp.beta1.int <- logpost.beta1.int(beta.int = beta1.int.t[, t],
                                  beta.int.mn = beta1.int.mn.t,
                                  tau = tau1.int.t, Qb = Qb.t,
                                  beta.time = beta1.time.t[, t], time = time[t],
                                  y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t)
# ratio of numeric to analytic gradient for beta1 intercept: expect mean ~1
mean(grad(func = logpost.beta1.int, x = beta1.int.t[, t],
          beta.int.mn = beta1.int.mn.t, tau = tau1.int.t, Qb = Qb.t,
          beta.time = beta1.time.t[, t], time = time[t],
          y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                              beta.int.mn = beta1.int.mn.t,
                              tau = tau1.int.t, Qb = Qb.t,
                              beta.time = beta1.time.t[, t], time = time[t],
                              y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                              theta = theta.t[, t], thresh = thresh.t[, t],
                              alpha = alpha.t))
# spread of the ratio: expect sd ~0
sd(grad(func = logpost.beta1.int, x = beta1.int.t[, t],
        beta.int.mn = beta1.int.mn.t, tau = tau1.int.t, Qb = Qb.t,
        beta.time = beta1.time.t[, t], time = time[t],
        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                            beta.int.mn = beta1.int.mn.t,
                            tau = tau1.int.t, Qb = Qb.t,
                            beta.time = beta1.time.t[, t], time = time[t],
                            y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                            theta = theta.t[, t], thresh = thresh.t[, t],
                            alpha = alpha.t))
# NOTE(review): tau = tau1.int.t here (not tau1.time.t). Both the numeric and
# analytic gradient use the same tau, so the check is internally consistent,
# but confirm whether the time-slope precision was intended.
lp.beta1.time <- logpost.beta1.time(beta.time = beta1.time.t[, t],
                                    beta.time.mn = beta1.time.mn.t,
                                    time = time[t], tau = tau1.int.t, Qb = Qb.t,
                                    beta.int = beta1.int.t[, t],
                                    y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                    theta = theta.t[, t],
                                    thresh = thresh.t[, t], alpha = alpha.t)
mean(grad(func = logpost.beta1.time, x = beta1.time.t[, t],
          beta.time.mn = beta1.time.mn.t, time = time[t], tau = tau1.int.t,
          Qb = Qb.t, beta.int = beta1.int.t[, t],
          y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                               beta.time.mn = beta1.time.mn.t, time = time[t],
                               tau = tau1.int.t, Qb = Qb.t,
                               beta.int = beta1.int.t[, t],
                               y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                               theta = theta.t[, t],
                               thresh = thresh.t[, t], alpha = alpha.t))
sd(grad(func = logpost.beta1.time, x = beta1.time.t[, t],
        beta.time.mn = beta1.time.mn.t, time = time[t], tau = tau1.int.t,
        Qb = Qb.t, beta.int = beta1.int.t[, t],
        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                             beta.time.mn = beta1.time.mn.t, time = time[t],
                             tau = tau1.int.t, Qb = Qb.t,
                             beta.int = beta1.int.t[, t],
                             y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                             theta = theta.t[, t],
                             thresh = thresh.t[, t], alpha = alpha.t))
# combined gradient vs the two separate (int/time) gradient functions
this.grad <- logpost.beta1.grad(beta.int = beta1.int.t[, t],
                                beta.int.mn = beta1.int.mn.t,
                                beta.time = beta1.time.t[, t],
                                beta.time.mn = beta1.time.mn.t,
                                time = time[t],
                                tau = tau1.int.t, Qb = Qb.t,
                                y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                theta = theta.t[, t], thresh = thresh.t[, t],
                                alpha = alpha.t)
logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                       beta.int.mn = beta1.int.mn.t,
                       tau = tau1.int.t, Qb = Qb.t,
                       beta.time = beta1.time.t[, t], time = time[t],
                       y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                       theta = theta.t[, t], thresh = thresh.t[, t],
                       alpha = alpha.t)
logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                        beta.time.mn = beta1.time.mn.t, time = time[t],
                        tau = tau1.int.t, Qb = Qb.t,
                        beta.int = beta1.int.t[, t],
                        y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                        theta = theta.t[, t],
                        thresh = thresh.t[, t], alpha = alpha.t)
# timing comparison: combined gradient vs separate int/time gradients
microbenchmark(logpost.beta1.grad(beta.int = beta1.int.t[, t],
                                  beta.int.mn = beta1.int.mn.t,
                                  beta.time = beta1.time.t[, t],
                                  beta.time.mn = beta1.time.mn.t,
                                  time = time[t],
                                  tau = tau1.int.t, Qb = Qb.t,
                                  y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t),
               logpost.beta1.int.grad(beta.int = beta1.int.t[, t],
                                      beta.int.mn = beta1.int.mn.t,
                                      tau = tau1.int.t, Qb = Qb.t,
                                      beta.time = beta1.time.t[, t], time = time[t],
                                      y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                      theta = theta.t[, t], thresh = thresh.t[, t],
                                      alpha = alpha.t),
               logpost.beta1.time.grad(beta.time = beta1.time.t[, t],
                                       beta.time.mn = beta1.time.mn.t, time = time[t],
                                       tau = tau1.int.t, Qb = Qb.t,
                                       beta.int = beta1.int.t[, t],
                                       y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                                       theta = theta.t[, t],
                                       thresh = thresh.t[, t], alpha = alpha.t))
# NOTE(review): beta.time = beta1.time.t here while the grad calls below use
# beta2.time.t — lp.beta2.int is not compared against those, but confirm.
lp.beta2.int <- logpost.beta2.int(beta.int = beta2.int.t[, t],
                                  beta.int.mn = beta2.int.mn.t,
                                  tau = tau2.int.t, Qb = Qb.t,
                                  beta.time = beta1.time.t[, t], time = time[t],
                                  y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                                  theta = theta.t[, t], thresh = thresh.t[, t],
                                  alpha = alpha.t)
mean(grad(func = logpost.beta2.int, x = beta2.int.t[, t],
          beta.int.mn = beta2.int.mn.t, tau = tau2.int.t, Qb = Qb.t,
          beta.time = beta2.time.t[, t], time = time[t],
          y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta2.int.grad(beta.int = beta2.int.t[, t],
                              beta.int.mn = beta2.int.mn.t,
                              tau = tau2.int.t, Qb = Qb.t,
                              beta.time = beta2.time.t[, t], time = time[t],
                              y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                              theta = theta.t[, t], thresh = thresh.t[, t],
                              alpha = alpha.t))
sd(grad(func = logpost.beta2.int, x = beta2.int.t[, t],
        beta.int.mn = beta2.int.mn.t, tau = tau2.int.t, Qb = Qb.t,
        beta.time = beta2.time.t[, t], time = time[t],
        y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta2.int.grad(beta.int = beta2.int.t[, t],
                            beta.int.mn = beta2.int.mn.t,
                            tau = tau2.int.t, Qb = Qb.t,
                            beta.time = beta2.time.t[, t], time = time[t],
                            y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                            theta = theta.t[, t], thresh = thresh.t[, t],
                            alpha = alpha.t))
# NOTE(review): tau = tau2.int.t and beta.int = beta1.int.t below — the grad
# ratio checks use beta2.int.t consistently; confirm whether tau2.time.t /
# beta2.int.t were intended here.
lp.beta2.time <- logpost.beta2.time(beta.time = beta2.time.t[, t],
                                    beta.time.mn = beta2.time.mn.t,
                                    time = time[t], tau = tau2.int.t, Qb = Qb.t,
                                    beta.int = beta1.int.t[, t],
                                    y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                                    theta = theta.t[, t], thresh = thresh.t[, t],
                                    alpha = alpha.t)
mean(grad(func = logpost.beta2.time, x = beta2.time.t[, t],
          beta.time.mn = beta2.time.mn.t, time = time[t], tau = tau2.int.t,
          Qb = Qb.t, beta.int = beta2.int.t[, t],
          y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t],
          alpha = alpha.t) /
       logpost.beta2.time.grad(beta.time = beta2.time.t[, t],
                               beta.time.mn = beta2.time.mn.t,
                               time = time[t], tau = tau2.int.t, Qb = Qb.t,
                               beta.int = beta2.int.t[, t],
                               y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                               theta = theta.t[, t], thresh = thresh.t[, t],
                               alpha = alpha.t))
sd(grad(func = logpost.beta2.time, x = beta2.time.t[, t],
        beta.time.mn = beta2.time.mn.t, time = time[t], tau = tau2.int.t,
        Qb = Qb.t, beta.int = beta2.int.t[, t],
        y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t],
        alpha = alpha.t) /
     logpost.beta2.time.grad(beta.time = beta2.time.t[, t],
                             beta.time.mn = beta2.time.mn.t,
                             time = time[t], tau = tau2.int.t, Qb = Qb.t,
                             beta.int = beta2.int.t[, t],
                             y = y.t[, t], mu = mu.t[, t], xi = xi.t,
                             theta = theta.t[, t], thresh = thresh.t[, t],
                             alpha = alpha.t))
#### testing beta1 ####
# MCMC test for the joint beta1 (location intercept + time slope) update,
# holding the hyperparameters (tau, Qb, means) fixed at their true values.
# Data are GEV with unit Frechet-free settings (theta = 1, alpha = 1,
# thresh = -Inf), so the likelihood reduces to a plain GEV likelihood.
# Profiled with Rprof to find hot spots in updateBeta1.
# Fix applied: the trace plots for site index 2 were mislabeled "site 1"
# (copy-paste); titles now say "site 2".
rm(list=ls())
library(compiler)
enableJIT(3)
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 600
nt <- 30
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)
tau1.time.t <- rgamma(1, 1, 1)
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
# true coefficient surfaces: independent GP draws per day
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t[, t] * time[t]
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: start the chains at truth + N(0, 1) noise
beta1.int <- beta1.int.t + rnorm(ns * nt)
beta1.int.mn <- 0
beta1.time <- beta1.time.t + rnorm(ns * nt)
beta1.time.mn <- 0
mu <- matrix(0, ns, nt)
SS1.int <- SS1.time <- rep(0, nt)
for (t in 1:nt) {
  mu[, t] <- beta1.int[, t] + beta1.time[, t] * time[t]
  SS1.int[t] <- quad.form(Qb.t, beta1.int[, t] - beta1.int.mn.t)
  SS1.time[t] <- quad.form(Qb.t, beta1.time[, t] - beta1.time.mn.t)
}
thresh.t <- matrix(-Inf, ns, nt)
theta.t <- matrix(1, ns, nt)
theta.xi.t <- theta.t^xi.t
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu, ls = ls.t, xi = xi.t,
                 theta = theta.t, theta.xi = theta.xi.t,
                 thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
keep.beta1.int <- keep.beta1.time <- array(0, dim = c(niters, ns, nt))
# NOTE(review): rep(0) is length 1 and these are never written to below
keep.beta1.int.mn <- keep.beta1.time.mn <- rep(0)
acc.beta1 <- att.beta1 <- MH.beta1 <- matrix(0.01, ns, nt)
set.seed(3366) # demo
Rprof(filename = "Rprof.out", line.profiling = TRUE)
for (iter in 1:niters) {
  this.update <- updateBeta1(beta.int = beta1.int, beta.int.mn = beta1.int.mn.t,
                             SS.int = SS1.int, tau.int = tau1.int.t,
                             beta.time = beta1.time,
                             beta.time.mn = beta1.time.mn.t,
                             SS.time = SS1.time, tau.time = tau1.time.t,
                             mu = mu, time = time,
                             y = y.t, theta = theta.t, theta.xi = theta.xi.t,
                             ls = ls.t, xi = xi.t, thresh = thresh.t,
                             alpha = alpha.t, Qb = Qb.t, curll = curll,
                             acc = acc.beta1, att = att.beta1, MH = MH.beta1)
  beta1.int <- this.update$beta.int
  SS1.int <- this.update$SS.int
  beta1.time <- this.update$beta.time
  SS1.time <- this.update$SS.time
  mu <- this.update$mu
  curll <- this.update$curll
  acc.beta1 <- this.update$acc
  att.beta1 <- this.update$att
  keep.beta1.int[iter, , ] <- beta1.int
  keep.beta1.time[iter, , ] <- beta1.time
  # tune MH step sizes during the first half of burn-in only
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.beta1, att = att.beta1, MH = MH.beta1,
                            target.min = 0.5, target.max = 0.8,
                            nattempts = 50)
    acc.beta1 <- this.update$acc
    att.beta1 <- this.update$att
    MH.beta1 <- this.update$MH
  }
  # periodic trace plots for sites 1 and 2, days 1-4 (true value in title)
  if (iter %% 100 == 0) {
    par(mfrow = c(4, 4))
    acc.rate <- round(acc.beta1 / att.beta1, 3)
    for (i in 1:4) {
      plot(keep.beta1.int[1:iter, 1, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.time[1:iter, 1, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.int[1:iter, 2, i], type = "l",
           main = paste("intercept site 2, day ", i, " (",
                        round(beta1.int.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
      plot(keep.beta1.time[1:iter, 2, i], type = "l",
           main = paste("time site 2, day ", i, " (",
                        round(beta1.time.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
    }
  }
}
Rprof(NULL)
summaryRprof(filename = "Rprof.out", lines = "show")
rm(list=ls())
# Package load block shared by the remaining test sections.
# NOTE(review): library(fields) is loaded twice (harmless).
library(fields)
library(Rcpp)
library(emulator)
library(microbenchmark)
library(SpatialExtremes)
library(numDeriv)
library(fields)
#### testing beta ####
# Test of the conjugate Gibbs update for the GP regression coefficients
# (updateGPBeta) with all other parameters fixed at their true values.
# Trace plots should mix around beta.t.
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 2
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma <- exp(-d / phi)
tau <- rgamma(nt, 0.5, 0.5)
Qb <- chol2inv(chol(Sigma))
Xb <- getXBeta(X = X, beta = beta.t)
# getXBeta drops the matrix structure when nt == 1; restore it
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values
# SS holds the per-day GP quadratic forms (mu - Xb)' Qb (mu - Xb)
SS <- diag(quad.form(Qb, mu - Xb))
niters <- 10000
beta.keep <- matrix(0, niters, np)
beta <- rep(0, np)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = 100, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # trace plots of the last 2000 draws against the true values
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(2, np / 2))
    for (i in 1:6) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("Beta = ", round(beta.t[i], 3)))
    }
  }
}
#### testing phi ####
# Metropolis test for the GP range/bandwidth parameter (updateGPBW) with
# beta and tau fixed at truth. The same (mu, Xb, tau, SS) are passed as
# both the "mu" and "ls" surfaces, so phi is informed by the one process
# counted twice.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 10
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau <- rgamma(nt, 0.5, 0.5)
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau[t])
}
# initialize values: start phi well below truth and rebuild Qb/SS to match
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- diag(quad.form(Qb, mu - Xb))
# NOTE(review): duplicate assignment — phi was already set to 0.05 above
phi <- 0.05
niters <- 10000
phi.keep <- rep(0, niters)
acc.phi <- att.phi <- MH.phi <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  # NOTE(review): unlike the other sections, MH tuning here runs for ALL
  # iterations (no burn-in guard) — fine for a mixing demo, not for
  # posterior summaries.
  this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  MH.phi <- this.update$MH
  phi.keep[iter] <- phi
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    plot(phi.keep[start:iter], type = "l")
  }
}
#### testing tau ####
# Gibbs test for the per-day GP precisions (updateGPTau) with beta, phi,
# and mu fixed at truth. Trace plots should mix around tau.t.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 0.5, 0.5)
Qb <- chol2inv(chol(Sigma.t))
Xb <- getXBeta(X = X, beta = beta.t)
if (nt == 1) {
  Xb <- matrix(Xb, ns, nt)
}
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# per-day quadratic forms feeding the gamma full conditional
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)
# initialize values
niters <- 10000
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter %% 500 == 0) {
    start <- max(1, iter - 2000)
    par(mfrow = c(4, 3))
    for (t in 1:nt) {
      plot(tau.keep[start:iter, t], type = "l",
           main = paste("tau = ", round(tau.t[t], 3)))
    }
  }
}
#### testing tau, phi, and beta ####
# Joint MCMC test cycling the beta (Gibbs), beta.sd (Gibbs), tau (Gibbs),
# and phi (Metropolis) updates with mu fixed at its true realization.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
# initialize values: beta at 0, tau at 1, phi below truth
beta <- rep(0, np)
Xb <- getXBeta(X = X, beta = beta)
tau <- rep(1, nt)
phi <- 0.05
Qb <- chol2inv(chol(exp(-d / phi)))
SS <- getGPSS(Qb = Qb, param = mu, Xb = Xb)
niters <- 2000
burn <- 1500
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
phi.keep <- rep(0, niters)
acc.phi <- att.phi <- MH.phi <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  # Gibbs draw of the regression coefficients given mu
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  # hyperprior sd on beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 0.1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # per-day GP precisions
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  # Metropolis draw of the GP bandwidth (mu surface passed twice)
  this.update <- updateGPBW(bw = phi, bw.min = 0.01, bw.max = 1.2,
                            bw.mn = 0, bw.sd = 1, Qb = Qb, d = d,
                            mu = mu, Xb1 = Xb, tau1 = tau, SS1 = SS,
                            ls = mu, Xb2 = Xb, tau2 = tau, SS2 = SS,
                            acc = acc.phi, att = att.phi, MH = MH.phi)
  phi <- this.update$bw
  Qb <- this.update$Qb
  SS <- this.update$SS1
  acc.phi <- this.update$acc
  att.phi <- this.update$att
  phi.keep[iter] <- phi
  # tune the phi MH step during the first half of burn-in
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.phi, att = att.phi, MH = MH.phi)
    acc.phi <- this.update$acc
    att.phi <- this.update$att
    MH.phi <- this.update$MH
  }
  if (iter %% 100 == 0) {
    par(mfrow = c(5, 3))
    for (i in 1:np) {
      plot(beta.keep[1:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    plot(beta.sd.keep[1:iter], type = "l", main = "beta sd")
    plot(phi.keep[1:iter], type = "l", main = paste("phi: ", phi.t))
    for(i in 1:7) {
      plot(tau.keep[1:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu ####
# Metropolis test for the GEV location surface (updateMuTest) with beta,
# tau, phi, log-scale, and xi all fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# NOTE(review): Sigma is never used below, and t here is the loop-leftover
# value (== nt) — this line looks like dead code.
Sigma <- solve(Qb.t * tau.t[t])
# initialize values: start mu at truth + N(0, 1) noise
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
# current GEV log-likelihood at the starting mu
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
mu.keep <- array(0, dim = c(niters, ns, nt))
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau.t, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # tune the site-by-day MH step sizes during the first half of burn-in
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  # trace plots for a 3x3 grid of sites/days
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
  }
}
#### testing mu and tau ####
# Joint MCMC test cycling the mu (Metropolis) and tau (Gibbs) updates,
# with beta, phi, log-scale, and xi fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 60000
burn <- 50000
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # NOTE(review): tau = tau (the sampled value) here, unlike the joint
  # mu/tau/beta section below which passes tau.t.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau, Xb = Xb.t,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # Gibbs draw of the per-day precisions given the new SS
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing mu, tau, and beta ####
# Joint MCMC test cycling beta (Gibbs), beta.sd (Gibbs), mu (Metropolis),
# and tau (Gibbs), with phi, log-scale, and xi fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X <- rX(ns, nt, np)
beta.t <- rnorm(np, 0, 10)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb.t <- getXBeta(X = X, beta = beta.t)
mu.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = mu, Xb = Xb.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls.t[, t]), xi.t,
                     log = TRUE)
}
niters <- 20000
burn <- 15000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.mu <- att.mu <- MH.mu <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = mu, X = X, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # NOTE(review): tau = tau.t (the TRUE precisions) is passed here even
  # though tau is being sampled in this section — the mu/tau section above
  # passes the sampled tau. Confirm whether this conditioning on truth is
  # intentional for this test.
  this.update <- updateMuTest(mu = mu, Qb = Qb.t, tau = tau.t, Xb = Xb,
                              y = y.t, ls = ls.t, xi = xi.t,
                              SS = SS, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.5, target.max = 0.7,
                            nattempts = 200)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(mu.keep[start:iter, i, j], type = "l",
             main = paste("mu: ", round(mu.t[i, j], 3)),
             ylab = round(acc.mu[i, j] / att.mu[i, j], 3),
             xlab = MH.mu[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### Verify gradients - no residual dependence ####
# Numerically verify the analytic gradients of the mu and log-sigma
# log-posteriors in the independence (no residual dependence) model.
# Ratio of numDeriv::grad() to the hand-coded gradient should have
# mean ~1 and sd ~0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 10
nt <- 3
np <- 6
X1 <- rX(ns, nt, np)
X2 <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
# NOTE(review): all checks below use the loop-leftover t (== nt), i.e.
# the gradients are verified for the last day only.
lp.mu <- logpost.mu.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                         Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t)
# mu gradient check: expect mean ~1, sd ~0
mean(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
       logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                            Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                            xi = xi.t))
sd(grad(func = logpost.mu.test, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t) /
     logpost.mu.grad.test(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                          xi = xi.t))
lp.logsig <- logpost.logsig.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                                 tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                                 mu = mu.t[, t], xi = xi.t)
# log-sigma gradient check: expect mean ~1, sd ~0
mean(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
       logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                                tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                                mu = mu.t[, t], xi = xi.t))
sd(grad(func = logpost.logsig.test, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t) /
     logpost.logsig.grad.test(ls = ls.t[, t], Xb = Xb2.t[, t],
                              tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                              mu = mu.t[, t], xi = xi.t))
#### testing logsig ####
# Metropolis test for the GEV log-scale surface (updateLSTest) with all
# other parameters (mu, tau, beta2, phi, xi) fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: start log-sigma at truth + small noise
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 10000
burn <- 8000
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateLSTest(ls = ls, tau = tau.t, Xb = Xb2.t, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 500 == 0) {
    par(mfrow = c(3, 3))
    start <- max(1, iter - 20000)
    for (i in 1:3) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
  }
}
#### testing logsig, tau, and beta ####
# Joint MCMC test cycling beta2 (Gibbs), beta.sd (Gibbs), log-sigma
# (Metropolis), and tau (Gibbs), with mu, phi, and xi fixed at truth.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values
ls <- matrix(ls.t + rnorm(ns * nt, 0, 0.1), ns, nt)
SS <- getGPSS(Qb = Qb.t, param = ls, Xb = Xb2.t)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  curll[, t] <- dgev(x = y.t[, t], loc = mu.t[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
beta.sd <- 100
beta <- rep(0, np)
beta.keep <- matrix(0, niters, np)
beta.sd.keep <- rep(0, niters)
ls.keep <- array(0, dim = c(niters, ns, nt))
tau <- rep(1, nt)
tau.keep <- matrix(0, niters, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateGPBeta(beta.sd = beta.sd, Qb = Qb.t,
                              param = ls, X = X2.t, SS = SS, tau = tau)
  beta <- this.update$beta
  Xb2 <- this.update$Xb
  SS <- this.update$SS
  beta.keep[iter, ] <- beta
  this.update <- updateGPBetaSD(beta = beta, tau.a = 0.1, tau.b = 1)
  beta.sd <- this.update$beta.sd
  beta.sd.keep[iter] <- beta.sd
  # NOTE(review): tau = tau.t (true precisions) is passed here while tau is
  # being sampled below — confirm whether the sampled tau was intended, as
  # in the standalone logsig section which holds tau fixed by design.
  this.update <- updateLSTest(ls = ls, tau = tau.t, Xb = Xb2, SS = SS,
                              y = y.t, mu = mu.t, xi = xi.t,
                              Qb = Qb.t, curll = curll,
                              acc = acc.ls, att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  this.update <- updateGPTau(SS = SS, tau.a = 0.1, tau.b = 0.1,
                             ns = ns)
  tau <- this.update$tau
  tau.keep[iter, ] <- tau
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(4, 3))
    start <- max(1, iter - 20000)
    for (i in 1:2) {
      for (j in 1:3) {
        plot(ls.keep[start:iter, i, j], type = "l",
             main = paste("logsig: ", round(ls.t[i, j], 3)),
             ylab = round(acc.ls[i, j] / att.ls[i, j], 3),
             xlab = MH.ls[i, j])
      }
    }
    for (i in 1:3) {
      plot(beta.keep[start:iter, i], type = "l",
           main = paste("beta: ", round(beta2.t[i], 3)))
    }
    for (i in 1:3) {
      plot(tau.keep[start:iter, i], type = "l",
           main = paste("tau: ", round(tau.t[i], 3)))
    }
  }
}
#### testing basis bandwidth update ####
# Metropolis test for the kernel-basis bandwidth (updateXBasisBW) that
# controls the knot-kernel covariates shared by the mu and log-sigma
# design matrices. All regression coefficients, precisions, and surfaces
# are held at their true values.
# Fix applied: Xb2 was initialized with beta1.t instead of beta2.t —
# everywhere else in this section X2 pairs with beta2.t (Xb2.t, SS2 on
# ls.t, and beta2 = beta2.t in the updateXBasisBW call).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
# space-filling knot design; dw2 = squared site-to-knot distances
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates: intercept + centered time trend
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
# append kernel-basis columns built at the true bandwidth
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values: start the bandwidth away from truth and rebuild the
# basis-dependent quantities to match
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1.t)
# FIX: use beta2.t (was beta1.t), matching Xb2.t / SS2 / the beta2 argument
Xb2 <- getXBeta(X = X2, beta = beta2.t)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
Xb1.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1.t, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2.t, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  Xb1.keep[iter, , ] <- Xb1
  # tune the bandwidth MH step during the first half of burn-in
  if (iter < burn / 2) {
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  if (iter %% 1000 == 0) {
    par(mfrow = c(2, 5))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:3) { for (j in 1:3) {
      plot(Xb1.keep[100:iter, i, j], type = "l",
           main = paste("Xb1: ", Xb1.t[i, j], sep = ""))
    }}
  }
}
#### testing basis bandwidth, beta1, beta2 ####
# Simulation check: sample the basis bandwidth and both regression coefficient
# vectors (beta1 for mu, beta2 for log-sigma) jointly, holding mu.t / ls.t and
# the variance parameters fixed at their true values.
# FIX: Xb2 was initialized with beta1; the log-scale predictor belongs to
# beta2 (no numeric change here since both start at zero, but the template
# was wrong and misleading).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates (intercept + centered time trend)
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
Xb2 <- getXBeta(X = X2, beta = beta2)  # was beta1 (template slip)
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
niters <- 30000
burn <- 25000
# storage
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
set.seed(3366) # demo
for (iter in 1:niters) {
  # Gibbs update of beta1 given the (fixed, true) mu surface
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu.t, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  # Gibbs update of beta2 given the (fixed, true) log-scale surface
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls.t, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  # MH update of the basis bandwidth using the current betas
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu.t, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls.t, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  if (iter < burn / 2) {
    # adapt the bandwidth MH step size during early burn-in
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
  }
  if (iter %% 500 == 0) {
    # trace plots: bandwidth plus first four entries of each beta
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### testing basis bandwidth, beta1, beta2, mu, and logsig ####
# Simulation check: jointly sample the basis bandwidth, both coefficient
# vectors, and the latent mu / log-sigma surfaces (variances and xi fixed at
# truth).
# FIX: Xb2 was initialized with beta1; the log-scale predictor belongs to
# beta2 (no numeric change since both start at zero, but the template was
# wrong).
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# setting np later after X is created
ns <- 400
nt <- 12
nknots <- 5
time.interact <- TRUE
s <- cbind(runif(ns), runif(ns))
knots <- as.matrix(cover.design(R = s, nd = nknots)$design)
d <- rdist(s)
dw2 <- rdist(s, knots)^2
dw2[dw2 < 1e-4] <- 0
# create the matrix of covariates
X1.t <- X2.t <- array(1, dim = c(ns, nt, 2))
for (t in 1:nt) {
  time <- (t - nt / 2) / nt
  X1.t[, t, 2] <- X2.t[, t, 2] <- time
}
bw.basis.t <- 0.2
B.t <- makeW(dw2 = dw2, rho = bw.basis.t)
X1.t <- add.basis.X(X1.t, B.t, time.interact = time.interact)
X2.t <- add.basis.X(X2.t, B.t, time.interact = time.interact)
np <- dim(X1.t)[3]
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
bw.gp.t <- 0.2
Sigma.t <- exp(-d / bw.gp.t)
tau1.t <- tau2.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau2.t[t])
}
xi.t <- 0.01
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), shape = xi.t)
# initialize values: latent surfaces start at truth plus N(0, 1) noise
mu <- matrix(mu.t + rnorm(ns * nt), ns, nt)
ls <- matrix(ls.t + rnorm(ns * nt), ns, nt)
beta1 <- beta2 <- rep(0, np)
beta1.sd <- beta2.sd <- 100
bw.basis <- 0.4
bw.basis.min <- quantile(dw2, 0.01)
bw.basis.max <- quantile(dw2, 0.99)
B <- makeW(dw2 = dw2, rho = bw.basis)
X1 <- rep.basis.X(X = X1.t, newB = B, time.interact = time.interact)
X2 <- rep.basis.X(X = X2.t, newB = B, time.interact = time.interact)
Xb1 <- getXBeta(X = X1, beta = beta1)
Xb2 <- getXBeta(X = X2, beta = beta2)  # was beta1 (template slip)
# NOTE(review): these sums of squares are initialized from the TRUE surfaces
# mu.t / ls.t while the chain state is the noisy mu / ls — confirm the first
# updateGPBeta call recomputes SS before using it.
SS1 <- getGPSS(Qb = Qb.t, param = mu.t, Xb = Xb1)
SS2 <- getGPSS(Qb = Qb.t, param = ls.t, Xb = Xb2)
curll <- matrix(0, ns, nt)
for (t in 1:nt) {
  # current GEV log-likelihood at the initial state
  curll[, t] <- dgev(x = y.t[, t], loc = mu[, t], exp(ls[, t]), xi.t,
                     log = TRUE)
}
niters <- 30000
burn <- 25000
# storage (mu.keep / ls.keep are large: niters x ns x nt doubles)
bw.basis.keep <- rep(0, niters)
beta1.keep <- beta2.keep <- matrix(0, niters, np)
beta1.sd.keep <- beta2.sd.keep <- rep(0, niters)
mu.keep <- array(0, dim = c(niters, ns, nt))
ls.keep <- array(0, dim = c(niters, ns, nt))
acc.bw.basis <- att.bw.basis <- MH.bw.basis <- 0.1
acc.mu <- att.mu <- MH.mu <- matrix(0.2, ns, nt)
acc.ls <- att.ls <- MH.ls <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # beta1 given the current mu surface
  this.update <- updateGPBeta(beta.sd = beta1.sd, Qb = Qb.t,
                              param = mu, X = X1, SS = SS1, tau = tau1.t)
  beta1 <- this.update$beta
  Xb1 <- this.update$Xb
  SS1 <- this.update$SS
  beta1.keep[iter, ] <- beta1
  this.update <- updateGPBetaSD(beta = beta1, tau.a = 0.5, tau.b = 0.5)
  beta1.sd <- this.update$beta.sd
  beta1.sd.keep[iter] <- beta1.sd
  # beta2 given the current log-scale surface
  this.update <- updateGPBeta(beta.sd = beta2.sd, Qb = Qb.t,
                              param = ls, X = X2, SS = SS2, tau = tau2.t)
  beta2 <- this.update$beta
  Xb2 <- this.update$Xb
  SS2 <- this.update$SS
  beta2.keep[iter, ] <- beta2
  this.update <- updateGPBetaSD(beta = beta2, tau.a = 0.5, tau.b = 0.5)
  beta2.sd <- this.update$beta.sd
  beta2.sd.keep[iter] <- beta2.sd
  # MH update of the basis bandwidth
  this.update <- updateXBasisBW(bw = bw.basis, bw.min = bw.basis.min,
                                bw.max = bw.basis.max,
                                X1 = X1, beta1 = beta1, Xb1 = Xb1,
                                mu = mu, tau1 = tau1.t, SS1 = SS1,
                                X2 = X2, beta2 = beta2, Xb2 = Xb2,
                                ls = ls, tau2 = tau2.t, SS2 = SS2,
                                Qb = Qb.t, dw2 = dw2,
                                time.interact = time.interact,
                                acc = acc.bw.basis, att = att.bw.basis,
                                MH = MH.bw.basis)
  bw.basis <- this.update$bw
  X1 <- this.update$X1
  Xb1 <- this.update$Xb1
  SS1 <- this.update$SS1
  X2 <- this.update$X2
  Xb2 <- this.update$Xb2
  SS2 <- this.update$SS2
  acc.bw.basis <- this.update$acc
  att.bw.basis <- this.update$att
  bw.basis.keep[iter] <- bw.basis
  # site-by-day MH updates of the latent location surface
  this.update <- updateMuTest(mu = mu, tau = tau1.t, Xb = Xb1, SS = SS1,
                              y = y.t, ls = ls, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.mu,
                              att = att.mu, MH = MH.mu)
  mu <- this.update$mu
  SS1 <- this.update$SS
  curll <- this.update$curll
  acc.mu <- this.update$acc
  att.mu <- this.update$att
  mu.keep[iter, , ] <- mu
  # site-by-day MH updates of the latent log-scale surface
  this.update <- updateLSTest(ls = ls, tau = tau2.t, Xb = Xb2, SS = SS2,
                              y = y.t, mu = mu, xi = xi.t,
                              Qb = Qb.t, curll = curll, acc = acc.ls,
                              att = att.ls, MH = MH.ls)
  ls <- this.update$ls
  SS2 <- this.update$SS
  curll <- this.update$curll
  acc.ls <- this.update$acc
  att.ls <- this.update$att
  ls.keep[iter, , ] <- ls
  if (iter < burn / 2) {
    # adapt all three MH step sizes during early burn-in
    this.update <- mhUpdate(acc = acc.bw.basis, att = att.bw.basis,
                            MH = MH.bw.basis,
                            target.min = 0.3, target.max = 0.6,
                            lower = 0.8, higher = 1.2)
    acc.bw.basis <- this.update$acc
    att.bw.basis <- this.update$att
    MH.bw.basis <- this.update$MH
    this.update <- mhUpdate(acc = acc.mu, att = att.mu, MH = MH.mu,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.mu <- this.update$acc
    att.mu <- this.update$att
    MH.mu <- this.update$MH
    this.update <- mhUpdate(acc = acc.ls, att = att.ls, MH = MH.ls,
                            target.min = 0.4, target.max = 0.7,
                            nattempts = 400)
    acc.ls <- this.update$acc
    att.ls <- this.update$att
    MH.ls <- this.update$MH
  }
  if (iter %% 500 == 0) {
    # trace plots against the true values
    par(mfrow = c(3, 3))
    plot(bw.basis.keep[100:iter], type = "l",
         main = paste("BW basis: ", bw.basis.t, sep = ""),
         ylab = round(acc.bw.basis / att.bw.basis, 3),
         xlab = MH.bw.basis)
    for(i in 1:4) {
      plot(beta1.keep[100:iter, i], type = "l",
           main = paste("beta 1: ", beta1.t[i], sep = ""))
    }
    for (i in 1:4) {
      plot(beta2.keep[100:iter, i], type = "l",
           main = paste("beta 2: ", beta2.t[i], sep = ""))
    }
  }
}
#### Verify gradients - with residual dependence ####
# Check the analytic log-posterior gradients for mu and log-sigma against
# numerical differentiation (numDeriv::grad): the mean of the element-wise
# ratio numeric/analytic should be ~1 and its sd ~0.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
# small problem sizes: the numeric gradient is expensive
ns <- 10
nt <- 3
np <- 6
X1 <- rX(ns, nt, np)
X2 <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 1)
beta2.t <- rnorm(np, 0, 0.1)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1, beta = beta1.t)
Xb2.t <- getXBeta(X = X2, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  # GP residuals around the linear predictors
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- 0.1
y.t <- rgev(n = ns * nt, loc = mu.t, scale = exp(ls.t), xi.t)
nknots <- 4
# residual-dependence quantities (positive-stable weights, threshold, alpha)
theta.t <- matrix(abs(rnorm(ns * nt)), ns, nt)
alpha.t <- 0.4
thresh.t <- matrix(median(y.t), ns, nt)
# xi is reset to 0 AFTER the data were drawn with xi = 0.1; the gradient
# check below is for the xi = 0 (Gumbel-limit) branch of the code
xi.t <- 0
# NOTE(review): `t` below is the leftover loop index, i.e. t == nt — the
# check is run for the last day only
lp.mu <- logpost.mu(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                    Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
                    theta = theta.t[, t], thresh = thresh.t[, t],
                    alpha = alpha.t)
# ratio numeric / analytic for the mu gradient: mean should be ~1
mean(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
          Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                     Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                     xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                     alpha = alpha.t))
# ...and the sd of the ratio should be ~0
sd(grad(func = logpost.mu, x = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
        Qb = Qb.t, y = y.t[, t], ls = ls.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
   logpost.mu.grad(mu = mu.t[, t], Xb = Xb1.t[, t], tau = tau.t[t],
                   Qb = Qb.t, y = y.t[, t], ls = ls.t[, t],
                   xi = xi.t, theta = theta.t[, t], thresh = thresh.t[, t],
                   alpha = alpha.t))
# same check for the log-sigma gradient
lp.logsig <- logpost.logsig(ls = ls.t[, t], Xb = Xb2.t[, t], tau = tau.t[t],
                            Qb = Qb.t, y = y.t[, t], mu = mu.t[, t],
                            xi = xi.t, theta = theta.t[, t],
                            thresh = thresh.t[, t], alpha = alpha.t)
mean(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
          tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
          theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
     logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                         tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                         mu = mu.t[, t], xi = xi.t,
                         theta = theta.t[, t], thresh = thresh.t[, t],
                         alpha = alpha.t))
sd(grad(func = logpost.logsig, x = ls.t[, t], Xb = Xb2.t[, t],
        tau = tau.t[t], Qb = Qb.t, y = y.t[, t], mu = mu.t[, t], xi = xi.t,
        theta = theta.t[, t], thresh = thresh.t[, t], alpha = alpha.t) /
   logpost.logsig.grad(ls = ls.t[, t], Xb = Xb2.t[, t],
                       tau = tau.t[t], Qb = Qb.t, y = y.t[, t],
                       mu = mu.t[, t], xi = xi.t,
                       theta = theta.t[, t], thresh = thresh.t[, t],
                       alpha = alpha.t))
#### testing xi ####
# Simulation check for the GEV shape-parameter sampler (updateXi): data are
# drawn with xi.t = -0.7, everything else is held at truth, and only xi is
# sampled.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 400
nt <- 12
np <- 6
X1.t <- rX(ns, nt, np)
X2.t <- rX(ns, nt, np)
beta1.t <- rnorm(np, 0, 10)
beta2.t <- rnorm(np, 0, 5)
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
tau.t <- rgamma(nt, 1, 1)
Qb.t <- chol2inv(chol(Sigma.t))
Xb1.t <- getXBeta(X = X1.t, beta = beta1.t)
Xb2.t <- getXBeta(X = X2.t, beta = beta2.t)
mu.t <- ls.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  # GP residuals around the linear predictors
  mu.t[, t] <- Xb1.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
  ls.t[, t] <- Xb2.t[, t] + t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau.t[t])
}
xi.t <- -0.7
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values (xi deliberately started far from the truth);
# thresh = -Inf, theta = 1, alpha = 1 turn off the residual-dependence /
# censoring machinery so only the marginal GEV likelihood is exercised
xi <- 0.1
thresh.t <- matrix(-Inf, ns, nt)
theta.t <- matrix(1, ns, nt)
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu.t, ls = ls.t, xi = xi,
                 theta = theta.t, thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
xi.keep <- rep(0, niters)
acc.xi <- att.xi <- MH.xi <- 0.01
set.seed(3366) # demo
for (iter in 1:niters) {
  # one MH update of xi with a truncated-normal prior on [-2, 2]
  this.update <- updateXi(xi = xi, xi.min = -2, xi.max = 2,
                          xi.mn = 0, xi.sd = 0.5, y = y.t, mu = mu.t,
                          ls = ls.t, curll = curll, theta = theta.t,
                          thresh = thresh.t, alpha = alpha.t, acc = acc.xi,
                          att = att.xi, MH = MH.xi)
  xi <- this.update$xi
  curll <- this.update$curll
  acc.xi <- this.update$acc
  att.xi <- this.update$att
  xi.keep[iter] <- xi
  if (iter < burn / 2) {
    # adapt the MH step size during the first half of burn-in
    this.update <- mhUpdate(acc = acc.xi, att = att.xi, MH = MH.xi,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.xi <- this.update$acc
    att.xi <- this.update$att
    MH.xi <- this.update$MH
  }
  if (iter %% 500 == 0) {
    # trace plot over (at most) the last 20000 iterations
    start <- max(1, iter - 20000)
    plot(xi.keep[start:iter], type = "l", main = paste("xi: ", xi.t),
         ylab = round(acc.xi / att.xi, 3),
         xlab = MH.xi)
  }
}
#### testing beta1 ####
# Simulation check for updateBeta1: spatially varying intercept and time
# slope for the GEV location, sampled with variances and xi held at truth.
# FIX: the trace-plot titles for site 2 said "site 1" (copy-paste slip);
# labels now match the plotted series.
rm(list=ls())
source("../../../usefulR/usefulfunctions.R", chdir = TRUE)
source("auxfunctions.R", chdir = TRUE)
source("updatemodel.R", chdir = TRUE)
set.seed(2000)
ns <- 600
nt <- 30
phi.t <- 0.2
s <- cbind(runif(ns), runif(ns))
d <- rdist(s)
Sigma.t <- exp(-d / phi.t)
Qb.t <- chol2inv(chol(Sigma.t))
tau1.int.t <- rgamma(1, 1, 1)
tau1.time.t <- rgamma(1, 1, 1)
# spatially correlated intercept and slope surfaces, one column per day
beta1.int.t <- beta1.time.t <- matrix(0, ns, nt)
for (t in 1:nt) {
  beta1.int.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.int.t[1])
  beta1.time.t[, t] <- t(chol(Sigma.t)) %*% rnorm(ns) / sqrt(tau1.time.t[1])
}
beta1.int.mn.t <- beta1.time.mn.t <- 0
mu.t <- matrix(0, ns, nt)
time <- (1:nt - nt / 2) / nt   # centered time covariate
for (t in 1:nt) {
  mu.t[, t] <- beta1.int.t[, t] + beta1.time.t [, t] * time[t]
}
ls.t <- matrix(0, ns, nt)
xi.t <- 0
y.t <- rgev(n = ns * nt, loc = mu.t, exp(ls.t), xi.t)
# initialize values: truth plus N(0, 1) noise
beta1.int <- beta1.int.t + rnorm(ns * nt)
beta1.time <- beta1.time.t + rnorm(ns * nt)
mu <- matrix(0, ns, nt)
SS1.int <- SS1.time <- rep(0, nt)
for (t in 1:nt) {
  mu[, t] <- beta1.int[, t] + beta1.time[, t] * time[t]
  # Gaussian-process sums of squares at the initial state
  SS1.int[t] <- quad.form(Qb.t, beta1.int[, t] - beta1.int.mn.t)
  SS1.time[t] <- quad.form(Qb.t, beta1.time[, t] - beta1.time.mn.t)
}
# thresh = -Inf, theta = 1, alpha = 1 disable censoring / residual dependence
thresh.t <- matrix(-Inf, ns, nt)
theta.t <- matrix(1, ns, nt)
theta.xi.t <- theta.t^xi.t
alpha.t <- 1
curll <- loglike(y = y.t, mu = mu, ls = ls.t, xi = xi.t,
                 theta = theta.t, theta.xi = theta.xi.t,
                 thresh = thresh.t, alpha = alpha.t)
niters <- 10000
burn <- 8000
keep.beta1.int <- keep.beta1.time <- array(0, dim = c(niters, ns, nt))
acc.beta1 <- att.beta1 <- MH.beta1 <- matrix(0.1, ns, nt)
set.seed(3366) # demo
for (iter in 1:niters) {
  # joint site-by-day MH update of intercept and slope
  this.update <- updateBeta1(beta.int = beta1.int, beta.int.mn = beta1.int.mn.t,
                             SS.int = SS1.int, tau.int = tau1.int.t,
                             beta.time = beta1.time,
                             beta.time.mn = beta1.time.mn.t,
                             SS.time = SS1.time, tau.time = tau1.time.t,
                             mu = mu, time = time,
                             y = y.t, theta = theta.t, theta.xi = theta.xi.t,
                             ls = ls.t, xi = xi.t, thresh = thresh.t,
                             alpha = alpha.t, Qb = Qb.t, curll = curll,
                             acc = acc.beta1, att = att.beta1, MH = MH.beta1)
  beta1.int <- this.update$beta.int
  SS1.int <- this.update$SS.int
  beta1.time <- this.update$beta.time
  SS1.time <- this.update$SS.time
  mu <- this.update$mu
  curll <- this.update$curll
  acc.beta1 <- this.update$acc
  att.beta1 <- this.update$att
  keep.beta1.int[iter, , ] <- beta1.int
  keep.beta1.time[iter, , ] <- beta1.time
  if (iter < burn / 2) {
    # adapt the MH step sizes during the first half of burn-in
    this.update <- mhUpdate(acc = acc.beta1, att = att.beta1, MH = MH.beta1,
                            target.min = 0.3, target.max = 0.6,
                            nattempts = 400)
    acc.beta1 <- this.update$acc
    att.beta1 <- this.update$att
    MH.beta1 <- this.update$MH
  }
  if (iter %% 500 == 0) {
    # trace plots for sites 1 and 2 over the first four days
    par(mfrow = c(4, 4))
    acc.rate <- round(acc.beta1 / att.beta1, 3)
    for (i in 1:4) {
      plot(keep.beta1.int[1:iter, 1, i], type = "l",
           main = paste("intercept site 1, day ", i, " (",
                        round(beta1.int.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.time[1:iter, 1, i], type = "l",
           main = paste("time site 1, day ", i, " (",
                        round(beta1.time.t[1, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[1, i], sep = ""),
           xlab = acc.rate[1, i])
      plot(keep.beta1.int[1:iter, 2, i], type = "l",
           main = paste("intercept site 2, day ", i, " (",
                        round(beta1.int.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
      plot(keep.beta1.time[1:iter, 2, i], type = "l",
           main = paste("time site 2, day ", i, " (",
                        round(beta1.time.t[2, i], 3), ")", sep = ""),
           ylab = paste("MH: ", MH.beta1[2, i], sep = ""),
           xlab = acc.rate[2, i])
    }
  }
}
|
70c82c2c306ac59e8e3786231b959ba3a2cde540
|
4b670987944f024846e8d170b73b372df1710fcf
|
/man/dli.xform.Rd
|
ee1b06e7f85813059bd9e25c95ceb4f2b880b2bb
|
[] |
no_license
|
beckyfisher/custom_functions
|
94bed392501b807d6546dab9aa3c243bd83042b6
|
291748ef9be93dd05deff8ff99cb3292c741521c
|
refs/heads/master
| 2020-04-08T10:59:47.805179
| 2019-09-02T04:18:54
| 2019-09-02T04:18:54
| 159,289,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
dli.xform.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dredging_wq_transformations.R
\name{dli.xform}
\alias{dli.xform}
\title{dli.xform}
\usage{
dli.xform(x)
}
\arguments{
\item{x}{A numeric vector of raw DLI values}
}
\value{
A numeric vector of transformed values.
}
\description{
Applies a transformation
}
|
7f0a686bd29b4cf9492d616f4720cd7315a56053
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ontologySimilarity/inst/doc/ontologySimilarity-guide.R
|
d680e33018043811501e2a9953a1f7a5f4869a40
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
ontologySimilarity-guide.R
|
## ------------------------------------------------------------------------
# Code extracted from the ontologySimilarity vignette; the "## ----" lines
# are the original knitr chunk boundaries.
suppressPackageStartupMessages(library(ontologyIndex))
suppressPackageStartupMessages(library(ontologySimilarity))
data(hpo)
set.seed(1)
## ------------------------------------------------------------------------
#random set of terms with ancestors
terms <- get_ancestors(hpo, sample(hpo$id, size=30))
#set information content of terms (as if each term occurs with frequency `1/n`)
information_content <- get_term_info_content(hpo, term_sets=as.list(terms))
#similarity of term pairs
tsm <- get_term_sim_mat(hpo, information_content)
## ------------------------------------------------------------------------
# five synthetic "phenotypes": minimal sets of 8 sampled terms each
phenotypes <- replicate(simplify=FALSE, n=5, expr=minimal_set(hpo, sample(terms, size=8)))
## ------------------------------------------------------------------------
# pairwise similarity matrix between the phenotype term sets
sim_mat <- get_sim_mat(tsm, phenotypes)
sim_mat
## ------------------------------------------------------------------------
# group similarity of phenotypes 1-3...
get_sim(sim_mat, 1:3)
## ------------------------------------------------------------------------
# ...and a p-value for that group similarity
get_sim_p(sim_mat, 1:3)
## ------------------------------------------------------------------------
# p-value for the similarity of one profile to another
get_sim_to_profile_p(tsm, phenotypes[[1]], phenotypes[[2]])
|
6bdbd1fde5f211a84418c78c057c354ccdb21f89
|
f5e3ef688de15b483f97518626d75086a62efd6f
|
/histo.R
|
68a46679e880ed9a1a0b710742e928d49cc02b2a
|
[] |
no_license
|
maxerickson/osm_ms_buildings
|
99cb42bd006b4270bbd8d6191050ce21ee63403b
|
5441dc2fe1a65575d0428b50b5b5d0f540ec8900
|
refs/heads/master
| 2021-01-18T18:04:05.090768
| 2018-10-24T21:33:32
| 2018-10-24T21:33:32
| 86,838,437
| 21
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
histo.R
|
# Summarize building-overlap fractions into equal-width bins on [0, 1] and
# print a "low-high  count" frequency table.
d <- read.table('/home/max/bingbuildings/overlaps.txt')
# hist() draws the histogram and returns the breaks/counts used below
res <- hist(d[, 1], seq(0, 1, 0.1))
# derive the bin count from the result instead of hard-coding 10
nbins <- length(res$counts)
binnames <- format(res$breaks, digits = 1)
print(data.frame(paste(binnames[seq_len(nbins)], binnames[seq_len(nbins) + 1], sep = '-'),
                 res$counts),
      right = FALSE,
      row.names = FALSE)
|
f5413b6be7bcdf33d73a367c797aeb5bc55e8ee1
|
95a5cd4c339aeaca2f7e612b55726bab0369e090
|
/inst/examples/app.R
|
981d4c510f0943ecf2566593507d98290c66e7ed
|
[] |
no_license
|
bright-spark/shinyUIkit
|
88b6434cc34a0e02413c31d65c5159785e55fd1a
|
66fb6a640fa41ab5f93fc58bde26fd5259093eb3
|
refs/heads/master
| 2023-03-19T03:40:24.968413
| 2019-07-22T14:55:04
| 2019-07-22T14:55:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,147
|
r
|
app.R
|
library(shiny)
# Demo app for the shinyUIkit package: a three-tab switcher whose first tab
# holds two sortable (drag-and-drop) cards with reactive plots, the second a
# filtered table of mtcars, and the third static text.
shiny::shinyApp(
  ui = UIkitPage(
    title = "My UIkit application",
    UIkitSwitcher(
      #animation = "scale-up",
      mode = "switcher",
      UIkitSwitcherItem(
        tabName = "tab1",
        UIkitSortable(
          # card 1: histogram of N(0, 1) draws, sample size from a slider
          UIkitSortableItem(
            UIkitCard(
              width = NULL,
              title = "Plot Ouput",
              hover = TRUE,
              horizontal = FALSE,
              body = tagList(
                sliderInput(
                  "obs",
                  "Number of observations:",
                  min = 0,
                  max = 1000,
                  value = 500
                ),
                plotOutput("distPlot")
              )
            )
          ),
          # card 2: histogram from a user-chosen distribution
          UIkitSortableItem(
            UIkitCard(
              width = NULL,
              title = "Plot Ouput",
              hover = TRUE,
              horizontal = FALSE,
              body = tagList(
                radioButtons(
                  "dist", "Distribution type:",
                  c("Normal" = "norm",
                    "Uniform" = "unif",
                    "Log-normal" = "lnorm",
                    "Exponential" = "exp")
                ),
                plotOutput("plot")
              )
            )
          )
        )
      ),
      UIkitSwitcherItem(
        tabName = "tab2",
        checkboxGroupInput(
          "variable",
          "Variables to show:",
          c("Cylinders" = "cyl",
            "Transmission" = "am",
            "Gears" = "gear")
        ),
        tableOutput("data")
      ),
      UIkitSwitcherItem(
        tabName = "tab3",
        "some test"
      )
    )
  ),
  server = function(input, output) {
    # histogram reacts to the slider's sample size
    output$distPlot <- renderPlot({
      hist(rnorm(input$obs))
    })
    # map the radio-button value to an RNG function; rnorm is the default
    output$plot <- renderPlot({
      dist <- switch(
        input$dist,
        norm = rnorm,
        unif = runif,
        lnorm = rlnorm,
        exp = rexp,
        rnorm
      )
      hist(dist(500))
    }, execOnResize = TRUE)
    # mtcars restricted to mpg plus whichever columns are checked
    output$data <- renderTable({
      mtcars[, c("mpg", input$variable), drop = FALSE]
    }, rownames = TRUE)
  }
)
|
01c69e7432c2c2ead9dafe4cc1a8bffa70beb578
|
7bbf674e12365b31eff3aaaf9dc25a7548436919
|
/tests/prove.R
|
31d5e3b6cd93ca0064012cf301584d9c5c623dcd
|
[] |
no_license
|
phaverty/bigmemoryExtras
|
d61abed1ee1cd52dbb35bf9b32a850743150ded1
|
033d144545c5fd1637e2c76d2c86cdef776aad22
|
refs/heads/master
| 2021-07-05T18:51:43.136409
| 2020-12-07T16:20:43
| 2020-12-07T16:20:43
| 22,368,191
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
prove.R
|
# Load the package under test, failing loudly if it is unavailable, then run
# its internal (unexported) test suite.
if (!require("bigmemoryExtras")) {
  stop("unable to load bigmemoryExtras package")
}
bigmemoryExtras:::.test()
|
1ac4811c4343c48d1c3f5f237e75f12aad126cd7
|
84e7b589d3d8b05e52e927dc7ce77b79515e71fa
|
/ch19 - Tidymodel(최적모델)/II-3.Neural Network.R
|
2db43b6391ab6bf2283c9ff8c9670d57f875d4ca
|
[
"MIT"
] |
permissive
|
Lee-changyul/Rstudy_Lee
|
d1e0f28190de74643d5c0a14f178b41250db7860
|
837a88d6cb4c0e223b42ca18dc5a469051b48533
|
refs/heads/main
| 2023-06-29T20:21:10.968106
| 2021-08-02T01:48:00
| 2021-08-02T01:48:00
| 325,493,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,152
|
r
|
II-3.Neural Network.R
|
## II. Building the individual models
## II-3. Neural Network
## 05. Define the tunable model
# Single-hidden-layer perceptron (nnet engine) with hidden_units and penalty
# left as tuning parameters.
nnet_model <-
  mlp(
    hidden_units = tune(),
    penalty = tune(),
    activation = "relu") %>% # relu: commonly used activation for this search
  set_engine("nnet") %>%
  set_mode("classification")
nnet_model
# Build the hyperparameter grid
# dials:: grid_regular
# (e.g. 5 cost_complexity values x 5 tree_depth values = 25 combinations)
nnet_grid <-
  nnet_model %>%
  parameters() %>%
  grid_max_entropy(size = 10)
nnet_grid
# Build the preprocessing recipe: Box-Cox, normalization, drop zero-variance
# predictors, dummy-code nominal predictors
# NOTE(review): original comment said random forests do not need dummy or
# normalized predictors — likely copied from another script; neural nets DO
# benefit from these steps, which is why they are included here.
nnet_recipe <-
  recipe(Personal_Loan ~ ., data = train_data) %>%
  step_BoxCox(all_numeric())%>%
  step_normalize(all_numeric()) %>%
  step_zv(all_predictors()) %>%
  step_dummy(all_nominal(), -all_outcomes())
summary(nnet_recipe)
## 06. Create the workflow
set.seed(123) # set random seed
nnet_workflow <-
  workflow() %>%
  add_recipe(nnet_recipe) %>%
  add_model(nnet_model)
nnet_workflow
## 07. Hyperparameter tuning
# MODEL TUNING WITH A GRID
nnet_results <-
  nnet_workflow %>%
  tune_grid(
    resamples = bank_folds,
    grid = nnet_grid,
    control = control_grid(save_pred = TRUE),
    metrics = metric_set(roc_auc, accuracy)
  )
nnet_results
# Inspect the tuning results
nnet_results %>%
  collect_metrics()
# Plot the tuning results
autoplot(nnet_results)
## 08. Finalize the model (training data)
# Inspect the top candidates
nnet_results %>%
  show_best("roc_auc", n=10) %>%
  arrange(desc(mean))
# Select the best model by ROC AUC
nnet_best <-
  nnet_results %>%
  select_best("roc_auc")
nnet_best
nnet_results %>%
  collect_predictions()
# ROC curve for the best configuration
nnet_auc <-
  nnet_results %>%
  collect_predictions(parameters = nnet_best) %>%
  roc_curve(Personal_Loan, .pred_Yes) %>%
  mutate(model = "Neural Network")
autoplot(nnet_auc)
# Metric-vs-penalty profile plot
nnet_plot <-
  nnet_results %>%
  collect_metrics() %>%
  ggplot(aes(x = penalty, y = mean, color=.metric)) +
  geom_point() +
  geom_line() +
  ylab("Area under the ROC Curve") +
  scale_x_log10(labels = scales::label_number())
nnet_plot
|
8a5a7c26e6040396c5de4bccd4f8e750b7d2c619
|
cf0c1e0c96c05b2d2359f344e1b0f070ef49d728
|
/cachematrix.R
|
dec828ee96f3ea20d79752d137a5225b112be514
|
[] |
no_license
|
eprinjo/ProgrammingAssignment2
|
bd98cd87460cef64a49dfd94ae9b49316fd9809d
|
cd6f8beb1d389f53296c6c4667d2a88509b6993d
|
refs/heads/master
| 2021-01-13T06:53:05.884962
| 2015-07-25T19:44:56
| 2015-07-25T19:44:56
| 39,461,487
| 0
| 0
| null | 2015-07-21T18:01:42
| 2015-07-21T18:01:42
| null |
UTF-8
|
R
| false
| false
| 937
|
r
|
cachematrix.R
|
## Build a cache object around a matrix.
##
## Returns a list of two closures that share the enclosing environment:
##   inverse() - computes solve(x) and stores it in the cache
##   get()     - returns the cached inverse (NULL until inverse() is called)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  ## compute the inverse and remember it via the enclosing environment
  inverse <- function() {
    cached <<- solve(x)
  }
  ## hand back whatever is currently cached
  get <- function() {
    cached
  }
  list(inverse = inverse, get = get)
}
## Return the inverse of the matrix wrapped by a makeCacheMatrix-style object.
## A previously computed inverse is served from the cache (with a message);
## otherwise the inverse is computed, cached, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$get()
  ## cache hit: report and return immediately
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## cache miss: compute and store the inverse, then read it back
  x$inverse()
  x$get()
}
|
e77c03020c75fc784b7f05161314c5460ce4a119
|
cc0f711dbf151f5bd65a0c563cac0c1bec04481a
|
/man/pq.diagnostics.Rd
|
d8f30ffeb4e6331a59778d27ad6d533ba857f74d
|
[] |
no_license
|
cran/lmem.qtler
|
a9a3652c084fd4a4b5508ac506563fd7ac0db847
|
6b056e4021d772421b2dc028b295a9c4c690e30a
|
refs/heads/master
| 2021-01-21T14:48:35.448280
| 2016-07-12T07:49:42
| 2016-07-12T07:49:42
| 58,163,717
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,536
|
rd
|
pq.diagnostics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnosis_functions.R
\name{pq.diagnostics}
\alias{pq.diagnostics}
\title{Performs phenotypic data quality diagnostics.}
\usage{
pq.diagnostics (crossobj, boxplot = TRUE, qqplot = FALSE,
scatterplot = TRUE,heatplot = TRUE)
}
\arguments{
\item{crossobj}{An object of class = cross obtained from the qtl.cross
function from this package, or the read.cross
function from r/qtl package (Broman and Sen, 2009).
This file contains phenotypic means, genotypic marker score, and genetic map.}
\item{boxplot}{Indicates whether a boxplot should be performed.
TRUE/FALSE term. TRUE is set as default.}
\item{qqplot}{Indicates whether a qqplot should be performed.
TRUE/FALSE term. FALSE is set as default.}
\item{scatterplot}{Indicates whether a scatterplot should be performed.
TRUE/FALSE term. TRUE is set as default.}
\item{heatplot}{Indicates whether a phenotypic heatplot should be performed.
TRUE is set as default.}
}
\value{
It returns: Boxplot, Scatterplot, QQplot, Heatplot
}
\description{
Performs phenotypic data quality diagnostic of an object of class cross
created by the qtl.cross function, including summary descriptive
diagnostics, correlation across traits,and distribution of traits.
}
\details{
Writes the diagnostic reports to the working directory.
}
\note{
Can be run before QTL analysis to assess
the quality of the phenotypic data.
}
\examples{
\dontrun{
data (SxM_geno)
data (SxM_map)
data (SxM_pheno)
P.data <- SxM_pheno
G.data <- SxM_geno
map.data <- SxM_map
cross.data <- qtl.cross (P.data, G.data, map.data,
cross='dh', heterozygotes = FALSE)
summary (cross.data)
jittermap (cross.data)
# Pheno Quality
pq.diagnostics (crossobj=cross.data)
}
}
\author{
Lucia Gutierrez
}
\references{
Broman KW, Sen S (2009) A Guide to QTL Mapping with R/qtl.
Springer, NewYork.
Comadran J, Thomas W, van Eeuwijk F, Ceccarelli S, Grando S, Stanca A,
Pecchioni N, Akar T, Al-Yassin A, Benbelkacem A, Ouabbou H, Bort J,
Romagosa I, Hackett C, Russell J (2009) Patterns of genetic diversity
and linkage disequilibrium in a highly structured Hordeum vulgare
association-mapping population for the Mediterranean basin.
Theor Appl Genet 119:175-187
Milne et al., (2010) Flapjack - graphical genotype visualization.
Bioinformatics 26(24), 3133-3134.
}
\seealso{
qtl.cross
}
|
756d9a16c7d443f550a0975323dc8878d690434a
|
86772a78af6ca3567ed333c9a4cd68c5af73848d
|
/supplementaries/Mode Jumping MCMC/supplementary/examples/Simulated Data (Example 1)/mode_jumping_package_class_simulated_bas_data_3211.r
|
049941e97d163a4c940e36a74e800ce2ecf847d8
|
[] |
no_license
|
aliaksah/EMJMCMC2016
|
077170db8ca4a21fbf158d182f551b3814c6c702
|
3954d55fc45296297ee561e0f97f85eb5048c39e
|
refs/heads/master
| 2023-07-19T16:52:43.772170
| 2023-07-15T16:05:37
| 2023-07-15T16:05:37
| 53,848,643
| 17
| 5
| null | 2021-11-25T14:53:35
| 2016-03-14T10:51:06
|
R
|
UTF-8
|
R
| false
| false
| 8,106
|
r
|
mode_jumping_package_class_simulated_bas_data_3211.r
|
# Reproduces the first simulated-data experiment of the BAS article
# (Clyde, Ghosh & Littman) with the EMJMCMC2016 mode-jumping MCMC package:
# first a full enumeration of all 2^15 models (the "truth"), then Niter
# repeated mode-jumping MCMC runs whose posterior estimates are compared
# against that truth (bias, RMSE, captured posterior mass).
# NOTE(review): depends on the EMJMCMC package and downloads its data from
# GitHub, so it is interactive/exploratory and not runnable offline.
rm(list = ls(all = TRUE))
# install the required packges if needed
#install.packages("INLA", repos="http://www.math.ntnu.no/inla/R/testing")
#install.packages("bigmemory")
#install.packages("snow")
#install.packages("Rmpi")
#install.packages("ade4")
#install.packages("sp")
#install.packages("BAS")
#install.packages("https://github.com/aliaksah/EMJMCMC2016/files/270429/EMJMCMC_1.2.tar.gz", repos = NULL, type="source")
#install.packages("RCurl")
#install.packages("hash")
library(hash)
library(RCurl)
library(EMJMCMC)
library(sp)
library(INLA)
library(parallel)
library(bigmemory)
library(snow)
library(MASS)
library(ade4)
library(copula)
library(compiler)
library(BAS)
require(stats)
#define your working directory, where the data files are stored
workdir<-""
#prepare data
# Simulated design matrix (simx) and response (simy) fetched from GitHub;
# combined into one data frame with the response in column "Y".
simx <- read.table(text=getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Data%20%28Example%201%29/simcen-x.txt"))
simy <- read.table(text=getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Data%20%28Example%201%29/simcen-y.txt"))
data.example <- cbind(simy,simx)
names(data.example)[1]="Y"
#fparam <- c("Const",colnames(data)[-1])
fparam.example <- colnames(data.example)[-1]
fobserved.example <- colnames(data.example)[1]
#dataframe for results; n/b +1 is required for the summary statistics
# One row per possible model (2^p) in a shared-memory big.matrix.
statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
statistics <- describe(statistics1)
#create MySearch object with default parameters
mySearch = EMJMCMC2016()
# load functions as in BAS article by Clyde, Ghosh and Littman to reproduce their first example
mySearch$estimator = estimate.bas.lm
mySearch$estimator.args = list(data = data.example,prior = 3, g = 100 ,n=100)
# carry out full enumeration
system.time(
FFF<-mySearch$full_selection(list(statid=6, totalit =32769, ub = 100,mlikcur=-Inf,waiccur =100000))
)
# check that all models are enumerated during the full search procedure
idn<-which(!is.na(statistics1[,1]))
length(idn)
# see the best current results and total number of iterations
mySearch$g.results[,]
#compare to
which(statistics1[,1]==max(statistics1[,1],na.rm = TRUE))
View(statistics1[2113,])
which(statistics1[868,1]<= -10)
# get graphical output (only makes sence after EMJMCMC procedure carried out)
mySearch$visualize_results(statistics1 = statistics1, template = "test/full1BASclasstest", mds_size = 1024, crit = list(waic=TRUE,mlik=TRUE,dic=TRUE), draw_dist = TRUE)
# once full search is completed, get the truth for the experiment
ppp<-mySearch$post_proceed_results(statistics1 = statistics1)
truth = ppp$p.post # make sure it is equal to Truth column from the article
truth.m = ppp$m.post
truth.prob = ppp$s.mass
ordering = sort(ppp$p.post,index.return=T)
fake500 <- sum(exp(x = sort(statistics1[,1],decreasing = T)[1:700]),na.rm = TRUE)/truth.prob
print("pi truth")
sprintf("%.10f",truth[ordering$ix])
#estimate best performance possible
# Keep only the 3211 best models (by column 1) and blank out the rest,
# giving the best result any search visiting 3211 models could achieve.
iddx <- sort(statistics1[,1],decreasing = T,index.return=T)$ix
statistics1[as.numeric(iddx[3212:2^15]),1:15]<-NA
# get the "unbeatible" results
ppp.best<-mySearch$post_proceed_results(statistics1 = statistics1)
best = ppp.best$p.post # make sure it is equal to Truth column from the article
bset.m = ppp.best$m.post
best.prob = ppp.best$s.mass/truth.prob
print("pi best")
sprintf("%.10f",best[ordering$ix])
# define search parameters
mySearch$max.cpu=as.integer(4)
mySearch$switch.type=as.integer(1)
mySearch$switch.type.glob=as.integer(1)
#mySearch$printable.opt = TRUE
mySearch$max.N.glob=as.integer(4)
mySearch$min.N.glob=as.integer(4)
mySearch$max.N=as.integer(2)
mySearch$min.N=as.integer(2)
mySearch$recalc.margin = as.integer(400)
# Proposal / neighbourhood tuning weights for the mode-jumping sampler
# (values presumably tuned by the package author -- not derived here).
distrib_of_proposals = c(76.91870,71.25264,87.68184,60.55921,17812.39852)
distrib_of_neighbourhoods=t(array(data = c(7.6651604,16.773326,14.541629,12.839445,2.964227,13.048343,7.165434,
0.9936905,15.942490,11.040131,3.200394,15.349051,5.466632,14.676458,
1.5184551,9.285762,6.125034,3.627547,13.343413,2.923767,15.318774,
14.5295380,1.521960,11.804457,5.070282,6.934380,10.578945,12.455602,
6.0826035,2.453729,14.340435,14.863495,1.028312,12.685017,13.806295),dim = c(7,5)))
distrib_of_neighbourhoods[7]=distrib_of_neighbourhoods[7]/50
#reproduce the 1st experiment as in BAS article
# Run the mode-jumping MCMC Niter times and accumulate bias/RMSE/coverage.
Niter <- 100
thining<-1
system.time({
  vect <-array(data = 0,dim = c(15,Niter))
  vect.mc <-array(data = 0,dim = c(15,Niter))
  inits <-array(data = 0,dim = Niter)
  freqs <-array(data = 100,dim = c(5,Niter))
  freqs.p <-array(data = 100,dim = c(5,7,Niter))
  masses <- array(data = 0,dim = Niter)
  biases.m <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  biases.m.mc <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  rmse.m <- array(data = 0,dim = Niter)
  rmse.m.mc <- array(data = 0,dim = Niter)
  iterats <- array(data = 0,dim = c(2,Niter))
  for(i in 1:Niter)
  {
    # Fresh statistics matrix and reset search counters for each replicate.
    statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
    statistics <- describe(statistics1)
    mySearch$g.results[4,1]<-0
    mySearch$g.results[4,2]<-0
    mySearch$p.add = array(data = 0.5,dim = 15)
    print("BEGIN ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(i)
    # Reproducible random starting model for this replicate.
    set.seed(10*i)
    initsol=rbinom(n = length(fparam.example),size = 1,prob = 0.8)
    inits[i] <- mySearch$bittodec(initsol)
    freqs[,i]<- distrib_of_proposals
    resm<-mySearch$modejumping_mcmc(list(varcur=initsol,statid=5, distrib_of_proposals = distrib_of_proposals,distrib_of_neighbourhoods=distrib_of_neighbourhoods, eps = 0.0001, trit = 32700, trest = 3200 , burnin = 50, max.time = 30, maxit = 100000, print.freq =500))
    # "rm" = renormalised (bayes.results) estimates, "mc" = raw MC frequencies.
    vect[,i]<-resm$bayes.results$p.post
    vect.mc[,i]<-resm$p.post
    masses[i]<-resm$bayes.results$s.mass/truth.prob
    print(masses[i])
    freqs.p[,,i] <- distrib_of_neighbourhoods
    cur.p.post <- resm$bayes.results$m.post
    cur.p.post[(which(is.na(cur.p.post)))]<-0
    rmse.m[i]<-mean((cur.p.post - truth.m)^2,na.rm = TRUE)
    biases.m<-biases.m + (cur.p.post - truth.m)
    cur.p.post.mc <- resm$m.post
    cur.p.post.mc[(which(is.na(cur.p.post.mc)))]<-0
    rmse.m.mc[i]<-mean((cur.p.post.mc - truth.m)^2,na.rm = TRUE)
    biases.m.mc<-biases.m.mc + (cur.p.post.mc - truth.m)
    iterats[1,i]<-mySearch$g.results[4,1]
    iterats[2,i]<-mySearch$g.results[4,2]
    print("COMPLETE ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! with")
    print(iterats[2,i])
    remove(statistics1)
    remove(statistics)
  }
}
)
# Summaries across the Niter replicate runs.
Nlim <- 1
order.deviat <- sort(masses,decreasing = TRUE,index.return=T)
print("model bias rm")
sqrt(mean((biases.m/Niter)^2,na.rm = TRUE))*100000
print("model rmse rm")
sqrt(mean(rmse.m))*100000
print("model bias mc")
sqrt(mean((biases.m.mc/Niter)^2,na.rm = TRUE))*100000
print("model rmse mc")
sqrt(mean(rmse.m.mc))*100000
print("model coverages")
mean(masses)
print("mean # of iterations")# even smaller on average than in BAS
mean(iterats[1,])
print("mean # of estimations")# even smaller on average than in BAS
mean(iterats[2,])
# correlation between the MSE and the masses, obviously almost minus 1
cor(rmse.m,masses)
cor(rmse.m.mc,masses)
# Per-covariate inclusion-probability bias and RMSE against the truth.
truth.buf <- array(data = 0,dim = c(15,Niter))
truth.buf[,1:Niter]<-truth
bias <- vect - truth.buf
bias.mc <- vect.mc - truth.buf
rmse <- (vect^2 +truth.buf^2 - 2*vect*truth.buf)
rmse.mc <- (vect.mc^2 +truth.buf^2 - 2*vect.mc*truth.buf)
bias.avg.rm<-rowMeans(bias)
rmse.avg.rm <-sqrt(rowMeans(rmse))
bias.avg.mc<-rowMeans(bias.mc)
rmse.avg.mc <-sqrt(rowMeans(rmse.mc))
print("pi biases rm")
sprintf("%.10f",bias.avg.rm[ordering$ix]*100)
print("pi rmse rm")
sprintf("%.10f",rmse.avg.rm[ordering$ix]*100)
print("pi biases mc")
sprintf("%.10f",bias.avg.mc[ordering$ix]*100)
print("pi rmse mc")
sprintf("%.10f",rmse.avg.mc[ordering$ix]*100)
# view the results
View((cbind(bias.avg.rm[ordering$ix],rmse.avg.rm[ordering$ix],bias.avg.mc[ordering$ix],rmse.avg.mc[ordering$ix])*100))
|
c58044f22df1b784b3d8790c4dc5a66cad7bac33
|
58ac7af9a85b288580401ff386e025f3d0c9fd43
|
/DDPSC/Library_Statistics/fastqc_summarize.R
|
5e75fbd0d242a102547f4acb4f54eeec9147329a
|
[] |
no_license
|
calizarr/Misc
|
431f0a22f82ec029b6fc5d39db722259d564e71f
|
037b1ce92c0d9776dcd5a4e993083c355e1558bb
|
refs/heads/master
| 2022-07-09T19:12:13.958079
| 2022-05-20T23:06:44
| 2022-05-20T23:06:44
| 36,998,913
| 0
| 0
| null | 2021-01-20T22:04:00
| 2015-06-06T23:56:39
|
R
|
UTF-8
|
R
| false
| false
| 4,120
|
r
|
fastqc_summarize.R
|
#!/usr/bin/Rscript
library(tidyr)
library(argparser, quietly = TRUE)
# Command-line interface (argparser):
#   --input (-i)     one or more zip files, or a single directory path
#   --directory (-d) directory to resolve multiple file names against
#   --filename (-f)  output CSV path for the summarised FastQC table
p <- arg_parser("Take files, a single file, or a directory then a directory path (for files) and an output filename to summarize FastQC")
p <- add_argument(p,
                  arg = "--input",
                  help = "Give a space separated list of files, a single file, or a directory path",
                  type = "character",
                  nargs = Inf,
                  short = "-i")
p <- add_argument(p,
                  arg = "--directory",
                  help = "Directory path where multiple files reside if multiple space separated files given",
                  type = "character",
                  short = "-d")
p <- add_argument(p,
                  arg = "--filename",
                  help = "Output filename for the FastQC Summarized output",
                  short = "-f")
# Pull the parsed values into plain variables used by the rest of the script.
args <- parse_args(p)
filename <- args$filename
directory <- args$directory
files <- args$input
# Validate the command-line arguments and exit early if either is missing.
# `filename` is a single string; `files` may be a character vector, so the
# vector case is reduced with all()/length() before any scalar test --
# the original `is.na(files) | files == ""` produced a length > 1 condition,
# which is an error in modern R.
nameFlag <- is.na(filename) || filename == ""
fileFlag <- length(files) == 0 || all(is.na(files) | files == "")
if (nameFlag) {
  print("Please provide a filename")
}
if (fileFlag) {
  # Checked independently of the filename: the original else-if silently
  # skipped this message when the filename was also missing.
  print("Please provide an input")
}
if (fileFlag || nameFlag) {
  quit()
}
## options(echo=TRUE)
## args <- commandArgs(trailingOnly = TRUE)
## directory <- tail(args, n = 2)[[1]]
## filename <- tail(args, n = 2)[[2]]
## print(paste0("This is the filename: ", filename))
## files <- args[1:(length(args)-2)]
## print(paste0("This is the current directory: ", getwd()))
## print(files)
# Parse a single FastQC result zip and summarise its module statuses.
#
# Args:
#   zip:   path to a FastQC output archive (e.g. "sample_fastqc.zip").
#   debug: if TRUE, print the paths being processed.
#
# Returns a list with two data frames:
#   Summary - long format, one row per (Library, Variable, Value), including
#             derived "Pass/Fail/Warn Percentage" rows;
#   Single  - the same information spread to one wide row per library.
analyse.fastQC <- function(zip, debug = FALSE) {
  fastqc.zip <- zip
  # FastQC zips contain a folder named after the archive minus ".zip"
  # (anchored regex; the original unescaped ".zip" matched any char + "zip").
  fastqc.folder <- sub("\\.zip$", "", basename(fastqc.zip))
  if (debug) {
    print(paste0("This is the zip file: ", fastqc.zip))
    print(paste0("This is the fastqc_folder: ", fastqc.folder))
  }
  # summary.txt columns are: status, module name, file name (tab separated).
  # read.table opens and closes its own connection, so the previous
  # closeAllConnections() -- which closed every open connection in the
  # session -- is no longer needed; the unused readLines() of
  # fastqc_data.txt (which leaked an open connection) was removed.
  fastqc.summary <- read.table(unz(fastqc.zip, file.path(fastqc.folder, "summary.txt")),
                               header = FALSE, sep = "\t",
                               colClasses = rep("character", 3))
  # Reorder to (file, module, status) and name accordingly.
  fastqc.summary <- fastqc.summary[, c(3, 2, 1)]
  names(fastqc.summary) <- c("Library", "Variable", "Value")
  # Share of modules with each status. FastQC normally reports 12 modules;
  # use the actual row count so other FastQC versions are handled correctly.
  n.tests <- nrow(fastqc.summary)
  percentages <- list(
    "Pass Percentage" = round((sum(fastqc.summary$Value == "PASS") / n.tests) * 100, 2),
    "Fail Percentage" = round((sum(fastqc.summary$Value == "FAIL") / n.tests) * 100, 2),
    "Warn Percentage" = round((sum(fastqc.summary$Value == "WARN") / n.tests) * 100, 2)
  )
  percentages <- as.data.frame(do.call(rbind, percentages))
  percentages[, "Variable"] <- rownames(percentages)
  rownames(percentages) <- seq(nrow(percentages))
  percentages[, "Library"] <- fastqc.summary[1, 1]
  percentages <- percentages[, c(3, 2, 1)]
  names(percentages) <- names(fastqc.summary)
  fastqc.summary <- rbind(fastqc.summary, percentages)
  # Wide format; the column permutation restores FastQC's module order
  # (spread() sorts the Variable names alphabetically).
  single.fastqc <- spread(fastqc.summary, "Variable", "Value")[, c(1, 2, 3, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 7, 4, 16)]
  list(Summary = fastqc.summary, Single = single.fastqc)
}
# Run analyse.fastQC over a set of FastQC zips and row-bind the results.
#
# Args:
#   directory: either a single directory path (every "*zip" file inside it
#              is analysed) or a character vector of zip file paths.
#
# Returns a list with the combined long-format (Summary) and
# one-row-per-library (Single) data frames.
multiple.files <- function(directory) {
  if (length(directory) == 1 && dir.exists(directory)) {
    zips <- file.path(directory, grep("zip", dir(directory), val = TRUE))
  } else {
    # Honor the argument itself as the list of zip files. The original
    # fell through to a global `files` variable here, which only worked
    # by accident of the calling script's environment.
    zips <- directory
  }
  analyses <- lapply(zips, analyse.fastQC)
  analyses.single <- do.call(rbind, lapply(analyses, function(a) a$Single))
  analyses.long <- do.call(rbind, lapply(analyses, function(a) a$Summary))
  list(Summary = analyses.long, Single = analyses.single)
}
# Dispatch on the type of --input:
#   one argument that is a directory -> analyse every zip inside it,
#   one argument that is a file      -> analyse that single zip,
#   several arguments                -> analyse all of them.
if (length(files) == 1) {
  if (dir.exists(files[[1]])) {
    analyses.single <- multiple.files(files[[1]])
  } else {
    analyses.single <- analyse.fastQC(files[[1]])
  }
} else {
  # Resolve bare file names against --directory when one was supplied.
  if (!is.na(directory) && directory != "") {
    files <- file.path(directory, files)
  }
  # Pass the whole vector: the original passed only files[[1]], silently
  # dropping every other input file.
  analyses.single <- multiple.files(files)
}
# Write the one-row-per-library summary as CSV to --filename.
write.table(analyses.single$Single, file = filename, quote = FALSE, sep = ",", row.names = FALSE)
|
bc3b4429c62cac6e4b3dcfc6c8cf4b29f4d92b07
|
7101abfc448961275c411ee68fe229100ffd4a7f
|
/02.Rcode/80.지도만들기.R
|
3bc14271d22831c58be616a653fd07ec37517679
|
[] |
no_license
|
Gwangil/YourHomeMyHome
|
239a087dfc19d4b97f25347836687e4bad4b62dd
|
bab1c9d5d32f5369f681556a8c8d356db2c9fc53
|
refs/heads/master
| 2021-01-01T19:05:35.272984
| 2019-05-15T00:34:54
| 2019-05-15T00:34:54
| 98,506,459
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 7,102
|
r
|
80.지도만들기.R
|
#install.packages('data.table')
#install.packages('dplyr')
#install.packages("XML") # XML 다루는 패키지
#install.packages("stringr")
#install.packages("ggplot2") # 그림 그리는 패키지
#install.packages("leaflet") # 지도관련 패키지
#install.packages("jsonlite") # json 관련 패키지
#install.packages('RCurl') # url 관련 패키지
#install.packages('purrr') # 뭐였더라.... 구글....
#install.packages('viridis') # 뭐였더라.... 구글....
#install.packages('DT') # 표 만들기 하는 테이블(html)
library(data.table)
library(dplyr)
library(stringr)
library(ggplot2)
library(leaflet)
library(jsonlite)
library(RCurl)
library(XML)
library(purrr)
library(viridis)
library(DT)
######
# Busan apartment-price map: load cleaned transaction data, average prices
# per neighbourhood (dong), geocode each dong via the Google Maps API, and
# render an interactive leaflet map exported to HTML.
# Load all.csv, built beforehand by the transaction-price cleaning code.
X_all <- fread("E:/00.니집내집/90.데이터수집/01.실거래가/all.csv")
# Rename columns to English (Korean column names cause encoding trouble in R).
colnames(X_all) <- c("si","gu","dong","bungi","bungi_main","bungi_sub","complex",
                     "area","yearMonth","day","price","floor","constructed","roadName")
# yearMonth arrives as yyyymm; append a day and parse as a Date.
# NOTE(review): paste0(x, 01) appends "1", not "01"; parsing still succeeds
# because %d accepts the single trailing digit -- confirm, or use "01".
X_all$yearMonth <- as.Date(paste0(X_all$yearMonth,01),"%Y%m%d")
# Convert character columns to factors: faster model fitting later, and a
# factor stores a level table plus integer codes instead of raw strings.
X_all$si <- as.factor(X_all$si)
X_all$gu <- as.factor(X_all$gu)
X_all$dong <- as.factor(X_all$dong)
X_all$bungi <- as.factor(X_all$bungi)
X_all$bungi_main <- as.factor(X_all$bungi_main)
X_all$bungi_sub <- as.factor(X_all$bungi_sub)
X_all$complex <- as.factor(X_all$complex)
X_all$day <- as.factor(str_sub(X_all$day,1,-3))
X_all$roadName <- as.factor(X_all$roadName)
# Drop the row with the minimum price: an outlier that would distort the
# per-dong means plotted on the map (which.min gives that row's index).
X.prac <- X_all[-which.min(X_all$price),]
str(X.prac)
######
# dplyr chain (%>%): keep contracts from 2016 on, group by dong
# (neighbourhood), and summarise the count and mean price per group.
X.prac_map <- X.prac %>% filter(substr(X.prac$yearMonth,1,4) >= 2016) %>%
  group_by(dong) %>% summarise(n = n(), mean = round(mean(price), 0)) %>%
  as.data.table()
###################################
# Geocode each dong with the Google Maps geocoding API (JSON output).
url <- "http://maps.googleapis.com/maps/api/geocode/json?sensor=false&language=ko&address="
for(i in 1:nrow(X.prac_map)){
  # Query string: "Busan Metropolitan City" + neighbourhood name.
  post <- paste0("부산광역시", X.prac_map[i, ]$dong)
  # R's default character set is cp949 on Korean Windows; re-encode to UTF-8.
  post <- iconv(post, from = "cp949", to = "UTF-8") # win version
  # iconv occasionally returns NA; fall back to URL-encoding the raw string.
  if ( is.na(post) ) {
    # NOTE(review): $V5 looks like a stale default column name (this table
    # has named columns such as dong) -- verify this fallback.
    post <- paste0("부산광역시", X.prac_map[i, ]$V5)
    post <- URLencode(post)
  }
  geocode_url = paste(url, post,sep="") # API endpoint + the place to look up
  url_query <- getURL(geocode_url) # fetch the JSON response
  url_json <- fromJSON(paste0(url_query, collapse = "")) # parse the JSON
  x_point <- url_json$results$geometry$location$lng # longitude
  y_point <- url_json$results$geometry$location$lat # latitude
  # Store the coordinates; := assigns into a data.table column in place.
  X.prac_map[i, x_ := x_point]
  X.prac_map[i, y_ := y_point]
  X.prac_map[i, n_ := i]
}
#######################################
# Bin the mean price into labelled bands (units of 100M KRW, "억").
X.prac_map_purr <- X.prac_map %>%
  mutate(level = cut(mean, c(seq(0,50000,10000),10e+10),
                     labels = c("~ 1억", "1억 ~ 2억", "2억 ~ 3억", "3억 ~ 4억", "4억 ~ 5억","5억 ~ ")))
# Build a human-readable price label such as "3억2천만" for map tooltips.
X.prac_map_purr$label <-
  sapply(X.prac_map_purr$mean, function(x) {
    djr <- str_sub(round(x,-3),end=-5) # "억" (100M-won) digits
    cjsaks <- str_sub(round(x,-3),-4,-4) # "천만" (10M-won) digit
    # Combine the two parts, omitting whichever is absent or zero.
    if ( djr != "" && cjsaks != "0" ) {
      paste0(djr,"억",cjsaks,"천만" )
    } else if(djr != "" & cjsaks == "0" ) {
      paste0(djr,"억")
    } else paste0(cjsaks,"천만" )
  }
  )
# Split by price band so each band becomes a toggleable map layer.
X.prac_map_purr_split <- split(X.prac_map_purr, X.prac_map_purr$level)
pusan_leaf <- leaflet(width = "100%") %>% addTiles() # base world map
# Add one clustered marker group per price band onto the base map.
pusan_leaf_ <- names(X.prac_map_purr_split) %>%
  walk( function(df) {
    pusan_leaf <<- pusan_leaf %>%
      addMarkers(data = X.prac_map_purr_split[[df]],
                 lng = ~x_, lat = ~y_,
                 popup = ~as.character(dong),
                 label = ~as.character(label),
                 labelOptions = labelOptions(noHide = T, textsize = "15px", direction = 'auto'),
                 group = df,
                 clusterOptions = markerClusterOptions(removeOutsideVisibleBounds = F))
  })
# Layer-control checkboxes (one per price band) and a mini map.
pusan_leaf <- pusan_leaf %>%
  addLayersControl(
    overlayGroups = names(X.prac_map_purr_split),
    options = layersControlOptions(collapsed = FALSE)
  ) %>% addMiniMap(toggleDisplay = TRUE) %>%
  addProviderTiles(providers$OpenStreetMap) # bundle the background tiles on save
pusan_leaf
# The leaflet map is an htmlwidget; export it as a standalone HTML file.
library(htmlwidgets)
saveWidget(pusan_leaf, file="pusan2016.html")
|
46c7f04c81bebdeffa4201e2558a1ae004808c34
|
1fab782e96a803e221f16434b80656232f88ac74
|
/main.R
|
2aa1a5091528d9f498d20caa10d500bd686edbf0
|
[] |
no_license
|
superelastic/FAA03
|
a330858124df378653b98c269500c62a76a0be0f
|
a6f90711e0e61fcd2d048d9b3f4d5085fb09f1d6
|
refs/heads/master
| 2021-01-23T11:50:02.350443
| 2015-04-19T20:02:00
| 2015-04-19T20:02:00
| 33,827,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,737
|
r
|
main.R
|
# Setup for the FAA (Flexible Asset Allocation) backtest: load the
# Systematic Investor Toolbox, required packages, and adjusted ETF prices.
# NOTE(review): rm(list = ls()) wipes the user workspace and is generally
# discouraged in scripts; run in a fresh session instead.
rm(list=ls())
# NOTE(review): setInternet2() is Windows-only and defunct in current R
# releases -- remove or guard before running on a modern R.
setInternet2(TRUE)
# Source the Systematic Investor Toolbox directly from the web.
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
source(con)
close(con)
load.packages("TTR,PerformanceAnalytics,quantmod,lattice")
# Local helper providing FAAreturns() (and the := multi-assign usage below).
source("C:/Users/rf6994/Documents/R/FAA03/FAAreturns.R")
require(quantmod)
require(PerformanceAnalytics)
# ETF universe; VFISX serves as the risk-free asset.
mutualFunds <- c("CSD", "EDV", "VNQ", "BND", "MUB", "IGOV", "VWO", "ILF", "EWJ","VFISX")
#mutualFunds <- c("VTSMX", #Vanguard Total Stock Market Index
#                 "FDIVX", #Fidelity Diversified International Fund
#                 "VEIEX", #Vanguard Emerging Markets Stock Index Fund
#                 "VFISX", #Vanguard Short-Term Treasury Fund
#                 "VBMFX", #Vanguard Total Bond Market Index Fund
#                 "QRAAX", #Oppenheimer Commodity Strategy Total Return
#                 "VGSIX" #Vanguard REIT Index Fund
#)
#mid 1997 to end of 2012
#getSymbols(mutualFunds, from="1997-06-30", to="2015-03-20")
getSymbols(mutualFunds, from="2009-06-30", to="2015-04-17")
# Collect the adjusted-close series of every symbol into one xts matrix.
tmp <- list()
for(fund in mutualFunds) {
  tmp[[fund]] <- Ad(get(fund))
}
#always use a list when intending to cbind/rbind large quantities of objects
adPrices <- do.call(cbind, args = tmp)
# Strip the ".Adjusted" suffix from the column names.
# NOTE(review): the unescaped "." matches any character; "\\.Adjusted$"
# would be the precise pattern.
colnames(adPrices) <- gsub(".Adjusted", "", colnames(adPrices))
#' Multiple-assignment operator.
#'
#' `c(a, b) := list(1, 2)` binds `a <- 1` and `b <- 2` in the caller's
#' environment; a plain `x := value` is an ordinary assignment. When the
#' RHS is shorter than the LHS, the remaining names are bound to NULL.
#'
#' @param lhs A name, or `c(name1, name2, ...)` (unevaluated).
#' @param rhs A list/vector of values to distribute, or a single value.
#' @return NULL, invisibly; called for its assignment side effect.
':=' <- function(lhs, rhs) {
  frame <- parent.frame()
  lhs <- as.list(substitute(lhs))
  # Drop the leading `c` symbol when the LHS is written as c(a, b, ...).
  if (length(lhs) > 1)
    lhs <- lhs[-1]
  # Single target: plain assignment of the whole RHS.
  # (assign() replaces the original's opaque do.call(`=`, ...) trick.)
  if (length(lhs) == 1) {
    assign(as.character(lhs[[1]]), rhs, envir = frame)
    return(invisible(NULL))
  }
  # A bare function or formula RHS is one value, not a collection to split.
  # (inherits() avoids the methods-package dependency of is().)
  if (is.function(rhs) || inherits(rhs, "formula"))
    rhs <- list(rhs)
  # Pad the RHS with NULLs so every LHS name receives a value.
  if (length(lhs) > length(rhs))
    rhs <- c(rhs, rep(list(NULL), length(lhs) - length(rhs)))
  for (i in seq_along(lhs))
    assign(as.character(lhs[[i]]), rhs[[i]], envir = frame)
  invisible(NULL)
}
# FAA: Flexible Asset Allocation backtest.
#
# At each month-end, ranks every asset over the previous `monthLookback`
# months on three criteria:
#   - momentum    (total price return over the window, higher is better),
#   - volatility  (StdDev of returns, lower is better),
#   - correlation (row sums of the correlation matrix, lower is better, or
#     the external stepwiseCorRank() helper when stepCorRank = TRUE).
# The three ranks are combined with weightMom/weightVol/weightCor; the top
# `bestN` assets with non-negative momentum are held equally weighted and
# any unallocated weight goes to the risk-free column.
#
# Args:
#   prices          xts of asset prices, one column per asset.
#   monthLookback   ranking window length in months.
#   weightMom, weightVol, weightCor  weights of the three rank components.
#   riskFreeName    pattern matching the risk-free column name; when NULL a
#                   zero-return column is appended (with a warning).
#   bestN           number of assets to hold.
#   stepCorRank     use stepwiseCorRank() instead of the rowSums ranking.
#   stepStartMethod "best" seeds stepwiseCorRank() with the asset having the
#                   best combined momentum/volatility rank.
#   geometric       passed through to Return.rebalancing().
#   ...             forwarded to stepwiseCorRank().
#
# Returns a single-column xts of strategy returns, named after the lookback
# and weight settings.
FAA <- function(prices, monthLookback = 4,
                weightMom = 1, weightVol = .5, weightCor = .5,
                riskFreeName = NULL, bestN = 3,
                stepCorRank = FALSE, stepStartMethod = c("best", "default"),
                geometric = TRUE, ...) {
  stepStartMethod <- stepStartMethod[1]
  if(is.null(riskFreeName)) {
    prices$zeroes <- 0
    riskFreeName <- "zeroes"
    warning("No risk-free security specified. Recommended to use one of: quandClean('CHRIS/CME_US'), SHY, or VFISX.
            Using vector of zeroes instead.")
  }
  returns <- Return.calculate(prices)
  monthlyEps <- endpoints(prices, on = "months")
  riskFreeCol <- grep(riskFreeName, colnames(prices))
  tmp <- list()
  dates <- list()
  # Walk the month-end endpoints; each iteration is one lookback window.
  for(i in 2:(length(monthlyEps) - monthLookback)) {
    #subset data
    priceData <- prices[monthlyEps[i]:monthlyEps[i+monthLookback],]
    returnsData <- returns[monthlyEps[i]:monthlyEps[i+monthLookback],]
    #perform computations
    # Momentum = window-end price / window-start price - 1; assets without
    # data in this window (NA momentum) are dropped for the window.
    momentum <- data.frame(t(t(priceData[nrow(priceData),])/t(priceData[1,]) - 1))
    momentum <- momentum[,!is.na(momentum)]
    #momentum[is.na(momentum)] <- -1 #set any NA momentum to negative 1 to keep R from crashing
    priceData <- priceData[,names(momentum)]
    returnsData <- returnsData[,names(momentum)]
    momRank <- rank(momentum)
    vols <- data.frame(StdDev(returnsData))
    volRank <- rank(-vols) # low volatility ranks high
    cors <- cor(returnsData, use = "complete.obs")
    if (stepCorRank) {
      if(stepStartMethod=="best") {
        # Seed the stepwise correlation ranking with the asset holding the
        # best combined momentum/volatility rank.
        compositeMomVolRanks <- weightMom*momRank + weightVol*volRank
        maxRank <- compositeMomVolRanks[compositeMomVolRanks==max(compositeMomVolRanks)]
        corRank <- stepwiseCorRank(corMatrix=cors, startNames = names(maxRank),
                                   bestHighestRank = TRUE, ...)
      } else {
        corRank <- stepwiseCorRank(corMatrix=cors, bestHighestRank=TRUE, ...)
      }
    } else {
      # Low total correlation with the other assets ranks high.
      corRank <- rank(-rowSums(cors))
    }
    totalRank <- rank(weightMom*momRank + weightVol*volRank + weightCor*corRank)
    upper <- length(names(returnsData))
    lower <- max(upper-bestN+1, 1)
    topNvals <- sort(totalRank, partial=seq(from=upper, to=lower))[c(upper:lower)]
    #compute weights
    longs <- totalRank %in% topNvals #invest in ranks length - bestN or higher (in R, rank 1 is lowest)
    longs[momentum < 0] <- 0 #in previous algorithm, removed momentums < 0, this time, we zero them out at the end.
    longs <- longs/sum(longs) #equal weight all candidates
    longs[longs > 1/bestN] <- 1/bestN #in the event that we have fewer than top N invested into, lower weights to 1/top N
    names(longs) <- names(totalRank)
    #append removed names (those with momentum < 0)
    removedZeroes <- rep(0, ncol(returns)-length(longs))
    names(removedZeroes) <- names(returns)[!names(returns) %in% names(longs)]
    longs <- c(longs, removedZeroes)
    #reorder to be in the same column order as original returns/prices
    longs <- data.frame(t(longs))
    longs <- longs[, names(returns)]
    #append lists
    tmp[[i]] <- longs
    dates[[i]] <- index(returnsData)[nrow(returnsData)]
  }
  weights <- do.call(rbind, tmp)
  dates <- do.call(c, dates)
  weights <- xts(weights, order.by=as.Date(dates))
  # Whatever is not allocated to risky assets goes to the risk-free column.
  weights[, riskFreeCol] <- weights[, riskFreeCol] + 1-rowSums(weights)
  strategyReturns <- Return.rebalancing(R = returns, weights = weights, geometric = geometric)
  colnames(strategyReturns) <- paste(monthLookback, weightMom, weightVol, weightCor, sep="_")
  return(strategyReturns)
}
# Run FAAreturns() (sourced from FAAreturns.R; presumably returns a
# weights/returns pair, unpacked with the := multi-assignment operator)
# under several parameterisations: the replica attempt, different bestN,
# and different momentum/volatility/correlation weightings.
c(ra_wts, ra_ret) := FAAreturns(adPrices)
c(n4_wts, n4_ret) := FAAreturns(adPrices, bestN=4)
c(n3_wts, n3_ret) := FAAreturns(adPrices, bestN=3)
c(volcor_wts, volcor_ret) := FAAreturns(adPrices, weightVol = 1, weightCor = 1)
c(minrisk_wts, minrisk_ret) := FAAreturns(adPrices, weightMom = 0, weightVol=1, weightCor=1)
c(puremom_wts, puremom_ret) := FAAreturns(adPrices, weightMom=1, weightVol=0, weightCor=0)
c(maxdecor_wts, maxdecor_ret) := FAAreturns(adPrices, weightMom=0, weightVol=0, weightCor=1)
c(momdecor_wts, momdecor_ret) := FAAreturns(adPrices, weightMom=1, weightVol=0, weightCor=1)
# Combine all return series, plot cumulative performance, and tabulate
# annualised return, max drawdown, Sharpe ratio, and return/drawdown.
all <- cbind(ra_ret, n4_ret, n3_ret, volcor_ret, minrisk_ret, puremom_ret, maxdecor_ret, momdecor_ret)
colnames(all) <- c("Replica Attempt", "N4", "N3", "N3vol1cor1", "minRisk", "pureMomentum", "maxDecor", "momDecor")
charts.PerformanceSummary(all, colorset=c("black", "red", "blue", "green", "brown", "darkgrey", "purple", "orange"))
stats <- data.frame(t(rbind(Return.annualized(all)*100,
                            maxDrawdown(all)*100,
                            SharpeRatio.annualized(all))))
stats$Return_To_Drawdown <- stats[,1]/stats[,2]
|
c0a96f2be34a943fbcd091065a10e986189cf356
|
b59771808f850041ae5fb9a01266a37cbab6c112
|
/man/CompTransform.Rd
|
513e74685397118d92f1701b424fbc21a3b456d1
|
[
"MIT"
] |
permissive
|
TroyHernandez/alignmentfreer
|
a3ab610186a194ce03dcbd3fd761cbf41ea66700
|
d9f54535e85ac11ae961c0644f94b24972a39ef0
|
refs/heads/master
| 2016-09-06T00:47:52.144677
| 2014-03-23T20:46:58
| 2014-03-23T20:46:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
rd
|
CompTransform.Rd
|
\name{CompTransform}
\alias{CompTransform}
\title{Calculate the composition vector from a generalized vector sequence.}
\usage{
CompTransform(vec, kmer = 3, statistic = 3)
}
\arguments{
\item{vec}{a generalized vector sequence as derived from
\code{Vectorizer} with kmer greater than 1.}
\item{kmer}{an integer that defines the kmer length of
vec.}
\item{statistic}{an integer that defines the statistics
present in vec.}
}
\description{
This function takes a generalized vector sequence, as
obtained from \code{Vectorizer}, and calculates the
composition vector.
}
\keyword{vectorizer}
|
a8205d7765ee68162416fae8f0068f7494ea1eac
|
6a68c8a50933b31dbd274aa0dccd7753adf2def8
|
/processing/2b_descriptives.R
|
ea77ce6695df96dbac6a148c1a316a91f78485ca
|
[] |
no_license
|
ballardtj/recruitment
|
81583adf28154d9113921b5c6e847fd4b0fff06e
|
d8520d365fdede756f86aa70e9ea7996aa9fb6e7
|
refs/heads/master
| 2020-03-23T11:55:57.940010
| 2019-02-22T01:58:10
| 2019-02-22T01:58:10
| 141,527,292
| 0
| 0
| null | 2018-08-09T05:41:08
| 2018-07-19T05:10:39
|
R
|
UTF-8
|
R
| false
| false
| 1,811
|
r
|
2b_descriptives.R
|
# Descriptive statistics for Experiment 2 of the recruitment study.
# NOTE(review): rm(list = ls()) wipes the user workspace and is generally
# discouraged in scripts; prefer running in a fresh session.
rm(list=ls())
#load packages
library(tidyverse)
#load data
# Loads `trimmed_data` (used below) into the workspace.
load("../clean_data/trimmed_data_exp2.RData")
#response rates
# Non-response rate and too-fast-response (< 150 ms) rate per wave/source.
trimmed_data %>%
  #our program logs rt and response for non-responses as -1.
  #Here, we replace -1 with NA.
  mutate(rt = if_else(time>0,time,NA_integer_),
         response = if_else(response>0,response,NA_integer_),
         iscorrect = if_else(iscorrect>-1,iscorrect,NA_integer_)) %>%
  group_by(wave,source) %>%
  summarise(nr_rate = mean(is.na(response)),
            too_fast_rate = mean(rt < 150,na.rm=T))
# Per-condition descriptives: mean RT, accuracy, mean response, and
# non-response rate, averaged first within subject, then across subjects.
descriptives <- trimmed_data %>%
  # The program logs rt/response/iscorrect for non-responses as -1;
  # replace those sentinel values with NA.
  mutate(rt = if_else(time > 0, time, NA_integer_),
         response = if_else(response > 0, response, NA_integer_),
         iscorrect = if_else(iscorrect > -1, iscorrect, NA_integer_)) %>%
  # NOTE(review): this also drops rows with rt == NA (non-responses), so
  # nr_rate below is computed on responded trials only -- confirm intent.
  filter(rt > 250) %>%
  group_by(subjectid, wave, source, emphasis, brightness) %>%
  # nr_rate must be computed BEFORE `response` is overwritten: summarise()
  # evaluates its arguments in order, so the original
  # `mean(is.na(response))` saw the freshly computed scalar mean instead
  # of the response column and always returned 0.
  summarise(mean_RT = mean(rt, na.rm = TRUE),
            accuracy = mean(iscorrect, na.rm = TRUE),
            nr_rate = mean(is.na(response)),
            response = mean(response)) %>%
  group_by(wave, source, emphasis, brightness) %>%
  summarise(mean_RT = mean(mean_RT),
            accuracy = mean(accuracy),
            response = mean(response),
            nr_rate = mean(nr_rate))
ggplot(data=descriptives) +
geom_line(aes(x=brightness,y=accuracy,group=emphasis,colour=emphasis)) +
facet_grid(wave~source) +
theme_minimal()
ggplot(data=descriptives) +
geom_line(aes(x=brightness,y=mean_RT,group=emphasis,colour=emphasis)) +
facet_grid(wave~source) +
theme_minimal()
ggplot(data=descriptives) +
geom_line(aes(x=brightness,y=nr_rate,group=emphasis,colour=emphasis)) +
facet_grid(wave~source) +
theme_minimal()
|
708c7361dda162b55a8b96cf37201c83abb46fa5
|
396df2552224ffcb0294fe6e297b231aa2e59e68
|
/_working/0157-fitting-bins.R
|
420fa0a6b6715ca0efdf139a9e208d5a55704d5e
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ellisp/blog-source
|
d072bed980a5074d6c7fac03be3635f70ab5f098
|
1227f83df23af06da5280214ac7f2e0182be5707
|
refs/heads/master
| 2023-09-05T07:04:53.114901
| 2023-08-27T21:27:55
| 2023-08-27T21:27:55
| 122,695,494
| 17
| 8
| null | 2023-08-27T21:15:33
| 2018-02-24T02:36:45
|
HTML
|
UTF-8
|
R
| false
| false
| 8,724
|
r
|
0157-fitting-bins.R
|
library(tidyverse)
library(multidplyr)
library(frs)
library(fitdistrplus)
library(knitr)
library(readxl)
library(kableExtra)
library(clipr)
#-----------------simulated data-------------
set.seed(123)
simulated_rate <- 0.005
volumes <- tibble(volume = rexp(n = 10000, rate = simulated_rate))
volumes <- volumes %>%
mutate(bin = case_when(volume < 10 ~ "<10",
volume < 50 ~ "10-50",
volume < 100 ~ "50-100",
volume < 1000 ~ "100 - 1000",
TRUE ~ ">= 1000"),
left = case_when(volume < 10 ~ 0,
volume < 50 ~ 10,
volume < 100 ~ 50,
volume < 1000 ~ 100,
TRUE ~ 1000),
right = case_when(volume < 10 ~ 10,
volume < 50 ~ 50,
volume < 100 ~ 100,
volume < 1000 ~ 1000,
TRUE ~ NA_real_),
bin = factor(bin, levels = c("<10", "10-50", "50-100", "100 - 1000", ">= 1000"), ordered = T))
# This is how it would look to the original user
volumes %>%
group_by(bin) %>%
summarise(freq = n()) %>%
kable() %>%
kable_styling(bootstrap_options = "striped") %>%
write_clip()
volumes_bin <- dplyr::select(volumes, left, right) %>%
as.data.frame()
fitted_distribution_gamma <- fitdistcens(volumes_bin, "gamma")
# overall fit:
summary(fitted_distribution_gamma)
# estimated mean
fitted_distribution_gamma$estimate["shape"] / fitted_distribution_gamma$estimate["rate"]
p <- ggplot(volumes) +
geom_density(aes(x = volume)) +
stat_function(fun = dgamma, args = fitted_distribution_gamma$estimate, colour = "blue") +
annotate("text", x = 700, y = 0.0027, label = "Blue line shows modelled distribution; black is density of the actual data.")
frs::svg_png(p, "../img/0157-densities")
# bin_based_mean
volumes %>%
mutate(mid = (left + replace_na(right, 2000)) / 2) %>%
summarise(crude_mean = mean(mid)) %>%
pull(crude_mean)
#------real data - business counts------------
download.file("https://www.abs.gov.au/AUSSTATS/subscriber.nsf/log?openagent&816502.xls&8165.0&Data%20Cubes&B164DBE8275CCE58CA2583A700121372&0&June%202014%20to%20June%202018&21.02.2019&Latest",
destfile = "bus_counts.xls", mode = "wb")
bus_counts_orig <- readxl::read_excel("bus_counts.xls" ,
sheet = "June 2018",
range = "A7:G4976",
col_types = c("text", "text", "text", rep("numeric", 4)))
names(bus_counts_orig) <- c("state", "code", "industry", "0-0", "1-19", "20-199", "200-30000")
bus_counts <- bus_counts_orig %>%
mutate(code = str_pad(code, 4, "left", "0")) %>%
filter(!grepl("^Total", industry) & code != "9999") %>%
filter((`0-0` + `1-19` + `20-199` + `200-30000`) > 0) %>%
gather(employees, freq, -state, -code, -industry) %>%
separate(employees, by = "-", into = c("left", "right"), remove = FALSE) %>%
mutate(left = as.numeric(left),
right = as.numeric(right),
employees = fct_reorder(employees, -left)) %>%
left_join(dplyr::select(anzsic_4_abs, anzsic_class_code, anzsic_group_code, anzsic_group, division),
by = c("code" = "anzsic_class_code")) %>%
group_by(anzsic_group_code, anzsic_group, state, employees, left, right, division) %>%
summarise(freq = sum(freq)) %>%
arrange(anzsic_group_code) %>%
ungroup()
# how data will look:
kable(head(bus_counts)) %>%
kable_styling(bootstrap_options = "striped") %>%
write_clip()
# demo plot:
p <- bus_counts %>%
mutate(division2 = fct_lump(division, 10, w = freq),
division2 = fct_reorder(division2, freq, .fun = sum),
state2 = fct_lump(state, 5, w = freq),
division2 = fct_relevel(division2, "Other", after = 0)) %>%
ggplot(aes(x = division2, fill = employees, weight = freq)) +
geom_bar() +
facet_wrap(~state2) +
coord_flip() +
scale_y_continuous(label = comma, expand = c(0, 0)) +
labs(y = "Number of firms",
x = "",
fill = "Number of employees",
caption = "Source: ABS Count of Australian Businesses, analysis by freerangestats.info",
title = "Number of firms by industry division and number of employees") +
theme(panel.grid.minor = element_blank(),
panel.spacing = unit(5, "mm"),
panel.border = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_fill_brewer(palette = "Spectral", guide = guide_legend(reverse = TRUE))
frs::svg_png(p, "../img/0157-businesses", w = 10, h = 7)
bus_counts_small <- bus_counts %>%
filter(division == "Manufacturing" & state %in% c("New South Wales", "Victoria", "Queensland"))
#' @param d a data frame or tibble with columns including left, right and freq
#' @param keepdata passed through to fitdistcens
#' @param ... extra variables to pass to fitdistcens eg starting values for the estimation process
avg_finder <- function(d, keepdata = FALSE, ...){
d_bin <- as.data.frame(d[rep(1:nrow(d), d$freq), c("left", "right")])
fit <- fitdistrplus::fitdistcens(d_bin, "nbinom", keepdata = keepdata, ...)
return(fit$estimate[["mu"]])
}
# Meat manufacturing in NSW:
avg_finder(bus_counts_small[1:4, ])
# Meat manufacturing in Queensland:
avg_finder(bus_counts_small[5:8, ])
cluster <- new_cluster(7)
cluster_library(cluster, "fitdistrplus")
cluster_assign(cluster, avg_finder = avg_finder)
# calculate the simple ones using single processing dplyr for ease during dev:
bus_summary_simp <- bus_counts_small %>%
mutate(middle = (left + right) / 2,
just_above_left = (right - left) / 10 + left,
tiny_above_left = left * 1.1) %>%
group_by(anzsic_group, state) %>%
summarise(number_businesses = sum(freq),
crude_avg = sum(freq * middle) / sum(freq),
less_crude_avg = sum(freq * just_above_left) / sum(freq),
another_avg = sum(freq * tiny_above_left) / sum(freq))
# parallel processing for the negative binomial verions
bus_summary_nb <- bus_counts_small %>%
group_by(anzsic_group, state) %>%
partition(cluster = cluster) %>%
do(nb_avg = avg_finder(.)) %>%
collect() %>%
mutate(nb_avg = unlist(nb_avg))
bus_summary <- bus_summary_simp %>%
left_join(bus_summary_nb, by = c("anzsic_group", "state"))
p1 <- bus_summary %>%
ggplot(aes(x = crude_avg, y = nb_avg, colour = anzsic_group)) +
facet_wrap(~state) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
theme(legend.position = "none") +
labs(x = "Average firm size calculated based on mid point of each bin\nDiagonal line shows where points would be if both methods agreed.",
y = "Average firm size calculated with negative binomial model",
title = "Mean number of employees in manufacturing firms estimated from binned data",
subtitle = "Inference based on mid point of bin delivers massive over-estimates")
p2 <- bus_summary %>%
ggplot(aes(x = less_crude_avg, y = nb_avg, colour = anzsic_group)) +
facet_wrap(~state) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
theme(legend.position = "none") +
labs(x = "Average firm size calculated based on 10% of way from left side of bin to right side\nDiagonal line shows where points would be if both methods agreed.",
y = "Average firm size calculated with negative binomial model",
title = "Mean number of employees in manufacturing firms estimated from binned data",
subtitle = "Using a point 10% of the distance from the left to the right still over-estimates very materially")
p3 <- bus_summary %>%
ggplot(aes(x = another_avg, y = nb_avg, colour = anzsic_group)) +
facet_wrap(~state) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
theme(legend.position = "none") +
labs(x = "Average firm size calculated based on left side of bin x 1.1\nDiagonal line shows where points would be if both methods agreed.",
y = "Average firm size calculated with negative binomial model",
title = "Mean number of employees in manufacturing firms estimated from binned data",
subtitle = "Using the (left-most point of the bin times 1.1) results in under-estimates")
svg_png(p1, "../img/0157-res1", w = 12, h = 5)
svg_png(p2, "../img/0157-res2", w = 12, h = 5)
svg_png(p3, "../img/0157-res3", w = 12, h = 5)
#========Full data========
# Doesn't work for some state-industry combinations
system.time({
bus_summary_full <- bus_counts %>%
group_by(anzsic_group, state) %>%
partition(cluster = cluster) %>%
do(nb_avg = avg_finder(.)) %>%
collect() %>%
mutate(nb_avg = unlist(nb_avg))
})
|
fb147a58a7634572a5204e61a8d42b3348d0b15b
|
b143351b8b602b51213c5cac34488594f9aa07a0
|
/2-groups-t-test-analysis.R
|
157e0ae4d42054887a9a7b3422531a4f89d08ea7
|
[] |
no_license
|
jpfolador/ttest
|
62f111c55d1019fe519e6ebbe107102442a08b79
|
9a05fe93d27a7a2b12b1c9ea5b0b869bb4f10d30
|
refs/heads/master
| 2023-02-23T22:32:11.523492
| 2021-01-30T22:04:21
| 2021-01-30T22:04:21
| 253,340,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,226
|
r
|
2-groups-t-test-analysis.R
|
#-----------------------------------------------
# Comparison between the ages of two groups
#
# Author: Joao Paulo Folador
# email: jpfolador at gmail.com
#-----------------------------------------------
if (!require("tidyr")) { install.packages('tidyr') }
if (!require("irr")) { install.packages('irr') }
if (!require('blandr')) { install.packages('blandr') }
if (!require("ggplot2")) { install.packages("ggplot2") }
if (!require("ggthemes")) { install.packages("ggthemes") }
if (!require("cowplot")) { install.packages("cowplot") }
if (!require("extrafont")) { install.packages("extrafont") }
if (!require("RColorBrewer")) { install.packages("RColorBrewer") }
# Load Windows fonts
loadfonts(device = "win")
# setup the theme to ggplot
theme_set(theme_bw())
# load the file with the data
participants <- read.csv(file = 'participants-sample.csv', header = TRUE, sep = ";")
h <- participants[which(participants$volunteer == 'h'), ]
pd <- participants[which(participants$volunteer == 'pd'), ]
summary(h$age)
h.shap = shapiro.test(h$age)
summary(pd$age)
pd.shap = shapiro.test(pd$age)
# Site of color pallet: http://paletton.com/
axisTextColor <- "#555555";
barContourColor <- "#9D5F44";
barFillColor <- "#EAEAEA"
lineColor <- "#FF9366";
pointColor <- "#8C3E00";
histh <- ggplot(data=h, aes(h$age)) +
geom_histogram(aes(y =..density..), color = barContourColor, alpha = 0.8,
bins = 20, fill=barFillColor, size=1) +
stat_function(fun = dnorm, geom = "line", color = lineColor, size = 1.2, na.rm = TRUE,
args = list(mean = mean(h$age), sd = sd(h$age))) +
labs(x = 'Age (years)', y = 'Density', subtitle = paste("A", '- Histogram of healthy group')) +
theme(text = element_text(size=22, family="serif", color=axisTextColor),
axis.text.x = element_text(size=24, color=axisTextColor),
axis.text.y = element_text(size=24, color=axisTextColor),
plot.margin = unit(c(0.4, 0.4, 0.4, 0.4), "cm"),
plot.title = element_text(size = 20, color=axisTextColor))
addTextH <- paste("W: ", round(h.shap[["statistic"]][["W"]], 4), "| p:",
round(h.shap[["p.value"]], 4))
qqh <- ggplot(h, aes(sample = h$age)) +
stat_qq(color = pointColor, size=2) +
stat_qq_line(color = lineColor, size=1) +
labs(x = 'Theoretical', y = 'Samples', subtitle = paste("B", '- QQ-Plot of healthy group')) +
theme(text = element_text(size=22, family="serif", color=axisTextColor),
axis.text.x = element_text(size=24, color=axisTextColor),
axis.text.y = element_text(size=24, color=axisTextColor),
plot.margin = unit(c(0.4, 0.4, 0.4, 0.4), "cm"),
plot.title = element_text(size = 20, color=axisTextColor)) +
annotate("text", x = -2, y = 82, size=6, family="serif", color=axisTextColor,
label = as.character(as.expression(addTextH)), hjust=0,
parse = FALSE)
histPd <- ggplot(data=pd, aes(pd$age)) +
geom_histogram(aes(y =..density..), color = barContourColor, alpha = 0.8,
bins = 20, fill=barFillColor, size=1) +
stat_function(fun = dnorm, geom = "line", color = lineColor, size = 1.2, na.rm = TRUE,
args = list(mean = mean(pd$age), sd = sd(pd$age))) +
labs(x = 'Age (years)', y = 'Density', subtitle = paste("C", '- Histogram of PD group')) +
theme(text = element_text(size=22, family="serif", color=axisTextColor),
axis.text.x = element_text(size=24, color=axisTextColor),
axis.text.y = element_text(size=24, color=axisTextColor),
plot.margin = unit(c(0.4, 0.4, 0.4, 0.4), "cm"),
plot.title = element_text(size = 20, color=axisTextColor))
addTextPd <- paste("W: ", round(pd.shap[["statistic"]][["W"]], 4), "| p:",
round(pd.shap[["p.value"]], 4))
qqPd <- ggplot(pd, aes(sample = pd$age)) +
stat_qq(color = pointColor, size=2) +
stat_qq_line(color = lineColor, size=1) +
labs(x = 'Theoretical', y = 'Samples', subtitle = paste("D", '- QQ-Plot of PD group')) +
theme(text = element_text(size=22, family="serif", color=axisTextColor),
axis.text.x = element_text(size=24, color=axisTextColor),
axis.text.y = element_text(size=24, color=axisTextColor),
plot.margin = unit(c(0.4, 0.4, 0.4, 0.4), "cm"),
plot.title = element_text(size = 20, color=axisTextColor)) +
annotate("text", x = -2, y = 82, size=6, family="serif", color=axisTextColor,
label = as.character(as.expression(addTextPd)), hjust=0,
parse = FALSE)
plot_grid(histh, qqh, histPd, qqPd, nrow=2, ncol=2)
#---------------------------------------------------------------
# If the data has a normal distribution then apply t.test
#---------------------------------------------------------------
t.test(pd$age, h$age)
|
7295adf926e2a90907a334e4f0335a04be4295c2
|
8ed441ee034ab9f22ed248645f8f6ba2606b6e5b
|
/ecoseed/arabidopsis/arabidopsis_late.R
|
0aef08c76494e92574b6b6312d3f8cfe4dced56f
|
[] |
no_license
|
CathyCat88/thesis
|
51b33ddf4f86255f1c143f68a8e57ad4dc98726c
|
a1f311f4b95d4ef40006dd5773d54c97cb295ea7
|
refs/heads/master
| 2020-09-28T12:39:58.624508
| 2016-11-13T16:17:16
| 2016-11-13T16:17:16
| 66,710,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,428
|
r
|
arabidopsis_late.R
|
graphics.off()
remove(list = ls())
data <- read.csv("arabidopsis_late.csv", header = TRUE)
blank <- data[data$temperature == "blank",]
mean.blank <- mean(blank$OD)
data2 <- data[data$temperature != "blank",]
Concentration <- function(OD, weight) {
return((OD - mean.blank)/ (weight*24200*2*10^-6))
}
data2$result <- Concentration(data2$OD, data2$weight)
mean <- aggregate(data2$result, list(data2$temperature, data2$treatment), mean)
StandErr <- function(x) {
se <- sd(x)/sqrt(length(x))
}
se <- aggregate(data2$result, list(data2$temperature, data2$treatment), StandErr)
result <- cbind(mean, se[,3])
colnames(result) <- c("conditions", "treatment", "mean", "se")
result$treatment <- factor(result$treatment,
levels = c("no", "aged"))
result$conditions <- factor(result$conditions,
levels = c("CTR", "LT", "HT"),
labels = c("contrôle", "T basse", "T haute"))
g <- ggplot(result,
aes(x = treatment,
y = mean,
group = conditions,
fill = conditions)) +
geom_bar(stat = "identity",
position = "dodge",
color = "black",
width = 0.75) +
labs(fill = "conditions \nde culture") +
scale_x_discrete(name = "",
labels = c("no" = "non traité", "aged" ="âgé")) +
scale_y_continuous(name = "Production de superoxyde \n(µM/g/h) \n") +
geom_errorbar(data = result,
position = position_dodge(0.75),
aes(x = treatment,
ymin = result$mean,
ymax = result$mean + result$se),
width = 0.1)
save_plot("arabidopsis_late.png", g, base_aspect_ratio = 1.3)
###########
###STATS###
###########
shapiroTest <- tapply(data2$result, interaction(data2$temperature, data2$treatment, drop = TRUE), shapiro.test)
#tout normal
bartlett.test(list(data2$result[data2$temperature == "CTR" & data2$treatment == "aged"],
data2$result[data2$temperature == "LT" & data2$treatment == "aged"]))
t.test(data2$result[data2$temperature == "HT" & data2$treatment == "no"],
data2$result[data2$temperature == "CTR" & data2$treatment == "no"], var.equal = TRUE)
t.test(data2$result[data2$temperature == "HT" & data2$treatment == "aged"],
data2$result[data2$temperature == "HT" & data2$treatment == "no"], var.equal = TRUE)
|
ba5997494d8deb3d0204290bc673ebf4d5856c41
|
7d192b2ddb740d0a3882b5f66b3453bf217b6b72
|
/src/analysis/atom-committers.R
|
f2fec525f0f665163b4e1f880d031a2f1b8d748f
|
[
"MIT"
] |
permissive
|
dgopstein/atom-finder
|
b497fe634e217fb2b682892728e42b6bc83e3d68
|
4198a0113c42da6f38710f72f84a7fa592e87fec
|
refs/heads/master
| 2023-07-08T13:16:49.140429
| 2023-06-26T03:44:53
| 2023-06-26T03:44:53
| 71,188,915
| 4
| 7
| null | 2017-09-28T23:13:06
| 2016-10-17T23:11:51
|
C++
|
UTF-8
|
R
| false
| false
| 1,484
|
r
|
atom-committers.R
|
library(data.table)
library(ggplot2)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("util.R")
atom.committers.gcc <- data.table(read.csv("data/atom-committers_gcc_2017-10-31_1-3.csv")); atom.committers <- atom.committers.gcc
atom.committers.linux <- data.table(read.csv("data/atom-committers_linux_2017-11-01_01.csv")); atom.committers <- atom.committers.linux
atom.committers[, rate := added.atoms/added.non.atoms]
atom.committers <- atom.committers[rev.str > 5 & added.non.atoms > 10 & rate < 1]
ggplot(atom.committers, aes(x = rev.str, y = rate)) + geom_point() + labs(x="# commits") + ggtitle("Commiter's atom rate by commits")
ggplot(atom.committers, aes(x = rev.str, y = rate)) + geom_point() + labs(x="# commits (log)") + ggtitle("Commiter's atom rate (log) by commits (log)") + scale_x_log10() + scale_y_log10()
#stat_density_2d(geom = "raster", aes(fill = ..density..), contour = FALSE) +
#scale_x_log10() + scale_y_log10() +
#geom_density_2d()
ggplot(atom.committers, aes(x = added.non.atoms, y = rate)) + geom_point() + labs(x="# AST nodes added") + ggtitle("Commiter's atom rate by amount of code")
ggplot(atom.committers, aes(x = added.non.atoms, y = rate)) + geom_point() + labs(x="# AST nodes added (log)") + ggtitle("Commiter's atom rate (log) by amount of code (log)") + scale_x_log10() + scale_y_log10()
ggplot(atom.committers, aes(rate)) + stat_bin(bins=500) + geom_histogram()
#ggplot(atom.committers, aes(rate)) + geom_density()
|
8326f3eb364c5ed81fbcb8ed148be5d4dec913bd
|
750c83ade014709a7a1d7cec05c190625bff9be3
|
/app.R
|
6a39db4752202187d15ceec0803d5349dc01366e
|
[] |
no_license
|
mrc-ide/shinyq
|
07b0b7a6101b836b36af8be1c620f1848926bfe1
|
3db138a3573ddd33ad62731926097770ea82f179
|
refs/heads/master
| 2021-07-04T22:40:27.094856
| 2020-08-28T13:40:22
| 2020-08-28T13:41:20
| 138,864,036
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
app.R
|
source("common.R", local = TRUE)
shiny::shinyApp(ui, server(workers = 1L))
|
27e6aa7d04ac46cba20fdb2360465ddb6d610dec
|
c1beee1b69f30c33de46a6b1d56ef06af465a747
|
/iris.R
|
44f974f02a8dd094d7f2c47985317d8daf373ca5
|
[] |
no_license
|
Mayankagupta/R-programs
|
7db41c985a5dda6aec0f8463b7c251af91ca0425
|
142adb8f960950a19dff4a0719f0c84760462862
|
refs/heads/master
| 2022-12-23T22:38:07.688110
| 2020-09-28T10:58:42
| 2020-09-28T10:58:42
| 299,279,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
iris.R
|
names(iris)
is.vector(names(iris))
vec<-names(iris)
length(vec)
nrow(iris)
ncol(iris)
dim(iris)
iris(,1)
iris[,1]
sum<-iris[,1]
length(sum)
sum(sum)
sum(sum)/length(sum)
mean(iris[,1])
iris[,5]
summary(iris)
|
305beaa99ceb11e7b73c26d3703b7e6ba64749bf
|
ebee9629abd81143610a6352288ceb2296d111ac
|
/R/pairwise_vectors.R
|
ecdd0de3af6a2156a14157d0d78195ac0c57dd88
|
[] |
no_license
|
antiphon/Kdirectional
|
76de70805b4537a5aff0636486eb387cb64069b0
|
98ab63c3491f1497d6fae8b7b096ddd58afc4b29
|
refs/heads/master
| 2023-02-26T02:19:41.235132
| 2023-02-12T13:07:11
| 2023-02-12T13:07:11
| 37,183,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,058
|
r
|
pairwise_vectors.R
|
#' Compute pairwise distances and angles
#'
#' Computes pairwise vectors of a 2d or 3d point pattern. Returns
#' circular- or spherical coordinates, depending on the dimension.
#'
#' @param x matrix of coordinates.
#' @param from Indices from which to compute
#' @param to Indices to which to compute
#' @param asMatrix return n x n matrices
#' @param as.xyz return as xyz-coordinates instead of polar/spherical
#' @details
#'
#' Default is to return the upper triangle (without diagonal) of
#' pairwise distance and angle matrices.
#'
#' Result will have 2 or 3 columns, where first is the distance
#' and 2(:3) are angle(s).
#'
#' @export
pairwise_vectors <- function(x, from, to, asMatrix=FALSE, as.xyz=FALSE){
x <- as.matrix(x)
d <- ncol(x)
#
# check for subsetting
if(!missing(from) | !missing(to)){
if(missing(from)) from <- 1:nrow(x)
if(missing(to)) to <- 1:nrow(x)
U <- c_pairwise_dist_angle_subset(x, from, to)
# drop empty ones
nonempty <- U[,1]>0
U <- U[nonempty,]
}
else{ # all pairs once
U <- c_pairwise_dist_angle(x)
}
colnames(U) <- c("distance", if(d==2) "angle" else c("azimuth", "inclination"))
# if as matrix
if(asMatrix){
if(!missing(from)|!missing(to)) stop("Not implemented for subsets.")
R <- list()
M0 <- A <- D <- diag(0, nrow = nrow(x))
ltri <- lower.tri(D)
# distance matrix
D[ltri] <- U[,1]
D <- D + t(D)
# angles:
if(d==3) A2 <- A
# i -> j, transposing
A[ltri] <- U[,2]
# j->i = antipode
B <- M0
Aa <- U[,2]
i <- U[,2]>pi
Aa[i] <- Aa[i]-pi
Aa[!i] <- Aa[!i]+pi
B[ltri] <- Aa
A <- B+t(A)
diag(A) <-NA
if(d==3) {
A2[ltri] <- U[,3]
B2 <- M0
B2[ltri] <- pi-U[,3]
A2 <- B2+t(A2)
diag(A2) <-NA
}
diag(D) <- 0
R$distance <- D
if(d==2)R$angle <- A
else{R$azimuth <- A; R$inclination <- A2}
}
else R <- U
if(!asMatrix & as.xyz){
if(d==2) R <- R[,1] * cbind(cos(R[,2]), sin(R[,2]))
else R <- R[,1] * ai2xyz(R[,-1])
}
R
}
|
edebaf2159d794da6435289447734b2ab52e9572
|
d7ff71e8ffb07419aad458fb2114a752c5bf562c
|
/tests/testthat/line_breaks_and_other/pipe-line-breaks-in.R
|
a600216696ad43e3e6f343f1e65b1933cb306e64
|
[
"MIT"
] |
permissive
|
r-lib/styler
|
50dcfe2a0039bae686518959d14fa2d8a3c2a50b
|
ca400ad869c6bc69aacb2f18ec0ffae8a195f811
|
refs/heads/main
| 2023-08-24T20:27:37.511727
| 2023-08-22T13:27:51
| 2023-08-22T13:27:51
| 81,366,413
| 634
| 79
|
NOASSERTION
| 2023-09-11T08:24:43
| 2017-02-08T19:16:37
|
R
|
UTF-8
|
R
| false
| false
| 1,811
|
r
|
pipe-line-breaks-in.R
|
c(a %>% b)
c(a %>% b())
c(a + b %>% c)
c(
a %>% b)
c(a %>% b()
)
c(a %>% b() # 33
)
c(
a + b %>% c
)
c(
a + b %>%
c)
c(a + b %>%
c)
c(
a + b %>% # 654
c
)
c( # rr
a + b %>%
c
)
c(
a +
b %>% c
)
c(a +
b %>% c
)
a %>% b(
)
a %>% b(
) %>% q
a %>%
b()
a %>% b() %>% c
# short pipes < 2 can stay on one line
a %>% b()
fun(x,
a %>% b)
fun(x,
gg = a %>% b,
tt %>% q)
fun(x, gg = a %>% b, tt %>% q)
z = a %>% b()
fun( s = g(x),
gg = a(n == 2) %>% b,
tt %>% q(r = 3))
# FIXME closing brace could go on ntext line. Alternative: remove lin breaks completely.
blew(x %>%
c(), y = 2)
# FIXME closing brace could go on ntext line. Alternative: move c() up.
blew(y = 2, x %>%
c())
{a %>% c +1}
b %>%
f() %>% # never move comment to next line as it can be styler: off or nolint
k() %>%
x()
# line break before { inserted inside and outside function calls
c(
data %>%
filter(bar) %>% {
cor(.$col1, .$col2, use = "complete.obs")
}
)
data %>%
filter(bar) %>% {
cor(.$col1, .$col2, use = "complete.obs")
}
# line break before { kept inside and outside function calls
c(
data %>%
filter(bar) %>%
{
cor(.$col1, .$col2, use = "complete.obs")
}
)
data %>%
filter(bar) %>%
{
cor(.$col1, .$col2, use = "complete.obs")
}
# redundant blank lines removed
c(
data %>%
filter(bar) %>%
{
cor(.$col1, .$col2, use = "complete.obs")
}
)
data %>%
filter(bar) %>%
{
cor(.$col1, .$col2, use = "complete.obs")
}
# blank lines kept when around comment
c(
data %>%
filter(bar) %>%
# comment
{
cor(.$col1, .$col2, use = "complete.obs")
}
)
data %>%
filter(bar) %>%
# comment
{
cor(.$col1, .$col2, use = "complete.obs")
}
|
d62f64cacb12359187f59e5ba2365322ee9e3736
|
e2c212336cd141635e163aadb4b068c57c5cffb1
|
/R/connect_engine.R
|
efd0f71068fca4be7c03361cd29aac352a7ceeb2
|
[] |
no_license
|
abyanka/myWrdsAccess
|
cbf863d9c14243478b90b644e291e9458e5aa740
|
fd817f09c3989aca36616a71966b6f1e852f53fd
|
refs/heads/master
| 2023-03-15T18:45:38.413098
| 2019-05-17T13:54:05
| 2019-05-17T13:54:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 149
|
r
|
connect_engine.R
|
wrds_connect <- function(){
wrds <<- DBI::dbConnect(odbc::odbc(), "wrds-postgres")
}
wrds_disconnect <- function(){
DBI::dbDisconnect(wrds)
}
|
af2f65640835b66eb25f5ceb6fbd6d49b8cab773
|
9c3c1d8e918dd6108a819eee5c0da53c8f21636e
|
/man/datasets.Rd
|
8af30fc7734a66b40cb5d37eda5f8ec05394b3d0
|
[] |
no_license
|
cran/chronosphere
|
054d466fcfaa3c3555f810780ba48952161beb97
|
09988de91d2f520fc8e3ccf3d01f284a5d5dcbc9
|
refs/heads/master
| 2023-08-17T21:44:32.330628
| 2023-08-17T12:33:00
| 2023-08-17T12:44:55
| 236,570,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,330
|
rd
|
datasets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\name{datasets}
\alias{datasets}
\title{Download a database extract from \code{chronosphere} remote server}
\usage{
datasets(
src = NULL,
datadir = NULL,
verbose = FALSE,
master = FALSE,
greetings = TRUE,
all = FALSE
)
}
\arguments{
\item{src}{\code{character}. Source identifier. If this is set to \code{NULL}, then a simplified list of availables series will be downloaded, including all unique \code{src} and \code{ser} combinations. If \code{src} is a valid source identifier, then all accessible products (resolutions and versions) of a series are shown.}
\item{datadir}{\code{character} Directory where the downloaded files are kept. Individual entries will be looked up from the directory if this is given, and will be downloaded if they are not found. The default \code{NULL} option will download data to a temporary directory that exists only until the R session ends.}
\item{verbose}{\code{logical} Should console feedback during download be displayed?}
\item{master}{\code{logical} When \code{src} is \code{NULL}, should the function download the master records file?}
\item{greetings}{\code{logical} When the function is invoked without arguments, it displays a message to keep new users informed about different versions and resolutions (even with \code{verbose=FALSE}). This argument turns this message off on demand.}
\item{all}{\code{logical} When set to \code{FALSE} (default), only those items are shown that are available for the R environment. Set to \code{TRUE} to see all items.}
}
\value{
A \code{data.frame} class object.
}
\description{
The function will download a list of available series from the data repository
}
\details{
The function will download a single .csv file and attach it as a \code{data.frame}.
}
\examples{
# available datasets (sources and series) - proper
# index <- datasets()
# all available versions and resolutions in database 'pbdb'
# oneDat <- datasets(src="pbdb")
###################################
# local example INCOMPLETE - does not connect to the internet
ind <- datasets(
datadir=system.file("extdata", package="chronosphere"))
# one available archive
ind <- datasets(
src="SOM-zaffos-fragmentation",
datadir=system.file("extdata", package="chronosphere"))
}
|
482eea9e871e0be5d5d53b223be27a719c489da3
|
e7c3e0886bf01da80252301c541f87cb6c80cf84
|
/scripts/PlotMaggie.R
|
6bb0546f092306dcf6b9d3130028b6334f178f6d
|
[] |
no_license
|
zengfengbo/nextgenseq_pipeline
|
6ae8599228b36c49442d9da5409888209b2e3b5e
|
6070e223252499b7447b854c538c416a353eaf52
|
refs/heads/master
| 2021-05-22T17:40:19.547802
| 2019-10-18T13:53:14
| 2019-10-18T13:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,862
|
r
|
PlotMaggie.R
|
#!/usr/bin/env Rscript
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("stringr"))
suppressPackageStartupMessages(library("gridExtra"))
option_list <- list(
make_option("--input", help="bcftools covrage compare metrix, required"),
make_option("--patient", help="patient ID"),
make_option("--output", help="output pdf file name.")
)
opt <- parse_args(OptionParser(option_list=option_list))
if( ! is.element('input', names(opt)) ) stop("Maggie result is required. ")
if( ! is.element('patient', names(opt)) ) stop("Patient name is required. ")
if( ! is.element('output', names(opt)) ) stop("Output file name is required. ")
patient=opt$patient
a <-read.table(opt$input, header=T,sep="\t")
a$Score <-round(as.numeric(a$Score/a$Sites),digits=5)
a <- a[order(a$Score),]
#a$Score[a$Score==0]<-NA
a$Color[grep(".*WES.*",a$Sample,perl=TRUE,value = FALSE)]="green"
a$Color[grep(".*RNASEQ.*",a$Sample,perl=TRUE,value = FALSE)]="darkred"
a$Color[a$Sites <=15000]="orange"
a$Color[grep(paste(patient,".*WES.*",sep=""),a$Sample,perl=TRUE,value = FALSE)]="blue"
a$Color[grep(paste(patient,".*RNASEQ.*",sep=""),a$Sample,perl=TRUE,value = FALSE)]="red"
a$shape<-19
a$shape[grep(patient,a$Sample,perl=TRUE,value = FALSE)]=8
a$size<-0.60
a$size[grep(patient,a$Sample,perl=TRUE,value = FALSE)]=1.2
d<-nrow(a[a$Sites<=15000,])
pdf(opt$output, height=10, width=10, points=20)
plot(a$Score,
log="x",
col=a$Color,
pch=a$shape,
cex=a$size,
las=1,
ylab="Discordance",
xlab="Ordered Samples",
main=paste(patient,"\nMAGGIE Result",sep="\n")
)
legend("bottomright",
legend = c("WES", "RNASEQ", "Patient WES", "Patient RNASEQ",paste(d,"Samples <15K sites",sep=" ")),
text.col = c("green","darkred","blue","red","orange"),
bty = "n"
)
plot.new()
c<-a[c("Sample","Score")]
grid.table(head(c,n=20),rows = NULL)
dev.off()
|
8d8b6b345c47fd3903e492d331440e31d4f50146
|
7d3fe7aca728be7f701ef0ec595222b608798696
|
/Reservoir_Layer/Tools/Functions.r
|
f2feb076282dac8bfb4fe6d856527f7cf19cae4e
|
[] |
no_license
|
54481andrew/pathogen-spillover-forecast
|
dbf9e9c5d0e864e0ea87ecb72ab6c2fa55d15e5b
|
210c002bd3db7556560479fb3153aebf90e7b1cf
|
refs/heads/master
| 2022-01-23T04:45:36.030327
| 2022-01-06T05:01:22
| 2022-01-06T05:01:22
| 278,423,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,479
|
r
|
Functions.r
|
## Return interpretable predictor names for plots
pretty.labels <- function(x){
x.out <- c()
for(xi in x){
xi.name <- switch(xi,
TreeCover = 'Tree Cover',
ShrubCover = 'Shrub Cover',
Grasslands = 'Grassland',
Cropland = 'Cropland',
AquaticVeg = 'Aquatic Veg.',
SparseVeg = 'Sparse Veg.',
Bare = 'Bare',
BuiltUp = 'Built Up',
SnowIce = 'Snow and Ice',
OpenWater = 'Open Water',
Pmu = 'Mean Precipitation',
Tmu = 'Mean Temperature',
Nmu = 'NDVI',
Pmin = 'Minimum Precip',
Pmax = 'Maximum Precip',
Nmin = 'Minimum NDVI',
Nmax = 'Maximum NDVI',
Pcv = 'Precip. Coef. of Variation',
Ncv = 'NDVI Coef. of Variation',
Pc = 'Precip. Constancy',
Pm = 'Precip. Contingency',
Nc = 'NDVI Constancy',
Nm = 'NDVI Contingency',
Pdur = 'Dry Duration',
Ndur = 'Brown Duration',
Elev = 'Elevation',
Pop = 'Population',
TotCrop = 'Croplands',
Evergreen_Needleleaf_Forest = 'Ev. Needle Forest',
Evergreen_Broadleaf_Forest = 'Ev. Broad Forest',
Deciduous_Needleleaf_Forest = 'De. Needle Forest',
Deciduous_Broadleaf_Forest = 'De. Broad Forest',
Mixed_Forest= 'Mixed Forest',
Closed_Shrubland = 'Cl. Shrubland',
Open_Shrubland = 'Op. Shrubland',
Woody_Savanna = 'Woody Savanna',
Savannas = 'Savanna',
Grasslands = 'Grassland',
Permanent_Wetlands = 'Wetland',
Croplands = 'Cropland',
Urban_BuiltUp = 'Urban',
Cropland_Natural_Mosaic = 'Cropland Mosaic',
Permanent_Snow_Ice = 'Snow/Ice',
Barren = 'Barren',
Water_Bodies = 'Water',
Unclassified = 'NA')
x.out <- c(x.out, xi.name)
}
return(x.out)
}
## Impute missing values
impute <- function(dataset, var.names, impute.fun = 'median'){
fun <- get(impute.fun)
for(var in var.names){
dataset[is.na(dataset[,var]),var] <- fun(na.omit(dataset[,var]))
}
return(dataset)
}
## Function that removes doubles (points that fall in the same 5x5 grid square
purge.repeats <- function(dat, template){
dat.with.repeats.removed <- c()
rem.dat <- c()
## Step 1: get cell numbers of each point
points <- cbind(dat$Longitude, dat$Latitude)
cells <- raster::extract(template, points, cellnumbers=TRUE)[,'cells']
## Loop through rows of dat, retain points that fall in unique
## cells, omit repeats based on some priority. Here, priority is
## given to Presence==1.
omit.points <- c()
keep.points <- c()
for(jj in 1:nrow(dat)){
if(jj %in% omit.points){
## Do nothing if jj is already omitted
}else{
repeat.set <- which(cells[jj] == cells) ## Set of repeats for point jj
keep.point <- jj
## Keep first point with presence==1, if any presence points exist in repeat.set
if(sum(dat$Presence[repeat.set]) > 0){
keep.point <- repeat.set[which(dat$Presence[repeat.set]==1)[1]]
}
keep.points <- c(keep.points, keep.point)
omit.points <- c(omit.points, repeat.set)
} ## End if that checks if jj is in omit.points
} ## Loop through jj
dat.with.repeats.removed <- dat[keep.points,]
rem.dat <- data.frame(orig.len = nrow(dat),
purg.len = length(keep.points))
rem.dat$n.rem <- with(rem.dat, orig.len - purg.len)
return(list(dat.with.repeats.removed, rem.dat))
}## End Function
## Generate name for model run directories
generate.res.name <- function(hypers.i){
fold.name <- with(hypers.i, paste(paste0(substr(paste(Species),1,1),
substr(strsplit(paste(Species),' ')[[1]][2],1,1)),
'pa_nboots',nboots,
'nbg', num.bg.points,
'tc', tree.complexity,
'mllr', mllr,
'lmt', lmt,
sep = '_'))
return(fold.name)
}
|
9b09ee91cac2d82a4365086a58d49195b3ff2b35
|
6727bd3ae9437a309f029af586e43625cb4f804a
|
/BMW y VW/bmw y vw.R
|
1d7f68d8b554bd12dabf9692e3c04ab2ecf2b606
|
[] |
no_license
|
miguelcobaleda/prediccion
|
34092fe589ca0bcbee368c151b61e4e23ce94525
|
5534528715edb9bcef80b20ebd7ac6f6e3c1c4b4
|
refs/heads/main
| 2023-02-02T03:27:54.740200
| 2020-12-13T17:19:23
| 2020-12-13T17:19:23
| 311,446,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,731
|
r
|
bmw y vw.R
|
library("quantmod") #Package to download financials historical data
library(forecast)
library("fGarch")
library(vars)
library(depmixS4)
library(TTR)
library(ggplot2)
library(reshape2)
library(xts)
library(extrafont)
#funciones
## Lagrange-multiplier test for ARCH effects in a return series (Tsay, 2013):
## regress the squared, de-meaned returns on their own first `m` lags and
## report the auxiliary-regression summary (its F statistic is the LM test).
##
## rtn: numeric return series.
## m:   selected AR order (number of lags), default 10.
## Returns the `summary.lm` object of the auxiliary regression.
archTest <- function(rtn, m = 10) {
  y <- (rtn - mean(rtn))^2   # squared deviations from the mean
  T <- length(rtn)
  atsq <- y[(m + 1):T]       # response: squared deviations with lags dropped
  ## Design matrix: column k holds the series lagged by k periods.
  x <- vapply(seq_len(m), function(k) y[(m + 1 - k):(T - k)], numeric(T - m))
  summary(lm(atsq ~ x))
}
### End of helper functions ("FIN FUNCIONES")
# Yahoo ticker (stock or index)
sSymbol="BMW.DE"
# Download daily prices from Yahoo Finance (returned directly, not assigned)
mData<-getSymbols(sSymbol ,from="1990-01-01",to="2016-09-30",auto.assign=FALSE)
# Work on the adjusted close
xData=Ad(mData)
# Daily log returns
dRentCont=dailyReturn(xData,type='log',leading=FALSE)
#Exclude NA (First data)
dRentCont=na.exclude(dRentCont)
# Price and return series (plot labels are Spanish on purpose -- user-facing)
plot.zoo(cbind(xData,dRentCont),main=paste(sSymbol," y Rentabilidad"),xlab="años",ylab=c("Precio","rentabilidad"))
grid(lwd=2)
# GARCH volatility modelling
# Plot price, return and squared return (the volatility proxy)
plot.zoo(cbind(Ad(mData),dRentCont,dRentCont^2),main=paste(sSymbol," y Rentabilidad"),xlab="años",ylab=c("Precio","rentabilidad","Volatilidad"))
# Test whether the mean return is zero
t.test(dRentCont)
# ACF & PACF of the volatility proxy
# VolProxy=abs(dRentCont) # absolute value
VolProxy=dRentCont^2 #squared
tsdisplay(VolProxy)
# Ljung-Box tests for serial correlation in the proxy at several lags
Box.test(VolProxy,lag=10, type="Lj")
Box.test(VolProxy,lag=20, type="Lj")
Box.test(VolProxy,lag=40, type="Lj")
# Lagrange-multiplier test for ARCH effects (archTest defined above)
archTest(dRentCont,20)
#ARCH(1)
m1=garchFit(~1+garch(1,0),data=dRentCont,trace=F) # Fit an ARCH(1) model
summary(m1)
resi=residuals(m1,standardize=T) #residuals
resi=xts(resi,order.by=index(dRentCont)) #residuals as xts
tsdisplay(resi^2) #acf pacf residuals
#GARCH(1,1)
m2=garchFit(~1+garch(1,1),data=dRentCont,trace=F) # Fit an GARCH(1,1) model
summary(m2)
resi=residuals(m2,standardize=T) #residuals
resi=xts(resi,order.by=index(dRentCont)) #residuals as xts
tsdisplay(resi^2) #acf pacf residuals
plot(m2)
# GARCH(1,1) with Student-t innovations (heavier tails)
m3=garchFit(~1+garch(1,1),data=dRentCont,trace=F,cond.dist="std")
summary(m3)
plot(m3)
v1=volatility(m3) # Obtain volatility
v1=xts(v1,order.by=index(dRentCont)) # volatility as XTS
plot(sqrt(252)*v1) # annualised volatility (252 trading days per year)
resi=residuals(m3,standardize=T) # Standardized residuals
resi=xts(resi,order.by=index(dRentCont)) # Standardized residuals as XTS
tsdisplay(resi^2) #acf pacf residuals
plot(resi)
predict(m3) #forecast volatility
predict(m3, n.ahead = 10, plot=TRUE, crit_val=2) #plot with 2*standard error
predict(m3,n.ahead=20,plot=TRUE,conf=.9,nx=100) # plot 100 data with 90% confidence
## VAR model on the monthly returns of BMW and Volkswagen.
## Read the two price series; env = NULL makes getSymbols return the xts
## object directly instead of assigning it by name.
bmw=getSymbols("BMW.DE",env=NULL)
vw=getSymbols("VOW.DE",env=NULL)
# Monthly returns of each stock (column 6 = adjusted close).
# FIX: the original computed the returns from `tef` and `ibex`, objects that
# are never defined in this script (leftovers from another analysis); the
# series downloaded just above are `bmw` and `vw`.
rbmw=monthlyReturn(bmw[,6])
rvw=monthlyReturn(vw[,6])
# Bind into one bivariate return series
vY=cbind(rbmw,rvw)
colnames(vY)=c("BMW.DE","VOW.DE")
vY=na.omit(vY)
# Select the lag order
VARselect(vY)
# Estimate: default deterministic terms, then without any ("none")
model.var=VAR(vY)
summary(model.var)
model.var1=VAR(vY,type="none")
summary(model.var1)
# Granger causality tests
causality(model.var1)
# Impulse-response functions
model.ri=irf(model.var1)
model.ri
plot(model.ri)
## Forecast 8 months ahead with a 95% confidence interval
predict(model.var1, n.ahead = 8, ci = 0.95)
## Hidden Markov model (depmixS4) on two-week BMW log returns: two latent
## states interpreted below as bull/bear regimes.
### Read the data
bmw=getSymbols("BMW.DE",env=NULL)
bmw=na.omit(bmw)
# Keep only the adjusted close as an xts series
mData=bmw$BMW.DE.Adjusted
colnames(mData)=c("Close")
# Build a weekly (Friday) xts series: lay out a daily calendar grid, keep
# the Fridays, and set each Friday to the last available close of its week.
semanal=function(mData){
  aa=seq.Date(as.Date(min(index(mData))),length.out=2+as.numeric(as.Date(max(index(mData)))-as.Date(min(index(mData)))),by="1 days")
  bb=xts(rep(NA,length(aa)),aa)
  cc=bb[time(bb[.indexwday(bb)==5])] # weekday index 5 selects Fridays
  dd=sapply(1:(length(cc)-1), function(x) last(mData[seq.Date(as.Date(time(cc[x])),as.Date(time(cc[x+1])),1)]))
  coredata(cc[2:(length(cc))])=dd
  return(cc)
}
mDataLR=semanal(mData)
# Add the return column
colnames(mDataLR)=c("Close")
# Two-week (fortnightly) log return: log price minus its value two Fridays ago
mDataLR$Rentabilidad <- log(mDataLR$Close) - lag(log(mDataLR$Close),k=2)
# Drop the NAs introduced by the lag
mDataLR <- na.exclude(mDataLR)
# xts -> data.frame for modelling/plotting
mDataLRdf <- data.frame(mDataLR)
# Recover the date stored in the row names as a proper Date column
mDataLRdf$Date <-as.Date(row.names(mDataLRdf),"%Y-%m-%d")
# Two-state HMM: the return is Gaussian with state-dependent parameters
# (intercept-only model)
mod <- depmix(Rentabilidad ~ 1, family = gaussian(), nstates = 2, data = mDataLR)
set.seed(1)
# Estimate by EM
fm2 <- fit(mod, verbose = FALSE)
# Summary of the fitted model
summary(fm2)
print(fm2)
# Compute probability of being in each state
probs <- posterior(fm2)
# NOTE(review): assumes column 1 is the most likely state and columns 2-3
# the state probabilities, and that state 1 is "bull" -- verify against
# the depmixS4 posterior() layout and the fitted state parameters.
mDataLRdf$pBull <- probs[,2]
mDataLRdf$pBear <- probs[,3]
mDataLRdf$pState <- probs[,1]
# Name of the first column (the stock price series)
#colnames(mDataLRdf$logret)=c("Rentabilidad")
nameStock <- colnames(mDataLRdf)[1]
# Long-format data frame for the ggplot2 facets below
df <- melt(mDataLRdf[,c(1,2,3,4,5,6)],id="Date",measure=c(nameStock,"Rentabilidad","pBull","pBear","pState"))
## Probability plot
positivoColor=subset(df,df$variable =="Rentabilidad")
pColor=ifelse(positivoColor$value >=0, "blue", "red") # color returns by sign
f <- ggplot()+
  geom_step(data=subset(df,df$variable ==nameStock),aes(Date, value))+
  geom_linerange(data=positivoColor,aes(Date, value,ymin=0,ymax=value),color = pColor)+
  geom_linerange(data=subset(df,df$variable =="pBull"),aes(Date, value,ymin=0,ymax=value),color="cornflowerblue")+
  facet_grid(variable ~., scales = "free", as.table = TRUE) +
  scale_x_date(date_breaks = "1 years",date_labels = "%y")+
  theme_bw() +
  theme(panel.spacing = unit(0,"lines"), axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        strip.background = element_rect(colour="black", fill="white"))+
  ggtitle("Grafico de Estados")
f
## Same plot, plus the decoded state sequence (2 - state flips the level so
## the "bull" state plots high)
f <- ggplot()+
  geom_step(data=subset(df,df$variable ==nameStock),aes(Date, value))+
  geom_linerange(data=positivoColor,aes(Date, value,ymin=0,ymax=value),color = pColor)+
  geom_linerange(data=subset(df,df$variable =="pBull"),aes(Date, value,ymin=0,ymax=value),color="cornflowerblue")+
  geom_step(data=subset(df,df$variable =="pState"),aes(Date, 2-value),color="cornflowerblue",size=1)+
  facet_grid(variable ~., scales = "free", as.table = TRUE) +
  scale_x_date(date_breaks = "1 years",date_labels = "%y")+
  theme_bw() +
  theme(panel.spacing = unit(0,"lines"), axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        strip.background = element_rect(colour="black", fill="white"))+
  ggtitle("Ciclos del BMW: Alcista vs Bajista")+labs(caption = "BMW Hidden Markov Model two states: rentabilidades quincenales")
f
|
3af5e528b0704c313303e3fb0777f5fb4af9710d
|
191d38b3f528a316e51aeef8683799435ac3d2b7
|
/chapter4.R
|
6c02ce0f6e8b0f311078312837d7ca88ba96167b
|
[] |
no_license
|
sonirishi/Mclreath_Bayes
|
7d7e18b1c103fa45a89bba7eeee4ae3d5ec83ddf
|
9b1655de8ffcf649e0c54a70033b57b47a6e9dd6
|
refs/heads/master
| 2020-07-06T14:51:36.492867
| 2019-09-15T10:54:06
| 2019-09-15T10:54:06
| 203,057,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,189
|
r
|
chapter4.R
|
## Statistical Rethinking (McElreath) chapter 4: Gaussian models of the
## Howell1 height data, fitted by grid approximation and MAP.
## NOTE(review): rm(list=ls()) in a script and the T/F shorthands are
## discouraged practice; left unchanged here.
rm(list=ls(all=T))
library(rethinking)
library(dplyr)
## Normality from summed fluctuations: 1000 random walks of 16 uniform steps
random_walk <- replicate(1000,sum(runif(16,-1,1)))
plot(density(random_walk))
hist(random_walk)
## Products of small growth factors are approximately normal too
mults <- replicate(10000,prod(1 + runif(12,0,0.1)))
plot(density(mults))
logmults <- replicate(10000,log(prod(1 + runif(12,0,0.1)))) ## log of big no to convert into small
plot(density(logmults))
## Adult heights (age > 18)
data("Howell1")
d <-Howell1
d2 <- d[d$age > 18,]
plot(density(d2$height))
## Priors: mu ~ Normal(178, 20), sigma ~ Uniform(0, 50)
curve(dnorm(x,178,20),100,250)
curve(dunif(x,0,50),-10,60)
## Grid approximation of the joint posterior of (mu, sigma)
mu.list <- seq(120,200,length.out = 1000)
sigma.list <- seq(1,50,length.out = 1000)
post <- expand.grid(mu.list,sigma.list)
colnames(post) <- c("mu","sigma")
## Log-likelihood of all heights at one (mu, sigma) grid point
LL <- function(mu,sigma){
  sum(dnorm(d2$height,mu,sigma,log=T))
}
post$LL <- mapply(LL,post$mu,post$sigma)
## Unnormalised log-posterior = log-likelihood + log-priors
post$prod <- post$LL + dnorm(post$mu,178,20,log=T) + dunif(post$sigma,0,50,log=T)
## Rescale by the maximum before exponentiating to avoid underflow
post$prob <- exp(post$prod - max(post$prod))
## Sample grid rows in proportion to posterior probability
rows_sample <- sample(1:nrow(post),10000,replace = T,prob = post$prob)
sample.mu <- post$mu[rows_sample]
sample.sigma <- post$sigma[rows_sample]
plot(sample.mu,sample.sigma)
contour_xyz(post$mu,post$sigma,post$prob)
image_xyz(post$mu,post$sigma,post$prob)
###
## MAP fit with sigma modelled on the log scale (keeps sigma positive)
logsigma <- map(
  alist(
    height ~ dnorm(mu,exp(log.sigma)),
    mu ~ dnorm(178,20),
    log.sigma ~ dnorm(2,10)
  ),data = d2
)
post <- extract.samples(logsigma)
## Linear regression of height on weight, fitted by MAP
pred_reg <- map(
  alist(
    height ~ dnorm(mu,sigma),
    mu <- a + b*weight,
    a ~ dnorm(178,20),
    b ~ dnorm(0,10),
    sigma ~ dunif(0,50)
  ),data = d2
)
## Grid approximation of the posterior for the linear regression
## height ~ alpha + beta * weight with residual sd sigma.
alpha.list <- seq(120,200,length.out = 50)
beta.list <- seq(-10,10,length.out = 50)
sigma.list <- seq(1,50,length.out = 50)
post <- expand.grid(alpha.list,beta.list,sigma.list)
colnames(post) <- c("alpha","beta","sigma")
## Log-likelihood of each grid point.
## FIX: the regression mean is alpha + weight * beta; the original used
## post$sigma[i] as the intercept (and 1:nrow / log=T shorthands).
post$LL <- sapply(seq_len(nrow(post)), function(i) {
  sum(dnorm(d2$height, post$alpha[i] + d2$weight * post$beta[i],
            post$sigma[i], log = TRUE))
})
## Unnormalised log-posterior = log-likelihood + log-priors,
## rescaled by the maximum before exponentiating to avoid underflow.
post$prod <- post$LL + dnorm(post$alpha,178,20,log=TRUE) + dnorm(post$beta,0,10,log=TRUE) +
  dunif(post$sigma,0,50,log=TRUE)
post$prob <- exp(post$prod - max(post$prod))
###
## MAP fit of the height-on-weight regression used in the simulations below
map.1 <- map(
  alist(
    height ~ dnorm(mu,sigma),
    mu <- a + b*weight,
    a ~ dnorm(178,100),
    b ~ dnorm(0,10),
    sigma ~ dunif(0,50)
  ),
  data = d2
)
## 1000 posterior samples of (a, b, sigma) and the weights to predict at
post <- extract.samples(map.1,1000)
weight <- seq(25,75,by=1)
# grid <- expand.grid(post$a,post$b,weight) ## this is wrong as a and b are joint distributions
## Linear predictor of the height model: mu = a + b * w.
## a: intercept sample, b: slope sample, w: weight value.
calculate_mu <- function(a, b, w) {
  a + b * w
}
### Good function to simulate from posterior
## Posterior distribution of the regression line at each weight:
## rows = posterior samples, columns = weights
mu.value <- sapply(weight, function(x){mapply(calculate_mu,post$a,post$b,x)})
dens(mu.value)
## NOTE(review): `grid` only exists in the commented-out expand.grid line
## above, so the next two lines fail if run as-is -- presumably leftovers.
grid1 <- cbind(grid,mu.value)
plot(y=grid1[,4],x=grid[,3])
## Same computation via rethinking's link()
mu <- link(map.1,data=data.frame(weight))
## Simulate one height draw from the posterior predictive distribution:
## Normal(mean = a + b * w, sd = s).
calculate_height <- function(a, b, s, w) {
  mu <- a + b * w
  rnorm(1, mu, s)
}
## Posterior-predictive heights at each weight (adds observation noise)
height.value <- sapply(weight, function(x){mapply(calculate_height,post$a,post$b,post$sigma,x)})
height.mean <- apply(height.value,2,mean)
plot(height.mean,weight)
# Q1: predicted heights (mean and HPDI) for five specific weights
weight_data <- c(46.95,43.72,64.78,32.59,54.63)
expected_height <- sapply(weight_data, function(x){mapply(calculate_height,post$a,post$b,post$sigma,x)})
height_mean <- apply(expected_height,2,mean)
height_hdpi <- apply(expected_height,2,HPDI)
## Children (age < 18): refit the linear model on the young subset
d3 <- d[d$age < 18,]
height_child <- map(
  alist(
    height ~ dnorm(mu,sigma),
    mu <- a + b*weight,
    a ~ dnorm(100,50),
    b ~ dnorm(0,20),
    sigma ~ dunif(0,50)
  ), data = d3
)
post <- extract.samples(height_child)
weight_simulate <- seq(5,43,by=1)
## Line uncertainty (mu) and predictive uncertainty (heights), with HPDIs
mu.value <- sapply(weight_simulate, function(x){mapply(calculate_mu,post$a,post$b,x)})
expec.hgt <- sapply(weight_simulate, function(x){mapply(calculate_height,post$a,post$b,post$sigma,x)})
mu.value.hdpi <- apply(mu.value,2,HPDI)
expec.hgt.hdpi <- apply(expec.hgt,2,HPDI)
## Data, MAP line and shaded intervals
plot(y=d3$height, x=d3$weight)
abline(coef(height_child)[1],coef(height_child)[2])
shade(mu.value.hdpi,weight_simulate)
shade(expec.hgt.hdpi,weight_simulate,col = col.alpha("green",0.15))
## Q: full data set, height regressed on log(weight)
full_model <- map(
  alist(
    height ~ dnorm(mu,sigma),
    mu <- a + b*log(weight),
    a ~ dnorm(178,100),
    b ~ dnorm(0,100),
    sigma ~ dunif(0,50)
  ), data = d
)
coef(full_model)
plot(d$height,d$weight)
plot(d$height,log(d$weight)) ### This seems more linear
post <- extract.samples(full_model)
weight_simulate <- seq(5,178,by=1)
## Linear predictor of the log-weight model: mu = a + b * log(w).
calculate_mu_log <- function(a, b, w) {
  a + b * log(w)
}
## Simulate one height draw from the posterior predictive of the log-weight
## model: Normal(mean = a + b * log(w), sd = s).
calculate_height_log <- function(a, b, s, w) {
  rnorm(1, a + b * log(w), s)
}
## Line and predictive uncertainty for the log-weight model (97% HPDIs)
mu.value <- sapply(weight_simulate, function(x){mapply(calculate_mu_log,post$a,post$b,x)})
expec.hgt <- sapply(weight_simulate, function(x){mapply(calculate_height_log,post$a,post$b,post$sigma,x)})
mu.value.hdpi <- apply(mu.value,2,HPDI,0.97)
mu.value.mean <- apply(mu.value,2,mean)
expec.hgt.hdpi <- apply(expec.hgt,2,HPDI,0.97)
## Data, posterior-mean curve and shaded intervals
plot(y=d$height, x=d$weight)
lines(x=weight_simulate,y=mu.value.mean)
shade(mu.value.hdpi,weight_simulate)
shade(expec.hgt.hdpi,weight_simulate,col = col.alpha("green",0.15))
|
7a760fc991f64ebaf0a859277e592e66e33a69ee
|
923f808538d02bea3a3b0125f93d94cf720b090c
|
/.tmp/hrapgc.collectLCs4.R
|
e663b5ba245f9e4c49a8bc37cb8fab088bf533d6
|
[] |
no_license
|
Tuxkid/PNZ_EF
|
e557d8d2a08c92fb33db6faf4862927b4a7beec0
|
219cdd3139b0a05928d5b3c32d6606ceff3e5aa2
|
refs/heads/master
| 2021-01-23T07:04:05.411294
| 2016-02-24T20:39:25
| 2016-02-24T20:39:25
| 40,269,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,239
|
r
|
hrapgc.collectLCs4.R
|
collectLCs4 <- function(adjust.cont = FALSE,
                        xO = feb16OffCI.df, #xOCT = sepOffCI_CT.df,
                        xW = feb16WithCI.df, #xWCT = sepWithCI_CT.df,
                        ab.off = ab.sept15OffAll$concJ,
                        ab.with = ab.sept15WithAll$concJ,
                        zO = NewTotalOff.df, zW = feb16withAllFIX.df)
{
  ### Purpose:- Kiwifruit lot, forget about the 100% bit
  ### Collects LC99 estimates for insects treated off and with fruit,
  ### predicts mortality at the target EF concentrations (2% and 3%) from
  ### the fitted cloglog dose-response lines, and writes the predictions
  ### and the achieved concentrations to an Excel workbook (side effect).
  ### ----------------------------------------------------------------------
  ### Modified from:- collectLCs2
  ### ----------------------------------------------------------------------
  ### Arguments:- adjust.cont: adjust intercept/slope for control mortality
  ###             xO, xW: CI data frames of LC99 estimates (off/with fruit)
  ###             ab.off, ab.with: lists with cloglog slope/intercept fits
  ###                              and the name of the data-glean function
  ###             zO, zW: raw concentration data (off/with fruit)
  ### ----------------------------------------------------------------------
  ### Author:- Patrick Connolly, Date:- 24 Feb 2016, 11:49
  ### ----------------------------------------------------------------------
  ### Revisions:-
  ## Standardize egg in identifiers
  rownames(xO) <- gsub("ME", "egg", rownames(xO))
  ## rownames(xOCT) <- gsub("ME", "egg", rownames(xOCT))
  all.row.names <- unique(rownames(xO))
  ## Off fruit
  off.df <- data.frame(LC99est = as.numeric(xO[all.row.names, ]$lt.mean))
  rownames(off.df) <- all.row.names
  keep.col.names <- names(off.df)
  ### Get identifiers to resort rows (names look like "stage|temp|hours")
  off.df <- within(off.df, SLS <- getbit(all.row.names, "\\|", 1))
  off.df <- within(off.df, temp <- getbit(all.row.names, "\\|", 2))
  off.df <- within(off.df, temp <- gsub("°", "", temp))
  off.df <- within(off.df, temp <- as.numeric(gsub("[A-z]", "", temp)))
  off.df <- within(off.df, duration <- getbit(all.row.names, "\\|", 3))
  off.df <- within(off.df, duration <- as.numeric(gsub("[A-z]", "", duration)))
  off.df$Ndx <- all.row.names
  off.df <- off.df %>%
    arrange(SLS, temp, duration)
  rownames(off.df) <- off.df$Ndx # no longer local dataframe
  ## off.df <- off.df[, keep.col.names]
  ## browser()
  ## With fruit: have to use a matrix
  with.mat <- matrix(nrow = nrow(xW), ncol = ncol(off.df))
  dimnames(with.mat) <- list(rownames(xW), names(off.df))
  with.mat[rownames(xW), "LC99est"] <- as.numeric(xW$lt.mean)
  with.df <- as.data.frame(with.mat)
  with.id <- rownames(with.df)
  with.df <- within(with.df, LC99est[is.nan(LC99est)] <- NA)
  ## remove A| and K| (apple/kiwifruit markers) from rownames
  ## With help from Rhelp
  chopAK <- function(x)
    gsub("\\|[AK]\\|","\\|", x)
  ## Resolve the data-glean functions named inside the ab lists
  glean.off <- get(ab.off$datafun)
  glean.with <- get(ab.with$datafun)
  ## data used in those ab lists
  mort.dat.off <- with(glean.off(), data.frame(id, times, dead, total))
  mort.dat.with <- with(glean.with(), data.frame(id, times, dead, total))
  ## legends to line up
  legs.off <- glean.off()$legend
  legs.off <- gsub("ME", "egg", legs.off)
  ## Get concentrations from data used in glean functions
  zO <- within(zO, SLS <- gsub("ME", "egg", SLS)) # needed here too
  zO <- within(zO, Temp <- paste0(Temperature, "°C"))
  zO <- within(zO, Hours <- paste0(Duration, "h"))
  zW <- within(zW, Temp <- paste0(Temperature, "°C"))
  zW <- within(zW, Hours <- paste0(Duration, "h"))
  zO <- within(zO, Ndx <- paste(SLS, Temp, Hours, sep = "|"))
  zW <- within(zW, Ndx <- paste(SLS, substring(Fruit, 1, 1), Temp, Hours, sep = "|"))
  ## Which concentrations do we want predictions done?
  predict.at <- 2:3 # i.e. target concentrations
  ## Mean/min/max achieved EF concentration per treatment and nominal level
  zzO <- zO %>%
    filter(Efnom %in% predict.at, Rep > 0) %>%
    select(Ndx, Efnom, Efpc, Rep) %>%
    arrange(Ndx, Efnom, Rep) %>%
    group_by(Ndx, Efnom) %>%
    summarise(EF = round(mean(Efpc), 2), Efmin = min(Efpc), Efmax = max(Efpc))
  zzO <- within(zzO, Efnom <- fact2num(Efnom))
  zzW <- zW %>%
    filter(Efnom %in% predict.at, Efpc > 0) %>%
    mutate(Efnom = factor(Efnom)) %>% # group_by must use character or factor
    select(Ndx, Efnom, Efpc, Rep) %>%
    arrange(Ndx, Efnom, Rep) %>%
    group_by(Ndx, Efnom) %>% # group_by must use character or factor
    summarise(EF = round(mean(Efpc), 2), Efmin = min(Efpc), Efmax = max(Efpc))
  zzW <- within(zzW, Efnom <- fact2num(Efnom)) # needs a number for subsetting
  ## One output row per with-fruit treatment and target concentration
  prediction.df <-
    data.frame(StageTreat = rep(with.id, each = 2),
               TargetEFconc = rep(2:3, length(with.id)),
               AchievedEFoff_mean = NA, AchievedEFoff_lo = NA, AchievedEFoff_hi = NA,
               PredictedOff_mean = NA, PredictedOff_lo = NA, PredictedOff_hi = NA,
               AchievedEFwith_mean = NA, AchievedEFwith_lo = NA, AchievedEFwith_hi = NA,
               PredictedWith_mean = NA, PredictedWith_lo = NA, PredictedWith_hi = NA)
  for(i in seq(with.id)){
    id.i <- with.id[i]
    dat.with.i <- mort.dat.with[mort.dat.with$id == i,]
    ## slope and intercept info
    slope.i.with <- ab.with$slope[i]
    intercept.i.with <- ab.with$intercept[i]
    ## corresponding off data identifier
    lab.with.i <- with.id[i]
    lab.with.i.match <- gsub("C", "°C", lab.with.i)
    lab.off.i <- chopAK(lab.with.i.match) # removes the fruit info
    lab.off.ii <- gsub("°C", "C", lab.off.i) # for matching in zzO
    id.off.i <- which(legs.off == lab.off.i)
    dat.off.i <- mort.dat.off[mort.dat.off$id == id.off.i,]
    ## corresponding slope and intercept info for off data
    slope.i.off <- ab.off$slope[id.off.i]
    intercept.i.off <- ab.off$intercept[id.off.i]
    ## times == 0 rows are the untreated controls
    cont.dat.with.i <- dat.with.i[dat.with.i$times == 0,]
    cont.dat.off.i <- dat.off.i[dat.off.i$times == 0,]
    ## control adjustments
    cont.mort.with.i <- with(cont.dat.with.i, mean(dead/total))
    cont.mort.off.i <- with(cont.dat.off.i, mean(dead/total))
    if(adjust.cont){ ## adjust intercepts for control mort
      ## Subtract control mortality on the response scale when possible,
      ## otherwise shrink the slope instead
      if(cloglog.bt(intercept.i.off) > cont.mort.off.i)
        intercept.i.off <- cloglog(cloglog.bt(intercept.i.off) - cont.mort.off.i) else
          slope.i.off <- slope.i.off * (1 - cont.mort.off.i) # adj slope instead
      if(cloglog.bt(intercept.i.with) > cont.mort.with.i)
        intercept.i.with <- cloglog(cloglog.bt(intercept.i.with) - cont.mort.with.i) else
          slope.i.with <- slope.i.with * (1 - cont.mort.with.i)
    }
    ### Relevant concentrations for this i
    achieved.i.off <- as.data.frame(zzO[with(zzO, Ndx == lab.off.ii), ]) # ii for off
    achieved.i.with <- as.data.frame(zzW[with(zzW, Ndx == lab.with.i), ]) # i for with
    for(j in predict.at){ # always 2 for Off fruit
      conc.ij.off <- achieved.i.off[achieved.i.off$Efnom == j, ] %>%
        select(EF, Efmin, Efmax)
      conc.ij.with <- achieved.i.with[achieved.i.with$Efnom == j, ] %>%
        select(EF, Efmin, Efmax)
      ## try(): some treatment/concentration combinations have no data
      try(prediction.df[with(prediction.df, StageTreat == lab.with.i & TargetEFconc == j),
                        c("AchievedEFoff_mean", "AchievedEFoff_lo", "AchievedEFoff_hi")] <-
            conc.ij.off, silent = TRUE)
      try(prediction.df[with(prediction.df, StageTreat == lab.with.i & TargetEFconc == j),
                        c("AchievedEFwith_mean", "AchievedEFwith_lo", "AchievedEFwith_hi")] <-
            conc.ij.with, silent = TRUE)
      ## predictions for those concentrations, off and with
      pred.off.ij <- round(cloglog.bt(intercept.i.off + slope.i.off * conc.ij.off) * 100, 1)
      pred.with.ij <- round(cloglog.bt(intercept.i.with + slope.i.with * conc.ij.with) * 100, 1)
      try(prediction.df[with(prediction.df, StageTreat == lab.with.i & TargetEFconc == j),
                        c("PredictedOff_mean", "PredictedOff_lo", "PredictedOff_hi")] <-
            pred.off.ij, silent = TRUE)
      ## NOTE(review): unlike the calls above, this try() is not silent --
      ## presumably an oversight; left unchanged.
      try(prediction.df[with(prediction.df, StageTreat == lab.with.i & TargetEFconc == j),
                        c("PredictedWith_mean", "PredictedWith_lo", "PredictedWith_hi")] <-
            pred.with.ij)
    }
  }
  ## Split the "stage|fruit|temp|duration" identifier into readable columns
  prediction.df <- within(prediction.df, StageTreat <- as.character(StageTreat))
  prediction.df <- within(prediction.df, Stage <- getbit(StageTreat, "\\|", 1))
  prediction.df <- within(prediction.df, fruit <- getbit(StageTreat, "\\|", 2))
  prediction.df <- within(prediction.df, temp <- getbit(StageTreat, "\\|", 3))
  prediction.df <- within(prediction.df, duration <- getbit(StageTreat, "\\|", 4))
  prediction.df <- within(prediction.df, Temp <- as.numeric(gsub("°C", "", temp)))
  prediction.df <- within(prediction.df, Duration <- as.numeric(gsub("h", "", duration)))
  prediction.df$Fruit <- NA
  prediction.df <- within(prediction.df, Fruit[fruit == "A"] <- "apple")
  prediction.df <- within(prediction.df, Fruit[fruit == "K"] <- "kiwifruit")
  require("WriteXLS")
  out.df <- prediction.df %>%
    select(Stage, Fruit, Temp, Duration, TargetEFconc,
           AchievedEFoff_mean, AchievedEFoff_lo, AchievedEFoff_hi,
           PredictedOff_mean, PredictedOff_lo, PredictedOff_hi,
           AchievedEFwith_mean, AchievedEFwith_lo, AchievedEFwith_hi,
           PredictedWith_mean, PredictedWith_lo, PredictedWith_hi)
  xlout <- "Predictions_With.OffFruit_EF4Feb16.xls"
  ### Write out predictions, the off/with LC99 estimates, and the confidence limit of each
  ### group of replicates
  WriteXLS(x = c("out.df", "xO", "xW"), xlout, row.names = TRUE,
           c("predictions @ 2 & 3%", "Off fruit LC99CI", "With fruit LC99CI"),
           BoldHeaderRow = TRUE, FreezeRow = 3, FreezeCol = 2)
  ## out.df
}
|
ccfd81e21aec2d0acbbf195dcdd98fe429dabcf8
|
dddb431f9b34f1d048180ebadbbd7fb7d9fe73f5
|
/man/ordASDA.Rd
|
0c4305c687954141c3fd469d19bce8c3edc08c15
|
[] |
no_license
|
gumeo/accSDA
|
b091b7f20febe61501920854bad3618d9daf61ed
|
fdc0c29bc02fe69f8726896ca7c6771059807dad
|
refs/heads/master
| 2022-09-06T01:50:43.733060
| 2022-09-02T08:01:43
| 2022-09-02T08:01:43
| 57,045,995
| 7
| 1
| null | 2022-04-05T23:10:55
| 2016-04-25T13:57:33
|
R
|
UTF-8
|
R
| false
| true
| 3,935
|
rd
|
ordASDA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ordinalFunctions.R
\name{ordASDA}
\alias{ordASDA}
\alias{ordASDA.default}
\title{Ordinal Accelerated Sparse Discriminant Analysis}
\usage{
ordASDA(Xt, ...)
\method{ordASDA}{default}(
Xt,
Yt,
s = 1,
Om,
gam = 0.001,
lam = 1e-06,
method = "SDAAP",
control,
...
)
}
\arguments{
\item{Xt}{n by p data matrix, (can also be a data.frame that can be coerced to a matrix)}
\item{...}{Additional arguments for \code{\link{ASDA}} and \code{\link[MASS]{lda}}
function in package MASS.}
\item{Yt}{vector of length n, equal to the number of samples. The classes should be
1,2,...,K where K is the number of classes. Yt needs to be a numeric vector.}
\item{s}{We need to find a hyperplane that separates all classes with different biases.
For each new bias we define a binary classification problem, where a maximum of
s ordinal classes are contained in each of the two classes. A higher value of s means
that more data will be copied in the data augmentation step. By default s is 1.}
\item{Om}{p by p parameter matrix Omega in generalized elastic net penalty, where
p is the number of variables.}
\item{gam}{Regularization parameter for elastic net penalty, must be greater than zero.}
\item{lam}{Regularization parameter for l1 penalty, must be greater than zero.}
\item{method}{String to select method, now either SDAD or SDAAP, see ?ASDA for more info.}
\item{control}{List of control arguments further passed to ASDA. See \code{\link{ASDA}}.}
}
\value{
\code{ordASDA} returns an object of \code{\link{class}} "\code{ordASDA}" including a list
with the same components as an ASDA objects and:
\describe{
\item{\code{h}}{Scalar value for biases.}
\item{\code{K}}{Number of classes.}
}
\code{NULL}
}
\description{
Applies an accelerated proximal gradient algorithm to
the optimal scoring formulation of sparse discriminant analysis proposed
by Clemmensen et al. 2011. The problem is further cast as a binary
classification problem as described in "Learning to Classify Ordinal Data:
The Data Replication Method" by Cardoso and da Costa to handle the ordinal labels.
This function serves as a wrapper for the \code{\link{ASDA}} function, where the
appropriate data augmentation is performed. Since the problem is cast into
a binary classification problem, only a single discriminant vector comes from the
result. The first *p* entries correspond to the variables/coefficients for
the predictors, while the following K-1 entries correspond to biases for the
found hyperplane, to separate the classes. The resulting object is of class ordASDA
and has an accompanying predict function. The paper by Cardoso and da Costa can
be found here: (http://www.jmlr.org/papers/volume8/cardoso07a/cardoso07a.pdf).
}
\note{
Remember to normalize the data.
}
\examples{
set.seed(123)
# You can play around with these values to generate some 2D data to test one
numClasses <- 5
sigma <- matrix(c(1,-0.2,-0.2,1),2,2)
mu <- c(0,0)
numObsPerClass <- 5
# Generate the data, can access with train$X and train$Y
train <- accSDA::genDat(numClasses,numObsPerClass,mu,sigma)
test <- accSDA::genDat(numClasses,numObsPerClass*2,mu,sigma)
# Visualize it, only using the first variable gives very good separation
plot(train$X[,1],train$X[,2],col = factor(train$Y),asp=1,main="Training Data")
# Train the ordinal based model
res <- accSDA::ordASDA(train$X,train$Y,s=2,h=1, gam=1e-6, lam=1e-3)
vals <- predict(object = res,newdata = test$X) # Takes a while to run ~ 10 seconds
sum(vals==test$Y)/length(vals) # Get accuracy on test set
#plot(test$X[,1],test$X[,2],col = factor(test$Y),asp=1,
# main="Test Data with correct labels")
#plot(test$X[,1],test$X[,2],col = factor(vals),asp=1,
# main="Test Data with predictions from ordinal classifier")
}
\seealso{
\code{\link{ASDA}}.
}
|
80b18b0a09b52b39751dfcec4f20ef0735c24502
|
763682fcbdb7430bed317aa94b0ea2f014f52ec4
|
/fig1/plotPRStimeGroup.R
|
ad460dd675a46b78ba183c5910321b31fd6ce2c9
|
[] |
no_license
|
mathilab/SkinPigmentationCode
|
10ce7b5c8fb05bf2263d5f420cae27c1468adef7
|
e829dfcc1ee1311c3701b1e4a27ecfaa5ec17e61
|
refs/heads/master
| 2022-12-28T16:44:34.598601
| 2020-10-16T06:22:01
| 2020-10-16T06:22:01
| 258,414,584
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,711
|
r
|
plotPRStimeGroup.R
|
## Plot PRS time series stratified by European ancestral group
## Plot polygenic-score (PRS) time series stratified by three ancient
## European ancestry groups, fitting a binomial GLM trend per group and
## writing each fit's summary and the stratified figure as side effects.
##
## data.df : data frame with Score, Date, ID and ADMIXTURE proportions
##           ANA, HG, YAM; needs Weight when use_beta, otherwise NumDark,
##           NumLight and TotalNumSNPs.
## avg.df  : unused in this function; kept for interface compatibility.
## use_beta: TRUE (or "T") -> weight the score GLM by `Weight`; otherwise
##           model the (dark, light) allele counts directly.
## out     : path prefix for the *_ef/_sp/_hg.txt summaries and the
##           *_stratified.pdf figure.
plotPRStimeGroup <- function(data.df,avg.df,use_beta,out) {
  # Partition dataset by ADMIXTURE proportions
  # Early Farmer: mostly Anatolian, excluding late samples with steppe input
  ef.df <- data.df[data.df$ANA>0.6, ]
  filter_ef <- ef.df$ID[which(ef.df$Date<5000 & ef.df$YAM>0.3)]
  ef.df <- ef.df[!(ef.df$ID %in% filter_ef), ]
  # Hunter gatherer
  hg.df <- data.df[which(data.df$HG>0.6), ]
  # Steppe/Yamnaya
  filter_sp <- which(data.df$Date<=5000 & data.df$YAM>0.3)
  sp.df <- data.df[filter_sp, ]
  # assign factors for legend
  data.df$color[which(data.df$HG>0.6)] <- 'HG'
  data.df$color[data.df$ID %in% ef.df$ID] <- 'EF'
  data.df$color[filter_sp] <- 'SP'
  # FIX: scalar condition -- use short-circuit `||` instead of the
  # element-wise `|`, and TRUE instead of the reassignable shorthand T.
  if (use_beta == TRUE || use_beta == "T") {
    # run weighted GLM of Score on Date for each group
    # early farmer
    model.ef <- glm(Score ~ Date, family="binomial", weights = Weight, data=ef.df)
    sink(paste(out,'_ef.txt',sep=''))
    print(summary(model.ef))
    sink()
    # steppe
    model.sp <- glm(Score ~ Date, family="binomial", weights = Weight, data=sp.df)
    sink(paste(out,'_sp.txt',sep=''))
    print(summary(model.sp))
    sink()
    # hunter gatherer
    model.hg <- glm(Score ~ Date, family="binomial", weights = Weight, data=hg.df)
    sink(paste(out,'_hg.txt',sep=''))
    print(summary(model.hg))
    sink()
    # plot groups into one figure
    # NOTE(review): the ggplot chain is neither assigned nor printed;
    # ggsave() below relies on last_plot() picking it up -- verify.
    ggplot(data.df, aes(x=Date, y=Score)) + theme_classic() +
      scale_color_manual(name="Population",
                         labels=c('EF','HG','SP'),
                         values=c('steelblue1','red','palegreen2')
      ) +
      theme(legend.position=c(0.13,0.85),
            legend.title=element_text(size=6),
            legend.text=element_text(size=6),
            legend.background=element_rect(size=0.15, linetype="solid", color='grey'),
            legend.key.size = unit(0.4, "cm")
      ) + guides(size=FALSE) +
      scale_x_reverse() +
      xlab("Date (years BP)") + ylab("Score") +
      scale_y_continuous(breaks=c(0,0.5,1)) +
      geom_point(data=data.df[!(is.na(data.df$color)), ],
                 mapping=aes(x=Date, y=Score, color=color, size=Weight^7),
                 pch=16, alpha=0.6) +
      stat_smooth(method="glm", method.args=list(family="binomial"), se=F,
                  aes(weight=Weight), lty='longdash',
                  color=rgb(0,0,0,0.8)) +
      stat_smooth(data=ef.df,mapping=aes(x=Date, y=Score, weight=Weight),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color="blue", fill='blue') +
      stat_smooth(data=hg.df,mapping=aes(x=Date, y=Score, weight=Weight),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color='red3', fill='red3') +
      stat_smooth(data=sp.df,mapping=aes(x=Date, y=Score, weight=Weight),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color='darkgreen', fill='darkgreen')
    ggsave(file = paste(out,'_stratified.pdf',sep=''), width = 4, height = 4)
  } else {
    # run GLM for each group on the (dark, light) allele counts
    # early farmer
    values.ef <- cbind(ef.df$NumDark, ef.df$NumLight)
    colnames(values.ef) <- c('dark', 'light')
    model.ef <- glm(values.ef ~ ef.df$Date, family = "binomial")
    sink(paste(out,'_ef.txt',sep=''))
    print(summary(model.ef))
    sink()
    # steppe
    values.sp <- cbind(sp.df$NumDark, sp.df$NumLight)
    colnames(values.sp) <- c('dark', 'light')
    model.sp <- glm(values.sp ~ sp.df$Date, family = "binomial")
    sink(paste(out,'_sp.txt',sep=''))
    print(summary(model.sp))
    sink()
    # hunter gatherer
    values.hg <- cbind(hg.df$NumDark, hg.df$NumLight)
    colnames(values.hg) <- c('dark', 'light')
    model.hg <- glm(values.hg ~ hg.df$Date, family = "binomial")
    sink(paste(out,'_hg.txt',sep=''))
    print(summary(model.hg))
    sink()
    # plot groups into one figure, weighting by SNP count instead
    ggplot(data.df, aes(x=Date, y=Score)) + theme_classic() +
      scale_color_manual(name="Population",
                         labels=c('EF','HG','SP'),
                         values=c('steelblue1','red','palegreen2')
      ) +
      theme(legend.position=c(0.13,0.85),
            legend.title=element_text(size=6),
            legend.text=element_text(size=6),
            legend.background=element_rect(size=0.15, linetype="solid", color='grey'),
            legend.key.size = unit(0.4, "cm")
      ) +
      guides(size=FALSE) +
      scale_x_reverse() +
      scale_y_continuous(breaks=c(0,0.5,1)) +
      xlab("Date (years BP)") + ylab("Score") +
      geom_point(data=data.df[!(is.na(data.df$color)), ],
                 mapping=aes(x=Date, y=Score, size=(TotalNumSNPs)^7, color=color),
                 pch=16, alpha=0.6) +
      stat_smooth(method="glm", method.args=list(family="binomial"), se=F,
                  aes(weight=TotalNumSNPs), lty='longdash',
                  color=rgb(0,0,0,0.8)) +
      stat_smooth(data=ef.df,mapping=aes(x=Date, y=Score, weight=TotalNumSNPs),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color="blue", fill='blue') +
      stat_smooth(data=hg.df,mapping=aes(x=Date, y=Score, weight=TotalNumSNPs),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color='red3', fill='red3') +
      stat_smooth(data=sp.df,mapping=aes(x=Date, y=Score, weight=TotalNumSNPs),
                  method="glm", method.args=list(family="binomial"),
                  se=T, inherit.aes=F, color='darkgreen', fill='darkgreen')
    ggsave(file = paste(out,'_stratified.pdf',sep=''), width = 4, height = 4)
  }
}
|
966a630d61d4659ceeb85ecc3c87b5e20ca337b4
|
0e6f323fffa7de3eafe39767bd8dc1aa7c762e1b
|
/R/bit-package.R
|
906b1ed21ad08ebffa7d279f3b9bcc43650a7432
|
[] |
no_license
|
cran/bit
|
a3fa7153016147b81826381b2c2f44261b7d8769
|
02ef361a415bb7268767da034fb56c0018b59e1f
|
refs/heads/master
| 2022-11-18T16:22:02.293270
| 2022-11-15T20:20:16
| 2022-11-15T20:20:16
| 18,805,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
bit-package.R
|
# Package documentation
# (c) 2008-2017 Jens Oehlschägel
# Licence: GPL2
# Provided 'as is', use at your own risk
#' bit: Classes and methods for fast memory-efficient boolean selections
#'
#' Provided are classes for boolean and skewed boolean vectors, fast boolean
#' methods, fast unique and non-unique integer sorting, fast set operations on
#' sorted and unsorted sets of integers, and foundations for ff (range indices,
#' compression, chunked processing).
#'
#' For details view the vignettes \url{../doc/bit-usage.pdf} and
#' \url{../doc/bit-performance.pdf}
#'
#'@name bit-package
NULL
# devtools::use_vignette("bit-usage")
# devtools::use_vignette("bit-performance")
# require(rhub)
# rhub_bit_4.0.5 <- check_for_cran(
# path = "../bit_4.0.5.tar.gz"
# , email = "Jens.Oehlschlaegel@truecluster.com"
# , check_args = "--as-cran"
# , env_vars = c('_R_CHECK_FORCE_SUGGESTS_'= "false",'_R_CHECK_CRAN_INCOMING_USE_ASPELL_'= "true", '_R_CHECK_XREFS_MIND_SUSPECT_ANCHORS_'="true")
# , platforms = NULL
# , show_status = FALSE
# )
#
# ─ Uploading package
# ─ Preparing build, see status at
# https://builder.r-hub.io/status/bit_4.0.5.tar.gz-b6a1c13ef92c42d9b68257139666fb46
# https://builder.r-hub.io/status/bit_4.0.5.tar.gz-abc80afc9741404c9e86511dee4585c5
# https://builder.r-hub.io/status/bit_4.0.5.tar.gz-93409d19ee544acab83be7de1b380740
# https://builder.r-hub.io/status/bit_4.0.5.tar.gz-d4f141780d004fa388c82d027a6e40e2
# olddir <- "../revdepold"
# newdir <- "../revdepnew"
# tools::check_packages_in_dir(olddir,
# check_args = c("--as-cran", ""),
# reverse = list(repos = getOption("repos")["CRAN"]))
# tools::check_packages_in_dir(newdir, old=olddir
# check_args = c("--as-cran", ""),
# reverse = list(repos = getOption("repos")["CRAN"]))
# tools::summarize_check_packages_in_dir_results(newdir, all = FALSE, full = TRUE)
# tools::check_packages_in_dir_changes(newdir, olddir, outputs = TRUE, sources = FALSE)
|
a528460e7215b372bd31b4c033651dcf5d7caaf8
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/shinystan/inst/ShinyStan/global_utils.R
|
45acfd3ef1b2ff50da7d6b6b6dcc2a45f4f51079
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,122
|
r
|
global_utils.R
|
# Give the ShinyStan app access to internal ggplot2 functions.  The .rda
# file defines `ggplot_fns`, a vector of function names; pick the file that
# matches the installed ggplot2 API (internals changed at 2.0.0).
ggplot_fns_file <- if (packageVersion("ggplot2") < "2.0.0")
  "ggplot_fns_old.rda" else "ggplot_fns.rda"
load(ggplot_fns_file)
# Copy each named function out of the ggplot2 namespace into
# parent.frame(2) -- presumably the environment sourcing this file; verify.
# try(): a name missing from this ggplot2 version is silently skipped.
lapply(ggplot_fns, function(f) {
  try(assign(f, getFromNamespace(f, "ggplot2"), envir = parent.frame(2)),
      silent = TRUE)
})
# Source every helper and the posterior-predictive-check labels locally
helpers <- file.path("helper_functions", list.files("helper_functions", full.names = FALSE))
for (h in helpers) source(h, local = TRUE)
source(file.path("server_files","utilities","ppcheck_names_descriptions.R"), local = TRUE)
# avoid conflict with inline::code if rstan is loaded
code <- shiny::code
# "Save & Close" button for the app header: registers a shiny action-button
# event and closes the browser window client-side via the onclick handler.
save_and_close <- tags$button(
  id = 'save_and_close_button',
  type = "button",
  class = "btn action-button",
  onclick = "window.close();",
  "Save & Close"
)
shinystan_version <- function() {
  # Report the installed shinystan version as emphasized text.
  # Returns NULL when the package cannot be found, which prevents an error
  # when the app is deployed to shinyapps.io.
  pkg_ver <- try(utils::packageVersion("shinystan"))
  if (inherits(pkg_ver, "try-error")) {
    return()
  }
  strong(paste("Version", pkg_ver))
}
# Banner for the home page: the wide ensemble image on top, then the Stan
# logo next to the "ShinyStan" title
logo_and_name <- function() {
div(
div(img(src = "wide_ensemble.png",
class = "wide-ensemble", width = "100%")),
div(style = "margin-top: 25px",
img(src = "stan_logo.png", class = "stan-logo"),
div(id = "shinystan-title", "ShinyStan"))
)
}
# Reminder text (with the given HTML id) telling users to exit via the
# Save & Close button so edits are not lost
save_and_close_reminder <- function(id) {
helpText(id = id,
p("To make sure the changes aren't lost, use the",
span(class = "save-close-reminder", "Save & Close"),
"button in the top left corner to exit the app before",
"closing the browser window.")
)
}
# Build a table-of-contents actionLink for a page name, e.g. "Diagnose" ->
# inputId "toc_diagnose". When `icon_name` is supplied an icon is attached;
# any extra arguments are forwarded to icon().
# NOTE: the original passed the icon as `if (!missing(icon_name)) icon =
# icon(...)` inside the call, which only works because `=` assignment inside
# an `if` body evaluates to the assigned value and is matched positionally.
# The icon argument is now named explicitly, which is equivalent but clear.
toc_entry <- function(name, icon_name, ...) {
  lnk_icon <- if (missing(icon_name)) NULL else icon(name = icon_name, ...)
  actionLink(inputId = paste0("toc_", tolower(name)), label = name,
             icon = lnk_icon)
}
# Checkbox that toggles display of a page's option controls; the "table"
# page uses a different label than the plot pages.
a_options <- function(name) {
  box_label <- if (name == "table") "Table Options" else "Show/Hide Options"
  div(class = "aoptions",
      checkboxInput(inputId = paste0(name, "_options_show"),
                    label = strong(style = "margin-top: 20px; color: #222222;",
                                   box_label),
                    value = FALSE))
}
# Link (with a book icon) that opens the glossary; `id` becomes the
# actionLink inputId observed by the server.
a_glossary <- function(id) {
div(class = "aoptions",
actionLink(inputId = id,
label = strong(style = "margin-top: 20px; color: #222222;",
"Glossary"),
icon = icon("book", lib = "glyphicon")))
}
# Emphasized-text helpers at fixed sizes: medium (14px), large (18px),
# and link-blue.
strongMed <- function(...) {
strong(style = "font-size: 14px; margin-bottom: 5px;", ...)
}
strongBig <- function(...) {
strong(style = "font-size: 18px; margin-bottom: 5px;", ...)
}
strong_bl <- function(...) {
strong(style = "color: #006DCC;", ...)
}
# Static labels shown on pages whose content depends on the sampler
algorithm_nuts <- h5(style = "color: #337ab7;", "algorithm = NUTS")
algorithm_hmc <- h5(style = "color: #337ab7;", "algorithm = HMC")
# Output containers with fixed heights used throughout the UI
dygraphOutput_175px <- function(id) {
dygraphs::dygraphOutput(id, height = "175px")
}
# NOTE(review): the `...` argument of the two plotOutput helpers below is
# accepted but never forwarded -- confirm whether that is intentional.
plotOutput_200px <- function(id, ...) {
plotOutput(id, height = "200px")
}
plotOutput_400px <- function(id, ...) {
plotOutput(id, height = "400px")
}
# Panel shown only when chains are plotted together in the density tab
condPanel_dens_together <- function(...) {
conditionalPanel(condition = "input.dens_chain_split == 'Together'", ...)
}
# Panel shown only when the selected comparison prior equals `dist`
condPanel_dens_prior <- function(dist, ...) {
cond <- paste0("input.dens_prior ==","'", dist,"'")
conditionalPanel(cond, ...)
}
# function to suppress unnecessary warnings and messages generated by ggplot
suppress_and_print <- function(x) {
  # Both handlers are active while print() runs; print() returns its
  # argument, so the caller still receives `x`.
  suppressWarnings(suppressMessages(print(x)))
}
# make_param_list ------------------------------------------------------
# generate list of parameter names (formatted for shiny::selectInput)
#
# For each parameter group (a name of object@param_dims): scalar parameters
# (zero-length dims) map to themselves, while array parameters expand into
# all of their indexed names, e.g. beta -> "beta[1]", "beta[2]", ...
#
# FIX: uses seq_len() instead of 1:ll so an object with no parameter groups
# yields an empty list instead of indexing positions 1 and 0 and then
# failing on the names<- assignment.
.make_param_list <- function(object) {
  param_groups <- names(object@param_dims)
  ll <- length(object@param_dims)
  choices <- vector("list", ll)
  choices[seq_len(ll)] <- ""
  names(choices) <- param_groups
  for (i in seq_len(ll)) {
    if (length(object@param_dims[[i]]) == 0) {
      # scalar parameter: the group name is the parameter name
      choices[[i]] <- list(param_groups[i])
    } else {
      # array parameter: collect every "group[...]" entry from param_names
      temp <- paste0(param_groups[i], "\\[")
      choices[[i]] <- object@param_names[grep(temp, object@param_names)]
    }
  }
  choices
}
# make_param_list_with_groups ------------------------------------------------------
# generate list of parameter names and include parameter groups (formatted for
# shiny::selectInput)
#
# Like .make_param_list(), but each array parameter group also gets a
# synthetic "<group>_as_shinystan_group" entry (labelled "ALL <group>") so a
# whole group can be selected at once; see .update_params_with_groups().
#
# FIXES: seq_len() replaces 1:ll (safe when there are no groups); vapply()
# replaces sapply() (guaranteed integer result for zero groups); the scalar
# `if` condition uses && instead of the vectorized &.
.make_param_list_with_groups <- function(object, sort_j = FALSE) {
  param_groups <- names(object@param_dims)
  ll <- length(object@param_dims)
  # number of declared dimensions per group (0 => scalar parameter)
  LL <- vapply(object@param_dims, length, integer(1), USE.NAMES = FALSE)
  choices <- vector("list", ll)
  choices[seq_len(ll)] <- ""
  names(choices) <- param_groups
  for (i in seq_len(ll)) {
    if (LL[i] == 0) {
      choices[[i]] <- list(param_groups[i])
    } else {
      group <- param_groups[i]
      temp <- paste0("^", group, "\\[")
      ch <- object@param_names[grep(temp, object@param_names)]
      # toggle row/column major sorting so e.g. "beta[1,1], beta[1,2],
      # beta[2,1], beta[2,2]" instead of "beta[1,1], beta[2,1], beta[1,2],
      # beta[2,2]"
      if (isTRUE(sort_j) && LL[i] > 1) ch <- gtools::mixedsort(ch)
      ch_out <- c(paste0(group, "_as_shinystan_group"), ch)
      names(ch_out) <- c(paste("ALL", group), ch)
      choices[[i]] <- ch_out
    }
  }
  choices
}
# update parameter selection for multi-parameter plots --------------------
# update with regex
# TRUE when `pattern` is a syntactically valid regular expression, FALSE
# otherwise (determined by attempting a grep() and catching the error).
.test_valid_regex <- function(pattern) {
  probe <- try(grep(pattern, ""), silent = TRUE)
  !inherits(probe, "try-error")
}
# Append to `params` every not-yet-selected name in `all_param_names` that
# matches `regex_pattern`; `params` is returned unchanged when the pattern
# is empty or matches nothing.
.update_params_with_regex <- function(params, all_param_names, regex_pattern) {
  if (length(regex_pattern) == 0) {
    return(params)
  }
  already_selected <- all_param_names %in% params
  candidates <- all_param_names[!already_selected]
  matched <- grep(regex_pattern, candidates, value = TRUE)
  if (length(matched) == 0) params else c(params, matched)
}
# update with groups
# Expand any "<group>_as_shinystan_group" placeholders in `params` into the
# full set of "<group>[...]" names, keeping ordinary selections as-is.
.update_params_with_groups <- function(params, all_param_names) {
  group_idx <- grep("_as_shinystan_group", params)
  if (length(group_idx) == 0) {
    return(params)
  }
  expand_group <- function(gname) {
    all_param_names[grep(paste0("^", gname, "\\["), all_param_names)]
  }
  plain <- params[-group_idx]
  group_names <- gsub("_as_shinystan_group", "", params[group_idx])
  # sapply (not lapply) kept deliberately: its USE.NAMES behavior names the
  # expanded entries the same way the original implementation did
  expanded <- sapply(group_names, expand_group)
  c(plain, unlist(expanded))
}
# generate color vectors --------------------------------------------------
# Evenly spaced hues around the HCL color wheel: lightness 50 for the
# general palette, lightness 80 for the per-chain palette.
color_vector <- function(n) {
  hue_seq <- seq(15, 375, length = n + 1)
  hcl(h = hue_seq, l = 50, c = 50)[1:n]
}
color_vector_chain <- function(n) {
  hue_seq <- seq(15, 375, length = n + 1)
  hcl(h = hue_seq, l = 80, c = 50)[1:n]
}
# Point alpha for scatter-style plots: fully opaque for small N, stepped
# down through 200 points, then a normal-CDF falloff, floored at 0.15 from
# N = 1500 onward.
alpha_calc_pt <- function(N) {
  if (N <= 100) {
    1
  } else if (N <= 200) {
    0.75
  } else if (N >= 1500) {
    0.15
  } else {
    1 - pnorm(N / 1500)
  }
}
# Line alpha: stepped falloff with the number of lines drawn.
alpha_calc_lines <- function(N) {
  breaks <- c(50, 500, 1000, 5000)
  values <- c(0.5, 0.4, 0.3, 0.2)
  idx <- which(N < breaks)[1]
  if (is.na(idx)) 0.1 else values[idx]
}
# transformations ---------------------------------------------------------
# Choices for the transformation selectInput: label -> function name, where
# unnamed entries use the function name itself as the label.
# FIX: the original listed "log" twice; the duplicate entry is removed.
transformation_choices <-
  c("abs", "atanh", cauchit = "pcauchy", "cloglog",
    "exp", "expm1", "identity", "inverse", inv_logit = "plogis",
    "log", "log10", "log2", "log1p", logit = "qlogis",
    probit = "pnorm", "square", "sqrt")
# x -> 1/x
inverse <- function(x) 1/x
# complementary log-log link: log(-log(1 - x)), via log1p for accuracy near 0
cloglog <- function(x) log(-log1p(-x))
# x -> x^2
square <- function(x) x^2
# Dropdown of variable transformations, defaulting to the identity
transformation_selectInput <- function(id) {
selectInput(id, label = NULL,
choices = transformation_choices,
selected = "identity")
}
# Instructions shown next to the transformation controls; wording depends on
# how many variables ("x", "x,y", or x/y/z) the current plot supports
transform_helpText <- function(var = "x") {
div(
if (var == "x")
helpText(style = "font-size: 13px;",
"To apply a transformation",
"select a function and click",
code("Transform"))
else if (var == "x,y")
helpText(style = "font-size: 13px;",
"To apply transformations",
"select a function for x and/or y",
"and click", code("Transform"))
else
# any other value of `var` is treated as the three-variable (x,y,z) case
helpText(style = "font-size: 13px;",
"To apply transformations",
"select a function for x, y, and/or z",
"and click", code("Transform"))
)
}
# extra distributions for density comparisons -----------------------------
# t distribution with location and scale:
# density is (1/scale) * dt((x - location)/scale, df)
.dt_loc_scale <- function(x, df, location, scale) {
  z <- (x - location) / scale
  dt(z, df) / scale
}
# inverse gamma distribution
# density computed on the log scale for numerical stability, then
# exponentiated: scale^shape / Gamma(shape) * x^-(shape+1) * exp(-scale/x)
.dinversegamma <- function(x, shape, scale) {
  log_dens <- shape * log(scale) - lgamma(shape) -
    (shape + 1) * log(x) - scale / x
  exp(log_dens)
}
# diagnostics help text ---------------------------------------------------
# hT11: small (11px) helpText wrapper used for all the hints below
hT11 <- function(...) helpText(style = "font-size: 11px;", ...)
# the \\(...\\) delimiters below are MathJax, rendered in the browser
help_interval <- hT11(
"Highlighted interval shows \\(\\bar{x} \\pm sd(x)\\)")
help_lines <- hT11(
"Lines are mean (solid) and median (dashed)")
help_max_td <- hT11(
"Horizontal line indicates the max_treedepth setting")
help_points <- hT11(
"Large red points indicate which (if any) iterations",
"encountered a divergent transition. Yellow indicates",
"a transition hitting the maximum treedepth.")
help_dynamic <- hT11(
"Use your mouse or the sliders to select areas in the",
"traceplot to zoom into. The other plots on the screen",
"will update accordingly. Double-click to reset.")
# stan manual
# Attribution line shown beneath glossary entries, linking to the Stan
# manual and its CC BY license.
# FIX: user-facing typo "excepts" corrected to "excerpts".
stan_manual <- function() {
  helpText(style = "font-size: 12px;",
           "Glossary entries are compiled (with minor edits) from various excerpts of the",
           a("Stan Modeling Language User's Guide and Reference Manual",
             href = "http://mc-stan.org/documentation/"),
           "(",a(href = "http://creativecommons.org/licenses/by/3.0/", "CC BY (v3)"),")"
  )
}
# to use in ui.R
# Pull model metadata out of the shinystan object `object` (in scope when
# the app launches) into dot-prefixed globals that ui.R reads.
.model_name <- slot(object, "model_name")
.param_names <- slot(object, "param_names")
.param_list <- .make_param_list(object)
.param_list_with_groups <- .make_param_list_with_groups(object)
.nChains <- slot(object, "nChains")
.nIter <- slot(object, "nIter")
.nWarmup <- slot(object, "nWarmup")
.model_code <- slot(object, "model_code")
.notes <- slot(object, "user_model_info")
# flag (or the stanreg marker itself) when the object came from rstanarm
.from_rstanarm <- if (is.null(object@misc$stanreg)) FALSE else object@misc$stanreg
|
9515428874486c1f960a791b1c56e2e8116d8922
|
4e263337af30425e2bfc61284f45f611cec6cd0e
|
/Analysis/Mixed_Model_Group.R
|
742acba1f826591a0b12fcd2e6a91fb7f4b05fb1
|
[] |
no_license
|
yeatmanlab/Parametric_speech_public
|
c9ce4f443783c11355a07d4d5c3c87f5a0936bb6
|
8df268acda5c9e425c6df43291191207082d91a4
|
refs/heads/master
| 2020-04-23T17:47:01.871970
| 2019-02-18T19:39:35
| 2019-02-18T19:39:35
| 171,344,531
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,343
|
r
|
Mixed_Model_Group.R
|
# This script does linear models on the fit psychometric functions
library(dplyr)
library(lme4)
library(pbkrtest)
library(ggplot2)
# NOTE(review): rm(list = ls()) wipes the user's workspace; discouraged in
# shared scripts, kept here to preserve the script's behavior
rm(list = ls())
psychometrics <- read.csv("../cleaned_psychometrics.csv")
## set deviation contrasts
psychometrics$duration<- factor(psychometrics$duration, levels=c("100", "300"))
duration_dimnames <- list(levels(psychometrics$duration),
levels(psychometrics$duration)[2])
# deviation coding (-0.5 / +0.5) so the duration coefficient is a main
# effect centered across the two levels
contrasts(psychometrics$duration) <- matrix(c(-0.5, 0.5), nrow=2, dimnames=duration_dimnames)
# Only compare two groups
psychometrics <- subset(psychometrics, group != "Below Average")
### center reading score, etc
psychometrics$adhd_dx <- as.logical(psychometrics$adhd_dx)
# scale(..., scale = FALSE) mean-centers without rescaling to unit variance
psychometrics$wj_brs <- scale(psychometrics$wj_brs, scale = FALSE)
psychometrics$twre_index <- scale(psychometrics$twre_index, scale = FALSE)
psychometrics$wasi_mr_ts <- scale(psychometrics$wasi_mr_ts, scale=FALSE)
psychometrics <- na.omit(psychometrics)
## ## ## ## ## ## ##
## SLOPE MODELS ##
## ## ## ## ## ## ##
full_model <- lmer(slope ~ group*duration + adhd_dx + wasi_mr_ts + (1|subject_id),
data=psychometrics)
# p-values for full model
coefs <- data.frame(coef(summary(full_model)))
df.KR <- get_ddf_Lb(full_model, fixef(full_model))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
## model selection: nuisance variables
no_wasi_model <- update(full_model, ~ . - wasi_mr_ts)
anova(full_model, no_wasi_model) # Can remove wasi
pb.b <- PBmodcomp(full_model, no_wasi_model, nsim = 500)
pb.b
no_adhd_model <- update(full_model, ~ . - adhd_dx)
anova(full_model, no_adhd_model) # OK to remove adhd
no_nuisance_model <- update(full_model, ~ . - adhd_dx -wasi_mr_ts)
anova(full_model, no_nuisance_model) # OK to remove both nuisance parameters
# Does duration matter?
no_duration_model <- lmer(slope ~ group + (1|subject_id), data=psychometrics)
anova(no_nuisance_model, no_duration_model) # Don't want to remove duration
# Does group matter?
no_wj_model <- lmer(slope ~ duration +(1|subject_id), data=psychometrics)
anova(no_nuisance_model, no_wj_model) # Group doesn't matter
pb.b <- PBmodcomp(no_nuisance_model, no_wj_model, nsim = 500)
pb.b
fit <- lmer(slope ~ duration+ group + (1|subject_id), psychometrics)
win_model <- fit
summary(win_model)
coefs <- data.frame(coef(summary(win_model)))
df.KR <- get_ddf_Lb(win_model, fixef(win_model))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
# Is there any significant effect of group overall? Omnibus statistical value
group_only <- lmer(slope ~ group + (1|subject_id), psychometrics)
df.KR <- get_ddf_Lb(group_only, fixef(group_only))
Fval <- anova(group_only)$F
df1 <- anova(group_only)$Df
omnibus_sig = 1-pf(Fval, df1, df.KR)
omnibus_sig
# Now for duration: omnibus F test with a Kenward-Roger denominator df
dur_only <- lmer(slope ~ duration + (1|subject_id), psychometrics)
# BUG FIX: the original called get_ddf_Lb(dur_only, fixef(group_only)),
# computing the KR degrees of freedom against the wrong model's fixed
# effects; it must use the same model being tested
df.KR <- get_ddf_Lb(dur_only, fixef(dur_only))
dur_anova <- anova(dur_only)  # compute the anova table once, not twice
Fval <- dur_anova$F
df1 <- dur_anova$Df
omnibus_sig <- 1 - pf(Fval, df1, df.KR)
omnibus_sig
### ## ## ## ## ## ## ##
## LAPSE RATE MODEL ##
## ## ## ## ## ## ## ##
psychometrics$lapse_rate <- with(psychometrics, (lo_asymp + hi_asymp) / 2)
full_model <- lmer(lapse_rate ~ group*duration + wasi_mr_ts + adhd_dx + (1|subject_id),
data=psychometrics)
# p-values for full model
coefs <- data.frame(coef(summary(full_model)))
df.KR <- get_ddf_Lb(full_model, fixef(full_model))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
## model selection: nuisance variables
no_wasi_model <- update(full_model, ~ . - wasi_mr_ts)
anova(full_model, no_wasi_model) # OK to remove wasi
no_adhd_model <- update(full_model, ~ . - adhd_dx)
anova(full_model, no_adhd_model) # OK to remove adhd
no_nuisance_model <- update(full_model, ~ . - wasi_mr_ts - adhd_dx)
anova(full_model, no_nuisance_model) # OK to remove both nuisance parameters
# Does duration matter?
no_duration_model <- lmer(lapse_rate ~ group + (1|subject_id), data=psychometrics)
anova(no_nuisance_model, no_duration_model) # No evidence duration matters for lapse
# Does wj matter?
no_wj_model <- lmer(lapse_rate ~ duration+ (1|subject_id), data=psychometrics)
anova(no_nuisance_model, no_wj_model) #No, we must keep reading!
#### Selected model ####
win_model <- lmer(lapse_rate ~group + (1|subject_id), data = psychometrics)
coefs <- data.frame(coef(summary(win_model)))
df.KR <- get_ddf_Lb(win_model, fixef(win_model))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
# Is there any significant effect of group overall? Omnibus statistical value
group_only <- lmer(lapse_rate ~ group + (1|subject_id), psychometrics)
df.KR <- get_ddf_Lb(group_only, fixef(group_only))
Fval <- anova(group_only)$F
df1 <- anova(group_only)$Df
omnibus_sig = 1-pf(Fval, df1, df.KR)
omnibus_sig
## ## ## ## ## ## ## ##
## PCA MODEL ##
## ## ## ## ## ## ## ##
# DO PCA
params <- psychometrics[,4:7]
PCA<- prcomp(params, scale=TRUE)
psychometrics <- cbind(psychometrics, PCA$x)
summary(PCA)
# Now let's see what predicts the first PCA component
lmfit <- lmer(PC1 ~ group*duration + adhd_dx + wasi_mr_ts + (1|subject_id), psychometrics)
# Summary for full model
coefs <- data.frame(coef(summary(lmfit)))
df.KR <- get_ddf_Lb(lmfit, fixef(lmfit))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
# Remove nuisance variables
lmfit_no_wasi <- update(lmfit, ~ . - wasi_mr_ts)
anova(lmfit, lmfit_no_wasi)
lmfit_no_adhd <- update(lmfit, ~ . - adhd_dx)
anova(lmfit, lmfit_no_adhd)
lmfit_no_nuisance <- update(lmfit, ~ . - wasi_mr_ts - adhd_dx)
anova(lmfit, lmfit_no_nuisance) # OK to remove both nuisances!
coefs <- data.frame(coef(summary(lmfit_no_nuisance)))
df.KR <- get_ddf_Lb(lmfit_no_nuisance, fixef(lmfit_no_nuisance))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
# To the main model
lmfit_no_duration <- lmer(PC1 ~ group + (1|subject_id), data=psychometrics)
anova(lmfit_no_nuisance, lmfit_no_duration) #can't remove main effect of duration
lmfit_no_int <- lmer(PC1 ~ group + duration + (1|subject_id), data=psychometrics)
anova(lmfit_no_nuisance, lmfit_no_int) #can remove interaction
win <- lmer(PC1 ~ group + duration + (1|subject_id), data = psychometrics)
# This is the winner
coefs <- data.frame(coef(summary(win)))
df.KR <- get_ddf_Lb(win, fixef(win))
coefs$p.KR <- 2*(1-pt(abs(coefs$t.value), df.KR))
print(coefs)
#### Is there an significant effect of including group?
group_only <- lmer(PC1 ~ group + (1|subject_id), psychometrics)
df.KR <- get_ddf_Lb(group_only, fixef(group_only))
Fval <- anova(group_only)$F
df1 <- anova(group_only)$Df
omnibus_sig = 1-pf(Fval, df1, df.KR)
omnibus_sig
######## Effect size of duration ################
dur_only <- lmer(PC1 ~ duration*group + (1|subject_id), psychometrics)
df.KR <- get_ddf_Lb(dur_only, fixef(dur_only))
Fval <- anova(dur_only)$F
df1 <- anova(dur_only)$Df
omnibus_sig = 1-pf(Fval, df1, df.KR)
omnibus_sig
################### GET COHENS D ####################################
# Collapse to one row per subject, then compute Cohen's d for the group
# difference on PC1, slope, and lapse rate.
# NOTE(review): cohen.d() is not provided by any package loaded at the top
# of this script (dplyr/lme4/pbkrtest/ggplot2); it presumably comes from the
# effsize (or psych) package -- confirm and add the missing library() call.
psy_sum <- psychometrics %>%
group_by(subject_id)%>%
summarise(PC1 = mean(PC1),
slope = mean(slope),
lapse = mean(lapse_rate),
group = unique(group))
psy_sum$group <- factor(psy_sum$group)
cohen.d(psy_sum$PC1, psy_sum$group)
cohen.d(psy_sum$slope, psy_sum$group)
cohen.d(psy_sum$lapse, psy_sum$group)
|
b1af20f3d31c73bae00fe66f2bd3cd766f9c5390
|
c78d381271668ae9fcb74afd00ece39348e349b1
|
/per-poll-simulations/0193-reid-research/config.R
|
72e519e4fc976b1f338d23d6362e8de7a546b9c2
|
[] |
no_license
|
nzherald/nz-election-prediction
|
b1c1464e2ee0f3cb8bfd109b4ff9f244937f2424
|
5bbafe06a4a1cea09782c4f57210e84a5600b7df
|
refs/heads/master
| 2021-01-01T04:58:12.745919
| 2017-09-21T10:48:16
| 2017-09-21T10:48:16
| 97,279,096
| 0
| 0
| null | 2017-09-16T15:02:30
| 2017-07-14T23:13:56
|
R
|
UTF-8
|
R
| false
| false
| 29
|
r
|
config.R
|
# Maximum number of election simulations to run for this poll
MaxSims = 5000
# presumably the number of days before the election this poll applies to
# (matches the "0193" directory name) -- confirm against the simulation code
DaysTo = 193
|
515be796e3305129c94dbde3803a1ebf06de6be9
|
103cefcd0a90175d953b11b1a13a6c76adb28aef
|
/analyses/photoperiod/photoperiodfig.R
|
b2b672d8bd7830a72e3afd6803f00053b672d1b3
|
[] |
no_license
|
lizzieinvancouver/ospree
|
8ab1732e1245762194db383cdea79be331bbe310
|
9622af29475e7bfaa1b5f6697dcd86e0153a0a30
|
refs/heads/master
| 2023-08-20T09:09:19.079970
| 2023-08-17T10:33:50
| 2023-08-17T10:33:50
| 44,701,634
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,801
|
r
|
photoperiodfig.R
|
#Figure of temporal and spatial effects on daylength for OSPREE photoperiod paper:
#Started 18 Sept 2017 by Ailene
#examples from geosphere package
#https://www.rdocumentation.org/packages/geosphere/versions/1.5-5/topics/daylength
library(geosphere)#daylength(lat, doy)
#generate a vector of daylengths from January 1-June 30 for Washington DC
photo_dc<-daylength(39.0, 1:181)
#generate a vector of daylengths from January 1-June 30 for Montreal
photo_mont<-daylength(45.5, 1:181)
#In 100 years, with temporal shifts earlier 3 days per decade (30 days total) as has been observed (Parmesan 2006)- this is a low end
photo_dc_temp<-daylength(39, c(336:365,1:151))
photo_mont_temp<-daylength(45.5, c(336:365,1:151))
#In 100 years, with spatial shifts of ~6km ( or ~0.05 degrees) per decade (0.5 deg total) poleward as has been observed (Parmesan 2006)- this is a low end
photo_dc_spat<-daylength(39.5, 1:181)
photo_mont_spat<-daylength(46, 1:181)
#Plot
quartz(height=6,width=7)
x<-seq(1:181)
plot(x,photo_dc_temp-photo_dc, type="l", col="red", lwd=2,lty=1,xaxt='n',ylab="Change in daylength (h)",xlab="Month",ylim=c(-1.6,.2))
lines(x,photo_dc_spat-photo_dc, type="l", col="red", lwd=2,lty=2)
lines(x,photo_mont_temp-photo_mont, type="l", col="blue", lwd=2,lty=1)
lines(x,photo_mont_spat-photo_mont, type="l", col="blue", lwd=2,lty=2)
mtext("DC (39 deg)",side=1,line=-4.3, col="red")
mtext("Montreal (45.5 deg)",side=1,line=-0.8, col="blue")
abline(h=0, lwd=2)
axis(1, at=c(1,32,60,91,122,152,182),labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul"))
#legend(150,-1.4,legend=c("Space","Time"), bty="n", lty = c(1,2), lwd=2)
#Try making bar plot by month to see if this shows it more dramatically
date<-strptime(c(seq(1:181)), format = "%j")
month<-substr(date,6,7)
day<-substr(date,9,10)
#photo.df<-cbind(month,day,photo_dc_spat,photo_mont_spat,photo_dc_temp,photo_mont_temp)
photo_dc_spat_month<-aggregate(photo_dc_spat-photo_dc,by=list(month),mean)
photo_dc_temp_month<-aggregate(photo_dc_temp-photo_dc,by=list(month),mean)
photo_mont_spat_month<-aggregate(photo_mont_spat-photo_mont,by=list(month),mean)
photo_mont_temp_month<-aggregate(photo_mont_temp-photo_mont,by=list(month),mean)
quartz(height=5,width=10)
par(mfrow=c(1,2))
barplot(rbind(photo_dc_temp_month$x,photo_dc_spat_month$x),beside=T,ylab="Change in daylength (h)",names.arg = c("Jan","Feb","Mar","Apr","May","Jun"), main="DC (39 deg)")
abline(h=0, lwd=1)
barplot(rbind(photo_mont_temp_month$x,photo_mont_spat_month$x),beside=T,ylab="Change in daylength (h)",names.arg = c("Jan","Feb","Mar","Apr","May","Jun"), main="Montreal (45.5 deg)")
abline(h=0, lwd=1)
#no real difference between dc and montreal in overall patterns. look at lats that are further apart
#choose any lats
lat<-c(15,30,45)
quartz(height=3,width=11)
par(mfrow=c(1,3))
for(i in 1:length(lat)){
photos<-daylength(lat[i], 1:181)
#In 100 years, with temporal shifts earlier 3 days per decade (30 days total) as has been observed (Parmesan 2006)- this is a low end
photos_temp<-daylength(lat[i], c(336:365,1:151))
#In 100 years, with spatial shifts of ~6km ( or ~0.05 degrees) per decade (0.5 deg total) poleward as has been observed (Parmesan 2006)- this is a low end
photos_spat<-daylength(lat[i]+0.5, 1:181)
date<-strptime(c(seq(1:181)), format = "%j")
month<-substr(date,6,7)
photos_spat_month<-aggregate(photos_spat-photos,by=list(month),mean)
photos_temp_month<-aggregate(photos_temp-photos,by=list(month),mean)
barplot(rbind(photos_temp_month$x,photos_spat_month$x),beside=T,ylab="Change in daylength (h)",names.arg = c("Jan","Feb","Mar","Apr","May","Jun"), ylim=c(-1.6,.4),main=c(paste("lat=",lat[i])))
abline(h=0, lwd=1)
if(i==1){legend("bottomleft",legend=c("Shifts in Space","Shifts in Time"),fill=c("white","darkgray"),bty="n")}
}
|
525131f7081ebcfc46df61a502179aed4d4fb191
|
32e0458f7a034d1bbc63b2e251ed485c8672fc53
|
/man/prunePed.Rd
|
48f3056f8ab4ac3cb9af95044e6acc18ce5eaddd
|
[] |
no_license
|
matthewwolak/nadiv
|
8ac285b4d5d5e1de558b3de9019db1c81bdd6bce
|
4d60f7c2a71149780c0cd33aee2b7735e8650619
|
refs/heads/master
| 2023-08-02T13:14:04.450579
| 2023-06-16T02:00:38
| 2023-06-16T02:00:38
| 33,896,065
| 16
| 7
| null | 2023-06-16T02:00:39
| 2015-04-13T21:52:53
|
R
|
UTF-8
|
R
| false
| true
| 2,646
|
rd
|
prunePed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prunePed.R
\name{prunePed}
\alias{prunePed}
\alias{prunePed.default}
\alias{prunePed.numPed}
\title{Prunes a pedigree based on individuals with phenotypes}
\usage{
prunePed(pedigree, phenotyped, ...)
\method{prunePed}{default}(pedigree, phenotyped, ...)
\method{prunePed}{numPed}(pedigree, phenotyped, ...)
}
\arguments{
\item{pedigree}{An object, where the first 3 columns correspond to: ID, Dam,
& Sire. See details.}
\item{phenotyped}{A vector indicating which individuals in the pedigree have
phenotypic information available.}
\item{\dots}{Arguments to be passed to methods}
}
\value{
The pedigree object (can have more columns than just ID, Dam, and
Sire), where the ID column contains an ID for all individuals who are
actually phenotyped or are an ancestor to an individual with a phenotype
(and are thus informative for estimating parameters in the base
population).
}
\description{
This function removes individuals who are either not themselves or not
ancestors to phenotyped individuals
}
\details{
Often mixed effect models run much faster when extraneous information is
removed before running the model. This is particularly so when reducing the
number of random effects associated with a relationship matrix constructed
from a pedigree.
NOTE: more columns than just a pedigree can be passed in the \code{pedigree}
argument.
Missing parents (e.g., base population) should be denoted by either 'NA',
'0', or '*'.
This function is very similar to (and the code is heavily borrowed from) a
function of the same name in the \code{MCMCglmm} package by Jarrod Hadfield.
}
\examples{
# Make a pedigree (with sex) from the warcolak dataset
warcolak_ped <- warcolak[, 1:4]
# Reduce the number of individuals that have a phenotype for "trait1" in
#the warcolak dataset
t1phenotyped <- warcolak
t1phenotyped[sample(seq.int(nrow(warcolak)), 1500, replace = FALSE), "trait1"] <- NA
t1phenotyped <- t1phenotyped[which(!is.na(t1phenotyped$trait1)), ]
# The following will give a pedigree with only individuals that have a
# phenotype for "trait1" OR are an ancestor to a phenotyped individual.
pruned_warcolak_ped <- prunePed(warcolak_ped, phenotyped = t1phenotyped$ID)
# Now compare the sizes (note, pruned_warcolak_ped retained its column indicating sex.
# We could have kept all of the data associated with individuals who had phenotypic
# information on "trait1" by instead specifying
# prunePed(warcolak, phenotyped = t1phenotyped$ID)
dim(warcolak_ped)
dim(pruned_warcolak_ped)
}
\seealso{
\code{\link[nadiv]{prepPed}}
}
|
a31e8890f0f220074022dee98cda48af482f490a
|
507d088e311f38ac0e7d97251957d34205d0aafb
|
/train.R
|
7c8045c1a54919c023f3943ea0a6b5494e7f94d0
|
[
"MIT"
] |
permissive
|
mrecos/R_keras_Unet
|
81e18d24d1405db2a7113030b8579ce12e93912c
|
58818811a0f0ee9cdaaafe02a5ec985c932517d3
|
refs/heads/master
| 2021-10-09T03:23:26.241715
| 2018-12-20T15:51:20
| 2018-12-20T15:51:20
| 160,965,416
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,898
|
r
|
train.R
|
library("config")
library("magick")
library("abind")
#library("parallel")
#library("doParallel")
#library("foreach")
config <- config::get(config = "testing", file = "./R_Unet/R_Unet_config.yml")
source(file = "./R_Unet/R_Unet_functions.R")
source(file = "./R_Unet/R_Unet.R")
model <- get_unet_128(input_shape = c(config$IMAGE_SIZE, config$IMAGE_SIZE, config$N_CHANNELS),
num_classes = config$NUM_CLASSES)
# Create Interators ---------------------------------------------
train_infinite_iterator <- py_iterator(train_infinite_generator(image_path = config$TRAIN_IMG,
mask_path = config$TRAIN_MSK,
image_size = config$IMAGE_SIZE,
batch_size = config$BATCH_SIZE,
epochs = config$EPOCHS,
amt_train = config$AMT_TRAIN,
use_augmentation = config$USE_AUGMENTATION,
augment_args = config$AUGMENT_ARGS,
mode = "train",
create_coord_logs = FALSE))
val_infinite_iterator <- py_iterator(train_infinite_generator(image_path = config$VAL_IMG,
mask_path = config$VAL_MSK,
image_size = config$IMAGE_SIZE,
batch_size = config$BATCH_SIZE,
epochs = config$EPOCHS,
amt_train = config$AMT_VAL,
use_augmentation = FALSE,
mode = "validate",
create_coord_logs = FALSE))
predict_generator <- train_infinite_generator(image_path = config$VAL_IMG,
mask_path = config$VAL_MSK,
image_size = config$IMAGE_SIZE,
epochs = 1,
amt_train = config$AMT_PREDICT,
use_augmentation = FALSE,
batch_size = config$AMT_PREDICT,
mode = "predict",
create_coord_logs = FALSE)
# Training -----------------------------------------------------
# tensorboard("logs_r")
# Keras callbacks: tensorboard logging, early stopping and LR reduction on
# the validation metric, and best-weights checkpointing.
# "val_python_function" is presumably the name Keras assigns to the custom
# R-defined dice-coefficient metric -- TODO confirm against the compile step
# in R_Unet.R.
callbacks_list <- list(
callback_tensorboard("logs_r"),
callback_early_stopping(monitor = "val_python_function",
min_delta = 1e-4,
patience = 8,
verbose = 1,
mode = "max"),
callback_reduce_lr_on_plateau(monitor = "val_python_function",
factor = 0.1,
patience = 4,
verbose = 1,
#epsilon = 1e-4,
mode = "max"),
callback_model_checkpoint(filepath = "weights_r/unet128_{epoch:02d}.h5",
monitor = "val_python_function",
save_best_only = TRUE,
save_weights_only = TRUE,
mode = "max" ),
# NOTE(review): this `<-` inside list() both assigns the object in the
# calling environment AND appends it as an unnamed final callback --
# subtle, but apparently intentional (the variable is referenced by the
# commented-out write.csv below); confirm before refactoring
train_dice_coef_by_batch <- dice_coef_by_batch$new()
)
# fit with infinite generator
history <- model %>% fit_generator(
train_infinite_iterator,
steps_per_epoch = as.integer(config$AMT_TRAIN / config$BATCH_SIZE),
epochs = config$EPOCHS,
validation_data = val_infinite_iterator,
validation_steps = as.integer(config$AMT_VAL / config$BATCH_SIZE),
verbose = 1,
callbacks = callbacks_list,
workers = 0 # so that the precise number of images are generated
)
# write.csv(data.frame(dice_coef = train_dice_coef_by_batch$dice_coef),
# file = paste0("./logs_r/","dice_by_batch_",format(Sys.time(), "%M-%H_%d_%m_%Y"),".csv"),
# row.names = FALSE)
model %>% evaluate_generator(val_infinite_iterator, steps = 10,
workers = 0) # so that the precise number of images are generated
predict_batch <- as.matrix(predict_generator()[[1]])
preds <- model %>% predict(predict_batch, steps = 1)
plot_pred_tensor_overlay(preds, predict_batch, 1, alpha = 0.45, mask=FALSE)
#### TESTING AREA train_generator step through
### testing infinite generator
inf_test <- train_infinite_generator(image_path = config$TRAIN_IMG,
mask_path = config$TRAIN_MSK,
image_size = config$IMAGE_SIZE,
batch_size = config$BATCH_SIZE,
use_augmentation = config$USE_AUGMENTATION,
augment_args = config$AUGMENT_ARGS)
inf_batch <- inf_test()
plot_pred_tensor_overlay(as.matrix(inf_batch[[2]]), as.matrix(inf_batch[[1]]), indx = 1,
alpha = 0.45, mask=TRUE, use_legend = FALSE)
# or a more manual plot
image_test <- as.matrix(inf_batch[[1]])
plot(as.raster(image_test[1,,,]))
# rasterImage(as.raster(image_test[1,,,]),0,0,128,128)
mask_test <- as.matrix(inf_batch[[2]])
plot(as.raster(mask_test[1,,,]))
# rasterImage(as.raster(mask_test[1,,,]),0,0,128,128, alpha = 0.5)
###### END TESTING ###
|
ae2b3110a8151c3708241053cdc59e0f2d413acd
|
59e78bdb65a0e75bfdb25bf7a2db7d3cbd4f5785
|
/man/boral_coefs.Rd
|
6a075d6c02b5378386208c0075583ebeb4fd3df2
|
[] |
no_license
|
mjwestgate/boralis
|
15e7b7b23ae4f3796f93c39fb003df5259b0e4ba
|
a216cd43a3a0ff7e7da3b4d4ecf66c70e60e4cf0
|
refs/heads/master
| 2020-04-17T07:01:50.400845
| 2019-05-23T05:11:15
| 2019-05-23T05:11:15
| 166,350,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,696
|
rd
|
boral_coefs.Rd
|
\name{boral_coefs}
\alias{boral_coefs}
\title{Extract regression coefficients from a boral model}
\description{Extracts the data necessary for drawing a caterpillar plot of regression coefficients from a boral model. Intended as an internal function called by \code{\link{boral_coefsplot}}, but included here in for users interested in exploring the data themselves.}
\usage{
boral_coefs(x, covname, est)
}
\arguments{
\item{x}{A boral model}
\item{covname}{A covariate for which coefficients should be extracted. If missing or NULL, then values are extracted for all covariates in the model.}
\item{type}{What estimates should be plotted; either 'traits' or 'coefs'.}
\item{est}{A choice of either the posterior median (est = "median") or posterior mean (est = "mean"), which are then used as the point estimates in the lines. Default is posterior median.}
}
\details{
}
\value{
A data.frame of predictions from the boral model
}
\seealso{
\code{\link{boral_coefsplot}} for the related plotting function.
}
\examples{
# use example from boral:
library(mvabund) ## Load a dataset from the mvabund package
data(spider)
y <- spider$abun
X <- scale(spider$x)
## NOTE: The values below MUST NOT be used in a real application;
## they are only used here to make the examples run quick!!!
example_mcmc_control <- list(
n.burnin = 10,
n.iteration = 100,
n.thin = 1)
spiderfit_nb <- boral(
y,
X = X,
family = "negative.binomial",
lv.control = list(num.lv = 2),
row.eff = "fixed",
mcmc.control = example_mcmc_control
)
# generate a data.frame of predictions
df <- boral_coefs(spiderfit_nb, "bare.sand")
# ditto, but for all variables
df <- boral_coefs(spiderfit_nb)
}
|
b591773e9d70abe37773139aa663627101d7c428
|
2ade465c017ed359b649bc958d6d3c50dc37db0c
|
/Task_02/task02 (Autosaved).R
|
e50341e573341c9512e61052aa61ba847bed4909
|
[] |
no_license
|
CheyenneMarie07/Tasks
|
7286a9a85e89695188fbbbf01a534e3d58b42072
|
1a93e838dcd2af6970efd8e168c80f37ea8e885c
|
refs/heads/master
| 2023-04-14T18:31:59.309751
| 2021-04-30T13:57:45
| 2021-04-30T13:57:45
| 332,087,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,817
|
r
|
task02 (Autosaved).R
|
## Task 02 script: read Beren's raw feeding records, derive an age-in-days
## column, and summarise bottle feeds by day and by caregiver.

setwd('//Users//cheyenneyoung//Desktop//Evolution//Tasks//Task_02')
Data <- read.csv('http://jonsmitchell.com/data/beren.csv',stringsAsFactors=F)
write.csv(Data,"rawdata.csv",quote=F)

## Basic inspection of the raw table.
length(Data)
nrow(Data)
ncol(Data)
print(Data)
head(Data)
colnames(Data)
Data[1, ]
Data[2, ]
Data[1:3, ]
Data[1:3, 4]
Data[1:5, 1:3]
Data[257, 1:3]

## Three equivalent ways of subsetting the bottle-feed rows; the
## element-wise comparisons below confirm they give the same result.
Feeds<- which(Data[ ,9]=="bottle")
berenMilk <- Data[Feeds,]
head(berenMilk)
nrow(berenMilk)
print(berenMilk)
Feeds<-which(Data[,"event"]=="bottle")
berenMilk1<-Data[Feeds,]
Feeds<-which(Data$event=="bottle")
berenMilk2<-Data[Feeds,]
berenMilk == berenMilk1
berenMilk1 == berenMilk2
berenMilk == berenMilk2

## Build a Date from the year/month/day columns, then express each row's
## date as an age in days relative to the "birth" event.
dayID<-apply(Data,1,function(x) paste(x[1:3],collapse="-"))
dateID<-sapply(dayID,as.Date,format="%Y-%m-%d",origin="2019-04-18")
Data$age<-dateID-dateID[which(Data$event=="birth")]
head(Data)

## Sort by age and save the cleaned data set.
beren2 <- Data
beren3 <- beren2[order(beren2$age),]
head(Data)
head(beren2)
head(beren3)
write.csv(beren3,"berennew.csv",quote=F,row.name=FALSE)
#cd ~/Desktop/Evolution/Tasks
#git add -A
#git commit -m "Cheyenne Young Task 02 upload a"
#git push -u origin master
# start on task02b here
#Hypothesis III is testable.
#The other two are not testable because there is not enough information to assume the two.

## Per-day summaries of bottle feeds.
Feeds <- which(beren3$event == "bottle")
# BUG FIX: the original line was missing its closing parenthesis, which
# made the rest of the script unparseable.
avgMilk <- mean(beren3$value[Feeds])
avgFeed <- tapply(beren3$value [Feeds], beren3$age[Feeds], mean)
varFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], var)
totalFeed <- tapply(beren3$value[Feeds], beren3$age[Feeds], sum)
numFeeds <- tapply(beren3$value[Feeds], beren3$age[Feeds], length)

## Correlation between feed size and age, and an ANOVA of feed size
## by caregiver.
cor(beren3$value[Feeds], beren3$age[Feeds])
cor.test(beren3$value[Feeds], beren3$age[Feeds])
berenCor <- cor.test(beren3$value[Feeds], beren3$age[Feeds])
summary(berenCor)
berenANOVA <- aov(beren3$value[Feeds] ~ beren3$caregiver[Feeds])
boxplot(beren3$value[Feeds] ~ beren3$caregiver[Feeds], xlab= "who gave the bottle", ylab = "amount of milk consumed (oz)")

## Plot total milk per day, once on screen and once into a PDF.
?par
par(las=1, mar=c(5,5,1,1), mgp=c(2, 0.5, 0), tck=-0.01)
plot(as.numeric(names(totalFeed)), totalFeed, type="b", pch=16, xlab="age in days", ylab="ounces of milk")
abline(h=mean(totalFeed), lty=2, col='red')
pdf("r02b-totalMilkByDay.pdf", height = 4, width = 4)
par(las=1, mar=c(5,5,1,1), mgp=c(2, 0.5, 0), tck=-0.01)
plot(as.numeric(names(totalFeed)), totalFeed, type="b", pch=16, xlab="age in days", ylab="ounces of milk")
abline(h=mean(totalFeed), lty=2, col='red')
dev.off()
#The data on the graph is too close together to analyze the data points being graphed.
source("http://jonsmitchell.com/code/plotFxn02b.R")
#New testable hypothesis that there is a correlation between the amount of solid food Beren eats and the bowel movements that are made or that there is a correlation with the amount of bottles of milk Beren consumes and the naps he takes.
unique(beren3$event)
|
ccd788b1d632780a431091cf05965a7d8140c1ca
|
3e14540a1ad52f1a26b2a6102c6d6e478bf2065a
|
/map_test1.r
|
6c725103f6d1c2ac376abed7218e2f0358a7d22e
|
[] |
no_license
|
vedapragna/germany_map
|
d4aeae9693317f48e933115d22161c9d02ba680c
|
cf3afbd3ce4dd7d9d9566e3a1abc89a8c214dfa1
|
refs/heads/master
| 2020-12-02T15:24:32.974788
| 2018-02-04T17:38:09
| 2018-02-04T17:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
map_test1.r
|
# Exploratory script: build a choropleth of San Francisco noise reports
# by supervisor district from a district shapefile plus a CSV of reports.
# NOTE(review): rm(list = ls()) and setwd() make this script machine-specific.
rm(list=ls(all=TRUE))
library(choroplethr)
library(dplyr)
library(ggplot2)
library(rgdal)
library(maptools)
library(gpclib)
library(readr)
library(R6)
#setwd("~/achim/statistik_r/geo/plz-gebiete.shp/")
#sf <- readOGR(dsn = ".", layer = "plz-gebiete")
#ari lamstein example
setwd("~/Documents/entwickeln/statistik_r/mapping_r/geo/Supervisor Districts as of April 2012")
# Read the supervisor-district shapefile from the working directory.
sf <- readOGR(dsn = ".", layer = "geo_export_ade8ccf1-8c02-440d-8163-2f9d6c79112e")
#data file
df <- read_csv("~/Documents/entwickeln/statistik_r/mapping_r/Noise_Reports.csv")
plot(sf)
class(sf)
head(sf)
# Grant the gpclib licence so fortify() can union polygons.
gpclibPermit()
# Flatten the spatial object into a plain data frame usable by ggplot.
sf@data$id <- rownames(sf@data)
sf.point <- fortify(sf, region="id")
sf.df <- inner_join(sf.point,sf@data, by="id")
head(sf.df)
ggplot(sf.df, aes(long, lat, group=group )) + geom_polygon()
# NOTE(review): group_by("Supervisor.District") groups by a constant string
# literal, so this summarise collapses to a single row -- confirm the
# intended column (the second attempt below uses `Supervisor District`).
df = df %>%
select("Supervisor.District", Category) %>%
filter(Category == "Noise Report") %>%
group_by("Supervisor.District") %>%
summarise(n = n())
# NOTE(review): df was already collapsed above, so df$Category no longer
# exists at this point; this pipeline looks like an alternative attempt.
df2 <- select(df, `Supervisor District`, as.integer( df$Category)) %>%
filter( df$Category == "Noise Report") %>%
group_by(`Supervisor District`) %>%
summarise(n = n())
# choroplethr matches map rows to data rows via a "region" column.
sf.df$region <- sf.df$supervisor
head(sf.df)
# Custom choropleth class hard-wired to the supervisor-district map frame.
SFChoropleth <- R6Class("SFChoropleth",
inherit = choroplethr:::Choropleth,
public = list(
initialize = function(user.df) {
super$initialize(sf.df, user.df)
}
)
)
# choroplethr expects exactly the columns "region" and "value".
colnames(df2) = c("region", "value")
# NOTE(review): `c` shadows base::c from here on -- consider another name.
c <- SFChoropleth$new(df2)
c$set_num_colors(2)
c$render_with_reference_map()
c$render()
c$title = "abc"
c$legend = "dfg"
|
a6460cb0a38489649110a79ac55dab996de283e4
|
17224ed7e6b364814c3a0b3cd55ccb1d3000296d
|
/plot4.R
|
c406453dcc2d9b7794c72e00154002cb60568838
|
[] |
no_license
|
andycook/ExData_Plotting1
|
9c145582c78539701adf161d7950a031eb089013
|
3f186bdc37e63e0a04c3baf5612605ee03b791bf
|
refs/heads/master
| 2020-12-25T01:43:14.745523
| 2014-06-09T00:02:33
| 2014-06-09T00:02:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
plot4.R
|
## plot4.R: draw four panels of household power consumption for
## 1-2 February 2007 and write them to plot4.png.
library(data.table)

# Load the full data set; "?" marks missing values in the source file.
power_all <- data.table(read.table("./data//household_power_consumption.txt", sep = ";", header = TRUE, na.strings="?"))

# Keep only the rows from the two target days.
obs_date <- as.Date(power_all$Date, "%d/%m/%Y")
day_one <- as.Date("01/02/2007", "%d/%m/%Y")
day_two <- as.Date("02/02/2007", "%d/%m/%Y")
power_feb <- power_all[(obs_date == day_one) | (obs_date == day_two), ]

png(filename = "plot4.png")
# Fill the 2x2 grid column-first: panels 1-2 left, panels 3-4 right.
par(mfcol=c(2,2))

# Full timestamp used as the x axis of every panel.
timestamp <- strptime(paste(power_feb$Date, power_feb$Time, sep=" "),format='%d/%m/%Y %H:%M:%S')

# Panel 1 (top left): global active power over time.
plot(timestamp, power_feb$Global_active_power, type="l", xlab="", ylab="Global Active Power", main="")

# Panel 2 (bottom left): the three sub-metering series overlaid.
plot(timestamp, power_feb$Sub_metering_1, type="n", xlab="", ylab="Energy sub metering", main="")
lines(timestamp, power_feb$Sub_metering_1)
lines(timestamp, power_feb$Sub_metering_2, col="red")
lines(timestamp, power_feb$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty="n", inset = .0, col=c("black", "red", "blue"), lwd=1)

# Panel 3 (top right): voltage over time.
plot(timestamp, power_feb$Voltage, type="l", xlab="datetime", ylab="Voltage", main="")

# Panel 4 (bottom right): global reactive power over time.
plot(timestamp, power_feb$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power", main="")

dev.off()
|
8fad727948203c018b7f68e205f03e4bb7c48271
|
7b2983670bb3f5594ff6dae4fa7228117a00afc2
|
/dara_structure_R.R
|
03e5e9b1df99ccc3011d8b9a94ed087230a79ed0
|
[] |
no_license
|
ee15sa/Intro_to_R
|
d3d8d90275d98ed259cc85ed3e5c0897de00b966
|
0fcb4d9b58b01a702d8fa29bea32a19e4f23bec8
|
refs/heads/master
| 2021-05-11T18:45:14.965363
| 2018-01-17T16:15:41
| 2018-01-17T16:15:41
| 117,838,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 862
|
r
|
dara_structure_R.R
|
# Author: Murat Okumah
# Date: 10 January 2018
# Purpose: walk through basic R data structures (data frames, vectors,
# factors) using the feline example data set.

# Load the example data frame and print it.
cats <- read.csv(file = "data/feline_data.csv")
cats

# Columns are addressed with $.
cats$weight
cats$coat

# Arithmetic on a column is vectorised: add 2 kg to every weight.
cats$weight + 2

# Inspect the underlying storage types and the data-frame class.
typeof(cats$weight)
typeof(cats$coat)
class(cats)

# Vectors: logical by default, then an explicitly character one.
my_vector <- vector(length = 3)
my_vector
my_vector <- vector(mode = "character", length = 3)

# Build vectors with c(); mixing numbers and text coerces to character.
combine_vector <- c(2, 4, 8)
combine_vector
combine_vector <- c("2", "4", "eight")
char_vector <- c("2", "4", "8")
num_vector <- as.numeric(char_vector)

# A named numeric sequence: double 1..26 and label it with the letters.
newvector <- (1:26) * 2
names(newvector) <- LETTERS
newvector

# Factors and levels.
coats <- c('tabby', 'tortoiseshell', 'tortoiseshell', 'black', 'tabby')
coats
# structure of the plain character vector vs. the factor version
str(coats)
categories <- factor(coats)
class(categories)
|
1297fdb8051b7c1c4470d1d2c9405910ed7aa332
|
6783a205b16a9a0edc07d336974588401e8d041d
|
/man/hgch_bar_DatNum.Rd
|
588726ffe40780e545198741caba710a167f3991
|
[] |
no_license
|
isciolab/hgchmagic
|
91f0307a862700c7af9ff3bcedf314620815c54e
|
4c580a9c60a27610e1bc64dc107d43ec55867c00
|
refs/heads/master
| 2020-05-01T18:42:05.017783
| 2018-08-07T03:06:25
| 2018-08-07T03:06:25
| 177,629,911
| 0
| 1
| null | 2019-03-25T17:05:44
| 2019-03-25T17:05:43
| null |
UTF-8
|
R
| false
| true
| 888
|
rd
|
hgch_bar_DatNum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bars.R
\name{hgch_bar_DatNum}
\alias{hgch_bar_DatNum}
\title{Bar (dates, numbers)}
\usage{
hgch_bar_DatNum(data, title = NULL, subtitle = NULL, caption = NULL,
horLabel = NULL, verLabel = NULL, horLine = NULL, horLineLabel = NULL,
verLine = NULL, verLineLabel = NULL, agg = "sum", orientation = "ver",
marks = c("", "."), nDigits = NULL, dropNa = FALSE,
colorHighlightValue = "#F9B233", percentage = FALSE, format = c("", ""),
highlightValue = NULL, order = NULL, sort = "no", sliceN = NULL,
tooltip = list(headerFormat = NULL, pointFormat = NULL), export = FALSE,
thema = tma())
}
\arguments{
\item{data}{A data.frame}
}
\value{
Highcharts visualization
}
\description{
Compare quantities over dates
}
\section{ctypes}{
Dat-Num
}
\examples{
hgch_bar_DatNum(sampleData("Dat-Num", nrow = 10))
}
|
43e26d56054dfcae98464d9880eafc78a0ef1ed5
|
1a62b73c8d330398dd74478d1490c07ae7e2f29a
|
/cachematrix.R
|
adf296233aa4290f255883e2503d16fb2187300e
|
[] |
no_license
|
pnovosad/ProgrammingAssignment2
|
329021a17e9d08dab912b157d1891ba7729adcba
|
b079bcbb28971c1ff143ff6dfdffe3bab7002bab
|
refs/heads/master
| 2021-04-12T03:16:32.534476
| 2018-03-17T20:20:39
| 2018-03-17T20:20:39
| 125,661,593
| 0
| 0
| null | 2018-03-17T19:17:42
| 2018-03-17T19:17:41
| null |
UTF-8
|
R
| false
| false
| 1,970
|
r
|
cachematrix.R
|
## PA2 week 3 by pn, 20180317
## <- : single arrow assignment operator works at the current level
## <<-: double arrow assignment operator can modify variables in parent levels
## a closure is a function written by/in another function
## makeCacheMatrix(): creates a special matrix object that can cache its inverse
## Create a matrix wrapper that can cache its inverse.
##
## @param x A square invertible matrix.
## @return A list of four closures sharing one environment:
##   set(y)            replace the stored matrix and drop the cached inverse
##   get()             return the stored matrix
##   setInverse(inverse) store a computed inverse in the cache
##   getInverse()      return the cached inverse, or NULL if not yet set
## The returned list is the expected input of cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  list(
    set = function(y) {
      # `<<-` writes into the enclosing environment shared by all closures.
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inv <<- inverse,
    getInverse = function() cached_inv
  )
}
## cacheSolve(): computes the inverse of the matrix returned by makeCacheMatrix().
## if the inverse has already been calculated and the matrix has not changed, it will
## retrieve the inverse from the cache directly
## Return the inverse of the matrix wrapped by makeCacheMatrix().
##
## @param x The list of closures returned by makeCacheMatrix().
## @param ... Extra arguments forwarded to solve().
## @return The inverse matrix. On a cache hit the stored copy is reused
##   (with a message); on a miss the inverse is computed and cached.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("returning cached matrix data ...")
  }
  cached
}
|
6a66176de2914371f9d3e9d2d15b7dc9679fad98
|
d422bbcd3a9ca6fcdb8dd1994a550ff988d1990e
|
/Segmentation and Discriminant Analysis -Car Seats Data.R
|
63d5570437aec69d96992770153107e5534152d7
|
[] |
no_license
|
Drooz/HW4-BigData
|
8416864d85a65695a2e48630e47c4396f0130bda
|
c9f2d9c2a9b9fa4b7038aec6948132c26c592f3e
|
refs/heads/master
| 2020-04-29T09:26:08.452684
| 2019-03-16T21:08:35
| 2019-03-16T21:08:35
| 176,024,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,818
|
r
|
Segmentation and Discriminant Analysis -Car Seats Data.R
|
#################################################################
## Marketing analytics ##
## Exercise in Segmentation AND Discriminant Analysis ##
## Carseats Data ##
#################################################################
# NOTE(review): the header says "Carseats Data" but the script actually
# loads Churn_Modelling.csv and fits a churn classifier -- confirm which
# data set this exercise is meant to use.
# free memory
# NOTE(review): rm(list = ls()) wipes the whole workspace; risky in
# shared/sourced scripts.
rm(list = ls())
gc()
#The tree library is used to construct classification trees.
library (tree)
library (ISLR)
library(randomForest)
library(caret)
library(e1071)
# Load the raw churn data (path is machine-specific).
data <-read.csv("C:\\Users\\Dr.ooz\\Downloads\\Churn_Modelling.csv",header=T)
#Observe a few examples
head(data)
#Remove Some Attributes
# Drops the first three columns (identifier-style attributes).
finaldata = subset(data, select = -c(1,2,3) )
#Convert tot dummy
# One-hot encode Gender, dropping the first level to avoid collinearity,
# then remove the original Gender column.
results <- fastDummies::dummy_cols(finaldata,select_columns = "Gender" ,remove_first_dummy = TRUE)
df = subset(results, select = -c(Gender) )
# Convert categorical variables to numeric
# (unclass() maps factor levels to their integer codes).
must_convert<-sapply(df,is.factor)
Geography<-sapply(df[,must_convert],unclass)
f1<-cbind(df[,!must_convert],Geography)
# Target variable: Exited (churned or not), treated as a factor.
f1$Exited <- factor(f1$Exited)
x <- subset(f1, select = -c(Exited))
y <- subset(f1, select = c(Exited))
# Cross-validation setup:
# 10 folds, repeated 3 times (repeats add runtime but stabilise estimates).
control <- trainControl(method='repeatedcv',
number=10,
repeats=3)
#Metric used to compare models is Accuracy
metric <- "Accuracy"
set.seed(123)
# Number of randomly selected variables per split is mtry;
# sqrt(p) is the conventional default for classification forests.
mtry <- sqrt(ncol(x))
tunegrid <- expand.grid(.mtry=mtry)
# Fit a random forest via caret, tuning only over the single mtry value.
model <- train(Exited~.,
data=f1,
method='rf',
metric='Accuracy',
tuneGrid=tunegrid,
trControl=control)
# Summarise Results
print(model)
|
55966b8aad94d2c3501342bec8e33e674cc061e8
|
184d33fbe6d0ab73a260d0db9d3849df00d33786
|
/tm.plugin.alceste/R/readAlceste.R
|
2c2d161f373280444512aca2135450233ecbf47c
|
[] |
no_license
|
nalimilan/R.TeMiS
|
65660d9fbe4c8ca7253aeba5571eab4445736c99
|
3a8398038595807790087c36375bb26417ca606a
|
refs/heads/master
| 2023-04-30T18:04:49.721122
| 2023-04-25T19:45:04
| 2023-04-25T19:45:04
| 81,315,737
| 25
| 7
| null | 2020-06-29T21:45:06
| 2017-02-08T10:07:16
|
C
|
UTF-8
|
R
| false
| false
| 1,024
|
r
|
readAlceste.R
|
# Reader for corpora in the Alceste text format: each document starts with
# a header line ("**** ..." or "<n> *...") carrying starred metadata
# variables, optionally preceded by a numeric document ID.
readAlceste <- FunctionGenerator(function(elem, language, id) {
function(elem, language, id) {
# Pull a leading numeric ID from the first header line, e.g. "12 *...".
id2 <- regmatches(elem$content[1], regexec("^([[:digit:]]+) \\*", elem$content[1]))[[1]][2]
# Only override default ID if present
if(!is.na(id2))
id <- id2
# Strip the "**** " (or "<n> *") prefix, leaving only the starred variables.
starred <- sub("^(\\*\\*\\*\\* +|[[:digit:]]+ \\*)", "", elem$content[1])
# Split on literal "*" and drop spaces to get one token per variable.
# NOTE(review): the extra positional `starred` argument here binds to the
# perl= parameter of strsplit and looks accidental -- confirm.
varexpr <- gsub(" ", "", strsplit(starred, "*", starred, fixed=TRUE)[[1]], fixed=TRUE)
# Each non-empty token is either "name" (boolean flag) or "name_value".
vars <- strsplit(varexpr[nchar(varexpr) > 0], "_", fixed=TRUE)
# Theme lines, ignored
skip <- which(grepl("^-\\*", elem$content))
# Document body = all lines except the header line and theme lines.
doc <- PlainTextDocument(x = elem$content[-c(1, skip)],
id = id,
language = language)
# Attach each starred variable as document metadata.
for(v in vars) {
# Boolean variable (without value after _)
if(is.na(v[2]))
meta(doc, v[1]) <- TRUE
else
meta(doc, v[1]) <- v[2]
}
doc
}
})
|
ee5e3bfbbce596a043ff00b47dc878118bd72d43
|
2935d597895945d2a32b6701f75e918405533a57
|
/DMC1/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.001_q0.01/genomewide/peak_read_counts/RPKM/DMC1_Rep1_ChIP_peaks_ranLoc_read_counts_log2ChIPcontrol_RPKM.R
|
785e9b52aa76f24e90964bd187efe36cae1f29c0
|
[] |
no_license
|
ajtock/wheat
|
7e39a25664cb05436991e7e5b652cf3a1a1bc751
|
b062ec7de68121b45aaf8db6ea483edf4f5f4e44
|
refs/heads/master
| 2022-05-04T01:06:48.281070
| 2022-04-06T11:23:17
| 2022-04-06T11:23:17
| 162,912,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,423
|
r
|
DMC1_Rep1_ChIP_peaks_ranLoc_read_counts_log2ChIPcontrol_RPKM.R
|
#!/applications/R/R-3.5.0/bin/Rscript
# Get read counts for each peak and ranLoc (with equivalent width distribution to peaks)
# and plot summaries
# Usage:
# ./DMC1_Rep1_ChIP_peaks_ranLoc_read_counts_log2ChIPcontrol_RPKM.R DMC1_Rep1_ChIP DMC1 MNase_Rep1 MNase 'chr3B'
# Example argument values for interactive use (kept commented out):
#libName <- "DMC1_Rep1_ChIP"
#dirName <- "DMC1"
#covlibName <- "MNase_Rep1"
#covdirName <- "MNase"
#chrs <- unlist(strsplit("chr3B",
# split = ","))
# Command-line arguments:
# 1) libName: ChIP library whose peaks are analysed
# 2) dirName: directory name of that library under .../analysis/wheat/
# 3) covlibName: coverage library counted over the peaks
# 4) covdirName: directory name of the coverage library
# 5) chrs: comma-separated chromosome names ("allchrs" keeps all, per the
# subsetting checks later in the script)
args <- commandArgs(trailingOnly = T)
libName <- args[1]
dirName <- args[2]
covlibName <- args[3]
covdirName <- args[4]
chrs <- unlist(strsplit(args[5],
split = ","))
# Bioconductor/CRAN packages for BAM handling and interval overlaps.
library(GenomicAlignments)
library(ShortRead)
library(rtracklayer)
library(regioneR)
inDir <- paste0("/home/ajt200/analysis/wheat/", dirName,
"/snakemake_ChIPseq/mapped/both/peaks/PeakRanger1.18/ranger/p0.001_q0.01/")
R1R3 <- unlist(strsplit(c("Agenome_distal,Bgenome_distal,Dgenome_distal"),
split = ","))
R2C <- unlist(strsplit(c("Agenome_interstitial,Bgenome_interstitial,Dgenome_interstitial,Agenome_proximal,Bgenome_proximal,Dgenome_proximal"),
split = ","))
# Load R1R3peaks
R1R3peaks <- lapply(seq_along(R1R3), function(x) {
read.table(paste0(inDir,
libName,
"_rangerPeaksGRmergedOverlaps_minuslog10_p0.001_q0.01_noMinWidth_in_",
R1R3[x], ".gff"),
header = F, stringsAsFactors = F)
})
# Concatenate if R1R3peaks is a list of R1R3peak sets
if(length(R1R3peaks) > 1) {
R1R3peaks <- do.call(rbind, R1R3peaks)
} else {
R1R3peaks <- R1R3peaks[[1]]
}
# Convert R1R3peaks into GRanges
R1R3peaksGR <- GRanges(seqnames = R1R3peaks[,1],
ranges = IRanges(start = R1R3peaks[,4],
end = R1R3peaks[,5]),
strand = "*")
# Load R2Cpeaks
R2Cpeaks <- lapply(seq_along(R2C), function(x) {
read.table(paste0(inDir,
libName,
"_rangerPeaksGRmergedOverlaps_minuslog10_p0.001_q0.01_noMinWidth_in_",
R2C[x], ".gff"),
header = F, stringsAsFactors = F)
})
# Concatenate if R2Cpeaks is a list of R2Cpeak sets
if(length(R2Cpeaks) > 1) {
R2Cpeaks <- do.call(rbind, R2Cpeaks)
} else {
R2Cpeaks <- R2Cpeaks[[1]]
}
# Convert R2Cpeaks into GRanges
R2CpeaksGR <- GRanges(seqnames = R2Cpeaks[,1],
ranges = IRanges(start = R2Cpeaks[,4],
end = R2Cpeaks[,5]),
strand = "*")
# Subset peaks to specified chromosome(s)
if(chrs != "allchrs") {
R1R3peaksGR <- R1R3peaksGR[seqnames(R1R3peaksGR) %in% chrs]
R2CpeaksGR <- R2CpeaksGR[seqnames(R2CpeaksGR) %in% chrs]
}
print(paste0(libName, " R1R3peaks width median ="))
print(median(width(R1R3peaksGR)))
print(paste0(libName, " R1R3peaks width range ="))
print(range(width(R1R3peaksGR)))
print(paste0(libName, " R2Cpeaks width median ="))
print(median(width(R2CpeaksGR)))
print(paste0(libName, " R2Cpeaks width range ="))
print(range(width(R2CpeaksGR)))
# Load R1R3ranLoc
R1R3ranLoc <- lapply(seq_along(R1R3), function(x) {
read.table(paste0(inDir,
libName,
"_rangerPeaksGRmergedOverlaps_minuslog10_p0.001_q0.01_noMinWidth_in_",
R1R3[x], "_randomLoci.gff"),
header = F, stringsAsFactors = F)
})
# Concatenate if R1R3ranLoc is a list of R1R3peak sets
if(length(R1R3ranLoc) > 1) {
R1R3ranLoc <- do.call(rbind, R1R3ranLoc)
} else {
R1R3ranLoc <- R1R3ranLoc[[1]]
}
# Convert R1R3ranLoc into GRanges
R1R3ranLocGR <- GRanges(seqnames = R1R3ranLoc[,1],
ranges = IRanges(start = R1R3ranLoc[,4],
end = R1R3ranLoc[,5]),
strand = "*")
# Load R2CranLoc
R2CranLoc <- lapply(seq_along(R2C), function(x) {
read.table(paste0(inDir,
libName,
"_rangerPeaksGRmergedOverlaps_minuslog10_p0.001_q0.01_noMinWidth_in_",
R2C[x], "_randomLoci.gff"),
header = F, stringsAsFactors = F)
})
# Concatenate if R2CranLoc is a list of R2Cpeak sets
if(length(R2CranLoc) > 1) {
R2CranLoc <- do.call(rbind, R2CranLoc)
} else {
R2CranLoc <- R2CranLoc[[1]]
}
# Convert R2CranLoc into GRanges
R2CranLocGR <- GRanges(seqnames = R2CranLoc[,1],
ranges = IRanges(start = R2CranLoc[,4],
end = R2CranLoc[,5]),
strand = "*")
# Subset ranLoc to specified chromosome(s)
if(chrs != "allchrs") {
R1R3ranLocGR <- R1R3ranLocGR[seqnames(R1R3ranLocGR) %in% chrs]
R2CranLocGR <- R2CranLocGR[seqnames(R2CranLocGR) %in% chrs]
}
print(paste0("Random loci for ", libName, " R1R3peaks width median ="))
print(median(width(R1R3ranLocGR)))
print(paste0("Random loci for ", libName, " R1R3peaks width range ="))
print(range(width(R1R3ranLocGR)))
print(paste0("Random loci for ", libName, " R2Cpeaks width median ="))
print(median(width(R2CranLocGR)))
print(paste0("Random loci for ", libName, " R2Cpeaks width range ="))
print(range(width(R2CranLocGR)))
# Load ChIP BAM file
reads <- readGAlignmentPairs(paste0("/home/ajt200/analysis/wheat/", covdirName,
"/snakemake_ChIPseq/mapped/both/", covlibName,
"_MappedOn_wheat_v1.0_lowXM_both_sort.bam"))
reads <- reads[seqnames(reads) != "chrUn"]
save(reads,
file = paste0(covlibName, "_readGAlignmentPairs.RData"))
#load(paste0(covlibName, "_readGAlignmentPairs.RData"))
ChIP_reads <- reads
rm(reads); gc()
ChIP_lib_size <- length(ChIP_reads)
# Calculate "per million" scaling factor
ChIP_RPM_scaling_factor <- ChIP_lib_size/1e6
if(chrs != "allchrs") {
ChIP_reads <- ChIP_reads[seqnames(ChIP_reads) %in% chrs]
}
# Calculate RPM and RPKM for each R1R3peak and R1R3ranLoc
R1R3peak_ChIP_reads <- countOverlaps(query = R1R3peaksGR,
subject = ChIP_reads)
R1R3peak_ChIP_RPM <- R1R3peak_ChIP_reads/ChIP_RPM_scaling_factor
R1R3peak_ChIP_RPKM <- R1R3peak_ChIP_RPM/(width(R1R3peaksGR)/1e+03)
R1R3peak_ChIP_RPMplus1 <- R1R3peak_ChIP_RPM+1
R1R3peak_ChIP_RPKMplus1 <- R1R3peak_ChIP_RPKM+1
R1R3ranLoc_ChIP_reads <- countOverlaps(query = R1R3ranLocGR,
subject = ChIP_reads)
R1R3ranLoc_ChIP_RPM <- R1R3ranLoc_ChIP_reads/ChIP_RPM_scaling_factor
R1R3ranLoc_ChIP_RPKM <- R1R3ranLoc_ChIP_RPM/(width(R1R3ranLocGR)/1e+03)
R1R3ranLoc_ChIP_RPMplus1 <- R1R3ranLoc_ChIP_RPM+1
R1R3ranLoc_ChIP_RPKMplus1 <- R1R3ranLoc_ChIP_RPKM+1
# Calculate RPM and RPKM for each R2Cpeak and R2CranLoc
R2Cpeak_ChIP_reads <- countOverlaps(query = R2CpeaksGR,
subject = ChIP_reads)
R2Cpeak_ChIP_RPM <- R2Cpeak_ChIP_reads/ChIP_RPM_scaling_factor
R2Cpeak_ChIP_RPKM <- R2Cpeak_ChIP_RPM/(width(R2CpeaksGR)/1e+03)
R2Cpeak_ChIP_RPMplus1 <- R2Cpeak_ChIP_RPM+1
R2Cpeak_ChIP_RPKMplus1 <- R2Cpeak_ChIP_RPKM+1
R2CranLoc_ChIP_reads <- countOverlaps(query = R2CranLocGR,
subject = ChIP_reads)
R2CranLoc_ChIP_RPM <- R2CranLoc_ChIP_reads/ChIP_RPM_scaling_factor
R2CranLoc_ChIP_RPKM <- R2CranLoc_ChIP_RPM/(width(R2CranLocGR)/1e+03)
R2CranLoc_ChIP_RPMplus1 <- R2CranLoc_ChIP_RPM+1
R2CranLoc_ChIP_RPKMplus1 <- R2CranLoc_ChIP_RPKM+1
# Load input BAM file
# The raw readGAlignmentPairs() import is kept as commented-out provenance;
# the pre-serialised .RData object is loaded instead for speed
#input_reads <- readGAlignmentPairs(paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/input/snakemake_ChIPseq/mapped/both/",
#                                          "H3_input_SRR6350669_MappedOn_wheat_v1.0_lowXM_both_sort.bam"))
#input_reads <- input_reads[seqnames(input_reads) != "chrUn"]
#save(input_reads,
#     file = paste0("H3_input_SRR6350669_readGAlignmentPairs.RData"))
load(paste0("H3_input_SRR6350669_readGAlignmentPairs.RData"))
# Library size of the input (control) sample, for its own RPM scaling
input_lib_size <- length(input_reads)
# Calculate "per million" scaling factor
input_RPM_scaling_factor <- input_lib_size/1e6
# Restrict the control reads to the same chromosomes as the ChIP reads
if(chrs != "allchrs") {
input_reads <- input_reads[seqnames(input_reads) %in% chrs]
}
# Calculate RPM and RPKM for each R1R3peak and R1R3ranLoc
R1R3peak_input_reads <- countOverlaps(query = R1R3peaksGR,
                                      subject = input_reads)
R1R3peak_input_RPM <- R1R3peak_input_reads/input_RPM_scaling_factor
R1R3peak_input_RPKM <- R1R3peak_input_RPM/(width(R1R3peaksGR)/1e+03)
R1R3peak_input_RPMplus1 <- R1R3peak_input_RPM+1
R1R3peak_input_RPKMplus1 <- R1R3peak_input_RPKM+1
R1R3ranLoc_input_reads <- countOverlaps(query = R1R3ranLocGR,
                                        subject = input_reads)
R1R3ranLoc_input_RPM <- R1R3ranLoc_input_reads/input_RPM_scaling_factor
R1R3ranLoc_input_RPKM <- R1R3ranLoc_input_RPM/(width(R1R3ranLocGR)/1e+03)
R1R3ranLoc_input_RPMplus1 <- R1R3ranLoc_input_RPM+1
R1R3ranLoc_input_RPKMplus1 <- R1R3ranLoc_input_RPKM+1
# Calculate RPM and RPKM for each R2Cpeak and R2CranLoc
R2Cpeak_input_reads <- countOverlaps(query = R2CpeaksGR,
                                     subject = input_reads)
R2Cpeak_input_RPM <- R2Cpeak_input_reads/input_RPM_scaling_factor
R2Cpeak_input_RPKM <- R2Cpeak_input_RPM/(width(R2CpeaksGR)/1e+03)
R2Cpeak_input_RPMplus1 <- R2Cpeak_input_RPM+1
R2Cpeak_input_RPKMplus1 <- R2Cpeak_input_RPKM+1
R2CranLoc_input_reads <- countOverlaps(query = R2CranLocGR,
                                       subject = input_reads)
R2CranLoc_input_RPM <- R2CranLoc_input_reads/input_RPM_scaling_factor
R2CranLoc_input_RPKM <- R2CranLoc_input_RPM/(width(R2CranLocGR)/1e+03)
R2CranLoc_input_RPMplus1 <- R2CranLoc_input_RPM+1
R2CranLoc_input_RPKMplus1 <- R2CranLoc_input_RPKM+1
# Calculate log2(ChIP/input) coverage for R1R3peaks and R1R3ranLoc
# The pseudocount (+1) guarantees finite ratios for loci with zero coverage
log2_R1R3peak_ChIP_input_RPKMplus1 <- log2(R1R3peak_ChIP_RPKMplus1/R1R3peak_input_RPKMplus1)
log2_R1R3peak_ChIP_input_RPMplus1 <- log2(R1R3peak_ChIP_RPMplus1/R1R3peak_input_RPMplus1)
log2_R1R3ranLoc_ChIP_input_RPKMplus1 <- log2(R1R3ranLoc_ChIP_RPKMplus1/R1R3ranLoc_input_RPKMplus1)
log2_R1R3ranLoc_ChIP_input_RPMplus1 <- log2(R1R3ranLoc_ChIP_RPMplus1/R1R3ranLoc_input_RPMplus1)
# Calculate log2(ChIP/input) coverage for R2Cpeaks and R2CranLoc
log2_R2Cpeak_ChIP_input_RPKMplus1 <- log2(R2Cpeak_ChIP_RPKMplus1/R2Cpeak_input_RPKMplus1)
log2_R2Cpeak_ChIP_input_RPMplus1 <- log2(R2Cpeak_ChIP_RPMplus1/R2Cpeak_input_RPMplus1)
log2_R2CranLoc_ChIP_input_RPKMplus1 <- log2(R2CranLoc_ChIP_RPKMplus1/R2CranLoc_input_RPKMplus1)
log2_R2CranLoc_ChIP_input_RPMplus1 <- log2(R2CranLoc_ChIP_RPMplus1/R2CranLoc_input_RPMplus1)
# Define function for making colours transparent (for peak width histograms)
# Turn one or more colour names/hex codes into semi-transparent hex
# colour strings; alpha runs 0 (invisible) to 255, default 180.
makeTransparent <- function(thisColour, alpha = 180) {
  # col2rgb() yields a 3 x n matrix of red/green/blue channel values
  channelMat <- col2rgb(thisColour)
  # Rebuild each colour column-by-column, appending the alpha channel
  apply(channelMat, 2, function(chan) {
    rgb(red = chan[1], green = chan[2], blue = chan[3],
        alpha = alpha, maxColorValue = 255)
  })
}
# Calculate peak heat vs width correlation coefficients
# Standardise P-values to a sample size of 100 (q-values) as proposed by
# Good (1982) Standardized tail-area probabilities. Journal of Computation and Simulation 16: 65-66
# and summarised by Woolley (2003):
# https://stats.stackexchange.com/questions/22233/how-to-choose-significance-level-for-a-large-data-set
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.518.5341&rep=rep1&type=pdf
# Woolley (2003): "Clearly, the meaningfulness of the p-value diminishes as the sample size increases";
# Anne Z. (2012, Pearson eCollege, Denver): "In the real world, there are unlikely to be semi-partial correlations
# that are exactly zero, which is the null hypothesis in testing significance of a regression coefficient."
# Formally, the standardised p-value is defined as:
# q = min(0.5, p * sqrt( (n/100) ))
# Woolley (2003): "The value of 0.5 is somewhat arbitrary, though its purpose is to avoid q-values of greater than 1."
# Each pair of lines below computes Spearman's rho (_r) and the
# standardised P-value (_p) between a coverage statistic and locus width,
# for peaks and for the matched random loci of both peak sets.
R1R3peak_L2FC_RPKMplus1_r <- round(cor.test(x = log2_R1R3peak_ChIP_input_RPKMplus1, y = width(R1R3peaksGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3peak_L2FC_RPKMplus1_p <- round(min(0.5, cor.test(x = log2_R1R3peak_ChIP_input_RPKMplus1, y = width(R1R3peaksGR), method = "spearman")$p.value * sqrt( (length(R1R3peaksGR)/100) )), digits = 4)
R2Cpeak_L2FC_RPKMplus1_r <- round(cor.test(x = log2_R2Cpeak_ChIP_input_RPKMplus1, y = width(R2CpeaksGR), method = "spearman")$estimate[[1]], digits = 2)
R2Cpeak_L2FC_RPKMplus1_p <- round(min(0.5, cor.test(x = log2_R2Cpeak_ChIP_input_RPKMplus1, y = width(R2CpeaksGR), method = "spearman")$p.value * sqrt( (length(R2CpeaksGR)/100) )), digits = 4)
R1R3ranLoc_L2FC_RPKMplus1_r <- round(cor.test(x = log2_R1R3ranLoc_ChIP_input_RPKMplus1, y = width(R1R3ranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3ranLoc_L2FC_RPKMplus1_p <- round(min(0.5, cor.test(x = log2_R1R3ranLoc_ChIP_input_RPKMplus1, y = width(R1R3ranLocGR), method = "spearman")$p.value * sqrt( (length(R1R3ranLocGR)/100) )), digits = 4)
R2CranLoc_L2FC_RPKMplus1_r <- round(cor.test(x = log2_R2CranLoc_ChIP_input_RPKMplus1, y = width(R2CranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R2CranLoc_L2FC_RPKMplus1_p <- round(min(0.5, cor.test(x = log2_R2CranLoc_ChIP_input_RPKMplus1, y = width(R2CranLocGR), method = "spearman")$p.value * sqrt( (length(R2CranLocGR)/100) )), digits = 4)
R1R3peak_L2FC_RPMplus1_r <- round(cor.test(x = log2_R1R3peak_ChIP_input_RPMplus1, y = width(R1R3peaksGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3peak_L2FC_RPMplus1_p <- round(min(0.5, cor.test(x = log2_R1R3peak_ChIP_input_RPMplus1, y = width(R1R3peaksGR), method = "spearman")$p.value * sqrt( (length(R1R3peaksGR)/100) )), digits = 4)
R2Cpeak_L2FC_RPMplus1_r <- round(cor.test(x = log2_R2Cpeak_ChIP_input_RPMplus1, y = width(R2CpeaksGR), method = "spearman")$estimate[[1]], digits = 2)
R2Cpeak_L2FC_RPMplus1_p <- round(min(0.5, cor.test(x = log2_R2Cpeak_ChIP_input_RPMplus1, y = width(R2CpeaksGR), method = "spearman")$p.value * sqrt( (length(R2CpeaksGR)/100) )), digits = 4)
R1R3ranLoc_L2FC_RPMplus1_r <- round(cor.test(x = log2_R1R3ranLoc_ChIP_input_RPMplus1, y = width(R1R3ranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3ranLoc_L2FC_RPMplus1_p <- round(min(0.5, cor.test(x = log2_R1R3ranLoc_ChIP_input_RPMplus1, y = width(R1R3ranLocGR), method = "spearman")$p.value * sqrt( (length(R1R3ranLocGR)/100) )), digits = 4)
R2CranLoc_L2FC_RPMplus1_r <- round(cor.test(x = log2_R2CranLoc_ChIP_input_RPMplus1, y = width(R2CranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R2CranLoc_L2FC_RPMplus1_p <- round(min(0.5, cor.test(x = log2_R2CranLoc_ChIP_input_RPMplus1, y = width(R2CranLocGR), method = "spearman")$p.value * sqrt( (length(R2CranLocGR)/100) )), digits = 4)
# Same correlations against raw ChIP coverage (no input normalisation)
R1R3peak_ChIP_RPKMplus1_r <- round(cor.test(x = R1R3peak_ChIP_RPKMplus1, y = width(R1R3peaksGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3peak_ChIP_RPKMplus1_p <- round(min(0.5, cor.test(x = R1R3peak_ChIP_RPKMplus1, y = width(R1R3peaksGR), method = "spearman")$p.value * sqrt( (length(R1R3peaksGR)/100) )), digits = 4)
R2Cpeak_ChIP_RPKMplus1_r <- round(cor.test(x = R2Cpeak_ChIP_RPKMplus1, y = width(R2CpeaksGR), method = "spearman")$estimate[[1]], digits = 2)
R2Cpeak_ChIP_RPKMplus1_p <- round(min(0.5, cor.test(x = R2Cpeak_ChIP_RPKMplus1, y = width(R2CpeaksGR), method = "spearman")$p.value * sqrt( (length(R2CpeaksGR)/100) )), digits = 4)
R1R3ranLoc_ChIP_RPKMplus1_r <- round(cor.test(x = R1R3ranLoc_ChIP_RPKMplus1, y = width(R1R3ranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3ranLoc_ChIP_RPKMplus1_p <- round(min(0.5, cor.test(x = R1R3ranLoc_ChIP_RPKMplus1, y = width(R1R3ranLocGR), method = "spearman")$p.value * sqrt( (length(R1R3ranLocGR)/100) )), digits = 4)
R2CranLoc_ChIP_RPKMplus1_r <- round(cor.test(x = R2CranLoc_ChIP_RPKMplus1, y = width(R2CranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R2CranLoc_ChIP_RPKMplus1_p <- round(min(0.5, cor.test(x = R2CranLoc_ChIP_RPKMplus1, y = width(R2CranLocGR), method = "spearman")$p.value * sqrt( (length(R2CranLocGR)/100) )), digits = 4)
R1R3peak_ChIP_RPMplus1_r <- round(cor.test(x = R1R3peak_ChIP_RPMplus1, y = width(R1R3peaksGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3peak_ChIP_RPMplus1_p <- round(min(0.5, cor.test(x = R1R3peak_ChIP_RPMplus1, y = width(R1R3peaksGR), method = "spearman")$p.value * sqrt( (length(R1R3peaksGR)/100) )), digits = 4)
R2Cpeak_ChIP_RPMplus1_r <- round(cor.test(x = R2Cpeak_ChIP_RPMplus1, y = width(R2CpeaksGR), method = "spearman")$estimate[[1]], digits = 2)
R2Cpeak_ChIP_RPMplus1_p <- round(min(0.5, cor.test(x = R2Cpeak_ChIP_RPMplus1, y = width(R2CpeaksGR), method = "spearman")$p.value * sqrt( (length(R2CpeaksGR)/100) )), digits = 4)
R1R3ranLoc_ChIP_RPMplus1_r <- round(cor.test(x = R1R3ranLoc_ChIP_RPMplus1, y = width(R1R3ranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R1R3ranLoc_ChIP_RPMplus1_p <- round(min(0.5, cor.test(x = R1R3ranLoc_ChIP_RPMplus1, y = width(R1R3ranLocGR), method = "spearman")$p.value * sqrt( (length(R1R3ranLocGR)/100) )), digits = 4)
R2CranLoc_ChIP_RPMplus1_r <- round(cor.test(x = R2CranLoc_ChIP_RPMplus1, y = width(R2CranLocGR), method = "spearman")$estimate[[1]], digits = 2)
R2CranLoc_ChIP_RPMplus1_p <- round(min(0.5, cor.test(x = R2CranLoc_ChIP_RPMplus1, y = width(R2CranLocGR), method = "spearman")$p.value * sqrt( (length(R2CranLocGR)/100) )), digits = 4)
# Plot peak width histogram, and RPKM+1 or RPM+1 vs loci width and cumulative fraction of loci (peaks and random)
# Layout: 4 rows x 3 columns; each row is one coverage statistic
# (log2 RPKM ratio, log2 RPM ratio, raw RPKM+1, raw RPM+1) and shows
# (1) peak-width histograms, (2) ecdf of the statistic, (3) statistic vs width.
pdf(file = paste0(libName,
                  "_peak_width_hist_and_", covlibName, "_log2ChIPinput_RPKMplus1_vs_ecdf_and_locus_width_",
                  chrs, ".pdf"),
    height = 16, width = 12)
par(mfrow = c(4, 3), mar = c(6, 6, 2, 2), mgp = c(3, 1, 0))
# log2(ChIP/input) RPKM
# Shared histogram breaks so the two peak sets are directly comparable
minBreak <- min(c(width(R1R3peaksGR), width(R2CpeaksGR))) - 0.001
maxBreak <- max(c(width(R1R3peaksGR), width(R2CpeaksGR)))
vecBreak <- pretty(minBreak:maxBreak, n = 1000)
histR1R3 <- hist(width(R1R3peaksGR), breaks = vecBreak, plot = F)
histR2C <- hist(width(R2CpeaksGR), breaks = vecBreak, plot = F)
# Overlay the two width histograms using transparent fills
plot(histR2C, col = makeTransparent("red4"), border = NA, lwd = 2,
     xlab = "Peak width (bp)", ylab = "Peaks", main = "", xlim = c(0, 1500),
     cex.lab = 2, cex.axis = 2)
plot(add = T, histR1R3, col = makeTransparent("red"), border = NA, lwd = 2, xlim = c(0, 1500))
abline(v = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("right",
       legend = c(paste0(length(R1R3peaksGR), " R1 & R3 peaks median = ", round(median(width(R1R3peaksGR))), " bp"),
                  paste0(length(R2CpeaksGR), " R2a-R2b peaks median = ", round(median(width(R2CpeaksGR))), " bp")),
       col = "white",
       text.col = c("grey50", "black"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
# Empirical CDFs of log2(ChIP/input) RPKM, random loci first, peaks on top
plot(ecdf(log2_R2CranLoc_ChIP_input_RPKMplus1), xlim = c(-1, 1.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "navy")
axis(side = 1, lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = seq(0, 1, by = 0.25), labels = c("0", "", "0.5", "", "1"), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(ecdf(log2_R1R3ranLoc_ChIP_input_RPKMplus1), xlim = c(-1, 1.5),xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "dodgerblue")
par(new = T)
plot(ecdf(log2_R2Cpeak_ChIP_input_RPKMplus1), xlim = c(-1, 1.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "red4")
par(new = T)
plot(ecdf(log2_R1R3peak_ChIP_input_RPKMplus1), xlim = c(-1, 1.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = expression("Log"[2]*"(ChIP/control) RPKM"),
     ylab = "Cumulative fraction of loci",
     main = "", cex.lab = 2, col = "red")
legend("right",
       legend = c("R1 & R3 peaks", "R2a-R2b peaks",
                  "R1 & R3 random loci", "R2a-R2b random loci"),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
# Scatter of coverage statistic vs locus width (log y axis), with the
# Spearman correlations computed above reported in the legend
plot(x = log2_R2CranLoc_ChIP_input_RPKMplus1, y = width(R2CranLocGR), pch = ".", log = "y",
     xlim = c(-1, 1.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("navy"))
axis(side = 1, lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = c(seq(10, 90, by = 10), seq(100, 900, by = 100), seq(1000, 10000, by = 1000)), labels = c("10", rep("", 8), expression("10"^"2"), rep("", 8), expression("10"^"3"), rep("", 8), expression("10"^"4")), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(x = log2_R1R3ranLoc_ChIP_input_RPKMplus1, y = width(R1R3ranLocGR), pch = ".", log = "y",
     xlim = c(-1, 1.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("dodgerblue"))
par(new = T)
plot(x = log2_R2Cpeak_ChIP_input_RPKMplus1, y = width(R2CpeaksGR), pch = ".", log = "y",
     xlim = c(-1, 1.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("red4"))
par(new = T)
plot(x = log2_R1R3peak_ChIP_input_RPKMplus1, y = width(R1R3peaksGR), pch = ".", log = "y",
     xlim = c(-1, 1.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = expression("Log"[2]*"(ChIP/control) RPKM"),
     ylab = "Locus width (bp)",
     cex.lab = 2, col = makeTransparent("red"))
abline(h = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("bottomright",
       legend = c(as.expression(bquote("R1 & R3 peaks" ~ italic("r"[s]) ~ "=" ~ .(R1R3peak_L2FC_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R1R3peak_L2FC_RPKMplus1_p))),
                  bquote("R2a-R2b peaks" ~ italic("r"[s]) ~ "=" ~ .(R2Cpeak_L2FC_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R2Cpeak_L2FC_RPKMplus1_p)),
                  bquote("R1 & R3 random loci" ~ italic("r"[s]) ~ "=" ~ .(R1R3ranLoc_L2FC_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R1R3ranLoc_L2FC_RPKMplus1_p)),
                  bquote("R2a-R2b random loci" ~ italic("r"[s]) ~ "=" ~ .(R2CranLoc_L2FC_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R2CranLoc_L2FC_RPKMplus1_p))),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
# log2(ChIP/input) RPM
# Second row: identical structure, but for the RPM-based log2 ratio
minBreak <- min(c(width(R1R3peaksGR), width(R2CpeaksGR))) - 0.001
maxBreak <- max(c(width(R1R3peaksGR), width(R2CpeaksGR)))
vecBreak <- pretty(minBreak:maxBreak, n = 1000)
histR1R3 <- hist(width(R1R3peaksGR), breaks = vecBreak, plot = F)
histR2C <- hist(width(R2CpeaksGR), breaks = vecBreak, plot = F)
plot(histR2C, col = makeTransparent("red4"), border = NA, lwd = 2,
     xlab = "Peak width (bp)", ylab = "Peaks", main = "", xlim = c(0, 1500),
     cex.lab = 2, cex.axis = 2)
plot(add = T, histR1R3, col = makeTransparent("red"), border = NA, lwd = 2, xlim = c(0, 1500))
abline(v = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("right",
       legend = c(paste0(length(R1R3peaksGR), " R1 & R3 peaks median = ", round(median(width(R1R3peaksGR))), " bp"),
                  paste0(length(R2CpeaksGR), " R2a-R2b peaks median = ", round(median(width(R2CpeaksGR))), " bp")),
       col = "white",
       text.col = c("grey50", "black"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
plot(ecdf(log2_R2CranLoc_ChIP_input_RPMplus1), xlim = c(-0.5,0.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "navy")
axis(side = 1, lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = seq(0, 1, by = 0.25), labels = c("0", "", "0.5", "", "1"), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(ecdf(log2_R1R3ranLoc_ChIP_input_RPMplus1), xlim = c(-0.5,0.5),xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "dodgerblue")
par(new = T)
plot(ecdf(log2_R2Cpeak_ChIP_input_RPMplus1), xlim = c(-0.5,0.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "red4")
par(new = T)
plot(ecdf(log2_R1R3peak_ChIP_input_RPMplus1), xlim = c(-0.5,0.5), xaxt = "n", yaxt = "n", pch = ".",
     xlab = expression("Log"[2]*"(ChIP/control) RPM"),
     ylab = "Cumulative fraction of loci",
     main = "", cex.lab = 2, col = "red")
legend("right",
       legend = c("R1 & R3 peaks", "R2a-R2b peaks",
                  "R1 & R3 random loci", "R2a-R2b random loci"),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
plot(x = log2_R2CranLoc_ChIP_input_RPMplus1, y = width(R2CranLocGR), pch = ".", log = "y",
     xlim = c(-0.5,0.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("navy"))
axis(side = 1, lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = c(seq(10, 90, by = 10), seq(100, 900, by = 100), seq(1000, 10000, by = 1000)), labels = c("10", rep("", 8), expression("10"^"2"), rep("", 8), expression("10"^"3"), rep("", 8), expression("10"^"4")), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(x = log2_R1R3ranLoc_ChIP_input_RPMplus1, y = width(R1R3ranLocGR), pch = ".", log = "y",
     xlim = c(-0.5,0.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("dodgerblue"))
par(new = T)
plot(x = log2_R2Cpeak_ChIP_input_RPMplus1, y = width(R2CpeaksGR), pch = ".", log = "y",
     xlim = c(-0.5,0.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("red4"))
par(new = T)
plot(x = log2_R1R3peak_ChIP_input_RPMplus1, y = width(R1R3peaksGR), pch = ".", log = "y",
     xlim = c(-0.5,0.5), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = expression("Log"[2]*"(ChIP/control) RPM"),
     ylab = "Locus width (bp)",
     cex.lab = 2, col = makeTransparent("red"))
abline(h = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("bottomright",
       legend = c(as.expression(bquote("R1 & R3 peaks" ~ italic("r"[s]) ~ "=" ~ .(R1R3peak_L2FC_RPMplus1_r) * ";" ~ italic("P =") ~ .(R1R3peak_L2FC_RPMplus1_p))),
                  bquote("R2a-R2b peaks" ~ italic("r"[s]) ~ "=" ~ .(R2Cpeak_L2FC_RPMplus1_r) * ";" ~ italic("P =") ~ .(R2Cpeak_L2FC_RPMplus1_p)),
                  bquote("R1 & R3 random loci" ~ italic("r"[s]) ~ "=" ~ .(R1R3ranLoc_L2FC_RPMplus1_r) * ";" ~ italic("P =") ~ .(R1R3ranLoc_L2FC_RPMplus1_p)),
                  bquote("R2a-R2b random loci" ~ italic("r"[s]) ~ "=" ~ .(R2CranLoc_L2FC_RPMplus1_r) * ";" ~ italic("P =") ~ .(R2CranLoc_L2FC_RPMplus1_p))),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
# ChIP RPKM+1
# Third row: raw (non input-normalised) ChIP RPKM+1
minBreak <- min(c(width(R1R3peaksGR), width(R2CpeaksGR))) - 0.001
maxBreak <- max(c(width(R1R3peaksGR), width(R2CpeaksGR)))
vecBreak <- pretty(minBreak:maxBreak, n = 1000)
histR1R3 <- hist(width(R1R3peaksGR), breaks = vecBreak, plot = F)
histR2C <- hist(width(R2CpeaksGR), breaks = vecBreak, plot = F)
plot(histR2C, col = makeTransparent("red4"), border = NA, lwd = 2,
     xlab = "Peak width (bp)", ylab = "Peaks", main = "", xlim = c(0, 1500),
     cex.lab = 2, cex.axis = 2)
plot(add = T, histR1R3, col = makeTransparent("red"), border = NA, lwd = 2, xlim = c(0, 1500))
abline(v = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("right",
       legend = c(paste0(length(R1R3peaksGR), " R1 & R3 peaks median = ", round(median(width(R1R3peaksGR))), " bp"),
                  paste0(length(R2CpeaksGR), " R2a-R2b peaks median = ", round(median(width(R2CpeaksGR))), " bp")),
       col = "white",
       text.col = c("grey50", "black"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
plot(ecdf(R2CranLoc_ChIP_RPKMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "navy")
axis(side = 1, at = c(1:4), labels = c(1:4), lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = seq(0, 1, by = 0.25), labels = c("0", "", "0.5", "", "1"), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(ecdf(R1R3ranLoc_ChIP_RPKMplus1), log = "x", xlim = c(1, 4),xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "dodgerblue")
par(new = T)
plot(ecdf(R2Cpeak_ChIP_RPKMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "red4")
par(new = T)
plot(ecdf(R1R3peak_ChIP_RPKMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "Coverage RPKM+1",
     ylab = "Cumulative fraction of loci",
     main = "", cex.lab = 2, col = "red")
legend("right",
       legend = c("R1 & R3 peaks", "R2a-R2b peaks",
                  "R1 & R3 random loci", "R2a-R2b random loci"),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
plot(x = R2CranLoc_ChIP_RPKMplus1, y = width(R2CranLocGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("navy"))
axis(side = 1, at = c(1:4), labels = c(1:4), lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = c(seq(10, 90, by = 10), seq(100, 900, by = 100), seq(1000, 10000, by = 1000)), labels = c("10", rep("", 8), expression("10"^"2"), rep("", 8), expression("10"^"3"), rep("", 8), expression("10"^"4")), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(x = R1R3ranLoc_ChIP_RPKMplus1, y = width(R1R3ranLocGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("dodgerblue"))
par(new = T)
plot(x = R2Cpeak_ChIP_RPKMplus1, y = width(R2CpeaksGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("red4"))
par(new = T)
plot(x = R1R3peak_ChIP_RPKMplus1, y = width(R1R3peaksGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "Coverage RPKM+1",
     ylab = "Locus width (bp)",
     cex.lab = 2, col = makeTransparent("red"))
abline(h = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("bottomright",
       legend = c(as.expression(bquote("R1 & R3 peaks" ~ italic("r"[s]) ~ "=" ~ .(R1R3peak_ChIP_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R1R3peak_ChIP_RPKMplus1_p))),
                  bquote("R2a-R2b peaks" ~ italic("r"[s]) ~ "=" ~ .(R2Cpeak_ChIP_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R2Cpeak_ChIP_RPKMplus1_p)),
                  bquote("R1 & R3 random loci" ~ italic("r"[s]) ~ "=" ~ .(R1R3ranLoc_ChIP_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R1R3ranLoc_ChIP_RPKMplus1_p)),
                  bquote("R2a-R2b random loci" ~ italic("r"[s]) ~ "=" ~ .(R2CranLoc_ChIP_RPKMplus1_r) * ";" ~ italic("P =") ~ .(R2CranLoc_ChIP_RPKMplus1_p))),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
# ChIP RPM+1
# Fourth row: raw ChIP RPM+1
minBreak <- min(c(width(R1R3peaksGR), width(R2CpeaksGR))) - 0.001
maxBreak <- max(c(width(R1R3peaksGR), width(R2CpeaksGR)))
vecBreak <- pretty(minBreak:maxBreak, n = 1000)
histR1R3 <- hist(width(R1R3peaksGR), breaks = vecBreak, plot = F)
histR2C <- hist(width(R2CpeaksGR), breaks = vecBreak, plot = F)
plot(histR2C, col = makeTransparent("red4"), border = NA, lwd = 2,
     xlab = "Peak width (bp)", ylab = "Peaks", main = "", xlim = c(0, 1500),
     cex.lab = 2, cex.axis = 2)
plot(add = T, histR1R3, col = makeTransparent("red"), border = NA, lwd = 2, xlim = c(0, 1500))
abline(v = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("right",
       legend = c(paste0(length(R1R3peaksGR), " R1 & R3 peaks median = ", round(median(width(R1R3peaksGR))), " bp"),
                  paste0(length(R2CpeaksGR), " R2a-R2b peaks median = ", round(median(width(R2CpeaksGR))), " bp")),
       col = "white",
       text.col = c("grey50", "black"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
plot(ecdf(R2CranLoc_ChIP_RPMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "navy")
axis(side = 1, at = c(1:4), labels = c(1:4), lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = seq(0, 1, by = 0.25), labels = c("0", "", "0.5", "", "1"), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(ecdf(R1R3ranLoc_ChIP_RPMplus1), log = "x", xlim = c(1, 4),xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "dodgerblue")
par(new = T)
plot(ecdf(R2Cpeak_ChIP_RPMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "", ylab = "", main = "", col = "red4")
par(new = T)
plot(ecdf(R1R3peak_ChIP_RPMplus1), log = "x", xlim = c(1, 4), xaxt = "n", yaxt = "n", pch = ".",
     xlab = "Coverage RPM+1",
     ylab = "Cumulative fraction of loci",
     main = "", cex.lab = 2, col = "red")
legend("right",
       legend = c("R1 & R3 peaks", "R2a-R2b peaks",
                  "R1 & R3 random loci", "R2a-R2b random loci"),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
plot(x = R2CranLoc_ChIP_RPMplus1, y = width(R2CranLocGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("navy"))
axis(side = 1, at = c(1:4), labels = c(1:4), lwd.tick = 2, cex.axis = 2)
axis(side = 2, at = c(seq(10, 90, by = 10), seq(100, 900, by = 100), seq(1000, 10000, by = 1000)), labels = c("10", rep("", 8), expression("10"^"2"), rep("", 8), expression("10"^"3"), rep("", 8), expression("10"^"4")), lwd.tick = 2, cex.axis = 2)
par(new = T)
plot(x = R1R3ranLoc_ChIP_RPMplus1, y = width(R1R3ranLocGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("dodgerblue"))
par(new = T)
plot(x = R2Cpeak_ChIP_RPMplus1, y = width(R2CpeaksGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "", ylab = "", col = makeTransparent("red4"))
par(new = T)
plot(x = R1R3peak_ChIP_RPMplus1, y = width(R1R3peaksGR), pch = ".", log = "xy",
     xlim = c(1, 4), ylim = c(10, 10000), xaxt = "n", yaxt = "n",
     xlab = "Coverage RPM+1",
     ylab = "Locus width (bp)",
     cex.lab = 2, col = makeTransparent("red"))
abline(h = c(median(width(R2CpeaksGR)), median(width(R1R3peaksGR))), col = c("black", "grey50"), lty = 2, lwd = 2)
legend("bottomright",
       legend = c(as.expression(bquote("R1 & R3 peaks" ~ italic("r"[s]) ~ "=" ~ .(R1R3peak_ChIP_RPMplus1_r) * ";" ~ italic("P =") ~ .(R1R3peak_ChIP_RPMplus1_p))),
                  bquote("R2a-R2b peaks" ~ italic("r"[s]) ~ "=" ~ .(R2Cpeak_ChIP_RPMplus1_r) * ";" ~ italic("P =") ~ .(R2Cpeak_ChIP_RPMplus1_p)),
                  bquote("R1 & R3 random loci" ~ italic("r"[s]) ~ "=" ~ .(R1R3ranLoc_ChIP_RPMplus1_r) * ";" ~ italic("P =") ~ .(R1R3ranLoc_ChIP_RPMplus1_p)),
                  bquote("R2a-R2b random loci" ~ italic("r"[s]) ~ "=" ~ .(R2CranLoc_ChIP_RPMplus1_r) * ";" ~ italic("P =") ~ .(R2CranLoc_ChIP_RPMplus1_p))),
       col = "white",
       text.col = c("red", "red4",
                    "dodgerblue", "navy"),
       ncol = 1, cex = 1, lwd = 2, bty = "n")
box(lwd = 2)
# Close the multi-panel PDF device
dev.off()
|
a5390a731cf523ee2a5a916c311308cf717e474d
|
1a87d39148d5b6957e8fbb41a75cd726d85d69af
|
/man/dissimM.Rd
|
89e1b86f3a4919689842544bd4dba41cd410c2f8
|
[] |
no_license
|
mknoll/dataAnalysisMisc
|
61f218f42ba03bc3905416068ea72be1de839004
|
1c720c8e35ae18ca03aca15ff1a9485e920e8832
|
refs/heads/master
| 2023-01-12T16:49:39.807006
| 2022-12-22T10:21:41
| 2022-12-22T10:21:41
| 91,482,748
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 218
|
rd
|
dissimM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CNVprofiler.R
\name{dissimM}
\alias{dissimM}
\title{Compute a dissimilarity matrix}
\usage{
dissimM(dataChr)
}
\description{
Compute a dissimilarity matrix.
}
|
1fb98bf59f801e824a26e8e8ddd480a0dd5725f8
|
c8cc82425c6d676ae62f09dc9b3107f0ceeaa854
|
/R/ReadNC.R
|
8d1f742f2d9d28ef5e8118bbd7d34c6ad968a00a
|
[] |
no_license
|
APCC21/rSDQDM
|
ceff58d08041c22d30ca6defe68bab64313ca7fc
|
ef1394e6b1ac32f48add5c1233b844e8c4c4e573
|
refs/heads/master
| 2021-05-13T20:34:42.707319
| 2018-01-15T04:30:15
| 2018-01-15T04:30:15
| 116,915,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,090
|
r
|
ReadNC.R
|
ReadNC<- function (Folder,NCfileName,Num_days_Cal,Variable,yr_bg,yr_ed,End_Year_ref,...) {
  # Reading netCDF files
  # Each GCM has different lengths of historical runs and projections.
  # According to file names, this function combines all netCDF files into one and extract the reference and projection periods
  # Folder: Directory which netCDF files exist
  # NCfileName: the name of netCDF file
  # Num_days_Cal: Number of calendar days within the period of netCDF file
  # Variable: "pr", "tmin","tmax"
  # yr_bg,yr_ed,End_Year_ref: beginning, ending years, and ending year of reference period
  # Returns list(nc_Var_filling, lon, lat) where nc_Var_filling is a
  # lon x lat x Num_days_Cal array on the real (Gregorian) calendar.
  # NOTE(review): relies on nc_open/ncvar_get (ncdf4) and leap.year()
  # (presumably from the chron package) being attached by the caller.
  # Folder <- Folder
  # NCfileName <- filelist[i]
  # yr_bg <- yy_bg
  # yr_ed <- yy_ed
  # End_Year_ref <- End_Year
  ########################
  filename<-NCfileName
  # Third underscore-delimited token of the file name identifies the GCM
  # (the bcc-csm1-1 model needs special end-of-record padding below)
  splitfilenms <- unlist(strsplit(filename,"_"))[3]
  NC_path<-paste(Folder,"/",filename,sep="")
  nc_raw_data<-nc_open(NC_path)
  lon <- ncvar_get(nc_raw_data,"lon")
  lat <- ncvar_get(nc_raw_data,"lat")
  nlon<-as.numeric(length(lon))
  nlat<-as.numeric(length(lat))
  nc_Var <- ncvar_get(nc_raw_data,Variable)
  # Number of time steps actually stored in the file
  Num_days_NC<-as.numeric(length(nc_Var[1,1,]))
  nc_close(nc_raw_data)
  if(Num_days_NC==Num_days_Cal) {
    # File already matches the real calendar: use it as-is
    nc_Var_filling<-nc_Var
  } else {
    # Model uses a shorter (e.g. 365-day) calendar: copy day-by-day and
    # synthesise the missing days (Feb 29 in leap years; a trailing day
    # for bcc-csm1-1, whose final year is presumably one day short).
    nc_Var_filling=array(0,dim=c(nlon,nlat,Num_days_Cal))
    norder_GCM=0            # read position in the model's time axis
    norder_fill=0           # write position in the real-calendar axis
    Nleap_Yr=0
    # Never fill beyond the end of the reference period
    if(yr_ed>End_Year_ref){
      yr_ed_NC<-End_Year_ref
    } else {
      yr_ed_NC<- yr_ed
    }
    for (iYear in yr_bg:yr_ed_NC) {
      if(!leap.year(iYear)){
        if(splitfilenms=="bcc-csm1-1" & iYear==yr_ed_NC){
          # bcc-csm1-1 final (non-leap) year: only 364 days present;
          # pad the last calendar day by interpolating (here: repeating)
          # the final stored day
          norder_start<-norder_GCM+1
          norder_end<-norder_GCM+364
          norder_start_fill<-norder_fill+1
          norder_end_fill<-norder_fill+364
          nc_Var_filling[,,norder_start_fill:norder_end_fill]<-nc_Var[,,norder_start:norder_end]
          for (ilon in 1:nlon) {
            for (ilat in 1:nlat){
              x<-c(1,3)
              y<-c(nc_Var[ilon,ilat,norder_end],nc_Var[ilon,ilat,norder_end])
              nc_Var_filling[ilon,ilat,norder_end_fill+1]<-approx(x,y,2)$y
            }
          }
        } else {
          # Ordinary non-leap year: straight 365-day copy
          norder_start<-norder_GCM+1
          norder_end<-norder_GCM+365
          norder_start_fill<-norder_fill+1
          norder_end_fill<-norder_fill+365
          nc_Var_filling[,,norder_start_fill:norder_end_fill]<-nc_Var[,,norder_start:norder_end]
        }
      } else if (leap.year(iYear)){
        # Leap year: copy Jan 1 - Feb 28 (59 days), then create Feb 29 by
        # linearly interpolating between Feb 28 and Mar 1 at every grid cell
        Nleap_Yr=Nleap_Yr+1
        norder_start<-norder_GCM+1
        norder_end<-norder_GCM+59
        norder_start_fill<-norder_fill+1
        norder_end_fill<-norder_fill+59
        nc_Var_filling[,,norder_start_fill:norder_end_fill]<-nc_Var[,,norder_start:norder_end]
        for (ilon in 1:nlon) {
          for (ilat in 1:nlat){
            x<-c(1,3)
            y<-c(nc_Var[ilon,ilat,norder_end],nc_Var[ilon,ilat,norder_end+1])
            nc_Var_filling[ilon,ilat,norder_end_fill+1]<-approx(x,y,2)$y
          }
        }
        if(splitfilenms=="bcc-csm1-1" & iYear==yr_ed_NC){
          # bcc-csm1-1 final (leap) year: remaining record is 305 days
          # (one short); pad the last day as in the non-leap branch
          norder_start<-norder_end+1
          norder_end<-norder_end+305
          norder_start_fill<-norder_end_fill+1+1
          norder_end_fill<-norder_end_fill+305+1
          nc_Var_filling[,,norder_start_fill:norder_end_fill]<-nc_Var[,,norder_start:norder_end]
          for (ilon in 1:nlon) {
            for (ilat in 1:nlat){
              x<-c(1,3)
              y<-c(nc_Var[ilon,ilat,norder_end],nc_Var[ilon,ilat,norder_end])
              nc_Var_filling[ilon,ilat,norder_end_fill+1]<-approx(x,y,2)$y
            }
          }
        } else {
          # Ordinary leap year: copy the remaining Mar 1 - Dec 31 (306 days),
          # shifted by the one interpolated Feb 29 already written
          norder_start<-norder_end+1
          norder_end<-norder_end+306
          norder_start_fill<-norder_end_fill+1+1
          norder_end_fill<-norder_end_fill+306+1
          nc_Var_filling[,,norder_start_fill:norder_end_fill]<-nc_Var[,,norder_start:norder_end]
        }
      }
      # Advance both cursors to the end of the year just processed
      norder_GCM<-norder_end
      norder_fill<-norder_end_fill
    }
  }
  return(list(nc_Var_filling=nc_Var_filling,lon=lon,lat=lat))
}
|
accd60ffe53056fda15f36ff42d490be4bc84c35
|
71d45d4d5cc85b985e355327a49fd9708b9f6a46
|
/plot1.R
|
6ab5aeea1a0b8f2e2f2f52bf4ec2aeb9e778b1e3
|
[] |
no_license
|
ahmedsamouka/Peer-graded-Assignment-Course-Project-2
|
8565e7c3bff085f1f759d5ab00604a3360aa6185
|
3f88cd4a08af5acf1c943414d941e458f065d8ae
|
refs/heads/master
| 2020-04-26T07:11:52.843834
| 2019-03-02T01:01:24
| 2019-03-02T01:01:24
| 173,387,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
plot1.R
|
## plot1.R: total PM2.5 emissions per year, all sources combined.
## Fixed: the file was saved as a console transcript ("> " prompts made it
## unsourceable) and contained a dead duplicate tapply() call without sum.

## Read the NEI/SCC data sets (slow; each is read once)
NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")

## Sum emissions within each year
emissionsyear <- tapply(NEI$Emissions, NEI$year, sum)

## Bar plot of total emissions by year
barplot(emissionsyear, xlab = "year", ylab = "total emission", main = "total emission per year")
|
11bf51a1aa4dcb2b79b76808d50f9baad3d7b8fb
|
1ee3625bc622a90c92617d2bb2711abff8e8c74f
|
/man/snip.Rd
|
9c1582800c308cc282f185ec3a0f4f46e69ebab7
|
[] |
no_license
|
darrellpenta/APAstyler
|
d54a49cd328261b448db5afa0dabee4c0d4612c2
|
c895a13f0473d76efc15bd42d202c245fe36a021
|
refs/heads/master
| 2021-01-22T19:08:28.415784
| 2017-10-07T13:55:11
| 2017-10-07T13:55:11
| 85,164,023
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,093
|
rd
|
snip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/snip-function.R
\name{snip}
\alias{snip}
\title{Remove leading zero and/or trailing zero from a statistic}
\usage{
snip(x, lead = NULL, trail = 3)
}
\arguments{
\item{x}{A numeric value, coercible to a character.}
\item{lead}{Optional number of zeros to remove from the left of the decimal. Default is \code{NULL}.
See \strong{Leading Zero} for details.}
\item{trail}{Position at which to evaluate whether a trailing zero should be dropped. Defaults to \code{3}.
See \strong{Trailing Zero} for details.}
}
\value{
\code{x} as a string with leading and/or trailing zero removed.
}
\description{
Use \code{snip()} if you need to trim a leading zero (left of decimal) or trailing zero (right of decimal) from a value (esp. for reporting stats in APA format).
}
\section{Leading zero}{
For values that cannot exceed 1, (e.g., \emph{p} values), \strong{\href{http://blog.apastyle.org/apastyle/statistics/}{APA}} recommends removing the zero to the left of the decimal.
For example \emph{p} = 0.023 should be reported as \emph{p} = .023. In this case, \code{snip(0.023), lead = 1} would work.
If, for some reason, you need to snip more than one zero, you can specify the number.
Only exact matches (e.g., 1 or 2 "0"s, set by \code{lead}) for \code{x < 1} will be snipped; otherwise, the value to the left of the decimal is left unchanged.
}
\section{Trailing zero}{
The \strong{\href{http://blog.apastyle.org/apastyle/statistics/}{APA}} recommends reporting statistics with decimal fractions rounded to two or three places (in most circumstances).
When \code{trail = 3}, the default behavior is to assume that a minimum of 2 places after the decimal should be preserved under all conditions. This basically sets the position being evaluated \strong{= 3}, and assumes the APA-recommended floor threshold of .001: if there is a zero in this position, the zero is snipped.
See examples and \code{\link{APA_p}}.
}
\examples{
# Drop leading zero
cat(snip("0.22",1))
# Drop trailing zero
cat(snip("0.220"))
}
|
4e337295814e524e2645ad0f96ea39c35c11d87d
|
c6fc456fad22537821549275fcc7f492c23cf8a6
|
/Exemplo Clusterização Hierarquica.R
|
bb10e97b969da3033e1b9354cd9d458d614608af
|
[] |
no_license
|
dougvancan/IA-Modelos
|
c16422ffb5ab457f845f90240ffa3921f8c04dc8
|
fb5dce6417909ffd7f50759271f0aebf934f6c5d
|
refs/heads/master
| 2020-05-16T18:56:28.663134
| 2019-04-24T14:24:20
| 2019-04-24T14:24:20
| 183,244,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
Exemplo Clusterização Hierarquica.R
|
# Standardise the four numeric iris measurements (zero mean, unit variance).
irissc <- scale(iris[, 1:4])
# Species labels, used only to annotate the dendrogram leaves.
labels <- iris[, 5]
# Pairwise Euclidean distances between all samples.
d <- dist(irissc, method = "euclidean")
# Agglomerative hierarchical clustering with average linkage.
output_cluster <- hclust(d, method = "average")
# Draw the resulting dendrogram.
plot(output_cluster, labels = labels, ylab = 'distancia')
|
174621cffbbf0bf1feb1a0354e690264109caaea
|
ada00eed3e808d05bf2a609dee4d14da8af8003a
|
/spammow/src/data.r
|
88d7b98144360441a484dd0a606fe0548f70a4a0
|
[] |
no_license
|
felidadae/_archive
|
69bf239adea9c6b0471d85c983036e9440c25a01
|
7a32029d99944a1025d881472076b16e504bded7
|
refs/heads/master
| 2021-01-11T06:18:48.259714
| 2018-03-28T14:02:01
| 2018-03-28T14:02:01
| 70,054,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,551
|
r
|
data.r
|
#------------------------------------------------------------------
library(CORElearn)
library(tm) # Text mining: Corpus and Document Term Matrix
library(class) # KNN model
#------------------------------------------------------------------
#------------------------------------------------------------------
#
##
###
# Init
###
##
#
set.seed(100) # Set seed for reproducible results
source("../config.r") # For get basepath to spam dataset
#------------------------------------------------------------------
#------------------------------------------------------------------
#
##
###
# Load what prepareData_* produced from file
###
##
#
#
# @Description:
# trainingoPSize = 0.7,
# fun__createCorpuses = createCorpuses_fromAllData,
# keywordsN = 100,
# mindelta__P__w = 0.07
#
# dim(treningo)
# [1] 4232 67
# dim(testingo)
# [1] 1813 67
#
# treningo$CATEGORY_ is {0==HAM or 1==SPAM}
# testingo$CATEGORY_ ^^^^^^^^^^^^^^^^^^^^^^
# ($CATEGORY_ is last column)
#
# Look at function >>prepareData_a<<
#
load__testingo_treningo_1 <- function() {
	# Restore the previously saved training/testing splits from disk.
	# NOTE(review): assumes the two .Rdata files define objects named
	# `testingo` and `treningo` respectively -- confirm against the code
	# that produced ../savedData/*.Rdata.
	load("../savedData/testingo_1.Rdata")
	load("../savedData/treningo_1.Rdata")
	list(treningo, testingo)
}
#------------------------------------------------------------------
#------------------------------------------------------------------
#
##
###
# Putting blocks together
###
##
#
#
# @description
#	Builds the training and testing datasets: creates the spam/ham
#	corpuses, filters them, selects keywords, casts every document to
#	its keyword-frequency representation, shuffles the rows and splits
#	them into a training and a testing set.
#
# @input
#	@trainingoPSize
#		Fraction (0..1) of all documents placed in the training set.
#	@fun__createCorpuses
#		Zero-argument function returning list(spamCorpus, hamCorpus).
#	@keywordsN
#		Maximum number of keywords to select.
#	@mindelta__P__w
#		Minimum |P(w|S) - P(w|H)| for a word to qualify as a keyword.
#
# @return
#	list(treningo, testingo): two data.frames, one row per document,
#	one column per keyword frequency, plus a CATEGORY_ factor column
#	(1 -> SPAM, 0 -> HAM).
#
prepareData_a <- function (
	trainingoPSize = 0.7,
	fun__createCorpuses = createCorpuses_fromAllData,
	keywordsN = 100,
	mindelta__P__w = 0.07
)
{
	corpuses <- fun__createCorpuses()            # user-supplied corpus factory
	spamCorpus <- corpuses[[1]]
	hamCorpus <- corpuses[[2]]
	remove(corpuses)
	spamCorpus <- filterCorpus_standard(spamCorpus)
	hamCorpus <- filterCorpus_standard(hamCorpus)
	keywords = chooseKeywords_a(
		spamCorpus = spamCorpus,
		hamCorpus = hamCorpus,
		keywordN = keywordsN,
		mindelta__P__w = mindelta__P__w,
		additionalInfo = TRUE
	)
	# Cast both corpuses into the keyword vector space (two data.frames).
	listOfdocuments = castDocumentsToKeywords(
		spamCorpus = spamCorpus,
		hamCorpus = hamCorpus,
		keywords = keywords
	)
	all_am = rbind( listOfdocuments[[1]], listOfdocuments[[2]] )
	all_am = all_am[sample(nrow(all_am)),]       # shuffle rows
	Nall_am <- nrow(all_am)
	# BUGFIX: the split index must be an integer.  Previously the raw
	# fractional product was used as a row index; R truncates fractional
	# subscripts, which silently dropped the last row from the test set
	# whenever trainingoPSize * Nall_am was not a whole number.
	Lall_am <- floor(trainingoPSize * Nall_am)
	treningo <- all_am[seq_len(Lall_am), ]
	# seq(..., length.out = 0) yields an empty index, so the test set is
	# well-defined even when every row lands in the training set.
	testingo <- all_am[seq(from = Lall_am + 1, length.out = Nall_am - Lall_am), ]
	(list(treningo, testingo))
}
#------------------------------------------------------------------
#------------------------------------------------------------------
#
##
###
# Prepare keywords, prepare training and testing datasets
###
##
#
#
# @description
#	Casts the spam and ham corpuses into the keyword vector space.
#	Each document becomes one row; each keyword one column holding the
#	keyword's frequency in that document.
#
# @input
#	@spamCorpus	Corpus with spam documents.
#	@hamCorpus	Corpus with ham documents.
#	@keywords	Matrix whose first column holds the keyword strings
#			(as produced by chooseKeywords_a with additionalInfo).
#
# @return
#	list(spam_df, ham_df): two data.frames, each carrying a CATEGORY_
#	factor column (1 -> SPAM, 0 -> HAM).
#
castDocumentsToKeywords <- function(
	spamCorpus,
	hamCorpus,
	keywords
	)
{
	# Document-term matrices restricted to the keyword dictionary.
	dtmK_spam <- DocumentTermMatrix(
		spamCorpus,
		control=list(dictionary = keywords[,1]))
	dtmK_ham <- DocumentTermMatrix(
		hamCorpus,
		control=list(dictionary = keywords[,1]))
	# BUGFIX: the argument was misspelled "stringsAsfactors" and was
	# therefore silently ignored by as.data.frame().
	spam_df <- as.data.frame(
		data.matrix(dtmK_spam),
		stringsAsFactors = FALSE)
	ham_df <- as.data.frame(
		data.matrix(dtmK_ham),
		stringsAsFactors = FALSE)
	# Attach the class label column.
	spam_df$CATEGORY_ = as.factor(1)
	ham_df$CATEGORY_ = as.factor(0)
	return ( list(spam_df, ham_df) )
}
#
# @description
#	Selects the keyword set used to represent documents.  From all
#	words appearing in the spam and ham corpuses it keeps those whose
#	per-class document probabilities differ by more than
#	mindelta__P__w (see doc/report: |P(w|S) - P(w|H)|), ranked by the
#	ratio K = max(P)/min(P).
#
# @input
#	@spamCorpus	Corpus with spam documents.
#	@hamCorpus	Corpus with ham documents.
#	@keywordN	Maximum number of keywords returned.
#	@mindelta__P__w	Threshold on |P(w|S) - P(w|H)|.
#	@additionalInfo	If TRUE, return a 4-column matrix
#			(word, K, max-probability, dominant class);
#			otherwise just the keyword strings.
#
# @return
#	Character matrix (additionalInfo = TRUE) or character vector of
#	keywords; empty when no word passes the threshold.
#
chooseKeywords_a <- function(
	spamCorpus,
	hamCorpus,
	keywordN = 100,
	mindelta__P__w = 0.3,
	additionalInfo = FALSE
	)
{
	# Per-word "probability": fraction of documents containing the word.
	# NOTE(review): `> 1` only counts documents where the word occurs at
	# least twice; `> 0` may have been intended -- confirm before changing.
	create_P__w_ <- function (corpus) {
		dtm <- DocumentTermMatrix(corpus)
		dtm_df <- as.data.frame(
			data.matrix(dtm),
			stringsAsFactors = FALSE)
		colSums(dtm_df > 1) / nrow(dtm_df)
	}
	P__w_SPAM = create_P__w_(spamCorpus)
	P__w_HAM = create_P__w_(hamCorpus)
	# Hash maps word -> index into the probability vectors (fast lookup).
	hash__P__w_SPAM <- new.env()
	hash__P__w_HAM <- new.env()
	for (i in seq_along(P__w_SPAM)) {
		hash__P__w_SPAM[[names(P__w_SPAM)[i]]] <- i
	}
	for (i in seq_along(P__w_HAM)) {
		hash__P__w_HAM[[names(P__w_HAM)[i]]] <- i
	}
	allwords = unique(c(
		names(P__w_SPAM),
		names(P__w_HAM)))
	wordsWithK = matrix(
		data = NA,
		nrow = length(allwords),
		ncol = 4)
	iadded = 0L
	#----------------------------------------------
	for (iword in seq_along(allwords)) {
		word = allwords[iword]
		idx__spam = hash__P__w_SPAM[[word]]	# NULL when absent
		idx__ham = hash__P__w_HAM[[word]]	# NULL when absent
		P_word_SPAM = if (is.null(idx__spam)) 0.0 else P__w_SPAM[idx__spam]
		P_word_HAM = if (is.null(idx__ham)) 0.0 else P__w_HAM[idx__ham]
		delta = abs(P_word_HAM - P_word_SPAM)
		if (delta > mindelta__P__w) {
			# BUGFIX: increment BEFORE assigning.  The previous version
			# wrote the first qualifying word to row 0, which is a silent
			# no-op in R, so the first keyword was always lost.
			iadded <- iadded + 1L
			K = max( P_word_SPAM, P_word_HAM ) /
				min( P_word_SPAM, P_word_HAM )
			wordsWithK[iadded, 1] = word
			wordsWithK[iadded, 2] = round(K, digits=2)
			wordsWithK[iadded, 3] = round(
				max( P_word_SPAM, P_word_HAM ),
				digits=2)
			wordsWithK[iadded, 4] = if (P_word_SPAM > P_word_HAM) "SPAM" else "HAM"
		}
	}
	#----------------------------------------------
	if (iadded == 0L) {
		# BUGFIX: previously 1:(iadded-1) was evaluated here, which for
		# zero keywords produced the index 1:(-1) and garbage output.
		if (additionalInfo) {
			return(wordsWithK[0, , drop = FALSE])
		}
		return(character(0))
	}
	# drop = FALSE keeps the matrix shape even when a single row remains
	# (callers index the result with keywords[, 1]).
	wordsWithK <- wordsWithK[seq_len(iadded), , drop = FALSE]
	# Secondary sort key first, then primary: stable final order is by K,
	# ties broken by the max probability.
	wordsWithK <- wordsWithK[ order(wordsWithK[,3], decreasing = TRUE), , drop = FALSE]
	wordsWithK <- wordsWithK[ order(wordsWithK[,2], decreasing = TRUE), , drop = FALSE]
	n_keep <- min(keywordN, iadded)
	if (additionalInfo) {
		wordsWithK[seq_len(n_keep), , drop = FALSE]
	} else {
		wordsWithK[seq_len(n_keep), 1]
	}
}
#
# @description
# return documents paths from given directory of
# spam dataset
#
# @input
# @dirnames
# @Class: characters vector
# description: which dirs from p__spam_dataset
# load;
# possible values: {spam, easy_ham, ...}
#
# @return
# object of class: DirSource
# description: contains paths to all documents
# object$filelist[idocument]
#
getDocumentsPaths <- function( dirnames ) {
	# p__spam_dataset is defined by the project config file.
	source("../config.r")
	# Build <dataset root>/<dirname> for every requested directory.
	dirpaths <- paste0(p__spam_dataset, "/", dirnames)
	# Only files whose name starts with a digit belong to the dataset.
	DirSource(
		dirpaths,
		encoding = "UTF-8",
		pattern = "^\\d.*"
	)
}
#
# @description
#	Creates a Corpus object from the documents listed in
#	documentsPaths.  Each file is read whole, its lines joined with a
#	space, and non-ASCII bytes dropped.
#
# @input
#	@documentsPaths
#		Class: DirSource; documentsPaths$filelist[i] is the path of
#		the i-th document, documentsPaths$length the document count.
#
# @return
#	Object of class Corpus; a DocumentTermMatrix can be built from it
#	with DocumentTermMatrix(corpus).
#
createCorpus <- function(
	documentsPaths
	)
{
	# PERF/ROBUSTNESS FIX: build the character vector in one pass with
	# vapply instead of growing a matrix with rbind() in a loop (O(n^2)),
	# and use seq_len() so zero documents yields an empty corpus instead
	# of the 1:0 indexing error of the previous version.
	docs <- vapply(
		seq_len(documentsPaths$length),
		function(i) {
			document <- readLines(documentsPaths$filelist[i])
			document <- paste(document, collapse=" ")
			iconv(document, "latin1", "ASCII", sub="")	# drop non-ASCII bytes
		},
		character(1)
	)
	documentsCorp <- Corpus(VectorSource(docs))
	return (documentsCorp)
}
#
# @description
# Return already created two corpuses
# (of Class tm:Corpus) with spam and with ham;
#
# @return
# object of class: List of two objects of class Corpus
# description: spam and ham corpuses
# useful operations: {corpuses <- fun__createCorpuses()
# spamCorpus <- corpuses[1]
# hamCorpus <- corpuses[2]}
#
createCorpuses_standard <- function(
	spam_dirs = c("spam"),
	ham_dirs = c("easy_ham")
	)
{
	# Build one corpus per class from the configured dataset directories.
	spamCorpus <- createCorpus(getDocumentsPaths(spam_dirs))
	hamCorpus <- createCorpus(getDocumentsPaths(ham_dirs))
	list(spamCorpus, hamCorpus)
}
createCorpuses_fromAllData <- function() {
	# Convenience wrapper: corpuses built over every spam and ham
	# directory of the dataset.
	createCorpuses_standard(
		spam_dirs = c("spam", "spam_2"),
		ham_dirs = c("easy_ham", "easy_ham_2", "hard_ham")
	)
}
#
# @description
# Standard preprocessing of documents in corpus
# lowercasing, removing HTML, etc.;
#
# @input
# @corp
# Class: Corpus
# description: corpus in
#
# @return
# object of class: Corpus
# description: corpus processed
#
filterCorpus_standard <- function( corp ) {
	# Standard preprocessing pipeline; the step order is deliberate
	# (HTML is stripped before lowercasing, stop words removed last).
	out <- tm_map(corp, removeHTMLTags)
	out <- tm_map(out, content_transformer(tolower))
	out <- tm_map(out, removeNumbers)
	out <- tm_map(out, removePunctuation)
	out <- tm_map(out, stripWhitespace)
	out <- tm_map(out, removeWords, stopwords("english"))
	invisible(out)
}
#
# @description
# Remove HTML Tags;
# According to function >>tm_map<<, so
# corp <- tm_map(corp, removeHTMLTags) will work
#
# @input
# @x
# object of class: PlainTextDocument
# description: document before processing
#
# @return
# object of class: PlainTextDocument
# description: document after processing
#
removeHTMLTags <- function(x) {
	# Drop anything between "<" and the nearest ">" (non-greedy match),
	# then rebuild the document keeping its id and language metadata.
	stripped <- gsub("<.*?>", "", x$content, perl=TRUE)
	PlainTextDocument(
		x = stripped,
		id = meta(x, "id"),
		language = meta(x, "language")
	)
}
#
# @description
#	Total frequency of every word in the corpus, in decreasing order.
#	Note: the input argument is a DocumentTermMatrix, not a Corpus.
#
# @input
#	@dtm
#		Class: DocumentTermMatrix (anything data.matrix() accepts,
#		e.g. a data.frame, works too).
#
# @return
#	Named numeric vector: word -> total frequency across all
#	documents, sorted in decreasing order.
#
showWordsFrequencyInCorpusInOrder <- function(dtm) {
	# BUGFIX: the previous version sorted an undefined variable
	# (`wordsN`), carried a dead `wordsOnly` local and a double
	# assignment, and never returned the sorted result visibly.
	dtm_df <- as.data.frame(
		data.matrix(dtm),
		stringsAsFactors = FALSE)
	wordsWithFrequency <- colSums(dtm_df)
	sort(wordsWithFrequency, decreasing=TRUE)
}
#------------------------------------------------------------------
|
618e33e09d90efd6955a33b284864fbdb7bd8ded
|
8ff92ab6946777ce207b845cf8d4377a03d7ded4
|
/Process_data.R
|
fa8ded3c6fded23e54b1f75eaef0a8ba66b9fec9
|
[
"MIT"
] |
permissive
|
brainy749/Tiwara-G5-Sahel
|
aaca655d32e184cdaedb69228bf8f46525dacf9b
|
0021099914000c1e3c7b4def1a3fa80e158addd5
|
refs/heads/master
| 2020-04-25T22:15:53.117078
| 2019-02-28T14:48:15
| 2019-02-28T14:48:15
| 173,105,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,207
|
r
|
Process_data.R
|
# Imports paquets ---------------------------------------------------------
# library(devtools)
#devtools::install_github("RinteRface/bs4Dash")
library(shiny)
library(fontawesome)
library(shinyWidgets)
library(bs4Dash)
library(plotly)
library(r2d3)
library(r2d3maps)
library(rnaturalearth)
library(magick)
library(leaflet)
library(leaflet.extras)
library(lubridate)
library(DT)
library(recipes)
library(highcharter)
library(tidyverse)
library(billboarder)
library(echarts4r)
library(countrycode)
library(shinydashboard)
library(flexdashboard)
library(sf)
library(shinythemes)
library(htmlwidgets)
library(janitor)
library(configr)
library(httr)
library(jsonlite)
library(shinyWidgets)
library(corrplot)
library(tsne)
library(NbClust)
library(readxl)
library(readr)
library(geosphere)
library(qualpalr)
library(circlize)
library(rdbnomics)
library(esquisse)
conf <- read.config(file="./cfg_shinyapps.ini")
##Import chemin
data_path<-conf$CHEMIN$SAVE_DIR
processed_folder<-conf$CHEMIN$PROCESSED_FOLDER
##Import nom fichiers
acled_file<-conf$NOM_FICHIERS$ACLED
wdi_file<-conf$NOM_FICHIERS$WDI
dhs_file<-conf$NOM_FICHIERS$DHS
inform_file<-conf$NOM_FICHIERS$INFORM
sahel_shapefile<-conf$SHP$SAHEL_SHAPEFILE
micro_world_file<-conf$NOM_FICHIERS$MICRO_WORLD
unhcr_file<-conf$NOM_FICHIERS$UNHCR
countries_file=conf$NOM_FICHIERS$COUNTRIES
## Nom_fichiers_processed
dhs_processed<-conf$NOM_FICHIERS$PROCESSED_DHS
dhs_sf_processed<-conf$NOM_FICHIERS$PROCESSED_DHS_SF
dhs_recent_processed<-conf$NOM_FICHIERS$PROCESSED_DHS_RECENT
wdi_processed<-conf$NOM_FICHIERS$PROCESSED_WDI
acled_processed<-conf$NOM_FICHIERS$PROCESSED_ACLED
inform_processed<-conf$NOM_FICHIERS$PROCESSED_INFORM
inform_data_processed<-conf$NOM_FICHIERS$PROCESSED_INFORM_DATA
cluster_processed<-conf$NOM_FICHIERS$PROCESSED_CLUSTER
afd_processed<-conf$NOM_FICHIERS$PROCESSED_AFD
micro_world_processed<-conf$NOM_FICHIERS$PROCESSED_MICRO_WORLD
sahel_tiwara_geo_processed<-conf$NOM_FICHIERS$PROCESSED_SAHEL_TIWARA_GEO
unhcr_processed<-conf$NOM_FICHIERS$PROCESSED_UNHCR
work_data_processed<-conf$NOM_FICHIERS$PROCESSED_WORK_DATA
work_data_ts_prep_processed<-conf$NOM_FICHIERS$PROCESSED_WORK_DATA_TS_PREP
esquisse_processed<-conf$NOM_FICHIERS$PROCESSED_ESQUISSE
boxplot_processed<-conf$NOM_FICHIERS$PROCESSED_BOXPLOT
############################### FILE READING
variables<-strsplit(conf$VARIABLES_DHS$var_dhs,",")[[1]]
databases_work<-strsplit(conf$DBNOMICS_EMPLOYMENT$databases,",")[[1]]
countries_work<-strsplit(conf$DBNOMICS_EMPLOYMENT$countries,",")[[1]]
masques_work<-conf$DBNOMICS_EMPLOYMENT$mask
provider_work<-conf$DBNOMICS_EMPLOYMENT$provider
databases_work_ts<-conf$DBNOMICS_EMPLOYMENT$database_ts
iso3_cn<-strsplit(conf$COUNTRY_LISTS$ISO3_CN,",")[[1]]
iso3_clust<-strsplit(conf$COUNTRY_LISTS$ISO3_CLUST,",")[[1]]
country_names<-strsplit(conf$COUNTRY_LISTS$COUNTRY_NAME,",")[[1]]
country_names_fr<-strsplit(conf$COUNTRY_LISTS$COUNTRY_NAME_FR,",")[[1]]
# DHS IMPORT----------------------
df_dhs <- tbl_df(read_csv(paste0(data_path,dhs_file)))%>%
mutate(
CharacteristicLabel = stringr::str_replace_all( # remove leading points (ex: "..Kanem") in labels
string = CharacteristicLabel,
pattern = "^\\.+", replacement = ""
)
)%>%
mutate(location=paste0(DHS_CountryCode,sep='-',CharacteristicLabel))%>%
mutate_if(is.character, factor, ordered = FALSE)%>%
mutate('iso3c' = countrycode(CountryName, 'country.name', 'iso3c'))
saveRDS(df_dhs,paste0(data_path,processed_folder,dhs_processed))
# sdg4 indicators IMPORT------------------------
sdg4indics<-tbl_df(read_csv(paste0(data_path,wdi_file)))%>%mutate_if(is.character, factor, ordered = FALSE)
saveRDS(sdg4indics,paste0(data_path,processed_folder,wdi_processed))
# ACLED IMPORT---------------------------
acled_tiwara <- read_excel(paste0(data_path,acled_file),col_types = c("numeric",
"text",
"numeric",
"date",
"numeric",
"numeric",
"guess",
"text",
"text",
"guess",
"text",
"text",
"guess",
"numeric",
"guess",
"guess",
"text",
"text",
"text",
"text",
"numeric",
"numeric",
"guess",
"text",
"guess",
"text",
"numeric",
"numeric")) %>%
rename_all(funs(tolower(.))) %>%
filter(country %in% c("Mali","Burkina Faso", "Mauritania","Chad","Niger")) %>%
filter(year>=2010) %>%
mutate(event_date = dmy(event_date)) %>%
mutate_if(is.character, factor, ordered = FALSE)
saveRDS(acled_tiwara,paste0(data_path,processed_folder,acled_processed))
# data for map --------------------------------------------------------------------
df_dhs_sf <- df_dhs %>%
filter(!is.na(Coordinates)) %>% # remove missing values
select(IndicatorId, CountryName, CharacteristicLabel, Value, Coordinates) %>%
distinct(IndicatorId, CharacteristicLabel,CountryName, .keep_all = TRUE) %>% # <<<---- deduplicated (you need one row by country at the end)
tidyr::spread(IndicatorId, Value) %>% # <<<---- here transpose data
mutate(
Coordinates = st_as_sfc(Coordinates) # convert to sf geometries
)%>%
st_as_sf()
saveRDS(df_dhs_sf,paste0(data_path,processed_folder,dhs_sf_processed))
#### Donnee INform Risk IMPORT#####@-----------------------------------
# Inform Indicators
INFORM_SAHEL<- read_excel(paste0(data_path,inform_file),
sheet = "INFORM SAHEL Sep 2018 (a-z)",
na = c("(0-10)","x","(0-50)","(0-100%)","()","(1-191)")) %>%
slice(-1L)%>%janitor::clean_names() #remove 1st row, which is simple the scale
saveRDS(INFORM_SAHEL,paste0(data_path,processed_folder,inform_processed))
sahel_tiwara <- country_names
inform_sahel_tiwara <- INFORM_SAHEL %>%filter(iso3 %in% iso3_cn)
## On récupère la fraicheur des datas pour chaque dataset
fraicheur_acled<-max(acled_tiwara$event_date)
fraicheur_inform<-str_match(pattern = " (\\d+)_",inform_file)[2]
fraicheur_sdg4<-max(sdg4indics$period)
fraicheur_dhs<-max(df_dhs$SurveyYear)
fraicheur_dhs_pays<-df_dhs %>%
group_by(CountryName) %>%
summarise(max(SurveyYear))
chaine_char_dhs<-"<ul>"
for (i in 1:nrow(fraicheur_dhs_pays)){
chaine<-paste0("<li>",as.character(fraicheur_dhs_pays[i,1] %>% pull(CountryName))," : ",fraicheur_dhs_pays[i,2],"</li>")
chaine_char_dhs<-paste0(chaine_char_dhs,chaine)
}
chaine_char_dhs<- paste0(chaine_char_dhs,"</ul>")
# Code clusters --------------------------------------------------------------------
## Import_Data
df_dhs_recents<-df_dhs %>%
slice(-starts_with("Population",T,as.character(Indicator))) %>%
select(Indicator,Value,SurveyYear,DHS_CountryCode,CountryName,CharacteristicLabel) %>%
group_by(Indicator,DHS_CountryCode,CharacteristicLabel,CountryName) %>%
arrange(desc(SurveyYear)) %>%
slice(1) %>%
ungroup() %>%
select(-SurveyYear) %>%
spread(Indicator,Value)
inform_data<-read_excel(paste0(data_path,inform_file),
sheet = "INFORM SAHEL Sep 2018 (a-z)",
na = c("(0-10)","x","(0-50)","(0-100%)","()","(1-191)")) %>%
slice(-1L)%>%janitor::clean_names() #remove 1st row, which is simple the scale
inform_sahel_tiwara <- inform_data %>%filter(iso3 %in% iso3_clust) %>%
mutate(admin1bis=case_when(admin1=="Boucle du Mouhoun" ~ "Boucle de Mouhoun",
admin1=="Centre Est" ~ "Centre-Est",
admin1=="Centre Nord" ~ "Centre-Nord",
admin1=="Centre Ouest" ~ "Centre-Ouest",
admin1=="Centre Sud" ~ "Centre-Sud",
admin1=="Centre" ~ "Centre (excluding Ouagadougou)",
admin1=="Segou" ~ "Ségou",
admin1=="Timbuktu" ~ "Tombouctou",
admin1=="Tillabery" ~ "Tillabéri",
admin1=="Barh el Ghazel" ~ "Barh El Gazal",
admin1=="Chari-Baguirmi" ~ "Chari Baguirmi",
admin1=="Guera"~"Guéra",
admin1=="Mayo-Kebbi Est" ~ "Mayo Kebbi Est",
admin1=="Mayo-Kebbi Ouest" ~ "Mayo Kebbi Ouest",
admin1=="Moyen-Chari" ~ "Moyen Chari",
admin1=="Ouaddai" ~ "Ouaddaï",
admin1=="Tandjile"~"Tandjilé",
T ~ admin1))
## Jointure DHS Inform
## Jointure sur les sous régions
df_etude<-df_dhs_recents %>%
left_join(inform_sahel_tiwara,by=c("CountryName"="country","CharacteristicLabel"="admin1bis")) %>%
filter(!is.na(admin1)) %>%
mutate(rname=paste(DHS_CountryCode,CharacteristicLabel))
## Sélection de variables -> fait à partir d'une matrice de corrélations, on supprimes des variables corrélées
df_etude<-df_etude %>%
mutate(children_u5=ifelse(is.na(children_u5),mean(df_etude$children_u5,na.rm=T),children_u5)) %>%
column_to_rownames("rname") %>%
select(-DHS_CountryCode,-CharacteristicLabel,-admin1,-iso3,-iso_admin1,-CountryName) %>%
select(-drr,-institutional,-lack_of_coping_capacity,-infrastructure,-`Men who are literate`,-vulnerable_groups,-natural,-human,-`Women who are literate`,-risk,-governance,
-`Gross secondary school attendance rate: Total`,-vulnerability,-communication,-recent_shocks,-political_violence,-development_deprivation,-`Gross primary school attendance rate: Total`)
## Normalisation
scaled_df<-scale(df_etude,center = T,scale=T)
## NbClust
result_nbclus<-NbClust(data = scaled_df,method="average")
## Kmeans k=4, basé sur le résultat de nbClust
kms<-kmeans(scaled_df,centers=4,iter.max = 50)
## On prend le rés de NbClust
df_etude<-df_etude %>% mutate(clust=result_nbclus$Best.partition)
## Polygones
poly_dhs<-df_dhs %>%
select(CharacteristicLabel,Coordinates) %>%
mutate(Coordinates=as.character(Coordinates)) %>%
distinct()
## Jointure avec les data inform puis mise en forme pour les cartes
df_clustered<-df_dhs_recents %>%
select(CountryName,CharacteristicLabel) %>%
left_join(inform_sahel_tiwara %>% select(country,admin1bis,admin1),by=c("CountryName"="country","CharacteristicLabel"="admin1bis")) %>%
filter(!is.na(admin1)) %>%
bind_cols(df_etude) %>%
mutate(clust=as.factor(clust)) %>%
left_join(poly_dhs,by=c("CharacteristicLabel")) %>% # <<<---- here transpose data
mutate(
Coordinates = st_as_sfc(Coordinates) # convert to sf geometries
)%>%
st_as_sf()
saveRDS(df_clustered,paste0(data_path,processed_folder,cluster_processed))
numeric_car_clust<-names(df_clustered)[sapply(df_clustered,class)=="numeric"]
### DATA Carte AFD projets
# Données de l'aide au développement de l'AFD -----------------------------
url <- "https://opendata.afd.fr/api/records/1.0/search/?dataset=donnees-aide-au-developpement-afd&rows=5000&facet=etat_du_projet&facet=pays_de_realisation&facet=region&facet=libelle_cicid"
connecAfd <- fromJSON(url)
afddata<-tbl_df(connecAfd$records$fields)
afddata_sahel<-afddata %>%
filter(pays_de_realisation %in% country_names_fr)
saveRDS(afddata_sahel,paste0(data_path,processed_folder,afd_processed))
## Icone
Icon <- makeIcon(
iconUrl = "https://i.vimeocdn.com/portrait/17890243_640x640",
iconWidth = 35*215/230, iconHeight = 35,
iconAnchorX = 35*215/230/2, iconAnchorY = 35) # retrieve AFD logo to add on the map at each Beneficiaire location
## Dataframe coordonnées
AFDCoords_sahel <- data.frame(
lat = afddata_sahel$latitude,
lng = afddata_sahel$longitude) # prepare a dataframe with GPS coordinates
## Libellés, bénéficiares
BeneficiaireName <- paste("<a href='", "'>", afddata_sahel$libelle_beneficiaire_primaire,"</a>" ,sep = "")
AFD_Desc<-paste(afddata_sahel$libelle_cicid,afddata_sahel$description_du_projet,sep=" :<br/>")
## Data Financial Index ---------------------------------------
classic_labels<-c("yes","no","don't know", "refuse")
unclassic_labels<-c("no","yes")
## Data financial index
data_index<-read_csv(paste0(data_path,micro_world_file)) %>%
filter(economycode %in% iso3_cn) %>%
mutate(classe_age=cut(age,breaks=c(0,25,40,60,100),labels = c("0-25","25-40","40-60","60-100"))) %>%
rename("has_db_card"="fin2","has_cd_card"="fin7","save_for_business"="fin15","save_for_old_age"="fin16","has_national_id"="fin48","gender"="female","work"="emp_in") %>%
mutate(gender=factor(gender,labels=c("Male","Female")),
education=factor(educ,labels=c("primary-","secondary","tertiary+","don't know","ref")),
income_quantile=factor(inc_q,labels=c("poorest 20%","second 20%","middle 20%","fourth 20%","richest 20%")),
work=factor(work,labels=c("out of workforce","in the workforce")),
has_db_card=factor(has_db_card,labels=classic_labels),
has_cd_card=factor(has_cd_card,labels=classic_labels),
save_for_business=factor(save_for_business,labels=classic_labels),
save_for_old_age=factor(save_for_old_age,labels=classic_labels),
mobileowner=factor(mobileowner,labels=classic_labels),
has_national_id=factor(has_national_id,labels=classic_labels),
account=factor(account,labels=unclassic_labels),
saved=factor(saved,labels=unclassic_labels)
)
saveRDS(data_index,paste0(data_path,processed_folder,micro_world_processed))
## Variables catégorielles
data_index_cate<-c("gender","education","work","classe_age","income_quantile")
## Variables numériques
data_index_rep<-c("has_db_card","has_cd_card","save_for_business","save_for_old_age","mobileowner","has_national_id","account","saved")
##-------get geodata on Sahel states
# One-off Natural Earth extraction kept for reference; the shapefile
# produced by it is read from disk below.
#sahel <- ne_states(country=sahel_tiwara, returnclass = "sf") # filter on states in target countries
#st_write(st_as_sf(sahel), './inputs/sahel.shp')
sahel <- st_read(paste0(data_path,sahel_shapefile))
# Mauritania (plus Sila) polygons come from the Natural Earth shapefile...
sahel_tiwara_geo_mr <- left_join(
  y = sahel %>%select(name, geometry) %>% mutate(name=as.character(name)) %>% rename(CharacteristicLabel=name,Coordinates=geometry),
  x = inform_sahel_tiwara %>% filter(country == "Mauritania" | admin1=="Sila"),
  by = c("admin1" = "CharacteristicLabel")
) %>%
  st_as_sf()
# ...all remaining admin1 units are matched against the DHS polygons.
# The case_when below reconciles spelling differences between the INFORM
# names and the DHS CharacteristicLabel names (accents, hyphens, merges).
sahel_tiwara_geo_else <- left_join(
  y = poly_dhs %>% mutate(CharacteristicLabel=as.character(CharacteristicLabel)),
  x = inform_sahel_tiwara %>%
    mutate(admin1bis=case_when(admin1=="Boucle du Mouhoun" ~ "Boucle de Mouhoun",
                               admin1=="Centre Est" ~ "Centre-Est",
                               admin1=="Centre Nord" ~ "Centre-Nord",
                               admin1=="Centre Ouest" ~ "Centre-Ouest",
                               admin1=="Centre Sud" ~ "Centre-Sud",
                               admin1=="Centre" ~ "Centre (excluding Ouagadougou)",
                               admin1=="Segou" ~ "Ségou",
                               admin1=="Timbuktu" ~ "Tombouctou",
                               admin1=="Tillabery" ~ "Tillabéri",
                               admin1=="Barh el Ghazel" ~ "Barh El Gazal",
                               admin1=="Chari-Baguirmi" ~ "Chari Baguirmi",
                               admin1=="Guera"~"Guéra",
                               admin1=="Mayo-Kebbi Est" ~ "Mayo Kebbi Est",
                               admin1=="Mayo-Kebbi Ouest" ~ "Mayo Kebbi Ouest",
                               admin1=="Moyen-Chari" ~ "Moyen Chari",
                               admin1=="Ouaddai" ~ "Ouaddaï",
                               admin1=="Tandjile"~"Tandjilé",
                               admin1=="Borkou" ~ "Borkou/Tibesti",
                               admin1=="Ennedi Est" ~ "Ennedi Est/Ennedi Ouest",
                               T ~ admin1)),
  by = c("admin1bis" = "CharacteristicLabel")
) %>%
  filter(! is.na(Coordinates)) %>%
  mutate(Coordinates=st_as_sfc(Coordinates)) %>%
  st_as_sf()
# Combine the two geographies into one sf object and persist it.
sahel_tiwara_geo<-bind_rows(sahel_tiwara_geo_else,sahel_tiwara_geo_mr)
saveRDS(sahel_tiwara_geo,paste0(data_path,processed_folder,sahel_tiwara_geo_processed))
### HDX Segment (UNHCR refugee flows) ----------
# NOTE(review): the result of locale("fr") is discarded -- readr locales
# only take effect when passed to a read_* call; confirm intent.
locale("fr")
hdx<-read_csv(paste0(data_path,unhcr_file),skip = 3) %>%
  rename(arrivee=`Country / territory of asylum/residence`) %>%
  # strip parenthesised qualifiers from country names before geocoding
  mutate(Origin2=gsub("\\s?\\(.+\\)","",Origin),arrivee2=gsub("\\s?\\(.+\\)","",arrivee))%>%
  mutate(Year=as.numeric(Year)) %>%
  mutate(Origin=iconv(Origin,from="latin1",to="utf8"),arrivee=iconv(arrivee,from="latin1",to="utf8"))
countries_coord<-read_delim(paste0(data_path,countries_file),delim = "\t")
# Attach origin ("debt") and destination ("fin") coordinates by country name.
hdx_geo<-hdx %>%
  left_join(countries_coord,by=c("Origin2" = "name")) %>%
  rename(lat_debt=latitude,long_debt=longitude) %>%
  left_join(countries_coord,by=c("arrivee2" = "name")) %>%
  rename(lat_fin=latitude,long_fin=longitude) %>%
  select(-country.y,-country.x,-arrivee2,-Origin2)
# Keep only flows with complete coordinates on both ends.
hdx_geo_2<-hdx_geo %>%
  filter(! (is.na(lat_debt) | is.na(lat_fin) | is.na(long_debt) | is.na(long_fin)))
saveRDS(hdx_geo_2,paste0(data_path,processed_folder,unhcr_processed))
## ILO - Youth employment data ----------------------------
# Fetch one DBnomics dataset for several countries and stack the results.
# 'mask_end' is appended to each country code to build the series mask.
# NOTE(review): on error the handler prints a message and the 'finally'
# clause still returns 'res' (initialised to an empty data.frame before the
# rdb() call). return() inside 'finally' is fragile -- confirm intent.
# Note: the function's value is the final assignment to data_rdb.
get_indic_rdb_one_db<-function(provider,dataset,countries,mask_end){
  sahel_masks<-paste(countries,mask_end,sep=".")
  data_rdb<-bind_rows(lapply(sahel_masks,function(x) {tryCatch({res<-data.frame()
  res<-rdb(provider, dataset, mask = x)},
  error=function(cond){print("DBNOMICS - ILO : Error in data recup")},
  finally = {return(res)}
  )})) %>%
    select(dataset_name,original_period,ref_area,series_name,value)
}
# Snapshot youth-employment indicators, one group per country/dataset.
work_data<-bind_rows(lapply(databases_work, function(x) {get_indic_rdb_one_db(provider = provider_work,dataset = x,countries = countries_work,mask_end = masques_work)})) %>%
  group_by(ref_area,dataset_name) %>%
  arrange(original_period)
saveRDS(work_data,paste0(data_path,processed_folder,work_data_processed))
# Time-series version of the same indicators (separate dataset list).
work_data_ts_prep<-bind_rows(lapply(databases_work_ts, function(x) {get_indic_rdb_one_db(provider = provider_work,dataset = x,countries = countries_work,mask_end = masques_work)})) %>%
  group_by(ref_area,dataset_name) %>%
  arrange(original_period)%>%
  ungroup()
saveRDS(work_data_ts_prep,paste0(data_path,processed_folder,work_data_ts_prep_processed))
# Display name taken from the first series returned.
work_data_ts_name<-work_data_ts_prep$dataset_name[1]
# Wide format: one column per country, rows ordered by period.
work_data_ts<-work_data_ts_prep %>%
  select(-series_name,-dataset_name) %>%
  spread(ref_area,value) %>%
  arrange(original_period)
## Esquisse Country Dataset ================
### Country based information based on most recent data
## ACLED: full geographic coverage available for this source.
## For each country/event type, keep the fatalities of the most recent year.
ACLEDTOMERGEC<-acled_tiwara %>%
  select(country,year,event_type,fatalities) %>%
  group_by(country,year,event_type) %>%
  summarise(fatalities=sum(fatalities)) %>%
  ungroup() %>% group_by(country,event_type) %>%
  arrange(desc(year)) %>%
  slice(1) %>%
  select(-year) %>%
  ungroup() %>%
  mutate(event_type=paste0("ACLED:Fatalities - ",event_type,"- Most recent year")) %>%
  rename(value=fatalities,var=event_type)
## AFD: no geographic precision finer than country is available.
## Most-recent-year disbursements (euros) per country and sector.
AFDDATATOMERGEC<-afddata_sahel %>%
  mutate(event_date=as.Date(date_de_1er_versement_projet),year=year(event_date)) %>%
  select(pays_de_realisation,year,libelle_secteur_economique_cad_5,versements_euro) %>%
  # translate AFD's upper-case French country names to English
  mutate(pays_de_realisation=case_when(pays_de_realisation=="BURKINA FASO" ~ "Burkina Faso",
                                       pays_de_realisation=="MAURITANIE" ~ "Mauritania",
                                       pays_de_realisation=="MALI" ~ "Mali",
                                       pays_de_realisation=="NIGER" ~ "Niger",
                                       pays_de_realisation=="TCHAD" ~ "Chad")) %>%
  rename(country=pays_de_realisation,event_type=libelle_secteur_economique_cad_5) %>%
  group_by(country,year,event_type) %>%
  summarise(versements_euro=sum(versements_euro,na.rm=T)) %>%
  ungroup() %>% group_by(country,event_type) %>%
  arrange(desc(year)) %>%
  slice(1) %>%
  select(-year) %>%
  ungroup() %>%
  mutate(event_type=paste0("AFD: Versements euros - ",event_type,"- Most recent year")) %>%
  rename(value=versements_euro,var=event_type)
## Global Findex: keyed by year and country; keep each variable's latest year.
# NOTE(review): hard-coded path "datas/..." bypasses the data_path convention
# used everywhere else in this script -- confirm it resolves at runtime.
GLOBALFINDEXTOMERGEC<-read_excel("datas/Global Findex Database.xlsx",sheet = "Data") %>%
  rename(year=X__1,country=X__3) %>%
  select(-X__2,-X__4,-X__5) %>%
  filter(country %in% country_names) %>%
  reshape2::melt(id=c("country","year")) %>%
  group_by(country,variable) %>%
  top_n(1,year) %>%
  ungroup() %>%
  select(-year) %>%
  rename(var=variable) %>%
  mutate(var=paste0("GLOBAL FINDEX : ",var))
## DHS: keyed by year/country/admin1; latest value per region, then
## summed across regions to get a country figure.
DHSTOMERGEC<-df_dhs %>%
  rename(admin1=CharacteristicLabel,country=CountryName,year=SurveyYear) %>%
  mutate(admin1=as.character(admin1),country=as.character(country)) %>%
  select(country,year,admin1,IndicatorId,Value) %>%
  filter(!is.na(Value)) %>%
  group_by(country,admin1,IndicatorId) %>%
  arrange(desc(year)) %>%
  slice(1) %>%
  ungroup() %>%
  select(-year,-admin1) %>%
  group_by(country,IndicatorId) %>%
  summarise(Value=sum(Value)) %>%
  rename(var=IndicatorId,value=Value) %>%
  mutate(var=paste0("DHS SURVEY : ",var))
## INFORM: keyed by country/admin1; averaged over admin1 to country level.
INFORMTOMERGEC<- INFORM_SAHEL %>%
  select(-iso3,-iso_admin1,-admin1) %>%
  reshape2::melt(id="country") %>%
  rename(var=variable) %>%
  filter(country %in% country_names) %>%
  group_by(country,var) %>%
  summarise(value=mean(value,na.rm=T)) %>%
  ungroup() %>%
  mutate(var=paste0("INFORM RISK : ",var))
## SDG4 indicators: keyed by country/year; keep the most recent year.
SDG4TOMERGEC<- sdg4indics %>%
  filter(!is.na(value)) %>%
  mutate(year=year(period)) %>%
  select(country,year,indicator_label,value) %>%
  group_by(country,indicator_label) %>%
  arrange(desc(year)) %>%
  slice(1) %>%
  select(-year) %>%
  ungroup() %>%
  mutate(indicator_label=paste0("SDG4 - ",indicator_label,"- Most recent year")) %>%
  rename(var=indicator_label) %>%
  # NOTE(review): "NR" mapped to Niger although Niger's ISO2 is "NE" --
  # confirm the source really uses "NR".
  mutate(country=case_when(country=="BF" ~ "Burkina Faso",
                           country=="MR" ~ "Mauritania",
                           country=="ML" ~ "Mali",
                           country=="NR" ~ "Niger",
                           country=="TD" ~ "Chad"))
## ILO data: keyed by ISO3 country/year; keep the most recent year.
ILOTOMERGEC<-work_data %>%
  rename(country=ref_area) %>%
  mutate(year=original_period) %>%
  select(country,year,dataset_name,value) %>%
  group_by(country,dataset_name) %>%
  arrange(desc(year)) %>%
  slice(1) %>%
  select(-year) %>%
  rename(var=dataset_name) %>%
  ungroup() %>%
  mutate(country=case_when(country=="BFA" ~ "Burkina Faso",
                           country=="MRT" ~ "Mauritania",
                           country=="MLI" ~ "Mali",
                           country=="NER" ~ "Niger",
                           country=="TCD" ~ "Chad")) %>%
  mutate(var=paste0("ILO : ",gsub("Key Indicators / Youth / ","",var)))
# Assemble the country-level dataset for the Esquisse explorer:
# stack all (country, var, value) sources, then pivot to one column per var.
esquisse_country<-bind_rows(ACLEDTOMERGEC,AFDDATATOMERGEC,GLOBALFINDEXTOMERGEC,DHSTOMERGEC,INFORMTOMERGEC,SDG4TOMERGEC,ILOTOMERGEC) %>%
  filter(!(is.na(country)|is.na(value))) %>%
  spread(key=c("var"),value = value)
saveRDS(esquisse_country,paste0(data_path,processed_folder,esquisse_processed))
## Boxplot by cluster: DHS regions joined to the study's cluster assignments.
# NOTE(review): bind_cols(df_etude) assumes df_etude rows are in the same
# order as the filtered DHS rows -- confirm alignment upstream.
boxplot_cluster<-df_dhs_recents %>%
  select(CountryName,CharacteristicLabel) %>%
  left_join(inform_sahel_tiwara %>% select(country,admin1bis,admin1),by=c("CountryName"="country","CharacteristicLabel"="admin1bis")) %>%
  filter(!is.na(admin1)) %>%
  bind_cols(df_etude)
saveRDS(boxplot_cluster,paste0(data_path,processed_folder,boxplot_processed))
|
7028a717d9e070b4a9cf1d205cc6a55c2adee63d
|
000a615bc4e146c9c47e8b1fe158df2fe3ae0996
|
/Cute3_1/RandomForest/RFWithoutSquareTransform.r
|
bec8b1d2193a70db49f2cb0f38d295a1c80a10f7
|
[] |
no_license
|
SheikMBasha/MLLearning
|
4f10da4a354dd897b61f82b1b556d29b00942737
|
525a28875745dd1090cd85da20df8f5bfe3951cc
|
refs/heads/master
| 2021-09-28T17:42:19.309258
| 2018-02-25T18:43:16
| 2018-02-25T18:43:16
| 115,577,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,859
|
r
|
RFWithoutSquareTransform.r
|
mae mse rmse mape
0.15388315 0.06503103 0.25501183 0.91108427
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.2203253 0.1683485 0.4103029 1.1221224
########################
10,10
mae mse rmse mape
0.11273081 0.03794958 0.19480653 0.61562160
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.2042327 0.1393851 0.3733431 1.0606959
>
#####################
10,20
> regr.eval(train_target, pred_train)
mae mse rmse mape
0.08682808 0.02376715 0.15416599 0.51802800
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.17853869 0.09792432 0.31292861 0.96759038
>
###################
10,30
mae mse rmse mape
0.07992567 0.02003510 0.14154539 0.49486268
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.17292591 0.09113586 0.30188716 0.97715534
>
###########################################
20,30
> regr.eval(train_target, pred_train)
mae mse rmse mape
0.07594644 0.01772898 0.13315020 0.47331585
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.16630178 0.08625397 0.29369027 0.94024796
###########################################
30,30
mae mse rmse mape
0.07594644 0.01772898 0.13315020 0.47331585
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.16630178 0.08625397 0.29369027 0.94024796
###############################################
50,40
> regr.eval(train_target, pred_train)
mae mse rmse mape
0.07012095 0.01512949 0.12300199 0.43689128
> regr.eval(test_target, pred_test)
mae mse rmse mape
0.15946579 0.07976437 0.28242586 0.89900261
|
1be9d59f5ae9c2283e46ca9714f919febc3acdd0
|
424c7098a182cb3f67f334765a225d0531a0d5a5
|
/r/man/Scalar.Rd
|
21e04c12e088aa660c7d2ea086968739684e82de
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"JSON",
"OpenSSL",
"CC-BY-3.0",
"NTP",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"LLVM-exception",
"Zlib",
"CC-BY-4.0",
"LicenseRef-scancode-protobuf",
"ZPL-2.1",
"BSL-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
romainfrancois/arrow
|
08b7d1ae810438c8507c50ba33a9cabc35b4ee74
|
8cebc4948ab5c5792c20a3f463e2043e01c49828
|
refs/heads/master
| 2022-03-12T02:08:17.793883
| 2021-12-05T06:19:46
| 2021-12-05T06:19:46
| 124,081,421
| 16
| 3
|
Apache-2.0
| 2018-03-06T13:21:29
| 2018-03-06T13:21:28
| null |
UTF-8
|
R
| false
| true
| 1,229
|
rd
|
Scalar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scalar.R
\docType{class}
\name{Scalar}
\alias{Scalar}
\title{Arrow scalars}
\description{
A \code{Scalar} holds a single value of an Arrow type.
}
\section{Methods}{
\verb{$ToString()}: convert to a string
\verb{$as_vector()}: convert to an R vector
\verb{$as_array()}: convert to an Arrow \code{Array}
\verb{$Equals(other)}: is this Scalar equal to \code{other}
\verb{$ApproxEquals(other)}: is this Scalar approximately equal to \code{other}
\verb{$is_valid}: is this Scalar valid
\verb{$null_count}: number of invalid values - 1 or 0
\verb{$type}: Scalar type
}
\examples{
\dontshow{if (arrow_available()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
Scalar$create(pi)
Scalar$create(404)
# If you pass a vector into Scalar$create, you get a list containing your items
Scalar$create(c(1, 2, 3))
# Comparisons
my_scalar <- Scalar$create(99)
my_scalar$ApproxEquals(Scalar$create(99.00001)) # FALSE
my_scalar$ApproxEquals(Scalar$create(99.000009)) # TRUE
my_scalar$Equals(Scalar$create(99.000009)) # FALSE
my_scalar$Equals(Scalar$create(99L)) # FALSE (types don't match)
my_scalar$ToString()
\dontshow{\}) # examplesIf}
}
|
2020e406ae283261260b9cd2487d75aebca12bea
|
0e92c0b362b230341f9cc31207df8139dbc3ac18
|
/R/writeAllGDAL.R
|
9e1999511d0aebbfe52755cdc6c756df221bd0b6
|
[] |
no_license
|
cran/raster
|
b08740e15a19ad3af5e0ec128d656853e3f4d3c6
|
dec20262815cf92b3124e8973aeb9ccf1a1a2fda
|
refs/heads/master
| 2023-07-09T20:03:45.126382
| 2023-07-04T10:40:02
| 2023-07-04T10:40:02
| 17,699,044
| 29
| 35
| null | 2015-12-05T19:06:17
| 2014-03-13T06:02:19
|
R
|
UTF-8
|
R
| false
| false
| 1,053
|
r
|
writeAllGDAL.R
|
# Author: Robert J. Hijmans
# Date : January 2009
# Version 0.9
# Licence GPL v3

# Write every cell value of a Raster* object to a GDAL file in a single pass.
# Layer statistics are not computed by the active code path (the legacy
# statistics code was removed), so NA placeholders are forwarded to
# .stopGDALwriting. Note: 'setStatistics' is currently unused.
.writeGDALall <- function(x, filename, options=NULL, setStatistics=TRUE, ...) {
	# placeholder (mean, sd) statistics
	stat <- cbind(NA, NA)
	# open the GDAL target, stream all values starting at row 1, then finalise
	gdal_target <- .startGDALwriting(x, filename, gdal=options, ...)
	gdal_target <- writeValues(gdal_target, getValues(x), start=1)
	.stopGDALwriting(gdal_target, stat)
}
|
fa35d4f0f0ee530b4a80c90581e10a1373875864
|
6470ce550c26c7cd13245dab8b84623534e78655
|
/第10章 网络关系型图表/图10-5-1_蜂巢网络图.R
|
8ed007f34eca2fa3d051bc346190c12706fde20e
|
[] |
no_license
|
EasyChart/Beautiful-Visualization-with-R
|
0d73ed4ee1e1855e33048330294335fbad6d2a25
|
27990b9349b697ec4336d3e72bae5f3a08d5f5ea
|
refs/heads/master
| 2023-06-10T07:36:29.289034
| 2023-06-05T03:48:59
| 2023-06-05T03:48:59
| 189,740,776
| 687
| 446
| null | 2020-02-26T08:07:21
| 2019-06-01T14:14:10
|
PostScript
|
UTF-8
|
R
| false
| false
| 1,992
|
r
|
图10-5-1_蜂巢网络图.R
|
# Produced by the EasyShu team; more articles on the WeChat account [EasyShu].
# For corrections or further study, contact WeChat: EasyCharts
#reference:
#http://www.hiveplot.net/
#https://www.data-imaginist.com/tags/ggraph/
#http://www.sthda.com/english/articles/33-social-network-analysis/135-network-visualization-essentials-in-r/
library(ggraph)
library(igraph)
library(dplyr)
library(wesanderson)
# Directed friendship network from ggraph's built-in 'highschool' data.
graph <- graph_from_data_frame(highschool)
# Bucket nodes by in-degree (how many classmates named this node a friend).
V(graph)$friends <- degree(graph, mode = 'in')
V(graph)$friends <- ifelse(V(graph)$friends < 5, 'Few',
                           ifelse(V(graph)$friends >= 15, 'Many', 'Medium'))
V(graph)$count <- degree(graph, mode = 'in')
mycolor <- wes_palette("Darjeeling1", length(unique((V(graph)$friends))), type = "continuous")
# Hive plot 1: one axis per 'friends' bucket, nodes coloured by bucket.
ggraph(graph, 'hive', axis = 'friends',sort.by ='count') +
  geom_edge_hive(colour = 'black', edge_alpha = 0.3) +
  geom_axis_hive(color='black', size = 1, label = TRUE) +
  geom_node_point(aes(size=count,fill=friends),shape=21,colour = 'black',stroke=0.2, alpha = 0.95)+
  scale_size_continuous(range=c(0.5,8)) +
  scale_fill_manual(values=mycolor)+
  guides(fill='none')+
  coord_fixed()+
  theme_minimal() +
  theme(
    panel.grid = element_blank(),
    axis.line = element_blank(),
    axis.ticks =element_blank(),
    axis.text =element_blank(),
    axis.title = element_blank()
  )
# Hive plot 2: same layout, edges coloured by survey year instead.
ggraph(graph, 'hive', axis = 'friends',sort.by ='count') +
  geom_edge_hive(aes(colour = factor(year)), edge_alpha = 0.5) +
  geom_axis_hive(color='black', size = 1, label = TRUE) +
  geom_node_point(aes(size=count),fill='gray80',shape=21,colour = 'black',stroke=0.2, alpha = 0.95)+
  scale_size_continuous(range=c(0.5,8)) +
  scale_edge_colour_manual(values=mycolor[c(1,3)])+
  coord_fixed()+
  theme_minimal() +
  theme(
    legend.position="right",
    panel.grid = element_blank(),
    axis.line = element_blank(),
    axis.ticks =element_blank(),
    axis.text =element_blank(),
    axis.title = element_blank())
|
ee0394c47cef94e91651b20c8c4f52d85adb7ca6
|
f81ac43a1d02013a9cb9eebc2a7d92da4cae9169
|
/R/significance_means.R
|
6f5a7b009be22e56ae9e14fce38f59a05b2c2f3c
|
[] |
no_license
|
gdemin/expss
|
67d7df59bd4dad2287f49403741840598e01f4a6
|
668d7bace676b555cb34d5e0d633fad516c0f19b
|
refs/heads/master
| 2023-08-31T03:27:40.220828
| 2023-07-16T21:41:53
| 2023-07-16T21:41:53
| 31,271,628
| 83
| 15
| null | 2022-11-02T18:53:17
| 2015-02-24T17:16:42
|
R
|
UTF-8
|
R
| false
| false
| 18,586
|
r
|
significance_means.R
|
# Row-selector templates: a means etable stores each statistic as consecutive
# row triplets (mean, sd, valid N). These length-3 logical patterns are
# recycled with rep_len() to pick out each kind of row.
MEANS_IND = c(TRUE, FALSE, FALSE)
SD_IND = c(FALSE, TRUE, FALSE)
N_IND = c(FALSE, FALSE, TRUE)
#' @rdname significance
#' @export
significance_means = function(x,
                              sig_level = 0.05,
                              delta_means = 0,
                              min_base = 2,
                              compare_type ="subtable",
                              bonferroni = FALSE,
                              subtable_marks = c("greater", "both", "less"),
                              inequality_sign = "both" %in% subtable_marks,
                              sig_labels = LETTERS,
                              sig_labels_previous_column = c("v", "^"),
                              sig_labels_first_column = c("-", "+"),
                              keep = c("means", "sd", "bases"),
                              var_equal = FALSE,
                              digits = get_expss_digits()
){
    # S3 generic: dispatches on class(x); see significance_means.etable.
    UseMethod("significance_means")
}
#' @export
significance_means.etable = function(x,
sig_level = 0.05,
delta_means = 0,
min_base = 2,
compare_type ="subtable",
bonferroni = FALSE,
subtable_marks = c("greater", "both", "less"),
inequality_sign = "both" %in% subtable_marks,
sig_labels = LETTERS,
sig_labels_previous_column = c("v", "^"),
sig_labels_first_column = c("-", "+"),
keep = c("means", "sd", "bases"),
var_equal = FALSE,
digits = get_expss_digits()
){
stopif((NROW(x) %% 3 !=0) || NROW(x) == 0,
"Incorrect table. Table should have rows with means, standard deviations and valid N.")
compare_type = match.arg(compare_type, choices = COMPARE_TYPE, several.ok = TRUE)
stopif(sum(compare_type %in% c("first_column", "adjusted_first_column"))>1,
"mutually exclusive compare types in significance testing: 'first_column' and 'adjusted_first_column'.")
subtable_marks = match.arg(subtable_marks)
mark_greater = subtable_marks %in% c("greater", "both")
mark_less = subtable_marks %in% c("both", "less")
keep = match.arg(keep, KEEP_STAT, several.ok = TRUE)
keep_means = "means" %in% keep
keep_sd = "sd" %in% keep
keep_bases = "bases" %in% keep
groups = header_groups(colnames(x))
if("subtable" %in% compare_type){
if(!is.null(sig_labels)){
x = add_sig_labels(x, sig_labels = sig_labels)
}
all_column_labels = get_category_labels(colnames(x))
}
# some types (data.table) doesn't support recycling of logicals
means_ind = rep_len(MEANS_IND, nrow(x))
sd_ind = rep_len(SD_IND, nrow(x))
n_ind = rep_len(N_IND, nrow(x))
all_means = x[means_ind, ,drop = FALSE]
all_sds = x[sd_ind, ,drop = FALSE]
all_ns = x[N_IND, ,drop = FALSE]
recode(all_ns) = lt(min_base) ~ NA
sig_table = x[means_ind, ]
sig_table[, -1] = ""
empty_sig_table = sig_table
if(any(c("first_column", "adjusted_first_column") %in% compare_type)){
sig_table = section_sig_first_column_means(sig_section = sig_table,
curr_means = all_means,
curr_sds = all_sds,
curr_ns = all_ns,
groups = groups,
sig_labels_first_column = sig_labels_first_column,
sig_level = sig_level,
delta_means = delta_means,
bonferroni = bonferroni,
var_equal = var_equal,
adjust_common_base = "adjusted_first_column" %in% compare_type)
}
if(any(c("previous_column") %in% compare_type)){
sig_table = section_sig_previous_column_means(sig_section = sig_table,
curr_means = all_means,
curr_sds = all_sds,
curr_ns = all_ns,
groups = groups,
sig_labels_previous_column = sig_labels_previous_column,
sig_level = sig_level,
delta_means = delta_means,
bonferroni = bonferroni,
var_equal = var_equal)
}
if("subtable" %in% compare_type){
prepend = ""
if(mark_greater){
if(inequality_sign) {
prepend = ">"
}
subtable_sig_table = section_sig_means(sig_section = empty_sig_table,
curr_means = all_means,
curr_sds = all_sds,
curr_ns = all_ns,
groups = groups,
all_column_labels = all_column_labels,
sig_level = sig_level,
delta_means = delta_means,
bonferroni = bonferroni,
mark_greater = TRUE,
prepend = prepend,
var_equal = var_equal)
for(i in seq_along(sig_table)[-1]){
sig_table[[i]] = paste_non_empty(sig_table[[i]],
subtable_sig_table[[i]],
sep = " "
)
}
}
if(mark_less){
if(inequality_sign) {
prepend = "<"
}
subtable_sig_table = section_sig_means(sig_section = empty_sig_table,
curr_means = all_means,
curr_sds = all_sds,
curr_ns = all_ns,
groups = groups,
all_column_labels = all_column_labels,
sig_level = sig_level,
delta_means = delta_means,
bonferroni = bonferroni,
mark_greater = FALSE,
prepend = prepend,
var_equal = var_equal)
for(i in seq_along(sig_table)[-1]){
sig_table[[i]] = paste_non_empty(sig_table[[i]],
subtable_sig_table[[i]],
sep = " "
)
}
}
}
x = round_dataframe(x, digits = digits)
sig_table_with_rows = x
sig_table_with_rows[,-1] = ""
sig_table_with_rows[means_ind, -1] = sig_table[, -1, drop = FALSE]
if(keep_means){
x[, -1] = format_to_character(x[, -1], digits = digits)
x[, -1] = paste_df_non_empty(
x[, -1, drop = FALSE],
sig_table_with_rows[, -1, drop = FALSE],
sep = " "
)
} else {
x[means_ind, -1] = sig_table_with_rows[means_ind, -1, drop = FALSE]
}
x[means_ind | (keep_sd & sd_ind) | (keep_bases & n_ind), ]
# class(x) = union("etable", class(x))
# x
}
########################
# Pairwise subtable comparisons of means. For every pair of columns inside
# each header group, compare_means() is run row-wise; when a column's mean is
# significantly on the requested side (mark_greater) and the difference
# exceeds delta_means, the other column's label is appended to its cell.
section_sig_means = function(sig_section,
                             curr_means,
                             curr_sds,
                             curr_ns,
                             groups,
                             all_column_labels,
                             sig_level,
                             delta_means,
                             bonferroni,
                             mark_greater,
                             prepend,
                             var_equal) {
    for(each_group in groups){
        if(length(each_group)>1){
            if(bonferroni) {
                comparable_values = !(is.na(curr_means[,each_group, drop = FALSE]) |
                                          is.na(curr_sds[,each_group, drop = FALSE]) |
                                          is.na(curr_ns[,each_group, drop = FALSE]))
                # count number of comaprisons
                valid_values_in_row = rowSums(comparable_values, na.rm = TRUE)
                number_of_comparisons_in_row = valid_values_in_row*(valid_values_in_row-1)/2
                number_of_comparisons_in_row[number_of_comparisons_in_row<0] = 0
                bonferroni_coef = number_of_comparisons_in_row #sum(number_of_comparisons_in_row, na.rm = TRUE)
                bonferroni_coef[bonferroni_coef==0] = 1
            } else {
                bonferroni_coef = 1
            }
            # all unordered column pairs (col1, col2) within the group
            for(col1 in each_group[-length(each_group)]){
                mean1 = curr_means[[col1]]
                sd1 = curr_sds[[col1]]
                n1 = curr_ns[[col1]]
                for(col2 in (col1 + 1):each_group[length(each_group)]){
                    mean2 = curr_means[[col2]]
                    sd2 = curr_sds[[col2]]
                    n2 = curr_ns[[col2]]
                    pval = compare_means(mean1 = mean1,
                                         mean2 = mean2,
                                         sd1 = sd1,
                                         sd2 = sd2,
                                         base1 = n1,
                                         base2 = n2,
                                         common_base = 0,
                                         var_equal = var_equal
                    )
                    # untestable cells (NA p-value) are treated as non-significant
                    if_na(pval) = 1
                    pval = pmin(pval*bonferroni_coef, 1)
                    if(mark_greater) {
                        comparison = mean1 > mean2
                    } else {
                        comparison = mean2 > mean1
                    }
                    delta = abs(mean1 - mean2)
                    # append the counterpart's label on the winning side
                    sig_section[[col1]] = ifelse(delta>delta_means &
                                                     comparison &
                                                     pval<sig_level,
                                                 paste_non_empty(sig_section[[col1]],
                                                                 all_column_labels[[col2]],
                                                                 sep = " "),
                                                 sig_section[[col1]]
                    )
                    sig_section[[col2]] = ifelse(delta>delta_means &
                                                     !comparison &
                                                     pval<sig_level,
                                                 paste_non_empty(sig_section[[col2]],
                                                                 all_column_labels[[col1]],
                                                                 sep = " "),
                                                 sig_section[[col2]]
                    )
                }
            }
        }
    }
    # optionally prefix every non-empty mark with the inequality sign
    if(prepend!=""){
        recode(sig_section[,-1]) = neq("") ~ function(x) paste(prepend, x)
    }
    sig_section
}
########################
# Compare each column's means with the immediately preceding column in the
# same header group; mark the current column with sig_labels_previous_column
# ([1] = previous is greater, [2] = previous is smaller).
section_sig_previous_column_means = function(sig_section,
                                             curr_means,
                                             curr_sds,
                                             curr_ns,
                                             groups,
                                             sig_labels_previous_column,
                                             sig_level,
                                             delta_means,
                                             bonferroni,
                                             var_equal) {
    for(each_group in groups){
        if(length(each_group)>1){
            # col1 - current column
            # col2 - previous column
            if(bonferroni) {
                comparable_values = !(is.na(curr_means[,each_group, drop = FALSE]) |
                                          is.na(curr_sds[,each_group, drop = FALSE]) |
                                          is.na(curr_ns[,each_group, drop = FALSE]))
                # count number of comparisons
                number_of_comparisons_in_row = 0
                for(col1 in seq_len(ncol(comparable_values))[-1]){
                    col2 = col1 - 1
                    number_of_comparisons_in_row = number_of_comparisons_in_row +
                        (comparable_values[ ,col2] & comparable_values[ ,col1])
                }
                bonferroni_coef = number_of_comparisons_in_row #sum(number_of_comparisons_in_row, na.rm = TRUE)
                bonferroni_coef[bonferroni_coef==0] = 1
            } else {
                bonferroni_coef = 1
            }
            for(col1 in each_group[-1]){
                col2 = col1 - 1
                mean1 = curr_means[[col1]]
                sd1 = curr_sds[[col1]]
                n1 = curr_ns[[col1]]
                mean2 = curr_means[[col2]]
                sd2 = curr_sds[[col2]]
                n2 = curr_ns[[col2]]
                pval = compare_means(mean1 = mean1,
                                     mean2 = mean2,
                                     sd1 = sd1,
                                     sd2 = sd2,
                                     base1 = n1,
                                     base2 = n2,
                                     common_base = 0,
                                     var_equal = var_equal
                )
                # untestable cells (NA p-value) are treated as non-significant
                if_na(pval) = 1
                pval = pmin(pval*bonferroni_coef, 1)
                sig_section[[col1]] = ifelse(abs(mean1 - mean2)>delta_means & pval<sig_level,
                                             # previous value is greater
                                             ifelse(mean2>mean1,
                                                    paste_non_empty(sig_section[[col1]],
                                                                    sig_labels_previous_column[[1]],
                                                                    sep = " "),
                                                    # previous value is smaller
                                                    paste_non_empty(sig_section[[col1]],
                                                                    sig_labels_previous_column[[2]],
                                                                    sep = " ")
                                             ),
                                             sig_section[[col1]]
                )
            }
        }
    }
    sig_section
}
########################
# Compare every column's means with the first column of the table; mark the
# non-first columns with sig_labels_first_column ([1] = first column greater,
# [2] = first column smaller). adjust_common_base subtracts the overlap when
# the first column contains the other columns (e.g. "Total").
section_sig_first_column_means = function(sig_section,
                                          curr_means,
                                          curr_sds,
                                          curr_ns,
                                          groups,
                                          sig_labels_first_column,
                                          sig_level,
                                          delta_means,
                                          bonferroni,
                                          var_equal,
                                          adjust_common_base = FALSE) {
    groups = unlist(groups)
    # col1 - first column
    # col2 - other columns
    col1 = groups[1]
    mean1 = curr_means[[col1]]
    sd1 = curr_sds[[col1]]
    base1 = curr_ns[[col1]]
    if(length(groups)>1){
        if(bonferroni) {
            comparable_values = !(is.na(curr_means[,groups, drop = FALSE]) |
                                      is.na(curr_sds[,groups, drop = FALSE]) |
                                      is.na(curr_ns[,groups, drop = FALSE]))
            # count number of comparisons
            bonferroni_coef = rowSums(comparable_values[,-1]) # sum(number_of_comparisons_in_row, na.rm = TRUE)
            bonferroni_coef[bonferroni_coef==0] = 1
        } else {
            bonferroni_coef = 1
        }
        for(col2 in groups[-1]){
            mean2 = curr_means[[col2]]
            sd2 = curr_sds[[col2]]
            base2 = curr_ns[[col2]]
            pval = compare_means(mean1, mean2,
                                 sd1, sd2,
                                 base1, base2,
                                 common_base = base2*adjust_common_base,
                                 var_equal = var_equal)
            # NOTE(review): the sibling sections use `if_na(pval) = 1`; Inf is
            # used here. After pmin(..., 1) both yield a non-significant 1,
            # so behaviour matches -- but consider unifying for clarity.
            if_na(pval) = Inf
            pval = pmin(pval*bonferroni_coef, 1)
            sig_section[[col2]] = ifelse(abs(mean1 - mean2)>delta_means & pval<sig_level,
                                         # previous value is greater
                                         ifelse(mean1 > mean2,
                                                paste_non_empty(sig_section[[col2]],
                                                                sig_labels_first_column[[1]],
                                                                sep = " "),
                                                # previous value is smaller
                                                paste_non_empty(sig_section[[col2]],
                                                                sig_labels_first_column[[2]],
                                                                sep = " ")
                                         ),
                                         sig_section[[col2]]
            )
        }
    }
    sig_section
}
########################
|
c4cf06315ed35cfc265cbef6fe9a025fe4ef250c
|
08ff8a019901e8f9aab196f0635de156f33af35e
|
/predJoin_v_0_3.r
|
8f1c6843a1d277bbb4f87689b02975a7a6e5b2ce
|
[] |
no_license
|
squirrelClare/DataAnalysis_R
|
63561b514297ab5038c34f707cbb9e28def2f7da
|
ee7b8f12f26489d380bb192f517797980b41686b
|
refs/heads/master
| 2021-01-10T03:44:31.778737
| 2015-10-21T15:46:38
| 2015-10-21T15:46:38
| 44,686,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,057
|
r
|
predJoin_v_0_3.r
|
# Execute 'sql' against the DM2015 database and return the full result set
# as a data.frame. The session charset is forced to utf8 first.
# FIX: the connection is now released via on.exit() so it is closed even when
# the query fails (the original leaked the connection on error), and the
# result set is cleared before returning.
methChoice_exeDm2015<-function(sql) {
  library(RMySQL)
  conn<-dbConnect(RMySQL::MySQL(),dbname='DM2015',username='dimer',host='192.168.10.87',
                  port=3306,password='data123')
  on.exit(dbDisconnect(conn), add = TRUE)
  dbSendQuery(conn,statement="set names 'utf8';")
  res<-dbSendQuery(conn,statement=sql)
  mydata<-fetch(res,n=-1)
  dbClearResult(res)
  return(mydata)
}
# Execute 'sql' against the HUMMINGBIRD database and return the full result
# set as a data.frame.
# FIX: the connection is now released via on.exit() so it is closed even when
# the query fails (the original leaked the connection on error), and the
# result set is cleared before returning.
methChoice_exeHumbird<-function(sql) {
  library(RMySQL)
  conn<-dbConnect(RMySQL::MySQL(),dbname='HUMMINGBIRD',username='humbird',host='192.168.10.87',
                  port=3306,password='humbird123')
  on.exit(dbDisconnect(conn), add = TRUE)
  res<-dbSendQuery(conn,statement=sql)
  mydata<-fetch(res,n=-1)
  dbClearResult(res)
  return(mydata)
}
# Return the names of all tables that hold prediction results
# (every table whose name ends in '_preds'), as a character vector.
methChoice_getPredTableName<-function() {
  sql<-"select table_name from information_schema.tables
  where table_name like '%_preds';"
  tableNames<-unlist(methChoice_exeDm2015(sql))
  return(tableNames)
}
# Fetch the forecast-error-rate table of every prediction method.
# Returns a list of data.frames keyed by table name.
methChoice_getErrorRatio<-function(tableNames) {
  errorRatios<-list()
  for (tName in tableNames) {
    errorRatios[[tName]]<-methChoice_exeHumbird(sprintf("select * from %s;", tName))
  }
  return(errorRatios)
}
# Pick the best forecasting method per district and forecast day.
# 'errorRatios' is a named list of data.frames (one per method/table), each
# with a flag_id1 column plus one error-rate column per forecast day.
# Returns a data.frame: flag_id1s + the 1-based index (into names(errorRatios))
# of the method with the lowest error for each day. Ties go to the earlier
# table. (Fixes: seq_along instead of 1:length, unused locals removed.)
methChoice_bestMethod<-function(errorRatios) {
  flag_id1s<-errorRatios[[1]]$flag_id1
  tableNames<-names(errorRatios)
  nPreDate<-ncol(errorRatios[[1]])-1
  bestMeths<-matrix(NA,nrow = length(flag_id1s),ncol = nPreDate)
  colnames(bestMeths)<-colnames(errorRatios[[1]])[-1]
  for (i in seq_along(flag_id1s)) {
    id<-flag_id1s[i]
    # one row per candidate method, one column per forecast day
    singleIdRatio<-data.frame()
    for (tName in tableNames) {
      tmpErrorRatio<-errorRatios[[tName]]
      singleIdRatio<-rbind(singleIdRatio,
                           tmpErrorRatio[which(tmpErrorRatio$flag_id1==id),][-1])
    }
    # index of the method with the minimal error for each forecast day
    bestMeth<-apply(X = singleIdRatio, MARGIN = 2, FUN = function(ratio) {
      which(ratio==min(ratio))[1]
    })
    bestMeths[i,]<-as.numeric(bestMeth)
  }
  bestMeths<-as.data.frame.matrix(bestMeths)
  bestMeths<-cbind(flag_id1s,bestMeths)
  return(bestMeths)
}
# Driver: build the (district id, forecast day) -> best-algorithm table,
# persist it to DM2015, then generate the feature table.
methChoice_main<-function() {
  # combine the error-rate tables of every candidate method and pick winners
  tableNames<-methChoice_getPredTableName()
  errorRatios<-methChoice_getErrorRatio(tableNames)
  bestMeths<-methChoice_bestMethod(errorRatios)
  # write the result back to the database
  library(RMySQL)
  conn<-dbConnect(RMySQL::MySQL(),dbname='DM2015',username='dimer',host='192.168.10.87',
                  port=3306,password='data123')
  on.exit(dbDisconnect(conn), add = TRUE)
  # FIX: was `ddbSendQuery("set names 'utf8';")` -- an undefined function,
  # and the connection argument was missing.
  dbSendQuery(conn, statement = "set names 'utf8';")
  dbWriteTable(conn,name = 'best_method',bestMeths)
  # build the feature table (weather/load features + best algorithm)
  methChoice_generatTable()
}
# Weather/weekday features for one forecast day: the base weather features
# concatenated with the five-previous-same-weekday features, as one named vector.
methChoice_wethInf<-function(preDate,wethData) {
  c(methChoice_baseWethInf(preDate,wethData),
    methChoice_sameDayWethInf(preDate,wethData))
}
# Load features for one forecast day. Base-load features are anchored at the
# first day of the 'ahead'-day horizon; same-weekday features at preDate.
methChoice_loadInf<-function(preDate,loadData,ahead) {
  horizon_start<-as.character(as.Date(preDate)-ahead+1)
  c(methChoice_baseLoadInf(horizon_start,loadData),
    methChoice_sameDayLoadInf(preDate,loadData))
}
# Build the training feature table: for every district (VKONT) and each of 7
# forecast days, one row of weather features + load features + best method.
# NOTE(review): preDate/ahead are hard-coded, and the inner loop iterates
# 1:7 instead of 1:ahead -- confirm both are intentional.
methChoice_generatTable<-function() {
  # district id -> best method table (written earlier by methChoice_main)
  bestMeths<-methChoice_exeDm2015('select * from best_method;')
  bestMeths$row_names<-NULL
  # forecast start date
  preDate<-'2014-9-1'
  # forecast horizon in days
  ahead<-7
  # full weather history
  sqlWethData<-"select * from L1_WEATHER_HISTORY;"
  wethData<-methChoice_exeHumbird(sqlWethData)
  # result accumulator; array() seeds a single NA entry that the trailing
  # na.omit() drops again
  result<-array()
  for (i in 1:nrow(bestMeths)) {
    # best methods for one district
    record<-unlist(bestMeths[i,])
    # district id
    vkont<-record[1]
    # load history for this district (columns 4:100 are the load readings)
    sqlLoadData<-paste0("select * from JL_TAIQU_LOAD where VKONT='",vkont,"' order
    by LOAD_DATE;")
    loadData<-methChoice_exeDm2015(sqlLoadData)[4:100]
    for (j in 1:7) {
      # weather features for forecast day j
      wethInf<-methChoice_wethInf(as.character(as.Date(preDate)+j-1),wethData)
      # load features for forecast day j
      loadInf<-methChoice_loadInf(as.character(as.Date(preDate)+j-1),loadData,j)
      # tmp<-c(id=vkont)
      tmp<-c(vkont)
      tmp<-append(tmp,wethInf)
      tmp<-append(tmp,loadInf)
      tmp<-append(tmp,c(method=record[j+1]))
      # tmp<-t(as.data.frame.vector(tmp))
      # rownames(tmp)<-NULL
      result<-rbind(result,tmp)
    }
    print(i)
  }
  result<-as.data.frame.array(result)
  result<-na.omit(result)
  tableNames<-methChoice_getPredTableName()
  row.names(result)<-NULL
  # translate the numeric method index back to the prediction-table name
  result$method.rates_s_1<-tableNames[result$method.rates_s_1]
  return(result)
}
# The five most recent dates before 'preDate' that fall on the same weekday
# (i.e. preDate minus 7, 14, 21, 28 and 35 days), in ascending order.
methChoice_sameDay<-function(preDate) {
  ref<-as.Date(preDate)
  window<-seq(ref-38, ref-1, by=1)
  window[weekdays(window)==weekdays(ref)]
}
# Temperature features over the five previous same-weekday days.
methChoice_sameDayWethInf<-function(preDate,wethData) {
  wethData$WETH_DATE<-as.Date(wethData$WETH_DATE)
  # the five most recent dates with the same weekday as preDate
  sameDates<-methChoice_sameDay(preDate)
  sameDatesWethData<-wethData[which(wethData$WETH_DATE%in%sameDates),]
  # highest daily-max temperature over those days
  h_temp_same_day<-max(sameDatesWethData$MAX_TMP)
  # FIX: lowest daily-min temperature -- the original took max() of MIN_TMP,
  # contradicting its own comment ("lowest temperature").
  l_temp_same_day<-min(sameDatesWethData$MIN_TMP)
  # FIX: mean temperature over the same-type days only -- the original
  # averaged the ENTIRE weather history, inconsistent with the
  # m_temp_same_day_sd computation below which uses sameDatesWethData.
  m_temp_same_day<-mean(append(sameDatesWethData$MAX_TMP,sameDatesWethData$MIN_TMP))
  # standard deviation of the daily max temperatures
  h_temp_same_day_sd<-sd(sameDatesWethData$MAX_TMP)
  # standard deviation of the daily min temperatures
  l_temp_same_day_sd<-sd(sameDatesWethData$MIN_TMP)
  # standard deviation of the daily mean ((max+min)/2) temperatures
  m_temp_same_day_sd<-sd(apply(X=cbind(sameDatesWethData$MAX_TMP,
                                       sameDatesWethData$MIN_TMP),MARGIN=1,FUN=mean))
  sameDatesWethInf<-c(h_temp_same_day=h_temp_same_day,l_temp_same_day=l_temp_same_day,
                      m_temp_same_day=m_temp_same_day,h_temp_same_day_sd=h_temp_same_day_sd,
                      l_temp_same_day_sd=l_temp_same_day_sd,m_temp_same_day_sd=m_temp_same_day_sd)
  return(sameDatesWethInf)
}
# Basic weather features for a forecast date: the forecast day's own max/min
# temperature, its weekday and month, and prior-week extremes/spreads.
# NOTE: the result is a named character vector because numeric values are
# combined with the weekday/month strings via c().
methChoice_baseWethInf <- function(preDate, wethData) {
  wethData$WETH_DATE <- as.Date(wethData$WETH_DATE)
  forecastDay <- as.Date(preDate)
  # The seven days immediately before the forecast date.
  priorWeek <- seq(as.Date(-7, origin = preDate), as.Date(-1, origin = preDate), by = 1)
  priorWeekWeth <- wethData[which(wethData$WETH_DATE %in% priorWeek), ]
  # Weather row for the forecast date itself.
  forecastWeth <- wethData[which(wethData$WETH_DATE == forecastDay), ]
  return(c(
    h_temp = forecastWeth$MAX_TMP,                   # forecast-day max temperature
    l_temp = forecastWeth$MIN_TMP,                   # forecast-day min temperature
    type_week = weekdays(forecastDay),               # weekday of the forecast date
    month = months(forecastDay),                     # month of the forecast date
    h_temp_prev_week = max(priorWeekWeth$MAX_TMP),   # prior-week maximum
    l_temp_prev_week = min(priorWeekWeth$MIN_TMP),   # prior-week minimum
    h_temp_prev_week_sd = sd(priorWeekWeth$MAX_TMP), # sd of prior-week daily maxima
    l_temp_prev_week_sd = sd(priorWeekWeth$MIN_TMP)  # sd of prior-week daily minima
  ))
}
# Basic load features from the week before the forecast date: overall
# extremes and the spread (sd) of the per-day max/min loads.
# loadData: LOAD_DATE in the first column, load readings in the rest.
methChoice_baseLoadInf <- function(preDate, loadData) {
  loadData$LOAD_DATE <- as.Date(loadData$LOAD_DATE)
  priorWeek <- seq(as.Date(-7, origin = preDate), as.Date(-1, origin = preDate), by = 1)
  # Load readings (date column dropped) for the prior week, as a matrix.
  readings <- as.matrix.data.frame(loadData[which(loadData$LOAD_DATE %in% priorWeek), ][-1])
  dailyMax <- apply(readings, MARGIN = 1, FUN = max)
  dailyMin <- apply(readings, MARGIN = 1, FUN = min)
  return(c(h_load_prev_week = max(readings),      # highest reading in the prior week
           l_load_prev_week = min(readings),      # lowest reading in the prior week
           h_load_prev_week_sd = sd(dailyMax),    # sd of per-day maximum loads
           l_load_prev_week_sd = sd(dailyMin)))   # sd of per-day minimum loads
}
# Aggregate load statistics over the five most recent same-weekday days
# before the forecast date.
methChoice_sameDayLoadInf <- function(preDate, loadData) {
  # Dates of the previous five same-weekday days.
  sameDates <- methChoice_sameDay(preDate)
  # Normalize the date column for comparison.
  loadData$LOAD_DATE <- as.Date(loadData$LOAD_DATE)
  # Load readings (date column dropped) for those days, as a matrix.
  readings <- as.matrix.data.frame(loadData[which(loadData$LOAD_DATE %in% sameDates), ][-1])
  dailyMax <- apply(readings, MARGIN = 1, FUN = max)
  dailyMin <- apply(readings, MARGIN = 1, FUN = min)
  dailyMean <- apply(readings, MARGIN = 1, FUN = mean)
  loadInf <- c(h_load_same_day = max(readings),    # highest reading over the 5 days
               l_load_same_day = min(readings),    # lowest reading over the 5 days
               m_load_same_day = mean(readings),   # mean of all readings
               h_load_same_day_sd = sd(dailyMax),  # sd of per-day maxima
               l_load_same_day_sd = sd(dailyMin),  # sd of per-day minima
               m_load_same_day_sd = sd(dailyMean)) # sd of per-day means
  return(loadInf)
}
|
6d62b077f9283995f9e1d23d3ed078850a7a70fe
|
38b5ddfe71331594e45ca40026b57219377b9bac
|
/PreProcessingHelpers_ImageStream.R
|
b370fb39817b80c83c9be38e2996e7cde339e80e
|
[] |
no_license
|
jaywarrick/R-Cytoprofiling
|
0602429224c24c63630b339452f8a69727413981
|
88aeb315f95d2f48ca6b50bc92e2a2e9f4fdfc22
|
refs/heads/master
| 2023-07-20T04:13:57.119807
| 2023-07-14T03:33:10
| 2023-07-14T03:33:10
| 51,033,729
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,546
|
r
|
PreProcessingHelpers_ImageStream.R
|
library(data.table)
library(foreign)
##### Visualization #####
# Launch an interactive Shiny data browser.
# Side effects only: sources the browser's UI/server definitions from the
# remote jaywarrick/R-General GitHub repository (which presumably defines
# `myUI` and `myServer` in the session -- TODO confirm), then starts the app.
# Requires network access; `sourceGitHubFile` and `shinyApp` must already be
# available in the session.
browseShinyData <- function()
{
sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/ui.R')
sourceGitHubFile(user='jaywarrick', repo='R-General', branch='master', file='DataClassBrowser/server.R')
shinyApp(ui=myUI, server=myServer)
}
# Overlay kernel-density estimates of one feature for the 'WT' and 'MT'
# classes of x (a table with a 'Class' column), drawn on [-4, 4].
# Side effects only (draws on the active graphics device); relies on
# data.table-style row subsetting (x[Class == 'WT']).
# Improvement: removed the unused locals `breaks`, `cmt` and `cwt` -- the
# plot always used the literal 'red'/'blue' colors and never the rgb()
# values or the breaks vector.
plotHist <- function(x, feature)
{
  wt <- x[Class == 'WT'][[feature]]
  mt <- x[Class == 'MT'][[feature]]
  wtd <- density(wt, from=-4, to=4)
  mtd <- density(mt, from=-4, to=4)
  # Plot the taller curve first so the y-axis accommodates both densities.
  if(max(wtd$y) > max(mtd$y))
  {
    plot(wtd, col='red', xlim=c(-4,4), main='', xlab=feature)
    lines(mtd, col='blue')
  }
  else
  {
    plot(mtd, col='blue', xlim=c(-4,4), main='', xlab=feature)
    lines(wtd, col='red')
  }
  legend('topright', legend=c('MT','WT'), col=c('blue','red'), lty=1)
}
##### General #####
# Sample from x by index, avoiding the sample() footgun where a length-1
# numeric x is treated as sample.int(x). Extra arguments (size, replace,
# prob) are forwarded to sample.int().
resample <- function(x, ...)
{
  idx <- sample.int(length(x), ...)
  x[idx]
}
# Convert (row, column) coordinates into linear (column-major) indices.
#
# r, c: row and column indices (vectorized).
# numRows: number of rows in the grid (max() is applied defensively in case
#          a vector is passed).
# zeroIndexing: TRUE when r/c are 0-based; FALSE for 1-based inputs (the
#               result is 0-based in both cases, matching the arithmetic).
#
# BUG FIX: the default was `true`, which is not a defined R object, so the
# function errored whenever the argument was omitted; R's constant is TRUE.
getLocsFromRCs <- function(r, c, numRows, zeroIndexing=TRUE)
{
  if(zeroIndexing)
  {
    r + max(numRows) * c
  }
  else
  {
    (r-1) + max(numRows) * (c-1)
  }
}
# Sine of an angle given in degrees.
sind <- function(x)
{
  sin(x * pi / 180)
}
# Cosine of an angle given in degrees.
cosd <- function(x)
{
  cos(x * pi / 180)
}
# Tangent of an angle given in degrees.
tand <- function(x)
{
  tan(x * pi / 180)
}
# Re-level every factor column of a data.table by calling factor() on it
# (drops unused levels); non-factor columns pass through unchanged.
# Returns a new data.table; x itself is not modified.
refactor <- function(x)
{
  releveled <- x[, lapply(.SD, function(col) {
    if (is.factor(col)) factor(col) else col
  })]
  return(releveled)
}
##### Table IO #####
# Read a list of CSV files from dir1, tag each table with its class and
# experiment plus a per-cell compound id (cId), and optionally subsample.
#
# dir1: directory containing the files; fileList: file names within dir1.
# class/expt: labels stored in the Class/Expt columns of every table.
# sampleSize: if given, total number of cells to keep, split evenly across
#             files (takes precedence over cIds).
# cIds: if given (and sampleSize is NULL), keep only these compound ids.
# Returns a list of data.tables, one per file.
#
# Improvements: builds the result with lapply() instead of growing a list
# with append() inside a loop, and uses fixed=TRUE rather than fixed=T.
getTableList <- function(dir1, fileList, class, expt, sampleSize=NULL, cIds=NULL)
{
  # Per-file quota when a total sample size is requested.
  subSampleSize <- if (!is.null(sampleSize)) sampleSize / length(fileList) else NULL
  tableList <- lapply(fileList, function(f)
  {
    path <- file.path(dir1, f)
    print(paste0('Reading file: ', path))
    temp <- fread(input=path)
    temp$Class <- class
    temp$Expt <- expt
    # The trailing 'x...' portion of the file name identifies the image file.
    fileXY <- unlist(strsplit(f, 'x', fixed=TRUE))
    temp$File <- paste0('x', fileXY[length(fileXY)])
    # Compound id unique across experiments and files.
    temp$cId <- paste(temp$Expt, temp$File, temp$Id, sep='.')
    if(!is.null(subSampleSize))
    {
      # Randomly keep at most subSampleSize cells from this file.
      rIds <- trySample(unique(temp$cId), subSampleSize)
      temp <- temp[cId %in% rIds]
    }
    else if(!is.null(cIds))
    {
      temp <- temp[cId %in% cIds]
    }
    temp
  })
  return(tableList)
}
# Read every x/y-named CSV under dir (recursively), convert each one via
# getXYCSVAsTable(), and stack the results into a single data.table.
getXYCSVsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
  pieces <- list()
  for(f in list.files(path = dir, recursive = TRUE))
  {
    # Only files whose names carry an x/y coordinate and a .csv extension.
    isCandidate <- (grepl('x', f) || grepl('y', f)) & grepl('.csv', f)
    if(isCandidate)
    {
      key <- strsplit(f, "\\.")[[1]][1]
      pieces[[key]] <- getXYCSVAsTable(dir, f, xName, xExpression, yName, yExpression)
    }
  }
  return(rbindlist(pieces))
}
# Read one CSV whose name encodes grid coordinates as '<y-part>_<x-part>'
# (e.g. 'y3_x7.csv') and attach two derived columns.
# xExpression/yExpression are R expressions in terms of the parsed x/y
# values (NOTE: evaluated with eval(parse(...)) -- callers control these
# strings, so never pass untrusted input).
# BUG FIX: the original assigned via `theTable[,(xName),with=FALSE] <- xVal`,
# which is not a supported data.table assignment form; the columns are now
# added by reference with `:=`.
getXYCSVAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
  fileName <- strsplit(file, "\\.")[[1]][1]
  xy <- strsplit(fileName, "_")[[1]]
  # Drop the leading letter of each coordinate token to get the number.
  y <- as.numeric(substr(xy[1], 2, nchar(xy[1])))
  x <- as.numeric(substr(xy[2], 2, nchar(xy[2])))
  xVal <- eval(parse(text=xExpression))
  yVal <- eval(parse(text=yExpression))
  print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
  theTable <- fread(file.path(dir,file))
  # Add the derived columns by reference.
  theTable[, (xName) := xVal]
  theTable[, (yName) := yVal]
  return(theTable)
}
# Read every x/y-named ARFF file under dir (recursively), convert each one
# via getXYArffAsTable(), and stack the results into a single data.table.
getXYArffsAsTableFromDir <- function(dir, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
  pieces <- list()
  for(f in list.files(path = dir, recursive = TRUE))
  {
    # Only files whose names carry an x/y coordinate and an .arff extension.
    isCandidate <- (grepl('x', f) || grepl('y', f)) & grepl('.arff', f)
    if(isCandidate)
    {
      key <- strsplit(f, "\\.")[[1]][1]
      pieces[[key]] <- getXYArffAsTable(dir, f, xName, xExpression, yName, yExpression)
    }
  }
  return(rbindlist(pieces))
}
# Read one ARFF file whose name encodes coordinates as '<y-part>_<x-part>'
# and attach two derived columns (plain data.frame assignment, then
# converted to a data.table). The expressions are evaluated in terms of the
# parsed x/y values, so the locals must keep those names.
getXYArffAsTable <- function(dir, file, xName='SNR', xExpression='(x+1)', yName='BLUR', yExpression='(y+1)*0.05')
{
  stem <- strsplit(file, "\\.")[[1]][1]
  tokens <- strsplit(stem, "_")[[1]]
  y <- as.numeric(substr(tokens[1], 2, nchar(tokens[1])))
  x <- as.numeric(substr(tokens[2], 2, nchar(tokens[2])))
  xVal <- eval(parse(text=xExpression))
  yVal <- eval(parse(text=yExpression))
  print(paste0('Reading ', file.path(dir,file), ' as ', xName, '=', xVal, ', ', yName, '=', yVal, '.'))
  raw <- read.arff(file.path(dir,file))
  raw[,xName] <- xVal
  raw[,yName] <- yVal
  return(data.table(raw))
}
##### Wide Table Operations #####
# Map bounded similarity scores onto the whole real line with a logit-style
# transform: log((|lo| + v) / (|hi| - v)). New columns get a '.Dilate'
# suffix; the originals are dropped when removeOld is TRUE.
# Modifies x (a data.table) by reference.
applySimilarityTransform <- function(x, measurements='Stats.PearsonsCorrelationCoefficient', bounds=c(-1,1), removeOld=TRUE)
{
  for(measure in measurements)
  {
    dilated <- paste0(measure, '.Dilate')
    x[, c(dilated) := log((abs(bounds[1]) + get(measure)) / (abs(bounds[2]) - get(measure)))]
    if(removeOld)
    {
      x[, c(measure) := NULL]
    }
  }
}
# Collapse per-subregion geometric measurements into per-cell summaries,
# grouped by idCols plus the parent mask channel. x is a wide data.table
# with one row per subregion; it is modified by reference and returned.
# Requires data.table plus spatial helpers (getMinCircle, ppp, disc,
# convexhull, intensity, area, perimeter -- presumably from
# shotGroups/spatstat; TODO confirm they are attached).
summarizeGeometry <- function(x, idCols)
{
# Parent mask name: strip the '.p<N>' subregion suffix from MaskChannel.
x[, MaskChannel2 := tstrsplit(MaskChannel, '.p', fixed=T, keep=1L)]
# Per-group weights: area fraction (weights) and area relative to the
# largest subregion in the group (countWeights).
x[is.finite(Geometric.SizeIterable), ':='(weights=Geometric.SizeIterable/(sum(Geometric.SizeIterable, na.rm=T)), countWeights=Geometric.SizeIterable/(max(Geometric.SizeIterable, na.rm=T))), by=c(idCols, 'MaskChannel2')]
# Replace the raw axis/diameter measures with their aspect ratios.
x[, ':='(Geometric.FeretsAspectRatio = Geometric.MaximumFeretsDiameter/Geometric.MinimumFeretsDiameter, Geometric.EllipseAspectRatio = Geometric.MajorAxis/Geometric.MinorAxis)]
x[, ':='(Geometric.MaximumFeretsDiameter=NULL, Geometric.MinimumFeretsDiameter=NULL, Geometric.MajorAxis=NULL, Geometric.MinorAxis=NULL)]
# Decide how to combine different geometric features
#geomFeatures <- c('Convexity', 'Solidity', 'SizeIterable', 'BoundarySize', 'MainElongation', 'Circularity',
#'Boxivity', 'Eccentricity', 'MajorAxis', 'MaximumFeretsDiameter', 'MinimumFeretsDiameter',
#'MinorAxis', 'Roundness', 'X', 'Y')
# Features summed over subregions vs. averaged with area weights.
geomFeatures_Total <- c('Geometric.SizeIterable', 'Geometric.BoundarySize')
geomFeatures_SizeWeightedMean <- c('Geometric.SizeIterable', 'Geometric.Convexity', 'Geometric.Solidity', 'Geometric.MainElongation', 'Geometric.Circularity', 'Geometric.Boxivity', 'Geometric.Eccentricity', 'Geometric.BoundarySize','Geometric.FeretsAspectRatio','Geometric.EllipseAspectRatio','Geometric.Roundness')
# Totals get a '.Total' suffix; the weighted means overwrite in place.
for(feature in geomFeatures_Total)
{
x[is.finite(get(feature)), (paste0(feature, '.Total')):=sum(get(feature), na.rm=TRUE), by=c(idCols, 'MaskChannel2')]
}
for(feature in geomFeatures_SizeWeightedMean)
{
x[is.finite(get(feature)), (feature):=sum(get(feature)*weights, na.rm=TRUE), by=c(idCols, 'MaskChannel2')]
}
# Subregion counts per group: raw (N) and size-weighted (weightedN).
x[is.finite(countWeights), ':='(N=as.double(.N), weightedN=sum(countWeights)), by=c(idCols, 'MaskChannel2')]
# Spatial statistics of the subregion centroids: point intensity within the
# minimum enclosing circle, convex-hull area/perimeter, and circularity.
getPointStats <- function(x, y, weights)
{
if(length(x) > 1)
{
pts <- matrix(c(x, y), ncol=2)
circ <- getMinCircle(pts)
}
else if(length(x) == 1)
{
# A single point: fall back to a unit-radius circle centered on it.
circ <- list(rad=1L, ctr=c(x,y))
}
else
{
# No points at all: statistics are undefined.
# NOTE(review): this branch returns 4 stats while the caller assigns 6
# columns -- confirm the empty case can never actually be reached.
return(lapply(list(Intensity=NA, WeightedIntensity=NA, ConvexArea=NA, ConvexPerimeter=NA), as.double))
}
# Point pattern on a slightly enlarged disc so boundary points fall inside.
pp <- ppp(x, y, window=disc(radius=circ$rad*1.01, centre=circ$ctr))
hull <- convexhull(pp)
ret <- lapply(list(Intensity=intensity(pp), WeightedIntensity=intensity(pp, weights=weights), ConvexArea=area(hull), ConvexPerimeter=perimeter(hull), Diameter=circ$rad), as.double)
# Guard the 4*pi*A/P^2 circularity against degenerate hulls.
if(ret$ConvexArea == 0 || ret$ConvexPerimeter == 0)
{
ret$Circularity <- 0
}
else
{
ret$Circularity <- as.double(4*pi*ret$ConvexArea/(ret$ConvexPerimeter*ret$ConvexPerimeter))
}
return(ret)
}
x[is.finite(Geometric.X), c('Geometric.SubRegionIntensity', 'Geometric.SubRegionWeightedIntensity', 'Geometric.SubRegionConvexArea', 'Geometric.SubRegionConvexPerimeter', 'Geometric.SubRegionRadius', 'Geometric.SubRegionCircularity'):=getPointStats(Geometric.X, Geometric.Y, weights), by=c(idCols, 'MaskChannel2')]
# Promote MaskChannel2 back to MaskChannel and drop the helper columns.
x[, ':='(MaskChannel=MaskChannel2, MaskChannel2=NULL, weights=NULL, countWeights=NULL, Geometric.X=NULL, Geometric.Y=NULL)]
return(x)
}
# Draw a boxplot of valueCol grouped by labelCols, with rotated axis labels
# and adjustable margins. Side effects only (draws on the active device).
# BUG FIX: the original sorted a global `rfImp` instead of the `data`
# argument, so the plotted order silently depended on an unrelated object.
labeledBoxPlot <- function(data, labelCols=c('name'), ord=c(1), valueCol='MeanDecreaseAccuracy', col=c('blue'), bottom=10, left=4, top=1, right=1, num.offset=1, axlab.offset=5, ...)
{
  library(data.table)
  myFormula <- reformulate(termlabels=labelCols, response=valueCol)
  # Order the table itself so box order matches the label order below.
  setorderv(data, labelCols, ord)
  labels <- unique(data[[labelCols[length(labelCols)]]])
  at <- (1:length(labels))
  print(labels)
  par(mar=c(bottom,left,top,right), mgp=c(axlab.offset,num.offset,0))
  duh <- boxplot(myFormula, data=data, las=2, mar=c(6,6,6,6), col=col, xaxt='n', ...)
  axis(1, labels = labels, at=at, las=2)
}
# getSubRegionSizeWeightedMeans2 <- function(x, weights, features, featureResultNames=features, idCols=getAllColNamesExcept(x, c('MaskChannel','Value')))
# {
# subWeights <- function(weights, this.cId, this.MaskChannel2)
# {
# return(weights[cId==this.cId & MaskChannel2==this.MaskChannel2]$Value)
# }
# maskChannel2Index <- match('MaskChannel2', idCols)
# cIdIndex <- match('cId', idCols)
# results <- x[Measurement %in% features, list(MaskChannel=.BY[[maskChannel2Index]], Value=sum(Value * subWeights(weights=weights, this.cId=.BY[[cIdIndex]], this.MaskChannel2=.BY[[maskChannel2Index]]), na.rm=T)), by=idCols]
# results[, MaskChannel2:=NULL]
# replaceMeasurementNames(results, features, featureResultNames)
# return(results)
# }
#
# getSubRegionTotals <- function(x, features, featureResultNames=paste0(features, '.Total'), idCols=getAllColNamesExcept(x, c('MaskChannel','Value')))
# {
# # Might need to do na.rm=T for sum(...)
# maskChannel2Index <- match('MaskChannel2', idCols)
# results <- x[Measurement %in% features, list(MaskChannel=.BY[[maskChannel2Index]], Value=sum(Value, na.rm=T)), by=idCols]
# results[, MaskChannel2:=NULL]
# replaceMeasurementNames(results, features, featureResultNames)
# return(results)
# }
#
# getSubRegionTotals2 <- function(x, features, featureResultNames=paste0(features, '.Total'), idCols=getAllColNamesExcept(x, c('MaskChannel','Value')))
# {
# # Might need to do na.rm=T for sum(...)
# maskChannel2Index <- match('MaskChannel2', idCols)
# results <- x[Measurement %in% features, list(MaskChannel=.BY[[maskChannel2Index]], Value=sum(Value, na.rm=T)), by=idCols]
# results[, MaskChannel2:=NULL]
# replaceMeasurementNames(results, features, featureResultNames)
# return(results)
# }
#
# getSubRegionCountsAndSizeWeightedCounts <- function(x, idCols=getAllColNamesExcept(x, c('MaskChannel','Value')))
# {
# # rename MaskChannel - Also rename the MaskChannel to eliminate the '.p#' designation
# # weightedN - Use SizeIterable to make this weighted count since we need it for weighting anyway
# # N - The number of subregions for this cell, Array.X, Array.Y, and MaskChannel
# results <- x[Measurement == 'Geometric.SizeIterable', list(MaskChannel=MaskChannel2, Value=.N), by=idCols]
# results$Measurement <- 'N'
# results2 <- x[Measurement == 'Geometric.SizeIterable', list(MaskChannel=MaskChannel2, Value=sum(Value/max(Value))), by=idCols]
# results2$Measurement <- 'weightedN'
# results <- rbindlist(list(results, results2), use.names = T)
# results[, MaskChannel2:=NULL]
# return(results)
# }
#
# getSubRegionCountsAndSizeWeightedCounts2 <- function(x, idCols=getAllColNamesExcept(x, c('MaskChannel','Value')))
# {
# # rename MaskChannel - Also rename the MaskChannel to eliminate the '.p#' designation
# # weightedN - Use SizeIterable to make this weighted count since we need it for weighting anyway
# # N - The number of subregions for this cell, Array.X, Array.Y, and MaskChannel
# results <- x[Measurement == 'Geometric.SizeIterable', list(MaskChannel=MaskChannel2, Value=.N), by=idCols]
# results$Measurement <- 'N'
# results2 <- x[Measurement == 'Geometric.SizeIterable', list(MaskChannel=MaskChannel2, Value=sum(Value/max(Value))), by=idCols]
# results2$Measurement <- 'weightedN'
# results <- rbindlist(list(results, results2), use.names = T)
# results[, MaskChannel2:=NULL]
# return(results)
# }
# Return the columns of x that are either non-numeric or contain a value
# greater than 1 (used to spot faulty wide-table results).
# BUG FIXES: the original read `names(x5)` (a stray global) instead of
# `names(x)`, and used sapply(), whose return type is input-dependent;
# vapply() pins the result to one logical per column.
findDuplicateValuesInWideTableFaultyResult <- function(x)
{
  isTrouble <- vapply(x, function(col) {
    if (is.numeric(col)) max(col) > 1 else TRUE
  }, logical(1))
  troubleNames <- names(x)[isTrouble]
  return(x[, troubleNames, with=FALSE])
}
# Drop measurement columns that carry no useful information (Phase and the
# raw image-moment families). Modifies the data.table x by reference and
# returns it, printing each removed column name.
removeExtraneousColumns <- function(x)
{
  unwanted <- unique(c(getColNamesContaining(x, 'Phase'),
                       getColNamesContaining(x, 'ImageMoments.Moment'),
                       getColNamesContaining(x, 'ImageMoments.HuMoment'),
                       getColNamesContaining(x, 'ImageMoments.NormalizedCentralMoment')))
  print('Removing the following extraneous columns of information...')
  for(colName in unwanted)
  {
    print(colName)
  }
  x[, (unwanted) := NULL]
  return(x)
}
# In-place (data.table) division of column colA by column colB.
# Rows where colB is zero get NA in colA instead of Inf/NaN.
# Statement order matters: the zero-divisor rows are set to NA first, then
# the remaining rows are overwritten with the quotient.
# Returns x (modified by reference).
divideColAByColB <- function(x, colA, colB)
{
x[get(colB)==0,(colA):=NA]
x[get(colB)!=0,(colA):=get(colA)/get(colB)]
return(x)
}
# Remove (by reference) every numeric column of the data.table x that
# contains at least one non-finite value (NA/NaN/Inf), printing each name.
# BUG FIX: the original tested `if(length(duh2 > 0))` -- the length of a
# logical vector; it behaved like `length(duh2) > 0` only by accident.
# The comparison now sits outside length().
removeColsWithNonfiniteVals <- function(x)
{
  hasBad <- x[, lapply(.SD, function(y){length(which(!is.finite(y))) > 0}), .SDcols=getNumericCols(x)]
  badCols <- getNumericCols(x)[as.logical(as.vector(hasBad))]
  if(length(badCols) > 0)
  {
    print("Removing cols with infinite values...")
    for(col in badCols)
    {
      print(col)
      x[, (col) := NULL]
    }
  }
  else
  {
    print("No non-finite data found in any column. Yay!")
  }
}
# Clean a cell table of non-finite values in two stages:
# 1) drop any numeric column whose fraction of non-finite entries exceeds
#    colDeletionThreshold;
# 2) recursively drop the remaining cells (rows) that still carry
#    non-finite values, one offending column at a time.
# Returns the cleaned table.
# FIXES: `if(length(duh2 > 0))` -> `length(badCols) > 0` (accidental-works
# paren bug); the deletion message now reports the actual threshold instead
# of a hard-coded "5%"; and the recursive call now forwards
# colDeletionThreshold instead of silently resetting it to the default.
removeCellsWithNonfiniteVals <- function(x, colDeletionThreshold=0.05)
{
  fracBad <- x[, lapply(.SD, function(y){as.double(length(which(!is.finite(y))))/as.double(length(y)) > colDeletionThreshold}), .SDcols=getNumericCols(x)]
  colsToDrop <- getNumericCols(x)[as.logical(as.vector(fracBad))]
  for(col in colsToDrop)
  {
    print(paste0("Removing column ", col, " as more than ", 100*colDeletionThreshold, "% of cells have NAs/Infs for this feature"))
    x[, c(col) := NULL]
  }
  anyBad <- x[, lapply(.SD, function(y){length(which(!is.finite(y))) > 0}), .SDcols=getNumericCols(x)]
  badCols <- getNumericCols(x)[as.logical(as.vector(anyBad))]
  if(length(badCols) > 0)
  {
    print("Finding cells to remove...")
    print(paste0('Checking column: ', badCols[1]))
    cIds <- x[!is.finite(get(badCols[1]))]$cId
    if(length(cIds) > 0)
    {
      ret <- x[is.finite(get(badCols[1]))]
      cIds <- unique(cIds)
      print('Removing the following ids')
      print(paste(cIds, collapse=', '))
      # Recurse until no non-finite values remain.
      return(removeCellsWithNonfiniteVals(ret, colDeletionThreshold))
    }
    else
    {
      print("Shouldn't ever get here!!!")
    }
  }
  else
  {
    print("No more non-finite data was found in any column. Yay!")
    return(x)
  }
}
# Names of x's columns whose name matches the regular expression `name`.
getColNamesContaining <- function(x, name)
{
  return(grep(name, names(x), value=TRUE))
}
# Remove (by reference, data.table) every column whose name matches the
# regex `name`, reporting each removal. Returns x.
removeColNamesContaining <- function(x, name)
{
  doomed <- getColNamesContaining(x, name)
  print(paste0("Removing colums with names containing '", name, "'"))
  for(victim in doomed)
  {
    print(victim)
    x[, (victim) := NULL]
  }
  return(x)
}
# Remove (by reference, data.table) the columns whose names match EVERY
# pattern in namesToMatch (the match sets are intersected), reporting
# progress. Returns x.
removeColsContainingNames <- function(x, namesToMatch)
{
  candidates <- getColNamesContaining(x, namesToMatch[1])
  print(paste0("Removing colums with names containing..."))
  for(pattern in namesToMatch)
  {
    print(pattern)
    # Keep only candidates that also match this pattern.
    candidates <- candidates[candidates %in% getColNamesContaining(x, pattern)]
  }
  for(victim in unique(candidates))
  {
    print(victim)
    x[, (victim) := NULL]
  }
  return(x)
}
# Normalize column names in place: strip spaces, turn '$' into '.', and
# ':' into '_'. Each pass rewrites the names produced by the previous one.
# Relies on replaceStringInColNames(), which edits x by reference
# (data.table::setnames); no value is returned.
fixColNames <- function(x)
{
replaceStringInColNames(x, ' ', '')
replaceStringInColNames(x, '\\$', '.')
replaceStringInColNames(x, ':', '_')
}
# Column names of x excluding those listed in `names`.
# (Logical indexing rather than setdiff() so duplicated column names are
# preserved, matching the long-standing behavior.)
getAllColNamesExcept <- function(x, names)
{
  keep <- !(names(x) %in% names)
  return(names(x)[keep])
}
# Names of the numeric columns of x.
# Generalized: vapply() iterates the columns directly, so this now works on
# plain data.frames as well as data.tables (the original used data.table's
# `[, lapply(.SD, ...)]` and only worked on data.tables); the result type
# is pinned to one logical per column.
getNumericCols <- function(x)
{
  return(names(x)[vapply(x, is.numeric, logical(1))])
}
# Names of the non-numeric columns of x.
# Generalized like getNumericCols(): works on both data.frames and
# data.tables via vapply() over the columns.
getNonNumericCols <- function(x)
{
  return(names(x)[!vapply(x, is.numeric, logical(1))])
}
# Rename columns in place (data.table::setnames), substituting the regex
# `old` with `new` in every column name.
replaceStringInColNames <- function(x, old, new)
{
  current <- names(x)
  setnames(x, current, gsub(old, new, current))
}
# Convert a long-format table to wide format: every column except
# Value/Measurement is treated as an id column for reorganize(), then the
# resulting columns are sorted alphabetically.
getWideTable <- function(x)
{
  ids <- getAllColNamesExcept(x, c('Value', 'Measurement'))
  wide <- reorganize(x, idCols=ids)
  wide <- sortColsByName(wide)
  return(wide)
}
# Reorder the columns of a data.table alphabetically, by reference.
sortColsByName <- function(x)
{
  alphabetical <- sort(names(x))
  setcolorder(x, alphabetical)
}
# standardizeWideData <- function(x)
# {
# removeNoVarianceCols(x)
# robustScale <- function(x)
# {
# m <- median(x, na.rm=TRUE)
# return((x-m)/mad(x, center=m, na.rm=TRUE))
# }
# x[,lapply(.SD, function(x){if(is.numeric(x)){return(robustScale(x))}else{return(x)}})]
# }
# removeNoVarianceCols <- function(x)
# {
# namesToRemove <- getNoVarianceCols(x)
# if(length(namesToRemove) > 0)
# {
# print("Removing cols with a variance of zero...")
# for(name in namesToRemove)
# {
# print(name)
# x[,(name):=NULL]
# }
# }
# }
# getNoVarianceCols <- function(x)
# {
# tempSD <- function(y){sd(y, na.rm = TRUE)}
# tempNames <- x[,lapply(.SD, tempSD), .SDcols=getNumericCols(x)]
# return(names(tempNames)[as.numeric(as.vector(tempNames))==0])
# }
##### Long Table Operations #####
# Divide the Value of measurement mA by the Value of measurement mB, row
# for row, storing the result back into mA's rows of the data.table x
# (by reference -- hence the 'byRef' in the name). If the row counts
# differ, mB is restricted to the MaskChannels present for mA; any
# remaining mismatch aborts. Returns x.
# BUG FIX: the original wrote the result with `x[Measurement==mA]$Value <-
# ret`, which assigns into a subset copy rather than reliably updating x;
# the update is now done with data.table's `:=`.
divideMAbyMBbyRef <- function(x, mA, mB)
{
  mATable <- x[Measurement==mA]
  mBTable <- x[Measurement==mB]
  if(nrow(mATable) != nrow(mBTable))
  {
    # Try to perform the operation on the subset of the mB column (can't do reverse because we are editing the mA column)
    mBTable <- mBTable[MaskChannel %in% unique(mATable$MaskChannel)]
    if(nrow(mATable) != nrow(mBTable))
    {
      stop('Number of rows for these measurements do not match! Aborting operation.')
    }
  }
  ret <- mATable$Value / mBTable$Value
  x[Measurement==mA, Value := ret]
  return(x)
}
# Normalize every 'ImageMoments.CentralMoment*' measurement by the
# integrated intensity ('Stats.Sum') of the same object.
intIntensityNormalizeCentralMoments <- function(x)
{
  for(moment in getMeasurementNamesContaining(x, 'ImageMoments.CentralMoment'))
  {
    x <- divideMAbyMBbyRef(x, moment, 'Stats.Sum')
  }
  return(x)
}
# Normalize every 'ZernikeMag*' measurement by the mean intensity
# ('Stats.Mean') of the same object.
meanNormalizeZernikeMoments <- function(x)
{
  for(moment in getMeasurementNamesContaining(x, 'ZernikeMag'))
  {
    x <- divideMAbyMBbyRef(x, moment, 'Stats.Mean')
  }
  return(x)
}
# Rows of the data.table x whose column `col` matches the regex `baseName`.
getRowsMatching <- function(x, col, baseName)
{
  hits <- grepl(baseName, x[[col]])
  return(x[hits])
}
# Melt a wide table to long format. NOTE: despite the name, `idCols` lists
# the VALUE columns to be melted -- the id columns handed to melt() are all
# columns EXCEPT idCols. The measurement/value column names are
# configurable; rows with NA values are dropped (na.rm=TRUE).
getLongTable <- function(x, idCols, measurementName='Measurement', valueName='Value')
{
return(melt(x, getAllColNamesExcept(x, idCols), variable.name=measurementName, value.name=valueName, na.rm=TRUE))
}
# Melt x using another long table as the template: the template's id
# columns (everything except Measurement/Value) determine which of x's
# columns are melted by getLongTable().
getLongTableFromTemplate <- function(x, longTemplate)
{
return(getLongTable(x, idCols=getAllColNamesExcept(x, getAllColNamesExcept(longTemplate, c('Measurement','Value')))))
}
# Unique values of x$Measurement that match the regex `name`.
getMeasurementNamesContaining <- function(x, name)
{
  measurements <- unique(x$Measurement)
  return(measurements[grepl(name, measurements)])
}
# Build a lookup of moment measurements and their x/y orders, parsed from
# the two digits that follow `baseName` in each measurement name
# (e.g. 'ImageMoments.CentralMoment12' -> orderx=1, ordery=2).
getMomentTable <- function(x, baseName='ImageMoments.CentralMoment')
{
  momentNames <- unique(x[['Measurement']])
  momentNames <- momentNames[grepl(baseName, momentNames, fixed=TRUE)]
  digits <- substr(momentNames, nchar(baseName) + 1, nchar(baseName) + 2)
  return(data.frame(Measurement=momentNames,
                    orderx=as.numeric(substr(digits, 1, 1)),
                    ordery=as.numeric(substr(digits, 2, 2))))
}
# Drop all rows whose Measurement matches the regex `name`, reporting the
# removed measurement names. Returns the filtered data.table.
removeMeasurementNamesContaining <- function(x, name)
{
  doomed <- getMeasurementNamesContaining(x, name)
  print("Removing the following Measurements...")
  for(m in doomed)
  {
    print(m)
  }
  return(x[!(Measurement %in% doomed)])
}
# Drop all rows whose Measurement is in `names` (names not present in the
# table are ignored), reporting each removal. Returns the filtered table.
removeMeasurementNames <- function(x, names)
{
  print("Removing the following Measurements...")
  present <- names[names %in% unique(x$Measurement)]
  for(m in present)
  {
    print(m)
  }
  return(x[!(Measurement %in% present)])
}
# Rename measurements in place: each oldNames[i] becomes newNames[i].
# Names absent from the table are skipped; the same logical filter is
# applied to newNames, keeping the old/new pairing aligned. Progress is
# printed. Returns x (a data.table, modified via :=).
replaceMeasurementNames <- function(x, oldNames, newNames)
{
print("Replacing the following Measurement names...")
# Restrict to names actually present in the table.
oldNames.temp <- oldNames[oldNames %in% unique(x$Measurement)]
for(name in oldNames.temp)
{
print(name)
}
print("with...")
# Same logical filter keeps old/new aligned element-for-element.
newNames.temp <- newNames[oldNames %in% unique(x$Measurement)]
for(name in newNames.temp)
{
print(name)
}
# match() maps each matching row's Measurement to its replacement.
x <- x[Measurement %in% oldNames.temp, Measurement:=newNames.temp[match(Measurement,oldNames.temp)]]
return(x)
}
# Robustly standardize the Value column within each group defined by `by`
# (ZernikePhase measurements are left untouched -- phases are angles).
# use.mad=TRUE: center by median, scale by MAD; otherwise mean/sd.
# Zero-variance combinations are removed first via removeNoVarCombos().
# BUG FIXES: the sd branch called sd(x, center=m, na.rm=TRUE) -- sd() has
# no `center` argument, so use.mad=FALSE always errored; it now computes
# (x - mean)/sd. Also, `measurement` arrives as a per-group vector, so the
# if() condition now uses measurement[1] (Measurement is constant within a
# group) instead of a length>1 condition, which errors in R >= 4.2.
standardizeLongData <- function(x, by=c('MaskChannel','ImageChannel','Measurement','Expt'), use.mad=TRUE)
{
  robustStandardize <- function(x, measurement, use.mad)
  {
    # Phase measurements are circular; standardizing them is meaningless.
    if(substr(measurement[1],1,12) == 'ZernikePhase')
    {
      return(x)
    }
    else
    {
      if(use.mad)
      {
        m <- median(x, na.rm=TRUE)
        return((x-m)/mad(x, center=m, na.rm=TRUE))
      }
      else
      {
        m <- mean(x, na.rm=TRUE)
        return((x-m)/sd(x, na.rm=TRUE))
      }
    }
  }
  x <- removeNoVarCombos(x, by=by, use.mad=use.mad)
  x[,Value:=robustStandardize(Value,Measurement,use.mad=use.mad),by=by]
  return(x)
}
# removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
# {
# # See if we have any columns to remove and record the info for reporting
# temp <- x[,list(stdev=sd(get(val))), by=by]
# temp <- data.frame(temp[stdev == 0])
# print("Removing measurements with 0 variance...")
# print(temp)
# # Tempororarily add a column in the table with stdev in it
# x[,stdev:=sd(get(val)), by=by]
# y <- x[stdev != 0]
# x[, stdev:=NULL]
# y[, stdev:=NULL]
# return(y)
# }
# Remove every (Measurement, ImageChannel, MaskChannel) combination that
# contains a non-finite Value anywhere, across all experiments.
# Returns the filtered data.table; prints what was removed.
# BUG FIX: the original ended by deleting a 'VAR' column that this function
# never creates (copy-paste from removeNoVarCombos), triggering data.table
# warnings; those lines are removed.
removeNonFiniteVarCombos <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
{
  # If any experiment has a non-finite value for a combo, drop that combo everywhere.
  toRemove <- unique(x[!is.finite(Value),c('Measurement','ImageChannel','MaskChannel')])
  toRemove[, combo:=paste(Measurement,ImageChannel,MaskChannel,sep=' ')]
  if(nrow(toRemove)>0)
  {
    print("Removing measurements with non-finite values...")
    # Temporarily raise max.print so the full removal list is visible.
    maxOption <- getOption('max.print')
    options(max.print=nrow(toRemove) + 10)
    print(toRemove$combo, nrows=nrow(toRemove))
    options(max.print=maxOption)
    return(x[!(paste(Measurement,ImageChannel,MaskChannel,sep=' ') %in% toRemove$combo)])
  }else
  {
    return(x)
  }
}
# Remove every (Measurement, MaskChannel, ImageChannel) combination whose
# Value has zero spread (MAD when use.mad=TRUE, otherwise variance) in ANY
# group defined by `by`. A temporary VAR column is added by reference and
# removed again before returning. Prints what was removed; returns the
# (possibly filtered) data.table.
removeNoVarCombos <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'), use.mad=TRUE)
{
# For each experiment calculate the variance/mad seen
if(use.mad)
{
x[,VAR:=mad(get(val), na.rm=TRUE), by=by]
}
else
{
x[,VAR:=var(get(val), na.rm=TRUE), by=by]
}
# If any of the experiments saw 0 variance (see how unique call does not include 'Expt'), then we should eliminate that measure (combo of Measurement, ImageChannel, and MaskChannel) from all experiments
toRemove <- unique(x[VAR == 0,c('Measurement','ImageChannel','MaskChannel')])
toRemove[, combo:=paste(Measurement,MaskChannel,ImageChannel,sep=' ')]
if(nrow(toRemove)>0)
{
print("Removing measurements with 0 VAR/MAD...")
# Temporarily raise max.print so the full removal list is visible.
maxOption <- getOption('max.print')
options(max.print=nrow(toRemove) + 10)
print(toRemove$combo, nrows=nrow(toRemove))
options(max.print=maxOption)
# Keep only rows whose combo key is not flagged for removal.
y <- x[!(paste(Measurement,MaskChannel,ImageChannel,sep=' ') %in% toRemove$combo)]
# Clean up the temporary column on both the input and the result.
x[, VAR:=NULL]
y[, VAR:=NULL]
return(y)
}else
{
x[, VAR:=NULL]
return(x)
}
}
# removeNoMADMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
# {
# # Tempororarily add a column in the table with stdev in it
# x[,MAD:=mad(get(val), na.rm=TRUE), by=by]
# toRemove <- unique(x[MAD == 0]$Measurement)
# if(length(toRemove)>0)
# {
# print("Removing measurements with 0 MAD...")
# for(m in toRemove)
# {
# print(m)
# }
# y <- x[!(Measurement %in% toRemove)]
# x[, MAD:=NULL]
# y[, MAD:=NULL]
# return(y)
# }else
# {
# x[, MAD:=NULL]
# return(x)
# }
# }
# removeNoVarianceMeasurements <- function(x, val='Value', by=c('MaskChannel','ImageChannel','Measurement','Expt'))
# {
# # See if we have any columns to remove and record the info for reporting
# temp <- x[,list(stdev=sd(get(val))), by=by]
# temp <- data.frame(temp[stdev == 0])
# print("Removing measurements with 0 variance...")
# print(temp)
# # Tempororarily add a column in the table with stdev in it
# x[,stdev:=sd(get(val)), by=by]
# y <- x[stdev != 0]
# x[, stdev:=NULL]
# y[, stdev:=NULL]
# return(y)
# }
# Fixed-string (not regex) substitution of `old` with `new` in every row of
# column `col`, in place (data.table :=).
replaceSubStringInAllRowsOfCol <- function(x, old, new, col)
{
x[,c(col):=gsub(old,new,get(col),fixed=TRUE)]
}
# sample() that degrades gracefully: if more elements are requested than x
# contains, return x unchanged instead of erroring.
# Improvement: the default `replace` is now spelled FALSE rather than the
# reassignable shorthand F (same value, safer idiom).
trySample <- function(x, n, replace=FALSE, prob=NULL)
{
  if(n > length(x))
  {
    return(x)
  }
  else
  {
    return(sample(x, n, replace, prob))
  }
}
# Clean up measurement-name strings in column `col` (in place, data.table):
# strip the '_Order_'/'_Rep_' markers, normalize '$' to '.', drop the Java
# package prefix from imagej ops names, remove spaces, and turn ':' into
# '_'. Substitutions are fixed strings (not regex) and run in this order.
fixLongTableStringsInCol <- function(x, col)
{
replaceSubStringInAllRowsOfCol(x,'_Order_','',col)
replaceSubStringInAllRowsOfCol(x,'_Rep_','',col)
replaceSubStringInAllRowsOfCol(x,'$','.',col)
replaceSubStringInAllRowsOfCol(x,'net.imagej.ops.Ops.','',col)
replaceSubStringInAllRowsOfCol(x,' ','',col)
replaceSubStringInAllRowsOfCol(x,':','_',col)
}
##### Feature Calculations #####
# Split a merged channel name of the form 'A_minus_B' into its two parts.
unmergeChannelNames <- function(channelString)
{
  parts <- unlist(strsplit(channelString, '_minus_', fixed=TRUE))
  return(list(channel1=parts[1], channel2=parts[2]))
}
# For each cell/measurement group, compute pairwise differences between the
# Values of all single image channels ('None' and already-combined channels
# containing '_' are excluded). Returns one row per channel pair, named
# 'A_minus_B'; an empty table with the same columns if fewer than two
# channels exist.
# Improvement: fixed=T -> fixed=TRUE.
calculateChannelDifferences <- function(x)
{
  if(length(unique(x$ImageChannel)) > 1)
  {
    # Calculate differences between channels for each Cell and Measurement (but keep other column information too so include other cols in 'by')
    idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
    return(x[ImageChannel != 'None' & !grepl('_',ImageChannel,fixed=TRUE),list(ImageChannel=getComboNames(ImageChannel), Value=getComboDifferences(Value)), by=idCols])
  }else
  {
    # return an empty table with the same columns as provided
    return(x[FALSE])
  }
}
# Meant to be called on a subset of the main table.
# For each cell/measurement group, compute pairwise products of the Values
# of all image channels (excluding 'None'), one row per pair, named
# 'A_times_B'. Returns an empty table with the same columns if fewer than
# two channels exist.
# BUG FIX: the computed table was assigned to a local `x2` and only
# returned implicitly (invisibly, as the value of the assignment); it is
# now returned explicitly. The stale "differences" comment is corrected.
calculateChannelProducts <- function(x)
{
  if(length(unique(x$ImageChannel)) > 1)
  {
    # Calculate products between channels for each Cell and Measurement (but keep other column information too so include other cols in 'by')
    idCols <- getAllColNamesExcept(x, c('Value','ImageChannel'))
    return(x[ImageChannel != 'None',list(ImageChannel=getComboNames(ImageChannel, '_times_'), Value=getComboProducts(Value)), by=idCols])
  }else
  {
    # return an empty table with the same columns as provided
    return(x[FALSE])
  }
}
# Names for all unordered pairs of x's elements, joined by `operation`
# (e.g. c('a','b','c') -> 'a_minus_b', 'a_minus_c', 'b_minus_c').
# NULL when there are fewer than two elements.
getComboNames <- function(x, operation='_minus_')
{
  if(length(x) < 2)
  {
    return(NULL)
  }
  pairs <- combn(x, 2)
  return(paste0(pairs[1,], operation, pairs[2,]))
}
# Difference (first minus second) for every unordered pair of elements
# of `x`, in the same order combn() enumerates pairs.
# Returns NULL when fewer than two elements are supplied.
getComboDifferences <- function(x)
{
  if (length(x) < 2)
  {
    return(NULL)
  }
  pairs <- combn(x, 2)
  pairs[1, ] - pairs[2, ]
}
# Product for every unordered pair of elements of `x`, in the same order
# combn() enumerates pairs.
# Returns NULL when fewer than two elements are supplied.
getComboProducts <- function(x)
{
  if (length(x) < 2)
  {
    return(NULL)
  }
  pairs <- combn(x, 2)
  pairs[1, ] * pairs[2, ]
}
# Combine the four directional Haralick texture measures (Horizontal,
# Vertical, Diagonal, AntiDiagonal) into one rotation-invariant "Avg"
# column per measure.
#
# NOTE(review): despite the name, this computes the arithmetic mean of
# the four directions, not a root-mean-square -- kept as-is because
# downstream code may depend on the current values.
#
# @param x data.table of features containing "...Horizontal..." columns.
# @param removeOriginalHaralickMeasures if TRUE, drop the four directional
#        columns once the Avg column has been added.
# @return a data.table with one new "...Avg..." column per Haralick measure.
#
# Fixes: removed the unused local helper `myfunc` (defined but never
# called), and replaced 1:nrow(...) with seq_len(...) so the loop is
# skipped (instead of iterating over c(1, 0)) when no Haralick columns
# are present.
calculateRMSofHaralick <- function(x, removeOriginalHaralickMeasures=FALSE)
{
  # Derive the four direction-specific column names for each measure from
  # the "Horizontal" columns, plus the name of the new averaged column.
  hNames <- getColNamesContaining(x, 'Horizontal')
  vNames <- gsub("Horizontal", "Vertical", hNames)
  dNames <- gsub("Horizontal", "Diagonal", hNames)
  adNames <- gsub("Horizontal", "AntiDiagonal", hNames)
  avgNames <- gsub("Horizontal", "Avg", hNames)
  haralickNames <- data.frame(H=hNames, V=vNames, D=dNames, AD=adNames, avg=avgNames, stringsAsFactors=FALSE)
  # Work on a plain data.frame so columns can be added/dropped by name.
  x <- data.frame(x)
  for(i in seq_len(nrow(haralickNames)))
  {
    x[,haralickNames[i,5]] <- (x[,haralickNames[i,1]] + x[,haralickNames[i,2]] + x[,haralickNames[i,3]] + x[,haralickNames[i,4]])/4
    if(removeOriginalHaralickMeasures)
    {
      x <- x[,!(names(x) %in% as.character(haralickNames[i,1:4]))]
    }
  }
  return(data.table(x))
}
# Map point classes to plot colour strings: 'MT' points get translucent
# red, every other class gets translucent blue.
getColors <- function(pointClasses)
{
  colors <- rep('rgb(0,0,1,0.2)', times = length(pointClasses))
  isMutant <- pointClasses == 'MT'
  colors[isMutant] <- 'rgb(1,0,0,0.2)'
  colors
}
##### Testing #####
# testFunc2 <- function(x, measurement)
# {
# sdx <- sd(x, na.rm=TRUE)
# if(is.na(sdx) || sdx == 0 || is.nan(sdx))
# {
# print(paste0("Removing zero variance measure: ", measurement, '.'))
# return(NULL)
# }else
# {
# return(x)
# }
# }
# duh2 <- data.table(a=rep(1:3,each=3), b=c(1:3,c(1,1,1),1:3), c=c('a','b','c','d','e','f','g','h','i'))
# duh2[,list(Value=testFunc2(b, a)), by=c('a')]
|
c82975590c5e45d830f21e0c933dbdf59b6c494e
|
c6e28546148e8714443e996346c23cd22e6e260a
|
/At1/methods.R
|
6e406dda6f8e5fe3b5efa517f04cfad3bd444030
|
[] |
no_license
|
isrvasconcelos/SystemsIdentification-2017.2
|
8d68b20933edaac9c3f30d492ee9819230b4c242
|
b6a50572978ec08285ad340cba27d2bf6a2c3802
|
refs/heads/master
| 2020-03-17T17:25:25.369186
| 2018-05-19T02:23:33
| 2018-05-19T02:23:33
| 126,961,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,878
|
r
|
methods.R
|
source("systemblocks.R")
suppressMessages(library(Metrics))
######################################################################################
## Plot auxiliary
# Render a bar plot of mean-square errors, one bar per identification
# method, and write it to "<imgName>.eps".
#
# @param data numeric vector (or matrix) of MSE values; must be in the
#        same order as the hard-coded method labels below.
# @param imgName output file name without the ".eps" extension.
# @param title plot title.
# Side effect only: opens a postscript device, draws, and closes it.
exportMSE_Barplot <- function( data , imgName , title ) {
ylim <- c( 0 , max(data) )
setEPS(width=10, height=7)
postscript(paste(imgName, ".eps", sep='') )
barplot( data , beside = FALSE ,
names.arg=c("Zieg-Nich.","Haggl.","Smith","Sund-Krish.", "Mollen."),
col=c("cyan4"), ylab="Mean Square Error", xlab="Method",
ylim=ylim,
main=title
)
dev.off()
}
######################################################################################
## DONE
# Plot the raw and filtered step responses together with the tangent line
# at the inflection point, for visual calibration of the identification
# methods. Side effect only: draws on the current graphics device.
#
# Relies on variables from the calling script's environment:
# `t` (time vector), `InflectionPoint` and `InflectionIndex` (location of
# the inflection of the filtered response) -- assumed to be defined by the
# caller before this runs; TODO confirm.
# `interval`, `fileName` and `precision` are accepted only for signature
# compatibility with the identification methods and are not used here.
calibration <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
y <- response_filtered
yraw <- response
# Common y-axis limits so both traces are fully visible
ylim <- c( min(y, yraw) , max(y,yraw) )
plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
points(t, y, type='l', col='red')
points(x=t, y=tgline, type='l', col='dodgerblue', lty=5)
abline(v=InflectionPoint, h=y[InflectionIndex], col='lightblue', lty=4) # Reference for inflection point
# abline(v=t1, col='dodgerblue', lty=3)
# abline(v=t3, col='dodgerblue', lty=3)
legend("bottomright", legend=c("Original System",
"Filtered System",
"Tangent Line on Inflection"),
lty=c(1,1,5), col=c("tan","red", "dodgerblue"), bty="n")
}
######################################################################################
# 1st Order System Identification
## DONE
# Ziegler-Nichols open-loop identification of a first-order-plus-dead-time
# model from a step response, using the tangent line at the inflection
# point: dead time = where the tangent crosses zero (t1), time constant =
# t3 - t1 where t3 is where the tangent reaches the response peak.
#
# Relies on the global time vector `t` from the calling script for all
# time lookups -- TODO confirm it is aligned with `response`.
#
# @param interval sampling interval (kept for signature compatibility).
# @param response raw measured step response.
# @param response_filtered filtered response used for the timestamps.
# @param tgline tangent line at the inflection point, same length as `t`.
# @param fileName dataset file name; used to build the output EPS name.
# @param precision if TRUE, use a tighter zero-crossing threshold
#        (manual calibration for one specific dataset).
# @return mean square error (Metrics::mse) between the estimated model's
#         step response and the filtered response.
# Side effect: writes "ziegler-nichols/zn-<fileName>.eps".
#
# Fixes: removed the unused t2/t2Index computation and replaced `=` with
# `<-` for local assignment.
zieglerNichols <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
  ###########################################
  # Variable fixes
  y <- response_filtered
  yraw <- response
  ###########################################
  # Timestamps for evaluations
  t1Index <- which( abs(tgline) < .1 )[1] # Getting the index which tangent line is close to zero
  if(precision == TRUE) # Increase precision for this specific dataset
    t1Index <- which( round( abs(tgline) ,3) < .001 )[1]
  t1 <- t[t1Index]
  if(!length(t1))
    t1 <- min(t)
  peakIndex <- which( y == max(y) )[1]
  t3Index <- which( tgline > y[peakIndex] )[1]
  t3 <- t[t3Index]
  ###########################################
  # Parameters for estimation: gain, time constant and dead time
  Amax <- max(y)
  tau <- t3-t1
  theta <- t1
  ###########################################
  # System blocks
  PredictBlock <- series( ApproximationBlock1stOrder(Amax, tau), DelayerBlock(theta) )
  SystemResponse <- step(PredictBlock, t=t) # Running Experiment
  ###########################################
  # Plot the data
  ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
  legendNames <- c("Estimated System","Tangent Line on Inflection","Filtered System","Original System")
  if(!abs(sum(y-yraw))) # Check if response == response_filtered
    legendNames <- c("Estimated System", "Tangent Line on Inflection", "Original System" )
  setEPS()
  imgName <- paste("ziegler-nichols/zn-", gsub('.{4}$', '', fileName), ".eps", sep='')
  postscript(imgName)
  plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
  points(t, y, type='l', col='red') # Filtered data plot
  points(x=t, y=tgline, type='l', col='dodgerblue', lty=5)
  abline(v=t1, col='dodgerblue', lty=3)
  abline(v=t3, col='dodgerblue', lty=3)
  points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
  legend("bottomright", legend=legendNames, lty=c(1,5,1,1), col=c("navy","dodgerblue","red","tan"), bty="n")
  dev.off()
  return(mse(SystemResponse$y , y))
}
## DONE
# Hagglund's identification of a first-order-plus-dead-time model from a
# step response. Same tangent-line construction as zieglerNichols(), but
# the time constant endpoint t3 is where the tangent reaches 63.2% of the
# response peak (t3 <- t2) instead of the full peak.
# Relies on the global time vector `t` from the calling script -- TODO
# confirm it is aligned with `response`.
# Returns the mean square error (Metrics::mse) between the estimated
# model's step response and the filtered response; writes
# "hagglund/hag-<fileName>.eps" as a side effect.
hagglund <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
###########################################
# Variable fixes
y <- response_filtered
yraw <- response
###########################################
# Timestamps for evaluations
t1Index <- which( abs(tgline) < .1 )[1] # Getting the index which tangent line is close to zero
if(precision == TRUE) # Increase precision for this specific dataset
t1Index <- which( round( abs(tgline) ,3) < .001 )[1]
t1 <- t[t1Index]
if(!length(t1))
t1=min(t)
peakIndex <- which( y == max(y) )[1]
# 63.2% crossing of the tangent line -- the Hagglund variation
t2Index <- which( tgline > y[peakIndex]*0.632 )[1]
t3 <- t2 <- t[t2Index]
###########################################
# Parameters for estimation
Amax <- max(y)
tau <- t3-t1
theta <- t1
###########################################
# System blocks
PredictBlock <- series( ApproximationBlock1stOrder(Amax, tau), DelayerBlock(theta) )
SystemResponse <- step(PredictBlock, t=t) # Running Experiment
###########################################
# Plot the data
ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
legendNames <- c("Estimated System","Tangent Line on Inflection","Filtered System","Original System")
if(!abs(sum(y-yraw))) # Check if response == response_filtered
legendNames <- c("Estimated System", "Tangent Line on Inflection", "Original System" )
setEPS()
imgName <- paste("hagglund/hag-", gsub('.{4}$', '', fileName), ".eps", sep='')
postscript(imgName)
plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
points(t, y, type='l', col='red') # Filtered data plot
points(x=t, y=tgline, type='l', col='dodgerblue', lty=5)
# abline(v=InflectionPoint, h=y[InflectionIndex], col='lightblue', lty=4) # Reference for inflection point
abline(v=t1, col='dodgerblue', lty=3)
abline(v=t3, col='dodgerblue', lty=3)
points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
legend("bottomright", legend=legendNames, lty=c(1,5,1,1), col=c("navy","dodgerblue","red","tan"), bty="n")
dev.off()
return(mse(SystemResponse$y , y))
}
## DONE
# Sundaresan-Krishnaswamy two-point identification of a first-order-plus-
# dead-time model. Uses the times at which the RAW response reaches 35.3%
# and 85.3% of its peak (the filtered response is intentionally ignored).
# Relies on the global time vector `t` from the calling script -- TODO
# confirm it is aligned with `response`.
# Returns the mean square error (Metrics::mse) between the estimated
# model's step response and the raw response; writes
# "sun-kris/sk-<fileName>.eps" as a side effect. `precision` is unused.
sunKris <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
###########################################
# Variable fixes: This procedure evaluates datasets without filtering
yraw <- response
y <- yraw
###########################################
# Timestamps for evaluations: 35.3% and 85.3% crossings of the peak
peakIndex <- which( y == max(y) )[1]
A1 <- y[peakIndex]*0.353
t1Index <-which( y > A1 )[1]
t1 <- t[t1Index]
A2 <- y[peakIndex]*0.853
t2Index <-which( y > A2 )[1]
t2 <- t[t2Index]
###########################################
# Parameters for estimation (Sundaresan-Krishnaswamy formulas)
Amax <- max(y)
tau <- 0.67*(t2-t1)
theta <- 1.3*t1 - 0.29*t2
###########################################
# System blocks
PredictBlock <- series( ApproximationBlock1stOrder(Amax, tau), DelayerBlock(theta) )
SystemResponse <- step(PredictBlock, t=t) # Running Experiment
###########################################
# Plot the data
ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
legendNames <- c("Estimated System", "Filtered System" ,"Original System")
if(!abs(sum(y-yraw))) # Check if response == response_filtered
legendNames <- c("Estimated System", "Original System")
setEPS()
imgName <- paste("sun-kris/sk-", gsub('.{4}$', '', fileName), ".eps", sep='')
postscript(imgName)
plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
abline(v=t1, col='dodgerblue', lty=3)
abline(v=t2, col='dodgerblue', lty=3)
points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
legend("bottomright", legend=legendNames, lty=c(1,1), col=c("navy","tan"), bty="n")
dev.off()
return(mse(SystemResponse$y , y))
}
# DONE
# Smith's two-point identification of a first-order-plus-dead-time model.
# Uses the times at which the RAW response reaches 28.3% and 63.2% of its
# peak (the filtered response is intentionally ignored).
# Relies on the global time vector `t` from the calling script -- TODO
# confirm it is aligned with `response`.
# Returns the mean square error (Metrics::mse) between the estimated
# model's step response and the raw response; writes
# "smith/sm1-<fileName>.eps" as a side effect. `precision` is unused.
smith1st <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
## DONE
###########################################
# Variable fixes: This procedure evaluates datasets without filtering
yraw <- response
y <- yraw
###########################################
# Timestamps for evaluations: 28.3% and 63.2% crossings of the peak
peakIndex <- which( y == max(y) )[1]
A1 <- y[peakIndex]*0.283
t1Index <-which( y > A1 )[1]
t1 <- t[t1Index]
A2 <- y[peakIndex]*0.632
t2Index <-which( y > A2 )[1]
t2 <- t[t2Index]
###########################################
# Parameters for estimation (Smith's formulas)
Amax <- max(y)
tau <- 1.5*(t2-t1)
theta <-t2 - tau
###########################################
# System blocks
PredictBlock <- series( ApproximationBlock1stOrder(Amax, tau), DelayerBlock(theta) )
SystemResponse <- step(PredictBlock, t=t) # Running Experiment
###########################################
# Plot the data
ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
legendNames <- c("Estimated System", "Original System")
setEPS()
imgName <- paste("smith/sm1-", gsub('.{4}$', '', fileName), ".eps", sep='')
postscript(imgName)
plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
# points(t, y, type='l', col='tan')
# points(x=t, y=tgline, type='l', col='dodgerblue', lty=5)
# abline(v=InflectionPoint, h=y[InflectionIndex], col='lightblue', lty=4) # Reference for inflection point
abline(v=t1, col='dodgerblue', lty=3)
abline(v=t2, col='dodgerblue', lty=3)
points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
legend("bottomright", legend=legendNames, lty=c(1,1), col=c("navy", "tan"), bty="n")
dev.off()
return(mse(SystemResponse$y , y))
}
######################################################################################
# TODO: PENDING
# 2nd Order System Identification
# Smith's method for a second-order-plus-dead-time model (marked
# TODO/PENDING upstream: `tau` is still a placeholder constant).
# Uses the 20% / 60% crossings of the filtered response to estimate the
# damping ratio, builds a second-order (or two-cascaded-first-order)
# block, and compares its step response with the filtered data.
# Relies on the global time vector `t` from the calling script -- TODO
# confirm it is aligned with `response`.
# Returns the mean square error (Metrics::mse); writes
# "smith/sm2-<fileName>.eps" as a side effect.
#
# Fixes vs. original:
#  * the overdamped branch assigned `tau1` twice and never defined `tau2`,
#    so the following series() call crashed;
#  * `(-1 + zeta^2)^1/2` parses as ((...)^1)/2, i.e. a division by two,
#    not a square root (cf. the correct `^(1/2)` in mollenkamp());
#  * the gain block referenced an undefined `K`; the steady-state gain
#    computed here is `Amax`;
#  * the legend listed 2 labels against 3 line styles/colours.
smith2nd <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
  ###########################################
  y <- response_filtered
  yraw <- response
  ###########################################
  # Timestamps for evaluations: 20% and 60% crossings of the peak
  peakIndex <- which( y == max(y) )[1]
  A1 <- y[peakIndex]*0.2
  t1Index <-which( y > A1 )[15] # [15]: manual calibration offset, as upstream
  t1 <- t[t1Index]
  A2 <- y[peakIndex]*0.6
  t2Index <-which( y > A2 )[1]
  t2 <- t[t2Index]
  ###########################################
  # Parameters for estimation
  Amax <- max(y)
  zeta <- t1/t2
  tau <- 1 # TODO: Discover how to evaluate tau
  theta <- t2-tau
  SmithBlock <- 0
  if(zeta < 1)
    SmithBlock <- ApproximationBlock2ndOrder( nP=1, dPs2=(tau^2) , dPs=(2*zeta*tau), dP=1 )
  else if(zeta >=1) {
    # Overdamped: factor into two cascaded first-order time constants.
    tau1 <- tau*zeta - tau*sqrt(zeta^2 - 1)
    tau2 <- tau*zeta + tau*sqrt(zeta^2 - 1)
    SmithBlock <- series( ApproximationBlock1stOrder(1, tau1), ApproximationBlock1stOrder(1, tau2) )
  }
  ###########################################
  # System blocks
  Delayer <- DelayerBlock(theta)
  Gain <- ApproximationBlock1stOrder(Amax, 1)
  PredictBlock <- series( Gain , SmithBlock , Delayer )
  SystemResponse <- step( PredictBlock , t=t) # Running Experiment
  ###########################################
  # Plot the data
  ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
  legendNames <- c("Estimated System", "Filtered System", "Original System")
  setEPS()
  imgName <- paste("smith/sm2-", gsub('.{4}$', '', fileName), ".eps", sep='')
  postscript(imgName)
  plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
  points(t, y, type='l', col='red')
  points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
  legend("bottomright", legend=legendNames, lty=c(1,1,1), col=c("navy","red", "tan"), bty="n")
  dev.off()
  return(mse(SystemResponse$y , y))
}
# DONE
# Mollenkamp's three-point identification of a second-order-plus-dead-time
# model. Uses the times at which the RAW response reaches 15%, 45% and
# 75% of its peak to estimate damping ratio, natural frequency and dead
# time, then branches on under- vs overdamped dynamics.
# Relies on the global time vector `t` from the calling script -- TODO
# confirm it is aligned with `response`.
# Returns the mean square error (Metrics::mse) between the estimated
# model's step response and the raw response; writes
# "mollenkamp/mol-<fileName>.eps" as a side effect.
mollenkamp <- function(interval, response, response_filtered, tgline, fileName, precision=FALSE) {
###########################################
# Variable fixes: This procedure evaluates datasets without filtering
yraw <- response
y <- yraw
###########################################
# Timestamps for evaluations: 15% / 45% / 75% crossings of the peak
peakIndex <- which( y == max(y) )[1]
A1 <- y[peakIndex]*0.15
t1Index <-which( y > A1 )[1]
t1 <- t[t1Index]
A2 <- y[peakIndex]*0.45
t2Index <-which( y > A2 )[1]
t2 <- t[t2Index]
A3 <- y[peakIndex]*0.75
t3Index <-which( y > A3 )[1]
t3 <- t[t3Index]
if(precision==TRUE) {
t1Index <- which( y/max(y) > 0.15 )[20] # Manual calibration
t2Index <- which( y/max(y) > 0.45 )[1]
t3Index <- which( y/max(y) > 0.75 )[1]
t1 <- t[t1Index]
t2 <- t[t2Index]
t3 <- t[t3Index]
}
###########################################
# Parameters evaluation (Mollenkamp's empirical formulas)
MollenkampBlock <- 0
f2 <- 0
K <- y[peakIndex]
x <- (t2-t1) / (t3-t1)
zeta <- ( 0.085 - 5.547*((0.475-x)^2) ) / ( x - 0.356 )
if(zeta < 1) { # Underdamped
f2 <- (0.708)*((2.811)^zeta)
wn <- f2/(t3-t1)
f3 <- (0.922)*((1.66)^zeta)
theta <- t2 - (f3/wn)
MollenkampBlock <- ApproximationBlock2ndOrder( nP=(wn^2) , dPs2=1 , dPs=(2*zeta*wn) , dP=(wn^2) )
}
else if(zeta >=1) { # Overdamped
f2 <- 2.6*zeta - 0.6
# NOTE(review): f3 below reuses the underdamped formula -- confirm this
# is intended for the overdamped branch.
f3 <- (0.922)*((1.66)^zeta)
wn <- f2/(t3-t1)
theta <- t2 - (f3/wn)
tau1 <- ( zeta + (-1 + zeta^2)^(1/2) ) / wn
tau2 <- ( zeta - (-1 + zeta^2)^(1/2) ) / wn
MollenkampBlock <- series( ApproximationBlock1stOrder(1, tau1) , ApproximationBlock1stOrder(1, tau2) )
}
###########################################
# System blocks
Delayer <- DelayerBlock(theta)
Gain <- ApproximationBlock1stOrder(K, 1)
PredictBlock <- series( Gain , MollenkampBlock , Delayer )
SystemResponse <- step( PredictBlock , t=t) # Running Experiment
###########################################
# Plot the data
ylim <- c( min(y, yraw, SystemResponse$y) , max(y,yraw, SystemResponse$y) )
legendNames <- c("Estimated System", "Original System")
setEPS()
imgName <- paste("mollenkamp/mol-", gsub('.{4}$', '', fileName), ".eps", sep='')
postscript(imgName)
plot(t,yraw, type='l', col='tan', xlab="Time (s)", ylab="Amplitude", ylim=ylim)
abline(v=t1, col='dodgerblue', lty=3)
abline(v=t2, col='dodgerblue', lty=3)
abline(v=t3, col='dodgerblue', lty=3)
points(SystemResponse$t, SystemResponse$y, type='l', col='navy')
legend("bottomright", legend=legendNames, lty=c(1,1), col=c("navy","tan"), bty="n")
dev.off()
return(mse(SystemResponse$y , y))
}
|
c0684eb52550efe88a21c5d65f1a5bb92676c0cb
|
420938f4f6a85690269001ecce76f08cf3397c4e
|
/man/bracket_drop.Rd
|
c7e33c0d547978b6a031fc300f5088fa96c634cf
|
[] |
no_license
|
cran/table.glue
|
d50fb96df4bdf322b03e6ec1d13a97b856f4d9d9
|
45af6a4234fa45b2a1c9031205b6bcc49b68e3e1
|
refs/heads/master
| 2023-02-19T10:14:29.835635
| 2023-02-07T07:30:02
| 2023-02-07T07:30:02
| 300,206,546
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,290
|
rd
|
bracket_drop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bracket_helpers.R
\name{bracket_drop}
\alias{bracket_drop}
\alias{bracket_extract}
\alias{bracket_insert_left}
\alias{bracket_insert_right}
\alias{bracket_point_estimate}
\alias{bracket_lower_bound}
\alias{bracket_upper_bound}
\title{Bracket helpers}
\usage{
bracket_drop(x, bracket_left = "(", bracket_right = ")")
bracket_extract(
x,
bracket_left = "(",
bracket_right = ")",
drop_bracket = FALSE
)
bracket_insert_left(x, string, bracket_left = "(", bracket_right = ")")
bracket_insert_right(x, string, bracket_left = "(", bracket_right = ")")
bracket_point_estimate(x, bracket_left = "(", bracket_right = ")")
bracket_lower_bound(
x,
bracket_left = "(",
separator = ",",
bracket_right = ")"
)
bracket_upper_bound(
x,
bracket_left = "(",
separator = ",",
bracket_right = ")"
)
}
\arguments{
\item{x}{a character vector where each value contains a point
estimate and confidence limits.}
\item{bracket_left}{a character value specifying what symbol is
used to bracket the left hand side of the confidence interval}
\item{bracket_right}{a character value specifying what symbol is
used to bracket the right hand side of the confidence interval}
\item{drop_bracket}{a logical value (\code{TRUE} or \code{FALSE}). If \code{TRUE},
then the symbols on the left and right hand side of the interval
will not be included in the returned value. If \code{FALSE}, these symbols
will be included.}
\item{string}{a character value of a string that will be inserted
into the left or right side of the bracket.}
\item{separator}{a character value specifying what symbol is used
to separate the lower and upper bounds of the interval.}
}
\value{
a character value with length equal to the length of \code{x}.
}
\description{
If you have table values that take the form
\emph{point estimate (uncertainty estimate)}, you can use these
functions to access specific parts of the table value.
}
\examples{
tbl_value <- "12.1 (95\% CI: 9.1, 15.1)"
bracket_drop(tbl_value)
bracket_point_estimate(tbl_value)
bracket_extract(tbl_value, drop_bracket = TRUE)
bracket_lower_bound(tbl_value)
bracket_upper_bound(tbl_value)
}
|
75484b35b9d5c4aed1ca691683de3a185c386373
|
441b9022b155015fe64e707742bcc47b4dd8b542
|
/scripts/land_use/water_use_CA_counties.R
|
7385215d300aff06c29b6fdf455e328493ebd5f4
|
[] |
no_license
|
sudokita/ClimateActionR
|
9f19e61cd3f9cf6b5b878928f91e6f7277c84f26
|
3697477ff00888d28443b09f3998bce2d89af585
|
refs/heads/master
| 2021-01-21T02:50:37.103083
| 2016-03-19T06:14:59
| 2016-03-19T06:14:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,444
|
r
|
water_use_CA_counties.R
|
#################################################-
## DOMAIN: LAND USE
## Retrieve California water use data 1985-2010
## URL: http://waterdata.usgs.gov/ca/nwis/wu
## Author: W. Petry
#################################################-
## Preliminaries
library(rdrop2)
library(dplyr)
library(tidyr)
#################################################-
## Download data, unzip, and upload to Dropbox
#################################################-
data.url<-"http://waterdata.usgs.gov/ca/nwis/water_use?format=rdb&rdb_compression=value&wu_area=County&wu_year=ALL&wu_county=ALL&wu_category=TP%2CPS%2CCO%2CDO%2CIN%2CPT%2CLI%2CIT%2CIC%2CIG%2CHY%2CWW&wu_county_nms=--ALL%2BCounties--&wu_category_nms=Total%2BPopulation%252CPublic%2BSupply%252CCommercial%252CDomestic%252CIndustrial%252CTotal%2BThermoelectric%2BPower%252CLivestock%252CIrrigation%252C%2BTotal%252CIrrigation%252C%2BCrop%252CIrrigation%252C%2BGolf%2BCourses%252CHydroelectric%2BPower%252CWastewater%2BTreatment"
# The USGS rdb file has a long comment preamble; data rows start after
# line 165 and the single header line sits two lines above them, so the
# column names are read in a separate pass below.
# Fix: dropped the dangling `col.names=` argument (editing leftover) and
# spelled out TRUE/FALSE instead of T/F.
water.use <- read.table(data.url, header = FALSE, skip = 165, sep = "\t", na.strings = c("-", "na"))
names(water.use) <- names(read.table(data.url, header = TRUE, skip = 163, nrows = 1, sep = "\t"))
# Extract water use data: long format, one row per county/year/use
# category, with withdrawals in Mgal/d.
# NOTE(review): gather()/rename_()/mutate_each() are deprecated dplyr/tidyr
# verbs -- consider pivot_longer()/rename_with()/across() when the package
# versions are next bumped; left unchanged here to preserve behaviour.
water.withdrawals<-water.use %>%
select(-contains("state_")) %>%
rename(total.population=Total.Population.total.population.of.area..in.thousands) %>%
mutate(total.population=total.population*1000) %>%
group_by(county_cd,county_nm,year,total.population) %>%
select(contains("in.Mgal")) %>%
mutate_each(funs(as.numeric)) %>%
rename_(.dots=setNames(names(.),tolower(gsub("\\.{1,2}in\\.[A-z]gal\\.d$","",names(.))))) %>%
gather(key=use.category,value=Mgal.d,-county_cd,-county_nm,-year,-total.population)
# Extract irrigation area data: long format, one row per county/year/use
# category, with areas in thousands of acres.
irrigated.area<-water.use %>%
select(-contains("state_")) %>%
group_by(county_cd,county_nm,year) %>%
select(contains("acres")) %>%
rename_(.dots=setNames(names(.),tolower(gsub("^Irrigation\\.{1,2}","",names(.))))) %>%
rename_(.dots=setNames(names(.),tolower(gsub("\\.{1,2}in\\.thousand\\.acres$","",names(.))))) %>%
gather(key=use.category,value=thousand.acres,-county_cd,-county_nm,-year)
# Upload both tables to Dropbox and remove the local temporary CSVs.
write.csv(water.withdrawals,"water_withdrawals.csv")
drop_upload("water_withdrawals.csv",dest="ClimateActionRData")
unlink("water_withdrawals.csv")
write.csv(irrigated.area,"irrigated_area.csv")
drop_upload("irrigated_area.csv",dest="ClimateActionRData")
unlink("irrigated_area.csv")
|
7b68ef78a30d8e33ff774eac58c13b041ea7d470
|
5c21757fb60ca9fa2232f87cc05ade4e34de6466
|
/man/randdis.Rd
|
c54a4acc5689d88e10b5a50b9f6a6145d9ab17a8
|
[] |
no_license
|
cran/DBGSA
|
d2f0c59b50ce4568d98c8acb6aeff7542ed8d5b0
|
5ca177761739df5c3910876059d1b4a7f8fb1183
|
refs/heads/master
| 2016-09-06T20:06:04.407631
| 2011-12-29T00:00:00
| 2011-12-29T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,732
|
rd
|
randdis.Rd
|
\name{randdis}
\alias{randdis}
\title{Randomly generating some gene expression profiles by gene resampling}
\description{
A function that generates the required number of gene expression profiles by permutation (gene resampling).
}
\usage{
randdis(z,minigenenum,randnum,setdis,normnum,Meth,resultname)}
\arguments{
\item{z}{
An input matrix which contains the gene profiles
}
\item{minigenenum}{
An integer indicating the number of genes to be permuted; it must be larger than the number of genes in any gene function label.
}
\item{randnum}{
An integer representing the number of permutations.
}
\item{setdis}{
A character string indicating which method is used to compute the distances between the case group and the control group; either avelinkdis or centdis.
}
\item{normnum}{
An integer indicating the number of samples in the case group.
}
\item{Meth}{
A character string indicating which method is used to compute the distances between genes; either euclidean or Manhattan.
}
\item{resultname}{
character string which represents the name of the output file
}
}
\value{
A text containing the distances only.
}
\author{ Li Jin, Huang Meilin
}
\examples{
\dontrun{
data(afExp)
##Randomly generating the gene expression profile and save it in the text named rand
randdis(afExp,500,10,avelinkdis,10,"euclidean","rand.txt")
}
}
\keyword{methods}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.