blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c78338a9234309b29b0a92cc1f2a8a23208f68b2 | 66e1c7f5c84ebf79a883ff00e026e238d70cea29 | /R/3-writeFactors.R | 07fa4cb5cc5af5f884741be63dcf669e824eef59 | [] | no_license | datad/CLIGEN | 8534bfc9ea347be61babd067ee568b950b14b5be | 0cbae5e96a11436e1d4449f59bababc5d547aa96 | refs/heads/master | 2021-06-11T05:24:41.396289 | 2020-05-25T20:59:03 | 2020-05-25T20:59:03 | 128,544,596 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 613 | r | 3-writeFactors.R | #read rubik output
# Read the Rubik (MATLAB tensor-factorisation) output for every rank r in
# rRange: load the per-patient factor matrix u2.csv, drop incomplete rows,
# standardise the columns, and cache the result as an .RData file per rank.
rRange = c(3:90)
# Loads `patients` -- the patient identifiers used as row names below.
load("data/patients.RData")
for(r in rRange)
{
# Factor matrix (patients x factors) written by the rank-r Rubik run.
patientsFactors <- read.table(file=paste0("MATLAB-RUBIK/rubikOutput/R_",
r,"/u2.csv"),
sep = ",", quote = "'",
header = FALSE, stringsAsFactors = FALSE,
skipNul = FALSE, blank.lines.skip = FALSE)
# Drop rows with missing values before labelling and scaling.
mydata <- na.omit(patientsFactors)
# NOTE(review): assumes na.omit() drops nothing, otherwise length(patients)
# would no longer match nrow(mydata) -- verify against the input files.
row.names(mydata) <- patients
mydata <- scale(mydata) # standardize variables (zero mean, unit variance)
save(mydata,file=paste0("temp/patientFactors_r",r,".RData"))
}
|
96e1e5391a6abe274943e0e931a4646f35c84950 | 4b3de47e87774d4e4cc72cbbe9ab3ce0a85d635e | /R/scat.R | c6203d868743871e2bdc4507f2f95457238ca1b6 | [
"MIT"
] | permissive | tidymodels/modeldata | 2a697daa91594c78bfeec05a0052a83693505c9a | d44d367dda2ceb58282c4bc844aa9453d33ad064 | refs/heads/main | 2023-08-11T07:58:18.615400 | 2023-08-09T19:22:49 | 2023-08-09T19:22:49 | 224,252,166 | 23 | 4 | NOASSERTION | 2023-08-09T19:21:51 | 2019-11-26T17:46:11 | R | UTF-8 | R | false | false | 617 | r | scat.R | #' Morphometric data on scat
#'
#' Reid (2015) collected data on animal feces in coastal California. The data
#' consist of DNA verified species designations as well as fields related to
#' the time and place of the collection and the scat itself. The data are on
#' the three main species.
#'
#'
#' @name scat
#' @aliases scat
#' @docType data
#' @return \item{scat}{a tibble}
#' @source Reid, R. E. B. (2015). A morphometric modeling approach to
#' distinguishing among bobcat, coyote and gray fox scats. \emph{Wildlife
#' Biology}, 21(5), 254-262
#' @keywords datasets
#' @examples
#' data(scat)
#' str(scat)
NULL
|
640bd71fcb1e5eb22a308cfbe159a17ca0e10694 | f1c7e1c6b0907a6d3184e99b033dbfb183105f94 | /ch14by밍.R | eb7a372dfd641585a495556d9f1424c3c08e91b0 | [] | no_license | Ming-JU/STATISTICS | 3e723a855d1eac1c42097d5cd518d094e6beeb79 | ab58809c4765da2a6fd6f1bc5e43662070beb7aa | refs/heads/main | 2023-02-12T18:56:43.964967 | 2021-01-02T19:38:16 | 2021-01-02T19:38:16 | 326,256,749 | 0 | 0 | null | null | null | null | UHC | R | false | false | 7,969 | r | ch14by밍.R |
# 예제 ----------------------------------------------------------------------
# 예제1 ---------------------------------------------------------------------
y1 = c(10, 15, 8, 12,15)
y2 = c(14, 18, 21, 15)
y3 = c(17, 16, 14, 15, 17, 15, 18)
y4 = c(12, 15, 17, 15, 16, 15)
k = 4
ni = c(length(y1), length(y2), length(y3), length(y4))
ni
yibar = c(mean(y1), mean(y2), mean(y3), mean(y4))
n = sum(ni)
y = c(y1, y2, y3, y4)
ybar = mean(y)
sst = sum((y - ybar)**2)
sst2 = var(y)*(n-1)
sstr = sum(ni*(yibar - ybar)**2)
sse = sum((ni - 1)*c(var(y1), var(y2), var(y3), var(y4)))
ss = c(sstr, sse, sst)
Df = c(k-1, n-k, n-1) # 자유도
ms = ss/Df
F0 = ms[1]/ms[2]
anovaTbl = data.frame("제곱" = c(sstr, sse, sst),
"자유도" = Df, "평균제곱" = ms,
"F값" = c(F0, "", ""))
rownames(anovaTbl) = c("처리", "오차", "합계")
print(anovaTbl)
F0
y1 = c(10,15,8,12,15)
y2 = c(14,18,21,15)
y3 = c(17,16,14,15,17,15,18)
y4 = c(12,15,17,15,16,15)
y = c(y1,y2,y3,y4)
ni = c(length(y1),length(y2),length(y3),length(y4))
n = sum(ni)
k = 4
yibar = c(mean(y1),mean(y2),mean(y3),mean(y4))
ybar = mean(y)
sst = sum((y- ybar)^2)
sse = sum((ni-1)*c(var(y1),var(y2),var(y3),var(y4)))
sstr = sum(ni*(yibar - ybar)^2)
ss = c(sstr,sse,sst)
Df = c(k-1,n-k,n-1)
ms = ss/Df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=Df,"평균제곱"=ms,
"F값"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합계")
#fit = lm(rownames,data)
#aTb1 = anova(fit)
print(aTb1)
# ㄴㄴ ----------------------------------------------------------------------
Fvalue <- ms[1]/ms[2]
Pvalue <- 1 - pf(Fvalue, k-1, n-k)
anovaTbl <- data.frame('제곱합'=ss, '자유도'=Df, '평균제곱'=ms, 'F값'=c(Fvalue,'',''), 'P-값'=c(Pvalue,'','') )
rownames(anovaTbl) <- c('처리', '오차', '합계')
print(anovaTbl)
ni <- c(32, 16, 16)
yibar <- c(81.06, 78.56, 87.81)
ybar <- sum(yibar*ni)/64
sstr <- sum(ni*(yibar-ybar)^2)
ni <- c(length(y1), length(y2), length(y3), length(y4))
group <- rep(c('A', 'B', 'C', 'D'), ni)
data <- data.frame( '마모도' = c(y1, y2, y3, y4), '코팅' = group )
print(data)
fit <- lm(마모도 ~ 코팅, data)
aTbl <- anova(fit)
print(aTbl)
# 2절 ----------------------------------------------------------------------
# #2.1 --------------------------------------------------------------------
y1<-c(6,10)
y2<-c(9,5)
y3<-c(9,7)
y4<-c(4,6)
k <- 4
ni <- c(length(y1), length(y2), length(y3), length(y4))
yibar <-c(mean(y1), mean(y2), mean(y3), mean(y4))
n <- sum(ni)
y <- c(y1, y2, y3, y4)
ybar <- mean(y)
sst <- sum((y-ybar)^2)
sst2 <- var(y)*(n-1)
sstr <- sum(ni*(yibar-ybar)^2)
sse <- sum((ni-1)*c(var(y1), var(y2), var(y3), var(y4)))
ss<- c(sstr, sse, sst)
Df <- c(k-1, n-k, n-1)
ms <- ss/Df
F0 <-ms[1]/ms[2]
anovaTb1 <- data.frame("제곱합"= ss, "자유도"= Df, "평균"=ms, "오차"=c(F0,"",""))
rownames(anovaTb1) <- c("처리", "오차", "합계")
print(anovaTb1)
# #2.1 --------------------------------------------------------------------
y1 = c(6,10)
y2 = c(9,5)
y3 = c(9,7)
y4 = c(4,6)
y = c(y1,y2,y3,y4)
ni = c(length(y1),length(y2),length(y3),length(y4))
yibar = c(mean(y1),mean(y2),mean(y3),mean(y4))
k = 4
ybar = mean(y)
sst = sum((y-ybar)^2)
sse = sum((ni-1)*(c(var(y1),var(y2),var(y3),var(y4))))
sstr = sum(ni*(yibar-ybar)^2)
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합계")
print(aTb1)
# #2.2 --------------------------------------------------------------------
# Exercise 2.2: one-way ANOVA by hand for k = 3 groups of 4 observations.
y1<-c(35, 24, 28, 21)
y2<-c(19, 14, 14, 13)
y3<-c(21, 16, 21, 14)
k = 3
ni = c(length(y1),length(y2),length(y3))   # group sizes
y = c(y1, y2, y3)                          # pooled observations
yibar = c(mean(y1),mean(y2),mean(y3))      # group means
ybar = mean(y)                             # grand mean
n = sum(ni)
# BUG FIX: the total sum of squares previously used the undefined name `yn`;
# the pooled data vector is `y`.
sst = sum((y-ybar)^2)
sstr = sum(ni*(yibar-ybar)^2)                    # between-group (treatment) SS
sse = sum((ni-1)*c(var(y1),var(y2),var(y3)))     # within-group (error) SS
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0 = ms[1]/ms[2]   # F statistic = MS(treatment) / MS(error)
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합계")
print(aTb1)
# #2.3 --------------------------------------------------------------------
y1 = c(5,3,2,2)
y2 = c(5,0,1)
y3 = c(2,1,0,1)
y = c(y1,y2,y3)
yibar = c(mean(y1),mean(y2),mean(y3))
ni = c(length(y1),length(y2),length(y3))
ybar = mean(y)
k = 3
n = sum(ni)
sst = sum((y-ybar)^2)
sstr = sum(ni*(yibar-ybar)^2)
sse = sum((ni-1)*c(var(y1),var(y2),var(y3)))
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합계")
print(aTb1)
# 2.5---------------------------------------------------------------------
y1 = c(2,1,3)
y2 = c(1,5)
y3 = c(9,5,6,4)
y4 = c(3,4,5)
y = c(y1,y2,y3,y4)
yibar = c(mean(y1),mean(y2),mean(y3),mean(y4))
ni = c(length(y1),length(y2),length(y3),length(y4))
ybar = mean(y)
k = 4
n = sum(ni)
sst = sum((y-ybar)^2)
sstr = sum(ni*(yibar-ybar)^2)
sse = sum((ni-1)*c(var(y1),var(y2),var(y3),var(y4)))
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균제곱"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합계")
print(aTb1)
# #2.5 --------------------------------------------------------------------
y1 = c(2,1,3)
y2 = c(1,5)
y3 = c(9,5,6,4)
y4 = c(3,4,5)
y = c(y1,y2,y3,y4)
ni = c(length(y1),length(y2),length(y3),length(y4))
n = sum(ni)
yibar = c(mean(y1),mean(y2),mean(y3),mean(y4))
sst = sum((y-ybar)^2)
sstr = sum(ni*(yibar-ybar)^2)
sse = sum((ni-1)*c(var(y1),var(y2),var(y3),var(y4)))
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균제곱"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합")
print(aTb1)
# #2.6 --------------------------------------------------------------------
ni = c(10,6,9)
yibar = c(5,2,7)
n = sum(ni)
ybar = sum(ni*yibar)/n
k = 3
sse = 30+16+25
sstr = sum(ni*(yibar-ybar)^2)
sst = sse+sstr
ss = c(sstr,sse,sst)
df = c(k-1, n-k ,n-1)
ms = ss/df
F0 = ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("오차","처리","합")
print(aTb1)
# #2.7 --------------------------------------------------------------------
yibar = c(81.06,78.56,87.81)
ni = c(32,16,16)
s = c(17.05,15.43,14.36)
n = sum(ni)
ybar = sum(yibar*ni)/n
k = 3
sse = sum((ni-1)*s^2)
sstr = sum(ni*(yibar-ybar)^2)
sst = sse+sstr
ss = c(sstr,sse,sst)
df = c(k-1,n-k,n-1)
ms = ss/df
F0= ms[1]/ms[2]
aTb1 = data.frame("제곱합"=ss,"자유도"=df,"평균"=ms,"오차"=c(F0,"",""))
rownames(aTb1) = c("처리","오차","합")
print(aTb1)
# #3.1 --------------------------------------------------------------------
# Upper 5% critical values of the F distribution; note F(5,10) != F(10,5).
qf(1-0.05,5,10)
qf(1-0.05,10,5)
# #3.2 --------------------------------------------------------------------
# Upper 10% critical values: effect of the denominator df on the quantile.
qf(1-0.1,3,5)
qf(1-0.1,3,10)
# #3.3 --------------------------------------------------------------------
# F test: compare the observed F0 = MS(treat)/MS(error) with the 10% critical value.
qf(1-0.1,5,20)
F0 = (104/5)/(109/20)
F0
# #3.4 --------------------------------------------------------------------
# Same comparison at the 5% level with df = (5, 35).
F0 =(24/5)/(57/35)
qf(1-0.05,5,35)
F0
# 3.8 ---------------------------------------------------------------------
# Exercise 3.8: one-way ANOVA table for three groups of five measurements,
# followed by the p-value of the overall F test.
y1 <- c(0.95,0.86,0.71,0.72,0.74)
y2 <- c(0.71,0.85,0.62,0.72,0.64)
y3 <- c(0.69,0.68,0.51,0.73,0.44)
groups <- list(y1, y2, y3)
y <- unlist(groups)                         # pooled observations
yibar <- vapply(groups, mean, numeric(1))   # group means
ni <- vapply(groups, length, integer(1))    # group sizes
ybar <- mean(y)                             # grand mean
k <- 3
n <- sum(ni)
# Sum-of-squares decomposition: total = treatment (between) + error (within).
sst <- sum((y - ybar)^2)
sstr <- sum(ni * (yibar - ybar)^2)
sse <- sum((ni - 1) * vapply(groups, var, numeric(1)))
ss <- c(sstr, sse, sst)
df <- c(k - 1, n - k, n - 1)
ms <- ss / df
F0 <- ms[1] / ms[2]   # F statistic = MS(treatment) / MS(error)
aTb1 <- data.frame("제곱합"=ss,"자유도"=df,"평균제곱"=ms,"오차"=c(F0,"",""))
rownames(aTb1) <- c("처리","오차","합계")
print(aTb1)
pvalue <- 1 - pf(F0, 2, 12)
pvalue
|
b77e1787f5ffed31bbeed01d681f7e0dbf940c98 | 797b15e5b58f536fd87e59894c3fd6a5203031f3 | /ClassFiles/week4/Queue/queues.R | 9165e7dd2861f95dda1d0b80aa3625b77e2d7581 | [] | no_license | JINJINT/documents | c62422b0b3d8f657d9c8f3cf21b535542df8e556 | 9d58f85478f98326b6f21e3f41f6531f623a37a3 | refs/heads/master | 2020-04-04T15:44:54.402560 | 2018-11-01T14:50:33 | 2018-11-01T14:50:33 | 156,050,759 | 0 | 1 | null | 2018-11-04T04:59:51 | 2018-11-04T04:59:51 | null | UTF-8 | R | false | false | 6,453 | r | queues.R | #!/usr/bin/env Rscript
# Build a condition object of class "empty_queue" (also "error"/"condition")
# so callers can stop() with it and handlers can catch it by class.
#
# msg:  human-readable error message.
# call: the call to report (defaults to the caller's call via sys.call(-1)).
# ...:  additional named attributes to attach to the condition.
empty_queue_error <- function(msg, call=sys.call(-1), ...) {
  cond <- list(message = msg, call = call)
  class(cond) <- c("empty_queue", "error", "condition")
  extras <- list(...)
  if (length(extras) > 0) {
    attributes(cond) <- c(attributes(cond), extras)
  }
  cond
}
## A simple FIFO queue backed by a list.
## Some examples:
## foo <- Queue()
## foo$enqueue(4)
## foo$dequeue() #=> 4
## foo$is_empty() #=> TRUE
Queue <- setRefClass(
  "Queue",
  fields=c("queue"),
  methods=list(
    initialize=function() {
      # Backing store is a list so arbitrary R objects can be queued.
      queue <<- list()
    },
    dequeue=function() {
      "Remove and return the oldest element; signals an empty_queue condition when empty."
      if (is_empty()) {
        stop(empty_queue_error("Attempt to dequeue an empty queue"))
      }
      val <- queue[[1]]
      queue <<- queue[-1]
      return(val)
    },
    enqueue=function(obj) {
      # BUG FIX: wrap obj in list() so that a vector or list is stored as ONE
      # element instead of being flattened by c() into several.  Scalar
      # callers (all current uses) are unaffected.
      queue <<- c(queue, list(obj))
    },
    peek=function() {
      "Return (without removing) the oldest element; errors consistently with dequeue() when empty."
      if (is_empty()) {
        # Previously this raised a raw subscript error on an empty queue;
        # raise the same classed condition dequeue() uses.
        stop(empty_queue_error("Attempt to peek an empty queue"))
      }
      return(queue[[1]])
    },
    is_empty=function() {
      return(length(queue) == 0)
    }
  )
)
## Homogeneous Poisson process: tracks the time of the latest scheduled event
## and advances it by Exp(lam) interarrival times.
## Examples:
## foo <- PoissonProcess(lam=4)
## foo$latest()
## foo$next_event()
PoissonProcess <- setRefClass(
  "PoissonProcess",
  fields=list(lam="numeric", xlatest="numeric"),
  methods=list(
    initialize=function(lam) {
      lam <<- lam
      # Schedule the first event one interarrival time after t = 0.
      xlatest <<- interarrival()
    },
    latest=function() {
      "Time of the most recently scheduled event."
      xlatest
    },
    interarrival=function() {
      "Draw a single exponential interarrival time with rate lam."
      rexp(1, rate = lam)
    },
    next_event=function() {
      "Advance the process to its next event time."
      xlatest <<- xlatest + interarrival()
    }
  )
)
# Return the event object with the smallest latest() time among `events`,
# together with its index in the list.
#
# events: list of objects exposing a latest() method (e.g. PoissonProcess).
# Returns list(next_service = <object>, next_index = <index of the minimum>);
# when `events` is empty both fields are 0, matching the previous behaviour.
find_next_event <- function(events) {
  minimum <- Inf
  which <- 0
  index <- 0
  for (ind in seq_along(events)) {
    if (events[[ind]]$latest() < minimum) {
      minimum <- events[[ind]]$latest()
      which <- events[[ind]]
      index <- ind
    }
  }
  # BUG FIX: previously returned `ind` (the loop variable, i.e. always the
  # LAST index) instead of `index`, the tracked index of the minimum.  That
  # made callers such as MMkBankQueue$step() bill events to the wrong server.
  return(list(next_service=which, next_index=index))
}
# M/M/k "grocery store" layout: each of the k service stations has its OWN
# Poisson arrival stream and its own FIFO queue (contrast with MMkBankQueue
# below, which pools all arrivals into one shared queue).
MMkGroceryQueue <- setRefClass(
"MMkGroceryQueue",
fields=c("num_servers", "lambda_arrival", "lambda_serve",
"servers", "arrival", "station", "queues", "time", "served",
"total_waiting_time"),
methods=list(
# nqueues: number of server/queue pairs.
# lambda_arrival / lambda_serve: rates of each station's arrival and
# service Poisson processes.
initialize=function(nqueues, lambda_arrival, lambda_serve) {
num_servers <<- nqueues
lambda_arrival <<- lambda_arrival
lambda_serve <<- lambda_serve
# One service-completion process per station.
servers <<- lapply(seq_len(nqueues),
function(arg) {
return(PoissonProcess(lambda_serve))
})
# One independent arrival process per station.
arrival <<- lapply(seq_len(nqueues),
function(arg) {
return(PoissonProcess(lambda_arrival))
})
station <<- numeric(nqueues) # entry time into the service station
queues <<- lapply(seq_len(nqueues),
function(arg) { return(Queue())})
time <<- 0.0                 # simulation clock
served <<- 0                 # customers fully served so far
total_waiting_time <<- 0.0   # accumulated waiting time of served customers
},
step=function() {
########
## ATTN TO BE IMPLEMENTED
## NOTE(review): stub -- returns the clock unchanged, so run_until()
## below will loop forever if called before this is implemented.
########
return(time)
},
run_until=function(time_limit) {
step()
while (time < time_limit) {
step()
}
},
# Mean waiting time over served customers; NA before anyone is served.
average_waiting_time=function() {
if (served > 0) {
return(total_waiting_time / served)
}
return(NA)
}
)
)
# M/M/k "bank" layout: k servers drawing from a SINGLE shared FIFO queue fed
# by one Poisson arrival process.  station[i] holds the time at which the
# customer currently at server i entered the station (NA when server i idle).
MMkBankQueue <- setRefClass(
"MMkBankQueue",
fields=c("num_servers", "lambda_arrivals", "lambda_serve",
"servers", "arrivals", "station", "queue", "time", "served",
"total_waiting_time"),
methods=list(
initialize=function(nservers, lambda_arrivals, lambda_serve) {
num_servers <<- nservers
lambda_arrivals <<- lambda_arrivals
lambda_serve <<- lambda_serve
# One service-completion process per server.
servers <<- lapply(seq_len(nservers),
function(arg) {
return(PoissonProcess(lambda_serve))
})
# Single shared arrival stream.
arrivals <<- PoissonProcess(lambda_arrivals)
station <<- rep(NA, num_servers) # entry time into the service station
queue <<- Queue()
time <<- 0.0
served <<- 0
total_waiting_time <<- 0.0
},
# Advance the simulation by one event (an arrival or a service completion)
# and return the new clock time.
step=function(debug=FALSE) {
n <- find_next_event(servers)
## Forward event times for empty servers triggering before next
## arrival
while(n$next_service$latest() < arrivals$latest() && is.na(station[n$next_index])) {
n$next_service$next_event()
n <- find_next_event(servers)
}
if (arrivals$latest() < n$next_service$latest()) {
# Next event is an arrival: join the queue if every server is busy,
# otherwise occupy the first idle server.
time <<- arrivals$latest()
arrivals$next_event()
if (all(!is.na(station))) {
queue$enqueue(time)
} else {
for (ii in seq_len(num_servers)) {
if (is.na(station[ii])) {
station[ii] <<- time
break
}
}
}
} else {
# Next event is a service completion at server n$next_index.
time <<- n$next_service$latest()
entry_time <- station[n$next_index]
# NOTE(review): this measures station-entry to completion, i.e. it
# includes service time -- confirm that is the intended "waiting time".
waiting_time <- time - entry_time
served <<- served + 1
total_waiting_time <<- total_waiting_time + waiting_time
if (queue$is_empty()) {
station[n$next_index] <<- NA
} else {
# Pull the next queued customer (stored value = their arrival time).
station[n$next_index] <<- queue$dequeue()
}
}
if (debug) {
print(time)
}
return(time)
},
run_until=function(time_limit) {
step()
while (time < time_limit) {
step()
}
},
# Mean waiting time over served customers; NA before anyone is served.
average_waiting_time=function() {
if (served > 0) {
return(total_waiting_time / served)
}
return(NA)
},
# One-line summary of the simulation state.
report=function() {
cat("Served ", served, ", avg wait ", average_waiting_time(),
", time ", time, "\n", sep="")
}
)
)
# Demo run: 10 tellers, arrival rate 1.0, service rate 0.001 (heavily
# overloaded system), simulated up to t = 600, then print a summary.
bank <- MMkBankQueue(10, 1.0, 0.001)
bank$run_until(600.0)
bank$report()
|
366acaecd3643738ba9c5169ff1146f73da72996 | e0f3a5b5a154ea4e9d8df85c2535a38a23b940c5 | /Supervised_Learning/Random_Forest/Housing_prices_estimation/solucion.R | 95b4c9819617d78aff1461082b359bb2ceb2f0f5 | [] | no_license | OscarMongeVillora/Master-self-practice | dba6ece91f72df3db9af6809659d196bdc845846 | c8c22cef20f18fa50d462f47c536c024350abc5e | refs/heads/master | 2022-12-11T06:54:16.910967 | 2020-09-13T19:47:45 | 2020-09-13T19:47:45 | 295,223,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,598 | r | solucion.R | #Borramos los datos
rm(list=ls())
#-------------- Pregunta 1: Lectura de datos ----------#
library(MASS)
#data(package="MASS")
boston<-Boston
boston
dim(boston)
names(boston)
#-------------- Pregunta 2: datos de train ----------#
set.seed(101)
train = sample(1:nrow(boston), 300) #seleccionamos 300 valores para entrenar
#-------------- Pregunta 3: Ajuste del modelo ----------#
rf.boston = randomForest(medv~., data = boston, subset = train,ntree=500, importance = TRUE)
rf.boston
#como se calcula el % varianza explicada?
predicted=rf.boston$predicted
y=boston$medv[train]
1 - sum((y-predicted)^2)/sum((y-mean(y))^2) #=R^2
mean((predicted - boston$medv[train])^2)
#-------------- Pregunta 4: Arboles vs. error ----------#
rf.boston
plot(rf.boston)
pred = predict(rf.boston, boston[train,]) #No es lo mismo
predicted=rf.boston$predicted # No es lo mismo
#-------------- Pregunta 5: oob error vs test error ----------#
#vamos a intentar para cada valor de mtry posible
oob.err = double(13) #out of bag error, cuantas variables dejo sin introducir en el bosque
test.err = double(13)
oob.err
for(mtry in 1:13){
fit = randomForest(medv~., data = boston, subset=train, mtry=mtry, ntree = 350)
oob.err[mtry] = fit$mse[350] #por que elijo aqui solo el ultimo valor (350)?
pred = predict(fit, boston[-train,])
test.err[mtry] = with(boston[-train,], mean( (medv-pred)^2 ))
}
#-------------- Pregunta 6: Grafico oob error vs test error ----------#
matplot(1:mtry, cbind(test.err, oob.err), pch = 23, col = c("red", "blue"), type = "b", ylab="Mean Squared Error")
legend("topright", legend = c("OOB", "Test"), pch = 23, col = c("red", "blue"))
cbind(test.err, oob.err)
min(boston$medv)
max(boston$medv)
##############################Apartado mio extra RF FOR CLASSIFICATION
#ROC PLOT Variable binaria barato, Caro Por encima de 35 es caro -- Y = 1
install.packages("verification")
library(verification)
par(mfrow = c(3, 3))
Y <- factor(ifelse(boston$medv > 30, 1, 0))
boston <- data.frame(boston, Y)
boston$Y
fit = randomForest(Y~.-medv, data = boston, subset=train, ntree = 350)
fit$err.rate
fit$votes
train_predict = list()
for(mtry in 1:9){
fit = randomForest(Y~.-medv, data = boston, subset=train, mtry=mtry, ntree = 350)
train_predict[[mtry]] = fit$votes[,2]
}
yy <- as.numeric(as.character(Y[train]))
for(i in
1:9){
r <- roc.area(yy, train_predict[[i]])
roc.plot(yy, train_predict[[i]],
main = paste0("Var= ", i," ROC_a= ", r$A))
}
|
d71e9e7f14455af3e665b59e06150209f9096ef5 | 05d368d0f4003e02332d0705531737152aaff344 | /SCRIPTS/script_de_analisis.R | 75ac253a5f890681acee036034d7cb32be83a867 | [] | no_license | bigdataciat/Taller_Honduras_DMAEPS_2018 | 5f6362a5bdafc5b4a55bef0ba897d4202d01657d | 6c10cdb595d5f71a4e760ad230447539eaaf4367 | refs/heads/master | 2020-03-22T09:31:54.370060 | 2018-07-26T13:58:05 | 2018-07-26T13:58:05 | 139,843,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,240 | r | script_de_analisis.R | # file data-analysis-AEPS-BigData.R
#
# This file contains a script to develop regressions with machine learning methodologies
#
#
# author: Hugo Andres Dorado 02-16-2015
#
#This script is free: you can redistribute it and/or modify
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#-----------------------------------------------------------------------------------------------------------------
#SCRIPT BUILED FOR R VERSION 3.0.2
#PACKAGES
rm(list=ls())
require(gtools)
require(gridBase)
require(gridExtra)
require(relaimpo)
require(caret)
require(party)
require(randomForest)
require(snowfall)
require(earth)
require(agricolae)
require(cowplot)
require(reshape)
require(stringr)
require(gbm)
require(plyr)
library(rpart)
library(rpart.plot)
#Load functions; Open All-Functions-AEPS_BD.RData
load("C:/Users/hadorado/Desktop/TALLER_EN_R_JULIO_2018/All-Functions-AEPS_BD.RData")
#Work Directory
dirFol <- "C:/Users/hadorado/Desktop/TALLER_EN_R_JULIO_2018/"
setwd(dirFol)
#DataBase structure
datNam <- "mora_toyset_2.csv"
dataSet <- read.csv(datNam,row.names=1)
namsDataSet <- names(dataSet)
inputs <- 1:22 #inputs columns
segme <- 23 #split column
output <- 24 #output column
#Creating the split factors
#contVariety <- table(dataSet[,segme])
#variety0 <- names(sort(contVariety[contVariety>=30]))
#if(length(variety0)==0){variety = variety0 }else{variety = factor(c(variety0,"All"))}
variety <- 'Todos'
#creating folders
createFolders(dirFol,variety)
#Descriptive Analysis
descriptiveGraphics(variety,dataSet,inputs = inputs,segme = segme,output = output,
smooth=T,ylabel = "Rendimiento (kg/ha)",smoothInd = NULL,
ghrp="box",res=80)
#DataSets ProcesosF
dataSetProces(variety,dataSet,segme,corRed="caret")
#RANDOM FOREST
randomForestFun("Todos",nb.it=10,ncores = 2,saveWS=F,barplot = T)
# CLASIFCATION AND REGRESSION TREES
mora.arbol <- rpart(Yield~.,data=dataSet[,-23])
rpart.plot(mora.arbol,type = 2,main="Teff")
|
77832b532874e09d52d4ff3c9dd2095e624e20d5 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/risk_factors_code/wash_sanitation/append_all_locs.R | a65181cf627b4e980b467d5164ac1ab4c6735386 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 816 | r | append_all_locs.R | # Author: NAME
# Date: 2/22/2019
# Purpose: Combine draws for all locations into one file to prep for post-ST-GPR processing [WaSH]
rm(list=ls())
library(data.table)
arg <- commandArgs(trailingOnly = T)
print(arg)
me_name <- as.character(arg[1])
run_id <- as.numeric(arg[2])
me_parent <- as.character(arg[3])
run <- as.character(arg[4])
decomp_step <- as.character(arg[5])
# --------------------------------------------------
# in
input.dir <- file.path("FILEPATH")
files <- list.files(input.dir)
# out
output.dir <- file.path("FILEPATH")
if(!dir.exists(output.dir)) dir.create(output.dir, recursive = TRUE)
# create data.table w/ draws for all locs & save
all_locs <- rbindlist(lapply(file.path(input.dir,files), fread), use.names = TRUE)
write.csv(all_locs, paste0(file.path(output.dir, me_name), ".csv"))
|
d42c4b22ed6ac3156050cf6eb7f0030e107e7a3c | 5297cd2bf0c2f7e3a4b76e4d692b042e164fb44b | /proj/submitFiles/final/code/graphScripts/levels.R | a94feb831959be2e12e12c0fa1ce18b69d1fa50a | [] | no_license | ibush/MarioAI | df410153c074c29122cd9e70e58e0ccf464e865e | 0ea84c61b9ccd0948f8dea95214417564dd326b5 | refs/heads/master | 2021-01-10T09:42:08.163691 | 2015-12-12T02:17:29 | 2015-12-12T02:17:29 | 44,492,430 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,681 | r | levels.R | library(ggplot2)
library(dplyr)
# Directory Structure
wd <- '~/Documents/Stanford/CS221/mario/marioai/'
proj_dir <- paste0(wd,'proj/')
data_dir <- paste0(proj_dir, 'data/allLevels/')
out_dir <- paste0(proj_dir, 'writeup/imgs/')
src_dir <- paste0(proj_dir, 'writeup/graphScripts/')
source(paste0(src_dir, 'formats.R'))
agents <- c('RandomAgent', 'QLearningAgent', 'QLinearAgent', 'NNAgent')
all_data <- data.frame()
for(agent in agents){
agent_data <- data.frame()
for(i in 0:39){
data_path <- paste0(data_dir, agent,'/level_', i, '/stats/')
data <- read.csv(paste0(data_path, 'distance_', agent), header=FALSE)
names(data) <- 'dist'
data$sim_num <- 1:nrow(data)
data$level <- i
data$agent <- agent
if(agent == 'QLearningAgent') data$agent <- 'IdentityAgent'
if(agent == 'QLinearAgent') data$agent <- 'LinearAgent'
agent_data <- rbind(agent_data, data)
}
min_ind <- 1950 # store last 50 runs
if(agent == 'RandomAgent' || agent == 'QLearningAgent') min_ind <- 200
agent_data <- subset(agent_data, sim_num > min_ind)
all_data <- rbind(all_data, agent_data)
}
sum_data <- summarise(group_by(all_data, agent, level), dist=mean(dist))
# Print sum of scores for all levels
level_sum <- summarise(group_by(sum_data, agent), dist=sum(dist))
print(level_sum)
ten_level_data <- subset(sum_data, level < 10)
ten_level_sum <- summarise(group_by(ten_level_data, agent), dist=sum(dist))
print(ten_level_sum)
# scatter plot
g <- ggplot(sum_data, aes(x=level, y=dist, colour=agent)) + geom_point()+
labs(x='Difficulty Level', y='Distance Traveled', colour='Agent')
pdf(paste0(out_dir, 'dist_levels.pdf'))
plot(common_format(g))
dev.off() |
437b6d6fa507abb38f0e47ec5607b4cc85e42a66 | b44d584ff7816393593552622956f66d149dbf51 | /src/analysis/position_profile_clustering.R | c2b9fabfb5bd85b84a5d622e15045b57c6378856 | [
"Apache-2.0"
] | permissive | allydunham/dms_mutations | b51536c611611d74754135b8bc22223eb8d13840 | e4954eaa28dab72e8259559a4d3c2a0f77124ca4 | refs/heads/master | 2023-08-15T03:11:55.406243 | 2021-10-04T17:07:40 | 2021-10-04T17:07:40 | 180,122,942 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,228 | r | position_profile_clustering.R | #!/usr/bin/env Rscript
# Functions to perform clustering analysis on per position mutational profiles from deep mutagenesis studies
data("BLOSUM62")
#### PCA ####
# Generate PCA of mutational profiles
# TODO move to new tibble_pca func in misc_utils.R
positional_profile_PCA <- function(variant_matrix){
pca <- prcomp(as.matrix(select(variant_matrix, A:Y)), center = TRUE, scale. = TRUE)
pca_variants <- bind_cols(select(variant_matrix, -(A:Y)), as_tibble(pca$x))
return(list(profiles=pca_variants, pca=pca))
}
basic_pca_plots <- function(pca){
plots <- list()
plots$all_pcs <- plot_all_pcs(pca$profiles, colour_var = 'wt')
plots$by_authour <- ggplot(pca$profiles, aes(x=PC1, y=PC2, colour=gene_name)) +
facet_wrap(~study) +
geom_point()
plots$secondary_structure <- plot_all_pcs(pca$profiles, colour_var = 'ss')
plots$secondary_structure_reduced <- plot_all_pcs(pca$profiles, colour_var = 'ss_reduced')
plots$fields_group_studies <- ggplot(filter(pca$profiles,
authour %in% c('Araya et al.', 'Melamed et al.', 'Starita et al.',
'Kitzman et al.', 'Weile et al.')),
aes(x=PC1, y=PC2, colour=study)) +
geom_point()
plots$position_sig <- ggplot(pca$profiles, aes(x=PC1, y=PC2, colour=sig_count)) +
geom_point()
plots$by_aa <- ggplot(pca$profiles, aes(x=PC1, y=PC2, colour=gene_name)) +
facet_wrap(~wt) +
geom_point()
plots$surface_accesibility <- ggplot(pca$profiles, aes(x=PC1, y=PC2, colour=all_atom_rel)) +
geom_point() +
scale_colour_gradientn(colours = c('blue', 'green', 'yellow', 'orange', 'red'))
return(plots)
}
get_avg_aa_pca_profile <- function(pca, aa_col='wt'){
aa_col_sym <- sym(aa_col)
avg_profile <- pca$profiles %>%
group_by(!!aa_col_sym) %>%
summarise_at(.vars = vars(starts_with('PC')), .funs = list(~ mean(.)))
cor_mat <- select(avg_profile, -!!aa_col_sym) %>%
t() %>%
set_colnames(avg_profile[[aa_col]]) %>%
cor()
aa_order <- rownames(cor_mat)[hclust(dist(cor_mat))$order]
cor_tbl <- cor_mat %>%
as_tibble(rownames = 'AA1') %>%
gather(key = 'AA2', value = 'cor', -AA1) %>%
mutate(AA1 = factor(AA1, levels = aa_order),
AA2 = factor(AA2, levels = aa_order))
return(list(avg_profile=avg_profile, cor_mat=cor_mat, cor_tbl=cor_tbl, aa_order=aa_order))
}
plot_aa_pca_profile_average_cor <- function(pca){
cors <- tibble_to_matrix(pca$profiles, PC1:PC20,
row_names = str_c(pca$profiles$study, pca$profiles$pos, pca$profiles$wt, sep = '~')) %>%
t() %>%
cor() %>%
as_tibble(rownames = 'pos1') %>%
gather(key = 'pos2', value = 'cor', -pos1) %>%
mutate(AA1 = str_sub(pos1, start = -1),
AA2 = str_sub(pos2, start = -1)) %>%
group_by(AA1, AA2) %>%
summarise(cor = mean(cor)) %>%
ungroup()
aa_order <- spread(cors, key = AA2, value = cor) %>%
tibble_to_matrix(., A:Y, row_names = .$AA1)
aa_order <- rownames(aa_order)[hclust(dist(aa_order))$order]
cors <- mutate(cors, AA1 = factor(AA1, levels = aa_order), AA2 = factor(AA2, levels = aa_order))
return(
ggplot(cors, aes(x=AA1, y=AA2, fill=cor)) +
geom_tile(colour='white') +
scale_fill_gradient2() +
theme(axis.ticks = element_blank(), panel.background = element_blank())
)
}
# Correlate each principal component with a set of external factor columns.
#
# pca:   list with element `profiles` (tibble containing the PC columns and
#        the factor columns).
# .vars: list of column selections, spliced into select() via !!!.
# Returns list(tbl = long-format PC/factor correlations,
#              matrix = PC x factor correlation matrix).
pca_factor_cor <- function(pca, .vars){
pcas_mat <- select(pca$profiles, starts_with('PC')) %>%
as.matrix()
factor_mat <- select(pca$profiles, !!!.vars) %>%
as.matrix()
# pairwise.complete.obs: factor columns may contain NAs
cor_mat <- cor(pcas_mat, factor_mat, use = 'pairwise.complete.obs')
cor_tbl <- cor_mat %>%
as_tibble(rownames = 'PC') %>%
gather(key = 'factor', value = 'cor', -PC) %>%
# order PC levels numerically (PC1, PC2, ...) rather than alphabetically
mutate(PC = factor(PC, levels = str_c('PC', 1:dim(cor_mat)[1])))
return(list(tbl=cor_tbl, matrix=cor_mat))
}
# Heatmap of the PC-vs-factor correlations produced by pca_factor_cor().
# pca: list whose `tbl` element has columns PC, factor and cor.
pca_factor_heatmap <- function(pca){
  heat <- ggplot(pca$tbl, aes(x = PC, y = factor, fill = cor)) +
    geom_tile(colour = 'white') +
    scale_fill_gradient2() +
    theme(axis.ticks = element_blank(), panel.background = element_blank())
  return(heat)
}
aa_avg_profile_plot <- function(x){list(avg_aa_profile=ggplot(x$avg_profile, aes(x=PC1, y=PC2, label=wt)) + geom_text())}
# One-element list holding a heatmap of the AA-vs-AA profile correlations
# computed by get_avg_aa_pca_profile() (expects element `cor_tbl`).
aa_profile_heatmap <- function(pca){list(
aa_profile_heatmap=ggplot(pca$cor_tbl, aes(x=AA1, y=AA2, fill=cor)) +
geom_tile(colour='white') +
scale_fill_gradient2() +
theme(axis.ticks = element_blank(), panel.background = element_blank())
)}
# Run the positional-profile PCA restricted to positions whose wild-type
# amino acid is `aa`, and return the standard diagnostic plots for that subset.
# NOTE(review): pca_surf_acc_cor() and pca_surface_heatmap() are not defined
# in this file -- presumably elsewhere in the project; verify before use.
per_aa_pcas <- function(aa, variant_matrix){
variant_matrix <- filter(variant_matrix, wt == aa)
profile_pca <- positional_profile_PCA(variant_matrix)
surface_cor <- pca_surf_acc_cor(profile_pca)
basic_plots <- basic_pca_plots(profile_pca)
surface_heatmap <- pca_surface_heatmap(surface_cor)
return(c(basic_plots, pc_surface_acc_heatmap=surface_heatmap))
}
########
#### tSNE ####
# Scatter plot of a tSNE embedding coloured by an (unquoted) column of `tbl`.
#
# tbl: tibble with columns tSNE1 and tSNE2 plus the colouring column.
# var: unquoted column name used for the point colour (tidy-eval capture).
tsne_plot <- function(tbl, var){
  var <- enquo(var)
  return(
    # BUG FIX: previously plotted the global `tsne$tbl`, silently ignoring
    # the `tbl` argument; plot the argument instead.
    ggplot(tbl, aes(x = tSNE1, y=tSNE2, colour=!!var)) +
      geom_point() +
      theme_pubclean() +
      theme(legend.position = 'right', panel.grid.major = element_line(linetype = 'dotted', colour = 'grey'))
  )
}
########
#### kmeans ####
# Cluster the selected columns of tbl with kmeans and attach the cluster
# assignments to the input tibble.
#
# tbl: input tibble; cols: tidyselect column specification; n: number of
# centres; ...: forwarded to stats::kmeans.
# Returns list(tbl = input with a 'cluster' column, kmeans = the kmeans fit).
make_kmeans_clusters <- function(tbl, cols, n=5, ...){
  col_quo <- enquo(cols)
  data_mat <- tibble_to_matrix(tbl, !!col_quo)
  fit <- kmeans(data_mat, centers = n, ...)
  clustered <- mutate(tbl, cluster = fit$cluster)
  return(list(tbl = clustered, kmeans = fit))
}
########
#### hclust ####
# Perfrom hclust on columns of a tibble, using parameters in conf or by specific h, k, ... settings if given. conf takes preference
# k overrides h as in the base hclust
# Perform hierarchical clustering on selected columns of a tibble.
#
# Cut parameters come either from `conf` (a list, which takes preference and
# is merged over the individual h/k/max_k/min_k arguments) or from the
# arguments themselves. As in base cutree(), k overrides h when both are set.
#
# tbl: input tibble; cols: tidyselect column specification.
# dist_method: distance metric passed to dist().
# conf: optional list of cut settings (h, k, max_k, min_k).
# h, k: cut height / number of clusters; max_k, min_k: bounds applied when
#       cutting by height (defaults allow any number of clusters).
# ...: forwarded to hclust().
# Returns list(tbl = input with a 'cluster' column, hclust = the hclust fit).
make_hclust_clusters <- function(tbl, cols, dist_method = 'manhattan', conf=NULL, h = NULL, k = NULL, max_k=Inf, min_k=0, ...){
  cols <- enquo(cols)
  defaults <- list(h=h, k=k, max_k=max_k, min_k=min_k)
  if (is.null(conf)){
    conf <- defaults
  } else {
    conf <- list_modify(defaults, !!!conf)
  }
  mat <- tibble_to_matrix(tbl, !!cols)
  hc <- hclust(dist(mat, method = dist_method), ...)
  clus <- cutree(hc, k = conf$k, h = conf$h)
  # Use max/min cluster nums if using h (defaults mean any number is allowed).
  # Fix: use short-circuit && for this scalar condition instead of the
  # vectorized & (idiomatic for if(); errors on non-scalars in modern R).
  if (is.null(conf$k) && !is.null(conf$max_k) && !is.null(conf$min_k)){
    # too many clusters
    if (max(clus) > conf$max_k){
      clus <- cutree(hc, k = conf$max_k)
    }
    # too few clusters
    if (max(clus) < conf$min_k){
      clus <- cutree(hc, k = conf$min_k)
    }
  }
  return(list(tbl = mutate(tbl, cluster = clus),
              hclust = hc))
}
# Generate a sensible name for an hclust run passing a config list and/or individual values for the params (overrides settings)
# Generate a sensible name for an hclust run from a config list and/or
# individual parameter values passed via ... (which override `conf`).
#
# conf: optional list with h, k, max_k, min_k entries (NULLs allowed).
# ...: named overrides for individual entries of conf.
# Returns a single string such as "hclust (h = 10, max_k = 5)".
make_hclust_cluster_str <- function(conf=NULL, ...){
  manual <- list(...)
  if (is.null(conf)){
    conf <- list(h=NULL, k=NULL, max_k=NULL, min_k=NULL)
  }
  if (length(manual) > 0){
    # Fix: splice the named overrides with !!! so each one replaces the
    # matching entry of conf (as done elsewhere, e.g. in
    # make_hclust_clusters). Previously the whole list was passed as a
    # single unnamed argument, so named overrides never applied.
    conf <- list_modify(conf, !!!manual)
  }
  # Drop unset (NULL) entries before formatting; vapply keeps the result
  # type stable where sapply could surprise on empty input.
  conf <- conf[!vapply(conf, is.null, logical(1))]
  # str_sub(..., start = 5) strips the leading "list" from dput()'s output
  return(str_c('hclust ', str_sub(capture.output(dput(conf)), start = 5)))
}
########
#### hdbscan ####
# Cluster the selected columns of tbl with HDBSCAN, precomputing the
# distance matrix, and attach the cluster assignments to the input tibble.
#
# tbl: input tibble; cols: tidyselect column specification.
# dist_method: metric for dist(); minPts: HDBSCAN minimum cluster size;
# ...: forwarded to hdbscan().
# Returns list(tbl = input with a 'cluster' column, hdbscan = the fit).
make_hdbscan_clusters <- function(tbl, cols, dist_method = 'euclidean', minPts=5, ...){
  col_quo <- enquo(cols)
  data_mat <- tibble_to_matrix(tbl, !!col_quo)
  distances <- dist(data_mat, method = dist_method)
  fit <- hdbscan(data_mat, minPts = minPts, xdist = distances, ...)
  return(list(tbl = mutate(tbl, cluster = fit$cluster), hdbscan = fit))
}
########
#### Cluster analysis ####
# Expects a tbl with a columns:
# study - deep mut study
# pos - position in protein
# wt - wt AA at that position
# cluster - cluster assignment of the position
# backbone_angles = tbl giving psi/phi for each study/pdb_id/chain/aa/position combo
# foldx = tbl giving FoldX derived energy terms for deep mut positions
cluster_analysis <- function(tbl, backbone_angles=NULL, foldx=NULL, cluster_str='<UNKNOWN>', er_str='<UNKNOWN>',
                             id_col=NULL, pos_col=NULL){
  # Summarise a position-clustering result: Ramachandran plot (optional),
  # cluster centroid profiles and their correlations, cluster sizes,
  # comparison against BLOSUM62, and FoldX energy summaries (optional).
  # Returns a list of tables plus a nested list of ggplot objects.
  #
  # Resolve the study-identifier column; defaults to `study` when not given.
  id_col <- enquo(id_col)
  if (rlang::quo_is_null(id_col)){
    id_col <- quo(study)
    id_col_str <- 'study'
  } else {
    id_col_str <- rlang::as_name(id_col)
  }
  # Resolve the position column; defaults to `position` when not given.
  pos_col <- enquo(pos_col)
  if (rlang::quo_is_null(pos_col)){
    pos_col <- quo(position)
    pos_col_str <- 'position'
  } else {
    pos_col_str <- rlang::as_name(pos_col)
  }
  # Ramachandran Plot
  if (!is.null(backbone_angles)){
    # NOTE(review): the join key uses 'pos' but the renamed column is named
    # after pos_col (default 'position') — confirm the key matches the
    # actual column name in both tables.
    angles <- left_join(rename(backbone_angles, !!pos_col:=position, wt=aa),
                        select(tbl, study, !!pos_col, wt, cluster),
                        by = c('study', 'pos', 'wt')) %>%
      drop_na(cluster) %>%
      mutate(cluster_num = str_sub(cluster, start=-1))
    p_ramachandran <- ggplot(angles, aes(x=phi, y=psi, colour=cluster_num)) +
      geom_point() +
      facet_wrap(~wt)
  } else {
    angles <- NULL
    p_ramachandran <- NULL
  }
  # Cluster mean profiles
  mean_profiles <- group_by(tbl, cluster) %>%
    summarise_at(.vars = vars(A:Y), .funs = mean)
  mean_prof_long <- gather(mean_profiles, key='mut', value = 'norm_er', -cluster) %>%
    add_factor_order(cluster, mut, norm_er, sym=FALSE)
  # Cluster mean profile correlation
  cluster_cors <- transpose_tibble(mean_profiles, cluster, name_col = 'aa') %>%
    tibble_correlation(-aa) %>%
    rename(cluster1 = cat1, cluster2 = cat2) %>%
    mutate(wt1 = str_sub(cluster1, end = 1),
           wt2 = str_sub(cluster2, end = 1)) %>%
    left_join(as_tibble(BLOSUM62, rownames='wt1') %>%
                gather(key = 'wt2', value = 'BLOSUM62', -wt1) %>%
                filter(wt1 %in% Biostrings::AA_STANDARD, wt2 %in% Biostrings::AA_STANDARD),
              by=c('wt1', 'wt2')) %>%
    mutate(pair = mapply(function(x, y){str_c(str_sort(c(x, y)), collapse = '')}, wt1, wt2))
  # Capture factor level orders before the columns are coerced to character
  cluster_mean_order <- levels(mean_prof_long$cluster)
  cluster_cor_order <- levels(cluster_cors$cluster1)
  mean_prof_long <- mutate(mean_prof_long, cluster = as.character(cluster), mut = as.character(mut))
  p_mean_prof <- labeled_ggplot(
    p=ggplot(mean_prof_long, aes(x=mut, y=cluster, fill=norm_er)) +
      geom_tile() +
      scale_fill_gradient2() +
      coord_fixed() +
      ggtitle(str_c('Cluster centroid', er_str, 'for', cluster_str, 'clusters', sep=' ')) +
      guides(fill=guide_colourbar(title = er_str)) +
      theme(axis.ticks = element_blank(),
            panel.background = element_blank(),
            axis.title = element_blank(),
            axis.text.x = element_text(colour = AA_COLOURS[unique(mean_prof_long$mut)]),
            axis.text.y = element_text(colour = AA_COLOURS[str_sub(unique(mean_prof_long$cluster), end = 1)])),
    units = 'cm', width = 0.5*n_distinct(mean_prof_long$mut) + 4,
    height = 0.5*n_distinct(mean_prof_long$cluster) + 2, limitsize=FALSE)
  # Cluster Sizes
  # NOTE(review): mean_prof_long$cluster was just converted to character, so
  # levels() of it is NULL here — confirm cluster_mean_order was not the
  # intended level set for this factor() call.
  cluster_sizes <- group_by(tbl, cluster) %>%
    summarise(n = n()) %>%
    mutate(aa = str_sub(cluster, end = 1),
           cluster = factor(cluster, levels = levels(mean_prof_long$cluster)))
  p_cluster_size <- labeled_ggplot(
    p = ggplot(cluster_sizes, aes(x=cluster, y=n, fill=aa)) +
      geom_col() +
      xlab('Cluster') +
      ylab('Size') +
      scale_fill_manual(values = AA_COLOURS) +
      scale_y_log10() +
      theme_pubclean() +
      guides(fill=FALSE) +
      theme(axis.text.x = element_text(colour = AA_COLOURS[str_sub(levels(cluster_sizes$cluster), end = 1)],
                                       angle = 90, hjust = 1, vjust = 0.5)),
    units = 'cm', height = 15, width = nrow(cluster_sizes) * 0.5 + 2)
  # Heatmap of correlations between cluster centroids
  p_centre_cor <- labeled_ggplot(
    p = ggplot(cluster_cors, aes(x=cluster1, y=cluster2, fill=cor)) +
      geom_tile() +
      scale_fill_gradient2() +
      ggtitle(str_c('Correlation of', cluster_str, 'centroids for clusters based on', er_str, sep = ' ')) +
      coord_fixed() +
      theme(axis.ticks = element_blank(),
            panel.background = element_blank(),
            axis.title = element_blank(),
            axis.text.x = element_text(colour = AA_COLOURS[str_sub(levels(cluster_cors$cluster1), end = 1)], angle = 90, vjust = 0.5),
            axis.text.y = element_text(colour = AA_COLOURS[str_sub(levels(cluster_cors$cluster2), end = 1)])),
    units = 'cm', width = 0.5*length(levels(cluster_cors$cluster1)) + 2,
    height = 0.5*length(levels(cluster_cors$cluster2)) + 2, limitsize=FALSE)
  # Vs Blosum
  p_vs_blosum <- plot_cluster_profile_cor_blosum(cluster_cors, 'DE')
  # FoldX params
  if (!is.null(foldx)){
    # Average FoldX energy terms per study/position/wt, then join clusters on
    tbl_fx <- group_by(foldx, !!id_col, !!pos_col, wt) %>%
      summarise_at(.vars = vars(-mut, -pdb_id, -sd), .funs = mean, na.rm=TRUE) %>%
      inner_join(tbl, ., by=c(id_col_str, pos_col_str, 'wt')) %>%
      select(cluster, !!id_col, !!pos_col, wt, total_energy:entropy_complex, everything())
    p_foldx_boxes <- labeled_ggplot(
      p=ggplot(gather(tbl_fx, key = 'term', value = 'ddG', total_energy:entropy_complex),
               aes(x=cluster, y=ddG, colour=wt)) +
        scale_colour_manual(values = AA_COLOURS) +
        geom_boxplot() +
        facet_wrap(~term, scales = 'free', ncol = 2) +
        guides(colour=FALSE) +
        ggtitle(str_c('FoldX energy term distribution for ', cluster_str, 'clusters (', er_str, ')')) +
        theme(panel.background = element_blank(),
              axis.title = element_blank(),
              axis.text.x = element_text(colour = AA_COLOURS[str_sub(unique(tbl_fx$cluster), end = 1)],
                                         angle = 90, vjust = 0.5)),
      units = 'cm', width = length(unique(tbl_fx$cluster)) + 5, height = 80)
    # Per-cluster mean ddG per FoldX term, rescaled to each term's max |ddG|
    foldx_cluster_mean_energy <- gather(tbl_fx, key = 'foldx_term', value = 'ddG', total_energy:entropy_complex) %>%
      select(cluster, !!id_col, !!pos_col, wt, foldx_term, ddG, everything()) %>%
      group_by(cluster, foldx_term) %>%
      summarise(ddG = mean(ddG)) %>%
      group_by(foldx_term) %>%
      mutate(max_ddG = max(abs(ddG))) %>%
      filter(max_ddG != 0) %>% # Filter any terms that are all 0
      ungroup() %>%
      mutate(rel_ddG = ddG/max_ddG) %>%
      add_factor_order(cluster, foldx_term, rel_ddG, sym = FALSE)
    p_cluster_avg_foldx_profile <- labeled_ggplot(
      p=ggplot(foldx_cluster_mean_energy,
               aes(x=foldx_term, y=cluster, fill=rel_ddG)) +
        geom_tile() +
        scale_fill_gradient2() +
        ggtitle(str_c('Mean FoldX energy terms for each ', cluster_str, ' cluster (', er_str, ')')) +
        coord_fixed() +
        theme(plot.title = element_text(hjust = 0.5, size=8),
              axis.ticks = element_blank(),
              panel.background = element_blank(),
              axis.title = element_blank(),
              axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
              axis.text.y = element_text(colour = AA_COLOURS[str_sub(levels(foldx_cluster_mean_energy$cluster), end = 1)])),
      units='cm', height=0.4 * length(unique(tbl_fx$cluster)) + 5, width=13, limitsize=FALSE)
  } else {
    # No FoldX data supplied: all FoldX outputs are NULL placeholders
    tbl_fx <- NULL
    foldx_cluster_mean_energy <- NULL
    p_foldx_boxes <- NULL
    p_cluster_avg_foldx_profile <- NULL
  }
  return(list(angles=angles,
              cluster_sizes=cluster_sizes,
              mean_profiles=mean_profiles,
              cluster_cor_order=cluster_cor_order,
              cluster_mean_order=cluster_mean_order,
              foldx=tbl_fx,
              foldx_cluster_mean_energy=foldx_cluster_mean_energy,
              plots=list(cluster_sizes=p_cluster_size,
                         ramachandran=p_ramachandran,
                         mean_profiles=p_mean_prof,
                         mean_profile_vs_blosum=p_vs_blosum,
                         mean_profile_cor=p_centre_cor,
                         foldx_term_distribution=p_foldx_boxes,
                         foldx_cluster_mean_profile=p_cluster_avg_foldx_profile)))
}
# Scatter of cluster-centroid correlations against BLOSUM62 scores, with the
# points for one amino-acid pair (aa_pair) highlighted in red. Self
# correlations (cor == 1) are excluded.
plot_cluster_profile_cor_blosum <- function(cluster_cors, aa_pair='DE'){
  non_self <- filter(cluster_cors, cor < 1)
  highlighted <- filter(cluster_cors, cor < 1, pair == aa_pair)
  pair_colours <- structure(c('red', 'black'), names=c(aa_pair, 'All'))
  p <- ggplot(non_self, aes(x=cor, y=BLOSUM62)) +
    geom_point(aes(colour='All')) +
    geom_point(aes(colour=aa_pair), highlighted) +
    geom_smooth(method = 'lm') +
    scale_colour_manual(values = pair_colours) +
    guides(colour = guide_legend(title = 'AA Pair'))
  return(p)
}
######## |
65671c280b0ccf5857e85d67c55f2aec657ced11 | eabafd5e9945bc079b8146b8a850b9d269d773cb | /R/0_imports.R | 984ecb3ceb7981d3fd7052712545d18c56d06533 | [
"MIT"
] | permissive | camilodlt/embed | cfc3c8f0ed57f2aa712c2cf910b332c8ed5207d5 | 754ec3b4a8e6895202da9257ecfc39132701ca75 | refs/heads/master | 2023-06-03T05:16:22.683394 | 2021-06-10T21:53:26 | 2021-06-10T21:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,508 | r | 0_imports.R | #' @import recipes
#' @import rlang
#' @importFrom utils globalVariables capture.output packageVersion stack compareVersion
#' @importFrom uwot umap_transform umap
#' @importFrom keras keras_model_sequential layer_embedding layer_flatten
#' @importFrom keras layer_dense compile fit get_layer backend keras_model
#' @importFrom keras layer_concatenate layer_input
#' @importFrom lifecycle deprecated
#' @importFrom stats as.formula glm binomial coef gaussian na.omit
#' @importFrom stats setNames model.matrix complete.cases
#' @importFrom purrr map
#' @importFrom tibble rownames_to_column as_tibble tibble
#' @importFrom dplyr bind_cols bind_rows mutate filter left_join %>% arrange
#' @importFrom dplyr ends_with contains one_of
#' @importFrom dplyr tibble mutate filter left_join %>% arrange
#' @importFrom tidyr gather
#' @importFrom withr with_seed
# ------------------------------------------------------------------------------
# Re-export S3 generics from the 'generics' package so that users can call
# tidy(), required_pkgs() and tunable() without attaching generics directly.
#' @importFrom generics tidy
#' @export
generics::tidy
#' @importFrom generics required_pkgs
#' @export
generics::required_pkgs
#' @importFrom generics tunable
#' @export
generics::tunable
# ------------------------------------------------------------------------------
# Declare names used via non-standard evaluation (dplyr/keras/etc. column and
# variable names) so R CMD check does not flag them as undefined globals.
utils::globalVariables(
  c(
    "Feature", "Missing", "No", "Node", "Split", "Yes",
    "training", "col_names", "y_name",
    "n", "p", "predictor", "summary_outcome", "value", "woe", "select",
    "variable", ".",
    "type", "loss", "epochs", "..level", "..order",
    "data"
  )
)
|
75c190440f54f7559de0de725aee46ed6a4836e9 | f7a923eba16f91594419d48f6f747d7cbc6230f5 | /man/AlleleProfileR.parseleadseq.Rd | ddf25ff8d8f86a97d18e09b0a7dd81e62973d4c8 | [] | no_license | abruyneel/AlleleProfileR | 8682982914a11f845adf3af2d491b654d03ae67e | 168737311a453f6e5f3aff4010beea88d53319e4 | refs/heads/master | 2021-03-24T10:14:16.715133 | 2020-06-09T14:13:43 | 2020-06-09T14:13:43 | 110,288,019 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 882 | rd | AlleleProfileR.parseleadseq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_main.R
\name{AlleleProfileR.parseleadseq}
\alias{AlleleProfileR.parseleadseq}
\title{Parse the lead sequence in a chimeric pair}
\usage{
AlleleProfileR.parseleadseq(obj, gene, cutoff.large, index, cutrangelist)
}
\arguments{
\item{obj}{Row in the BAM datatable, a sequencing read.}
\item{gene}{Gene information vector}
\item{cutoff.large}{Cutoff value for determining whether an indel is large or small. Default is 25.}
\item{index}{Path to the .fa file containing the reference genome.}
\item{cutrangelist}{cutrangelist}
}
\value{
List with elements: output_fs, output_sm, output_lg, dels, list(ins_loc,ins_bps), startpos+atgpos-1, indelseq, output_utr, output_atg, output_inrange
}
\description{
This function processes the leading sequence in a chimeric pair.
}
\author{
Arne Bruyneel
}
|
89aa1375be333c5eb98865104b0be2519d3418dc | 61f2bb25c84e0e1008333f592f83d217604905a8 | /AHM1_ch07/AHM1_07.07.R | 3d9cea5918cf65da1dea8b00b0e457c49bfd6db8 | [] | no_license | mikemeredith/AHM_code | 9120fae8cfe7b3b79f0319038a3c6c5a80afab1a | 7f01bba14d24560a7335071a8ec6cbaf91ce30a6 | refs/heads/main | 2022-11-24T10:04:29.217824 | 2022-11-09T10:20:24 | 2022-11-09T10:20:24 | 200,148,296 | 34 | 15 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,155 | r | AHM1_07.07.R | # Applied hierarchical modeling in ecology
# Modeling distribution, abundance and species richness using R and BUGS
# Volume 1: Prelude and Static models
# Marc Kéry & J. Andy Royle
#
# Chapter 7. Modeling abundance using multinomial N-mixture models
# =========================================================================
library(unmarked)
# 7.7 Building custom multinomial models in unmarked
# ==================================================
# Removal model: capture probs for 5 sites, with 3 removal periods
# (Outer parentheses print the assigned matrix when the line is executed.)
(pRem <- matrix(0.5, nrow=5, ncol=3))
removalPiFun(pRem) # Multinomial cell probabilities for each site
# Double observer model: capture probs for 5 sites, with 2 observers
(pDouble <- matrix(0.5, 5, 2))
doublePiFun(pDouble) # Multinomial cell probabilities for each site
# Multinomial cell probabilities for a removal design with unequal-length
# periods: the three removal periods span 2, 3 and 5 sampling intervals.
#
# p: M x J matrix of per-interval detection probabilities (assumes J == 3,
#    matching the hard-coded period lengths).
# Returns an M x J matrix pi of multinomial cell probabilities per site.
instRemPiFun <- function(p){
  M <- nrow(p)
  J <- ncol(p)
  pi <- matrix(NA, M, J)
  # Convert per-interval detection probabilities into per-period capture
  # probabilities (probability of at least one capture within the period).
  p[, 1] <- pi[, 1] <- 1 - (1 - p[, 1])^2
  p[, 2] <- 1 - (1 - p[, 2])^3
  p[, 3] <- 1 - (1 - p[, 3])^5
  for (j in 2:J) {
    # Probability of surviving (i.e. not being removed in) all earlier
    # periods, times the capture probability of this period.
    not_caught_before <- pi[, j - 1] / p[, j - 1] * (1 - p[, j - 1])
    pi[, j] <- not_caught_before * p[, j]
  }
  return(pi)
}
# Cell probabilities for the unequal-interval removal design defined above
instRemPiFun(pRem)
# Observation-to-y mapping matrix (all observations map to each period)
o2y <- matrix(1, 2, 3)
o2y
|
6f4eeac19f534986721e0dd28a949333b1475d10 | 0f77b2f6d67cf63c4fbac1c823e8167f9d1b6207 | /R/polygons.R | 951e10400e1d4ba96f69c741cda579589be6bb71 | [] | no_license | cran/TargomoR | 1b658d99c08fb636767d9265ef2891ca495b20fb | 94ca6def7488844f9c5e9f6e3990b89afdae6fe4 | refs/heads/master | 2020-12-22T19:19:58.043815 | 2019-12-06T22:40:08 | 2019-12-06T22:40:08 | 236,904,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,928 | r | polygons.R | #' Add Targomo Polygons to a Leaflet Map
#'
#' Functions for retrieving isochrone polygons from the Targomo API and adding
#' drawing them on a \code{leaflet} map.
#'
#' @param map A leaflet map.
#' @param source_data The data object from which source points are derived.
#' @param source_lng,source_lat Vectors/one-sided formulas of longitude and latitude.
#' @param options A list of \code{\link{targomoOptions}} to call the API.
#' @param polygons A polygons dataset returned by \code{getTargomoPolygons}, for drawing
#' @param drawOptions A list of \code{\link{polygonDrawOptions}} to determine how to show
#' the resulting polygons on the map.
#' @param group The leaflet map group to add the polygons to. A single group is used
#' for all the polygons added by one API call.
#' @param ... Further arguments to pass to \code{\link[leaflet]{addPolygons}}
#' @param api_key Your Targomo API key - defaults to the \code{TARGOMO_API_KEY}
#' environment variable
#' @param region Your Targomo region - defaults to the \code{TARGOMO_REGION}
#' environment variable
#' @param config Config options to pass to \code{httr::POST} e.g. proxy settings
#' @param verbose Whether to print out information about the API call.
#' @param progress Whether to show a progress bar of the API call.
#' @param timeout Timeout in seconds (leave NULL for no timeout/curl default).
#'
#' @return For `get*`, an object of class "sf" containing the polygons. For `draw*` and `add*`,
#' the leaflet map returned with the polygons drawn on.
#'
#' @examples
#' \donttest{
#' # load leaflet package
#' library(leaflet)
#' l <- leaflet()
#'
#' # get the polygons
#' p <- getTargomoPolygons(source_lat = 51.5007, source_lng = -0.1246,
#' options = targomoOptions(travelType = "bike"))
#'
#' # draw them on the map
#' l %>% drawTargomoPolygons(polygons = p, group = "BigBenBike")
#'
#' # note could combine get... and draw... into one with add...
#'
#' }
#'
#' @name getTargomoPolygons
#'
NULL
#' @rdname getTargomoPolygons
#' @export
getTargomoPolygons <- function(source_data = NULL, source_lat = NULL, source_lng = NULL,
                               options = targomoOptions(),
                               api_key = Sys.getenv("TARGOMO_API_KEY"),
                               region = Sys.getenv("TARGOMO_REGION"),
                               config = list(),
                               verbose = FALSE,
                               progress = FALSE,
                               timeout = NULL) {
  # Build the request: source points plus the derived option set.
  src_points <- createPoints(source_data, source_lat, source_lng, NULL)
  derived_opts <- deriveOptions(options)
  request_body <- createRequestBody("polygon",
                                    deriveSources(src_points, derived_opts),
                                    NULL, derived_opts)
  # Call the Targomo polygon service and parse the response.
  api_response <- callTargomoAPI(api_key = api_key, region = region,
                                 service = "polygon", body = request_body,
                                 config = config,
                                 verbose = verbose, progress = progress,
                                 timeout = timeout)
  return(processResponse(api_response, service = "polygon"))
}
#' @rdname getTargomoPolygons
#' @export
drawTargomoPolygons <- function(map, polygons,
                                drawOptions = polygonDrawOptions(),
                                group = NULL,
                                ...) {
  # Forward each draw option explicitly to leaflet::addPolygons; extra
  # arguments in ... are passed straight through.
  o <- drawOptions
  leaflet::addPolygons(map, data = polygons, group = group,
                       stroke = o$stroke,
                       weight = o$weight,
                       color = o$color,
                       opacity = o$opacity,
                       fill = o$fill,
                       fillColor = o$fillColor,
                       fillOpacity = o$fillOpacity,
                       dashArray = o$dashArray,
                       smoothFactor = o$smoothFactor,
                       noClip = o$noClip,
                       ...)
}
#' @rdname getTargomoPolygons
#' @export
addTargomoPolygons <- function(map,
                               source_data = NULL, source_lng = NULL, source_lat = NULL,
                               options = targomoOptions(),
                               drawOptions = polygonDrawOptions(),
                               group = NULL,
                               ...,
                               api_key = Sys.getenv("TARGOMO_API_KEY"),
                               region = Sys.getenv("TARGOMO_REGION"),
                               config = list(),
                               verbose = FALSE,
                               progress = FALSE,
                               timeout = NULL) {
  # Convenience wrapper: fetch the isochrone polygons from the API, then
  # draw them onto the supplied leaflet map in one call.
  isochrones <- getTargomoPolygons(source_data = source_data,
                                   source_lat = source_lat,
                                   source_lng = source_lng,
                                   options = options,
                                   api_key = api_key, region = region,
                                   config = config,
                                   verbose = verbose, progress = progress,
                                   timeout = timeout)
  return(drawTargomoPolygons(map = map,
                             polygons = isochrones,
                             drawOptions = drawOptions,
                             group = group,
                             ...))
}
#' Options for Drawing Polygons on the Map
#'
#' Function to return a list of the desired drawing options - you can set all the usual
#' parameters of a call to \code{\link[leaflet]{addPolygons}}.
#'
#' @param stroke Whether to draw the polygon borders.
#' @param weight Stroke width in pixels.
#' @param color Stroke colour.
#' @param opacity Stroke opacity.
#' @param fill Whether to fill the polygons in with colour.
#' @param fillColor The fill colour.
#' @param fillOpacity The fill opacity.
#' @param dashArray A string to define the stroke dash pattern.
#' @param smoothFactor How much to simplify polylines on each zoom level.
#' @param noClip Whether to disable polyline clipping.
#'
#' @return A list of options governing how the polygons appear on the map
#'
#' @examples
#' # show the list
#' polygonDrawOptions()
#'
#' @export
polygonDrawOptions <- function(stroke = TRUE,
                               weight = 5,
                               color = c("red", "orange", "green"),
                               opacity = 0.5,
                               fill = TRUE,
                               fillColor = color,
                               fillOpacity = 0.2,
                               dashArray = NULL,
                               smoothFactor = 1,
                               noClip = FALSE) {
  # Collect all options, then drop any that are NULL so leaflet falls back
  # to its own defaults for those.
  opts <- list(stroke = stroke,
               weight = weight,
               color = color,
               opacity = opacity,
               fill = fill,
               fillColor = fillColor,
               fillOpacity = fillOpacity,
               dashArray = dashArray,
               smoothFactor = smoothFactor,
               noClip = noClip)
  leaflet::filterNULL(opts)
}
|
04f6ba771de574adc874e9a6df57c9d84bceed44 | af77cc9ccadb9cf4d451831fdd07abe13503a879 | /yelp/wekafiles/packages/RPlugin/mlr/mlr/man/predict.Rd | 29d893ab1382e61e0560c765781a4255e7c862fa | [] | no_license | tummykung/yelp-dataset-challenge | 7eed6a4d38b6c9c90011fd09317c5fa40f9bc75c | 84f12682cba75fa4f10b5b3484ce9f6b6c8dad4a | refs/heads/master | 2021-01-18T14:10:55.722349 | 2013-05-21T09:30:37 | 2013-05-21T09:30:37 | 9,527,545 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,531 | rd | predict.Rd | \name{predict}
\alias{predict,wrapped.model-method}
\alias{predict}
\title{Predict new data.}
\description{Predict the target variable of new data using a fitted model. If the type is set to "prob" or "decision",
probabilities or decision values will be stored in the resulting object. The resulting class labels are
the classes with the maximum values; alternatively, thresholding can be used.}
\value{\code{\linkS4class{prediction}}.}
\seealso{\code{\link{train}}}
\arguments{\item{object}{[\code{\linkS4class{wrapped.model}}] \cr
Wrapped model, trained from a learn task.}
\item{task}{[\code{\linkS4class{learn.task}}]\cr
Specifies learning task. If this is passed, data from this task is predicted.}
\item{subset}{[integer] \cr
Index vector to subset the data in the task to use for prediction.}
\item{newdata}{[\code{\link{data.frame}}] \cr
New observations which should be predicted. Alternatively pass this instead of task.}
\item{type}{[string] \cr
Classification: "response" | "prob" | "decision", specifying the type to predict.
Default is "response". "decision" is experimental.
Ignored for regression.}
\item{threshold}{[numeric] \cr
Threshold to produce class labels if type is not "response".
Currently only supported for binary classification and type="prob", where it represents the required predicted probability
for the positive class, so that a positive class is predicted as "response".
Default is 0.5 for type="prob".
Ignored for regression.}
\item{group}{[factor] \cr
Only for internal use!
Default is NULL.}
}
|
4d40cfb97296e18b9459b1f1cc2042e54ea985ad | fcbcc2b78b14310692e068b9c8b2b3b983036015 | /R Code/01_RBTV_YTComment_Analysis.R | bf9685782cacfbb03b601703939349e1c8744c24 | [] | no_license | Llanek/R_YouTube_Sentiment_Analysis | cdb78564d9cb608962730de30d8eacb8ce38de76 | faaf1d280279081bfb80e9897eecd7f95e7bda5a | refs/heads/main | 2023-02-04T02:33:16.349268 | 2020-12-24T02:48:18 | 2020-12-24T02:48:18 | 322,159,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,654 | r | 01_RBTV_YTComment_Analysis.R | # Filename: RBTV_YTComment_Analysis
# Author: YBU
# Date: 23.12.2020
# Runtime: --
# Call the needed libraries
require(vosonSML) # necessary to get the Authenticate, Collect, ... functions
require(jsonlite) # necessary to get the fromJSON function
require(config) # necessary to read out the config .yml-file
require(data.table) # better than just data.frame
#library(magrittr)
#require(curl)
# Get and Read Google Developer Key / YouTube V3 API Key and Authentification
# Read the YouTube Data API v3 key from the config .yml file (key "API_Key")
sAPIKey <- config::get("API_Key")
#print(APIkey) # Debugging
# Authenticate against YouTube via vosonSML using that key
arrKey <- Authenticate("youtube",sAPIKey)
# #Collect Data using YouTube videos
# ytVideos <- c()
# ytVideoIds <- GetYoutubeVideoIDs(ytVideos)
# ytData <- Collect(
# keyvideoIDs = ytVideoIds
# ,maxComments = 100
# ,verbose = FALSE
# )
#
# # Save the comment data as csv
# filenamecsv <- "snippet2"
# pathcsv <- paste("C:/Users/Yannic/Desktop/", filenamecsv, ".csv")
# delimitercsv <- ";"
# write.csv2(
# ytData
# , file = pathcsv
# , append = FALSE
# , sep = delimitercsv
# , row.names = FALSE
# )
#
# # Read the comment data
# data <- read.csv(
# pathcsv
# , sep = delimitercsv
# , header = TRUE
# )
# str(data)
#
# # Get channel ID
# channelID <- "UCkfDws3roWo1GaA3pZUzfIQ" #RBTV LP&Streams
# channelData <- youtube.channels.list(channelID, part=contentDetails)
# Set debugging variables
# Hard-coded YouTube IDs used only for debugging/testing the functions below
# (presumably a sample video/channel/playlist — confirm they still exist).
bug_VideoID <- "3gJngOCyrZg"
bug_ChannelID <- "UCkfDws3roWo1GaA3pZUzfIQ"
bug_PlaylistID <- "PLsD6gQXey8N1pHbp1MVTmnmCx5vConHKl"
# function to get up to the last 50 playlists (default 15) and their names from a specific channel. requires the channelID!
# Fetch up to iMaxResults (API max 50, default 15) playlists of a channel via
# the YouTube Data API v3 and return their titles and IDs.
#
# arg_sChannelID: channel ID string; arg_sAPIKey: API key string.
# Returns a data.table with columns playlist_names and playlist_IDs.
get_ChannelPlaylists <- function(arg_sChannelID, arg_sAPIKey, iMaxResults = 15){
  # assemble the playlists endpoint URL for the given channel
  request_url <- paste0('https://www.googleapis.com/youtube/v3/playlists?part=snippet&channelId=',arg_sChannelID,'&key=',arg_sAPIKey,'&maxResults=',iMaxResults)
  # download and parse the JSON response via jsonlite
  response <- fromJSON(request_url)
  playlist_items <- response[["items"]]
  # one row per playlist: its title and its ID
  data.table(
    playlist_names = playlist_items[["snippet"]][["title"]]
    , playlist_IDs = playlist_items[["id"]]
  )
}
# Test
# test_ChannelPlaylists <- get_ChannelPlaylists(sChannelID, sAPIKey)
# function to get up to the last 50 videoIDs (default 15) out of a playlist and the video release dates. requires the playlistID!
# Fetch up to iMaxResults (API max 50, default 15) videos from a playlist via
# the YouTube Data API v3.
#
# arg_sPlaylistID: playlist ID string; arg_sAPIKey: API key string.
# Returns a data.table with columns name, videoID and published (date).
get_PlaylistVideos <- function(arg_sPlaylistID, arg_sAPIKey, iMaxResults = 15){
  # assemble the playlistItems endpoint URL for the given playlist
  request_url <- paste0('https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=',arg_sPlaylistID,'&key=',arg_sAPIKey,'&maxResults=',iMaxResults)
  # download and parse the JSON response via jsonlite
  response <- fromJSON(request_url)
  snippet <- response$items$snippet
  # one row per video: title, video ID and publication timestamp
  data.table(
    name = snippet$title
    , videoID = snippet$resourceId$videoId
    , published = snippet$publishedAt
  )
}
# Test
# test_PlaylistVideos <- get_PlaylistVideos(bug_PlaylistID, sAPIKey)
# function to retrieve the information about a video and its basic statistics. requires the videoID!
# Fetch snippet and statistics for a single video via the YouTube Data API v3.
#
# arg_sVideoID: video ID string; arg_sAPIKey: API key string.
# Returns a one-row data.table: the video ID, its tags (as a list column)
# and the view/like/dislike/favorite/comment counts (as returned by the API).
get_VideoStats <- function(arg_sVideoID, arg_sAPIKey){
  # assemble the videos endpoint URL for the given video
  request_url <- paste0("https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics&id=",arg_sVideoID,"&key=",arg_sAPIKey)
  # download and parse the JSON response via jsonlite
  response <- fromJSON(request_url)
  stats <- response$items$statistics
  data.table(
    videoID = arg_sVideoID
    # tags wrapped in list() so the vector fits into a single row
    , tags = list(response[["items"]][["snippet"]][["tags"]][[1]])
    , viewCount = stats$viewCount
    , likeCount = stats$likeCount
    , dislikeCount = stats$dislikeCount
    , favoriteCount = stats$favoriteCount
    , commentCount = stats$commentCount
  )
}
# Test
# test_VideoStats <- get_VideoStats(bug_VideoID, sAPIKey)
# function to join the results from get_PlaylistVideos and get_VideoStats
# Join per-video statistics (get_VideoStats) onto the videos of a playlist
# (get_PlaylistVideos), one API call per video.
#
# arg_dt_resultPlaylist: data.table with columns name, videoID, published.
# arg_sAPIKey: YouTube Data API v3 key.
# Returns a data.table with one row per video: the playlist information
# followed by the video's tag list and count statistics.
join_PlaylistVideosInformation <- function(arg_dt_resultPlaylist, arg_sAPIKey){
  # key the playlist table on videoID so rows can be selected by ID below
  setkey(arg_dt_resultPlaylist, videoID)
  # empty template pins the column names and order of the result
  dtTemplate <- data.table(
    name = character()
    , videoID = character()
    , published = character()
    , tags = character()
    , viewCount = character()
    , likeCount = character()
    , dislikeCount = character()
    , favoriteCount = character()
    , commentCount = character()
  )
  # Collect one joined row per video in a preallocated list and bind once at
  # the end: the previous version grew the result with rbindlist inside the
  # loop, which is O(n^2) in the number of videos.
  video_ids <- arg_dt_resultPlaylist$videoID
  rows <- vector("list", length(video_ids))
  for (i in seq_along(video_ids)){
    tmp_videoID <- video_ids[[i]]
    # fetch the statistics for the current video
    tmp_dt_vidInfo <- get_VideoStats(tmp_videoID, arg_sAPIKey)
    # key on videoID so the exact row can be addressed
    setkey(tmp_dt_vidInfo, videoID)
    # playlist columns followed by the statistic columns (2:7 skips the
    # duplicate videoID column)
    rows[[i]] <- cbind(arg_dt_resultPlaylist[.(tmp_videoID)],
                       tmp_dt_vidInfo[.(tmp_videoID), c(2:7)])
  }
  return(rbindlist(c(list(dtTemplate), rows)))
}
# Test
# test_Join <- join_PlaylistVideosInformation(test_PlaylistVideos, sAPIKey)
|
992768be10c09edd66291df3b3f98b940ec2cb90 | 20acf11f5893b0edd6221c1aa427276c38c3b41a | /tests/regtest-stabsel.R | e4e09996b29d41fbfc7b03c42d233843f6f13229 | [] | no_license | cran/gamboostLSS | 6051c3cc662986a7cad47d0debebc0b6d93d1155 | b92d7ea2416068ae6475f2e0f8fe25f8cbff448b | refs/heads/master | 2023-03-19T10:51:39.078635 | 2023-03-09T19:20:02 | 2023-03-09T19:20:02 | 17,696,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,814 | r | regtest-stabsel.R | require("gamboostLSS")
### Data generating process:
set.seed(1907)
x1 <- rnorm(500)
x2 <- rnorm(500)
x3 <- rnorm(500)
x4 <- rnorm(500)
x5 <- rnorm(500)
x6 <- rnorm(500)
mu <- exp(1.5 +1 * x1 +0.5 * x2 -0.5 * x3 -1 * x4)
sigma <- exp(-0.4 * x3 -0.2 * x4 +0.2 * x5 +0.4 * x6)
y <- numeric(500)
for( i in 1:500)
y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])
dat <- data.frame(x1, x2, x3, x4, x5, x6, y)
model <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,
control = boost_control(mstop = 10),
center = TRUE, method = "cyclic")
s1 <- stabsel(model, q = 5, PFER = 1, B = 10) ## warning is expected
plot(s1)
plot(s1, type = "paths")
model <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,
control = boost_control(mstop = 10),
center = TRUE, method = "noncyclic")
s2 <- stabsel(model, q = 5, PFER = 1, B = 10) ## warning is expected
plot(s2)
plot(s2, type = "paths")
## with informative sigma:
sigma <- exp(-0.4 * x3 -0.2 * x4 +0.2 * x5 + 1 * x6)
y <- numeric(500)
for( i in 1:500)
y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])
dat <- data.frame(x1, x2, x3, x4, x5, x6, y)
model <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,
control = boost_control(mstop = 10),
center = TRUE, method = "cyclic")
s3 <- stabsel(model, q = 5, PFER = 1, B = 10) ## warning is expected
plot(s3)
plot(s3, type = "paths")
model <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,
control = boost_control(mstop = 10),
center = TRUE, method = "noncyclic")
s4 <- stabsel(model, q = 5, PFER = 1, B = 10) ## warning is expected
plot(s4)
plot(s4, type = "paths")
|
f445e12d840320ca232fda2bcaf64c9965cf8fd9 | 3db305c9b6f9f791d2668f88e9f42c0cbfbaf4cf | /argosTrack/man/plotMap-Animal-method.Rd | b360ab31a189e23803ec533429a59b06f09841a2 | [] | no_license | calbertsen/argosTrack | 4789f170f0b53cf2afa83195c55d57c25d3bd591 | d09d54082bcf03c555f3553ff444bb5dc2246b34 | refs/heads/master | 2022-09-02T05:37:29.760935 | 2020-11-25T12:59:55 | 2020-11-25T12:59:55 | 24,145,844 | 10 | 7 | null | null | null | null | UTF-8 | R | false | true | 1,047 | rd | plotMap-Animal-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMap.R
\docType{methods}
\name{plotMap,Animal-method}
\alias{plotMap,Animal-method}
\title{Map plot for an Animal reference class object}
\usage{
\S4method{plotMap}{Animal}(object, plotArgs = list(), args = list(lwd =
3, col = "red"), add = FALSE, obsArgs = list(pch = 16),
sdArgs = list(col = "grey", border = NA), ...)
}
\arguments{
\item{object}{Animal reference class object}
\item{plotArgs}{Arguments to setup background plot}
\item{args}{Arguments for plotting movement data.}
\item{add}{If FALSE a new plot window is created.}
\item{obsArgs}{Arguments for plotting observation data.}
\item{sdArgs}{Arguments for plotting standard errors.}
\item{...}{additional arguments}
}
\value{
Invisibly returns the reference class object
}
\description{
Map plot for an Animal reference class object
}
\seealso{
\code{\link{plotMap}}, \code{\link{plotMap,Movement-method}}, \code{\link{plotMap,Observation-method}}
}
\author{
Christoffer Moesgaard Albertsen
}
|
3c53a162d0442a0168f532816368a1fe494ee873 | e54ab4168c48575acb6a577c5f523aa250779266 | /plot4.r | 987485095f394862c4d9def35d4ab8364542ae63 | [] | no_license | gimanhh/ExData_Plotting1 | 9d4001b6b719ca3e0f6f4cd3def1297b1d2f5190 | 75a90b50ae204ea8b0e0b0d8bcf985077b1b97c8 | refs/heads/master | 2020-12-25T22:57:25.955937 | 2015-05-10T23:53:10 | 2015-05-10T23:53:10 | 35,391,770 | 0 | 0 | null | 2015-05-10T23:13:48 | 2015-05-10T23:13:48 | null | UTF-8 | R | false | false | 1,169 | r | plot4.r | data<-read.table("household_power_consumption.txt",sep=";",header=T,stringsAsFactors=F)
data$Date<-strptime(paste(data$Date,data$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
data$Global_active_power<-as.numeric(data$Global_active_power)
data$Global_reactive_power<-as.numeric(data$Global_reactive_power)
data$Voltage<-as.numeric(data$Voltage)
data$Global_intensity<-as.numeric(data$Global_intensity)
data$Sub_metering_1<-as.numeric(data$Sub_metering_1)
data$Sub_metering_2<-as.numeric(data$Sub_metering_2)
subset1<-subset(data,Date<"2007-02-03")
subset1<-subset(subset1,Date>="2007-02-01")
rm(data)
attach(subset1)
#plot4
png("Plot4.png",width=480,height=480)
par(mfrow=c(2,2))
plot(Date,Global_active_power,type='l',xlab='',ylab="Global Active Power (kilowatts)")
plot(Date,Voltage,type='l',xlab='datetime',ylab='Voltage')
plot(Date,Sub_metering_1,type='l',xlab='',ylab='Energy sub metering')
lines(Date,Sub_metering_2,col=2)
lines(Date,Sub_metering_3,col=4)
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c(1,2,4),bty="n",cex=0.8)
plot(Date,Global_reactive_power,type='l',xlab='datetime',ylab="Global_reactive_power")
dev.off()
|
37957231359ff527bba0800402fb285116506065 | 4b29ea37d3fc6bdd1b762a4b8b339907e1d37c23 | /STAT534/HW 3 Q1_maahs.R | 1a4108876f4e2bed222f520ccdfb93b971eadb6f | [] | no_license | ReadingRailroad/BMB-COC | db89674d8213bd407ba3ea5e19124a3ef16d6e27 | c44b5c6168094b2dc9295cc08a82e8f8142e3875 | refs/heads/master | 2022-04-26T03:36:40.475750 | 2020-04-24T12:24:21 | 2020-04-24T12:24:21 | 151,457,143 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,498 | r | HW 3 Q1_maahs.R | setwd("~/Stat 534/Homework 3")
# STAT 534 HW3 Q1: dead-recovery models for cormorant LD (live/dead)
# encounter data, fitted through RMark (an R interface to program MARK).
library(RMark)
# Read the capture-history file; field.types = 'n' marks the extra
# (non-history) column as numeric.
cormorant <- import.chdata("cormorantLD.txt", header = T, field.types=c('n'))
#1B
# Process the data for the 'Recovery' model class and build the design
# data: S = survival probability, r = recovery probability.
cormorant_proc <- process.data(cormorant, model='Recovery')
#create design matrix
cormorant_ddl <- make.design.data(cormorant_proc)
cormorant_ddl$S
cormorant_ddl$r
# Parameter formulas: constant (~1) and fully time-varying (~time).
f0 <- list(formula=~1)
ft <- list(formula=~time)
# Model S(.) r(t): constant survival, time-dependent recovery.
S0_rt <- mark(data = cormorant_proc,
              ddl = cormorant_ddl,
              model.parameters= list (S=f0, r=ft),
              invisible = FALSE,
              model.name = 'Recovery')
S0_rt$results$real #survival = 0.51 (0.44, 0.58)
#1C
# Model S(t) r(t): fully time-dependent survival and recovery.
St_rt <- mark(data = cormorant_proc,
              ddl = cormorant_ddl,
              model.parameters= list (S=ft, r=ft),
              invisible = FALSE,
              model.name = 'Recovery')
St_rt$results$real #survival = the first estimate is unestimatable and last S is confounded
#1D
# Model S(T) r(t): linear trend in survival (Time is the continuous
# covariate in MARK's design data, as opposed to the factor `time`).
fT <- list(formula=~Time)
ST_rt <- mark(data = cormorant_proc,
              ddl = cormorant_ddl,
              model.parameters= list (S=fT, r=ft),
              model.name = 'Recovery')
ST_rt$results$real #
#1E
# Gather every 'Recovery' model fitted in this session into an AIC table.
cormorant_results <- collect.models(lx = NULL,
                                    type = 'Recovery',
                                    table = TRUE,
                                    adjust = TRUE)
cormorant_model_table <- model.table(cormorant_results, model.name = F)
cormorant_model_table #St_rt has the lowest AIC and all the weight
|
d1e3fe9929d5be65b429080a9148dd20afcd2a16 | 08246e3776b02900f7aa2543c1b43544b13e84c8 | /scr/create_regression_table.R | dcf1b73dab95eb72ba8516effb3ca4e73da98398 | [] | no_license | klsea/boundSkew1 | 4da5d41d114eb62089cb168c9a3587d17a37b6aa | 660ae5483fa135c42226762f1658ddb9d14e46ba | refs/heads/master | 2022-08-30T06:39:17.504130 | 2020-05-15T22:32:49 | 2020-05-15T22:32:49 | 192,785,765 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | create_regression_table.R | # run models on skew data
# 7.15.19 KLS updated 10.17.19
# Builds a side-by-side regression table (sjPlot::tab_model) for the
# baseline and four nested models saved earlier as RDS, then writes a
# chi-squared model-comparison table to output/.
# load required packages
library(here)
library(sjPlot)
# read in model fits
b <- readRDS(here('output', 'baseline.RDS'))
m1 <- readRDS(here('output', 'm1.RDS'))
m2 <- readRDS(here('output', 'm2.RDS'))
m3 <- readRDS(here('output', 'm3.RDS'))
m4 <- readRDS(here('output', 'm4.RDS'))
# make table (one column per model, human-readable predictor labels)
t2 <- tab_model(b,m1,m2,m3,m4,
                dv.labels = c('Baseline', 'Model 1', 'Model 2', 'Model 3', 'Model 4'),
                pred.labels = c('Intercept', 'Degree of Skew (Weak)', 'Degree of Skew (Moderate)', 'Degree of Skew (Strong)', 'Valence (Gain)', 'Valence (Loss)', 'Magnitude (0.5)', 'Magnitude (5)', "Magnitude x Valence (Gain x 0.5)", "Magnitude x Valence (Loss x 0.5)", "Magnitude x Valence (Gain x 5)", "Magnitude x Valence (Loss x 5)", "Age"))
t2
# create table of chi square values comparing models
# Each anova() compares two nested fits; column 6/7/8 of the anova table
# hold the chi-square, df and p-value, and row 2 is the comparison row.
chi <- c(anova(b,m1)[6][2,], anova(b,m2)[6][2,], anova(b,m3)[6][2,], anova(m3,m4)[6][2,])
df <- c(anova(b,m1)[7][2,], anova(b,m2)[7][2,], anova(b,m3)[7][2,], anova(m3,m4)[7][2,])
p <- c(anova(b,m1)[8][2,], anova(b,m2)[8][2,], anova(b,m3)[8][2,], anova(m3,m4)[8][2,])
p <- round(p, 3)
# Sample size pulled from the fitted model's @Gp slot — presumably the
# group-pointer count of a merMod object; verify this is the intended n.
n <- rep(b@Gp[2],4)
chi <- data.frame(df, n, chi, p)
# NOTE(review): the third comparison is labelled 'm1_m3' but the code above
# computes anova(b, m3) — either the label or the call is wrong; confirm
# which comparison was intended.
rownames(chi) <- c('b_m1', 'b_m2', 'm1_m3', 'm3_m4')
write.csv(chi, here('output', 's1_chi_squared.csv'))
|
809f80a45af0d93ff1bc8408591ce9a72ffe6349 | c19fd9986434986eafba283f678336837d755101 | /main.R | f580e66624f2944c0b514808d6534b206239411b | [] | no_license | HughPham/agriculture | 4108a817f59238d6b4cf61a39b74254b0f3dca29 | 639e30e1d5fb77b0b90e8f1bd8bdbdc9d9ea5705 | refs/heads/master | 2020-03-21T04:39:38.428495 | 2018-06-21T04:56:14 | 2018-06-21T04:56:14 | 138,121,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,607 | r | main.R | library(readr)
library(tidyverse)
# data from 2001-2004: read each yearly county CSV; 2002 and 2004 carry an
# extra county-name column that is dropped so all four years align.
county_data_2001 <-
  read_csv("original-data-agriculture/county_data_2001.csv")
# var84 needs an explicit numeric coercion in 2001 only — presumably it is
# read as character in that file; verify against the raw CSV.
county_data_2001$var84 <- as.numeric(county_data_2001$var84)
county_data_2002 <-
  read_csv("original-data-agriculture/county_data_2002.csv") %>% select(-countyname_2002)
county_data_2003 <-
  read_csv("original-data-agriculture/county_data_2003.csv")
county_data_2004 <-
  read_csv("original-data-agriculture/county_data_2004.csv") %>% select(-countyname_2004)
# Stack the four years and sort by county code (var3) then year (var1).
dt01_04 <-
  bind_rows(county_data_2001,
            county_data_2002,
            county_data_2003,
            county_data_2004) %>%
  arrange(var3, var1)
# Treat zeroes in the value columns (5..ncol) as missing.
dt01_04_part2 <-
  dt01_04 %>% select(seq(5, ncol(dt01_04))) %>%
  apply(., 2, function(x)
    ifelse(x == 0 , NA, x))
# Re-attach the four id columns and keep only rows that have at least one
# non-missing value column.
new_dt01_04 <-
  cbind(dt01_04 %>% select(seq(1, 4)), dt01_04_part2) %>%
  filter(dt01_04_part2 %>%
           apply(., 1, function(x)
             (sum(is.na(
               x
             )) < ncol(dt01_04_part2)))) %>%
mutate(var2 = gsub(" ", "", var2)) %>%
mutate(var2 = gsub(" ", "", var2)) %>%
mutate(var3 = as.character(var3))
# Patch missing/incorrect county codes (var3), keyed on the Chinese county
# name in var2 (e.g. Baodi -> 120224, Jinghai -> 120223, ...).
new_dt01_04 <-
  within(new_dt01_04, {
    var3[which(grepl("宝坻", var2))] <- "120224"
    var3[which(grepl("静海", var2))] <- "120223"
    var3[which(grepl("宁河", var2))] <- "120221"
    var3[which(grepl("海城市", var2))] <- "210381"
    var3[which(grepl("砀山县", var2))] <- "341321"
    var3[which(grepl("金城江区", var2))] <- "451202"
    var3[which(grepl("新都区", var2))] <- "510114"
    var3[which(grepl("瑶海区", var2))] <- "340102"
    var3[which(grepl("庐阳区", var2))] <- "340103"
    var3[which(grepl("蜀山区", var2))] <- "340104"
    var3[which(grepl("包河区", var2))] <- "340111"
    var3[which(grepl("毛集区", var2))] <- "340407"
    var3[which(grepl("叶集区", var2))] <- "341501"
    var3[which(grepl("双湖", var2))] <- "542431"
  })
# Sanity checks for bad county codes: duplicated (code, year) pairs, and
# codes appearing in more than the four expected years.
new_dt01_04 %>% group_by(var3, var1) %>%
  summarise(count = n()) %>% filter(count > 1) %>% arrange(-count) %>% View()
new_dt01_04 %>% group_by(var3) %>%
  summarise(count = n()) %>% filter(count > 4) %>% arrange(-count) %>% View()
# Free the per-year inputs.
# NOTE(review): dt_part1 is only defined later in the script, so this rm()
# emits an "object not found" warning here; the name can likely be dropped.
rm(
  county_data_2001,
  county_data_2002,
  county_data_2003,
  county_data_2004,
  dt_part1,
  dt01_04,
  dt01_04_part2
)
# data from 1996-2000
X1996_2000new <-
  read_csv("original-data-agriculture/1996-2000new.csv")
# Treat zeroes in the value columns (8..ncol) as missing.
dt_part1 <-
  X1996_2000new %>%
  select(seq(8, ncol(X1996_2000new))) %>%
  apply(., 2, function(x)
    ifelse(x == 0, NA, x))
# Keep rows with at least one non-missing value column.
# NOTE(review): 108 is a hard-coded count of value columns; this silently
# breaks if the input gains/loses columns — confirm it equals ncol(dt_part1).
dt96_00 <-
  cbind(X1996_2000new %>% select(seq(1, 7)), dt_part1) %>%
  filter(dt_part1 %>% apply(., 1, function(x)
    sum(is.na(x))) != 108)
dt96_00 <-
  dt96_00 %>%
  filter(!duplicated(.))
# Inspect rows whose county code is missing.
# NOTE(review): the pipe ends in View(.), which returns invisible NULL, so
# dt96_00_1 does not actually hold the inspected table.
dt96_00_1 <-
  dt96_00 %>%
  filter(is.na(code)) %>% select(name) %>% filter(!duplicated(.)) %>%
  inner_join(., dt96_00) %>%
  arrange(name) %>% View(.)
# Fill in known county codes by name, then drop rows still lacking a code.
dt96_00 <-
  within(dt96_00, {
    code[which(name == "城关区")] <- "620102"
    code[which(name == "东区")] <- "510402"
    code[which(name == "西区")] <- "510403"
    code[which(name == "红海湾开发")] <- "440902"
    code[which(grepl("双湖", name))] <- "542431"
  }) %>%
  filter(!is.na(code))
library(readxl)
# Variable-name crosswalk: one row per harmonised variable; new_var assigns
# each a stable id ("new1", "new2", ...) used as the output column name.
variable_name_1980_2000 <-
  read_excel("original-data-agriculture/variable_name.xls",
             sheet = "merge_list") %>%
  mutate(new_var = paste("new", seq(1, nrow(.)), sep = ""))
# Reshape the 2001-2004 panel to long form and map the raw variable codes
# onto harmonised `new_var` names via the var00 column of the crosswalk.
new_dt2 <-
  new_dt01_04 %>%
  rename(
    year = var1,
    name = var2,
    nameen = var2_EN,
    code = var3
  ) %>%
  select(-nameen) %>%
  # melt to long form: one row per (county, year, variable)
  reshape2::melt(.,
                 id.vars = c("name", "year", "code"),
                 na.rm = TRUE) %>%
  mutate(variable = as.character(variable)) %>%
  left_join(
    .,
    variable_name_1980_2000 %>%
      select(var00, new_var) %>%
      filter(!is.na(var00)) %>%
      rename(variable = var00)
  ) %>%
  select(-variable) #%>%
  # FIX: the `%>%` above was commented out but the dcast stage was left
  # behind as a free-standing expression; it would error at run time
  # because `.` is undefined outside a pipe. The cast back to wide form is
  # performed later when building dt9604, so the orphaned stage is
  # disabled here to match the author's evident intent.
  # reshape2::dcast(., name + code + year ~ new_var)
# Same long-form mapping for the 1996-2000 panel; raw names are normalised
# (x->X, a->A, strip spaces) before joining on the var90 column of the
# crosswalk.
new_dt1 <-
  dt96_00 %>%
  select(-starts_with("relation")) %>%
  reshape2::melt(.,
                 id.vars = c("name", "year", "code"),
                 na.rm = TRUE) %>%
  mutate(variable = gsub("x", "X", as.character(variable))) %>%
  mutate(variable = gsub("a", "A", as.character(variable))) %>%
  mutate(variable = gsub(" ", "", as.character(variable))) %>%
  left_join(
    .,
    variable_name_1980_2000 %>%
      select(var90, new_var) %>%
      filter(!is.na(var90)) %>%
      rename(variable = var90)
  ) %>%
  select(-variable) #
# Combine both panels and cast back to wide form: one row per county-year,
# one column per harmonised variable.
dt9604 <-
  bind_rows(new_dt1, new_dt2) %>%
  reshape2::dcast(., name + code + year ~ new_var) %>%
  arrange(code, year)
|
021c81e725cde4a63d75548bbd65eb37aa5c03ca | a3c78700a65f10714471a0d307ab984e8a71644d | /modules/emulator/man/PEcAn.emulator-package.Rd | ba35157964ce065c9b0f9b0ba6ab0d73ec4356e1 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PecanProject/pecan | e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f | ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c | refs/heads/develop | 2023-08-31T23:30:32.388665 | 2023-08-28T13:53:32 | 2023-08-28T13:53:32 | 6,857,384 | 187 | 217 | NOASSERTION | 2023-09-14T01:40:24 | 2012-11-25T23:48:26 | R | UTF-8 | R | false | true | 770 | rd | PEcAn.emulator-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PEcAn.emulator-package.R
\docType{package}
\name{PEcAn.emulator-package}
\alias{PEcAn.emulator}
\alias{PEcAn.emulator-package}
\title{Implementation of a Gaussian Process model for `PEcAn`
Supports both likelihood and bayesian approaches for kriging and model
emulation. Includes functions for sampling design and prediction.}
\description{
Implementation of a Gaussian Process model (both likelihood and Bayesian approaches) for kriging and model emulation. Includes functions for sampling design and prediction.
}
\author{
\strong{Maintainer}: Mike Dietze \email{dietze@bu.edu}
Other contributors:
\itemize{
\item University of Illinois, NCSA [copyright holder]
}
}
\keyword{internal}
|
e235b827bb1ec3c726440e955a0e37846b54c629 | 887ca9c3979a933ec88bf5a6a9caaac80b8428dc | /server.R | 525af5712f41018d1f8fd0fef1528b355b69e03c | [] | no_license | jerrymcummings/coursera-developing-data-products | 04e70b1bdf1526830ba581ad8ee9e2852bb31aea | 7cc1f7a55dc7aad0c37b1d30dece68b7a0dffebb | refs/heads/master | 2021-01-01T18:18:22.820254 | 2014-06-17T00:19:52 | 2014-06-17T00:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,005 | r | server.R |
# This is the server logic for a Shiny web application that
# uses the mtcars data set ("mt" as in "Motor Trend") from
# a standard R distribution.
#
# The weight of the car and the number of cylinders in the
# car's engine are used to predict gas mileage via a linear model.
#
library(shiny)
library(ggplot2)
shinyServer(function(input, output) {
  # For efficiency, run the 'expensive' model fit + prediction inside one
  # reactive, so Shiny computes it once per input change even though it is
  # consumed twice (sidebar text and plot).
  predicted.mpg <- reactive({
    # Fit the linear model; additional predictors could be added here.
    model <- lm(mpg ~ cyl + wt, data=mtcars)
    # Predict mpg for the user's selected cylinder count and weight.
    x <- predict(model, newdata=data.frame(cyl=input$cyl, wt=input$wt))
    # Return the single predicted value to the caller.
    return(x[1])
  })
  # Sidebar text: the formatted prediction.
  output$predictedMpg <- renderText(sprintf('Prediction: %.1f MPG',predicted.mpg()))
  # Main panel: all Motor Trend cars plus the user's hypothetical car and
  # its predicted mpg.
  output$mpgPlot <- renderPlot({
    # Start from a copy of mtcars and append a fake row for the user.
    zcars <- mtcars
    # Provenance column, used for the plot's shape aesthetic.
    zcars$from = 'Motor Trend'
    # Build the user's car; columns not used by the model get placeholder
    # 1s so the row binds cleanly onto mtcars.
    new.car <- data.frame(
      mpg = predicted.mpg(),
      cyl = input$cyl,
      disp = 1,
      hp = 1,
      drat = 1,
      wt = input$wt,
      qsec = 1,
      vs = 1,
      am = 1,
      gear = 1,
      carb = 1,
      from = 'Prediction'
    )
    # Append the new car.
    zcars <- rbind(zcars, new.car)
    rownames(zcars)[nrow(zcars)] <- "user"
    # Highlight the user's point with a shaded rectangle; the offsets are
    # small fractions of each axis range.
    xfactor <- (max(zcars$wt) - min(zcars$wt)) * 0.025
    yfactor <- (max(zcars$cyl) - min(zcars$cyl)) * 0.25
    # Convert cylinders to a factor so we get discrete values and colors.
    # This is done AFTER the max/min above, which need cyl numeric.
    zcars$cyl <- as.factor(zcars$cyl)
    # Build the plot.
    p <- ggplot(zcars, aes(x=wt, y=mpg, color=cyl, shape=from)) +
      annotate('rect',
               xmin=new.car$wt-xfactor, ymin=new.car$mpg-yfactor,
               xmax=new.car$wt+xfactor, ymax=new.car$mpg+yfactor,
               alpha=0.3,
               fill='darkorange') +
      geom_point(size=5, alpha=0.75) +
      xlab('Weight (tons)') +
      ylab('Miles Per Gallon') +
      ggtitle('Predicting Miles Per Gallon\nfrom Engine Cylinder Count and Weight\n') +
      theme_bw()
    # print() renders the ggplot inside renderPlot.
    print(p)
  })
}) |
14c25dfbdd184ff7dde908d1fc372c0bbf1de31e | 8a79ffea403d4daa70cb3868a67d8a0e5ffe0535 | /Exerc_04.R | 09a482d54bced5e3519161389ef995284c2fd28c | [] | no_license | MAR0NY/Marony | df02c68633d0d9c43d4d86c2d79bdce6d49a0e70 | 80644a5c051acf58f1cd593d1d10529f3d12715d | refs/heads/master | 2020-03-19T00:14:30.705590 | 2018-10-18T14:51:24 | 2018-10-18T14:51:24 | 135,465,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,974 | r | Exerc_04.R | # Exercícios aula 04
# Bootstrap: install any course packages that are missing, load them all,
# then clean up the temporary names.
pacotes <- c("tidyverse", "lubridate", "janitor", "readxl", "stringr", "repmis")
faltantes <- setdiff(pacotes, rownames(installed.packages()))
if (length(faltantes) > 0) {
  install.packages(faltantes)
}
# require() (not library()) so a failed load returns FALSE instead of erroring.
lapply(pacotes, require, character.only = TRUE)
rm(pacotes, faltantes)
gc()
######################
# Exercises: Class 02 #
######################
# 1. Load the RDS data (scraped court decisions). ----
decisoes <- readRDS("~/Marony/decisoes.rds")
decisoes
# 2. Inspect the data (printing shows column types and a preview). ----
decisoes
# 3. Select columns; the second version keeps those ending in "cisao". ----
decisoes.exem <- decisoes %>%
  select(id_decisao, n_processo, municipio, juiz)
decisoes.exem
decisoes.end.cisao <- decisoes %>%
  select(id_decisao, ends_with("cisao"))
decisoes.end.cisao
# 4. Drop the decision-text and class/subject columns. ----
### Tip: see the `?select` examples under `Drop variables ...`
# A leading minus inside select() removes that column.
decisoes
decisoes.peq <- decisoes %>%
  select(-txt_decisao, -classe_assunto)
decisoes.peq
# 5. Keep only rows where `id_decisao` is NA. ----
decisoes.na <- decisoes %>%
  filter(is.na(id_decisao))
decisoes.na
# is.na() flags the missing elements of id_decisao, so filter() keeps
# exactly the rows without a decision id.
# 6. Keep all decisions from 2018. ----
### Tip: lubridate::year() on the dmy()-parsed date string.
decisoes2018 <- decisoes %>%
  filter(year(dmy(data_decisao)) == 2018)
decisoes2018
# 7 (mutate example). How long between the registration date and the
# decision date? mutate() adds the `tempo` (difftime) column.
decisoes.tempo <- decisoes %>%
  select(n_processo, data_decisao, data_registro) %>%
  mutate(tempo = dmy(data_registro) - dmy(data_decisao))
decisoes.tempo
# 7. Create a logical column `droga`: TRUE when the decision text mentions
# drugs, FALSE otherwise. ----
### Tip: str_detect
### Note: cover 'droga' plus synonyms/examples, and drop rows whose
### txt_decisao is empty first.
# Flag drug-related decisions: lower-case the text, then regex-match drug
# terms.
# NOTE(review): this regex has encoding damage — "psicotr[?o]pico" and
# "coca[?i]na" should use accented characters as in the corrected version
# under #14 below; as written the character classes also match a literal '?'.
decisoes.sobre.droga <- decisoes %>%
  # keep rows that actually have decision text
  filter(!is.na(txt_decisao)) %>%
  # tolower() makes the match case-insensitive; mutate() builds the flag
  mutate(txt_decisao = tolower(txt_decisao),
         droga = str_detect(txt_decisao,
                            "droga|entorpecente|psicotr[?o]pico|maconha|haxixe|coca[?i]na")) %>%
  dplyr::select(n_processo,droga)
decisoes.sobre.droga
# 8. How many decisions mention drugs? Count per TRUE/FALSE group.
decisoes.sobre.droga %>%
  group_by(droga) %>%
  summarise(n=n())%>%
  head()
#9
# 8/9. Who are the five most verbose judges? Median decision length per
# judge, restricted to judges with at least 10 decisions. ----
### Tip: use str_length()
### Remember head()
decisoes %>%
  filter(!is.na(txt_decisao)) %>%
  mutate(tamanho = str_length(txt_decisao)) %>%
  group_by(juiz) %>%
  summarise(n = n(),
            tamanho_mediana = median(tamanho)) %>%
  filter(n >= 10) %>%
  arrange(desc(tamanho_mediana)) %>%
  head(5)
#10 judges whose name contains 'z' or 'Z'
decisoes %>%
  select(juiz) %>%
  filter(str_detect(juiz, regex("z", ignore_case = TRUE))) %>%
  # count and sort judges in decreasing order
  count(juiz, sort = TRUE) %>%
  head()
#11 Case duration per municipality: years between the filing year encoded
# in the case number and the judgment year.
decisoes %>%
  select(n_processo, municipio, data_decisao) %>%
  # judgment year from the decision date
  mutate(ano_julgamento = year(dmy(data_decisao)),
         # filing year from the case number: "0057003-20.2017.8.26.0000" -> "2017"
         ano_proc = str_sub(n_processo, 12, 15),
         # coerce the year to numeric
         ano_proc = as.numeric(ano_proc),
         # elapsed time in years
         tempo_anos = ano_julgamento - ano_proc) %>%
  group_by(municipio) %>%
  summarise(n = n(),
            media_anos = mean(tempo_anos),
            min_anos = min(tempo_anos),
            max_anos = max(tempo_anos))
#12 Share of decisions per judge, formatted as a percentage.
decisoes %>%
  count(juiz, sort = TRUE) %>%
  mutate(prop = n / sum(n),
         prop = scales::percent(prop))
#13 Same proportions, without the % formatting.
decisoes %>%
  count(juiz, sort = TRUE) %>%
  mutate(prop = prop.table(n))
#14 Per judge: counts of drug vs non-drug decisions and the drug share.
decisoes.drogas <- decisoes %>%
  filter(!is.na(txt_decisao)) %>%
  mutate(txt_decisao = tolower(txt_decisao),
         droga = str_detect(txt_decisao,
                            "droga|entorpecente|psicotr[óo]pico|maconha|haxixe|coca[íi]na"),
         # recode the logical into the labels used as spread() columns
         droga=case_when(
           droga==TRUE ~ "droga",
           droga==FALSE ~ "n_droga"
         )) %>%
  group_by(juiz,droga) %>%
  summarise(n=n()) %>%
  # wide form: one column per label, 0 where a judge has no such decisions
  spread(droga,n,fill = 0) %>%
  mutate(total=droga+n_droga,
         proporcao=droga/total)
decisoes.drogas
#15 Total number of decisions per judge (unfinished draft).
# NOTE(review): this block is incomplete and would not run as written:
# filter(data_decisao) needs a logical condition, n() takes no arguments,
# `droga` does not exist in this pipeline for spread() to use, and the
# final %>% dangles with no following stage. It appears to be an
# abandoned attempt; it should be finished or removed.
quant.dec.mensal <- decisoes %>%
  filter(data_decisao) %>%
  mutate(quant.mes = n(txt_decisao)) %>%
  group_by(juiz,quant.mes) %>%
  summarise(n=n()) %>%
  spread(droga,n,fill = 0) %>%
|
564c9252d9e1f33eb445a5bcf6fbed0a29f3694f | c4f40e3e960035cf877c7ae34e7a8e13009d7a98 | /constitution_amendments.R | e2721e12cf8a256b5b5fa0349ebdfafffbf2139b | [] | no_license | ContraDatos/constituciones | 11d7db8d9e9254726ed1c9022802dc619a62c3c1 | 59031271c1ac5779a0427000a52421c8288d750f | refs/heads/master | 2021-08-08T19:01:46.472131 | 2017-11-10T23:00:13 | 2017-11-10T23:00:13 | 110,159,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,291 | r | constitution_amendments.R | ##################################### LOAD LIBRARIES ##################################################
# Constitution age and amendment-rate analysis over the Comparative
# Constitutions Project chronology; three passes follow: [1] all
# countries, [2] tenure close to Spain's, [3] same amendment difficulty.
library(data.table);
library(foreach);
library(doParallel);
library(readstata13)
##################################### GLOBAL PATHS ##################################################
root_path<-"/home/jesus/Desktop/home/contradata/catalonia/";
input_path<-file.path(root_path,"input/");
output_path<-file.path(root_path,"output/");
scripts_path<-file.path(root_path,"scripts/R/");
##################################### SOURCE SCRIPTS ##################################################
##################################### GLOBAL VARIABLES ##################################################
# Number of workers for the doParallel backend (1 = effectively serial).
cores<-1;
################################## GLOBAL FUNCTIONS ##################################################
##################################### BEGIN ##################################################
registerDoParallel(cores=cores)
##################################### LOAD DATA ##################################################
# CCP chronology: one row per country/year event; evnttype takes values
# such as "new", "amendment" and "suspension" (used below).
dat<-fread(file.path(input_path,"ccpcce","ccpcce_v1_2.txt"))
##################################### CURRENT CONSTITUTIONS ##################################################
# Keep only countries whose series runs through 2013, i.e. with a
# constitution in force at the end of the sample.
max_year<-dat[,list(max_year=max(year)),by="country"]
dat<-merge(dat,max_year,by="country");
dat<-dat[max_year==2013]
################################## [1] GENERAL ITERATION ############################
##################################### CREATION YEAR ##################################################
# Get year of last constitution creation
origin<-dat[evnttype=="new"];
origin<-origin[,list(creation_year=max(year)),by="country"];
##################################### REMOVE SUSPENDED CONSTITUTIONS ##################################################
# Get year of last constitution suspension
suspension<-dat[evnttype=="suspension"];
suspension<-suspension[,list(suspension_year=max(year)),by="country"];
# Merge creation with suspension
origin<-merge(origin,suspension,by="country",all.x = TRUE)
# Keep constitutions never suspended, or whose last suspension predates
# the current constitution's creation.
origin<-origin[is.na(suspension_year) | (suspension_year < creation_year)];
##################################### TENURE ##################################################
# Tenure = years the current constitution has been in force.
origin$tenure<-max(dat$year)-origin$creation_year;
# Remove too recent constitutions (keep tenure > 21 years).
origin<-origin[tenure > 21]
# Sort by tenure
origin<-origin[order(tenure)];
# Spain's rank by age (recorded result: 75th oldest, ~60th percentile).
which(origin$country=="Spain")
100*which(origin$country=="Spain")/nrow(origin)
##################################### AMENDMENTS PER YEAR ##################################################
# Get amendment events
amendments<-dat[evnttype=="amendment"];
# Keep only amendments to the CURRENT constitution (year >= creation_year).
amendments<-merge(origin,amendments,by="country")
amendments<-amendments[year>=creation_year];
# Total amendments per country; countries with none get 0 via the
# all.x merge below.
amendments<-amendments[,list(n_amendments=(COUNT=.N)),by="country"];
amendments<-merge(origin,amendments,by="country",all.x = TRUE)
amendments[is.na(amendments$n_amendments)]$n_amendments<-0;
amendments<-amendments[,c("country","n_amendments"),with=F]
# Amendments per year of tenure (rounded here only — the later iterations
# skip the rounding; note the inconsistency).
amendments<-merge(amendments,origin,by="country");
amendments$amendments_per_year<-round(amendments$n_amendments/amendments$tenure,4);
# Sort by amendments per year
amendments<-amendments[order(-amendments_per_year)];
# Spain's rank (recorded result: 91st fewest per year, ~72%).
which(amendments$country=="Spain")
100*which(amendments$country=="Spain")/nrow(amendments)
##################################### FINAL RESULTS ##################################################
# NOTE(review): iterations [2] and [3] write to the same results.csv and
# therefore overwrite this output.
res<-amendments;
write.table(res,file.path(output_path,"results.csv"),sep=";",col.names = TRUE,row.names = FALSE)
##################################### STATISTICS ##################################################
summary(amendments$amendments_per_year)
amendments$amendments_per_year[amendments$country=="Spain"]
##################################### PLOTS ##################################################
boxplot(amendments$amendments_per_year,main="Countries Constitutions Amendments Per Year")
# Overlay Spain as a red triangle.
# NOTE(review): `data = InsectSprays` looks like a leftover from the
# stripchart() example — presumably ignored by the default (vector)
# method; confirm and remove.
stripchart(amendments$amendments_per_year[amendments$country=="Spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - EUROPE ##################################################
# Normalise country names (lower-case, strip parentheticals, "/"-variants
# and spaces) so they match the Europe lookup table.
europe_countries<-unique(fread(file.path(input_path,"europe_countries"))$V2)
europe_countries<-tolower(europe_countries)
amendments$country<-tolower(amendments$country)
amendments$country<-sapply(strsplit(amendments$country,split="\\("),"[",1)
amendments$country<-sapply(strsplit(amendments$country,split="/"),"[",1)
amendments$country<-gsub(" ","",amendments$country)
europe_amendments<-amendments[country %in% europe_countries]
# Rescue two naming mismatches from the non-matching remainder.
bad<-amendments[!(country %in% europe_countries)]
europe_amendments<-rbind(europe_amendments,bad[country=="german federal republic"])
europe_amendments<-rbind(europe_amendments,bad[country=="bosnia-herzegovina"])
# Sort by amendments per year
europe_amendments<-europe_amendments[order(-amendments_per_year)];
# Spain's rank in Europe (recorded result: 26th fewest per year, ~76%).
which(europe_amendments$country=="spain")
100*which(europe_amendments$country=="spain")/nrow(europe_amendments)
summary(europe_amendments$amendments_per_year)
europe_amendments$amendments_per_year[europe_amendments$country=="spain"]
##################################### PLOTS - EUROPE ##################################################
boxplot(europe_amendments$amendments_per_year,main="European Countries Constitutions Amendments Per Year")
stripchart(europe_amendments$amendments_per_year[europe_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - UE ##################################################
# Restrict to EU members (column V11 of the lookup flags "EU").
ue_countries<-unique(fread(file.path(input_path,"europe_countries"))[V11=="EU"]$V2)
ue_countries<-tolower(ue_countries)
ue_amendments<-amendments[country %in% ue_countries]
# NOTE(review): `bad` is recomputed here but never used in this section.
bad<-europe_amendments[!(country %in% ue_countries)]
# Sort by amendments per year
ue_amendments<-ue_amendments[order(-amendments_per_year)];
# Spain's rank in the EU (recorded result: 17th fewest per year, ~85%).
which(ue_amendments$country=="spain")
100*which(ue_amendments$country=="spain")/nrow(ue_amendments)
summary(ue_amendments$amendments_per_year)
ue_amendments$amendments_per_year[ue_amendments$country=="spain"]
##################################### PLOTS - UE ##################################################
boxplot(ue_amendments$amendments_per_year,main="UE Countries Constitutions Amendments Per Year")
stripchart(ue_amendments$amendments_per_year[ue_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
################################## [2] CLOSE TENURE ITERATION ############################
# Same pipeline as [1], restricted to constitutions whose tenure is close
# to Spain's (between 30 and 40 years).
##################################### CREATION YEAR ##################################################
# Get year of last constitution creation
origin<-dat[evnttype=="new"];
origin<-origin[,list(creation_year=max(year)),by="country"];
##################################### REMOVE SUSPENDED CONSTITUTIONS ##################################################
# Get year of last constitution suspension
suspension<-dat[evnttype=="suspension"];
suspension<-suspension[,list(suspension_year=max(year)),by="country"];
# Merge creation with suspension
origin<-merge(origin,suspension,by="country",all.x = TRUE)
# Remove suspended constitutions
origin<-origin[is.na(suspension_year) | (suspension_year < creation_year)];
##################################### TENURE ##################################################
# Get tenure
origin$tenure<-max(dat$year)-origin$creation_year;
# Keep constitutions 30-40 years old (the original "too recent" wording
# was stale copy-paste from iteration [1]).
origin<-origin[tenure <= 40 & tenure >= 30]
# Sort by tenure
origin<-origin[order(tenure)];
# NOTE(review): the rank figures in the original comments here were copied
# from iteration [1] and do not correspond to this restricted sample.
which(origin$country=="Spain")
100*which(origin$country=="Spain")/nrow(origin)
##################################### AMENDMENTS PER YEAR ##################################################
# Get amendment events
amendments<-dat[evnttype=="amendment"];
# Merge to obtain creation year
amendments<-merge(origin,amendments,by="country")
amendments<-amendments[year>=creation_year];
# Get total amendments
amendments<-amendments[,list(n_amendments=(COUNT=.N)),by="country"];
amendments<-merge(origin,amendments,by="country",all.x = TRUE)
amendments[is.na(amendments$n_amendments)]$n_amendments<-0;
amendments<-amendments[,c("country","n_amendments"),with=F]
# Amendments per year (unrounded here, unlike iteration [1]).
amendments<-merge(amendments,origin,by="country");
amendments$amendments_per_year<-amendments$n_amendments/amendments$tenure;
# Sort by amendments per year
amendments<-amendments[order(-amendments_per_year)];
which(amendments$country=="Spain")
100*which(amendments$country=="Spain")/nrow(amendments)
##################################### FINAL RESULTS ##################################################
# NOTE(review): overwrites the results.csv written by iteration [1].
res<-amendments;
write.table(res,file.path(output_path,"results.csv"),sep=";",col.names = TRUE,row.names = FALSE)
##################################### STATISTICS ##################################################
summary(amendments$amendments_per_year)
amendments$amendments_per_year[amendments$country=="Spain"]
##################################### PLOTS ##################################################
boxplot(amendments$amendments_per_year,main="Countries Constitutions Amendments Per Year")
stripchart(amendments$amendments_per_year[amendments$country=="Spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - EUROPE ##################################################
# Europe subset for iteration [2]; same name normalisation as before.
europe_countries<-unique(fread(file.path(input_path,"europe_countries"))$V2)
europe_countries<-tolower(europe_countries)
amendments$country<-tolower(amendments$country)
amendments$country<-sapply(strsplit(amendments$country,split="\\("),"[",1)
amendments$country<-sapply(strsplit(amendments$country,split="/"),"[",1)
amendments$country<-gsub(" ","",amendments$country)
europe_amendments<-amendments[country %in% europe_countries]
bad<-amendments[!(country %in% europe_countries)]
europe_amendments<-rbind(europe_amendments,bad[country=="german federal republic"])
europe_amendments<-rbind(europe_amendments,bad[country=="bosnia-herzegovina"])
# Sort by amendments per year
europe_amendments<-europe_amendments[order(-amendments_per_year)];
# NOTE(review): the rank/percentile figures in the original comments were
# copied from iteration [1]; they are not recomputed for this sample.
which(europe_amendments$country=="spain")
100*which(europe_amendments$country=="spain")/nrow(europe_amendments)
summary(europe_amendments$amendments_per_year)
europe_amendments$amendments_per_year[europe_amendments$country=="spain"]
##################################### PLOTS - EUROPE ##################################################
boxplot(europe_amendments$amendments_per_year,main="European Countries Constitutions Amendments Per Year")
stripchart(europe_amendments$amendments_per_year[europe_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - UE ##################################################
# EU subset for iteration [2].
ue_countries<-unique(fread(file.path(input_path,"europe_countries"))[V11=="EU"]$V2)
ue_countries<-tolower(ue_countries)
ue_amendments<-amendments[country %in% ue_countries]
bad<-europe_amendments[!(country %in% ue_countries)]
# Sort by amendments per year
ue_amendments<-ue_amendments[order(-amendments_per_year)];
which(ue_amendments$country=="spain")
100*which(ue_amendments$country=="spain")/nrow(ue_amendments)
summary(ue_amendments$amendments_per_year)
ue_amendments$amendments_per_year[ue_amendments$country=="spain"]
##################################### PLOTS - UE ##################################################
boxplot(ue_amendments$amendments_per_year,main="UE Countries Constitutions Amendments Per Year")
stripchart(ue_amendments$amendments_per_year[ue_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
################################## [3] SAME DIFFICULTY ITERATION ############################
# Third iteration of the analysis: rank Spain only among countries whose
# constitutions share Spain's amendment-difficulty score (ad_ak) and have a
# comparable tenure (> 21 years).  Relies on `dat`, `input_path` and
# `output_path` defined earlier in the script.
##################################### CREATION YEAR ##################################################
# Get year of last constitution creation
origin<-dat[evnttype=="new"];
origin<-origin[,list(creation_year=max(year)),by="country"];
##################################### REMOVE SUSPENDED CONSTITUTIONS ##################################################
# Get year of last constitution suspension
suspension<-dat[evnttype=="suspension"];
suspension<-suspension[,list(suspension_year=max(year)),by="country"];
# Merge creation with suspension
origin<-merge(origin,suspension,by="country",all.x = TRUE)
# Remove suspended constitutions: keep countries never suspended, or whose
# last suspension predates the current constitution.
origin<-origin[is.na(suspension_year) | (suspension_year < creation_year)];
##################################### TENURE ##################################################
# Get tenure: years the current constitution has been in force.
origin$tenure<-max(dat$year)-origin$creation_year;
# Merge with difficulty: ad_ak is the amendment-difficulty score from the
# CCP ADData file; keep each country's most recent value only.
difficulty<-data.table(read.dta13(file.path(input_path,"ccpcce","ADData.dta")))
max_years<-difficulty[,list(max_year=max(year)),by="country"]
difficulty<-merge(difficulty,max_years,by="country")
difficulty<-difficulty[year == max_year]
difficulty<-difficulty[,c("country","ad_ak"),with=F]
origin<-merge(origin,difficulty,by="country")
# Keep only countries whose difficulty score equals Spain's ...
origin<-origin[ad_ak == origin[country=="Spain"]$ad_ak]
# ... and with more than 21 years of tenure.
origin<-origin[tenure > 21]
# Sort by tenure
origin<-origin[order(tenure)];
# Get Spain position (75 oldest - 60%)
which(origin$country=="Spain")
100*which(origin$country=="Spain")/nrow(origin)
##################################### AMENDMENTS PER YEAR ##################################################
# Get amendments per year
amendments<-dat[evnttype=="amendment"];
# Merge to obtain creation year; only amendments to the current constitution
# (year >= creation_year) are counted.
amendments<-merge(origin,amendments,by="country")
amendments<-amendments[year>=creation_year];
# Get total amendments
amendments<-amendments[,list(n_amendments=(COUNT=.N)),by="country"];
amendments<-merge(origin,amendments,by="country",all.x = TRUE)
# Countries with no amendment events get an explicit count of zero.
amendments[is.na(amendments$n_amendments)]$n_amendments<-0;
amendments<-amendments[,c("country","n_amendments"),with=F]
# Get amendments per year
amendments<-merge(amendments,origin,by="country");
amendments$amendments_per_year<-amendments$n_amendments/amendments$tenure;
# Sort by amendments per year
amendments<-amendments[order(-amendments_per_year)];
# Get Spain position (91 less amendments per year -> 72%)
which(amendments$country=="Spain")
100*which(amendments$country=="Spain")/nrow(amendments)
##################################### FINAL RESULTS ##################################################
# Save to final results
res<-amendments;
write.table(res,file.path(output_path,"results.csv"),sep=";",col.names = TRUE,row.names = FALSE)
##################################### STATISTICS ##################################################
summary(amendments$amendments_per_year)
amendments$amendments_per_year[amendments$country=="Spain"]
##################################### PLOTS ##################################################
boxplot(amendments$amendments_per_year,main="Countries Constitutions Amendments Per Year")
# NOTE(review): `data = InsectSprays` is a leftover example argument (see the
# earlier stripchart calls) — the first argument is already a plain vector.
stripchart(amendments$amendments_per_year[amendments$country=="Spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - EUROPE ##################################################
europe_countries<-unique(fread(file.path(input_path,"europe_countries"))$V2)
europe_countries<-tolower(europe_countries)
# Normalise country names: lower-case, drop parenthesised qualifiers and
# slash-separated alternates, then strip spaces.
# NOTE(review): gsub(" ","",...) removes ALL spaces, so multi-word names such
# as "united kingdom" become "unitedkingdom" and can no longer match the
# (space-containing) names in `europe_countries`; this is likely why the two
# rbind() rescues below exist — verify against the europe_countries file.
amendments$country<-tolower(amendments$country)
amendments$country<-sapply(strsplit(amendments$country,split="\\("),"[",1)
amendments$country<-sapply(strsplit(amendments$country,split="/"),"[",1)
amendments$country<-gsub(" ","",amendments$country)
europe_amendments<-amendments[country %in% europe_countries]
bad<-amendments[!(country %in% europe_countries)]
europe_amendments<-rbind(europe_amendments,bad[country=="german federal republic"])
europe_amendments<-rbind(europe_amendments,bad[country=="bosnia-herzegovina"])
# Get Spain position (26 less amendments per year -> 76%)
which(europe_amendments$country=="spain")
100*which(europe_amendments$country=="spain")/nrow(europe_amendments)
summary(europe_amendments$amendments_per_year)
europe_amendments$amendments_per_year[europe_amendments$country=="spain"]
##################################### PLOTS - EUROPE ##################################################
boxplot(europe_amendments$amendments_per_year,main="European Countries Constitutions Amendments Per Year")
stripchart(europe_amendments$amendments_per_year[europe_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
##################################### STATISTICS - UE ##################################################
# Column V11 == "EU" flags European Union members in the europe_countries file.
ue_countries<-unique(fread(file.path(input_path,"europe_countries"))[V11=="EU"]$V2)
ue_countries<-tolower(ue_countries)
ue_amendments<-amendments[country %in% ue_countries]
bad<-europe_amendments[!(country %in% ue_countries)]
# Get Spain position (17 less amendments per year -> 85%)
which(ue_amendments$country=="spain")
100*which(ue_amendments$country=="spain")/nrow(ue_amendments)
summary(ue_amendments$amendments_per_year)
ue_amendments$amendments_per_year[ue_amendments$country=="spain"]
##################################### PLOTS - UE ##################################################
boxplot(ue_amendments$amendments_per_year,main="UE Countries Constitutions Amendments Per Year")
stripchart(ue_amendments$amendments_per_year[ue_amendments$country=="spain"], data = InsectSprays,
           vertical = TRUE, method = "jitter",
           pch = 17, col = "red", bg = "bisque",
           add = TRUE)
################################## [4] AMENDMENTS BY DIFFICULTY ############################
# Fourth iteration: instead of ranking Spain, compute the MEAN amendments per
# year at each level of amendment difficulty (ad_ak), for the world, Europe
# and the EU.  The pipeline mirrors section [3] except that all difficulty
# levels are kept (only NA scores are dropped) and no tenure cut is applied.
##################################### CREATION YEAR ##################################################
# Get year of last constitution creation
origin<-dat[evnttype=="new"];
origin<-origin[,list(creation_year=max(year)),by="country"];
##################################### REMOVE SUSPENDED CONSTITUTIONS ##################################################
# Get year of last constitution suspension
suspension<-dat[evnttype=="suspension"];
suspension<-suspension[,list(suspension_year=max(year)),by="country"];
# Merge creation with suspension
origin<-merge(origin,suspension,by="country",all.x = TRUE)
# Remove suspended constitutions
origin<-origin[is.na(suspension_year) | (suspension_year < creation_year)];
##################################### TENURE ##################################################
# Get tenure
origin$tenure<-max(dat$year)-origin$creation_year;
# Attach each country's most recent amendment-difficulty score (ad_ak).
difficulty<-data.table(read.dta13(file.path(input_path,"ccpcce","ADData.dta")))
max_years<-difficulty[,list(max_year=max(year)),by="country"]
difficulty<-merge(difficulty,max_years,by="country")
difficulty<-difficulty[year == max_year]
difficulty<-difficulty[,c("country","ad_ak"),with=F]
origin<-merge(origin,difficulty,by="country")
origin<-origin[!is.na(ad_ak)]
# Sort by tenure
origin<-origin[order(tenure)];
# Get Spain position (75 oldest - 60%)
which(origin$country=="Spain")
100*which(origin$country=="Spain")/nrow(origin)
##################################### AMENDMENTS PER YEAR ##################################################
# Get amendments per year (same construction as in section [3]).
amendments<-dat[evnttype=="amendment"];
# Merge to obtain creation year
amendments<-merge(origin,amendments,by="country")
amendments<-amendments[year>=creation_year];
# Get total amendments
amendments<-amendments[,list(n_amendments=(COUNT=.N)),by="country"];
amendments<-merge(origin,amendments,by="country",all.x = TRUE)
amendments[is.na(amendments$n_amendments)]$n_amendments<-0;
amendments<-amendments[,c("country","n_amendments"),with=F]
# Get amendments per year
amendments<-merge(amendments,origin,by="country");
amendments$amendments_per_year<-amendments$n_amendments/amendments$tenure;
# Sort by amendments per year
amendments<-amendments[order(-amendments_per_year)];
# Get mean amendments_per_year by level of amendment difficulty
mean_by_diff<-amendments[,list(amendments_per_year=mean(amendments_per_year)),by="ad_ak"]
mean_by_diff<-mean_by_diff[order(ad_ak)]
mean_by_diff
##################################### STATISTICS - EUROPE ##################################################
europe_countries<-unique(fread(file.path(input_path,"europe_countries"))$V2)
europe_countries<-tolower(europe_countries)
# Same name normalisation as in section [3]; the same caveat applies:
# stripping ALL spaces breaks matching for multi-word country names.
amendments$country<-tolower(amendments$country)
amendments$country<-sapply(strsplit(amendments$country,split="\\("),"[",1)
amendments$country<-sapply(strsplit(amendments$country,split="/"),"[",1)
amendments$country<-gsub(" ","",amendments$country)
europe_amendments<-amendments[country %in% europe_countries]
bad<-amendments[!(country %in% europe_countries)]
europe_amendments<-rbind(europe_amendments,bad[country=="german federal republic"])
europe_amendments<-rbind(europe_amendments,bad[country=="bosnia-herzegovina"])
# Get mean amendments_per_year by level of amendment difficulty
mean_by_diff<-europe_amendments[,list(amendments_per_year=mean(amendments_per_year)),by="ad_ak"]
mean_by_diff<-mean_by_diff[order(ad_ak)]
mean_by_diff
##################################### STATISTICS - UE ##################################################
ue_countries<-unique(fread(file.path(input_path,"europe_countries"))[V11=="EU"]$V2)
ue_countries<-tolower(ue_countries)
ue_amendments<-amendments[country %in% ue_countries]
bad<-europe_amendments[!(country %in% ue_countries)]
# Get mean amendments_per_year by level of amendment difficulty
mean_by_diff<-ue_amendments[,list(amendments_per_year=mean(amendments_per_year)),by="ad_ak"]
mean_by_diff<-mean_by_diff[order(ad_ak)]
mean_by_diff
################################## [5] REGRESSION MODEL ############################
# Fit amendments_per_year ~ ad_ak on EU countries (difficulty capped at 6)
# and compare the fit's R-squared against a model where ad_ak is replaced by
# pure noise, as a sanity check on the explanatory power of difficulty.
# Data pre-processing
X<-ue_amendments[,c("ad_ak","amendments_per_year"),with=F];
X<-X[ad_ak <= 6]
setnames(X,"amendments_per_year","y")
# Compute model mse
# NOTE(review): `predictions`, `null_predictions` and `y_test` are not
# defined anywhere in this section (the model is only fit two lines below);
# these two lines look misplaced or left over from an earlier train/test
# iteration — they will error unless those objects survive from earlier code.
mse<-mean((predictions-y_test)^2)
null_mse<-mean((null_predictions-y_test)^2)
# Check R-squared
model <- lm(y ~ .,data=X)
# Replace the predictor with N(0, 10) noise to obtain a null baseline.
X$ad_ak<-rnorm(nrow(X),mean = 0,sd = 10)
random_model <- lm(y ~.,data=X)
summary(model)$r.squared
summary(random_model)$r.squared
|
6581223a2618a613b502c9a893bcf5ac6e6323db | a3f23e8d7ac043c02d5de504215d29d3ac25c932 | /man/get_transverse_spin.Rd | 3d8e598a454c5cfdcbab53aa8fcfdc991761ecde | [] | no_license | pmelgren/RStatcastUtils | b8b3e8c44fc745e432a35a6edc9aefe4019c46cc | a32619f26acdbb091c1f4fdc548caa774c3d1343 | refs/heads/master | 2020-04-16T20:00:41.993220 | 2020-03-24T17:40:56 | 2020-03-24T17:40:56 | 165,883,054 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,910 | rd | get_transverse_spin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Trackman_functions.R
\name{get_transverse_spin}
\alias{get_transverse_spin}
\title{Get Transverse Spin}
\usage{
get_transverse_spin(extension, tot_spin, vx0, vy0, vz0, ax, ay, az,
direction = "TRANSVERSE")
}
\arguments{
\item{extension}{Release Extension (In Feet From Mound)}
\item{tot_spin}{Release Total Spin Rate (RpM)}
\item{vx0}{Initial X Velocity (MpH)}
\item{vy0}{Initial Y Velocity (MpH)}
\item{vz0}{Initial Z Velocity (MpH)}
\item{ax}{X Acceleration}
\item{ay}{Y Acceleration}
\item{az}{Z Acceleration}
\item{direction}{Which spin component should be returned? See details}
}
\value{
If directional = TRUE, the function will return a named list with
total transverse spin as well as the spin in the X,Y, and Z directions,
otherwise it will return a scalar representing the total transverse spin.
}
\description{
Get Transverse Spin takes the Trackman data that is publicly available and
uses it to calculate the trasverse (useful) spin. The calculations in this
package are based on the following paper by Dr. Allen Nathan:
http://baseball.physics.illinois.edu/trackman/SpinAxis.pdf
and all calculations were directly adapted from this workbook, also by
Dr. Nathan:
http://baseball.physics.illinois.edu/trackman/MovementSpinEfficiencyTemplate.xlsx
}
\details{
The direction argument will return the total transverse spin if
direction=="TRANSVERSE". "X","Y", or "Z" will return the transvers spin in
that direction. Direction == "ALL" will return a named list consisting of
the transverse spin (spinT) as well as all components (spinTx,spinTy,spinTz)
}
\note{
The current version of this function is based on atmospheric
conditions inside Tropicana Field in June, but future versions should
include parameters to specify atmospheric conditions so as to allow for
more accurate spin estimates across ballparks.
}
|
e81543b1a471ec779477d55343d666ddb7410b58 | 23fde635322bd2786b7bcccf0166bc7a27c59f6f | /R/nModule.R | f6e3f02b4c8d584620f316afd3f01e2b8d3fe859 | [] | no_license | bioinfoxh/TERM | f0697dd184cb713a714da48f3eb30c0d96b4d603 | 51ccc79fa1ee532e31231ed88e0f1cd98edb4834 | refs/heads/master | 2020-04-11T18:46:18.106350 | 2018-12-16T14:51:57 | 2018-12-16T14:51:57 | 120,759,029 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,775 | r | nModule.R |
# select candidates for seeds with various constrains:
# cur_node: the current nodes of the n-module
# threshold: the values the candidate must satisfying
# the strategy: 1. get the submatrix; 2. select only gene only one time based on the submatrix;
# networks=multi_netwk
# cur_node=ori_module[[i]]$members
# threshold=beta
# min_num=min_num_netw
# size_cand=size_cand
# Select candidate neighbour nodes for growing a module.
#
# networks : 3-D array (node x node x layer) of edge weights.
# cur_node : indices of the nodes already in the module.  Assumed length >= 2
#            and not covering the whole network; with a single member (or a
#            single remaining node) the array slice below would drop a
#            dimension, as in the historical implementation.
# threshold: minimum total connection weight to the module for a layer to
#            count as "strong".
# min_num  : minimum number of strong layers (the cross-layer mean acts as an
#            extra pseudo-layer) required for a node to become a candidate.
# size_cand: maximum number of candidates returned.
#
# Returns the indices (in the full network) of the selected candidate nodes,
# ordered by total connection weight to the module; length 0 when no node
# qualifies.
Cand_neibor <- function(networks, cur_node, threshold, min_num, size_cand)
{
  num_node <- dim(networks)[1]
  num_netw <- dim(networks)[3]
  rem_node <- setdiff(seq_len(num_node), cur_node)
  num_rem_node <- length(rem_node)
  # Edge weights from every remaining node to the current module, per layer.
  rem_networks <- networks[rem_node, cur_node, ]
  # One column per layer plus a final column holding the cross-layer mean.
  rem_connect <- array(0, dim = c(num_rem_node, num_netw + 1))
  for (k in seq_len(num_netw)) {
    rem_connect[, k] <- rowSums(rem_networks[, , k])
  }
  rem_connect[, num_netw + 1] <-
    rowSums(rem_connect[, seq_len(num_netw), drop = FALSE]) / num_netw
  #******************* First level: filter by connection strength ************
  # A node qualifies when it is connected to the module in every layer
  # (min > 0), at least one layer exceeds `threshold` (max > threshold), and
  # the number of columns above `threshold` reaches `min_num`.
  strong_enough <- apply(rem_connect, 1, max) > threshold &
    apply(rem_connect, 1, min) > 0
  num_count <- rowSums(rem_connect > threshold) * strong_enough
  neibor <- which(num_count >= min_num)
  #******************* Second level: rank by weight and truncate *************
  if (length(neibor) == 1) {
    neibor <- rem_node[neibor]
  } else if (length(neibor) > 1) {
    rem_node <- rem_node[neibor]
    rem_networks <- rem_networks[neibor, , , drop = FALSE]
    # Total weight to the module summed over all members and all layers
    # (works for any number of layers, including a single one).
    can_weight <- apply(rem_networks, 1, sum)
    can_weight_list <- order(can_weight, decreasing = TRUE)
    keep <- can_weight_list[seq_len(min(size_cand, length(can_weight_list)))]
    neibor <- rem_node[keep]
  }
  neibor
}
# this is a fast version of N-modules:
#cmatrix: the adjacent matrix,
#cnode: the set of nodes;
#curentropy: the current entropy for members
# cmatrix=c_matrix
# cnode=c_node
# curentropy=ori_module[[i]]$p_entropy
# Evaluate every candidate node and pick the one whose addition minimises the
# module's mean entropy across layers.
#
# cmatrix   : edge weights of (members + candidates) x members x layer, i.e.
#             networks[c_node, members, ] as built by the caller (nModule).
# cnode     : c(members, candidates) — the first num_mem entries are the
#             current members, the rest are the candidates to test.
# curentropy: per-member bookkeeping matrix with columns
#             [gene id | within-module degree per layer | total degree per
#             layer | entropy per layer]; one row per current member.
# degree    : node x layer matrix of total degrees in the full network.
#
# Returns list(Min_evalue, Min_ematrix, Add_node):
#   Min_evalue  - best (lowest) mean entropy found (1000000 if none accepted),
#   Min_ematrix - the updated bookkeeping matrix for that candidate,
#   Add_node    - the winning candidate's node id (NULL if none accepted).
# A candidate is only accepted when it lowers EVERY layer's entropy by more
# than 0.001 and introduces no NA entropies.
multip_entropy <- function(cmatrix, cnode, curentropy, degree){
  #*************Step 1: compute the individual partial entropy for the candidate genes ********************
  num_mem = dim(curentropy)[1]; # number of members
  num_can = length(cnode) - num_mem; # number of candidates
  num_netw = ncol(degree)
  #*******************Step 2: compute the entropy changes for each input node ***************************************
  nsize = num_mem+1;
  Min_evalue = 1000000;
  Min_ematrix = c();
  Add_node = c();
  for(tempi in 1:num_can){
    # Build the would-be bookkeeping matrix with the candidate appended as
    # the last row.
    can_pentropy = array(0,dim=c(nsize,1+3*num_netw));
    can_pentropy[1:num_mem,] = curentropy;
    can_pentropy[nsize,1] = cnode[num_mem+tempi];
    can_pentropy[nsize,c(1:num_netw)+1+num_netw] = degree[can_pentropy[nsize,1],];
    # rel_matrix: candidate's edge weights to each member, per layer
    # (num_mem x num_netw).
    rel_matrix = cmatrix[num_mem+tempi,,];
    can_pentropy[nsize,c(1:num_netw)+1] = colSums(rel_matrix);
    can_pentropy[1:num_mem,c(1:num_netw)+1] = can_pentropy[1:num_mem,c(1:num_netw)+1]+rel_matrix; ### add the within degree of the new candidate to module members
    # Recompute each row's per-layer entropy from
    # p = within-module degree / total degree:
    #   p == 0 -> 2;  p == 1 -> 0;
    #   p in (0, 0.5] -> 2 + p*log2(p) + (1-p)*log2(1-p);
    #   p in (0.5, 1) -> -p*log2(p) - (1-p)*log2(1-p).
    for(tempi1 in 1:nsize){
      for(tempi2 in 1:num_netw){
        temp_indegree = can_pentropy[tempi1,tempi2+1]
        temp_totaldegree = can_pentropy[tempi1,tempi2+1+num_netw]
        tempprob = temp_indegree/temp_totaldegree
        if(tempprob==0){
          can_pentropy[tempi1,tempi2+1+2*num_netw] = 2
        }else if( tempprob>=1 ){
          can_pentropy[tempi1,tempi2+1+2*num_netw] = 0
        }else if( (tempprob>0) & (tempprob<=0.5) ){
          can_pentropy[tempi1,tempi2+1+2*num_netw] = 2 + tempprob*log2(tempprob)+(1-tempprob)*log2(1-tempprob)
        }else if( (tempprob>0.5) & (tempprob<1) ){
          can_pentropy[tempi1,tempi2+1+2*num_netw] = -tempprob*log2(tempprob)-(1-tempprob)*log2(1-tempprob);
        }
      } # tempi2
    } # tempi1
    # Mean entropy over all rows and layers; NA (e.g. total degree 0 for the
    # candidate) is penalised with 100 so it can never win.
    temp_entropy_value = sum(can_pentropy[,c(1:num_netw)+1+2*num_netw])/num_netw/(num_mem+1);
    temp_entropy_value[is.na(temp_entropy_value)] = 100;
    ori_entropy_value = colSums(curentropy[,c(1:num_netw)+1+2*num_netw])/num_mem;
    cur_entropy_value = colSums(can_pentropy[,c(1:num_netw)+1+2*num_netw])/(num_mem+1);
    # threshold = worst-case per-layer improvement; must be positive for
    # every layer for the candidate to be accepted.
    threshold = min(ori_entropy_value-cur_entropy_value);
    if(length(cur_entropy_value[is.na(cur_entropy_value)])==0){
      if ((temp_entropy_value<Min_evalue)&(threshold>0.001)) {
        Min_evalue = temp_entropy_value;
        Min_ematrix = can_pentropy;
        Add_node = cnode[tempi+num_mem];
      }
    }
  }
  list(Min_evalue,Min_ematrix,Add_node);
}
# the entropy value of each module
# Per-module, per-layer entropy of a list of modules on a multi-layer network.
#
# module   : list of modules; each element is a list with a `members` field
#            holding the node indices of that module (assumed length >= 2 so
#            the array slice keeps its dimensions, as in nModule's output).
# networks : 3-D array (node x node x layer) of edge weights.  Self-loops are
#            ignored (diagonals zeroed) and NA weights inside a module are
#            treated as 0.
#
# Each gene's entropy is the piecewise function of
# p = (within-module degree) / (total degree) used throughout this file:
#   p == 0 -> 2;  p == 1 -> 0;
#   p in (0, 0.5] -> 2 + p*log2(p) + (1-p)*log2(1-p);
#   p in (0.5, 1) -> -p*log2(p) - (1-p)*log2(1-p).
#
# Returns a matrix with one row per module: columns 1..L hold the per-layer
# mean gene entropies and column L+1 their average across layers.
entropy<-function(module,networks){
  num_module <- length(module)
  num_netw <- dim(networks)[3]
  num_node <- dim(networks)[1]
  # Total degree of every node in every layer, with self-loops removed.
  # (The historical code zeroed the diagonals twice; once is enough.)
  # NOTE: a member with total degree 0 yields p = NaN and errors below,
  # exactly as in the historical implementation.
  degree <- array(0, dim = c(num_node, num_netw))
  for (layer in seq_len(num_netw)) {
    diag(networks[, , layer]) <- 0
    degree[, layer] <- rowSums(networks[, , layer])
  }
  mentropy <- array(0, dim = c(num_module, num_netw + 1))
  for (i in seq_len(num_module)) {
    members <- module[[i]]$members
    modulelength <- length(members)
    adjmatrix <- networks[members, members, ]
    for (j in seq_len(num_netw)) {
      tempmatrix <- adjmatrix[, , j]
      tempmatrix[is.na(tempmatrix)] <- 0
      diag(tempmatrix) <- 0
      indegree <- rowSums(tempmatrix)    # within-module degree
      totaldegree <- degree[members, j]  # degree in the whole layer
      entropyvalue_k <- numeric(modulelength)
      for (k in seq_len(modulelength)) {
        prob <- indegree[k] / totaldegree[k]
        if (prob == 0) {
          entropyvalue_k[k] <- 2
        } else if (prob >= 1) {
          entropyvalue_k[k] <- 0
        } else if (prob <= 0.5) {
          entropyvalue_k[k] <- 2 + prob * log2(prob) + (1 - prob) * log2(1 - prob)
        } else {
          entropyvalue_k[k] <- -prob * log2(prob) - (1 - prob) * log2(1 - prob)
        }
      }
      mentropy[i, j] <- sum(entropyvalue_k) / modulelength
    }
    mentropy[i, num_netw + 1] <- sum(mentropy[i, seq_len(num_netw)]) / num_netw
  }
  mentropy
}
# 7 = 1+3*num_netw
# 5 = 1+2*num_netw
# 3 = 1+num_netw
# c(2:3) = c(1:num_netw)+1
# c(4:5) = c(1:num_netw)+1+num_netw
# c(6:7) = c(1:num_netw)+1+2*num_netw
# the goal of the alogrithm is the n-modules in all these networkr
# Extract N-modules (node sets densely and consistently connected across all
# layers) from a multi-layer network by greedy seed expansion.
#
# networks : 3-D array (node x node x layer) of edge weights; self-loops are
#            zeroed before degrees are computed.
# seed_v   : node indices used as module seeds (one candidate module each).
#
# Algorithm: each seed is grown into an initial 3-member module from its
# strongest neighbours, then repeatedly expanded with the candidate (from
# Cand_neibor) whose addition most decreases the module's mean entropy
# (multip_entropy).  Expansion stops when no candidate improves the entropy
# by at least beta2, or the module reaches m_size members.
#
# Returns a list with one element per seed; each element carries $members
# (sorted node indices), $p_entropy (per-gene bookkeeping matrix: gene id,
# per-layer within-module degree, per-layer total degree, per-layer entropy),
# $v_entropy (mean entropy), $matrix (the module's sub-network) and $entropy
# (per-layer mean entropies).
nModule <- function(networks,seed_v){
  #options(warn=1)
  print("starting the n-module extraction precedure")
  adjmatrix = networks
  adjmatrix[is.na(adjmatrix)] = 0
  num_vertex = dim(networks)[1]
  num_netw = dim(networks)[3]
  num_node = dim(networks)[1];
  # Remove self-loops and pre-compute each node's total degree per layer.
  for(i in 1:num_netw){
    diag(networks[,,i])=0
  }
  degree=array(0,dim=c(num_node,num_netw));
  for(tempi in 1:num_netw){
    diag(networks[,,tempi])=0;
    degree[,tempi]=rowSums(networks[,,tempi]);
  }
  ori_module <- vector(mode='list', length=length(seed_v)) # to store the n-modules
  print("finishing the module initail construction")
  #**************** initial procedure ***************************
  initial_index = 1 # 1: maximal 2: unoverlapping
  ori_module <- vector(mode='list', length=length(seed_v)) # to store the n-modules
  if(initial_index==1){
    # "Maximal" initialisation: every seed takes its two strongest
    # neighbours regardless of whether they already belong to other modules.
    # NOTE(review): the 2:num_netw loops below assume at least two layers.
    for (i in 1:length(seed_v)){
      ori_module[[i]]<- list(name=paste("module", i, sep=" "), entropy=100,members=seed_v[i])
      tempmatrix = networks[ori_module[[i]]$members,,] # co-expression weights of seed_v[i] in all the layers of networks
      can_weight = rowSums(tempmatrix)
      # Seed disconnected everywhere: leave this module as a singleton.
      if( sum(can_weight)==0 ){ next }
      can_gene_list = order(can_weight,decreasing=TRUE)
      ori_module[[i]]$members=c(ori_module[[i]]$members, can_gene_list[1])
      # Second neighbour: strongest total connection to the 2-member module.
      tempmatrix = networks[,ori_module[[i]]$members,]
      can_weight = rowSums(tempmatrix[,,1])
      for(tempi in 2:num_netw){
        can_weight = can_weight+rowSums(tempmatrix[,,tempi])
      }
      if( sum(can_weight)==0 ){ next }
      can_gene_list = order(can_weight,decreasing=TRUE)
      for(tempi in 1:length(can_gene_list)){
        if (!(can_gene_list[tempi] %in% ori_module[[i]]$members)){
          ori_module[[i]]$members=c(ori_module[[i]]$members, can_gene_list[tempi])
          break ### add the one with the highest weight into module i, and then get out of the loop
        }
      }
      ori_module[[i]]$members = sort( ori_module[[i]]$members)
      # Bookkeeping matrix: col 1 = gene id, cols 2..(1+L) = within-module
      # degree per layer, next L = total degree per layer, last L = entropy.
      m_entropy=array(0,dim=c(length( ori_module[[i]]$members), 1+3*num_netw)) #### for multi-layer networks with more than 2 layers
      m_entropy[,1]=ori_module[[i]]$members #c1: genes
      m_entropy[,c(1:num_netw)+1+num_netw]=degree[ori_module[[i]]$members,] #total degrees in each layer
      for(tempj in 1:num_netw){
        m_entropy[,tempj+1]=rowSums(networks[ori_module[[i]]$members,ori_module[[i]]$members,tempj]) #within module degrees in each layer
        # compute the entropy for each gene (same piecewise function as in
        # multip_entropy / entropy)
        for(tempj1 in 1:dim(m_entropy)[1]){
          temp_indegree = m_entropy[tempj1,tempj+1]
          temp_totaldegree = m_entropy[tempj1,tempj+1+num_netw]
          tempprob = temp_indegree/temp_totaldegree
          if(tempprob==0){
            m_entropy[tempj1,tempj+1+2*num_netw] = 2
          }else if( tempprob>=1 ){
            m_entropy[tempj1,tempj+1+2*num_netw] = 0
          }else if( (tempprob>0) & (tempprob<=0.5) ){
            m_entropy[tempj1,tempj+1+2*num_netw] = 2 + tempprob*log2(tempprob)+(1-tempprob)*log2(1-tempprob)
          }else if( (tempprob>0.5) & (tempprob<1) ){
            m_entropy[tempj1,tempj+1+2*num_netw] = -tempprob*log2(tempprob)-(1-tempprob)*log2(1-tempprob);
          }
        }#tempj1
      } #tempj
      ori_module[[i]]$p_entropy = m_entropy
      ori_module[[i]]$v_entropy = sum(m_entropy[,c(1:num_netw)+1+2*num_netw])/num_netw/length(ori_module[[i]]$members)
    }
  }else{
    # "Unoverlapping" initialisation: init_index_vect marks nodes already
    # claimed, so each node joins at most one initial module.
    init_index_vect = rep(0,num_vertex)
    for (i in 1:length(seed_v)){
      ori_module[[i]]<- list(name=paste("module", i, sep=" "), entropy=100,members=seed_v[i])
      tempmatrix = networks[ori_module[[i]]$members,,]
      can_weight = rowSums(tempmatrix)
      if( sum(can_weight)==0 ){ next }
      can_gene_list = order(can_weight,decreasing=TRUE)
      temp.index = 1
      # Walk the neighbours in decreasing weight order until the module has
      # 3 members, skipping nodes that are already claimed; stop early when
      # only zero-weight neighbours remain.
      while(length(ori_module[[i]]$members)<3){
        if(!init_index_vect[can_gene_list[temp.index]] ){
          if( can_weight[ can_gene_list[temp.index] ]==0 ){
            ori_module[[i]]$members = c(ori_module[[i]]$members,NULL)
            break
          }else{
            ori_module[[i]]$members = c(ori_module[[i]]$members,can_gene_list[temp.index])
            init_index_vect[can_gene_list[temp.index]]=1
          }
        }
        temp.index = temp.index +1
      }
      # Modules that could not reach 3 members are skipped for good (the
      # expansion loop below also requires >= 3 members).
      if(length(ori_module[[i]]$members)<3){next}
      ori_module[[i]]$members = sort( ori_module[[i]]$members)
      # Same bookkeeping layout as in the maximal branch above.
      m_entropy=array(0,dim=c(length( ori_module[[i]]$members),1+3*num_netw)) #c1:genes c2-...: in degree, then total degree, then entropy
      m_entropy[,1]=ori_module[[i]]$members #c1: genes
      m_entropy[,c(1:num_netw)+1+num_netw]=degree[ori_module[[i]]$members,]
      for(tempj in 1:num_netw){
        m_entropy[,tempj+1]=rowSums(networks[ori_module[[i]]$members,ori_module[[i]]$members,tempj])
        # compute the entropy for each gene
        for(tempj1 in 1:dim(m_entropy)[1]){
          temp_indegree = m_entropy[tempj1,tempj+1]
          temp_totaldegree = m_entropy[tempj1,tempj+1+num_netw]
          tempprob = temp_indegree/temp_totaldegree
          if(tempprob==0){
            m_entropy[tempj1,tempj+1+2*num_netw] = 2
          }else if( tempprob>=1 ){
            m_entropy[tempj1,tempj+1+2*num_netw] = 0
          }else if( (tempprob>0) & (tempprob<=0.5) ){
            m_entropy[tempj1,tempj+1+2*num_netw] = 2 + tempprob*log2(tempprob)+(1-tempprob)*log2(1-tempprob)
          }else if( (tempprob>0.5) & (tempprob<1) ){
            m_entropy[tempj1,tempj+1+2*num_netw] = -tempprob*log2(tempprob)-(1-tempprob)*log2(1-tempprob)
          }
        } #tempj1
      } #tempj
      ori_module[[i]]$p_entropy = m_entropy
      ori_module[[i]]$v_entropy = sum(m_entropy[,c(1:num_netw)+1+2*num_netw])/num_netw/length(ori_module[[i]]$members)
    }
  }
  #*************************************************************
  # Hard-coded expansion parameters:
  #   beta        - connection threshold passed to Cand_neibor
  #   min_num_netw- minimum number of "strong" layers for a candidate
  #   size_cand   - maximum candidates evaluated per step
  #   beta2       - minimum entropy improvement to accept a candidate
  #   m_size      - maximum module size
  # (alpha is currently unused.)
  beta = 0.01
  alpha = 0.1 # the threshold for the
  min_num_netw = 1
  size_cand = 100
  beta2 = 0.001
  m_size = 200 # the size of the
  #**************** expand procedure ***************************
  for (i in 1:length(seed_v)){ #for checking
    # tempindex indicates the maximum size of a module
    print(paste("extracting the module ", i," / ",length(seed_v), sep=""))
    if(length(ori_module[[i]]$members)<3){next}
    # tempindex is the stop flag: set to 101 when no candidate exists or no
    # candidate improves the entropy enough.
    tempindex=0
    while((length(ori_module[[i]]$members)<m_size)&(tempindex<1)){
      neibor = Cand_neibor(networks,ori_module[[i]]$members,beta,min_num_netw,size_cand)
      if (length(neibor)==0){
        print("no candidates")
        tempindex=101
      }else{
        c_node = c(ori_module[[i]]$members,neibor)
        c_matrix = networks[c_node,ori_module[[i]]$members,]
        candid =multip_entropy(c_matrix,c_node,ori_module[[i]]$p_entropy,degree) #c_node is the union of $members and neibors
        # Stop when the best candidate's improvement is below beta2 (the NA
        # check also catches the "no candidate accepted" sentinel).
        if ((ori_module[[i]]$v_entropy-candid[[1]])<beta2|is.na(ori_module[[i]]$v_entropy-candid[[1]])){
          tempindex=101
        }else{
          # Accept the candidate: candid[[2]] is the updated bookkeeping
          # matrix, candid[[3]] the added node id; re-sort members and the
          # matrix rows so they stay aligned by gene id.
          ori_module[[i]]$members = c(ori_module[[i]]$members,candid[[3]])
          ori_module[[i]]$members = sort( ori_module[[i]]$members)
          tempmatrix = c()
          tempmatrix = candid[[2]]
          ori_module[[i]]$members=sort(tempmatrix[,1])
          tempmatrix= tempmatrix[order(tempmatrix[,1]),]
          ori_module[[i]]$p_entropy = c()
          ori_module[[i]]$p_entropy = tempmatrix
          ori_module[[i]]$v_entropy=candid[[1]]
        }
      }
    }
    # Final per-module artefacts: the sub-network and per-layer mean entropy.
    ori_module[[i]]$matrix = networks[sort(ori_module[[i]]$members),sort(ori_module[[i]]$members),]
    ori_module[[i]]$entropy = colSums(ori_module[[i]]$p_entropy[,c(1:num_netw)+1+2*num_netw])/length(ori_module[[i]]$members)
  }
  #*************************************************************
  ori_module
}
|
29304e640c274832d868d2e46d386822968b443f | 17384cd2a9964facca580812216e95b85c446100 | /inst/shinyLDA/server.R | 6b578a19d76e16f49459a02fa9f5304db967c384 | [] | no_license | JavierDeLaHoz/LDAShiny | c7a1b3b0ad458caa8a8bfe60619f2db8835f08c0 | 204cfe108f5107762ff143c7881f037f97114df7 | refs/heads/main | 2023-04-02T17:39:27.204791 | 2021-03-30T21:32:53 | 2021-03-30T21:32:53 | 332,085,726 | 3 | 2 | null | 2021-03-05T10:50:57 | 2021-01-22T23:22:41 | HTML | UTF-8 | R | false | false | 34,947 | r | server.R | require("textmineR")
require("magrittr")
require("highcharter")
require("dplyr")
require("parallel")
require("ldatuning")
require("purrr")
require("topicmodels")
require("stringr")
require("broom")
require("DT")
shinyServer(function(input,output,session) {
options(shiny.maxRequestSize=50000000*1024^2)
output$selectfile <- renderUI({
if(is.null(input$file)) {return()}
list(hr(),
helpText("Select the files for which you need
to see data and summary stats"),
selectInput("Select", "Select",
choices=input$file$name)
)
})
## Summary Stats code ##
# this reactive output contains the summary of the dataset and display the summary in table format
output$summexample <- renderPrint({
if(input$example == FALSE){return()}
dataexample <- system.file("extdata", "scopusJOSS.csv",
package = "LDAShiny")
data_example <- read.csv(dataexample)
summary(data_example)
})
output$summ <- renderPrint({
if(is.null(input$example)){return()}
summary(read.table (input$file$datapath[input$file$name==input$Select],
sep=input$sep,
header = input$header,
stringsAsFactors = input$stringAsFactors))
})
observeEvent(input$example, {
if(input$example == TRUE){
shinyjs::disable("choice")
} else {
shinyjs::enable("choice")
}
})
## MainPanel tabset renderUI code ##
# the following renderUI is used to dynamically g
# enerate the tabsets when the file is loaded.
# Until the file is loaded, app will not show the tabset.
output$tb2 <- renderUI({
if(input$example == FALSE){return()}
else
tabsetPanel(
tabPanel("Statistical summary example",
verbatimTextOutput("summexample")
)
)
})
output$tb <- renderUI({
if(is.null(input$file)){return()}
else tabsetPanel(
tabPanel("Statistical summary ",
verbatimTextOutput("summ")
)
)
})
  # Read the uploaded file once the user clicks the "choice" action button,
  # then populate the three column selectors with the file's column names.
  # The reactive's value is the data.frame, consumed by the table_display
  # outputs below.
  # NOTE(review): the input id "stringAsFactors" (missing "s") must match the
  # id declared in the UI — confirm it is not a typo there as well.
  info <- eventReactive(input$choice, {
    # Changes in read.table
    f <- read.table(file=input$file$datapath[input$file$name==input$Select],
                    sep=input$sep,
                    header = input$header,
                    stringsAsFactors = input$stringAsFactors)
    vars <- names(f)
    # Update select input immediately after clicking on the action button.
    updateSelectInput(session,
                      "column1",
                      "Select id document",
                      choices = vars)
    updateSelectInput(session,
                      "column2",
                      "Select document vector",
                      choices = vars)
    updateSelectInput(session,
                      "column3",
                      "Select publish year",
                      choices = vars)
    f
  })
output$table_display1 <- renderTable({
f <- info()
f <- subset(f,
select = "input$column1",
drop = TRUE) #subsetting takes place here
})
output$table_display2 <- renderTable({
f <- info()
g <- subset(f,
select = "input$column2",
drop = TRUE) #subsetting takes place here
})
observeEvent(input$checkStemming, {
if(input$checkStemming == FALSE){
shinyjs::disable("Stemm")}
else {shinyjs::enable("Stemm")}
})
observe({
if (isTRUE(input$example == TRUE )) {
shinyjs::disable("file")
}
else {shinyjs::enable("file")
}
})
observe({
if (!is.null(input$file)) {
shinyjs::disable("example")
}
else {shinyjs::enable("example")
}
})
observe({
if (is.null(input$file)& input$example == FALSE) {
shinyjs::disable("dtm.update")
}
else {shinyjs::enable("dtm.update")
}
})
z <- reactiveValues(odtm=NULL,
dtmt = NULL,
tf_mat = NULL,
dimen = NULL,
dtmF =NULL,
freq=NULL,
wf=NULL,
year=NULL,
endtime=NULL)
# Builds the document-term matrices when "dtm.update" is pressed: loads either
# the bundled example corpus or the uploaded file, creates a raw DTM plus a
# cleaned DTM (optionally stemmed), trims sparse terms and fills the shared
# store `z` with term statistics and word frequencies.
observeEvent(input$dtm.update, {
  if( input$example == TRUE){
    # Bundled example: Scopus export of JOSS abstracts shipped with the package.
    dataexample <- system.file("extdata",
                               "scopusJOSS.csv",
                               package = "LDAShiny")
    data_example <- read.csv(dataexample)
    filtro <- data.frame(doc_names = data_example$Title,
                         doc_vec = data_example$Abstract,
                         year = data_example$Year)
    print(dataexample)
  }
  # NOTE(review): tibble::tibble() around a whole data frame normally creates a
  # single data-frame column rather than splicing its columns; as_tibble() was
  # probably intended here -- confirm that the non-example path works.
  else {filtro <- tibble::tibble(read.table(file=input$file$datapath[input$file$name==input$Select],
                                            sep=input$sep,
                                            header = input$header,
                                            stringsAsFactors = input$stringAsFactors))
  filtro <- dplyr::select(filtro,
                          doc_names=input$column1,
                          doc_vec=input$column2,
                          year=input$column3)
  }
  z$year <- filtro$year
  # User-supplied extra stopwords, comma separated.
  stp <- unlist(strsplit(input$stopwords,","))
  stp <- trimws(stp)
  # The example path is capped at 2 CPUs; real runs use all cores.
  if(input$example == TRUE){
    cpus <- 2}
  else {
    cpus <- parallel::detectCores()
  }
  ngram <- as.integer(input$ngrams)
  Stemm <- trimws(input$Stemm)
  # Raw (uncleaned) DTM, kept only to report the original dimensions below.
  odtm <- textmineR::CreateDtm(doc_vec = filtro$doc_vec,
                               doc_names = filtro$doc_names,
                               ngram_window = c(1,ngram),
                               lower = FALSE,
                               remove_punctuation = FALSE,
                               remove_numbers = FALSE,
                               #stem_lemma_function = function(x) SnowballC::wordStem(x, Stemm), ## decide first whether to stem; if so, a language must be selected
                               cpus = cpus)
  # Cleaned DTM: lower-cased, punctuation removed, stopwords dropped,
  # optionally stemmed with the selected Snowball language.
  if(input$checkStemming)
  {
    dtm <- textmineR::CreateDtm(doc_vec = filtro$doc_vec,
                                doc_names = filtro$doc_names,
                                ngram_window = c(1,ngram),
                                stopword_vec = c(stopwords::stopwords(input$Language),
                                                 letters,stp),
                                lower = TRUE,
                                remove_punctuation = TRUE,
                                remove_numbers = input$removenumber,
                                stem_lemma_function = function(x) SnowballC::wordStem(x, Stemm),
                                cpus = cpus)
  } else
  {dtm <- textmineR::CreateDtm(doc_vec = filtro$doc_vec,
                               doc_names = filtro$doc_names,
                               lower = TRUE,
                               stopword_vec = c(stopwords::stopwords(input$Language),letters,stp),# select the language
                               ngram_window = c(1,ngram),
                               remove_punctuation = TRUE,
                               remove_numbers = input$removenumber,
                               #stem_lemma_function = function(x) SnowballC::wordStem(x, Stemm), ## decide first whether to stem; if so, a language must be selected
                               cpus = cpus) }
  # Convert to the formats the downstream packages expect.
  z$dtm <- quanteda::as.dfm(dtm)
  CONVERT <- quanteda::convert(z$dtm,
                               to = "topicmodels")
  # Drop sparse terms at the user-chosen threshold.
  z$dtmt <- removeSparseTerms(CONVERT,
                              sparse= input$sparce)
  z$dtmF <- chinese.misc::m3m(z$dtmt,
                              to="dgCMatrix")
  Original <- dim(odtm)
  # NOTE(review): Without_Sparsity is computed but never included in the
  # rbind() below -- only Original and Final are reported; confirm intent.
  Without_Sparsity <- dim(z$dtm)
  Final <- dim (z$dtmt)
  z$dimen <- rbind(Original,
                   Final)
  colnames (z$dimen) <- c ("document", "term")
  z$tf_mat <- textmineR::TermDocFreq(dtm = z$dtmF)
  z$freq <- colSums(as.matrix(z$dtmF)) #
  z$wf <- tibble::tibble(word=names(z$freq), freq=z$freq)
  beepr::beep(2)
})
# Dimension summary of the DTM before/after sparse-term removal.
output$Table_dim <- DT::renderDT({
  opts <- list(pageLength = 5, searching = FALSE, rownames = TRUE)
  DT::datatable(data = as.matrix(z$dimen), options = opts)
})

# Term/document frequency statistics with export buttons.
output$data_b <- DT::renderDT({
  opts <- list(dom = 'Bfrtip',
               buttons = c('pageLength', 'copy', 'csv', 'excel', 'pdf', 'print'),
               pagelength = 10,
               lengthMenu = list(c(10, 25, 100, -1),
                                 c('10', '25', '100', 'All')))
  DT::datatable(data = z$tf_mat, extensions = 'Buttons', options = opts)
})
# Bar chart of the top input$b most frequent terms.
# NOTE(review): the bare `export` expression presumably references an
# export-menu definition created elsewhere in this server -- confirm it exists;
# it also makes this render depend reactively on that value if it is reactive.
output$plot_gg <- highcharter::renderHighchart({
  export
  z$wf %>% top_n(input$b, freq) %>%
    hchart("column",
           hcaes(x = word, y = freq),
           color = "lightgray",
           borderColor = "black") %>%
    hc_add_theme(hc_theme_ggplot2()) %>%
    hc_xAxis(title = list(text = "Term")) %>%
    hc_yAxis(title = list(text = "Frequency")) %>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)
      )
    )
})
# Word cloud of the top input$c most frequent terms.
output$plot_gf<- highcharter::renderHighchart({
  export
  z$wf %>% top_n(input$c, freq) %>%
    hchart( "wordcloud",
            hcaes(name = word, weight = freq)) %>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
#############################number topic###############
# Gate for the coherence-search button ("Run.model1"): disabled while the
# k grid (num1 from, num2 to, num3 by), the Gibbs controls (num4 iterations,
# num5 burn-in, num6 alpha) are inconsistent or non-numeric, or no DTM has
# been built yet.  Order of the checks matters: range checks run first.
observe({
  if (isTRUE(input$num1>=input$num2||input$num3>input$num2)) {
    shinyjs::disable("Run.model1")
  }
  else if(isTRUE(input$num5>=input$num4)) {
    shinyjs::disable("Run.model1")
  }
  else if(isTRUE(input$num1 < 2 )) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num1))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num2))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num3))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num4))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num5))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.na(as.numeric(input$num6))) {
    shinyjs::disable("Run.model1")
  }
  else if(is.null(z$dtmF)) {
    shinyjs::disable("Run.model1")
  }
  else {shinyjs::enable("Run.model1")
  }
})
# Warn when the user-supplied extra k values are not a comma-separated
# list of numbers.
output$OthKcoh <- renderText({
  stpCohe <- unlist(strsplit(input$OtherKCoherence,","))
  stpCohe <- as.numeric(trimws(stpCohe))
  if (anyNA(stpCohe)) {
    "Invalid input"
  }
})
# Holds the k-vs-coherence grid produced by the coherence search, plus timing.
alist <- reactiveValues(coherence_mat=NULL,
                        end_time=NULL)
# Coherence-based search for the number of topics: fits one LDA model per
# candidate k (in parallel via textmineR) and records the mean probabilistic
# coherence of each fit.
observeEvent(input$Run.model1,{
  set.seed(1234)
  ptm <- proc.time()
  # Extra user-supplied k values, comma separated.
  stpCohe <- unlist(strsplit(input$OtherKCoherence,","))
  stpCohe <- as.numeric(trimws(stpCohe))
  seqk <- c(seq(from=input$num1,to=input$num2,by=input$num3),stpCohe)# Candidate number of topics k
  iterations <- input$num4 # Gibbs sampling iterations
  burnin <- input$num5 # Gibbs burn-in
  alpha <- input$num6 # document-topic prior
  # Example runs are capped at 2 CPUs.
  if(input$example == TRUE){
    cpus <- 2}
  else {
    cpus <- parallel::detectCores()
  }
  dtm <- z$dtmF
  coherence_list <- textmineR::TmParallelApply(X = seqk , FUN = function(k){
    m <- textmineR::FitLdaModel(dtm= dtm ,
                                k = k,
                                iterations =iterations ,
                                burnin = burnin,
                                alpha = alpha,
                                beta = colSums(dtm) / sum(dtm) * 100,
                                optimize_alpha = TRUE,
                                calc_likelihood = TRUE,
                                calc_coherence = TRUE,
                                calc_r2 = FALSE,
                                cpus = cpus)
    m$k <- k
    m
  },export= ls(), # export only needed for Windows machines
  cpus = cpus)
  # BUG FIX: tibble::tibble() has no stringsAsFactors argument -- the
  # original call silently created a bogus logical column named
  # "stringsAsFactors" in the result.
  alist$coherence_mat <- tibble::tibble(
    k = sapply(coherence_list, function(x) nrow(x$phi)),
    coherence = sapply(coherence_list, function(x) mean(x$coherence)))
  beepr::beep(2)
  alist$end_time <- proc.time() - ptm
})
# Elapsed time of the coherence search.
output$timeCoherence <- renderPrint({
  print( alist$end_time)
})
# Coherence-by-k line plot (higher is better).
output$plot_gi <- highcharter::renderHighchart({
  export
  alist$coherence_mat %>%
    hchart("line", hcaes(x = k, y = coherence)) %>%
    hc_add_theme(hc_theme_ggplot2())%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
# Gate for the four-metric (ldatuning) search button ("Run.model2"):
# disabled while the k grid (num7 from, num8 to, num9 by) is inconsistent
# or non-numeric, or the trimmed DTM does not exist yet.
observe({
  if (isTRUE(input$num7>=input$num8||input$num9>input$num8)) {
    shinyjs::disable("Run.model2")
  }
  else if(is.na(as.numeric(input$num7))) {
    shinyjs::disable("Run.model2")
  }
  else if(isTRUE(input$num7 < 2)) {
    shinyjs::disable("Run.model2")
  }
  else if(is.na(as.numeric(input$num8))) {
    shinyjs::disable("Run.model2")
  }
  else if(is.na(as.numeric(input$num9))) {
    shinyjs::disable("Run.model2")
  }
  else if(is.null(z$dtmt)) {
    shinyjs::disable("Run.model2")
  }
  else {
    shinyjs::enable("Run.model2")
  }
})
# Warn when the user-supplied extra k values are not comma-separated numbers.
output$OthK4metric <- renderText({
  stpCohe <- unlist(strsplit(input$OtherK4metric,","))
  stpCohe <- as.numeric(trimws(stpCohe))
  if (anyNA(stpCohe)) {
    "Invalid input"
  }
})
# Holds the reshaped four-metric results and the timing of the search.
blist <- reactiveValues(fourmetric_mat = NULL,
                        end_time2 = NULL)
# Number-of-topics search via ldatuning's four metrics (Griffiths2004,
# CaoJuan2009, Arun2010, Deveaud2014), run on a PSOCK cluster.
observeEvent(input$Run.model2, {
  set.seed(1234)
  ptm2 <- proc.time()
  #stp2 = unlist(strsplit(input$metric,","))
  #stp2 = trimws(stp2)
  method <- input$methods
  # Extra user-supplied k values, comma separated.
  stpfourm <- unlist(strsplit(input$OtherK4metric,","))
  stpfourm <- as.numeric (trimws(stpfourm))
  seqk <- c(seq(from = input$num7,
                to = input$num8,
                by = input$num9),stpfourm)
  # Example runs get a 2-worker cluster; otherwise one worker per core.
  if(input$example == TRUE){
    cl <- makeCluster(2,
                      setup_strategy = "sequential")}
  else {
    cl <- makeCluster(parallel::detectCores(),
                      setup_strategy = "sequential")
  }
  # NOTE(review): mc.cores is given the cluster object itself (not a core
  # count); ldatuning accepts a cluster here -- confirm against the
  # installed ldatuning version.
  fourmetric_mat <- ldatuning::FindTopicsNumber(
    z$dtmt,
    topics = seqk, # Select range number of topics
    metrics = c("Griffiths2004",
                "CaoJuan2009",
                "Arun2010",
                "Deveaud2014"),
    method = method,
    control = list(seed = 77),
    mc.cores = cl )
  # g4metric(): presumably a package-internal helper that rescales/reshapes
  # the metric table for plotting -- TODO confirm.
  blist$fourmetric_mat <- g4metric(fourmetric_mat)
  beepr::beep(2)
  blist$end_time2 <- proc.time() - ptm2
  stopCluster(cl)
})
# Elapsed time of the four-metric search.
output$timefourmetric <- renderPrint({
  print(blist$end_time2)
})
# One line per metric across candidate k values.
output$plot_gj <- highcharter::renderHighchart({
  export
  blist$fourmetric_mat %>%
    hchart("line", hcaes(x = topics, y = value, group =variable)) %>%
    hc_add_theme(hc_theme_ggplot2())%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
# Gate for the perplexity search button ("Run.model3"): disabled while the
# k grid (num13 from, num14 to, num15 by) or the Gibbs controls (num16
# iterations, num17 burn-in, num18 thinning) are inconsistent or
# non-numeric, or the trimmed DTM does not exist yet.
observe({
  if (isTRUE(input$num13>=input$num14 || input$num15>input$num14)) {
    shinyjs::disable("Run.model3")
  }
  else if(isTRUE(input$num17 > input$num16 ||input$num18 > input$num17)) {
    shinyjs::disable("Run.model3")
  }
  else if(isTRUE(input$num13 < 2 )) {
    shinyjs::disable("Run.model3")
  }
  else if(is.na(as.numeric(input$num13))) {
    shinyjs::disable("Run.model3")
  }
  else if(is.na(as.numeric(input$num14))) {
    shinyjs::disable("Run.model3")
  }
  else if(is.na(as.numeric(input$num15))) {
    shinyjs::disable("Run.model3")
  }
  else if(is.na(as.numeric(input$num16))) {
    shinyjs::disable("Run.model3")
  }
  else if(is.na(as.numeric(input$num17))) {
    shinyjs::disable("Run.model3")
  }
  # BUG FIX: the original repeated the num17 check here, leaving the
  # thinning interval (num18) unvalidated.
  else if(is.na(as.numeric(input$num18))) {
    shinyjs::disable("Run.model3")
  }
  else if(is.null(z$dtmt)) {
    shinyjs::disable("Run.model3")
  }
  else {
    shinyjs::enable("Run.model3")
  }
})
# Warn when the user-supplied extra k values are not comma-separated numbers.
output$OthKLL <- renderText({
  stpCohe <- unlist(strsplit(input$OtherKLL,","))
  stpCohe <- as.numeric(trimws(stpCohe))
  if (anyNA(stpCohe)) {
    "Invalid input"
  }
})
# Holds the k-vs-perplexity table and timing of the perplexity search.
clist <- reactiveValues(best.model = NULL,
                        end_time3 = NULL)
# Perplexity-based search: fits topicmodels::LDA for each candidate k and
# records the model perplexity.
observeEvent(input$Run.model3,{
  set.seed(12345)
  ptm3 <- proc.time()
  stpLL <- unlist(strsplit(input$OtherKLL,","))
  stpLL <- as.numeric (trimws(stpLL))
  seqk <- c(seq(from = input$num13,
                to = input$num14,
                by = input$num15),stpLL)
  iter <- input$num16
  burnin <- input$num17
  thin <- input$num18
  # best.model <- lapply(seqk, function(k){LDA(z$dtmt, k, method = "Gibbs",iter =iter,burnin=burnin,thin=thin)})
  #best.model<- tibble(as.matrix(lapply(best.model, logLik)))
  #clist$best.model <- tibble(topics=seqk, logL=as.numeric(as.matrix(best.model)))
  # NOTE(review): each seqk element becomes LDA's k; newdata/estimate_theta/
  # iter/burnin/thin are forwarded through LDA's `...` and no Gibbs method is
  # set, so these controls may not reach the sampler -- confirm against
  # topicmodels::LDA's signature.
  perplex <- seqk %>%
    purrr::map(topicmodels::LDA, x =z$dtmt ,
               newdata = z$dtmt ,
               estimate_theta=FALSE,
               iter =iter,
               burnin=burnin,
               thin= thin)
  clist$best.model <- tibble::tibble(Topics = seqk,
                                     Perplexity = map_dbl(perplex , perplexity))
  beepr::beep(2)
  clist$end_time3 <- proc.time() - ptm3
})
# Elapsed time of the perplexity search.
output$timeloglike <- renderPrint({
  print(clist$end_time3)
})
# Perplexity-by-k line plot (lower is better).
output$plot_gk <- highcharter::renderHighchart({
  export
  Perplex <- clist$best.model
  Perplex %>%
    hchart("line", hcaes(x=Topics, y = Perplexity)) %>%
    hc_add_theme(hc_theme_ggplot2())%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
#############################
# Gate for the harmonic-mean search button ("Run.model4"): disabled while
# the k grid (num19 from, num20 to, num21 by) or the Gibbs controls
# (num22 iterations, num23 burn-in, num24 keep interval) are inconsistent
# or non-numeric, or the trimmed DTM does not exist yet.
observe({
  if (isTRUE(input$num19>=input$num20 || input$num21>input$num20)) {
    shinyjs::disable("Run.model4")
  }
  else if(isTRUE(input$num23>input$num22 || input$num24>=input$num23)) {
    shinyjs::disable("Run.model4")
  }
  else if(isTRUE(input$num19 < 2 )) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num19))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num20))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num21))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num22))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num23))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.na(as.numeric(input$num24))) {
    shinyjs::disable("Run.model4")
  }
  else if(is.null(z$dtmt)) {
    shinyjs::disable("Run.model4")
  }
  else {
    shinyjs::enable("Run.model4")
  }
})
# Warn when the user-supplied extra k values are not comma-separated numbers.
output$Okhm <- renderText({
  stpCohe <- unlist(strsplit(input$OtherKHM,","))
  stpCohe <- as.numeric(trimws(stpCohe))
  if (anyNA(stpCohe)) {
    "Invalid input"
  }
})
# Holds the harmonic-mean log-likelihood per candidate k (end_time4 is
# added dynamically below).
dlist <- reactiveValues(hm_many = NULL)
# Harmonic-mean search: fits a Gibbs LDA per candidate k, discards the
# burn-in portion of the kept log-likelihoods and summarises each chain
# by its harmonic mean.
observeEvent(input$Run.model4,{
  set.seed(12345)
  ptm4 <- proc.time()
  stpHM <- unlist(strsplit(input$OtherKHM,","))
  stpHM <- as.numeric (trimws(stpHM))
  seqk <- c(seq(from = input$num19,
                to = input$num20,
                by = input$num21),stpHM)
  iter <- input$num22
  burnin <- input$num23
  keep <- input$num24
  fitted_many <- lapply(seqk,
                        function(k)LDA(z$dtmt,
                                       k = k,
                                       method = "Gibbs",
                                       control = list(burnin = burnin,
                                                      iter = iter,
                                                      keep = keep)
                        ))
  # extract logliks from each topic
  # NOTE(review): the -c(1:(burnin/keep)) indexing assumes burnin is an
  # exact multiple of keep -- TODO confirm the UI enforces this.
  logLiks_many <- lapply(fitted_many, function(L)L@logLiks[-c(1:(burnin/keep))])
  # compute harmonic means (harmonicMean() is defined elsewhere in the app).
  hm_many <- tibble::tibble(as.matrix (sapply(logLiks_many,
                                              function(h) harmonicMean(h)
  )
  )
  )
  # inspect
  dlist$hm_many <- tibble::tibble(topics=seqk,
                                  logL=as.numeric(as.matrix(hm_many)
                                  )
  )
  beepr::beep(2)
  dlist$end_time4 <- proc.time() - ptm4
})
# Elapsed time of the harmonic-mean search.
output$timeHmean<- renderPrint({
  print(dlist$end_time4)
})
# Harmonic-mean log-likelihood by k (higher is better).
output$plot_gl <- highcharter::renderHighchart({
  export
  dlist$hm_many %>%
    hchart("line", hcaes(x=topics, y=logL)) %>%
    hc_add_theme(hc_theme_ggplot2())%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
#############################
# Gate for the final-model button ("Run.model5"): disabled while the model
# controls (num25 k, num26 iterations, num27 burn-in, num28 alpha) are
# inconsistent or non-numeric, or the sparse DTM does not exist yet.
observe({
  if (isTRUE(input$num27 >= input$num26|| input$num25 < 2)) {
    shinyjs::disable("Run.model5")
  }
  else if(is.null(z$dtmF)) {
    shinyjs::disable("Run.model5")
  }
  else if(is.na(as.numeric(input$num25))) {
    shinyjs::disable("Run.model5")
  }
  else if(is.na(as.numeric(input$num26))) {
    shinyjs::disable("Run.model5")
  }
  else if(is.na(as.numeric(input$num27))) {
    shinyjs::disable("Run.model5")
  }
  else if(is.na(as.numeric(input$num28))) {
    shinyjs::disable("Run.model5")
  }
  else {
    shinyjs::enable("Run.model5")
  }
})
# Reactive store for the final model and all derived tables consumed by
# the results tabs (summary, tidy theta/beta, trend coefficients).
elist <- reactiveValues(summary= NULL,
                        tidy_thetha=NULL,
                        tidy_beta = NULL,
                        dfCoef=NULL,
                        model=NULL)
# Fits the final LDA model with the user-chosen k and Gibbs controls, then
# derives everything the results tabs consume: a per-topic summary table,
# tidy theta (document-topic) and phi (topic-term) tables, mean topic
# proportions per publication year, and per-topic linear trends over time.
observeEvent(input$Run.model5,{
  set.seed(12345)
  k <- input$num25       # number of topics
  iter <- input$num26    # Gibbs iterations
  burnin <- input$num27  # Gibbs burn-in
  alpha <- input$num28   # document-topic prior
  # Example runs are capped at 2 CPUs.
  if(input$example == TRUE){
    cpus <- 2L}
  else {
    cpus <- parallel::detectCores()
  }
  elist$model <- textmineR::FitLdaModel(z$dtmF, # parameter
                                        k = k ,# Number of topics k
                                        iterations = iter, # parameter
                                        burnin = burnin, #parameter
                                        alpha = alpha,# parameter
                                        beta = colSums(z$dtmF)/sum(z$dtmF)*100,
                                        optimize_alpha = TRUE, # parameter
                                        calc_likelihood = TRUE,
                                        calc_coherence = TRUE,
                                        calc_r2 = FALSE,
                                        cpus = cpus)
  top_terms <- GetTopTerms(phi = elist$model$phi,
                           M = 10)
  # Topic prevalence: share of total theta mass, in percent.
  prevalence <- colSums(elist$model$theta) / sum(elist$model$theta) * 100
  #textmineR has a naive topic labeling tool based on probable bigrams
  labels <- LabelTopics(assignments = elist$model$theta > 0.05,
                        dtm = z$dtmF,
                        M = input$Labels)
  elist$summary <- data.frame(topic = rownames(elist$model$phi),
                              label = labels,
                              coherence = round(elist$model$coherence, 3),
                              prevalence = round(prevalence,3),
                              top_terms = apply(top_terms, 2, function(x){
                                paste(x, collapse = ", ")
                              }),
                              stringsAsFactors = FALSE)
  # Long-format document-topic probabilities.
  elist$tidy_thetha <- data.frame(document = rownames(elist$model$theta),
                                  round(elist$model$theta,5),
                                  stringsAsFactors = FALSE) %>%
    tidyr::gather(topic, gamma, -document)
  # Long-format topic-term probabilities (topic ids "t_1", ... as integers).
  elist$tidy_beta <- data.frame(topic = as.integer(str_replace_all(rownames(elist$model$phi),
                                                                   "t_", "")
  ),
  round(elist$model$phi,5),
  stringsAsFactors = FALSE)%>%
    tidyr::gather(term, beta, -topic)
  # Mean topic proportion per publication year.
  elist$thetayear <- data.frame(elist$tidy_thetha,
                                year = rep(z$year))%>%
    group_by(topic,year) %>%
    summarise(proportion= mean(gamma))
  # NOTE(review): dfreg is not read anywhere in this server (dfCoef below
  # supersedes it); kept unchanged pending confirmation it is unused.
  elist$dfreg <- elist$thetayear %>% group_by(topic) %>%
    do(fitreg = lm(proportion ~ year, data = .))
  elist$thetayear <- data.frame(elist$thetayear)
  # Per-topic linear trend of proportion over year, tidied coefficients.
  elist$dfCoef <- elist$thetayear %>%
    nest_by(topic) %>%
    #change do() to mutate(), then add list() before your model
    # make sure to change data = . to data = data
    mutate(fitmodelreg = list(lm(proportion ~ year, data = data))) %>%
    summarise(tidy(fitmodelreg))
  # FIX: removed a dead local `classifications` computation that used to sit
  # here -- its result was never stored or used (output$Alloca recomputes the
  # classification table on demand).
  beepr::beep(2)
})
# Per-topic summary table (label, coherence, prevalence, top terms)
# computed once at model-fit time.
output$sum <- DT::renderDT({
  DT::datatable(data = elist$summary, extensions = 'Buttons',
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy',
                                           'csv',
                                           'excel',
                                           'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10, 25, 100, -1),
                                                 c('10', '25', '100','All')
                               )
                )
  )
})
# Interactive variant of the topic summary: recomputed whenever the user
# changes the number of top terms, the labelling threshold or the number
# of labels (unlike output$sum, which is fixed at fit time).
output$summLDA <- DT::renderDT({
  model <- elist$model
  top_terms <- textmineR::GetTopTerms(phi = model$phi, M = input$Topterm)
  # Topic prevalence: share of total theta mass, in percent.
  prevalence <- colSums(model$theta) / sum(model$theta) * 100
  # textmineR has a naive topic labeling tool based on probable bigrams
  labels <- LabelTopics(assignments = model$theta > input$assignments,
                        dtm = z$dtmF,
                        M = input$Labels)
  summary <- data.frame(topic = rownames(model$phi),
                        label = labels,
                        coherence = round(model$coherence, 3),
                        prevalence = round(prevalence,3),
                        top_terms = apply(top_terms, 2, function(x){
                          paste(x, collapse = ", ")
                        }),
                        stringsAsFactors = FALSE)
  DT::datatable(data = summary, extensions = 'Buttons',
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy',
                                           'csv',
                                           'excel',
                                           'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10, 25, 100, -1),
                                                 c('10', '25', '100','All'))))%>%
    formatRound( columns= c("coherence","prevalence"),
                 digits=5)
})
# Full document-topic probability table (long format), with a CSV download
# handler below.
output$theta <- DT::renderDT({
  DT::datatable(data = elist$tidy_thetha ,
                extensions = 'Buttons',
                filter = 'top',
                colnames=c("document","topic", "theta"),
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy',
                                           'csv',
                                           'excel',
                                           'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10,100,20000,-1),
                                                 c('10', '25', '10000','All')
                               )
                )
  )%>% formatRound( columns= c("gamma"),digits=5)
})
# Download the theta table as a date-stamped CSV.
output$downloadData <- downloadHandler(
  filename = function() {
    paste('data-', Sys.Date(), '.csv', sep='')
  },
  content = function(con) {
    write.csv(elist$tidy_thetha, con)
  }
)
# Full topic-term probability table (long format).
output$phi <- DT::renderDT({
  DT::datatable(data =elist$tidy_beta, extensions = 'Buttons',filter = 'top',
                colnames=c("topic", "term", "phi"),
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy', 'csv',
                                           'excel', 'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10, 25, 100, -1),
                                                 c('10', '25', '100','All'))))%>%
    formatRound( columns= c("beta"),digits=5)
})
# Top input$topnumber documents per topic, ranked by theta.
output$Alloca <- DT::renderDT({
  classifications <- elist$tidy_thetha %>%
    dplyr::group_by(topic) %>%
    dplyr::top_n(input$topnumber, gamma) %>%
    ungroup()
  DT::datatable(data = classifications,
                extensions = 'Buttons',
                filter = 'top',
                colnames=c("document","topic", "theta"),
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy',
                                           'csv',
                                           'excel',
                                           'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10, 25, 100, -1),
                                                 c('10', '25', '100','All'))))%>%
    formatRound( columns= c("gamma"),digits=5)
})
# Per-topic linear-trend coefficients (proportion ~ year), from elist$dfCoef.
output$reg <- DT::renderDT({
  datareg <- elist$dfCoef
  DT::datatable(data = datareg,
                extensions = 'Buttons',
                options = list(dom = 'Bfrtip',
                               buttons = c('pageLength',
                                           'copy',
                                           'csv',
                                           'excel',
                                           'pdf',
                                           'print'),
                               pagelength = 10,
                               lengthMenu = list(c(10, 25, 100, -1),
                                                 c('10',
                                                   '25', '100','All'))))%>%
    formatRound( columns= c("estimate",
                            "std.error",
                            "statistic",
                            "p.value"),digits=5)
})
# Topic proportion over publication year, one line per topic
# (topic ids "t_1", ... shown as plain integers).
output$plot_trend <- highcharter::renderHighchart({
  export
  elist$thetayear %>%
    hchart("line", hcaes(x = year,
                         y = proportion,
                         group = as.integer(str_replace_all(topic,"t_", " ")))) %>%
    hc_add_theme(hc_theme_ggplot2())%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
# Word cloud of the top input$cloud terms of the topic selected in num29.
output$plot_worcloud <- highcharter::renderHighchart({
  export
  elist$tidy_beta %>% dplyr::filter(topic==input$num29)%>%
    dplyr::top_n(input$cloud, beta) %>%
    hchart( "wordcloud", hcaes(name = term,
                               weight = beta)) %>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
# Heat map of topic proportion by year (blue = low, orange = high).
output$plot_heatmap <- highcharter::renderHighchart({
  colr <- list( list(0, '#2E86C1'),
                list(1, '#FF5733'))
  export
  elist$thetayear %>%
    hchart("heatmap", hcaes(x = year,
                            y = as.integer(str_replace_all(topic,"t_", " ")) ,
                            value =proportion)) %>%
    hc_colorAxis( stops= colr,
                  min=min(elist$thetayear$proportion),
                  max= max(elist$thetayear$proportion)) %>%
    hc_yAxis(title = list(text = "Topic"))%>%
    hc_exporting(
      enabled = TRUE,
      formAttributes = list(target = "_blank"),
      buttons = list(contextButton = list(
        text = "Export",
        theme = list(fill = "transparent"),
        menuItems = export)))
})
#####################################end number topic
#observe({
# if (input$Stop > 0) stopApp() # stop shiny
#})
})
|
120ff7e019816474a2e014966243405c5b4fb4b1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RProtoBuf/examples/type.Rd.R | a9e8bb11d8dc6a86292ebed438cd3d0e512be5ba | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 928 | r | type.Rd.R | library(RProtoBuf)
### Name: type-methods
### Title: Gets the type or the C++ type of a field
### Aliases: type type-methods cpp_type cpp_type-methods TYPE_DOUBLE
### TYPE_FLOAT TYPE_INT64 TYPE_UINT64 TYPE_INT32 TYPE_FIXED64
### TYPE_FIXED32 TYPE_BOOL TYPE_STRING TYPE_GROUP TYPE_MESSAGE TYPE_BYTES
### TYPE_UINT32 TYPE_ENUM TYPE_SFIXED32 TYPE_SFIXED64 TYPE_SINT32
### TYPE_SINT64 CPPTYPE_INT32 CPPTYPE_INT64 CPPTYPE_UINT32 CPPTYPE_UINT64
### CPPTYPE_DOUBLE CPPTYPE_FLOAT CPPTYPE_BOOL CPPTYPE_ENUM CPPTYPE_STRING
### CPPTYPE_MESSAGE
### Keywords: methods
### ** Examples
## Example driver for the type()/cpp_type() accessors, extracted from the
## Rd examples of the RProtoBuf package.
## Not run:
##D proto.file <- system.file( "proto", "addressbook.proto", package = "RProtoBuf" )
##D Person <- P( "tutorial.Person", file = proto.file )
## End(Not run)
## Don't show:
Person <- P( "tutorial.Person" )
## End(Don't show)
# Protobuf wire type of the id field, numeric then as a readable string.
type(Person$id)
type(Person$id, as.string=TRUE)
# Underlying C++ type of the email field, numeric then as a string.
cpp_type(Person$email)
cpp_type(Person$email, TRUE)
|
c601461fd247d3d00f9ed0a14c814d4688dbbc91 | 5a52d122a5f5867d3a5a5353bf46abbcb81ba2bc | /R/indicators.r | 0128e9c6d20e81f2f89ec97161092114be572976 | [] | no_license | olafmersmann/emoa | af57392db2efae54dce9aab2918628007247b561 | 7bc90c4b4a622b826683c9610b07ee8800ae285e | refs/heads/master | 2016-09-09T23:13:22.751552 | 2014-12-21T15:23:59 | 2014-12-21T15:23:59 | 28,303,541 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,959 | r | indicators.r | ##
## pareto_utilities.r - Operators relating to pareto optimality
##
## Author:
## Olaf Mersmann (OME) <olafm@statistik.tu-dortmund.de>
##
##' Scale point cloud
##'
##' Rescale all points to lie in the box bounded by \code{minval}
##' and \code{maxval}.
##'
##' @param points Matrix containing points, one per column.
##' @param minval Optional lower limits for the new bounding box.
##' @param maxval Optional upper limits for the new bounding box.
##' @return Scaled points.
##'
##' @author Olaf Mersmann \email{olafm@@statistik.tu-dortmund.de}
##' @export
normalize_points <- function(points, minval, maxval) {
  # Per-objective bounds default to the points' own bounding box
  # (objectives are stored in rows, one point per column).
  if (missing(minval))
    minval <- apply(points, 1, min)
  if (missing(maxval))
    maxval <- apply(points, 1, max)
  # Shift each row by its lower bound, then divide by the box extent.
  extent <- maxval - minval
  sweep(sweep(points, 1, minval, "-"), 1, extent, "/")
}
##' Binary quality indicators
##'
##' Calculates the quality indicator value of the set of points given in
##' \code{x} with respect to the set given in \code{o}. As with all
##' functions in \code{emoa} that deal with sets of objective values
##' these are stored by column.
##'
##' @param points Matrix of points for which to calculate the indicator
##' value stored one per column.
##' @param o Matrix of points of the reference set.
##' @param ref Reference point, if omitted, the nadir of the point sets
##' is used.
##' @param ideal Ideal point of true Pareto front. If omited the ideal
##' of both point sets is used.
##' @param nadir Nadir of the true Pareto front. If ommited the nadir
##' of both point sets is used.
##' @param lambda Number of weight vectors to use in estimating the
##' utility.
##' @param utility Name of utility function.
##' @return Value of the quality indicator.
##'
##' @author Olaf Mersmann \email{olafm@@statistik.tu-dortmund.de}
##'
##' @references
##' Zitzler, E., Thiele, L., Laumanns, M., Fonseca, C., and
##' Grunert da Fonseca, V (2003): Performance Assessment of
##' Multiobjective Optimizers: An Analysis and Review. IEEE
##' Transactions on Evolutionary Computation, 7(2), 117-132.
##'
##' @export
##' @rdname binary_indicator
hypervolume_indicator <- function(points, o, ref) {
  # Default reference point: componentwise nadir over both point sets.
  if (missing(ref))
    ref <- pmax(apply(points, 1, max), apply(o, 1, max))
  # Shortfall of the candidate set's dominated hypervolume relative to
  # the reference set's (smaller is better).
  dominated_hypervolume(o, ref) - dominated_hypervolume(points, ref)
}
##' @export
##' @rdname binary_indicator
epsilon_indicator <- function(points, o) {
  ## Both sets must be numeric matrices (points stored one per column).
  stopifnot(is.matrix(points), is.numeric(points),
            is.matrix(o), is.numeric(o))
  ## The multiplicative epsilon indicator is undefined for non-positive
  ## objective values, so reject them up front.
  if (any(points < 0) || any(o < 0))
    stop("The epsilon indicator is only defined for strictly positive objective values.")
  ## Computation is delegated to the package's compiled routine.
  .Call(do_eps_ind, points, o)
}
##
## R indicators:
##
## Shared worker behind r1/r2/r3_indicator: evaluates the chosen utility
## function for both point sets over a grid of weight vectors (in C) and
## combines the two utility vectors with `summary`.
r_indicator <- function(points, o, ideal, nadir, lambda, utility, summary) {
  ## (OME): Order of utility functions is important. It translates
  ## into the method number in the C code!
  utility.functions <- c("weighted sum", "Tchebycheff", "Augmented Tchebycheff")
  utility <- match.arg(utility, utility.functions)
  method <- which(utility == utility.functions)
  ## Default ideal / nadir: componentwise best / worst over both fronts.
  if (missing(ideal))
    ideal <- pmin(apply(points, 1, min), apply(o, 1, min))
  if (missing(nadir))
    nadir <- pmax(apply(points, 1, max), apply(o, 1, max))
  ## Number of objectives.  FIX: renamed from `dim`, which shadowed
  ## base::dim() inside this function.
  n.obj <- nrow(points)
  if (missing(lambda)) {
    ## Default number of weight vectors shrinks with the number of
    ## objectives to keep the weight grid tractable.
    lambda <- switch(as.character(n.obj),
                     "2" = 500,
                     "3" = 30,
                     "4" = 12,
                     "5" = 8,
                     3)
  }
  ix <- .Call(do_r_ind, points, ideal, nadir,
              as.integer(lambda), as.integer(method))
  io <- .Call(do_r_ind, o, ideal, nadir,
              as.integer(lambda), as.integer(method))
  summary(ix, io)
}
##' @export
##' @rdname binary_indicator
r1_indicator <- function(points, o, ideal, nadir, lambda, utility="Tchebycheff") {
  ## Fraction of weight vectors where `points` attains the better utility,
  ## counting ties as one half.
  r1 <- function(ua, ur) mean(ua > ur) + mean(ua == ur)/2
  r_indicator(points, o, ideal, nadir, lambda, utility, r1)
}
##' @export
##' @rdname binary_indicator
r2_indicator <- function(points, o, ideal, nadir, lambda, utility="Tchebycheff") {
  ## Mean utility difference between the reference set and the candidate set.
  r2 <- function(ua, ur) mean(ur - ua)
  r_indicator(points, o, ideal, nadir, lambda, utility, r2)
}
##' @export
##' @rdname binary_indicator
r3_indicator <- function(points, o, ideal, nadir, lambda, utility="Tchebycheff") {
  ## Mean relative utility difference, normalised by the reference utility.
  r3 <- function(ua, ur) mean((ur - ua)/ur)
  r_indicator(points, o, ideal, nadir, lambda, utility, r3)
}
##' Unary R2 indicator
##'
##' @param points Matrix of points for which to calculate the indicator
##' value stored one per column.
##' @param weights Matrix of weight vectors stored one per column.
##' @param ideal Ideal point of true Pareto front. If omited the ideal
##' of \code{points} is used.
##' @return Value of unary R2 indicator.
##'
##' @export
##' @author Olaf Mersmann \email{olafm@@p-value.net}
unary_r2_indicator <- function(points, weights, ideal) {
  ## Default ideal point: componentwise minimum of the supplied points
  ## (one point per column, one objective per row).
  if (missing(ideal))
    ideal <- apply(points, 1, min)
  ## Computation is delegated to the package's compiled routine.
  .Call(do_unary_r2_ind, points, weights, ideal)
}
|
3dbe198c95e57ca66b4e93e48e90d5e8448fbdfa | da580939f12f4a51ce420ab0e82565375933c614 | /man/make_scale.Rd | c05cb8adabbf984b29454afcfd87785b872bdf3e | [] | no_license | LukasWallrich/rNuggets | c2c0c3f9d638add7240b6d3d83a8f9450c75f858 | 0a15ae9c9fc163687eb9f0ad25f899ee370eb4d6 | refs/heads/master | 2022-09-01T16:42:30.854303 | 2022-08-19T21:53:59 | 2022-08-19T21:53:59 | 247,954,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,020 | rd | make_scale.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_scales.R
\name{make_scale}
\alias{make_scale}
\title{Creates a scale by calculating item mean and returns descriptives}
\usage{
make_scale(
df,
scale_items,
scale_name,
reverse = c("auto", "none", "spec"),
reverse_items = NULL,
two_items_reliability = c("spearman_brown", "cron_alpha", "r"),
r_key = NULL,
print_hist = TRUE,
print_desc = TRUE,
return_list = FALSE
)
}
\arguments{
\item{df}{A dataframe}
\item{scale_items}{Character vector with names of scale items (variables in df)}
\item{scale_name}{Name of the scale}
\item{reverse}{Should scale items be reverse coded? One of "auto" - items are
reversed if that contributes to scale consistency, "none" - no items reversed,
or "spec" - items specific in \code{reverse_items} are reversed.}
\item{reverse_items}{Character vector with names of scale items to be reversed
(must be subset of scale_items)}
\item{two_items_reliability}{How should the reliability of two-item scales be
reported? "spearman_brown" is the recommended default, but "cron_alpha"
(Cronbach's alpha) and Pearson's "r" are also supported.}
\item{r_key}{(optional) Numeric. Set to the possible maximum value of the scale
if the whole scale should be reversed, or to -1 to reverse the scale based on
the observed maximum.}
\item{print_hist}{Logical. Should histograms for items and resulting scale be printed?}
\item{print_desc}{Logical. Should descriptives for scales be printed?}
\item{return_list}{Logical. Should only scale values be returned, or descriptives as well?}
}
\value{
Depends on \code{return_list} argument. Either just the scale values,
or a list of scale values and descriptives.
}
\description{
This function creates a scale by calculating the mean of a set of items,
and prints and returns descriptives that allow to assess internal consistency
and spread. It is primarily based on the \code{psych::alpha} function, with
more parsimonious output and some added functionality.
}
|
1b3dc1e11ea3e8344979f7d231bc5aff57619027 | 4e8820873e4c66cd063998c8f12a7d33e6c7c1ec | /chinese_officials.R | f2a748113bd07248430ed2370326f64554d68fac | [
"MIT"
] | permissive | vfulco/ChinaVitae-Scraper | 7328a5f6d6f3d00e41bd33abb07f7269fee4d2c7 | 1730761f1861a8ca59ac63dc65e48a27d2ea1c30 | refs/heads/master | 2020-03-22T11:56:03.662288 | 2015-11-09T01:34:18 | 2015-11-09T01:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,392 | r | chinese_officials.R | library(XML)
library(stringr)
getBio <- function(url)
{
  # Scrape one official's ChinaVitae career page.
  #
  # Args:
  #   url: Career-page URL, e.g.
  #        "http://www.chinavitae.com/biography/Shen_Weichen/career"
  #
  # Returns:
  #   A data frame with one row per career entry, carrying the official's
  #   names and birth details alongside the cleaned career history.
  doc <- htmlParse(url)
  # The bioName div holds "<English name> <Chinese name>".
  full.name <- xpathSApply(doc, "//*/div[@class='bioName']", xmlValue)
  chinese.name <- gsub("^ ", "", str_extract(full.name, "\\s+[^ ]+$"))
  english.name <- gsub("\\s+$", "", gsub("\\s+[^ ]+$", "", full.name))
  # bioDetails entries are "<label> <value>"; strip the leading label word.
  details <- xpathSApply(doc, "//*/div[@class='bioDetails']", xmlValue)
  birth.date <- gsub("^[^ ]+\\s", "", details[1])
  birth.place <- gsub("^[^ ]+\\s", "", details[2])
  # The first HTML table on the page is the career history; fall back to a
  # single all-NA row when no usable entries remain after cleaning.
  history <- cleanHistory(readHTMLTable(doc, header=F)[[1]])
  if (nrow(history) < 1)
    history <- cbind(start.date=NA,end.date=NA,position=NA,institution=NA,location=NA)
  data.frame(chinese.name, english.name, birth.date, birth.place, history)
}
cleanHistory <- function(history.df)
{
  # Cleans an official's history data frame.
  #
  # Args:
  #   history.df: A dataframe of official's history; column 1 holds the
  #     "start-end" year range, column 2 the "position, institution   location"
  #     description (fields apparently separated by runs of 3+ spaces --
  #     TODO confirm against live page markup).
  # Returns:
  #   A cleaned dataframe of official's history with columns
  #   start.date, end.date, position, institution, location.
  # Leading/trailing digit runs of column 1 give the start and end years.
  start.date <- str_extract(history.df[,1], "^[[:digit:]]+")
  end.date <- str_extract(history.df[,1], "[[:digit:]]+$")
  history.df[,2] <- gsub("\\(|\\)", "", history.df[,2])
  # Position: everything before the first comma.
  position <- str_extract(history.df[,2], "^[^,]+")
  # Location: the tail after a run of 3+ whitespace characters.
  location <- str_extract(history.df[,2], "\\s{3}.+$")
  # Mark the wide gaps with "~~" so the institution (between the comma and
  # the gap) can be extracted.  NOTE(review): the class [^[~~]] excludes the
  # characters '[' and '~' -- the inner brackets are likely unintentional
  # but harmless here.
  temp <- gsub("   ","~~",history.df[,2])
  institution <- str_extract(temp, ", [^[~~]]+")
  institution <- gsub("^, ", "", institution)
  return.df <- data.frame(start.date, end.date, position, institution, location)
  return(return.df)
}
getOfficialsList <- function(url)
{
  # Extract all officials' career-page URLs from one "Browse by Name" page.
  #
  # Args:
  #   url: URL of a letter page, e.g. ".../biography_browse.php?l=a"
  #
  # Returns:
  #   Character vector of absolute career URLs to scrape with getBio().
  html <- toString.XMLNode(htmlParse(url))
  # Relative links look like "biography/<Name>"; strip trailing punctuation
  # left over from the surrounding markup.
  rel <- str_extract_all(html, "biography/[^ ]+")[[1]]
  rel <- gsub("[[:punct:]]*$","",rel)
  paste("http://www.chinavitae.com/",rel,"/career",sep="")
}
# Create a base URL, then all 26 letters, then paste them together to get all 26 library pages.
base.url <- "http://www.chinavitae.com/biography_browse.php?l="
page.letters <- letters[1:26]
library.urls <- paste(base.url, page.letters, sep="")
# This will be the final data frame we produce.
official.df <- list()
failure.list <- NULL
# Loop through all URLs and get officials' information, recording URLs
# whose scrape failed and sleeping 0.5-2s between requests to be polite.
for(uu in library.urls)
{
  official.list <- getOfficialsList(uu)
  for(oo in official.list)
  {
    # Progress indicator on a single console line.
    cat("\r",oo,"          ")
    flush.console()
    official.bio <- NULL
    try(official.bio <- getBio(oo))
    if(is.null(official.bio))
      failure.list <- c(failure.list, oo)
    # NULL entries from failed scrapes are dropped by do.call(rbind, ...) below.
    official.df <- c(official.df, list(official.bio))
    Sys.sleep(runif(1,0.5,2))
  }
}
official.df <- do.call(rbind,official.df)
write.csv(official.df,"chinese_officials.csv",row.names=F)
write.csv(failure.list,"failures.csv",row.names=F)
e08eaebec483e74336ccec114585018fa6f83a71 | 66028645bc7824ff6ab202c39f94d900e56deca3 | /R/estimateGLMTagwiseDisp.R | ec677cddaa277ba55f546b764d76e1196ad79bdd | [] | no_license | genome-vendor/r-bioc-edger | 731345805714d2c0f722f8815e9ac8e24012e781 | 978f5b818249e52f2be9f54af71e2acd54abf443 | refs/heads/master | 2021-05-11T08:26:38.615419 | 2013-12-02T19:26:29 | 2013-12-02T19:26:29 | 118,052,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,169 | r | estimateGLMTagwiseDisp.R | # Created March 2011. Last modified 13 March 2013.
# S3 generic for tagwise (per-gene) dispersion estimation; dispatches on
# the class of y (DGEList object or plain count matrix).
estimateGLMTagwiseDisp <- function(y, ...)
UseMethod("estimateGLMTagwiseDisp")
estimateGLMTagwiseDisp.DGEList <- function(y, design=NULL, offset=NULL, dispersion=NULL, prior.df=10, trend=!is.null(y$trended.dispersion), span=NULL, AveLogCPM=NULL, ...)
{
# DGEList method: resolve offsets/AveLogCPM/dispersion from the object,
# delegate to the default (matrix) method, then store the results back
# in the DGEList and return it.
# If provided as arguments, offset and AveLogCPM over-rule the values stored in y
	if(!is.null(AveLogCPM)) y$AveLogCPM <- AveLogCPM
	if(is.null(y$AveLogCPM)) y$AveLogCPM <- aveLogCPM(y)
	if(!is.null(offset)) y$offset <- expandAsMatrix(offset,dim(y))

#	Find appropriate dispersion: squeeze toward the trended dispersion when
#	trend=TRUE, otherwise toward the common dispersion.
	if(trend) {
		if(is.null(dispersion)) dispersion <- y$trended.dispersion
		if(is.null(dispersion)) stop("No trended.dispersion found in data object. Run estimateGLMTrendedDisp first.")
	} else {
		if(is.null(dispersion)) dispersion <- y$common.dispersion
		if(is.null(dispersion)) stop("No common.dispersion found in data object. Run estimateGLMCommonDisp first.")
	}

#	Dispatches to estimateGLMTagwiseDisp.default on the raw count matrix.
	d <- estimateGLMTagwiseDisp(y=y$counts, design=design, offset=getOffset(y), dispersion=dispersion, trend=trend, prior.df=prior.df, AveLogCPM=y$AveLogCPM, ...)
	y$prior.df <- prior.df
	y$span <- d$span
	y$tagwise.dispersion <- d$tagwise.dispersion
	y
}
estimateGLMTagwiseDisp.default <- function(y, design=NULL, offset=NULL, dispersion, prior.df=10, trend=TRUE, span=NULL, AveLogCPM=NULL, ...)
{
# Default method: estimate an empirical-Bayes tagwise dispersion for each
# row of the count matrix y by Cox-Reid adjusted profile likelihood on an
# interpolation grid, squeezing each tag towards `dispersion` with
# prior.df prior degrees of freedom.  Returns a list with
# tagwise.dispersion and the local-trend span actually used.
#	Check y
	y <- as.matrix(y)
	ntags <- nrow(y)
	if(ntags==0) return(numeric(0))
	nlibs <- ncol(y)

#	Check design: default to an intercept-only model when none supplied.
	if(is.null(design)) {
		design <- matrix(1,ncol(y),1)
		rownames(design) <- colnames(y)
		colnames(design) <- "Intercept"
	} else {
		design <- as.matrix(design)
	}
#	With no residual degrees of freedom the dispersion is not estimable.
	if(ncol(design) >= ncol(y)) {
		warning("No residual df: setting dispersion to NA")
		return(rep(NA,ntags))
	}

#	Check span: default shrinks slowly as the number of tags grows.
	if(is.null(span)) if(ntags>10) span <- (10/ntags)^0.23 else span <- 1

#	Check AveLogCPM
	if(is.null(AveLogCPM)) AveLogCPM <- aveLogCPM(y,lib.size=exp(offset))

#	Call Cox-Reid grid method
	tagwise.dispersion <- dispCoxReidInterpolateTagwise(y, design, offset=offset, dispersion, trend=trend, prior.df=prior.df, span=span, AveLogCPM=AveLogCPM, ...)

	list(tagwise.dispersion=tagwise.dispersion,span=span)
}
|
339fe6f15293b06f1f62023a45e85c414de4cd1b | 5732bc1e5004cf4cb69e87e96a821dee881952da | /script_project.R | 6d0e48a2f183f29c87619dfc09dbd92d52404fd9 | [] | no_license | akourm910e/shinyproject-1 | 52925dc6b094ec4aae6111285e523b6936b454bf | 1266df89d098f7c61d576fefa7cb91f163892dc3 | refs/heads/master | 2021-07-13T17:52:47.071281 | 2017-10-19T14:11:59 | 2017-10-19T14:11:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,208 | r | script_project.R | setwd("~/Bureau/m2/s3/stat/project")
######################################
# Data import: parameters measured by month and by year (SOMLIT data)
######################################
mois<-read.csv("bdd_mois_somlit.csv", header = T, dec = ".")
annee<-read.csv("bdd_annee_somlit.csv", header = T, dec = ".")

#############
# Conversion of the year and month parameters to factor (kept commented out)
#############
# annee$Annee<-as.factor(annee$Annee)
# mois$Annee<-as.factor(mois$Annee); mois$Mois<-as.factor(mois$Mois)
# annee$Annee<-as.numeric(annee$Annee)
# mois$Annee<-as.numeric(mois$Annee); mois$Mois<-as.numeric(mois$Mois)

#############
# Multivariate analysis
#############
str(annee)
summary(annee)

# Example PCA for the Antioche site on pH, temperature, salinity, oxygen
# choice of parameters (select) and of the site
data<-subset(annee, NOM_SITE=="Antioche", select = c("pH","Temperature", "Salinite", "Oxygene"))
#nb_na<-sum(is.na(annee))
#c("Il y a", nb_na,"NA")

# compute means / run the PCA

library(FactoMineR)
ACP<-PCA(scale(data), scale.unit = TRUE )
round(ACP$eig,2)
barplot(ACP$eig[,1], main="Ebouli des valeurs propres", xlab = "Composantes", ylab="Valeurs propres")
100/ncol(data) #minimum contribution threshold (uniform share per variable)
round(ACP$var$contrib[,1:2],2 )
round (ACP$var$cos2[,1] + ACP$var$cos2[,2],2) # sum of squared cosines of the variables on the two retained axes
coordP<-round(ACP$var$coord[,1:2],2);coordP #coordinates on the correlation circle
coordS<-round(ACP$ind$coord[,1:2],2);coordS #coordinates of the stations
par(mfrow=c(1,2))
plot(ACP,choix="var",axes= c(1,2))
plot(ACP,choix="ind",axes= c(1,2))

# Class plots: individuals grouped by site, then by year.
library(ade4)
s.class(ACP$ind$coord[,c(1,2)] , fac=annee$NOM_SITE, col=c(1:20) )
s.class(ACP$ind$coord[,c(1,2)] , fac=annee$Annee, col=c(1:20) )

# Temperature time series, overall and per site.
library(lattice)
xyplot(Temperature~Annee, groups = NOM_SITE, data=annee, main="Température~Année", xlab="Années", ylab="Temps", col=c(1:4),pch=c(16:20))
plot(Temperature~Annee, annee, type = "l", main = "Température selon Année")
with(subset(annee, NOM_SITE=="Eyrac"), plot(Annee, Temperature, type = "l", points(Temperature~Annee, col = "green")))
with(subset(annee, NOM_SITE=="Antioche"), plot(Annee, Temperature, type= "l", points(Temperature~Annee, col = "blue")))
?plot
d60bc7129bbd2cfcc0b10a27b765b32640f49e9e | 5f621b6457095f50b61ec088172bfeafb027942e | /Chapter 13/Chapter 13 Exercises 4.R | 98fa7f33b0d2df5bdee16da97c1ccc7e8357760a | [] | no_license | jpfonseca84/BookR | d1cabc122afdbac74cfe843e58cba65cae9a51a6 | 18cb73cb1008e65642099db837269075998e6102 | refs/heads/master | 2022-05-02T11:01:37.269747 | 2017-06-05T01:04:36 | 2017-06-05T01:04:36 | 82,219,136 | 16 | 16 | null | 2022-04-22T19:27:04 | 2017-02-16T19:42:55 | R | UTF-8 | R | false | false | 1,704 | r | Chapter 13 Exercises 4.R | #a ----
weight<-c(55,85,75,42,93,63,58,75,89,67)
height<-c(161,185,174,154,188,178,170,167,181,178)
Sex<-c("f","m","m","f","m","m","f","m","m","f")
cor(weight,height)
#b ----
mtcars[1:5,]
#i
?mtcars
#ii
thecor<-cor(mtcars[,4],mtcars[,7])
plot(mtcars[,4],mtcars[,7],xlab="Horsepower",ylab="1/4 mile time")
text(300,20,labels=c("correlation is\n\n", round(thecor,2)))
#iii
tranfac<-factor(mtcars[,9],labels=c("auto","manual"))
#iv
theplot<-qplot(mtcars[,4],mtcars[,7],
main="The Plot",
xlab="Horsepower",
ylab="1/4 mile time",
color=tranfac,
shape=tranfac)
#v
autoflag<-mtcars[,9]==0
manualcor<-round(cor(mtcars[,4][autoflag],mtcars[,7][autoflag]),4)
autocor<-round(cor(mtcars[,4][!autoflag],mtcars[,7][!autoflag]),4)
#Separeted by transmission, the negative correlation gets stronger
#c ----
#i
sunchicks<-chickwts$weight[chickwts$feed == "sunflower"]
plot(
x = sunchicks,
y = rep(0, length(sunchicks)),
xlab = "weight",
xlim=c(min(sunchicks),
max(sunchicks)),
ylab = "sunflower chick weights",
yaxt = "n",
bty = "n",
cex.axis=1.5,
cex.lab=1.5)
abline(h=0,lty=2)
#ii
sd(sunchicks)
#[1] 48.83638
IQR(sunchicks)
#[1] 27.5
#iii
sunchicks2<-sunchicks[-6]
plot(
x = sunchicks2,
y = rep(0, length(sunchicks2)),
xlab = "weight",
xlim=c(min(sunchicks2),
max(sunchicks2)),
ylab = "",
yaxt = "n",
bty = "n",
cex.axis=1.5,
cex.lab=1.5)
abline(h=0,lty=2)
sd(sunchicks2)
#[1] 38.31473
IQR(sunchicks2)
#[1] 21.5
|
e538c4f3449ffd7f82a29abb37e8f4c7c2a5b9fa | d7bf726ad2e6f242437252a355af2df03bec248b | /Common_species_list.R | cba2fc60704d585f4f6c2129b5430d94ae4b90dc | [] | no_license | Ramalh/Comparing-identification-methods | e706e381080dfc89be29409e31d7160875e4c65c | 389ceafd0d516cbb03e04a9708bf7207fb1db6e8 | refs/heads/main | 2023-05-13T10:31:00.106436 | 2021-06-09T18:08:58 | 2021-06-09T18:08:58 | 373,818,619 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,197 | r | Common_species_list.R | library(readr)
library(ape)
names <- read_delim("../names.dmp", "|\t", escape_double = FALSE,
trim_ws = TRUE, col_names = c("taxid", "txt", "X3", "X4", "X5"),
col_types = cols_only("taxid" = "c", "txt" = "c"))
names$txt <- gsub(" ", "_", names$txt)
asmbl <- read_delim("../assembly_summary_refseq.txt", "\t", escape_double = FALSE, col_names = T
, col_types = cols_only("assembly_accession" = "c", "refseq_category" = "c",
"taxid" = "c", "species_taxid" = "c", "organism_name" = "c", "gbrs_paired_asm" = "c",
"ftp_path" = "c"), trim_ws = TRUE)
category <- read_delim("../categories.dmp", "\t", escape_double = F, trim_ws = T, col_names = F, col_types = cols_only(
"X1" = "c", "X2" = "c", "X3" = "c"
))
asmbl <- asmbl[asmbl$taxid %in% category$X2, ]
#Assigning all species names to a file which will be uploaded to Timetree.org to query
tree_names <- unique(asmbl$organism_name)
write_csv(tree_names, file = "../tree_names.txt", row.names = F, quote = F)
#the file which downloaded from Timetree.org after commiting the query
tree <- read.tree("../tree_names.nwk")
taxid <- unique(c(asmbl$species_taxid, asmbl$taxid))
names_in_ours <- names$taxid %in% asmbl$species_taxid
names_in_tree <- names$txt %in% tree$tip.label
keep <- subset(names, names_in_ours & names_in_tree)
t_name <- keep$txt
names(t_name) <- keep$taxid
asmbl <- subset(asmbl, taxid %in% keep$taxid | species_taxid %in% keep$taxid)
asmbl$sname <- t_name[asmbl$species_taxid]
tree_in_ours <- tree$tip.label %in% asmbl$sname
tt <- drop.tip(tree, tree$tip.label[!tree_in_ours])
dist <- cophenetic.phylo(tt)
asmbl<-asmbl[match(unique(asmbl$species_taxid), asmbl$species_taxid),]
write.table(asmbl, file = "../All_asmbl.txt", sep = "\t", row.names = F, quote = F)
#Changing matrix col and row names to GCF numbers
colnames(dist) <- asmbl$assembly_accession[match(colnames(dist), asmbl$sname)]
rownames(dist) <- asmbl$assembly_accession[match(rownames(dist), asmbl$sname)]
save(dist, file = "../timetree-dist.Rdata")
|
bfadc2c7cdab5a983d3d4d2523aa22969d7ba111 | bb1f0125cce89c1049857181e94a8efce2f39f98 | /data_access/GBIF.R | fb6b283eec10ffcf998770442233ac902faae9fc | [
"MIT"
] | permissive | helixcn/spatial_bioinformatics | d2f74a78be1faabe3511ec941a5352a647815046 | 2c4ad5094c4d014761c4011a18d57e2cc1e8a3ee | refs/heads/master | 2022-01-10T11:07:18.806929 | 2018-09-10T17:51:09 | 2018-09-10T17:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,618 | r | GBIF.R | ### Downloading occurrence data from GBIF and plotting on a map ###
# NOTE(review): install.packages() calls at the top of a script reinstall
# on every run; usually better run once interactively.
install.packages('dismo')
install.packages('rgdal')
install.packages('GISTools')
library(dismo)
library(rgdal)
library(maps)
## Go to www.naturalearthdata.com > Get the Data > Medium Scale Data; Cultural > Download countries.
# then load the shapefile back into R.
# NOTE(review): dsn is a machine-specific absolute path -- adjust locally.
continents <- readOGR(dsn = "/home/pjg/GIS/shpData", layer = "ne_50m_admin_0_countries")
# Subset the SpatialPolygonsDataset by North America. Try plotting this and see how it looks.
N.America<-continents[continents@data$CONTINENT=="North America",]
# crop North America by a rough extent of the area of interest
# (extent is xmin, xmax, ymin, ymax in degrees).
Amb.opa<-crop(N.America, extent(c(-100, -66, 23, 46)))
# Plot this extent of the map. Use any color you like.
plot(Amb.opa, col='blue')
# Query GBIF for data associated with species name (downloads records).
MarbSalam<- gbif(genus = "Ambystoma", species = "opacum", download = T)
# Keep only the columns that have the species name, latitude and longitude,
# dropping records with missing coordinates.
Locs<-na.omit(data.frame(cbind(MarbSalam$species), MarbSalam$lon, MarbSalam$lat))
# Rename the column names of the Locs data.frame
colnames(Locs)<-c("SPECIES","LONGITUDE","LATITUDE")
# Put these points on the map (columns 2:3 are lon, lat).
points(Locs[,2:3], col='red', pch=16)
# Add a legend
legend("bottomright", col='red', pch=16, legend = 'Ambystoma opacum')
# Add a scale
map.scale(x = -98, y = 23, relwidth=0.1, ratio = T)
# load GISTools library. This will mask the map.scale function from the "maps" package. Do this step last.
# Add north arrow.
library(GISTools)
north.arrow(xb=-67, yb = 30, len=0.5, lab="N")
ad3afdffa44dd89fb4cdf65c70ed28cfc609f5b7 | aec81c8d4ec899d48791fb2f52607842a3a7bca0 | /tests/testthat/test-rcmd.R | 8de39f0da1ebdad23ceed5b894e36ef6cea8c85a | [
"MIT"
] | permissive | silvavelosa/callr | e59b79d4db1fa0279268ee62b672865cbdda9483 | 998c862249311e31870ca6cbe2420586360b687a | refs/heads/master | 2021-01-20T15:54:46.299837 | 2017-05-09T23:52:28 | 2017-05-09T23:52:28 | 90,802,047 | 0 | 0 | null | 2017-05-09T23:52:18 | 2017-05-09T23:52:18 | null | UTF-8 | R | false | false | 1,286 | r | test-rcmd.R |
context("rcmd")
test_that("rcmd works", {
expect_equal(rcmd("config", "CC")$status, 0)
expect_match(rcmd("config", "CC")$stdout, ".")
})
test_that("rcmd show works", {
expect_output(rcmd("config", "CC", show = TRUE), ".")
})
test_that("rcmd echo works", {
expect_output(rcmd("config", "CC", echo = TRUE), "config\\s+CC")
})
test_that("rcmd on windows", {
wbin <- NULL
wargs <- NULL
with_mock(
`callr::os_platform` = function() "windows",
`callr::run_r` = function(bin, args, ...) {
wbin <<- bin; wargs <<- args
},
rcmd("config", "CC")
)
expect_match(wbin, "Rcmd.exe")
expect_equal(wargs, c("config", "CC"))
})
test_that("rcmd_safe", {
expect_equal(rcmd_safe("config", "CC")$status, 0)
})
test_that("wd argument", {
tmp <- tempfile(fileext = ".R")
tmpout <- paste0(tmp, "out")
cat("print(getwd())", file = tmp)
mywd <- getwd()
rcmd("BATCH", c(tmp, tmpout), wd = tempdir())
expect_equal(mywd, getwd())
expect_match(
paste(readLines(tmpout), collapse = "\n"),
basename(tempdir())
)
})
test_that("fail_on_status", {
rand <- basename(tempfile())
expect_error(rcmd("BATCH", rand, fail_on_status = TRUE))
expect_silent(out <- rcmd("BATCH", rand, fail_on_status = FALSE))
expect_true(out$status != 0)
})
|
a7ee09ee2b6b5d5903f061f4685b6d11569ca10a | d1d622b0495b46693edce231af5d09e047f2df54 | /r-para-sqlite.R | 433e021432e75523e4f788adb88ace98df58d255 | [
"MIT"
] | permissive | jfaganUK/RParallelSQLite | 0390dcdcd06a9b31c0c784edc88ec09bfee95f92 | 2346a01eda56a560206d27616ab7fcbf0edefda6 | refs/heads/master | 2020-05-30T13:07:18.738742 | 2014-10-13T13:35:35 | 2014-10-13T13:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 951 | r | r-para-sqlite.R | ###############################################################################
### Using R and SQLite to process data using multiple cores from a database and
### save it back into a different database
### Jesse Fagan
### October 12, 2014
### Clear the workspace
rm(list=ls())
gc()
### Libraries
library(data.table)
library(RSQLite)
library(parallel)
library(rbenchmark)
source('r-para-sqlite_functions.R')
### Functions #################################################################
con_data <- dbConnect(SQLite(), 'somedata.db')
con_result <- dbConnect(SQLite(), 'someresults.db')
n <- 1000
grps <- 1326
createDummyDB(con_data, n = n, grps = grps)
runAllModels()
runAllModelsMC(ncores = 4)
benchmark(runAllModels(), runAllModelsMC(ncores = 2), runAllModelsMC(ncores=6), runAllModelsMC(ncores=6, block.size = 500), runAllModelsMCDT(),
order = 'relative', replications = 1)
dbDisconnect(con_data)
dbDisconnect(con_result)
|
1611d49bc8d1724d55c6de955f5098118bdd0cef | 40509fc494148bc2e7ddfe89884146199b308e53 | /R/wideGLRIData.R | 788347e949e36f022988345861c5110a144706c6 | [
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | srcorsi-USGS/GLRItcl | f85889cff82c98d5597f78c65cc0eeca17859405 | c4250bb93f680346fa90f00abd37a36c9ca78a1c | refs/heads/master | 2020-02-26T13:46:53.490898 | 2013-12-03T04:31:18 | 2013-12-03T04:31:18 | 14,752,688 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,358 | r | wideGLRIData.R | #' Convert long GLRI dataframe to wide
#'
#' Filter the filtered data into a wide format.
#'
#' @param filteredData data.frame
#' @keywords filter
#' @return DF dataframe
#' @export
#' @examples
#' genericCensoringValue <- function(qualifier,value, detectionLimit){
#' valueToUse <- ifelse("<" == qualifier, detectionLimit, value)
#' return(valueToUse)
#' }
#' filteredData <- filterGLRIData(QWPortalGLRI,genericCensoringValue)
#' wideGLRIData(filteredData)
wideGLRIData <- function(filteredData){
colNames <- colnames(filteredData)
index <- which(colNames != "tz" & colNames != "ActivityStartDateCurrentLocal" & colNames != "ActivityEndDateCurrentLocal" & colNames != "ActivityStartDateUTC" & colNames != "ActivityEndDateUTC" & colNames != "ActivityEndDateGiven")
filteredDataSub <- filteredData[,index]
data <- reshape(filteredDataSub, idvar=c("ActivityStartDateGiven","site","HydrologicEvent","HydrologicCondition"),timevar = "USGSPCode", direction="wide",sep="_")
filteredPcode1 <- filteredData[filteredData$USGSPCode == filteredData$USGSPCode[2],]
endDate <- setNames(filteredPcode1$ActivityEndDateGiven, filteredPcode1$ActivityStartDateGiven)
data$ActivityEndDateGiven <- endDate[as.character(data$ActivityStartDateGiven)]
row.names(data) <- NULL
data <- data[,c(1:2,ncol(data),3:(ncol(data)-1))]
return(data)
}
|
cc1bcbe344a908976981341ec722cfb372a064fd | ecfc9de04a77da8d3812307f1c07185936dacd64 | /code_boxes/Box 5.4.R | 3c209bc1562d2b4544706b5c42761cb8e3269629 | [] | no_license | finleya/GFS | c48661e4029101de5ef316e8c2c47c93b441ada0 | 5329a4e71309f98c50178a709e3a861b0a73d6e6 | refs/heads/master | 2023-01-22T03:37:24.868432 | 2020-12-05T19:20:16 | 2020-12-05T19:20:16 | 269,411,442 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,665 | r | Box 5.4.R | rm(list=ls())
#
# WAIC and LOO for exponential and Weibul models using
# fire scar data
# Be sure to set working directory!!
#
setwd(" ")
#
# The package loo is used to compute LOO and WAIC
#
library("loo")
# fire scar data
y<-c(
2, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 8,
8, 9, 9, 9, 9, 9, 9, 9, 10, 11, 11, 12, 12, 13, 13,
13, 13, 13, 14, 14, 14, 14, 15, 16, 16, 17, 19, 20,
21, 24, 25, 25, 30, 30, 31, 31, 31, 31, 31, 31, 33,
33, 34, 36, 37, 39, 41, 44, 45, 47, 48, 51, 52, 52,
53, 53, 53, 53, 53, 57, 60, 62, 76, 77, 164)
Nobs <- length(y)
# read joint posterior samples
expon <- read.table("exp.out",header=FALSE,row.names = NULL)
weib <- read.table("Weibull.out",header=FALSE,row.names = NULL)
k <- 10000 # posterior sample size
# intitialize parameter matrices and vectors
lambda <- rep(0,k)
nu <- rep(0,k)
gamma <- rep(0,k)
# copy posterior samples into appropriate matrices and vectors
lambda[1:k] <- expon[1:k,2]
gamma[1:k] <- weib[1:k,2]
nu[1:k] <- weib[((k+1):(2*k)),2]
rm(expon,weib)
#
# intilalize log-likelihood matrices
#
log_lik_e <- matrix(0,nrow=k,ncol=Nobs)
log_lik_w <- matrix(0,nrow=k,ncol=Nobs)
# Compute loglikelihood for each tree and each set of
# parameters in joint poterior sample
for (i in 1:k){
# parameters have index i
for (j in 1:Nobs){
# observations have index j
log_lik_e[i,j] <- log(lambda[i])-(y[j]*lambda[i])
log_lik_w[i,j] <- log(nu[i])+log(gamma[i])+((nu[i]-1)*log(y[j])) -
(gamma[i]*(y[j]^nu[i]))
}
}
#
# Get LOO and WAIC from loo
#
LOO_e <- loo(log_lik_e)
WAIC_e <- waic(log_lik_e)
LOO_w <- loo(log_lik_w)
WAIC_w <- waic(log_lik_w)
LOO_e; LOO_w
WAIC_e; WAIC_w
|
a1fc117f5fbb11532da56a536993b8661d677096 | 29585dff702209dd446c0ab52ceea046c58e384e | /mixPHM/R/Eclass.R | 8862ec6463f6657ebd86325117b0016109379179 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,727 | r | Eclass.R | `Eclass` <-
function(x, old, K, method, Sdist,p, cutpoint)
{
  # M-step style parameter estimation for the mixPHM EM algorithm: fits
  # proportional-hazards survreg models within the K components under one
  # of five parameterizations (`method`) and returns the K x p scale and
  # shape matrices, the prior-probability matrix, and the parameter count.
  #
  # Args:
  #   x        n x p matrix of dwell times (0 = page not visited)
  #   old      current cluster assignment for each of the n sessions
  #   K        number of mixture components
  #   method   "separate", "main.g", "main.p", "int.gp" or "main.gp"
  #   Sdist    survival distribution passed to survreg (e.g. "weibull")
  #   p        number of pages (columns of x)
  #   cutpoint dwell times above this value are treated as right-censored
  shape <- matrix(NA, K, p)    # K x p matrix with shape parameters
  scale <- matrix(NA, K, p)    # K x p matrix with scale parameters

  #-------------sanity checks during EM-iteration-----------------
  if (any(as.vector(table(old))==1)) {       # if a cluster frequency equals 1
    outlier <- (1:length(old))[which(old==which(table(old)==1))]
    cat("Cluster contains only one observation! Subject",outlier,"may be an outlier!\n")
    stop("Cannot proceed with estimation!")
  }
  if (length(unique(old))!=K) {              # if a cluster doesn't contain any element
    stop("Cluster contains 0 elements! Re-run with less components!")
  }

  # Within each cluster, a page whose positive dwell times take fewer than
  # two distinct values cannot be fit; replace two of its zeros with the
  # two smallest positive dwell times so survreg has something to work with.
  for (j in 1:K) {
    y <- as.matrix(x[old==j,])
    ttab <- apply(y,2,table,exclude=0)       # table of dwell-times (list)
    lvec <- sapply(ttab,length)              # number of distinct dwell-times per page
    ind0 <- which(lvec<=1)                   # column index for those with less than 2 values
    rep.el <- sort(unique(as.vector(y)))[2:3]  # elements for 0-replacement (2 smallest except 0)
    if (length(ind0) >= 1) {
      for (i in ind0) y[,i][which(y[,i]==0)][1:2] <- rep.el
      warning("Complete 0 survival times in cluster occured. Two of them are replaced by minimum survival times in order to proceed with estimation!")
    }
    x[old==j,] <- y
  }
  #-------------end sanity checks----------------------

  # Prior probabilities: per cluster, the share of sessions that visited
  # each page (dwell time > 0).
  priorl <- by(x,old,function(y) {           # list of prior probabilities
    y <- as.matrix(y)
    nses <- length(y[,1])                    # number of sessions in group
    apply(y,2,function(z){ lz <- length(z[z>0])/nses})
  })
  prior <- matrix(unlist(priorl),ncol=p,byrow=TRUE)  # matrix of prior probabilities

  #------------- separate ----------------------
  # One intercept-only survreg per (cluster, page): fully unrestricted.
  if (method=="separate") {
    parlist <- tapply(1:dim(x)[1],old, function(ind) {
      y <- as.matrix(x[ind,])
      apply(y,2,function(z) {
        censvec <- rep(1, length(z))
        censvec[z > cutpoint] <- 0           # vector for censored data (set to 0)
        wphm <- survreg(Surv(z[z>0], censvec[z>0])~1,dist=Sdist)  # wphm for each page within group
        shapep <- 1/wphm$scale
        scalep <- exp(wphm$coefficients[1])
        list(scalep,shapep)
      }) })
    shsclist <- tapply(unlist(parlist),rep(1:2,length(unlist(parlist))/2),function(y){
      matrix(y,nrow=K,byrow=TRUE)})          # reorganizing parlist
    shape <- shsclist[[2]]                   # shape matrix K,p
    scale <- shsclist[[1]]                   # scale matrix K,p
    anzpar <- 2*K*p
  }

  #---------------------- group contrast ----------------------
  # Per page, groups enter as a factor; shape is constant across groups.
  if (method=="main.g") {
    for (i in 1:p) {
      datreg <- as.vector(x[,i])             # dwell-time vector of page i
      datreg <- datreg[x[,i] > 0]
      censvec <- rep(1, length(datreg))
      censvec[datreg > cutpoint] <- 0        # vector for censored data (set to 0)
      xold <- old[x[,i] > 0]                 # group labels for page i
      wphm <- survreg(Surv(datreg, censvec)~factor(xold),dist=Sdist)
      scalebase <- as.vector(wphm$coefficients[1])  # scale parameter group 1 (reference group)
      scalevec1 <- as.vector(exp(wphm$coefficients[2:K]+scalebase))  # scale parameter of the remaining groups
      scale [,i] <- c(exp(scalebase),scalevec1)
      shape [,i] <- 1/wphm$scale             # shape constant across groups
    }
    anzpar <- K*p+p
  }

  #------------- page contrasts -----------------
  # Per cluster, pages enter as a factor; shape is constant across pages.
  if (method=="main.p") {
    for (j in 1:K) {
      datregmat <- as.matrix(x[old == j,])
      nsess <- dim(datregmat)[1]             # number of sessions in group j
      pagevek <- rep(1:p,rep(nsess,p))       # page index for each session in group j
      datreg <- as.vector(datregmat)
      xold <- pagevek[datreg > 0]            # keep pages with dwell time > 0
      datreg <- datreg[datreg > 0]
      censvec <- rep(1, length(datreg))
      censvec[datreg > cutpoint] <- 0        # vector for censored data (set to 0)
      wphm <- survreg(Surv(datreg, censvec)~factor(xold),dist=Sdist)  # xold refers to pages here
      scalebase <- as.vector(wphm$coefficients[1])
      scalevec1 <- as.vector(exp(wphm$coefficients[2:p]+scalebase))
      scale[j,] <- c(exp(scalebase),scalevec1)
      shape[j,] <- 1/wphm$scale              # shape constant across pages
    }
    anzpar <- K*p+K
  }

  #------------ page*group interaction ----------------
  # One model over all data with group, page and interaction effects.
  if (method=="int.gp") {
    datreg <- as.vector(x)
    nsess <- dim(x)[1]
    pagevek <- rep(1:p,rep(nsess,p))
    oldall <- rep(old,p)
    xoldg <- oldall[datreg > 0]              # group contrast
    xoldp <- pagevek[datreg > 0]             # page contrast
    datreg <- datreg[datreg > 0]
    censvec <- rep(1, length(datreg))
    censvec[datreg > cutpoint] <- 0          # vector for censored data (set to 0)
    wphm <- survreg(Surv(datreg, censvec)~factor(xoldg)*factor(xoldp),dist=Sdist)
    scalebase <- as.vector(exp(wphm$coefficients[1]))
    scaleg <- exp(c(0,wphm$coefficient[2:K]))                       # group contrast
    scalep <- exp(c(0,wphm$coefficient[(K+1):(K+p-1)]))             # page contrast
    scaleimat <- matrix(exp(wphm$coefficient[(K+p):(K*p)]),(K-1),(p-1))  # interaction effects
    # Pad the interaction matrix with the reference row/column (effect 1).
    scaleimat <- rbind(rep(1,p),cbind(rep(1,K-1),scaleimat))
    scaletemp <- outer(scaleg,scalep)*scalebase
    scale <- scaletemp*scaleimat
    shape <- matrix((1/wphm$scale),K,p)
    anzpar <- K*p+1
  }

  #------------------ page + group main effects ----------------
  # As above but additive (no interaction); most parsimonious model.
  if (method=="main.gp") {
    datreg <- as.vector(x)
    nsess <- dim(x)[1]
    pagevek <- rep(1:p,rep(nsess,p))
    oldall <- rep(old,p)
    xoldg <- oldall[datreg > 0]              # group contrast
    xoldp <- pagevek[datreg > 0]             # page contrast
    datreg <- datreg[datreg > 0]
    censvec <- rep(1, length(datreg))
    censvec[datreg > cutpoint] <- 0          # vector for censored data (set to 0)
    wphm <- survreg(Surv(datreg, censvec)~factor(xoldg)+factor(xoldp),dist=Sdist)
    scalebase <- as.vector(exp(wphm$coefficients[1]))
    scaleg <- exp(c(0,wphm$coefficient[2:K]))            # group contrast
    scalep <- exp(c(0,wphm$coefficient[(K+1):(K+p-1)]))  # page contrast
    scale <- outer(scaleg,scalep)*scalebase
    shape <- matrix((1/wphm$scale),K,p)
    anzpar <- K+p
  }

  list (scale = scale, shape = shape, prior = prior, anzpar = anzpar)
}
# returns matrices with shape and scale parameters as well as prior matrix
|
3219988c4942aab333c1bb43b30fca8b5e00a729 | b07a4bad19e0e9ccf6d5d1d2e3c7355b91eb86b0 | /CS450-Machine_Learning/week11abalone.R | 4abd165a9994b3bfcdce7ce2f044125155b4116d | [] | no_license | CalebSpear/Projects | 11b3e38de9158939ddb70880cc7883142ab1b974 | ba8540e5e9dcc5a464b9a653a889cb00f66399dd | refs/heads/master | 2022-10-28T02:02:05.689051 | 2020-06-14T02:15:41 | 2020-06-14T02:15:41 | 265,416,461 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,500 | r | week11abalone.R | library(ucimlr)
library(mlr)
library(mlrMBO)
library(tidyverse)
abalone <- read_ucimlr("abalone")
for (i in 1:4177) {
print(abalone[i,9])
if (abalone[i,9] < 6) {
abalone[i,9] = ">7.5"
} else if (abalone[i,9] > 13) {
abalone[i,9] = ">14.5"
} else {
abalone[i,9] = "<=14.5"
}
}
summarizeColumns(abalone)
abalone <- abalone %>%
mutate_at(vars(sex), as.factor)
task <- makeClassifTask(id = "abalone", data = abalone, target = "rings") %>%
mergeSmallFactorLevels(min.perc = 0.02)
task.train <- makeClassifTask(id = "abalone.train", data = getTaskData(task)[(1:345 * 2), ], target = "rings")
task.test <- makeClassifTask(id = "abalone.test", data = getTaskData(task)[(1:345 * 2 - 1), ], target = "rings")
dt <- makeLearner("classif.rpart", predict.type = "prob") %>%
makeDummyFeaturesWrapper()
dt1 <- makeLearner("classif.ksvm", predict.type = "prob") %>%
makeDummyFeaturesWrapper()
dt2 <- makeLearner("classif.nnet", predict.type = "prob") %>%
makeDummyFeaturesWrapper()
dt3 <- makeLearner("classif.randomForest", predict.type = "prob") %>%
makeDummyFeaturesWrapper()
dt4 <- makeLearner("classif.gbm", predict.type = "prob") %>%
makeDummyFeaturesWrapper()
resample(dt, task.train, cv10, auc)
resample(dt1, task.train, cv10, auc)
resample(dt2, task.train, cv10, auc)
resample(dt3, task.train, cv10, auc)
resample(dt4, task.train, cv10, auc)
```{r tune_learner}
getParamSet(dt)
dt.parset <- makeParamSet(
makeIntegerParam("minsplit", lower = 1, upper = 100),
makeNumericParam("cp", lower = 0, upper = 1),
makeIntegerParam("maxdepth", lower = 10, upper = 50)
)
dt.tuned <- dt %>%
makeTuneWrapper(resampling = cv5,
measures = auc,
par.set = dt.parset,
control = makeTuneControlGrid(resolution = 5))
```{r train_model}
# parallelStartSocket(cpus = 16L, level = 'mlr.resample')
model <- train(dt.tuned, task.train)
model1 <- train(dt1, task.train)
model2 <- train(dt2, task.train)
model3 <- train(dt3, task.train)
model4 <- train(dt4, task.train)
# parallelStop()
preds <- predict(model, newdata = getTaskData(task.test))
preds1 <- predict(model1, newdata = getTaskData(task.test))
preds2 <- predict(model2, newdata = getTaskData(task.test))
preds3 <- predict(model3, newdata = getTaskData(task.test))
preds4 <- predict(model4, newdata = getTaskData(task.test))
performance(preds, auc)
performance(preds1, auc)
performance(preds2, auc)
performance(preds3, auc)
performance(preds4, auc)
``` |
fb59313bc87e106cb1dd13382ba7ef3bb67a3784 | 404380ca0cabe3e8c50379284fc8144c3ac0bf96 | /statsguys_lesson2.R | 2caae010619d61c6575aa910962f07291a2d7d7a | [] | no_license | bheavner/titanic | e8b6a67a05abb539e3f84bf1536d2766c7824cf7 | d8057e702374bc3c2bdbab15e06e36d7573c5e81 | refs/heads/master | 2020-05-20T02:57:48.402508 | 2014-04-18T21:58:44 | 2014-04-18T21:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,362 | r | statsguys_lesson2.R | # Following the logistic regression model approach from
# statsguys.wordpress.com/2014/01/11/data-analytics-for-beginners-pt-2/
# load the data
trainData <- read.csv("data/train.csv", header = TRUE, stringsAsFactors = FALSE)
testData <- read.csv("data/test.csv", header = TRUE, stringsAsFactors = FALSE)
## cleaning the TRAIN data
# remove unused variables - ID, ticket, fare, cabin, embarked
str(trainData)
trainData <- trainData[-c(1,9:12)]
str(trainData)
# replace qualitative variables with quantitative variables
# (I'm not sure why not to just use factors)
trainData$Sex <- gsub("female", 1, trainData$Sex)
trainData$Sex <- gsub("^male", 0, trainData$Sex)
str(trainData$Sex)
# try to infer some ages based on title. Assume that people with similar titles are similar ages.
master_vector = grep("Master.",trainData$Name, fixed=TRUE)
miss_vector = grep("Miss.", trainData$Name, fixed=TRUE)
mrs_vector = grep("Mrs.", trainData$Name, fixed=TRUE)
mr_vector = grep("Mr.", trainData$Name, fixed=TRUE)
dr_vector = grep("Dr.", trainData$Name, fixed=TRUE)
for(i in master_vector) {
trainData$Name[i] = "Master"
}
for(i in miss_vector) {
trainData$Name[i] = "Miss"
}
for(i in mrs_vector) {
trainData$Name[i] = "Mrs"
}
for(i in mr_vector) {
trainData$Name[i] = "Mr"
}
for(i in dr_vector) {
trainData$Name[i] = "Dr"
}
master_age = round(mean(trainData$Age[trainData$Name == "Master"], na.rm = TRUE), digits = 2)
miss_age = round(mean(trainData$Age[trainData$Name == "Miss"], na.rm = TRUE), digits =2)
mrs_age = round(mean(trainData$Age[trainData$Name == "Mrs"], na.rm = TRUE), digits = 2)
mr_age = round(mean(trainData$Age[trainData$Name == "Mr"], na.rm = TRUE), digits = 2)
dr_age = round(mean(trainData$Age[trainData$Name == "Dr"], na.rm = TRUE), digits = 2)
for (i in 1:nrow(trainData)) {
if (is.na(trainData[i,5])) {
if (trainData$Name[i] == "Master") {
trainData$Age[i] = master_age
} else if (trainData$Name[i] == "Miss") {
trainData$Age[i] = miss_age
} else if (trainData$Name[i] == "Mrs") {
trainData$Age[i] = mrs_age
} else if (trainData$Name[i] == "Mr") {
trainData$Age[i] = mr_age
} else if (trainData$Name[i] == "Dr") {
trainData$Age[i] = dr_age
} else {
print("Uncaught Title")
}
}
}
## Next, create new variables that might be useful features: child, family, mother |
5d79255786a43a0ed60eb3a069dbef6794bd70a6 | db94ee12e92ec141460dd6a5b3c073c24455fc9d | /reshape2.R | b76191a2bd3005d62fbce395f562e4192b7d8040 | [] | no_license | goal1234/practice | 43a95131ad78d7831068a0a5bd6a78907686a337 | 7c383b2eaa0adba6f1afe7486be803082c2e7b82 | refs/heads/master | 2021-09-05T18:45:26.879828 | 2018-01-30T10:36:24 | 2018-01-30T10:36:24 | 100,257,004 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,560 | r | reshape2.R | ##---add_margins Add margins to a data frame.---##
##---add_margins Add margins to a data frame.---##
# FIX: the lines below were usage signatures pasted from the help pages,
# not runnable code -- one line even had two calls fused together
# (`add_margins(...)add_margins(...)`), which is a parse error that broke
# source() for this whole file.  Kept as comments for reference.
#   add_margins(df, vars, margins = TRUE)
#   dcast(data, formula, fun.aggregate = NULL, ..., margins = NULL,
#         subset = NULL, fill = NULL, drop = TRUE,
#         value.var = guess_value(data))
#   acast(data, formula, fun.aggregate = NULL, ..., margins = NULL,
#         subset = NULL, fill = NULL, drop = TRUE,
#         value.var = guess_value(data))
#Air quality example
names(airquality) <- tolower(names(airquality))
aqm <- melt(airquality, id=c("month", "day"), na.rm=TRUE)
acast(aqm, day ~ month ~ variable)
acast(aqm, month ~ variable, mean)
acast(aqm, month ~ variable, mean, margins = TRUE)
dcast(aqm, month ~ variable, mean, margins = c("month", "variable"))
library(plyr) # needed to access . function
acast(aqm, variable ~ month, mean, subset = .(variable == "ozone"))
acast(aqm, variable ~ month, mean, subset = .(month == 5))
#Chick weight example
names(ChickWeight) <- tolower(names(ChickWeight))
chick_m <- melt(ChickWeight, id=2:4, na.rm=TRUE)
dcast(chick_m, time ~ variable, mean) # average effect of time
dcast(chick_m, diet ~ variable, mean) # average effect of diet
acast(chick_m, diet ~ time, mean) # average effect of diet & time
# How many chicks at each time? - checking for balance
acast(chick_m, time ~ diet, length)
acast(chick_m, chick ~ time, mean)
acast(chick_m, chick ~ time, mean, subset = .(time < 10 & chick < 20))
acast(chick_m, time ~ diet, length)
dcast(chick_m, diet + chick ~ time)
acast(chick_m, diet + chick ~ time)
acast(chick_m, chick ~ time ~ diet)
acast(chick_m, diet + chick ~ time, length, margins="diet")
acast(chick_m, diet + chick ~ time, length, drop = FALSE)
#Tips example
dcast(melt(tips), sex ~ smoker, mean, subset = .(variable == "total_bill"))
ff_d <- melt(french_fries, id=1:4, na.rm=TRUE)
acast(ff_d, subject ~ time, length)
acast(ff_d, subject ~ time, length, fill=0)
dcast(ff_d, treatment ~ variable, mean, margins = TRUE)
dcast(ff_d, treatment + subject ~ variable, mean, margins="treatment")
if (require("lattice")) {
lattice::xyplot(`1` ~ `2` | variable, dcast(ff_d, ... ~ rep), aspect="iso")
}
##---colsplit Split a vector into multiple columns---##
colsplit(string, pattern, names)
x <- c("a_1", "a_2", "b_2", "c_3")
vars <- colsplit(x, "_", c("trt", "time"))
vars
str(vars)
##---french_fries Sensory data from a french fries experiment---##
##---melt Convert an object into a molten data frame.---##
melt(data, ..., na.rm = FALSE, value.name = "value")
##cast
## S3 method for class 'array'
melt(data, varnames = names(dimnames(data)), ...,
na.rm = FALSE, as.is = FALSE, value.name = "value")
## S3 method for class 'table'
melt(data, varnames = names(dimnames(data)), ...,
na.rm = FALSE, as.is = FALSE, value.name = "value")
## S3 method for class 'matrix'
melt(data, varnames = names(dimnames(data)), ...,
na.rm = FALSE, as.is = FALSE, value.name = "value")
a <- array(c(1:23, NA), c(2,3,4))
melt(a)
melt(a, na.rm = TRUE)
melt(a, varnames=c("X","Y","Z"))
dimnames(a) <- lapply(dim(a), function(x) LETTERS[1:x])
melt(a)
melt(a, varnames=c("X","Y","Z"))
dimnames(a)[1] <- list(NULL)
melt(a)
##---melt.data.frame Melt a data frame into form suitable for easy casting.---##
## S3 method for class 'data.frame'
melt(data, id.vars, measure.vars,
variable.name = "variable", ..., na.rm = FALSE, value.name = "value",
factorsAsStrings = TRUE)
names(airquality) <- tolower(names(airquality))
melt(airquality, id=c("month", "day"))
names(ChickWeight) <- tolower(names(ChickWeight))
melt(ChickWeight, id=2:4)
##---melt.default Melt a vector. For vectors, makes a column of a data frame---##
## S3 method for class 'list'
melt(data, ..., level = 1)
a <- as.list(c(1:4, NA))
melt(a)
names(a) <- letters[1:4]
melt(a)
a <- list(matrix(1:4, ncol=2), matrix(1:6, ncol=2))
melt(a)
a <- list(matrix(1:4, ncol=2), array(1:27, c(3,3,3)))
melt(a)
melt(list(1:5, matrix(1:4, ncol=2)))
melt(list(list(1:3), 1, list(as.list(3:4), as.list(1:2))))
##---melt_check Check that input variables to melt are appropriate.---##
melt_check(data, id.vars, measure.vars, variable.name, value.name)
##---parse_formula Parse casting formulae Description---##
parse_formula(formula = "... ~ variable", varnames, value.var = "value")
reshape2:::parse_formula("a + ...", letters[1:6])
reshape2:::parse_formula("a ~ b + d")
reshape2:::parse_formula("a + b ~ c ~ .")
##---recast Recast: melt and cast in a single step---##
recast(data, formula, ..., id.var, measure.var)
recast(french_fries, time ~ variable, id.var = 1:4)
|
516e5579b4cc08ed880ce32fb4064d6ae1a9fbe5 | 80891a4179c70c221df5bdcf15a91b0088b02e53 | /app.R | 17df35f89d38f714921d780bc8c73cc3115a1ae0 | [
"MIT"
] | permissive | UBC-MDS/DSCI_532_Group_113_Overdose_R | 82bbf19440c8407b402f43b5f3be661259630e7a | 31a03f9da2cc33f8e7c59abf8f8aa87920e89f67 | refs/heads/master | 2022-07-28T06:28:52.789802 | 2019-12-14T04:02:50 | 2019-12-14T04:02:50 | 226,436,242 | 0 | 4 | MIT | 2022-06-21T23:45:47 | 2019-12-07T01:05:27 | R | UTF-8 | R | false | false | 14,051 | r | app.R | library(tidyverse)
library(viridis)
library(plotly)
# dash
library(dashCoreComponents)
library(dashHtmlComponents)
library(dashTable)
library(dash)
library(readxl)
app <- Dash$new(external_stylesheets = "https://codepen.io/chriddyp/pen/bWLwgP.css")
# load dataset
url <- "https://github.com/UBC-MDS/DSCI_532_Group_113_Overdose_R/blob/master/data/2012-2018_lab4_data_drug-overdose-deaths-connecticut-wrangled-pivot.csv?raw=true"
pivoted_data <- read_csv(url)
url_1 <- "https://github.com/UBC-MDS/DSCI_532_Group_113_Overdose_R/blob/master/data/2012-2018_lab4_data_drug-overdose-deaths-connecticut-wrangled-melted.csv?raw=true"
drug_overdose_wrangled_m = read_csv(url_1)
url_2 <- "https://github.com/UBC-MDS/DSCI_532_Group_113_Overdose_R/blob/master/data/lab4_drug-description.csv?raw=true"
drug_description <- read_csv(url_2)
url_3 <- "https://github.com/UBC-MDS/DSCI_532_Group_113_Overdose_R/blob/master/data/2012-2018_lab4_data_drug-overdose-counts.csv?raw=true"
combination_count <- read_csv(url_3) %>%
rename(second_drug = `Second drug`) %>%
mutate(index = factor(index),
second_drug = factor(second_drug))
combination_count$index <- combination_count$index %>%
fct_relevel('Heroin', 'Fentanyl', 'Cocaine', 'Benzodiazepine', 'Ethanol', 'Oxycodone',
'Methadone', 'Other', 'Fentanyl Analogue', 'Amphet', 'Tramad', 'Hydrocodone',
'Oxymorphone','OpiateNOS', 'Morphine', 'Hydromorphone')
combination_count$second_drug <- combination_count$second_drug %>%
fct_relevel('Hydromorphone','Morphine','OpiateNOS','Oxymorphone','Hydrocodone','Tramad','Amphet','Fentanyl Analogue',
'Other','Methadone','Oxycodone', 'Ethanol', 'Benzodiazepine', 'Cocaine','Fentanyl', 'Heroin')
drug_name <- "Heroin"
header_colors <- function(){
list(
bg_color = "#0D76BF",
font_color = "#fff",
"light_logo" = FALSE
)
}
set_graph_race <- function(drug = drug_name){
# some wrangling for race
drug = sym(drug)
if (drug == sym("Everything")){
top_race <- pivoted_data %>%
count(Race)
} else{
top_race <- pivoted_data %>%
group_by(Race) %>%
summarise(n = sum(!!drug))
}
top_race <- top_race %>%
arrange(desc(n)) %>%
head(3)
race <- top_race %>%
ggplot(aes(reorder(Race, -n), n)) +
geom_bar(aes(fill = Race), stat = "identity", show.legend = FALSE) +
scale_fill_viridis_d() +
labs(x = "Race", y = "count", title = paste("Top 3 Races \nwith the most deaths in", drug)) +
theme(
plot.title = element_text(size = 10),
axis.text = element_text(angle = 45),
axis.text.x=element_blank()
)
return(race)
}
set_graph_gender <- function(drug = drug_name){
drug = sym(drug)
if (drug == sym("Everything")){
pivoted_data <- pivoted_data
} else{
pivoted_data <- pivoted_data %>%
filter(!!drug == 1)
}
gender <- pivoted_data %>%
filter(Sex == "Male" | Sex == "Female") %>%
ggplot(aes(Sex, fill = Sex)) +
geom_bar(show.legend = FALSE) +
scale_fill_viridis_d() +
labs(x = "Gender", title = paste("Gender distribution \nfor the deaths in", drug)) +
theme(
plot.title = element_text(size = 10),
axis.text = element_text(angle = 45),
axis.text.x=element_blank()
)
return(gender)
}
set_graph_age <- function(drug = drug_name){
drug = sym(drug)
if (drug == sym("Everything")){
pivoted_data <- pivoted_data
} else{
pivoted_data <- pivoted_data %>%
filter(!!drug == 1)
}
age <- pivoted_data %>%
ggplot(aes(Age)) +
geom_density(alpha = 0.8, show.legend = FALSE, fill = "#21908C") +
scale_fill_viridis_d() +
labs(x = "Age", y = "count", title = paste("Age distribution \nfor the deaths in", drug)) +
theme(
plot.title = element_text(size = 10),
axis.text = element_text(angle = 45)
)
return(age)
}
drugs_heatmap <- combination_count %>%
ggplot(aes(index, second_drug, text = paste('First Drug:', index, '<br>Second Drug: ', second_drug))) +
geom_tile(aes(fill = Count)) +
geom_text(aes(label = round(Count, 1)), color = 'white', size = 3) +
labs(title = "Count of overdose victims with a combination of 2 drugs", x = "First drug", y = "Second drug") +
scale_fill_viridis() +
theme_minimal() +
theme(
axis.text = element_text(angle = 45)
)
drugs_heatmap <- ggplotly(drugs_heatmap, width = 650, height = 600, tooltip = "text")
df <- drug_overdose_wrangled_m %>%
group_by(Drug) %>%
summarize(times_tested_positive = sum(Toxicity_test, na.rm = TRUE))%>%
arrange(desc(times_tested_positive))
h_bar_plot <- df %>% ggplot(aes(x=reorder(Drug, times_tested_positive), y=times_tested_positive)) +
geom_bar(stat='identity',fill="cyan4") +
coord_flip()+
labs(title = "Ranking of drugs by the times tested positive",x ="Drug ", y = "Times a drug tested positive")+
theme_minimal()+
theme(plot.title = element_text(hjust = 0.5),text = element_text(size=10))
app <- Dash$new(external_stylesheets = list("https://cdnjs.cloudflare.com/ajax/libs/normalize/7.0.0/normalize.min.css",
"https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css",
"https://codepen.io/bcd/pen/KQrXdb.css",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"))
DrugsDD <- dccDropdown(
id = 'drugs_dd',
options = lapply(
unique(drug_description$Drug), function(x){
list(label=x, value=x)
}),
value = 'Heroin'
)
set_description <- function(drug = drug_name){
filtered <- drug_description %>% filter(Drug == drug)
return(filtered[["Description"]])
}
set_image <- function(drug = drug_name){
filtered <- drug_description %>% filter(Drug == drug)
return(filtered[["Link"]])
}
set_reference<- function(drug = drug_name){
filtered <- drug_description %>% filter(Drug == drug)
return(filtered[["Reference"]])
}
app$layout(
htmlDiv(htmlBr(),
children = list(
htmlDiv(
id = "app-page-header",
style = list(
width = "100%",
background = header_colors()[["bg_color"]],
color = "#fff"
),
children = list(
htmlA(
id = "dashbio-logo",
href = "/Portal"
),
htmlH1("Overdose"),
htmlA(
id = "gh-link",
children = list(paste0(
"How drug overdose is stealing lives from us!"
)),
href = "https://github.com/UBC-MDS/DSCI_532_Group_113_Overdose_R",
style = list(color = "white",'margin-left' = "10px","font-size" = "20px"),
htmlImg(
src = "assets/git.png"
)
)
)
),
htmlDiv(
style = list('margin-left' = "10px"),
children = list(htmlH5(paste0(
"Overdose app allows you to visualize ",
"different factors associated with ",
"accidental death by overdose in Connecticut, US, from 2012 - 2018"
)
))),
htmlDiv(style = list('margin-left' = "10px"),
children = list(
htmlH5(style = list(color = "grey",'margin-left' = "10px","font-size" = "20px"),paste0(
"You ",
"can interactively explore this issue ",
"using (The Killers tab) or ",
"the (The Victims tab)"
)
))),
htmlDiv(
list(
dccTabs(id="tabs", children = list(
dccTab(label = 'The Killer', children =list(
htmlDiv(list(
htmlP("This section, named 'the killers', focuses on the effect of drugs. Two static graphs are displayed; one is the prevalence ranking of drugs found in the deceased people. Another one is the correlation map of two drugs from this dataset, which counts and compares the occurrences of two-drug combinations in the deaths."
)), style = list("margin-left" = "300px", "margin-right" = "300px", "font-size"= "16px")),
htmlDiv(list(
dccGraph(
id='vic-drugs',
figure = ggplotly(h_bar_plot, width = 550, height = 600)
)
), style = list('display' = "block", 'float' = "left", 'margin-left' = "10px",
'margin-right' = "1px", 'width' = "500px", "font-size" = "15px", "margin-bottom" = "3px") ),
htmlDiv(list(
dccGraph(
id='vic-heatmap-0',
figure = drugs_heatmap
)
), style = list('display' = "block", 'float' = "right", 'margin-left' = "10px",
'margin-right' = "10px", 'width' = "650px", "font-size" = "15px", "margin-bottom" = "3px") )
)
),
dccTab(label = 'The Victims', children = list(
htmlDiv(list(
htmlP("Please select one drug to see the affected demographic group by age, race and gender"),
DrugsDD,
htmlImg(
id='drug_img',
src = set_image(),
height = '150',
width = '200'
),
htmlP(children = set_description(), id="drug_desc"),
htmlA(
children = 'This info was retrieved from drugbank.ca',
id = "drug_ref",
href = set_reference(),
target="_blank")
), style = list('display' = "block", 'float' = "left", 'margin-left' = "100px",
'margin-right' = "1px", 'width' = "300px", "font-size" = "15px"),
),
htmlDiv(
list(
htmlDiv(list(
dccGraph(
id='vic-age_0',
figure = ggplotly(set_graph_age(), width = 700, height = 300)
)
), style = list('display' = "table-row", "margin-bottom" = "1px")
),
htmlDiv(list(
htmlDiv(list(
dccGraph(
id='vic-gender_0',
figure = ggplotly(set_graph_gender(), width = 400, height = 300)
)
), style = list('display' = "block", 'float' = "left", 'margin-left' = "1px",
'margin-right' = "1px")
),
htmlDiv(list(
dccGraph(
id='vic-race_0',
figure = ggplotly(set_graph_race(), width = 400, height = 300)
)
), style = list('display' = "block", 'float' = "left", 'margin-left' = "1px",
'margin-right' = "50px")
)
) , style = list('display' = "table-row", "margin-top" = "1px", 'float' = "left")
)
), style = list('float' = "right")
)
)
)
), style = list("font-size"= "16px", "font-weight" = "bold")
)
)
),
htmlDiv(style = list('margin-left' = "10px"),
children = list(
htmlA(children = "Data retrieved from the data.ct.gov", href = "https://catalog.data.gov/dataset/accidental-drug-related-deaths-january-2012-sept-2015"
)))
), style = list('background-color' = "#ffffff")
)
)
#Callbacks
app$callback(
output=list(id = 'drug_img', property='src'),
params=list(input(id = 'drugs_dd', property='value')),
function(drug_input) {
result <- set_image(drug = drug_input)
return(result)
})
app$callback(
output=list(id = 'vic-race_0', property='figure'),
params=list(input(id = 'drugs_dd', property='value')),
function(drug_input) {
result <- ggplotly(set_graph_race(drug = drug_input) ,width = 400, height = 300)
return(result)
})
app$callback(
output=list(id = 'vic-gender_0', property='figure'),
params=list(input(id = 'drugs_dd', property='value')),
function(drug_input) {
result <- ggplotly(set_graph_gender(drug = drug_input) ,width = 400, height = 300)
return(result)
})
app$callback(
output=list(id = 'vic-age_0', property='figure'),
params=list(input(id = 'drugs_dd', property='value')),
function(drug_input) {
result <- ggplotly(set_graph_age(drug = drug_input) ,width = 400, height = 300)
return(result)
})
app$callback(
output=list(id = 'drug_desc', property='children'),
params=list(input(id = 'drugs_dd', property='value')),
function(drug_input) {
result <- set_description(drug = drug_input)
return(result)
})
app$run_server(host = "0.0.0.0", port = Sys.getenv('PORT', 8050)) |
d8dbf834941ad6a38954e78fa8942faa4a715c8a | d810cb5656f7374e865abfdc8b1cde0c81c48129 | /C5.R | 6e7806982da626ec9d36b424836af4784660cbcc | [
"MIT"
] | permissive | jcorrean/IdeologicalConsumerism | 121a650e28c64d696c5567d6093cfe4fc451d660 | dc1451c1b647c389a651f7f84cc78d1f9fe8a986 | refs/heads/master | 2022-12-07T22:31:36.374854 | 2020-08-18T00:05:16 | 2020-08-18T00:05:16 | 287,078,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 414 | r | C5.R | # Linguistic Similarity for Candidate #5
C5c <- dfm(corpus_subset(tweets, Candidate == "RAMIRO NULL BARRAGAN"), remove_numbers = TRUE, remove = stopwords("spanish"), stem = TRUE, remove_punct = TRUE)
c5c <- textstat_simil(C5c, margin = "documents", method = "jaccard")
SC5 <- data.frame(jaccard = c5c[lower.tri(c5c, diag = FALSE)], Candidate = "Ramiro Barragan")
C5$LinguisticSimilarity <- summary(SC5$jaccard)[4] |
e01089be1c509d76b24c367b932155313f7b2f51 | 0290c8cb8b9eff9349dd07644c6d1a1fc4cec142 | /R code/kaggle/hour_functions.R | 3a9e64985171a3906a2c247282bd1e2658b35064 | [] | no_license | ivanliu1989/Helping-Santas-Helpers | c03799023952651be0e950fe77c4a31cd4303215 | 4287946489d2faa7d44297f6169d312b9a548f01 | refs/heads/master | 2021-01-02T09:33:26.783676 | 2015-01-03T23:41:37 | 2015-01-03T23:41:37 | 27,221,218 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,825 | r | hour_functions.R | hours_per_day <- 10
day_start <- 9 * 60
day_end <- (9 + hours_per_day) * 60
reference_time <- as.POSIXct('2014 1 1 0 0', '%Y %m %d %H %M', tz = 'UTC')
minutes_in_24h <- 24 * 60
convert_to_minute <- function(arrival) {
arrive_time <- as.POSIXct(arrival, '%Y %m %d %H %M', tz = 'UTC')
age <- as.integer(difftime(arrive_time, reference_time, units = 'mins', tz = 'UTC'))
return(age)
}
convert_to_chardate <- function(arrive_int) {
char_date <- format(reference_time + arrive_int * 60, format = '%Y %m %d %H %M', tz = 'UTC')
return(char_date)
}
is_sanctioned_time <- function(minute) {
is_sanctioned <- ((minute - day_start) %% minutes_in_24h) < (hours_per_day * 60)
return(is_sanctioned)
}
get_sanctioned_breakdown <- function(start_minute, work_duration) {
full_days <- as.integer(work_duration / minutes_in_24h)
sanctioned <- full_days * hours_per_day * 60
unsanctioned <- full_days * (24 - hours_per_day) * 60
remainder_start <- start_minute + full_days * minutes_in_24h
remainder_end <- start_minute + work_duration - 1 # to avoid off-by-one per R iterator
if(remainder_end >= remainder_start) {
sanctioned <- sanctioned + sum(is_sanctioned_time(remainder_start:remainder_end))
unsanctioned <- unsanctioned + sum(!is_sanctioned_time(remainder_start:remainder_end))
}
return(c(sanctioned, unsanctioned))
}
next_sanctioned_minute <- function(minute) {
if(is_sanctioned_time(minute) && is_sanctioned_time(minute + 1)) {
next_min <- minute + 1
} else {
num_days <- as.integer(minute / minutes_in_24h)
am_or_pm <- as.integer(((minute %% minutes_in_24h)/day_start))
# This is necessary, else end-of-day unsanctioned minutes jump over an entire day.
# David Thaler's fix works at minutes >=540, but fails at 539
next_min <- day_start + (num_days + am_or_pm / 2) * minutes_in_24h
}
return(next_min)
}
apply_resting_period <- function(rest_start, num_unsanctioned) {
num_days_since_jan1 <- as.integer(rest_start / minutes_in_24h)
rest_time <- num_unsanctioned
rest_time_in_working_days <- as.integer(rest_time / (60 * hours_per_day))
rest_time_remaining_minutes <- rest_time %% (60 * hours_per_day)
local_start <- rest_start %% minutes_in_24h
if(local_start < day_start) local_start <- day_start
if(local_start > day_end) {
num_days_since_jan1 <- num_days_since_jan1 + 1
local_start <- day_start
}
if((local_start + rest_time_remaining_minutes) > day_end) {
rest_time_in_working_days <- rest_time_in_working_days + 1
rest_time_remaining_minutes <- rest_time_remaining_minutes - (day_end - local_start)
local_start <- day_start
}
total_days <- num_days_since_jan1 + rest_time_in_working_days
rest_period <- total_days * minutes_in_24h + local_start + rest_time_remaining_minutes
return(rest_period)
}
|
e62149c2e2b50523d3074fd36c5a4b97d0efe2e8 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612803349-test.R | d631be922e9b5d810a1de1d53963ae72277f4ebd | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 148 | r | 1612803349-test.R | testlist <- list(bytes1 = c(NA, NA, 1819552040L), pmutation = 6.99512702068968e-308)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
f245a8bb9e68dd1345da9fda17db82340d70bad1 | 644030724693faaa7e83b14a2bb6b68a1bb0e77c | /R/modComLik.R | fad7ddf5511b923e1d53c71c292881a0d8c2f8fd | [] | no_license | Rene-Gutierrez/BayTenGraMod | eb07ece068a09a027f5262fc8be4b5cd1fbaceb8 | e2a6972d3f3fbd44accf2c49db246aa55401870e | refs/heads/master | 2021-03-22T17:08:21.614191 | 2020-12-10T05:48:20 | 2020-12-10T05:48:20 | 247,385,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 985 | r | modComLik.R | #' Computes the log-likelihood of the Tesor Normal for each model estimate.
#'
#' Computes the log-likelihood of the Tesor Normal for each model estimate
#' given a sample of tensors.
#'
#' @param modelList A list of lists. The outer list is indexed by model. The
#' inner list is indexed by Matrix.
#' @param tensors A list with the sample of tensors
#'
#' @return A vector with the log-likelihood for each model.
#'
#' @author Rene Gutierrez Marquez
#'
#' @export
###############################################################################
###
### Performance Statistics Graph
###
###
###############################################################################
### Number
modComLik <- function(modelList, tensors){
### Number of Models
numMod <- length(modelList)
logLik <- numeric()
for(model in modelList){
logLik <- c(logLik, logLikTNorm(tensors = tensors,
precisions = model))
}
### Return
return(logLik)
}
|
ee74307be48a7b47ffb46f542f15781ddf64d811 | 716099bf019e764568a930a227a28beea650e681 | /R/Berechnung_IBCH.R | c75e37c6c823f715d20f4d968fe5b9215567ccb9 | [] | no_license | TobiasRoth/ibchR | 4d6366df7f4bdd3773591ec99596f08af0bf3dab | 9dc30d63250811e316419a7478d4f27de8496f6a | refs/heads/master | 2022-10-24T19:56:55.585552 | 2020-06-17T09:33:39 | 2020-06-17T09:33:39 | 272,900,084 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,671 | r | Berechnung_IBCH.R | rm(list=ls(all=TRUE))
#------------------------------------------------------------------------------------------------------------
# Einstellungen
#------------------------------------------------------------------------------------------------------------
# R-Packete
library(tidyverse)
#------------------------------------------------------------------------------------------------------------
# Hilfsfunktion zur Bewertung des Uferbereichs
#------------------------------------------------------------------------------------------------------------
raumbedarfL <- function(dat) {
dat$res <- "ungenuegend"
for(i in 1:nrow(dat)) {
res <- FALSE
if(dat[i,"Wasserspiegel"]=="ausgepraegt") res <- dat[i,"Uferbreite_links"]>=15 | (dat[i,"Uferbreite_links"]>=5 & dat[i,"Uferbreite_links"] > 3.75 + 0.75*dat[i,"Breite"])
if(dat[i,"Wasserspiegel"]=="eingeschraenkt") res <- dat[i,"Uferbreite_links"]>=15 | (dat[i,"Uferbreite_links"]>=5 & dat[i,"Uferbreite_links"] > 3 + 1.2*dat[i,"Breite"])
if(dat[i,"Wasserspiegel"]=="keine") res <- dat[i,"Uferbreite_links"]>=15 | (dat[i,"Uferbreite_links"]>=5 & dat[i,"Uferbreite_links"] > 3.5 + 1.5*dat[i,"Breite"])
dat[i, "res"] <- ifelse(res, "genuegend","ungenuegend")
}
dat$res
}
raumbedarfR <- function(dat) {
dat$res <- "ungenuegend"
for(i in 1:nrow(dat)) {
res <- FALSE
if(dat[i,"Wasserspiegel"]=="ausgepraegt") res <- dat[i,"Uferbreite_rechts"]>=15 | (dat[i,"Uferbreite_rechts"]>=5 & dat[i,"Uferbreite_rechts"] > 3.75 + 0.75*dat[i,"Breite"])
if(dat[i,"Wasserspiegel"]=="eingeschraenkt") res <- dat[i,"Uferbreite_rechts"]>=15 | (dat[i,"Uferbreite_rechts"]>=5 & dat[i,"Uferbreite_rechts"] > 3 + 1.2*dat[i,"Breite"])
if(dat[i,"Wasserspiegel"]=="keine") res <- dat[i,"Uferbreite_rechts"]>=15 | (dat[i,"Uferbreite_rechts"]>=5 & dat[i,"Uferbreite_rechts"] > 3.5 + 1.5*dat[i,"Breite"])
dat[i, "res"] <- ifelse(res, "genuegend","ungenuegend")
}
dat$res
}
#------------------------------------------------------------------------------------------------------------
# Daten einlesen und aufbereiten; Export MSK_Daten ohne Stationen Eindolung = ja, mit excel abspeichern, so dass Dezimalstellen mit . anstatt , abgetrennt werden!
# Sohlverb, Boschung_links/rechts etc. Klassen in DB zuerst anpassen. Anroid gibt z. B. < 10% als 1-9% aus. Die Klassen müssen in der DB sowieso angepasst werden, damit einheitlich.
#------------------------------------------------------------------------------------------------------------
dat <- read_csv("Daten/rohdaten_oekomorphologie.csv")
### Wasserspiegelbreitenvariabiliät
if(length(unique(dat$Wasserspiegel))!=3) stop("Einige Gewässer haben eine falsche Angabe zum Wasserspiegel")
BREITENVAR <- rep(0, nrow(dat))
BREITENVAR[dat$Wasserspiegel=="eingeschraenkt"] <- 2
BREITENVAR[dat$Wasserspiegel=="keine"] <- 3
M1 <- BREITENVAR
### Verbauung der Sohle
if(length(unique(dat$Sohlenverbauung))>6) stop("Einige Gewässer haben eine falsche Angaben zur Sohlenverbauung")
SOHLVER <- rep(5, nrow(dat))
SOHLVER[dat$Sohlenverbauung =="keine"] <- 0
SOHLVER[dat$Sohlenverbauung =="< 10%"] <- 1
SOHLVER[dat$Sohlenverbauung =="10-30%"] <- 2
SOHLVER[dat$Sohlenverbauung =="30-60%"] <- 3
SOHLVER[dat$Sohlenverbauung =="> 60%"] <- 4
if(length(unique(dat$Material))>6) stop("Einige Gewässer haben eine falsche Angaben zum Sohlenmaterial")
SOHLMAT <- rep(0, nrow(dat))
SOHLMAT[dat$Material =="Steine"] <- 0
SOHLMAT[dat$Material =="Holz"] <- 1
SOHLMAT[dat$Material =="Beton"] <- 1
SOHLMAT[dat$Material =="undurchlaessig"] <- 1
SOHLMAT[dat$Material =="andere (dicht)"] <- 1
M2 <- ifelse(SOHLVER <= 2, SOHLVER, 2+SOHLMAT)
### Bebauung des Böschungsfusses
if(length(unique(dat$Boschung_links))>6) stop("Einige Gewässer haben eine falsche Angaben zur linken Böschung")
LBUKVER <- rep(2.5, nrow(dat))
LBUKVER[dat$Boschung_links =="keine"] <- 0
LBUKVER[dat$Boschung_links =="< 10%"] <- 0
LBUKVER[dat$Boschung_links =="10-30%"] <- 0.5
LBUKVER[dat$Boschung_links =="30-60%"] <- 1.5
LBUKVER[dat$Boschung_links =="> 60%"] <- 2.5
if(length(unique(dat$Durchlaessigkeit_links))!=3) stop("Einige Gewässer haben eine falsche Angaben zur linken Durchlässigkeit")
LBUKMAT <- rep(0, nrow(dat))
LBUKMAT[dat$Durchlaessigkeit_links =="durchlaessig"] <- 0
LBUKMAT[dat$Durchlaessigkeit_links =="undurchlaessig"] <- 0.5
M3L <- ifelse(LBUKVER == 0, LBUKVER, LBUKVER+ LBUKMAT)
if(length(unique(dat$Boschung_rechts))>6) stop("Einige Gewässer haben eine falsche Angaben zur rechten Böschung")
LBUKVER <- rep(2.5, nrow(dat))
LBUKVER[dat$Boschung_rechts =="keine"] <- 0
LBUKVER[dat$Boschung_rechts =="< 10%"] <- 0
LBUKVER[dat$Boschung_rechts =="10-30%"] <- 0.5
LBUKVER[dat$Boschung_rechts =="30-60%"] <- 1.5
LBUKVER[dat$Boschung_rechts =="> 60%"] <- 2.5
if(length(unique(dat$Durchlaessigkeit_rechts))!=3) stop("Einige Gewässer haben eine falsche Angaben zur rechten Durchlässigkeit")
LBUKMAT <- rep(0, nrow(dat))
LBUKMAT[dat$Durchlaessigkeit_rechts =="durchlaessig"] <- 0
LBUKMAT[dat$Durchlaessigkeit_rechts =="undurchlaessig"] <- 0.5
M3R <- ifelse(LBUKVER == 0, LBUKVER, LBUKVER+ LBUKMAT)
### Uferbereich
if(sum(is.na(dat$Uferbreite_links))>0) stop("Einige Gewässer haben keine Angabe zur linken Uferbreite ('Uferbreite_links')")
if(sum(is.na(dat$Uferbreite_rechts))>0) stop("Einige Gewässer haben keine Angabe zur rechten Uferbreite ('Uferbreite_rechts')")
if(sum(is.na(dat$Breite))>0) stop("Einige Gewässer haben keine Angabe zur Breite der Gewässersohle ('Breite')")
dat[dat$Uferbreite_links == 0,"Beschaffenheit_links"] <- "kunstlich"
dat[dat$Uferbreite_rechts == 0,"Beschaffenheit_rechts"] <- "kunstlich"
if(length(unique(dat$Beschaffenheit_links))>3) stop("Einige Gewässer haben eine falsche Angaben zur linken Uferbeschaffenheit")
if(length(unique(dat$Beschaffenheit_rechts))>3) stop("Einige Gewässer haben eine falsche Angaben zur rechten Uferbeschaffenheit")
RAUMBED <- raumbedarfL(dat[,c("Breite", "Uferbreite_links", "Wasserspiegel")])
M4L <- rep(NA, nrow(dat))
for(i in 1: length(M4L)) {
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_links"] == "gewaessergerecht") M4L[i] <- 0
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_links"] == "gewaesserfremd") M4L[i] <- 1.5
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_links"] == "kunstlich") M4L[i] <- 3.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_links"] == "gewaessergerecht") M4L[i] <- 2.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_links"] == "gewaesserfremd") M4L[i] <- 3.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_links"] == "kunstlich") M4L[i] <- 3.0
}
RAUMBED <- raumbedarfR(dat[,c("Breite", "Uferbreite_rechts", "Wasserspiegel")])
M4R <- rep(NA, nrow(dat))
for(i in 1: length(M4R)) {
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_rechts"] == "gewaessergerecht") M4R[i] <- 0
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_rechts"] == "gewaesserfremd") M4R[i] <- 1.5
if(RAUMBED[i] =="genuegend" & dat[i,"Beschaffenheit_rechts"] == "kunstlich") M4R[i] <- 3.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_rechts"] == "gewaessergerecht") M4R[i] <- 2.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_rechts"] == "gewaesserfremd") M4R[i] <- 3.0
if(RAUMBED[i] =="ungenuegend" & dat[i,"Beschaffenheit_rechts"] == "kunstlich") M4R[i] <- 3.0
}
#------------------------------------------------------------------------------------------------------------
# Berchnung MSK
#------------------------------------------------------------------------------------------------------------
MSL <- M1 + M2 + M3L + M4L
MSR <- M1 + M2 + M3R + M4R
MS <- (MSL+MSR)/2
MS
|
a89498541fde61012d6c0863734a92e03a476c74 | 99b4cbb15706f5cb9ab59ae8ede634060d905899 | /ui.R | 322630ba331132b21b6e897d66c3bbed77983a55 | [] | no_license | leeed1998/Developing-Data-Products | 54e77ed18be0b25e573cb0d3ab59e75d57c319fb | 543ae48a503717d29c21e4a8d1d581b8ea5145a5 | refs/heads/master | 2021-01-25T10:14:53.390973 | 2015-09-27T03:20:10 | 2015-09-27T03:20:10 | 34,539,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 957 | r | ui.R |
# Install the shiny package if not already installed
# install.packages("shiny")
#library(shiny) # load the shiny package
# Define UI for application
shinyUI(fluidPage(
# Header or title Panel
titlePanel(h4('A Histogram with of KPI for WJ', align = "center")),
# Sidebar panel
sidebarPanel(
selectInput("var", label = "1. Select the KPI to Check",
choices = c("Ave_Freq" = 7, "Ave_Spend" = 6, "Ave_ticket" = 8, "People"=3),
selected = 3),
sliderInput("Months", "2. Select the number of histogram Months by using the slider below", min=1, max=12, value=1),
radioButtons("color", label = "3. Select the color of histogram",
choices = c("Green", "Red",
"Yellow"), selected = "Green")
),
# Main Panel
mainPanel(
textOutput("text1"),
textOutput("text2"),
textOutput("text3"),
plotOutput("myhist")
)
)
) |
0b1c06881411cbe0c11e2e8b5f3383e40305b904 | b04cc9ec33f2d70aac365fd3a839950e2f20be90 | /cachematrix.R | a130fb471f0e88bd4b506af22263ca8683c65ffa | [] | no_license | jhbrewster/ProgrammingAssignment2 | 9ce2ae13da52ab7bec8f46355c54fdb3e38c5082 | ed9541ca0127578940d9a81ef354821bc48a3e56 | refs/heads/master | 2021-01-21T08:32:50.750335 | 2014-09-14T10:34:54 | 2014-09-14T10:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,553 | r | cachematrix.R | ## Thses functions create a CacheMatrix - a custom datatype (which is based
## on a list), which allows the matrix inverse to be stored in a cache to
## avoid recalculation. If the matrix data is changed, the cache is cleared
## to prevent an inverse being stored which is based on old data
## This function takes a matrix as an argument, and returns a CacheMatrix
## (in the form of a list).
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL # inverse is initially NULL as it hasn't yet been calculated
set <- function(y) {
x <<- y
inv <<- NULL # clear any cached inverse from previous matrix
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function takes a CacheMatrix as an argument. If there already
## and inverse stored in the cache, then that is returned to avoid recalculation.
## If the current cache is NULL, then the matrix inverse is calculated, stored in
## the cache for future reuse, and returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv) # cached inverse exits, so can return without calculating
}
# no inverse in cache, so calculate it, store, and return
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
40b7f11d538935d0e316e4bbcb130b1e8decfdb1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TSDT/examples/unpack_args.Rd.R | 5c14d3c52be38cb6b3797d9ef63904ca9e83e4c3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 422 | r | unpack_args.Rd.R | library(TSDT)
### Name: unpack_args
### Title: unpack_args
### Aliases: unpack_args
### ** Examples
## Create a list of named elements
arglist <- list( one = 1, two = 2, color = "blue" )
## The variables one, two, and color do not exist in the current environment
ls()
## Unpack the elements in arglist
unpack_args( arglist )
## Now the variables one, two, and color do exist in the current environment
ls()
one
|
11e5e131a7bf2c5c08310c144a6811804e15d8b2 | b53a8de0aa574d0bff6b0fea9699efa73fa0c763 | /test.r | f81a43dae74771e0417c789faff93456943a4003 | [] | no_license | mwschultz/RApp | a320cc946802128732b652f9f54b77795bfd0766 | db98a651dc194537ad2b41624b83f66241051b9f | refs/heads/master | 2021-01-15T11:40:00.457272 | 2017-08-08T22:02:28 | 2017-08-08T22:02:28 | 99,633,352 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,122 | r | test.r | require(tidyverse)
require(haven)
require(dplyr)
require(Lahman)
require(magrittr)
Batting %>% select(X2B:HR)
c<-Batting %>% transmute(ExtraBaseHits = X2B + X3B + HR)
#mutate -- add newly created column to data frame
#transmute -- create new variable
Batting %>% summarise(AvgX2B = mean(X2B, na.rm=TRUE))
#NA is missing
Batting %>% group_by (teamID)
Batting <- tbl_df(Batting)
Batting
Batting %>% group_by(teamID) #Once group by something, the tag sticks
#with the data set until you ungroup it.
summarise(AvgX2B = mean(X2B, na.rm = TRUE))
# create two simple data frames
# underscore functions are from the tidyverse package
a <- data_frame(color = c("green", "yellow", "red"), num = 1:3)
b <- data_frame(color = c("green", "yellow", "pink"), size = c("S", "M", "L"))
a
b
# Mutating joins: by default they match on the shared column "color".
inner_join(a, b)
inner_join(a, b)
full_join(a, b)
left_join(a, b)
right_join(a, b)
left_join(b, a)
# Filtering joins: these return rows of the first table only.
semi_join(a, b) #joining between a and b, and filtering a where there is a match
anti_join(a, b) #joining between a and b, and filthering where there is NOT a match
# After renaming, the key columns no longer share a name, so the join key
# must be given explicitly with `by`.
b <- b %>% rename(col = color)
a
b
inner_join(a, b, by = c("color" = "col"))
# Read the Titanic passenger data and explore categorical variables with
# contingency tables.
titanicData <- read_csv("https://raw.githubusercontent.com/jbpost2/DataScienceR/master/datasets/titanic.csv")
titanicData
# One-way frequency tables.
table(titanicData$embarked)
table(titanicData$survived)
table(titanicData$sex)
help(table)
# Two-way table: survival by sex.
table(titanicData$survived, titanicData$sex)
# Three-way table: survival x embarkation port x sex.
tab <- table(titanicData$survived, titanicData$embarked, titanicData$sex)
tab
tab[1, ,]
# Print the whole table again.  BUG FIX: the original line read `tabl[]`,
# a typo that errored with "object 'tabl' not found".
tab[]
# library() rather than require(): fail loudly if ggplot2 is missing.
library(ggplot2)
#filled bar plot
# Drop rows with a missing embarkation port before plotting.
g <- ggplot(data = titanicData %>% drop_na(embarked),
            aes(x = as.factor(embarked)))
g + geom_bar(aes(fill = as.factor(survived)))
# Same plot with readable labels and full port names.
g <- ggplot(data = titanicData %>% drop_na(embarked),
            aes(x = as.factor(embarked)))
g + geom_bar(aes(fill = as.factor(survived))) +
  labs(x = "City Embarked",
       title = "Bar Plot of Embarked City for Titanic Passengers") +
  scale_x_discrete(labels = c("Cherbourg", "Queenstown", "Southampton")) +
  scale_fill_discrete(name = "Survived", labels = c("No","Yes"))  # fixed "Surived" legend typo
#### Numeric summaries of the built-in CO2 data set ####
CO2 <- tbl_df(CO2)
CO2
# Trimmed mean: drops the most extreme 5% from each tail first.
mean(CO2$uptake, trim = 0.05)
median(CO2$uptake)
summary(CO2$uptake)
quantile(CO2$uptake, probs = c(0.1,0.2))
# Collect summary(), variance, SD and two quantiles into one named vector.
stats <- c(summary(CO2$uptake), var(CO2$uptake),
           sd(CO2$uptake), quantile(CO2$uptake, probs = c(0.1, 0.2)))
stats
str(stats)
attributes(stats)
# Positions 1-6 come from summary(); relabel the four appended values.
names(stats)[7:10] <-c("Var", "SD", "10thP", "20thP")
# Grouped summaries by treatment (and by plant origin as well).
CO2 %>% group_by(Treatment) %>% summarise(avg=mean(uptake))
CO2 %>% group_by(Treatment) %>% summarise(median=median(uptake))
CO2 %>% group_by(Treatment, Type) %>% summarise(avg=mean(uptake))
# Distribution plots of CO2 uptake.
g <- ggplot(CO2, aes(x= uptake)) + geom_dotplot()
g
g <- ggplot(CO2, aes(x= uptake)) + geom_dotplot(aes(color=Treatment))
g<-ggplot(CO2, aes(x=uptake)) + geom_histogram(color="blue", fill="red", linetype = "dashed")
g
# Histogram on the density scale with an overlaid kernel density estimate.
# NOTE(review): the ..density.. notation is superseded by after_stat(density)
# in current ggplot2 -- confirm the installed version still accepts it.
g <- ggplot(CO2, aes(x = uptake))+
  geom_histogram(aes(y = ..density.., fill = Treatment))+
  geom_density(adjust = 0.25, alpha = 0.5, aes(fill = Treatment))
# Empirical CDF, one step curve per treatment group.
g <- ggplot(CO2, aes(x=uptake, color=Treatment)) + stat_ecdf(geom="step")
g
# NFL game scores: scatter plots of home final score vs home rushing yards.
scoresFull <- read_csv("https://raw.githubusercontent.com/jbpost2/DataScienceR/master/datasets/scoresFull.csv")
scoresFull
# Points plus the default loess smoother and a straight-line lm fit.
g <- ggplot(scoresFull, aes(x = homeRushYds, y = HFinal)) + geom_point() + geom_smooth() +
  geom_smooth(method = lm, col = "Red") #linear regression line
g
g <- ggplot(scoresFull, aes(x = homeRushYds, y = HFinal)) +
  geom_point()
g
g <- ggplot(scoresFull, aes(x = homeRushYds, y = HFinal)) +
  geom_point() +
  geom_smooth() +
  geom_smooth(method = lm, col = "Red")
# paste() demo: same tokens, different separators.
paste("Hi", "What", "Is", "Going", "On", "?", sep = " ")
paste("Hi", "What", "Is", "Going", "On", "?", sep = ".")
# Compute the correlation that the annotation below prints.  BUG FIX: in
# the original script `correlation` was never defined, so building this
# plot failed with "object 'correlation' not found".
correlation <- cor(scoresFull$homeRushYds, scoresFull$HFinal)
g <- ggplot(scoresFull, aes(x = homeRushYds,y = HFinal)) +
  geom_point() +
  geom_smooth() +
  geom_smooth(method = lm, col = "Red") +
  geom_text(x = 315, y = 10, size = 5, label = paste0("Correlation = ", round(correlation, 2)))
g
# Facet the scatter plot by stadium roof type and playing surface.
g <- ggplot(scoresFull, aes(x = homeRushYds, y = HFinal)) +
  geom_point()+
  facet_grid(roof ~ surface)
g
# Same facets, with points coloured by the home spread.
g <- ggplot(scoresFull, aes(x = homeRushYds,y = HFinal)) +
  geom_point(aes(col = homeSpread), alpha = 0.3, size = 0.5) +
  facet_grid(roof ~ surface)
g
# Scatter-plot matrix of four home-team variables.
pairs(select(scoresFull, Hturnovers, homeRushYds,
             homePassYds, HFinal), cex = 0.3)
# Spearman (rank) correlation matrix of the same variables.
Correlation <- cor(select(scoresFull, Hturnovers, homeRushYds,
                          homePassYds, HFinal), method = "spearman")
# library() rather than require(): fail loudly if corrplot is missing.
library(corrplot)
# Upper triangle as circles, lower triangle overlaid as numbers.
corrplot(Correlation, type = "upper",
         title = "Figure 2: Correlation matrix of variables.",
         tl.pos = "lt")
corrplot(Correlation, type = "lower", method = "number",
         add = TRUE, diag = FALSE, tl.pos = "n")
# Boxplot of home passing yards by playing surface.
g <- ggplot(scoresFull, aes(x = surface, y = homePassYds)) +
  geom_boxplot(fill = "grey")
g
# Add jittered raw points and a per-roof-type line through the means.
# NOTE(review): stat_summary(fun.y = ...) is deprecated in current ggplot2
# in favour of `fun =` -- confirm the installed version still accepts it.
g <- ggplot(scoresFull, aes(x = surface, y = homePassYds)) +
  geom_boxplot(fill = "grey") +
  geom_jitter(aes(col = roof), alpha = 0.3, size = 0.3) +
  stat_summary(fun.y = mean, geom = "line",
               lwd = 1.5, aes(group = roof, col = roof))
g
# Violin plot with a translucent boxplot layered on top.
g <- ggplot(scoresFull, aes(x = surface, y = homePassYds))+
  geom_violin(fill = "blue") + geom_boxplot(fill="grey", alpha = 0.3)
g
# --- Dates: combine the day-month field with the season year, then parse ---
oneDate<-paste(scoresFull$date[1], scoresFull$season[1], sep = "-")
oneDate
library(lubridate)
# "%d-%b-%Y" = day, abbreviated month name, 4-digit year.
as.Date(oneDate, "%d-%b-%Y")
as.Date(oneDate, "%d-%b-%Y") + 1
# Replace the date column with proper Date objects for the whole data set.
scoresFull$date <- paste(scoresFull$date, scoresFull$season, sep = "-") %>%
  as.Date("%d-%b-%Y")
# Average total home yards per season for the four AFC North teams.
subScores <- scoresFull %>%
  filter(homeTeam %in% c("Pittsburgh Steelers", "Cleveland Browns",
                         "Baltimore Ravens", "Cincinnati Bengals")) %>%
  group_by(season, homeTeam) %>%
  summarise(homeAvgYds = mean(homePassYds + homeRushYds))
subScores
g <- ggplot(subScores, aes(x = season, y = homeAvgYds, color = homeTeam)) +
  geom_line(lwd = 2)
g
# NOTE(review): installing packages from inside an analysis script is
# undesirable -- consider guarding with requireNamespace() instead.
install.packages("plot3Drgl")
library(plot3Drgl)
# 3-D scatter: home rush yds x away rush yds x home final score.
scatter3D(x = scoresFull$homeRushYds, y = scoresFull$awayRushYds,
          z = scoresFull$HFinal)
plotrgl()
# --- County voting data: linear regression of income on college % ---
voting <- read.csv("https://raw.githubusercontent.com/jbpost2/DataScienceR/master/datasets/counties.csv", header = TRUE)
voting
votePlot <- ggplot(voting, aes(x = college, y = income))
# Scatter plot annotated with the Pearson correlation.
votePlot +
  geom_point()+
  geom_text(x = 40, y = 15000, label = round(cor(voting$college, voting$income), 2))
votePlot
# Fit income ~ college; the second call stores the fit for inspection.
lm(income ~ college, data = voting)
fit <- lm(income ~ college, data = voting)
attributes(fit)
anova(fit)
summary(fit)
plot(fit)
# Predictions at college = 40% and 10%, with standard errors and
# confidence vs prediction intervals.
predict(fit, newdata = data.frame(college = c(40, 10)))
predict(fit, newdata = data.frame(college = c(40, 10)), se.fit = TRUE)
predict(fit, newdata = data.frame(college = c(40, 10)),
        se.fit = TRUE, interval = "confidence")
predict(fit, newdata = data.frame(college = c(40, 10)),
        se.fit = TRUE, interval = "prediction")
# Separate lm fits per region, drawn by ggplot...
votePlot +
  geom_point(aes(col = region)) +
  geom_smooth(method = "lm", aes(col = region))
# ...and stored as one model object per region with do().
fits <- voting %>% group_by(region) %>%
  do(model = lm(income ~ college, data = .))
names(fits)
# Multiple regression: add the Perot vote share as a second predictor.
fit2<-lm(income ~ college + Perot, data = voting)
anova(fit2)
summary(fit2)
coef(fit2)
fit2$rank
plot(fit2)
predict(fit2, newdata = data.frame(college = 40, Perot = 20))
|
da1afb8ecfe4444c74882f7ae840d209515393f0 | 6ccbeb28582657306ee2a5500ec4396bafad06e4 | /Generation25/NovelPopulationMaleFitnessAssay/DeltaNovelPopMaleFitnessAssayG25.R | 3adcf1893942c4d68c2adb078db89a1e08d6fcfc | [] | no_license | KKLund-Hansen/SexChromCoAdapt | ac890836f047e1363459db2d4bd0b2b2071721b1 | 34712cefaa8b257b2c279969a377b4bbf7174d21 | refs/heads/master | 2021-07-20T10:26:06.996833 | 2021-02-06T19:47:25 | 2021-02-06T19:47:25 | 242,177,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,270 | r | DeltaNovelPopMaleFitnessAssayG25.R | ################################################################################################
###################### ΔNOVEL POPULATION MALE FITNESS ASSAY GENERATION 25 ######################
################################################################################################
#Set up environment
library(Hmisc)   # provides errbar() used in the plotting section below
#Read in csv file with data
DeltaNPFAg25.data <- read.table(file = "NovelPopMaleFitnessAssayG25.csv", h = T, sep = ",")
########################################## STATISTIC #########################################
### CALCULATE RELATIVE FITNESS ###
#Calculate the proportion of red eyed offspring
DeltaNPFAg25.data$prop_red <- DeltaNPFAg25.data$red / DeltaNPFAg25.data$total
#Divide each proportion red by 5 to get the per-male value
#(presumably 5 males per assay -- see original comment)
DeltaNPFAg25.data$prop_red_male <- DeltaNPFAg25.data$prop_red / 5
#Find the maximum per-male proportion across all observations
maxDeltaNPFAg25 <- max(DeltaNPFAg25.data$prop_red_male, na.rm = T)
#Calculate relative fitness by dividing each proportion by the maximum
DeltaNPFAg25.data$relative_fit <- DeltaNPFAg25.data$prop_red_male / maxDeltaNPFAg25
### ΔFITNESS CALCULATIONS ###
#To test if the change of sex chromosomes is significantly from the wild type population,
#we do a bootstrap model to generate CI. If theses don't overlap 0 there has been a significant change
#Mean relative fitness per population; the numeric indices below pick
#populations out of this vector (order of the population factor levels).
Delta.NPFAg25 <- as.numeric(tapply(DeltaNPFAg25.data$relative_fit, DeltaNPFAg25.data$population, mean, na.rm = T))
#View Delta.NPFAg25
Delta.NPFAg25
#Δfitness Inn-Lx - Innisfail
Delta.NPFAg25[3] - Delta.NPFAg25[2]
# 0.07378461
#Δfitness Inn-Ly - Innisfail
Delta.NPFAg25[4] - Delta.NPFAg25[2]
# 0.08668395
#Δfitness Inn-Ox - Innisfail
Delta.NPFAg25[5] - Delta.NPFAg25[2]
# 0.1420969
#Δfitness Inn-Oy - Innisfail
Delta.NPFAg25[6] - Delta.NPFAg25[2]
# 0.100319
#Δfitness Odd-Ix - Odder
Delta.NPFAg25[8] - Delta.NPFAg25[7]
# -0.04548151
#Δfitness Odd-Iy - Odder
Delta.NPFAg25[9] - Delta.NPFAg25[7]
# 0.01631576
#Δfitness Odd-Dx - Odder
Delta.NPFAg25[10] - Delta.NPFAg25[7]
# 0.03387821
#Δfitness Odd-Dy - Odder
Delta.NPFAg25[11] - Delta.NPFAg25[7]
# 0.03722847
#TEST OF PROBABILITY OF SUCCESS. THE PROBABILITY OF POSITIVE VALUE
#Sign test: 7 of the 8 Δfitness point estimates above are positive.
binom.test(7, 8, p = 0.5, alternative = "two.sided")
#Not significant, P = 0.07031
#First we make new vector to collect the data
#(one preallocated accumulator per contrast, one slot per bootstrap replicate)
LxI <- numeric(10000)
LyI <- numeric(10000)
OxI <- numeric(10000)
OyI <- numeric(10000)
IxO <- numeric(10000)
IyO <- numeric(10000)
DxO <- numeric(10000)
DyO <- numeric(10000)
#Then we set up a bootstrap that resampels the data from 12 data points to calculate a new mean everytime for 10000 times
#NOTE(review): sample(12, replace = TRUE) hard-codes 12 rows per
#population -- confirm every population really has exactly 12 rows.
for (i in 1:10000){
  DATA <- do.call(rbind, lapply(split(DeltaNPFAg25.data, DeltaNPFAg25.data$population), function(x) x[sample(12, replace = T),]))
  Delta.NPFAg25 <- as.numeric(tapply(DATA$relative_fit, DATA$population, mean, na.rm = T))
  LxI[i] <- Delta.NPFAg25[3] - Delta.NPFAg25[2]
  LyI[i] <- Delta.NPFAg25[4] - Delta.NPFAg25[2]
  OxI[i] <- Delta.NPFAg25[5] - Delta.NPFAg25[2]
  OyI[i] <- Delta.NPFAg25[6] - Delta.NPFAg25[2]
  IxO[i] <- Delta.NPFAg25[8] - Delta.NPFAg25[7]
  IyO[i] <- Delta.NPFAg25[9] - Delta.NPFAg25[7]
  DxO[i] <- Delta.NPFAg25[10] - Delta.NPFAg25[7]
  DyO[i] <- Delta.NPFAg25[11] - Delta.NPFAg25[7] }
#Run the calculation
#Each block reports the bootstrap mean and a normal-approximation 95% CI
#(mean ± 1.96 * bootstrap SD); the inline numbers record one observed run.
#95% CI for LxI
mean(LxI)
# 0.07305206
mean(LxI) - (1.96 * sd(LxI))
# -0.04072608
mean(LxI) + (1.96 * sd(LxI))
# 0.1868302
#95% CI for LyI
mean(LyI)
# 0.08624951
mean(LyI) - (1.96 * sd(LyI))
# -0.05693295
mean(LyI) + (1.96 * sd(LyI))
# 0.229432
#95% CI for OxI (the only interval that excludes 0)
mean(OxI)
# 0.1419263
mean(OxI) - (1.96 * sd(OxI))
# 0.01872792
mean(OxI) + (1.96 * sd(OxI))
# 0.2651247
#95% CI for OyI
mean(OyI)
# 0.1004432
mean(OyI) - (1.96 * sd(OyI))
# -0.02852504
mean(OyI) + (1.96 * sd(OyI))
# 0.2294115
#95% CI for IxO
mean(IxO)
# -0.0447177
mean(IxO) - (1.96 * sd(IxO))
# -0.1829805
mean(IxO) + (1.96 * sd(IxO))
# 0.09354508
#95% CI for IyO
mean(IyO)
# 0.01626032
mean(IyO) - (1.96 * sd(IyO))
# -0.1290477
mean(IyO) + (1.96 * sd(IyO))
# 0.1615684
#95% CI for DxO
mean(DxO)
# 0.03317775
mean(DxO) - (1.96 * sd(DxO))
# -0.08780523
mean(DxO) + (1.96 * sd(DxO))
# 0.1541607
#95% CI for DyO
mean(DyO)
# 0.03630816
mean(DyO) - (1.96 * sd(DyO))
# -0.09237264
mean(DyO) + (1.96 * sd(DyO))
# 0.164989
######################################### PLOT DATA #########################################
#To be able to plot the bootstrap data as boxplots I create a new data frame with the data
#First I make a vector with the populations
population <- c(rep("aLHmX_Inn", 10000), rep("bLHmY_Inn", 10000), rep("cOddX_Inn", 10000), rep("dOddY_Inn", 10000),
                rep("eInnX_Odd", 10000), rep("fInnY_Odd", 10000), rep("gDahX_Odd", 10000), rep("hDahY_Odd", 10000))
#Then I collect all the bootstrap data in a new vector (same order as `population`)
deltafitness <- c(LxI, LyI, OxI, OyI, IxO, IyO, DxO, DyO)
#Then it's all collected in a new data frame
DFg25 <- data.frame(population, deltafitness)
#And write it into a new file
#BUG FIX: the original called write.csv(EvolDF, ...), but EvolDF is never
#defined in this script; the frame built above is DFg25.
write.csv(DFg25, file = "DeltaNovelPopMaleFitnessAssayG25.csv")
#Read in csv file with data
DNPFAg25.data<- read.table(file = "DeltaNovelPopMaleFitnessAssayG25.csv", h = T, sep = ",")
#MEAN of each bootstrap distribution, by population
meanDNPFAg25 <- tapply(DNPFAg25.data$deltafitness, DNPFAg25.data$population, mean)
#SD of each bootstrap distribution, by population
sdDNPFAg25 <- tapply(DNPFAg25.data$deltafitness, DNPFAg25.data$population, sd)
#Plot margins (bottom, left, top, right)
par(mar = c(6, 5, 2, 2))
#Plot errorbars; x positions leave a gap between the two groups of four
xDNPFAg25 <- c(0.5,1,1.5,2, 3,3.5,4,4.5)
errbar(xDNPFAg25, meanDNPFAg25, meanDNPFAg25 + (1.96 * sdDNPFAg25), meanDNPFAg25 - (1.96 * sdDNPFAg25),
       xlim = c(0.3, 4.7), xlab = "", xaxt = "n", ylim = c(-0.2, 0.3), ylab = expression(Delta~"Fitness"),
       cex.axis = 1.2, cex.lab = 1.5, las = 1, pch = c(17, 18), cex = c(3, 3.5), lwd = 3)
#AXIS with custom per-position labels
axis(1, at = c(0.5,1,1.5,2, 3,3.5,4,4.5), cex.axis = 1.2,
     labels = c(expression("L"["X"]), expression("L"["Y"]), expression("O"["X"]), expression("O"["Y"]),
                expression("I"["X"]), expression("I"["Y"]), expression("D"["X"]), expression("D"["Y"])))
#Add reference line at 0 (no change in fitness)
abline(h = 0, lty = 2, lwd = 2)
#And add population labels below the axis
mtext(expression(italic("Innisfail")), side = 1, line = 3, at = 1.25, cex = 1.5)
mtext(expression(italic("Odder")), side = 1, line = 3, at = 3.75, cex = 1.5)
#Mark the significant contrast with an asterisk
points(1.5, 0.3, pch = "*", bg = "black", cex = 1.5)
|
e9057317284b134a8ed1d5332acbc814cb32de9b | 6dde5e79e31f29db901c81e4286fea4fa6adbc48 | /man/plotscores.Rd | e8c8d397f1d8d90d4f82a21abafcd49990deb31f | [] | no_license | cran/fda | 21b10e67f4edd97731a37848d103ccc0ef015f5a | 68dfa29e2575fb45f84eb34497bb0e2bb795540f | refs/heads/master | 2023-06-08T07:08:07.321404 | 2023-05-23T22:32:07 | 2023-05-23T22:32:07 | 17,696,014 | 23 | 19 | null | 2022-03-13T17:58:28 | 2014-03-13T04:40:29 | R | UTF-8 | R | false | false | 1,062 | rd | plotscores.Rd | \name{plotscores}
\alias{plotscores}
\title{
Plot Principal Component Scores
}
\description{
The coefficients multiplying the harmonics or principal component functions
are plotted as points.
}
\usage{
plotscores(pcafd, scores=c(1, 2), xlab=NULL, ylab=NULL,
loc=1, matplt2=FALSE, ...)
}
\arguments{
\item{pcafd}{
an object of the "pca.fd" class that is output by function
\code{pca.fd}.
}
\item{scores}{
the indices of the harmonics for which coefficients are
plotted.
}
\item{xlab}{
a label for the horizontal axis.
}
\item{ylab}{
a label for the vertical axis.
}
\item{loc}{
an integer:
if \code{loc} > 0, you can click on the plot in \code{loc} places, and
plots of the functions with these values of the principal component
coefficients will then be displayed.
}
\item{matplt2}{
a logical value:
if \code{TRUE}, the curves are plotted on the same plot;
otherwise, they are plotted separately.
}
\item{\dots }{
additional plotting arguments used in function \code{plot}.
}
}
\section{Side Effects}{
a plot of scores
}
\seealso{
\code{\link{pca.fd}}
}
\keyword{smooth}
|
d6117cbe013ed49cb9c51ff7f8a5ea60e1858773 | a2ea354ac7e7bc199b68a3e5a6f8238db479a1b2 | /R/staticdocs.R | 7da02cef651f4a31cad5f734570582b2c43f6b80 | [
"MIT"
] | permissive | robertzk/rocco | 84c641f490f7d296bf39affef248a36d1e68c761 | d6c6969567c023536358b3ebb044abae7ef15a0e | refs/heads/master | 2020-06-03T07:04:58.543644 | 2018-08-04T17:59:17 | 2018-08-04T17:59:17 | 33,039,832 | 2 | 2 | null | 2017-05-19T18:15:53 | 2015-03-28T16:05:48 | CSS | UTF-8 | R | false | false | 4,362 | r | staticdocs.R | #' Writes pkgdown if they don't already exist.
#' @param package_dir character. The directory of the package to write pkgdown for.
#' @return The result of \code{pkgdown::build_site}.
write_pkgdown <- function(package_dir) {
  # Fail fast if pkgdown itself is not installed.
  check_for_pkgdown_package()
  devtools::document(package_dir)
  # Ensure inst/, the pkgdown index and the pkgdown folder exist before
  # building the site.
  if (!inst_exists(package_dir)) { create_inst(package_dir) }
  if (!pkgdown_index_exists(package_dir)) { create_pkgdown_index(package_dir) }
  if (!pkgdown_folder_exists(package_dir)) { create_pkgdown_folder(package_dir) }
  pkgdown::build_site(package_dir)
}
#' Add pkgdown into the Rocco directory.
#'
#' Since Rocco and Pkgdown conflict for gh-pages and we often want both,
#' this will resolve the tension and create one harmonious site with rocco
#' docs located at index.html and pkgdown located at pkgdown/index.html.
#'
#' @param directory character. The directory Rocco is running in.
#' @param output character. The directory to create the skeleton in.
load_pkgdown <- function(directory, output) {
  # Wipe and recreate the pkgdown output directory from scratch.
  create_pkgdown_directory <- function(dir) {
    unlink(dir, recursive = TRUE, force = TRUE)
    dir.create(dir, showWarnings = FALSE)
  }
  # Recreate each subdirectory of `dir` named in `subdirs`.
  create_pkgdown_folder_tree <- function(dir, subdirs) {
    subdirs <- lapply(subdirs, function(subdir) file.path(dir, subdir))
    unlink(subdirs, recursive = TRUE, force = TRUE)
    lapply(subdirs, dir.create, showWarnings = FALSE)
  }
  # For a relative path like "css/style.css" the copy target is the first
  # path component under `dir`; bare filenames go to `dir` itself.
  determine_dir <- function(dir, file) {
    dir_split <- strsplit(file, "/")[[1]]
    if (length(dir_split) > 1) {
      file.path(dir, dir_split[[1]])
    } else { dir }
  }
  # Copy every file from source_dir into destination/pkgdown, using
  # determine_dir() to keep one level of subdirectory structure.
  create_pkgdown_files <- function(files, source_dir, destination) {
    from_files <- lapply(files, function(file) file.path(source_dir, file))
    destination <- file.path(destination, "pkgdown")
    to_dirs <- Map(determine_dir, rep(destination, length(files)), files)
    Map(file.copy, from_files, to_dirs, overwrite = TRUE)
  }
  pkgdown_dir <- file.path(output, "pkgdown")
  create_pkgdown_directory(pkgdown_dir)
  web_dir <- file.path(directory, "inst", "web")
  # Subdirectories = entries of inst/web that do not match the pattern
  # ".html".  NOTE(review): as a regex "." matches any character here;
  # "\\.html$" would be the precise form -- confirm intent.
  pkgdown_subdirs <- grep(".html", dir(web_dir), value = TRUE,
                          fixed = FALSE, invert = TRUE)
  create_pkgdown_folder_tree(pkgdown_dir, pkgdown_subdirs)
  pkgdown_files <- dir(web_dir, recursive = TRUE)
  create_pkgdown_files(pkgdown_files, source_dir = web_dir, destination = output)
}
#' Test whether a path assembled under a base directory exists.
#'
#' @param directory character. Base directory to look under.
#' @param ... Further path components, forwarded to \code{file.path}.
#' @return \code{TRUE} if the assembled path exists on disk.
dir_exists <- function(directory, ...) {
  target <- file.path(directory, ...)
  file.exists(target)
}
#' Create a directory under a base path, silently skipping if it exists.
#'
#' @inheritParams dir_exists
#' @return Invisibly, the logical result of \code{dir.create}.
dir_create <- function(directory, ...) {
  target <- file.path(directory, ...)
  dir.create(target, showWarnings = FALSE)
}
#' Does the package already contain an \code{inst/} folder?
#' @inheritParams dir_exists
inst_exists <- function(directory) {
  dir_exists(directory, "inst")
}
#' Ensure the package's \code{inst/} directory exists (create if missing).
#' @inheritParams dir_exists
create_inst <- function(directory) {
  dir_create(directory, "inst")
}
#' Does \code{inst/pkgdown/} exist inside the package?
#' @inheritParams dir_exists
pkgdown_folder_exists <- function(directory) {
  dir_exists(directory, file.path("inst", "pkgdown"))
}
#' Create the package's \code{inst/pkgdown/} directory.
#' @inheritParams dir_exists
create_pkgdown_folder <- function(directory) {
  dir_create(directory, file.path("inst", "pkgdown"))
}
#' Is there a pkgdown index at \code{inst/pkgdown/index.r}?
#' @inheritParams dir_exists
pkgdown_index_exists <- function(directory) {
  has_folder <- pkgdown_folder_exists(directory)
  has_folder && dir_exists(directory, "inst", "pkgdown", "index.r")
}
#' Create the pkgdown index.
#' @inheritParams dir_exists
# NOTE(review): dir_create() makes a *directory* named "index.r" under
# inst/pkgdown/ rather than a file; confirm this is intentional (a file
# would normally be made with file.create()).
create_pkgdown_index <- function(directory) {
  dir_create(directory, "inst", "pkgdown", "index.r")
}
#' Has the rendered pkgdown output been written to \code{inst/web/}?
#' @inheritParams dir_exists
pkgdown_written <- function(directory) {
  dir_exists(directory, file.path("inst", "web"), "index.html")
}
#' Do both the pkgdown index and the rendered output exist?
#' @inheritParams dir_exists
pkgdown_exist <- function(directory) {
  if (!pkgdown_index_exists(directory)) {
    return(FALSE)
  }
  pkgdown_written(directory)
}
#' Abort with an informative error unless the pkgdown package is installed.
check_for_pkgdown_package <- function() {
  installed <- utils::installed.packages()[, 1]
  if (!("pkgdown" %in% installed)) {
    stop("You must install the pkgdown package to run pkgdown. ",
         "You can get it from https://github.com/hadley/pkgdown.", call. = FALSE)
  }
}
|
581d5982750c0c4c6fc70a1b8dd656757dcec641 | fd76aba67a052198f7ee53b3c17d5838ed316a60 | /Week 2/Programming Assignment 2/complete.R | 80258afc199340a44caf5796bc3259f98741312c | [] | no_license | romilhc/Coursera-R-Programming | 08dd9f149317400e7b7739578e63ff80ad6c1eb4 | e27386989edbc5b00d0a8aad6b524e781da29753 | refs/heads/master | 2021-04-28T22:23:54.426343 | 2017-01-13T06:04:48 | 2017-01-13T06:04:48 | 77,752,125 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 758 | r | complete.R | complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
#Get files from working directory
allFile <- list.files(path = directory, full.names = TRUE)
#Initialize Data Frame
fileData<-data.frame()
compCase<-data.frame()
for(i in id){
#Compute no. of complete cases
fileData<-read.csv(allFile[i])
nobs<-sum(complete.cases(fileData))
compCase<-rbind(compCase,data.frame(i,nobs))
}
compCase
} |
ebbc5f204d8927707e8f023ce406a05adeb1a69e | 1db0e3ae5165dbcd040320a3ba058528139ffbdc | /man/r2julia_sort.Rd | f0064fd52a996cc02785f40f12bb1b50dc747efa | [] | no_license | hemberg-lab/sctransfermap | f04e683872472602557837e3047ec466ddd680aa | 8d182123127b827ca7f0072aea5ac3eade0cbd16 | refs/heads/master | 2020-08-30T05:17:55.289158 | 2019-11-08T09:37:52 | 2019-11-08T09:37:52 | 218,274,620 | 0 | 3 | null | 2019-11-08T09:37:54 | 2019-10-29T11:48:30 | R | UTF-8 | R | false | true | 619 | rd | r2julia_sort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoreMethods.R
\name{r2julia_sort}
\alias{r2julia_sort}
\title{r2julia_sort}
\usage{
r2julia_sort(r_list, julia_list, Index = TRUE)
}
\arguments{
\item{r_list}{the list whose order should be changed}
\item{julia_list}{the target list giving the desired order}
\item{Index}{TRUE if the result should be an index vector for r_list; FALSE if the result should be the reordered version of r_list}
}
\value{
index or changed list of r_list
}
\description{
Make an index that reorders a list to match the order of another list.
}
\examples{
r_list
new_list
new_index <- r2julia_sort(r_list, new_list)
r_list_new <- r_list[new_index]
}
|
f2c88f27486e650899d42b42997a09e40a1a2ee1 | 8c6d6fb800aea7364355a1047bfecbb06877bcde | /01-read_galp.R | 95aa1b0a31ecf6b4e9b06edcc33bb9fd827ef8ac | [] | no_license | TerminatorJ/delfi_scripts | 2bdd07e04b4b120b5ffe2fcb9974f34a13f99a99 | 51b7ce1c85f6e68363370ee08e713ab0553f7f82 | refs/heads/master | 2022-11-26T17:03:39.559535 | 2020-08-06T02:32:39 | 2020-08-06T02:32:39 | 281,132,281 | 1 | 0 | null | 2020-07-20T14:01:28 | 2020-07-20T14:01:27 | null | UTF-8 | R | false | false | 2,233 | r | 01-read_galp.R | library(GenomicAlignments)
library(GenomicRanges)  # genomic interval classes used alongside GenomicAlignments
library(getopt)  # NOTE(review): loaded, but the option parsing in this script is done by hand -- possibly unused
### Used for getting information from shell script
args <- commandArgs(trailingOnly = TRUE)##仅仅返回--之后的字符串:[1] "--a" "file1" "--b" "file2" "--c" "file3"
hh <- paste(unlist(args), collapse = " ")#将参数全部使用空格隔开形成一个大字符串:[1] "--a file1 --b file2 --c file3"
listoptions <- unlist(strsplit(hh, "--"))[-1]##去掉--:[1] "a file1 " "b file2 " "c file3"
#得到每一个参数传入的变量
#a file1 b file2 c file3
# "file1" "file2" "file3"
options.args <- sapply(listoptions, function(x) {
unlist(strsplit(x, " "))[-1]
})
##得到每一个参数名称
#a file1 b file2 c file3
# "a" "b" "c"
options.names <- sapply(listoptions, function(x) {
option <- unlist(strsplit(x, " "))[1]
})
#得到每一个传入的参数以及其对应的名称
# a b c
#"file1" "file2" "file3"
names(options.args) <- unlist(options.names)
id <- options.args[1]
bamdir <- options.args[2]
galpdir <- options.args[3]
###
### Read GAlignmentPairs
bamfile <- file.path(bamdir, id)##得到路径类型的字符串a/b$
indexed.bam <- gsub("$", ".bai", bamfile)#给文件添加末尾添加.bai,$表示字符串的末尾,结果应该是bam.bai
if (!file.exists(indexed.bam)) {
indexBam(bamfile)#对bam文件快速构建索引,产生bai文件,这之后,所有的bam文件都被加上索引
}
#ScanBamParam可以用来进行Bam文件的过滤,scanBamFlag可以用来指定特定的筛选条件
param <- ScanBamParam(flag = scanBamFlag(isDuplicate = FALSE,#去重
isSecondaryAlignment = FALSE,#去除多比对序列
isUnmappedQuery = FALSE),#去除没有比对上的序列
mapqFilter = 30)#不符合Q30的序列被去除掉
sample <- gsub(".bam", "", id)#id指的是每一个样本比对的文件,可以看出原来的命名为sample.bam
galp.file <- file.path(galpdir, paste0(sample, ".rds"))#设定保存文件的文件名称和路径
galp <- readGAlignmentPairs(bamfile, param = param)#将过滤后的文件进行读取
saveRDS(galp, galp.file)#存储过滤后的文件
|
e4b729f58240395e913a1844e090d3068aa91fb0 | 1a86508e2ea2c316d0fc77f9afd882627409e1f0 | /man/as_workflow_set.Rd | ae9189790b8a30cce97c156c940d639117056a27 | [
"MIT"
] | permissive | jonthegeek/workflowsets | d44faa0f53391d5c6f550044b2b21163e49a90ca | f910891f0f508cf654248d44403df47f981b41d2 | refs/heads/main | 2023-05-28T22:29:26.001283 | 2021-04-21T14:05:40 | 2021-04-21T14:05:40 | 359,985,659 | 0 | 0 | NOASSERTION | 2021-04-21T00:21:26 | 2021-04-21T00:21:25 | null | UTF-8 | R | false | true | 933 | rd | as_workflow_set.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_workflow_set.R
\name{as_workflow_set}
\alias{as_workflow_set}
\title{Save results from tuning or resampling functions as a workflow set}
\usage{
as_workflow_set(...)
}
\arguments{
\item{...}{One or more named objects. Names should be unique and the
objects should have at least one of the following classes:
\code{iteration_results}, \code{tune_results}, \code{resample_results}, or \code{tune_race}. Each
element should also contain the original workflow (accomplished using the
\code{save_workflow} option in the control function).}
}
\value{
A workflow set. Note that the \code{option} column will not reflect the
options that were used to create each object.
}
\description{
If results have been generated directly from functions like
\code{\link[tune:tune_grid]{tune::tune_grid()}}, they can be combined into a workflow set using this
function.
}
|
4a9cecc1371832837b9d7b7b04bb59c2668fa2f0 | e81dd06f9bc38ce2016de3c11312b5366b89cd53 | /R/load.R | a285aa7ccb9ef824f9d7bf13a172f609b43ec2b2 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zergmk2/tutoring2 | 8d4991f2dfcb73e29decdbbd8b3834ae5ac44e14 | b47adf99942cfed27e0203095a593a02c64c2a11 | refs/heads/master | 2023-02-21T18:41:43.165221 | 2021-01-27T07:16:05 | 2021-01-27T07:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 349 | r | load.R | library(tidyverse)
# Attach the remaining project packages.  The original repeated several
# library() calls (add2ggplot, data.table, glue, and extra tidyverse
# loads); library() is a no-op for an already-attached package, so the
# duplicates are dropped here without changing the search path.
library(gcookbook)
library(latex2exp)
library(add2ggplot)
library(scales)
library(data.table)
library(glue)
library(fs)
library(patchwork)
library(wrapr)
library(ggforce)
library(rgdal)
library(readxl)
library(projmgr)
library(lubridate)
|
a7c62abb0bd949421c51d0bd1fb71eca1ee90890 | 46ca4e8ed44c2ec2c478e6522c38a7ff5f749ce5 | /bearing_snippets/training_set.R | ba21feb0208316273d5b280f8bc92e9b42f2c312 | [
"MIT"
] | permissive | robertfjones/data-science | 5904fe089db41b865474e33a06e837f88e6cbfa8 | afe136a7d2798b953a01d677fe1b30333438c72a | refs/heads/master | 2020-04-06T05:17:36.643266 | 2014-01-26T16:44:51 | 2014-01-26T16:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 720 | r | training_set.R | # Read in the relabelled best features
basedir <- "/Users/vic/Projects/bearings/bearing_IMS/1st_test/"
data <- read.table(file=paste0(basedir, "../all_bearings_relabelled.csv"), sep=",", header=TRUE)
# Split into train and test sets, preserving percentage across states
# (stratified sampling: draw train.pc of the rows within each State).
train.pc <- 0.7
# One sample() call per state, in the same order as the original loop, so
# the result is identical for a given RNG seed.  lapply()/unlist()
# replaces growing `train` with c() inside a loop (quadratic copying).
train <- unlist(lapply(unique(data$State), function(state) {
  all.samples <- data[data$State==state,]
  len <- nrow(all.samples)  # clearer than length(all.samples[, 1])
  rownums <- sample(len, len*train.pc, replace=FALSE)
  as.integer(row.names(all.samples)[rownums])
}))
# Write to file for future use
write.table(train, file=paste0(basedir, "../train.rows.csv"), sep=",")
# Compare the balance of classes; the two tables should be roughly
# proportional if the stratified split worked.
table(data$State)
table(data[train,"State"])
|
380230b0a4a1d1e6fae43f0635b98ab05bba109b | ff9eb712be2af2fa24b28ecc75341b741d5e0b01 | /R/summaryStats.data.frame.R | e1af82c379a6811d35d445a715d116464beca319 | [] | no_license | alexkowa/EnvStats | 715c35c196832480ee304af1034ce286e40e46c2 | 166e5445d252aa77e50b2b0316f79dee6d070d14 | refs/heads/master | 2023-06-26T19:27:24.446592 | 2023-06-14T05:48:07 | 2023-06-14T05:48:07 | 140,378,542 | 21 | 6 | null | 2023-05-10T10:27:08 | 2018-07-10T04:49:22 | R | UTF-8 | R | false | false | 1,819 | r | summaryStats.data.frame.R | summaryStats.data.frame <-
# S3 method for data.frame input.  Two supported shapes:
#   * all columns numeric -> delegate to summaryStats.default, either on a
#     single column or on the stacked values with a grouping factor built
#     from the column names;
#   * all columns factors with identical level sets -> delegate to
#     summaryStats.factor on the stacked values.
# Any mixture of numeric and factor columns is an error.
function (object, ...) 
{
    if (all(sapply(object, is.numeric))) {
        names.object <- names(object)
        nc <- ncol(object)
        if (nc == 1) {
            # Single column: pass it as a plain vector; supply the column
            # name as data.name unless the caller already gave one.
            arg.list <- list(object = as.vector(unlist(object)))
            match.vec <- pmatch(names(list(...)), "data.name")
            if (length(match.vec) == 0 || is.na(match.vec)) 
                arg.list <- c(arg.list, list(data.name = names.object), 
                  ...)
            else arg.list <- c(arg.list, ...)
            do.call("summaryStats.default", arg.list)
        }
        else {
            # Several columns: stack into one vector, grouped by the
            # originating column name.
            nr <- nrow(object)
            group <- rep(names.object, each = nr)
            summaryStats.default(object = as.vector(unlist(object)), 
                group = group, ...)
        }
    }
    else if (all(sapply(object, is.factor))) {
        # All-factor input requires every column to share one level set.
        list.levels <- lapply(object, levels)
        list.lengths <- sapply(list.levels, length)
        if (!all(list.lengths == list.lengths[1])) 
            stop(paste("When \"object\" is a data frame and all columns are factors,", 
                "all columns have to have the same levels"))
        all.levels.identical <- all(sapply(list.levels, function(x) identical(x, 
            list.levels[[1]])))
        if (!all.levels.identical) 
            stop(paste("When \"object\" is a data frame and all columns are factors,", 
                "all columns have to have the same levels"))
        names.object <- names(object)
        nr <- nrow(object)
        object <- unlist(object)
        group <- rep(names.object, each = nr)
        summaryStats.factor(object = object, group = group, ...)
    }
    else stop(paste("When \"object\" is a data frame,", "all columns must be numeric or all columns must be factors"))
}
|
ac6ad454b8370b5159c6e0a735228df4ad7af294 | d158e0cd917560ff4b940b0a7b63416a1e532e49 | /plot2.R | ae602386634b0cc9f479367119634648becd923d | [] | no_license | tusharbhalla/ExData_Plotting2 | fb8d59c8243cd17d03be8229113aa80144c08abe | 711702e4e8a6eb8d669f7405b8c6c36f09ce3c0d | refs/heads/master | 2021-01-25T09:53:59.984296 | 2014-06-22T23:42:46 | 2014-06-22T23:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | plot2.R | ## Get Data
require(data.table)  # NOTE(review): library() would fail loudly if data.table were missing
## Restore the NEI summary table from its serialized RDS form
summarySCC <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
summarySCC_dt <- data.table(summarySCC)
rm(summarySCC)  # free the plain data.frame copy
## Subset Data: fips "24510" is Baltimore City (see plot title below)
sub <- subset(summarySCC_dt, fips == "24510")
# Total emissions per year (data.table j-expression; result column is V1)
subSummary <- sub[,sum(Emissions), by = year]
## Create Plot: 480x480 PNG of the yearly totals
png(filename = "plot2.png", width = 480, height = 480)
plot(subSummary$year, subSummary$V1, type = "b", xlab = "Year", ylab = "Total Emmisions",
     main = "Trend of PM2.5 in Baltimore (fips-24510) from 1999 to 2008")
dev.off() |
af2fb46a490d3af8cf34f5688baa6006903e9e2e | 1a0675e958bd7c1bd51211f64834c6784b358efa | /P7/P7_A.r | 0d263a15974e5af39e5bc0d6048fb224c3804ce1 | [] | no_license | dagoquevedo/parallelr | f4504c0bd6ec521354b63b0cdf1d77c02ccd3059 | 0addc443b1ca62772708bfe6209b49b9a05bda89 | refs/heads/master | 2021-01-01T04:14:31.294072 | 2017-10-17T19:21:43 | 2017-10-17T19:21:43 | 97,148,352 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,085 | r | P7_A.r | #Project : Local search
#Author : Dago Quevedo
#Date : Sep 2017
suppressMessages(library(doParallel))  # parallel backend for the foreach search below
library(ggplot2)
library(lattice)   # levelplot() draws the objective-surface figures
# Remove figures produced by previous runs
unlink("img/P7_A*.png")
unlink("img/P7_A*.gif")
# Objective function to maximize: a separable two-dimensional polynomial.
# Each coordinate contributes ((v + 0.5)^4 - 30*v^2 - 20*v); the sum of the
# two contributions is scaled by 1/100.  Vectorized over x and y.
g <- function(x, y) {
  axis_term <- function(v) (v + 0.5)^4 - 30 * v^2 - 20 * v
  (axis_term(x) + axis_term(y)) / 100
}
low <- -6      # lower bound of the square search domain [low, high]^2
high <- 5      # upper bound of the square search domain
step <- 0.25   # maximum per-axis displacement when LS() proposes neighbours
k <- 30        # number of independent local-search restarts per iteration budget
g_max <- 1.301250  # reference maximum of g on the domain used for the % gap (precomputed -- verify)
# Random local search for a maximum of g() over [low, high]^2.
#
# Starts from a uniform random point and, for `time` iterations, draws a
# random per-axis step and inspects the four diagonal neighbours of the
# current point, moving to any in-bounds neighbour that improves g and
# remembering the best point visited overall.
#
# Relies on globals defined in this script: g (objective), low/high (domain
# bounds) and step (maximum step size).
# Returns: a length-2 numeric vector, the best (x, y) found.
LS <- function(time) {
    curr <- runif(2, low, high)
    best <- curr

    for (t in 1:time) {
        delta <- runif(2, 0, step)

        neighbors <- rbind( c(curr[1] + delta[1], curr[2] + delta[2]),
                            c(curr[1] - delta[1], curr[2] + delta[2]),
                            c(curr[1] + delta[1], curr[2] - delta[2]),
                            c(curr[1] - delta[1], curr[2] - delta[2])
        )

        # BUG FIX: the original loop ran over 1:length(neighbors[1,]) -- the
        # number of COLUMNS (2) -- so only the first two of the four candidate
        # moves were ever evaluated.  Iterate over the rows instead.
        for (i in seq_len(nrow(neighbors))) {
            if (neighbors[i,1] >= low & neighbors[i,1] <= high &
                neighbors[i,2] >= low & neighbors[i,2] <= high) {
                if (g(neighbors[i,1], neighbors[i,2]) > g(curr[1], curr[2])) {
                    curr <- neighbors[i,]
                }
            }
        }

        # Track the best point seen, not just the final position.
        if (g(curr[1], curr[2]) > g(best[1], best[2])) {
            best <- curr
        }
    }
    return(best)
}
# One worker per core, minus one to keep the machine responsive.
# NOTE(review): the cluster handle is discarded and stopImplicitCluster()
# below only stops implicitly created clusters -- consider keeping the handle
# and calling stopCluster() on it explicitly.
registerDoParallel(makeCluster(detectCores() - 1))
# Dense evaluation grid of g() for the level-plot background.
x <- seq(low, high, length = 256)
y <- seq(low, high, length = 256)
grid <- expand.grid(x=x, y=y)
grid$z <- g(grid$x, grid$y)
# One frame per iteration budget (10^1 .. 10^5): run k restarts of LS in
# parallel, mark every end point (red) and the best one (blue crosshair), and
# report the % gap to the reference optimum in the title.
for (pow in 1:5) {
  tmax <- 10^pow
  result <- foreach(i = 1:k, .combine = rbind) %dopar% LS(tmax)
  values <- g(result[,1],result[,2])
  best <- which.max(values)
  gap <- (abs(max(values) - g_max) / g_max) * 100
  output <- paste("img/P7_A_1_",formatC(tmax, width = 4, format = "d", flag = "0"),".png")
  png(output, width = 7, height = 7, units = "in", res = 150)
  print(
    levelplot( z ~ x * y, grid, main = paste(formatC(tmax, width = 5, format = "d", flag = "0"),
                          " iteraciones | ", formatC(k, width = 2, format = "d", flag = "0"),
                          " reinicios |",sprintf("%.2f%% gap", gap)),
              xlab.top = "Óptimos locales y globales", contour = TRUE,
              panel = function(...) {
                panel.levelplot(...)
                panel.abline(h = result[best,2], col = "blue")
                panel.abline(v = result[best,1], col = "blue")
                panel.xyplot(result[,1], result[,2], pch = 20, col = "red", cex = 1)
                panel.xyplot(result[best,1],result[best,2], pch = 20, col = "blue", cex = 2)
              }
    )
  )
  graphics.off()
}
stopImplicitCluster()
# Assemble the per-budget frames into an animated GIF (ImageMagick), then
# delete the individual frames.
system(sprintf("convert -delay %d img/P7_A_1_*.png img/P7_A_1.gif", 90))
unlink("img/P7_A_1_*.png")
# Second animation: the surface of g() rotated in 4-degree increments,
# facets coloured by a rainbow gradient of local height.
x <- seq(low, high, length = 50)
y <- seq(low, high, length = 50)
z <- outer(x, y, g)
nrz <-nrow(z)
ncz <-ncol(z)
color <-rainbow(256)
# Sum of each facet's four corner heights, binned into the 256 colours.
zgrad <-z[-1,-1]+z[-1,-ncz]+z[-nrz,-1]+z[-nrz,-ncz]
gradient<-cut(zgrad,length(color))
for(gr in seq(4,360,4)) {
  output = paste("img/P7_A_2_",formatC(gr, width=3, format="d",flag="0"),".png")
  png(output, width = 7, height = 7, units = "in", res = 150)
  persp(x, y, z, phi = 30, theta = gr, col =color[gradient])
  graphics.off()
}
system(sprintf("convert -delay %d img/P7_A_2_*.png img/P7_A_2.gif", 10))
unlink("img/P7_A_2_*.png")
|
aaf31c4552fbfee1cc35d2ef007dfe528e93e7af | 3e1f696029222b31796a5adef0afc272f6cc4c11 | /NSRGYMerged8_NormClose_OLD_20190820.R | 6782699dd535faeab9575772c3b508cb7b4f844a | [] | no_license | dehrlich014/DoriansStockDataCode | 82c890070bf66075b331cb80c056e24122a80ec5 | b64f6ee2d369d7c269efd266701065d15b3c5347 | refs/heads/master | 2023-02-03T13:52:58.288342 | 2020-12-22T20:08:55 | 2020-12-22T20:08:55 | 275,254,152 | 0 | 0 | null | 2020-07-16T22:55:35 | 2020-06-26T21:51:11 | R | UTF-8 | R | false | false | 6,483 | r | NSRGYMerged8_NormClose_OLD_20190820.R | ####This file is OLD and has been REVISED on 6/27/2020
####This file is OLD and has been REVISED on 6/27/2020
####This file is OLD and has been REVISED on 6/27/2020
# Each sourced script is expected to build <SYMBOL>_Historical, a data frame
# with a Date column and a Delta.<SYMBOL> daily-return column (confirm).
source("GSPC_DeltaOnly_20190820.R")
source("DJI_DeltaOnly_20190820.R")
source("NSRGY_DeltaOnly_20190820.R")
source("PFF_DeltaOnly_20190820.R")
source("IAU_DeltaOnly_20190820.R")
source("SLV_DeltaOnly_20190820.R")
source("SHY_DeltaOnly_20190820.R")
source("HYG_DeltaOnly_20190820.R")
# NOTE(review): bare detach() pops whatever is on top of the search path --
# presumably something attach()ed by the sourced scripts; fragile, verify.
detach()
symbols <- c("GSPC","DJI","NSRGY","PFF","IAU","SLV","SHY","HYG")
colors <- c("black","red","blue","forestgreen","darkgoldenrod","gray","turquoise","limegreen")
securities <- data.frame(symbols,colors)  # row i pairs a ticker with its plot colour
#######I am going to try to use hard-coded symbols as little as possible
#######For this reason, I want to create a mini-data.frame of
#######The names of the securities and the color I'll assign each one
#######Then, this essentially assigns each security a number
######I'll want to just think of securities as being numbers 1 - 2^k for some k
####6/27/2019: My k is 3 for now, I think higher than that may get unwieldy
####6/28/2019: I am not going to mess with the hard-coded symbols, now that is for another day :)
print("files are open and running")
# Pairwise merges on Date build one wide frame with every Delta column; only
# dates present for ALL eight securities survive (inner joins).
myData1 <- merge(GSPC_Historical,DJI_Historical,by.x = "Date", by.y = "Date",suffixes = c(".GSPC",".DJI"))
myData2 <- merge(NSRGY_Historical,PFF_Historical,by.x = "Date", by.y = "Date",suffixes = c(".NSRGY",".PFF"))
myData3 <- merge(IAU_Historical,SLV_Historical,by.x = "Date", by.y = "Date",suffixes = c(".IAU",".SLV"))
myData4 <- merge(SHY_Historical,HYG_Historical,by.x = "Date", by.y = "Date",suffixes = c(".SHY",".HYG"))
myData5 <- merge(myData1,myData2,by.x = "Date",by.y = "Date")
myData6 <- merge(myData3,myData4,by.x = "Date",by.y = "Date")
myData <- merge(myData5,myData6,by.x = "Date",by.y = "Date")
print("over here")
####IMPT: s is for start, N is for eNd
#Merged_s <- which(myData$Date %in% as.Date("2007-1-10"))
Merged_s <- 2
###"s" for start
###needs to be >= 2
#Merged_N <- length(myData[[1]])
Merged_N <- which(myData$Date %in% as.Date("2010-6-15"))
#Merged_N <- 700
###This is just the day where we start computing the NormClose
###The which() line returns the index of the desired date
###With this which() line, we can just input a date rather than arbitrarily find a good cutoff index
# Normalised closes all start at 1 so the eight series are directly comparable.
myData$NormClose.GSPC <- 1
myData$NormClose.DJI <- 1
myData$NormClose.NSRGY <- 1
myData$NormClose.PFF <- 1
myData$NormClose.IAU <- 1
myData$NormClose.SLV <- 1
myData$NormClose.SHY <- 1
myData$NormClose.HYG <- 1
print("all the way here")
# Compound each day's return onto the previous normalised close (cumulative
# product of 1 + Delta), one hard-coded line per security.
for(i in Merged_s:Merged_N){
  myData$NormClose.GSPC[i] <- myData$NormClose.GSPC[i-1]*(1+myData$Delta.GSPC[i])
  myData$NormClose.DJI[i] <- myData$NormClose.DJI[i-1]*(1+myData$Delta.DJI[i])
  myData$NormClose.NSRGY[i] <- myData$NormClose.NSRGY[i-1]*(1+myData$Delta.NSRGY[i])
  myData$NormClose.PFF[i] <- myData$NormClose.PFF[i-1]*(1+myData$Delta.PFF[i])
  myData$NormClose.IAU[i] <- myData$NormClose.IAU[i-1]*(1+myData$Delta.IAU[i])
  myData$NormClose.SLV[i] <- myData$NormClose.SLV[i-1]*(1+myData$Delta.SLV[i])
  myData$NormClose.SHY[i] <- myData$NormClose.SHY[i-1]*(1+myData$Delta.SHY[i])
  myData$NormClose.HYG[i] <- myData$NormClose.HYG[i-1]*(1+myData$Delta.HYG[i])
}
####^^^Wondering if there is a more efficient way to do this....
####It also may not matter, I have not had any issues with find/replace or anything else
attach(myData)
print(names(myData))
####These next few lines are just to spiff up whatever graph I choose to generate
groupName <- "NSRGY/Commodities/Indicies"
chartName <- paste(groupName,"from",as.character(Date[Merged_s]),"through",as.character(Date[Merged_N]))
# Legend string: "SYMBOL=color" pairs separated by "||", used as the y label.
legend <- NULL
for(i in 1:length(securities[,1])){
  currString <- paste(as.character(securities[,1][i]),"=",as.character(securities[,2][i]),sep = "")
  legend <- paste(legend,currString,sep = "||")
}
# ####Taking the max of the NormClose's so that I know how high to draw the y-axis
L <- max(NormClose.GSPC,NormClose.NSRGY,NormClose.PFF,NormClose.DJI,NormClose.IAU,NormClose.SLV,NormClose.SHY,NormClose.HYG)
m <- min(NormClose.GSPC,NormClose.NSRGY,NormClose.PFF,NormClose.DJI,NormClose.IAU,NormClose.SLV,NormClose.SHY,NormClose.HYG)
#####USE THE ABOVE L
#L <- max(NormClose.GSPC,NormClose.NSRGY,NormClose.PFF,NormClose.DJI,NormClose.SLV,NormClose.SHY,NormClose.HYG)
# Base plot for GSPC, then one lines() overlay per remaining security using
# the colour assigned in `securities`.
plot(x = Date[(Merged_s-1):Merged_N], y = NormClose.GSPC[(Merged_s-1):Merged_N], ylim = c((m-.07),(L+.5)),type = "l", main = chartName, xlab = "", ylab = legend, cex.lab = .5,col = as.character(securities[,2][1]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.DJI[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][2]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.NSRGY[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][3]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.PFF[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][4]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.IAU[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][5]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.SLV[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][6]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.SHY[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][7]))
lines(x = Date[(Merged_s-1):Merged_N], y = NormClose.HYG[(Merged_s-1):Merged_N], type = "l", col = as.character(securities[,2][8]))
# Alternative view (disabled): kernel densities of the daily Deltas.
# plot(density(Delta.GSPC[Merged_s:Merged_N]), xlim = c(-.075,.075), ylim = c(0,75), main = chartName, xlab = "", ylab = legend, cex.lab = .5,col = as.character(securities[,2][1]))
# lines(density(Delta.DJI[Merged_s:Merged_N]),col = as.character(securities[,2][2]))
# lines(density(Delta.NSRGY[Merged_s:Merged_N]),col = as.character(securities[,2][3]))
# lines(density(Delta.PFF[Merged_s:Merged_N]),col = as.character(securities[,2][4]))
# lines(density(Delta.IAU[Merged_s:Merged_N]),col = as.character(securities[,2][5]))
# lines(density(Delta.SLV[Merged_s:Merged_N]),col = as.character(securities[,2][6]))
# lines(density(Delta.SHY[Merged_s:Merged_N]),col = as.character(securities[,2][7]))
# lines(density(Delta.HYG[Merged_s:Merged_N]),col = as.character(securities[,2][8]))
# Reference lines: the starting level (1) and GSPC's min/max over the window.
abline(h = 1)
abline(h = min(NormClose.GSPC), col = "brown")
abline(h = max(NormClose.GSPC), col = "brown")
#abline(v = Date[700])
###which(NormClose.NSRGY %in% min(NormClose.NSRGY)) == 700
###700 is a bit of a recession naidir
|
cb2efcd6cb5d27a6b1c0aa68e26af8eaf62050d0 | 2a068e32d4a495d83d77aad0495a74cf7bc230f2 | /scripts/bio/stat/gsea | 40b05e83a5e5cb54a3a22cc71c7bc662bb40f364 | [] | no_license | xmao/kit | 06da86cefb549793e49d84708b213a7e4956f5d6 | e4eafccf216dfa6ad62809d6bd602927bd423d05 | refs/heads/master | 2021-07-07T08:11:46.733155 | 2021-05-24T15:01:31 | 2021-05-24T15:01:31 | 193,253 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,552 | gsea | #!/usr/bin/env Rscript
## GSER.R FILE METHOD OFFSET FDR
# Per-method p-value calculators for a 2x2 enrichment table.
# Input x = c(k, n, K, N): k of n sampled genes in the set, K of N background
# genes in the set (positions follow the column layout used below -- verify).
# Each function returns a single p-value.
#
# BUG FIX: in the original the `binom` and `chisq` bodies were swapped
# (binom ran chisq.test and chisq ran binom.test); corrected here.  Local
# results are named `res` instead of `t`, which shadowed base::t().
methods <- list(
  # Exact binomial test of k successes in n trials against rate K/N.
  binom = function(x) {
    res <- binom.test(c(x[1], x[2] - x[1]), p = x[3] / x[4])
    res$p.value
  },
  # Chi-squared test of independence on the 2x2 contingency table.
  chisq = function(x) {
    res <- chisq.test(matrix(c(x[1], x[2] - x[1], x[3], x[4] - x[3]), ncol = 2))
    res$p.value
  },
  # Fisher's exact test on the same 2x2 table.
  fisher = function(x) {
    res <- fisher.test(matrix(c(x[1], x[2] - x[1], x[3], x[4] - x[3]), ncol = 2))
    res$p.value
  },
  # Hypergeometric upper tail: P(X >= k) when drawing n from K successes / N total.
  hyper = function(x) {
    1 - phyper(x[1] - 1, x[3], x[4] - x[3], x[2])
  }
)
## Defaults for the CLI usage: GSER.R FILE METHOD OFFSET FDR
## BUG FIX: the default list named its FDR entry `qvalue`, but the code below
## reads `opts$fdr`; with fewer than 4 arguments opts$fdr was NULL and the
## later `opts$fdr == 'qvalue'` test failed.  The entry is now named `fdr`.
opts <- list(file = 'stdin', method = 'hyper', offset = 1, fdr = 'qvalue')
args <- commandArgs(trailingOnly = TRUE)
if (length(args) >= 1) opts$file <- args[1]                # input path ('stdin' reads a pipe)
if (length(args) >= 2) opts$method <- args[2]              # binom | chisq | fisher | hyper
if (length(args) >= 3) opts$offset <- as.integer(args[3])  # 1-based offset of the count columns
if (length(args) >= 4) opts$fdr <- args[4]                 # 'qvalue' or anything else -> multtest
# Read the header-less, tab-delimited gene-set table ('stdin' reads a pipe).
a <- read.delim(file(opts$file), header=F)
## Statistic test for two samples
# The four count columns sit at positions 3,4,6,7 shifted by opts$offset-1;
# compute one p-value per row with the method chosen on the command line.
a[['P_value']] <- apply(a[(c(3, 4 ,6, 7) + (opts$offset - 1))], 1,
                function(x) methods[[opts$method]](as.integer(unlist(x))))
a <- a[order(a[['P_value']]), ]
## Correction of false discovery rate
options(error = expression(cat())) # Ignore possible qvalue errors
if (opts$fdr == 'qvalue') {
  ## Available methods: smoother or bootstrap
  suppressPackageStartupMessages(library(qvalue))
  a[['Q_value']] <- qvalue(a[['P_value']], pi0.method='smooth')[['qvalues']]
} else {
  # Fallback: append every adjusted-p column multtest produces (Bonferroni, BH, ...).
  suppressPackageStartupMessages(library(multtest))
  a <- data.frame(a, mt.rawp2adjp(a[['P_value']])$adjp[, -1])
}
# Results go to stdout so the script composes in shell pipelines.
write.table(a, sep='\t', quote=FALSE, row.names=FALSE)
| |
e17263d7f3900d66b925c98a22d8d9d55d6c5ff7 | f8072e49a8d128ff5e6eabeb0c28f1e6f5fe6d18 | /04-Script_bootstrap_FSpecialist.R | 963ede4213b61dd2fb0da7d5735f34628cc2af0e | [] | no_license | OACColombia/FaunalRecoveryCode | a43ad9a8817b48c26fb869da73747e0ca428d75d | 95554a35bc26124b28eab4c34566d9fb80f9aef2 | refs/heads/master | 2020-06-22T07:16:48.782962 | 2017-09-12T20:13:43 | 2017-09-12T20:13:43 | 74,600,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,365 | r | 04-Script_bootstrap_FSpecialist.R | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Bootstrapping of the Response ratio in Species richness
#Clear memory
rm(list=ls())
#Working in the office
# (only the setwd/read.csv pair matching the current machine succeeds)
setwd("//Users/orlando/Dropbox/Vertebrate Recovery - Nature/data")
#Call the file MainData.csv
mydata <- read.csv("//Users/orlando/Dropbox/Vertebrate Recovery - Nature/data/MainData.csv")
#Working in my house
setwd("c://Users/Flaco/Dropbox/Vertebrate Recovery - Nature/data")
#Call the file MainData.csv
mydata <- read.csv("c://Users/Flaco/Dropbox/Vertebrate Recovery - Nature/data/MainData.csv")
#packages used
library(ggplot2)
library(reshape2)
library(gridExtra)
library(gtable)
library(grid)
library(ggrepel)
library(plyr)
data<-subset(mydata,Biome=="Moist")
#Exclude the forest with zero richness (the RRFSp can't be calculated)
# NOTE(review): this subsets from `mydata` rather than `data`, so the
# Biome=="Moist" filter on the previous line is discarded.  Also RRFSp!="NA"
# compares against the literal string "NA" instead of using is.na() --
# confirm both are intentional before reusing this script.
data<-subset(mydata,RRFSp!="NA")
nrow(data)#136
head(data)
#To select one comparison per study (avoid spacial pseudoreplication)
# Draw `n` rows from data frame `df`, uniformly at random without
# replacement.  Intended for keeping a single comparison per study
# (avoiding spatial pseudoreplication).
randomRows <- function(df, n) {
  picked <- sample(nrow(df), n)
  df[picked, ]
}
## Sections B1..B16 below: one bootstrap per taxon x successional stage
## (ES = early succession; YSF/MSF/OSF = young/mid/old secondary forest).
## Groups with several estimates resample the response ratios R = 10000
## times; groups with a single estimate (B4, B5, B8, B12, B13, B16) repeat
## the observed mean and report NaN confidence limits.
## NOTE(review): ddply(<df>, .(Nstudy)) is called WITHOUT a .fun argument, so
## it merely re-orders rows by study -- it does NOT pick one comparison per
## study as the "Sample 1 data per study" comments claim.  The intended call
## was probably ddply(<df>, .(Nstudy), randomRows, 1); confirm before reuse.
#B1: Amphibians in Early Succession (ES), moist forest specialist species
AmES<-subset(data,BGroupOverall=="AmphibiansES")
nrow(AmES)
median(AmES$RRFSp)
R<-10000
B1<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  AmES2<-ddply(AmES,.(Nstudy))
  boot.sample <- sample(AmES2$RRFSp, replace = TRUE)
  B1[kk] <- mean(boot.sample)
}
boxplot(B1)
quantile(B1,c(0.025,0.975))
hist(B1, breaks = 30)
# Summary row for this group: bootstrap mean, 95% percentile limits, n
dB1<-c(Bmean=mean(B1),quantile(B1,c(0.025,0.975)),n=nrow(AmES2))
dB1
#B2: Amphibians in Young Secondary Forest (YSF), overall
AmYSF<-subset(data,BGroupOverall=="AmphibiansYSF")
median(AmYSF$RRFSp)
nrow(AmYSF)
R<-10000
B2<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  AmYSF2<-ddply(AmYSF,.(Nstudy))
  boot.sample <- sample(AmYSF2$RRFSp, replace = TRUE)
  B2[kk] <- mean(boot.sample)
}
boxplot(B2)
quantile(B2,c(0.025,0.975))
hist(B2, breaks = 30)
dB2<-c(Bmean=mean(B2),quantile(B2,c(0.025,0.975)),n=nrow(AmYSF2))
dB2
#B3: Amphibians in Mid-successional Secondary Forest (MSF), overall
AmMSF<-subset(data,BGroupOverall=="AmphibiansMSF")
median(AmMSF$RRFSp)
nrow(AmMSF)
R<-10000
B3<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  AmMSF2<-ddply(AmMSF,.(Nstudy))
  boot.sample <- sample(AmMSF2$RRFSp, replace = TRUE)
  B3[kk] <- mean(boot.sample)
}
boxplot(B3)
quantile(B3,c(0.025,0.975))
hist(B3, breaks = 30)
dB3<-c(Bmean=mean(B3),quantile(B3,c(0.025,0.975)),n=nrow(AmMSF2))
dB3
#B4: Amphibians in Old Secondary Forest (OSF), overall
# Single-estimate group: constant "bootstrap" vector, so no CI is possible.
AmOSF<-subset(data,BGroupOverall=="AmphibiansOSF")
nrow(AmOSF)
B4<-rep(mean(AmOSF$RRFSp),10000)
dB4<-c(Bmean=mean(B4),"2.5%" = NaN,"97.5%" = NaN,n=nrow(AmOSF))
dB4
#B5: Reptiles in ES, overall
ReES<-subset(data,BGroupOverall=="ReptilesES")
nrow(ReES)
B5<-rep(mean(ReES$RRFSp),10000)
dB5<-c(Bmean=mean(B5),"2.5%" = NaN,"97.5%" = NaN,n=nrow(ReES))
dB5
#B6: Reptiles in YSF, overall
ReYSF<-subset(data,BGroupOverall=="ReptilesYSF")
nrow(ReYSF)
median(ReYSF$RRFSp)
R<-10000
B6<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  ReYSF2<-ddply(ReYSF,.(Nstudy))
  boot.sample <- sample(ReYSF2$RRFSp, replace = TRUE)
  B6[kk] <- mean(boot.sample)
}
boxplot(B6)
quantile(B6,c(0.025,0.975))
hist(B6, breaks = 30)
dB6<-c(Bmean=mean(B6),quantile(B6,c(0.025,0.975)),n=nrow(ReYSF2))
dB6
#B7: Reptiles in MSF, overall
ReMSF<-subset(data,BGroupOverall=="ReptilesMSF")
nrow(ReMSF)
median(ReMSF$RRFSp)
R<-10000
B7<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  ReMSF2<-ddply(ReMSF,.(Nstudy))
  boot.sample <- sample(ReMSF2$RRFSp, replace = TRUE)
  B7[kk] <- mean(boot.sample)
}
boxplot(B7)
quantile(B7,c(0.025,0.975))
hist(B7, breaks = 30)
dB7<-c(Bmean=mean(B7),quantile(B7,c(0.025,0.975)),n=nrow(ReMSF2))
dB7
#B8: Reptiles in OSF, overall
ReOSF<-subset(data,BGroupOverall=="ReptilesOSF")
nrow(ReOSF)
B8<-rep(mean(ReOSF$RRFSp),10000)
dB8<-c(Bmean=mean(B8),"2.5%" = NaN,"97.5%" = NaN,n=nrow(ReOSF))
dB8
#B9: Birds in ES, overall
BiES<-subset(data,BGroupOverall=="BirdsES")
nrow(BiES)
median(BiES$RRFSp)
R<-10000
B9<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  BiES2<-ddply(BiES,.(Nstudy))
  boot.sample <- sample(BiES2$RRFSp, replace = TRUE)
  B9[kk] <- mean(boot.sample)
}
boxplot(B9)
quantile(B9,c(0.025,0.975))
hist(B9, breaks = 30)
dB9<-c(Bmean=mean(B9),quantile(B9,c(0.025,0.975)),n=nrow(BiES2))
dB9
#B10: Birds in YSF, overall
BiYSF<-subset(data,BGroupOverall=="BirdsYSF")
median(BiYSF$RRFSp)
nrow(BiYSF)
R<-10000
B10<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  BiYSF2<-ddply(BiYSF,.(Nstudy))
  boot.sample <- sample(BiYSF2$RRFSp, replace = TRUE)
  B10[kk] <- mean(boot.sample)
}
boxplot(B10)
quantile(B10,c(0.025,0.975))
hist(B10, breaks = 30)
dB10<-c(Bmean=mean(B10),quantile(B10,c(0.025,0.975)),n=nrow(BiYSF2))
dB10
#B11: Birds in MSF, overall
BiMSF<-subset(data,BGroupOverall=="BirdsMSF")
median(BiMSF$RRFSp)
nrow(BiMSF)
R<-10000
B11<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  BiMSF2<-ddply(BiMSF,.(Nstudy))
  boot.sample <- sample(BiMSF2$RRFSp, replace = TRUE)
  B11[kk] <- mean(boot.sample)
}
boxplot(B11)
quantile(B11,c(0.025,0.975))
hist(B11, breaks = 30)
dB11<-c(Bmean=mean(B11),quantile(B11,c(0.025,0.975)),n=nrow(BiMSF2))
dB11
#B12: Birds in OSF, overall
BiOSF<-subset(data,BGroupOverall=="BirdsOSF")
nrow(BiOSF)
B12<-rep(mean(BiOSF$RRFSp),10000)
dB12<-c(Bmean=mean(B12),"2.5%" = NaN,"97.5%" = NaN,n=nrow(BiOSF))
dB12
#B13: Mammals in ES, overall
MaES<-subset(data,BGroupOverall=="MammalsES")
median(MaES$RRFSp)
nrow(MaES)
B13<-rep(mean(MaES$RRFSp),10000)
dB13<-c(Bmean=mean(B13),"2.5%" = NaN,"97.5%" = NaN,n=nrow(MaES))
dB13
#B14: Mammals in YSF, overall
MaYSF<-subset(data,BGroupOverall=="MammalsYSF")
median(MaYSF$RRFSp)
nrow(MaYSF)
R<-10000
B14<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  MaYSF2<-ddply(MaYSF,.(Nstudy))
  boot.sample <- sample(MaYSF2$RRFSp, replace = TRUE)
  B14[kk] <- mean(boot.sample)
}
boxplot(B14)
quantile(B14,c(0.025,0.975))
hist(B14, breaks = 30)
dB14<-c(Bmean=mean(B14),quantile(B14,c(0.025,0.975)),n=nrow(MaYSF2))
dB14
#B15: Mammals in MSF, overall
MaMSF<-subset(data,BGroupOverall=="MammalsMSF")
median(MaMSF$RRFSp)
nrow(MaMSF)
R<-10000
B15<-rep(0,R);#change the name of the Bootstrap object
for(kk in 1:R){
  ##Sample 1 data per study
  MaMSF2<-ddply(MaMSF,.(Nstudy))
  boot.sample <- sample(MaMSF2$RRFSp, replace = TRUE)
  B15[kk] <- mean(boot.sample)
}
boxplot(B15)
quantile(B15,c(0.025,0.975))
hist(B15, breaks = 30)
dB15<-c(Bmean=mean(B15),quantile(B15,c(0.025,0.975)),n=nrow(MaMSF2))
dB15
#B16: Mammals in OSF, overall
MaOSF<-subset(data,BGroupOverall=="MammalsOSF")
nrow(MaOSF)
B16<-rep(mean(MaOSF$RRFSp),10000)
dB16<-c(Bmean=mean(B16),"2.5%" = NaN,"97.5%" = NaN,n=nrow(MaOSF))
dB16
#Join results
#Resume data of Bootstrap (Bootstrap mean, confidence limits[2.5%-97.5%],n)
# Columns become rows after t(): one row per group, in B1..B16 order.
dbR=data.frame(dB1,dB2,dB3,dB4,dB5,dB6,dB7,dB8,dB9,dB10, dB11,dB12,dB13,dB14,dB15,dB16)
t.dbR<-t(dbR)
datboots<-as.data.frame(t.dbR)
head(datboots)
datboots$FigurePart<-c(rep("FSpecialists",16))
# Taxon repeats in blocks of 4; stage cycles ES/YSF/MSF/OSF within each taxon.
taxdb<-c(rep("Amphibians",4),rep("Reptiles",4),rep("Birds",4),rep("Mammals",4))
Succ.stage<-c(rep("ES",1),rep("YSF",1),rep("MSF",1),rep("OSF",1))
datboots$Taxa<-c(rep(taxdb,1))
datboots$Succ.Stage<-c(rep(Succ.stage,4))
head(datboots)
tail(datboots)
write.table(datboots, "BootstrappFSpecialist.txt",quote=F, sep="\t")
#Lets graph all this work
colnames(datboots)<- c("Bmean","lower","upper", "n","FigurePart","Taxa", "Succ.Stage")
datboots$n
# Factor levels fix the ordering of groups in the plot below.
datboots$Succ.Stage <- factor(datboots$Succ.Stage,levels=c('ES','YSF','MSF', 'OSF'))
datboots$Taxa <- factor(datboots$Taxa,levels=c('Mammals','Birds','Reptiles','Amphibians'))
# Forest-style plot: one dodged point-and-interval per taxon x stage, flipped
# so taxa run down the y axis; group sizes printed at the 0.85 level.
ggplot(datboots, aes(y = Bmean, ymin = lower, ymax = upper,
                     x = Taxa, shape=Succ.Stage, fill=Taxa))+
  geom_vline(xintercept=c(1.5,2.5,3.5),color="darkgray", size=0.4)+
  geom_hline(yintercept = 0, linetype = "dashed",color="black", size=0.5)+
  geom_text(aes(label=datboots$n, y=0.85, fill=Taxa),
            position = position_dodge(width = 1), size=2.5)+
  #geom_pointrange(position = position_dodge(1.2), size=0.8,aes(fill=Taxa), colour="black", stroke=1)+
  geom_linerange(position = position_dodge(1), size=1.5, alpha=0.6, aes(color=Taxa))+
  geom_point(position = position_dodge(1), size=3.5, stroke = 1, alpha=0.8, aes(fill=Taxa))+
  ylab("Response ratio of the vertebrate forest specialist species (tropical moist forest)\n during secondary forest succession(bootstrapped effect size)")+
  xlab("")+
  scale_shape_manual(name="Succ.Stage",
                     values = c("ES" = 21, "YSF"=22,
                                "MSF"=24,"OSF"=23))+
  scale_y_continuous(breaks = seq(-3,0.9, by=0.5),
                     labels = seq(-3,0.9, by=0.5),
                     limits = c(-3.25,1.1), expand = c(0, 0))+
  scale_x_discrete(breaks=c('Amphibians','Reptiles','Birds','Mammals'),
                   labels=c('Amphibians','Reptiles','Birds','Mammals'),
                   expand = c(0.025, 0))+
  scale_fill_manual(name="Taxa",
                    values = c("Amphibians" = "#397d34", "Reptiles"="#FFdd02",
                               "Birds"="#1f78b4","Mammals"="#FF7f00"))+
  scale_color_manual(name="Taxa",
                     values = c("Amphibians" = "#397d34", "Reptiles"="#FFdd02",
                                "Birds"="#1f78b4","Mammals"="#FF7f00"))+
  theme_bw()+
  theme(legend.position="none",axis.text.x=element_text(size=12, hjust = 0.5), axis.text.y=element_blank(),
        plot.margin=unit(c(1,1,1,1),"mm"),panel.margin.y = unit(0, "lines"), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  theme(strip.background = element_blank(),plot.title=element_text(hjust=-0.025, size = 16, face = "bold"))+
  theme(strip.text = element_blank())+
  coord_flip()+
  facet_wrap(~FigurePart, ncol = 3, nrow = 1)
|
7fc306a6e1834d7d712d2bef2d87ad35f45699c2 | 512dd6a8d64429a6095d8d10e4bc3620b52283e0 | /opal.R | d0687f0f18b99bdd4722b92dedfc14e347073d33 | [] | no_license | michaeledwardmarks/opal-data-analysis | 4d79688528542afeaf84d318ab17c6f6895e0b67 | 7999628c4cf8f66f2e45d2df2fef459b25a22de2 | refs/heads/master | 2021-01-17T22:40:39.997160 | 2016-03-01T16:34:10 | 2016-03-01T16:34:10 | 42,506,105 | 0 | 0 | null | 2015-09-18T14:44:37 | 2015-09-15T08:47:06 | Stata | UTF-8 | R | false | false | 3,528 | r | opal.R | require(ggplot2)
require(plyr)
#
# Plot an age distribution for this extract
#
# Bar chart of patient ages for this extract.  Reads episodes.csv from
# `extract_dir` (the dir string must already end with a path separator,
# since the filename is appended with no "/").  Returns a ggplot object.
# NOTE(review): age is computed against a hard-coded reference year of 2015,
# so the chart goes stale on newer extracts -- confirm this is intentional.
age_distribution <- function(extract_dir){
  # First 4 characters of an ISO date (a substr start of 0 behaves like 1)
  episodes <- read.csv(sprintf("%s%s", extract_dir, "episodes.csv"))
  episodes$year.of.birth <- substr(episodes$date.of.birth, 0, 4)
  age <- function(x) 2015 - as.integer(x)
  episodes$age <- age(episodes$year.of.birth)
  # na.omit(episodes) drops rows with NA in ANY column, not just age
  ages <- as.data.frame(table(na.omit(episodes)$age))
  names(ages) <- c("Age", "Frequency")
  ggplot(ages, aes(x=Age, y=Frequency, fill=Age)) +
    geom_bar(stat="identity") +
    labs(title="Age Distribution") +
    guides(fill=FALSE) +
    scale_x_discrete(breaks=c(20, 40, 60, 80))
}
#
# Plot frequent diagnoses for this extract
#
# Flipped bar chart of diagnoses recorded more than 3 times in this extract.
# Reads diagnosis.csv from `extract_dir` (dir must end with a separator);
# blank condition names are dropped.  Returns a ggplot object.
common_diagnoses <- function(extract_dir){
  diagnoses <- read.csv(sprintf("%s%s", extract_dir, "diagnosis.csv"))
  conditions <- as.data.frame(table(diagnoses$condition))
  names(conditions) <- c("Condition", "Frequency")
  # FIX: the original filtered on conditions$Freq, which only worked via R's
  # fragile partial name matching against "Frequency"; use the real name.
  conditions <- conditions[conditions$Frequency > 3,]
  conditions <- conditions[conditions$Condition != "",]
  ggplot(conditions, aes(x=Condition, y=Frequency, fill=Condition)) +
    geom_bar(stat="identity") +
    labs(title="Common Diagnoses") +
    guides(fill=FALSE) +
    coord_flip()
}
#
# Plot frequent travel destinatinos for this extract
#
# Horizontal bar chart of travel destinations recorded more than once in this
# extract.  Reads travel.csv from `extract_dir` (dir must end with a
# separator); blank destinations are dropped.  Returns a ggplot object.
common_destinations <- function(extract_dir){
  travel <- read.csv(sprintf("%s%s", extract_dir, "travel.csv"))
  tallies <- as.data.frame(table(travel$destination))
  names(tallies) <- c("Destination", "Frequency")
  keep <- tallies$Frequency > 1 & tallies$Destination != ""
  tallies <- tallies[keep, ]
  ggplot(tallies, aes(x=Destination, y=Frequency, fill=Destination)) +
    geom_bar(stat="identity") +
    labs(title="Travel Destinations") +
    guides(fill=FALSE) +
    coord_flip()
}
#
# Plot length of stay
#
# Bar chart of inpatient length of stay (discharge - admission, in days).
# Reads episodes.csv from `extract_dir`; rows with a negative or missing stay
# are discarded.  Returns a ggplot object.
length_of_stay <- function(extract_dir){
  episodes <- read.csv(sprintf("%s%s", extract_dir, "episodes.csv"))
  # BUG FIX: the original computed the stay from an undefined global
  # `demographics`; the date columns live on the `episodes` frame just read
  # (assumes episodes.csv carries discharge.date / date.of.admission --
  # confirm against the extract schema).
  episodes$los <- as.Date(episodes$discharge.date) - as.Date(episodes$date.of.admission)
  los <- as.data.frame(table(na.omit(episodes[episodes$los >= 0,])$los))
  names(los) <- c("LOS", "Frequency")
  ggplot(los, aes(x=LOS, y=Frequency, fill=LOS)) +
    geom_bar(stat="identity") +
    labs(title="Length of stay", x="Days") +
    guides(fill=FALSE) +
    scale_x_discrete(breaks=c(5, 10, 20, 30, 40, 60))
}
# Flipped bar chart of audit action counts.  Expects a data frame with
# `Action` (character) and `Count` (numeric) columns, as built by
# advice_audits().  Bars are ordered by count.
plot_audit_counts <- function(audit.counts){
  View(audit.counts)  # NOTE(review): interactive inspection side effect -- consider removing
  # BUG FIX: the original mapping was aes(reorder(x, y), x=Action, y=Count, ...),
  # leaving a stray positional aesthetic referencing columns x/y that do not
  # exist in the data.  The intent -- bars ordered by count -- is expressed by
  # reordering Action by Count.
  ggplot(audit.counts, aes(x=reorder(Action, Count), y=Count, fill=Action)) +
    geom_bar(stat="identity") +
    coord_flip() +
    labs(title="Clinical Advice Audit Activity")
}
#
# Plot Clinical advice audit checkboxes
#
# Summarise the four audit checkbox columns (positions 8:11) of
# clinical_advice.csv as counts of "True" values, then delegate plotting to
# plot_audit_counts().  `extract_dir` must end with a path separator.
advice_audits <- function(extract_dir){
  advice <- read.csv(sprintf("%s%s", extract_dir, "clinical_advice.csv"))
  checkboxes <- advice[,8:11]
  count_true <- function(col) sum(col == "True")
  tally <- colwise(count_true)(checkboxes)
  tally <- data.frame(t(tally))
  names(tally) <- c("Count")
  tally$Action <- row.names(tally)
  plot_audit_counts(tally)
}
# Same audit summary as advice_audits(), restricted to the rows whose
# `initials` column matches `user`.
advice_audits_for_user <- function(extract_dir, user){
  advice <- read.csv(sprintf("%s%s", extract_dir, "clinical_advice.csv"))
  advice <- advice[advice$initials == user,]
  checkboxes <- advice[,8:11]
  count_true <- function(col) sum(col == "True")
  tally <- colwise(count_true)(checkboxes)
  tally <- data.frame(t(tally))
  names(tally) <- c("Count")
  tally$Action <- row.names(tally)
  plot_audit_counts(tally)
}
4428b93a1dc0f1f67c18221d06a7755054c68c82 | 69b1549ca33aadf683ed04fdc3bf65c165f94b61 | /Datasets/R Code/Bootstrapping/Individual Dataset/predImports85.R | 9cb0e47a72900ddff22138b20341144da5031139 | [] | no_license | pcalhoun1/AR-Code | 4798d13279d8d51996e4f3192a5a003948592a8e | dd472ca92547daf920d8c0fa7239fe73e1879dea | refs/heads/master | 2020-05-28T08:21:37.960791 | 2019-05-29T01:19:55 | 2019-05-29T01:19:55 | 188,935,776 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,554 | r | predImports85.R | #rm(list=ls(all=TRUE))
#rm(list=ls(all=TRUE))
# Benchmark four random-forest variants (exhaustive RF, SSS, extremely
# randomized, AR) on the imports85 regression data: nsim replicates,
# ntrees trees per forest, in-sample MSE recorded per method.
library(parallel)
getwd()  # echoed for the run log
dir()
# Project-local code: defines growRF_Parallel() and predictRF().
source('../../../../R Functions/RF functions 20JAN19.R')
load(file="../../../Data/contData.RData")  # provides contData (list of datasets)
### Imports85 ###
nsim=50      # number of simulation replicates
ntrees=100   # trees per forest
# imports85 #
imports85<-contData$imports85
form<-as.formula("Response ~ symboling + make + fuelType + aspiration + numOfDoors + bodyStyle +
                 driveWheels + engineLocation + wheelBase + length + width + height + curbWeight +
                 engineType + numOfCylinders + engineSize + fuelSystem + bore + stroke +
                 compressionRatio + horsepower + peakRpm + cityMpg + highwayMpg")
# One MSE slot per replicate and method.
mseRF_imports85 <- rep(NA, nsim); mseSSS_imports85 <- rep(NA, nsim); mseER_imports85 <- rep(NA, nsim); mseAR_imports85 <- rep(NA, nsim)
for (sim in 1:nsim) {
  # iseed=sim makes each replicate reproducible across the four methods.
  rf_imports85<-growRF_Parallel(ntrees=ntrees, formula=form, data=imports85, search="exhaustive", method="anova", split="MSE",
                                mtry=8, nsplit=NULL, minsplit=6, minbucket=3, maxdepth=30, sampleMethod='bootstrap', useRpart=TRUE, iseed=sim)
  sss_imports85<-growRF_Parallel(ntrees=ntrees, formula=form, data=imports85, search="sss", method="anova", split="MSE",
                                 mtry=8, nsplit=NULL, minsplit=6, minbucket=3, maxdepth=30, a=50, sampleMethod='bootstrap', iseed=sim)
  er_imports85<-growRF_Parallel(ntrees=ntrees, formula=form, data=imports85, search="exhaustive", method="anova", split="MSE",
                                mtry=8, nsplit=1, minsplit=6, minbucket=3, maxdepth=30, sampleMethod='bootstrap', iseed=sim)
  ar_imports85<-growRF_Parallel(ntrees=ntrees, formula=form, data=imports85, search="ar", method="anova", split="MSE",
                                mtry=1, nsplit=1, minsplit=6, minbucket=3, maxdepth=30, minpvalue=0.05, sampleMethod='bootstrap', iseed=sim)
  # In-sample (training-set) mean squared error of each fitted forest.
  mseRF_imports85[sim] <- mean((predictRF(rf_imports85,imports85,checkCases=TRUE)-imports85$Response)^2)
  mseSSS_imports85[sim] <- mean((predictRF(sss_imports85,imports85,checkCases=TRUE)-imports85$Response)^2)
  mseER_imports85[sim] <- mean((predictRF(er_imports85,imports85,checkCases=TRUE)-imports85$Response)^2)
  mseAR_imports85[sim] <- mean((predictRF(ar_imports85,imports85,checkCases=TRUE)-imports85$Response)^2)
}
# Long-format results table: 4 methods x nsim rows.
outData <- data.frame(dataset=rep("Imports85", 4*nsim), sim=rep(1:nsim, 4), method=rep(c("RF", "SSS", "ER", "AR"), each=nsim),
                      mse = c(mseRF_imports85, mseSSS_imports85, mseER_imports85, mseAR_imports85))
#write.table(outData, file = "Results/predImports85.csv", sep = ",", row.names=FALSE)
|
00bcc59a7bd13b993c8d023095c0042508db88ca | 00856666243ffde6e1cc5451d9605abf98e1909d | /02 Rt.R | 0b9acf8f2f0eaa72d94bf9ab7fc07da406c55e09 | [] | no_license | holab-biostat/2020-COVID19-IJE | 6145b67c82fd37216ad29a02dac540f405fc884c | 922cd85579da41a0bd6cb07344219ac1f0ae09f7 | refs/heads/master | 2022-12-04T10:19:11.502346 | 2020-07-29T07:06:47 | 2020-07-29T07:06:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 630 | r | 02 Rt.R | ##### 2. Rt #####
##### 2. Rt #####
# Effective reproduction number (Rt) over time from daily confirmed-case
# counts.  Depends on objects built earlier in the pipeline (not visible
# here): date_all, confirmed_all, data.  Lag() presumably comes from Hmisc
# and estimate_R()/make_config() from EpiEstim -- confirm attached packages.
rt_ind<-date_all>=as.Date("2020-02-18")   # mask: dates inside the Rt window
date2_all<-unique(data$date2)
datert<-date_all[date_all>=as.Date("2020-02-18")]
#Observed Daily new cases (first difference of the cumulative series)
pdf_real<-confirmed_all-Lag(confirmed_all,1)
data_res_real<-data.frame(dates=datert,I=pdf_real[rt_ind])
### Rt calculation (Example code only) ###
#1) The important cases were not considered
#2) Sliding windows: 7 days (default)
#3) Serial Interval (Gamma Dist.): Mean (4.98), SD: 3.22)
res<-estimate_R(incid=data_res_real,method="parametric_si",
                config = make_config(list(mean_si =4.98, std_si=3.22)))
rt<-res$R$Mean #Mean Rts (one value per sliding window)
|
91a53911e8be2f0c7176ccb8f29b25a886e31b55 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SeleMix/examples/pred.y.Rd.R | a79d787f4aae11bbde74641047538373e8286608 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 640 | r | pred.y.Rd.R | library(SeleMix)
### Name: pred.y
### Title: Prediction of y variables
### Aliases: pred.y
### ** Examples
# Parameter estimation with one contaminated variable and one covariate
data(ex1.data)
# Parameters estimated applying ml.est to \code{ex1.data}
B1 <- as.matrix(c(-0.152, 1.215))
sigma1 <- as.matrix(1.25)
lambda1 <- 15.5
w1 <- 0.0479
# Variable prediction
ypred <- pred.y (y=ex1.data[,"Y1"], x=ex1.data[,"X1"], B=B1,
sigma=sigma1, lambda=lambda1, w=w1, model="LN", t.outl=0.5)
# Plot ypred vs Y1
sel.pairs(cbind(ypred[,1,drop=FALSE],ex1.data[,"Y1",drop=FALSE]),
outl=ypred[,"outlier"])
|
8ab29e3d21db86094dc82852dafe5bf1de1653fe | 7e29640eb74f442bb82f3687cdeb362ad4155173 | /Plot1.R | 758d539cf8d2ce98a62de813cd9b274be18a431e | [] | no_license | old21nick21/ExData_Plotting1 | e68e15a64702af1a191ba5503743b8b49af0aa60 | 53049e1fdff93f1d1d36c0472aaf2aa3b4d8e3bf | refs/heads/master | 2021-01-18T14:45:22.552775 | 2015-02-08T23:16:21 | 2015-02-08T23:16:21 | 30,496,336 | 0 | 0 | null | 2015-02-08T16:01:59 | 2015-02-08T16:01:59 | null | UTF-8 | R | false | false | 960 | r | Plot1.R | ## Read all records from the source file
## Exploratory plot 1: histogram of household Global Active Power over
## 2007-02-01/2007-02-02, written to plot1.png (480x480).
## Fix vs. original: spell out TRUE/FALSE instead of the reassignable
## shorthands T/F in the read.csv() call; behavior is unchanged.

## Read all records from the source file; "?" marks missing values.
electricity <- read.csv("household_power_consumption.txt", header = TRUE,
                        sep = ";", na.strings = "?", check.names = FALSE,
                        stringsAsFactors = FALSE, comment.char = "", quote = "\"")
## Convert the Date column (character "d/m/Y") into Date
electricity$Date <- as.Date(electricity$Date, format = "%d/%m/%Y")
## Keep only the two target days
twoDaysData <- subset(electricity, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
## Merge the date and time columns into one timestamp string ...
dateTime <- paste(as.Date(twoDaysData$Date), twoDaysData$Time)
## ... and store it as a POSIXct column
twoDaysData$DateTime <- as.POSIXct(dateTime)
## Draw the histogram on the screen device
hist(twoDaysData$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
## Copy the screen device to a PNG file
dev.copy(png, file = "plot1.png", height = 480, width = 480)
## Close the PNG device to flush and finalize the file
dev.off()
45910ece0ae53a1542bf5e4c40eb61cdcd3ca725 | 910e5240c834a0434143a14d2fbc019af8eb474b | /R/exactRLRT.R | 828962c75463fa529a8ffcb772c751541a264cc4 | [] | no_license | fabian-s/RLRsim | dd0c6f5f6883c5cb5112f7511ea4b91d158edc5b | 89e0fa4094dd1a1b043714a02b2efb651d05339c | refs/heads/master | 2022-05-06T04:45:32.469261 | 2022-03-15T16:23:33 | 2022-03-15T16:23:33 | 22,876,600 | 11 | 5 | null | 2020-03-23T18:22:25 | 2014-08-12T12:33:23 | R | UTF-8 | R | false | false | 9,115 | r | exactRLRT.R | #' Restricted Likelihood Ratio Tests for additive and linear mixed models
#'
#' This function provides an (exact) restricted likelihood ratio test based on
#' simulated values from the finite sample distribution for testing whether the
#' variance of a random effect is 0 in a linear mixed model with known
#' correlation structure of the tested random effect and i.i.d. errors.
#'
#' Testing in models with only a single variance component require only the
#' first argument \code{m}. For testing in models with multiple variance
#' components, the fitted model \code{m} must contain \bold{only} the random
#' effect set to zero under the null hypothesis, while \code{mA} and \code{m0}
#' are the models under the alternative and the null, respectively. For models
#' with a single variance component, the simulated distribution is exact if the
#' number of parameters (fixed and random) is smaller than the number of
#' observations. Extensive simulation studies (see second reference below)
#' confirm that the application of the test to models with multiple variance
#' components is safe and the simulated distribution is correct as long as the
#' number of parameters (fixed and random) is smaller than the number of
#' observations and the nuisance variance components are not superfluous or
#' very small. We use the finite sample distribution of the restricted
#' likelihood ratio test statistic as derived by Crainiceanu & Ruppert (2004).
#'
#' No simulation is performed if the observed test statistic is 0. (i.e., if the
#' fit of the model fitted under the alternative is indistinguishable from the
#' model fit under H0), since the p-value is always 1 in this case.
#'
#' @param m The fitted model under the alternative or, for testing in models
#' with multiple variance components, the reduced model containing only the
#' random effect to be tested (see Details), an \code{lme}, \code{lmerMod} or
#' \code{spm} object
#' @param mA The full model under the alternative for testing in models with
#' multiple variance components
#' @param m0 The model under the null for testing in models with multiple
#' variance components
#' @param seed input for \code{set.seed}
#' @param nsim Number of values to simulate
#' @param log.grid.hi Lower value of the grid on the log scale. See
#' \code{\link{exactRLRT}}.
#' @param log.grid.lo Lower value of the grid on the log scale. See
#' \code{\link{exactRLRT}}.
#' @param gridlength Length of the grid. See \code{\link{exactLRT}}.
#' @param parallel The type of parallel operation to be used (if any). If
#' missing, the default is "no parallelization").
#' @param ncpus integer: number of processes to be used in parallel operation:
#' typically one would chose this to the number of available CPUs. Defaults to
#' 1, i.e., no parallelization.
#' @param cl An optional parallel or snow cluster for use if parallel = "snow".
#' If not supplied, a cluster on the local machine is created for the duration
#' of the call.
#' @return A list of class \code{htest} containing the following components:
#' \itemize{
#' \item \code{statistic} the observed likelihood ratio
#' \item \code{p} p-value for the observed test statistic
#' \item \code{method} a character string indicating what type of test was
#' performed and how many values were simulated to determine the critical value
#' \item \code{sample} the samples from the null distribution returned by
#' \code{\link{RLRTSim}}
#' }
#' @author Fabian Scheipl, bug fixes by Andrzej Galecki, updates for
#' \pkg{lme4}-compatibility by Ben Bolker
#' @seealso \code{\link{RLRTSim}} for the underlying simulation algorithm;
#' \code{\link{exactLRT}} for likelihood based tests
#' @references Crainiceanu, C. and Ruppert, D. (2004) Likelihood ratio tests in
#' linear mixed models with one variance component, \emph{Journal of the Royal
#' Statistical Society: Series B},\bold{66},165--185.
#'
#' Greven, S., Crainiceanu, C., Kuechenhoff, H., and Peters, A. (2008)
#' Restricted Likelihood Ratio Testing for Zero Variance Components in Linear
#' Mixed Models, \emph{Journal of Computational and Graphical Statistics},
#' \bold{17} (4): 870--891.
#'
#' Scheipl, F., Greven, S. and Kuechenhoff, H. (2008) Size and power of tests
#' for a zero random effect variance or polynomial regression in additive and
#' linear mixed models. \emph{Computational Statistics & Data Analysis},
#' \bold{52}(7):3283--3299.
#' @keywords htest
#' @examples
#'
#' data(sleepstudy, package = "lme4")
#' mA <- lme4::lmer(Reaction ~ I(Days-4.5) + (1|Subject) + (0 + I(Days-4.5)|Subject),
#' data = sleepstudy)
#' m0 <- update(mA, . ~ . - (0 + I(Days-4.5)|Subject))
#' m.slope <- update(mA, . ~ . - (1|Subject))
#' #test for subject specific slopes:
#' exactRLRT(m.slope, mA, m0)
#'
#' library(mgcv)
#' data(trees)
#' #test quadratic trend vs. smooth alternative
#' m.q<-gamm(I(log(Volume)) ~ Height + s(Girth, m = 3), data = trees,
#' method = "REML")$lme
#' exactRLRT(m.q)
#' #test linear trend vs. smooth alternative
#' m.l<-gamm(I(log(Volume)) ~ Height + s(Girth, m = 2), data = trees,
#' method = "REML")$lme
#' exactRLRT(m.l)
#'
#' @export exactRLRT
#' @importFrom stats anova cov2cor logLik quantile
#' @importFrom utils packageVersion
'exactRLRT' <- function(m, mA = NULL, m0 = NULL, seed = NA,
  nsim = 10000, log.grid.hi = 8, log.grid.lo = -10, gridlength = 200,
  parallel = c("no", "multicore", "snow"),
  ncpus = 1L, cl = NULL) {
  # SemiPar::spm objects carry the underlying lme fit in $fit; unwrap it so
  # the rest of the function can dispatch on "lme".
  if (inherits(m, "spm")) {
    m <- m$fit
    class(m) <- "lme"
  }
  if (any(class(m) %in% c("amer", "mer")))
    stop("Models fit with package <amer> or versions of <lme4> below 1.0 are no longer supported.")
  c.m <- class(m)
  if (!any(c.m %in% c("lme", "lmerMod", "merModLmerTest", "lmerModLmerTest")))
    stop("Invalid <m> specified. \n")
  # lmerTest fits are lmerMod subclasses; treat them as plain lmerMod below.
  if (any(c.m %in% c("merModLmerTest", "lmerModLmerTest")))
    c.m <- "lmerMod"
  # The finite-sample null distribution is derived for REML; warn when the
  # fit used ML so the user knows the result is only approximate.
  if ("REML" != switch(c.m,
    lme = m$method,
    lmerMod = ifelse(lme4::isREML(m), "REML", "ML"))){
    message("Using restricted likelihood evaluated at ML estimators.")
    message("Refit with method=\"REML\" for exact results.")
  }
  # Extract design components: fixed-effects matrix X, random-effects design
  # Z, response y, and Vr, the covariance structure of the tested effect.
  d <- switch(c.m, lme = extract.lmeDesign(m),
    lmerMod = extract.lmerModDesign(m))
  X <- d$X
  qrX <- qr(X)
  Z <- d$Z
  y <- d$y
  Vr <- d$Vr
  if (all(Vr == 0)) {
    # this only happens if the estimate of the tested variance component is 0.
    # since we still want chol(cov2cor(Vr)) to work, this does the trick.
    diag(Vr) <- 1
  }
  K <- ncol(Z)
  n <- nrow(X)
  p <- ncol(X)
  # Branch 1: single variance component -- <m> alone is the model under the
  # alternative and the null model has fixed effects only.
  if (is.null(mA) && is.null(m0)) {
    if (length(d$lambda) != 1 || d$k != 1)
      stop("multiple random effects in model -
 exactRLRT needs <m> with only a single random effect.")
    #2*restricted ProfileLogLik under H0: lambda=0
    # Closed form for the restricted log-likelihood of the fixed-effects-only
    # model, computed from the QR residuals of X.
    res <- qr.resid(qrX, y)
    R <- qr.R(qrX)
    detXtX <- det(t(R) %*% R)
    reml.H0 <- -((n - p) * log(2 * pi) + (n - p) * log(sum(res^2)) +
      log(detXtX) + (n - p) - (n - p) * log(n - p))
    #observed value of the test-statistic
    reml.obs <- 2 * logLik(m, REML = TRUE)[1]
    rlrt.obs <- max(0, reml.obs - reml.H0)
    lambda <- d$lambda
  } else {
    # Branch 2: multiple variance components -- compare full model <mA> with
    # null model <m0>. REML likelihoods are only comparable when both models
    # share the same fixed-effects structure, so verify that first.
    nonidentfixmsg <-
      "Fixed effects structures of <mA> and <m0> not identical.
 REML-based inference not appropriate."
    if (c.m == "lme") {
      if (any(mA$fixDF$terms != m0$fixDF$terms))
        stop(nonidentfixmsg)
    } else {
      if (c.m == "mer") {
        if (any(mA@X != m0@X))
          stop(nonidentfixmsg)
      } else {
        if (c.m == "lmerMod") {
          if (any(lme4::getME(mA,"X") != lme4::getME(m0,"X")))
            stop(nonidentfixmsg)
        }
      }
    }
    # anova()'s parameter-count column was renamed "Df" -> "npar" after
    # lme4 1.1-21; pick whichever name this installation uses.
    lmer_nm <- if (utils::packageVersion("lme4")<="1.1.21") "Df" else "npar"
    ## bug fix submitted by Andrzej Galecki 3/10/2009
    DFx <- switch(c.m, lme = anova(mA,m0)$df,
      lmerMod = anova(mA, m0, refit = FALSE)[[lmer_nm]])
    # A difference of more than one parameter means a covariance was also set
    # to 0 under H0 -- outside the scope of this single-variance test.
    if (abs(diff(DFx)) > 1) {
      stop("Random effects not independent - covariance(s) set to 0 under H0.\n
  exactRLRT can only test a single variance.\n")
    }
    rlrt.obs <- max(0, 2 * (logLik(mA, REML = TRUE)[1] -
      logLik(m0, REML = TRUE)[1]))
  }
  # Simulate the finite-sample null distribution only when the observed
  # statistic is positive; a statistic of 0 has p-value 1 by construction.
  p <- if (rlrt.obs != 0) {
    sample <- RLRTSim(X, Z, qrX = qrX, sqrt.Sigma = chol(cov2cor(Vr)),
      lambda0 = 0, seed = seed, nsim = nsim,
      log.grid.hi = log.grid.hi,
      log.grid.lo = log.grid.lo, gridlength = gridlength,
      parallel = match.arg(parallel),
      ncpus = ncpus, cl = cl)
    if (quantile(sample, 0.9) == 0) {
      warning("Null distribution has mass ", mean(sample ==
        0), " at zero.\n")
    }
    # Monte Carlo p-value: share of simulated statistics above the observed.
    mean(rlrt.obs < sample)
  } else {
    message("Observed RLRT statistic is 0, no simulation performed.")
    nsim <- 0
    sample <- NULL
    1
  }
  # Package the result as an "htest" object so print() shows a standard
  # hypothesis-test summary; keep the simulated sample for diagnostics.
  RVAL <- list(statistic = c(RLRT = rlrt.obs), p.value = p,
    method = paste("simulated finite sample distribution of RLRT.\n
    (p-value based on",
    nsim, "simulated values)"), sample = sample)
  class(RVAL) <- "htest"
  return(RVAL)
}
44b661eea4d2903144ee3efe0096e1d9c1b7108d | 270140bfcca7573f21b2ba60f9125cb40d65a9ff | /man/viz_hist.Rd | dcce007fee334d224fa9c0dd05d719ea32d7213c | [
"MIT"
] | permissive | ideas42/tools42 | 83d508fadb18db8bd9e60e723aa1f91d1baa8ae2 | 89d44aab98c2a64aad2935561a006ec30cf8a97f | refs/heads/master | 2023-02-23T10:38:43.909183 | 2021-01-28T11:16:21 | 2021-01-28T11:16:21 | 332,281,881 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 485 | rd | viz_hist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz_hist.R
\name{viz_hist}
\alias{viz_hist}
\title{Generate a histogram with i42 styling}
\usage{
viz_hist(data, xvar)
}
\arguments{
\item{data}{data.frame}
\item{xvar}{variable}
}
\value{
A \code{ggplot2} object containing the histogram, which can be further
modified with additional layers.
}
\description{
Explore the distribution of your data with a histogram. This function generates
a ggplot2 object, so it's easily expanded.
}
\examples{
my_data <- data.frame(x = c(1,2,3,4,5))
viz_hist(my_data, x)
}
|
ec285007106961f45c6969c90107978990b6b4fa | f9288f22d5e1c33427d1f29705691ae1858cfcd0 | /man/assessPower.Rd | c6ef4a4f2a9a7bdc25aafe18ec958e270e2800f9 | [] | no_license | bmtglobal/epower | ccd4aa7095071b772d8beca00c4a1d42ecef6dc5 | 6ad5a9a22a1281739e0745cd79ef410daa122e29 | refs/heads/master | 2022-11-23T21:52:18.157181 | 2022-11-10T00:33:35 | 2022-11-10T00:33:35 | 201,881,464 | 5 | 5 | null | 2022-11-10T00:30:24 | 2019-08-12T07:38:08 | R | UTF-8 | R | false | true | 3,198 | rd | assessPower.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Toolbox_run.r
\name{assessPower}
\alias{assessPower}
\title{Assess Power}
\usage{
assessPower()
}
\arguments{
\item{NA}{the function takes no arguments, but instead uses the objects contained
in the global environment generated by the function fitData()}
}
\value{
A power analysis - Including: a saved R workspace containing all
simulated scenario data and simulation results exported as a .csv file
labelled with the string: ...scenario_power_summary.csv
}
\description{
Run the power toolbox following a call to fitData(). See ?fitData
}
\details{
This function wraps the other functions within the epower
package to perform the power analysis given as a
scenario within the excel file supplied to the companion function fitData().
fitData() must be run prior to running assessPower() in order to generate
the required model objects dataComponents and scenarioParams.
The function assessPower() allows the user to assess power across a range of
scenarios as specified in the excel interface workbook and unpacked in the call to fitData().
The function is directly
called by the user and has no arguments that need to be specified, but will only
run if the function fitData() has already been called by the user during that R
session, because it relies on global variables generated during the execution
of fitData(). Initially assessPower() calls the function buildScenarioMatrix()
which takes the information supplied on the excel interface file and generates
a matrix of all requested scenario combinations. Each row of this matrix is
then passed to the function run.scenario(), which is responsible for building the
monte-carlo datasets based on the specifications of that scenario (including
the specified effect size) and the posterior sample generated by powerScenario()
from the pilot data model fit; combining this with the original pilot data;
and then calculating posterior model probabilities for a model with and without
the BA*CI interaction term. The returned model probabilities are collated such
that those <0.5 are assigned a 1 (representing a successful detection of impact)
for that iteration of that scenario, and those >0.5 are assigned a 0
(no detection of impact). Where no effect is applied in a given scenario,
the proportion of successful detections represents type 1 error, whereas if
an effect was applied, the proportion of successful detections represents
statistical power for that scenario. The proportion of successful detections
is combined with the generated scenario matrix, and output as a csv
file ...scenario_power_summary.csv.
}
\examples{
install.packages("epower",dependencies=TRUE)
library(epower)
# Set the working directory in R to the folder containg the
# excel workbook. This can be done by clicking
# File -> Change dir...
fitData(excelInFile="epower_interface_V1.3.xlsx")
assessPower()
}
\references{
Fisher R, Shiell GR, Sadler RJ, Inostroza K, Shedrawi G, Holmes TH, McGree JM (2019) epower: an R package for power analysis of Before-After-Control-Impact (BACI) designs. Methods in Ecology and Evolution.
}
\author{
Rebecca Fisher \email{r.fisher@aims.gov.au}
}
|
a0d82ed4303e9d3cd281cec5f2bc9fb5fa21dd73 | 3ee366ed0dc1cc8cf768cae3f509874a0e370a43 | /man/geobuffer_pts.Rd | d8ef532a1eae3d6d2e508e229070e0f01c1579d9 | [
"MIT"
] | permissive | valentinitnelav/geobuffer | b475c9cd01408ed14403ce3fed85f48eb535a6dd | b4b0c9d853d6b7d612a9a713c4c0505245dca2c9 | refs/heads/master | 2022-10-28T10:39:25.168883 | 2022-10-07T06:35:28 | 2022-10-07T06:35:28 | 170,037,220 | 16 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,667 | rd | geobuffer_pts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geobuffer_pts.R
\name{geobuffer_pts}
\alias{geobuffer_pts}
\title{Geodesic buffer around points (long, lat) using metric radius}
\usage{
geobuffer_pts(xy, dist_m, step_dg = 10,
crs = "+proj=longlat +ellps=WGS84 +datum=WGS84", output = "sp", ...)
}
\arguments{
\item{xy}{One of the following: \code{SpatialPoints}, \code{SpatialPointsDataFrame},
points as \code{sf}, or two columns \code{matrix}, \code{data.frame} or \code{data.table}, with
the first column containing unprojected longitudes and the second
containing unprojected latitudes of your points around which you desire
buffers.}
\item{dist_m}{Distance in meters passed as \code{d} to \code{geosphere::destPoint()}.
The distance must be a numeric vector. Its length must be either 1
(assuming you want the same buffer radius for all points in \code{xy}), or the
total number of points you have in \code{xy} (assuming you want a different
buffer radius for each point).}
\item{step_dg}{Step of bearings (directions) in degrees. Must be numeric of
length 1. Defaults to 10. Dictates the point density of the buffer edge,
therefore the buffer's shape. For example, the maximum allowed value of 120
corresponds to 360/120 = 3 points on a circle, which will form a buffer as
an equilateral triangle. For more circle-like shaped buffers, use a smaller
step like 10, 5 dg or even smaller. However, the smaller the step, the more
computational intensive the operations are. The smallest allowed value is 1
dg.}
\item{crs}{Character string of projection arguments. Defaults to
\code{"+proj=longlat +ellps=WGS84 +datum=WGS84"}. The CRS must be the one
corresponding to your points/coordinates. If you are unsure, then could be
a safe bet to try the default value. For more details see \code{?sp::CRS}.}
\item{output}{Dictates the type of output. Character vector with one of the
following values: \code{"sp"}, \code{"sf"}, \code{"data.table"} or \code{"data.frame"}.
Defaults to \code{"sp"}. If indicates a spatial object (\code{"sp"} or \code{"sf"}), then
it returns the buffers as polygons around the given points. If indicates a
table object (\code{"data.table"} or \code{"data.frame"}), then it returns the points
that constitute the buffers as a 3 columns \code{data.table} or \code{data.frame}:
\code{lon}, \code{lat}, \code{id}, where \code{id} is the id of each point in \code{xy}. This can be
useful for plotting with \code{ggplot2}.}
\item{...}{Additional arguments passed to \code{geosphere::destPoint()}, like \code{a}
and \code{f}.}
}
\value{
Depending on the value given to \code{output} (see above).
}
\description{
Allows the possibility of creating geodesic buffers when the
radius is given in metric units. A geodesic buffer is not affected by the
distortions introduced by projected coordinate systems. This function is a
wrapper of \code{geosphere::destPoint()}.
}
\examples{
bucharest_500km <- geobuffer_pts(xy = data.frame(lon = 26.101390,
lat = 44.427764),
dist_m = 500*10^3,
output = "sf")
bucharest_500km
plot(bucharest_500km)
library(mapview)
library(sf)
mapView(as(bucharest_500km, "Spatial"), alpha.regions = 0.2)
}
\references{
This function is a wrapper of \code{geosphere::destPoint()}. See also
\href{https://gis.stackexchange.com/questions/250389/euclidean-and-geodesic-buffering-in-r}{Euclidean and Geodesic Buffering in R}
on gis.stackexchange. Also check \href{https://www.esri.com/news/arcuser/0111/geodesic.html}{Understanding Geodesic Buffering}.
}
\author{
Valentin Stefan
}
|
de8766624451554ca56fb799083d1c904efcb125 | aa615d35770dee4fc97b119bf1e6dbfe61b4486c | /R/access_level.R | 73ad37b6358d4484bb5e28a17f7ff913fd0d02e3 | [] | no_license | oalbishri/rtweet | aad173a3b5f9b28f96b7589e871d7c74f9dc2ba2 | fa8987ffc34beb6fc8355f68f47af9bd5635ff46 | refs/heads/master | 2023-03-23T20:47:23.839722 | 2021-03-14T17:21:32 | 2021-03-14T17:21:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | access_level.R | api_access_level <- function(token = NULL) {
r <- TWIT_get(token, "/1.1/account/settings", parse = FALSE)
if ("headers" %in% names(r) && "x-access-level" %in% names(r$headers)) {
r$headers$`x-access-level`
} else {
r
}
}
|
9410ae633397130cc2123ca18753fed445c13661 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Luminescence/examples/CW2pPMi.Rd.R | 7afa023eeff0446214cb5bae7c4438f10d2755d9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,355 | r | CW2pPMi.Rd.R | library(Luminescence)
### Name: CW2pPMi
### Title: Transform a CW-OSL curve into a pPM-OSL curve via interpolation
### under parabolic modulation conditions
### Aliases: CW2pPMi
### Keywords: manip
### ** Examples
##(1)
##load CW-OSL curve data
data(ExampleData.CW_OSL_Curve, envir = environment())
##transform values
values.transformed <- CW2pPMi(ExampleData.CW_OSL_Curve)
##plot
plot(values.transformed$x,values.transformed$y.t, log = "x")
##(2) - produce Fig. 4 from Bos & Wallinga (2012)
##load data
data(ExampleData.CW_OSL_Curve, envir = environment())
values <- CW_Curve.BosWallinga2012
##open plot area
plot(NA, NA,
xlim = c(0.001,10),
ylim = c(0,8000),
ylab = "pseudo OSL (cts/0.01 s)",
xlab = "t [s]",
log = "x",
main = "Fig. 4 - Bos & Wallinga (2012)")
values.t <- CW2pLMi(values, P = 1/20)
lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2],
col = "red",lwd = 1.3)
text(0.03,4500,"LM", col = "red", cex = .8)
values.t <- CW2pHMi(values, delta = 40)
lines(values[1:length(values.t[,1]),1], CW2pHMi(values, delta = 40)[,2],
col = "black", lwd = 1.3)
text(0.005,3000,"HM", cex = .8)
values.t <- CW2pPMi(values, P = 1/10)
lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
col = "blue", lwd = 1.3)
text(0.5,6500,"PM", col = "blue", cex = .8)
|
20119c979325eb9e4e669586456cb0e977c103ad | 965901a2e6a68c4ee9757ba58b1ec2a82172c9a6 | /RforDataScience_ggplot2.R | 28d0cdef5c38e65b3c32f37c5ccbf2afdd39b9ce | [] | no_license | sivaram10/Stat-545-Parctice | 3c9a9592cbc6b97395f22a55e5b392390543c98e | 78cdc05afe1ba999e91c84ea31be4c16bccea6e1 | refs/heads/master | 2021-01-19T20:27:00.340259 | 2017-05-15T03:04:24 | 2017-05-15T03:04:24 | 88,505,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,075 | r | RforDataScience_ggplot2.R | library(tidyverse)
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(color = class)) +
geom_smooth(se = FALSE) +
labs(title = "Fuel efficiency generally decreases with increase in engine size",
subtitle = "Two seater (sports cars) are an expection to this trend",
caption = "Data from fueleconomy.gov")
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(color = class)) +
geom_smooth(se = FALSE) +
labs(x = "Engine displacement (L)",
y = "Highway fuel economy (mpg)",
colour = "Car Type")
df <- tibble(
x = runif(10),
y = runif(10)
)
ggplot(df, aes(x, y)) +
geom_point() +
labs(
x = quote(sum(x[i] ^ 2, i == 1, n)),
y = quote(alpha + beta + frac(delta, theta))
)
?plotmath
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(color = class)) +
geom_smooth(se = FALSE, method = "lm")
best_in_class <- mpg %>%
group_by(class) %>%
filter(row_number(desc(hwy))==1)
best_in_class
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(color = class)) +
geom_text(aes(label = model), data = best_in_class)
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(colour = class)) +
geom_label(aes(label = model), data = best_in_class, nudge_y = 2, alpha = 0.5)
ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(colour = class)) +
geom_point(size = 3, shape = 1, data = best_in_class) +
ggrepel::geom_label_repel(aes(label = model), data = best_in_class)
label <- mpg %>%
summarise(
displ = max(displ),
hwy = max(hwy),
label = "Increasing engine size is \nrelated to decreasing fuel economy."
)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_text(aes(label = label), data = label, vjust = "top", hjust = "right")
# Same annotation pinned to the panel corner via Inf coordinates.
label <- tibble(
  displ = Inf,
  hwy = Inf,
  label = "Increasing engine size is \nrelated to decreasing fuel economy."
)
ggplot(mpg, aes(displ, hwy)) +
  geom_point() +
  geom_text(aes(label = label), data = label, vjust = "top", hjust = "right")
# Axis ticks: custom breaks / suppressed labels.
ggplot(mpg, aes(displ, hwy)) +
  geom_point() +
  scale_y_continuous(breaks = seq(15,40, by =5))
ggplot(mpg, aes(displ, hwy)) +
  geom_point() +
  scale_x_continuous(labels = NULL) +
  scale_y_continuous(labels = NULL)
#not working
presidential %>%
  mutate(id = 33 + row_number()) %>%
  ggplot(aes(start, id)) +
  geom_point() +
  geom_segment(aes(xend = end, yend = id)) +
  scale_x_date(NULL, breaks = presidential$start, date_labels = "'%y")
# Legend placement via theme(legend.position = ...).
base <- ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(colour = class))
base + theme(legend.position = "left")
base + theme(legend.position = "top")
base + theme(legend.position = "bottom")
base + theme(legend.position = "right") # the default
# guides(): single-row legend with enlarged key points.
ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(colour = class)) +
  geom_smooth(se = FALSE) +
  theme(legend.position = "bottom") +
  guides(colour = guide_legend(nrow = 1, override.aes = list(size = 4)))
# Scale transformations: transforming the data vs. transforming the scale
# (the latter keeps the original units on the axis labels).
ggplot(diamonds, aes(carat, price)) +
  geom_bin2d()
ggplot(diamonds, aes(log10(carat), log10(price))) +
  geom_bin2d()
ggplot(diamonds, aes(carat, price)) +
  geom_bin2d() +
  scale_x_log10() +
  scale_y_log10()
# ColorBrewer palettes for discrete colour scales; redundant shape mapping
# keeps groups distinguishable in black-and-white.
ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(color = drv)) +
  scale_colour_brewer(palette = "Set1")
ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(color = drv, shape = drv)) +
  scale_colour_brewer(palette = "Set1")
#Not working
presidential %>%
  mutate(id = 33 + row_number()) %>%
  ggplot(aes(start, id, colour = party)) +
  geom_point() +
  geom_segment(aes(xend = end, yend = id)) +
  scale_colour_manual(values = c(Republican = "red", Democratic = "blue"))
# Continuous fill scales on hex-binned bivariate normal data.
df <- tibble(
  x = rnorm(10000),
  y = rnorm(10000)
)
ggplot(df, aes(x, y)) +
  geom_hex() +
  coord_fixed()
#> Loading required package: methods
ggplot(df, aes(x, y)) +
  geom_hex() +
  viridis::scale_fill_viridis() +
  coord_fixed()
# NOTE(review): geom_hex maps counts to fill rather than colour, so this
# colour-gradient scale presumably has no visible effect -- verify.
ggplot(df, aes(x, y)) +
  geom_hex() +
  scale_colour_gradient(low = "white", high = "red") +
  coord_fixed()
# Overplotting handled via transparency.
ggplot(diamonds, aes(carat, price)) +
  geom_point(aes(colour = cut), alpha = 1/20)
ggplot(mpg, aes(x = displ, y = hwy)) +
  geom_point(aes(color = class), size =4) +
  theme_light()
# --- gapminder bubble-chart exercise ----------------------------------------
library(ggplot2)
library(gapminder)
suppressPackageStartupMessages(library(dplyr))
# Drop Oceania and order countries by decreasing population so large bubbles
# are drawn first and small ones remain visible on top.
jdat <- gapminder %>%
  filter(continent != "Oceania") %>%
  droplevels() %>%
  mutate(country = reorder(country, -1 * pop)) %>%
  arrange(year, country)
j_year <- 2007
# Base plot: life expectancy vs. GDP per capita on a log x-axis.
q <-
  jdat %>%
  filter(year == j_year) %>%
  ggplot(aes(x = gdpPercap, y = lifeExp)) +
  scale_x_log10(limits = c(230, 63000))
q + geom_point()
q + geom_point(aes(size = pop), pch = 21)
(r <- q +
  geom_point(aes(size = pop), pch = 21, show.legend = FALSE) +
  scale_size_continuous(range = c(1,40)))
(r <- r + facet_wrap(~ continent) + ylim(c(39, 87)))
r + aes(fill = continent)
# Final version: one fill per country using gapminder::country_colors.
j_year <- 2007
jdat %>%
  filter(year == j_year) %>%
  ggplot(aes(x = gdpPercap, y = lifeExp, fill = country)) +
  scale_fill_manual(values = country_colors) +
  facet_wrap(~ continent) +
  geom_point(aes(size = pop), pch = 21, show.legend = FALSE) +
  scale_x_log10(limits = c(230, 63000)) +
  scale_size_continuous(range = c(1,40)) + ylim(c(39, 87)) +
  theme_bw()
|
e7593d4c2f6e4d973b60119c4d4f5404757f9f4a | f25f0ce112516575e7129ae0d370ce4a3031cdf9 | /man/type_one_smooth.Rd | 043bac18081fda38841b410cd8e5d05d5d973845 | [] | no_license | MatheMax/OptReSample | 83fb7dfc57afafbfa3f4d4939caca7d9c35ce87f | 8a3856ddb31d01b9f5daae64f58ccd12c3811416 | refs/heads/master | 2021-08-06T05:24:51.263279 | 2018-07-18T20:16:54 | 2018-07-18T20:16:54 | 130,708,697 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 639 | rd | type_one_smooth.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StageTwo.R
\name{type_one_smooth}
\alias{type_one_smooth}
\title{Type I version for smooth direct designs}
\usage{
type_one_smooth(parameters, cf, c2, h, N, w)
}
\arguments{
\item{parameters}{Parameters specifying the design}
\item{cf}{Boundary for stopping for futility}
\item{c2}{c_2-values}
\item{h}{Distance between two nodes}
\item{N}{4N+1 gives the number of nodes}
\item{w}{nodes inside the interval (cf,ce)}
}
\description{
\code{type_one_smooth} gives the version of the type I error that is needed for \link{stage_two}.
}
|
816e90a50ed46d689164c99875d23b3eec06d4c3 | b72a579eddbd2e20a0d154a4704fa28dc89adf5f | /code/breast_cancer/preprocess_AABC_sum.R | 0161f34172155be064a2fa7d6643db722e1fee53 | [] | no_license | andrewhaoyu/multi_ethnic | cf94c2b02c719e5e0cbd212b1e09fdd7c0b54b1f | d1fd7d41ac6b91ba1da8bb8cd1b2b0768f28b984 | refs/heads/master | 2023-06-24T20:47:18.268972 | 2023-06-13T15:30:14 | 2023-06-13T15:30:14 | 214,069,397 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,793 | r | preprocess_AABC_sum.R | #goal: preprocess AABC summary level statistics
# Goal: preprocess AABC breast-cancer meta-analysis summary statistics and
# align variants/alleles with the Ghana (GBHS) genotype data.
# Bug fix vs. original: the mutate/select chain ended with a stray trailing
# %>% that piped into the next assignment statement (a syntax error in R);
# the rename() step is now part of the same chain.
setwd("/data/zhangh24/multi_ethnic/data/")
library(tidyverse)
library(data.table)
# METAL meta-analysis results with the Ghana study excluded; column 10 is
# the p-value.
sum.data = fread("./AABC_data/final_metal_4aa_no_ghana1.txt")
colnames(sum.data)[10] = "P"
# Split "chr:pos:a1:a2" marker names, rebuild a chr:pos matching key, and
# upper-case the effect/alternate alleles.
sum.data = sum.data %>%
  separate(MarkerName, into = c("CHR","POS","No1","No2"), sep = ":", remove = FALSE) %>%
  unite("chr.pos", CHR, POS, sep = ":", remove = FALSE) %>%
  mutate(ID = MarkerName,
         Effect_allele = toupper(Allele1),
         Alt_allele = toupper(Allele2)) %>%
  select(ID, chr.pos, CHR, POS, Effect_allele, Alt_allele,
         Freq1, FreqSE, Effect, StdErr, P)
sum.data.meta = sum.data
l = 1
trait = c("overall","erpos","erneg")
# Match SNPs present in the Ghana study (GBHS plink .bim file).
bim = fread("/data/zhangh24/multi_ethnic/data/GBHS_plink/all_chr.bim")
colnames(bim) = c("CHR","GA_ID","na","POS","Allele1","Allele2")
sum.data.update = bim %>%
  unite("chr.pos", CHR, POS, sep = ":", remove = FALSE)
#idx <- which(sum.data$POS==114445880)
sum.data.ga = sum.data.update %>%
  select(chr.pos, GA_ID, Allele1, Allele2) %>%
  rename(Eff_allele_GA = Allele1,
         Ref_allele_GA = Allele2)
# Keep only variants whose alleles agree between the meta-analysis and GBHS,
# in either the same or swapped effect/reference orientation.
sum.data.match = inner_join(sum.data.ga,
                            sum.data.meta,
                            by = "chr.pos") %>%
  filter(((Effect_allele == Eff_allele_GA) & (Alt_allele == Ref_allele_GA)) |
           ((Effect_allele == Ref_allele_GA) & (Alt_allele == Eff_allele_GA)))
# Minor allele frequency from the effect-allele frequency, keep the columns
# needed downstream, and use the GBHS SNP identifier as the variant ID.
sum.data.match = sum.data.match %>%
  mutate(MAF = ifelse(Freq1 <= 0.5, Freq1, 1 - Freq1)) %>%
  select(chr.pos, GA_ID, CHR, POS, Effect_allele, Alt_allele, MAF, Effect, StdErr, P) %>%
  rename(ID = GA_ID)
sum.data = sum.data.match
# CHR/POS came out of strsplit as character; convert back to numeric.
sum.data = sum.data %>%
  mutate(POS = as.numeric(POS),
         CHR = as.numeric(CHR))
save(sum.data, file = paste0("./AABC_data/BC_AFR_", trait[l], "remove_GHBS.rdata"))
|
2d8eef6d8d159f295a783d61819436aac0f4e098 | cc3beea2feb5d66b4df71a96f42129687a1296e7 | /draft/from_salmon_folder/functions_entrapmentAnalysis.R | fd7453b20cbacdad5bb95b366bc209749da430e0 | [] | no_license | YulongXieGitHub/YulongR_Code | 133c90b708c33c447737aaa0b6d01f5c9cb33818 | e1f68c1564fb4036df9500297fbd36548e3b8014 | refs/heads/master | 2021-01-23T15:03:12.427516 | 2015-07-16T01:52:35 | 2015-07-16T01:52:35 | 39,168,963 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,138 | r | functions_entrapmentAnalysis.R | #
# eliminate all stuff
rm(list = ls(all = TRUE))
# setup start date and time
start_time <- date();
Start.time <- Sys.time()
set.seed(12345, kind = NULL) # set seed of random number
# close all devices which have been opened
device.list <- dev.list()
if (length(device.list) != 0){for (device.this in device.list){dev.off(device.this)}}
packages.desired <- c("akima","bitops","caTools","chron","cshapes","cwhmisc","data.table","Defaults","fortunes","gplots","gtools","iterators","itertools","lme4","locfit","maptools","mlmRev","neuralnet","plyr","psych","quantmod","reshape","reshape2","rJava","RODBC","scatterplot3d","sp","splus2R","stringr","survey","timeDate","TTR","xts","zoo")
packages.needed <- c("chron","RODBC","timeDate","stats","lattice","graphics","cwhmisc","reshape")
packages.loaded <- search() # packages already loaded
packages.available <- (unlist(library()$results))[,"Package"] # packages installed which are ready to load
packages.libPath <- (unlist(library()$results))[,"LibPath"][1] # the path to install package
for (package.needed in packages.needed)
{
if (length(grep(package.needed,packages.loaded,perl=TRUE,value=TRUE))>0)
{
# package needed has already been loaded
cat(paste("Package \"",package.needed,"\" has already been loaded\n",sep=""))
}else{
# package needed has NOT been loaded
if (length(grep(package.needed,packages.available,perl=TRUE,value=TRUE))<=0)
{
# package needed which has NOT been loaded has NOT been installed, install it
install.packages(package.needed,
lib = packages.libPath,
repos = "http://lib.stat.cmu.edu/R/CRAN",
available = NULL, destdir = NULL,dependencies = NA, type = getOption("pkgType"),clean = FALSE)
cat(paste("Package \"",package.needed,"\" does not exist and has just been installed\n",sep=""))
}
# now load it
command.string <- paste("library(",package.needed,")",sep="")
eval(parse(text=command.string))
cat(paste("Package \"",package.needed,"\" has just been loaded\n",sep=""))
}
}
# today's month, day and year in the format of "Thu Jun 16 08:48:36 2011", 5 fields separated by space
today.month <- strsplit(date(),"\\s+",perl=TRUE)[[1]][2]
today.day <- strsplit(date(),"\\s+",perl=TRUE)[[1]][3]
today.year <- strsplit(date(),"\\s+",perl=TRUE)[[1]][5]
today.hour <- strsplit(strsplit(date(),"\\s+",perl=TRUE)[[1]][4],":",perl=TRUE)[[1]][1]
today.minute <- strsplit(strsplit(date(),"\\s+",perl=TRUE)[[1]][4],":",perl=TRUE)[[1]][2]
today.second <- strsplit(strsplit(date(),"\\s+",perl=TRUE)[[1]][4],":",perl=TRUE)[[1]][3]
# a function took from the boot strap package
norm.inter <- function(t,alpha)
#
# Interpolation on the normal quantile scale. For a non-integer
# order statistic this function interpolates between the surrounding
# order statistics using the normal quantile scale. See equation
# 5.8 of Davison and Hinkley (1997)
#
{
t <- t[is.finite(t)]
R <- length(t)
rk <- (R+1)*alpha
if (!all(rk>1 & rk<R))
warning("extreme order statistics used as endpoints")
k <- trunc(rk)
inds <- seq_along(k)
out <- inds
kvs <- k[k>0 & k<R]
tstar <- sort(t, partial = sort(union(c(1, R), c(kvs, kvs+1))))
ints <- (k == rk)
if (any(ints)) out[inds[ints]] <- tstar[k[inds[ints]]]
out[k == 0] <- tstar[1L]
out[k == R] <- tstar[R]
not <- function(v) xor(rep(TRUE,length(v)),v)
temp <- inds[not(ints) & k != 0 & k != R]
temp1 <- qnorm(alpha[temp])
temp2 <- qnorm(k[temp]/(R+1))
temp3 <- qnorm((k[temp]+1)/(R+1))
tk <- tstar[k[temp]]
tk1 <- tstar[k[temp]+1L]
out[temp] <- tk + (temp1-temp2)/(temp3-temp2)*(tk1 - tk)
cbind(round(rk, 2), out)
}
# -------------------------------------------------------------------------------------------------
# define label for the day of the week
# -------------------------------------------------------------------------------------------------
week.label <- c( 1, 2, 3, 4, 5, 6, 7)
week.names <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
week.fullNames <- c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday")
names(week.fullNames) <- week.names
# -------------------------------------------------------------------------------------------------
# a function to calculate the variety of counts per segment:
# inputs: data subset, biweek.idx and segment list of the entire data set
# -------------------------------------------------------------------------------------------------
count.perSegment <- function(my.data,my.biweek.idx,my.allSegment)
{
idx <- 0
for (this.segment in sort(my.allSegment))
{
idx <- idx + 1
myData.sub <- subset(my.data,segment == this.segment)
if (dim(myData.sub)[1] > 0)
{
no.sites.visited <- length(unique(myData.sub[,"transect"]))
no.sites.ent.yes1 <- length(unique(myData.sub[myData.sub[,"entrapments.present"]=="yes","transect"])) # This calculation is not correct
no.sites.ent.no1 <- no.sites.visited - no.sites.ent.yes1 # This calculation is not correct
no.entrapment.yes <- dim(myData.sub[myData.sub[,"entrapments.present"]=="yes",])[1]
no.entrapment.wt.chinook<- dim(myData.sub[myData.sub[,"fish.exist"]=="yes",])[1] # November 28, 2012: since the replacement of "fish.present" with "fish.exist", this function will not work for the script before revision.
no.entrapment.unknown <- dim(myData.sub[myData.sub[,"fate"]=="Unknown",])[1]
no.entrapment.reflood <- dim(myData.sub[myData.sub[,"fate"]=="Reflood",])[1]
no.entrapment.dewatered <- dim(myData.sub[myData.sub[,"fate"]=="Dewatered",])[1]
no.entrapment.thermal <- dim(myData.sub[myData.sub[,"fate"]=="Temp > 27C",])[1]
no.fish.alive <- sum(myData.sub[,"fish.alive"],na.rm=TRUE)
no.fish.dead <- sum(myData.sub[,"fish.dead"], na.rm=TRUE)
no.fish.total <- sum(myData.sub[,"fish.total"],na.rm=TRUE)
no.entrapment.lethalFate<- dim(myData.sub[myData.sub[,"lethal"] == "yes",])[1] # number of entrapments (data points) when the entrapment fate is known as lethal
no.entrapment.knownFate <- dim(myData.sub[((myData.sub[,"lethal"] == "yes") | (myData.sub[,"lethal"] == "no")),])[1] # number of entrapments (data points) when the entrapment fate is known as lethal ot not lethal, i.e., is not "unknown"
fish.mortality <- sum(myData.sub[,"mortality"], na.rm=TRUE) # different from fish.dead or fish.alive. fish.alive could be fish.mortality if the entrapment is lethal.
fish.mortality.knownFate<- sum(myData.sub[myData.sub[,"lethal"] == "yes" | myData.sub[,"lethal"] == "no","mortality"], na.rm=TRUE) # different from fish.dead or fish.alive. fish.alive could be fish.mortality if the entrapment is lethal.
fish.total.knownFate <- sum(myData.sub[myData.sub[,"lethal"] == "yes" | myData.sub[,"lethal"] == "no","fish.total"], na.rm=TRUE) # different from fish.dead or fish.alive. fish.alive could be fish.mortality if the entrapment is lethal.
mortalityRate.entrapment<- no.entrapment.lethalFate / no.entrapment.knownFate
mortalityRate.fish <- fish.mortality.knownFate / fish.total.knownFate
perc.ent.wt.chinook <- no.entrapment.wt.chinook / no.entrapment.yes
chinook.per.ent <- no.fish.total / no.entrapment.yes
# -------------------------------------------------------------------------------------------------
# number of total entrapments of each unique sample (i.e., date-transect combination): count the number of all quardrants ("sampled"="Y" or "N") for a give transect at a given day
ent.count <- aggregate(myData.sub[,c("entrapment.yes")],list(myData.sub[,"biweek.idx"],myData.sub[,"transect"]), FUN=length) # the number of records of a "date-segment-transect" combination
names(ent.count) <- c( "biweek.idx","transect","ent.count.all")
# number of sampled entrapments of each unique sample (i.e., date-transect combination): count the number of "Y" quardrants ("sampled"="Y") for a give transect at a given day
ent.yes <- aggregate(myData.sub[,c("entrapment.yes")],list(myData.sub[,"biweek.idx"],myData.sub[,"transect"]), FUN=sum,na.rm=T) # the number of sampled "Yes" records of a "date-segment-transect" combination
names(ent.yes) <- c( "biweek.idx","transect","ent.count.yes")
# number of not-sampled entrapments of each unique sample (i.e., date-transect combination): count the number of "N" quardrants ("sampled"="N") for a give transect at a given day
ent.no <- aggregate(myData.sub[,c("entrapment.no")], list(myData.sub[,"biweek.idx"],myData.sub[,"transect"]), FUN=sum,na.rm=T) # the number of sampled "Yes" records of a "date-segment-transect" combination
names(ent.no) <- c( "biweek.idx","transect","ent.count.no")
cat(paste("count numbers of total, sampled and not-sampled entrapments in the segments\n",sep=""))
cat(paste("count numbers of total, sampled and not-sampled entrapments in the segments\n",sep=""),file=FL.LOG,append=TRUE)
# assemble the counts into a dataframe. The numbers of total entrapments, sampled entrapments and not sampled entrapments are used to create the "entrapments Sampled" and "entrapments Not Sampled" statistics
mydata.ent <- cbind(ent.count,
ent.count.yes = ent.yes[,"ent.count.yes"],
ent.count.no = ent.no[,"ent.count.no"])
# assign "entrapments Sampled No": if total entrapments == not sampled entrapments, i.e., ent.count == ent.no, assign 1 otherwise 0
# "entrapments Sampled Yes": if total entrapments == sampled entrapments, i.e., ent.count == ent.yes, assign 1 otherwise 0
# "entrapments Sampled YesNo": if sampled entrapments > 0, i.e., ent.count.yes > 0, assign 1 otherwise 0 This is the "entrapments Sampled Yes" in the summary tab "Stranding Summary"
mydata.ent <- cbind(mydata.ent,
ent.no = rep(0,dim(mydata.ent)[1]), # initialize "Plots Sampled : No" with 0
ent.yes = rep(0,dim(mydata.ent)[1]), # initialize "Plots Sampled : Yes" with 0
ent.yesNo = rep(0,dim(mydata.ent)[1])) # initialize "Plots Sampled : YesNo" with 0
# assign values
mydata.ent[mydata.ent[,"ent.count.all"] == mydata.ent[,"ent.count.no"], "ent.no"] <- 1 # the sum of sampled "no" in the data packet is the same as the length of the data packet, means all records in the data packet are "No".
mydata.ent[mydata.ent[,"ent.count.all"] == mydata.ent[,"ent.count.yes"],"ent.yes"] <- 1 # the sum of sampled "yes" in the data packet is the same as the length of the data packet, means all records in the data packet are "Yes".
mydata.ent[mydata.ent[,"ent.count.yes"] > 0, "ent.yesNo"] <- 1 # the sum of sampled "yes" in the data packet is not zero , means at least there is sampled "Yes" records
cat(paste("creat a data.frame of [mydata.ent]\n",sep=""))
cat(paste("creat a data.frame of [mydata.ent]\n",sep=""),file=FL.LOG,append=TRUE)
# -------------------------------------------------------------------------------------------------
# count entrapments sampled "No" and "Yes"
no.sites.ent.yes2 <- sum(mydata.ent[,"ent.yesNo"],na.rm=TRUE) # this is "Plots Sampled Yes" in the summary tab "Stranding Summary"
no.sites.ent.no2 <- sum(mydata.ent[,"ent.no"], na.rm=TRUE) # this is "Plots Sampled No" in the summary tab "Stranding Summary"
cat(paste("the count the Yes/No transect/quardrant\n",sep=""))
cat(paste("the count the Yes/No transect/quardrant\n",sep=""),file=FL.LOG,append=TRUE)
# -------------------------------------------------------------------------------------------------
if (idx == 1)
{
output <- data.frame(segment = this.segment,
sites.visited = no.sites.visited,
sites.ent.yes = no.sites.ent.yes2,
sites.ent.no = no.sites.ent.no2,
ent.sampled = no.entrapment.yes,
ent.wt.chinook = no.entrapment.wt.chinook,
ent.fate.unknown = no.entrapment.unknown,
ent.fate.reflood = no.entrapment.reflood,
ent.fate.dewatered = no.entrapment.dewatered,
ent.fate.thermal = no.entrapment.thermal,
fish.alive = no.fish.alive,
fish.dead = no.fish.dead,
fish.total = no.fish.total,
sites.ent.yes1 = no.sites.ent.yes1,
sites.ent.no1 = no.sites.ent.no1,
fish.mortality = fish.mortality,
no.ent.lethal.fate = no.entrapment.lethalFate,
no.ent.known.fate = no.entrapment.knownFate,
mort.rate.ent = mortalityRate.entrapment,
fish.morts.known = fish.mortality.knownFate,
fish.total.known = fish.total.knownFate,
mort.rate.fish = mortalityRate.fish,
perc.ent.wt.chinook= perc.ent.wt.chinook,
chinook.per.ent = chinook.per.ent)
}else{
output <- rbind(output,
c(segment = this.segment,
sites.visited = no.sites.visited,
sites.ent.yes = no.sites.ent.yes2,
sites.ent.no = no.sites.ent.no2,
ent.sampled = no.entrapment.yes,
ent.wt.chinook = no.entrapment.wt.chinook,
ent.fate.unknown = no.entrapment.unknown,
ent.fate.reflood = no.entrapment.reflood,
ent.fate.dewatered = no.entrapment.dewatered,
ent.fate.thermal = no.entrapment.thermal,
fish.alive = no.fish.alive,
fish.dead = no.fish.dead,
fish.total = no.fish.total,
sites.ent.yes1 = no.sites.ent.yes1, # this calculation is not correct
sites.ent.no1 = no.sites.ent.no1,
fish.mortality = fish.mortality,
no.ent.lethal.fate = no.entrapment.lethalFate,
no.ent.known.fate = no.entrapment.knownFate,
mort.rate.ent = mortalityRate.entrapment,
fish.morts.known = fish.mortality.knownFate,
fish.total.known = fish.total.knownFate,
mort.rate.fish = mortalityRate.fish,
perc.ent.wt.chinook= perc.ent.wt.chinook,
chinook.per.ent = chinook.per.ent)) # this calculation is not correct
}
}else{
if (idx == 1)
{
output <- data.frame(segment = this.segment,
sites.visited = 0,
sites.ent.yes = 0,
sites.ent.no = 0,
ent.sampled = 0,
ent.wt.chinook = 0,
ent.fate.unknown = 0,
ent.fate.reflood = 0,
ent.fate.dewatered = 0,
ent.fate.thermal = 0,
fish.alive = 0,
fish.dead = 0,
fish.total = 0,
sites.ent.yes1 = 0,
sites.ent.no1 = 0,
fish.mortality = 0,
no.ent.lethal.fate = 0,
no.ent.known.fate = 0,
mort.rate.ent = 0,
fish.morts.known = 0,
fish.total.known = 0,
mort.rate.fish = 0,
perc.ent.wt.chinook= 0,
chinook.per.ent = 0)
}else{
output <- rbind(output,
c(segment = this.segment,
sites.visited = 0,
sites.ent.yes = 0,
sites.ent.no = 0,
ent.sampled = 0,
ent.wt.chinook = 0,
ent.fate.unknown = 0,
ent.fate.reflood = 0,
ent.fate.dewatered = 0,
ent.fate.thermal = 0,
fish.alive = 0,
fish.dead = 0,
fish.total = 0,
sites.ent.yes1 = 0,
sites.ent.no1 = 0,
fish.mortality = 0,
no.ent.lethal.fate = 0,
no.ent.known.fate = 0,
mort.rate.ent = 0,
fish.morts.known = 0,
fish.total.known = 0,
mort.rate.fish = 0,
perc.ent.wt.chinook= 0,
chinook.per.ent = 0)
)
}
}
}
# do a total
output <- rbind(output,
c(segment = 9,apply(output[,-1],2,FUN=sum,na.rm=TRUE)))
output[output[,"segment"] == 9,"segment"] <- "total"
# do it for section level
output <- rbind(output,
c(segment = 10,apply(output[c(1,2), -1],2,FUN=sum,na.rm=TRUE)),
c(segment = 11,apply(output[c(3,4,5,6),-1],2,FUN=sum,na.rm=TRUE)),
c(segment = 12,apply(output[c(7,8), -1],2,FUN=sum,na.rm=TRUE)))
output[output[,"segment"] ==10,"segment"] <- "section 1"
output[output[,"segment"] ==11,"segment"] <- "section 2"
output[output[,"segment"] ==12,"segment"] <- "section 3"
# recalculate rate for "total" and the sections
index <- output[,"segment"] == "total" | output[,"segment"] == "section 1" | output[,"segment"] == "section 2" | output[,"segment"] == "section 3"
output[index,"mort.rate.ent"] <- output[index,"no.ent.lethal.fate"] / output[index,"no.ent.known.fate"]
output[index,"mort.rate.fish"] <- output[index,"fish.morts.known"] / output[index,"fish.total.known"]
output[index,"perc.ent.wt.chinook"] <- output[index,"ent.wt.chinook"] / output[index,"ent.sampled"]
output[index,"chinook.per.ent"] <- output[index,"fish.total"] / output[index,"ent.sampled"]
return(output)
}
# -------------------------------------------------------------------------------------------------
# a function to calculate some statistics in the data frame: TO make the script concise, put all the code block for stat into a function
# -------------------------------------------------------------------------------------------------
stat.data <- function(my.data,my.biweek,my.segment)
{
# [my.data] is the input data frame
stat.number <- dim(my.data)[1] # the number of samples
stat.1 <- sum(my.data[my.data[,"binary"] == 1,"binary"]) # the number of 1 samples
stat.1perc <- round(100*sum(my.data[,"binary"]) / dim(my.data)[1], digits = 2) # the percentage of 1 status
stat.0perc <- round(100 - stat.1perc, digits = 2) # the percentage of 0 status
stat.mean.morts <- round(mean(my.data[,"morts"]) , digits = 3) # the mean mortality
stat.max.morts <- round(max(my.data[,"morts"]) , digits = 3) # the max mortality
stat.mean.multi <- round(mean(my.data[,"multiplier"]) , digits = 3) # the mean multiplier
stat.max.multi <- round(max(my.data[,"multiplier"]) , digits = 3) # the max multiplier
stat.min.multi <- round(min(my.data[,"multiplier"]) , digits = 3) # the min multiplier
my.stat <- data.frame( biweek = my.biweek,
segment = my.segment,
number.all = stat.number,
number.1 = stat.1,
perc.1 = stat.1perc,
perc.0 = stat.0perc,
mean.morts = stat.mean.morts,
max.morts = stat.max.morts,
mean.multi = stat.mean.multi,
min.multi = stat.min.multi,
max.multi = stat.max.multi)
return(my.stat)
}
|
496257c08ece09a30a3c6c8a5de4884f2cbd55e4 | c8f83302772325930ecab8f61d4f39c06e9e2e98 | /trees/03_sim_trees_detection.R | cb9f8f99f09a6a73e7e77073bb83eac3e75f5fee | [
"MIT"
] | permissive | boydorr/TransmissionScale | 5fda1bb6b2ae62f522cea0b2a9af1901f57456d4 | 566e1830254d57d262dfc5771ef564218981fb44 | refs/heads/main | 2023-04-11T21:47:34.835197 | 2022-04-21T14:55:56 | 2022-04-21T14:55:56 | 450,220,751 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,325 | r | 03_sim_trees_detection.R | # Simulate unpruned trees for comp to detection ----
# sub_cmd:=-t 2 -n 3 -jn test -wt 5m -sn -mem 6000
if(Sys.getenv("SLURM_JOB_ID") != "") {
ncores <- as.numeric(Sys.getenv("SLURM_NTASKS"))
} else {
ncores <- parallel::detectCores() - 1
}
print(ncores)
cl <- parallel::makeCluster(ncores)
doParallel::registerDoParallel(cl)
# Packages
library(treerabid) # devtools::install_github("mrajeev08/treerabid")
library(data.table)
library(lubridate)
library(dplyr)
library(lubridate)
library(magrittr)
library(foreach)
library(iterators)
library(doRNG)
library(igraph)
library(glue)
# clean up (no cases with NA location or time & filter to start/end dates) ----
case_dt <- readRDS(file = "output/clean_bite_data.rda")
case_dt %<>%
dplyr::filter(!is.na(Symptoms.started),
!is.na(UTM.Easting),
!is.na(UTM.Northing),
Symptoms.started >= ymd("2002-01-01"),
Symptoms.started >= "2002-01-01",
Symptoms.started <= ymd("2015-12-31")) %>%
# get uncertainty in days
mutate(days_uncertain = case_when(Symptoms.started.accuracy == "+/- 14 days" ~ 14L,
Symptoms.started.accuracy == "+/- 7 days" ~ 7L,
Symptoms.started.accuracy == "+/- 28 days" ~ 28L,
Symptoms.started.accuracy == "0" ~ 0L,
TRUE ~ 0L),
owned = ifelse(Owner %in% "Known", TRUE, FALSE))
# filter to one record per case ----
case_dt %>%
group_by(ID) %>%
slice(1) %>%
as.data.table() -> case_dt
case_dates <- data.table(id_case = case_dt$ID,
symptoms_started = case_dt$Symptoms.started)
# Use the `best` dists/cutoffs & known source to generate trees + incs ----
# This takes about 15 mins on my computer with 3 cores
i <-
tidyr::expand_grid(si_pdist = "lnorm",
dist_pdist = "weibull",
convolve = "mixed",
prune = FALSE,
cutoff = 1,
use_known = TRUE,
nsim = 1000)
i$seed <- 49
ttrees <- boot_trees(id_case = case_dt$ID,
id_biter = case_dt$Biter.ID,
x_coord = case_dt$UTM.Easting,
y_coord = case_dt$UTM.Northing,
owned = case_dt$owned,
date_symptoms = case_dt$Symptoms.started,
days_uncertain = case_dt$days_uncertain,
use_known_source = TRUE,
prune = i$prune,
si_fun = si_lnorm1,
dist_fun = dist_weibull_mixed,
params = treerabid::params_treerabid,
cutoff = i$cutoff,
N = i$nsim,
seed = i$seed)
# Summarize the trees
# do this outside of function to get min t_diff as well
links_all <- ttrees[, .(links = .N,
t_diff_min_days = min(t_diff),
t_diff_median_days = median(t_diff),
dist_diff_meters = median(dist_diff)),
by = c("id_case", "id_progen")][, prob := links/i$nsim]
links_consensus <- build_consensus_links(links_all, case_dates)
tree_ids <- c(mcc =
build_consensus_tree(links_consensus, ttrees, links_all,
type = "mcc", output = "sim"),
majority =
build_consensus_tree(links_consensus, ttrees, links_all,
type = "majority", output = "sim"))
ttrees$mcc <- ifelse(ttrees$sim %in% tree_ids["mcc"], 1, 0)
ttrees$majority <- ifelse(ttrees$sim %in% tree_ids["majority"], 1, 0)
set.seed(5679)
out_trees <- ttrees[sim %in% c(sample((1:i$nsim)[-tree_ids], 100), tree_ids)]
links_consensus <- cbind(links_consensus, i)
ttrees_all <- data.table(out_trees, cutoff = i$cutoff)
parallel::stopCluster(cl)
# Write out files
fwrite(ttrees_all, "output/trees/trees_sampled_unpruned.gz")
fwrite(links_consensus, "output/trees/consensus_links_unpruned.csv")
# Parse these from subutil for where to put things
syncto <- "~/Documents/Projects/Serengeti_Rabies/output/"
syncfrom <- "mrajeev@della.princeton.edu:Serengeti_Rabies/output/trees"
|
ccd5a3a03c0d793eb7d97a3fe8b8ec9f00bfd7de | e2f3ace7d5476cc8042514b3f93e466098aaf641 | /man/mae.Rd | fcf0a268d2647736bf9248aa3555e6a15340b57f | [] | no_license | erp12/rgp | 1527a5901fb6cb570e9461487fadb89a9bd66dd9 | 4f6e7a03585f75a139d232b8b817527d15c74d47 | refs/heads/master | 2020-12-31T02:22:38.126098 | 2016-08-22T21:42:32 | 2016-08-22T21:42:32 | 66,305,730 | 0 | 0 | null | 2016-08-22T20:30:13 | 2016-08-22T20:30:13 | null | UTF-8 | R | false | false | 309 | rd | mae.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{mae}
\alias{mae}
\title{Mean absolute error (MAE)}
\usage{
mae(x, y)
}
\arguments{
\item{x}{A numeric vector or list.}
\item{y}{A numeric vector or list.}
}
\value{
The MAE between \code{x} and \code{y}.
}
\description{
Mean absolute error (MAE)
}
|
45b885dea0156d7960a99abd0c30fdcd46e806ed | e521394f015a8992fa63b04d52f0a6b87ee08153 | /man/getPlot.Rd | b1d98defbdc0e6ce7de8a1cd8023efe69adead74 | [] | no_license | jshayiding/MSPC | e956d494e2c67d2a0a5ed40a4977cc3bbab0f0a1 | 66f23b8adb9ebe4d267ac72b0ff3dbd8c5c1146a | refs/heads/master | 2022-03-08T12:03:15.910696 | 2019-10-17T00:58:08 | 2019-10-17T00:58:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,733 | rd | getPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getPlot.R
\name{getPlot}
\alias{getPlot}
\title{Graphical view of different ERs set for each Chip-seq replicates.}
\usage{
getPlot(peakList_A, peakList_B, tau.s = 1e-08)
}
\arguments{
\item{peakList_A}{output of \link{runMSPC},
is set of all confirmed ERs in \link[GenomicRanges]{GRanges} objects.}
\item{peakList_B}{output of \link{runMSPC},
is set of all discarded ERs in \link[GenomicRanges]{GRanges} objects.}
\item{tau.s}{permissive threshold for stringent enriched regions,
all enriched regions below this threshold, are considered stringent ERs}
}
\value{
using \link[ggplot2]{ggplot} to generate stack bar plot for file bar
}
\description{
This function is served as graphical version of \link{export_ERs}.
To help user gaining deeper insight and biological evaluation of
analysis result, using \link[ggplot2]{ggplot} to generate
stack bar plot for each Chip-seq replicates can be done.
}
\examples{
# set up
library(GenomicRanges)
library(rtracklayer)
# load peak files
files <- getPeakFile()[1:3]
grs <- readPeakFiles(files, pvalueBase=1L)
## Exclude background noise
total.ERs <- denoise_ERs(peakGRs = grs, tau.w = 1.0E-04,
overwrite = TRUE)
## explore set of confirmed, discarde peaks
confirmedERs <- runMSPC(peakset = total.ERs, whichType = "max",
cmbStrgThreshold = 1.0E-08, isConfirmed = TRUE)
discardedERs <- runMSPC(peakset = total.ERs, whichType = "max",
cmbStrgThreshold = 1.0E-08, isConfirmed = FALSE)
# Visualize the output set for file bar
getPlot(peakList_A = confirmedERs,
peakList_B = discardedERs, tau.s = 1.0E-08)
}
\author{
Jurat Shahidin
}
|
0eddf10693f0e7dde56fa5ad5c4b8d91ae8e3d83 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/adegraphics/examples/S2.match-class.Rd.R | 4cc8355085240f2765e39897a675cd07fa4e62ad | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 230 | r | S2.match-class.Rd.R | library(adegraphics)
### Name: S2.match-class
### Title: Class 'S2.match'
### Aliases: S2.match S2.match-class prepare,S2.match-method
### panel,S2.match-method
### Keywords: classes
### ** Examples
showClass("S2.match")
|
b5cb6e3c971af6444093fd23d453fc9c8b00e8ab | 95f20674eb7e0afdd209229af28305c4af281879 | /plot4.R | 65696ed7ab9aee5402c1e2656a93ba4b1e641d56 | [] | no_license | iterion/ExData_Plotting1 | 56c6ac733f2425acd346fd86cf0ca90ca41e7e0c | 82b8c6a2d8c921661a41bc8d6bd0041b6f449c7c | refs/heads/master | 2021-01-21T07:08:06.509426 | 2014-05-11T20:27:03 | 2014-05-11T20:27:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 740 | r | plot4.R | png("plot4.png")
par(mfrow = c(2, 2))
with(power.consumption, {
# plot 1
plot(DateTime, Global_active_power,
type="l", ylab="Global Active Power (kilowatts)", xlab="")
# plot 1
plot(DateTime, Voltage,
type="l", ylab="Voltage", xlab="datetime")
# plot 3
plot(DateTime, Sub_metering_1,
type="l", ylab="Energy sub metering", xlab="")
lines(DateTime, Sub_metering_2, type="l", col="red")
lines(DateTime, Sub_metering_3, type="l", col="blue")
legend("topright",
col=c("black", "red", "blue"),
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1, bty="n")
# plot 4
plot(DateTime, Global_reactive_power,
type="l", xlab="datetime")
})
dev.off() |
b1f981c4346ed5b6c665bd22021b696ebe0c3aee | 4f2743db548d08f57ec5c441011d94c28aa0ccac | /R/model.R | a365eaed74801a06bd07aca98c78e450459a7c82 | [] | no_license | bergsmat/nonmemica | 85cdf26fa83c0fcccc89112c5843958669373a2a | 8eddf25fdd603a5aca719a665c5b9475013c55b3 | refs/heads/master | 2023-09-04T06:10:48.651153 | 2023-08-28T13:23:18 | 2023-08-28T13:23:18 | 78,268,029 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 19,843 | r | model.R | # bug: can't parse model if estimate on same line as $[THETA|OMEGA|SIGMA]
# bug: can't parse DV=DV1 in INPUT statement
globalVariables(c('item','.','parameter','estimate','se'))
#' Coerce to NONMEM Control Object
#'
#' Coerces to NONMEM control stream object. Generic: dispatches on the
#' class of \code{x}; methods are supplied for character and numeric
#' (a numeric run number is first converted to character).
#' @param x object of dispatch
#' @param ... passed arguments to the dispatched method
#' @return model
#' @family as.model
#' @export
#' @keywords internal
as.model <- function(x,...)UseMethod('as.model')
#' Coerce NONMEM Control Object to character
#'
#' Coerces NONMEM control stream object to character: each record is
#' rendered as its constituent lines, and the first line of every record
#' is prefixed with its '$RECORDNAME' tag (record names are upper-cased).
#' @param x model
#' @param ... ignored
#' @return character
#' @export
#' @family as.character
#' @keywords internal
as.character.model <- function(x,...){
  # An empty model renders as no lines at all.
  if(length(x) == 0) return(character(0))
  # Records carrying initial estimates ('inits') or itemized content
  # ('items') are measured with comwidth() so a shared set of column
  # widths can be derived for aligned rendering.
  is_meta <- vapply(x, inherits, logical(1), what = 'items') |
    vapply(x, inherits, logical(1), what = 'inits')
  meta <- lapply(x[is_meta], comwidth)
  widths <- maxWidths(meta)
  # widths is currently unused: the width-aware rendering is disabled
  # pending support for novel underlying object types (see next line).
  #x[] <- lapply(x,as.character,widths = widths) # to accommodate novel underlying object types
  x[] <- lapply(x, as.character)
  lines_per_record <- lengths(x)                  # lines contributed by each record
  record <- rep(seq_along(x), lines_per_record)   # record index for each output line
  flag <- runhead(record)                         # TRUE at each record's first line
  content <- as.character(unlist(x))
  nms <- toupper(names(x))
  # Tag the first line of each record with '$RECORDNAME'.
  # NOTE(review): this assumes every record renders at least one line;
  # otherwise nms recycles against content[flag] -- confirm upstream.
  content[flag] <- paste(paste0('$', nms), content[flag])
  # A record whose first line was empty would end in 'TAG '; trim it.
  content[flag] <- sub(' $', '', content[flag])
  content
}
#' Coerce Problem to Character
#'
#' Coerces NONMEM problem statement to character.
#' @param x problem
#' @param ... ignored
#' @return character
#' @export
#' @family as.character
#' @keywords internal
as.character.problem <- function(x, ...){
  # Renders a problem record as text, appending the 'runrecord' attribute
  # as ';;'-prefixed trailing comment lines. The 'Based on' field is
  # rendered inline on its numbered header line; every other field gets a
  # header line followed by one ';; ' line per element.
  runrec <- attr(x, 'runrecord')
  out <- x
  for(k in seq_along(runrec)){
    field <- names(runrec)[[k]]
    header <- paste0(';; ', k, '. ', field, ':')
    rendered <- if(field == 'Based on'){
      paste(header, runrec[[k]])
    } else {
      c(header, paste0(';; ', runrec[[k]]))
    }
    out <- c(out, rendered)
  }
  out
}
#' Coerce model to list
#'
#' Coerces model to list.
#' @param x model
#' @param ... dots
#' @return list
#' @export
#' @family as.list
#' @keywords internal
as.list.model <-
function(x,...)unclass(x)
#' Coerce to Model from Numeric
#'
#' Coerces to model from numeric by coercing to character.
#' @param x numeric
#' @param ... passed arguments
#' @export
#' @family as.model
#' @keywords internal
as.model.numeric <- function(x,...)as.model(as.character(x),...)
#' Coerce character to model
#'
#' Coerces chacter to model.
#' @param x character
#' @param ... ignored
#' @param pattern pattern to identify record declarations
#' @param head subpattern to identify declaration type
#' @param tail subpattern remaining
#' @param parse whether to convert thetas omegas and sigmas to inits, tables to items, and runrecords to fields
#' @return list
#' @export
#' @family as.model
#' @examples
#' library(magrittr)
#' options(project = system.file('project/model',package='nonmemica'))
#' 1001 %>% as.model
as.model.character <-
function(
x,
pattern='^\\s*\\$(\\S+)(\\s.*)?$',
head='\\1',
tail='\\2',
parse=TRUE,
...
){
# A length-one x is treated as a model name or path: resolve it to a
# file with modelfile() if necessary, then read the lines.
if(length(x) == 1){
if(!file_test('-f',x))x <- modelfile(x,...)
if(!file_test('-f',x))stop(x, ' does not exist as a file')
x <- readLines(x)
}
# any lines beginning with ;; are treated as trailing comments for $problem
y <- x[ grepl('^;;',x)] # y is lines in x beginning with ;;
x <- x[!grepl('^;;',x)] # these are dropped from x
# Identify record declarations ($NAME ...) and split the stream by them.
flag <- grepl(pattern,x)
nms <- sub(pattern,head,x) # record names from the $NAME portion
nms <- nms[flag]
nms <- tolower(nms)
content <- sub(pattern,tail,x) # record text with the $NAME stripped
content[flag] <- sub('^ ','',content[flag])
content <- split(content,cumsum(flag)) # group lines by record
content[['0']] <- NULL # drop any material before the first record
names(content) <- nms
class(content) <- c('model',class(content))
thetas <- names(content)=='theta'
omegas <- names(content)=='omega'
sigmas <- names(content)=='sigma'
tables <- names(content)=='table'
problem <- names(content) %in% c('prob','problem')
# NOTE(review): this indexing errors when the stream has no $PROBLEM
# record -- confirm that is the intended failure mode.
content[problem][[1]] <- c(content[problem][[1]], y) # append runrecord
# Optionally parse estimate records to 'inits', tables to 'items', and
# the problem statement to 'problem' objects.
if(parse)content[thetas] <- lapply(content[thetas],as.inits)
if(parse)content[omegas] <- lapply(content[omegas],as.inits)
if(parse)content[sigmas] <- lapply(content[sigmas],as.inits)
if(parse)content[tables] <- lapply(content[tables],as.items)
if(parse)content[problem] <- lapply(content[problem], as.problem)
content
}
#' Format model
#'
#' Format model.
#'
#' Coerces to character.
#' @param x model
#' @param ... passed arguments
#' @return character
#' @export
#' @family format
#' @keywords internal
format.model <-
function(x,...)as.character(x,...)
#' Print model
#'
#' Print model.
#'
#' Formats and prints.
#' @param x model
#' @param ... passed arguments
#' @return character
#' @export
#' @family print
#' @keywords internal
print.model <-
function(x,...)print(format(x,...))
#' Read model
#'
#' Read model.
#'
#' Reads model from a connection.
#' @param con model connection
#' @param parse whether to convert thetas to inits objects
#' @param ... passed arguments
#' @return character
#' @export
#' @family as.model
#' @keywords internal
read.model <-
function(con,parse=TRUE,...)as.model(readLines(con),parse=parse,...)
#' Write model
#'
#' Write model.
#'
#' writes (formatted) model to file.
#' @param x model
#' @param file passed to write()
#' @param ncolumns passed to write()
#' @param append passed to write()
#' @param sep passed to write()
#' @param ... passed arguments
#' @return used for side effects
#' @export
#' @family as.model
#' @keywords internal
write.model <-
function(x, file='data', ncolumns=1, append=FALSE, sep=" ", ...){
  # Serialize the model with format() and hand the lines to base::write();
  # all write() arguments are forwarded unchanged.
  serialized <- format(x)
  write(serialized, file = file, ncolumns = ncolumns,
        append = append, sep = sep, ...)
}
#' Subset model
#'
#' Subsets model.
#' @param x model
#' @param ... ignored
#' @param drop passed to subset
#' @return model
#' @export
#' @family as.model
#' @keywords internal
`[.model` <- function (x, ..., drop = TRUE){
  # Strip the class so the default list method performs the subsetting,
  # then restore it so the result is still a model.
  saved <- oldClass(x)
  class(x) <- NULL
  out <- NextMethod("[")
  class(out) <- saved
  out
}
#' Select model Element
#'
#' Selects model element.
#' @param x model
#' @param ... passed arguments
#' @param drop passed to element select
#' @return element
#' @export
#' @family as.model
#' @keywords internal
`[[.model` <- function (x, ..., drop = TRUE)NextMethod("[[")
#' Extract Thetas
#'
#' Extracts thetas.
#'
#'@param x object
#'@param ... passed arguments
#'@export
#'@family as.theta
#'@keywords internal
as.theta <- function(x,...)UseMethod('as.theta')
#' Extract Thetas from Model
#'
#' Extracts thetas from model.
#'
#'@param x model
#'@param ... passed arguments
#'@return theta (subset of model)
#'@export
#'@family as.theta
#'@keywords internal
as.theta.model <- function(x, ...){
  # Keep only the $THETA records; tag the subset as a 'theta' record set.
  keep <- names(x) %in% 'theta'
  thetas <- x[keep]
  class(thetas) <- union(c('theta', 'records'), class(thetas))
  thetas
}
#' Extract Omegas
#'
#' Extracts omegas.
#'
#'@param x object
#'@param ... passed arguments
#'@export
#'@family as.omega
#'@keywords internal
as.omega <- function(x,...)UseMethod('as.omega')
#' Extract Omegas from Model
#'
#' Extracts omegas from model.
#'
#'@param x model
#'@param ... passed arguments
#'@return omega (subset of model)
#'@export
#'@family as.omega
#'@keywords internal
as.omega.model <- function(x,...){
y <- x[names(x) %in% 'omega' ]
class(y) <- union(c('omega','records'), class(y))
y
}
#' Extract Sigmas
#'
#' Extracts sigmas.
#'
#'@param x object
#'@param ... passed arguments
#'@export
#'@keywords internal
as.sigma <- function(x,...)UseMethod('as.sigma')
#' Extract Sigmas from Model
#'
#' Extracts sigmas from model.
#'
#'@param x model
#'@param ... passed arguments
#'@return sigma (subset of model)
#'@export
#'@family as.sigma
#'@keywords internal
as.sigma.model <- function(x,...){
y <- x[names(x) %in% 'sigma' ]
class(y) <- union(c('sigma','records'), class(y))
y
}
#' Extract Tables
#'
#' Extracts tables.
#'
#'@param x object
#'@param ... passed arguments
#'@export
#'@family as.tab
#'@keywords internal
as.tab <- function(x,...)UseMethod('as.tab')
#' Extract Tables from Model
#'
#' Extracts tables from model.
#'
#'@param x model
#'@param ... passed arguments
#'@return tab (subset of model)
#'@export
#'@family as.tab
#'@keywords internal
as.tab.model <- function(x,...){
y <- x[names(x) %in% 'table' ]
class(y) <- union(c('tab','records'), class(y))
y
}
#' Extract Comments
#'
#' Extracts comments.
#'
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @family comments
#' @keywords internal
comments <- function(x,...)UseMethod('comments')
#' Extract Comments from Records
#'
#' Extracts comments from records.
#'
#' @param x records
#' @param ... ignored
#' @return data.frame
#' @describeIn comments record method
#' @export
#' @family comments
#'@keywords internal
#'
comments.records <- function(x,...){
y <- list()
prior <- 0
type = class(x)[[1]]
for(i in seq_along(x)){
this <- x[[i]]
y[[i]] <- comments(this, type=type, prior=prior)
prior <- prior + ord(this)
}
y <- if(length(y)){
do.call(rbind,y)
} else {
data.frame(item=character(0),comment=character(0))
}
class(y) <- union('comments',class(y))
y
}
#' Extract Comments from Model
#'
#' Extracts comments from model.
#'
#' @param x model
#' @param ... passed arguments
#' @param fields data items to scavenge from control stream comments
#' @param expected parameters known from NONMEM output
#' @param na string to use for NA values when writing default metafile
#' @param tables whether to include table comments
#' @return data.frame
#' @export
#' @family comments
#' @examples
#' library(magrittr)
#' options(project = system.file('project/model',package='nonmemica'))
#' 1001 %>% as.model %>% comments
comments.model <- function(
x,
fields=c('symbol','unit','label'),
expected=character(0),
na=NA_character_,
tables=TRUE,
...
){
t <- comments(as.theta(x))
o <- comments(as.omega(x))
s <- comments(as.sigma(x))
b <- comments(as.tab(x))
y <- rbind(t,o,s)
if(tables) y <- rbind(y,b)
y <- cbind(y[,'item',drop=F], .renderComments(
y$comment,fields=fields, na=na, ...))
if(length(expected)) y <- left_join(data.frame(stringsAsFactors=F,item=expected), y, by='item')
class(y) <- union('comments',class(y))
y
}
.renderComments <- function(x, fields, cumulative = NULL,na, ...){
if(length(fields) < 1) return(cumulative)
col <- fields[[1]]
dat <- sub('^([^;]*);?(.*)$','\\1',x)
rem <- sub('^([^;]*);?(.*)$','\\2',x)
dat <- sub('^\\s+','',dat)
dat <- sub('\\s+$','',dat)
out <- data.frame(stringsAsFactors=F, col = dat)
out$col[is.defined(out) & out == ''] <- na
names(out)[names(out) == 'col'] <- col
cum <- if(is.null(cumulative)) out else cbind(cumulative,out)
.renderComments(x=rem,fields=fields[-1],cumulative=cum, na=na)
}
#' Convert to Items
#'
#' Converts to items.
#'
#' @param x object
#' @param ... passed arguments
#' @export
#' @family as.itmes
#' @keywords internal
as.items <- function(x,...)UseMethod('as.items')
#' Convert to Items from Character
#'
#' Converts to items from character
#' @param x character
#' @param ... ignored
#' @return items
#' @export
#' @family as.items
#' @keywords internal
# Parses $TABLE record text into an 'items' object: a flat list of item
# names, each carrying a 'comment' attribute (the line's trailing comment
# is attached to the LAST item on that line; all others get ''). The raw
# input text is preserved in the 'text' attribute.
as.items.character <- function(x,...){
  txt <- x
  # for nonmem table items. 'BY' not supported
  x <- sub('FILE *= *[^ ]+','',x) # filename must not contain space
  reserved <- c(
    'NOPRINT','PRINT','NOHEADER','ONEHEADER',
    'FIRSTONLY','NOFORWARD','FORWARD',
    'NOAPPEND','APPEND',
    'UNCONDITIONAL','CONDITIONAL','OMITTED'
  )
  for(i in reserved) x <- sub(i,'',x) # remove reserved words
  x <- gsub(' +',' ',x) # remove double spaces
  x <- sub('^ *','',x) # rm leading spaces
  x <- sub(' *$','',x) # rm trailing spaces
  x <- x[!grepl('^;',x)] # rm pure comments
  x <- x[x!=''] # remove blank lines
  # each line is now a set of items followed by an optional comment that applies to the last item
  sets <- sub(' *;.*','',x) # rm first semicolon, any preceding spaces, and all following
  comment <- sub('^[^;]*;','',x) # select only material following the first semicolon
  comment[comment == x] <- '' # if pattern not found
  stopifnot(length(sets) == length(comment)) # one comment per set, even if blank
  # BUG FIX: the original used strsplit(sets, c(' ', ',')), but strsplit()
  # recycles its 'split' argument over the input vector, so odd-numbered
  # lines were split on spaces and even-numbered lines on commas only.
  # Normalize commas to spaces, then split every line uniformly.
  sets <- gsub(',', ' ', sets)  # commas separate items, just like spaces
  sets <- gsub(' +', ' ', sets) # collapse separators doubled by the substitution
  sets <- sub('^ ', '', sets)   # a leading comma would leave a dangling separator
  sets <- sub(' $', '', sets)   # ...likewise a trailing comma
  sets <- strsplit(sets, ' ', fixed = TRUE) # list of character vectors, possibly length one
  sets <- lapply(sets,as.list) # sets is now a list of lists of character vectors
  for(i in seq_along(sets)){ # for each line's set of items
    com <- comment[[i]] # the relevant comment
    len <- length(sets[[i]]) # the element on which to place the comment
    for(j in seq_along(sets[[i]])){ # assign each element of each set
      attr(sets[[i]][[j]],'comment') <- if(j == len) com else '' # blank, or comment for last element
    }
  }
  sets <- do.call(c,sets) # flatten to a single list of items
  class(sets) <- c('items','list')
  attr(sets,'text') <- txt # keep the raw record text for round-tripping
  sets
}
#' Format Items
#'
#' Formats items.
#' @param x items
#' @param ... passed arguments
#' @return character
#' @export
#' @family format
#' @keywords internal
format.items <-function(x,...)as.character(x,...)
#' Print Items
#'
#' Prints items.
#' @param x items
#' @param ... passed arguments
#' @return character
#' @export
#' @family print
#' @keywords internal
print.items <-function(x,...)print(format(x,...))
#' Extract Comments from Items
#'
#' Extracts comments from items.
#'
#' @param x items
#' @param ... ignored
#' @return data.frame
#' @export
#' @family comments
#'
comments.items <- function(x, ...){
  # One row per item: the item's text and its attached 'comment' attribute.
  item <- vapply(x, as.character, character(1))
  comment <- vapply(x, function(i) as.character(attr(i, 'comment'))[1], character(1))
  # BUG FIX: the original returned a character matrix from cbind(), while
  # the documented return value is a data.frame and downstream code
  # (comments.model) indexes the combined result with $ -- build a
  # data.frame explicitly (also consistent with comments.inits).
  dex <- data.frame(item = item, comment = comment, stringsAsFactors = FALSE)
  class(dex) <- union('comments', class(dex))
  dex
}
#' Extract Comments from Inits
#'
#' Extracts comments from inits.
#'
#' @param x inits
#' @param ... ignored
#' @param type item type: theta, omega, sigma (tables give items not inits)
#' @param prior number of prior items of this type (maybe imporant for numbering)
#' @return data.frame
#' @export
#' @family comments
#'
comments.inits <- function(x, type, prior,...){
block <- attr(x,'block')
com <- lapply(x,function(i)attr(i,'comment'))
com <- sapply(com, function(i){ # ensure single string
if(length(i) == 0) return('')
i[[1]]
})
stopifnot(length(com) == length(x))
if(block > 0) stopifnot(block == ord(as.halfmatrix(seq_along(x))))
block <- block > 0
dex <- if(block)as.data.frame(as.halfmatrix(com)) else data.frame(
row = seq_along(com), col=seq_along(com), x=com
)
dex$row <- padded(dex$row + prior,2)
dex$col <- padded(dex$col + prior,2)
dex$item <- type
dex$item <- paste(sep='_',dex$item,dex$row)
if(type %in% c('omega','sigma'))dex$item <- paste(sep='_', dex$item, dex$col)
dex <- rename(dex,comment = x)
dex <- select(dex,item,comment)
class(dex) <- union('comments',class(dex))
dex
}
#' Identify the order of an inits
#'
#' Identifies the order of an inits.
#'
#' Essentially the length of the list, or the length of the diagonal of a matrix (if BLOCK was defined).
#' @param x inits
#' @param ... ignored
#' @return numeric
#' @export
#' @family ord
#' @keywords internal
ord.inits <- function(x, ...){
  # The order is the declared BLOCK dimension when one exists (block > 0);
  # otherwise it is simply the number of initial estimates.
  blk <- attr(x, 'block')
  if(is.null(blk) || blk == 0) length(x) else blk
}
#' Identify the Order of an Items Object
#'
#' Identifies the order of an items object.
#'
#' Essentially the length of the list
#' @param x items
#' @param ... ignored
#' @return numeric
#' @export
#' @family ord
#' @keywords internal
ord.items <- function(x,...)length(x)
#' Identify Indices of Initial Estimates
#'
#' Identifies indices of initial Estimates.
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @family initDex
#' @keywords internal
initDex <- function(x,...)UseMethod('initDex')
#' Identify Indices of Initial Estimates in model
#'
#' Identifies record indices of initial estimates for an object of class model. If model has not been parsed, the result is integer(0). Otherwise, the result is the record numbers for the canonical order of all init objects among theta, omega, and sigma element types, regardless of the number and order of such types. If a block(2) omega is specified between two thetas and one sigma follows, the results could be c(6L, 8L, 7L, 7L, 7L, 9L).
#' @param x model
#' @param ... ignored
#' @return integer
#' @export
#' @family initDex
#' @keywords internal
#'
initDex.model <- function(x, ...){
  # Record positions of every initial estimate, taken in canonical
  # theta/omega/sigma order regardless of record order in the stream.
  idx <- seq_along(x)
  recs <- c(idx[names(x) == 'theta'],
            idx[names(x) == 'omega'],
            idx[names(x) == 'sigma'])
  ests <- x[recs]
  # An unparsed model carries no 'inits' objects: nothing to index.
  if(!all(sapply(ests, inherits, 'inits'))) return(integer(0))
  # Repeat each record number once per init it contains.
  rep(recs, times = sapply(ests, length))
}
#' Identify Subscripts
#'
#' Identifies subscripts.
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @family initSubscripts
#' @keywords internal
initSubscripts <- function(x,...)UseMethod('initSubscripts')
#' Identify Subscripts of Initial Estimates in model
#'
#' Identifies subscripts of record indices of initial estimates for an object of class model. If model has not been parsed, the result is integer(0). Otherwise, the result is the element number for each init object within each inits in x (canonical order).
#' @param x model
#' @param ... ignored
#' @return integer
#' @export
#' @family initSubscripts
#' @keywords internal
#'
initSubscripts.model <- function(x, ...){
  # Element numbers (1..k) of each init within its own record, for all
  # estimate records in canonical theta/omega/sigma order.
  idx <- seq_along(x)
  recs <- c(idx[names(x) == 'theta'],
            idx[names(x) == 'omega'],
            idx[names(x) == 'sigma'])
  ests <- x[recs]
  # An unparsed model carries no 'inits' objects: nothing to subscript.
  if(!all(sapply(ests, inherits, 'inits'))) return(integer(0))
  as.integer(unlist(lapply(sapply(ests, length), seq_len)))
}
#' Create the Updated Version of Something
#'
#' Creates the updated version of something. Don't confuse with stats::update.
#'
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @family updated
#' @keywords internal
updated <- function(x,...)UseMethod('updated')
#' Create the Updated Version of Numeric
#'
#' Creates the updated version of numeric by coercing to character.
#' @param x numeric
#' @param ... passed arguments
#' @export
#' @family updated
#' @keywords internal
updated.numeric <- function(x,...)updated(as.character(x),...)
#' Create the Updated Version of Character
#'
#' Creates the updated version of character by treating as a modelname. Parses the associated control stream and ammends the initial estimates to reflect model results (as per xml file).
#'
#' @param x character
#' @param initial values to use for initial estimates (numeric)
#' @param parse whether to parse the initial estimates, etc.
#' @param verbose extended messaging
#' @param ... passed arguements
#' @return model
#' @export
#' @family updated
updated.character <- function(x, initial = estimates(x,...), parse= TRUE,verbose=FALSE, ...){
y <- as.model(x, parse=TRUE,verbose=verbose,...)
initial(y) <- initial
y
}
#' Coerce to List of Matrices
#'
#' Coerces to list of matrices.
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @family as.matrices
#' @keywords internal
as.matrices <- function(x,...)UseMethod('as.matrices')
#' Coerce to List of Matrices from Records
#'
#' Coerces to list of matrices from Records
#' @param x records
#' @param ... ignored
#' @export
#' @family as.matrices
#' @keywords internal
as.matrices.records <- function(x,...){
y <- lapply(x,as.matrices)
z <- do.call(c,y)
z
}
#' Coerce to Matrices from Inits
#'
#' Coerces to matrices from inits. Non-block inits is expanded into list of matrices.
#'
#' @param x inits
#' @param ... ignored
#' @return matrices
#' @export
#' @family as.matrices
#' @keywords internal
as.matrices.inits <- function(x,...){
block <- attr(x,'block')
y <- sapply(x, `[[`, 'init')
stopifnot(length(y) >= 1)
if(block != 0) return(list(as.matrix(as.halfmatrix(y))))
return(lapply(y,as.matrix))
}
|
f28626cc11b20d4a394f6b68c904e721be1499c2 | b3a0e16fc9972c701e4b87dd9b8919f730af0e79 | /plot1.R | 321a0bd913d3587c0ce685977a64428523f1a62f | [] | no_license | celebros3019/ExData_Plotting1 | cc2504f466872ebce879c24646d9b55a3a4fa8d7 | 1202a28ca7538377d99d4cb2c31a4ed48e4dc508 | refs/heads/master | 2021-01-14T14:35:03.484584 | 2016-04-25T14:49:20 | 2016-04-25T14:49:20 | 47,931,889 | 0 | 0 | null | 2015-12-13T18:51:47 | 2015-12-13T18:51:47 | null | UTF-8 | R | false | false | 1,333 | r | plot1.R | # Download zip file ("exdata-data-household_power_consumption.zip") into the forked repo.
# Plot 1: red histogram of Global Active Power for 2007-02-01/02, written
# to plot1.png (480x480 px). Requires
# "exdata-data-household_power_consumption.zip" in the working directory,
# plus the tidyr and lubridate packages.
# Replace the first line with your personal working directory.
setwd("c:/Users/Teresa/Documents/GitHub/datasciencecoursera/ExData_Plotting1")
library(tidyr)      # separate()
library(lubridate)  # dmy(), ymd()
# unzip() extracts the archive and returns the extracted file path(s)
data <- unzip("exdata-data-household_power_consumption.zip")
household <- read.table(data[1], header = TRUE)
# The raw file is ';'-separated, so read.table leaves everything in one
# column; split it back into the nine named variables.
household <- separate(household,
                      col = Date.Time.Global_active_power.Global_reactive_power.Voltage.Global_intensity.Sub_metering_1.Sub_metering_2.Sub_metering_3,
                      into = c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage",
                               "Global_intensity", "Sub_metering_1",
                               "Sub_metering_2", "Sub_metering_3"),
                      sep = ";")
household$Date <- dmy(household$Date)
# Keep only the two days of interest (2007-02-01 and 2007-02-02)
dataset <- household[household$Date %in% ymd(c("2007-02-01", "2007-02-02")), ]
# BUG FIX: the original opened a stray, parameterless png() device (which
# wrote an unwanted Rplot001.png) and drew the histogram once to the default
# device before re-plotting it; open the target device first and draw once.
png("plot1.png", width = 480, height = 480)
hist(as.numeric(dataset$Global_active_power),
     col = "Red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
graphics.off()
e0c215038c94bb8262eaaec1838e845dd12ad0c2 | 25ab84c3e54d6bf983d2900a0b43e83b3b1f146b | /man/copy.Rd | f998a15142b67e9da5696cd17ec29fa4bff02cb0 | [] | no_license | frenkiboy/rstatic | f9dee36372879e02b928f8ecbae3064ec69607bd | 1e061e0f4edb8b6bc2040e424747b80e023af1f3 | refs/heads/master | 2020-12-13T09:05:08.574939 | 2019-10-16T03:37:14 | 2019-10-16T03:37:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,015 | rd | copy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/copy.R
\name{copy}
\alias{copy}
\title{Copy an RStatic Object}
\usage{
copy(x, ...)
}
\arguments{
\item{x}{The object to copy.}
\item{...}{Additional arguments to methods.}
\item{skip_set_parent}{(character) Names of fields which should never have
\code{set_parent} called on them.}
}
\description{
This function copies the given RStatic object, while ensuring that
parent-child relationships are preserved for the copied object.
}
\details{
If \code{x} is any other R6 object, \code{x} is deep-cloned. If \code{x} is
not an R6 object, no action is taken and \code{x} is returned.
Since RStatic objects are references, assignment does not make a copy. This
function can be used to explicitly copy an RStatic object.
}
\examples{
x = quote_ast(x <- "Hi DTL!")
y = x
z = copy(x)
x$read = Numeric$new(141)
# Changing 'x' changed 'y' (a reference), but not 'z' (a copy).
}
\seealso{
\code{set_parent}, which is used by this function.
}
|
2750e0d26c03ef1aa924174826187ea6867d727c | 25e7451c4d860ab4c0ba81e85a7b3dd7d0f916ae | /Analysis/mixtureModel3Cat.R | 6970036c6eb3ff96558fb9da8b39f526a3b8a85f | [] | no_license | fsingletonthorn/effectSizeAdjustment | 5e24c8846260439ba8c61be6d4c3cfebf4c1dab8 | 60827d9d7e563daf5b957845eb4157434d859dfd | refs/heads/master | 2020-03-30T19:35:02.851691 | 2019-11-19T23:43:29 | 2019-11-19T23:43:29 | 151,549,406 | 0 | 0 | null | 2019-09-25T23:21:13 | 2018-10-04T09:41:51 | R | UTF-8 | R | false | false | 1,586 | r | mixtureModel3Cat.R | model{
# Mixture Model Priors:
tau ~ dgamma(0.001,0.001) # vague prior on study precision
phi ~ ddirch(mPriorProb) # Flat prior on the model priors
mPriorProb[1] <- 1 # This sets the priors to be equal
mPriorProb[2] <- 1 # This sets the priors to be equal
mPriorProb[3] <- 1 # This sets the priors to be equal
alpha ~ dunif(0,1) # flat prior on attenuation factor for each replication project
# prior on true effect size of original studies:
for (i in 1:n){
trueOrgEffect[i] ~ dnorm(0, 1) # Normal prior on the original effect size
}
# Mixture Model Likelihood:
# Study level
for(i in 1:n){
clust[i] ~ dcat(phi)# cluster is equal to one of the categories with probability equal to cat
orgEffect[i] ~ dnorm(trueOrgEffect[i] , orgTau[i]) # the original effect is from a dist with a mean equal to the true org effect (estimated) w/ a precision equal to the SD of the org
# if clust[i] = 0 then H0 is true; if clust[i] = 1 then the true effect size is a function of the original effect size (times alpha),
# if phi == 2 the the effect is exactly equal to the original effect
# the observed replication effect is a function of the original effect:
mu[i] <- ifelse(clust[i] == 2, (alpha * trueOrgEffect[i]), ifelse(clust[i] == 3, trueOrgEffect[i], 0))
H1original[i] <- ifelse(clust[i] == 3, 1, 0)
H1decrease[i] <- ifelse(clust[i] == 2, 1, 0)
H0True[i] <- ifelse(clust[i] == 1, 1, 0)
trueRepEffect[i] ~ dnorm(mu[i], tau) T(0,)
repEffect[i] ~ dnorm(trueRepEffect[i] , repTau[i])
}
} |
65f893d8b3227130ad85d5634426a0371599870a | f1b9b81a57dad419c7216445b9a75df120a47791 | /R/SDMXComponents-methods.R | 8c4b0f8981fb20d05062311f829c23d66e364250 | [] | no_license | opensdmx/rsdmx | d71dc83799d76da3233ddfc0d4fa75ce5ff097b9 | 3c0c2316ff4fa237cdc62731d379a17369e05ae3 | refs/heads/master | 2023-08-31T01:56:25.934458 | 2023-08-28T09:57:08 | 2023-08-28T09:57:08 | 10,642,895 | 96 | 38 | null | 2021-04-21T20:41:10 | 2013-06-12T13:01:55 | R | UTF-8 | R | false | false | 6,129 | r | SDMXComponents-methods.R | #' @name SDMXComponents
#' @rdname SDMXComponents
#' @aliases SDMXComponents,SDMXComponents-method
#'
#' @usage
#' SDMXComponents(xmlObj, namespaces)
#'
#' @param xmlObj object of class "XMLInternalDocument derived from XML package
#' @param namespaces object of class "data.frame" given the list of namespace URIs
#' @return an object of class "SDMXComponents"
#'
#' @seealso \link{readSDMX}
#'
SDMXComponents <- function(xmlObj, namespaces){
  # Parse each component family from the XML, then assemble the S4 object.
  dims <- dimensions.SDMXComponents(xmlObj, namespaces)
  timeDim <- timedimension.SDMXComponents(xmlObj, namespaces)
  measure <- primarymeasure.SDMXComponents(xmlObj, namespaces)
  attrs <- attributes.SDMXComponents(xmlObj, namespaces)
  new("SDMXComponents",
      Dimensions = dims,
      TimeDimension = timeDim,
      PrimaryMeasure = measure,
      Attributes = attrs)
}
#get list of SDMXDimension
#=========================
dimensions.SDMXComponents <- function(xmlObj, namespaces){
  # The XPath differs by SDMX schema version: 2.1 nests dimensions under
  # a DimensionList element; earlier versions expose them directly.
  strNs <- findNamespace(namespaces, "structure")
  isV21 <- version.SDMXSchema(xmlDoc(xmlObj), namespaces) == "2.1"
  xpath <- if(isV21) "//str:DimensionList/str:Dimension" else "//str:Dimension"
  nodes <- getNodeSet(xmlDoc(xmlObj), xpath,
                      namespaces = c(str = as.character(strNs)))
  if(is.null(nodes)) return(NULL)
  lapply(nodes, SDMXDimension, namespaces)
}
#get SDMXTimeDimension
#=====================
timedimension.SDMXComponents <- function(xmlObj, namespaces){
# Returns the single SDMXTimeDimension declared by the data structure
# definition, or NULL when none is present.
timedimension <- NULL
sdmxVersion <- version.SDMXSchema(xmlDoc(xmlObj), namespaces)
VERSION.21 <- sdmxVersion == "2.1"
strNs <- findNamespace(namespaces, "structure")
timeDimXML <- NULL
if(VERSION.21){
# SDMX 2.1 nests the time dimension under a DimensionList element
timeDimXML <- getNodeSet(xmlDoc(xmlObj),
"//str:DimensionList/str:TimeDimension",
namespaces = c(str = as.character(strNs)))
}else{
# earlier schema versions expose TimeDimension directly
timeDimXML <- getNodeSet(xmlDoc(xmlObj),
"//str:TimeDimension",
namespaces = c(str = as.character(strNs)))
}
if(length(timeDimXML) > 0){
# at most one time dimension is expected; take the first matching node
timeDimensionXML <- timeDimXML[[1]]
timedimension <- SDMXTimeDimension(timeDimensionXML, namespaces)
}
return(timedimension)
}
#get SDMXPrimaryMeasure
#======================
primarymeasure.SDMXComponents <- function(xmlObj, namespaces){
  # A data structure definition declares at most one primary measure;
  # return it, or NULL when none is found.
  strNs <- findNamespace(namespaces, "structure")
  isV21 <- version.SDMXSchema(xmlDoc(xmlObj), namespaces) == "2.1"
  xpath <- if(isV21) "//str:MeasureList/str:PrimaryMeasure" else "//str:PrimaryMeasure"
  nodes <- getNodeSet(xmlDoc(xmlObj), xpath,
                      namespaces = c(str = as.character(strNs)))
  if(length(nodes) == 0) return(NULL)
  SDMXPrimaryMeasure(nodes[[1]], namespaces)
}
#get list of SDMXAttribute
#=========================
attributes.SDMXComponents <- function(xmlObj, namespaces){
# Returns the list of attribute components declared by the DSD, or NULL.
attributes <- NULL
sdmxVersion <- version.SDMXSchema(xmlDoc(xmlObj), namespaces)
VERSION.21 <- sdmxVersion == "2.1"
strNs <- findNamespace(namespaces, "structure")
if(VERSION.21){
# SDMX 2.1 nests attributes under an AttributeList element
attributesXML <- getNodeSet(xmlDoc(xmlObj),
"//str:AttributeList/str:Attribute",
namespaces = c(str = as.character(strNs)))
}else{
# earlier schema versions expose Attribute nodes directly
attributesXML <- getNodeSet(xmlDoc(xmlObj),
"//str:Attribute",
namespaces = c(str = as.character(strNs)))
}
if(!is.null(attributesXML)){
# NOTE(review): attribute nodes are parsed with SDMXDimension rather than
# a dedicated SDMXAttribute constructor -- confirm this reuse is
# intentional (downstream as.data.frame.SDMXComponents harvests slots
# generically, so it does not distinguish the class).
attributes <- lapply(attributesXML, SDMXDimension, namespaces)
}
return(attributes)
}
#methods
# Flattens an SDMXComponents object into one data.frame: one row per
# dimension, time dimension, primary measure, and attribute, with a
# 'component' column identifying the kind. Slot values are harvested
# generically via slotNames()/slot(), so each component type contributes
# whatever columns its class defines.
as.data.frame.SDMXComponents <- function(x, ...){
#dimensions: one row per dimension, columns = slot names
dimensions <- slot(x, "Dimensions")
dimensions.df <- as.data.frame(
do.call("rbind",
lapply(
dimensions,
function(x){
sapply(slotNames(x), function(elem){slot(x,elem)})
}
)
),stringsAsFactors = FALSE)
if(nrow(dimensions.df)>0){
dimensions.df <- cbind(component = "Dimension", dimensions.df,
stringsAsFactors = FALSE)
}
#time dimension: single row (or NULL when the DSD declares none)
timeDimension <- slot(x, "TimeDimension")
timeDimension.df <- NULL
if(!is.null(timeDimension)){
timeDimension.df <- as.data.frame(
t(sapply(slotNames(timeDimension), function(elem){slot(timeDimension,elem)})),
stringsAsFactors = FALSE
)
timeDimension.df <- cbind(component = "TimeDimension", timeDimension.df,
stringsAsFactors = FALSE)
}
#primary measure: single row
# NOTE(review): unlike TimeDimension, there is no NULL guard here -- a
# components object without a PrimaryMeasure would error; confirm one is
# always present at this point.
primaryMeasure <- slot(x, "PrimaryMeasure")
primaryMeasure.df <- as.data.frame(
t(sapply(slotNames(primaryMeasure), function(elem){slot(primaryMeasure,elem)})),
stringsAsFactors = FALSE
)
primaryMeasure.df <- cbind(component = "PrimaryMeasure", primaryMeasure.df,
stringsAsFactors = FALSE)
#attributes: one row per attribute
attributes <- slot(x, "Attributes")
attributes.df <- as.data.frame(
do.call("rbind",
lapply(
attributes,
function(x){
sapply(slotNames(x), function(elem){slot(x,elem)})
}
)
),stringsAsFactors = FALSE)
if(nrow(attributes.df)>0){
attributes.df <- cbind(component = "Attribute", attributes.df,
stringsAsFactors = FALSE)
}
#output: rbind.fill (plyr) pads columns missing from some pieces with NA
df<- do.call("rbind.fill", list(dimensions.df, timeDimension.df,
primaryMeasure.df, attributes.df))
return(encodeSDMXOutput(df))
}
# register the S4 coercion so as(x, "data.frame") uses the method above
setAs("SDMXComponents", "data.frame",
function(from) as.data.frame.SDMXComponents(from))
e39bb26b42d373f2e467efb9d8aaab6d2bc1f5ee | c3028bdbf293b17d5a431de7fb403f3e802f9c49 | /R/show-methods.R | 03520042f1ee1c99ef1c60cfc1dc02754b9e0696 | [] | no_license | UBod/procoil | 2a6c2455579ec2022fa33f4b511061a4abc9bf6e | 328134c3fe8eb814325a1dab9e98fa9312fa2d7a | refs/heads/master | 2022-11-23T04:11:49.111008 | 2022-11-01T15:05:37 | 2022-11-01T15:05:37 | 133,511,025 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,128 | r | show-methods.R | show.CCModel <- function(object)
{
    # Print a human-readable summary of a CCModel: kernel parameters, the
    # SVM offset, and the five largest / five smallest feature weights.
    cat("An object of class ", dQuote(class(object)), "\n\n")
    cat("Model parameters:\n\tcoiled coil kernel with m=", object@m, " and",
        ifelse(object@scaling, "", " without"),
        " kernel normalization\n", sep="")
    cat("\toffset b=", format(object@b, digits=4), "\n\n")
    cat("Feature weights:\n")
    # Rank features by weight (weights are stored as a 1-row matrix)
    ord <- order(object@weights[1, ], decreasing=TRUE)
    # Top five weights
    sel <- ord[1:5]
    cat(paste0("\t",
               formatC(object@weights[1, sel], format="f", digits=4, width=8),
               " ... ", colnames(object@weights)[sel]), sep="\n")
    # Ellipsis row separating the head and tail of the ranking
    cat("\t", formatC("...", format="s", width=8), " ... ...\n", sep="")
    # Bottom five weights
    sel <- ord[(length(ord) - 4):length(ord)]
    cat(paste0("\t",
               formatC(object@weights[1, sel], format="f", digits=4, width=8),
               " ... ", colnames(object@weights)[sel]), sep="\n")
    cat("\n")
}

# Register as the S4 show method so CCModel objects auto-print this summary
setMethod("show", signature(object="CCModel"), show.CCModel)
# show method for CCProfile objects: first prints the underlying prediction
# profile (via the inherited PredictionProfile show method), then a table of
# discriminant scores and predicted classes. For more than 10 samples only
# the first and last 5 rows are shown, separated by a row of dots.
show.CCProfile <- function(object)
{
    getMethod("show", signature(object="PredictionProfile"))(object)

    noOfDigits <- 9
    colWidth <- noOfDigits + 3   # digits + sign/integer part/decimal point

    # Decide whether to abbreviate the listing into head/tail blocks of 5
    noOfBlocks <- 1
    blockSize <- length(object@pred)
    cat("\nPredictions:\n")
    if (length(object@pred) > 10)
    {
        noOfBlocks <- 2
        blockSize <- 5
    }

    # Width of the sample-name column: the longest name (capped at 20 chars)
    # or the width of "[index]" labels when samples are unnamed.
    # Fix: the original took max() of the name *strings* (lexicographic) and
    # compared that to 20, yielding a character width; nchar() was intended.
    if (length(names(object@pred)) > 0)
        nwidth <- min(max(nchar(names(object@pred))), 20)
    else
        nwidth <- ceiling(log10(length(object@pred))) + 2

    offset <- 0

    for (i in 1:noOfBlocks)
    {
        if (i == 2)
            offset <- length(object@pred) - blockSize

        if (i == 1)
        {
            # Column headers
            cat(format(" ", width=nwidth))
            cat(format("Score", width=colWidth + 1, justify="right"))
            cat(format("Class", width=7, justify="right"))
            cat("\n")
        }

        for (j in (1 + offset):(blockSize + offset))
        {
            if (length(names(object@pred)) > 0)
            {
                sampleName <- names(object@pred)[j]

                # Truncate over-long names to 20 characters with an ellipsis
                if (nchar(sampleName) > 20)
                    sampleName <- paste0(substring(sampleName, 1, 17), "...")
            }
            else
                # Fix: nwidth must be passed as width=; positionally it would
                # land in format()'s 'trim' argument.
                sampleName <- format(paste0("[", j, "]"), width=nwidth,
                                     justify="right")

            cat(formatC(sampleName, format="s", width=nwidth))
            cat(formatC(object@disc[j], format="f", digits=noOfDigits,
                        width=colWidth + 1))
            cat(formatC(as.character(object@pred[j]), format="s", width=7))
            cat("\n")
        }

        if (i == 1 && noOfBlocks > 1)
        {
            # Separator row of dots between the head and tail blocks
            cat(formatC(paste(rep(".", nwidth - 2), sep="", collapse=""),
                        format="s", width=nwidth))
            cat(formatC(paste(rep(".", 6), sep="", collapse=""),
                        format="s", width=colWidth + 1))
            cat(formatC(paste(rep(".", 4), sep="", collapse=""),
                        format="s", width=7))
            cat("\n")
        }
    }

    cat("\n")
}

# Register as the S4 show method for CCProfile objects
setMethod("show", signature(object="CCProfile"), show.CCProfile)
|
28fffe80fa847f7079c2bcbe4ed9705f99131694 | 19634470cd630e194976613859b03e54f3b7968c | /SnowModel/Scripts/ASCIIConvert.R | 4ba8c98a749bf05a90fd17e859da24af0732d760 | [] | no_license | snowex-hackweek/snow-sinking | 7226d231f54dd529ce9f584d4cad5f377a95b583 | 31d61712d811639866fffbee5fea1a69b550ae03 | refs/heads/main | 2023-06-16T00:17:53.464228 | 2021-07-16T20:05:51 | 2021-07-16T20:05:51 | 385,720,429 | 0 | 2 | null | 2021-07-16T20:05:52 | 2021-07-13T19:55:10 | Jupyter Notebook | UTF-8 | R | false | false | 567 | r | ASCIIConvert.R | require(spatial)
# Load raster support (reading/writing gridded spatial data).
# NOTE(review): library() is preferred over require() for hard dependencies;
# require() only warns when a package is missing, deferring the failure.
library(raster)

# Work relative to the raster input folder next to this script.
# NOTE(review): setwd() plus rstudioapi means this only runs inside RStudio.
setwd(paste0(dirname(rstudioapi::getSourceEditorContext()$path),
             "/Input Data/Rasters"))

# Gather the GeoTIFF files to convert. The anchored regex fixes the original
# pattern ".tif", where the unescaped dot matched any character and the match
# was not tied to the end of the file name.
files <- list.files(path = "TIFs", pattern = "\\.tif$")

# Convert each GeoTIFF to an ESRI ASCII grid (signed 32-bit ints, NA = -9999).
for (file in files) {
  r <- raster(paste0("TIFs/", file))
  name <- tools::file_path_sans_ext(file)   # strip the ".tif" extension
  writeRaster(r, paste0(name, "_ASCII"), format = "ascii",
              overwrite = TRUE, datatype = 'INT4S', NAflag = -9999)
}

# Plot for testing/verification.
# NOTE(review): dem.r and landcover.r are not defined anywhere in this script;
# they presumably exist in the interactive session -- confirm before running
# non-interactively.
plot(dem.r)
plot(landcover.r)
56c414b908ffb3ed50c2158fc24a73c5195622b0 | 83fc42f1093b39713f27ced1b7bcec977f64314e | /HW3/Project3/ui.R | de7289092bf9e899f6d50d12c47b9c4ce51935a9 | [] | no_license | AlejandroOsborne/DATA608 | e5118aabe18a36c4dfe08f9d8a9b443373174a88 | a501b185a496efb3c50de69c420393592b28469a | refs/heads/master | 2020-04-22T02:29:08.014663 | 2019-05-17T06:00:01 | 2019-05-17T06:00:01 | 170,051,029 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# NOTE(review): ggplot2/dplyr/googleVis are not used in this UI definition;
# presumably they are needed by the paired server.R -- confirm.
library(ggplot2)
library(dplyr)
library(googleVis)

# CDC mortality data (1999-2010), fetched from GitHub when the UI is sourced.
# NOTE(review): this downloads on every app start; consider caching locally.
data <- read.csv("https://raw.githubusercontent.com/charleyferrari/CUNY_DATA_608/master/module3/data/cleaned-cdc-mortality-1999-2010-2.csv", stringsAsFactors = FALSE)

# Page layout: sidebar with two server-rendered selectors ("YearOutput" and
# "diseaseOutput"), main area with a plot tab and a table tab. The output IDs
# here must match the render* assignments in server.R.
ui <- fluidPage(
  titlePanel("Mortality Rate"),
  sidebarLayout(
    sidebarPanel(
      uiOutput("YearOutput"),
      uiOutput("diseaseOutput")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Plot", plotOutput("coolplot")),
        tabPanel("Table", tableOutput("results"))
      )
    )
  )
)
|
7d3938dfc0f995736047004a60684ab5cb4ce532 | 057d16a890f9d5af442efe61338679c23c24d826 | /src/PlotSurvivalCurves.R | 2c853eea66ecbf7028cd267dcdf35eb1c65e34cd | [] | no_license | lzdh/Using-Multi-omic-Cancer-Data-to-Find-Ways-to-Improve-the-Treatment-of-Bladder-Cancer | a5496639864d279aeaa2051dbdddbac9cb09ff4f | 52e2c42d10519c8b0f09a88380ae5e2a2d066b39 | refs/heads/master | 2021-03-22T03:06:30.932950 | 2019-11-15T08:43:43 | 2019-11-15T08:43:43 | 117,106,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,930 | r | PlotSurvivalCurves.R | ## R script is courtesy of Dr Richard S Savage
## Script to plot nice Kaplan Meier curves
##Function to plot survival curves for the output of a ISDF run
##NOTE: we assume here that the timeToEvent is given in months
##
PlotSurvivalCurves <- function(titleString, clusterIDs, died, timeToEvent, itemNames, nMinItems=5){
  ## Plot Kaplan-Meier survival curves, one curve per cluster.
  ##
  ## titleString - base title for the plot
  ## clusterIDs  - named vector of cluster assignments, one per item
  ## died        - event indicator per item (1 = event observed, 0 = censored)
  ## timeToEvent - time to event/censoring, assumed to be in months
  ## itemNames   - item identifiers used to label and align the outcome vectors
  ## nMinItems   - clusters with fewer members than this are dropped
  ##
  ## For two or more surviving clusters, a log-rank test p-value (null: all
  ## curves come from the same underlying distribution) is added to the title.
  library(survival)

  ## Label the outcome vectors so they can be aligned by item name
  names(died) <- itemNames
  names(timeToEvent) <- itemNames

  ## Keep only items with complete outcome information
  keep <- which(is.finite(died) & is.finite(timeToEvent))
  clusterIDs <- clusterIDs[keep]
  died <- died[keep]
  timeToEvent <- timeToEvent[keep]

  ## Drop clusters with fewer than nMinItems members. Vectorised replacement
  ## for the original sequential per-cluster removal (equivalent, since
  ## removing one cluster cannot change the size of another), and avoids the
  ## x[-integer(0)] footgun that empties a vector on an empty index.
  counts <- table(clusterIDs)
  bigEnough <- names(counts)[counts >= nMinItems]
  clusterIDs <- clusterIDs[as.character(clusterIDs) %in% bigEnough]

  ## Useful values and legend labels
  itemNames <- names(clusterIDs)
  uniqueIDs <- unique(clusterIDs)
  nClusters <- length(uniqueIDs)
  clusterLabels <- vapply(uniqueIDs, function(id){
    paste("Cluster", id, " (", sum(clusterIDs==id), " items)", sep="")
  }, character(1))

  ## Extract the outcome values for the retained items
  died <- died[itemNames]
  timeToEvent <- timeToEvent[itemNames]

  ## Kaplan-Meier fit; for 2+ clusters also compute a log-rank p-value
  chiSquared <- NULL
  survivalObject <- Surv(timeToEvent, died)
  survivalFit <- survfit(survivalObject~clusterIDs)
  if (nClusters>1){
    ## rho=0 gives the log-rank test (rho=1 would be Gehan-Wilcoxon with the
    ## Peto & Peto modification)
    survivalDiff <- survdiff(survivalObject~clusterIDs, rho=0)
    chiSquared <- pchisq(survivalDiff$chisq, length(survivalDiff$n)-1,
                         lower.tail=FALSE)
  }

  ## Generate the plot (line colours cycle through the current palette);
  ## with a single cluster the p-value slot in the title is left empty.
  plot(survivalFit, lty = 1:nClusters, conf.int=FALSE, col=palette())
  titleString <- paste(titleString, " (pValue=",
                       format(chiSquared, scientific=TRUE, digits=3), ")", sep="")
  title(titleString, xlab="number of months from diagnosis",
        ylab="Survival probability")
  legend("bottomright", clusterLabels, lty = 1:nClusters, box.lwd=2, cex=0.75,
         col=palette())
}
##*****************************************************************************
##*****************************************************************************
##----------------------------------------------------------------------
## ----------------------------------------
##----------------------------------------------------------------------
|
123d9145117b589ffc395acb7ab46171ce6fe079 | 6c37b3af3e8379222b238cb67b877a563a6f3dd4 | /R/tab.provenance.r | f4d2aaf4ff595e6e0886e059677afb6970f33298 | [] | no_license | ChristopherBarrington/seuratvis | e809fefabb9f6125d649558b2b860a0c6fe55772 | 413ddca360790eb4c277d0cdc2b14ec2791f1c04 | refs/heads/master | 2021-09-08T09:44:05.645790 | 2021-09-01T07:10:17 | 2021-09-01T07:10:17 | 242,530,342 | 0 | 0 | null | 2020-06-19T06:40:05 | 2020-02-23T14:21:23 | R | UTF-8 | R | false | false | 1,479 | r | tab.provenance.r | #'
#' Build the (unevaluated) UI definition for the provenance tab
#'
#' Returns a quoted expression (via bquote) that, when evaluated in the
#' caller's environment, appends this tab's menu item to `menus` and its
#' content to `contents`. Nothing is evaluated here.
provenance.tab <- function() {
  bquote({
    tab <- 'provenance_tab'
    # sidebar menu entry for the tab
    menuItem(text='Provenance', icon=icon('history'), tabName=tab) -> menu_item
    # tab body: heading, project-name info box, and a read-only code viewer
    tabItem(tabName=tab,
            h1('View the functions used to create this Seurat object'),
            fluidRow(dataset_info_text_box.ui(id=NS(tab, 'project_name'), width=12)),
            ace_editor.ui(id=NS(tab, 'editor'))) -> content
    # NOTE: `menus` and `contents` resolve where the expression is evaluated,
    # not in this function's environment
    menus %<>% append(list(menu_item))
    contents %<>% append(list(content))})
}
#' Server logic for the provenance tab (shiny module)
#'
#' Wires the right-sidebar option panels when the user navigates to this tab
#' and starts the child modules that render the provenance script viewer.
#' `server_*` arguments carry the top-level app's input/output/session so the
#' module can react to app-wide state (e.g. which sidebar tab is selected).
provenance_tab.server <- function(input, output, session, server_input, server_output, server_session, seurat) {
  # build the sidebar ui
  # Runs whenever the left sidebar selection changes; only acts when this
  # tab becomes active.
  observeEvent(eventExpr=server_input$left_sidebar, handlerExpr={
    tab <- 'provenance_tab'
    if(server_input$left_sidebar==tab) {
      # Seurat objects without a saved history cannot be shown: alert the
      # user and bounce them back to the configuration screen.
      if(seurat$provenance_missing) {
        error_alert(title='Analysis history', text='This Seurat object does not have a saved history.')
        go_to_config(session=server_session)
      }
      # append '-' so `tab` works as a module namespace prefix below
      tab %<>% str_c('-')
      renderUI({provenace_picker.ui(id=tab, seurat=seurat)}) -> server_output$right_sidebar.data_opts
      renderUI({p('No options')}) -> server_output$right_sidebar.plotting_opts}})

  # call the modules for this tab
  # NOTE(review): 'provenace_picker' (sic) spelling matches the module
  # definitions elsewhere in the package.
  provenace_picker <- callModule(module=provenace_picker.server, id='', seurat=seurat)
  callModule(module=dataset_info_text_box.project_name, id='project_name', seurat=seurat)
  callModule(module=ace_editor.server, id='editor', display_text=provenace_picker$script)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.