blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
982cd5516fe375f243459df76aa8da164444c8dd | bf304e7ffa1a7cd1c34358874e59f26aa18d8c6e | /man/kdplotnumz.Rd | 506640c804dc787378d5dee12018202e73ebbdae | [] | no_license | jpison/KDSeries | e9c05daa4296d3508c3d5ed497dd4667fb6164ee | 86835e1633698b1524edbff6393f569239b94433 | refs/heads/master | 2021-01-19T17:30:28.464273 | 2017-08-22T17:56:14 | 2017-08-22T17:56:14 | 101,063,307 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 668 | rd | kdplotnumz.Rd | \name{kdplotnumz}
\alias{kdplotnumz}
\title{kdplotnumz Function}
\description{Plot the number-of-zero-crossings curve versus the filter's window width}
\usage{
kdplotnumz <- function(MAT, Positions=1:dim(MAT$MFilt)[1])
}
\arguments{
\item{MAT}{Matrix with different filters (from kdmatfilter()).}
\item{Positions}{Plot filter placed in Positions.}
}
\value{Plots the relative maxima and minima of the time series that has been filtered by different window widths.}
\author{
Francisco Javier Martinez de Pisón. \cr\email{francisco.martinez@dim.unirioja.es}\cr
Miguel Lodosa Ayala. \cr\email{miguelodosa@hotmail.com}\cr
}
\references{
}
\seealso{
}
\keyword{}
|
601f3cf2b95457906a68df2caadc2212532b55b5 | 7dc95714e6ce1fc6c65279a8da618c69652f7fa9 | /man/geneInfo.Rd | d61565116131deb2b43bc1aa809a4a19a2b650e4 | [] | no_license | tiagochst/TCGAbiolinks | 0ed95a69b935edf9a94f24f7b31137698d5d3278 | f8f7fb9e2ad9147cb8ef09cc5d9dc19edb35e8db | refs/heads/master | 2021-01-18T12:33:31.573345 | 2015-11-08T11:18:47 | 2015-11-08T11:18:47 | 45,870,428 | 1 | 0 | null | 2015-11-09T22:16:58 | 2015-11-09T22:16:58 | null | UTF-8 | R | false | true | 293 | rd | geneInfo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCGAbiolinks.R
\docType{data}
\name{geneInfo}
\alias{geneInfo}
\title{geneInfo for normalization}
\format{A data frame with 20531 rows and 2 variables}
\description{
geneInfo for normalization
}
\keyword{internal}
|
080ddd59c15a6a59477294b4b91740d891ff21ac | 8ffb7983b6deba81d2a83ac959998cc86749ec00 | /plot2.R | 2e94ceadcdd671f2ccc7ed7f6de225795ec07fc4 | [] | no_license | kathleen-hop/ExData_Plotting1 | ffbb49838f930da6a8e876df8765ac346ea5ae93 | 442a3028ce0cc9f2b93b731d61b36f3fbb3d9c94 | refs/heads/master | 2022-12-24T18:45:34.651558 | 2020-09-22T01:32:16 | 2020-09-22T01:32:16 | 296,841,580 | 0 | 0 | null | 2020-09-19T10:19:17 | 2020-09-19T10:19:16 | null | UTF-8 | R | false | false | 678 | r | plot2.R | #read in data
# Plot 2: Global Active Power over 2007-02-01 and 2007-02-02, written to plot2.png.
# Reads the UCI "household power consumption" file (semicolon-separated).
power_data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Parse the date column and build a combined date-time stamp for the x axis.
power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
power_data$DateTime <- strptime(paste(power_data$Date, power_data$Time),
                                "%Y-%m-%d %H:%M:%S")
# Keep only the two days of interest.
day_one <- as.Date("2007-02-01")
day_two <- as.Date("2007-02-02")
two_days <- subset(power_data, (power_data$Date == day_one | power_data$Date == day_two))
# Render the line plot to the PNG device and close it again.
png("plot2.png")
plot(two_days$DateTime, two_days$Global_active_power, type = "l",
     xlab = NA, ylab = "Global Active Power (kilowatts)")
dev.off()
|
7739f3f797f1e7356fc38f0d8997b1e5b647cd63 | 168e077a90f0a3ec27b191cab93cb3ce0d8b8dd7 | /code/R/pval.R | b02d02f16320b5368ff80055082455e66285df3d | [] | no_license | runesen/icph | e71d2edd89aaef8cc1e21ec0fb4c40a84636e2a6 | 10b5ab5eb35b1765d7bc89c582f39e1d4d791b6a | refs/heads/master | 2020-03-26T04:01:50.991424 | 2019-12-08T22:18:28 | 2019-12-08T22:18:28 | 144,482,655 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 8,323 | r | pval.R | # Intersecting confidence regions and calculating pvalues
# source("fisher.info.R")
# Check whether the (1 - alpha) confidence regions of two fitted models
# MF1 and MF2 overlap for the parameters named in `testpars`.
# Delegates the geometric test to intersect.ellipses().
# NOTE(review): the default `alpha = l/ne` refers to variables from the
# calling scope (lazy evaluation); callers always pass alpha explicitly.
intersect.fun <- function(MF1, MF2, testpars, alpha=l/ne, arbvar){
  # Positions of the parameters under test within the stacked estimate.
  keep <- which(MF1$parameter %in% testpars)
  # With a shared variance (arbvar == FALSE) only the first "sigma" entry
  # is a free parameter, so drop the duplicated sigma positions.
  if (!arbvar & "sigma" %in% testpars) {
    sig_pos <- which(MF1$parameter == "sigma")
    keep <- keep[!(keep %in% sig_pos[2:length(sig_pos)])]
  }
  est1 <- unlist(MF1$theta)[keep]
  est2 <- unlist(MF2$theta)[keep]
  vc1 <- MF1$covmat[keep, keep]
  vc2 <- MF2$covmat[keep, keep]
  # TRUE iff the two ellipsoidal regions intersect at coverage 1 - alpha.
  intersect.ellipses(est1, est2, vc1, vc2, alpha)
}
# checks if two ellipsoid confidence regions with centers ci, covariance matrices Ci and coverage 1-alpha intersect
## BRAINSTORM: Would it be possible to express distance in terms of alpha (appears only in c)?
## Then we would not have to call this function over a grid of alpha values..
# Decide whether two ellipsoidal confidence regions intersect.
# The regions are centred at c1/c2 with covariance matrices C1/C2 and
# coverage 1 - alpha; returns TRUE when they overlap.
# NOTE(review): the defaults refer to `mle1`/`covmat1` etc. from the calling
# scope; callers always supply all arguments. makePositive() is defined
# elsewhere in the project and clips non-positive eigenvalues.
intersect.ellipses <- function(c1=mle1, c2=mle2, C1=covmat1, C2=covmat2, alpha){
  # Degenerate coverages: a 0%-region never overlaps, a 100%-region always does.
  if (alpha == 1) {
    return(FALSE)
  }
  if (alpha == 0) {
    return(TRUE)
  }
  m <- length(c1)
  q <- sqrt(qchisq(1 - alpha, df = m))
  if (m == 1) {
    # One dimension: intervals overlap iff the centres are closer than the
    # sum of their half-widths.
    overlaps <- c(abs(c1 - c2) < (sqrt(C1) + sqrt(C2)) * q)
  } else {
    # Whiten the first ellipsoid into the unit ball.
    eig1 <- eigen(C1)
    ax1 <- sqrt(makePositive(eig1$values))
    rot1 <- eig1$vectors
    ctr <- 1/q * diag(1/ax1) %*% t(rot1) %*% (c2 - c1)
    shape <- diag(1/ax1) %*% t(rot1) %*% C2 %*% rot1 %*% diag(1/ax1)
    # Principal axes of the transformed second ellipsoid.
    eig2 <- eigen(shape)
    rot2 <- eig2$vectors
    ax2 <- sqrt(makePositive(eig2$values, silent=TRUE))
    # Origin expressed in the second ellipsoid's axes, folded into the
    # first orthant by symmetry.
    z <- abs(-t(rot2) %*% ctr)
    if (sum((z/ax2)^2) <= 1) {
      # The origin lies inside the second ellipsoid: certain overlap.
      overlaps <- TRUE
    } else {
      # Newton-Raphson for the Lagrange multiplier of the projection of the
      # origin onto the ellipsoid; g decreases monotonically towards -1, so
      # convergence is sure and quadratic.
      g  <- function(t) sum((ax2 * z / (t + ax2^2))^2) - 1
      dg <- function(t) -2 * sum((z * ax2)^2 / (t + ax2^2)^3)
      t_cur <- 0
      g_cur <- g(t_cur)
      while (g_cur > 1e-4) {
        t_cur <- t_cur - g(t_cur)/dg(t_cur)
        g_cur <- g(t_cur)
      }
      # Projection of the origin onto the ellipsoid boundary.
      proj <- z * ax2^2 / (ax2^2 + t_cur)
      # Overlap iff that projection lies within the unit ball.
      overlaps <- (sqrt(sum((z - proj)^2)) < 1)
    }
  }
  return(overlaps)
}
# em is EM object, perm is a permutation of the vector (1,...,K)
# Relabel the mixture components of a fitted model MF according to `perm`,
# a permutation of (1, ..., K): the per-component parameter list, the
# component assignments `h` and the stacked covariance matrix are all
# reordered consistently.
permute.labels <- function(MF=MFs[[1]], perm){
  # Number of parameters per component (block size inside covmat).
  n_par <- length(unlist(MF$theta[[1]]))
  # Reorder the per-component parameter estimates.
  MF$theta <- lapply(perm, function(k) MF$theta[[k]])
  # Map every component assignment through the permutation.
  MF$h <- perm[MF$h]
  # Row/column indices of the permuted parameter blocks in covmat.
  shuffled <- c(sapply(1:MF$K, function(k) (perm[k] - 1) * n_par + 1:n_par))
  MF$covmat <- MF$covmat[shuffled, ][, shuffled]
  return(MF)
}
# Bonferroni-corrected p-value for the hypothesis that the parameters in
# `pars` are invariant across the ne fitted models in `MFs`, at level
# `level`. Searches over component relabelings (label switching) and over
# coverage levels for the smallest level at which all pairwise confidence
# regions still intersect. Returns list(pval, perm, reject).
pval.fun <- function(MFs, pars, level, output.pvalue){
  ne <- length(MFs)          # number of environments
  K <- MFs[[1]]$K            # number of mixture components
  d <- MFs[[1]]$d
  N <- MFs[[1]]$N
  arbvar <- MFs[[1]]$arbvar
  pval <- 1
  reject <- FALSE
  perm <- lapply(1:ne, function(i) list(1:K))
  # If only the intercept is tested for invariance, the problem reduces to testing the equality of (unconditional) normals.
  if(d==1 & MFs[[1]]$intercept){
    # Kolmogorov-Smirnov test of each environment against all others pooled.
    pval <- numeric(ne)
    for(i in 1:ne){
      y <- MFs[[i]]$y
      yy <- unlist(sapply((1:ne)[-i], function(j) MFs[[j]]$y))
      pval[i] <- ks.test(y, yy, alternative = "two.sided")$p.value
    }
    # Bonferroni correction over the ne comparisons.
    pval <- min(1, ne*min(pval))
    reject <- (pval < level)
    if(!output.pvalue) pval <- ifelse(reject, paste("<", level), paste(">=", level))
  }
  else{
    # If !output.pvalue, only check whether confidence regions intersect at level "level"
    if(output.pvalue){
      lseq <- 10^(seq(-4,0,length=100))   # log-spaced grid of candidate levels
    } else {
      lseq <- level
    }
    count <- 0   # NOTE(review): only used by the commented-out debug lines below
    ## Loop over coverage of confidence regions
    for(l in lseq){
      ## All permutations of estimate in first environment
      # NOTE(review): the next assignment is immediately overwritten by
      # comb <- list(list(1:K)) — the first environment's labeling is fixed
      # to the identity, so the gtools call here is redundant work.
      comb <- lapply(1:factorial(K), function(i) list(gtools::permutations(K,K)[i,]))
      comb <- list(list(1:K))
      keepIndex <- 1
      ## Loop over number of environments to compare
      for(nCompare in 2:ne){
        # keep only those configurations for which the first nCompare-1 environments overlap
        # If none overlap, we are done
        if(length(keepIndex) == 0) break
        comb <- lapply(keepIndex, function(i) comb[[i]])
        keepIndex <- c()
        tmp <- lapply(1:factorial(K), function(i) gtools::permutations(K,K)[i,])
        ## For each overlapping configuration, add each permutation for the nCompare'st
        comb <- unlist(lapply(1:length(comb), function(i){
          lapply(1:length(tmp), function(j){
            lst <- comb[[i]]
            lst[[length(lst)+1]] <- tmp[[j]]
            return(lst)
          }
          )}), recursive = FALSE)
        ## Loop over "active" combinations of the first nCompare environments
        for(k in 1:length(comb)){
          #if(k > length(comb)) break ## entries of comb are successively removed
          perms <- comb[[k]]
          all.intersect <- TRUE
          # count <- count+1
          # print(count)
          ## Check for pairwise intersection
          for(i in 1:(nCompare-1)){
            for(j in (i+1):nCompare){
              MFi <- permute.labels(MFs[[i]], unlist(perms[[i]]))
              MFj <- permute.labels(MFs[[j]], unlist(perms[[j]]))
              # Bonferroni-adjusted level l/ne for each pairwise region.
              all.intersect <- all.intersect*intersect.fun(MFi, MFj, pars, l / ne, arbvar)
              if(!all.intersect) break
            }
            if(!all.intersect) break
          }
          ## If the first nCompare environments intersect, keep this combination of permutations
          if(all.intersect) keepIndex <- c(keepIndex, k)
          ## When having found a combination for which all environments intersect, break and increase \ell
          if(nCompare==ne & all.intersect){perm <- perms; break}
        }
      }
      ## If no permutation lead to an intersection, set p-value to \ell and break. Otherwise increase \ell.
      if(!all.intersect){pval <- l; reject <- (pval < level); break}
    }
    ## If no intersection at this level, pval has been set to level and we reject.
    if(!output.pvalue){reject <- pval == level; pval <- ifelse(reject, paste("<", level), paste(">=", level))}
  }
  return(list(pval = pval, perm = perm, reject = reject))
}
## Old version of pval.fun, kept for reference only. The if(0) wrapper makes
## this dead code: the enclosed definition is never evaluated, so the active
## pval.fun above is the one in use. Differences: it enumerates ALL
## combinations of permutations up front via expand.grid (factorial(K)^ne of
## them) instead of pruning incrementally, and scans a linear level grid.
if(0){
  pval.fun <- function(MFs, pars, level, output.pvalue){
    ne <- length(MFs)
    K <- MFs[[1]]$K
    d <- MFs[[1]]$d
    N <- MFs[[1]]$N
    arbvar <- MFs[[1]]$arbvar
    pval <- 1
    reject <- FALSE
    perm <- lapply(1:ne, function(i) list(1:K))
    # If only intercept is tested for invariance, the problem reduces to testing the equality of (unconditional) normals.
    if(d==1 & MFs[[1]]$intercept){
      pval <- numeric(ne)
      for(i in 1:ne){
        y <- MFs[[i]]$y
        yy <- unlist(sapply((1:ne)[-i], function(j) MFs[[j]]$y))
        pval[i] <- ks.test(y, yy, alternative = "two.sided")$p.value
      }
      pval <- min(1, ne*min(pval))
      reject <- (pval < level)
      if(!output.pvalue) pval <- ifelse(reject, paste("<", level), paste(">=", level))
    }
    else{
      # Every combination of a component relabeling per environment.
      combinations <- expand.grid(lapply(1:ne, function(k){
        lapply(1:factorial(K), function(i) gtools::permutations(K,K)[i,])
      }))
      # If !output.pvalue, only check whether confidence regions intersect at level "level"
      if(output.pvalue){
        lseq <- seq(.01,1,.01)
      } else {
        lseq <- level
      }
      count<-0
      ## Loop over coverage of confidence regions
      for(l in lseq){
        for(k in 1:factorial(K)^ne){
          perms <- combinations[k,]
          all.intersect <- TRUE
          # count<-count+1
          # print(count)
          for(i in 1:(ne-1)){
            for(j in (i+1):ne){
              MFi <- permute.labels(MFs[[i]], unlist(perms[[i]]))
              MFj <- permute.labels(MFs[[j]], unlist(perms[[j]]))
              all.intersect <- all.intersect*intersect.fun(MFi, MFj, pars, l / ne, arbvar)
              if(!all.intersect) break
            }
            if(!all.intersect) break
          }
          if(all.intersect) break
        }
        if(!all.intersect){pval <- l; reject <- (pval < level); break}
      }
      if(!output.pvalue){reject <- pval == level; pval <- ifelse(reject, paste("<", level), paste(">=", level))}
    }
    return(list(pval = pval, perm = perm, reject = reject))
  }
}
cf3a3efe481b03f0c68ae971689d74aaf405d1bc | fbe895c5ceb7ffb1b49e4a3de9c8c9d1d3cd1aae | /man/gendata_Fac.Rd | 90dd0003252882af97583e0dd45b28a9049e72b5 | [] | no_license | feiyoung/TOSI | aba563334bf8e4cac07395a2494e2f9729cd2405 | ee68a1ca3239b60737749951b6e511cce0460c0f | refs/heads/master | 2023-05-27T14:41:49.209725 | 2023-04-29T13:01:52 | 2023-04-29T13:01:52 | 283,396,199 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,521 | rd | gendata_Fac.Rd | \name{gendata_Fac}
\alias{gendata_Fac}
\title{Generate simulated data}
\description{
Generate simulated data from high dimensional sparse factor model.
}
\usage{
gendata_Fac(n, p, seed=1, q=6, pzero= floor(p/4),
sigma2=0.1, gamma=1, heter=FALSE, rho=1)
}
\arguments{
\item{n}{a positive integer, the sample size.}
\item{p}{a positive integer, the variable dimension.}
\item{seed}{a nonnegative integer, the random seed, default as 1.}
\item{q}{a positive integer, the number of factors.}
\item{pzero}{a positive integer, the number of zero loading vectors, default as p/4.}
\item{sigma2}{a positive real number, the homogenous variance of error term.}
\item{gamma}{a positive number, the common component of heteroscedasticity of error term.}
\item{heter}{a logical value, indicates whether to generate a heteroscedastic error term.}
\item{rho}{a positive number, controlling the magnitude of loading matrix.}
}
\value{
return a list including the following components:
\item{X}{a \code{n}-by-\code{p} matrix, the observed data matrix.}
\item{H0}{a \code{n}-by-\code{q} matrix, the true latent factor matrix.}
\item{B0}{a \code{p}-by-\code{q} matrix, the true loading matrix, the last pzero rows are vectors of zeros.}
\item{ind_nz}{a integer vector, the index vector for which rows of \code{B0} not zeros.}
}
\author{
Liu Wei
}
\note{
nothing
}
\seealso{
\code{\link{Factorm}}.
}
\examples{
dat <- gendata_Fac(n=300, p = 500)
str(dat)
}
\keyword{Factor}
\keyword{Feature}
|
f7120c26f8364aaa21b599d776f53f230d236ed0 | 3e692132e9a1372d5c9181fed53871febc547298 | /GEE of Version 09.07/GEE main function real data 03.R | f327606fd5f49be7fc0a51298e5f78ac40dcfe90 | [] | no_license | Duanchenyang/WeightedADMM | df00cdaa0fb2df7c17922de9b1e5d65ee01741ca | 97c5d75f8aa1f50dc89605bb682aa7610e3c7e97 | refs/heads/master | 2022-12-15T08:03:19.222045 | 2020-09-08T12:08:18 | 2020-09-08T12:08:18 | 266,042,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,927 | r | GEE main function real data 03.R | library(plyr)
library(fda)#bspline basis
library(Matrix)
library(matrixcalc)
library(igraph)
library(Rcpp)
library(tidyverse)
library(matlab)
# ---------------------------------------------------------------------------
# GEE / ADMM clustering of pathogen relative-abundance curves on the
# juvenile growth data.
# NOTE(review): relies on project helpers knots_eq3(), B_ini0(),
# prclust_admm() and create_adjacency() defined elsewhere, plus the
# fda and igraph packages loaded above.
# ---------------------------------------------------------------------------
data <-read.csv('GEE of Version 09.07/juvenile_growth.csv', header = TRUE)

# Model settings. Fix: these were originally defined AFTER the B-spline
# basis was evaluated, so a clean top-to-bottom run failed (or picked up
# base::order for `order`); they must be defined before X is built.
nknots <- 3            # number of interior knots
order <- 3             # B-spline order
n <- 12                # number of response categories (pathogen types)
gamma1 <- 1            # penalty weight: smoothness
gamma2 <- 30000        # penalty weight: fusion / clustering
p <- order + nknots    # basis coefficients per curve

# Ages in months, and counts reconstructed from relative abundances
# (columns 12:23) times the total read number (column 2).
timerange <- data$age.years*12
y <- as.matrix(round(data[,12:23]*data[,2],0))
# B-spline basis evaluated at the observed ages.
X <- bsplineS(timerange,knots_eq3(timerange, k = order, m = nknots), norder = order)
individual <- data$Numeric.Animal.ID
time <- data$age.years*12
total_n<- data$read.number
Capture.Number<- data$Capture.Number
individual_time <- data.frame(time,total_n,individual,Capture.Number)

# Second-difference penalty: C stacks the second-difference operators,
# D = C'C is the roughness penalty, diagD repeats it for all n curves.
C <- matrix(0, nrow=nknots+order-2, ncol=nknots+order)
for (j in 1:(nknots+order-2)){
  d_j <- c(rep(0,j-1),1,-2,1,rep(0,(nknots+order)-3-(j-1)))
  e_j <- c(rep(0,j-1), 1 ,rep(0,(nknots+order)-3-(j-1)))
  C <- C + e_j%*%t(d_j)
}
D = t(C)%*%C
diagD <- kronecker(diag(1, n), D)
# All unordered pairs of curves (for the pairwise fusion penalty).
index = t(combn(n,2))

# Initial coefficients, then the penalized ADMM fit.
B_0 <- B_ini0(X,y)
try.sample <- prclust_admm(X=X, y=y, diagD=diagD, B_0=B_0, index=index,
                           gamma1 = gamma1, gamma2 = gamma2, theta=20000, tau = 2, n=n, p=p, max_iter=10000,
                           eps_abs=1e-3, eps_rel=1e-3,individual_time=individual_time)

# Turn the fused pairwise differences into an adjacency matrix, then into
# connected components = cluster memberships.
Ad_final <- create_adjacency(try.sample$V, n)
G_final <- graph.adjacency(Ad_final, mode = 'upper')
#clustering membership
cls_final <- components(G_final)
#number of clusters
k_final <- cls_final$no
cls_final
#####generate the plot############
finalresult <- cls_final
# Re-estimate one common coefficient vector per cluster: for every cluster,
# the (log-transformed) responses of all series assigned to it are pooled
# and a single least-squares fit on the shared basis X is computed; the
# solution is copied into every member's column of B_0.
# NOTE(review): depends on the global `finalresult` (cluster membership from
# the ADMM fit above) rather than taking it as an argument — confirm this
# coupling is intended before reusing the function elsewhere.
B_ini1 <- function(X,y){
  # One coefficient column per response series.
  B_0 <- matrix(nrow = ncol(X),ncol=ncol(y))
  logy <- log(y+1)   # +1 guards against log(0) for zero counts
  for(i in 1 :length(finalresult$csize)){
    # All series belonging to the i-th cluster label.
    choose_class <-unique(finalresult$membership)[i]
    sameg <- which(finalresult$membership==choose_class)
    # Stack the design matrix and responses of every member series.
    X_sameg<- NULL
    logy_sameg<-NULL
    for(j in 1:length(sameg)){
      X_sameg<- rbind(X_sameg,X)
      logy_sameg<- c(logy_sameg,logy[,sameg[j]])
    }
    # Pooled OLS solution (normal equations), replicated to all members.
    B_0solution <- solve(t(X_sameg)%*%X_sameg)%*%t(X_sameg)%*%logy_sameg
    B_0[,sameg]<- matrix(rep(B_0solution,length(sameg)),nrow=ncol(X))
  }
  return( B_0)
}
# NOTE(review): the next assignment is dead — it is immediately overwritten
# by the cluster-pooled re-fit on the following line.
B_coeffient <- try.sample$B
B_coeffient<- B_ini1(X,y)
# Fitted relative abundances via a softmax over the 12 categories, next to
# the observed data and the read totals.
trydata <- data.frame(exp(X%*%B_coeffient)/rowSums(exp(X%*%B_coeffient)),timerange,y,data[,2])
# Long format for the observed responses (named pathogen columns) ...
trydata1<- gather(trydata,Tvelifera.sc,TveliferaB,TveliferaUD,Tmutans1,Tmutans2,Tmutans3,
                  TmutansMSD,Tmutans.sc, TmutansUD,TspBg,TspBf,Tparva,key="type2",value="true_response2")
trydata1$type2 <- factor(trydata1$type2,level=c("Tvelifera.sc", "TveliferaB" ,
                                                "TveliferaUD", "Tmutans1",
                                                "Tmutans2" , "Tmutans3" ,
                                                "TmutansMSD" , "Tmutans.sc" ,
                                                "TmutansUD" , "TspBg" ,
                                                "TspBf" , "Tparva"))
# ... and for the fitted curves (softmax columns X1..X12, same order).
trydata2 <- gather(trydata,X1,X2,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,key="type",value="true_response")
trydata2$type <- factor(trydata2$type,level=c("X1","X2","X3","X4","X5","X6",
                                              "X7","X8","X9","X10","X11","X12"))
# Facet labels: map X1..X12 back to the pathogen names.
try.labs<-levels(trydata1$type2)
names(try.labs)<- c("X1","X2","X3","X4","X5","X6",
                    "X7","X8","X9","X10","X11","X12")
data.new <- data.frame(trydata2[,c("type","timerange","true_response")],trydata1[,c("true_response2","type2")],data[,2])
# Attach the cluster membership of each curve (from the ADMM fit).
data.new$group <- rep(0,length(data.new$type))
for(i in 1:12){
  data.new$group[data.new$type==paste("X",i,sep = "")] <- cls_final$membership[i]}
data.new$group<- as.factor(data.new$group)
# Observed relative abundances (points) with the fitted curves (lines),
# one facet per pathogen type.
ggplot(data.new)+
  geom_point(aes(x=timerange,y=true_response2/`data...2.`),color="gray50",size=0.8)+
  geom_line(aes(x=timerange,y=true_response),size=0.75)+
  facet_wrap( ~ type, ncol=4,labeller = as_labeller(try.labs))+
  labs(x='Age (month)',y='Relative abundance')
|
f7cac86c36ecb19c143cb9da6e95ae13580e604a | 12180c9c90eebc7137cef7f718c802b2d7047e44 | /calculate_overlap.R | d7ac888b51d0c8c6df5c2cf32863d25d70195c25 | [] | no_license | zhijcao/omics | 5444258e3cd1f20d2953b594ef649a3ec981af6c | ac924cfdfb6c465b1f9f2db0b34424468a9b9efc | refs/heads/main | 2023-08-28T01:30:03.572990 | 2021-11-11T20:02:32 | 2021-11-11T20:02:32 | 419,838,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,685 | r | calculate_overlap.R | col<-function (x)
{
  # Partition 1-5 sets into their Venn-diagram regions.
  # `x` is a *named* list of vectors; the result is a named list of the
  # unique and common parts of the inputs, with names built from names(x)
  # (e.g. "unique_A", "com_A_B"). The mixed "Unique"/"unique" capitalization
  # of some names is preserved for backward compatibility.
  #
  # Fix: the original computed every region as v[-which(v %in% s)], which
  # returns an EMPTY vector whenever NO element of v lies in s, because
  # which() yields integer(0) and v[-integer(0)] is v[integer(0)]. The
  # logical form v[!(v %in% s)] correctly returns all of v in that case.
  #
  # Drop from `v` every element occurring in `s` (duplicates in v kept).
  exclude <- function(v, s) v[!(v %in% s)]
  if (1 == length(x)) {
    # A single set has no partition; return the input as-is.
    overlap <- x
  }
  else if (2 == length(x)) {
    A = x[[1]]
    B = x[[2]]
    nab <- intersect(A, B)
    a1 = exclude(A, nab)   # only in A
    a2 = exclude(B, nab)   # only in B
    overlap <- list(a1 = a1, a2 = a2, a3 = nab)
    # make results easy to understand
    names(overlap) <- c(paste(c("unique",names(x)[1]),collapse="_"),
                        paste(c("unique",names(x)[2]),collapse="_"),
                        paste(c("com",names(x)),collapse="_"))
    return (overlap)
  }
  else if (3 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    # Pairwise and triple intersections.
    nab <- intersect(A, B)
    nbc <- intersect(B, C)
    nac <- intersect(A, C)
    nabc <- intersect(nab, C)
    a5 = nabc                      # in all three
    a2 = exclude(nab, a5)          # A&B only
    a4 = exclude(nac, a5)          # A&C only
    a6 = exclude(nbc, a5)          # B&C only
    a1 = exclude(A, c(a2, a4, a5)) # A only
    a3 = exclude(B, c(a2, a5, a6)) # B only
    a7 = exclude(C, c(a4, a5, a6)) # C only
    overlap <- list(a5 = a5, a2 = a2, a4 = a4, a6 = a6,
                    a1 = a1, a3 = a3, a7 = a7, nab=nab, nac=nac, nbc=nbc)
    names(overlap) <- c(paste(c("com",names(x)[1:3]),collapse="_"), #a5
                        paste(c("Unique","com",names(x)[1:2]),collapse="_"), #a2
                        paste(c("unique","com",names(x)[c(1,3)]),collapse="_"), #a4
                        paste(c("unique","com",names(x)[2:3]),collapse="_"), #a6
                        paste(c("unique",names(x)[1]),collapse="_"), #a1
                        paste(c("unique",names(x)[2]),collapse="_"), #a3
                        paste(c("unique",names(x)[3]),collapse="_"), #a7
                        paste(c("com",names(x)[1:2]),collapse="_"), #nab
                        paste(c("com",names(x)[c(1,3)]),collapse="_"), #nac
                        paste(c("com",names(x)[2:3]),collapse="_")) #nbc
    return (overlap)
  }
  else if (4 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    D <- x[[4]]
    # Pairwise, triple and quadruple intersections.
    n12 <- intersect(A, B)
    n13 <- intersect(A, C)
    n14 <- intersect(A, D)
    n23 <- intersect(B, C)
    n24 <- intersect(B, D)
    n34 <- intersect(C, D)
    n123 <- intersect(n12, C)
    n124 <- intersect(n12, D)
    n134 <- intersect(n13, D)
    n234 <- intersect(n23, D)
    n1234 <- intersect(n123, D)
    # Exclusive Venn regions, from the center (all four) outward.
    a6 = n1234
    a12 = exclude(n123, a6)
    a11 = exclude(n124, a6)
    a5 = exclude(n134, a6)
    a7 = exclude(n234, a6)
    a15 = exclude(n12, c(a6, a11, a12))
    a4 = exclude(n13, c(a6, a5, a12))
    a10 = exclude(n14, c(a6, a5, a11))
    a13 = exclude(n23, c(a6, a7, a12))
    a8 = exclude(n24, c(a6, a7, a11))
    a2 = exclude(n34, c(a6, a5, a7))
    a9 = exclude(A, c(a4, a5, a6, a10, a11, a12, a15))
    a14 = exclude(B, c(a6, a7, a8, a11, a12, a13, a15))
    a1 = exclude(C, c(a2, a4, a5, a6, a7, a12, a13))
    a3 = exclude(D, c(a2, a5, a6, a7, a8, a10, a11))
    overlap <- list(a6 = a6, a12 = a12, a11 = a11, a5 = a5,
                    a7 = a7, a15 = a15, a4 = a4, a10 = a10, a13 = a13,
                    a8 = a8, a2 = a2, a9 = a9, a14 = a14, a1 = a1, a3 = a3,
                    n12=n12, n13=n13, n14=n14, n23=n23, n24=n24, n34=n34,
                    n123=n123, n124=n124, n134=n134, n234=n234)
    names(overlap) <- c(paste(c("com",names(x)[1:4]),collapse="_"), #a6
                        paste(c("Unique","com",names(x)[1:3]),collapse="_"), #a12
                        paste(c("unique","com",names(x)[c(1,2,4)]),collapse="_"), #a11
                        paste(c("unique","com",names(x)[c(1,3,4)]),collapse="_"), #a5
                        paste(c("unique","com",names(x)[c(2,3,4)]),collapse="_"), #a7
                        paste(c("unique","com",names(x)[c(1,2)]),collapse="_"), #a15
                        paste(c("unique","com",names(x)[c(1,3)]),collapse="_"), #a4
                        paste(c("unique","com",names(x)[c(1,4)]),collapse="_"), #a10
                        paste(c("unique","com",names(x)[c(2,3)]),collapse="_"), #a13
                        paste(c("unique","com",names(x)[c(2,4)]),collapse="_"), #a8
                        paste(c("Unique","com",names(x)[c(3,4)]),collapse="_"), #a2
                        paste(c("unique",names(x)[1]),collapse="_"), #a9
                        paste(c("unique",names(x)[2]),collapse="_"), #a14
                        paste(c("unique",names(x)[3]),collapse="_"), #a1
                        paste(c("unique",names(x)[4]),collapse="_"), #a3
                        paste(c("com",names(x)[c(1,2)]),collapse="_"), #n12
                        paste(c("com",names(x)[c(1,3)]),collapse="_"), #n13
                        paste(c("com",names(x)[c(1,4)]),collapse="_"), #n14
                        paste(c("com",names(x)[c(2,3)]),collapse="_"), #n23
                        paste(c("com",names(x)[c(2,4)]),collapse="_"), #n24
                        paste(c("com",names(x)[c(3,4)]),collapse="_"), #n34
                        paste(c("com",names(x)[c(1,2,3)]),collapse="_"), #n123
                        paste(c("com",names(x)[c(1,2,4)]),collapse="_"), #n124
                        paste(c("com",names(x)[c(1,3,4)]),collapse="_"), #n134
                        paste(c("com",names(x)[c(2,3,4)]),collapse="_")) #n234
    return (overlap)
  }
  else if (5 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    D <- x[[4]]
    E <- x[[5]]
    # All pairwise, triple, quadruple and quintuple intersections.
    n12 <- intersect(A, B)
    n13 <- intersect(A, C)
    n14 <- intersect(A, D)
    n15 <- intersect(A, E)
    n23 <- intersect(B, C)
    n24 <- intersect(B, D)
    n25 <- intersect(B, E)
    n34 <- intersect(C, D)
    n35 <- intersect(C, E)
    n45 <- intersect(D, E)
    n123 <- intersect(n12, C)
    n124 <- intersect(n12, D)
    n125 <- intersect(n12, E)
    n134 <- intersect(n13, D)
    n135 <- intersect(n13, E)
    n145 <- intersect(n14, E)
    n234 <- intersect(n23, D)
    n235 <- intersect(n23, E)
    n245 <- intersect(n24, E)
    n345 <- intersect(n34, E)
    n1234 <- intersect(n123, D)
    n1235 <- intersect(n123, E)
    n1245 <- intersect(n124, E)
    n1345 <- intersect(n134, E)
    n2345 <- intersect(n234, E)
    n12345 <- intersect(n1234, E)
    # Exclusive Venn regions, from the center (all five) outward.
    a31 = n12345
    a30 = exclude(n1234, a31)
    a29 = exclude(n1235, a31)
    a28 = exclude(n1245, a31)
    a27 = exclude(n1345, a31)
    a26 = exclude(n2345, a31)
    a25 = exclude(n245, c(a26, a28, a31))
    a24 = exclude(n234, c(a26, a30, a31))
    a23 = exclude(n134, c(a27, a30, a31))
    a22 = exclude(n123, c(a29, a30, a31))
    a21 = exclude(n235, c(a26, a29, a31))
    a20 = exclude(n125, c(a28, a29, a31))
    a19 = exclude(n124, c(a28, a30, a31))
    a18 = exclude(n145, c(a27, a28, a31))
    a17 = exclude(n135, c(a27, a29, a31))
    a16 = exclude(n345, c(a26, a27, a31))
    a15 = exclude(n45, c(a18, a25, a16, a28, a27, a26, a31))
    a14 = exclude(n24, c(a19, a24, a25, a30, a28, a26, a31))
    a13 = exclude(n34, c(a16, a23, a24, a26, a27, a30, a31))
    a12 = exclude(n13, c(a17, a22, a23, a27, a29, a30, a31))
    a11 = exclude(n23, c(a21, a22, a24, a26, a29, a30, a31))
    a10 = exclude(n25, c(a20, a21, a25, a26, a28, a29, a31))
    a9 = exclude(n12, c(a19, a20, a22, a28, a29, a30, a31))
    a8 = exclude(n14, c(a18, a19, a23, a27, a28, a30, a31))
    a7 = exclude(n15, c(a17, a18, a20, a27, a28, a29, a31))
    a6 = exclude(n35, c(a16, a17, a21, a26, a27, a29, a31))
    a5 = exclude(E, c(a6, a7, a15, a16, a17, a18, a25,
                      a26, a27, a28, a31, a20, a29, a21, a10))
    a4 = exclude(D, c(a13, a14, a15, a16, a23, a24,
                      a25, a26, a27, a28, a31, a18, a19, a8, a30))
    a3 = exclude(C, c(a21, a11, a12, a13, a29, a22,
                      a23, a24, a30, a31, a26, a27, a16, a6, a17))
    a2 = exclude(B, c(a9, a10, a19, a20, a21, a11,
                      a28, a29, a31, a22, a30, a26, a25, a24, a14))
    a1 = exclude(A, c(a7, a8, a18, a17, a19, a9, a27,
                      a28, a31, a20, a30, a29, a22, a23, a12))
    overlap <- list(a31 = a31, a30 = a30, a29 = a29, a28 = a28,
                    a27 = a27, a26 = a26, a25 = a25, a24 = a24, a23 = a23,
                    a22 = a22, a21 = a21, a20 = a20, a19 = a19, a18 = a18,
                    a17 = a17, a16 = a16, a15 = a15, a14 = a14, a13 = a13,
                    a12 = a12, a11 = a11, a10 = a10, a9 = a9, a8 = a8,
                    a7 = a7, a6 = a6, a5 = a5, a4 = a4, a3 = a3, a2 = a2, a1 = a1,
                    n12=n12, n13=n13, n14=n14, n15=n15, n23=n23, n24=n24,
                    n25=n25, n34=n34, n35=n35, n45=n45, n123=n123, n124=n124, n125=n125,
                    n134=n134, n135=n135, n145=n145, n234=n234, n235=n235, n245=n245,
                    n345=n345, n1234=n1234, n1235=n1235, n1245=n1245, n1345=n1345, n2345=n2345)
    names(overlap) <- c(paste(c("com",names(x)[c(1,2,3,4,5)]),collapse="_"), #a31
                        paste(c("unique","com",names(x)[c(1,2,3,4)]),collapse="_"), #a30
                        paste(c("unique","com",names(x)[c(1,2,3,5)]),collapse="_"), #a29
                        paste(c("unique","com",names(x)[c(1,2,4,5)]),collapse="_"), #a28
                        paste(c("unique","com",names(x)[c(1,3,4,5)]),collapse="_"), #a27
                        paste(c("unique","com",names(x)[c(2,3,4,5)]),collapse="_"), #a26
                        paste(c("unique","com",names(x)[c(2,4,5)]),collapse="_"), #a25
                        paste(c("unique","com",names(x)[c(2,3,4)]),collapse="_"), #a24
                        paste(c("unique","com",names(x)[c(1,3,4)]),collapse="_"), #a23
                        paste(c("unique","com",names(x)[c(1,2,3)]),collapse="_"), #a22
                        paste(c("unique","com",names(x)[c(2,3,5)]),collapse="_"), #a21
                        paste(c("unique","com",names(x)[c(1,2,5)]),collapse="_"), #a20
                        paste(c("unique","com",names(x)[c(1,2,4)]),collapse="_"), #a19
                        paste(c("unique","com",names(x)[c(1,4,5)]),collapse="_"), #a18
                        paste(c("unique","com",names(x)[c(1,3,5)]),collapse="_"), #a17
                        paste(c("unique","com",names(x)[c(3,4,5)]),collapse="_"), #a16
                        paste(c("unique","com",names(x)[c(4,5)]),collapse="_"), #a15
                        paste(c("unique","com",names(x)[c(2,4)]),collapse="_"), #a14
                        paste(c("unique","com",names(x)[c(3,4)]),collapse="_"), #a13
                        paste(c("unique","com",names(x)[c(1,3)]),collapse="_"), #a12
                        paste(c("unique","com",names(x)[c(2,3)]),collapse="_"), #a11
                        paste(c("unique","com",names(x)[c(2,5)]),collapse="_"), #a10
                        paste(c("unique","com",names(x)[c(1,2)]),collapse="_"), #a9
                        paste(c("unique","com",names(x)[c(1,4)]),collapse="_"), #a8
                        paste(c("unique","com",names(x)[c(1,5)]),collapse="_"), #a7
                        paste(c("unique","com",names(x)[c(3,5)]),collapse="_"), #a6
                        paste(c("unique",names(x)[5]),collapse="_"), #a5
                        paste(c("unique",names(x)[4]),collapse="_"), #a4
                        paste(c("unique",names(x)[3]),collapse="_"), #a3
                        paste(c("unique",names(x)[2]),collapse="_"), #a2
                        paste(c("unique",names(x)[1]),collapse="_"), #a1
                        paste(c("com",names(x)[c(1,2)]),collapse="_"), #n12
                        paste(c("com",names(x)[c(1,3)]),collapse="_"), #n13
                        paste(c("com",names(x)[c(1,4)]),collapse="_"), #n14
                        paste(c("com",names(x)[c(1,5)]),collapse="_"), #n15
                        paste(c("com",names(x)[c(2,3)]),collapse="_"), #n23
                        paste(c("com",names(x)[c(2,4)]),collapse="_"), #n24
                        paste(c("com",names(x)[c(2,5)]),collapse="_"), #n25
                        paste(c("com",names(x)[c(3,4)]),collapse="_"), #n34
                        paste(c("com",names(x)[c(3,5)]),collapse="_"), #n35
                        paste(c("com",names(x)[c(4,5)]),collapse="_"), #n45
                        paste(c("com",names(x)[c(1,2,3)]),collapse="_"), #n123
                        paste(c("com",names(x)[c(1,2,4)]),collapse="_"), #n124
                        paste(c("com",names(x)[c(1,2,5)]),collapse="_"), #n125
                        paste(c("com",names(x)[c(1,3,4)]),collapse="_"), #n134
                        paste(c("com",names(x)[c(1,3,5)]),collapse="_"), #n135
                        paste(c("com",names(x)[c(1,4,5)]),collapse="_"), #n145
                        paste(c("com",names(x)[c(2,3,4)]),collapse="_"), #n234
                        paste(c("com",names(x)[c(2,3,5)]),collapse="_"), #n235
                        paste(c("com",names(x)[c(2,4,5)]),collapse="_"), #n245
                        paste(c("com",names(x)[c(3,4,5)]),collapse="_"), #n345
                        paste(c("com",names(x)[c(1,2,3,4)]),collapse="_"), #n1234
                        paste(c("com",names(x)[c(1,2,3,5)]),collapse="_"), #n1235
                        paste(c("com",names(x)[c(1,2,4,5)]),collapse="_"), #n1245
                        paste(c("com",names(x)[c(1,3,4,5)]),collapse="_"), #n1345
                        paste(c("com",names(x)[c(2,3,4,5)]),collapse="_")) #n2345
    return (overlap)
  }
  else {
    # More than five sets is not supported.
    flog.error("Invalid size of input object", name = "VennDiagramLogger")
    stop("Invalid size of input object")
  }
}
|
0a82f8142307a4fd3a650e7e17644d983bca5353 | 6bf04c9c71d566dfcbc5d498652764ce29cb1fd0 | /analysis/social_interdependence.R | c530a7ba0bf93b42b0e839343e210b7de3dfac7e | [] | no_license | bilakhiaricky/4cases_routines | df49257bd1e97953e310a63b837e160c4c36004c | d9d341b2a74e6ca61378c5234713d24f5d60abe3 | refs/heads/master | 2021-01-10T22:42:18.661241 | 2016-03-08T19:20:01 | 2016-03-08T19:20:01 | 54,907,444 | 1 | 0 | null | 2016-03-28T16:49:34 | 2016-03-28T16:49:34 | null | UTF-8 | R | false | false | 2,824 | r | social_interdependence.R | # load the data
# Build a co-participation network for one project: every pair of actors
# that touched the same pull request becomes an edge.
# NOTE(review): relies on the project helper read_seqdata_for_network(),
# plyr::try_default() and igraph::graph_from_edgelist().
create_network <- function(project){
  # Activity log for this project within a fixed one-year window.
  activity <- read_seqdata_for_network(paste0("/Users/aron/git/4cases_routines/data/activity-", project, ".txt"), '2012-01-06', '2013-01-06')
  # Actors involved in each pull request (one character vector per PR id).
  per_pr <- split(activity, activity$id)
  actors_per_pr <- unname(lapply(per_pr, function(pr) as.character(pr$actor)))
  # All unordered actor pairs within a pull request; combn() errors when a
  # PR has fewer than two actors, so try_default() maps that case to NULL.
  pair_matrix <- function(actors){
    try_default(t(combn(actors, 2)), default = NULL)
  }
  pr_edges <- lapply(actors_per_pr, pair_matrix)
  # Stack the per-PR edge lists (NULLs are dropped by rbind) into one
  # network covering the whole project.
  edges <- do.call(rbind, pr_edges)
  whole_network <- graph_from_edgelist(edges)
  (whole_network)
}
# At this point we need to extract an SNA-statistic for each actor, that we can match with the list of actors associated with each pull request.
# degree_list <- igraph::degree(g)
# Calculate degrees for each pull request
# pr_degrees <- rapply(filename_lists3, function(x) ifelse(x %in% names(degree_list), degree_list[x], x), how='replace')
# output_list <- vector(mode = "double", length = length(pr_degrees))
# for (i in 1:length(pr_degrees)){
# ifelse(is.null(output_list[i]),output_list[i] <- NA,try_default(output_list[i] <- sum(pr_degrees[[i]]), default = NA))
# }
# names(output_list) <- names(pr_degrees)
# pr_degrees_sums <- t(as.data.frame(lapply(output_list, sum)))
# pr_degrees_sums[pr_degrees_sums == 0] <- NA
# One collaboration network per project.
rubinius_network <- create_network("rubinius-rubinius")
rails_network <- create_network("rails-rails")
django_network <- create_network("django-django")
bootstrap_network <- create_network("twitter-bootstrap")
# Community structure: modularity of the walktrap partition per project
# (results printed interactively, not stored).
modularity(walktrap.community(rails_network))
modularity(walktrap.community(rubinius_network))
modularity(walktrap.community(django_network))
modularity(walktrap.community(bootstrap_network))
# Betweenness-based network centralization per project.
centralization.betweenness(rubinius_network)$centralization
centralization.betweenness(rails_network)$centralization
centralization.betweenness(bootstrap_network)$centralization
centralization.betweenness(django_network)$centralization
# Plots
plot(django_network, #the graph to be plotted
     layout=layout.fruchterman.reingold, # the layout method. see the igraph documentation for details
     vertex.label.dist=0.5, #puts the name labels slightly off the dots
     vertex.frame.color='blue', #the color of the border of the dots
     vertex.label.color='black', #the color of the name labels
)
# asNetwork() converts an igraph object for use with plot.network().
library(intergraph)
plot.network(asNetwork(django_network),
             vertex.col="#FF000020",
             vertex.border="#FF000020",
             edge.col="#FFFFFF")
plot.network(asNetwork(rubinius_network),
             vertex.col="#FF000020",
             vertex.border="#FF000020",
             edge.col="#FFFFFF")
558e264a4ec17cccb73b62ea9ad6d7d938025478 | a4981f3a463848081f67846c8b460a89c39c2353 | /twitter-rest-api.R | 8a91318081cb00af39632e01e98788a437ecb11a | [] | no_license | MUSA-620-Spring-2017/MUSA-620-Week-4 | 842bdb3e9d72910a523f5d53a7945df5f0e8677e | d476e48d2923b64160d0d736d3d9d39ca32fdd8e | refs/heads/master | 2021-06-11T18:28:49.472499 | 2017-02-10T22:32:01 | 2017-02-10T22:32:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | r | twitter-rest-api.R | library(twitteR)
# Twitter REST API credentials for the twitteR OAuth handshake.
# NOTE(review): the secrets are blank here; never commit real keys to
# source control — load them from environment variables or a local
# config file instead.
access_token <- ""
access_secret <-""
consumer_key <- ""
consumer_secret <- ""
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
#When it prompts you with the question below, answer 2: No
# Use a local file to cache OAuth access credentials between R sessions?
# 1: Yes
# 2: No
# Example queries: a user timeline and a hashtag search (100 tweets each).
myTweets <- userTimeline("galka_max", n=100)
rstatsTweets = searchTwitter("#rstats", n=100)
848a032594a1d0b0c400578e1c37c3002b91acae | 505ee53f4392dca51fccfc3b63f1c9814db93b1d | /R/dd_KI_sim.R | e377728185340ce04d9f4bce5c4cee4f6a64b91b | [] | no_license | EvoLandEco/DDD | 6ba006a696c17801b5f51350e28b5e5f29dc0b0f | 31b10b3f3aeda596570aabc264ca988dd8dbc4fc | refs/heads/master | 2021-07-11T00:36:31.811962 | 2020-12-07T14:48:34 | 2020-12-07T14:48:34 | 220,970,383 | 1 | 0 | null | 2019-11-11T11:51:36 | 2019-11-11T11:51:36 | null | UTF-8 | R | false | false | 13,137 | r | dd_KI_sim.R | dd_KI_lamuN = function(ddmodel,pars,N)
{
  # Per-capita rates under diversity-dependence, evaluated for both clades
  # at the current diversities. Returns c(laM_N, muM_N, laS_N, muS_N).
  # Vectorised over the two clades: index 1 = main clade (M), 2 = subclade (S).
  la0 <- pars[c(1, 4)]  # intrinsic (diversity-independent) speciation rates
  mu0 <- pars[c(2, 5)]  # intrinsic extinction rates
  K0 <- pars[c(3, 6)]   # carrying capacities (or exponent x for ddmodel 2.3)
  n <- c(N[1], N[2])    # current species counts per clade
  # ddmodel 2 and 4 shift the power-law by one species; all others use 0.
  offset <- as.numeric(ddmodel == 2 | ddmodel == 4)
  if (ddmodel == 1) {
    # linear diversity-dependence in speciation; K = equilibrium diversity
    la <- pmax(0, la0 - (la0 - mu0) * n / K0)
    mu <- mu0
  }
  if (ddmodel == 1.3) {
    # linear diversity-dependence in speciation; K' = diversity where la = 0
    la <- pmax(0, la0 * (1 - n / K0))
    mu <- mu0
  }
  if (ddmodel == 2 | ddmodel == 2.1 | ddmodel == 2.2) {
    # power-law ("exponential") dependence in speciation; for 2.2 the
    # exponent collapses to 1 (the `^(ddmodel != 2.2)` trick), giving 1/n.
    expo <- (log(la0 / mu0) / log(K0 + offset))^(ddmodel != 2.2)
    la <- la0 * (n + offset)^(-expo)
    mu <- mu0
  }
  if (ddmodel == 2.3) {
    # power-law dependence in speciation with the K slot holding exponent x
    la <- la0 * (n + offset)^(-K0)
    mu <- mu0
  }
  if (ddmodel == 3) {
    # linear diversity-dependence in extinction
    la <- la0
    mu <- mu0 + (la0 - mu0) * n / K0
  }
  if (ddmodel == 4 | ddmodel == 4.1 | ddmodel == 4.2) {
    # power-law dependence in extinction; exponent is 1 for ddmodel 4.2
    expo <- (log(la0 / mu0) / log(K0 + offset))^(ddmodel != 4.2)
    la <- la0
    mu <- mu0 * (n + offset)^expo
  }
  # An unsupported ddmodel leaves `la`/`mu` undefined and errors here,
  # matching the original behavior (callers validate ddmodel first).
  return(c(la[1], mu[1], la[2], mu[2]))
}
#' Function to simulate a key innovation in macro-evolution with the innovative
#' clade decoupling from the diversity-dependent diversification dynamics of
#' the main clade
#'
#' Simulating a diversity-dependent diversification process where at a given
#' time a new clade emerges with different inherent speciation rate and
#' extinction rate and clade-level carrying capacity and with decoupled
#' dynamics
#'
#'
#' @param pars Vector of parameters: \cr \cr \code{pars[1]} corresponds to
#' lambda_M (speciation rate of the main clade) \cr \code{pars[2]} corresponds
#' to mu_M (extinction rate of the main clade) \cr \code{pars[3]} corresponds
#' to K_M (clade-level carrying capacity of the main clade) \code{pars[4]}
#' corresponds to lambda_S (speciation rate of the subclade) \cr \code{pars[5]}
#' corresponds to mu_S (extinction rate of the subclade) \cr \code{pars[5]}
#' corresponds to K_S (clade-level carrying capacity of the subclade) \cr
#' \code{pars[7]} tinn, the time the shift in rates occurs in the lineage
#' leading to the subclade
#' @param age Sets the crown age for the simulation
#' @param ddmodel Sets the model of diversity-dependence: \cr \code{ddmodel ==
#' 1} : linear dependence in speciation rate with parameter K (= diversity
#' where speciation = extinction)\cr \code{ddmodel == 1.3} : linear dependence
#' in speciation rate with parameter K' (= diversity where speciation = 0)\cr
#' \code{ddmodel == 2} : exponential dependence in speciation rate with
#' parameter K (= diversity where speciation = extinction)\cr \code{ddmodel ==
#' 2.1} : variant of exponential dependence in speciation rate with offset at
#' infinity\cr \code{ddmodel == 2.2} : 1/n dependence in speciation rate\cr
#' \code{ddmodel == 2.3} : exponential dependence in speciation rate with
#' parameter x (= exponent)\cr \code{ddmodel == 3} : linear dependence in
#' extinction rate \cr \code{ddmodel == 4} : exponential dependence in
#' extinction rate \cr \code{ddmodel == 4.1} : variant of exponential
#' dependence in extinction rate with offset at infinity \cr \code{ddmodel ==
#' 4.2} : 1/n dependence in extinction rate with offset at infinity
#' @return \item{ out }{ A list with the following elements: The first element
#' is the tree of extant species in phylo format \cr The second element is the
#' tree of all species, including extinct species, in phylo format \cr The
#' third element is a matrix of all species where \cr - the first column is the
#' time at which a species is born \cr - the second column is the label of the
#' parent of the species; positive and negative values only indicate whether
#' the species belongs to the left or right crown lineage \cr - the third
#' column is the label of the daughter species itself; positive and negative
#' values only indicate whether the species belongs to the left or right crown
#' lineage \cr - the fourth column is the time of extinction of the species \cr
#' If the fourth element equals -1, then the species is still extant.\cr - the
#' fifth column indicates whether the species belong to the main clade (0) or
#' the subclade (1)\cr The fourth element is the subclade tree of extant
#' species (without stem) \cr The fifth element is the subclade tree of all
#' species (without stem) \cr The sixth element is the same as the first,
#' except that it has attributed 0 for the main clade and 1 for the subclade\cr
#' The seventh element is the same as the second, except that it has attributed
#' 0 for the main clade and 1 for the subclade\cr The sixth and seventh element
#' will be NULL if the subclade does not exist (because it went extinct). }
#' @author Rampal S. Etienne
#' @references - Etienne, R.S. et al. 2012, Proc. Roy. Soc. B 279: 1300-1309,
#' doi: 10.1098/rspb.2011.1439 \cr - Etienne, R.S. & B. Haegeman 2012. Am. Nat.
#' 180: E75-E89, doi: 10.1086/667574
#' @keywords models
#' @examples
#' dd_KI_sim(c(0.2,0.1,20,0.1,0.05,30,4),10)
#' @export dd_KI_sim
dd_KI_sim = function(pars,age,ddmodel = 1)
{
# Simulation of diversity-dependent process
# . start from crown age
# . no additional species at crown node
# . no missing species in present
# pars = [laM muM K laS muS tinn]
# - pars1[1] = laM = (initial) speciation rate of main clade
# - pars1[2] = muM = extinction rate of main clade
# - pars1[3] = K = clade-level carrying capacity of main clade
# - pars1[4] = laS = (initial) speciation rate of subclade
# - pars1[5] = muS = extinction rate of subclade
# - pars1[6] = K = clade-level carrying capacity of subclade
# - pars1[7] = tinn = time of key innovation
# age = crown age
# ddmodel = mode of diversity-dependence
# . ddmodel == 1 : linear dependence in speciation rate with parameter K
# . ddmodel == 1.3: linear dependence in speciation rate with parameter K'
# . ddmodel == 2 : exponential dependence in speciation rate
# . ddmodel == 2.1: variant with offset at infinity
# . ddmodel == 2.2: 1/n dependence in speciation rate
# . ddmodel == 2.3: exponential dependence in speciation rate with parameter x
# . ddmodel == 3 : linear dependence in extinction rate
# . ddmodel == 4 : exponential dependence in extinction rate
# . ddmodel == 4.1: variant with offset at infinity
# . ddmodel == 4.2: 1/n dependence in speciation rate
# done becomes 1 once a realization with both crown lineages surviving
# to the present has been obtained (conditioning on survival)
done = 0
# --- argument checks: fail fast on impossible parameter combinations ---
if(pars[7] > age)
{
stop('The key innovation time is before the crown age of the main clade.')
}
if((pars[1] < pars[2]) | (pars[4] < pars[5]))
{
stop('lambda0 is smaller than mu for one or both clades')
}
if(min(pars) < 0)
{
stop('One of the parameters is negative')
}
if(!(ddmodel %in% c(1,1.3,2,2.1,2.2,2.3,3,4,4.1,4.2)))
{
stop('This diversity-dependence model does not exist or is not implemented')
}
# Re-simulate entire realizations until the survival condition is met.
while(done == 0)
{
# number of species N at time t
# i = index running through t and N
t = rep(0,1)
L = matrix(0,2,5)
i = 1
t[1] = 0
# start at the crown: two main-clade species, no subclade yet
NM = 2
NS = 0
# L = data structure for lineages,
# . L[,1] = branching times
# . L[,2] = index of parent species
# . L[,3] = index of daughter species
# . L[,4] = time of extinction
# . L[,5] = main clade (0) or subclade (1)
# j = index running through L
L[1,1:5] = c(0,0,-1,-1,0)
L[2,1:5] = c(0,-1,2,-1,0)
# linlistM/linlistS hold the signed labels of the currently extant
# main-clade and subclade lineages; the sign records the crown side.
linlistM = c(-1,2)
linlistS = NULL
newL = 2
# pars[7] is measured backward from the present; tinn is forward from the crown
tinn = age - pars[7]
ff = dd_KI_lamuN(ddmodel,pars,c(NM[i],NS[i]))
laMN = ff[1]
muMN = ff[2]
laSN = ff[3]
muSN = ff[4]
# total event rate; next waiting time is exponential with this rate
denom = (laMN + muMN) * NM[i] + (laSN + muSN) * NS[i]
t[i + 1] = t[i] + stats::rexp(1,denom)
# If the first waiting time jumps across the innovation time, apply the
# key innovation now: move one randomly chosen main-clade lineage to the
# subclade, then redraw the waiting time from tinn with the new rates.
if(t[i + 1] > tinn & t[i] < tinn)
{
NM[i] = NM[i] - 1
NS[i] = NS[i] + 1
linlistS = sample2(linlistM,1)
L[abs(linlistS),5] = 1
linlistM = linlistM[-which(linlistM == linlistS)]
ff = dd_KI_lamuN(ddmodel,pars,c(NM[i],NS[i]))
laMN = ff[1]
muMN = ff[2]
laSN = ff[3]
muSN = ff[4]
denom = (laMN + muMN) * NM[i] + (laSN + muSN) * NS[i]
t[i + 1] = tinn + stats::rexp(1,denom)
}
# Gillespie-style event loop until the present (t = age) is reached.
while(t[i + 1] <= age)
{
# choose event type proportional to each per-clade total rate
event = sample2(x = 1:4,size = 1,prob = c(laMN * NM[i], muMN * NM[i], laSN * NS[i], muSN * NS[i]))
i = i + 1
if(event == 1)
{
# speciation event in main clade
ranL = sample2(linlistM,1)
NM[i] = NM[i - 1] + 1
NS[i] = NS[i - 1]
newL = newL + 1
L = rbind(L,c(t[i],ranL,sign(ranL) * newL,-1,0))
linlistM = c(linlistM,sign(ranL) * newL)
} else if(event == 3)
{
# speciation event in subclade
ranL = sample2(linlistS,1)
NM[i] = NM[i - 1]
NS[i] = NS[i - 1] + 1
newL = newL + 1
L = rbind(L,c(t[i],ranL,sign(ranL) * newL,-1,1))
linlistS = c(linlistS,sign(ranL) * newL)
} else if(event == 2)
{
# extinction event in main clade
ranL = sample2(linlistM,1)
NM[i] = NM[i - 1] - 1
NS[i] = NS[i - 1]
L[abs(ranL),4] = t[i]
w = which(linlistM == ranL)
linlistM = linlistM[-w]
linlistM = sort(linlistM)
} else if(event == 4)
{
# extinction event in subclade
ranL = sample2(linlistS,1)
NM[i] = NM[i - 1]
NS[i] = NS[i - 1] - 1
L[abs(ranL),4] = t[i]
w = which(linlistS == ranL)
linlistS = linlistS[-w]
linlistS = sort(linlistS)
}
# If either crown lineage (negative or positive labels) has no extant
# descendants left, this realization fails; force exit from the loop.
if(sum(c(linlistM,linlistS) < 0) == 0 | sum(c(linlistM,linlistS) > 0) == 0)
{
t[i + 1] = Inf
} else {
ff = dd_KI_lamuN(ddmodel,pars,c(NM[i],NS[i]))
laMN = ff[1]
muMN = ff[2]
laSN = ff[3]
muSN = ff[4]
denom = (laMN + muMN) * NM[i] + (laSN + muSN) * NS[i]
t[i + 1] = t[i] + stats::rexp(1,denom)
# Same key-innovation handling as above, for crossings of tinn that
# occur inside the event loop.
if(t[i + 1] > tinn & t[i] < tinn)
{
NM[i] = NM[i] - 1
NS[i] = NS[i] + 1
ff = dd_KI_lamuN(ddmodel,pars,c(NM[i],NS[i]))
laMN = ff[1]
muMN = ff[2]
laSN = ff[3]
muSN = ff[4]
linlistS = sample2(linlistM,1)
L[abs(linlistS),5] = 1
linlistM = linlistM[-which(linlistM == linlistS)]
denom = (laMN + muMN) * NM[i] + (laSN + muSN) * NS[i]
t[i + 1] = tinn + stats::rexp(1,denom)
}
}
}
# Accept only if both crown lineages still have extant species.
if(sum(c(linlistM,linlistS) < 0) == 0 | sum(c(linlistM,linlistS) > 0) == 0)
{
done = 0
} else {
done = 1
}
}
# Convert event times (measured forward from the crown) into times
# before present; extinction time -1 continues to mean "still extant".
L[,1] = age - c(L[,1])
notmin1 = which(L[,4] != -1)
L[notmin1,4] = age - c(L[notmin1,4])
# NOTE(review): extant rows were excluded from the conversion above, so no
# entry should equal age + 1 here; this looks like a defensive leftover
# from a variant where extant rows were converted too -- confirm.
L[which(L[,4] == age + 1),4] = -1
# Build phylogenies: tes = extant species only, tas = all species
# including extinct ones (T/F are base aliases for TRUE/FALSE).
tes = L2phylo(L[,1:4],dropextinct = T)
tas = L2phylo(L[,1:4],dropextinct = F)
tesS = NULL
tes2 = NULL
# Plot the extant and complete trees stacked vertically.
graphics::par(mfrow = c(2,1))
graphics::plot(tes)
graphics::plot(tas)
# colors for painting clade membership: 0 = main (blue), 1 = sub (red)
cols = c("blue","red")
names(cols) = c(0,1)
# If the subclade survived, extract it from the extant tree, compute the
# branch interval straddling the innovation, and paint it with phytools.
if(length(linlistS) > 0)
{
namesS = paste('t',abs(linlistS), sep = "")
if(length(linlistS) == 1)
{
m = which(tes$tip.label == namesS)
b2 = 0
}
else if(length(linlistS) > 1)
{
m = ape::getMRCA(phy = tes,tip = namesS)
tesS = ape::extract.clade(phy = tes,node = m)
b2 = age - ape::node.depth.edgelength(tes)[m]
}
# b1/b2 bracket the stem edge of the subclade; the stem fraction places
# the regime shift at the key-innovation time pars[7]
m0 = tes$edge[which(tes$edge[,2] == m),1]
b1 = age - ape::node.depth.edgelength(tes)[m0]
tes2 = phytools::paintSubTree(tes,node = m,state = "1",anc.state = "0",stem = (pars[7] - b2)/(b1 - b2))
phytools::plotSimmap(tes2,cols,lwd = 3,pts = F)
}
tasS = NULL
tas2 = NULL
# Same painting/extraction on the complete tree, using every species that
# was ever assigned to the subclade (column 5 of L).
allS = which(L[,5] == 1)
if(length(allS) > 0)
{
namesS = paste('t',abs(allS), sep = "")
if(length(allS) == 1)
{
m = which(tas$tip.label == namesS)
b2 = 0
}
else if(length(allS) > 1)
{
m = ape::getMRCA(phy = tas,tip = namesS)
tasS = ape::extract.clade(phy = tas,node = m)
b2 = age - ape::node.depth.edgelength(tas)[m]
}
m0 = tas$edge[which(tas$edge[,2] == m),1]
b1 = age - ape::node.depth.edgelength(tas)[m0]
tas2 = phytools::paintSubTree(tas,node = m,state = "1",anc.state = "0", stem = (pars[7] - b2)/(b1 - b2))
phytools::plotSimmap(tas2,cols,lwd = 3,pts = F)
}
# Return the trees, the full L bookkeeping table, and the subclade
# trees/painted trees (NULL when the subclade went extinct).
out = list(tes = tes,tas = tas,L = L,tesS = tesS,tasS = tasS,tes2 = tes2,tas2 = tas2)
return(out)
}
|
a348ccfa0ead4ffbc285940bacde93d1f19c482e | eccfcd967550ac1431856f1a5f83f7b3187b11ab | /data_analysis.R | 0563943fb82beb1540c73a6669986e470021cfaa | [
"MIT"
] | permissive | cgoettel/thesis | b7e32a118b1c036c1f47e218156115611e8c5d89 | b7bd9dbda729212123199d165639dc0b15e7c878 | refs/heads/master | 2021-03-16T05:42:41.521695 | 2018-04-05T20:30:09 | 2018-04-05T20:30:09 | 35,571,363 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,987 | r | data_analysis.R | # Initialization
## Packages.
## To install, run `install.packages("<package_name>")`.
library(car)
library(ggplot2)
library(grid)
library(Kendall)
library(lmtest)
library(sandwich)
library(stargazer)
## Input data
# Survey data; expected columns include Major, Gender, Age, Major.GPA,
# AC.CE, AE.RO, Parents.education and amss1..amss6 (path is relative to
# the working directory).
data <- read.csv("data.csv")
## Dataset without IS
data.mis <- data[data$Major != "is",]
## Dummy variables
# Indexed assignment leaves rows matching neither condition as NA
# (e.g. a Gender value other than "m"/"f").
data.mis$maledum[data.mis$Gender == "m"] <- 1
data.mis$maledum[data.mis$Gender == "f"] <- 0
data.mis$csdum[data.mis$Major == "cs"] <- 1
data.mis$csdum[data.mis$Major != "cs"] <- 0
## CS and IT specific variables
# Free-standing vectors of the Kolb scores and major GPA, split by major;
# used throughout the correlation and regression sections below.
cs_ac_ce<-data.mis$AC.CE[data.mis$Major=="cs"]
cs_ae_ro<-data.mis$AE.RO[data.mis$Major=="cs"]
it_ac_ce<-data.mis$AC.CE[data.mis$Major=="it"]
it_ae_ro<-data.mis$AE.RO[data.mis$Major=="it"]
cs_major_gpa<-data.mis$Major.GPA[data.mis$Major=="cs"]
it_major_gpa<-data.mis$Major.GPA[data.mis$Major=="it"]
# First, let's see if there's a correlation between CS and IT
# AC-CE and AE-RO. "The t-test is used to test whether there is
# a difference between two groups on a continuous dependent
# variable." That is appropriate for checking if there's a
# difference between CS and IT majors with their AC-CE and AE-RO
# values, but not for checking the significance between those
# values and the relationship to GPA.
t.test(cs_ac_ce, it_ac_ce) # p = 0.5411
t.test(cs_ae_ro, it_ae_ro) # p = 0.3501
# So there's no relationship between them. And we can see that
# in the plot:
# The "major" column holds the literal color names, which double as the
# group labels fed to scale_color_manual() below.
df <- data.frame("black", cs_ac_ce, cs_ae_ro)
colnames(df) <- c("major","ac_ce","ae_ro")
df2 <- data.frame("red", it_ac_ce, it_ae_ro)
colnames(df2) <- c("major","ac_ce","ae_ro")
bound <- rbind(df,df2)
# NOTE(review): referencing columns as bound$ac_ce inside aes() works but
# bypasses ggplot's data masking; bare column names are the usual form.
plot <- ggplot(bound, aes(bound$ac_ce, bound$ae_ro,
color = major)) + geom_point()
plot <- plot + scale_color_manual(name = "Major",
values = unique(bound$major), labels = c("CS", "IT"))
plot <- plot + labs(x="AC-CE", y="AE-RO")
# Write the scatter plot to a JPEG file.
jpeg('figures/chapter4/cs-v-it-plot.jpg', width = 500,
height = 500)
plot
dev.off()
# Research questions
## Question 1: How strong is the correlation between AC-CE and
## AE-RO, and college GPA in CS, IS, and IT?
# Before we know whether to use Pearson's or Spearman's, we have
# to know if the data is normally distributed. If it is, we use
# Pearson's; if not, we use Spearman's.
# Shapiro-Wilk normality tests (results auto-print at top level).
shapiro.test(cs_major_gpa) # p = 0.148
shapiro.test(it_major_gpa) # p = 0.8639
shapiro.test(cs_ac_ce) # p = 0.02512
shapiro.test(cs_ae_ro) # p = 0.7826
shapiro.test(it_ac_ce) # p = 0.3601
shapiro.test(it_ae_ro) # p = 0.4583
# Pearson correlations (cor.test default method).
cor.test(cs_major_gpa, cs_ac_ce) # p = 0.8177
cor.test(cs_major_gpa, cs_ae_ro) # p = 0.6704
cor.test(it_major_gpa, it_ac_ce) # p = 0.9727
cor.test(it_major_gpa, it_ae_ro) # p = 0.02017, cor = 0.4914967
# This means that as IT AE-RO increases, there's a positively
# related change in GPA which we can see in the Figure.
# Let's get a summary of it_major_gpa so we can see just what
# that means.
summary(it_major_gpa)
#   Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
#  2.340   2.868   3.200   3.204   3.480   3.970
sd(it_major_gpa) # sd = 0.4439921
# Histogram of IT major GPA with a dashed mean line.
# NOTE(review): `df` is reused from the t-test section above and `hist`
# shadows the base graphics function hist() for the rest of the script.
df <- data.frame(it_major_gpa)
hist <- ggplot(data = df, aes(it_major_gpa)) +
geom_histogram(color="black", fill="white",
breaks=seq(2.34, 4, by=0.1))
hist <- hist + labs(x = "IT major GPA", y = "Count")
hist <- hist + geom_density(alpha=0.2, fill="red")
hist <- hist + geom_vline(aes(xintercept=mean(it_major_gpa)),
color="blue", linetype="dashed")
jpeg('figures/chapter4/it-major-gpa-hist.jpg')
hist
dev.off()
# Should only be used on this first one.
# Spearman rank correlations as the non-parametric check.
cor.test(cs_major_gpa, cs_ac_ce, method = "spearman") # p=0.8232
cor.test(cs_major_gpa, cs_ae_ro, method = "spearman") # p=0.8342
cor.test(it_major_gpa, it_ac_ce, method = "spearman") # p=0.9262
cor.test(it_major_gpa, it_ae_ro, method = "spearman") # p=0.0161,
# rho = 0.5066
# Spearman's and Pearson's are showing the same thing which is
# great news. But, Spearman's freaks out when there's ties, so
# let's use Kendall's tau-b (which handles ties) to check.
Kendall(cs_major_gpa,cs_ac_ce) # p = 0.71763
Kendall(cs_major_gpa,cs_ae_ro) # p = 0.82436
Kendall(it_major_gpa,it_ac_ce) # p = 0.95484
Kendall(it_major_gpa,it_ae_ro) # p = 0.02154, tau = 0.365
# And again the same p-value being significant holds. I'm
# convinced.
# Let's see what they look like
# Four scatter plots (GPA vs. each Kolb score, per major), each with a
# linear-model smoother, drawn on a shared x/y scale for comparability.
# First plot
df<-data.frame(cs_major_gpa, cs_ac_ce)
cs_major_ac_ce_plot<-ggplot(df, aes(x=cs_major_gpa, y=cs_ac_ce))+
geom_point()
cs_major_ac_ce_plot<-cs_major_ac_ce_plot + geom_smooth(method=lm)
cs_major_ac_ce_plot<-cs_major_ac_ce_plot + labs(x="CS Major GPA",
y="CS AC-CE")
cs_major_ac_ce_plot<-cs_major_ac_ce_plot +
coord_cartesian(xlim = c(2.2,4.1), ylim = c(-32,32),
expand = FALSE)
# Second plot
df<-data.frame(cs_major_gpa, cs_ae_ro)
cs_major_ae_ro_plot<-ggplot(df, aes(x=cs_major_gpa, y=cs_ae_ro))+
geom_point()
cs_major_ae_ro_plot<-cs_major_ae_ro_plot + geom_smooth(method=lm)
cs_major_ae_ro_plot<-cs_major_ae_ro_plot + labs(x="CS Major GPA",
y="CS AE-RO")
cs_major_ae_ro_plot<-cs_major_ae_ro_plot +
coord_cartesian(xlim = c(2.2,4.1), ylim = c(-32,32),
expand = FALSE)
# Third plot
df<-data.frame(it_major_gpa, it_ac_ce)
it_major_ac_ce_plot<-ggplot(df, aes(x=it_major_gpa, y=it_ac_ce))+
geom_point()
it_major_ac_ce_plot<-it_major_ac_ce_plot + geom_smooth(method=lm)
it_major_ac_ce_plot<-it_major_ac_ce_plot + labs(x="IT Major GPA",
y="IT AC-CE")
it_major_ac_ce_plot<-it_major_ac_ce_plot +
coord_cartesian(xlim = c(2.2,4.1), ylim = c(-32,32),
expand = FALSE)
# Fourth plot
df<-data.frame(it_major_gpa, it_ae_ro)
it_major_ae_ro_plot<-ggplot(df, aes(x=it_major_gpa, y=it_ae_ro))+
geom_point()
it_major_ae_ro_plot<-it_major_ae_ro_plot + geom_smooth(method=lm)
it_major_ae_ro_plot<-it_major_ae_ro_plot + labs(x="IT Major GPA",
y="IT AE-RO")
it_major_ae_ro_plot<-it_major_ae_ro_plot +
coord_cartesian(xlim = c(2.2,4.1), ylim = c(-32,32),
expand = FALSE)
# Print them side-by-side
# 2x2 grid via the grid package viewports, written to a single JPEG.
jpeg('figures/chapter4/major_gpa_lm_plots.jpg', width = 1000,
height = 1000)
pushViewport(viewport(layout = grid.layout(2,2)))
print(cs_major_ac_ce_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 1))
print(cs_major_ae_ro_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 2))
print(it_major_ac_ce_plot, vp = viewport(layout.pos.row = 2,
layout.pos.col = 1))
print(it_major_ae_ro_plot, vp = viewport(layout.pos.row = 2,
layout.pos.col = 2))
dev.off()
## Question 2: What is the best multiple regression model to fit
## these correlations?
# Let's calculate the robust standard of error and use that
# throughout these calculations. The assumption is that you will
# use robust standard of error; if you choose not to, you have
# to prove why you don't have to.
# Robust standard error: normal standard error does not handle
# heteroskedasticity so we need to have the robust standard
# error. So, "I ran this model with standard errors computed with
# Huber-White (HC1) robust standard error to account for the
# heteroskedasticity of the data."
# Three nested models, each adding one Kolb score; robust SEs come from
# sandwich::vcovHC (HC1) with the SE = sqrt of the diagonal of the
# covariance matrix.
### Model 1
fit1 <- lm(data = data.mis, Major.GPA ~ csdum + Age +
Parents.education)
cov1 <- vcovHC(fit1, type = "HC1")
robust_se_1 <- sqrt(diag(cov1))
### Model 2
fit2 <- lm(data = data.mis, Major.GPA ~ csdum + Age +
Parents.education + AE.RO)
cov2 <- vcovHC(fit2, type = "HC1")
robust_se_2 <- sqrt(diag(cov2))
### Model 3
fit3 <- lm(data = data.mis, Major.GPA ~ csdum + Age +
Parents.education + AE.RO + AC.CE)
cov3 <- vcovHC(fit3, type = "HC1")
robust_se_3 <- sqrt(diag(cov3))
# Let's visualize that
# stargazer renders a side-by-side regression table with the robust SEs.
stargazer(fit1, fit2, fit3, se = c(robust_se_1, robust_se_2,
robust_se_3))
# Plot fitted values of models 1 and 2 against observed major GPA.
# NOTE(fix): these plots previously mapped `x = major_gpa`, but no object
# or column of that name exists anywhere in this script -- the column in
# `data.mis` (and hence `df`) is `Major.GPA`, so ggplot would fail with
# "object 'major_gpa' not found". Corrected to `Major.GPA`.
# Plot fit1
df <- data.frame(data.mis)
df$fit1 <- stats::predict(fit1, newdata = data.mis)
fit1_plot <- ggplot(df)
fit1_plot <- fit1_plot + geom_point(aes(x = Major.GPA, y = fit1),
size = 2)
fit1_plot <- fit1_plot + geom_smooth(data = df, aes(x = Major.GPA,
y = fit1), size = 1.5, colour = "blue", se = TRUE,
stat = "smooth", method = lm)
fit1_plot <- fit1_plot + labs(x = "Major GPA", y =
"CS dummy variable + Age + Parents Education")
fit1_plot <- fit1_plot + coord_cartesian(xlim = c(2.3, 4.05),
ylim = c(2.75, 4), expand = FALSE)
# Plot fit2
df$fit2 <- stats::predict(fit2, newdata = data.mis)
fit2_plot <- ggplot(df)
fit2_plot <- fit2_plot + geom_point(aes(x = Major.GPA, y = fit2),
size = 2)
fit2_plot <- fit2_plot + geom_smooth(data = df, aes(x = Major.GPA,
y = fit2), size = 1.5, colour = "blue", se = TRUE,
stat = "smooth", method = lm)
fit2_plot <- fit2_plot + labs(x = "Major GPA", y =
"CS dummy variable + Age + Parents Education + AE-RO")
fit2_plot <- fit2_plot + coord_cartesian(xlim = c(2.3, 4.05),
ylim = c(2.75, 4), expand = FALSE)
# Write both panels side by side into a single JPEG (grid viewports).
jpeg('figures/chapter4/mr_models_1_2.jpg', width = 1000,
height = 500)
pushViewport(viewport(layout = grid.layout(1, 2)))
print(fit1_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 1))
print(fit2_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 2))
dev.off()
#### Test of joint significance (linear hypothesis test)
# We haven't been looking at the individual variables before, so
# let's look at them individually and see if there's a joint
# significance. The null hypothesis is that all of these are 0
# so let's see if at least one of them isn't.
# This is checking the residual (error) sum of squares to see
# if we can explain the deviance in the model (by running two
# models and comparing those error sum of squares values). So not
# including these variables (if the F statistic is high)
# increases the amount of unexplained variance in the data.
# So, including these variables explains more of the variance
# than when you don't include them.
# In reality, having one that's significant at the single level
# will generally make you fail the null hypothesis (meaning there
# is a joint significance so they should be included in the
# regression). If they fail the null hypothesis,
# then you should keep them in the regression because they
# somehow help contribute to the model because they help
# explain the std error (meaning, they help explain the deviance
# in the model). Additionally, if they're all significant then
# we'll reject the joint significance null hypothesis even
# though none of them have a p < 0.05.
# Removing unnecessary variables will give you more power with
# this small of a dataset.
cs_it_lht <- lm(data = data.mis, Major.GPA ~ csdum + AE.RO +
Age + Parents.education)
# lht() is car's linearHypothesis(); the hypothesis strings name the
# dummy-coded Parents.education levels, with an HC1 (White) adjustment.
lht(cs_it_lht, c("AE.RO = 0",
"Parents.educationgraduate degree = 0",
"Parents.educationpost-graduate degree = 0",
"Parents.educationsome college = 0",
"Parents.educationundergraduate degree = 0"),
white.adjust = "hc1")
# With the p-value being so low, we confidently reject the null
# hypothesis that these variables are not significant in
# explaining the variance in the data.
# I feel comfortable not including age here because it has
# already been shown to be significant in the multiple regression
# model.
## Question 3: How strong is the correlation between AC-CE and
## AE-RO, and student satisfaction in CS, IS, and IT?
### Data cleaning: AMSS values index minus questions 3 and 6.
# Items 1 and 2 are reverse-coded (6 - x) before summing.
amss_index <- (6 - data.mis$amss1) + (6 - data.mis$amss2) +
data.mis$amss4 + data.mis$amss5
cs_amss_index <- amss_index[data.mis$Major == "cs"]
it_amss_index <- amss_index[data.mis$Major == "it"]
summary(amss_index)
sd(amss_index) # 2.259477
# Just to be sure, let's look at each major
summary(cs_amss_index)
sd(cs_amss_index) # 2.362094
summary(it_amss_index)
sd(it_amss_index) # 2.113654
# Let's visualize how skewed this is
df <- data.frame(amss_index)
amss_plot <- ggplot(data = df, aes(amss_index)) +
geom_histogram(color="black", fill="white",
breaks = seq(12, 20, by = 1))
amss_plot <- amss_plot + labs(x = "AMSS Index", y = "Count")
amss_plot <- amss_plot + geom_vline(aes(xintercept =
mean(amss_index)), color="blue", linetype="dashed")
jpeg('figures/chapter4/amss_index_plot.jpg', width = 400,
height = 400)
amss_plot
dev.off()
### Regression analysis
# NOTE(review): the formula variables here are the free-standing per-major
# vectors, not columns of data.mis, so `data = data.mis` is effectively
# unused (lm resolves them from the calling environment).
fit1 <- lm(data = data.mis, cs_amss_index ~ cs_ac_ce + cs_ae_ro )
cov1 <- vcovHC(fit1, type = "HC1")
robust_se_1 <- sqrt(diag(cov1))
fit2 <- lm(data = data.mis, it_amss_index ~ it_ac_ce + it_ae_ro )
cov2 <- vcovHC(fit2, type = "HC1")
robust_se_2 <- sqrt(diag(cov2))
stargazer(fit1, fit2, se = c(robust_se_1, robust_se_2))
## Question 4: Is there a correlation between college GPA and
## student satisfaction?
### Pearson's correlation coefficient between GPA and
### satisfaction
# Note: fit1/fit2 are reassigned here, replacing the Question 3 models.
fit1 <- lm(data = data.mis, cs_major_gpa ~ cs_amss_index )
cov1 <- vcovHC(fit1, type = "HC1")
robust_se_1 <- sqrt(diag(cov1))
fit2 <- lm(data = data.mis, it_major_gpa ~ it_amss_index )
cov2 <- vcovHC(fit2, type = "HC1")
robust_se_2 <- sqrt(diag(cov2))
stargazer(fit1, fit2, se = c(robust_se_1, robust_se_2))
# Let's visualize that
## CS AMSS plot
df <- data.frame(cs_major_gpa, cs_amss_index)
cs_major_amss_plot <- ggplot(df, aes(x = cs_major_gpa,
y = cs_amss_index)) + geom_point()
cs_major_amss_plot <- cs_major_amss_plot + geom_smooth(method=lm)
cs_major_amss_plot <- cs_major_amss_plot + labs(x =
"CS Major GPA", y = "CS AMSS")
cs_major_amss_plot <- cs_major_amss_plot + coord_cartesian(ylim =
c(12,20))
## IT AMSS plot
df <- data.frame(it_major_gpa, it_amss_index)
it_major_amss_plot <- ggplot(df, aes(x = it_major_gpa,
y = it_amss_index)) + geom_point()
it_major_amss_plot <- it_major_amss_plot + geom_smooth(method=lm)
it_major_amss_plot <- it_major_amss_plot + labs(x="IT Major GPA",
y="IT AMSS")
it_major_amss_plot <- it_major_amss_plot + coord_cartesian(ylim =
c(12,20))
# Print them side-by-side
jpeg('figures/chapter4/major_gpa_amss_plots.jpg', width = 1000,
height = 500)
pushViewport(viewport(layout = grid.layout(1,2)))
print(cs_major_amss_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 1))
print(it_major_amss_plot, vp = viewport(layout.pos.row = 1,
layout.pos.col = 2))
dev.off()
# Demographics
# Regress the satisfaction index on GPA and demographic controls.
# NOTE(fix): the formula previously used `major_gpa`, which is not defined
# anywhere and is not a column of `data.mis`; the GPA column is `Major.GPA`,
# so lm() would fail with "object 'major_gpa' not found".
demogs <- lm(amss_index ~ Major.GPA + csdum + Age + Gender,
data = data.mis)
# Huber-White (HC1) robust standard errors, as elsewhere in this script.
cov1 <- vcovHC(demogs, type = "HC1")
robust_se_d <- sqrt(diag(cov1))
stargazer(demogs, se = robust_se_d)
|
fc73087f6336149d28c594066e8e8ec6f70a6f10 | 727ee66a4f5da790a3aeda05a7fd58e0d3ab6a4d | /Rpractical03.R | d64a4a82a9bb3e723fe22a38688a20befaaf8b83 | [] | no_license | TheEWave/My-R-practicals | b12cc63cc714d3d76c594b0a960804232006291c | 476981afb78b4f3efb2a9f8d904011004ef69289 | refs/heads/master | 2020-04-27T08:15:07.193068 | 2020-03-17T13:55:26 | 2020-03-17T13:55:26 | 174,163,762 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | Rpractical03.R | y<- c(1,2,3,4,5,6,7,8,3,9,10)
# Descriptive statistics of y (values auto-print when run at top level).
# Arithmetic mean
mean(y)
# Median
median(y)
# Mode: tabulate frequencies, sort descending, take the most frequent
# value's name (returned as a character string, not a number)
names(sort(-table(y)))[1]
# Range (minimum and maximum)
range(y)
# Inter-quartile range (Q3 - Q1)
IQR(y)
# Sample standard deviation (n - 1 denominator)
sd(y)
# Quartiles (0%, 25%, 50%, 75%, 100%)
quantile(y)
fba9d5d97ab4e3abd5186dcb443e376238cb6bfc | e6c99af609e16b03a1e26f6c24a4d92b6edb3e4c | /Alex/tadpole_prep_dates.r | 077396d7e58341e78cb79e952b802e15be62d026 | [] | no_license | apclarkva/tadpole-project | 381144441d610c200275ec4ddf622bac945b6d41 | 46f85c7a7754b0387536bba5b43c726c50ed3491 | refs/heads/master | 2020-04-01T15:07:58.303689 | 2018-12-06T13:58:02 | 2018-12-06T13:58:02 | 153,322,675 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,696 | r | tadpole_prep_dates.r | ## Load in training information
trainingInputsRNN <- read.csv("data/trainingInputsRNN.csv")
validationInputsRNN <- read.csv("data/validationInputsRNN.csv")
## Load in targets
trainingTargets <- read.csv("TADPOLE_TargetData_train_pre_processed.csv")
validationTargets <- read.csv("TADPOLE_TargetData_validation_pre_processed.csv")
countDays <- function(inputs, targets){
  # For a single patient, sort the input (training) exams and the target
  # exams by date and append a DaysUntilNext column to each frame:
  #   - inputs$DaysUntilNext[i]: days from input exam i to the next exam in
  #     the combined chronology (inputs first, then targets); the last input
  #     therefore points at the first target.
  #   - targets$DaysUntilNext[j]: days from target exam j to target j+1;
  #     the final target keeps 0 (no later exam exists).
  # Dates are character strings in "%m/%d/%y" format. Unparseable dates
  # become NA and propagate as NA gaps, matching the original behavior.
  # Returns list(inputs, targets) with the new column appended.
  input_dates <- as.Date(as.character(inputs$Date), format = "%m/%d/%y")
  target_dates <- as.Date(as.character(targets$Date), format = "%m/%d/%y")
  ord_in <- order(input_dates)
  ord_tg <- order(target_dates)
  inputs <- inputs[ord_in, ]
  targets <- targets[ord_tg, ]
  # Vectorized replacement of the original element-by-element loop:
  # consecutive gaps over the combined, date-sorted timeline. This also
  # avoids the original's 1:numLoops footgun when one frame is empty.
  timeline <- c(input_dates[ord_in], target_dates[ord_tg])
  gaps <- as.numeric(diff(timeline))
  n_in <- length(input_dates)
  inputs$DaysUntilNext <- gaps[seq_len(n_in)]
  targets$DaysUntilNext <- c(gaps[-seq_len(n_in)], 0)
  return(list(inputs, targets))
}
# Build empty accumulator frames with the same columns as the source data,
# then run countDays() per patient and stack the results.
# NOTE(review): rbind inside a loop is O(n^2); fine for this dataset size
# but consider collecting into a list and binding once if it grows.
newTrainingInRNN <- data.frame(matrix(ncol = dim(trainingInputsRNN)[2], nrow = 0))
names(newTrainingInRNN) <- names(trainingInputsRNN)
newTrainingOut <- data.frame(matrix(ncol = dim(trainingTargets)[2], nrow = 0))
names(newTrainingOut) <- names(trainingTargets)
newValidationInRNN <- data.frame(matrix(ncol = dim(validationInputsRNN)[2], nrow = 0))
names(newValidationInRNN) <- names(validationInputsRNN)
newValidationOut <- data.frame(matrix(ncol = dim(validationTargets)[2], nrow = 0))
names(newValidationOut) <- names(validationTargets)
# Training set: compute inter-exam day gaps for each patient id.
# NOTE(review): newFrames[1] is a one-element list; as.data.frame() on it
# works but newFrames[[1]] would be the direct extraction -- confirm the
# resulting column names are as intended.
for(ptid in unique(trainingInputsRNN$PTID_Key)) {
newFrames = countDays(trainingInputsRNN[trainingInputsRNN$PTID_Key == ptid,], trainingTargets[trainingTargets$PTID_Key == ptid,])
inputs <- as.data.frame(newFrames[1])
targets <- as.data.frame(newFrames[2])
newTrainingInRNN <- rbind(newTrainingInRNN, inputs)
newTrainingOut <- rbind(newTrainingOut, targets)
}
# Validation set: same procedure.
for(ptid in unique(validationInputsRNN$PTID_Key)) {
newFrames = countDays(validationInputsRNN[validationInputsRNN$PTID_Key == ptid,], validationTargets[validationTargets$PTID_Key == ptid,])
inputs <- as.data.frame(newFrames[1])
targets <- as.data.frame(newFrames[2])
newValidationInRNN <- rbind(newValidationInRNN, inputs)
newValidationOut <- rbind(newValidationOut, targets)
}
# Persist the augmented frames.
write.csv(newTrainingInRNN, file = "data/trainingInWithDays.csv")
write.csv(newTrainingOut, file = "data/trainingOutWithDays.csv")
write.csv(newValidationInRNN, file = "data/validationInWithDays.csv")
write.csv(newValidationOut, file = "data/validationOutWithDays.csv")
## Practice
# inputs <- trainingInputsRNN[trainingInputsRNN$PTID_Key == 694,]
# targets <- trainingTargets[trainingTargets$PTID_Key == 694,]
#
# newFrames = countDays(inputs, targets)
# inputs <- newFrames[1]
# targets <- newFrames[2]
eedfc5277ee824d5f5e34022b008d1d38b2f6d61 | 30fdbcadb43c57a414ffa409cd70e5a52b478948 | /06-social-media-data.r | a6bf381a4e7b786b7d0db0e55577dbf24f7571a3 | [] | no_license | eborbath/rscraping-eui-2017 | 1d05cb91b1b26398e87333da0c69610cf74288e2 | 41b602f43deaa0c001f4c541cf3fc7ec1ab02167 | refs/heads/master | 2021-01-22T08:01:59.314758 | 2017-05-22T14:00:25 | 2017-05-22T14:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,715 | r | 06-social-media-data.r | ### -----------------------------
### simon munzert
### gathering social media data
### -----------------------------
## peparations -------------------
source("00-course-setup.r")
wd <- getwd()
## mining Twitter with R ----------------
## about the Twitter APIs
# two APIs types of interest:
# REST APIs --> reading/writing/following/etc., "Twitter remote control"
# Streaming APIs --> low latency access to 1% of global stream - public, user and site streams
# authentication via OAuth
# documentation at https://dev.twitter.com/overview/documentation
## how to get started
# 1. register as a developer at https://dev.twitter.com/ - it's free
# 2. create a new app at https://apps.twitter.com/ - choose a random name
# 3. go to "Keys and Access Tokens" and keep the displayed information ready
# again: how to register at Twitter as developer, obtain and use access tokens
browseURL("https://mkearney.github.io/rtweet/articles/auth.html")
## R packages that connect to Twitter API
# twitteR: connects to REST API; weird design decisions regarding data format
# streamR: connects to Streaming API; works very reliably, connection setup a bit difficult
# rtweet: connects to both REST and Streaming API, nice data formats, still under active development
library(rtweet)
## name assigned to created app
appname <- "TwitterToR"
## api key (example below is not a real key)
load("/Users/munzerts/rkeys.RDa")
key <- TwitterToR_twitterkey
## api secret (example below is not a real key)
secret <- TwitterToR_twittersecret
twitter_token <- create_token(
app = appname,
consumer_key = key,
consumer_secret = secret)
rt <- search_tweets("data science", n = 200, token = twitter_token)
View(rt)
## streaming Tweets with the rtweet package -----------------
# set keywords used to filter tweets
q <- paste0("clinton,trump,hillaryclinton,imwithher,realdonaldtrump,maga,electionday")
q <- paste0("schulz,merkel,btw17,btw2017")
# parse directly into data frame
twitter_stream_ger <- stream_tweets(q = q, timeout = 30, token = twitter_token)
# set up directory and JSON dump
rtweet.folder <- "data/rtweet-data"
dir.create(rtweet.folder)
streamname <- "clintontrump"
filename <- file.path(rtweet.folder, paste0(streamname, "_", format(Sys.time(), "%F-%H-%M-%S"), ".json"))
# create file with stream's meta data
streamtime <- format(Sys.time(), "%F-%H-%M-%S")
metadata <- paste0(
"q = ", q, "\n",
"streamtime = ", streamtime, "\n",
"filename = ", filename)
metafile <- gsub(".json$", ".txt", filename)
cat(metadata, file = metafile)
# sink stream into JSON file
stream_tweets(q = q, parse = FALSE,
timeout = 30,
file_name = filename)
# parse from json file
rt <- parse_stream(filename)
# inspect tweets data
names(rt)
head(rt)
# inspect users data
users_data(rt) %>% head()
users_data(rt) %>% names()
## mining tweets with the rtweet package ------
rt <- parse_stream("data/rtweet-data/clintontrump_2017-05-19-16-27-32.json")
clinton <- str_detect(rt$text, regex("hillary|clinton", ignore_case = TRUE))
trump <- str_detect(rt$text, regex("donald|trump", ignore_case = TRUE))
mentions_df <- data.frame(clinton,trump)
colMeans(mentions_df, na.rm = TRUE)
## mining twitter accounts with the rtweet package ------
user_df <- lookup_users("RDataCollection")
names(user_df)
user_timeline_df <- get_timeline("RDataCollection")
names(user_timeline_df)
user_favorites_df <- get_favorites("RDataCollection")
names(user_favorites_df)
## getting pageviews from Wikipedia ---------------------------
## IMPORTANT: If you want to gather pageviews data before July 2015, you need the statsgrokse package. Check it out here:
browseURL("https://github.com/cran/statsgrokse")
ls("package:pageviews")
trump_views <- article_pageviews(project = "en.wikipedia", article = "Donald Trump", user_type = "user", start = "2015070100", end = "2017040100")
head(trump_views)
clinton_views <- article_pageviews(project = "en.wikipedia", article = "Hillary Clinton", user_type = "user", start = "2015070100", end = "2017040100")
plot(ymd(trump_views$date), trump_views$views, col = "red", type = "l")
lines(ymd(clinton_views$date), clinton_views$views, col = "blue")
german_parties_views <- article_pageviews(
project = "de.wikipedia",
article = c("Christlich Demokratische Union Deutschlands", "Christlich-Soziale Union in Bayern", "Sozialdemokratische Partei Deutschlands", "Freie Demokratische Partei", "Bündnis 90/Die Grünen", "Die Linke", "Alternative für Deutschland"),
user_type = "user",
start = "2015090100",
end = "2017040100"
)
table(german_parties_views$article)
parties <- unique(german_parties_views$article)
dat <- filter(german_parties_views, article == parties[1])
plot(ymd(dat$date), dat$views, col = "black", type = "l")
dat <- filter(german_parties_views, article == parties[2])
lines(ymd(dat$date), dat$views, col = "blue")
dat <- filter(german_parties_views, article == parties[3])
lines(ymd(dat$date), dat$views, col = "red")
dat <- filter(german_parties_views, article == parties[7])
lines(ymd(dat$date), dat$views, col = "brown")
## getting data from Google Trends ---------------------------
# IMPORTANT: The current gtrendsR version that is available on CRAN does not work. Install the developer version from GitHub by uncommenting the following line and running it (you might have to install the devtools package before that)
#devtools::install_github('PMassicotte/gtrendsR')
library(gtrendsR)
gtrends_merkel <- gtrends("Merkel", geo = c("DE"), time = "2017-01-01 2017-05-15")
gtrends_schulz <- gtrends("Schulz", geo = c("DE"), time = "2017-01-01 2017-05-15")
plot(gtrends_merkel)
plot(gtrends_schulz)
|
d6fb31197fa56aad4e1e8fd37363ca946d02013e | 13c0687d2111ea43aad32f319b46a6f67bd2f53a | /__09_week_01/__09_week_serialize_02blobs/__f_funs.R | 4260e0e85817e1171583e1a92d60a7ce628e47ef | [] | no_license | davezes/MAS405_S2021 | 0a672892ca320dbad952697948b610adcffc0656 | 025a3ea1de268c05c1a426bdb8944d2367789847 | refs/heads/master | 2023-05-10T22:14:20.857391 | 2021-06-05T21:33:26 | 2021-06-05T21:33:26 | 350,537,508 | 4 | 6 | null | null | null | null | UTF-8 | R | false | false | 3,521 | r | __f_funs.R |
##### to use these, table must have fields
## "file TEXT, ",
## "segment INT(14), ",
## "raw_data MEDIUMTEXT, ",
### xobj <- xwav ; xfile <- xthisFN ; xdrv <- drv; xtableName <- "MAS405audio_serialSegs" ; xsegSize <- 10^6 ; xcomp <- "xz" ; xoverWrite <- TRUE ; xverbosity <- 2
f_dbwrite_raw <- function(
xobj,
xtableName,
xfile,
xdrv,
xdbuser,
xdbpw,
xdbname,
xdbhost,
xdbport,
xoverWrite=FALSE,
xsegSize=10^6,
xverbosity=1,
xcomp="xz"
) {
# Serialize an arbitrary R object, compress it, split the compressed bytes
# into hex-encoded text segments of at most xsegSize bytes, and write them
# as rows (file, segment, raw_data) into the given DB table. Companion
# reader: f_dbread_raw().
#
# Args:
#   xobj       object to store (anything serialize() accepts)
#   xtableName destination table; must have file/segment/raw_data columns
#   xfile      key value stored in the 'file' column
#   xdrv, xdbuser, xdbpw, xdbname, xdbhost, xdbport  DBI connection details
#   xoverWrite if TRUE, delete existing rows for xfile first
#   xsegSize   max bytes per stored segment (default 1e6)
#   xverbosity 0 = silent, 1 = timing/size info, 2 = per-segment progress
#   xcomp      memCompress() type (default "xz")
#
# NOTE(review): dbConnect() below references xdbsock, which is NOT a
# parameter -- it is taken from the calling environment; confirm it is
# defined globally before calling.
xxx <- serialize(xobj, con=NULL)
if( xverbosity > 0 ) {
cat("object class:", class(xobj), "\n")
cat("serialized object size MB:", format(object.size(xxx), "MB"), "\n")
cat("serialized vector length in millions:", length(xxx) / 10^6, "\n")
}
xxnow <- Sys.time()
yyy <- memCompress(from=xxx, type=xcomp)
xxdiffTime <- difftime(Sys.time(), xxnow, units="secs")
if( xverbosity > 0 ) {
cat("compressed serialized object size MB:", format(object.size(yyy), "MB"), "\n")
cat("compressed serialized vector length in millions:", length(yyy) / 10^6, "\n")
cat("time taken for compression:", xxdiffTime, "seconds", "\n")
}
##################################### segment
# Break points: segment jj covers bytes xbrks[jj] .. xbrks[jj+1]-1.
xbrks <- sort(unique(c(seq(1, length(yyy), by=xsegSize), length(yyy)+1))) ; xbrks
Ns <- length(xbrks) - 1
xout <- character( Ns ) ; xout
for(jj in 2:(Ns+1)) {
# paste() of a raw vector yields two hex characters per byte,
# which is what f_dbread_raw() decodes with as.hexmode().
this_seg <- paste(yyy[ (xbrks[jj-1]):(xbrks[jj]-1) ], collapse="")
xout[ jj-1 ] <- this_seg
if( xverbosity > 1 ) {
cat("processing segment", jj-1, "of" , Ns, "\n")
}
}
xdf_data <-
data.frame(
"file"=rep(xfile, Ns),
"segment"=(1:Ns),
"raw_data"=xout
)
con <- dbConnect(xdrv, user=xdbuser, password=xdbpw, dbname=xdbname, host=xdbhost, port=xdbport, unix.sock=xdbsock)
if(xoverWrite) {
qstr <- paste0(
"DELETE FROM ", xtableName,
" WHERE file='", xfile, "'"
)
qstr
# NOTE(review): try() result is intentionally discarded -- a failed
# DELETE (e.g. no prior rows) is treated as best-effort.
xx <- try( dbGetQuery(con, qstr), silent=TRUE )
xx
}
xxnow <- Sys.time()
dbWriteTable(con, xtableName, xdf_data, field.types = NULL, append=TRUE, ##### notice append is TRUE
row.names=FALSE, overwrite=FALSE)
yydiffTime <- difftime(Sys.time(), xxnow, unit="secs")
if( xverbosity > 0 ) {
cat("time taken to write to table, ", xtableName, ":", yydiffTime, "seconds", "\n")
}
dbDisconnect(con)
}
f_dbread_raw <- function(
xtableName,
xfile,
xdrv,
xdbuser,
xdbpw,
xdbname,
xdbhost,
xdbport,
xverbosity=1,
xcomp="xz"
) {
# Inverse of f_dbwrite_raw(): fetch all hex-encoded segments stored under
# xfile, reassemble them in segment order, decode hex -> raw, decompress
# and unserialize, returning the original R object.
#
# Args mirror f_dbwrite_raw(); xcomp must match the compression type used
# when writing.
# NOTE(review): dbConnect() references xdbsock from the calling
# environment (not a parameter) -- confirm it is defined globally.
con <- dbConnect(xdrv, user=xdbuser, password=xdbpw, dbname=xdbname, host=xdbhost, port=xdbport, unix.sock=xdbsock)
qstr <- paste0(
"SELECT * FROM ", xtableName,
" WHERE file='", xfile, "'"
)
xxnow <- Sys.time()
df_read_back <- dbGetQuery(con, qstr)
xxdiffTime <- difftime(Sys.time(), xxnow, units="secs")
dbDisconnect(con)
if( xverbosity > 0 ) {
cat("time taken to read from DB:", xxdiffTime, "seconds", "\n")
cat("dim of df:", dim(df_read_back), "\n")
}
# Restore segment order, then split each segment string into 2-char hex
# pairs (one byte each) before decoding.
xperm <- order(df_read_back[ , "segment" ])
xla <- lapply( df_read_back[ xperm, "raw_data"], function(x) { return(substring(x, seq(1, nchar(x), by=2), seq(2, nchar(x), by=2))) } )
uuu <- unlist(xla)
vvv <- as.raw(as.hexmode( uuu ))
www <- memDecompress(from=vvv, type=xcomp)
zobj <- unserialize(www)
return(zobj)
}
|
47f80e4395f59cecec00df6add5a27ef05626f20 | 4673e7ccf7fe53ff8dacb56453580cdb11075319 | /scripts/mapaincidenciagif.R | 3696f828f5aa9b4963859b0cb63739e70f00100f | [] | no_license | dogomoreno/Covid19-Sonora-Municipios | 2beb16fed697160b5785c961a6d88d6c6417a7d9 | 440eee0435dba671936194d8927e3dc4db95d690 | refs/heads/master | 2023-08-14T22:10:20.303409 | 2021-10-04T06:14:27 | 2021-10-04T06:14:27 | 274,054,270 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,727 | r | mapaincidenciagif.R | # Paquetes
library(tidyverse)
library(extrafont)
library(scales)
library(plotly)
library(htmlwidgets)
library(showtext)
library(tint)
library(rgdal)
library(rgeos)
library(ggiraph)
library(miniUI)
library(units)
library(reactable)
library(zoo)
library(lubridate)
library(treemapify)
library(wesanderson)
library(ggsci)
library("Cairo")
library(gganimate)
library(ggsci)
#library(wesanderson)
#library(ggsci)
#library(RColorBrewer)
library(rcartocolor)
#library(NineteenEightyR)
Fechahoy<- "Corte al 21 de agosto de 2021"
capa_munison <- readOGR("Shapes", layer="MUNSON")
capa_son <- readOGR("Shapes", layer="ENTSON")
Casos <- read_csv("Bases/Casosdiarios.csv",
col_types = cols(CASOS = col_integer(),
CVEGEO = col_character(), Fecha = col_date(format = "%Y-%m-%d"),
MUNICIPIO = col_character(), NUEVOS = col_integer(), X1 = col_skip()),
locale = locale(encoding = "ISO-8859-1"))
POBMUN <- read_csv("Bases/POBMUN.csv", col_types = cols(CVEGEO = col_character()),
locale = locale(encoding = "ISO-8859-1"))
# Incidence map
# Rolling 7-day case sums per municipality, sampled every Saturday, joined
# with population to get weekly incidence per 100k inhabitants.
Casossemana <- Casos %>% group_by(MUNICIPIO) %>%
mutate(diasemana = weekdays(Fecha), Casossemana = rollsum(NUEVOS, 7, align="right", fill = 0)) %>%
filter(diasemana=="sábado") %>%
left_join(POBMUN, by = "CVEGEO")
Casossemana <- Casossemana %>% mutate (INCIDENCIA= round((Casossemana*100000)/POB,1))
# Zero incidence is shown as "no data" (grey) on the map
Casossemana$INCIDENCIA[Casossemana$INCIDENCIA==0] <- NA
# Earlier quantile-based classification, kept for reference:
# Muyalto <- quantile(Casossemana$INCIDENCIA, 0.90, na.rm=TRUE)
# Alto <- quantile(Casossemana$INCIDENCIA, 0.75, na.rm=TRUE)
# Medio <- quantile(Casossemana$INCIDENCIA, 0.50, na.rm=TRUE)
# Bajo <- quantile(Casossemana$INCIDENCIA, 0.25, na.rm=TRUE)
# casossempob <- Casossemana %>% mutate(IS=if_else(INCIDENCIA>(round(Muyalto,0)),5,
#                                          if_else(INCIDENCIA>(round(Alto,0)),4,
#                                                 if_else(INCIDENCIA>(round(Medio,0)),3,
#                                                        if_else(INCIDENCIA>(round(Bajo,0)),2,1)))))
# Fixed-threshold classification: 4 = high (>=100), 3 = substantial (>=50),
# 2 = moderate (>=10), 1 = low
casossempob <- Casossemana %>% mutate(IS=if_else(INCIDENCIA>=100,4,
if_else(INCIDENCIA>=50,3,
if_else(INCIDENCIA>=10,2,1))))
casossempob <- casossempob %>% mutate(id=CVEGEO)
capa_munison <- readOGR("Shapes", layer="MUNSON")
capa_reg <- readOGR("Shapes", layer="REGSON")
# Convert polygons to a ggplot-friendly data frame keyed by 'id'
capa_munison_df <- fortify(capa_munison, region="concat")
capa_munison_inci<- inner_join(capa_munison_df, casossempob, by="id")
# Color palette, legend labels and break order for the 4 incidence classes
discrete <- c("4" = "#CE3F41","3" = "#FFA17B","2" = "#FECF7D", "1" = "#31859C")
# NOTE(review): this subtitle hard-codes 18/06/2021 while Fechahoy above
# says 21/08/2021 -- confirm which cut-off date is intended.
subtitulo <- "Casos de covid-19 en los últimos 7 días por 100 mil habitantes\nCorte al 18/06/2021"
marcas <- c( "Alta\n(100 o más)", "Substancial\n(50-99)", "Moderada\n(10-49)","Baja\n(+0-9)")
romp <- c("4", "3", "2", "1")
# Static choropleth of the latest weekly incidence
Mapa_incidencia<- ggplot(capa_munison_inci, aes(map_id = id)) +
geom_polygon(data=capa_munison, aes(x=long, y=lat, group=group),
fill="gray90", color="white", size=0.12) +
geom_map(aes(fill = factor(IS)),color = "white",size=0.22, map = capa_munison_df) +
scale_fill_manual(values = discrete,
breaks= romp,
labels = marcas) +
theme_void() +
theme(plot.title = (element_text(family = "Lato Black", size = 20, color = "black")),
plot.subtitle = (element_text(family = "Lato Light", size = 8, color = "#01787E")),
plot.margin = margin(0.5,1, 0.5, 0.5, "cm"),
legend.position = "right",
plot.background = element_rect(fill = "white", color="black", size=3),
legend.key.height = unit (0.5, "cm"), legend.key.width = unit (0.2, "cm"), axis.text = element_blank(),
legend.text = element_text(family = "Lato", size = 6, color = "black"),
legend.title = element_text(family = "Lato Black", size = 5, color = "black"),
plot.caption = element_text(family = "Lato Light", size = 6.5, color = "gray40"),
axis.title = element_blank()) +
labs(y = NULL, x = NULL, title = "Incidencia semanal",
subtitle = subtitulo, fill = NULL,
caption ="Elaboración Luis Armando Moreno con información de la Secretaría de Salud del Estado de Sonora")+
geom_polygon(data=capa_reg, aes(x=long, y=lat, group=group),
fill="transparent", color="black", size=0.2)
SonoraMCsemanal <- filter(casossempob,Fecha %in% diacasossemana)
capa_munison_df <- fortify(capa_munison, region="concat")
capa_munison_casos<- inner_join(capa_munison_df, SonoraMCsemanal, by="id")
Mapa_inci <- function(capa_son, capa_munison_casos) { ggplot(capa_munison_casos, aes(map_id = id)) +
geom_polygon(data=capa_munison, aes(x=long, y=lat, group=group),
fill="gray90", color="white", size=0.6) +
geom_map(aes(fill = as.factor(IS)),color = "white",size=0.6, map = capa_munison_df) +
geom_polygon(data=capa_son, aes(x=long, y=lat, group=group),
fill="transparent", color="black", size=0.6) +
scale_fill_manual(values = discrete, breaks= romp,
labels = marcas)+
theme_void() +
theme(plot.title = (element_text(family = "Lato Black", size = 54, color = "black")),
plot.subtitle = (element_text(family = "Lato Light", size = 22, color = "#01787E")),
plot.margin = margin(1, 2.5, 1, 2, "cm"),
legend.position = c(0.18,0.4),
plot.background = element_rect(fill = "white", color="black", size=3),
legend.key.height = unit (3, "cm"), legend.key.width = unit (0.75, "cm"), axis.text = element_blank(),
legend.text = element_text(family = "Lato", size = 20, color = "black"),
legend.title = element_text(family = "Lato Black", size = 28, color = "black"),
plot.caption = element_text(family = "Lato Light", size = 20, color = "gray40"),
axis.title = element_blank()) +
labs(axis = NULL, y = NULL, x = NULL, title = "Incidencia semanal", subtitle = "Casos de covid-19 en los últimos 7 días por 100 mil habitantes",
caption ="Elaboración Luis Armando Moreno con información de la Secretaría de Salud estatal")
}
Incisemanaanim <- Mapa_inci(capa_son, capa_munison_casos) +
transition_manual(Fecha) +
shadow_mark() +
labs(fill = "{current_frame}")
gifincisem <- animate(Incisemanaanim, end_pause = 6, fps = 20,duration = 30, width = 950, height =950, renderer = gifski_renderer())
anim_save("./Gráficos/Incidenciasemanal.gif", animation=gifincisem)
|
cdcd9d242d26c1bc5901260675363c06d2b17269 | 7d8d298b008f1171bd11055ad311a16f63826eaf | /modeling/vector-occurrence-sdm/eval-cv-gam-tgb.R | 63a9860a991de9d8e0566b6e016723a31b004e15 | [] | no_license | jgjuarez/chagas-vector-sdm | d387b5a24a8468a4d3bf03a390e1a93c5382f451 | 2fe11f5cad13c2b179784af439327a6fb4f2ec0c | refs/heads/master | 2022-07-03T21:29:13.575668 | 2020-05-19T16:49:49 | 2020-05-19T16:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,517 | r | eval-cv-gam-tgb.R | library(purrr)
devtools::load_all("../../tcruziutils")
library(mgcv)
library(tmap)
folds <- readRDS("../../preprocessing/folds_list_vector_presence.Rds")
cv_res <- readRDS("cv-results.Rds")
# endemic zone country polygons
countries <- readRDS("../../preprocessing/countries.Rds")
p_endemic_zone <- tm_shape(countries) + tm_borders(alpha = .5)
env_grids <- raster::brick("../../preprocessing/env_grids.grd")
p_endemic_zone <- function(countries, hull) {
countries <- countries %>%
raster::crop(hull)
}
# Best cross-validated test AUC for a cv-result object, rounded to two
# decimal places for use in map legends.
get_test_auc <- function(cv) {
  best_auc <- max(cv[["cv_results"]][["test_auc"]])
  round(best_auc, 2)
}
# Map of the spatial cross-validation layout for one species: presence /
# background points over the country borders, with training blocks drawn as
# outlines and test blocks shaded grey.
#
# Args:
#   cv        cv-result object (used for the species title)
#   fold      fold object with $blocks, $train, $test and $hull components
#   countries sf/sp polygons of the endemic-zone countries
# Returns a tmap object.
map_blocks <- function(cv, fold, countries) {
block <- fold$blocks$blocks
folds_train <- fold$train$fold
folds_test <- fold$test$fold
df <- rbind(fold$train, fold$test)
n_presence <- sum(df$presence)
tm_shape(countries) + tm_borders(alpha = .5) +
tm_shape(fold$hull) + tm_borders(alpha = .5) +
# points are shuffled so overlapping presence/background dots don't
# systematically hide one class
tm_shape(df[sample(1:nrow(df), nrow(df)), ]) +
tm_dots(col = "presence", style = "cat", palette = c("steelblue", "black"),
alpha = .7, size = .15, title = paste0("presence (n = ", n_presence, ")")) +
tm_shape(block[block$folds %in% folds_train, ]) +
tm_borders() + tm_text(text = "folds", palette = "black", size = .7) +
tm_shape(block[block$folds %in% folds_test, ]) +
tm_polygons(palette = "grey70", alpha = .7) +
tm_text(text = "folds", palette = "black", size = .7) +
tm_layout(
title = cv$species,
title.size = 1.5,
title.fontface = "italic",
legend.position = c("left", "bottom"),
legend.text.size = 1.2,
legend.hist.size = 1.2,
legend.title.size = 1.5,
bg.color = "whitesmoke")
}
# Prediction map for one species: the fitted SDM probability surface
# (cv$grid) over country borders, with the test AUC (via get_test_auc) in
# the legend title. Prints the species name as a progress indicator.
#
# Args mirror map_blocks(). Returns a tmap object.
map_sdm <- function(cv, fold, countries) {
print(cv$species)
tm_shape(countries) + tm_borders(alpha = .5) +
tm_shape(fold$hull) + tm_borders(alpha = .5) +
tm_shape(cv$grid) +
tm_raster(alpha = .7, breaks = seq(0, 1, by = .2),
palette = viridis::magma(1e3), style = "cont",
title = paste0("prediction (AUC: ", get_test_auc(cv), ")")) +
tm_layout(
title = cv$species,
title.size = 1.5,
title.fontface = "italic",
legend.position = c("left", "bottom"),
legend.text.size = 1.2,
legend.hist.size = 1.2,
legend.title.size = 1.5,
bg.color = "whitesmoke")
}
# Quick interactive check of one species (index 5)
tmap_arrange(
map_blocks(cv_res[[5]], folds[[5]], countries),
map_sdm(cv_res[[5]], folds[[5]], countries),
nrow = 1)
# Keep species whose model fit succeeded, with >= 100 presences, excluding
# the catch-all "other" groups
ind_keep <- map_lgl(cv_res, ~class(.x)[1] != "try-error") &
map_lgl(folds, ~ (sum(.x$train$presence) + sum(.x$test$presence)) >= 100) &
map_lgl(folds, ~ !grepl("other", .x$species))
sdm_map_list <- map2(cv_res[ind_keep], folds[ind_keep],
~tmap_arrange(
map_blocks(.x, .y, countries),
map_sdm(.x, .y, countries),
nrow = 2))
# All species in one multi-page PDF
pdf("species_maps5.pdf", width = 6, height = 12)
for (map in sdm_map_list) {
print(map)
}
dev.off()
# individual maps
# NOTE(review): this loop prints `map`, which is the stale last element
# left over from the loop above -- every per-species PDF gets the same
# plot. It should print sdm_map_list[[i]] (as the PNG loop below does).
for (i in seq_along(sdm_map_list)) {
species <- folds[ind_keep][[i]]$species
pdf(paste0("prediction-maps/map5x5_", sub(" ", "-", species), ".pdf"), width = 6, height = 12)
print(map)
dev.off()
}
# NOTE(review): print(species) here runs before species is assigned on the
# first iteration (prints the previous loop's value) -- likely the two
# lines are swapped.
for (i in seq_along(sdm_map_list)) {
print(species)
species <- folds[ind_keep][[i]]$species
png(paste0("prediction-maps/map5x5_", sub(" ", "-", species), ".png"), width = 300, height = 600)
print(sdm_map_list[[i]])
dev.off()
}
# NOTE(review): a png device holds a single page; only the last map in this
# loop survives in species_maps.png.
png("species_maps.png", width = 300, height = 600)
for (map in sdm_map_list) {
print(map)
}
dev.off()
# save predictions as GeoTIFF
dir.create("prediction-maps")
dir.create("presence-data")
for (i in seq_along(cv_res[ind_keep])) {
print(names(cv_res)[ind_keep][i])
grid_i <- cv_res[ind_keep][[i]]$grid
df_i <- subset(
rbind(
folds[ind_keep][[i]]$train,
folds[ind_keep][[i]]$test),
subset = presence == 1,
select = c("presence"))
names(grid_i) <- "prediction"
raster::writeRaster(
grid_i,
filename = paste0("prediction-maps/",
sub(" ", "-", cv_res[ind_keep][[i]]$species),
"-prediction.tiff"),
overwrite = TRUE)
readr::write_csv(as.data.frame(df_i),
path = paste0("presence-data/",
sub(" ", "-", cv_res[ind_keep][[i]]$species), "-presence.csv"))
}
### check ranges and block size
block_widths <- map_dbl(folds, ~.x[["block_width"]])
ranges <- map_dbl(folds, ~.x[["range"]])
sum(block_widths > ranges)
names(folds)[block_widths > ranges]
block_widths / ranges
# NOTE(review): map_dbl(cv_res[ind_keep]) below is missing its mapping
# function (presumably the test AUC extractor) and will error if run.
data.frame(block = block_widths[ind_keep], range = ranges[ind_keep],
auc = map_dbl(cv_res[ind_keep]))
ec15cfbe604096cd3a24657924c8faae6a84f7e0 | 2ecbc024125f8d8bb983627cc8322f8682f79524 | /R/strip_.randomForest.R | 4dee1e85f13b2eacb6558b539d20a7706fac5c01 | [] | no_license | cran/strip | 1d83857506ee3882925539ac44888fcbb23dfb86 | af5187116b6e480a90c9f387293ed3f9550a339e | refs/heads/master | 2020-03-27T04:02:43.301804 | 2018-10-29T14:30:03 | 2018-10-29T14:30:03 | 145,907,925 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,712 | r | strip_.randomForest.R |
#' @author
#' The method for \code{randomForest} objects is adapted
#' from \href{http://stats.stackexchange.com/a/171096/55854}{ReKa's answer}
#' on StackExchange.
#'
#' @export
#' @rdname strip
#'
strip_.randomForest <-
  function(object,
           keep,
           ...)
  {
    # Normalise the requested components (case-insensitive; several allowed).
    keep <- match.arg(tolower(keep),
                      c("everything", "predict", "print"),
                      several.ok = TRUE)
    original_class <- class(object)
    original_call <- object$call

    # "everything": nothing to strip, hand the object back untouched.
    if ("everything" %in% keep) {
      return(object)
    }

    # Components required by predict(): keep the object but drop the heavy
    # training artefacts and captured environments.
    if ("predict" %in% keep) {
      predict_part <- object
      predict_part$finalModel$predicted <- NULL
      predict_part$finalModel$oob.times <- NULL
      predict_part$finalModel$y <- NULL
      predict_part$finalModel$votes <- NULL
      predict_part$control$indexOut <- NULL
      predict_part$control$index <- NULL
      predict_part$trainingData <- NULL
      attr(predict_part$terms, ".Environment") <- NULL
      attr(predict_part$formula, ".Environment") <- NULL
    } else {
      predict_part <- list(call = original_call)
    }

    # Components required by print(): summary statistics only.
    if ("print" %in% keep) {
      print_part <- list(call = original_call,
                         type = object$type,
                         ntree = object$ntree,
                         mtry = object$mtry,
                         coefficients = object$coefficients,
                         confusion = object$confusion,
                         err.rate = object$err.rate,
                         mse = object$mse,
                         rsq = object$rsq,
                         test = list(err.rate = object$test$err.rate,
                                     confusion = object$test$confusion,
                                     mse = object$test$mse,
                                     rsq = object$test$rsq))
    } else {
      print_part <- list(call = original_call)
    }

    # Merge the retained pieces and restore the original class.
    stripped <- rlist::list.merge(predict_part, print_part)
    class(stripped) <- original_class
    stripped
  }
|
5d96998d9feda4ef0e79b197f5a5fefa8ff7144d | 47a6df31ea34de98fded35bc7fef1974ee35062e | /fetchNRSATable.r | cea8a375cec1341a0e78319223cdcabfc40ae1a2 | [] | no_license | jasonelaw/nrsa-epa | 4e2ec7b04ba1e441679496b7af9d41a780482e15 | 7ac13283e34a65890a0dad024ce8239168159fa2 | refs/heads/master | 2020-05-16T23:04:45.345580 | 2013-11-21T22:27:52 | 2013-11-21T22:27:52 | 14,342,574 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,663 | r | fetchNRSATable.r | # fetchNRSATable.r
#
# 10/05/09 cws Created
# 11/09/09 cws Does not create UID column unless BATCHNO column exists.
# Explicitely de-factors character columns.
# 11/13/09 cws Explicitely sets NA UNITS to ''
# 12/03/09 cws Standardizing empty UNITS as NONE and upcasing UNITS.
# 12/04/09 ssr/mrc Brilliant work, now confident...added at and where arguments... took about 30 seconds.
# 2/25/2010 cws moved source() calls to NRSAvalidation.r
# 4/14/2010 mrc write a unit test for fetchNRSATable
# exclude datalines for sidechannels when PROTOCOL is BOATABLE
fetchNRSATable <- function (chan, tblName, at=NULL, where=NULL, filterSideChannels=TRUE)
# Retrieves the specified NRSA table via an ODBC connection and standardizes
# the contents as follows:
#   a) Changes numeric column 'BATCHNO' to character column 'UID'
#   b) De-factors character columns and trims surrounding whitespace
#   c) Standardizes missing/empty UNITS as 'NONE' and upcases UNITS
#
# Returns the standardized table contents as a dataframe if successful, or
# a character string describing the problem if an error occurs.
#
# ARGUMENTS:
# chan               an open RODBC channel object through which the data
#                      table is read.
# tblName            a string specifying the table to read.
# at                 optional argument passed through to dbFetch().
# where              optional string containing an SQL WHERE clause for
#                      retrieving a subset of the table.
# filterSideChannels logical; if TRUE, rows recorded on side channels
#                      (TRANSECT XA..XK) at sites sampled under the
#                      BOATABLE protocol are excluded.
# ASSUMPTIONS:
# tblVISITS2 exists and siteProtocol() can classify its UIDs.
{
  if (filterSideChannels==TRUE) {
      # side channels only meaningful if TRANSECT column exists
      tt <- sqlColumns(chan, tblName)
      if(is.character(tt)) {
          # BUG FIX: this sprintf previously referenced the undefined
          # variable 'tblname' and would itself error when reached.
          return(sprintf("Error: Could not retrieve column information for %s: %s"
                        ,tblName, tt
                        ))
      }
      if('TRANSECT' %in% tt$COLUMN_NAME) {
          # first collect protocol data so we can exclude river sidechannels
          visitInfo <- dbFetch (chan, 'tblVISITS2', at=NULL, where=NULL)
          if (is.character (visitInfo))
              return (sprintf('Error: Could not retrieve tblVISITS2: %s', visitInfo))
          visitInfo <- rename (visitInfo, 'BATCHNO', 'UID')
          riverSites <- siteProtocol(visitInfo$UID)
          riverSites <- subset(riverSites, PROTOCOL=='BOATABLE')$UID
          filterClause <- paste ("NOT(TRANSECT IN ('XA', 'XB', 'XC', 'XD', 'XE', 'XF', 'XG', 'XH','XI', 'XJ', 'XK')
AND BATCHNO IN ("
                                ,paste (sprintf ("'%s'",riverSites), collapse=', ')
                                ,"))"
                                )
          whereClause <- paste (c(where, filterClause), collapse=' AND ')
      } else {
          whereClause <- where
      }
  } else {
      whereClause <- where
  }

  # Attempt to retrieve data, returning error message if unsuccessful
  tt <- dbFetch(chan, tblName, at, where=whereClause)
  if(is.character(tt)) {          # was: class(tt) == 'character'
      return(tt)
  }

  # Convert numeric column 'BATCHNO' to character column 'UID'
  if('BATCHNO' %in% names(tt)) {
      tt<-rename(tt, 'BATCHNO', 'UID')
      tt$UID <- as.character(tt$UID)
  }

  # Explicitly de-factor character columns, then trim surrounding whitespace
  for(colName in names(tt)) {
      if(is.factor(tt[[colName]])) {
          tt[[colName]] <- as.character(tt[[colName]])
      }
      if(is.character(tt[[colName]])) {
          tt[[colName]] <- trimws(tt[[colName]])
      }
  }

  if('UNITS' %in% names(tt)) {
      # Standardize missing/empty units as NONE
      missingUnits <- is.na(tt$UNITS) | trimws(tt$UNITS)==''
      if(any(missingUnits)) {
          tt$UNITS[missingUnits] <- 'NONE'
      }
      # Standardize units as all caps
      tt$UNITS <- toupper(tt$UNITS)
  }

  return(tt)
}
fetchNRSATableTest <- function ()
# Unit-test stub for fetchNRSATable().
# NOTE(review): the entire test body is commented out, so this function
# currently does nothing and returns NULL. The sketched code below shows the
# intended fixture: an RODBC channel to the NORTHWIND test database plus a
# synthetic PHAB table built with expand.grid() -- confirm before enabling.
{
  # Create test data ... channel, and table; use NORTHWIND test db
#  chan <- odbcConnect ('Northwind')
#  baseTest <- rbind(expand.grid(UID=as.character(1000 + 1:10)
#                               ,TRANSECT=LETTERS[1:11]
#                               ,TRANSDIR=c('LF','RT','CU','CD','CL','CR')
#                               ,SAMPLE_TYPE='PHAB_CHANW'
#                               )
#                   ,expand.grid(UID=as.character(2000 + 1:10)
#                               ,TRANSECT=LETTERS[1:11]
#                               ,TRANSDIR=c('LF','RT','UP','DN')
#                               ,SAMPLE_TYPE='PHAB_CHANB'
#                               )
#                   )
}
# end of file |
5da857731855f58ed2b55a3fdf20bc352380192a | 75011b657b7473e51ac41f1ca3f00a9bcc557c9d | /man/plot.amce.Rd | 6c8d9073a14a71522bec3dfda6083d381806a620 | [] | no_license | cran/cjoint | 9f9efd775ac6aed4b1928b0f035b02a4abd59844 | f9f84fb2ae9c8dee6b7b4db64ef00b17a77c903b | refs/heads/master | 2023-09-01T01:26:43.447652 | 2023-08-22T07:50:07 | 2023-08-22T09:30:51 | 27,119,034 | 4 | 11 | null | 2023-01-09T23:28:38 | 2014-11-25T09:46:21 | R | UTF-8 | R | false | false | 8,669 | rd | plot.amce.Rd | \name{plot.amce}
\alias{plot.amce}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot AMCE Estimates
}
\description{
\code{plot} method for "amce" objects
}
\usage{
\method{plot}{amce}(x, main = "", xlab = "Change in E[Y]", ci = 0.95,
colors = NULL, xlim = NULL, breaks = NULL,
labels = NULL, attribute.names = NULL, level.names = NULL,
label.baseline = TRUE, text.size = 11, text.color = "black",
point.size = 0.5, dodge.size = 0.9, plot.theme = NULL,
plot.display = "all",
facet.names = NULL, facet.levels = NULL,
group.order = NULL,font.family = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x }{
An object of class "amce", a result of a call to \code{\link{amce}}
}
\item{main }{
Title of the plot.
}
\item{xlab }{
Label of the x-axis of the plot (AMCE or ACIE). Default is "Change in E[Y]"
}
\item{ci }{
Levels for confidence intervals to plot around point estimates. Must be between 0 and 1. Default is .95
}
\item{colors }{
Vector of color names to be used for points and confidence intervals. The \code{plot} function will alternate between the colors in the vector for each attribute being plotted. If NULL, \code{plot} will use a default \code{ggplot2} color scheme.
}
\item{xlim }{
Numeric vector denoting the upper and lower bounds of the x-axis in the plot. If \code{NULL} the plot function will automatically set a range that includes all effect estimates.
}
\item{breaks }{
Numeric vector denoting where x-axis tick marks should be placed. If \code{NULL} plot will use \code{ggplot2} defaults.
}
\item{labels }{
Vector denoting how x-axis tick marks should be labeled. If \code{NULL} plot will use \code{ggplot2} defaults.
}
\item{attribute.names }{
Character vector of attribute names to be plotted as labels. By default \code{plot.amce} will use the attribute names in the "amce" object passed to it.
}
\item{level.names }{
A list containing character vector elements with names in \code{attribute.names}. Each character vector in the list contains the level names to be plotted as labels beneath the corresponding attribute. By default \code{plot.amce} will use the level names in the "amce" object passed to it.
}
\item{label.baseline }{
If \code{TRUE}, the baseline levels for each attribute will be labeled as such. Defaults to \code{TRUE}.
}
\item{text.size }{
Size of text. Defaults to 11.
}
\item{text.color }{
Color of text in plot. Defaults to "black".
}
\item{point.size }{
Size of points in the plot. Defaults to 0.5.
}
\item{dodge.size }{
Width to dodge overlaps to the side. Defaults to 0.9.
}
\item{plot.theme }{
A ggplot2 'theme' object to be added to the plot. If NULL, defaults to black-and-white theme. Note that passing a theme object will override text and point color/size options.
}
\item{plot.display}{
Character string, one of "all", "unconditional", or "interaction". Option "all" will display both unconditional and interaction estimates. The "unconditional" option will display only 1 plot for unconditional estimates (both AMCE and ACIE) ignoring any facets provided to "facet.names" or respondent-varying characteristics. Option "interaction" will drop the unconditional plot and instead display only (unconditional) ACIE's or estimates conditional on respondent-varying characteristics as specified in the user-supplied option "facet.names". Defaults to "all".}
\item{facet.names }{
To facet plots (i.e., make separate plots for each value of a variable) give "facet.names" a vector of character strings containing the names of the variable(s) (either profile attribute or respondent-varying) to facet by. Unless given specific levels in "facet.levels", the plotted levels will consist of all levels of a factor variable or the quantiles of a continuous variable. Multiple facet variables cannot currently be varied at the same time within the same plot. Instead conditional effects will be calculated for one facet at a time while others are held at their first value in facet.levels, by default the bottom quantile for continuous variables and the baseline for factors.}
\item{facet.levels }{
To manually set facet levels, provide a list to "facet.levels". Names of list entries should correspond with variable names. The content of each entry should be a vector giving the desired levels, whether factors or continuous. To change the displayed names of the levels, assign names to each vector entry.
}
\item{group.order }{
To manually set the order of the attributes, provide a vector to "group.order". Names of the vector entries should correspond with name of the attribute.
}
\item{\dots }{
Other graphical parameters passed to \code{ggplot}.
}
\item{font.family }{
Will be passed to the ggplot function as the argument for font.family. If NULL, defaults will be used.
}
}
\value{
A \code{ggplot} object containing a dotplot of estimated AMCEs.
}
\references{
Hainmueller, J., Hopkins, D., and Yamamoto T. (2014) Causal Inference in Conjoint Analysis: Understanding Multi-Dimensional Choices via Stated Preference Experiments. Political Analysis 22(1):1-30
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{amce}} for the main estimation routine.
}
\examples{
\dontrun{
# Immigration Choice Conjoint Experiment Data from Hainmueller et. al. (2014).
data("immigrationconjoint")
data("immigrationdesign")
# Run AMCE estimator using all attributes in the design
results <- amce(Chosen_Immigrant ~ Gender + Education + `Language Skills` +
`Country of Origin` + Job + `Job Experience` + `Job Plans` +
`Reason for Application` + `Prior Entry`, data=immigrationconjoint,
cluster=TRUE, respondent.id="CaseID", design=immigrationdesign)
# Plot results
plot(results, xlab="Change in Pr(Immigrant Preferred for Admission to U.S.)",
ylim=c(-.3,.3), breaks=c(-.2, 0, .2), labels=c("-.2","0",".2"), text.size=13)
# Plot results with user-specified order of attributes
plot(results, xlab="Change in Pr(Immigrant Preferred for Admission to U.S.)",
ylim=c(-.3,.3), breaks=c(-.2, 0, .2), labels=c("-.2","0",".2"), text.size=13,
group.order=c("Gender","Education","Job",
"Language Skills","Job Experience",
"Job Plans","Reason for Application",
"Prior Entry","Country of Origin"))
# Run AMCE estimator with an interaction with a respondent-varying characteristic
interaction_results <- amce(Chosen_Immigrant ~ Gender + Education
+ Job + ethnocentrism:Job,
data = immigrationconjoint,na.ignore=TRUE,
design = immigrationdesign, cluster = FALSE,
respondent.varying = "ethnocentrism")
# Plot results with additional plots for quantiles of the respondent-varying characteristic
plot(interaction_results)
# Plot results with user-specified order of attributes
plot(interaction_results, group.order=c("Gender","Education","Job"))
# Do not show output for variables that do not vary with facetted levels
plot(interaction_results,plot.display="unconditional")
# RUN AMCE estimator with an interaction between two factor variables
interaction_results <- amce(Chosen_Immigrant ~ Gender + Education + Job
+ Education:Job, data = immigrationconjoint,
cluster = FALSE, design = immigrationdesign)
# Plot results with different plots for all levels of ACIE
plot(interaction_results,facet.names = "Education")
# Plot results with different plots for only two levels of one interacted variable
facet.levels1 <- list()
facet.levels1[["Education"]] <- c("college degree","graduate degree")
plot(interaction_results,facet.names = "Education",facet.levels = facet.levels1)
# Display only interaction panes
plot(interaction_results,facet.names = "Education",plot.display="interaction")
#Display only non-interaction panes
plot(interaction_results,facet.names = "Education",plot.display="unconditional")
#Change displayed attribute and level names
results <- amce(Chosen_Immigrant ~ Gender + Education + Job, data = immigrationconjoint,
cluster = FALSE, design = immigrationdesign)
levels.test<-list()
levels.test[["Gender"]]<-c("level1","level2")
levels.test[["Education"]]<-c("level1","b","c","d","e","f","g")
levels.test[["Job"]]<-c("a","b","c","d","e","f","g","h","i","j","k")
plot(results, level.names = levels.test, main="test", xlab="test",
ci=0.95, breaks=c(-0.2,-0.1,0,0.1,0.2), attribute.names = c("attribute1","attribute2","attribute3"))
}
}
|
91df1120ef6fe5e4427b4c31ac2671f33382f5d1 | e4211288e2e0818b3147f711b28e757cad82b6bb | /2_metab_models/run1/code/run_model.R | 1df982d2982ae65a628c9e0323fc4af9a6bcab21 | [
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | aappling-usgs/stream_metab_usa | 04a804c65fff3aeae8ab1d50d6c737fbd2cb4ce8 | 6b9e21de352df12001edcd91a9b5fc042b9744ec | refs/heads/master | 2021-01-24T01:01:45.831107 | 2019-03-19T14:45:25 | 2019-03-19T14:45:25 | 34,343,300 | 1 | 0 | null | 2015-04-21T17:57:37 | 2015-04-21T17:57:36 | null | UTF-8 | R | false | false | 1,787 | r | run_model.R | #' Do the modeling stuff specific to one config row. For the prep MLE run, run
#' the model and compile daily K and Q into a summary data.frame
#'
#' @import streamMetabolizer
#' @import mda.streams
#' @import dplyr
#' @import unitted
run_model <- function(config_row, verbose, outdir, model_name) {
  # Run the metabolism model described by one config row, write a per-day
  # summary table to 'outdir', and return the fitted model object.
  #
  # Args:
  #   config_row: one-row config data.frame consumed by config_to_metab().
  #   verbose: logical; emit extra progress messages when TRUE.
  #   outdir: directory that receives the "summary <model_name>.tsv" file.
  #   model_name: character label used to build the summary file name.
  #
  # Returns: the object produced by config_to_metab() -- a fitted model on
  # success, or a character error string / empty result on failure. The model
  # object itself is written to disk by the calling function, not here.

  # sleep somewhere between 0 and 20 minutes to stagger the load on ScienceBase
  Sys.sleep(60*runif(1,0,20))

  # Run the model. Note the config row is always printed, even when
  # verbose=FALSE, so it appears in the job log.
  if(verbose) message('ready to run model')
  print(config_row)
  if(verbose) message('running model')
  model_out <- config_to_metab(config=config_row, rows=1, verbose=verbose)[[1]]
  if(verbose) message('printing model')
  # Printing a model object can itself fail; downgrade any such error to a
  # warning so a broken print method does not abort the run.
  tryCatch({
    print(class(model_out))
    print(model_out)
  }, error=function(e) warning(e))
  # config_to_metab() signals failure via an empty or character result.
  if(length(model_out) == 0 || is.character(model_out)) {
    message('modeling failed; returning')
    return(model_out)
  } else {
    message('modeling appears to have succeeded')
  }

  # Summarize the model & associated data: daily K600 estimates joined with
  # daily discharge and per-day data statistics.
  if(verbose) message('summarizing model')
  fit <- get_fit(model_out)
  site <- get_info(model_out)$config$site
  Q <- get_ts(c('sitedate_calcLon','dischdaily_calcDMean'), site)
  # Per-date data statistics (row counts, validity, timestep) computed over
  # the day_start=4 / day_end=28 ply window.
  date_stats <- mm_model_by_ply(mm_model_by_ply_prototype, data=get_data(model_out), day_start=4, day_end=28)
  smry <- fit %>%
    mutate(site_name=site) %>%
    left_join(v(Q), by=c(date='sitedate')) %>%
    left_join(v(date_stats), by='date') %>%
    select(site_name, date, K600.daily, K600.daily.sd, discharge.daily=dischdaily, data_ply_nrow, ply_validity, timestep_days, everything())

  # Write the summary as a tab-separated file.
  smry.file <- file.path(outdir, sprintf("summary %s.tsv", model_name))
  write.table(smry, smry.file, sep='\t', row.names=FALSE)
  # [the model gets written by the calling function]
  return(model_out)
}
|
2c8b662995ea50a104e547650b1a73b0b4d33a12 | 58d7df72a0a4991039adb3fa0f162f48dda1850d | /app.R | eb93c30588f7e8a86140dcb525d07a412e47aff3 | [] | no_license | annettetran/shinyapp | 2db05a48e66b798c472aa52d598b588cb81068c7 | df4e65a7f05ffb023967b40398f5a2c81761b4fb | refs/heads/master | 2021-04-03T04:59:17.891508 | 2018-03-14T06:37:26 | 2018-03-14T06:37:26 | 124,464,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,768 | r | app.R | #####################################
#Salton Sea App
####################################
library(shiny)
library(shinydashboard)
library(tidyverse)
library(leaflet)
library(sf)
library(devtools)
library(maptools)
library(rgdal)
# Load the spatial layers (shapefiles in the app directory) and reproject
# each one to WGS84 (EPSG:4326) for display with leaflet.
dust <- st_read(dsn = ".", layer = "Emiss_Dissolve")
dust_df <- st_transform(dust, "+init=epsg:4326")
# Dust-emissivity polygons, subset to the emissivity category attribute.
emiss <- dust_df %>%
  select(Emiss_Cat)
shoreline <- st_read(dsn = ".", layer = "SeaLevel2")
shoreline_df <- st_transform(shoreline, "+init=epsg:4326")
# Predicted shoreline per year. (shoreline_df itself is also used by the
# server for the cumulative-acreage readout, which needs its Cumul_Ac column.)
playa <- shoreline_df %>%
  select(Year)
parcel <- st_read(dsn = ".", layer = "FutureExposedParcels")
parcel_df <- st_transform(parcel, "+init=epsg:4326")
# Parcel polygons, subset to the landowner classification attribute.
landowners <- parcel_df %>%
  select(OWNER_CLAS)
# Fixed display order for the landowner and emissivity input controls.
order_l <- factor(landowners$OWNER_CLAS, levels = c("Coachella Valley Water", "Federal", "IID", "Private", "Tribal", "Other"))
order_e <- factor(emiss$Emiss_Cat, levels = c("Least Emissive", "Moderately Emissive", "Highly Emissive", "Most Emissive"))
# UI: shinydashboard layout with two sidebar tabs -- "Information" (intro text
# and data sources read from files shipped with the app) and "Map" (input
# controls, the acreage readout, and the leaflet map). The inputs "shore",
# "dusty", and "owner" are consumed in server().
ui <-
  dashboardPage(
    dashboardHeader(title = "Salton Sea Future Shoreline & Emissivity", titleWidth = 450),
    dashboardSidebar(
      sidebarMenu(
        menuItem("Information", tabName = "info", icon = icon("info-circle")),
        menuItem("Map", tabName = "map", icon = icon("map"))
      )
    ),
    dashboardBody(
      tabItems(
        tabItem(tabName = "info",
                box(status = "warning", title = "Introduction", includeText("intro.txt")),
                box(status = "warning", title = "Data Sources", includeMarkdown("datasources.Rmd"))
        ),
        tabItem(tabName = "map",
                fluidPage(
                  # Inputs; submitButton defers all reactive updates until the
                  # user clicks "Apply Changes".
                  # NOTE(review): with step = 5 starting at 2018, the slider
                  # snaps to 2018, 2023, ..., 2043 and may not reach max = 2047
                  # -- confirm against the Year values in the shapefile.
                  box(title = "Inputs", status = "success",
                      sliderInput("shore", "Select Year:", min = 2018, max = 2047, value = 2018, step = 5, sep = NA),
                      radioButtons("dusty",
                                   "Select Emissivity of Future Exposed Playa:", choices = levels(factor(order_e))),
                      selectInput("owner", "Select Landowner:", choices = levels(factor(order_l))),
                      submitButton(text = "Apply Changes")
                  ),
                  box(title = "Cumulative Acreage of Exposed Playa:", status = "success", span(textOutput("acres"), style = 'font-weight:bold; font-size:30px; color:red;')),
                  box(title = "Map of Predicted Shoreline and Emissivity of Exposed Playa",
                      status = "success", leafletOutput("full_map")))
        )))
  )
# Server logic: renders the leaflet map and the cumulative-acreage readout.
# Reads inputs "dusty", "shore", and "owner" from the UI; the spatial layers
# (emiss, playa, landowners, shoreline_df) are loaded at app start-up.
server <- function(input, output) {

  # Interactive map: emissive playa (red fill), predicted shoreline for the
  # selected year (blue line), and the chosen landowner's parcels (green).
  output$full_map <- renderLeaflet({
    selected_emiss   <- filter(emiss, Emiss_Cat == input$dusty)
    selected_shore   <- filter(playa, Year == input$shore)
    selected_parcels <- filter(landowners, OWNER_CLAS == input$owner)

    map <- leaflet()
    map <- addProviderTiles(map, "Stamen.Terrain")
    map <- addPolygons(map, data = selected_emiss, color = "red",
                       stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.6)
    map <- addPolylines(map, data = selected_shore, weight = 3,
                        color = "blue", opacity = 0.6)
    map <- addPolygons(map, data = selected_parcels, weight = 1.5,
                       fill = NA, color = "green", opacity = 1)
    addLegend(map,
              colors = c("blue", "red", "green"),
              labels = c("Predicted Shoreline", "Exposed Playa in 2047", "Landowner Parcels"))
  })

  # Cumulative acreage of playa exposed by the selected year.
  output$acres <- renderText({
    exposed <- filter(shoreline_df, Year == input$shore)
    exposed$Cumul_Ac
  })
}
# Launch the application.
shinyApp(ui = ui, server = server)
|
e09aa1ce70cb081e2980e34d7421d4abee554826 | 6fa6ffc373d37fbbfeb8e625987698100ee114e9 | /Pgm_SR/Barents.R | 29f6644d02d90099dfe553f96091b391f4e63b3d | [] | no_license | Rmomal/these | 57cd82130c6844fc422f53bb3212f2e22eaad9b3 | 9e65bf3048c407232fb9882c3b7f3f99026d935a | refs/heads/master | 2021-06-11T12:04:11.485315 | 2021-01-26T08:09:55 | 2021-01-26T08:09:55 | 107,094,010 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 912 | r | Barents.R | # PLNtree for Barents fish data
rm(list=ls()); par(pch=20)
library(PLNmodels)
source('../pack1/R/codes/FunctionsMatVec.R')
source('../pack1/R/codes/FunctionsTree.R')
source('../pack1/R/codes/FunctionsInference.R')
# Data
data.dir = '../Data_SR/'
data.name = 'BarentsFish_Group'
load(paste0(data.dir, data.name, '.Rdata'))
# Functions
TreeGGM <- function(CorY){
p = ncol(CorY);
phi = 1/sqrt(1 - CorY^2); diag(phi) = 0
beta.unif = matrix(1, p, p);
FitEM = FitBetaStatic(Y, beta.init=beta.unif, phi=phi)
return(list(P=Kirshner(FitEM$beta)$P, L=FitEM$logpY))
}
# Fit VEM-PLN with 1 = no covariates, 2 = depth+temp, 3 = all covariates
# VEM = list()
# VEM[[1]] = PLN(Data$count ~ 1)
# VEM[[2]] = PLN(Data$count ~ Data$covariates[, 1:2])
# VEM[[3]] = PLN(Data$count ~ Data$covariates)
# save(VEM, file=paste0(data.dir, data.name, '-VEM.Rdata'))
load(paste0(data.dir, data.name, '-VEM.Rdata'))
|
cdbafdaf5dd51ffd8471428541557c7ab10aabd1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pivot/tests/test-unpivot.R | 4921038c93d3def24e9156cec8039c518adc48a9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,437 | r | test-unpivot.R | context("unpivot.R")
# Construction test: build a hand-made lazy tbl over a simulated MSSQL
# connection, apply unpivot(), then check the lazy-op tree, the built
# unpivot_query object, the resulting column/grouping metadata, and the
# rendered UNPIVOT SQL text.
test_that("UNPIVOT construction", {
  con <- simulate_mssql()
  src <- src_dbi(con)
  # Hand-built 'op_base_remote' node pointing at a table named ##iris, with
  # iris's column names as the variable set.
  base <- list( x = ident('##iris')
              , vars = tbl_vars(iris)
              ) %>% structure(class=c('op_base_remote', 'op_base', 'op'))
  db_iris <- structure( list( src = src
                            , ops = base
                            )
                      , class = c('tbl_dbi', 'tbl_sql', 'tbl_lazy', 'tbl'))
  levels <- quos(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)
  long <- unpivot(db_iris, Variable, Value, !!!levels)
  # The lazy op must record the source, the unpivoted columns, and the
  # key/value quosures.
  expect_is(long, 'tbl_sql')
  expect_identical(long$src, src)
  expect_identical(long$ops$x, base)
  expect_equal(long$ops$dots, levels)
  expect_equal(long$ops$args$key, quo(Variable))
  expect_equal(long$ops$args$value, quo(Value))
  # sql_build() must produce an unpivot_query with all expected components.
  built <- sql_build(long)
  expect_is(built, 'unpivot_query')
  expect_is(built, 'query')
  expect_equal(built$from, ident('##iris'))
  expect_equal(built$key, ident('Variable'))
  expect_equal(built$value, ident('Value'))
  expect_equal(built$levels
              , ident(rlang::set_names(c('Sepal.Length', 'Sepal.Width'
                                        , 'Petal.Length', 'Petal.Width'))))
  expect_equal(built$select, ident(c('Species'='Species')))
  expect_equal(tbl_vars(long), c('Species', 'Variable', 'Value'))
  expect_equal(group_vars(long), character(0))
  # The rendered SQL must match the expected UNPIVOT statement exactly.
  query <- sql_render(built, con=con)
  expect_is(query, 'sql')
  expect_is(query, 'character')
  expect_equal(query,
      sql(paste( 'SELECT "Species", "Variable", "Value"'
               , 'FROM "##iris"'
               , 'UNPIVOT'
               , ' ("Value" FOR "Variable" IN'
               , ' ("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")'
               , ' ) AS "Value"'
               , sep='\n'))
  )
})
# Behavioral test: on a plain data.frame, unpivot() must produce exactly the
# same result as gather() with the same key, value, and column selection.
test_that("unpivot.data.frame", {
  levels <- quos(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)
  long <- unpivot(iris, Variable, Value, !!!levels)
  expected <- gather(iris, Variable, Value, !!!levels)
  expect_identical(long, expected)
})
# order_by test: an unpivot_query constructed directly with an order_by
# component must retain every component and render a trailing ORDER BY clause.
test_that("order_by", {
  query <- unpivot_query( from = ident('my_table')
                        , key = ident('Key')
                        , value = ident('Value')
                        , levels = ident(c('a', 'b', 'c'))
                        , select = ident('Variable')
                        , order_by = ident(c('Key', 'Variable'))
                        )
  expect_is(query, 'unpivot_query')
  expect_is(query, 'query')
  expect_identical(query$from , ident('my_table'))
  expect_identical(query$key , ident('Key'))
  expect_identical(query$value , ident('Value'))
  expect_identical(query$levels , ident(c('a', 'b', 'c')))
  expect_identical(query$select , ident('Variable'))
  expect_identical(query$order_by, ident(c('Key', 'Variable')))
  # Render against a simulated MSSQL connection and compare the SQL text.
  sql <- sql_render(con=dbplyr::simulate_mssql(), query)
  expect_is(sql, 'sql')
  expect_identical(sql,
    sql(paste( 'SELECT "Variable", "Key", "Value"'
             , 'FROM "my_table"'
             , 'UNPIVOT'
             , ' ("Value" FOR "Key" IN'
             , ' ("a", "b", "c")'
             , ' ) AS "Value"'
             , 'ORDER BY "Key", "Variable"'
             , sep='\n'))
  )
})
|
019e6ef8cb7457eba4d037bc254d0f5ebadff414 | 29585dff702209dd446c0ab52ceea046c58e384e | /geotopbricks/R/read.vectorized.geotop.recovery.R | a0553bf2af126a360caad96b57d668e0f2d2506f | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,788 | r | read.vectorized.geotop.recovery.R | # TODO: Add comment
#
# Author: ecor
###############################################################################
NULL
#'
#' Reads a text file like the one generated by \code{\link{write.vectorized.geotop.recovery}}
#'
#'
#' It contains values and metadata of a z-layer brick referred to a time instant (e.g. a date). The file is formatted as an ASCII file like the \code{'geotop.inpts'} file.
#'
#' @param file file name to write
#' @param comment character. Comment indicator. Default is \code{"!"}.
#' @param formatter,extension,xx see \code{\link{get.geotop.recovery.state}}.
#' @param NAflag numeric. Default is -9999, see \code{\link{writeRasterxGEOtop}}.
#' @param crs Character or object of class CRS. PROJ4 type description of a Coordinate Reference System (map projection) (optional). See \code{\link{brick}} or \code{\link{raster}}.
#' @param matlab.syntax logical value. Default is \code{TRUE}. If \code{TRUE} the file syntax is like the one of a *.m Matlab script file.
#' @param ... further arguments inserted as attributes
# @export
#'
#'
#'
#' @return a \code{\link{list}} object like \code{\link{get.geotop.recovery.state}}
#'
#' @export
#' @seealso \code{\link{write.vectorized.geotop.recovery}}
#'
#' @examples
#' # see the examples of read.ascii.vectorized.brick
read.vectorized.geotop.recovery <- function(file, comment="!", matlab.syntax=TRUE,
    xx="0000", formatter="L%04d", extension=".asc", NAflag=-9999, crs="", ...) {
  # Read a vectorized GEOtop recovery file (as written by
  # write.vectorized.geotop.recovery) and rebuild the raster bricks it encodes.
  #
  # BUG FIX: the previous signature used the self-referential default
  # 'file=file', which fails with "promise already under evaluation" whenever
  # the argument is missing; 'file' is now simply required, as it always was
  # in practice.
  #
  # Returns a list shaped like get.geotop.recovery.state(): logical masks per
  # layer group, 'names', 'files', and one RasterBrick per recovery variable.

  # Keyword names of the layer groups declared in the recovery file.
  lnames <- c("noLayers","soilLayersWithZero","soilLayers","snowLayers")
  # Matlab-style files use '#' as the comment marker instead of '!'.
  if (matlab.syntax) comment <- "#"
  # Parse the keyword/value pairs of the recovery file.
  df <- declared.geotop.inpts.keywords(inpts.file=file, comment=comment, warn=FALSE, wpath=NULL, ...)
  # Spatial extent and grid size shared by all bricks.
  xmx <- get.geotop.inpts.keyword.value("xmx", inpts.frame=df, numeric=TRUE)
  xmn <- get.geotop.inpts.keyword.value("xmn", inpts.frame=df, numeric=TRUE)
  ymx <- get.geotop.inpts.keyword.value("ymx", inpts.frame=df, numeric=TRUE)
  ymn <- get.geotop.inpts.keyword.value("ymn", inpts.frame=df, numeric=TRUE)
  nrow <- get.geotop.inpts.keyword.value("nrow", inpts.frame=df, numeric=TRUE)
  ncol <- get.geotop.inpts.keyword.value("ncol", inpts.frame=df, numeric=TRUE)
  # Variable names declared in each layer group.
  lnames <- get.geotop.inpts.keyword.value(lnames, inpts.frame=df, matlab.syntax=matlab.syntax, vector_sep=",")
  names <- as.vector(unlist(lnames))
  # For each group, a logical mask over 'names' (see get.geotop.recovery.state).
  out <- lapply(X=lnames, FUN=function(x) { names %in% x })
  out$names <- names
  # Per-variable file names; variables without layers take no layer suffix.
  layer <- array(formatter, length(names))
  if (!is.null(out$noLayers)) layer[out$noLayers] <- ""
  out$files <- paste(names, xx, layer, extension, sep="")
  # Number of layers per group, read from the 'nlayers_<group>' keywords.
  nlayers <- as.vector(unlist(lapply(X=paste("nlayers_", names(lnames), sep=""),
      FUN=function(x) { get.geotop.inpts.keyword.value(x, inpts.frame=df, numeric=TRUE) })))
  names(nlayers) <- names(lnames)
  # One empty brick per group with the declared extent, CRS, and layer count.
  b <- lapply(X=nlayers,
      FUN=function(x, nrow, ncol, xmn, xmx, ymn, ymx, crs) {
        brick(nrow=nrow, ncol=ncol, xmn=xmn, xmx=xmx, ymn=ymn, ymx=ymx, crs=crs, nl=x)
      },
      nrow=nrow, ncol=ncol, xmn=xmn, xmx=xmx, ymn=ymn, ymx=ymx, crs=crs)
  names(b) <- names(nlayers)
  # Fill one brick per declared variable from its vectorized keyword values.
  for (il in names(b)) {
    names0 <- names[out[[il]]]
    for (it in names0) {
      x <- b[[il]]
      brickvalues <- get.geotop.inpts.keyword.value(it, inpts.frame=df, numeric=TRUE)
      # Replace the NA sentinel value with real NAs.
      brickvalues[brickvalues==NAflag] <- NA
      for (i in seq_len(nlayers(x))) {
        # Each layer occupies one consecutive block of nrow*ncol values.
        element <- nrow*ncol*(i-1) + seq_len(nrow*ncol)
        M <- array(brickvalues[element], c(ncol, nrow))
        x <- setValues(x, M, layer=i)
      }
      out[[it]] <- x
    }
  }
  return(out)
}
d3fbc294b48ec5d241d89f99e7709d1276cbf882 | 98ed123ae4350c79f3b4aeb2075a786e07e8f711 | /Assignment/Hypothesis Testing/BuyerRatio_Hypothesis.R | 99e8bc1e0e60927356d65b203f8ed0847e8978d3 | [] | no_license | ShilpaAChavan/Datascience-With-R | e1c582f08e2f2d32e055a09aacf54adb6a44c329 | 2a062317de101c72bb5d767d9bc036f2efb2b556 | refs/heads/master | 2021-01-05T05:15:30.844529 | 2020-09-20T15:36:13 | 2020-09-20T15:36:13 | 240,893,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,573 | r | BuyerRatio_Hypothesis.R | #################################Hypothesis Testing#####################################
#Problem Statement:Sales of products in four different regions is tabulated for males and females.
#Find if male-female buyer ratios are similar across regions.
# Data : BuyerRatio.csv
#####################################################################################
# H0: the male/female buyer proportions are the same across regions.
# Ha: the male/female buyer proportions are not the same across regions.
#
# FIXES: removed a stray backtick line that made the script unparseable,
# replaced the attach() anti-pattern with explicit '$' column references,
# and used '<-' consistently for assignment.
BuyerRatio <- read.csv(file.choose())  # BuyerRatio.csv
View(BuyerRatio)

# Assemble the male/female counts for the four regions.
region1 <- BuyerRatio$East
region2 <- BuyerRatio$West
region3 <- BuyerRatio$North
region4 <- BuyerRatio$South
region <- data.frame(region1, region2, region3, region4)
View(region)

# Chi-squared test of independence on the male/female x region table.
chisq.test(region)
# p-value = 0.66 > 0.05 -> fail to reject H0:
# the male/female buyer ratio is similar across regions.

## Second approach: stack the table into long format and test the counts.
stacked_data <- stack(BuyerRatio)
View(stacked_data)
chisq.test(table(stacked_data$values, stacked_data$ind))
# p-value = 0.2931 > 0.05 -> fail to reject H0:
# the male/female buyer ratio is similar across regions.
# Warning "Chi-squared approximation may be incorrect" comes from small
# expected cell counts; re-running with simulate.p.value = TRUE gave
# X-squared = 24, df = NA, p-value = 1 (same conclusion).
|
abaa8a8d957aba546c3f4c04630cb77d81844866 | 1e620d83967acb48dfba21436d88bf4697904ba0 | /scripts/14B-dynamical_scvelo_on_all_cbps_hto_samples.R | 60ca055b6a57eda9835eb1510cc434bbdf92d0bf | [
"MIT"
] | permissive | umr1283/LGA_HSPC_PAPER | e6c32af5fd7303dd93b802b7a2d04704d421b305 | 5ff879dc64d555452d8ee980b242a957f412b6c6 | refs/heads/main | 2023-04-17T15:57:57.025596 | 2022-07-18T15:27:35 | 2022-07-18T15:27:35 | 404,302,229 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 140 | r | 14B-dynamical_scvelo_on_all_cbps_hto_samples.R | #run scvelo in dynamical mode in cbps hto merged samples
# Run scVelo in dynamical mode on the merged CBPS HTO samples. The analysis
# itself lives in the companion Python script; reticulate executes it in the
# Python environment configured for this project.
reticulate::py_run_file("scripts/14B-dynamical_scvelo_on_all_cbps_hto_samples.py")
|
26d3674153d38652fbdfa521644bd865227364e5 | fd25b96601cf7ac4e7762183c0faeec87963aede | /supervised/mixmod1_train.R | 0d878c92b9939167ec6e2f150b013a4dfa283382 | [] | no_license | edz504/plankton | 08b9cf88db36a97a89b33d3e06da3bb169e15e5e | 3d45f585bd8644feaa9f991402c80d0fc3e0d548 | refs/heads/master | 2021-01-13T02:05:55.243329 | 2015-02-09T03:56:02 | 2015-02-09T03:56:02 | 28,106,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,146 | r | mixmod1_train.R | library(e1071)
library(dplyr)

# Kaggle plankton: train a semi-supervised Gaussian mixture model (Rmixmod)
# on 30x30 training images, using at most the first N labelled images per
# class plus an unlabelled subsample of the test set.
wd.top <- "C:/Users/edz504/Documents/Data Science Projects/Kaggle/plankton"
wd.train <- paste(wd.top, "/train", sep="")

load("training_30x30.RData")  # provides 'train.data.1' and 'labels'
labels.df <- data.frame(labels)
counts <- labels.df %>% group_by(labels) %>% summarise(count=n())

# Index every row so each class's contiguous run of rows can be located.
labels.df <- cbind(labels.df, seq_len(nrow(labels.df)))
colnames(labels.df) <- c("labels", "ind")

# First and last row index of each label's run.
inds.df <- labels.df %>%
  group_by(labels) %>%
  summarise(first_ind=min(ind), last_ind=max(ind))

# Keep at most the first N images of each class.
N <- 100
# Rows to be selected: min(count, N) per class (vectorized replacement for
# the old accumulation loop; identical result).
prealloc <- sum(pmin(counts$count, N))

# Fill the pre-allocated index vector with each class's first row indices.
# BUG FIX: the old condition 'last_ind - first_ind > N' let a class with
# exactly N + 1 members contribute all N + 1 rows -- one more than
# 'prealloc' accounted for. Capping the run end with min() selects at most
# N rows per class, consistent with the pre-allocation above.
selected.ind <- rep(NA, prealloc)
j <- 1
for (i in seq_len(nrow(inds.df))) {
  these.inds <- seq(inds.df$first_ind[i],
                    min(inds.df$first_ind[i] + (N - 1), inds.df$last_ind[i]),
                    by=1)
  selected.ind[j:(j + length(these.inds) - 1)] <- these.inds
  j <- j + length(these.inds)
}

# Train the semi-supervised GMM on the labelled training rows plus an
# unlabelled fraction of the test set.
library(Rmixmod)
load("testing_30x30.RData")  # provides 'test.data.1'
selected.train <- train.data.1[selected.ind,]
selected.labels <- labels[selected.ind]
# Use 1/10 of the testing set. (Renamed from 'F', which shadowed the
# built-in shorthand for FALSE.)
test.frac <- 10
M <- max(prealloc, nrow(test.data.1) / test.frac)
# ^ we want at least as many unsupervised rows as supervised ones
selected.data <- data.frame(rbind(selected.train,
                                  test.data.1[sample(nrow(test.data.1), M), ]))
# The last M rows carry NA labels (unlabelled); the first rows keep their
# known training labels, which makes mixmodCluster run semi-supervised.
selected.data.labels <- c(selected.labels, rep(NA, M))
start.time <- Sys.time()
mixmod.semi <- mixmodCluster(selected.data, nbCluster=121,
                             knownLabels=selected.data.labels)
end.time <- Sys.time()
print(end.time - start.time)  # ~27 min to train on the original machine
save(mixmod.semi, file="mixmodel_N100_F10_30x30.RData")
da816f63115c3c203fb4b26447abf8b1ad0cdda1 | ffe87a0a6134783c85aeb5b97332b201d50aca9d | /MINI_2015/prace_domowe/PD_4/Sudol_Adrianna_pd4.R | 7013f92296e8e63e6d16cae47aec36000e1ed844 | [] | no_license | smudap/RandBigData | d34f6f5867c492a375e55f04486a783d105da82d | 4e5818c153144e7cc935a1a1368426467c3030a5 | refs/heads/master | 2020-12-24T15:51:11.870259 | 2015-06-16T08:50:34 | 2015-06-16T08:50:34 | 32,064,294 | 0 | 0 | null | 2015-03-12T07:53:56 | 2015-03-12T07:53:56 | null | UTF-8 | R | false | false | 1,146 | r | Sudol_Adrianna_pd4.R | library(dplyr)
library(PogromcyDanych)
tmp <- tbl_df(auta2012)
info=function(marka,model,rodzaj_paliwa,moc_silnika,rok_produkcji,przebieg){
if (!missing(marka)) {tmp <- filter(tmp,
Marka == marka) }
if (!missing(model)) {tmp <- filter(tmp,
Model == model)}
if (!missing(rodzaj_paliwa)) {tmp <- filter(tmp,
Rodzaj.paliwa == rodzaj_paliwa)}
if (!missing(moc_silnika)) {tmp <- filter(tmp,
KM == moc_silnika)}
if (!missing(rok_produkcji)) {tmp <- filter(tmp,
Rok.produkcji == rok_produkcji)}
if (!missing(przebieg)) {tmp <- filter(tmp,
Przebieg.w.km == przebieg)}
tmp %>%
summarise(minimum = min(Cena.w.PLN, na.rm=TRUE),
maksimum = max(Cena.w.PLN, na.rm=TRUE),
Q1=quantile(Cena.w.PLN, probs=0.25),
Q2=quantile(Cena.w.PLN, probs=0.50),
Q3=quantile(Cena.w.PLN, probs=0.75)
)
}
#przyklady:
info(model='Carens',marka='Kia')
info(marka='Kia')
info()
|
6132717cd879a8a9e4082ab14f18e292c23d0f78 | 6579fa137eda608c0cdb22fa7236667972fb57de | /R/helpers.R | 89028ac022a32c55c679244d1d3abd2a2e5a58ac | [
"MIT"
] | permissive | nischalshrestha/sketch | 258a700bb0de38b5832861c7365605640c75cdbb | ab3258c3707aa768d31bfc7fe9c9d8916c8e1632 | refs/heads/master | 2022-11-24T16:54:46.185048 | 2019-12-09T07:57:32 | 2019-12-09T07:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 831 | r | helpers.R | #' Get the source link of a JavaScript library
#' @param x A character string; name of the JavaScript library
#'   (one of "dataframe", "math", "p5", "plotly", "chart", "d3").
#' @return A character string with the CDN URL of the library, or
#'   NULL (invisibly) when the name is not recognized.
#' @export
src <- function(x) {
    cdn_urls <- list(
        dataframe = "https://gmousse.github.io/dataframe-js/dist/dataframe.min.js",
        math      = "https://cdnjs.cloudflare.com/ajax/libs/mathjs/6.2.2/math.min.js",
        p5        = "https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.js",
        plotly    = "https://cdn.plot.ly/plotly-latest.min.js",
        chart     = "https://cdn.jsdelivr.net/npm/chart.js@2.8.0",
        d3        = "https://d3js.org/d3.v5.min.js"
    )
    if (x %in% names(cdn_urls)) {
        cdn_urls[[x]]
    } else {
        invisible(NULL)
    }
}
#' No-op placeholder keywords
#' @description These functions intentionally do nothing. They exist only so
#' that the keywords `let` and `declare` are defined when sketch code is
#' evaluated as ordinary R.
#' @param ... Any arguments; all are ignored.
#' @return `NULL`, invisibly.
#' @export
let <- declare <- function(...) {
  invisible(NULL)
}
|
ec178a925be367f1f570812b4080fd969142bde1 | 953754f186ab7b6d0634868567dc60efa7130875 | /programs/panelmodel.R | dee14bd214563b72650abae7e5523935bdb66af2 | [] | no_license | lfthwjx/PriceDiscovery | 58693f4108842e894c54943560dacd23d18f30cc | e677d8672d2471a0c3222d840a31fa479fa87a9a | refs/heads/master | 2021-05-02T16:00:32.769644 | 2016-11-01T21:23:50 | 2016-11-01T21:23:50 | 72,577,563 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,980 | r | panelmodel.R | library(xlsx)
library(xts)
library(timeSeries)
library(plm)
#library(car)
#library(dynlm)
# NOTE(review): hard-coded absolute working directory makes this script
# non-portable; consider relative paths from the project root instead.
setwd("C:\\UM\\R\\calGARCH\\output")
# Restores previously saved objects from the workspace image into the
# global environment.
load("./.RData")
# Accumulators for per-uncertainty-measure regression summaries:
# "arg" = agricultural futures, "met" = metal futures; the ".inter"
# variants hold the models with uncertainty interaction terms.
lr.model.arg<-list()
lr.arg.inter<-list()
lr.model.met<-list()
lr.met.inter<-list()
# Reshape one raw futures matrix into an analysis-ready data frame.
#
# `cpi.met` is a character matrix whose columns are:
#   1 = date, 2:9 = numeric fields (mu, vol, voi, oi, il1:il3, is),
#   10 = futures contract name.
# Two interaction columns (mu*voi, mu*oi) are appended and all numeric
# columns are coerced back to numeric after the character cbind().
#
# @param cpi.met character matrix with 10 columns and at least 2 rows
#   (with a single row, apply() drops dimensions and the code breaks).
# @return data.frame with 12 columns; columns 2:11 are numeric.
cleandata<-function(cpi.met){
  cpi.met<-unname(cpi.met)
  cpi.met.num<-apply(cpi.met[,c(2:9)],2,as.numeric)
  cpi.met.num<-cbind(cpi.met.num,cpi.met.num[,1]*cpi.met.num[,3],cpi.met.num[,1]*cpi.met.num[,4])
  # cbind() with the character date/name columns coerces everything to
  # character; the loop below restores the numeric types.
  cpi.met<-cbind(cpi.met[,1],cpi.met.num,cpi.met[,10])
  colnames(cpi.met)<-c("date","mu","vol","voi","oi","il1","il2","il3","is","mu*voi","mu*oi","future.name")
  cpi.met<-as.data.frame(cpi.met)
  for (k in 2:11) {
    # BUG FIX: the old idiom as.numeric(levels(col)[col]) only works when
    # the column is a factor.  Since R 4.0 (stringsAsFactors = FALSE by
    # default) as.data.frame() keeps character columns, for which
    # levels() is NULL and the old conversion collapsed to numeric(0).
    # as.numeric(as.character(col)) is correct for both factor and
    # character columns.
    cpi.met[,k]<-as.numeric(as.character(cpi.met[,k]))
  }
  return(cpi.met)
}
# Directory layout: one sub-directory per futures contract under
# ./futures voo, and one workbook per uncertainty measure under
# ./uncertainty.
auf.path<-'./futures voo'
mu.path<-'./uncertainty'
# [-1] drops the root directory itself from list.dirs() output.
dire.names<-list.dirs(auf.path,full.names = FALSE)[-1]
me.names<-list.files(mu.path)
# Outer loop: one iteration per uncertainty measure workbook.
# NOTE(review): 1:length(x) misbehaves when x is empty; seq_along()
# would be safer in both loops below.
for (i in (1:length(me.names))) {
  #i=1
  mu.file<-me.names[i]
  # Uncertainty series: first two columns of sheet 1.
  mu.cpi<-read.xlsx2(paste0(mu.path,'/',mu.file),startRow = 1,colIndex = c(1:2),sheetIndex = 1, header=T)
  # Inner loop: merge each futures contract with the uncertainty series.
  for (j in (1:length(dire.names))) {
    #j=17
    auf.file<-dire.names[j]
    # Volume/open-interest data and the information-share column (col 7)
    # of the price-discovery workbook, merged on their common key.
    auf<-read.xlsx2(paste0(auf.path,'/',auf.file,'/v',auf.file,'.xls'), sheetIndex=1,startRow = 1, header=T)
    aui<-read.xlsx2(paste0('./price discovery/',auf.file,'.xlsx'), sheetIndex=1,startRow = 1, colIndex = c(1:9), header=T)
    aui.futures<-aui[,c(1,7)]
    aufi<-merge(auf,aui.futures)
    aufimu<-merge(mu.cpi,aufi)
    au.name<-rep(auf.file,nrow(aufimu))
    #View(au.name)
    aufimu<-cbind(aufimu,au.name)
    aufimu<-as.matrix(aufimu)
    #View(aufimu)
    #str(aufimu)
    # Hard-coded indices route each contract into a commodity group:
    # metals (j = 1, 6, 9, 10, 16); special cases kept aside (j = 4, 12,
    # 14 -> bean2, pvc, xj); everything else accumulated as agricultural.
    # NOTE(review): this mapping silently breaks if the directory listing
    # changes order or length.
    if(j==1||j==6||j==9||j==10||j==16)
    {
      if(j==1)
      {
        cpi.met<-aufimu
      }
      else
      {
        cpi.met<-rbind(cpi.met,aufimu)
      }
    }
    else if(j==4||j==12||j==14)
    {
      if(j==12)
      {
        cpi.pvc<-aufimu
      }
      else if(j==14)
      {
        cpi.xj<-aufimu
      }
      else
      {
        cpi.bean2<-aufimu
      }
    }
    else
    {
      if(j==2)
      {
        cpi.arg<-aufimu
      }
      else
      {
        cpi.arg<-rbind(cpi.arg,aufimu)
      }
    }
  }
  # Clean both accumulated groups and fit the four regressions of the
  # information share (is) for this uncertainty measure; only the model
  # summaries are stored.
  cpi.met<-cleandata(cpi.met = cpi.met)
  cpi.arg<-cleandata(cpi.met = cpi.arg)
  form.arg<-is ~ voi + il3
  model.arg<-lm(form.arg,data = cpi.arg)
  #summary(model.arg)
  lr.model.arg[[i]]<-summary(model.arg)
  form.arg.inter<-is ~ mu*vol + mu*voi + mu*il3
  model.arg.inter<-lm(form.arg.inter,data = cpi.arg)
  lr.arg.inter[[i]]<-summary(model.arg.inter)
  form.met<-is ~ voi + il3
  model.met<-lm(form.met,data = cpi.met)
  # NOTE(review): this summary() result is discarded (stored again on the
  # next line); likely a leftover from interactive work.
  summary(model.met)
  lr.model.met[[i]]<-summary(model.met)
  form.met.inter<-is ~ mu*vol + mu*voi + mu*il1
  model.met.inter<-lm(form.met.inter,data = cpi.met)
  lr.met.inter[[i]]<-summary(model.met.inter)
}
#plot(model.arg)
# Print all stored model summaries, one group at a time.
for (i in (1:length(me.names))) {
  print(paste0("linear regression for met."," with ",me.names[i]))
  print(lr.model.met[[i]])
}
for (i in (1:length(me.names))) {
  print(paste0("linear regression for met. interactively"," with ",me.names[i]))
  print(lr.met.inter[[i]])
}
for (i in (1:length(me.names))) {
  print(paste0("linear regression for agr."," with ",me.names[i]))
  print(lr.model.arg[[i]])
}
for (i in (1:length(me.names))) {
  print(paste0("linear regression for agr. interactively"," with ",me.names[i]))
  print(lr.arg.inter[[i]])
}
# Exploratory / panel-model variants kept for reference but disabled.
#View(cpi.arg)
#cor(cpi.arg[,(2:11)])
#form.arg.late<-is~L(mu,1)+L(vol,1)+voi+il3
#model.arg.late<-dynlm(form.arg.late,data=cpi.arg)
#summary(model.arg.late)
#met.cpi.pl <- plm.data(cpi.met, index = c("future.name", "date"))
#View(met.cpi.pl)
#gr_pool <- plm(is ~ mu+vol+voi, data = met.cpi.pl,model = "pooling")
#met.cpi.pl<-pgmm(dynformula(form, list(0, 0, 0, 0)),
#                 data=cpi.met,index=c("future.name", "date"),
#                 effect = "twoways", model = "twosteps",
#                 gmm.inst = ~ is)
665ccad8b21351e9919da715953463efbd14a8e6 | 591771c6a3972cab8c680696771fd4b4aa0c3f20 | /R/getURI_Depth.R | 04e610d009a871b0ce4874982c46ac4cbb1d77a4 | [] | no_license | Sumpfohreule/S4Level2 | a36dfc014dde47763009dcc4420a198ce11a9a5d | 9034cddbd04efed8cea8c5b90cb2e4fbf16209e7 | refs/heads/main | 2023-08-19T08:58:05.616624 | 2021-09-29T14:47:03 | 2021-09-29T14:47:03 | 304,371,990 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 223 | r | getURI_Depth.R | ########################################################################################################################
setGeneric("getURI_Depth", def = function(.Object) {
standardGeneric("getURI_Depth")
}
)
|
914f5802d33cfcf29044b145ae3a289185b20aae | 21c830330e6343d2fc389949441b1cdb8dafa307 | /man/pkpd_hemtox.Rd | 52ec8623bd9f226e491f1a394aed67acf5b7e014 | [
"MIT"
] | permissive | yookhwan/PKPDsim | 778cd5278023dc1ed68134a83408a38fbf5cec7d | a26546513fdffe96abeaf16f65b33e2a9bec3a88 | refs/heads/master | 2021-01-18T08:39:35.286597 | 2015-08-06T18:29:59 | 2015-08-06T18:29:59 | 32,412,709 | 1 | 0 | null | 2015-03-17T18:37:03 | 2015-03-17T18:37:02 | null | UTF-8 | R | false | false | 363 | rd | pkpd_hemtox.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pkpd_hemtox.R
\name{pkpd_hemtox}
\alias{pkpd_hemtox}
\title{ODE system for PKPD - 2 compartment IV, neutropenia model (Friberg et al. JCO 2002)}
\usage{
pkpd_hemtox(t, A, p)
}
\description{
ODE system for PKPD - 2 compartment IV, neutropenia model (Friberg et al. JCO 2002)
}
|
168db820fc3df498d4b12998f60c188d31272b51 | b4d8f682a6ffa6932130fa0565d3e69e7bd38296 | /r/figure2.R | d85ad558e49b82938d2e0428df41391ac6aa3e1b | [] | no_license | micahwoods/2016_mlsn_paper | db56599a089cecc22a2b647589a684f19affc9d7 | aea91c42304f7f444d62c5ff73f98b6473b43884 | refs/heads/master | 2021-01-19T02:31:10.887330 | 2016-11-08T01:59:32 | 2016-11-08T01:59:32 | 60,707,479 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,123 | r | figure2.R | # for figure 2, ecdf of data and then of modeled data
# set.seed at MLSN by letter of alphabet, thus 13,12,19,14 13121914
# write function to make an ECDF for an element
# zed is the vector to plot, xLabInput is the x-axis label (the original
# comment had the two argument names reversed)
# put outside function so is defined globally
# col2: semi-transparent grey used for the empirical-data ECDF curves.
col2 <- adjustcolor("grey", alpha.f = 0.6)
# Fit a log-logistic (Fisk) distribution to `zed` with VGAM::vglm,
# simulate a same-sized sample from the fitted model (fixed seed so the
# figure is reproducible), then draw the two empirical CDFs: data in
# transparent grey (global `col2`), fitted model in green.
# NOTE(review): requires the VGAM package (vglm/fisk/rfisk) and assumes
# `col2` exists in the calling environment.
ecdfPlot <- function(zed, xLabInput) {
  # col2 to be for the data
  # trying to make clearer distinction between data and model
  fit.x <- vglm(zed ~ 1, fisk)
  z <- Coef(fit.x)
  set.seed(13121914)
  # Simulated sample has the same size as the non-missing data.
  sim.data <- data.frame(y = rfisk(n = length(na.omit(zed)),
                                   z[1], z[2]))
  # makes a plot
  plot(ecdf(zed), verticals=TRUE, do.p=FALSE,
       col.h = col2,
       col.v = col2,
       main = NULL, xlab = bquote(paste(.(xLabInput), ~kg^{-1}*")")),
       cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5, cex.sub = 1.5)
  lines(ecdf(sim.data$y), verticals = FALSE,
        col.h = "#1b9e77",
        col.v = "#1b9e77")
}
# Same idea as ecdfPlot(), but the reference model is a normal
# distribution with the sample mean/sd (used for Ca before the 2016
# switch to the log-logistic model; kept for comparison).
normalEcdfPlot <- function(zed, xLabInput) {
  meanData <- mean(zed, na.rm = TRUE)
  sdData <- sd(zed, na.rm = TRUE)
  set.seed(13121914)
  # Simulated sample has the same size as the non-missing data.
  sim.data <- data.frame(y = rnorm(length(na.omit(zed)),
                                   meanData, sdData))
  # makes a plot
  plot(ecdf(zed), verticals=TRUE, do.p=FALSE,
       col.h = "#d95f02",
       col.v = "#d95f02",
       main = NULL, xlab = bquote(paste(.(xLabInput), ~kg^{-1}*")")),
       cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5, cex.sub = 1.5)
  lines(ecdf(sim.data$y), verticals=TRUE,
        col.h = "black",
        col.v = "black")
}
# 3x2 panel layout: five element ECDFs plus a legend panel.
# NOTE(review): potassium, phosphorus, calcium, magnesium and sulfur are
# expected to exist in the calling environment (loaded workspace).
par(mfrow=c(3,2))
par(mar=c(5.1,4.1,4.1,2.1))
ecdfPlot(potassium, "K (mg")
ecdfPlot(phosphorus, "P (mg")
# pre-2016 worked with normal for Ca
# in 2016 switch to log-logistic
# normalEcdfPlot(calcium, "Ca (mg")
ecdfPlot(calcium, "Ca (mg")
ecdfPlot(magnesium, "Mg (mg")
ecdfPlot(sulfur, "S (mg")
# Empty sixth panel used only to host the legend.
plot(x = 0, y = 0, type='n', bty='n', xaxt='n', yaxt='n',
     xlab = "", ylab = "")
# col <- c("#d95f02", "#1b9e77")
legend("center", c("data", "MLSN model"),
       col = c(col2, "#1b9e77"),
       lty = 1,
       lwd = 1,
       bty = "n",
       cex = 1.5)
#' Trace correlation between the subspaces spanned by `v1` and `v2`.
#'
#' Computes tr(P1 %*% P2) / d, where P1 and P2 are the orthogonal
#' projection matrices onto span(v1) and span(v2).  A value of 1 means
#' the two d-dimensional subspaces coincide; 0 means they are
#' orthogonal.
#'
#' @param v1,v2 numeric vectors (single direction) or matrices whose
#'   columns span the subspaces to compare.
#' @param d dimension used to normalise the trace (default 1).
#' @return a single numeric value.
TraceCorrelationTSIR <- function(v1, v2, d = 1) {
  if (dim(as.matrix(v1))[2] == 1) {
    # Single direction: the projection simplifies to v v' / (v'v).
    p1 <- v1 %*% t(v1) / c(t(v1) %*% v1)
    p2 <- v2 %*% t(v2) / c(t(v2) %*% v2)
  } else {
    # General case: P = V (V'V)^{-1} V'.  matpower() is a package helper
    # (matrix power via eigendecomposition), defined elsewhere.
    # FIX: in the original, only the first assignment was governed by the
    # un-braced `else`; the remaining statements sat outside the
    # conditional and only worked because the first branch returned
    # early.  Explicit braces make the two paths unambiguous without
    # changing behaviour.
    p1 <- v1 %*% matpower(t(v1) %*% v1, -1) %*% t(v1)
    p2 <- v2 %*% matpower(t(v2) %*% v2, -1) %*% t(v2)
  }
  sum(diag(p1 %*% p2)) / d
}
|
826a963613ac6a246263f18569d9417210e275c3 | 2c277539ab857e30a1f49e76695727089b92a325 | /man/GetCOSMICMutSigsNames.Rd | d981054c30d8401baede417d5f7a5305a21f089d | [
"MIT"
] | permissive | cgab-ncc/FIREVAT | 0c99bb9af00f715fde3b19204798f0acd65ee700 | 093ab9b241288b5f1251b60da8a23ce20c752da2 | refs/heads/master | 2022-12-01T12:25:33.831570 | 2022-11-18T16:47:28 | 2022-11-18T16:47:28 | 170,807,255 | 22 | 6 | null | null | null | null | UTF-8 | R | false | true | 308 | rd | GetCOSMICMutSigsNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/firevat_utils.R
\name{GetCOSMICMutSigsNames}
\alias{GetCOSMICMutSigsNames}
\title{GetCOSMICMutSigsNames}
\usage{
GetCOSMICMutSigsNames()
}
\value{
a character vector
}
\description{
Returns all COSMIC mutational signature names
}
|
8842bb23b45a66cc2c93819fdd671830acf9820c | 41693ade868da170fcc46acc58a74dc38d24903e | /tests/testthat/test-reframe.R | b86d0413a55948250041e6c345e8cec0f71b1364 | [
"MIT"
] | permissive | tidyverse/dtplyr | 6dfa2492af55482cda9654be6a7071aa20a8f787 | 774f00dd90c6a017c9311458a425466ac47df028 | refs/heads/main | 2023-08-26T12:47:14.841458 | 2023-08-19T18:24:49 | 2023-08-19T18:24:49 | 53,366,659 | 518 | 48 | NOASSERTION | 2023-09-14T16:40:33 | 2016-03-07T23:28:16 | R | UTF-8 | R | false | false | 1,431 | r | test-reframe.R | test_that("`reframe()` allows summaries", {
df <- lazy_dt(tibble(g = c(1, 1, 1, 2, 2), x = 1:5))
expect_identical(
collect(reframe(df, x = mean(x))),
tibble(x = 3)
)
expect_identical(
collect(reframe(df, x = mean(x), .by = g)),
tibble(g = c(1, 2), x = c(2, 4.5))
)
})
# reframe() must be allowed to return zero rows (unlike summarise());
# grouped input contributes its grouping column even when empty.
test_that("`reframe()` allows size 0 results", {
  df <- lazy_dt(tibble(g = c(1, 1, 1, 2, 2), x = 1:5))
  gdf <- group_by(df, g)
  expect_identical(
    collect(reframe(df, x = which(x > 5))),
    tibble(x = integer())
  )
  expect_identical(
    collect(reframe(df, x = which(x > 5), .by = g)),
    tibble(g = double(), x = integer())
  )
  expect_identical(
    collect(reframe(gdf, x = which(x > 5))),
    tibble(g = double(), x = integer())
  )
})
# reframe() may also expand: each group can emit multiple rows, with the
# group key recycled; `.by` and group_by() must agree.
test_that("`reframe()` allows size >1 results", {
  df <- lazy_dt(tibble(g = c(1, 1, 1, 2, 2), x = 1:5))
  gdf <- group_by(df, g)
  expect_identical(
    collect(reframe(df, x = which(x > 2))),
    tibble(x = 3:5)
  )
  expect_identical(
    collect(reframe(df, x = which(x > 2), .by = g)),
    tibble(g = c(1, 2, 2), x = c(3L, 1L, 2L))
  )
  expect_identical(
    collect(reframe(gdf, x = which(x > 2))),
    tibble(g = c(1, 2, 2), x = c(3L, 1L, 2L))
  )
})
# Per dplyr semantics, reframe() always returns an ungrouped result.
test_that("`reframe()` ungroups output", {
  df <- lazy_dt(tibble(g = c(1, 1, 1, 2, 2), x = 1:5))
  gdf <- group_by(df, g, x)
  res <- reframe(gdf, row_num = row_number())
  expect_true(length(group_vars(res)) == 0)
})
|
1e336983bf14d30818a265c5376f2a565c7188c4 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googledataprocv1.auto/man/SparkSqlJob.Rd | f8a20951f26dbf7bc87b4fe761d0a1328ae667f0 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,460 | rd | SparkSqlJob.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{SparkSqlJob}
\alias{SparkSqlJob}
\title{SparkSqlJob Object}
\usage{
SparkSqlJob(SparkSqlJob.scriptVariables = NULL,
SparkSqlJob.properties = NULL, queryFileUri = NULL, queryList = NULL,
scriptVariables = NULL, jarFileUris = NULL, loggingConfig = NULL,
properties = NULL)
}
\arguments{
\item{SparkSqlJob.scriptVariables}{The \link{SparkSqlJob.scriptVariables} object or list of objects}
\item{SparkSqlJob.properties}{The \link{SparkSqlJob.properties} object or list of objects}
\item{queryFileUri}{The HCFS URI of the script that contains SQL queries}
\item{queryList}{A list of queries}
\item{scriptVariables}{Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name='value';)}
\item{jarFileUris}{Optional HCFS URIs of jar files to be added to the Spark CLASSPATH}
\item{loggingConfig}{Optional The runtime log config for job execution}
\item{properties}{Optional A mapping of property names to values, used to configure Spark SQL's SparkConf}
}
\value{
SparkSqlJob object
}
\description{
SparkSqlJob Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries.
}
\seealso{
Other SparkSqlJob functions: \code{\link{SparkSqlJob.properties}},
\code{\link{SparkSqlJob.scriptVariables}}
}
|
28b229aeb5fd7fab8bdf3bd6ec2e348e0ce1578e | cdcbac50a8a378f1b5fcb7600386951081f3bbfa | /inst/templates/method.R | 9121ac5aefd44387e619e19800c90ebe8f95d026 | [] | no_license | cran/zooimage | 278c083204d6604049ca8d8fccfa8add274d618e | f1f6cebc1faae5f49087275c69b6c67037a20b0c | refs/heads/master | 2020-05-20T12:21:01.539855 | 2018-06-29T15:00:26 | 2018-06-29T15:00:26 | 17,700,978 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,968 | r | method.R | ## Zoo/PhytoImage process REPHY version 1.0
## Copyright (c) 2014, Philippe Grosjean (phgrosjean@sciviews.org)
## Note: we need to start with the directory containing this script as default one!
# e.g., setwd(~/Desktop/ZooPhytoImage/_analyses)
## Should use source("~/dir/file.R", chdir = TRUE) to get there temporarily
################################################################################
#### Parameters for this method
## This is the name of this method
## .ZI collects every parameter of this analysis method in one list:
## who ran it, when, with which training set and classifier.
.ZI <- list(user = "", date = Sys.time(), method = "Rephy 4X lugol v.1.0",
    wdir = getwd(), system = "")
.ZI$scriptfile <- paste(.ZI$method, "R", sep = ".")
## This is the training set to use
.ZI$train <- "trainRephy_4Xlugol.01"
.ZI$traindir <- file.path("..", "_train", .ZI$train)
.ZI$trainfile <- paste(.ZI$traindir, "RData", sep = ".")
.ZI$classif <- "classrfRephy_4Xlugol.01"
.ZI$classifile <- file.path("..", "_train",
    paste(.ZI$classif, "RData", sep = "."))
## Command string evaluated later via eval(parse(...)) to (re)build the
## classifier; keep it in sync with the training-set name above.
.ZI$classifcmd <- paste0('ZIClass(Class ~ ., data = ', .ZI$train,
    ', method = "mlRforest", calc.vars = calcVars, cv.k = 10)')
## FIX: file.path() has no `sep` argument (its separator is `fsep`), so
## the original file.path(.ZI$traindir, "_cellModels.RData", sep = "")
## treated "" as an extra path component and produced a path ending in
## "/", which file.exists()/load() then fail to resolve.
.ZI$cellsModelsfile <- file.path(.ZI$traindir, "_cellModels.RData")
## Conversion factors for biovolume
## Biovolume calculation is P1 * ECD^P3 + P2
## TODO: fill this table, or use read.delim() on a text file
## TODO: also use number of cells per colony here...
.ZI$biovolume <- data.frame(
    Class = c("Chaetoceros_spp", "[other]"),
    P1 = c(1, 1),
    P2 = c(0, 0),
    P3 = c(1, 1)
)
.ZI$breaks <- seq(0, 200, by = 10) # In um
################################################################################
## NOTE(review): require() returns FALSE instead of erroring, so the
## explicit stop() calls here are what make missing packages fatal.
if (!require(zooimage)) stop("Please, install the 'zooimage' package")
if (!require(svDialogs)) stop("Please, install the 'svDialogs' package")
if (!require(shiny)) stop("Please, install the 'shiny' package")
## First of all, get system info and ask for the user name
## ZIverstring is exported by the zooimage package loaded above.
.ZI$system <- paste(ZIverstring, R.version$version.string, R.version$platform,
    sep = "; ")
.ZI$user <- dlgInput("Who are you?", Sys.info()["user"])$res
if (!length(.ZI$user) || .ZI$user == "") { # The user clicked the 'cancel' button
    stop("You must identify yourself!")
}
## Change the way warnings are displayed
## Saved here and restored at the end of the script via options(warn = .owarn).
.owarn <- getOption("warn")
options(warn = 1) # Immediate issue of warnings
## Start... check that I am in the right directory
## The directory should be '_analyses', there must be a file named <method>.R
## in it, and a file named "../_train/<train>[.RData] must be available too!
if (basename(.ZI$wdir) != "_analyses")
    stop("I am not in the right directory (should be '_analyses')")
if (!file.exists(.ZI$scriptfile))
    stop("A .R script file named '", .ZI$scriptfile,
        "' is not found in the current directory")
if (!file.exists(.ZI$traindir) & !file.exists(.ZI$trainfile))
    stop("Training set '", .ZI$train, "' not found in '",
        dirname(.ZI$traindir), "'")
## Make sure the subdirectory for this method is created
if (!file.exists(.ZI$method)) {
    dir.create(.ZI$method)
} else if (!file.info(.ZI$method)$isdir) {
    stop("A file exists for the method '", .ZI$method,
        "', but it is not a directory!")
}
## Start reporting results
cat("\n=== Session with method", .ZI$method, "===\n\n")
## Do we need to import the training set?
## First run: import from the training directory and cache as .RData;
## later runs: load the cached copy.
if (!file.exists(.ZI$trainfile)) {
    cat("Please wait: we import the training set data now...")
    assign(.ZI$train, getTrain(.ZI$traindir))
    cat(" done!\n")
    cat("\nThe training set is saved as native R data for faster access.\n")
    save(list = .ZI$train, file = .ZI$trainfile)
} else { # Load the training set now
    cat("Loading the training set '", .ZI$train, "'...", sep = "")
    load(.ZI$trainfile)
    cat(" done!\n")
}
.ZITrain <- get(.ZI$train) # Copied into .ZITrain for easier use
## Give some stats about the training set
cat("The initial training set contains:\n")
print(table(.ZITrain$Class))
## Do we need to recreate the classifier?
if (!file.exists(.ZI$classifile)) {
    ## TODO: recreate it!
    ## NOTE(review): eval(parse(...)) executes the command string built
    ## in .ZI$classifcmd; keep that string under close review since it
    ## runs arbitrary code.
    cat("\nPlease wait: we build the classifier now...")
    assign(.ZI$classif, eval(parse(text = .ZI$classifcmd)))
    cat(" done!\n")
    cat("\nThe classifier is saved as native R data for faster access.\n")
    save(list = .ZI$classif, file = .ZI$classifile)
} else { # Load the classifier now
    cat("\nLoading the classifier '", .ZI$classif, "'...", sep = "")
    load(.ZI$classifile)
    cat(" done!\n")
}
.ZIClass <- get(.ZI$classif) # Copied into .ZIClass for easier use
attr(.ZIClass, "ActiveLearning") <- FALSE # No active learning yet!
## Give some stats about the classifier
cat("The classifier is:\n\n")
print(.ZIClass)
## Launch the errorcorrection Shiny app
cat("\nStarting error correction session...\n")
runApp(system.file("gui", "errorcorrection", package = "zooimage"))
## Reset the system
options(warn = .owarn)
## Done
cat("\n================================= done! =====\n")
## TODO: if we have .zid files + description.zis => convert first into .zidb!
|
ba13526527d0b97de87e46177ea4166277ba2fa1 | 6d98dfe9d7ed3319ccda5cc7978885114c68c09d | /R/switch_branch.R | 7b5665ade2e09661da1259f92da54fae7dd0d6a4 | [
"MIT"
] | permissive | MyKo101/mpipe | 44c552abc758f316ba6aed2b03f933a4afa43383 | d1e16c77525288123b0491b836bcac05f0c0b790 | refs/heads/main | 2023-02-22T04:27:48.580286 | 2021-01-31T01:32:54 | 2021-01-31T01:32:54 | 256,473,147 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,903 | r | switch_branch.R | #' @name switch_branch
#'
#' @title
#' Perform an switch-like branch in a pipeline
#'
#' @description
#' Allows the user to perform a switch-like
#' branch without breaking out of a pipeline.
#' To maintain the flow of a pipeline, it is recommended
#' to use `fseq` style arguments (i.e. pipelines) for the cases,
#' however any function can be used. If no cases
#' match, then the original data is passed unchanged
#'
#' @param data
#' the data being passed through the pipeline.
#'
#' @param case
#' an expression to be evaluated in the context of `data` to
#' decide which branch to follow. Must evaluate to numeric or a
#' character string.
#'
#' @param ...
#' the list of alternatives. If `case` is numeric, then the `case`-th alternative
#' will be chosen (if it exists), if `case` is a character, then it will be
#' compared against the names of one of these alternatives.
#' If no character matches are found (or the numeric is out of range), then
#' the `data` will be returned untouched.
#'
#' @param warn
#' whether or not to warn that no cases were chosen
#'
#'
#' @export
#'
#' @examples
#'
#'
#' tibble::tibble(
#' x = rnorm(10),
#' y = sample(c("red", "blue", "yellow"),
#' 10,
#' replace = TRUE
#' )
#' ) %>%
#' dplyr::arrange(x) %>%
#' switch_branch(. %>%
#' dplyr::slice(1) %>%
#' dplyr::pull(y),
#' red = . %>%
#' pipe_cat("top was red\n") %>%
#' dplyr::filter(y == "red"),
#' blue = . %>%
#' pipe_cat("top was blue\n") %>%
#' dplyr::filter(x < 0)
#' ) %>%
#' dplyr::summarise(m.x = mean(x))
#'
#' palmerpenguins::penguins %>%
#' dplyr::mutate(species = factor(species, levels = c("Gentoo", "Adelie", "Chinstrap"))) %>%
#' dplyr::sample_n(1) %>%
#' switch_branch(
#' . %>%
#' dplyr::pull(species) %>%
#' as.numeric(),
#' . %>%
#' pipe_cat("Selected row is Gentoo\n"),
#' . %>%
#' pipe_cat("Selected row is Adelie\n"),
#' . %>%
#' pipe_cat("Selected row is Chinstrap\n")
#' )
switch_branch <- function(data, case, ..., warn = FALSE) {
  parent <- rlang::caller_env()
  env <- new.env(parent = parent)
  # Branch alternatives; may be named (character dispatch) or
  # positional (numeric dispatch).
  fs <- rlang::list2(...)
  original_data <- data
  # `case` is evaluated against ungrouped data, but the chosen branch
  # receives the data exactly as it was passed in (groups intact).
  if (dplyr::is_grouped_df(data)) data <- dplyr::ungroup(data)
  case_eval <- eval_expr(data, !!enquo(case), env = env)
  if (!is.character(case_eval) && !is.numeric(case_eval)) {
    rlang::abort("case must evaluate to character or numeric")
  }
  if (is.numeric(case_eval) &&
      (case_eval < 1 || case_eval > length(fs))) {
    # FIX: also treat indices < 1 as "no case chosen".  The roxygen
    # contract promises the data is returned untouched for any
    # out-of-range numeric, but the original only guarded the upper
    # bound, so fs[[0]] raised an unhelpful subscript error.
    if (warn) rlang::warn(paste0("Only ", length(fs), " case(s) supplied, case evaluated to ", case_eval))
    chosen_f <- identity
  } else if (is.character(case_eval) && !(case_eval %in% names(fs))) {
    if (warn) rlang::warn(paste0("case evaluated to ", case_eval, " which was not supplied"))
    chosen_f <- identity
  } else {
    chosen_f <- fs[[case_eval]]
  }
  chosen_f(original_data)
}
|
86667a8e0fd803fe97ee97956e1893cd6dee2fea | b3f5ac1755f52e3671351df6b2fbc702a75d9b26 | /R/assertions.R | 91520c07536c5a79fdcbc27f1f9d4069146fa27c | [
"MIT"
] | permissive | wch/webdriver | 26d52755512564b3f06b8c6574080b4551537ca8 | 4ab54a9b030ca29e12f8b831a45396407d931791 | refs/heads/master | 2021-07-05T04:41:56.990084 | 2016-10-08T11:42:08 | 2016-10-08T11:44:12 | 70,522,580 | 0 | 0 | null | 2016-10-10T19:44:24 | 2016-10-10T19:44:24 | null | UTF-8 | R | false | false | 1,001 | r | assertions.R |

# TRUE iff `x` is a single, non-missing character value.
is_string <- function(x) {
  if (!is.character(x) || length(x) != 1) {
    return(FALSE)
  }
  !is.na(x)
}

# Abort unless `x` satisfies is_string().
assert_string <- function(x) {
  stopifnot(is_string(x))
}

# URLs and file names share the plain-string contract.
assert_url <- assert_string
assert_filename <- assert_string
# Abort unless `x` is a single, non-missing, non-negative whole number
# (an integer, or a double with no fractional part).
assert_count <- function(x) {
  stopifnot(is.numeric(x))
  stopifnot(length(x) == 1, !is.na(x))
  stopifnot(x == as.integer(x), x >= 0)
}

# Ports, window geometry and timeouts are all plain counts.
assert_port <- assert_count
assert_window_size <- assert_count
assert_window_position <- assert_count
assert_timeout <- assert_count
# Abort unless `x` carries the S3 class "session".
assert_session <- function(x) {
  stopifnot(inherits(x, "session"))
}
# Abort unless every element of `x` has a non-empty name.
#
# FIX: the original condition was `all(names(x) == "")`, which required
# every name to be the *empty* string -- i.e. it rejected any genuinely
# named object and accepted only pathological all-empty-name ones, the
# inverse of what "assert named" means.
assert_named <- function(x) {
  stopifnot(
    !is.null(names(x)) && all(names(x) != "")
  )
}
# Validate a mouse button specification.
#
# Accepts the integers 1, 2 or 3 (doubles are truncated with
# as.integer() first, so e.g. 2.7 is accepted as button 2), or exactly
# one of the strings "left", "middle", "right".  Anything else aborts.
# Kept byte-identical: the stopifnot()-generated error messages are
# derived from these exact expressions and are observable behaviour.
assert_mouse_button <- function(x) {
  if (is.numeric(x)) {
    x <- as.integer(x)
    stopifnot(identical(x, 1L) || identical(x, 2L) || identical(x, 3L))
  } else if (is.character(x)) {
    stopifnot(is_string(x), x %in% c("left", "middle", "right"))
  } else {
    stop("Mouse button must be 1, 2, 3, \"left\", \"middle\" or \"right\"")
  }
}
|
2edd4fca5bc5412a76e08f6de0210289a776aa78 | cf36e0840325ff945b56a0a96436d6000daf3018 | /scripts/Figure_S1_ABCD.R | 82ff1bba6611455a2e0b7051cc3b77226a25785b | [
"MIT"
] | permissive | SysBioChalmers/EPO_GFP | 30c5fe36d1276daf99c225703eb32ab2d93740de | 838b9b12194c3fa58de29c1c6264f92e010852f4 | refs/heads/master | 2023-04-11T05:56:32.806196 | 2022-05-05T07:57:47 | 2022-05-05T07:57:47 | 290,705,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,727 | r | Figure_S1_ABCD.R | library(pheatmap)
library(ggplot2)
library(grid)
library(gridExtra)
library(GenomicRanges)
library(GenomicFeatures)
library(rtracklayer)
library(plyr)
library(biomaRt)
library(readr)
library(data.table)
library(ggrepel)
library(ggthemes)
library(RColorBrewer)
library(ggpubr)
library(ggExtra)
library(cowplot)
library(DESeq2)
library(gg3D)
# import TPMs
TPM <- read.delim('./data/35samples_TPM.txt')
rownames(TPM) <- TPM[,1]
TPM <- TPM[,-1]
ds<-TPM[rowSums(TPM) > 1,]
#QC on normalised counts
rld <- log2(ds+1)
#PCA
pca <- prcomp(t(rld), scale=T)
pcasum <- as.data.frame(apply(pca$x, 2, var))
colnames(pcasum)<-"Variance"
pcasum$Component <- rownames(pcasum)
pcasum <-within(pcasum, Variance_Fraction <- 100*(Variance/sum(Variance)))
pcasum <-within(pcasum, Variance_csum <- signif(cumsum(Variance_Fraction), digits = 2))
pcasum <-within(pcasum, Order <- as.numeric(substring(pcasum$Component, 3)))
pcasum$Component <- factor(pcasum$Component, levels = pcasum$Component[order(pcasum$Order)])
sampinfo<-samples
scores <- merge(sampinfo, as.data.frame(pca$x[,1:3]), by.x="row.names",by.y="row.names", all.x = FALSE, all.y = TRUE)
scores$condition<-factor(scores$sampleName)
scores$protein<-factor(scores$protein)
nb.cols <- length(unique(scores$sampleName))
mycolors <- colorRampPalette(brewer.pal(8, "Set2"))(nb.cols)
# Pairwise PCA scatter plots (PC1/PC2, PC1/PC3, PC2/PC3), each saved as
# a PDF under results/; axis labels carry the % variance explained.
#PCA1vsPCA2
pc1.2 <- ggplot(scores, aes(x=PC1, y=PC2) )+geom_point(aes(color=sampleName, shape=protein))
pc1.2 <- pc1.2 + scale_x_continuous(paste('PC1 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC1"], digits = 2), ' %)', sep=""))+ scale_y_continuous(paste('PC2 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC2"], digits = 2), ' %)', sep="") ) + ggtitle("log2(TPM+1) PCA plot")
pc1.2 <- pc1.2 + theme(plot.title=element_text(size = 20,face = "bold"), axis.text.y = element_text(angle = 30, hjust = 0.7, size = 12), axis.text.x = element_text(angle = 30, hjust = 0.7, size = 12), axis.title.y = element_text(size = 16), axis.title.x = element_text(size = 16))
pc1.2 <- pc1.2 + scale_color_manual(values = mycolors)
pc1.2 <- pc1.2 + geom_label_repel(aes(label = replicate), box.padding = 0.35, point.padding = 0.5, segment.color = 'grey50')
ggsave(file=paste('results/',"PCA1vsPCA2_log2TPM.pdf", sep=""), plot=pc1.2, width = 14, height = 7)
#PCA1vsPCA3
pc1.3 <- ggplot(scores, aes(x=PC1, y=PC3) )+geom_point(aes(color=sampleName, shape=protein))
pc1.3 <- pc1.3 + scale_x_continuous(paste('PC1 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC1"], digits = 2), ' %)', sep=""))+ scale_y_continuous(paste('PC3 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC3"], digits = 2), ' %)', sep="") ) + ggtitle("log2(TPM+1) PCA plot")
pc1.3 <- pc1.3 + theme(plot.title=element_text(size = 20,face = "bold"), axis.text.y = element_text(angle = 30, hjust = 0.7, size = 12), axis.text.x = element_text(angle = 30, hjust = 0.7, size = 12), axis.title.y = element_text(size = 16), axis.title.x = element_text(size = 16))
pc1.3 <- pc1.3 + scale_color_manual(values = mycolors)
pc1.3 <- pc1.3 + geom_label_repel(aes(label = replicate), box.padding = 0.35, point.padding = 0.5, segment.color = 'grey50')
ggsave(file=paste('results/',"PCA1vsPCA3_log2TPM.pdf", sep=""), plot=pc1.3, width = 14, height = 7)
#PCA2vsPCA3
pc2.3 <- ggplot(scores, aes(x=PC2, y=PC3) )+geom_point(aes(color=sampleName, shape=protein))
pc2.3 <- pc2.3 + scale_x_continuous(paste('PC2 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC2"], digits = 2), ' %)', sep=""))+ scale_y_continuous(paste('PC3 (Variance ', signif(pcasum$Variance_Fraction[pcasum$Component=="PC3"], digits = 2), ' %)', sep="") ) + ggtitle("log2(TPM+1) PCA plot")
pc2.3 <- pc2.3 + theme(plot.title=element_text(size = 20,face = "bold"), axis.text.y = element_text(angle = 30, hjust = 0.7, size = 12), axis.text.x = element_text(angle = 30, hjust = 0.7, size = 12), axis.title.y = element_text(size = 16), axis.title.x = element_text(size = 16))
pc2.3 <- pc2.3 + scale_color_manual(values = mycolors)
pc2.3 <- pc2.3 + geom_label_repel(aes(label = replicate), box.padding = 0.35, point.padding = 0.5, segment.color = 'grey50')
ggsave(file=paste('results/',"PCA2vsPCA3_log2TPM.pdf", sep=""), plot=pc2.3, width = 14, height = 7)
#Correlation clustering
# Sample-sample Spearman correlation heatmap; distance = 1 - correlation.
cormat<-cor(rld, method="spearman")
sampleDists <- as.dist(1-cor(rld, method="spearman"))
# Annotation columns must be character for pheatmap's colour mapping.
saminfo<-subset(sampinfo,select=c(sampleName,protein,producibility,cellType))
saminfo$sampleName<-as.character(saminfo$sampleName)
saminfo$protein <-as.character(saminfo$protein)
saminfo$producibility<-as.character(saminfo$producibility)
saminfo$cellType<-as.character(saminfo$cellType)
# Explicit colour assignments for every annotation level; the sampleName
# entries index into the Set2-derived palette built above.
sample_colors = list(
  "sampleName" = c("CTRL293F"=mycolors[[1]],"CTRL293Free"=mycolors[[2]], "EPO7"=mycolors[[3]],"EPO8"=mycolors[[4]],"EPOB9"=mycolors[[5]],"EPOF21"=mycolors[[6]],"EPOI2"=mycolors[[7]],"EPOpoly"=mycolors[[8]],
                   "GFP1"=mycolors[[9]],"GFP25"=mycolors[[10]],"GFP26"=mycolors[[11]], "GFP27"=mycolors[[12]],"GFP28"=mycolors[[13]], "GFP29"=mycolors[[14]], "GFP3"=mycolors[[15]],"GFPpoly"=mycolors[[16]]),
  "protein" = c("EPO"="#f7f7f7","EPO_Control"="#cccccc", "GFP"="#969696","GFP_Control"="#525252"),
  "producibility"=c("Control"="#c994c7","Producer"="#dd1c77"),
  "cellType" = c("HEK293F"="#99d8c9","HEK293freestyle"="#2ca25f")
)
# NOTE(review): the output filename has a doubled extension
# ("CorrClust_log2TPM.pdf.pdf") -- likely unintended.
pheatmap(cormat,cluster_rows=T, cluster_cols=T,
         annotation_col=saminfo,
         annotation_colors=sample_colors,
         clustering_distance_rows = sampleDists,clustering_distance_cols = sampleDists,
         show_rownames = F, show_colnames = T,annotation_legend = T,
         filename =paste0('results/',"CorrClust_log2TPM.pdf.pdf"),width = 12, height = 10 )
21519990c9de98addbf975e8c9a37e4ac96d1a53 | 691e8d13247c4278de689a24992d00cc7560f9b7 | /man/regions_map.Rd | d2be533553b776ca0a7c520a7d992f94ad45b785 | [
"CC0-1.0"
] | permissive | jas1/chilemaps | 2876a40b0770fc0d9e79e89838584e0737a5f0b1 | 62b3a7b3e3538072b4dca59cb62ce2b90f2feaeb | refs/heads/master | 2020-08-12T09:48:30.658043 | 2019-09-05T20:27:35 | 2019-09-05T20:27:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 852 | rd | regions_map.Rd | \name{regions_map}
\alias{regions_map}
\docType{data}
\title{
Map of Chile at region level (new regions)
}
\description{
Contains all the regions in the nation's territory. This dataset
considers 16 regions.
}
\usage{regions_map}
\format{
List of 16
> str(regions_map[[1]])
Classes ‘sf’ and 'data.frame': 1 obs. of 3 variables:
$ region_id : chr "01"
$ geometry :sfc_POLYGON of length 1; first list element: List of 1
..$ : num [1:1653, 1:2] -70.1 -70.1 -70.1 -70.1 -70.1 ...
..- attr(*, "class")= chr "XY" "POLYGON" "sfg"
- attr(*, "sf_column")= chr "geometry"
- attr(*, "agr")= Factor w/ 3 levels "constant","aggregate",..: NA NA
..- attr(*, "names")= chr "region_id" NA
}
\source{
Sistema Nacional de Informacion Municipal (SINIM), Subsecretaria de Desarrollo
Regional (SUBDERE) and Ministerio del Interior
}
\keyword{datasets}
|
caa9b081a1f152b71b8e27723ae5c0aac2775849 | afdbf7c244976b8b240fedc85efb46b5c364cf82 | /man/dsQueryBuild.Rd | e0d0002c0dffcb6795f6625a75858be9a0f11645 | [] | no_license | cran/dimensionsR | b7c7f57f264093d603c7bb9efb0e81d0bf597e34 | 13ace74be692215ed4bedb93c4cf0c3bb4f0eca5 | refs/heads/master | 2022-02-21T17:14:14.581212 | 2022-02-07T12:50:02 | 2022-02-07T12:50:02 | 248,759,156 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,721 | rd | dsQueryBuild.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsQueryBuild.R
\name{dsQueryBuild}
\alias{dsQueryBuild}
\title{Generate a DSL query from a set of parameters
It generates a valid query, written following the Dimensions Search Language (DSL), from a set of search parameters.}
\usage{
dsQueryBuild(
item = "publications",
words = "bibliometric*",
words_boolean_op = "OR",
full.search = FALSE,
type = "article",
categories = "",
output_fields = "all",
start_year = NULL,
end_year = NULL
)
}
\arguments{
\item{item}{is a character. It indicates the type of document to search.
The argument can be equal to \code{item = ("publications", "grants", "patents", "clinical_trials", "policy_documents")}.
Default value is \code{item = "publications"}.}
\item{words}{is a character vector. It contains the search terms.}
\item{words_boolean_op}{is character. It indicates which boolean operator have to be used to link words. It can be c("OR","AND"). Default is "OR".}
\item{full.search}{is logical. If TRUE, full-text search finds all instances of a term (keyword) in a document, or group of documents. If False, the search finds all instances in titles and abstracts only.}
\item{type}{is a character. It indicates the document type to include in the search. Default is \code{type = "article"}.}
\item{categories}{is a character vector. It indicates the research categories to include in the search. If empty \code{categories = ""}, all categories will be included in the search.}
\item{output_fields}{is a character vector. It contains a list of fields which have to exported. Default is "all".}
\item{start_year}{is integer. It indicates the starting publication year of the search timespan.}

\item{end_year}{is integer. It indicates the ending publication year of the search timespan.}
}
\value{
a character containing the query in DSL format.
For more extensive information about Dimensions Search Language (DSL), please visit: \href{https://docs.dimensions.ai/dsl/}{https://docs.dimensions.ai/dsl/}
To obtain free access to the Dimensions API for non-commercial use, please visit: \href{https://ds.digital-science.com/NoCostAgreement}{https://ds.digital-science.com/NoCostAgreement}
}
\description{
Generate a DSL query from a set of parameters
It generates a valid query, written following the Dimensions Search Language (DSL), from a set of search parameters.
}
\examples{
\dontrun{
query <- dsQueryBuild(item = "publications", words = "bibliometric*",
type = "article", categories = "management",
start_year=1980,end_year = 2020)
}
}
\seealso{
\code{\link{dsApiRequest}}
\code{\link{dsAuth}}
\code{\link{dsApi2df}}
}
|
f6087b7ee4e9afe4a38e8fa2477496f72957c881 | b31760e9aa2bc6f89f8818f5c1b921a484166140 | /man/cb_load_cohort.Rd | 0de1e1da3e5c3e1657d5332b52e32294193bf57a | [
"MIT"
] | permissive | abrahamlifebit/cloudos | 36226edf8f023a6aad95b1eee725653f7af37089 | 8a1d6f2d5105e49fd0320df65c5ed5b08e39c893 | refs/heads/master | 2023-07-06T15:43:15.514280 | 2021-08-09T11:14:40 | 2021-08-09T11:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 530 | rd | cb_load_cohort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cb_class.R
\name{cb_load_cohort}
\alias{cb_load_cohort}
\title{Get cohort information}
\usage{
cb_load_cohort(cohort_id, cb_version = "v2")
}
\arguments{
\item{cohort_id}{Cohort id (Required)}
\item{cb_version}{cohort browser version (Optional) [ "v1" | "v2" ]}
}
\value{
A \linkS4class{cohort} object.
}
\description{
Get all the details about a cohort including
applied query.
}
\seealso{
\code{\link{cb_create_cohort}} for creating a new cohort.
}
|
8c0d598bdb8f56b13843b85d5342f969ea809baa | a21cbae78a994c4598bd10cb39db39ebefe633b7 | /Lesson03-Receive_arguments_from_command_line/receive_args.r | 976e86e323c98e204c8911c79f695159eb96afd8 | [] | no_license | lilca/Lessons-for-learning-R-language | b68968b1f23435c95cc4da89cbf0383fef773602 | c763d417cafd905304ff48603a459b6b52a830a5 | refs/heads/master | 2020-05-31T16:08:53.199269 | 2015-03-24T00:24:19 | 2015-03-24T00:24:19 | 15,925,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | receive_args.r | # Receive arguments following "--args" when you set "trailingOnly=TRUE" in a argument of "commandArgs()"
# commandArgs(trailingOnly = TRUE) returns only the values that follow "--args"
trailing_args <- commandArgs(trailingOnly = TRUE)
arg1 <- trailing_args[1]
print(arg1)
# Without trailingOnly the full invocation is returned (executable, options,
# script path, ...); keep the first four entries for inspection.
full_cmd <- commandArgs()
cmd1 <- full_cmd[1]
cmd2 <- full_cmd[2]
cmd3 <- full_cmd[3]
cmd4 <- full_cmd[4]
for (piece in list(cmd1, cmd2, cmd3, cmd4)) {
  print(piece)
}
|
bcbae15e6d7c0ce1ff12fff7c4bee340613b0a5c | bbdfb40766403ed5da4e517b78b6daf45275ac47 | /analysis_code/spells.R | 12df774d70bb2d37e1884b7832c39415f52d0baf | [] | no_license | uvastatlab/libwifi | d8566b8b93834e9d549fb2c79d02e17cc1c0a804 | fcd64d0318e61faa0e03529cadb0e534922f741a | refs/heads/master | 2020-07-24T06:11:49.903212 | 2020-01-17T19:11:05 | 2020-01-17T19:11:05 | 207,824,406 | 0 | 2 | null | 2019-11-13T17:11:19 | 2019-09-11T13:51:36 | HTML | UTF-8 | R | false | false | 1,544 | r | spells.R | # Spells: duration of visits
# Reads the per-library spell files produced by spells.py (one spell = one
# continuous device presence) and explores visit durations.
# Margot Bjoring
library(tidyverse)
library(lubridate)

# Convert the day/time stay columns to hours and tag rows with their library.
add_stay_columns <- function(df, loc) {
  df %>%
    mutate(minstay = (daystay * 24) + hour(hourstay) + (minute(hourstay) / 60),
           maxstay = minstay + 0.5,
           location = loc)
}

clem_spells <- read_csv('clemons-spells.csv') %>% add_stay_columns("clemons")
ald_spells <- read_csv('alderman-spells.csv') %>% add_stay_columns("alderman")
har_spells <- read_csv('harrison-spells.csv') %>% add_stay_columns("harrison")

# Stack the three libraries and derive a calendar date per spell.
spells <- bind_rows(clem_spells, ald_spells, har_spells) %>%
  mutate(date = as.Date(time))

# Distribution of stay lengths under 10 hours, faceted by library.
spells %>%
  filter(maxstay < 10) %>%
  ggplot() +
  geom_histogram(aes(maxstay, fill = location), binwidth = 0.5) +
  facet_grid(cols = vars(location), scales = "free")

# Number of spells per user per day, faceted by library.
spells %>%
  group_by(location, user, yearday) %>%
  summarize(count = n()) %>%
  ggplot() +
  geom_histogram(aes(count, fill = location), binwidth = 1) +
  facet_grid(cols = vars(location), scales = "free")

# Daily mean stay over time (Harrison excluded).
spells %>%
  filter(location != "harrison") %>%
  group_by(location, date) %>%
  summarize(meanstay = mean(maxstay), count = n()) %>%
  ggplot() +
  geom_line(aes(x = date, y = meanstay, color = location)) +
  scale_y_continuous(limits = c(0, 24))

# NOTE(review): this replaces the `spells` tibble built above with a
# pre-computed file -- confirm the overwrite is intentional.
spells <- read_csv("spells/spells.csv")
|
fb9c4f582a8271025f40cbb64f49e753653a3f29 | c1439351216e4cd99ba17f3f0cdc7290e4ba6fe3 | /man/filterExogenous.Rd | 3b12b91671ac60783bc58ef9eb15cb71897b4cf7 | [] | no_license | cran/piecewiseSEM | cb751749c7ba0485eb81840b366dd8aae3dbe12d | c8264234681c9954c88c5926d477f5dd181112cf | refs/heads/master | 2023-03-08T20:59:05.204323 | 2023-03-04T17:00:02 | 2023-03-04T17:00:02 | 48,085,794 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 409 | rd | filterExogenous.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/basisSet.R
\name{filterExogenous}
\alias{filterExogenous}
\title{Filter relationships among exogenous variables from the basis set (ignoring add.vars)}
\usage{
filterExogenous(b, modelList, amat)
}
\description{
Filter relationships among exogenous variables from the basis set (ignoring add.vars)
}
\keyword{internal}
|
df3dcd8e5107f5f2611b9d597f3ca737b9b86979 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BayesS5/examples/Bernoulli_Uniform.Rd.R | c2e90e1af8ab6322afcdec32036c12228409679e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 198 | r | Bernoulli_Uniform.Rd.R | library(BayesS5)
### Name: Bernoulli_Uniform
### Title: Bernoulli-Uniform model prior
### Aliases: Bernoulli_Uniform
### ** Examples

# Prior mass of the Bernoulli-Uniform model prior for the model that
# includes covariates 1:3 out of p = 5000 candidate predictors.
# (Uses `<-` for assignment; the original used `=` at top level.)
p <- 5000   # total number of candidate covariates
ind <- 1:3  # indices of the covariates in the model
m <- Bernoulli_Uniform(ind, p)
print(m)
|
979d2af82dea05d36b32ebc71a3887fc1eb7c46a | fa696d2dffa2548f80b5ffe67cc71dae7c81d537 | /cachematrix.R | 7052cb8070dd2fe49f771e98830b565c95e5ea40 | [] | no_license | txv428/ProgrammingAssignment2 | 563c13e07882cc27ca562205c8c393c810e95376 | 1c0aa0d075e9a74d26e29329e53d493452b3c9df | refs/heads/master | 2021-01-19T22:05:19.862491 | 2017-04-19T15:38:02 | 2017-04-19T15:38:02 | 88,750,066 | 0 | 0 | null | 2017-04-19T13:44:10 | 2017-04-19T13:44:10 | null | UTF-8 | R | false | false | 1,425 | r | cachematrix.R | ## Here in the makeCacheMatrix, a special "matrix" object is created which cache its inverse
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of four functions:
## 1. set(y)          -- replace the stored matrix and clear the cached inverse
## 2. get()           -- return the stored matrix
## 3. setInverse(inv) -- store a computed inverse in the cache
## 4. getInverse()    -- return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
    p <- NULL  # cached inverse; NULL means "not computed yet"
    set <- function(y){
        x <<- y
        p <<- NULL  # invalidate the cache whenever the matrix changes
    }
    get <- function() {
        x
    }
    setInverse <- function(inverse) {
        ## BUG FIX: previously assigned to `i`, which silently created a
        ## global variable and left the cache (`p`) untouched.
        p <<- inverse
    }
    getInverse <- function() {
        p
    }
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned
## immediately; otherwise it is computed with solve(), cached, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
    p <- x$getInverse()
    if( !is.null(p) ) {
        message("getting cached data")
        return(p)
    }
    data <- x$get()
    ## BUG FIX: the original computed solve(data) %*% data, which is the
    ## identity matrix rather than the inverse of `data`.
    p <- solve(data, ...)
    x$setInverse(p)
    p
}
## To verify the function and results
## x = rbind(c(1, -1/4), c(-1/4, 1))
## m = makeCacheMatrix(x)
## m$get()
## [,1] [,2]
##[1,] 1.0000000 -0.02325581
##[2,] -0.3333333 1.00000000
## No cache in the first run
## > cacheSolve(m)
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
|
7e27ca93da804627a36437e7e906f3c8a2975ed0 | e127968280caa872ea7479f88a5b9b1c29849cf1 | /R/examples/get_param_vals.R | fd22e3a3509c337cffd20375db94dfb6a55d3b6d | [] | no_license | gilesjohnr/hmob | 7a03882f8487027e5bf775aa3729f45ab0acc3b2 | a999c1f83d55b96b30cb8d48e4a90ac0036268df | refs/heads/master | 2021-07-07T23:58:00.771489 | 2020-08-06T18:49:40 | 2020-08-06T18:49:40 | 168,587,776 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 362 | r | get_param_vals.R | load('./output/decay_1day_62dists_summary.Rdata') # mod.decay # Summary of decay model parameters (Lambda)
# Get the trip-duration decay rate parameter (lambda) at route level from
# the fitted decay model summary `mod.decay` loaded above.
lam <- get.param.vals(n.districts=62,
                      name='lambda',
                      level='route',
                      stats=mod.decay,
                      n.cores=4)
# see() -- project helper; presumably previews the matrix of posterior means.
see(lam$mean)
|
3b080f7234c6213ed10ab3a20c479fb3b0036bd6 | 5a345571b8855ed9c95c2a42021f6b9125817830 | /R/src/R_operator.R | b592da625017703b0fde20b803397194b3e9c136 | [] | no_license | HaYongdae/TIL | 6eb9db589b5425bec0ff5afad9c5c003b954dd63 | 175368f4930a3339b9cc823f43ef7d12da8c108b | refs/heads/master | 2021-06-17T11:46:45.485585 | 2021-03-23T08:16:06 | 2021-03-23T08:16:06 | 188,196,967 | 0 | 0 | null | 2019-08-30T04:21:18 | 2019-05-23T08:52:37 | null | UTF-8 | R | false | false | 332 | r | R_operator.R | ################## 연산자 ##################
# Operators behave like those of other languages.
# Notable special cases below.
## xor(): FALSE when the two logical values are equal, TRUE when they differ
x <- FALSE  # BUG FIX: was `x < FALSE` (a comparison), so x was never assigned
y <- FALSE
xor(x,y)
# Comparison operators
c(TRUE,TRUE) & c(TRUE, FALSE)
# `&` compares element-wise => [1] TRUE FALSE
c(TRUE,TRUE)[1] && c(TRUE, FALSE)[1]
# `&&` is scalar-only (since R 4.3 it errors on vectors longer than 1),
# so compare just the first elements => [1] TRUE
|
308e195411f3295ff1eecd0517a2de80ca466315 | a0cf3e3d7cd0487520afe2db18cc27d823be2608 | /examples/example_1/script.r | ecd0e4cb099e972abd6dc4ef432d655ada87b1ae | [
"MIT"
] | permissive | franckverrot/mruby-r | 9ce8bca3a8714d6843260635167923fa9951d45d | 5882a3a5492bab45e5d1868df37d4c40b2421b6f | refs/heads/master | 2021-01-16T18:19:37.431429 | 2015-08-08T19:26:15 | 2015-08-08T19:26:15 | 38,016,830 | 31 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | script.r | # script.r
# This script will generate 100 strings
# NOTE(review): the comment above says 100 strings but 1000 output slots are
# allocated below -- confirm which is intended.
# Load the compiled mruby-r bridge and evaluate source.rb through it.
dyn.load('../../build/mruby-r')
# .C returns its arguments as a list; `output` is preallocated with empty
# strings for the C routine to fill in.
values <- .C("mruby_r_eval", source="./source.rb", output=rep(c(''),each=1000))
# Second list element is the filled `output` vector (kept as a 1-element list).
dist <- values[2]
# Tabulate and plot the distribution of generated strings.
table(dist)
plot(table(dist))
|
440b0a8979fc09f945b9b50a7d53dcece64d0ad4 | 49796db8334d0a1fc15e012a855bf71d793ac695 | /pB_data_codes/Preprocessing_Pipeline_BConly.R | 2d7712a91184fdc8f7ce8acabbcbbc8120e5dbe7 | [] | no_license | katiemeis/code_gradlab | 97bf08b549b446b0abcd0d1fa02a67f8bf9ea1fd | 1dab064afc9f6405f969b53198664cbe51eaea2d | refs/heads/master | 2020-03-29T21:06:27.483895 | 2018-10-24T01:37:51 | 2018-10-24T01:37:51 | 150,349,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,678 | r | Preprocessing_Pipeline_BConly.R | ### Set working directory -- START HERE!!!
# ============================================================
# Agilent microarray preprocessing, part 1: read feature-extraction
# output, assemble a probe x sample matrix, and quantile-normalize it.
# ============================================================
# NOTE(review): hard-coded, machine-specific working directory.
setwd("C://Users//katie//Documents//ferdig_rotation//pB_data//Data//")
### attempt to load packages, may need to install if not already installed
library(reshape)
library(preprocessCore)
### Get list of txt files in working directory and then remove .txt extension
# NOTE(review): `pattern` is a regular expression, so "*.txt" is not anchored;
# "\\.txt$" would match only true .txt extensions.
FileNames <- list.files(pattern = "*.txt")
name <- sub(".txt", "", FileNames)
### Output filenames without .txt to create a file mapping filename to sample - use text to columns in excel and then
write.csv(FileNames,"Sample_Names2_katie.csv")
### Read back in the file that maps file name to sample ID
# (Sample_Names2.csv is the manually edited filename-to-sample-ID map.)
Filename_data <- read.delim("Sample_Names2.csv",sep=",",header=FALSE,as.is=TRUE)
name <- Filename_data[,2]
### Read in probe files
# Probe.csv maps ProbeName to gene and exon identifiers
probes <- read.csv("Probe.csv",header=TRUE)
Probe_data <- matrix(probes$ProbeName,ncol=1,nrow=length(probes[,1]))
colnames(Probe_data) <- "ProbeName"
# Read in expression probes (have identifier 0 in design file)
ExpProbes <- read.csv("ExpProbes.csv",header=FALSE)
# Read in all probes on array (not parsed by expression and control probes)
All.probes <- read.csv("AllProbes.csv",header=TRUE)
# Read in control probes, these have ControlType identifier 1
Control.probes <- read.csv("Control_Probes.csv",header=FALSE)
# Read in all probe indentifiers
# NOTE(review): 62976 rows is hard-coded to this array design; the extra
# final column holds the ControlType flag.
All_Probe_data <- matrix(ncol=length(FileNames)+1,nrow=62976)
rownames(All_Probe_data) <- All.probes[,2]
colnames(All_Probe_data) <- c(t(name), "ControlType")
### Read in all data to RMA all probes together ###
# For all txt files in the data folder get gProcessedSignal column and add to All_Probe_data
# All output from Agilent feature extraction files are identical for a given array design, so can just fill in matrix, no need to merge
for(j in 1:length(FileNames)) {
  data <- read.delim(file=FileNames[j], header=TRUE, sep="\t",skip=9)
  All_Probe_data[,j] <- data[,"gProcessedSignal"]
}
# Pull controlType information from last file
# (relies on `data` still holding the final iteration's table)
All_Probe_data[,length(FileNames)+1] <- data[,"ControlType"]
### Save unnormalized probe summary file
write.csv(All_Probe_data,"unnormalized_raw_transcript_expression_final_katie.csv")
### Perform quantile normalization using BioConductor package preprocessCore
# (the ControlType column is excluded from normalization)
norm_Probe_data <- normalize.quantiles(All_Probe_data[,-(length(FileNames)+1)])
colnames(norm_Probe_data) <- t(name)
rownames(norm_Probe_data) <- rownames(All_Probe_data)
# Save quantile normalized probe file
write.csv(norm_Probe_data,"quantilenormalized_raw_transcript_expression_final_katie.csv")
### Batch Effect Section - Need to add this
#didn't work for me, missing Metadata2.csv file
# NOTE(review): `combat2_evdata` (batch-corrected expression, presumably from
# sva::ComBat) is never created in this script -- everything below fails
# until the batch-effect section above is filled in.
### Create a new data.frame to hold quantile normalized results
Probe_Data_Full <- data.frame(combat2_evdata)
colnames(Probe_Data_Full) <- colnames(combat2_evdata)
# Add a column for ProbeName
Probe_Data_Full$ProbeName <- All.probes[,1]
# Merge ControlType info into Probe_Data_Full
Probe_Data_Full <- merge(Probe_Data_Full,All_Probe_data[,"ControlType"],by="row.names") #ControlTypes is row names
rownames(Probe_Data_Full) <-Probe_Data_Full[,"Row.names"]
Probe_Data_Full <- Probe_Data_Full[,-1]
# The last column was added by the merge above; give it its proper name.
colnames(Probe_Data_Full)[length(Probe_Data_Full[1,])] <- "ControlType"
### Find control probes ##
Control_Probes <- Probe_Data_Full[Probe_Data_Full$ControlType==1,]
### Subset out expression probes ##
Expression_Probes <- Probe_Data_Full[Probe_Data_Full$ControlType==0,]
Expression_Probes$ProbeID <- rownames(Expression_Probes)
### Need Gene and Exon info in the Probe file
Probe_data <- merge(Expression_Probes,probes,by="ProbeName")
### Count up probes for each Gene_Exon identifier and save as Replicate column in Probe_data
# NOTE(review): `list` shadows base::list for the rest of this script.
list <- unique(Probe_data[,"Gene_Exon"])
for (i in 1:length(list)){
  ind_gene <- which(Probe_data[,"Gene_Exon"]==list[i])
  Probe_data[ind_gene,"Replicate"] <- seq(1:length(ind_gene))
}
### Create Final Data Matrix to merge data into
Final_Data <- data.frame(unique(Probe_data[,"GeneID"]))
colnames(Final_Data) <- c("GeneID")
### Loop through samples and summarize data for exon and gene
# For each sample: average replicate probes to exon level, then average
# exons to gene level; write per-sample CSVs and accumulate gene means.
for(j in 1:length(FileNames)) {
  # Create a new dataset with ProbeName, j+1th column, ProbeID, GeneID, ExonID and Replicate
  Probe_data_sample <- Probe_data[,c("ProbeName",colnames(Probe_data)[j+1],"ProbeID","GeneID","ExonID","Replicate","Gene_Exon")]
  # Use recast function to get orientation from long format to wide format so we can use rowMeans
  ProbeData_WF <-recast(Probe_data_sample,GeneID+ExonID~Replicate,measure.var=colnames(Probe_data)[j+1],id.var=c("GeneID","ExonID","Replicate"))
  # Create an Exons dataset to fill in probe counts, Exon averages and SD
  Exons <- ProbeData_WF[,1:5]
  # NOTE(review): columns 3:54 hard-code a maximum of 52 probe replicates
  # per exon -- confirm this matches the array design.
  for (i in 1:length(Exons[,3])){
    Exons[i,3] <- length(which(is.na(ProbeData_WF[i,3:54])==FALSE))
    temp <- t(ProbeData_WF[i,3:(3+Exons[i,3]-1)])
    if(Exons[i,3]>1) {
      Exons[i,5] <- apply(temp,2,sd,na.rm=TRUE)
    }
    else {
      Exons[i,5] <- 0
    }
  }
  Exons[,4] <- rowMeans(ProbeData_WF[,3:54],na.rm=TRUE)
  colnames(Exons) <- c("GeneID","ExonID","Num_Probes","Mean","SD")
  # If we are on first sample, don't merge just create Exon_Data, otherwise for subsequent samples merge into Exon_Data
  if(j==1){
    Exon_Data <- Exons[,c(1,2,4)]
  }
  else{
    Exon_Data <- merge(Exon_Data,Exons[,c(1,2,4)],by=c("GeneID","ExonID"))
  }
  # Now summarize at gene level
  Test <- as.data.frame(Exons[,c("GeneID","ExonID","Mean")]) #gets rid of extra information stored during first recast
  # Get unique GeneID values from Exons file
  list_gene <- unique(Test[,"GeneID"])
  # Get number of Exons per GeneID
  for (i in 1:length(list_gene)){
    ind_gene <- which(Test[,"GeneID"]==list_gene[i])
    Test[ind_gene,"ExonID"] <- seq(1:length(ind_gene))
  }
  #Test[which(is.na(Test[,"Mean"])==TRUE),"Mean"] <- 0
  # Recast data in wide format to take rowMeans
  Exon_WF <- recast(Test,GeneID~ExonID,measure.var="Mean",id.var=c("GeneID","ExonID"))
  # Create Genes file and calculate gene summaries
  Genes <- Exon_WF[,1:4]
  # NOTE(review): columns 2:32 hard-code a maximum of 31 exons per gene.
  for (i in 1:length(Genes[,2])){
    # Get number of exons
    Genes[i,2] <- length(which(is.na(Exon_WF[i,2:32])==FALSE))
    # store exon values in temp
    temp <- t(Exon_WF[i,2:(2+Genes[i,2]-1)])
    # calculate SD
    if(Genes[i,2]>1) {
      Genes[i,4] <- apply(temp,2,sd,na.rm=TRUE)
    }
    else {
      Genes[i,4] <- 0
    }
  }
  # Calculate rowmeans to get average Gene level summary
  # NOTE(review): rowMeans is taken over ALL of Exon_WF, so column 1 (GeneID)
  # is included in the mean -- confirm this is intended.
  Genes[,3] <- rowMeans(Exon_WF,na.rm=TRUE)
  colnames(Genes) <- c("GeneID","Num_Exons","Mean","SD")
  # Output Exon and Gene files for each entry in name
  write.csv(Exons,paste(colnames(Probe_data)[j+1],"Exon_FDR.csv",sep="_"))
  write.csv(Genes,paste(colnames(Probe_data)[j+1],"Gene_FDR.csv",sep="_"))
  # merge Gene info into Final_Data file
  Final_Data<- merge(Final_Data,Genes[,c("GeneID","Mean")],by="GeneID")
}
# NOTE(review): 2:147 hard-codes 146 sample columns.
colnames(Final_Data) <- c("GeneID",colnames(Probe_data[,2:147]))
colnames(Exon_Data) <- c("GeneID","ExonID",colnames(Probe_data[,2:147]))
write.csv(Exon_Data,"Exon_Data_BC_all_katie.csv") ##same as Katie's
write.csv(Final_Data,"Gene_Data_BC_all_katie.csv") ##same as Katie's
# PCA of gene-level profiles across samples (samples as observations).
BCGene_pca <- prcomp(t(Final_Data[,-1]),center=TRUE,scale=TRUE)
barplot(BCGene_pca$sdev^2,xlab="Eigenvalues",ylab="variation")
plot(BCGene_pca$x[,1],BCGene_pca$x[,2],main="PCA of BC Gene expression profiles outliers removed",xlab="PC1",ylab="PC2")
# Fraction of total variance captured by the first four components.
(BCGene_pca$sdev[1]^2+BCGene_pca$sdev[2]^2+BCGene_pca$sdev[3]^2+BCGene_pca$sdev[4]^2)/sum(BCGene_pca$sdev^2)
|
5dbe6a1b2f23774e17adc5ce2213117e1ce1e52a | 541fff10228b07c9b54437c7e5f8e674eb7a3211 | /man/show-commaNMFns-method.Rd | ff50354ba00cb6364a826cb466c068164efa8d89 | [] | no_license | cran/NMF | 8d125625278b93f52bdcc646e382a969d6ea717d | 14e3f913b2de158149bf1fc869952cf6bb3d0561 | refs/heads/master | 2023-06-28T00:46:53.707046 | 2023-03-20T14:30:02 | 2023-03-20T14:30:02 | 17,692,093 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 293 | rd | show-commaNMFns-method.Rd | \docType{methods}
\name{show,NMFns-method}
\alias{show,NMFns-method}
\title{Show method for objects of class \code{NMFns}}
\usage{
\S4method{show}{NMFns}(object)
}
\arguments{
\item{object}{Any R object}
}
\description{
Show method for objects of class \code{NMFns}
}
\keyword{methods}
|
77da22b80c9ff2cea19ff4a2cafa9f2360cff3d2 | 59be5f9071b9bcaf2ab2fd017fc731a0ef8daf62 | /plot1.R | 805c29ef88dc784bfcdaaa6947b4255b296836ea | [] | no_license | Ankit-anand18/ExData_Plotting1 | a8b37c62f36bfb19afd2d627d5409ee2b855c457 | 85445ce7dcdf8826b401d97c3f3752123a95b6c8 | refs/heads/master | 2020-06-02T00:00:25.009996 | 2019-06-09T09:06:11 | 2019-06-09T09:06:11 | 190,971,595 | 0 | 0 | null | 2019-06-09T06:21:11 | 2019-06-09T06:21:10 | null | UTF-8 | R | false | false | 761 | r | plot1.R | # Plot 1
# read the dataset
powerCon <- read.table("household_power_consumption.txt",
header = TRUE,
sep = ";",
na.strings = "?"
)
# coerce column1 to Date
powerCon$Date <- as.Date(powerCon$Date, "%d/%m/%Y")
# Subset to get data only for 1-feb-2007 & 2-feb-2007
powerPlot <- subset(powerCon, Date == as.Date("2/2/2007", "%d/%m/%Y") | Date == as.Date("1/2/2007","%d/%m/%Y") )
# Plot Histogram
hist(powerPlot$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
# Copy to jpeg
dev.copy(jpeg, "plot1.jpeg")
dev.off()
|
8f9ab459563d904bd49932a6aca786b3e5dcbb2d | de7bf478ce5b3bd796ab9bd8054b00e8b94cc9d2 | /R/roundConstantSum.R | fb0ca0aa3dd5959c083b17c8a910c54de4aaf769 | [] | no_license | cran/FFD | b454ef48bf69f08d5014bb5b3a42f7df7401bb02 | 78eefbd15c03bf7362d9f39aca0be1c0d3d02c98 | refs/heads/master | 2022-11-11T01:55:17.186774 | 2022-11-08T09:10:06 | 2022-11-08T09:10:06 | 17,679,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,449 | r | roundConstantSum.R | ## roundConstantSum
##
## Ian Kopacka
## 2011-07-05
##
## Inputparameter:
## numVec...Numerischer Vektor. Die Summe der Elemente sollte eine
## ganze Zahl sein. Falls nicht, wird mit einem gerundeten Wert
## gerechnet und wahlweise eine Warnung augegeben.
## output.........0 oder 1. Bei 0 wird keine Warnung ausgegeben falls
## 'sum(numVec)' keine ganze Zahl ist. Bei 1 (=default)
## wird eine Warnung auf dem Bildschirm ausgegeben.
## Outputparameter: numerischer Vektor der selben Laenge wie 'numVec'. Die
## Elemente sind ganze Zahlen, und
## sum(roundConstantSum(numVec)) == sum(numVec)
roundConstantSum <- function(numVec, output = 1){
Summe <- sum(numVec)
if(Summe - round(Summe) > 0){
if(output == 1){
warnung <- paste("WARNUNG - Sum of 'numVec' is not an integer: ", Summe,
"\n Proceeding with rounded value: ", round(Summe), "\n", sep = "")
cat(warnung)
}
Summe <- round(Summe)
}
gerundet <- round(numVec)
## Es wurde zu oft abgerundet:
if(sum(gerundet) < Summe){
## Wie oft wurde faelschlicherweise abgerundet:
anzFehler <- Summe - sum(gerundet)
## Konzept: Suche von den Zahlen, die abgerundet wurden
## die mit den groessten Nachkommastellen heraus und
## runde sie auf:
rest <- numVec - gerundet
names(rest) <- seq(along = rest)
rest <- rest[order(numVec, decreasing = TRUE)]
rest <- sort(rest, decreasing = TRUE)
index <- as.numeric(names(rest)[1:anzFehler])
gerundet[index] <- gerundet[index] + 1
return(gerundet)
}
## Es wurde zu oft aufgerundet:
if(sum(gerundet) > Summe){
## Wie oft wurde faelschlicherweise aufgerundet:
anzFehler <- sum(gerundet) - Summe
## Konzept: Suche von den Zahlen die aufgerundet wurden
## die mit den kleinsten Nachkommastellen heraus und
## runde sie ab:
rest <- numVec - gerundet
names(rest) <- seq(along = rest)
rest <- rest[order(numVec, decreasing = FALSE)]
rest <- sort(rest, decreasing = FALSE)
index <- as.numeric(names(rest)[1:anzFehler])
gerundet[index] <- gerundet[index] - 1
return(gerundet)
}
return(gerundet)
}
|
6a219bc984d2b2b3ea0eaee9006f57025cf00a8f | ee54a85e446285fcc568caffe2dab934089e12e2 | /xp10/plotpost.R | a2a80bb9fd2bc5339955f8a834b9be14a28c0423 | [
"CC0-1.0"
] | permissive | bricebeffara/rwa_evaluative_conditioning_data_analysis | e8697aee9cb7097c19ec9207a092d547658246e2 | 81c35c84148f9579b68624e32c0efc0dbad43e3c | refs/heads/main | 2023-06-03T01:15:34.778559 | 2021-06-17T09:55:12 | 2021-06-17T09:55:12 | 375,408,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,696 | r | plotpost.R | old.par <- par(mfrow=c(3, 2))
# Posterior plots of the RWA regression coefficients from several brms
# models (spread_resp, spread_resp_uspos_ind, spread_resp_uspos_dir,
# spread_resp_usneg -- fitted elsewhere). Each panel shows the 95% HDI,
# a comparison value of 0 and a ROPE of [-0.2, 0.2].
BEST::plotPost(posterior_samples(spread_resp, "b")$b_RWAscore,
               credMass = 0.95, compVal = 0,
               ROPE = c(-0.2, 0.2),
               xlab = expression(beta[RWA["positive indirect"]]),
               col = as.character(bayesplot::color_scheme_get("brightblue")[2]),
               showMode = FALSE, showCurve = FALSE)
BEST::plotPost(posterior_samples(spread_resp_uspos_ind, "b")$b_RWAscore,
               credMass = 0.95, compVal = 0,
               ROPE = c(-0.2, 0.2),
               xlab = expression(beta[RWA["positive indirect"]]),
               col = as.character(bayesplot::color_scheme_get("brightblue")[2]),
               showMode = FALSE, showCurve = FALSE)
BEST::plotPost(posterior_samples(spread_resp_uspos_dir, "b")$b_RWAscore,
               credMass = 0.95, compVal = 0,
               ROPE = c(-0.2, 0.2),
               xlab = expression(beta[RWA["positive direct"]]),
               col = as.character(bayesplot::color_scheme_get("brightblue")[2]),
               showMode = FALSE, showCurve = FALSE)
# NOTE(review): this panel plots the spreading x RWA interaction from the
# US-negative model, but reuses the "positive direct" x-axis label from the
# previous plot -- confirm the label is correct.
BEST::plotPost(posterior_samples(spread_resp_usneg, "b")$"b_spreading:RWAscore",
               credMass = 0.95, compVal = 0,
               ROPE = c(-0.2, 0.2),
               xlab = expression(beta[RWA["positive direct"]]),
               col = as.character(bayesplot::color_scheme_get("brightblue")[2]),
               showMode = FALSE, showCurve = FALSE)
# Equivalence tests and posterior summaries for the same ROPE.
library(sjstats)
library(sjmisc)
equi_test(spread_resp, out = "plot", rope = c(-0.2, 0.2))
equi_test(spread_resp, rope = c(-0.2, 0.2))
tidy_stan(spread_resp_uspos_ind,
          typical = "mean",
          prob = .95)
summary(spread_resp_uspos_ind)
|
e303539fb1308b29c44011d393a5d73888517564 | ec98f3ad87391c21ef03c31ea3b102a9507d4cfd | /cachematrix.R | 2ed204829d3337dbc5c8bd4c9e9e8624f0925225 | [] | no_license | kentlottis/ProgrammingAssignment2 | 646353b65ae4b3ad5b166928c6cf99b0a06e628b | 500ddece450a89b1382d073dd39c6c89d8b5d181 | refs/heads/master | 2020-12-25T16:26:03.448989 | 2014-08-23T17:26:51 | 2014-08-23T17:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,745 | r | cachematrix.R | ## R Programming - Assignment 2 - Caching Matrix Inverse
## makeCacheMatrix(): builds a special "matrix" object that can cache its
## own inverse. Unlike the classic cacheMean example there is no setInverse()
## method; getInverse() itself lazily computes and memoises the inverse the
## first time it is needed.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  # Replace the stored matrix and invalidate any previously cached inverse.
  # Invisibly returns the new matrix (value of the last assignment).
  set <- function(value) {
    inv_cache <<- NULL
    x <<- value
  }
  # Return the stored matrix.
  get <- function() x
  # Return the inverse, computing and caching it on the first call only;
  # later calls reuse the cached value.
  getInverse <- function() {
    if (is.null(inv_cache)) {
      message("performing matrix inversion")
      inv_cache <<- solve(x)
    }
    inv_cache
  }
  list(set = set, get = get, getInverse = getInverse)
}
## cacheSolve(): returns the inverse of a cached "matrix" built by
## makeCacheMatrix above. All caching logic lives inside the object's own
## getInverse() method, so this is just a thin convenience wrapper and the
## object can equally well be used on its own.
cacheSolve <- function(x) {
  inverse <- x$getInverse()
  inverse
}
|
d69270a8e15eeb127d9abc73bd1d7a9dc7cd180b | c16684c93fb95310a315c223af03f9dc9bc77a76 | /linregpackage/R/methods.R | ca57ef48a9f3c6d37478f4f9e212982ca451409b | [] | no_license | gitter-badger/Lab4 | 15dda9957c5c81b6b8ec86704d6348dc76741f22 | 22b628f755f5f4e78ae7367e000e8013a7557b6a | refs/heads/master | 2020-04-01T18:20:40.753966 | 2015-09-18T10:40:17 | 2015-09-18T10:40:17 | 42,716,424 | 0 | 0 | null | 2015-09-18T10:58:44 | 2015-09-18T10:58:43 | null | UTF-8 | R | false | false | 453 | r | methods.R | #tries to do the plot operations
# Baseline: fit the same model with stats::lm() for comparison with linreg().
data(iris)
mod_object <- lm(Petal.Length ~ Species, data = iris)
print(mod_object)
# S3 method so base as.data.frame() works on linreg objects: pairs each
# fitted value with its residual in a two-column data frame.
# FIX: added `...` so the signature is compatible with the as.data.frame
# generic (as.data.frame(x, row.names = NULL, optional = FALSE, ...));
# without it, dispatch with extra generic arguments errors.
as.data.frame.linreg <- function(x, ...) {
  df <- data.frame(fitted=x$fitted_values, residuals=x$resi)
  return(df)
}
# S3 plot method for linreg objects: scatter of residuals against fitted
# values, rendered with ggplot2 (assumed attached by the caller).
plot.linreg <- function(y, ...) {
  plot_df <- as.data.frame(y)
  ggplot(plot_df) +
    geom_point(shape = 1, size = 10, aes(x = fitted, y = residuals)) +
    ggtitle("Residuals vs Fitted")
}
# Exercise the custom estimator (linreg() is defined elsewhere in the package).
r <- linreg(Petal.Length~Species, data=iris)
plot.linreg(r) |
5285f739180f10a827c596c234410ba1a15db498 | e0bae273f83296aefc64ddc6b88a35a8d4aa6d84 | /man/ubiquity.Rd | 21bfac812e8883388403adf96a74554ff508692e | [] | no_license | PABalland/EconGeo | 9958dcc5a7855b1c00be04215f0d4853e61fc799 | 420c3c5d04a20e9b27f6e03a464ca1043242cfa9 | refs/heads/master | 2023-08-08T04:22:25.972490 | 2022-12-20T11:03:49 | 2022-12-20T11:03:49 | 63,011,730 | 37 | 11 | null | 2018-09-18T15:22:21 | 2016-07-10T18:00:42 | R | UTF-8 | R | false | true | 1,680 | rd | ubiquity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ubiquity.r
\name{ubiquity}
\alias{ubiquity}
\title{Compute a simple measure of ubiquity of industries}
\usage{
ubiquity(mat, RCA = FALSE)
}
\arguments{
\item{mat}{An incidence matrix with regions in rows and industries in columns}
\item{RCA}{Logical; should the index of relative comparative advantage (RCA - also referred to as location quotient) first be computed? Defaults to FALSE (a binary matrix - 0/1 - is expected as an input), but can be set to TRUE if the index of relative comparative advantage first needs to be computed}
}
\description{
This function computes a simple measure of ubiquity of industries by counting the number of regions in which an industry can be found (location quotient > 1) from regions - industries (incidence) matrices
}
\examples{
## generate a region - industry matrix with full count
set.seed(31)
mat <- matrix(sample(0:10,20,replace=T), ncol = 4)
rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
colnames(mat) <- c ("I1", "I2", "I3", "I4")
## run the function
ubiquity (mat, RCA = TRUE)
## generate a region - industry matrix in which cells represent the presence/absence of a RCA
set.seed(31)
mat <- matrix(sample(0:1,20,replace=T), ncol = 4)
rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
colnames(mat) <- c ("I1", "I2", "I3", "I4")
## run the function
ubiquity (mat)
}
\author{
Pierre-Alexandre Balland \email{p.balland@uu.nl}
}
\references{
Balland, P.A. and Rigby, D. (2017) The Geography of Complex Knowledge, \emph{Economic Geography} \strong{93} (1): 1-23.
}
\seealso{
\code{\link{diversity}} \code{\link{location.quotient}}
}
\keyword{ubiquity}
|
8934438509b66421f58e060c10f3dd6c66b77434 | 5c0b215a506702310ebcd13d32ef398d2be23a1c | /DA2_Classification_LogReg_Intro.R | c7eb8c3d971693dc5c0fd99991a5b6d670e69c88 | [] | no_license | CollinCroskery/Foundations | ebda9e2b2b3fd2d72217fec6ffde1e819894cc08 | 9f1e9ca22ad8a4ec0c24d148039b3a477eb67a16 | refs/heads/master | 2022-12-19T13:09:40.319729 | 2020-08-27T19:01:28 | 2020-08-27T19:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,375 | r | DA2_Classification_LogReg_Intro.R | library(tidyverse)
library(MASS)
library(ISLR)
# from book
# Logistic regression walk-through on the ISLR Default data:
# single-predictor fits, then a multiple fit whose probabilities are
# recomputed by hand via the inverse-logit formula.
dfDefault <- Default
# Default probability as a function of student status only.
glm.fit <- glm(default ~ student, data = dfDefault, family = binomial)
summary(glm.fit)
dfDefault$Prob <- predict(glm.fit, type = "response")
# NOTE(review): Prob comes from the student-only model, so plotting it
# against balance yields just two horizontal bands -- confirm intended.
ggplot(dfDefault, aes(x=balance, y=Prob)) + geom_point()
#--
# Refit using balance as the predictor; the probability curve is now smooth.
glm.fit <- glm(default ~ balance, data = dfDefault, family = binomial)
summary(glm.fit)
dfDefault$Prob <- predict(glm.fit, type = "response")
ggplot(dfDefault, aes(x=balance, y=Prob)) + geom_point()
# ---------------------- multiple logistic regression ---------------- #
mglm.fit <- glm(default ~ student + balance + income, data = dfDefault, family = binomial)
summary(mglm.fit)
dfDefault$mProb <- predict(mglm.fit, type = "response")
# Intercept and the three slope coefficients, in formula order.
alpha <- mglm.fit$coefficients[1]
beta <- mglm.fit$coefficients[2:4]
test <- dfDefault
# Recode the student factor to 0/1 so it can enter the linear predictor.
test$student <- as.integer(dfDefault$student)-1
# Manual inverse-logit: p = exp(eta) / (1 + exp(eta)).
# NOTE(review): columns 2:4 are assumed to be student/balance/income in the
# same order as the coefficients -- confirm against names(dfDefault).
test$tProb <- (exp(alpha[1]+(beta[1]*test[,2]+ beta[2]*test[,3]+beta[3]*test[,4])))/
  (1+(exp(alpha[1]+(beta[1]*test[,2]+ beta[2]*test[,3]+beta[3]*test[,4]))))
# or using matrix algebra to make this easier:
tst1 <- data.matrix(test[,2:4])
bet1 <- as.numeric(beta)
test$tmProb <- exp(alpha[1] + t(bet1%*%t(tst1)))/(1+exp(alpha[1] + t(bet1%*%t(tst1))))
# looks like just as much effort, but it's not when you're working!!
ggplot(test, aes(x=balance, y=tmProb, color = factor(student))) + geom_point()
|
51abd05169f8911fb40742858909bfb7d6e843a6 | 29585dff702209dd446c0ab52ceea046c58e384e | /TraMineR/R/normdist.R | 1302bb9775e41e818b5c970853356ddf211b035d | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,000 | r | normdist.R | ## Normalize distance using the following codes (same as in C code)
## Normalize a raw distance between two sequences.
##
## The normalization is selected by an integer code (kept in sync with the
## C code):
##   0 => no normalization
##   1 => Abbott: divide by the length of the longer sequence
##   2 => Elzinga
##   3 => maximum possible distance normalization (divide by maxdist)
##   4 => Yujian and Bo (2007): 2*rawdist / (maxdist + rawdist)
##
## Arguments:
##   rawdist  raw (unnormalized) distance between the two sequences
##   maxdist  maximum possible distance for this pair of sequences
##   l1, l2   lengths of the two sequences
##   norm     normalization code, see above
## Value: a single numeric; identical sequences (rawdist == 0) always map
## to 0. Stops with an error on an unknown code.
normdist <- function(rawdist, maxdist, l1, l2, norm) {
	## identical sequences need no normalization
	if (rawdist == 0) {
		return(0)
	}
	if (norm == 0) { ## without normalization
		return(rawdist)
	} else if (norm == 1) { ## Abbott normalization
		if (l1 > l2) {
			return(rawdist / l1)
		} else if (l2 > 0) {
			return(rawdist / l2)
		}
		return(0) ## both sequences are empty
	} else if (norm == 2) { ## Elzinga normalization
		if (l1 * l2 == 0) {
			## one sequence empty: maximal distance unless both are empty
			if (l1 != l2) {
				return(1)
			}
			return(0)
		}
		return(1 - ((maxdist - rawdist) / (2 * sqrt(l1 * l2))))
	} else if (norm == 3) { ## maximum possible distance normalization
		if (maxdist == 0) {
			return(1)
		}
		return(rawdist / maxdist)
	} else if (norm == 4) { ## Yujian and Bo (2007)
		if (maxdist == 0) {
			return(1)
		}
		return(2 * rawdist / (rawdist + maxdist))
	}
	## typo fixed: "Unknow" -> "Unknown"
	stop("Unknown distance normalization used")
}
c67bb6bc7cf653a6dc39b5697520c80fb6d578af | 0736b7c3b67690cae38122c0d82b74ce5c339329 | /datawrapper_code.R | abbd068e9eff022ce8082fb2391785788cc9f6e6 | [] | no_license | pmannino5/datawrapper_learn | 54dd1a496fcaa9b449d830a4868ffdcea9519f37 | 7cbc9a4693c5861e4a88dc662e1e3ddbe4b1fdfd | refs/heads/main | 2023-05-04T13:10:05.888664 | 2021-05-20T08:51:39 | 2021-05-20T08:51:39 | 368,796,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,723 | r | datawrapper_code.R | # This short script begins using the Datawrapper API
# Fortunately, there is a nice package called DatawRappr that makes using the API very easy.
# https://munichrocker.github.io/DatawRappr/index.html
# For this learning script, I'm going to use the exoneration data I downloaded a couple years ago
# and make a line graph of exonerations per year and table of exonerations by race
# I'll work on additional charts and tables in the future
library(DatawRappr)
library(dplyr)
# set working directory
setwd('/Users/petermannino/Documents/Exoneration')
# download the exoneration dataset
exon_dat<-readxl::read_excel('publicspreadsheet.xlsx')
# set up connection with datawrapper api
datawrapper_auth(api_key="your api key")
dw_test_key() # test connection
##### Make a line graph in datawrapper #####
# summarize total exonerations per year
dw_data<-exon_dat %>%
mutate(year = lubridate::year(`Posting Date`)) %>%
group_by(year) %>%
summarise(Total_Exonerations=n())
# Create an empty chart in datawrapper
dw_chart<-dw_create_chart()
# Add data to the chart
dw_data_to_chart(x=dw_data, chart_id=dw_chart)
# Set chart type
dw_edit_chart(chart_id = dw_chart, type = "d3-lines")
# Edit other attributes of the chart
dw_edit_chart(chart_id=dw_chart,
title="Exonerations Per Year", # Title
annotate = "Data Downloaded in 2019", # Note at the bottom of chart
source_name = "National Registry of Exonerations at the University of Michigan", # Source name at bottom
visualize = list("y-grid-labels" = "inside", # Put the Y-labels within the chart
"base-color" = 3, # change the color of the line
"line-dashes" = list(Total_Exonerations=2))) # Make the line dashed
# Export the chart
export_chart<-dw_export_chart(chart_id = dw_chart,
type=c("png"),
plain =FALSE) # This ensures the title, annotations, and everything is included
export_chart # look at the chart
# The exported chart is a magick class object, so use image_write from that package to save
magick::image_write(export_chart,"line_graph_export.png")
# Publish the chart online
dw_publish_chart(chart_id = dw_chart)
# Delete it
dw_delete_chart(dw_chart)
##### Make a table in datawrapper #####
# df of exonerations by race
dw_table_dat<-exon_dat %>%
group_by(Race) %>%
summarize(Total_Exonerations = n())
dw_table<-dw_create_chart(type='tables') # create empty table
dw_data_to_chart(chart_id = dw_table, x = dw_table_dat) # add data to table
# Add title, annotations, and source to table
dw_edit_chart(chart_id = dw_table,
title = "Exonerations by Race",
source_name = "National Exoneration Registry at the University of Michigan",
annotate = "Data downloaded in 2019")
# Edit visuals
dw_edit_chart(chart_id = dw_table,
visualize = list(header=list(style=list(bold=TRUE, # bold the header
background="#f0f0f0"), # make the background gray
borderBottom="2px"), # make the line below the header thicker
striped=TRUE, # make the table stripped
sortTable=TRUE, # Sort the table
sortBy = "Total_Exonerations")) # by exonerations
# export table
table_export<-dw_export_chart(chart_id = dw_table, type=c("png"), plain = FALSE)
table_export
# Save the table
magick::image_write(table_export, "table_export.png")
# publish table online
dw_publish_chart(dw_table)
# delete table
dw_delete_chart(chart_id = dw_table)
|
ed8254c5676d6ae8c884119591a321da2b79e065 | d1fa97a9e157f5e18745db31763d87ea252baa39 | /man/ncdfFlowSet-Subset.Rd | ff7bb3dba9fcd2510431f77882a0465f39077e0d | [] | no_license | RGLab/ncdfFlow | eb9766e1fb26b7cdcdf3b2fd54c2141e90ee5141 | 53144b18262197ad498a9422c2a5549675dbff05 | refs/heads/master | 2022-07-16T18:49:45.433285 | 2022-07-01T01:38:22 | 2022-07-01T01:38:22 | 7,230,355 | 6 | 7 | null | 2021-01-22T02:07:13 | 2012-12-18T21:02:00 | R | UTF-8 | R | false | true | 1,163 | rd | ncdfFlowSet-Subset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncdfFlowSet-Subset-methods.R
\name{Subset,ncdfFlowSet,filterResultList-method}
\alias{Subset,ncdfFlowSet,filterResultList-method}
\alias{Subset,ncdfFlowList,filterResultList-method}
\alias{Subset,ncdfFlowSet,filter-method}
\alias{Subset,ncdfFlowList,filter-method}
\alias{Subset,ncdfFlowSet,list-method}
\title{subset a ncdfFlowSet by filter}
\usage{
\S4method{Subset}{ncdfFlowSet,filterResultList}(x, subset, select, ...)
\S4method{Subset}{ncdfFlowList,filterResultList}(x, subset, select, ...)
\S4method{Subset}{ncdfFlowSet,filter}(x, subset, ...)
\S4method{Subset}{ncdfFlowList,filter}(x, subset, ...)
\S4method{Subset}{ncdfFlowSet,list}(x, subset, select, validityCheck = TRUE, ...)
}
\arguments{
\item{x}{\code{ncdfFlowSet} or \code{ncdfFlowList}}
\item{subset, select, ...}{see \code{\link[flowCore]{Subset-methods}}}
\item{validityCheck}{\code{logical} whether to skip validity check for speed.}
}
\value{
one or more \code{ncdfFlowSet} objects which share the same hdf5 file with the original one.
}
\description{
Equivalent to \code{Subset} method for \code{flowSet}.
}
|
ffc5d6d1c4602a555b4553f4e267efe6e7aa2c9c | 93704f74aae600d3cf2d5307026b2bfb44c678c4 | /Week3/R/week3_Navie_Bayes.R | f1a216545ae356a5cfb1dc3bc14f72001201b27a | [] | no_license | mrjaypandya/Practical-Machine-Learning-on-H2O | edffd9339572a3c3e0a1652c7bfe094c3469aca0 | 602ca359ea3ddda2ae5b28e71bd236dfb3840653 | refs/heads/master | 2020-05-18T11:14:06.438683 | 2019-05-01T05:21:59 | 2019-05-01T05:21:59 | 184,372,422 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 500 | r | week3_Navie_Bayes.R | library('h2o')
# Fit H2O Naive Bayes classifiers on the iris data set and compare
# performance for different Laplace smoothing settings.
h2o.init()
url <- "http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv"
iris <- h2o.importFile(url)
# 80/20 train/test split of the imported frame.
parts <- h2o.splitFrame(iris, 0.8)
train <-parts[[1]]
test <-parts[[2]]
nrow(train)
nrow(test)
# Naive Bayes: predictors are columns 1:4, response is column 5.
mNB <- h2o.naiveBayes(1:4,5,train)
mNB
p <- h2o.predict(mNB,test)
p
h2o.performance(mNB, test)
# Same model with Laplace smoothing of 1 ...
mNB_1 <- h2o.naiveBayes(1:4, 5, train, laplace = 1)
h2o.performance(mNB_1,test)
# ... and with a much stronger smoothing of 12, for comparison.
mNB_2 <- h2o.naiveBayes(1:4, 5, train, laplace = 12)
h2o.performance(mNB_2,test)
|
afae54bc061b2354c4c611ef860dcb44a147537e | 03bfebaf16626cedda6487d231bcba89a7679762 | /Sample code/TT_RScript.R | f02ca2ce0236d53d049886466e8bfd446e4472da | [] | no_license | ynchen08/Accessibility_Analysis_Demo | 48e0859d68611df53682ff2a30b46cf3165a7c03 | d4699ec16d751cfaa2a2b1d394b978461f2546ea | refs/heads/master | 2022-05-16T00:31:17.107206 | 2018-12-23T22:33:25 | 2018-12-23T22:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,768 | r | TT_RScript.R | setwd("D:/SA study/R travel Time")
library(readxl)
# Estimate driving time and distance from each patient coordinate to a fixed
# destination via the Google Maps Distance Matrix API, under four scenarios:
# {no highway, highway} x {pessimistic, optimistic} traffic models.
#import excel coordinate file
LatLon_forR <- read_excel("D:/SA study/R travel Time/LatLon_forR.xlsx")
View(LatLon_forR)
#create new coordinate var by concatenating lat and long
# "lat+lon" is the origin format gmapsdistance expects.
LatLon_forR$coord=paste(LatLon_forR$LatDD,LatLon_forR$LongDD, sep="+")
#Install R google api package
# NOTE(review): install.packages() inside a script re-installs on every run;
# better to install once and only library() here.
install.packages("gmapsdistance")
library(gmapsdistance)
# NOTE(review): a live API key is committed to source here -- revoke it and
# load the key from an environment variable instead.
set.api.key("AIzaSyBOHv2ETFy8VMjOVqXxqJ5B2wDhz9ZjzKQ")
options(max.print=1000000)
install.packages("plyr")
library(plyr)
#estimate travel time
y=c(LatLon_forR$coord)
##No highway, pessimistic
r_noH=gmapsdistance(origin = y,
destination = "-29.838457+30.997391",
mode = "driving", traffic_model = "pessimistic", avoid= "highways", dep_date="2017-10-11", dep_time="10:00:00")
# API returns seconds and meters; convert to minutes and km.
TT=r_noH$Time
Dis=r_noH$Distance
TT$Time_min_NoHP=TT$'Time.-29.838457+30.997391'/60
Dis$Dist_km_NoHP=Dis$`Distance.-29.838457+30.997391`/1000
# rename() here is plyr's list-style rename (hence library(plyr) above).
TT2=subset(rename(TT, c("or"="coord")),select=c('coord','Time_min_NoHP'))
Dis2=subset(rename(Dis, c("or"="coord")), select=c('coord', 'Dist_km_NoHP'))
Lat=LatLon_forR
TT3=merge(x=Lat,y=TT2, by="coord", all.x=TRUE)
NoH_P=subset(merge(x=TT3,y=Dis2,by="coord", all.x=TRUE), select=c('Patient_ID','Time_min_NoHP', 'Dist_km_NoHP'))
XX=merge(x=TT3,y=Dis2,by="coord", all.x=TRUE)
write.csv(XX, "D:/SA study/R travel Time/ZZ.csv")
##highway, pessimistic
# Same pipeline without avoid="highways"; TT/Dis/TT2/Dis2/TT3 are reused
# as scratch variables in each scenario block below.
r_HP=gmapsdistance(origin = y,
destination = "-29.838457+30.997391",
mode = "driving", traffic_model = "pessimistic", dep_date="2017-10-11", dep_time="10:00:00")
TT=r_HP$Time
Dis=r_HP$Distance
TT$Time_min_HP=TT$'Time.-29.838457+30.997391'/60
Dis$Dist_km_HP=Dis$`Distance.-29.838457+30.997391`/1000
TT2=subset(rename(TT, c("or"="coord")),select=c('coord','Time_min_HP'))
Dis2=subset(rename(Dis, c("or"="coord")), select=c('coord', 'Dist_km_HP'))
Lat=LatLon_forR
TT3=merge(x=Lat,y=TT2, by="coord", all.x=TRUE)
H_P=subset(merge(x=TT3,y=Dis2,by="coord", all.x=TRUE), select=c('Patient_ID','Time_min_HP', 'Dist_km_HP'))
print(H_P)
##No Highway, Optimistic
r_noH_O=gmapsdistance(origin = y,
destination = "-29.838457+30.997391",
mode = "driving", traffic_model = "optimistic", avoid= "highways", dep_date="2017-10-11", dep_time="10:00:00")
TT=r_noH_O$Time
Dis=r_noH_O$Distance
TT$Time_min_NoHO=TT$'Time.-29.838457+30.997391'/60
Dis$Dist_km_NoHO=Dis$`Distance.-29.838457+30.997391`/1000
TT2=subset(rename(TT, c("or"="coord")),select=c('coord','Time_min_NoHO'))
Dis2=subset(rename(Dis, c("or"="coord")), select=c('coord', 'Dist_km_NoHO'))
Lat=LatLon_forR
TT3=merge(x=Lat,y=TT2, by="coord", all.x=TRUE)
NoH_O=subset(merge(x=TT3,y=Dis2,by="coord", all.x=TRUE), select=c('Patient_ID','Time_min_NoHO', 'Dist_km_NoHO'))
##Highway, Optimistic
r_HO=gmapsdistance(origin = y,
destination = "-29.838457+30.997391",
mode = "driving", traffic_model = "optimistic", dep_date="2017-10-11", dep_time="10:00:00")
TT=r_HO$Time
Dis=r_HO$Distance
TT$Time_min_HO=TT$'Time.-29.838457+30.997391'/60
Dis$Dist_km_HO=Dis$`Distance.-29.838457+30.997391`/1000
TT2=subset(rename(TT, c("or"="coord")),select=c('coord','Time_min_HO'))
Dis2=subset(rename(Dis, c("or"="coord")), select=c('coord', 'Dist_km_HO'))
Lat=LatLon_forR
TT3=merge(x=Lat,y=TT2, by="coord", all.x=TRUE)
H_O=subset(merge(x=TT3,y=Dis2,by="coord", all.x=TRUE), select=c('Patient_ID','Time_min_HO', 'Dist_km_HO'))
#combine all time and distance dataset
# Merge the four scenario tables (no-highway/highway x pessimistic/optimistic)
# into one table keyed by Patient_ID, then write it to disk.
# Fix: merge()'s argument is `all.x` (lower case). The previous `All.x` did
# not match any formal (matching is case-sensitive), fell into `...` and was
# silently ignored, so the merges defaulted to inner joins instead of the
# intended left joins.
GA1=merge(x=NoH_P, y=NoH_O, by='Patient_ID', all.x=TRUE)
GA2=merge(x=GA1, y=H_P, by='Patient_ID', all.x=TRUE)
GA3=merge(x=GA2, y=H_O, by='Patient_ID', all.x=TRUE)
print(GA3)
write.csv(GA3, "D:/SA study/R travel Time/GeoAccess.csv")
|
27a6a0550b3876ce56dd8e1a8c1ae876ea4b119e | ae881520d35b6205e13c6200a0e78ba98515738b | /R/table.R | 039a1a6b83b0b348eb9692347b8bd4127e60a90c | [] | no_license | subreddit-emportugues/pagina | d793723229e94b6cd677cb8fe0b56301877484bd | 9ce04a517a70b655b0c9a52085e4b5a08574faa8 | refs/heads/master | 2020-04-27T15:56:23.488540 | 2019-03-11T07:19:36 | 2019-03-11T07:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,779 | r | table.R | datatable(
get_data(),
rownames = FALSE,
class = 'cell-border stripe hover',
style = 'bootstrap',
escape = FALSE,
extensions = c('ColReorder', 'FixedHeader', 'Responsive'),
options = list(
language = list(
url = 'https://raw.githubusercontent.com/subreddit-emportugues/lista/master/res/lang/pt_BR.json',
searchPlaceholder = 'Pesquisar'
),
pageLength = 100,
searchHighlight = TRUE,
autoWidth = TRUE,
searchDelay = 300,
lengthMenu = c(5, 25, 100, 500, 1000),
colReorder = TRUE,
fixedHeader = TRUE,
processing = TRUE,
order = list(
list(2, 'desc')
),
columnDefs = list(
list(
width = '170px',
targets = c(0)
),
list(
width = '350px',
targets = c(1)
),
list(
render = JS(
"function(data, type) {",
"if (type != 'display') {",
"return parseInt(data.replace('.', ''))",
"}",
"return data",
"}"
),
targets = c(2)
),
list(
render = JS(
"function(data, type) {",
"if (type != 'display') {",
"return new Date(data.split('/').reverse().join('-')).getTime() / 1000",
"}",
"return data",
"}"
),
targets = c(3)
)
)
)
)
|
2f985c164c5521285ef8b025e49bf1500f7ed75f | df712880b661e0148f7173d2e5e26fc69f84a07f | /attic/sanity.R | 65ad343670203e9061f25a19c83b05e8ddcecddc | [] | no_license | mlr-org/mlr3mbo | 006de583ae52232e32237e7a8986f65e6bde5cc9 | 520622de10a518b4c52a128b79398960dc7e6e09 | refs/heads/main | 2023-06-21T23:12:53.325192 | 2023-06-20T08:01:40 | 2023-06-20T08:01:40 | 212,462,939 | 23 | 1 | null | 2023-08-23T21:20:31 | 2019-10-02T23:44:40 | R | UTF-8 | R | false | false | 3,009 | r | sanity.R | devtools::load_all()
library(bbotk)
library(paradox)
# Sanity-check script for mlr3mbo: run Bayesian optimization on a 2-D test
# function, then compare surrogates and acquisition-function optimizers
# over repeated runs.
# Row-wise objective on a data.table of (x1, x2) points; the formula with
# a = 20, b = 0.2, c = 2*pi is the standard Ackley test function
# (global minimum 0 at the origin).
fun = function(xdt) {
a = 20
b = 0.2
c = 2 * pi
y = - a * exp(- b * sqrt((1 / 2) * rowSums(xdt^2))) -
exp((1 / 2) * rowSums(cos(c * xdt))) + a + exp(1)
data.table(y = y)
}
domain = ps(x1 = p_dbl(lower = -33, upper = 33), x2 = p_dbl(lower = -33, upper = 33))
codomain = ps(y = p_dbl(tags = "minimize"))
objective = ObjectiveRFunDt$new(fun = fun, domain = domain, codomain = codomain)
# Single MBO run with default settings, budget of 100 evaluations.
instance = OptimInstanceSingleCrit$new(objective, terminator = trm("evals", n_evals = 100))
optimizer = OptimizerMbo$new()
optimizer$optimize(instance)
library(ggplot2)
# Contour plot of the objective with evaluated points overlaid: batch
# number as colour, best point in red, initial design (batch 1) in green.
dat = generate_design_grid(domain, resolution = 300)$data
dat[, y:= objective$eval_dt(dat)]
p1 = ggplot(aes(x1, x2, z = y), data = dat) +
geom_contour(colour = "black") +
geom_point(aes(x1, x2, z = y, colour = batch_nr), data = instance$archive$data) +
geom_point(aes(x1, x2, z = y), colour = "red", data = instance$archive$best()) +
geom_point(aes(x1, x2, z = y), colour = "green", data = instance$archive$data[batch_nr == 1])
###
# Comparison 1: default (GP) surrogate vs. a ranger random-forest surrogate,
# 30 replications each, sharing one instance that is cleared between runs.
instance = OptimInstanceSingleCrit$new(objective, terminator = trm("evals", n_evals = 100))
bo_gp_ei = OptimizerMbo$new()
bo_rf_ei = OptimizerMbo$new(acq_function = default_acqfun(instance, default_surrogate(instance, learner = lrn("regr.ranger"))))
res = map_dtr(1:30, function(i) {
instance$archive$clear()
bo_gp_ei$optimize(instance)
tmp1 = instance$archive$data[, c("y", "batch_nr")]
tmp1[, method := "gp"]
instance$archive$clear()
bo_rf_ei$optimize(instance)
tmp2 = instance$archive$data[, c("y", "batch_nr")]
tmp2[, method := "rf"]
tmp = rbind(tmp1, tmp2)
tmp[, repl := i]
})
# Best-so-far value per run, then mean +/- standard error across replications.
res[, best := cummin(y), by = .(method, repl)]
agg = res[, .(mb = mean(best), sdb = sd(best), n = length(best)), by = .(batch_nr, method)]
agg[, seb := sdb / sqrt(n) ]
ggplot(aes(x = batch_nr, y = mb, colour = method, fill = method), data = agg) +
geom_line() +
geom_ribbon(aes(ymin = mb - seb, ymax = mb + seb) , colour = NA, alpha = 0.25)
###
# Comparison 2: acquisition-function optimizer -- default random search ("rs")
# vs. NLopt DIRECT-L ("dr") -- with a smaller budget of 50 evaluations.
instance = OptimInstanceSingleCrit$new(objective, terminator = trm("evals", n_evals = 50))
acqo = AcqOptimizer$new(opt("nloptr", algorithm = "NLOPT_GN_DIRECT_L"), trm("evals", n_evals = 100))
bo_gp_ei_rs = OptimizerMbo$new()
bo_gp_ei_dr = OptimizerMbo$new(acq_optimizer = acqo)
res = map_dtr(1:30, function(i) {
instance$archive$clear()
bo_gp_ei_rs$optimize(instance)
tmp1 = instance$archive$data[, c("y", "batch_nr")]
tmp1[, method := "rs"]
instance$archive$clear()
bo_gp_ei_dr$optimize(instance)
tmp2 = instance$archive$data[, c("y", "batch_nr")]
tmp2[, method := "dr"]
tmp = rbind(tmp1, tmp2)
tmp[, repl := i]
})
res[, best := cummin(y), by = .(method, repl)]
agg = res[, .(mb = mean(best), sdb = sd(best), n = length(best)), by = .(batch_nr, method)]
agg[, seb := sdb / sqrt(n) ]
ggplot(aes(x = batch_nr, y = mb, colour = method, fill = method), data = agg) +
geom_line() +
geom_ribbon(aes(ymin = mb - seb, ymax = mb + seb) , colour = NA, alpha = 0.25)
|
ef71a4dc29d963c1062f4179b9eb38fa5fe98639 | 65f74c82c53d9cead52187da34967e3b3e90b381 | /stylo_ternarize.R | e0fb6c8185e3c414956fccb8acf37519b89f4c77 | [] | no_license | EdgarRandLivid/university-master | 239a90e2d8e472c1e4608f622670e66bcae9a53c | 809b02cc6ee02bd73471d0386e8b0be4052e9d2f | refs/heads/master | 2020-03-12T09:50:07.913890 | 2018-05-16T15:48:34 | 2018-05-16T15:48:34 | 130,560,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,026 | r | stylo_ternarize.R | library(stylo)
## Script processes z-scores from stylo and ternarizes the values
## to -1, 0 or 1. A table of the 50 most frequent words is
## created, which serves as the frequencies input for a second stylo() call.
path = "" #path to the working directory (fill in before running)
setwd(path)
mycorpus = "corpus"
# First stylo run: computes the z-score table for the corpus.
example = stylo(corpus.dir = mycorpus)
###own ternarize:
our_zscores = example$table.with.all.zscores
# Threshold at +/-0.43: above -> 1, below -> -1, everything else -> 0.
gt_0.43 = which(our_zscores>0.43)
lt_0.43 = which(our_zscores<(-0.43))
our_zscores[gt_0.43]=1
our_zscores[lt_0.43]=-1
tozero = which(our_zscores!=1&our_zscores!=(-1))
our_zscores[tozero]=0
our_zscores
ternarizes_zscores = t(our_zscores) #transpose the table
ternarizes_zscores
df = as.data.frame(ternarizes_zscores)
top50 = df[c(1:50),] #flexible choice of the top words here
top50 # result
write.table(top50, file = "ternarized_zscores_50.txt",row.names=TRUE,col.names=TRUE, sep="\t", dec=".")
stylo(corpus.dir=mycorpus, frequencies = "ternarized_zscores_50.txt")
#the created text file can now be used as a frequencies table
|
24a4869df72b0f79b299e86ac303f6a67bda0c79 | 8743cb01e02a5cf45d5e40d59fd04a40e68139b6 | /postprocessing/performance/model/gb14/mesh_based_model_gb.r | 71adfa00d72a2a398be03eda01b0e417011b8ea5 | [
"BSD-3-Clause"
] | permissive | SeisSol/SeisSol | a2c9ae29021251db5f811c343762e0699756b39d | 7fde47786f10aebbb4225f4c5125829ea9b541a1 | refs/heads/master | 2023-09-01T08:57:51.863085 | 2023-08-31T07:29:26 | 2023-08-31T07:29:26 | 21,459,883 | 227 | 129 | BSD-3-Clause | 2023-09-13T10:06:09 | 2014-07-03T11:17:47 | C++ | UTF-8 | R | false | false | 17,021 | r | mesh_based_model_gb.r | ##
# @file
# This file is part of SeisSol.
#
# @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer)
#
# @section LICENSE
# Copyright (c) 2014, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
# Performance model for SeisSol setups.
# Read per-partition mesh statistics from a CSV file, derive a per-time-step
# performance model (MPI communication volume, integration and flux timings),
# write the augmented table to a CSV file and a summary PDF, and return the
# median modelled total time per time step across partitions.
#
# Arguments:
#   i_meshName              name of the mesh (printed on the PDF title page)
#   i_pathToCsvFile         path to the input CSV with per-partition statistics
#   i_performanceParameters list of machine parameters (dofs_size, bandwidths,
#                           per-element/face timings, mic flag, ...)
#   i_outputFileTable       path of the CSV file to write
#   i_outputFilePlot        path of the PDF file to write
computeMeshStatistics <- function( i_meshName,
i_pathToCsvFile,
i_performanceParameters,
i_outputFileTable,
i_outputFilePlot ) {
message( 'reading mesh', i_pathToCsvFile )
l_meshStatistics <- read.csv( file = i_pathToCsvFile,
header = TRUE )
#
# compute data to send and receive in MPI-communication
#
# mpi-information per time step in GB, regular mpi faces
l_meshStatistics$mpi_send <- l_meshStatistics$mpi_faces*i_performanceParameters$dofs_size/1024^3
# mpi-information per time step in GB, dr mpi face come on top
l_meshStatistics$mpi_send <- l_meshStatistics$mpi_send + (l_meshStatistics$mpi_dr_faces*i_performanceParameters$dofs_size/1024^3)
# so far everything is the same
l_meshStatistics$mpi_receive <- l_meshStatistics$mpi_send
#
# compute ratios
#
l_meshStatistics$ratio_dynamic_rupture_faces_elements <- l_meshStatistics$dynamic_rupture_faces / l_meshStatistics$elements
# compute timings for
# * time integration of all elements
# * volume integration of all elements
# * boundary integration of all elements
# * additional flux computation for dynamic rupture faces
# * time integration of MPI-elements
# * communication of MPI-elements
# * computation of volume integration and non-MPI time integration
# Host-only model (no accelerator): time and volume integration cover
# all elements.
if( !i_performanceParameters$mic ) {
l_meshStatistics$time_integration <- l_meshStatistics$elements*i_performanceParameters$time_integration
l_meshStatistics$volume_integration <- l_meshStatistics$elements*i_performanceParameters$volume_integration
}
l_meshStatistics$boundary_integration <- l_meshStatistics$elements*i_performanceParameters$boundary_integration
l_meshStatistics$time_communication <- (l_meshStatistics$mpi_send + l_meshStatistics$mpi_receive) / i_performanceParameters$network_bandwidth
l_meshStatistics$dynamic_rupture_fluxes <- l_meshStatistics$dynamic_rupture_faces*i_performanceParameters$dynamic_rupture
# Accelerator (MIC) model: network + PCIe transfers are overlapped with the
# volume integration and the time integration of interior elements (pmax
# picks the slower of the overlapped parts per partition).
if( i_performanceParameters$mic ) {
l_meshStatistics$time_integration_mpi <- l_meshStatistics$mpi_elements*i_performanceParameters$time_integration
l_meshStatistics$pci_communication <- ( (l_meshStatistics$mpi_faces*i_performanceParameters$dofs_size*2 + l_meshStatistics$mpi_dr_faces*i_performanceParameters$dofs_size) /1024^3) / i_performanceParameters$pci_bandwidth
l_meshStatistics$volume_time_integration_interior <- l_meshStatistics$elements*i_performanceParameters$volume_integration + l_meshStatistics$interior_elements*i_performanceParameters$time_integration
l_meshStatistics$overlapped_comm_time_volume <- pmax( l_meshStatistics$time_communication + l_meshStatistics$pci_communication, l_meshStatistics$volume_time_integration_interior )
l_meshStatistics$cpu_dynamic_rupture <- l_meshStatistics$dynamic_rupture_fluxes - pmax( l_meshStatistics$volume_time_integration_interior - l_meshStatistics$overlapped_comm_time_volume, 0 )
l_meshStatistics$overlapped_dyn_rupture_fluxes <- pmax( l_meshStatistics$cpu_dynamic_rupture + (l_meshStatistics$dynamic_rupture_faces*i_performanceParameters$dofs_size/1024^3) / i_performanceParameters$pci_bandwidth,
l_meshStatistics$boundary_integration )
}
#new overlap
# if( i_performanceParameters$mic ) {
# l_meshStatistics$time_integration_mpi <- l_meshStatistics$mpi_elements*i_performanceParameters$time_integration
# l_meshStatistics$pci_communication <- ( (l_meshStatistics$mpi_faces*i_performanceParameters$dofs_size*2 + l_meshStatistics$mpi_dr_faces*i_performanceParameters$dofs_size) /1024^3) / i_performanceParameters$pci_bandwidth
# l_meshStatistics$volume_time_integration_interior <- l_meshStatistics$elements*i_performanceParameters$volume_integration + l_meshStatistics$interior_elements*i_performanceParameters$time_integration + l_meshStatistics$interior_elements*i_performanceParameters$boundary_integration
#
# l_meshStatistics$overlapped_comm_time_volume <- pmax( l_meshStatistics$time_communication + l_meshStatistics$pci_communication, l_meshStatistics$volume_time_integration_interior )
# l_meshStatistics$cpu_dynamic_rupture <- l_meshStatistics$dynamic_rupture_fluxes - pmax( l_meshStatistics$volume_time_integration_interior - l_meshStatistics$overlapped_comm_time_volume, 0 )
# l_meshStatistics$overlapped_dyn_rupture_fluxes <- pmax( l_meshStatistics$cpu_dynamic_rupture + (l_meshStatistics$dynamic_rupture_faces*i_performanceParameters$dofs_size/1024^3) / i_performanceParameters$pci_bandwidth,
# l_meshStatistics$mpi_elements*i_performanceParameters$boundary_integration )
# }
# Total time per time step: plain sum on the host, overlapped phases on MIC.
if( !i_performanceParameters$mic ) {
l_meshStatistics$time_total <- l_meshStatistics$time_integration + l_meshStatistics$volume_integration + l_meshStatistics$boundary_integration + l_meshStatistics$dynamic_rupture_fluxes + l_meshStatistics$time_communication
}
else {
l_meshStatistics$time_total <- l_meshStatistics$time_integration_mpi + l_meshStatistics$overlapped_comm_time_volume + l_meshStatistics$overlapped_dyn_rupture_fluxes
}
message( 'summary mesh statistics')
print( summary( l_meshStatistics ) )
message( 'writing csv' )
write.csv( x=l_meshStatistics, file=i_outputFileTable)
message( 'plotting information' )
pdf( file = i_outputFilePlot,
paper = 'a4r',
width=20,
height=10)
# print title page
plot(0:10, type = "n", xaxt="n", yaxt="n", bty="n", xlab = "", ylab = "")
text(5, 8, i_meshName)
text(5, 7, Sys.time())
# overview boxplots
# (one boxplot per modelled quantity, across partitions)
if( !i_performanceParameters$mic ) {
par( mfrow=c(1,6) )
boxplot(l_meshStatistics$time_integration, main='time integration' )
boxplot(l_meshStatistics$volume_integration, main='volume integration' )
boxplot(l_meshStatistics$boundary_integration, main='boundary integration' )
boxplot(l_meshStatistics$dynamic_rupture_fluxes, main='dyn. rupt. fluxes' )
boxplot(l_meshStatistics$time_communication, main='communication' )
boxplot(l_meshStatistics$time_total, main='total time' )
}
else {
par( mfrow=c(1,6) )
boxplot( l_meshStatistics$time_integration_mpi, main='time integration of copy layer' )
boxplot( l_meshStatistics$volume_time_integration_interior, main='time & volume int.' )
boxplot( l_meshStatistics$overlapped_comm_time_volume, main='overlapped comm., int. time and vol. integration' )
boxplot( l_meshStatistics$boundary_integration, main='boundary integration' )
boxplot( l_meshStatistics$overlapped_dyn_rupture_fluxes, main='overlapped dyn. rupt. and bnd. intgration' )
boxplot( l_meshStatistics$time_total, main='total time' )
}
# detailed plot of every characteristic value
#for( l_name in names(l_meshStatistics[-1]) ) {
# layout( matrix(c(1,2), 2, 2, byrow = TRUE),
# widths=c(4,1) )
# plot( x=l_meshStatistics$partition,
# y=l_meshStatistics[,l_name],
# xlab="partition",
# ylab=l_name )
# boxplot(l_meshStatistics[l_name])
#}
dev.off()
return( median(l_meshStatistics$time_total) )
}
# Configuration: meshes to analyze, the directory holding their statistics
# CSVs, and per-machine performance parameters. Timings are seconds per
# element/face derived from reference runs (see inline comments);
# bandwidths are in GB/s; mic=TRUE selects the accelerator model.
l_config = list ( mesh_names = list( 'statistics_cube320_400_640_1024',
'statistics_cube400_640_640_2048',
'statistics_cube640_640_800_4096',
'statistics_cube640_900_1280_9216',
'statistics_Landers191M_02_05_02_512',
'statistics_Landers191M_02_05_02_768',
'statistics_Landers191M_02_05_02_1024',
'statistics_Landers191M_02_05_02_1536',
'statistics_Landers191M_02_05_02_3072',
'statistics_Landers191M_02_05_02_2048',
'statistics_Landers191M_02_05_02_4096',
'statistics_Landers191M_02_05_02_6144',
'statistics_Landers191M_02_05_02_9216',
'statistics_Landers191M_02_05_02_12288',
'statistics_Landers191M_02_05_02_24756'),
statistics_directory = 'mesh_statistics_gb',
performance_parameters = list( supermuc = list( basisFunctions = 56,
quantities = 9,
dofs_size = 56*9*8,
network_bandwidth = 0.8, # TODO: missing reference point
mic = FALSE,
boundary_integration = 389.68 * 1024 / 191098540 / 1000, # reference, landers_1024
time_integration = 169.04 * 1024 / 191098540 / 1000, # reference, landers_1024
volume_integration = 126.88 * 1024 / 191098540 / 1000, # reference, landers_1024
#boundary_integration = 43.26 * 9216 / 191098540 / 1000, # reference, landers_9216
#time_integration = 18.86 * 9216 / 191098540 / 1000, # reference, landers_9216
#volume_integration = 14.06 * 9216 / 191098540 / 1000, # reference, landers_9216
#time_integration = 36.33 / 400000 / 100, # reference, cubes_9216
#volume_integration = 27.18 / 400000 / 100, # reference, cubes_9216
#boundary_integration = 84.86 / 400000 / 100, # reference, cubes_9216
dynamic_rupture = 109.33 / 22798 / 1000 ), # TODO: missing reference point
stampede = list( basisFunctions = 56,
quantities = 9,
dofs_size = 56*9*8,
network_bandwidth = 0.8,
mic = TRUE,
pci_bandwidth = 6.3,
boundary_integration = 230.68 * 2 / 386518 / 1000 * 56 / 60, # extrapolated from LOH1_2
time_integration = 131.84 * 2 / 386518 / 1000 * 56 / 60, # extrapolated from LOH1_2
volume_integration = 88.51 * 2 / 386518 / 1000 * 56 / 60, # extrapolated from LOH1_2
dynamic_rupture = 109.33 / 22798 / 1000 ), # same as SuperMUC, but not relevant here; no impact on TH-2
tianhe = list( basisFunctions = 56,
quantities = 9,
dofs_size = 56*9*8,
#network_bandwidth = 0.367, # fitted via cube-runtimes of ~100.25 seconds
#network_bandwidth = 0.61, # fitted via landers_2048, 3 cards
network_bandwidth = 0.525, #fitted via landers_2048, rank 2474: 4582 mpi-facesm 65.56s comm. time
mic = TRUE,
pci_bandwidth = 3.2,
boundary_integration = 230.68 * 2 / 386518 / 1000, # extrapolated from LOH1_2
time_integration = 131.84 * 2 / 386518 / 1000, # extrapolated from LOH1_2
volume_integration = 88.51 * 2 / 386518 / 1000, # extrapolated from LOH1_2
dynamic_rupture = 26.79 / 2800 / 1000 ) # rank 2931 for 6144 ranks on TH-2
)
)
# Run the performance model for every (machine, mesh) combination; store the
# median per-time-step cost keyed as "<mesh>_<machine>".
l_expectedSimulationTime = list()
message( 'lets go' )
for( l_machine in list('supermuc', 'stampede', 'tianhe') ) {
for( l_meshName in l_config$mesh_names ) {
message( 'analyzing: ', l_meshName )
l_expectedSimulationTime[[paste(l_meshName,'_',l_machine, sep='')]] =
computeMeshStatistics( i_meshName = l_meshName,
i_pathToCsvFile = paste(l_config$statistics_directory,'/',l_meshName,'.csv', sep=''),
i_performanceParameters = (l_config$performance_parameters)[[l_machine]],
i_outputFileTable = paste(l_meshName,'_',l_machine,'.csv', sep=''),
i_outputFilePlot = paste(l_meshName,'_',l_machine,'.pdf', sep='') )
}
}
# NOTE(review): hand-patched value for the 768-node Tianhe run -- presumably
# replacing the modelled value with a measured/corrected one; confirm.
l_expectedSimulationTime$statistics_Landers191M_02_05_02_768_tianhe = 0.581
# Strong-scaling study: scale each per-time-step cost by its node count
# relative to the 768-node baseline, then plot parallel efficiency
# (baseline / scaled cost) per machine.
for( l_machine in list('supermuc', 'stampede', 'tianhe') ) {
l_strongScalingNodes = list(768, 1024, 1536, 2048, 3072, 4096, 6144, 9216, 12288, 24756 )
l_runtime = list()
for( l_nodes in l_strongScalingNodes ) {
l_runtime = c( l_runtime, l_expectedSimulationTime[[paste('statistics_Landers191M_02_05_02_', l_nodes, '_', l_machine, sep='')]] * ( l_nodes / l_strongScalingNodes[[1]]) )
}
l_runtime <- l_runtime[[1]] / unlist(l_runtime)
print(l_machine)
print(unlist(l_strongScalingNodes))
print(l_runtime)
plot( x=l_strongScalingNodes, l_runtime, ylab='parallel efficiency', main=l_machine )
}
|
21288f7a79ce292605b88571b15545a5efdf1869 | 2b1e495d311dbd7fec69a37afcd0e70a6a755f48 | /run_analysis.R | 4e54f8909e93a85357bd8ef6f4c38b2199b6e703 | [] | no_license | desantisll/GCD_Project | 334ccd85f1e40d850ad9f983217debfa9cec3948 | bf547fcc6baf7b60e34aa6703beb6de73824ce67 | refs/heads/master | 2016-09-09T20:21:02.024236 | 2015-07-27T15:57:09 | 2015-07-27T15:57:09 | 39,306,953 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,387 | r | run_analysis.R | #Getting and Cleaning Data project
# Getting and Cleaning Data project
#
# Builds a tidy summary of the UCI HAR smartphone dataset: merges the test and
# train partitions, keeps only the mean()/std() features, attaches descriptive
# activity labels, and writes the average of each feature per subject and
# activity. Expects the "UCI HAR Dataset" folder in the working directory.
library(plyr)    # attached before dplyr on purpose: ddply() is used below
library(dplyr)

# -- subject IDs: one row per observation, test partition first ---------------
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
subjectAll <- rbind(subject_test, subject_train)
names(subjectAll) <- "subjectId"  # name the single column

# -- feature measurements: same test-then-train row order as the subjects -----
xTest <- read.table("UCI HAR Dataset/test/X_test.txt")
xTrain <- read.table("UCI HAR Dataset/train/X_train.txt")
xAll <- rbind(xTest, xTrain)

# keep only the mean()/std() features (66 of the 561 columns); the escaped
# parentheses exclude meanFreq() and angle(...) variables
featuresAll <- read.table("UCI HAR Dataset/features.txt", col.names = c("featureId", "featureLabel"))
featuresOnly <- grep("-mean\\(\\)|-std\\(\\)", featuresAll$featureLabel)
xAll <- xAll[, featuresOnly]
# replace the default Vnnn column names with the descriptive feature labels
names(xAll) <- featuresAll[featuresOnly, 2]

# -- activity codes: translate numeric IDs into descriptive labels ------------
yTest <- read.table("UCI HAR Dataset/test/Y_test.txt")
yTrain <- read.table("UCI HAR Dataset/train/Y_train.txt")
yAll <- rbind(yTest, yTrain)
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("activityId", "activityLabel"))
activities$activityLabel <- gsub("_", "", as.character(activities$activityLabel))
# look up each observation's label by its activity ID, then rename the column
yAll[, 1] <- activities[yAll[, 1], 2]
names(yAll) <- "activity"

# -- combine subject, activity and feature columns ----------------------------
# subjectAll / yAll / xAll each have one row per observation, in the same order
final <- cbind(subjectAll, yAll, xAll)
# expand confusing abbreviations in the column names
names(final) <- gsub("Acc", "acceleration", names(final))
names(final) <- gsub("Mag", "magnitude", names(final))

# -- tidy output: mean of every feature per subject and activity --------------
finalTidy <- ddply(final, c("subjectId", "activity"), numcolwise(mean))
# bug fix: spell out `row.names` in full (the original wrote `row.name` and
# relied on partial argument matching)
write.table(finalTidy, file = "final_tidy_data", row.names = FALSE)
|
b22574abb2acc134ad1de060a9c7ecb9f8024790 | 28b8ae24faf48299abba720d98473557c7933349 | /Task_08/task08.r | a964039aab7f8a40b8409086a4d95ec61616109f | [] | no_license | elg0012/Tasks | 2326e53122b7463f29117d499ebfd7eedec273fa | ff9a4866990bd128a07ef4ca703bf9230b1513c7 | refs/heads/master | 2023-04-12T10:52:46.409424 | 2021-04-30T13:03:41 | 2021-04-30T13:03:41 | 332,598,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,590 | r | task08.r | setwd("~/Desktop/Evolution/Tasks/Task_08")
# Task 8: ancestral state reconstruction of Anolis snout-vent length (SVL)
# with phytools::fastAnc, with and without fossil calibration points.
library(phytools)
tree <- read.tree("https://jonsmitchell.com/data/anolis.tre")
plot(tree, type="fan")
tree$tip.label
#QUESTION 1: There are 82 tips, and branched lengths are present.
# NOTE(review): `stringsAsFactors=F` uses the reassignable abbreviation F;
# FALSE is safer (left unchanged here)
data <- read.csv("https://jonsmitchell.com/data/svl.csv", stringsAsFactors=F, row.names=1)
data
data[,1]
#QUESTION 2: "Data" contains species of lizards and their snout-vent length and includes 100 dimensions.
# named vector of SVL values keyed by species, as fastAnc expects
svl <- setNames(data$svl, rownames(data))
svl
# maximum-likelihood ancestral states with variances and 95% CIs
Ancestors <- fastAnc(tree, svl, vars=TRUE, CI=TRUE)
Ancestors
?fastAnc
#QUESTION 3: The estimated values are stored in a list (ace). The CI95 element is the confidence interval.
#QUESTION 4: Two assumptions made in the estimation of the ancestral states using fastAnc are 1) that the state calculated for the root node of the tree is also the MLE of the root node, and that variance is the contrast state.
par(mar=c(0.1,0.1,0.1,0.1))
plot(tree, type="fan", lwd=2, show.tip.label=F)
# point size scaled by trait value: tips first, then reconstructed nodes
tiplabels(pch=16, cex=0.25*svl[tree$tip.label])
nodelabels(pch=16, cex=0.25*Ancestors$ace)
# continuous trait map on the tree
obj <- contMap(tree, svl, plot=F)
plot(obj, type="fan", legend=0.7*max(nodeHeights(tree)), sig=2, fsize=c(0.7,0.9))
# fossil SVLs (log scale) with tip pairs whose MRCA each fossil calibrates
fossilData <- data.frame(svl=log(c(25.4, 23.2, 17.7, 19.7, 24, 31)), tip1=c("Anolis_aliniger", "Anolis_aliniger", "Anolis_occultus", "Anolis_ricordii", "Anolis_cristatellus", "Anolis_occultus"), tip2=c("Anolis_chlorocyanus", "Anolis_coelestinus", "Anolis_hendersoni", "Anolis_cybotes", "Anolis_angusticeps", "Anolis_angusticeps"))
fossilData
#QUESTION 5: Shown below
fossilNodes <- c()
nodeN <- c()
# NOTE(review): the braced block below does NOT iterate over all fossils --
# the for-loop's body just overwrites i, so after the block i == 1 and only
# the first fossil row is used by the lines that follow; presumably the loop
# was meant to wrap those lines instead. Left as-is to preserve behaviour.
{
for(i in 1:nrow(fossilData))
i <- 1
if(i == 1){
print(Ancestors)
}
}
# MRCA node for (only) the first fossil's tip pair
Node <- fastMRCA(tree, fossilData[i, "tip1"], fossilData[i, "tip2"])
Node
fossilNodes[i] <- fossilData[i, "svl"]
fossilNodes[i]
nodeN[i] <- Node
names(fossilNodes) <- nodeN
# re-estimate ancestral states constraining the calibrated node(s)
Ancestors_withFossils <- fastAnc(tree, svl, anc.states=fossilNodes, CI=TRUE, var=TRUE)
Ancestors_withFossils
Ancestors_withoutFossils <- fastAnc(tree, svl, CI=TRUE, var=TRUE)
Ancestors_withoutFossils
plot(Ancestors_withFossils$ace, Ancestors_withoutFossils$ace, xlab="With Fossils", ylab="Without Fossils")
#QUESTION 7: The data for "With Fossils" increase the estimated ancestral sizes.
#QUESTIONS 8-10: Shown below
# NOTE(review): installing packages from inside a script is discouraged;
# left unchanged to preserve the submitted workflow
install.packages("geiger")
library("geiger")
?fitContinuous
?geiger
# compare evolutionary models by fit: Brownian motion, Ornstein-Uhlenbeck,
# and early-burst
fitContinuous(tree, svl, model="BM")
fitContinuous(tree, svl, model="OU")
fitContinuous(tree, svl, model="EB")
#FastAnc uses the function including "BM" to decide on what it is going to assume is correct. In this case, the function including "EB" has the AIC meaning best fit.
5df42cf43b6381495c879de361514fbfff65662e | d317f7e6a38bd252cfdf69e3846905c24e14f2fc | /R/make_qc_dt.R | 279065045260e3455aa92c8557e63166c66d868f | [
"MIT"
] | permissive | Yixf-Self/SampleQC | 208bb170884c6c9dbc1596a277186abd5a43d548 | 82f4483eafdaac93c17710bf605e147ad4611f0c | refs/heads/master | 2023-07-28T06:09:25.504772 | 2021-08-29T14:05:10 | 2021-08-29T14:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,577 | r | make_qc_dt.R | # make_qc_dt.R
# Make nice data.table of QC metrics
#' Checks specified QC metrics and makes data.table for input to
#' \code{calc_pairwise_mmds}.
#'
#' Takes a \code{data.frame} of raw QC metrics, and makes a nice neat
#' \code{data.table} output that can be used in \pkg{SampleQC}. For example,
#' users with a \pkg{SingleCellExperiment} object \code{sce} may first run
#' \code{scater::calculateQCMetrics}, then call \code{make_qc_dt(colData(sce))}.
#' We work with \code{data.frame}/\code{data.table} objects to have the most
#' flexible possible approach (and to save work on e.g. keeping up with changes
#' to dependencies like \pkg{SingleCellExperiment} and \pkg{Seurat}).
#'
#' This code also calculates some sample-level statistics, e.g. median log
#' library size per sample, and adds columns with binned values for these.
#'
#' @param qc_df data.frame object containing calculated QC metrics
#' @param sample_var which column of qc_df has sample labels? (e.g. sample, group,
#' batch, library)
#' @param qc_names list of qc_names that need to be extracted
#' @param annot_vars list of user-specified sample-level annotations
#' @importFrom assertthat assert_that
#' @importFrom data.table setcolorder
#' @return qc_dt, a data.table containing the sample variable plus qc metrics
#' @export
make_qc_dt <- function(qc_df, sample_var = 'sample_id',
qc_names = c('log_counts', 'log_feats', 'logit_mito'), annot_vars = NULL) {
# some checks
if ( 'DFrame' %in% class(qc_df) )
qc_df = as.data.frame(qc_df)
assert_that( is.data.frame(qc_df), msg = "qc_df must be a data.frame" )
assert_that( sample_var %in% colnames(qc_df),
msg = sprintf("%s is listed as variable for samples but is not in data.frame",
sample_var))
reserved_ns = c('sample_id', 'group_id', 'cell_id')
assert_that( length(intersect(annot_vars, reserved_ns)) == 0,
msg = paste0("The following variable names are reserved and cannot be used ",
"as annot_vars:\n", paste(reserved_ns, collapse = ", ")))
assert_that( all(annot_vars %in% names(qc_df)),
msg = sprintf("the following variables are listed in annot_vars but not in qc_df:\n%s",
paste(setdiff(annot_vars, names(qc_df)), collapse = ", ")))
# set up qc_dt
qc_dt = .init_qc_dt(qc_df, sample_var)
# add known metrics
if ('log_counts' %in% qc_names) {
qc_dt = .add_log_counts(qc_dt, qc_df)
}
if ('log_feats' %in% qc_names) {
qc_dt = .add_log_feats(qc_dt, qc_df)
}
if ('logit_mito' %in% qc_names) {
qc_dt = .add_logit_mito(qc_dt, qc_df)
}
if ('log_splice' %in% qc_names) {
qc_dt = .add_log_splice(qc_dt, qc_df)
}
# add unknown metrics
qc_dt = .add_unknown_metrics(qc_dt, qc_df, qc_names)
# add some useful annotations
qc_dt = .add_qc_annots(qc_dt)
# add specified annotation variables
qc_dt = .add_annot_vars(qc_dt, qc_df, annot_vars)
# put in nice order
setcolorder(qc_dt, c('cell_id', 'sample_id', qc_names))
# double-check everything is ok
.check_qc_dt(qc_dt, qc_names, annot_vars)
return(qc_dt)
}
#' Initializes qc_dt object
#'
#' Cell identifiers are taken from a \code{cell_id} column when present,
#' otherwise from the rownames of the input; sample labels come from the
#' column named by \code{sample_var}.
#'
#' @param qc_df input data
#' @param sample_var which column of df has sample labels? (e.g. sample, group,
#' batch, library)
#' @importFrom assertthat assert_that
#' @keywords internal
.init_qc_dt <- function(qc_df, sample_var) {
    # decide where cell identifiers come from
    if ('cell_id' %in% colnames(qc_df)) {
        cell_ids = qc_df$cell_id
    } else if ( !is.null(rownames(qc_df)) ) {
        cell_ids = rownames(qc_df)
    } else {
        stop("input data.frame must have either rownames or 'cell_id' as a column")
    }
    qc_dt = data.table(cell_id = cell_ids)
    assert_that( length(unique(qc_dt$cell_id)) == nrow(qc_dt), 
        msg = "cell identifiers are not unique")
    # attach sample labels from the requested column
    qc_dt[, sample_id := qc_df[[sample_var]] ]
    # neither identifier column may contain missing values
    assert_that( all(!is.na(qc_dt$cell_id)), msg = "missing values in cell_id")
    assert_that( all(!is.na(qc_dt$sample_id)), msg = "missing values in sample_id")
    return(qc_dt)
}
#' Add log counts to qc_dt
#'
#' Looks for a library-size column under several known names and stores
#' log10 library size as \code{log_counts}. A pre-computed \code{log_counts}
#' column is used as-is; raw totals are log10-transformed.
#'
#' @param qc_dt data.table of QC metrics
#' @param qc_df input data
#' @importFrom data.table ":="
#' @importFrom assertthat assert_that
#' @return qc_dt with a \code{log_counts} column added
#' @keywords internal
.add_log_counts <- function(qc_dt, qc_df) {
    # candidate columns, in priority order (first match wins)
    df_names = colnames(qc_df)
    valid_ns = c('log_counts', 'total', 'sum', 'nCount_RNA')
    # check which are present
    here_ns = vapply(valid_ns, function(v) v %in% df_names, logical(1))
    assert_that( sum(here_ns) >= 1, 
        msg = paste0(
            "no valid column present for log_counts\n", 
            paste0("valid columns are: ", paste(valid_ns, collapse = ", "))
        ))
    to_use = valid_ns[here_ns][[1]]
    # add values: 'log_counts' is assumed to be pre-logged; raw totals get log10
    if (to_use %in% 'log_counts') {
        qc_dt[, log_counts := qc_df[[ to_use ]] ]
    } else if (to_use %in% c('total', 'sum', 'nCount_RNA')) {
        # totals must be strictly positive, otherwise log10 gives -Inf
        assert_that( all(qc_df[[ to_use ]] > 0), 
            msg = paste0("all values of ", to_use, " must be > 0 to take logs"))
        qc_dt[, log_counts := log10(qc_df[[ to_use ]]) ]
    } else {
        stop("log_counts requested but required variables not present")
    }
    # do some checks
    assert_that( "log_counts" %in% names(qc_dt) )
    assert_that( !any(is.na(qc_dt$log_counts)), 
        msg = "some log_counts values are NA")
    assert_that( !any(is.infinite(qc_dt$log_counts)), 
        msg = "some log_counts values are infinite")
    # bug fix: the check is `>= 0`, so failure means values < 0 (the old
    # message claimed "<= 0")
    assert_that( all(qc_dt$log_counts >= 0), 
        msg = "some log_counts values are < 0")
    return(qc_dt)
}
#' Add log feats to qc_dt
#'
#' Looks for a detected-features column under several known names and stores
#' log10 feature count as \code{log_feats}. A pre-computed \code{log_feats}
#' column is used as-is; raw counts are log10-transformed.
#'
#' @param qc_dt data.table of QC metrics
#' @param qc_df input data
#' @importFrom data.table ":="
#' @importFrom assertthat assert_that
#' @return qc_dt with a \code{log_feats} column added
#' @keywords internal
.add_log_feats <- function(qc_dt, qc_df) {
    # candidate columns, in priority order (first match wins)
    df_names = colnames(qc_df)
    valid_ns = c('log_feats', 'detected', 'nFeature_RNA')
    # check which are present
    here_ns = vapply(valid_ns, function(v) v %in% df_names, logical(1))
    assert_that( sum(here_ns) >= 1, 
        msg = paste0(
            "no valid column present for log_feats\n", 
            paste0("valid columns are: ", paste(valid_ns, collapse = ", "))
        ))
    to_use = valid_ns[here_ns][[1]]
    # add values: 'log_feats' is assumed to be pre-logged; raw counts get log10
    if (to_use %in% 'log_feats') {
        qc_dt[, log_feats := qc_df[[ to_use ]] ]
    } else if (to_use %in% c('detected', 'nFeature_RNA')) {
        # feature counts must be strictly positive, otherwise log10 gives -Inf
        assert_that( all(qc_df[[ to_use ]] > 0), 
            msg = paste0("all values of ", to_use, " must be > 0 to take logs"))
        qc_dt[, log_feats := log10(qc_df[[ to_use ]]) ]
    } else {
        stop("log_feats requested but required variables not present")
    }
    # do some checks
    assert_that( "log_feats" %in% names(qc_dt) )
    assert_that( !any(is.na(qc_dt$log_feats)), 
        msg = "some log_feats values are NA")
    assert_that( !any(is.infinite(qc_dt$log_feats)), 
        msg = "some log_feats values are infinite")
    # bug fix: the check is `>= 0`, so failure means values < 0 (the old
    # message claimed "<= 0")
    assert_that( all(qc_dt$log_feats >= 0), 
        msg = "some log_feats values are < 0")
    return(qc_dt)
}
#' Add logit-transformed mitochondrial proportions to qc_dt
#'
#' Tries each known column combination in turn to derive the mitochondrial
#' proportion, then stores \code{qlogis((mito + 1) / (total + 2))}; the
#' pseudocounts keep the logit finite at 0\% and 100\% mito.
#'
#' @param qc_dt data.table of QC metrics
#' @param qc_df input data
#' @importFrom data.table ":="
#' @importFrom assertthat assert_that
#' @return qc_dt with a \code{logit_mito} column added
#' @keywords internal
.add_logit_mito <- function(qc_dt, qc_df) {
    # what names do we have, and want?
    df_names = colnames(qc_df)
    # add logit-transformed mitochondrial proportion to qc_dt
    # (idiom fix: scalar conditions use short-circuiting && rather than
    # the elementwise &)
    if ('logit_mito' %in% df_names) {
        qc_dt[, logit_mito := qc_df$logit_mito ]
    } else if ( ('subsets_mito_sum' %in% df_names) && ('total' %in% df_names) ) {
        # scater-style columns
        qc_dt[, logit_mito := qlogis( (qc_df$subsets_mito_sum + 1) / (qc_df$total + 2) ) ]
    } else if ( ('subsets_mt_sum' %in% df_names) && ('total' %in% df_names) ) {
        # scater-style columns, alternative subset name
        qc_dt[, logit_mito := qlogis( (qc_df$subsets_mt_sum + 1) / (qc_df$total + 2) ) ]
    } else if ( ('percent.mt' %in% df_names) && ('nCount_RNA' %in% df_names) ) {
        # Seurat-style input: recover raw mito counts from the percentage
        total_counts = qc_df$nCount_RNA
        mt_counts = qc_df$nCount_RNA * qc_df$percent.mt / 100
        # recovered counts should be whole numbers (up to float error)
        assert_that( all(abs(mt_counts - round(mt_counts, 0)) < 1e-10), 
            msg = "recovered mito counts are not integers; check percent.mt")
        qc_dt[, logit_mito := qlogis( (mt_counts + 1) / (total_counts + 2) ) ]
    } else if ( ('mito_prop' %in% df_names) && ('log_counts' %in% df_names) ) {
        # proportion plus log10 totals: undo the log, then recover mito counts
        total_counts = 10^qc_df$log_counts
        mt_counts = qc_df$mito_prop * total_counts
        assert_that( all(abs(mt_counts - round(mt_counts, 0)) < 1e-8), 
            msg = "recovered mito counts are not integers; check mito_prop")
        qc_dt[, logit_mito := qlogis( (mt_counts + 1) / (total_counts + 2) ) ]
    } else {
        stop("logit_mito requested but required variables not present")
    }
    # do some checks
    assert_that( "logit_mito" %in% names(qc_dt) )
    assert_that( !any(is.na(qc_dt$logit_mito)), 
        msg = "some logit_mito values are NA")
    assert_that( !any(is.infinite(qc_dt$logit_mito)), 
        msg = "some logit_mito values are infinite")
    return(qc_dt)
}
#' Add log splice ratio to qc_dt
#'
#' Stores a spliced/unspliced ratio on the logit scale as \code{log_splice};
#' a pre-computed \code{log_splice} column is used as-is.
#'
#' @param qc_dt data.table of QC metrics
#' @param qc_df input data
#' @importFrom data.table ":="
#' @importFrom assertthat assert_that
#' @return qc_dt with a \code{log_splice} column added
#' @keywords internal
.add_log_splice <- function(qc_dt, qc_df) {
    # what names do we have, and want?
    df_names = colnames(qc_df)
    # add log spliced/unspliced ratio to qc_dt
    if ('log_splice' %in% df_names) {
        qc_dt[, log_splice := qc_df$log_splice ]
    } else if ( ('total_spliced' %in% df_names) && ('total_unspliced' %in% df_names) ) {
        # NOTE(review): (spliced + 1) / (unspliced + 1) is a ratio that can
        # exceed 1, for which qlogis() returns NaN -- confirm the intended
        # transform (a true proportion would use spliced / (spliced + unspliced))
        qc_dt[, log_splice := qlogis( (qc_df$total_spliced + 1) / (qc_df$total_unspliced + 1) ) ]
    } else {
        # bug fix: message previously named logit_mito
        stop("log_splice requested but required variables not present")
    }
    # do some checks
    # bug fix: these previously validated qc_dt$logit_mito (copy-paste error),
    # so a broken log_splice column was never caught
    assert_that( "log_splice" %in% names(qc_dt) )
    assert_that( !any(is.na(qc_dt$log_splice)), 
        msg = "some log_splice values are NA")
    assert_that( !any(is.infinite(qc_dt$log_splice)), 
        msg = "some log_splice values are infinite")
    return(qc_dt)
}
#' Shows the list of QC metrics for which \pkg{SampleQC} currently has
#' specific functionality. \pkg{SampleQC} will happily use metrics that aren't
#' in this list, however for those in this list it can plot a couple of extra
#' things.
#'
#' @return character vector of QC metrics that SampleQC knows about
#' @export
list_known_metrics <- function() {
    # the four metrics with dedicated derivation/plotting support
    c('log_counts', 'log_feats', 'logit_mito', 'log_splice')
}
#' Adds metrics that SampleQC doesn't have specific functions for
#'
#' Any requested metric not in \code{list_known_metrics()} is copied across
#' verbatim from \code{qc_df}, after checking it exists and contains no
#' NA/infinite values.
#'
#' @param qc_dt data.table of QC metrics
#' @param qc_df data.frame object containing calculated QC metrics
#' @param qc_names list of qc_names that need to be extracted
#' @importFrom assertthat assert_that
#' @return qc_dt with the unknown metric columns added
#' @keywords internal
.add_unknown_metrics <- function(qc_dt, qc_df, qc_names) {
    # anything to add?
    to_add = setdiff(qc_names, list_known_metrics())
    if ( length(to_add) == 0 )
        return(qc_dt)
    # add them
    message("adding the following metrics that are not known to `SampleQC`:")
    message(paste(to_add, collapse = ", "))
    for (v in to_add) {
        assert_that( v %in% names(qc_df), msg = paste0(v, " missing from qc_df"))
        # bug fix: use [[v]] so the loop variable's *value* names the column;
        # the original `qc_dt$v = qc_df$v` read/wrote a column literally
        # called "v", silently dropping every unknown metric
        qc_dt[[v]] = qc_df[[v]]
        assert_that( !any(is.na(qc_dt[[v]])), msg = paste0("NA values for ", v))
        assert_that( !any(is.infinite(qc_dt[[v]])), msg = paste0("infinite values for ", v))
    }
    return(qc_dt)
}
#' Adds sample-level annotations for each known QC metric
#'
#' For every known metric present, adds a per-sample median column plus a
#' binned (factor) version of it; also adds sample-size annotations. All
#' columns are added by reference via data.table's \code{:=}.
#'
#' @param qc_dt data.table
#' @importFrom data.table ":="
#' @return qc_dt, a data.table containing the sample variable plus qc metrics
#' @keywords internal
.add_qc_annots <- function(qc_dt) {
    # add annotations for sample size (log10 number of cells per sample)
    qc_dt[, log_N := log10(.N), by='sample_id']
    # and factor version: bin sample sizes into labelled categories
    N_cuts = c(1,100,200,400,1000,2000,4000,10000,20000,40000,Inf)
    N_labs = paste0('<=', N_cuts[-1])
    qc_dt[, N_cat := factor(
        cut(10^log_N, breaks = N_cuts, labels = N_labs), 
        levels = N_labs), by = 'sample_id']
    # add annotations relating to library sizes
    if ('log_counts' %in% names(qc_dt) ) {
        # add median log counts per sample
        qc_dt[, med_counts := median(log_counts), by='sample_id']
        # put counts level into categories (binned on the raw-count scale)
        counts_cuts = c(1,100,300,1000,3000,10000,30000, Inf)
        counts_labs = paste0('<=', counts_cuts[-1])
        qc_dt[, counts_cat := factor(
            cut(10^med_counts, breaks = counts_cuts, labels = counts_labs), 
            levels = counts_labs), by = 'sample_id']
    }
    # add annotations relating to features
    if ('log_feats' %in% names(qc_dt) ) {
        # add median log feats per sample
        qc_dt[, med_feats := median(log_feats), by='sample_id']
        # put feature level into categories (binned on the raw-count scale)
        feats_cuts = c(1,100,300,1000,3000,10000,30000, Inf)
        feats_labs = paste0('<=', feats_cuts[-1])
        qc_dt[, feats_cat := factor(
            cut(10^med_feats, breaks = feats_cuts, labels = feats_labs), 
            levels = feats_labs), by = 'sample_id']
    }
    # add annotations relating to mitochondrial proportions
    if ('logit_mito' %in% names(qc_dt) ) {
        # add median mito proportion (back on the 0-1 scale via plogis)
        qc_dt[, med_mito := median(plogis(logit_mito)), by='sample_id']
        # put mito level into categories
        mito_cuts = c(0,0.01,0.05,0.1,0.2,0.5,1)
        mito_labs = paste0('<=', mito_cuts[-1])
        qc_dt[, mito_cat := factor(
            cut(med_mito, breaks = mito_cuts, labels = mito_labs), 
            levels = mito_labs), by = 'sample_id']
    }
    # add annotations relating to splice ratios
    if ('log_splice' %in% names(qc_dt) ) {
        # add median splice value, mapped through plogis
        # NOTE(review): log_splice is a logit of a *ratio* (see
        # .add_log_splice), so plogis() here may not yield a true proportion
        # -- confirm the intended scale
        qc_dt[, med_splice := median(plogis(log_splice)), by='sample_id']
        # put splice level into categories
        splice_cuts = c(0, 0.01, 0.05, 0.1, 0.2, 0.5, 1)
        splice_labs = paste0('<=', splice_cuts[-1])
        qc_dt[, splice_cat := factor(
            cut(med_splice, breaks = splice_cuts, labels = splice_labs), 
            levels = splice_labs), by = 'sample_id']
    }
    return(qc_dt)
}
#' Adds user-specified annotation variables
#'
#' Copies each column named in \code{annot_vars} from \code{qc_df} into
#' \code{qc_dt}, then checks that every annotation is constant within each
#' sample (i.e. is genuinely sample-level).
#'
#' @param qc_dt data.table
#' @param qc_df data.frame object containing calculated QC metrics
#' @param annot_vars list of user-specified sample-level annotations
#' @importFrom magrittr "%>%"
#' @importFrom data.table ":=" ".N"
#' @return qc_dt, a data.table containing the sample variable plus qc metrics
#' @keywords internal
.add_annot_vars <- function(qc_dt, qc_df, annot_vars) {
    # every requested annotation column must exist in the input
    assert_that( all(annot_vars %in% names(qc_df)) )
    # copy the annotation columns across
    for (v in annot_vars) {
        qc_dt[[v]] = qc_df[[v]]
    }
    # each annotation must take exactly one value per sample
    for (v in annot_vars) {
        pair_dt = unique(qc_dt[, c('sample_id', v), with = FALSE])
        assert_that( nrow(pair_dt) == length(unique(pair_dt$sample_id)), 
            msg = paste0("annotation variable ", v, " has more than one value per\n", 
                "sample (should be sample-level only)"))
    }
    return(qc_dt)
}
#' Checks that output is ok
#'
#' Final sanity checks on the assembled qc_dt: known metrics are in range,
#' and no metric or annotation column contains NAs. Errors (via assert_that)
#' if any check fails; returns nothing useful otherwise.
#'
#' @param qc_dt data.table
#' @param qc_names list of qc_names that need to be extracted
#' @param annot_vars list of annotation variables
#' @importFrom assertthat assert_that
#' @keywords internal
.check_qc_dt <- function(qc_dt, qc_names, annot_vars) {
    # unpack
    col_names = colnames(qc_dt)
    # check specific names: log metrics must be non-negative, logit/ratio
    # metrics must be finite
    if ('log_counts' %in% col_names)
        assert_that( all(qc_dt$log_counts >= 0) )
    if ('log_feats' %in% col_names)
        assert_that( all(qc_dt$log_feats >= 0) )
    if ('logit_mito' %in% col_names)
        assert_that( all(is.finite(qc_dt$logit_mito)) )
    if ('log_splice' %in% col_names)
        assert_that( all(is.finite(qc_dt$log_splice)) )
    # check qc metrics and annotations for NAs
    for (n in qc_names) {
        assert_that( all(!is.na(qc_dt[[n]])) )
    }
    # annotation columns created by .add_qc_annots (only some may be present,
    # depending on which metrics were requested)
    annots_auto = c(
        "med_counts", "counts_cat", 
        "med_feats", "feats_cat", 
        "med_mito", "mito_cat", 
        "med_splice", "splice_cat", 
        "log_N", "N_cat")
    for (n in c(annots_auto, annot_vars)) {
        if ( n %in% names(qc_dt) )
            assert_that( all(!is.na(qc_dt[[n]])), 
                msg = paste0('NA present in an annotation variable, ', n) )
    }
}
|
ef9d20402eeaa4e646d2eec79c7364461f9a57f8 | e5b96e28f051beb07e94d9d9ff27c91f464a1fe4 | /R/20-upwards.R | 47921c3cb915eca8f5491eb383ad49bbfc0dcd44 | [] | no_license | vicky-rojas/30DayChartChallenge2021 | f6f33337a95da7c1cc866b335a4615b44d2cbbf9 | f6eec8ccd13e79be8feb74f01b7a06e38682cb51 | refs/heads/main | 2023-04-22T19:23:27.773394 | 2021-04-30T19:52:08 | 2021-04-30T19:52:08 | 353,714,097 | 0 | 0 | null | 2021-04-01T13:44:19 | 2021-04-01T13:44:18 | null | UTF-8 | R | false | false | 4,704 | r | 20-upwards.R | library(tidyverse)
library(lubridate)
library(gganimate)

# Temperatura promedio anual ----
# Daily temperatures, Quinta Normal station (code 330020), from the (CR)2
# compilation; "-9999" encodes missing values.
df_temperatura <- read.delim("input/cr2_tasDaily_2018/cr2_tasDaily_2018.txt",
                             sep = ",",
                             header = TRUE,
                             na.strings = c("NA","-9999")) %>%
  select(1, contains("330020"))
# NOTE(review): rows before 15 hold station metadata; 15:43181 keeps the daily
# observations -- confirm against the raw file layout
df_temperatura_f <- df_temperatura[15:43181,]
df_temperatura_f <- df_temperatura_f %>%
  rename(date = 1,
         temperatura = 2) %>%
  mutate(date = parse_date_time(date, orders = "ymd"),
         date = format(date, "%Y"),   # keep only the year (as character)
         temperatura = as.numeric(temperatura)) %>%
  group_by(date) %>%
  summarise(temperatura = mean(temperatura, na.rm = TRUE)) %>%
  # bug fix: test NaN numerically instead of comparing the numeric column
  # against the string 'NaN' (mean(.., na.rm = TRUE) of an all-NA year is NaN)
  filter(!is.nan(temperatura)) %>%
  # NOTE(review): date is still character here, so this comparison relies on
  # lexicographic coercion; correct for 4-digit years in this range
  filter(date > 1970 & date < 2018)
str(df_temperatura_f)
# animated line chart, Matrix-style green-on-black theme
temperatura <- df_temperatura_f %>%
  mutate(date = as.numeric(date)) %>%
  ggplot(aes(x = date, y = temperatura)) +
  geom_point(aes(group = seq_along(date)), color = "#22b455") +
  geom_line(aes(group = 1), color = "#22b455") +
  geom_text(aes(label = scales::comma(temperatura, accuracy = 0.1)), color = "#92e5a1") +
  scale_y_continuous(limits = c(0,20)) +
  scale_x_continuous(breaks = seq(1970,2020,5)) +
  transition_reveal(date) +
  labs(title = "Día 20: Upwards (gganimate) #30DayChartChallenge",
       subtitle = "Temperatura promedio anual (1970-2017)",
       x = "Año",
       y = "Temperatura (promedio anual)",
       caption = "Fuente: elaboración propia con datos compilados por el equipo de Datos y Cómputos del (CR)2.\n
       Estación Quinta Normal, código 330020\n
       @desuc_estudios") +
  theme_minimal() +
  theme(axis.text = element_text(colour = "#22b455", family = "Courier New"),
        axis.title = element_text(colour = "#22b455", family = "Courier New"),
        panel.background = element_rect(fill = "#020204", color = NA),
        plot.background = element_rect(fill = "#020204", color = NA),
        plot.title = element_text(colour = "#22b455", family = "Courier New", face = "bold"),
        plot.caption = element_text(colour = "#22b455", family = "Courier New"),
        plot.subtitle = element_text(colour = "#22b455", family = "Courier New"),
        panel.grid.major = element_line(colour = "#204829"),
        panel.grid.minor = element_line(colour = "#204829"))
animate(temperatura, height = 500, width = 800)
anim_save("output/20-upwards.gif")
# Lluvia promedio anual ----
# Monthly accumulated precipitation, same station; "-9999" encodes missing.
df_lluvia <- read.delim("input/cr2_prAmon_2019/cr2_prAmon_2019.txt",
                        sep = ",",
                        header = TRUE,
                        na.strings = c("NA","-9999")) %>%
  select(1, contains("330020"))
# NOTE(review): rows before 15 hold station metadata -- confirm slice bounds
df_lluvia_f <- df_lluvia[15:1454,]
df_lluvia_f <- df_lluvia_f %>%
  rename(date = 1,
         lluvia = 2) %>%
  mutate(date = parse_date_time(date, orders = "ym"),
         date = format(date, "%Y"),   # keep only the year (as character)
         lluvia = as.numeric(lluvia)) %>%
  group_by(date) %>%
  summarise(lluvia = mean(lluvia, na.rm = TRUE)) %>%
  # bug fix: test NaN numerically instead of comparing against the string 'NaN'
  filter(!is.nan(lluvia)) %>%
  filter(date > 1970 & date < 2018)
# animated line chart, same theme as the temperature plot
# NOTE(review): "Precipicación" in the subtitle looks like a typo for
# "Precipitación"; left unchanged to preserve the published chart text
lluvia <- df_lluvia_f %>%
  mutate(date = as.numeric(date)) %>%
  ggplot(aes(x = date, y = lluvia)) +
  geom_point(aes(group = seq_along(date)), color = "#22b455") +
  geom_line(aes(group = 1), color = "#22b455") +
  geom_text(aes(label = scales::comma(lluvia, accuracy = 0.1)), color = "#92e5a1") +
  scale_y_continuous(limits = c(0,60)) +
  scale_x_continuous(breaks = seq(1970,2020,5)) +
  labs(title = "Día 21: Downwards (gganimate) #30DayChartChallenge",
       subtitle = "Precipicación acumulada promedio anual (1970-2017)",
       x = "Año",
       y = "Datos observados de precipitación\nacumulada mensual (promedio anual)",
       caption = "Fuente: elaboración propia con datos compilados por el equipo de Datos y Cómputos del (CR)2.\n
       Estación Quinta Normal, código 330020\n
       @desuc_estudios") +
  transition_reveal(date) +
  theme_minimal() +
  theme(axis.text = element_text(colour = "#22b455", family = "Courier New"),
        axis.title = element_text(colour = "#22b455", family = "Courier New"),
        panel.background = element_rect(fill = "#020204", color = NA),
        plot.background = element_rect(fill = "#020204", color = NA),
        plot.title = element_text(colour = "#22b455", family = "Courier New", face = "bold"),
        plot.caption = element_text(colour = "#22b455", family = "Courier New"),
        plot.subtitle = element_text(colour = "#22b455", family = "Courier New"),
        panel.grid.major = element_line(colour = "#204829"),
        panel.grid.minor = element_line(colour = "#204829"))
animate(lluvia, height = 500, width = 800)
anim_save("output/21-downwards.gif")
|
60aba6bb25f57d7bad76ccaafcc18892923643d0 | a8903875bd24b53c0e4d2e771c054f2b144960c4 | /R/makeDT_lab.R | 4c1f29a2f007ee26c106873ce51c4aa45b799944 | [] | no_license | chang70480/YCfun | ceef82a02e55ad16a0c0a50e7e9aeedb7b2de254 | a055dc63946e2d9f012b67bf497c73d6b9b69065 | refs/heads/master | 2021-03-01T20:34:18.422508 | 2020-11-30T07:50:52 | 2020-11-30T07:50:52 | 245,812,658 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,343 | r | makeDT_lab.R | #' \code{makeDT_lab} read spss and dta
#'
#' This function is based on \code{foreign} and \code{readstata13} this two package, but use this function can operater data easier after some recode dealing.
#'
#' There are three attribute providing for user to konw variable information in each variables. One is \code{variable.labels}, which provides explanation of variable, the other is \code{value.labels},which provides value label for user to contrast value to it's meaning, and the other is \code{table} which summarises variable distribution.
#'
#' @param path file path
#' @param type default="sav". Read dta(stata) or sav(spss)
#' @param label_length default=200. how long \code{variable.labels} you want
#' @param reencode default="Utf-8". some data need to change into "Big5"
#'
#' @return data_frame
#' @export
makeDT_lab <- function(path="",type=c("sav","dta"),label_length=20,reencode=NULL){
ll <- function(vec){
if(length(vec)>label_length){
return(vec[1:label_length])
}else{
return(vec)
}
}
if(type[1]=="sav"){
library(foreign)
if(is.null(reencode)){
DT <- read.spss(file = path,use.value.labels = F,to.data.frame = T)
}else{
DT <- read.spss(file = path,use.value.labels = F,to.data.frame = T,reencode = reencode)
}
temp.lab<- attr(DT,"variable.labels")
name <- names(DT)
f_v <- names(DT)[sapply(DT, class)=="factor"]
for (i in f_v) {
DT[[i]] <- as.character(DT[[i]])
DT[[i]] <- gsub(" ","",DT[[i]])
}
for(i in 1:length(name)){
attr(DT[,name[i]],"variable.labels") <- as.character(temp.lab[i])
attr(DT[,name[i]],"table") <- table(DT[,name[i]],useNA = "always")
}
return(DT %>% as_tibble())
}else if(type[1]=="dta"){
library(readstata13)
if(is.null(reencode)){
DT_ <- read.dta13(path,convert.factors = F)
DT_n <- read.dta13(path,convert.factors = F)
}else{
DT_ <- read.dta13(path,convert.factors = F,encoding = reencode)
DT_n <- read.dta13(path,convert.factors = F,encoding = reencode)
}
attr(DT_n,"variable.labels") <- varlabel(DT_n)
name_ <- get.label.name(DT_)
for (l in 1:length(DT_)) {
attr(DT_n[[l]],"value.labels") <- ll(get.label(DT_,name_[l]))
attr(DT_n[[l]],"variable.labels") <- as.character(varlabel(DT_)[l])
attr(DT_n[[l]],"table") <- ll(table(DT_n[[l]],useNA = "always"))
}
return(DT_n %>% as_tibble())
}
}
|
40bf64e5bf3eaa730c6a3c485541637168ce3b11 | f997b825b89a191ef89709870065d375dd84358d | /man/set_stamp.Rd | 264e1434bc49dc172220f7d1e1e715482d77b5b8 | [
"MIT"
] | permissive | teunbrand/datastamp | 1557b85d423c7a892696a4900c3e239870d1bf72 | ebd6c3bc3a3bd9efe08bc615cc7d9e38ea252ebd | refs/heads/master | 2022-12-28T19:44:54.396093 | 2020-10-19T08:49:09 | 2020-10-19T08:49:09 | 304,337,082 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,438 | rd | set_stamp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stamp.R
\name{set_stamp}
\alias{set_stamp}
\alias{set_stamp.default}
\alias{set_stamp.datastamp}
\alias{set_stamp.Vector}
\alias{get_stamp.default}
\alias{get_stamp.Vector}
\alias{get_stamp.datastamp}
\title{Stamp setter}
\usage{
set_stamp(object, stamp)
\method{set_stamp}{default}(object, stamp)
\method{set_stamp}{datastamp}(object, stamp)
\method{set_stamp}{Vector}(object, stamp)
\method{get_stamp}{default}(object)
\method{get_stamp}{Vector}(object)
\method{get_stamp}{datastamp}(object)
}
\arguments{
\item{object}{An R object.}
\item{stamp}{A \code{datastamp} object.}
}
\value{
The \code{object} with a datastamp attached.
}
\description{
Attaches a datastamp to an object.
}
\details{
For Bioconductor objects in the S4Vectors framework, the datastamp
is stored in the \code{metadata} slot instead of the attributes.
}
\section{Methods (by class)}{
\itemize{
\item \code{default}: Attaches \code{stamp} to \code{datastamp} attribute.
\item \code{datastamp}: Raises an error.
\item \code{Vector}: Attaches \code{stamp} to list in \code{metadata} slot.
\item \code{default}: Retrieves the \code{"datastamp"} attribute.
\item \code{Vector}: Retrieves the list-item named \code{"datastamp"} from the
\code{metadata} slot.
\item \code{datastamp}: Returns \code{object} itself.
}}
\examples{
x <- 1:5
y <- make_stamp()
x <- set_stamp(x, y)
}
|
ec4645a2344114bbbd7cfb182ab4669fb682bbc3 | 112f803e7d96e533166d05be62286add3dabaaad | /CMEEMainProject/Code/QuickSOMBoxplots.R | 8e4f1bba1a605e9b7b00ee092b8008d35b504ad2 | [] | no_license | PetraGuy/CMEECourseWork | 51a9b559e0463585827c07ef7a9a776d21f48488 | 0f14009a1e1b41f916bd76df7000a526dd0d6204 | refs/heads/master | 2018-10-21T23:07:18.916906 | 2018-10-02T07:31:28 | 2018-10-02T07:31:28 | 105,747,959 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,036 | r | QuickSOMBoxplots.R |
# Quick SOM/pH/richness box plots for NVC woodland communities.
library(dplyr)
# bug fix: use library() rather than require() for hard dependencies --
# require() only warns (returns FALSE) when the package is missing, so the
# script would fail later with a confusing error
library(ggplot2)
library(reshape)
library(gridExtra)
# NOTE(review): clearing the workspace/console inside a script is discouraged;
# kept to preserve the original behaviour
rm(list = ls())
cat("\014")
site_data <- read.csv("../Data/CompleteSiteLevelVars.csv")
AllPlotsvars <- read.csv("../Data/AllPlotsVarsRichness.csv")
W4 = AllPlotsvars%>%filter(ShortNVC == "W4")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W8 = AllPlotsvars%>%filter(ShortNVC == "W8")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W10 = AllPlotsvars%>%filter(ShortNVC == "W10")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W16 = AllPlotsvars%>%filter(ShortNVC == "W16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W21 = AllPlotsvars%>%filter(ShortNVC == "W21")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
M15 = AllPlotsvars%>%filter(ShortNVC == "M15")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
M16 = AllPlotsvars%>%filter(ShortNVC == "M16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
OV27 = AllPlotsvars%>%filter(ShortNVC == "OV27")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W6 = AllPlotsvars%>%filter(ShortNVC == "W6")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W12 = AllPlotsvars%>%filter(ShortNVC == "W12")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W13 = AllPlotsvars%>%filter(ShortNVC == "W13")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W14 = AllPlotsvars%>%filter(ShortNVC == "W14")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W15 = AllPlotsvars%>%filter(ShortNVC == "W15")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
W16 = AllPlotsvars%>%filter(ShortNVC == "W16")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
MG7 = AllPlotsvars%>%filter(ShortNVC == "MG7A")%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
df = as.data.frame(rbind(W4,W8,W10,W16,W21,M15,M16))
df$SOM_div_10 = df$SOMYr2/10
df$pH_div_10 = df$pHYr2
df$PlotRichness = df$plot_richness/10
df_mod = df[-c(2,3,4)]
melted = melt(df_mod)
ggplot(data=melted, aes(y = value, x = ShortNVC,colour = variable))+
geom_boxplot(varwidth = FALSE, outlier.colour = NULL)+
scale_y_continuous(breaks = seq(0,10, by = 1))+
geom_vline(xintercept = c(1.5,2.5,3.5,4.5,5.5,6.5))
# Exploratory plots across ALL plots: richness against SOM, mean DBH, live
# basal area and pH, using cut_width() to bin the continuous x variable.
# NOTE(review): varwidth passed to ggplot()/aes wrappers here is ignored;
# it only has effect inside geom_boxplot().
ggplot(AllPlotsvars, aes(x = SOMYr2, y = plot_richness))+geom_point()
ggplot(AllPlotsvars, aes(x=mean_dbh, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(mean_dbh, 5)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=LiveBasalAreaYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(LiveBasalAreaYr2, 1)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=SOMYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 5)), na.rm = TRUE)
ggplot(AllPlotsvars, aes(x=pHYr2, y=plot_richness), varwidth = TRUE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(pHYr2, 1)), na.rm = TRUE)
# Keep only NVC classes with more than 10 plots, then boxplot SOM, pH and
# richness (SOM and richness rescaled to a 0-10 axis) per class.
data = AllPlotsvars%>%select(ShortNVC,SOMYr2,pHYr2,plot_richness)
codefreq = as.data.frame(table(AllPlotsvars$ShortNVC))
codefreq = codefreq[order(codefreq$Freq),]
bigNVC = codefreq%>%filter(Freq>10)
bigNVC = bigNVC[-3,]   # drops the 3rd row by position -- presumably an unwanted class; TODO confirm
# Bug fix: as.data.frame(table(...)) names its columns Var1/Freq, but the
# original read bigNVC$ShortNVC *before* renaming, so `codes` was NULL and
# the as.character() assignment could not work. Rename first, then extract.
colnames(bigNVC) = c("ShortNVC","Freq")
bigNVC$ShortNVC = as.character(bigNVC$ShortNVC)
codes = bigNVC$ShortNVC
data$ShortNVC = as.character(data$ShortNVC)
data = data[order(data$ShortNVC),]
AllNVC = unique(data$ShortNVC)
databigNVC = data%>%filter(ShortNVC %in% codes)
databigNVC$SOMYr2 = databigNVC$SOMYr2/10            # SOM % -> 0-10 scale
databigNVC$plot_richness = databigNVC$plot_richness/10
melted = melt(databigNVC)
ggplot(data=melted, aes(y = value, x = ShortNVC,colour = variable))+
geom_boxplot(varwidth = FALSE, outlier.colour = NULL, na.rm = TRUE)
# W4: richness vs SOM boxplots in 10-unit SOM bins, raw points overlaid.
# This block's chain is correctly joined with `+` throughout (unlike some
# of the sibling plots below).
gw4= ggplot(W4, aes(x=SOMYr2, y=plot_richness)) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), varwidth = FALSE, na.rm = TRUE)+
annotate("label", x = 80, y = 80, label = "W4")+
geom_point(alpha = 0.2, colour = "red")+
scale_y_continuous(limits = c(0,100) )
# W6: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix: the chain ended at geom_smooth(), leaving scale_y_continuous()
# on its own line where its result was computed and silently discarded; it
# is now part of the plot.
gw6 <- ggplot(W6, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W6") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# W8: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix: scale_y_continuous() was orphaned on its own line (missing `+`
# after geom_smooth()), so its result was discarded; it now joins the plot.
gw8 <- ggplot(W8, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W8") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# W10: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix 1: scale_y_continuous() was orphaned after geom_smooth() (missing
# `+`), so it was computed and discarded; it now joins the plot.
gw10 <- ggplot(W10, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W10") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# Bug fix 2: the original called lm(plot_richness~SOMYr2, w10), but `w10`
# is undefined -- the data frame is `W10`.
w10lm <- lm(plot_richness ~ SOMYr2, data = W10)
# W12: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix: scale_y_continuous() was orphaned on its own line (missing `+`
# after geom_smooth()); it now joins the plot.
gw12 <- ggplot(W12, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W12") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# W13: richness vs SOM boxplots (10-unit bins) with raw points; no lm
# trend for this class. Chain correctly joined with `+` throughout.
gw13 = ggplot(W13, aes(x=SOMYr2, y=plot_richness), varwidth = FALSE) +
geom_boxplot(fill="skyblue", aes(group = cut_width(SOMYr2, 10)), na.rm = TRUE)+
annotate("text", x = 80, y = 40, label = "W13")+
geom_point(alpha = 0.2, colour = "red")+
scale_y_continuous(limits = c(0,100) )
# W14: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix: scale_y_continuous() was orphaned on its own line (missing `+`
# after geom_smooth()); it now joins the plot.
gw14 <- ggplot(W14, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W14") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# W15: richness vs SOM boxplots (10-unit bins) with raw points.
# Bug fix: this block was copy-pasted from the W8 plot but never updated --
# it was labeled "W15" while plotting the W8 data frame. Now uses W15.
gw15 <- ggplot(W15, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W15") +
  geom_point(alpha = 0.2, colour = "red") +
  scale_y_continuous(limits = c(0, 100))
# W16: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix 1: copy-pasted from the W8 plot -- labeled "W16" while plotting W8
# data; now uses W16. Bug fix 2: scale_y_continuous() was orphaned after
# geom_smooth() (missing `+`); it now joins the plot.
gw16 <- ggplot(W16, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W16") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# W21: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix 1: copy-pasted from the W8 plot -- labeled "W21" while plotting W8
# data; now uses W21. Bug fix 2: scale_y_continuous() was orphaned after
# geom_smooth() (missing `+`); it now joins the plot.
gw21 <- ggplot(W21, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "W21") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# OV27: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix 1: copy-pasted from the W8 plot -- labeled "OV27" while plotting
# W8 data; now uses OV27. Bug fix 2: scale_y_continuous() was orphaned
# after geom_smooth() (missing `+`); it now joins the plot.
gov27 <- ggplot(OV27, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "OV27") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# MG7: richness vs SOM boxplots (10-unit bins) with raw points and lm trend.
# Bug fix: scale_y_continuous() was orphaned on its own line (missing `+`
# after geom_smooth()); it now joins the plot.
gmg7 <- ggplot(MG7, aes(x = SOMYr2, y = plot_richness)) +
  geom_boxplot(fill = "skyblue", aes(group = cut_width(SOMYr2, 10)),
               varwidth = FALSE, na.rm = TRUE) +
  annotate("text", x = 80, y = 40, label = "MG7") +
  geom_point(alpha = 0.2, colour = "red") +
  geom_smooth(method = "lm") +
  scale_y_continuous(limits = c(0, 100))
# Arrange the per-class plots in a 2-column grid, then pull out example
# high/low plots from the ground-cover survey for W10 and W8.
# NOTE(review): path uses "../data" here vs "../Data" elsewhere -- this
# matters on case-sensitive filesystems; confirm the actual directory name.
grid.arrange(gw6,gw8,gw10,gw16,gw21,gov27,ncol = 2)
w10all = AllPlotsvars%>%filter(ShortNVC == "W10")
W8all = AllPlotsvars%>%filter(ShortNVC == "W8")
w16all = AllPlotsvars%>%filter(ShortNVC=="W16")
groundcover = read.csv("../data/GroundCover.csv")
# Specific site/plot pairs, presumably chosen as high- and low-SOM examples
# for each class -- TODO confirm the selection criterion.
w10high1= groundcover%>%filter(SITE==42)%>%filter(PLOT==16)
w10high2 = groundcover%>%filter(SITE==77)%>%filter(PLOT==13)
w10low1 = groundcover%>%filter(SITE==98)%>%filter(PLOT==5)
w10low2 = groundcover%>%filter(SITE==91)%>%filter(PLOT==12)
w8high1 = groundcover%>%filter(SITE==60)%>%filter(PLOT==4)
w8high2= groundcover%>%filter(SITE==55)%>%filter(PLOT==10)
w8low1= groundcover%>%filter(SITE==4)%>%filter(PLOT==8)
w8low2 =groundcover%>%filter(SITE==71)%>%filter(PLOT==9)
# Load the Ellenberg indicator table and species-code lookup, normalising
# the shared join key `Amalgams` (strip spaces, coerce to numeric).
# NOTE(review): as.numeric() will turn any non-numeric codes into NA with a
# warning -- worth checking the input if joins later come up short.
Ellenbergs = read.csv("../Data/Ellenbergs.csv")
colnames(Ellenbergs) = c("Amalgams","Taxon.name","L" , "F" , "R" , "N" , "S" )
Ellenbergs$Amalgams = gsub(" ", "", Ellenbergs$Amalgams, fixed = TRUE)
Ellenbergs$Amalgams = as.numeric(Ellenbergs$Amalgams)
vegcodes = read.csv("../Data/vegetation_codes.csv")
colnames(vegcodes) = c("Species","Amalgams")
# Attach species names and Ellenberg scores to each example plot's
# ground-cover records, then compare species composition between the
# high- and low-SOM W10 plots via setdiff on the Amalgams codes.
# NOTE(review): `w10high2ellen` breaks the `*veg*ellen` naming pattern used
# by its siblings (would be w10highveg2ellen) -- presumably a typo.
w10lowveg1 = inner_join(vegcodes,w10low1)
w10lowveg1ellen = inner_join(w10lowveg1,Ellenbergs)
w10lowveg2 = inner_join(vegcodes,w10low2)
w10lowveg2ellen = inner_join(w10lowveg2,Ellenbergs)
w10highveg1 = inner_join(vegcodes,w10high1)
w10highveg1ellen = inner_join(w10highveg1,Ellenbergs)
w10highveg2 = inner_join(vegcodes,w10high2)
w10high2ellen = inner_join(w10highveg2,Ellenbergs)
# Species present in one plot but not the other (by Amalgams code).
diffW10 = setdiff(w10highveg1ellen,w10lowveg1ellen)
diffW10low = setdiff(w10lowveg1ellen$Amalgams,w10highveg1ellen$Amalgams)
w10lownothigh = vegcodes%>%filter(Amalgams %in% diffW10low)
diffW10high = setdiff(w10highveg1ellen$Amalgams,w10lowveg1ellen$Amalgams)
w10highnotlow = vegcodes%>%filter(Amalgams %in% diffW10high)
#################
# Same joins for the W8 example plots.
w8lowveg1 = inner_join(vegcodes, w8low1)
w8lowveg1ellen = inner_join(w8lowveg1,Ellenbergs)
w8lowveg2 = inner_join(vegcodes,w8low2)
w8lowveg2ellen = inner_join(w8lowveg2, Ellenbergs)
w8highveg1 = inner_join(vegcodes, w8high1)
w8highveg1ellen = inner_join(w8highveg1, Ellenbergs)
w8highveg2 = inner_join(vegcodes, w8high2)
w8highveg2ellen = inner_join(w8highveg2, Ellenbergs)
# Mean Ellenberg N and R scores for the ground-cover species of one plot.
#
# Looks up all ground-cover records for the given site/plot, joins them to
# the species-code table and the Ellenberg indicator table, and returns the
# mean N (nitrogen) and R (reaction) scores. Relies on the global data
# frames `groundcover`, `vegcodes` and `Ellenbergs` created earlier.
#
# Args:
#   site: site identifier, matched against groundcover$SITE.
#   plot: plot number within the site, matched against groundcover$PLOT.
# Returns:
#   Numeric vector of length 2: mean N then mean R (named, but positional
#   indexing is backward-compatible with the original unnamed return).
get_ave_ellens <- function(site, plot) {
  # Fix: the original overwrote the `plot` argument with the filtered data
  # frame; use a distinct local name instead of shadowing the parameter.
  plot_records <- groundcover %>% filter(SITE == site) %>% filter(PLOT == plot)
  plot_veg <- inner_join(vegcodes, plot_records)
  plot_ellen <- inner_join(plot_veg, Ellenbergs)
  c(N = mean(plot_ellen$N), R = mean(plot_ellen$R))
}
# Mean Ellenberg (N, R) per example plot.
# NOTE(review): these assignments reuse the names w10low1/w10high1/etc.,
# clobbering the ground-cover data frames created above with 2-element
# numeric vectors -- the data frames are no longer needed past this point,
# but the reuse is easy to trip over.
w10low1 = get_ave_ellens(98,5)
w10low2 = get_ave_ellens(91,12)
w10high1 = get_ave_ellens(42,16)
w10high2 = get_ave_ellens(77,13)
w8low1 = get_ave_ellens(4,8)
w8low2 = get_ave_ellens(71,9)
w8high1 = get_ave_ellens(60,4)
w8high2 = get_ave_ellens(55,10)
|
842f1bc65ff495db86042fee28717daefef6688c | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/securityhub_accept_invitation.Rd | c9eb9aa6a9ecddf59f161bf14bd3b3dca32ee9db | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,146 | rd | securityhub_accept_invitation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_accept_invitation}
\alias{securityhub_accept_invitation}
\title{Accepts the invitation to be a member account and be monitored by the
Security Hub master account that the invitation was sent from}
\usage{
securityhub_accept_invitation(MasterId, InvitationId)
}
\arguments{
\item{MasterId}{[required] The account ID of the Security Hub master account that sent the
invitation.}
\item{InvitationId}{[required] The ID of the invitation sent from the Security Hub master account.}
}
\value{
An empty list.
}
\description{
Accepts the invitation to be a member account and be monitored by the
Security Hub master account that the invitation was sent from.
This operation is only used by member accounts that are not added
through Organizations.
When the member account accepts the invitation, permission is granted to
the master account to view findings generated in the member account.
}
\section{Request syntax}{
\preformatted{svc$accept_invitation(
MasterId = "string",
InvitationId = "string"
)
}
}
\keyword{internal}
|
9abba53dc0ed7406fe51325dc2b3aae9ffca444b | 37a875fb2142480f3bf8da9d3ab5457d006bcd26 | /ifelseBasics.R | 03f48664b9ca37745ae970fd0376c5b6b3531be9 | [] | no_license | Goddoye/R-Coding | 62bdecf611a789e768a9b14b0800c1ee3346b25d | 6c857347687ad547b4b46bdebb20899f36b74dd5 | refs/heads/master | 2022-05-20T11:50:35.352989 | 2020-04-29T12:27:06 | 2020-04-29T12:27:06 | 254,621,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 289 | r | ifelseBasics.R | #If else, and else if statements
x = 15
if (x==10){
print ('X is equal to 10')
} else if (x==12){
print('X is equal to 12')
} else {
print("X is not equal to 10 or 12")
}
hot <- FALSE
temp <- 60
if (temp >80){
print("It is hot")
}else{
print("It is not hot")
} |
818ffb63c4225aabf53c2ca5a263cdb6d7de0caa | fd56b6a77bbb080ac7d1e3109446b635ae8eed69 | /man/extractPIDs.Rd | fea3d81cb58b3db298d4917a917cf8ad4d93e9f2 | [] | no_license | philliplab/MotifBinner2 | 325c010b18d662be1abf700e6028eaf138705ad5 | 734b24c2f9d009cd6c8d3ea4a8f8085ac9d4a7dd | refs/heads/master | 2021-07-01T12:36:35.369658 | 2020-09-07T15:11:15 | 2020-09-07T15:11:15 | 149,005,542 | 0 | 0 | null | 2018-09-16T14:51:06 | 2018-09-16T14:51:05 | null | UTF-8 | R | false | true | 493 | rd | extractPIDs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractPIDs.R
\name{extractPIDs}
\alias{extractPIDs}
\title{Extract the PIDs after affixes were trimmed and add them to the
sequence names}
extractPIDs(all_results, config)
}
\arguments{
\item{all_results}{A list of all results given the class 'all_results'.}
\item{config}{A list of configuration options}
}
\description{
Extracts the PIDs after affixes were trimmed and adds them to the sequence names
}
|
cfdcead77d63edf01d4b8c15cbf798803937c658 | 3d329328120305f23142ac17ea0b29cec007fae8 | /R/data.R | d69957b11e4831341b955a0f89142068c3c46aae | [] | no_license | yufree/rmwf | 61031a8e3e58328022f8a645799df55c985ed7d7 | 4306be624f36c2d7ee1e4295ec899a320866257c | refs/heads/master | 2023-04-09T19:18:37.829131 | 2023-03-27T22:40:42 | 2023-03-27T22:40:42 | 161,683,036 | 6 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,536 | r | data.R | #' Demo mzrt list object
#' @docType data
#' @format mzrt list object
"mzrt"
#' Demo mzrt list object for negative mode
#' @docType data
#' @format mzrt list object
"mzrtn"
#' Demo ipo parameters object
#' @docType data
#' @format ipo parameters
"para"
#' Demo ipo parameters object for negative mode
#' @docType data
#' @format ipo parameters
"paran"
#' Demo xcmseic object
#' @docType data
#' @format xcmseic object
"srmeic"
#' Demo xcmseic object for negative mode
#' @docType data
#' @format xcmseic object
"srmneic"
#' Demo xcmsset object
#' @docType data
#' @format xcmsset object
"srmxset"
#' Demo xcmsset object for negative mode
#' @docType data
#' @format xcmsset object
"srmnxset"
#' A list containing HMDB qqq MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' @docType data
#' @format A list containing HMDB qqq MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' \describe{
#' \item{name}{HMDB ID}
#' \item{mz}{mass to charge ratio}
#' \item{msms}{msms pmd}
#' \item{msmsraw}{raw msms data}
#' }
"qqq"
#' A list containing HMDB qtof MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' @docType data
#' @format A list containing HMDB qtof MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' \describe{
#' \item{name}{HMDB ID}
#' \item{mz}{mass to charge ratio}
#' \item{msms}{msms pmd}
#' \item{msmsraw}{raw msms data}
#' }
"qtof"
#' A list containing HMDB orbitrap MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' @docType data
#' @format A list containing HMDB orbitrap MS/MS data with peaks above 10 percent relative intensity for PMD annotation
#' \describe{
#' \item{name}{HMDB ID}
#' \item{mz}{mass to charge ratio}
#' \item{msms}{msms pmd}
#' \item{msmsraw}{raw msms data}
#' }
"orb"
#' A data frame with compounds from hmdb and refmet
#' @docType data
#' @format A data frame with compounds from hmdb and refmet including name, InChIKey, chemical_formula, data source and exact mass
#' \describe{
#' \item{name}{compounds name}
#' \item{InChIKey}{InChIKey}
#' \item{chemical_formula}{chemical formula}
#' \item{db}{database sources}
#' \item{mass}{exact mass}
#' }
"hr"
#' A list containing HMDB GC-EI-MS spectra database
#' @docType data
#' @format A list with compounds from hmdb for GC-MS simulation
"hmdbcms"
#' A list containing MoNA LC-MS spectra database
#' @docType data
#' @format A list with compounds from MoNA LC-MS spectra database for GC-MS simulation
"monams1"
|
52b5e38cc045b06616561c7480b45bb158704cd2 | fadcb5eff5af4586fcc574ea73c7679780762674 | /R_Basics_01.R | 7a152ca0d5dc841db572fa291ac87cbf95d2f687 | [] | no_license | ojask1205/Machine-Learning-using-R | 7fa2d6ac21396d3113505a83db29e877371d44ad | 1b37634ca8617e779546abe51bb04e7281e092bf | refs/heads/master | 2021-05-03T08:06:47.809535 | 2018-02-07T04:00:38 | 2018-02-07T04:00:38 | 120,560,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,696 | r | R_Basics_01.R | a=5
a
a=5
a
class(a)
a=5
a
class(a)
b="c"
b
class(b)
a=5
a
class(a)
b="c"
b
class(b)
b="5"
b
class(b)
a=5
a
class(a)
b='c'
b
class(b)
b="5"
b
class(b)
a=c(2,3,4,5)
a
b=c("2","3")
b
b=c("2","3")
b
a=c(2,3,4,5)
a
b=c(2,"3")
b
a=c(2,3,4,5)
a
b=c(2,"3")
b
class(b)
a=c(2,3,4,5)
a
b=c(2,"3")
b
class(b)
class(a)
a=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=TRUE)
a
b=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=FALSE)
b
c=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3)
c
a=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=TRUE)
a
b=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=FALSE)
b
c=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3)
c
a[1,]
a[,1]
a=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=TRUE)
a
b=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3,byrow=FALSE)
b
c=matrix(c(1,2,3,4,5,6),nrow=2,ncol=3)
c
a[1,]
a[,1]
class(a)
a=read.csv("dataset1.csv")
a
setwd("C:\Users\Admin\Desktop\Dataset")
p=read.csv("dataset1.csv")
setwd("C:/Users/Admin/Desktop/Dataset")
p=read.csv("dataset1.csv")
setwd("C:/Users/Admin/Desktop/Dataset")
p=read.csv("dataset1.csv")
p
setwd("C:/Users/Admin/Desktop/Dataset")
p=read.csv("dataset1.csv")
p
class(p)
setwd("C:/Users/Admin/Desktop/Dataset")
p=read.csv("dataset1.csv")
p
p$Height
setwd("C:/Users/Admin/Desktop/Dataset")
p=read.csv("dataset1.csv")
p
p$Height
p$Girth
Name=c("a","b","c")
English=c(1,2,3)
Maths=c(1,2,3)
Science=c(1,2,3)
c=data.frame(Name,English,Maths,Science)
Name=c("a","b","c")
English=c(1,2,3)
Maths=c(1,2,3)
Science=c(1,2,3)
c=data.frame(Name,English,Maths,Science)
c
Name=c("a","b","c")
English=c(1,2,3)
Maths=c(1,2,3)
Science=c(1,2,3)
c=data.frame(Name,English,Maths,Science)
c |
64b10e534e2c4ccbbe4c80cbc74f1ffe63460c46 | b6a4b68ec502322a8ba8a9151e67e818cd112cb8 | /man/getRecordForUser.Rd | 13e42075165f0100519cea6c52f8e32479ce5a27 | [] | no_license | ralmond/EABN | ffd67e3ba2e112bf69e42ee5c60eb1e2ec1734c5 | ff55aa44c756cb6157d907f66b7d54f33766c01c | refs/heads/master | 2023-07-25T13:29:02.241959 | 2023-07-12T20:45:02 | 2023-07-12T20:45:02 | 240,610,408 | 1 | 1 | null | 2023-07-11T22:00:12 | 2020-02-14T22:36:52 | R | UTF-8 | R | false | false | 4,929 | rd | getRecordForUser.Rd | \name{getRecordForUser}
\alias{getRecordForUser}
\title{Gets or makes the student record for a given student.}
\description{
The \code{\linkS4class{BNEngine}} contains a
\code{\linkS4class{StudentRecordSet}}, which is a collection of
\code{\linkS4class{StudentRecord}} objects. The function
\code{getRecordForUser} fetches one from the collection (if it exists)
or creates a new one.
}
\usage{
getRecordForUser(eng, uid, srser = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{eng}{The \code{\linkS4class{BNEngine}} in question.}
\item{uid}{A character scalar giving the unique identifier for the
student. }
\item{srser}{A serialized version of the student record. Used to
extract the student record in database-free mode. This should
either be a list which is the output of
\code{\link[jsonlite]{fromJSON}} or \code{NULL}.}
}
\details{
The student record set can either be attached to a database (the
\code{dburi} field passed to \code{\link{StudentRecordSet}} is
non-empty, or not. In the database mode, recrods are saved in the
database, so that they can be retrieved across sessions. In the
database-free mode, the serialized student record (if it exists)
should be passed into the \code{getRecordForUser} function.
If no student record is available for the \code{uid}, then a new one
is created by cloning the default student record (see
\code{\link{setupDefaultSR}}).
This function mostly just calls \code{\link{getSR}} on the
\code{\linkS4class{StudentRecordSet}}; however, if a new record is
generated, then \code{\link{announceStats}} is called to advertise the
baseline statistics for the new user.
}
\section{Warning}{
Calling this multiple times will not return the same student record.
In particular, the student model associated with the old version of
the record could be replaced with a new version, rendering the student
model in the old records inactive. Be careful when dealing with old
records.
}
\value{
The \code{\linkS4class{StudentRecord}} object is returned.
}
\references{
Almond, Mislevy, Steinberg, Yan and Williamson (2015). \emph{Bayesian
Networks in Educational Assessment}. Springer. Especially Chapter
13.
}
\author{Russell Almond}
\seealso{
\code{\linkS4class{BNEngine}}, \code{\linkS4class{StudentRecordSet}},
\code{\linkS4class{StudentRecord}}
\code{\link{handleEvidence}}, \code{\link{setupDefaultSR}},
\code{\link{fetchSM}}, \code{\link{getSR}}
}
\examples{
%PNetica%\dontrun{#Requires PNetica
library(PNetica)
##Start with manifest
sess <- RNetica::NeticaSession()
RNetica::startSession(sess)
## BNWarehouse is the PNetica Net Warehouse.
## This provides an example network manifest.
config.dir <- file.path(library(help="Peanut")$path, "auxdata")
netman1 <- read.csv(file.path(config.dir,"Mini-PP-Nets.csv"),
row.names=1, stringsAsFactors=FALSE)
net.dir <- file.path(library(help="PNetica")$path, "testnets")
stattab <- read.csv(file.path(config.dir, "Mini-PP-Statistics.csv"),
as.is=TRUE)
Nethouse <- PNetica::BNWarehouse(manifest=netman1,session=sess,key="Name",
address=net.dir)
cl <- new("CaptureListener")
listeners <- list("cl"=cl)
ls <- ListenerSet(sender= "EAEngine[Test]",
db=MongoDB(noMongo=TRUE), listeners=listeners)
eng <- newBNEngineNDB(app="Test",warehouse=Nethouse,
listenerSet=ls,manifest=netman,
profModel="miniPP_CM",
histNodes="Physics",
statmat=stattab,
activeTest="EAActive.txt")
## Standard initialization methods.
loadManifest(eng,netman1)
eng$setHistNodes("Physics")
configStats(eng,stattab)
setupDefaultSR(eng)
sr0a <- getRecordForUser(eng,"Student1")
sr0 <- getRecordForUser(eng,"Student1")
## This is announcing twice, so not quite working with NDB engine.
stopifnot(is.active(sm(sr0)),!is.active(sm(sr0a)))
stopifnot(all.equal(stats(sr0),stats(sr0a)))
eap0<- stat(sr0,"Physics_EAP")
e1 <- EvidenceSet(uid="Student1",app="Test",context="PPcompEM",
obs=list("CompensatoryObs"="Right"))
e1 <- logEvidence(eng,sr0,e1)
sr1 <- accumulateEvidence(eng,sr0,e1)
stopifnot(m_id(sr1)!=m_id(sr0),sr1@prev_id==m_id(sr0))
stopifnot(seqno(sr1)==1L, seqno(e1)==1L)
eap1 <- stat(sr1,"Physics_EAP")
stopifnot(abs(eap1-eap0) > .001)
stopifnot(nrow(history(sr1,"Physics"))==2L)
sr1.ser <- as.json(sr1)
WarehouseFree(Nethouse,PnetName(sm(sr1))) # Delete student model to
# force restore.
sr1a <- getRecordForUser(eng,"Student1",jsonlite::fromJSON(sr1.ser))
#PnetCompile(sm(sr1a))
eap1a <- stat(sr1a,"Physics_EAP")
stopifnot(abs(eap1-eap1a) < .001)
stopifnot(nrow(history(sr1a,"Physics"))==2L)
## <<Here>> Need test with Mongo engine
%PNetica%}
}
\keyword{ manip }
|
60d6d339e7abf5bc6eedf26b961be7c499e7e7cb | 0409799b662616035ff32251511f1f8fe56766a6 | /WIRDS/homework/praca_domowa_2_orchowski.r | 46feaee988646e5ba87aa993eff06ccbfc6f5f85 | [] | no_license | tomasznierychly/Dydaktyka | c79f51181f0438f03f5182ae1f70d98a51c866c3 | 3433ef5e034f362ce43ce2f2742967540eb92b77 | refs/heads/master | 2021-05-28T19:29:20.715906 | 2015-03-24T08:50:00 | 2015-03-24T08:50:00 | 32,805,457 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 625 | r | praca_domowa_2_orchowski.r | library(XLConnect)
library(dplyr)
library(tidyr)
library(ggplot2)
wb <- loadWorkbook('./Dane_zaj3_wlasnosc.xlsx')
dane <- readWorksheet(wb,1)
dane_liniowy <- dane %>% gather(rok, y, -Kategoria)
ggplot(data = dane_liniowy,
aes(x = rok,
y = y,
colour = Kategoria,
group = Kategoria))+
geom_point(size = 5) +
geom_line() +
theme_bw() +
xlab('Rok spisu')+
ylab('Udział‚ (%)') +
ggtitle('Udział poszczególnych kategorii własności u respondentów') +
geom_text(aes(label=Kategoria), hjust=0, vjust=2, size = 3) +
geom_text(aes(label=y), hjust=2, vjust=1, size =3) |
38d653ced08ce2ee8cca257469905d547f27ceaf | effe14a2cd10c729731f08b501fdb9ff0b065791 | /cran/paws.customer.engagement/man/pinpoint_update_apns_channel.Rd | 88c9167e2dcfe3689aeee18731a3a3d9353116ba | [
"Apache-2.0"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 869 | rd | pinpoint_update_apns_channel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_operations.R
\name{pinpoint_update_apns_channel}
\alias{pinpoint_update_apns_channel}
\title{Use to update the APNs channel for an app}
\usage{
pinpoint_update_apns_channel(APNSChannelRequest, ApplicationId)
}
\arguments{
\item{APNSChannelRequest}{[required]}
\item{ApplicationId}{[required] The unique ID of your Amazon Pinpoint application.}
}
\description{
Use to update the APNs channel for an app.
}
\section{Request syntax}{
\preformatted{svc$update_apns_channel(
APNSChannelRequest = list(
BundleId = "string",
Certificate = "string",
DefaultAuthenticationMethod = "string",
Enabled = TRUE|FALSE,
PrivateKey = "string",
TeamId = "string",
TokenKey = "string",
TokenKeyId = "string"
),
ApplicationId = "string"
)
}
}
\keyword{internal}
|
b78d1f4e3676b100257ecbdd3295c0d0cf5d15f5 | eb6f8b652a5c600f5f84d67b2e643fb6fe29cd17 | /99_debug_model.R | 2df27f204571f4dc007faba1bdc9ea4b2d5c7c9b | [] | no_license | jkbest2/spatq_sims | b528ab6a81f3a42473700bfa295247152f0186a0 | e2f4c1e65a32ce437e878717773dd41ff3574c6e | refs/heads/master | 2022-08-15T05:01:25.337849 | 2022-05-18T21:33:29 | 2022-05-18T21:33:29 | 225,442,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,385 | r | 99_debug_model.R | library(tidyverse)
library(TMB)
devtools::load_all("../spatq", helpers = FALSE, export_all = FALSE)
source("97_debug_fns.R")
compile("src/spatq_simplified.cpp")
dyn.load(dynlib("src/spatq_simplified"))
## estd <- specify_estimated(omega = list(
## omega_n = list(
## log_kappa = TRUE,
## log_tau = TRUE
## ),
## omega_w = list(
## log_kappa = TRUE,
## log_tau = TRUE
## )
## ), lambda = TRUE)
estd <- specify_estimated(beta = TRUE,
gamma = FALSE,
omega = FALSE,
epsilon1 = list(epsilon1_n = TRUE,
epsilon1_w = FALSE),
lambda = TRUE,
eta = FALSE,
phi = FALSE,
psi = FALSE)
repl <- 1
scen <- "pref"
## Code from `make_sim_adfun`
catch_df <- read_catch(repl, scen)
truepop_df <- read_popstate(repl, scen)
index_df <- create_index_df(step = 5, T = attr(catch_df, "T"))
mesh <- generate_mesh()
fem <- generate_fem(mesh)
## prepare normal version
data <- prepare_data(catch_df, index_df, mesh, fem)
parameters <- prepare_pars(data, mesh)
map <- prepare_map(parameters, estd)
random <- prepare_random(map)
## simplify
data <- simplify_data(data)
parameters <- simplify_pars(parameters)
map <- simplify_map(map)
names(map) <- gsub("epsilon1", "epsilon", names(map))
## ## map$epsilon_n <- factor(rep(NA, 404 * 25))
## map$epsilon_w <- factor(rep(NA, 404 * 25))
random <- simplify_random(random)
data$proc_switch <- simple_proc_switch(random)
data$norm_flag <- TRUE
data$incl_data <- FALSE
parameters$epsilon1_n <- matrix(rnorm(404 * 25, 0, 0.1), nrow = 404, ncol = 25)
parameters$epsilon1_w <- matrix(rnorm(404 * 25, 0, 0.1), nrow = 404, ncol = 25)
names(parameters) <- gsub("epsilon1", "epsilon", names(parameters))
## random[3:4] <- c("epsilon_n", "epsilon_w")
random <- "epsilon_n"
# Build the TMB objective; `random` lists parameters integrated out via the
# Laplace approximation.
obj <- MakeADFun(
  data = data,
  parameters = parameters,
  map = map,
  random = random,
  silent = FALSE,
  DLL = "spatq_simplified"
)

## TODO Try without normalization: make sure that it's implemented correctly and
## make sure that it's actually faster!
if (!data$norm_flag) {
  obj <- TMB::normalize(obj, flag = "incl_data", value = FALSE)
}
# Bug fix: the parenthesis was misplaced (`length(random > 0)`); that
# happened to evaluate the same way but did not express the intended
# "any random effects?" test.
if (length(random) > 0) {
  TMB::runSymbolicAnalysis(obj)
}

fit1 <- fit_spatq(obj)        # initial optimization
fit1 <- fit_spatq(obj, fit1)  # restart from the previous fit to polish
rep1 <- report_spatq(obj)
sdr1 <- sdreport_spatq(obj)
# Rescale the estimated index and assemble true-vs-estimated series with a
# 95% normal CI for the estimates (NA for the truth rows).
# Bug fix: the sdreport was stored in `sdr1` above, but this section read
# from an undefined `sdr`.
# NOTE(review): this reuses the name `index_df`, clobbering the
# survey-design index_df created earlier in the script (no longer needed).
index_est <- rescale_index(sdr1$value)
index_sd <- sdr1$sd / attr(index_est, "scale")
index_df <- tibble(
  type = rep(c("true", "est"), each = 25),
  year = rep(1:25, 2),
  index = c(rescale_index(truepop_df$pop), index_est),
  low = index - c(rep(NA, 25), qnorm(0.975) * index_sd),
  high = index + c(rep(NA, 25), qnorm(0.975) * index_sd)
)
# Plot true vs estimated indices with the CI ribbon.
ggplot(index_df, aes(
  x = year, y = index,
  color = type, fill = type,
  ymin = low, ymax = high
)) +
  geom_ribbon(alpha = 0.5) +
  geom_point() +
  geom_line()

# Bug fix: the report list is `rep1` (base::rep is a function, so
# `rep$Ilog_n` would error). Reshape the reported log-indices onto the
# 20x20 spatial grid across 25 years and image a few slices.
Ilog_n <- array(rep1$Ilog_n, dim = c(20, 20, 25))
Ilog_w <- array(rep1$Ilog_w, dim = c(20, 20, 25))
I_b <- exp(Ilog_n + Ilog_w)  # combined index on the natural scale
image(Ilog_n[, , 25])
image(Ilog_w[, , 1])
image(I_b[, , 1])
85d4ff050470e914d6930e79a2142de628c33006 | 10a373397568b0e5235221d509d71f824e67e871 | /Counting_DNA_Nucleotides/countLetters.R | 8f43d0a9ee615a18ab141b2c5b5ab3dba6f4b450 | [] | no_license | j23414/Franklin | bb3ca90c5ec3404080ff2cf9e28463e7cd9d9883 | 516a1f8341c4691e670973cdebbac5e51840d008 | refs/heads/master | 2021-01-11T23:09:51.369752 | 2017-01-11T18:49:45 | 2017-01-11T18:49:45 | 78,554,737 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | countLetters.R | #! /usr/bin/env Rscript
#====================================Libraries
library(ggplot2)
#====================================Analysis
f <- file("stdin")
open(f)
d<-data.frame(Letters=c())
while(length(line <- readLines(f,n=1)) > 0) {
temp<-data.frame(Letters=strsplit(line,split=""))
names(temp)<-"Letters"
d<-rbind(d,temp)
#write(line, stderr())
}
p<-qplot(d$Letters,fill=I('royalblue'),xlab="Letters",main="Letter Frequency")+
theme_bw()+
geom_text(stat='count',aes(label=..count..),vjust=-0.25)
#+ theme(axis.text.x = element_text(angle = 0, hjust = 1))
ggsave(filename="barchart.png",plot=p,dpi=600)
|
2294d028e963a184c19f16ae69cb43259208229f | 84c71eebf4fc69c5cf052b222fd378eb7c6af4d2 | /moneyBall.R | 58af8da0bca00c45bb9180dccb8a12a3c4108bbb | [] | no_license | G-M-C/MoneyBall | 42f304aa35d9ec0d8e4280e39855c8aa04f5515a | abe6354f8584495efc8cd6d172a483a46bebe0b2 | refs/heads/main | 2023-02-07T18:26:33.432566 | 2020-12-21T17:25:46 | 2020-12-21T17:25:46 | 323,400,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,240 | r | moneyBall.R | library(dplyr)
library(ggplot2)
batting <- read.csv('Batting.csv')
print(head(batting))
batting$BA <- batting$H/batting$AB
batting$OBP <- (batting$H + batting$BB + batting$HBP)/(batting$AB + batting$BB + batting$HBP + batting$SF)
batting$X1B <- batting$H - batting$X2B - batting$X3B - batting$HR
batting$SLG <- ((1 * batting$X1B) + (2 * batting$X2B) + (3 * batting$X3B) + (4 * batting$HR) ) / batting$AB
str(batting)
sal <- read.csv('Salaries.csv')
batting <- subset(batting,yearID >= 1985)
print(summary(batting))
sal_batting <- merge(batting,sal,by=c('playerID','yearID'))
print(summary(sal_batting))
lost_players <- subset(sal_batting,playerID %in% c('giambja01','damonjo01','saenzol01') )
lost_players <- subset(lost_players,yearID == 2001)
lost_players <- lost_players[,c('playerID','H','X2B','X3B','HR','OBP','SLG','BA','AB')]
print(head(lost_players))
avail.players <- filter(sal_batting,yearID==2001)
plt <- ggplot(avail.players,aes(x=OBP,y=salary)) + geom_point()
print(plt)
avail.players <- filter(avail.players,salary<8000000,OBP>0)
avail.players <- filter(avail.players,AB >= 500)
possible <- head(arrange(avail.players,desc(OBP)),10)
possible <- possible[,c('playerID','OBP','AB','salary')]
print(possible[2:4,])
|
3bbe6912cc0e0a0b0c0aba5bf55a8cccb8f48820 | e7f016514dabb463518cd24ef5740cbf77dd70ec | /man/DYNAMIC_DATA.Rd | 939d85a39c6684bf0db51d846b5bae3512f1aa52 | [] | no_license | dr-consulting/ibi_VizEdit | 5d4bd02fdb60419c6f26202912a3e6e3b284e01f | a56672add886c2ac0e0064d428566c3bf6878a7c | refs/heads/dev | 2023-01-23T19:37:08.986103 | 2020-11-21T20:44:00 | 2020-11-21T20:44:00 | 119,615,926 | 0 | 0 | null | 2020-07-04T03:31:21 | 2018-01-31T01:13:19 | R | UTF-8 | R | false | true | 490 | rd | DYNAMIC_DATA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_init_global_vars.R
\docType{data}
\name{DYNAMIC_DATA}
\alias{DYNAMIC_DATA}
\title{Initialization of DYNAMIC_DATA reactiveValues that can change and be modified based on user actions}
\format{
An object of class \code{reactivevalues} of length 3.
}
\usage{
DYNAMIC_DATA
}
\description{
Initialization of DYNAMIC_DATA reactiveValues that can change and be modified based on user actions
}
\keyword{datasets}
|
2b889461f9e295358a6dd3eddf5bd3ce10ad8bda | 23093affc35a4376384b3a47b24382e11078999b | /R/mqtls_LC.R | 3545feb273a636e3889a5d014af19541af1b53c3 | [] | no_license | roderickslieker/CONQUER.db | f35d36c57fc937a1e489c6a2dd1f1a4f5287943a | 85139b8feaac3bbfe69dacec22519364ebf1c303 | refs/heads/master | 2021-07-31T18:46:35.240183 | 2021-07-27T06:16:38 | 2021-07-27T06:16:38 | 250,022,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 399 | r | mqtls_LC.R | #' Data frame of lQTLs
#'
#' Author: E. Rhee
#' Title: A Genome-wide Association Study of the Human Metabolome in a Community-Based Cohort
#' Journal: Cell Metab DOI:https://doi.org/10.1016/j.cmet.2013.06.013
#' GWAS Data for All 217 Metabolites Measured by Our Platform, Including All Loci with P < 1.0 × 10−3
#'
#' @format A [[data.frame]] object
#' @source [mQTLs](see description)
"mqtls_LC"
|
57218921504f1477c247cd48a4814eeb7badd2dc | 6b9091d3dfc7827e00b0f1fbe25f615280fee906 | /EDA Assignment 2/plot4.R | 80e4cfd757d26d084f0e7a5e79bdef58f14de1e4 | [] | no_license | avkarthik/ExData_Plotting1 | 4e28f3a05fe097f39b33327c96ca7a25aa11842f | ac9f62506b9666de89f9d9810869e9f69dbadc93 | refs/heads/master | 2021-05-29T08:24:03.614696 | 2015-05-24T03:33:24 | 2015-05-24T03:33:24 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,663 | r | plot4.R | #Question 4:Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
# Exploratory Data Analysis, Question 4: how have emissions from coal
# combustion-related sources changed across the United States, 1999-2008?
#
# Side effects: reads the two NEI .rds files from the working directory and
# writes the finished plot to "plot4.png".

#Loading necessary packages (ggplot2 for plotting, sqldf for the SQL join)
library("ggplot2")
library("sqldf")

#Reading data from the files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

#Renaming Column name by replacing . with _ for sql query to work
names(SCC)[names(SCC)=="EI.Sector"] <- "EI_Sector"

#Joining data between the 2 data frames by using column SCC,
#keeping only coal-related sectors
NEI_4 <- sqldf("select NEI.SCC, NEI.Emissions, NEI.type, NEI.year from NEI inner join SCC on NEI.SCC=SCC.SCC where SCC.EI_Sector like '%coal%' ")

#Calculating sum of emissions by year (vectorized subsetting replaces the
#original row-by-row accumulation loop; results are identical)
sum99 <- sum(NEI_4$Emissions[NEI_4$year == "1999"])
sum02 <- sum(NEI_4$Emissions[NEI_4$year == "2002"])
sum05 <- sum(NEI_4$Emissions[NEI_4$year == "2005"])
sum08 <- sum(NEI_4$Emissions[NEI_4$year == "2008"])

#Creating a new data frame based on the above calculated values
emissions <- data.frame(x=c("1999","2002","2005","2008"),y=c(sum99,sum02,sum05,sum08))

#Converting variables type to create readable and accurate plots
#(year labels become Date so ggplot draws a proper time axis)
emissions$x<-as.Date(emissions$x, "%Y")

#Saving the plot as png
png(filename = "plot4.png", width = 480, height = 480)

#Creating the plot; print() is required so the plot is actually rendered
#even when this script is run via source()
g <- ggplot(emissions, aes(x,y))
print(g + geom_line(color="red") + geom_point(color="blue") + labs(x="Year") + labs(y="Total PM2.5 Emission") + labs(title="Coal Combustion Emissions by Year"))

#Changing the graphics device back to screen
dev.off()
f024b6713d6c961f25cb43944ed06a1c6ffacd7b | 6c781fd2987dc22589ffc00855c9ed242e44c377 | /hierarchicalclustering_analysis.R | 473bbcd7b0ab6988fbc2083e731bbe86bbf01c97 | [] | no_license | AmyLei96/unsupervised-learning-breast-cancer-subtypes | 65469e119c292aae15eb65b4e07a0f634bac72af | 7bdd96c76353ea6f52b0ea9bb1575202518d265a | refs/heads/master | 2020-05-19T03:21:32.521602 | 2019-05-19T00:06:23 | 2019-05-19T00:06:23 | 184,798,138 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,473 | r | hierarchicalclustering_analysis.R | ###########################################################################################################
# hierarchical clustering analysis
###########################################################################################################
# This script loads PAM50 gene expression (RNA), protein abundance, and a
# combined RNA+protein data set, normalizes each matrix, draws an annotated
# heatmap with hierarchical clustering (k = 4 column clusters), and appends
# the per-sample cluster assignments to a single Excel workbook (one sheet
# per data set). Helper functions (getLog2Scaled, getImputedScaled,
# plotHeatmap, getColClusters) come from the sourced src/ files.
library("xlsx")
library("dplyr")
source("src/normalization.R")
source("src/hierarchicalclustering.R")
source("src/heatmap.R")
samples <- read.csv("data/samples.csv", stringsAsFactors = FALSE)
##########################################################################################################
## import rna pam50
rna_pam50 <- read.csv("data/rna_pam50.csv")
# drop the first two annotation columns; keep only expression values
rna_pam50_mat <- as.matrix(rna_pam50[,-c(1,2)])
## normalize
rna_pam50_mat_norm <- getLog2Scaled(rna_pam50_mat)
## plot heatmap - can change plot_title
# NOTE(review): this call passes `col_label` (singular) while later calls use
# `col_labels` -- confirm against the plotHeatmap() signature in src/heatmap.R.
plotHeatmap(
  filename = "output/rna_hierarchicalclustering_pam50",
  mydata = rna_pam50_mat_norm,
  plot_title = "",
  row_labels = rna_pam50$GeneSym,
  col_label = colnames(rna_pam50[-c(1,2)]),
  annotations = samples,
  hclust_met = "complete", k = 4
)
## get cluster assignments
clusters <- getColClusters(annotations = samples, mydata = rna_pam50_mat_norm, hclust_met = "complete")
## write to file
write.xlsx(clusters, file = "output/hierarchicalclustering_clusters.xlsx",
           row.names = FALSE, sheetName = "rna_pam50", append = TRUE)
##########################################################################################################
## import protein pam50
protein_pam50 <- read.csv("data/protein_pam50.csv")
protein_pam50_mat <- as.matrix(protein_pam50[,-c(1,2)])
## impute missing values and normalize
protein_pam50_mat_norm <- getImputedScaled(protein_pam50_mat)
## plot heatmap - can change plot_title
plotHeatmap(
  filename = "output/protein_hierarchicalclustering_pam50",
  mydata = protein_pam50_mat_norm,
  plot_title = "",
  row_labels = protein_pam50$GeneSym,
  col_label = colnames(protein_pam50[-c(1,2)]),
  annotations = samples,
  hclust_met = "complete", k = 4
)
## get cluster assignments
clusters <- getColClusters(annotations = samples, mydata = protein_pam50_mat_norm, hclust_met = "complete")
## write to file
write.xlsx(clusters, "output/hierarchicalclustering_clusters.xlsx",
           row.names = FALSE, sheetName = "protein_pam50", append = TRUE)
##########################################################################################################
## import combined rna and protein pam50 - already normalized
rna_protein_pam50 <- read.csv("data/rna_protein_pam50_norm.csv")
# the combined file carries four leading annotation columns
rna_protein_pam50_mat_norm <- as.matrix(rna_protein_pam50[,-c(1:4)])
## plot heatmap - can change plot_title
plotHeatmap(
  filename = "output/rna_protein_hierarchicalclustering_pam50",
  mydata = rna_protein_pam50_mat_norm,
  plot_title = "",
  row_labels = rna_protein_pam50$GeneSym_Revised,
  col_labels = colnames(rna_protein_pam50[-c(1:4)]),
  annotations = samples,
  hclust_met = "complete", k = 4
)
## get cluster assignments
clusters <- getColClusters(annotations = samples, mydata = rna_protein_pam50_mat_norm, hclust_met = "complete")
## write to file
write.xlsx(clusters, file = "output/hierarchicalclustering_clusters.xlsx", row.names = FALSE,
           sheetName = "rna_protein_pam50", append = TRUE)
#########################################################################################################
## import filtered rna data
rna_filtered <- read.csv("data/rna_filtered.csv")
# treat missing expression values as zero before the gene filter below
rna_filtered[is.na(rna_filtered)] <- 0
## import rna pam50 and top 47 genes from mofa lf6
rna_pam50_lf6 <- read.csv("data/rna_pam50_mofa_lf6_n47.csv")
## get expression information for top 47 genes from mofa lf6
rna_pam50_lf6 <-
  rna_filtered %>%
  filter(GeneSym %in% rna_pam50_lf6$GeneSym)
rna_pam50_lf6_mat <- as.matrix(rna_pam50_lf6[,-c(1,2)])
## normalize
rna_pam50_lf6_mat_norm <- getLog2Scaled(rna_pam50_lf6_mat)
## plot heatmap - can change plot_title
plotHeatmap(
  filename = "output/rna_mofa_lf6_n47_hierarchicalclustering",
  mydata = rna_pam50_lf6_mat_norm,
  plot_title = "",
  row_labels = rna_pam50_lf6$GeneSym,
  col_labels = colnames(rna_pam50_lf6[-c(1,2)]),
  annotations = samples,
  hclust_met = "complete", k = 4
)
## get cluster assignments
clusters <- getColClusters(annotations = samples, mydata = rna_pam50_lf6_mat_norm, hclust_met = "complete")
## write to file
write.xlsx(clusters, file = "output/hierarchicalclustering_clusters.xlsx", row.names = FALSE,
           sheetName = "rna_pam50_mofa_lf6_n47", append = TRUE)
286460f606638e55c8da197b21bde599b395079b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/multisensi/examples/gsi.Rd.R | b36dcb5d41eff8430b13b502dec726598126f1a8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 946 | r | gsi.Rd.R | library(multisensi)
### Name: gsi
### Title: Generalised Sensitivity Indices: GSI
### Aliases: gsi
### ** Examples
# Example code extracted from the help page of multisensi::gsi.
# It computes generalised sensitivity indices for the Winter Wheat Dynamic
# Model test case and demonstrates the available summary/plot methods.
# Test case : the Winter Wheat Dynamic Models (WWDM)
# input factors design
data(biomasseX)
# input climate variable
data(Climat)
# output variables (precalculated to speed up the example)
data(biomasseY)
#
GSI <- gsi(2, biomasseY, biomasseX, inertia=3, normalized=TRUE, cumul=FALSE,
       climdata=Climat)
summary(GSI)
print(GSI)
plot(x=GSI, beside=FALSE)
#plot(GSI, nb.plot=4) # the 'nb.plot' most influent factors
# are represented in the plots
#plot(GSI,nb.comp=2, xmax=1) # nb.comp = number of principal components
#plot(GSI,nb.comp=3, graph=1) # graph=1 for first figure; 2 for 2nd one
# and 3 for 3rd one; or 1:3 etc.
#graph.bar(GSI,col=1, beside=F) # sensitivity bar plot on the first PC
#graph.bar(GSI,col=2, xmax=1) #
|
7efd752ddfee376e37674cb4e94e59186b036a4c | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/voiceid_delete_domain.Rd | 27aa790528f45369c339810f068b9357b715a01d | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 511 | rd | voiceid_delete_domain.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/voiceid_operations.R
\name{voiceid_delete_domain}
\alias{voiceid_delete_domain}
\title{Deletes the specified domain from Voice ID}
\usage{
voiceid_delete_domain(DomainId)
}
\arguments{
\item{DomainId}{[required] The identifier of the domain you want to delete.}
}
\description{
Deletes the specified domain from Voice ID.
See \url{https://www.paws-r-sdk.com/docs/voiceid_delete_domain/} for full documentation.
}
\keyword{internal}
|
422ef00df0b74675110e882cceb53cc4bba2b90e | 1d8da8cb8048c2d8ee87e977be9f90291e0ccf84 | /R/fit_model.R | d7f3a695241c94328f96c0966c2da795feb4fa58 | [] | no_license | cran/HCR | 59cc96fc83dcc777c0e6ad751b97754d4210c0cc | 827a42d1c3dd91f1b7844cb580a0d6221c7e85ad | refs/heads/master | 2020-03-31T08:54:11.989703 | 2018-10-26T13:50:32 | 2018-10-26T13:50:32 | 152,076,559 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,095 | r | fit_model.R | L<-function(X,YPrime,Y,score_type="bic",is_anm=FALSE,is_cyclic=FALSE,...){
# Score of the model X -> Y' -> Y for the current hidden representation:
# log-likelihood of X (multinomial marginal) plus conditional log-likelihood
# of Y given Y', minus the complexity penalty selected by `score_type`
# ("log" = none, "bic", "aic", "aicc").
# Empirical marginal frequencies / probabilities of X.
freqx=as.data.frame(table(X))$Freq
freqx<-freqx[freqx!=0]
px= freqx/sum(freqx)
nx<-uniqueN(X)
nyp<-uniqueN(YPrime)
if (is_anm) {
# data preprocessing for ANM: model the (possibly cyclic) residual Y - Y'
# with a single shared noise distribution, i.e. |Y'| collapses to 1.
if(is_cyclic){
Y=(Y-YPrime)%%(max(Y)-min(Y)+1)
}else{
Y=Y-YPrime
}
tab=table(Y) # |Y'|=1
nyp=1 # |Y'|=1
freq=as.data.frame(tab)$Freq
B=as.data.frame(tab/sum(tab))$Freq # |Y'|=1
}else{
# Conditional distribution P(Y | Y'): row-normalized contingency table.
# `freq` and `B` are flattened in the same (column-major) order, so they
# stay aligned element-wise below.
tab=table(YPrime,Y)
freq=as.data.frame(tab)$Freq
B=as.data.frame(tab/rowSums(tab))$Freq
}
# remove the categories that have zero frequency.
sel<-(freq!=0)
B=B[sel]
freq=freq[sel]
# Number of free parameters: (|Y|-1) per Y' category plus (|X|-1) for the
# marginal of X (the commented-out variant kept for reference).
#d=nyp*(uniqueN(Y)-1)+(nx-1)+nx
d=nyp*(uniqueN(Y)-1)+nx-1
if(score_type=="log"){
return(sum(freq*log(B))+sum(freqx*log(px))) #log-likelihood
}else if(score_type=="bic"){
return(sum(freq*log(B))+sum(freqx*log(px))-d/2*log(length(X))) #bic
}else if(score_type=="aic"){
return(sum(freq*log(B))+sum(freqx*log(px))-d) #aic
}else if(score_type=="aicc"){
return(sum(freq*log(B))+sum(freqx*log(px))-d-(d*(d+1)/(length(X)-d-1))) #aicc
}
}
#' @title Hidden Compact Representation Model
#' @description Causal Discovery from Discrete Data using Hidden Compact Representation.
#' @param X The data of cause.
#' @param Y The data of effect.
#' @param score_type You can choose "bic","aic","aicc","log" as the type of score to fit the HCR model. Default: bic
#' @param is_anm If is_anm=TRUE, it will enable a data preprocessing to adjust for the additive noise model.
#' @param is_cyclic If is_anm=TRUE and is_cyclic=TRUE, it will enable a data preprocessing to adjust the cyclic additive noise model.
#' @param verbose Show the score at each iteration.
#' @param max_iteration The maximum iteration.
#' @param ... Other arguments passed on to methods. Not currently used.
#' @return The fitted HCR model and its score.
#' @export
#' @examples
#' library(data.table)
#' set.seed(10)
#' data=simuXY(sample_size=200)
#' r1<-HCR(data$X,data$Y)
#' r2<-HCR(data$Y,data$X)
#' # The canonical hidden representation
#' unique(r1$data[,c("X","Yp")])
#' # The recovery of hidden representation
#' unique(data.frame(data$X,data$Yp))
#'
HCR<-function(X,Y,score_type="bic",is_anm=FALSE,is_cyclic=FALSE,verbose=FALSE,max_iteration=1000,...){
# NULL bindings silence R CMD check notes about data.table NSE symbols.
Yp=NULL
.=NULL
setx=unique(X)
sety=unique(Y)
nx=length(setx)
ny=length(sety)
dt=data.table(X,Y,key="X")
if(is_anm){
# ANM variant: recode X and Y to consecutive integers so Y - Y' differences
# are meaningful, and let the candidate Y' values span the support of Y.
if(is.character(Y)||is.factor(Y)||is.numeric(Y)){
Y=as.integer(factor(Y,labels=1:ny))
dt=data.table(X,Y,key="X")
setx=unique(dt$Y)
}
if(is.character(X)||is.factor(X)||is.numeric(X)){
X=as.integer(factor(X,labels=1:nx))
dt=data.table(X,Y,key="X")
setx=unique(dt$X)
}
setyp=min(sety):max(sety)
}else{
setyp=1:nx #random assign :default setting
}
# Initialization: map each X category to the most frequent Y within it.
for(i in setx){
temp=dt[.(i),.N,by=Y]
dt[.(i),Yp:=temp$Y[which.max(temp$N)]] #set Yp to the most freq Y
}
if(!is_anm){
# rearrange Y' so its labels are the consecutive integers 1..|Y'|
dt$Yp<-factor(dt$Yp,labels=1:uniqueN(dt$Yp))
dt$Yp<-as.integer(dt$Yp)
}
newScore=L(X=dt$X,YPrime=dt$Yp,Y=dt$Y,score_type=score_type,is_anm=is_anm,is_cyclic=is_cyclic,...)
bestdt=dt
iteration=0L
oldScore=-Inf
# Greedy coordinate ascent: each sweep tries reassigning every X category
# to every candidate Y' value, keeping any reassignment that improves the
# score; stop when a sweep yields no improvement or max_iteration is hit.
while(newScore>oldScore){
if(iteration>max_iteration){
break
}
if(verbose){
#if(iteration%%10==0){
print(sprintf("iteration:%d score:%f",iteration,newScore))
#}
}
iteration=iteration+1L
oldScore=newScore
for(i in setx){
for(j in setyp){
temp<-copy(dt)
temp[.(i),Yp:=j]
score<-L(X=temp$X,YPrime=temp$Yp,Y=temp$Y,score_type=score_type,is_anm=is_anm,is_cyclic=is_cyclic,...)
if(score>newScore){
newScore=score
bestdt=copy(temp)
}
}
}
dt=bestdt
}
# NOTE(review): on normal convergence newScore == oldScore so this is a
# no-op, but after a max_iteration break it discards the improvement found
# in the final sweep while bestdt keeps it -- confirm this is intended.
newScore<-oldScore
if(verbose){
print(sprintf("iteration:%d score:%f",iteration,newScore))
}
return(list(data=bestdt,score=newScore))
}
#' @title The Fast Version for Fitting Hidden Compact Representation Model
#' @description A fast implementation for fitting the HCR model.
#' This implementation caches all intermediate results to speed up the greedy search.
#' The basic idea is that if there are two categories need to be combined, for instance, X=1 and X=2 mapping to the same Y'=1, then the change of the score only depend on the frequency of the data where X=1 and X=2.
#' Therefore, after combination, if the increment of the likelihood is greater than the penalty, then we will admit such combination.
#' @param X The data of cause.
#' @param Y The data of effect.
#' @param score_type You can choose "bic","aic","aicc","log" as the type of score to fit the HCR model. Default: bic
#' @param ... Other arguments passed on to methods. Not currently used.
#' @return The fitted HCR model and its score.
#' @export
#' @examples
#' library(data.table)
#' set.seed(1)
#' data=simuXY(sample_size=2000)
#' r1=HCR.fast(data$X,data$Y)
#' r2=HCR.fast(data$Y,data$X)
#' # The canonical hidden representation
#' unique(r1$data[,c("X","Yp")])
#' # The recovery of hidden representation
#' unique(data.frame(data$X,data$Yp))
#'
HCR.fast<-function(X,Y,score_type="bic",...){
# NULL bindings silence R CMD check notes about data.table NSE symbols.
Yp=NULL
.=NULL
X=as.integer(factor(X))
Y=as.integer(factor(Y))
setx=unique(X)
sety=unique(Y)
nx=length(setx)
ny=length(sety)
m=length(X)
dt=data.table(X,Y,key="X")
dt[,Yp:=X] #start from the identity mapping: one Y' category per X category
nyp=nx
# Cached sufficient statistics: N = contingency table of (X, Y),
# Nx = per-category totals, likxy = per-category conditional log-likelihood
# (zero cells are set to probability 1 so they contribute 0 to the sum).
N=table(X,Y)
Nx=rowSums(N)
pxy=N/Nx
pxy[pxy==0]<-1
likxy<-rowSums(N*log(pxy)) #likelihood on P(Y|X=x)
besteps=0
repeat{
# Combine best ith, jth categories at each iteration.
iscontinue=F
# d =nyp*(ny-1)+(nx-1)+nx
# dnew=(nyp-1)*(ny-1)+(nx-1)+nx
# Model dimensions before (d) and after (dnew) merging two Y' categories.
d =nyp*(ny-1)+nx-1
dnew=(nyp-1)*(ny-1)+nx-1
if (score_type=="bic") {
penalty1=d/2*log(m)
penalty2=dnew/2*log(m)
}else if(score_type=="aic"){
penalty1=d
penalty2=dnew
}else if(score_type=="log"){
penalty1=0
penalty2=0
}else if (score_type=="aicc") {
penalty1=d+(d*(d+1)/(m-d-1))
penalty2=dnew+(dnew*(dnew+1)/(m-dnew-1))
}
#rowx=as.integer(rownames(N))
# Evaluate every pair (i, j): eps is the score change from merging them
# (likelihood change plus penalty reduction); remember the best pair.
for(i in 1:(nyp-1)){
for (j in (i+1):nyp) {
nij=N[i,]+N[j,]
sel=nij!=0
combine_lik=sum(nij[sel]*log(nij[sel]/(Nx[i]+Nx[j])))
eps=combine_lik-penalty2-likxy[i]-likxy[j]+penalty1
if (eps>=besteps) {
besteps=eps
besti=i
bestj=j
bestrownamesi=as.integer(rownames(N)[i])
bestrownamesj=as.integer(rownames(N)[j])
bestlik=combine_lik
iscontinue=T
}
}
}
if(!iscontinue){
break
}
# Apply the best merge: fold row j into row i and update all caches.
besteps=0
nyp=nyp-1
N[besti,]=N[besti,]+N[bestj,]
N=N[-bestj,]
likxy[besti]=bestlik
likxy=likxy[-bestj]
dt[Yp==bestrownamesj,Yp:=dt[.(bestrownamesi),"Yp"][1]]
if (is.null(nrow(N))) {
# Only one Y' category left: N collapsed to a vector, nothing more to merge.
Nx=sum(N)
break
}else{
Nx=rowSums(N)
}
}
newScore=L(X=dt$X,YPrime=dt$Yp,Y=dt$Y,score_type=score_type,...)
return(list(data=dt,score=newScore))
}
|
97c7a4496d30b4f22e8a59d22a488f9115dbb5c2 | dc2823335a53bbe74ae29810ed3273ce8890b7e7 | /Resampling_ex.R | fabb3375d084c7b3723bb34241b68cdeb83edfb5 | [] | no_license | hcasendino/FISH.553 | 2088fe575089c69cb9598523fa670dd2aaa5070b | 5a4d7718cae4eb1847bad7660246b38fcadfd7c5 | refs/heads/main | 2023-07-26T04:24:51.729345 | 2021-09-09T07:47:40 | 2021-09-09T07:47:40 | 404,631,387 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,078 | r | Resampling_ex.R | #==========FISH 553 Lecture 3 Quiz=======================
#==hindex function
#calculates h index (Hirsch 2005)
#Hirsch JE 2005 An index to quantify an individual's
#scientific research output. PNAS 102:16569-16572
#"A scientist has index h if h of his or her Np papers
#have >=h citations each and the other (Np-h)
#papers have <=h citations"
#Argument: citation.vec, numeric vector of citation counts (one per paper)
#Returns: the h-index, a single non-negative integer
hindex <- function(citation.vec) {
  #sort() drops NAs by default, so missing citation counts are ignored
  #instead of being recycled against a longer rank vector
  sorted <- sort(citation.vec, decreasing = TRUE)
  #the h-index is the number of ranks i whose i-th most-cited paper
  #still has at least i citations
  sum(seq_along(sorted) <= sorted)
}
#sanity checks on known examples
hindex(c(5,4,3,2,1,1,1,1))
hindex(c(10,6,6,6,6,6,6,6,6,6,6))
hindex(c(1,4,3,2,5,4,0,0,0,0,0))
hindex(c(1012, 10, 5,4,3,1))
#Use resampling inference to determine if one journal has a higher
#impact than the other. The test-statistics is the h-index
#==Read in the data
ICES <- read.csv("ICES.csv")
CJFAS <- read.csv("CJFAS.csv")
####METHOD 1
#store a logical per replicate, then average to get the p-value
niter <- 10000
CJFAS.better <- vector(length=niter)
for (i in 1:niter) {
   #h-index of a random sample of 100 citation counts from each journal
   X <- hindex(sample(CJFAS$Citations, 100, replace=F))
   Y <- hindex(sample(ICES$Citations, 100, replace=F))
   CJFAS.better[i] <- X>Y
}
#p-value: is CJFAS better than the other?
sum(CJFAS.better)/niter
####METHOD 2
#same test, but with an explicit counter instead of a logical vector
niter <- 10000
num.better <- 0
for (i in 1:niter) {
   X <- hindex(sample(CJFAS$Citations, 100, replace=F))
   Y <- hindex(sample(ICES$Citations, 100, replace=F))
   if (X > Y) {
      num.better <- num.better+1
   }
}
#p-value: is CJFAS better than the other?
num.better/niter
#============h-index next method
#Alternative h-index implementation using a while loop.
#Argument: citation.vec, numeric vector of citation counts (one per paper)
#Returns: the h-index, a single non-negative integer
hindex2 <- function(citation.vec) {
   sorted <- sort(citation.vec, decreasing = TRUE)
   #Walk down the sorted counts while the i-th paper still has at least
   #i citations. BUGFIX: the original compared against the UNSORTED
   #citation.vec (wrong for unsorted input, e.g. c(0,5,5,5,5,5)) and
   #could index past the end of the vector, making the while condition
   #NA; bound i by the number of papers and use the sorted vector.
   i <- 1
   while (i <= length(sorted) && i <= sorted[i]) {
      i <- i + 1
   }
   i - 1
}
#verify hindex2 against the same examples
hindex2(c(5,4,3,2,1,1,1,1))
hindex2(c(10,6,6,6,6,6,6,6,6,6,6))
hindex2(c(1,4,3,2,5,4,0,0,0,0,0))
hindex2(c(1012, 10, 5,4,3,1))
#==========extra calculations.... plots etc.
#resample both journals in one loop, keeping every replicate's h-index
niter <- 10000
sampleh <- matrix(nrow=niter, ncol=2, dimnames=list(NULL,c("ICES","CJFAS")))
for (i in 1:niter) {
   sampleh[i,1] <-
      hindex(sample(ICES$Citations, 100, replace=F))
   sampleh[i,2] <-
      hindex(sample(CJFAS$Citations, 100, replace=F))
}
#p-value: is one better than the other?
sum(sampleh[,"CJFAS"] > sampleh[,"ICES"])/niter
#No, only in 65% of occurrences is CJFAS better
#summaries of the resampled h-index distributions
head(sampleh)
table(sampleh[,"ICES"])
table(sampleh[,"CJFAS"])
mean(sampleh[,"ICES"])
mean(sampleh[,"CJFAS"])
sd(sampleh[,"ICES"])
sd(sampleh[,"CJFAS"])
#stacked histograms of the two sampled h-index distributions
par(mfrow=c(2,1), oma=c(5,5,1,1), mar=c(0,0,0,0))
hist(sampleh[,"ICES"], breaks=seq(7.5,19.5,1),
     freq=F, ylim=c(0,0.45), axes=F,
     main="",xlab="", ylab="", col="grey")
axis(2, at=seq(0,0.4,0.2), las=1, pos=7.5)
mtext(side=3,line=-1.5,"ICES J. Mar. Sci.")
hist(sampleh[,"CJFAS"], breaks=seq(7.5,19.5,1),
     freq=F, ylim=c(0,0.45), axes=F,
     main="",xlab="", ylab="", col="grey")
mtext(side=3,line=-1.5,"Can. J. Fish. Aq. Sci.")
axis(1, pos=0)
axis(2, at=seq(0,0.4,0.2), las=1, pos=7.5)
mtext(side=1,line=3,"Sampled h-index (2008-12)", cex=1.3)
mtext(side=2,outer=T, line=3,"Frequency", cex=1.3)
85381f83f519266d0eb85d32c10410d88d2042da | 29b41178e146e3b53e410827b74518519150655b | /IndividualScenarioAnalysis/MAMHMt.R | 8624583e065aa923e413550e0e81fe98bb524038 | [] | no_license | timotheenivalis/PodarcisAnalysis | 945dae6af881ecd4351405de52fb4adbf0a34fe1 | 3108a7934f484524d06b1dc1bfe6d4a0984eb325 | refs/heads/master | 2021-03-30T17:45:52.279793 | 2018-01-23T04:22:32 | 2018-01-23T04:22:32 | 53,960,957 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 5,563 | r | MAMHMt.R | ###################################################################
# Run the MAMHMt simulation scenarios and collect their outputs, then plot
# summaries of mitochondrial capture / nuclear introgression vs. the varied
# parameter. Depends on mtfunc() from the sourced FunctionsSimul.R.
source(file="C:/Users/Timothée/Documents/Studies/PodarcisCBGP/New simulations/FunctionsSimul.R")
setwd("C:/Users/Timothée/Documents/Studies/PodarcisCBGP/New simulations/MAMHMt/")
# First scenario: create the output files (append=F, with column names).
Simul<-"MAMHMt0"
outR<-mtfunc(Simul)
results<-outR$results
resultsnofix<-outR$resultsNoFix
DistriMtMaxDF<-data.frame(rep(Simul,nrow(outR$DistriMtMax)),outR$DistriMtMax)
names(DistriMtMaxDF)<-c("Simul","DistriMtMax","DistriNuMtMax")
results$Param<-1
resultsnofix$Param<-1
DistriMtMaxDF$Param<-1
write.table(results,file="MAMHMtresults.txt", sep="\t",quote=F,append=F,col.names=T)
write.table(resultsnofix,file="MAMHMtresultsnofix.txt", sep="\t",quote=F,append=F,col.names=T)
write.table(DistriMtMaxDF,file="MAMHMtDistriMtMax.txt", sep="\t",quote=F,append=F,col.names=T,row.names=F)
# Remaining scenarios MAMHMt1..MAMHMt17: append to the same files.
for (nbsimul in 1:17)
{
Simul<-paste("MAMHMt",nbsimul,sep="")
outR<-mtfunc(Simul)
results<-outR$results
resultsnofix<-outR$resultsNoFix
DistriMtMaxDF<-data.frame(rep(Simul,nrow(outR$DistriMtMax)),outR$DistriMtMax)
results$Param<-1
resultsnofix$Param<-1
DistriMtMaxDF$Param<-1
write.table(results,file="MAMHMtresults.txt", sep="\t",quote=F,append=T,col.names=F)
write.table(resultsnofix,file="MAMHMtresultsnofix.txt", sep="\t",quote=F,append=T,col.names=F)
write.table(DistriMtMaxDF,file="MAMHMtDistriMtMax.txt", sep="\t",quote=F,append=T,col.names=F,row.names=F)
}
##############################################################################################
#############################################################################################
# Plotting section: reload the aggregated results (note: different path /
# machine than above) and draw the summary figures. Axis and legend strings
# are in French and kept as-is.
par(mfrow=c(1,1))
setwd("C:/Users/Thimothee Admin/Documents/external research/PodarcisCBGP/New simulations/MAMHMt/")
simul<-read.table(file="MAMHMtresults.txt",header=T)
# One Param value per scenario (0..17), in file order.
Param<-matrix(data=c(0:17),nrow=18,ncol=1)
simul$Param<-Param
plot(simul$Param,simul$FixMt,col="red",type="b",yaxp=c(0,1,10),ylim=c(0,1.3),pch=1,lwd=2,xlab="sélection mt",ylab="Proportion de simulations",main="Proportion de simulations avec Capture ou Introgression Mitochondriale,\n En fonction de la sélecion mt",cex.lab=1.8,cex.axis=1.5,cex.main=1.6)
points(simul$Param,simul$IntMt,col="blue",pch=2,lwd=2,type="b")
legend(x="topleft",legend=c("Simulations avec Capture Mitochondriale","Simulations avec Introgression Mitochondriale"),col=c("red","blue"),pch=c(1,2),ncol=1,pt.lwd=2,cex=1.2)
plot(simul$Param,simul$FixMt,col="dark blue",type="b",yaxp=c(0,1,10),ylim=c(0,1.3),pch=1,lwd=2,xlab=c(expression(paste("log du rapport des ",sigma^2))),ylab="",main="Proportion de simulations avec Capture Mt ou Introgression nucléaire ,\n En fonction du logarithme du rapport de dispersion entre sexe (Femelle/Male)",cex.lab=1.3,cex.axis=1.3,cex.main=1.3)
points(jitter(simul$Param),simul$IntAut10,col="light green",pch=4,lwd=2,type="b")
legend(x="topleft",legend=c("Simulations avec Capture Mitochondriale","Proportion de loci nucléaires introgressés"),col=c("dark blue","light green"),pch=c(1,4),ncol=1,pt.lwd=2,cex=1.2)
plot(simul$Param,simul$IntAut,col="dark green",type="b",yaxp=c(0,1,10),ylim=c(0,1.4),pch=3,lwd=2,xlab=c(expression(paste("log du rapport des ",sigma^2))),ylab="Proportion",main="Force de l'introgression Autosomale lors de captures mitochondriales,\n En fonction de la diff?rence de dispersion entre sexe (Femelle - Male)")
points(jitter(simul$Param),simul$IntAut10,col="green",pch=4,lwd=2,type="b")
points(simul$Param,simul$FixAut,col="orange",pch=5,lwd=2,type="b")
points(simul$Param,simul$MeanExoAut,lwd=2,pch=6,type="b")
legend(x="topleft",legend=c("Proportion moyenne de loci autosomaux introgress?s","Proportion moyenne de loci autosomaux dont plus de 10% des copies sont introgress?es","Proportion de loci autosomaux captur?s","Proportion moyenne de copies introgresses"),col=c("dark green","green","orange","black"),pch=c(3,4,5,6),ncol=1,pt.lwd=2,cex=1)
plot(simul$Param,simul$FstAut,col="dark green",type="p",ylim=c(0,1),pch=1,lwd=2,xlab=c(expression(paste("D",Delta,sigma^2))),ylab="Fst entre les deux habitats",main="Fst en fonction de la diff?rence de dispersion entre sexe (Femelle - Male)")
points(simul$Param,simul$FstZ,col="green",pch=2,lwd=2)
points(jitter(simul$Param),simul$FstW,col="blue",pch=3,lwd=2)
points(jitter(simul$Param),simul$FstMt,col="red",pch=4,lwd=2)
legend(x="topleft",legend=c("Autosomes","Z","W","Mt"),col=c("dark green","green","blue","red"),pt.lwd=2,pch=c(1,2,3,4))
# Attach the design grid (selection strength x homogamy) matching the 18
# scenarios, then draw IntAut10 as a 6 x 3 image.
simul$Nselection<-matrix(data=c(rep(x=0.9,6),rep(0.5,6),rep(0.1,6)),nrow=18,ncol=1)
simul$Homogamy<-matrix(data=rep(c(0.1,0.2,0.3,0.4,0.45,0.49),3),nrow=18,ncol=1)
library("reshape")#for melt
forgrid<-melt.data.frame(simul,id.vars=c(14,15),measure.vars="IntAut10")
forgrid<-forgrid[order(forgrid$Nselection),]
forim<-matrix(forgrid[,4],nrow=6,ncol=3)
# Color ramp positions from min to max of the plotted quantity.
bonsens<-seq(min(forim),max(forim),by=1/15)
#bonsens<-forim[order(forim)]
image(x=sort(unique(simul[,15])),y=sort(unique(simul[,14])),z=forim,xlab="homogamy",ylab="nuclear selection",col=rgb(red=bonsens,green=0,blue=1-bonsens), main="Homogamy, selecion N et selection mt \n Recombi totale")
col=rgb(red=bonsens,green=0,blue=1-bonsens)
legend(title="introgression nucléaire >10%",legend=c(max(forim),min(forim)),col=c(col[length(col)],col[1]),x="topleft",lwd=10)
# Same image with the heat.colors palette.
image(x=sort(unique(simul[,15])),y=sort(unique(simul[,14])),z=forim,xlab="homogamy",ylab="nuclear selection",col=heat.colors(30), main="Homogamy, selecion N et selection mt \n Recombi totale")
col=heat.colors(30)
legend(title="introgression nucléaire >10%",legend=c(max(forim),min(forim)),col=c(col[30],col[1]),x="topleft",lwd=10)
|
535aeadfe00c7df3e94373ca2fc80d02447e9f1c | ab731773df55f8f608d820357c92450e5fc83537 | /openarch_deaths_2_zeelandchallenge.R | 3dba55ea2777e29162d8ade582559f04583ef88a | [] | no_license | CLARIAH/wp4-civreg | 390cd3482f65d515215a4b2161b77759503381b7 | 56587e000e1f1b9becdea7fa1799f0a68fbf322e | refs/heads/master | 2021-12-06T20:42:53.466119 | 2021-08-26T17:33:56 | 2021-08-26T17:33:56 | 209,570,553 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,366 | r | openarch_deaths_2_zeelandchallenge.R | # reshape openarch data into zeeland challenge format
# Reshape the OpenArch death registrations from one-row-per-registration
# (deceased + father + mother columns side by side) into one-row-per-person
# ("Zeeland challenge" format), then write the result as a gzipped CSV.
rm(list = ls())
library("data.table")
library("stringi")
setDTthreads(threads = 8)
# openarch = fread("/data/auke/civreg/openarch/openarch_deaths_dedup.csv")
openarch = fread("gunzip -c openarch_deaths_dedup_amco_ages_sex.csv.gz")
# replace "" by NA for character variables
for (j in 1:ncol(openarch)){
    if (class(openarch[[j]]) != "character") next
    set(x = openarch, i = which(openarch[[j]] == ""), j = j, NA)
}
# Registration identifier = row number.
openarch[, id_registration := .I]
# as.Date fastest
# NA propagates in as.Date, though maybe something is to be said for making something out of year/month
openarch[, death_date := as.Date(
    stri_join(EVENT_YEAR, "-", EVENT_MONTH, "-", EVENT_DAY),
    format = "%Y-%m-%d")]
openarch[, registration_date := as.Date(
    stri_join(SOURCE_DATE_YEAR, "-", SOURCE_DATE_MONTH, "-", SOURCE_DATE_DAY),
    format = "%Y-%m-%d")]
# proxy marriage from registration date and flag
# flag values: 0 = missing, 1 = true event date, 2 = proxied from registration
openarch[, death_date_flag := ifelse(is.na(death_date), 0, 1)]
openarch[is.na(death_date), death_date := registration_date]
openarch[death_date_flag == 0 & !is.na(death_date), death_date_flag := 2]
# quick tabulation of the flag distribution (printed, not stored)
openarch[, .N, by = death_date_flag]
# set relevant variables to cleaned ones to preserve patterns
openarch[, PR_AGE := PR_AGE_year]
openarch[, PR_GENDER := PR_GENDER_2]
openarch[, EVENT_PLACE := EVENT_PLACE_ST]
setkey(openarch, clarid)
# Collapse duplicate registrations (same clarid): for each column keep the
# first non-missing value across the duplicates.
# this takes about 10-15m
openarch_deduplicated = openarch[duplicate == TRUE,
    lapply(.SD, function(x) x[!is.na(x)][1]),
    by = clarid,
    .SDcols = patterns("^id_registration$|^death_date$|^EVENT_YEAR$|^EVENT_PLACE$|amco|match|_NAME_GN$|_NAME_SPRE$|_NAME_SURN$|_AGE$|_BIR_YEAR$|_BIR_MONTH$|_BIR_DAY$|_BIR_PLACE$|_GENDER$|_OCCUPATION$")]
# minute or two
openarch = rbindlist(
    list(openarch[duplicate == FALSE],
        openarch_deduplicated),
    fill = TRUE)
# Wide -> long: one row per person (deceased / father / mother), with the
# person-specific columns matched by suffix pattern.
# source_place is in there twice, identical though
x = melt(openarch,
    id.vars = c("id_registration", "clarid", "death_date", "EVENT_PLACE","amco", "EVENT_YEAR"),
    measure.vars = patterns(firstname = "_NAME_GN$",
                            prefix = "_NAME_SPRE$",
                            familyname = "_NAME_SURN$",
                            age_year = "_AGE$",
                            occupation = "_OCCUPATION$",
                            bir_year = "_BIR_YEAR", # maybe prebake this, only bride and groom
                            bir_month = "_BIR_MONTH",
                            bir_day = "_BIR_DAY",
                            birth_location = "_BIR_PLACE",
                            sex = "_GENDER$"))
dim(x)
# rm("openarch") # some operations below are mem hungry
x[, .N, by = variable]
x[, id_person := .I]
# variable -> role
x[variable == 1, role := 10] # deceased
x[variable == 2, role := 3] # father
x[variable == 3, role := 2] # mother
# quick check: deceased-row ages should match the original PR_AGE counts
x[role == 10 & age_year == 0, .N] == openarch[PR_AGE == 0, .N]
x[role == 10 & age_year == 10, .N] == openarch[PR_AGE == 10, .N]
x[role == 10 & age_year == 23, .N] == openarch[PR_AGE == 23, .N]
x[, birth_date := as.Date(
    stri_join(bir_year, "-", bir_month, "-", bir_day),
    format = "%Y-%m-%d")]
x[, bir_year := NULL]
x[, bir_month := NULL]
x[, bir_day := NULL]
# these are only removed because schema does not use them
# todo: see what info can be recovered
setnames(x, "EVENT_PLACE", "death_location")
setnames(x, "EVENT_YEAR", "death_year")
# civil_status -> empty
x[, civil_status := NA]
# birth_date_flag 0 if empty 1 if good 2/3 if proxied, supplement birth_date?
# keep NA, doesn't make sense to impute birth dates on a marriage certificate
x[, birth_date_flag := NA]
# recode sex + impute from role (mother -> f, father -> m, only when a name
# is present)
# first four unnecessary now that it is pre-standardised
# x[sex == "Man", sex := "m"]
# x[sex == "Vrouw", sex := "f"]
# x[sex == "Onbekend", sex := "u"]
# x[sex == "", sex := NA]
x[is.na(sex) & role %in% c(2) & firstname != "" & familyname != "",
    sex := "f"]
x[is.na(sex) & role %in% c(3) & firstname != "" & familyname != "",
    sex := "m"]
# tolower
x[, firstname := stringi::stri_trans_tolower(firstname)]
x[, prefix := stringi::stri_trans_tolower(prefix)]
x[, familyname := stringi::stri_trans_tolower(familyname)]
# strip diacretics
# much time!
x[, firstname := stringi::stri_trans_general(firstname, "Latin-ASCII")]
x[, prefix := stringi::stri_trans_general(prefix, "Latin-ASCII")]
x[, familyname := stringi::stri_trans_general(familyname, "Latin-ASCII")]
# x[, fullname := paste0(firstname, prefix, familyname, sep = " ")]
# fwrite(
#     x[fullname != ""][stri_detect_regex(fullname, "[^A-z\\s\\-.'’]"), .(unique(fullname))],
#     "badnames.csv",
#     append = TRUE)
# x[, fullname := NULL]
# encoding check (printed, not stored)
x[, all(validUTF8(firstname))]
x[, all(validUTF8(familyname))]
x[, all(validUTF8(prefix))]
# x[, all(validUTF8(death_location))] now amco
# type of source
x[, registration_maintype := 3] # 2 marriage, 1 births, 3 deaths
# death: "n" = not known dead; "a" = alive at registration (parents with an
# age or a real occupation recorded were present, hence alive)
x[, death := "n"]
x[role != 10 & !is.na(age_year), death := "a"]
x[role != 10 & !is.na(occupation) & occupation != "geen beroep vermeld", death := "a"]
x[, stillbirth := NA] # todo: get this out of EVENT_REMARK "levenloos"
fwrite(x, "openarch/openarch2links/openarch_persons_deaths_v2.csv.gz", compress = "gzip")
|
7b02ab8de32d0d5fd438992312799e5597953658 | 08f8df4123d1d49a1b14e29f71c86410399b2a5b | /R/likelihood.R | 35809a1b529c7df005922c8112001f45d3a91e62 | [
"MIT"
] | permissive | mrc-ide/first90release | 67652154cb6c142906a49d94f617bc138f7a4e1b | 200d2e4d47dbe2214d52bf4d072d0b2ffa241089 | refs/heads/master | 2023-05-10T21:12:05.587741 | 2023-05-08T18:28:39 | 2023-05-08T18:28:39 | 167,279,837 | 5 | 0 | NOASSERTION | 2023-05-08T18:28:40 | 2019-01-24T01:17:53 | R | UTF-8 | R | false | false | 8,535 | r | likelihood.R | #' @export
ll_hts <- function(theta, fp, likdat) {
  # Full log-likelihood for the HTS model: simulate the epidemic under the
  # parameter vector `theta`, then sum the survey (ever-tested) likelihood,
  # the two optional programme-data likelihoods (all tests, positive tests),
  # and the prior density.
  fp <- create_hts_param(theta, fp)
  mod <- simmod(fp)

  ll_survey <- ll_evertest(mod, fp, likdat$evertest)
  ll_tests <- if (is.null(likdat$hts)) 0 else ll_prgdat(mod, fp, likdat$hts)
  ll_pos <- if (is.null(likdat$hts_pos)) 0 else ll_prgdat(mod, fp, likdat$hts_pos)
  ll_prior <- lprior_hts(theta, mod, fp)

  ll_survey + ll_tests + ll_pos + ll_prior
}
# Likelihood function for the proportion ever tested (survey data)
#' @export
ll_evertest <- function(mod, fp, dat) {
  # Model-predicted proportion ever tested for each survey observation.
  pred <- evertest(mod, fp, dat)

  # For calendar-period projections, average the current and previous
  # year's predictions to approximate mid-year survey timing.
  if (fp$projection_period == "calendar") {
    dat_prev <- dat
    dat_prev$yidx <- dat_prev$yidx - 1L
    pred <- 0.5 * (pred + evertest(mod, fp, dat_prev))
  }

  # Invalid predictions (NA or outside [0, 1]) get zero likelihood;
  # otherwise use an effective-sample-size binomial log-likelihood.
  if (any(is.na(pred)) || any(pred < 0) || any(pred > 1)) {
    return(log(0))
  }
  sum(dbinom(x = dat$nsuccess, size = dat$neff, prob = pred, log = TRUE))
}
# Likelihood function for number of tests (programme data)
#'@export
ll_prgdat <- function(mod, fp, dat) {
  # `dat$hivstatus` selects which programme totals are fitted: 'all'
  # (all tests performed) or 'positive' (positive tests only). The
  # computation was byte-identical in the two original branches, since
  # total_tests() dispatches on the indices added by add_ss_indices(),
  # so it is performed once after validation.
  hivstatus <- as.character(dat$hivstatus[1])
  if (!hivstatus %in% c('all', 'positive')) {
    # BUGFIX: the original printed a message and then called `break`
    # outside of any loop, which is itself an error; signal a proper
    # condition instead.
    stop('Error - HIV status is incorrect')
  }
  mu <- total_tests(mod, df = add_ss_indices(dat, fp$ss))
  llk <- sum(dnorm(x = dat$tot, mean = mu, sd = dat$l_est_se, log = TRUE))
  return(llk)
}
## -- UPDATE HERE --
## * max_year = <current_year> incremented each year
art_constraint_penalty <- function(mod, fp, max_year = 2022) {
  # Array indices for calendar years 2000..max_year (projection starts
  # at fp$ss$proj_start).
  yrs <- c(2000:max_year) - fp$ss$proj_start + 1L

  # Yearly totals of late diagnoses and of the untreated HIV+ population,
  # summed over the first three array dimensions.
  late <- apply(attr(mod, "late_diagnoses")[, , , yrs], 4, sum)
  untreated <- apply(attr(mod, "hivpop")[, , , yrs], 4, sum)

  # Soft constraint shrinking late diagnoses per 1000 untreated towards 0.
  rate_per_1000 <- late / untreated * 1000
  sum(dnorm(x = 0, mean = rate_per_1000, sd = 20, log = TRUE))
}
# Include this in ll_hts if you want to incorporate the likelihood constraint on ART.
# val_art_penalty <- art_constraint_penalty(mod, fp, max_year = 2022)
# val <- val1 + val2 + val3 + val_prior + val_art_penalty
# Function to prepare the data for input in the likelihood function.
#
# Builds the three likelihood inputs used by ll_hts():
#   * evertest — survey "ever tested" proportions with an effective
#     sample size (neff) back-calculated from the design-based SE,
#   * hts — programme totals of all tests (NULL if none usable),
#   * hts_pos — programme totals of positive tests (NULL if none usable).
#
# Fixes vs original: removed a no-op `ifelse(is.na(est), NA, est)`
# assignment, and replaced `stop(print(...))` (which printed the message
# and then raised it a second time) with a plain `stop()`.
#' @export
prepare_hts_likdat <- function(dat_evertest, dat_prg, fp) {
  # Testing behavior data: drop rows with missing estimates.
  dat_evertest <- dat_evertest[complete.cases(dat_evertest$est),]
  dat_evertest$l_est <- qlogis(dat_evertest$est)
  # Delta-method SE on the logit scale.
  dat_evertest$l_est_se <- dat_evertest$se / (dat_evertest$est * (1 - dat_evertest$est))
  # For the binomial likelihood, recover the effective sample size from
  # the SE; fall back to raw counts at the boundaries (est ~ 0 or ~ 1)
  # or when se == 0 makes neff infinite.
  dat_evertest$neff <- (dat_evertest$est * (1 - dat_evertest$est)) / dat_evertest$se^2
  dat_evertest$neff <- ifelse(dat_evertest$est < 1e-5, dat_evertest$counts, dat_evertest$neff)
  dat_evertest$neff <- ifelse(dat_evertest$est > 0.999, dat_evertest$counts, dat_evertest$neff)
  dat_evertest$neff <- ifelse(dat_evertest$neff == Inf, dat_evertest$counts, dat_evertest$neff)
  dat_evertest$nsuccess <- round(dat_evertest$est * dat_evertest$neff, 0)
  dat_evertest$neff <- ifelse(round(dat_evertest$neff, 0) < 1, 1, round(dat_evertest$neff, 0))
  dat_evertest <- add_ss_indices(dat_evertest, fp$ss)

  # Verifying if input data is OK: only 15+ programme totals supported.
  if (any(dat_prg$agegr != '15-99')) {
    stop('HTS program data should be for the 15-99 age group only; other age-grouping not supported at the moment')
  }

  # Positive tests: drop years with missing totpos, then prefer
  # sex-specific rows over 'both'-sex rows within the same year.
  dat_prg_pos <- dat_prg
  dat_prg_pos <- dat_prg_pos[complete.cases(dat_prg_pos$totpos),]
  dat_prg_pos_sex <- subset(dat_prg_pos, sex != 'both')
  yr_sex <- unique(dat_prg_pos_sex$year)
  dat_prg_pos1 <- subset(dat_prg_pos, sex == 'both' & !(year %in% yr_sex))
  dat_prg_pos2 <- subset(dat_prg_pos, sex != 'both' & (year %in% yr_sex))
  dat_prg_pos <- rbind(dat_prg_pos1, dat_prg_pos2)
  dat_prg_pos$tot <- dat_prg_pos$totpos

  # Total tests: same sex-preference logic, then drop missing totals.
  dat_prg_sex <- subset(dat_prg, sex != 'both')
  yr_sex <- unique(dat_prg_sex$year)
  dat_prg1 <- subset(dat_prg, sex == 'both' & !(year %in% yr_sex))
  dat_prg2 <- subset(dat_prg, sex != 'both' & (year %in% yr_sex))
  dat_prg <- rbind(dat_prg1, dat_prg2)
  dat_prg <- dat_prg[complete.cases(dat_prg$tot),]

  # Programmatic data - nb of tests (total: VCT + PMTCT + other; ideally 15+).
  if (nrow(dat_prg) > 0) {
    # 5% SE; the sqrt() maintains the same SE as fitting on both sexes,
    # assuming perfect correlation between the sex-specific series.
    dat_prg$l_est_se <- ifelse(dat_prg$sex == 'both', dat_prg$tot * 0.05, dat_prg$tot * 0.05 * sqrt(1 + 1 + 2*1))
    dat_prg$hivstatus <- 'all'
    dat_prg <- add_ss_indices(dat_prg, fp$ss)
  } else { dat_prg <- NULL }

  # Programmatic data - nb of positive tests (10% SE).
  if (nrow(dat_prg_pos) > 0) {
    dat_prg_pos$l_est_se <- ifelse(dat_prg_pos$sex == 'both', dat_prg_pos$tot * 0.10, dat_prg_pos$tot * 0.10 * sqrt(1 + 1 + 2*1))
    dat_prg_pos$hivstatus <- 'positive'
    dat_prg_pos <- add_ss_indices(dat_prg_pos, fp$ss)
  } else { dat_prg_pos <- NULL }

  return(list(evertest = dat_evertest, hts = dat_prg, hts_pos = dat_prg_pos))
}
# Log-prior for the HTS parameters: independent priors on individual
# parameters plus first-difference smoothing penalties (on the log /
# logit scales) for the female baseline rate knots, the male rate ratio,
# the diagnosed-untreated rate ratio, and the re-testing rate ratio.
# `mod` is accepted for interface compatibility but not used here.
lprior_hts <- function(theta, mod, fp) {
  ## -- UPDATE HERE --
  ## * Extend knots by 1 year to current year
  knot_idx <- 2000:2023 - fp$ss$proj_start + 1L
  ## -- UPDATE ABOVE --
  n_knots <- length(knot_idx)
  i_dxunt <- n_knots * 2 - 10  # index of the last diagnosed-untreated knot

  # First-difference penalties; knot indices are consecutive, so diff()
  # reproduces the pairwise year-on-year differences.
  diff_f <- diff(log(fp$hts_rate[1, 2, 1, knot_idx]))
  diff_rr_dxunt <- diff(theta[(n_knots + 1):i_dxunt])
  diff_rr_m <- diff(log(plogis(theta[(i_dxunt + 1):(i_dxunt + 2)]) * 1.1))
  diff_rr_test <- diff(theta[(i_dxunt + 3):(i_dxunt + 4)])

  prior <-
    ## First baseline rate for females # exp(log(0.001) + 1.96*0.25)
    dnorm(theta[1], mean = log(0.005), sd = 0.25, log = TRUE) +
    ## Relative testing among PLHIV diagnosed, untreated. 1.50 (95%CI: 0.14-6.00)
    dnorm(theta[i_dxunt], mean = qlogis(1.5 / 8), sd = 1.31, log = TRUE) +
    ## Male RR 0.6 (95%CI: 0.07-1.05)
    dnorm(theta[i_dxunt + 1], mean = qlogis(0.6 / 1.1), sd = 1.46, log = TRUE) +
    ## Relative increase among previously tested. 1.93 (95%CI: 1.08-5.00)
    dnorm(theta[i_dxunt + 3], mean = qlogis((1.93 - 0.95) / 7.05), sd = 1.084, log = TRUE) +
    ## Relative factor for re-testing among PLHIV. 1.00 (95%CI: 0.10-1.90)
    dnorm(theta[i_dxunt + 5], mean = qlogis(0.95 / 1.90), sd = 1.85, log = TRUE) +
    ## Relative testing among PLHIV already on ART (95%CI: 0.01-0.90)
    dnorm(theta[i_dxunt + 6], mean = qlogis(0.25), sd = 1.68, log = TRUE) +
    ## Age rate ratios (95% CI 0.14-5.0)
    sum(dnorm(theta[(i_dxunt + 7):(i_dxunt + 10)], mean = qlogis(0.9 / 5.9), sd = 1.685, log = TRUE)) +
    ## RR OI diagnosed for HIV relative to ART coverage 1.0 (0.3-1.7)
    dnorm(theta[i_dxunt + 11], mean = qlogis(0.5), sd = 1.75, log = TRUE)

  # Smoothing-penalty SDs (female baseline, male RR, dxunt RR, retest RR).
  smooth <-
    sum(dnorm(diff_f, mean = 0, sd = 0.205, log = TRUE)) +
    sum(dnorm(diff_rr_m, mean = 0, sd = 0.26, log = TRUE)) +
    sum(dnorm(diff_rr_dxunt, mean = 0, sd = 0.25, log = TRUE)) +
    sum(dnorm(diff_rr_test, mean = 0, sd = 0.25, log = TRUE))

  prior + smooth
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.