blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6bc231710d993079a548b91a57406c6856342e6 | 3d410069bf8c676a32dc8fb91276b2b5c3c5de3e | /code/BayGLM3_bayesian.R | 9aad4bff3a4f672dbc497a0c09a9519ac98c5a78 | [] | no_license | jasa-acs/A-Bayesian-General-Linear-Modeling-Approach-to-Cortical-Surface-fMRI-Data-Analysis | ecdb007babf5550a7a7d8788d3abfb9f64433bae | 1bc5387343b120a44834fdadd1945cffbaf5ff25 | refs/heads/master | 2020-09-03T23:40:37.113161 | 2020-01-09T19:20:55 | 2020-01-09T19:20:55 | 219,604,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 31,047 | r | BayGLM3_bayesian.R | #library(devtools)
#install_github('muschellij2/gifti')
library(gifti)
#install.packages('sp')
#install.packages("INLA", repos="https://www.math.ntnu.no/inla/R/testing")
library(INLA)
INLA:::inla.dynload.workaround() #to avoid error on creating mesh
inla.setOption("pardiso.license", "~/pardiso.lic") #required for parallel INLA (much faster)
# inla.update(testing=T)
library(excursions)
library(fields)
library(expm) #sqrtm
library(reshape2)
library(ggplot2)
library(RColorBrewer)
library(MASS) #mvrnorm
library(parallel)
data_dir <- '~/HCP/data/hpc/'
wb_cmd <- '~/workbench/bin_rh_linux64/wb_command'
maindir <- '~/BayesianGLM/' #contains the following subdirectories:
#EVs: contains task design matrix (task convolved with HRF) for each subject
#motion: contains motion covariates (6 plus their temporal derivatives) for each subject
#locations: contains coordinates of surface vertices and CIFTI/GIFTI files used in transformations, used to store SPDE objects
#timeseries: contains smoothed and resampled fMRI timeseries for classical and Bayesian GLMs, respectively
#prewhitening: contains AR coefficient estimates, and sparse prewhitening matrix for Bayesian GLM
#results_OLS: contains results from classical GLM
#results_sphere: contains results from Bayesian GLM
#code: additional functions
source(paste0(maindir,'code/BayGLMfun.R'))
setwd(maindir)
###########################################################
# LOAD IN SETUP OBJECTS (FROM BAYGLM2.R)
###########################################################
load(file='subjects.Rdata')
M <- length(subjects)
load('locations/loc32K.Rdata') #ns.32K, isNA
load('locations/ind_obs.Rdata') #ns.all, idx.obs, idx.obs1, idx.obs2, ns1, ns2, ns
load('locations/mesh.sphere.resamp6K.Rdata') #loc.norm1, loc.norm2, mesh1, mesh2
load('locations/Amat.Rdata') #Amat =list(Amat1, Amat2)
load('locations/spde.Rdata') #spde = list(spde1, spde2), spatial = list(spatial1, spatial2)
mesh <- list(mesh1, mesh2)
loc.norm <- list(loc.norm1, loc.norm2)
idx.obs <- list(idx.obs1, idx.obs2)
###########################################################
# FOR VISUALIZATION
###########################################################
ts_6K <- paste0(maindir,'locations/ts.6K.dtseries.nii')
ts_32K <- paste0(maindir,'locations/ts.32K.dtseries.nii')
rsurface_32K <- paste0(maindir,'locations/Sphere.32K.R.surf.gii')
lsurface_32K <- paste0(maindir,'locations/Sphere.32K.L.surf.gii')
rsurface_6K <- paste0(maindir,'locations/Sphere.6k.R.surf.gii')
lsurface_6K <- paste0(maindir,'locations/Sphere.6k.L.surf.gii')
thresholds <- c(0,0.5,1) #activation thresholds
U <- length(thresholds)
for(task in c('MOTOR','GAMBLING')){
if(task=='MOTOR') { ntime <- 284; K <- 6 }
if(task=='GAMBLING') { ntime <- 253; K <- 3 }
###########################################################
# GET SPARSE JOINT PREWHITENING MATRIX
###########################################################
p <- 6
load(file=paste0('prewhitening/',task,'/sqrtinvcovAR.6K.Rdata')) #sqrtInv_all, rows.rm
###########################################################
###########################################################
# RUN SUBJECT-LEVEL MODELS
###########################################################
###########################################################
#for group models
y_all_left <- c()
y_all_right <- c()
Xmat_all_left <- NULL
Xmat_all_right <- NULL
Xmat_list_left <- NULL
Xmat_list_right <- NULL
for(mm in 1:M){
print(mm)
t00 <- Sys.time()
###########################################################
# READ IN DESIGN MATRIX
###########################################################
# Build up covariates
# Load the task design matrix (EVs convolved with HRF) for this subject/run.
file.name <- paste("EVs/",task,'/',subjects[mm],"_RL.csv",sep="")
Z <- as.matrix(read.csv(file.name, header=T)) ## task
# NOTE(review): division by the single global max scales all EVs jointly;
# confirm per-column scaling was not intended.
Z <- Z/max(Z) #standardize
ntime <- dim(Z)[1]
Z <- scale(Z, scale=FALSE) #center to remove baseline
# Nuisance regressors: motion covariates (6 plus temporal derivatives).
file.name <- paste("motion/",task,'/',subjects[mm],"_RL.txt",sep="")
X <- read.table(file.name) ## nuisance
X <- X/max(X) #standardize
trend1 <- (1:ntime)/ntime #linear trends
trend2 <- trend1^2 #quadratic trends
X <- as.matrix(cbind(X, trend1, trend2))
X <- scale(X, scale=FALSE) #center nuisance regressors
# Regress intercept & nuisance parameters from task
invxvx <- solve(t(X) %*% X) #(X'X)^{-1}
betaX <- invxvx %*% t(X) %*% Z #(X'X)^{-1} X'Z
residZ <- Z - X %*% betaX #task design with nuisance signal projected out
#############################################
# Fit Bayesian GLM
#############################################
# MOTOR TASK: 45 MIN / HEMISPHERE
# GAMBLING TASK: 30 MIN / HEMISPHERE
# Time Series
file.name <- paste("timeseries/",task,'/',subjects[mm],".6K.csv",sep="")
data.all <- as.matrix(read.csv(file.name, header=F))
local_means <- matrix(rowMeans(data.all, na.rm=TRUE), nrow=nrow(data.all), ncol=ncol(data.all))
y.all <- t(100*(data.all - local_means)/local_means) #scale to units of pct local signal change AND CENTER
# Regress nuisance parameters & save for group models
betaX <- invxvx %*% t(X) %*% y.all #(X'X)^{-1} X'Z
residY <- y.all - X %*% betaX
y <- residY
Z <- residZ
save(y, file=paste0('timeseries/',task,'/y_',subjects[mm],'.Rdata'))
save(Z, file=paste0('timeseries/',task,'/Z_',subjects[mm],'.Rdata'))
#load(file=paste0('timeseries/',task,'/y_',subjects[mm],'.Rdata'))
#load(file=paste0('timeseries/',task,'/Z_',subjects[mm],'.Rdata'))
# Loop through Hemispheres
for(h in 1:2){
print(paste0('~~~~~~~~~~ HEMISPHERE ',h, ' ~~~~~~~~~~'))
if(task=='MOTOR'){
if(h==1) cols <- c(1,4:6) # Left: EVs 1, 4, 5, 6
if(h==2) cols <- c(1:3,6) # Right: EVs 1, 2, 3, 6
} else { cols <- 1:3 }
nxz <- length(cols)
# Vectorize the (time x vertex) data for this hemisphere; time runs fastest.
y.h <- as.vector(y[,idx.obs[[h]]])
ix <- 1:(ntime*ns[h]) #row indices for the sparse design matrix
iy <- rep(1:ns[h], each = ntime) #column (vertex) index for each row
############################################
# Set up design matrix
############################################
nxz <- length(cols)
nmesh <- length(spatial[[h]])
# One sparse block per task regressor: block k places Z[,k] down the block
# diagonal (one column per vertex); blocks are concatenated column-wise.
for(k in cols){
Z_k <- sparseMatrix(ix, iy, x=rep(Z[,k], ns[h])) #don't actually need to rep Z because it will be recycled
if(k==cols[1]) A <- Z_k else A <- cbind(A, Z_k)
}
############################################
# Pre-whiten y and A
############################################
# Apply the sparse joint prewhitening matrix (loaded from prewhitening/),
# then drop the rows flagged for removal from both response and design.
A <- sqrtInv_all[[h]] %*% A #~10 seconds
y.h <- as.vector(sqrtInv_all[[h]] %*% y.h) #<1 seconds
A <- A[-rows.rm[[h]], ]
y.h <- y.h[-rows.rm[[h]]]
# Accumulate the prewhitened response and design matrices across subjects
# for the group-level (GE) models fit after the subject loop.
if(h==1) y_all_left <- c(y_all_left, y.h)
if(h==2) y_all_right <- c(y_all_right, y.h)
if(h==1) {
Xmat_all_left <- rbind(Xmat_all_left, A) #for GE group model
#Xmat_list_left <- c(Xmat_list_left, list(A)) #for FE group model
}
if(h==2) {
# BUG FIX: this previously assigned to the misspelled `Xmat_all_ringht`,
# which left `Xmat_all_right` NULL and broke the right-hemisphere group
# model (it is read below as `Xmat_all <- Xmat_all_right`).
Xmat_all_right <- rbind(Xmat_all_right, A) #for GE group model
#Xmat_list_right <- c(Xmat_list_right, list(A)) #for FE group model
}
############################################
# Construct formula and data
############################################
BOLD <- list(y=y.h)
formula <- 'y ~ -1'
bbeta_list <- vector('list',nxz)
for(j in 1:nxz){
bbeta_list[[j]] <- c(rep(NA, nmesh*(j-1)), spatial[[h]], rep(NA, (nxz-j)*nmesh))
BOLD <- c(BOLD, list(bbeta_list[[j]]))
formula_k <- paste0('f(bbeta',cols[j],', model=spde[[h]], hyper=list(theta=list(initial=c(-2,2))))')
formula <- paste0(formula,' + ',formula_k)
}
names(BOLD)[-1] <- paste0('bbeta',cols)
formula <- as.formula(formula)
############################################
# Run model
############################################
t0 <- Sys.time()
result <- inla(formula, data=BOLD, control.predictor=list(A=A),
verbose = TRUE, keep = FALSE, num.threads = 4,
control.inla = list(strategy = "gaussian", int.strategy = "eb"),
control.family=list(hyper=list(prec=list(initial=0.1))),
control.compute=list(config=TRUE)) #needed for excursions
print(Sys.time()-t0)
res.beta <- result$summary.random
res.hyper <- result$summary.hyperpar
residual <- y - result$summary.fitted.values$mean[1:length(y)]
mu.tmp <- result$misc$theta.mode #for joint group model
Q.tmp <- solve(result$misc$cov.intern) #for joint group model
file.name <- paste0("results_sphere/",task,"/result.",subjects[mm],".sphere",h,".resamp6K.RData")
save(res.beta, res.hyper, residual, mu.tmp, Q.tmp, file=file.name)
load(file=file.name)
############################################
# Visualize estimates
############################################
betas.h <- matrix(nrow = length(res.beta[[1]]$mean), ncol = K)
for(j in 1:nxz){
k <- cols[j]
betas.h[,k] <- res.beta[[j]]$mean
}
betas.h <- Amat[[h]] %*% betas.h #project from mesh back to original data locations
if(h==1) betas.mm <- matrix(nrow=nrow(data.all), ncol=K)
betas.mm[idx.obs[[h]],] <- as.matrix(betas.h)
} #end loop across hemispheres
#write CIFTI (6K)
names <- paste0("results_sphere/",task,"/beta",1:K,".",subjects[mm],"_6K")
writeCIFTIs(betas.mm, names, hemisphere='both', template=ts_6K, data_field = 'x6k')
#convert to 32K
fnames_6K <- paste0(maindir,names,'.dtseries.nii')
fnames_32K <- gsub('6K','32K',fnames_6K)
for(k in 1:K){
print(k)
fname_6K <- fnames_6K[k]
fname_32K <- fnames_32K[k]
cmd <- paste0(wb_cmd,' -cifti-resample ', fname_6K,' COLUMN ', ts_32K,' COLUMN BARYCENTRIC CUBIC ',
fname_32K, ' -left-spheres ',lsurface_6K,' ',lsurface_32K,' -right-spheres ',rsurface_6K,' ',rsurface_32K)
system(cmd)
}
}#end loop across subjects
### PLOT HYPERPARAMETER ESTIMATES
param_names <- list()
if(task=='MOTOR'){
param_names[[1]] <- c('prec','log_tau1','log_kappa1','log_tau4','log_kappa4','log_tau5','log_kappa5','log_tau6','log_kappa6')
param_names[[2]] <- c('prec','log_tau1','log_kappa1','log_tau2','log_kappa2','log_tau3','log_kappa3','log_tau6','log_kappa6')
} else if(task=='GAMBLING'){
param_names[[1]] <- param_names[[2]] <- c('prec','log_tau1','log_kappa1','log_tau2','log_kappa2','log_tau3','log_kappa3')
}
for(h in 1:2){
for(mm in (1:M)){
print(mm)
file.name <- paste0("results_sphere/",task,"/result.",subjects[mm],".sphere",h,".resamp6K.RData")
load(file=file.name)
params.mm.h <- res.hyper
params.mm.h$subject <- mm
params.mm.h$param <- param_names[[h]]
if(mm==1) params <- params.mm.h else params <- rbind(params,params.mm.h)
}
names(params)[3:5] <- c('Q025','Q50','Q975')
pdf(paste0('plots/',task,'/hyperparams',h,'.pdf'))
print(ggplot(params, aes(x=param, y=mean, group=subject)) + geom_point() +
geom_linerange(aes(ymin=Q025, ymax=Q975)) + theme(legend.position='bottom'))
dev.off()
}
###################################################################
###################################################################
# GROUP-LEVEL MODELS
###################################################################
###################################################################
###################################################################
# FULLY BAYESIAN MODEL (10-13 HOURS)
###################################################################
#Note: Fully Bayes model can be run for all 20 subjects for gambling task
#with ~280GB of memory. For motor task, would need more memory or fewer subjects.
if(task=='GAMBLING'){
dir <- paste0('results_sphere/',task,'/pop_full')
betas.all <- matrix(0, nrow=ns.all, ncol=K)
probs.all <- array(0, dim=c(ns.all, K, U)) #last dimension is for different activation thresholds
for(h in 1:2){
if(task=='MOTOR'){
if(h==1) cols <- c(1,4:6) # Left: EVs 1, 4, 5, 6
if(h==2) cols <- c(1:3,6) # Right: EVs 1, 2, 3, 6
} else { cols <- 1:3 }
nxz <- length(cols)
############################################
# Group Effect (GE) Model
############################################
### Construct formula and data
nmesh <- length(spatial[[h]])
if(h==1) BOLD <- list(y=y_all_left) else BOLD <- list(y=y_all_right)
formula <- 'y ~ -1'
bbeta_list <- vector('list',K)
for(k in 1:K){
bbeta_list[[k]] <- c(rep(NA, nmesh*(k-1)), spatial[[h]], rep(NA, (K-k)*nmesh))
BOLD <- c(BOLD, list(bbeta_list[[k]]))
formula_k <- paste0('f(bbeta',cols[k],', model=spde[[h]], hyper=list(theta=list(initial=c(-2,2))))')
formula <- paste0(formula,' + ',formula_k)
}
names(BOLD)[-1] <- paste0('bbeta',cols)
formula <- as.formula(formula)
### Run INLA: 10-13 hrs for 20 subjects
if(h==1) Xmat_all <- Xmat_all_left else Xmat_all <- Xmat_all_right
inla.setOption("num.threads", 4)
t0 <- Sys.time()
result <- inla(formula, data=BOLD, family='gaussian',
control.predictor=list(A = Xmat_all, compute=FALSE),
control.compute = list(config = TRUE),
control.family=list(hyper=list(prec=list(initial=-0.1))),
control.inla = list(strategy = "gaussian", int.strategy = 'eb'), verbose=T, keep=F)
print(Sys.time() - t0)
### Visualize beta estimates
res.beta <- result$summary.random
#would need to change looping for motor task
for(k in 1:K){
betas.all[idx.obs[[h]],k] <- as.vector(Amat[[h]] %*% res.beta[[k]]$mean)
}
dir <- paste0('results_sphere/',task,'/pop_full')
fname <- file.path(dir,'result.pop.sphere.RData')
save(betas.all, file=fname)
names_beta <- file.path(dir, paste0("beta",cols,"_6K"))
writeCIFTIs(betas.all, names_beta, hemisphere='both', template=ts_6K, data_field = 'x6k')
#convert to 32K
fnames_6K <- paste0(maindir,names_beta,'.dtseries.nii')
fnames_32K <- gsub('6K','32K',fnames_6K)
for(k in 1:K){
print(k)
fname_6K <- fnames_6K[k]
fname_32K <- fnames_32K[k]
cmd <- paste0(wb_cmd,' -cifti-resample ', fname_6K,' COLUMN ', ts_32K,' COLUMN BARYCENTRIC CUBIC ',
fname_32K, ' -left-spheres ',lsurface_6K,' ',lsurface_32K,' -right-spheres ',rsurface_6K,' ',rsurface_32K)
system(cmd)
}
### Compute Excursion Functions:
#0.0% = 6-7 min
#0.5% = 5-6 min
#1.0% = 4 min
for(u in 1:U){
print(paste0('threshold: ',thresholds[u], '%'))
thr <- thresholds[u]
for(k in cols){
t0 <- Sys.time()
print(beta.k <- paste0('bbeta',k))
res.exc.k <- excursions.inla(result, name=beta.k, u=thr, type='>', method='QC')
probs.all[idx.obs[[h]],k,u] <- as.vector(Amat[[h]]%*%res.exc.k$F)
rm(res.exc.k)
print(Sys.time()-t0)
fname <- file.path(dir,'result.pop.sphere.RData')
save(betas.all, probs.all, file=fname)
}
}
}
### Visualize activation maps
for(u in 1:U){
print(paste0('threshold: ',thresholds[u], '%'))
#1. Write probs.all and probs.all0 to CIFTI
probs.all.u <- probs.all[,,u]
names_probs <- file.path(dir,paste0("probs",1:K,"_thr",u,"_6K_RH"))
probs.all.u[probs.all.u < 0.01] <- 0.01
writeCIFTIs(probs.all.u, names_probs, hemisphere='both', template=ts_6K, data_field = 'x6k')
fnames_6K <- paste0(maindir,names_probs,'.dtseries.nii')
fnames_32K <- gsub('6K','32K',fnames_6K)
#2. Resample to 32K and read into R
probs.all.u.32K <- matrix(nrow=ns.32K*2, ncol=K)
for(k in 1:K){
print(k)
fname_6K <- fnames_6K[k]
fname_32K <- fnames_32K[k]
cmd <- paste0(wb_cmd,' -cifti-resample ', fname_6K,' COLUMN ', ts_32K,' COLUMN BARYCENTRIC CUBIC ', fname_32K, ' -left-spheres ',lsurface_6K,' ',lsurface_32K,' -right-spheres ',rsurface_6K,' ',rsurface_32K)
system(cmd)
probs.k <- readCIFTI_ts(fname_32K)
probs.all.u.32K[,k] <- probs.k[,1]
}
#3. Threshold 32K PPMs at 0.95 and 0.99
active_99.u <- 1*(probs.all.u.32K >= 0.99)
#4. Combine activation thresholds
if(u==1) { active_99 <- active_99.u } else { active_99 <- active_99 + active_99.u }
}
#5. Write to CIFTI
active_99[active_99==0] <- 0.01
names99 <- file.path(dir,paste0("active",1:K,"_99_32K"))
writeCIFTIs(active_99, names99, hemisphere='both', template=ts_32K, data_field = 'x32k')
############################################
# Fixed Effect (FE) Model (VERY SLOW)
############################################
# for(h in 1:2){
# beta1 <- rep(spatial[[h]], M)
# beta2 <- rep(spatial[[h]], M)
# beta3 <- rep(spatial[[h]], M)
# nmesh <- length(spatial[[h]])
# bbeta1 <- c(beta1, rep(NA, nmesh*M), rep(NA, nmesh*M))
# bbeta2 <- c(rep(NA, nmesh*M), beta2, rep(NA, nmesh*M))
# bbeta3 <- c(rep(NA, nmesh*M), rep(NA, nmesh*M), beta3)
# bbeta_list <- c(list(bbeta1), list(bbeta2), list(bbeta3))
# rep1 <- c(rep(1:M, each = nmesh), rep(NA, nmesh*M), rep(NA, nmesh*M))
# rep2 <- c(rep(NA, nmesh*M), rep(1:M, each = nmesh), rep(NA, nmesh*M))
# rep3 <- c(rep(NA, nmesh*M), rep(NA, nmesh*M), rep(1:M, each = nmesh))
# if(h==1) BOLD <- list(y=y_all_left) else BOLD <- list(y=y_all_right)
# formula <- 'y ~ -1'
# bbeta_list <- vector('list',K)
# for(k in 1:K){
# BOLD <- c(BOLD, list(bbeta_list[[k]]))
# formula_k <- paste0('f(bbeta',cols[k],', model=spde[[h]], replicate=rep',k,', hyper=list(theta=list(initial=c(-2,2))))')
# formula <- paste0(formula,' + ',formula_k)
# }
# names(BOLD)[-1] <- paste0('bbeta',cols)
# formula <- as.formula(formula)
# if(h==1) Xmat_all <- bdiag(Xmat_list_left) else Xmat_all <- bdiag(Xmat_list_right)
# t0 <- Sys.time()
# result <- inla(formula, data=BOLD, family='gaussian',
# control.predictor=list(A = Xmat_all, compute=FALSE),
# control.compute = list(config = TRUE),
# control.family=list(hyper=list(prec=list(initial=0.1))),
# control.inla = list(strategy = "gaussian", int.strategy = 'eb'), verbose = T)
# print(Sys.time() - t0)
# }
}
###################################################################
###################################################################
# JOINT BAYESIAN APPROACH (FASTER ALTERNATIVE TO FULLY BAYESIAN APPROACH)
###################################################################
###################################################################
betas.all <- matrix(0, nrow=ns.all, ncol=K)
probs.all <- array(0, dim=c(ns.all, K, U)) #last dimension is for different activation thresholds
#30 min per hemisphere (with sampling in parallel)
for(h in 1:2){
t0h <- Sys.time()
if(task=='MOTOR'){
if(h==1) cols <- c(1,4:6) # Left: EVs 1, 4, 5, 6
if(h==2) cols <- c(1:3,6) # Right: EVs 1, 2, 3, 6
} else { cols <- 1:3 }
nxz <- length(cols)
print('Collecting theta posteriors from subject models')
#0.3-0.7 seconds per subject
theta.sub <- NULL
mu.theta.tmp <- Q.theta <- 0
for(mm in 1:M){
t0 <- Sys.time()
file.name <- paste("results_sphere/",task,"/result.", subjects[mm],".sphere", h, ".resamp6K.RData", sep = "")
load(file.name)
# sum_m Q_m * mu_m
mu.theta.tmp <- mu.theta.tmp + as.vector(Q.tmp%*%mu.tmp)
# sum_m Q_m
Q.theta <- Q.theta + Q.tmp
theta.sub <- cbind(theta.sub, res.hyper$mode)
rm(mu.tmp, Q.tmp)
}
#(sum_m Q_m)^(-1) * sum_m Q_m * mu_m
# Precision-weighted average of the subject-level posterior modes of theta.
mu.theta <- solve(Q.theta, mu.theta.tmp)
print('Drawing samples from q(theta|y)')
nsamp <- 50 #number of importance samples over the hyperparameters
logwt <- rep(NA, nsamp)
theta.tmp <- mvrnorm(nsamp, mu.theta, solve(Q.theta)) #Gaussian proposal draws
for(i in 1:nsamp){ logwt[i] <- F.logwt(theta.tmp[i,], spde[[h]], mu.theta, Q.theta, M) }
#weights to apply to each posterior sample of theta
# Normalize in a numerically stable way (subtract the max before exp).
wt.tmp <- exp(logwt - max(logwt))
wt <- wt.tmp/(sum(wt.tmp))
print('Computing cross-products for each subject')
Xcros.all <- Xycros.all <- vector("list", M)
#9 seconds per subject
for(mm in 1:M){
print(mm)
## Read response and design matrix after nuisance regession & centering
load(file=paste0('timeseries/',task,'/y_',subjects[mm],'.Rdata')) #y
load(file=paste0('timeseries/',task,'/Z_',subjects[mm],'.Rdata')) #Z
y <- as.vector(y[,idx.obs[[h]]])
Z <- Z[,cols]
yZ <- list(y, Z)
cross.mm <- compute.cross(yZ, Amat[[h]], sqrtInv_all[[h]], rows.rm[[h]])
Xcros.all[[mm]] <- cross.mm$Xcros
Xycros.all[[mm]] <- cross.mm$Xycros
}
print('Computing posterior quantities of beta for each theta')
## Create index vectors
n.mesh <- mesh[[h]]$n
ind_beta <- list()
for(k in 1:nxz){
ind_beta[[k]] <- 1:n.mesh + (k-1)*n.mesh
}
#get posterior quantities of beta, conditional on a value of theta
no_cores <- min(detectCores() - 1, 25)
cl <- makeCluster(no_cores)
t0 <- Sys.time()
#in sequence, 8 min per iteration for motor task, 5-6 min for gambling task (for 20 subjects and 3 activation thresholds)
#in parallel, 21 min total for gambling task!
#in parallel, 24 min for motor task!
#with 50 iterations, we save 50*8 - 25 = 375 min (6.25 hours!)
beta.post.samps <- parApply(cl, theta.tmp, MARGIN=1, FUN=beta.posterior.thetasamp, spde=spde[[h]], K=nxz, M, Xcros.all, Xycros.all, thresholds=thresholds, alpha=0.01, ind_beta=ind_beta)
print(Sys.time() - t0)
stopCluster(cl)
#organize samples
mu.tot <- matrix(nrow=nxz*n.mesh, ncol=nsamp)
F.tot <- rep(list(rep(list(matrix(nrow=n.mesh, ncol=nsamp)), K)), U) #for each activation threshold and task, a Vx50 matrix
for(itheta in 1:nsamp){
mu.tot[,itheta] <- beta.post.samps[[itheta]]$mu
for(u in 1:U){
for(k in 1:nxz){
F.tot[[u]][[k]][,itheta] <- beta.post.samps[[itheta]]$F[[u]][,k]
}
}
}
print('Computing posterior quantities of beta, summing over theta')
### Sum over samples using weights, combine hemispheres (< 1 sec)
#posterior mean
beta.pop <- as.vector(mu.tot%*%wt)
for(k in 1:nxz){
beta.pop.k <- beta.pop[ind_beta[[k]]]
betas.all[idx.obs[[h]],cols[k]] <- as.vector(Amat[[h]]%*%beta.pop.k)
}
#posterior probabilities
for(u in 1:U){
for(k in 1:nxz){
F.pop.uk <- as.vector(F.tot[[u]][[k]]%*%wt)
probs.all[idx.obs[[h]],cols[k],u] <- as.vector(Amat[[h]]%*%F.pop.uk)
}
}
print(Sys.time() - t0h)
} #end loop over hemispheres
### VISUALIZE RESULTS
dir <- paste0('results_sphere/',task,'/pop_joint/full_model')
# BETA MAPS
names_beta <- file.path(dir, paste0("beta",1:K,"_6K"))
writeCIFTIs(betas.all, names_beta, hemisphere='both', template=ts_6K, data_field = 'x6k')
#convert to 32K
fnames_6K <- paste0(maindir,names_beta,'.dtseries.nii')
fnames_32K <- gsub('6K','32K',fnames_6K)
for(k in 1:K){
print(k)
fname_6K <- fnames_6K[k]
fname_32K <- fnames_32K[k]
cmd <- paste0(wb_cmd,' -cifti-resample ', fname_6K,' COLUMN ', ts_32K,' COLUMN BARYCENTRIC CUBIC ',
fname_32K, ' -left-spheres ',lsurface_6K,' ',lsurface_32K,' -right-spheres ',rsurface_6K,' ',rsurface_32K)
system(cmd)
}
# ACTIVATION MAPS
for(u in 1:U){
print(paste0('threshold: ',thresholds[u], '%'))
#1. Write probs.all and probs.all0 to CIFTI
probs.all.u <- probs.all[,,u]
names_probs <- file.path(dir,paste0("probs",1:K,"_thr",u,"_6K"))
probs.all.u[probs.all.u < 0.01] <- 0.01
writeCIFTIs(probs.all.u, names_probs, hemisphere='both', template=ts_6K, data_field = 'x6k')
fnames_6K <- paste0(maindir,names_probs,'.dtseries.nii')
fnames_32K <- gsub('6K','32K',fnames_6K)
#2. Resample to 32K and read into R
probs.all.u.32K <- matrix(nrow=ns.32K*2, ncol=K)
for(k in 1:K){
print(k)
fname_6K <- fnames_6K[k]
fname_32K <- fnames_32K[k]
cmd <- paste0(wb_cmd,' -cifti-resample ', fname_6K,' COLUMN ', ts_32K,' COLUMN BARYCENTRIC CUBIC ', fname_32K, ' -left-spheres ',lsurface_6K,' ',lsurface_32K,' -right-spheres ',rsurface_6K,' ',rsurface_32K)
system(cmd)
probs.k <- readCIFTI_ts(fname_32K)
probs.all.u.32K[,k] <- probs.k[,1]
}
#3. Threshold 32K PPMs at 0.95 and 0.99
active_99.u <- 1*(probs.all.u.32K >= 0.99)
active_95.u <- 1*(probs.all.u.32K >= 0.95)
#4. Combine activation thresholds
if(u==1) {
active_99 <- active_99.u
active_95 <- active_95.u
} else {
active_99 <- active_99 + active_99.u
active_95 <- active_95 + active_95.u
}
}
active_99[active_99==0] <- 0.01
active_95[active_95==0] <- 0.01
#5. Write to CIFTI
names99 <- file.path(dir,paste0("active",1:K,"_99_32K"))
names95 <- file.path(dir,paste0("active",1:K,"_95_32K"))
writeCIFTIs(active_99, names99, hemisphere='both', template=ts_32K, data_field = 'x32k')
writeCIFTIs(active_95, names95, hemisphere='both', template=ts_32K, data_field = 'x32k')
###################################################################
###################################################################
# TWO-LEVEL BAYESIAN APPROACH (NOT RECOMMENDED DUE TO OVERSMOOTHING)
###################################################################
###################################################################
# COMBINE SUBJECT-LEVEL RESULTS AND RUN GROUP-LEVEL MODELS
betas.all <- matrix(0, nrow=ns.all, ncol=K)
probs.all <- array(0, dim=c(ns.all, K, U)) #last dimension is for different activation thresholds
inla.setOption("num.threads", 16)
for(h in 1:2){
print(paste0('~~~~~~~~~~~~~~~~ HEMISPHERE ',h,' ~~~~~~~~~~~~~~~~'))
t0h <- Sys.time()
if(task=='MOTOR'){
if(h==1) cols <- c(1,4:6) # Left: EVs 1, 4, 5, 6
if(h==2) cols <- c(1:3,6) # Right: EVs 1, 2, 3, 6
} else { cols <- 1:3 }
nxz <- length(cols)
######################################################
# COMBINE SUBJECT-LEVEL RESULTS
######################################################
print('Gathering Subject-Level Results')
betas.tot <- matrix(nrow = ns[h]*M, ncol = K)
sds.tot <- matrix(nrow = ns[h]*M, ncol = K)
for(mm in 1:M){
print(mm)
file.name <- paste0("results_sphere/",task,"/result.",subjects[mm],".sphere",h,".resamp6K.RData")
load(file.name)
betas <- matrix(nrow = length(res.beta$bbeta1$mean), ncol = K)
sds <- matrix(nrow = length(res.beta$bbeta1$mean), ncol = K)
for(k in cols){
colname <- paste0('bbeta',k)
betas[,k] <- res.beta[[colname]]$mean
sds[,k] <- res.beta[[colname]]$sd
}
rows <- (1:ns[h]) + (mm-1)*ns[h]
for(k in 1:K){
betas.tot[rows,k] <- as.vector(Amat[[h]]%*%betas[,k])
sds.tot[rows,k] <- as.vector(Amat[[h]]%*%sds[,k])
}
}
######################################################
# RUN GROUP-LEVEL MODELS
######################################################
mesh.tot <- inla.mesh.2d(loc.norm[[h]], max.edge = 0.07)
spde.tot <- inla.spde2.matern(mesh.tot)
node <- mesh.tot$idx$loc
bbeta <- rep(node, M)
Amat.tot <- inla.spde.make.A(mesh.tot, as.matrix(loc.norm[[h]]))
beta.pop <- NULL
result.tot <- list(); length(result.tot) <- K
for(k in cols){
print(k)
dat.inla <- list(y = betas.tot[,k], x = bbeta, z = 1:dim(sds.tot)[1], s = 1/(sds.tot[,k])^2)
formula <- y ~ -1 + f(x, model = spde.tot) + f(z, model = 'iid', hyper = list(theta=list(scale=s)))
#formula0 <- y ~ -1 + f(x, model = spde.tot)
#formula1 <- y ~ -1 + f(x, model = spde.tot) + f(z, model = 'iid')
print(paste0('Fitting Model of Column ',k,' with INLA'))
#7-8 minutes
t0 <- Sys.time()
result <- inla(formula, data=dat.inla, control.compute=list(config=TRUE))
#result0 <- inla(formula0, data=dat.inla, control.compute=list(config=TRUE))
#result1 <- inla(formula1, data=dat.inla, control.compute=list(config=TRUE))
print(Sys.time()-t0)
result.tot[[k]] <- result
mu.post <- result$summary.random$x$mean
mu.post <- as.vector(Amat.tot%*%mu.post) #resample to data size
betas.all[idx.obs[[h]],k] <- mu.post
# var.post <- (result$summary.random$x$sd)^2
# var.post0 <- (result0$summary.random$x$sd)^2
# var.post1 <- (result1$summary.random$x$sd)^2
# mar.var.2level[idx.obs[[h]],i] <- as.vector(Amat.tot%*%var.post)
# mar.var.2level0[idx.obs[[h]],i] <- as.vector(Amat.tot%*%var.post0)
# mar.var.2level1[idx.obs[[h]],i] <- as.vector(Amat.tot%*%var.post1)
print(paste0('Identifying Active Regions for Column ',k))
#0.0% = 6-7 min
#0.5% = 5-6 min
#1.0% = 4 min
for(u in 1:U){
t0 <- Sys.time()
thr <- thresholds[u]
res.exc <- excursions.inla(result, name='x', u=thr, type='>', method='QC')
#joint posterior probabilities
F.post <- as.vector(Amat.tot%*%res.exc$F)
probs.all[idx.obs[[h]],k,u] <- F.post
print(Sys.time()-t0)
}
}
print(Sys.time() - t0h)
fname <- paste0('results_sphere/',task,'/pop_2level/original_approach/result.full.sphere',h,'.RData')
save(result.tot, file = fname)
fname <- paste0('results_sphere/',task,'/pop_2level/original_approach/result.pop.sphere',h,'.RData')
# NOTE(review): `beta.pop` is initialized to NULL before the column loop and
# never assigned afterwards, so the object saved here is always NULL; the
# group estimates appear to live in `betas.all` instead. Confirm whether
# this save is still needed or should store `betas.all`.
save(beta.pop, mesh.tot, file=fname)
}
# Save the population-level two-level model estimates for the current task,
# then write beta and activation maps to CIFTI and resample them from the 6K
# to the 32K cortical surface with Connectome Workbench (wb_command).
dir <- paste0('results_sphere/', task, '/pop_2level/original_approach')
fname <- file.path(dir, 'result.pop.sphere.RData')
save(betas.all, probs.all, file = fname)
#load(file=fname)

#######################
### VISUALIZE TWO-LEVEL MODEL RESULTS
#######################

# BETA MAPS
# One CIFTI file per task regressor (K regressors), first at 6K resolution.
names_beta <- file.path(dir, paste0("beta", 1:K, "_6K"))
writeCIFTIs(betas.all, names_beta, hemisphere = 'both', template = ts_6K, data_field = 'x6k')

#convert to 32K
fnames_6K <- paste0(maindir, names_beta, '.dtseries.nii')
fnames_32K <- gsub('6K', '32K', fnames_6K)
for (k in 1:K) {
  print(k)
  fname_6K <- fnames_6K[k]
  fname_32K <- fnames_32K[k]
  # Barycentric (surface) / cubic (data) resampling to the 32K sphere.
  cmd <- paste0(wb_cmd, ' -cifti-resample ', fname_6K, ' COLUMN ', ts_32K, ' COLUMN BARYCENTRIC CUBIC ',
                fname_32K, ' -left-spheres ', lsurface_6K, ' ', lsurface_32K, ' -right-spheres ', rsurface_6K, ' ', rsurface_32K)
  system(cmd)
}

# ACTIVATION MAPS
# For each activation threshold u (of U), resample the posterior probability
# maps (PPMs) to 32K, threshold them, and accumulate counts across thresholds.
for (u in 1:U) {
  #1. Write PPMs to 6K CIFTI
  probs.all.u <- probs.all[, , u]
  names_probs <- file.path(dir, paste0("probs", 1:K, "_thr", u, "_6K"))
  # Floor tiny probabilities at 0.01 -- presumably so exact zeros are not
  # rendered as missing data in the CIFTI viewer (TODO confirm).
  probs.all.u[probs.all.u < 0.01] <- 0.01
  writeCIFTIs(probs.all.u, names_probs, hemisphere = 'both', template = ts_6K, data_field = 'x6k')
  fnames_6K <- paste0(maindir, names_probs, '.dtseries.nii')
  fnames_32K <- gsub('6K', '32K', fnames_6K)
  #2. Resample to 32K and read into R
  # Rows = left + right hemisphere vertices, columns = the K regressors.
  probs.all.u.32K <- matrix(nrow = ns.32K * 2, ncol = K)
  for (k in 1:K) {
    print(k)
    fname_6K <- fnames_6K[k]
    fname_32K <- fnames_32K[k]
    cmd <- paste0(wb_cmd, ' -cifti-resample ', fname_6K, ' COLUMN ', ts_32K, ' COLUMN BARYCENTRIC CUBIC ', fname_32K, ' -left-spheres ', lsurface_6K, ' ', lsurface_32K, ' -right-spheres ', rsurface_6K, ' ', rsurface_32K)
    system(cmd)
    probs.k <- readCIFTI_ts(fname_32K)
    probs.all.u.32K[, k] <- probs.k[, 1]
  }
  #3. Threshold 32K PPMs at 0.95 and 0.99
  active_99.u <- 1 * (probs.all.u.32K >= 0.99)
  active_95.u <- 1 * (probs.all.u.32K >= 0.95)
  #4. Combine both activation thresholds
  # Running sum: per vertex, how many of the U thresholds show activation.
  if (u == 1) {
    active_99 <- active_99.u
    active_95 <- active_95.u
  } else {
    active_99 <- active_99 + active_99.u
    active_95 <- active_95 + active_95.u
  }
}
# Recode "never active" vertices with the same 0.01 floor used above.
active_99[active_99 == 0] <- 0.01
active_95[active_95 == 0] <- 0.01
#5. Write to CIFTI
names99 <- file.path(dir, paste0("active", 1:K, "_99_32K"))
names95 <- file.path(dir, paste0("active", 1:K, "_95_32K"))
writeCIFTIs(active_99, names99, hemisphere = 'both', template = ts_32K, data_field = 'x32k')
writeCIFTIs(active_95, names95, hemisphere = 'both', template = ts_32K, data_field = 'x32k')
} #end loop over tasks (motor, gambling)
|
414a3311a885b89082e6da986b08a3407925e14b | 0e5049ff00dc9466961163aaea6f3654fd59ac33 | /likelihoods.R | 6a8fde0ac6eec6a3d614b93dbb9ee6e36b1fbaa4 | [] | no_license | jonlachmann/irls_demo | 494fb99b3c7872c0831eeea80002a3e200b266d5 | 7d5b79d4b05b638c9bed6bad0fee97248a66233b | refs/heads/master | 2023-04-09T13:17:39.406364 | 2021-04-15T11:33:19 | 2021-04-15T11:33:19 | 357,663,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | likelihoods.R | # Title : TODO
# Objective : TODO
# Created by: jonlachmann
# Created on: 2021-04-14
logistic.loglik.aic.sub <- function (y, x, model, complex, params) {
  # AIC-style scores for a logistic regression on the covariates indexed by
  # `model`, fitted with the subsampled IRLS routine.  Returns two scores:
  # one from the subsample fit's log-likelihood and one from the full-data
  # deviance at the best coefficients seen during the IRLS iterations.
  # `complex` is accepted for interface compatibility but unused.
  design <- as.matrix(x[, model])
  mod <- irls(design, y, binomial(), 1, params$subs, maxit = 75,
              cooling = c(3, 0.9, 0.95), expl = c(3, 1.5))
  sub.score <- mod$loglik - mod$rank
  # Coefficients from the iteration with the lowest recorded deviance.
  best.beta <- mod$betahist[which.min(mod$devhist), ]
  full.dev <- get_deviance(best.beta, design, y, binomial())
  full.score <- -(full.dev / 2) - mod$rank
  list(sub = sub.score, full = full.score)
}
logistic.loglik.aic <- function (y, x, model, complex, params) {
  # AIC-style log-likelihood score for a logistic regression restricted to
  # the covariates indexed by `model`: log-likelihood (-deviance/2) minus the
  # number of fitted parameters.  `complex` and `params` are accepted for
  # interface compatibility with the other likelihood functions but unused.
  design <- as.matrix(x[, model])
  suppressWarnings({
    fit <- glm.fit(design, y, family = binomial())
  })
  -(fit$deviance / 2) - fit$rank
}
c85078b07e9c690338250ef640fd436cc5f18db9 | 19f18d1755fe9d518ec500c67ea9c4d4553ffb69 | /man/generate_folder_opt_report.Rd | 93e3cf332b9fd022bce3e9442856eb81a8203026 | [] | no_license | jcrodriguez1989/rco | f9483d79d31672eabb0db2587cf118ae04c398ea | 9f8ff5eb12e1ac572af407284f105783179c4187 | refs/heads/master | 2021-07-23T03:26:42.762558 | 2021-07-14T12:28:16 | 2021-07-14T12:28:16 | 178,254,891 | 91 | 29 | null | 2021-07-14T12:28:17 | 2019-03-28T17:47:31 | R | UTF-8 | R | false | true | 818 | rd | generate_folder_opt_report.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting-fun-folder.R
\name{generate_folder_opt_report}
\alias{generate_folder_opt_report}
\title{Report possible optimizations in folder containing `.R` files.}
\usage{
generate_folder_opt_report(
folder,
optimizers = all_optimizers,
pattern = "\\\\.R$",
recursive = TRUE
)
}
\arguments{
\item{folder}{Path to a directory with files to optimize.}
\item{optimizers}{A named list of optimizer functions.}
\item{pattern}{An optional regular expression. Only file names which match
the regular expression will be optimized.}
\item{recursive}{A logical value indicating whether or not files
in subdirectories of `folder` should be optimized as well.}
}
\description{
Report possible optimizations in folder containing `.R` files.
}
|
a517ae19607ffc2c17ecb6c077f06b34459f1031 | 00ec80463a5e47060b058e2b3f134627276c4184 | /plot1.R | 3ac6f8593b42c867d3b4fd2616483b6ba564b213 | [] | no_license | msasheikh/ExData_Plotting1 | 2280a8ff223f5a949d4dd3e460198fe04bc2b2ca | f440f822dbf147ab850380e525d8608e9420cdaa | refs/heads/master | 2021-01-16T20:49:08.574328 | 2014-06-07T22:41:42 | 2014-06-07T22:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 596 | r | plot1.R | setwd("C:/Users/school/Dropbox/Cousera/Exploratory Data Analysis/project1")
# plot1.R -- histogram of household Global Active Power for 2007-02-01 and
# 2007-02-02 (Exploratory Data Analysis course, plot 1).
x <- "exdata-data-household_power_consumption/household_power_consumption.txt"
# lubridate is loaded for consistency with the other plot scripts, although
# nothing from it is used here.  The original also ran install.packages() on
# every execution, which is a defect, so that call has been dropped.
library(lubridate)
# The raw file codes missing values as "?"; converting them at read time via
# na.strings replaces the original whole-data-frame scan-and-replace.
file <- read.table(x, header = TRUE, sep = ";", na.strings = "?")
# Keep only the two target days (dates are d/m/yyyy strings in the raw file).
info <- subset(file, Date %in% c("1/2/2007", "2/2/2007"))
# as.character() guards against factor columns on older R versions.
final <- as.numeric(as.character(info$Global_active_power))
png(file = "plot1.png", width = 480, height = 480)
hist(final, col = "red", xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
8ed3831cd07c114f69b045fe496688668272e3c4 | 082f9357ce7153b53936219a9764c882af3677d5 | /tests/testthat/test_oml_long.R | 8b679b2a3e0409bef083401f8d5c27d15e2e25f8 | [] | no_license | damirpolat/farff | 3fbfe7e3664d5c9ae91fed760ba395bea3892b0d | 15d6a60f400ce5f355040ed6568a5abdf40cb359 | refs/heads/master | 2022-01-07T13:43:28.263322 | 2019-05-03T12:22:35 | 2019-05-03T12:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,211 | r | test_oml_long.R | if (identical(Sys.getenv("TRAVIS"), "true")) {
  # Long-running integration test: round-trips many small OpenML data sets
  # through the package's ARFF reader and compares against the reference
  # parser via compareOML().  Only executed on Travis CI (this block sits
  # inside a guard on the TRAVIS environment variable).
  context("long test with many OML data sets")
  library(OpenML)
  # Restrict to small, active data sets to keep the run time manageable.
  dchars = listOMLDataSets()
  dchars2 = subset(dchars, status == "active" & number.of.instances < 100 & number.of.features < 10)
  dids = dchars2$did
  # FIXME: data set 292 is in sparse format. this is valid but we cannot parse it.
  dids = setdiff(dids, c(292))
  # FIXME: data set has multi-instance observations. Currently, we cannot parse it but we throw an error and have a unit test for this.
  dids = setdiff(dids, c(1438))
  # FIXME: there are quoting issues in data.table here. if we have doubles quotes ", it is unclear
  # how data.table should get them fed into after preproc.
  # - one " does not work, eg if we get a comma after an unescaped " in dquotes
  # - the correct way would be to escape all dquotes in char fields like this : \".
  #   but now data.table produces \\\"
  dids.datatable.broken = c(374, 376, 379, 380)
  # The data.table reader is currently disabled (see FIXME above); only readr
  # is exercised.
  # for (dreader in c("readr", "data.table")) {
  for (dreader in c("readr")) {
    for (did in dids) {
      message(did)
      # Skip the known-broken data sets whenever the data.table reader is on.
      if (dreader == "readr" || did %nin% dids.datatable.broken)
        compareOML(did, data.reader = dreader)
    }
  }
}
|
9a9fc4619ac342cd075545dab402e43dee4b1c6a | 8eda575f2fa56357988945de6013eb082d768b3d | /R/tts.R | 9abc123f0360d05d58cdedd7aefa5b37c1a8bd3e | [] | no_license | skfogg/temptoolr | 0547d5899667e6fab6462c7ffeb3954c3bd1ab9c | 3f458d60ffaea879bce7a79bdacb959b2ab39545 | refs/heads/master | 2023-08-21T18:27:02.033712 | 2021-10-18T20:56:03 | 2021-10-18T20:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,735 | r | tts.R | #' Temperature Time Series
#'
#' Queries the "TEMP" state-variable time series for one model cell (holon)
#' from an ODBC-accessible results database and tags the result with query
#' metadata; optionally converts it to an xts time series.
#'
#' @param odbcConnection RODBC connection object used to run the query
#' @param holonName the holon name of the cell to query
#' @param tableName the table to query
#' @param runID character; free-form run identifier stored as an attribute
#' @param xtsIndex optional time index (e.g. a lubridate/POSIXct vector); when
#'   supplied, the result is converted to an xts object ordered by this index
#' @param select columns to retrieve, as a single comma-separated string that
#'   is pasted verbatim into the SQL statement
#'
#' @return tts object
#' @export
tts <- function(odbcConnection, holonName, tableName, runID = "", xtsIndex = NULL, select = "modelTime, svValue") {
  # Query metadata; also reused to build the "no records" error message and
  # attached to the result as the 'spec' attribute.
  newTemperatureSeries <- structure(
    list(stateVal = "TEMP", holon = holonName, table = tableName, runID = runID),
    class = "TemperatureSeries"
  )
  # NOTE(review): the statement is built by string interpolation, so
  # tableName/holonName/select must come from trusted code -- RODBC::sqlQuery
  # offers no parameterized queries here.
  sqlStmt <- paste0("SELECT ", select, " FROM ", tableName, " WHERE holonname = '", holonName, "' AND stateVal = 'TEMP'")
  queriedTemp <- sqlQuery(odbcConnection, sqlStmt)

  nReturned <- nrow(queriedTemp)
  if (nReturned == 0) stop("No records found for: ", paste0(names(newTemperatureSeries), "='", newTemperatureSeries, "'", collapse = ", "))

  if (!is.null(xtsIndex)) {
    # Reorder by the supplied index and wrap as an xts series.
    queriedTemp <- xts(zoo(queriedTemp, order.by = xtsIndex))
  }

  # Attach metadata so the S3 methods below can identify the series.
  tts <- structure(
    queriedTemp,
    class = c("tts", class(queriedTemp)),
    spec = newTemperatureSeries,
    queryDate = date(),
    runID = runID
  )
  return(tts)
}
plot.tts <- function(x, ...) {
  # Plot a temperature series: xts-backed objects are drawn via their zoo
  # coercion, plain data frames as modelTime vs svValue.
  # NOTE(review): this method calls as.zoo.default() while lines.tts and
  # points.tts call as.zoo() -- preserved verbatim, but worth confirming
  # the difference is intentional.
  if (!is.xts(x)) {
    plot(x = x$modelTime, y = x$svValue, ...)
  } else {
    series <- as.zoo.default(x$svValue)
    plot(x = series, ...)
  }
}
lines.tts <- function(x, ...) {
  # Overlay a temperature series on an existing plot.
  if (!is.xts(x)) {
    lines(x = x$modelTime, y = x$svValue, ...)
  } else {
    series <- as.zoo(x$svValue)
    lines(x = series, ...)
  }
}
points.tts <- function(x, ...) {
  # Add the observations of a temperature series to an existing plot.
  if (!is.xts(x)) {
    points(x = x$modelTime, y = x$svValue, ...)
  } else {
    series <- as.zoo(x$svValue)
    points(x = series, ...)
  }
}
print.TemperatureSeries <- function(x, ...) {
  # Render the query spec as "name = 'value'" pairs joined by semicolons.
  rendered <- paste0(names(x), " = '", x, "'", collapse = "; ")
  print(rendered, ...)
}
|
0ee7463f793f98174a2943ea1dadd06100c4e325 | fde3f786a46570dcdc728538f756d6e3f30045eb | /R/misc/fitDistr.R | a6ae76b97edb02bf4c0e83d19eecec8ffb9cb0ab | [] | no_license | jrminter/snippets | 3fcb155721d3fd79be994c9d0b070859447060d7 | bb3daaad5c198404fef6eaa17b3c8eb8f91a00ed | refs/heads/master | 2021-01-13T14:20:51.451786 | 2017-04-29T05:26:11 | 2017-04-29T05:26:11 | 16,347,209 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,206 | r | fitDistr.R | # fitDistr.R
# Fit candidate distributions (gamma, normal, lognormal, exponential) to the
# second column of `raw` with MASS::fitdistr() and assess each fit with a
# QQ-plot of the data against random draws from the fitted distribution.
# Adapted from:
# http://statistical-research.com/finding-the-distribution-parameters/
require(MASS)
raw <- t( matrix(c(
  1, 0.4789,
  1, 0.1250,
  2, 0.7048,
  2, 0.2482,
  2, 1.1744,
  2, 0.2313,
  2, 0.3978,
  2, 0.1133,
  2, 0.1008,
  1, 0.7850,
  2, 0.3099,
  1, 2.1243,
  2, 0.3615,
  2, 0.2386,
  1, 0.0883), nrow=2
) )

# Gamma fit: estimate[1] = shape, estimate[2] = rate.
( fit.distr <- fitdistr(raw[,2], "gamma") )
# Fixed: axis labels were swapped (x holds the simulated draws, y the data).
qqplot(rgamma(nrow(raw), fit.distr$estimate[1], fit.distr$estimate[2]), (raw[,2]),
       xlab="Random Gamma", ylab="Observed Data")
abline(0,1,col='red')

simulated <- rgamma(1000, fit.distr$estimate[1], fit.distr$estimate[2])
hist(simulated, main=paste("Histogram of Simulated Gamma using",round(fit.distr$estimate[1],3),"and",round(fit.distr$estimate[2],3)),
     col=8, xlab="Random Gamma Distribution Value")

# Normal fit: estimate[1] = mean, estimate[2] = sd.
( fit.distr <- fitdistr(raw[,2], "normal") )
qqplot(rnorm(nrow(raw), fit.distr$estimate[1], fit.distr$estimate[2]), (raw[,2]))
abline(0,1,col='red')

# Lognormal fit.  Fixed: the original passed the whole estimate vector as
# meanlog and the standard errors (fit.distr$sd) as sdlog; use the fitted
# meanlog/sdlog parameters instead.
( fit.distr <- fitdistr(raw[,2], "lognormal") )
qqplot(rlnorm(nrow(raw), fit.distr$estimate[1], fit.distr$estimate[2]), (raw[,2]))
abline(0,1,col='red')

# Exponential fit: estimate = rate.
( fit.distr <- fitdistr(raw[,2], "exponential") )
qqplot(rexp(nrow(raw), fit.distr$estimate), (raw[,2]))
abline(0,1,col='red')
7a62ed860cb9cee169fedbb8c41dbccaadc8c738 | 44c12bf5db12471edba464b652f9b2133a38e80e | /R/cyclone.R | 98f3a9aa82886962a5a1eb779853928c7bca4d78 | [] | no_license | MarioniLab/scran | af4d01246208a12d40fc01b4d7d49df6a5f59b9f | f238890d5642dfb8062cf0254e0257fd28c5f28d | refs/heads/master | 2023-08-10T08:58:35.499754 | 2023-08-04T23:19:40 | 2023-08-04T23:30:29 | 100,610,090 | 43 | 31 | null | 2023-04-09T15:14:31 | 2017-08-17T14:06:03 | R | UTF-8 | R | false | false | 10,853 | r | cyclone.R | #' Cell cycle phase classification
#'
#' Classify single cells into their cell cycle phases based on gene expression data.
#'
#' @param x A numeric matrix-like object of gene expression values where rows are genes and columns are cells.
#'
#' Alternatively, a \linkS4class{SummarizedExperiment} object containing such a matrix.
#' @param pairs A list of data.frames produced by \code{\link{sandbag}}, containing pairs of marker genes.
#' @param gene.names A character vector of gene names, with one value per row in \code{x}.
#' @param iter An integer scalar specifying the number of iterations for random sampling to obtain a cycle score.
#' @param min.iter An integer scalar specifying the minimum number of iterations for score estimation.
#' @param min.pairs An integer scalar specifying the minimum number of pairs for cycle estimation.
#' @param BPPARAM A \linkS4class{BiocParallelParam} object to use for parallel processing across cells.
#' @param verbose A logical scalar specifying whether diagnostics should be printed to screen.
#' @param subset.row See \code{?"\link{scran-gene-selection}"}.
#' @param ... For the generic, additional arguments to pass to specific methods.
#'
#' For the SummarizedExperiment method, additional arguments to pass to the ANY method.
#' @param assay.type A string specifying which assay values to use, e.g., \code{"counts"} or \code{"logcounts"}.
#'
#' @details
#' This function implements the classification step of the pair-based prediction method described by Scialdone et al. (2015).
#' To illustrate, consider classification of cells into G1 phase.
#' Pairs of marker genes are identified with \code{\link{sandbag}}, where the expression of the first gene in the training data is greater than the second in G1 phase but less than the second in all other phases.
#' For each cell, \code{cyclone} calculates the proportion of all marker pairs where the expression of the first gene is greater than the second in the new data \code{x} (pairs with the same expression are ignored).
#' A high proportion suggests that the cell is likely to belong in G1 phase, as the expression ranking in the new data is consistent with that in the training data.
#'
#' Proportions are not directly comparable between phases due to the use of different sets of gene pairs for each phase.
#' Instead, proportions are converted into scores (see below) that account for the size and precision of the proportion estimate.
#' The same process is repeated for all phases, using the corresponding set of marker pairs in \code{pairs}.
#' Cells with G1 or G2M scores above 0.5 are assigned to the G1 or G2M phases, respectively.
#' (If both are above 0.5, the higher score is used for assignment.)
#' Cells can be assigned to S phase based on the S score, but a more reliable approach is to define S phase cells as those with G1 and G2M scores below 0.5.
#'
#' Pre-trained classifiers are provided for mouse and human datasets, see \code{?\link{sandbag}} for more details.
#' However, note that the classifier may not be accurate for data that are substantially different from those used in the training set, e.g., due to the use of a different protocol.
#' In such cases, users can construct a custom classifier from their own training data using the \code{\link{sandbag}} function.
#' This is usually necessary for other model organisms where pre-trained classifiers are not available.
#'
#' Users should \emph{not} filter out low-abundance genes before applying \code{cyclone}.
#' Even if a gene is not expressed in any cell, it may still be useful for classification if it is phase-specific.
#' Its lack of expression relative to other genes will still yield informative pairs, and filtering them out would reduce power.
#'
#' @section Description of the score calculation:
#' To make the proportions comparable between phases, a distribution of proportions is constructed by shuffling the expression values within each cell and recalculating the proportion.
#' The phase score is defined as the lower tail probability at the observed proportion.
#' High scores indicate that the proportion is greater than what is expected by chance if the expression of marker genes were independent
#' (i.e., with no cycle-induced correlations between marker pairs within each cell).
#'
#' % The shuffling assumes that the marker genes are IID from the same distribution of expression values, such that there's no correlations.
#' % The question is then what distribution of expression values to use - see below.
#' % Training also should protect against non-cycle-based correlations, as such they should be present across all phases and not get included in the marker set.
#'
#' By default, shuffling is performed \code{iter} times to obtain the distribution from which the score is estimated.
#' However, some iterations may not be used if there are fewer than \code{min.pairs} pairs with different expression, such that the proportion cannot be calculated precisely.
#' A score is only returned if the distribution is large enough for stable calculation of the tail probability, i.e., consists of results from at least \code{min.iter} iterations.
#'
#' Note that the score calculation in \code{cyclone} is slightly different from that described originally by Scialdone et al.
#' The original code shuffles all expression values within each cell, while in this implementation, only the expression values of genes in the marker pairs are shuffled.
#' This modification aims to use the most relevant expression values to build the null score distribution.
#'
#' % In theory, this shouldn't matter, as the score calculation depends on the ranking of each gene.
#' % That should be the same regardless of the distribution of expression values -- each set of rankings is equally likely, no matter what.
#' % In practice, the number of tied expression values will differ between different set of genes, e.g., due to abundance (low counts more likely to get ties).
#' % The most appropriate comparison would involve the same number of ties as that used to calculate the observed score.
#' % It doesn't make sense, for example, to shuffle in a whole bunch of non-expressed genes (lots of zeroes, ties) when the markers are always expressed.
#'
#' @return
#' A list is returned containing:
#' \describe{
#' \item{\code{phases}:}{A character vector containing the predicted phase for each cell.}
#' \item{\code{scores}:}{A data frame containing the numeric phase scores for each phase and cell (i.e., each row is a cell).}
#' \item{\code{normalized.scores}:}{A data frame containing the row-normalized scores (i.e., where the row sum for each cell is equal to 1).}
#' }
#'
#' @author
#' Antonio Scialdone,
#' with modifications by Aaron Lun
#'
#' @seealso
#' \code{\link{sandbag}}, to generate the pairs from reference data.
#'
#' @examples
#' set.seed(1000)
#' library(scuttle)
#' sce <- mockSCE(ncells=200, ngenes=1000)
#'
#' # Constructing a classifier:
#' is.G1 <- which(sce$Cell_Cycle %in% c("G1", "G0"))
#' is.S <- which(sce$Cell_Cycle=="S")
#' is.G2M <- which(sce$Cell_Cycle=="G2M")
#' out <- sandbag(sce, list(G1=is.G1, S=is.S, G2M=is.G2M))
#'
#' # Classifying a new dataset:
#' test <- mockSCE(ncells=50)
#' assignments <- cyclone(test, out)
#' head(assignments$scores)
#' table(assignments$phases)
#'
#' @references
#' Scialdone A, Natarajan KN, Saraiva LR et al. (2015).
#' Computational assignment of cell-cycle stage from single-cell transcriptome data.
#' \emph{Methods} 85:54--61
#'
#' @name cyclone
NULL
#' @export
#' @rdname cyclone
# S4 generic: dispatch on the class of 'x' (ANY or SummarizedExperiment).
setGeneric("cyclone", function(x, ...) standardGeneric("cyclone"))
#' @importFrom BiocParallel SerialParam bpstart bpstop
#' @importFrom scuttle .bpNotSharedOrUp .subset2index
#' @importFrom beachmat colBlockApply
# Workhorse behind the cyclone() generic: computes a per-cell score for each
# phase in 'pairs' and derives phase assignments from the G1/G2M scores.
.cyclone <- function(x, pairs, gene.names=rownames(x), iter=1000, min.iter=100, min.pairs=50,
    BPPARAM=SerialParam(), verbose=FALSE, subset.row=NULL)
{
    if (length(gene.names)!=nrow(x)) {
        stop("length of 'gene.names' must be equal to 'x' nrows")
    }
    iter <- as.integer(iter)
    min.iter <- as.integer(min.iter)
    min.pairs <- as.integer(min.pairs)

    # Checking subset vector and blanking out the unused names.
    # (Genes outside 'subset.row' get NA names so they can never match a pair.)
    subset.row <- .subset2index(subset.row, x, byrow=TRUE)
    gene.names[-subset.row] <- NA

    # Only keeping training pairs where both genes are in the test data.
    for (p in names(pairs)) {
        curp <- pairs[[p]]
        m1 <- match(curp$first, gene.names)
        m2 <- match(curp$second, gene.names)
        keep <- !is.na(m1) & !is.na(m2)
        m1 <- m1[keep]
        m2 <- m2[keep]

        # Reformatting it to be a bit easier to access during permutations:
        # 'index' lists the rows of 'x' used by any pair, while 'first' and
        # 'second' become positions within that reduced row set.
        retained <- logical(length(gene.names))
        retained[m1] <- TRUE
        retained[m2] <- TRUE
        new.indices <- cumsum(retained)
        pairs[[p]] <- list(first=new.indices[m1]-1L, second=new.indices[m2]-1L,
            index=which(retained)-1L) # For zero indexing.
    }

    if (verbose) {
        for (cl in names(pairs)) {
            message(sprintf("Number of %s pairs: %d", cl, length(pairs[[cl]][[1]])))
        }
    }

    # Start the parallel backend if it is not already running, and make sure
    # it is shut down again when this function exits.
    if (.bpNotSharedOrUp(BPPARAM)) {
        bpstart(BPPARAM)
        on.exit(bpstop(BPPARAM))
    }

    # Run the allocation algorithm, one phase at a time, blockwise over cells.
    # Per-cell RNG seeds/streams presumably keep the permutations reproducible
    # regardless of how the columns are blocked -- see .cyclone_scores.
    all.scores <- vector('list', length(pairs))
    names(all.scores) <- names(pairs)
    for (cl in names(pairs)) {
        pcg.state <- .setup_pcg_state(ncol(x))
        pairings <- pairs[[cl]]
        cur.scores <- colBlockApply(x, FUN=.cyclone_scores, niters=iter, miniters=min.iter,
            minpairs=min.pairs, marker1=pairings$first, marker2=pairings$second, indices=pairings$index,
            seeds=pcg.state$seeds[[1]], streams=pcg.state$streams[[1]], BPPARAM=BPPARAM)
        all.scores[[cl]] <- unlist(cur.scores)
    }

    # Assembling the output.
    scores <- do.call(data.frame, all.scores)
    scores.normalised <- scores/rowSums(scores)

    # Getting the phases: the larger of G1/G2M wins, but cells below 0.5 on
    # both scores are called S phase.
    phases <- ifelse(scores$G1 >= scores$G2M, "G1", "G2M")
    phases[scores$G1 < 0.5 & scores$G2M < 0.5] <- "S"

    list(phases=phases, scores=scores, normalized.scores=scores.normalised)
}
#' @importFrom DelayedArray currentViewport makeNindexFromArrayViewport
# Per-block wrapper for colBlockApply(): subsets the per-cell RNG seeds and
# streams to the columns covered by the current DelayedArray viewport before
# delegating to the compiled cyclone_scores() routine.
.cyclone_scores <- function(block, ..., seeds, streams) {
    vp <- currentViewport()
    cols <- makeNindexFromArrayViewport(vp, expand.RangeNSBS=TRUE)[[2]]
    # A NULL column index means the viewport spans all columns; otherwise,
    # keep only the RNG state for the cells in this block.
    if (!is.null(cols)) {
        seeds <- seeds[cols]
        streams <- streams[cols]
    }
    cyclone_scores(block, ..., seeds=seeds, streams=streams)
}
#' @export
#' @rdname cyclone
# Default method: any matrix-like object is passed straight to the workhorse.
setMethod("cyclone", "ANY", .cyclone)

#' @export
#' @rdname cyclone
#' @importFrom SummarizedExperiment assay
# SummarizedExperiment method: extract the requested assay matrix and
# delegate to the ANY method above.
setMethod("cyclone", "SummarizedExperiment", function(x, ..., assay.type="counts") {
    cyclone(assay(x, i=assay.type), ...)
})
08897ea5725cd4e9cea336a7650eee10b759aa9a | d71f89f90448ca3dae2efe3cfd2212e59d41eec4 | /man/RunCrossValidation.Rd | 615ad2321974054600b6c4342ff86aa7c8fc8c48 | [] | no_license | warnbergg/regone | ce2b5f9db882b40c025fc8a669e9d65f5404e3a8 | 851be8e37259b1b0f599038e08ae14c27365b454 | refs/heads/master | 2023-08-22T02:18:39.488804 | 2021-10-13T14:11:07 | 2021-10-13T14:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,006 | rd | RunCrossValidation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RunCrossValidation.R
\name{RunCrossValidation}
\alias{RunCrossValidation}
\title{RunCrossValidation}
\usage{
RunCrossValidation(dv, data, k = 10, dir = "./", save.plot = TRUE)
}
\arguments{
\item{dv}{Character vector of length 1. Dependent variable. No default.}
\item{data}{data.frame object. Data as prepared in "RunProject.R". Regressor values, predicted response, and residuals. No default.}
\item{k}{Numeric vector of length 1. Number of folds to use in k-fold cross-validation. Defaults to 10.}
\item{dir}{Character vector of length 1. Directory in which to store the cv-plot. Defaults to "./"}
\item{save.plot}{Logical vector of length 1. If TRUE then the mean cv error and all possible regression plots are saved to disk. Defaults to TRUE}
}
\description{
Run variable selection nested into cross-validation. Source: https://uc-r.github.io/model_selection. See section 6.1 of Introduction to Statistical Learning.
}
|
c827c694d497733d5f2801517644babd7852ff34 | 2f6efa6d60037a2ff6f608e7d3df3db6be460bbb | /R/query.R | dba1870ba218164d5fe9a49e8f2ee7b1b5323d61 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | glin/reactlog | 17b964877b21de5716aace895c9267492ff2e6e1 | 698ac245f71d3f35d32867013e60681ce57abff3 | refs/heads/master | 2021-05-08T21:04:12.901767 | 2018-03-02T04:31:48 | 2018-03-02T05:02:35 | 119,627,353 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,242 | r | query.R | #' Get a reactive context node
#'
#' @param x A reactive context, expression, observer. Defaults to the
#' current reactive context.
#' @param graph A reactive graph. Defaults to the reactive graph for the
#' current Shiny session.
#' @param invalidated If `TRUE`, get the last invalidated context.
#' @return A reactive context node.
#'
#' @family graph queries
#'
#' @export
getContextNode <- function(x = getCurrentContext(), graph = getReactGraph(),
                           invalidated = FALSE) {
  stopifnot(is.ReactGraph(graph))
  # No current context (e.g. called outside any reactive execution).
  if (is.null(x)) return(NULL)
  if (is.ContextNode(x)) {
    ctx <- x
  } else {
    # Accept a context ID string directly; otherwise derive the ID from the
    # reactive expression/observer and look it up in the graph.
    ctxId <- if (is.character(x)) x else getLastContextId(x)
    ctx <- graph$nodes[[ctxId]]
  }
  if (!is.ContextNode(ctx)) {
    if (shiny::is.reactive(x) && ctxId == "") {
      # Unevaluated reactive expression: it has no context yet, which is not
      # an error -- just return NULL.
      return(NULL)
    }
    msg <- sprintf('Could not find a context with ID "%s" in the reactive graph', ctxId)
    if (!reactLogEnabled()) {
      msg <- paste0(
        msg, "\n",
        "Did you enable the reactive log? See ?getReactGraph")
    }
    stop(msg, call. = FALSE)
  }
  if (invalidated && !ctx$invalidated) {
    # Caller asked for the last invalidated execution; step back one node.
    ctx <- ctx$prevNode
  }
  ctx
}
#' Get a reactive value node
#'
#' @param x A reactive value or reactive values object.
#' @param name The name of a value in a reactive values object.
#' @param graph A reactive graph. Defaults to the reactive graph for the
#' current Shiny session.
#' @return A reactive value node.
#'
#' @family graph queries
#'
#' @export
getValueNode <- function(x, name = NULL, graph = getReactGraph()) {
  stopifnot(is.ReactGraph(graph))
  # Already a node: nothing to look up.
  if (is.ValueNode(x)) {
    return(x)
  }
  if (shiny::is.reactivevalues(x) && is.null(name)) {
    stop("The name of a reactive value must be specified", call. = FALSE)
  }
  # Accept a label string directly; otherwise derive the label from the
  # reactive value (and 'name', for reactive values objects).
  if (is.character(x)) {
    label <- x
  } else {
    label <- getValueLabel(x, name = name)
  }
  node <- graph$nodes[[label]]
  if (is.ValueNode(node)) {
    return(node)
  }
  msg <- sprintf(
    'Could not find a value with label "%s" in the reactive graph',
    label
  )
  if (!reactLogEnabled()) {
    msg <- paste0(
      msg, "\n",
      "Did you enable the reactive log? See ?getReactGraph"
    )
  }
  stop(msg, call. = FALSE)
}
|
ff26caf77218c4211c5155b42eaea8f1d3d28592 | 1528645b51076a3036642e54ebfc4aeb525ece92 | /fastinR/R/DataIO.R | c2197aa361234175109b9179d1ee01843e948975 | [] | no_license | Philipp-Neubauer/fastinR | e1d058bca1b4f40917cb2f4fec5cd4c41a9092ce | e6081d0e65348131a8f17058cc35e95cab0513b5 | refs/heads/master | 2021-07-10T19:23:52.889509 | 2018-06-20T10:42:13 | 2018-06-20T10:42:13 | 8,904,070 | 4 | 5 | null | 2017-02-28T01:09:18 | 2013-03-20T12:54:23 | TeX | UTF-8 | R | false | false | 18,895 | r | DataIO.R | #' Add Covariates and Group indices for predators
#'
#' Files must be in .csv format
#'
#' @param Groups Index of group membership for each predator, one (named) column per grouping variable. Can be a file path to a csv file with the data or a data frame.
#' @param Covariates Covariate values for each predator, one (named) column per covariate. Can be a file path to a csv file with the data or a data frame.
#' @details Use \code{\link{simulation}} to simulate and write these files to inspect the file structure.
#' @seealso \code{\link{add_FA}},\code{\link{add_SI}},\code{\link{run_MCMC}},\code{\link{simulation}}
#' @author Philipp Neubauer
#' @references Neubauer,.P. and Jensen, O.P. (in prep)
#' @export
#' @examples
#' Cov_path <- system.file("extdata", "Simdata_Covariates.csv", package="fastinR")
#' Group_path<- system.file("extdata", "Simdata_Groups.csv", package="fastinR")
#' add_Covs(Groups=Group_path,Covariates=Cov_path)
add_Covs <- function(Groups = '', Covariates = '') {
  # check if GUI is being used -- the GUI front-end stores results in the
  # global environment via guiSet() instead of returning them.
  if (exists('GUI', envir = .GlobalEnv)) {
    GUI <- get('GUI', envir = .GlobalEnv)
  } else {
    GUI=F
  }

  # deal with Covariates: accept either a csv file path or a data frame.
  if (is.character(Covariates) & nchar(Covariates) > 0) {
    Covs <- read.csv(Covariates, header = T)
  } else if (is.data.frame(Covariates)) {
    Covs <- Covariates
  } else {
    Covs <- NULL
  }

  # Same treatment for Groups.
  if (is.character(Groups) & nchar(Groups) > 0) {
    Grps <- read.csv(Groups, header = T)
  } else if (is.data.frame(Groups)) {
    Grps <- Groups
  } else {
    Grps <- NULL
  }

  if (!is.null(Covs) & is.null(Grps) > 0) {
    # Covariates only: prepend an intercept column.
    Covs <- cbind(rep(1, nrow(Covs)), Covs)
    n.covs <- ncol(Covs)
    if (GUI) guiSet('Covs', Covs)
  } else if (is.null(Covs) & !is.null(Grps) > 0) {
    # Groups only: coerce each grouping column to a factor and expand into a
    # design matrix.
    Grp.names <- unlist(unique(Grps))
    for (i in 1:ncol(Grps)) {
      vg <- as.vector(Grps[, i])
      Grps[, i] <- as.factor(vg)
    }
    Covs <- model.matrix(attr(model.frame(1:nrow(Grps) ~ ., data = Grps), 'terms'), data = Grps)[, ]
    # NOTE(review): column names are the group levels in reverse order, which
    # assumes model.matrix() emits columns in that order -- TODO confirm for
    # multi-factor groupings.
    colnames(Covs) <- Grp.names[length(Grp.names):1]
    if (GUI) guiSet('Covs', Covs)
  } else if (!is.null(Covs) & !is.null(Grps) > 0) {
    # Both supplied: group design matrix first, then the covariate columns.
    Covnames <- names(Covs)
    Grp.names <- unlist(unique(Grps))
    for (i in 1:ncol(Grps)) {
      vg <- as.vector(Grps[, i])
      Grps[, i] <- as.factor(vg)
    }
    Covs <- cbind(model.matrix(attr(model.frame(1:nrow(Grps) ~ ., data = Grps), 'terms'), data = Grps)[, ], Covs)
    colnames(Covs) <- c(Grp.names[length(Grp.names):1], Covnames)
    if (GUI) guiSet('Covs', Covs)
  }

  # GUI mode has already stored the result globally; script mode returns it.
  if (!GUI) return(Covs)
}
#' Add Stable Isotope data for predators and prey items
#'
#' Files must be in .csv format.
#'
#' @param SI.predators A data frame or a path to a csv file with the predator index/names (first column) and Stable Isotopes (1 row per predator), with Stable Isotope named across the first row.
#' @param SI.preys A data frame or a path to a csv file with the prey names/sample id (first column) and SI measurements (1 row per prey item), with Stable Isotope named across the first row. Samples from the same species should have the same prey name/sample id.
#' @param Frac.Coeffs.mean A data frame or a path to a csv file with the prey specific additive fractionation coefficient means: Prey names (first column) and an n x P matrix for n preys and P Stable Isotopes
#' @param Frac.Coeffs.var A data frame or a path to a csv file with the prey specific fractionation coefficient variances, dimensions as for the means
#' @param FC.mean optional - if no prey specific fractionation coefficients are supplied via Frac.Coeffs.mean, FC.mean can provide either a global (single) mean coefficient or Stable Isotope specific mean coefficients using R's c(SI_1,SI_2,...) notation for ALL Stable Isotopes.
#' @param FC.var optional - if no prey specific fractionation coefficients are supplied via Frac.Coeffs.var, FC.var can provide either a global (single) coefficient variance or Stable Isotope specific coefficient variances using R's c(SI_1,SI_2,...) notation for ALL Stable Isotopes.
#' @param datas a data structure as produced by \code{\link{add_SI}}, needed if fatty acids and stable isotopes are added sequentially.
#' @details Use \code{\link{simulation}} to simulate and write these files to inspect the file structure.
#' @seealso \code{\link{add_FA}},\code{\link{add_Covs}},\code{\link{run_MCMC}},\code{\link{simulation}}
#' @author Philipp Neubauer
#' @references Neubauer,.P. and Jensen, O.P. (in prep)
#' @examples
#' SI.predators <- system.file("extdata", "Simdata_SI_preds.csv", package="fastinR")
#' SI.preys <- system.file("extdata", "Simdata_SI_preys.csv", package="fastinR")
#' Frac.Coeffs.mean <- system.file("extdata", "Simdata_SI_fc_means.csv", package="fastinR")
#' Frac.Coeffs.var <- system.file("extdata", "Simdata_SI_fc_var.csv", package="fastinR")
#' dats <- add_SI(SI.predators=SI.predators,SI.preys=SI.preys,Frac.Coeffs.mean=Frac.Coeffs.mean,Frac.Coeffs.var=Frac.Coeffs.var)
#' @export
add_SI <- function(SI.predators=NULL,SI.preys=NULL,Frac.Coeffs.mean='',Frac.Coeffs.var='',FC.mean=1,FC.var=1,datas=NULL){
  # Check whether the package GUI is driving this call; if so, the current
  # data structure is fetched from the GUI state rather than the 'datas' arg.
  if(exists('GUI',envir=.GlobalEnv)){
    GUI <- get('GUI',envir=.GlobalEnv)
    if (GUI) datas <- guiGetSafe('datas')
  } else {
    GUI=F
  }
  # Import predator and prey SI data; both arguments accept either a file
  # path or a data frame. The first column of the prey table is the prey
  # species/group label; predator row names come from the first column.
  if(is.character(SI.predators)) {
    predators.SI = read.csv(SI.predators,header=T,row.names=1)
  } else {
    predators.SI = SI.predators
  }
  if(is.character(SI.preys)) {
    preys.SI = read.csv(SI.preys,header=T)
  } else {
    preys.SI = SI.preys
  }
  n.preds <- dim(predators.SI)[1]
  preys.ix.SI <- as.character(preys.SI[,1])   # prey group label per prey sample
  preys.SI <- preys.SI[,-1]
  preys.names <- unique(preys.ix.SI)
  if(GUI) guiSet('prey.names',preys.names )
  # number of isotopes and of prey species/groups
  isos=ncol(predators.SI)
  n.preys <- length(preys.names)
  # Fractionation coefficients: TRUE when the user supplied coefficients,
  # either as a non-empty file path or directly as a data frame/matrix
  # (the default "not supplied" placeholder is an empty string).
  fc_supplied <- function(x) !is.character(x) || any(nchar(x) > 0)
  if (xor(fc_supplied(Frac.Coeffs.mean), fc_supplied(Frac.Coeffs.var)))
  {
    stop('The mean AND variances of FCs for each isotope need to be supplied')
  } else if (fc_supplied(Frac.Coeffs.mean) && fc_supplied(Frac.Coeffs.var))
  {
    # BUG FIX: this previously tested is.character(SI.predators), so the
    # coefficient files were only read when the *predator* argument happened
    # to be a file path; test the coefficient argument itself instead.
    if(is.character(Frac.Coeffs.mean)) {
      mean_cs = read.csv(Frac.Coeffs.mean,header=T,row.names=1)
      var_cs = read.csv(Frac.Coeffs.var,header=T,row.names=1)
    } else {
      mean_cs = Frac.Coeffs.mean
      var_cs = Frac.Coeffs.var
    }
    stopifnot(dim(mean_cs)[1]==n.preys & dim(mean_cs)[2]==isos)
    stopifnot(dim(var_cs)[1]==n.preys & dim(var_cs)[2]==isos)
  } else
  {
    # No coefficients supplied: expand the scalar/vector FC.mean and FC.var
    # into prey-by-isotope matrices.
    mean_cs = matrix(FC.mean,ncol=isos,nrow=n.preys,byrow=T, dimnames = list(unique(preys.ix.SI), NULL))
    var_cs = matrix(FC.var,ncol=isos,nrow=n.preys,byrow=T, dimnames = list(unique(preys.ix.SI), NULL))
  }
  # Per-prey mean isotope signatures
  preym.SI <- matrix(,n.preys,isos)
  for (i in 1:n.preys){
    preym.SI[i,] <- apply(preys.SI[preys.ix.SI==unique(preys.ix.SI)[i],],2,mean)
  }
  # Scaled covariance matrices (covariance times effective sample size) and
  # effective sample sizes per prey group, used by the samplers downstream.
  R.SI <- array(,c(isos,isos,n.preys))
  ni.SI<-rep(NA,n.preys)
  for (i in 1:n.preys){
    # ni is floored at isos+1 so the scale matrix stays usable for small groups
    ni.SI[i] <- max(isos+1,sum(preys.ix.SI==unique(preys.ix.SI)[i])-1)
    R.SI[,,i]=cov(preys.SI[preys.ix.SI==unique(preys.ix.SI)[i],])*ni.SI[i]
  }
  # If fatty acid data were added first, align predator rows across the two
  # marker types: rows present in only one data set are padded with NA.
  if (length(datas$datas.FA)>1){
    if(length(rownames(datas$datas.FA$preds.FA)) != length(rownames(predators.SI)) |
         any(rownames(datas$datas.FA$preds.FA) %in% rownames(predators.SI) ==F)) {
      urows <- unique(c(rownames(datas$datas.FA$preds.FA),rownames(predators.SI)))
      n.preds <- length(urows)
      # expand the fatty acid profiles to the union of predator rows
      new.FA.rep <- data.frame(matrix(,n.preds,datas$datas.FA$n.fats))
      rownames(new.FA.rep) <- urows
      colnames(new.FA.rep) <- colnames(datas$datas.FA$preds.FA)
      mix <- match(rownames(datas$datas.FA$preds.FA),rownames(new.FA.rep))
      new.FA.rep[mix,] <- datas$datas.FA$preds.FA
      datas$datas.FA$preds.FA <- new.FA.rep
      datas$datas.FA$preds <- alr(datas$datas.FA$preds.FA)
      # expand the SI data likewise
      new.SI.rep <- data.frame(matrix(,n.preds,isos))
      rownames(new.SI.rep) <- urows
      colnames(new.SI.rep) <- colnames(predators.SI)
      mix <- match(rownames(predators.SI),rownames(new.SI.rep))
      new.SI.rep[mix,] <- predators.SI
      predators.SI <- new.SI.rep
    }
  }
  # Assemble the SI data list; tau_cs are precisions (1/variance) of the
  # fractionation coefficients.
  datas.SI <- list(isos=isos,R.SI=R.SI,Rnot.SI=NULL,preys.SI=preys.SI,preym.SI=preym.SI,preds.SI=predators.SI,ni.SI=ni.SI,mean_cs=mean_cs,tau_cs=1/var_cs)
  if(length(datas)<=1){
    datas <- list(n.preys = n.preys,n.preds=n.preds,prey.ix.SI=preys.ix.SI,datas.FA=NULL,datas.SI=datas.SI,even=NULL)
    class(datas) <- 'Stable_Isotopes'
  } else {
    datas$datas.SI = datas.SI
    datas$n.preys = n.preys
    datas$n.preds = n.preds
    datas$prey.ix.SI = preys.ix.SI
    class(datas) <- 'Combined_Markers'
  }
  # Return the structure, or push it into GUI state when running in the GUI.
  ifelse(GUI,guiSet('datas',datas),return(datas))
}
#' Add Fatty Acid profile data for predators and prey items
#'
#' Files must be in .csv format.
#'
#' @param FA.predators A data frame or a path to a csv file with the predator index/names (first column) and fatty acid profiles (1 row per predator), with fatty acids named across the first row
#' @param FA.preys A data frame or a path to a csv file with the prey names/sample id (first column) and fatty acid profiles (1 row per prey item), with fatty acids names across the first row
#' @param fat.conts A data frame or a path to a csv file with the prey fat contents, as (columnwise) mean and variance per prey species or specified for each prey sample for the main analysis, in that case the first column is the prey sample id and the second column is the individual sample's fat content
#' @param Conv.Coeffs.mean A data frame or a path to a csv file with the prey specific conversion coefficient means: Prey names (first column) and an n x P matrix for n preys and P fatty acids
#' @param Conv.Coeffs.var A data frame or a path to a csv file with the prey specific conversion coefficient variances, dimensions as for the means
#' @param FC.mean optional - if no prey or sample specific fat content means are supplied in a fat.conts file, prey specific coefficients can be entered here using R's c(FC_1,FC_2,...) notation.
#' @param FC.var optional - if no prey or sample specific fat content variances are supplied in a fat.conts file, prey specific coefficients can be entered here using R's c(FC_1,FC_2,...) notation.
#' @param CC.mean optional - if no prey specific conversion coefficients are supplied via Conv.Coeffs.mean, CC.mean can provide either a global (single) mean coefficient or fatty acid specific mean coefficients using R's c(FA_1,FA_2,...) notation for ALL fatty acids.
#' @param CC.var optional - if no prey specific conversion coefficients are supplied via Conv.Coeffs.mean, CC.var can provide either a global (single) coefficient variance or fatty acid specific coefficient variances using R's c(FA_1,FA_2,...) notation for ALL fatty acids.
#' @param datas a data structure as produced by \code{\link{add_SI}}, needed if fatty acids and stable isotopes are added sequentially.
#' @param LN.par - are fat content means and variances given as log-normal parameters or sample mean and variance?
#' @details Use \code{\link{simulation}} to simulate and write these files to inspect the file structure.
#' @seealso \code{\link{add_SI}},\code{\link{add_Covs}},\code{\link{select_vars}},\code{\link{run_MCMC}},\code{\link{simulation}}
#' @author Philipp Neubauer
#' @references Neubauer, P. and Jensen, O.P. (in prep)
#' @examples
#' FA.predators <- system.file("extdata", "Simdata_FA_preds.csv", package="fastinR")
#' FA.preys <- system.file("extdata", "Simdata_FA_preys.csv", package="fastinR")
#' Conv.Coeffs.mean <- system.file("extdata", "Simdata_FA_cc_means.csv", package="fastinR")
#' Conv.Coeffs.var <- system.file("extdata", "Simdata_FA_cc_var.csv", package="fastinR")
#' fat.conts <- system.file("extdata", "Simdata_fat_cont.csv", package="fastinR")
#' dats <- add_FA(FA.predators=FA.predators,FA.preys=FA.preys,fat.conts=fat.conts,Conv.Coeffs.mean=Conv.Coeffs.mean,Conv.Coeffs.var=Conv.Coeffs.var)
#' @export
add_FA <- function(FA.predators=NULL,FA.preys=NULL,fat.conts = '',Conv.Coeffs.mean='',Conv.Coeffs.var='',FC.mean=1,FC.var=1,CC.mean=1,CC.var=1,datas=NULL,LN.par=F){
  # Check whether the package GUI is driving this call; if so, the current
  # data structure is fetched from the GUI state rather than the 'datas' arg.
  if(exists('GUI',envir=.GlobalEnv)){
    GUI <- get('GUI',envir=.GlobalEnv)
    if (GUI) datas <- guiGetSafe('datas')
  } else {
    GUI=F
  }
  # Import predator and prey FA profiles (file path or data frame).
  if (is.character(FA.predators)) {
    predators = read.csv(FA.predators,header=T,row.names=1)
  } else {predators = FA.predators}
  if (is.character(FA.preys)){
    preys = read.csv(FA.preys,header=T)
  } else {
    preys = FA.preys
  }
  n.preds <- dim(predators)[1]
  preys.ix <- as.character(preys[,1])   # prey group label per prey sample
  preys.names <- as.character(unique(preys.ix))
  if (GUI) guiSet('prey.names',preys.names )
  preys = preys[,-1]
  # The additive log-ratio transform below is undefined for zero values.
  # BUG FIX: this previously tested '< 0', which let zero values through even
  # though the error message (and the alr transform) requires strictly
  # positive values.
  if (any(preys<=0) | any(predators<=0)){
    stop('Fatty acid values must all be greater than 0. Please replace zeros with a small number or use a more advanced heuristic or statistic to figure out an appopriate value.')
  }
  n.fats = ncol(predators)
  m.fats = n.fats-1   # alr transform reduces the dimension by one
  # number of prey species/groups
  n.preys <- length(unique(preys.ix))
  # Conversion coefficients: TRUE when supplied, either as a non-empty file
  # path or directly as a data frame/matrix ('' is the default placeholder).
  cc_supplied <- function(x) !is.character(x) || any(nchar(x) > 0)
  if (cc_supplied(Conv.Coeffs.mean) && cc_supplied(Conv.Coeffs.var))
  {
    # Accept a data frame/matrix directly, mirroring the documented
    # interface (previously only a file path worked here).
    if (is.character(Conv.Coeffs.mean)) {
      mean_c = read.csv(Conv.Coeffs.mean,header=T,row.names=1)
      var_c = read.csv(Conv.Coeffs.var,header=T,row.names=1)
    } else {
      mean_c = Conv.Coeffs.mean
      var_c = Conv.Coeffs.var
    }
    if(dim(mean_c)[1]!=n.preys) stop('Number of prey in Conv.Coeffs.mean does not equal number of prey in FA.preys.')
    if(dim(mean_c)[2]!=n.fats) stop('Number of fatty acids in Conv.Coeffs.mean does not equal number of fatty acids in FA.predators.')
    if(dim(var_c)[1]!=n.preys) stop('Number of prey in Conv.Coeffs.var does not equal number of prey in FA.preys.')
    if(dim(var_c)[2]!=n.fats) stop('Number of fatty acids in Conv.Coeffs.var does not equal number of fatty acids in FA.predators.')
  } else if (!cc_supplied(Conv.Coeffs.mean) && !cc_supplied(Conv.Coeffs.var))
  {
    # Expand CC.mean/CC.var (a global scalar, or one value per fatty acid)
    # into prey-by-fatty-acid matrices.
    if(length(CC.mean)==n.fats){
      mean_c = matrix(CC.mean,n.preys,n.fats,byrow=T)
      var_c =matrix(CC.var,n.preys,n.fats,byrow=T)
    } else {
      mean_c = matrix(CC.mean,n.preys,n.fats, dimnames = list(unique(preys.ix), NULL))
      var_c =matrix(CC.var,n.preys,n.fats, dimnames = list(unique(preys.ix), NULL))
    }
  } else
  {
    # BUG FIX: this branch used print(), which let execution continue and
    # fail later with an obscure "object 'mean_c' not found" error.
    stop('Known conversion coefficients, or a mean AND variance for conversion coefficients need to be supplied')
  }
  # Reparameterize the conversion coefficients as gamma shape and rate.
  # NOTE: after this block mean_c holds the gamma *shape* and var_c the
  # gamma *rate*; the names are kept for compatibility with downstream code.
  rate <- mean_c/var_c
  shape <- mean_c^2/var_c
  mean_c <- shape
  var_c <- rate
  # Fat content: either prey-specific values passed via FC.mean/FC.var, or a
  # table (file path or data frame) with per-prey or per-sample values.
  if(is.character(fat.conts) && nchar(fat.conts)==0)
  {
    if(length(FC.mean) == n.preys & length(FC.var) == n.preys)
    {
      fc.mean <- FC.mean; fc.var <- FC.var
    } else if(length(FC.mean) == 1 & length(FC.var) == 1){
      fc.mean <- rep(FC.mean,n.preys); fc.var <- rep(FC.var,n.preys)
    } else {stop('Fat content mean and variance need to be either a single number, or supplied as a vector of length equal to the number of prey items - use R c() notation in that case. In the latter case, or for individual sample fat content please supply a file')}
  } else
  {
    # Accept a data frame directly or read from file.
    fat.cont <- if (is.character(fat.conts)) read.csv(fat.conts,header=F) else fat.conts
    if (dim(fat.cont)[2]>2){
      # more than two columns: the first column is an id column, the next
      # two hold the mean and variance
      if (is.character(fat.conts)) {
        fat.cont <- read.csv(fat.conts,header=F,row.names=1)
      } else {
        rownames(fat.cont) <- fat.cont[,1]; fat.cont <- fat.cont[,-1]
      }
      fc.mean <- fat.cont[,1];fc.var <- fat.cont[,2]
    } else if (dim(fat.cont)[2]==2){
      fc.mean <- fat.cont[,1];fc.var <- fat.cont[,2]
    } else {
      # single column of per-sample fat contents: aggregate by prey group.
      # BUG FIX: tapply was previously called on the 1-column data frame
      # itself rather than its column.
      fc.mean <- tapply(fat.cont[,1],preys.ix,mean)
      fc.var <- tapply(fat.cont[,1],preys.ix,var)
    }
  }
  # Convert sample mean/variance of fat content to log-normal parameters
  # unless the inputs are already log-normal parameters (LN.par = TRUE).
  if(LN.par == F){
    fc.var = log(fc.var + fc.mean^2) - 2*log(fc.mean)
    fc.mean = log(fc.mean)-fc.var/2
  }
  # Close the compositions (rescale each profile to sum to 1).
  predators <- clo(predators)
  preys <- clo(preys)
  # Per-prey geometric-mean profiles, then additive log-ratio transform.
  mprey <- aggregate(preys,list(preys.ix),gmean)[,2:(n.fats+1)]
  preym <- unclass(alr(mprey))
  preds <- unclass(alr(predators))
  # Scaled covariance matrices (in alr space) and effective sample sizes per
  # prey group; ni is floored at n.fats+1 to keep the scale matrix usable.
  R <- array(,c(m.fats,m.fats,n.preys))
  ni<-rep(NA,n.preys)
  for (i in 1:n.preys){
    if(sum(preys.ix==unique(preys.ix)[i]) < 2)
      stop(paste("There must be at least two of each kind of prey: There is only", sum(preys.ix==unique(preys.ix)[i]), unique(preys.ix)[i], "."))
    ni[i] <- max(n.fats+1,sum(preys.ix==unique(preys.ix)[i])-1)
    R[,,i]=cov(alr(preys[preys.ix==unique(preys.ix)[i],]))*ni[i]
  }
  # If stable isotope data were added first, align predator rows across the
  # two marker types: rows present in only one data set are padded with NA.
  if (length(datas$datas.SI)>1){
    if(length(rownames(datas$datas.SI$preds.SI)) != length(rownames(predators)) |
         any(rownames(datas$datas.SI$preds.SI) %in% rownames(predators) ==F)) {
      urows <- unique(c(rownames(datas$datas.SI$preds.SI),rownames(predators)))
      n.preds <- length(urows)
      # expand the fatty acid profiles to the union of predator rows
      new.FA.rep <- data.frame(matrix(,n.preds,n.fats))
      rownames(new.FA.rep) <- urows
      colnames(new.FA.rep) <- colnames(predators)
      mix <- match(rownames(predators),rownames(new.FA.rep))
      new.FA.rep[mix,] <- predators
      predators <- new.FA.rep
      preds <- alr(predators)
      # expand the SI data likewise
      new.SI.rep <- data.frame(matrix(,n.preds,datas$datas.SI$isos))
      rownames(new.SI.rep) <- urows
      colnames(new.SI.rep) <- colnames(datas$datas.SI$preds.SI)
      mix <- match(rownames(datas$datas.SI$preds.SI),rownames(new.SI.rep))
      new.SI.rep[mix,] <- datas$datas.SI$preds.SI
      datas$datas.SI$preds.SI <- new.SI.rep
    }
  }
  # Assemble the FA data list; tau_c holds the gamma rate (see the
  # reparameterization above) and fc_tau the log-scale fat content precision.
  datas.FA <- list(fc_mean=fc.mean,fc_tau=1/fc.var,n.fats=n.fats,m.fats=m.fats,R=R,Rnot=NULL,preys=preys,preds.FA=predators,preym=preym,preds=preds,ni=ni,mean_c=mean_c,tau_c=var_c)
  if(length(datas)<=1){
    datas <- list(n.preys = n.preys,n.preds=n.preds,prey.ix=preys.ix,datas.FA=datas.FA,datas.SI=NULL,even=NULL)
    class(datas) <- 'Fatty_Acid_Profiles'
  } else {
    datas$datas.FA = datas.FA
    datas$n.preys = n.preys
    datas$n.preds=n.preds
    datas$prey.ix=preys.ix
    class(datas) <- 'Combined_Markers'
  }
  # Return the structure, or push it into GUI state when running in the GUI.
  ifelse(GUI,guiSet('datas',datas),return(datas))
}
|
cd221dbd76a6242f9764f94691489bbfb6a141b2 | c54dbb850db9657df7c4a9f805263d8701addd5e | /Tools/EPIC/x86_64-pc-linux-gnu-library/3.4/pcaMethods/doc/missingValues.R | 7484ffffa6eace8d853f5c7b9d3af9507587445e | [] | no_license | grst/benchmarking-transcriptomics-deconvolution | 873cc32eb6908d1200fb7a7fb0dfffafb1554aa6 | 521fb8610b5d1e68bef0a725bea66f8b36e64b01 | refs/heads/master | 2023-07-05T15:16:42.446003 | 2021-08-27T07:11:02 | 2021-08-27T07:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,724 | r | missingValues.R | ### R code from vignette source 'missingValues.Rnw'
###################################################
### code chunk number 1: missingValues.Rnw:43-44
###################################################
# Load pcaMethods (Bioconductor): provides pca(), completeObs(), asExprSet().
library(pcaMethods)
###################################################
### code chunk number 2: missingValues.Rnw:46-49
###################################################
# Example metabolite data set shipped with pcaMethods; count its NA entries.
data(metaboliteData)
mD <- metaboliteData
sum(is.na(mD))
###################################################
### code chunk number 3: missingValues.Rnw:52-54
###################################################
# Fit probabilistic PCA with 3 components and extract the data matrix with
# the missing values replaced by their PPCA estimates.
pc <- pca(mD, nPcs=3, method="ppca")
imputed <- completeObs(pc)
###################################################
### code chunk number 4: missingValues.Rnw:58-61
###################################################
# Compare the imputed values against the complete reference data set:
# sum of squared errors at the formerly-missing positions, normalized by
# the squared magnitude of the true values.
data(metaboliteDataComplete)
mdComp <- metaboliteDataComplete
sum((mdComp[is.na(mD)] - imputed[is.na(mD)])^2) / sum(mdComp[is.na(mD)]^2)
###################################################
### code chunk number 5: missingValues.Rnw:64-66
###################################################
# Same normalized error for NIPALS-based imputation, for comparison with PPCA.
imputedNipals <- completeObs(pca(mD, nPcs=3, method="nipals"))
sum((mdComp[is.na(mD)] - imputedNipals[is.na(mD)])^2) / sum(mdComp[is.na(mD)]^2)
###################################################
### code chunk number 6: missingValues.Rnw:71-80
###################################################
# Imputation on a Biobase ExpressionSet: knock out 200 random expression
# entries, impute them with PPCA, rebuild an ExpressionSet, and report the
# normalized squared error at the knocked-out positions.
library(Biobase)
data(sample.ExpressionSet)
exSet <- sample.ExpressionSet
exSetNa <- exSet
exprs(exSetNa)[sample(13000, 200)] <- NA
lost <- is.na(exprs(exSetNa))
pc <- pca(exSetNa, nPcs=2, method="ppca")
impExSet <- asExprSet(pc, exSetNa)
sum((exprs(exSet)[lost] - exprs(impExSet)[lost])^2) / sum(exprs(exSet)[lost]^2)
|
efcfcbbe5a25119bfbb7422b506a78627a9a0431 | 1d7e7ca5f9c2ef36da39190f544f98257cbe4108 | /AustinMMSDWorkflow.R | 21f9b259b768e97e025b0d07ead6f39a699cfe5f | [] | no_license | jlthomps/GLRIBMPs | 1d2d4524abcb5dcf2d6e1959106e58c7113ee172 | 7b92e55995b4e39f6204f799bedf19379700afdc | refs/heads/master | 2020-03-27T03:30:08.516809 | 2016-01-25T18:55:29 | 2016-01-25T18:55:29 | 12,004,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,138 | r | AustinMMSDWorkflow.R | ###########################################################################
# Do once:
#library(devtools)
#install.packages(c("USGSwsData","USGSwsBase","USGSwsGraphs","USGSwsQW","USGSwsStats","dataRetrieval","GSqwsr"),repos="http://usgs-r.github.com")
###########################################################################
library(dataRetrieval)
# Retrieve unit-value (instantaneous) data from NWIS for the study site.
# Parameter codes: 00060 discharge, 00095 specific conductance, 63680
# turbidity, 00010 water temperature, 00300 dissolved oxygen (this mapping
# is confirmed by the colnames() assignments below).
siteNo <- "04087120"
StartDt <- "2008-11-01"
EndDt <- "2009-12-31"
adaps_disch_in <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
# Later years are fetched in separate calls -- presumably to keep each NWIS
# request small (TODO confirm); results are row-bound together below.
StartDt <- "2010-01-01"
EndDt <- "2010-12-31"
adaps_disch_in2 <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in2 <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in2 <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in2 <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in2 <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
StartDt <- "2011-01-01"
EndDt <- "2011-12-31"
adaps_disch_in3 <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in3 <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in3 <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in3 <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in3 <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
# Stack 2008-2011 into the main data frames
adaps_disch_in <- rbind(adaps_disch_in,adaps_disch_in2,adaps_disch_in3)
adaps_cond_in <- rbind(adaps_cond_in,adaps_cond_in2,adaps_cond_in3)
adaps_turb_in <- rbind(adaps_turb_in,adaps_turb_in2,adaps_turb_in3)
adaps_temp_in <- rbind(adaps_temp_in,adaps_temp_in2,adaps_temp_in3)
adaps_do_in <- rbind(adaps_do_in,adaps_do_in2,adaps_do_in3)
siteNo <- "04087120"
StartDt <- "2012-01-01"
EndDt <- "2012-12-31"
adaps_disch_in4 <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in4 <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in4 <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in4 <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in4 <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
StartDt <- "2013-01-01"
EndDt <- "2013-12-31"
adaps_disch_in5 <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in5 <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in5 <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in5 <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in5 <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
StartDt <- "2014-01-01"
EndDt <- "2014-12-31"
adaps_disch_in6 <- readNWISuv(siteNo,'00060',StartDt,EndDt,tz="America/Chicago")
adaps_cond_in6 <- readNWISuv(siteNo,'00095',StartDt,EndDt,tz="America/Chicago")
adaps_turb_in6 <- readNWISuv(siteNo,'63680',StartDt,EndDt,tz="America/Chicago")
adaps_temp_in6 <- readNWISuv(siteNo,'00010',StartDt,EndDt,tz="America/Chicago")
adaps_do_in6 <- readNWISuv(siteNo,'00300',StartDt,EndDt,tz="America/Chicago")
# Stack 2012-2014 onto the main frames and standardize the column names;
# 'pdate' (POSIXct date-time) and the final value column are what the rest
# of this script relies on.
adaps_disch_in <- rbind(adaps_disch_in,adaps_disch_in4,adaps_disch_in5,adaps_disch_in6)
colnames(adaps_disch_in) <- c("agency","siteNo","pdate","tz_cd","rmrk","disch")
adaps_cond_in <- rbind(adaps_cond_in,adaps_cond_in4,adaps_cond_in5,adaps_cond_in6)
colnames(adaps_cond_in) <- c("agency","siteNo","pdate","tz_cd","rmrk","cond")
adaps_turb_in <- rbind(adaps_turb_in,adaps_turb_in4,adaps_turb_in5,adaps_turb_in6)
colnames(adaps_turb_in) <- c("agency","siteNo","pdate","tz_cd","rmrk","turb")
adaps_temp_in <- rbind(adaps_temp_in,adaps_temp_in4,adaps_temp_in5,adaps_temp_in6)
colnames(adaps_temp_in) <- c("agency","siteNo","pdate","tz_cd","rmrk","temp")
adaps_do_in <- rbind(adaps_do_in,adaps_do_in4,adaps_do_in5,adaps_do_in6)
colnames(adaps_do_in) <- c("agency","siteNo","pdate","tz_cd","rmrk","do")
# Discrete water-quality samples over the whole study period
StartDt <- "2008-11-01"
EndDt <- "2014-12-31"
#00530 TSS
tss_data <- readNWISqw(siteNo,'00530',StartDt,EndDt,tz="America/Chicago")
#00665 Phosphorus
tp_data <- readNWISqw(siteNo,'00665',StartDt,EndDt,tz="America/Chicago")
#00940 Chloride
cl_data <- readNWISqw(siteNo,'00940',StartDt,EndDt,tz="America/Chicago")
#31616 fecal coliform
fecal_data <- readNWISqw(siteNo,'31616',StartDt,EndDt,tz="America/Chicago")
#50468 E coli
ecoli_data <- readNWISqw(siteNo,'50468',StartDt,EndDt,tz="America/Chicago")
# (Disabled) time-zone normalization experiment kept for reference.
#library(lubridate)
#adaps_cond_in$pdate2 <- force_tz(adaps_cond_in$pdate,tzone="UTC")
#adaps_disch_in$pdate2 <- force_tz(adaps_disch_in$pdate,tzone="UTC")
#adaps_do_in$pdate2 <- force_tz(adaps_do_in$pdate,tzone="UTC")
#adaps_temp_in$pdate2 <- force_tz(adaps_temp_in$pdate,tzone="UTC")
#adaps_turb_in$pdate2 <- force_tz(adaps_turb_in$pdate,tzone="UTC")
# Merge response variable samples
# Full outer joins on sample start time, so each dataMerge row is one
# sampling event with all five constituents (NA where not sampled). The
# numeric indices select site number plus remark/result/reporting-level
# columns of the readNWISqw output -- NOTE(review): these positions depend
# on the dataRetrieval output layout; confirm against the installed version.
dataMerge <- merge(cl_data[,c(2,14:15,19,23)],fecal_data[,c(14:15,19,23)],by="startDateTime",all=TRUE)
dataMerge <- merge(dataMerge,tss_data[,c(14:15,19,23)],by="startDateTime",all=TRUE)
dataMerge <- merge(dataMerge,tp_data[,c(14:15,19,23)],by="startDateTime",all=TRUE)
dataMerge <- merge(dataMerge,ecoli_data[,c(14:15,19,23)],by="startDateTime",all=TRUE)
colnames(dataMerge) <- c("dateTime","site","remark00940Cl","result00940Cl","rptlev00940Cl","remark31616Fec","result31616Fec","rptlev31616Fec","remark00530TSS","result00530TSS","rptlev00530TSS","remark00665TP","result00665TP","rptlev00665TP","remark50468Ec","result50468Ec","rptlev50468Ec")
# Add nearest instantaneous measurements to response variable samples.
# For each sampling event, attach the unit-value measurement closest in time
# for each continuous parameter, provided it falls within +/- 15 minutes of
# the sample time; otherwise NA is recorded. (Refactors five copy-pasted
# stanzas of the original loop into one helper.)
nearest_uv <- function(uv_data, sample_time, value_col, window = 15 * 60) {
  # uv_data:     unit-value data frame with a POSIXct 'pdate' column
  # sample_time: POSIXct time of the water-quality sample
  # value_col:   name of the column holding the measured value
  # window:      half-width of the search window, in seconds
  in_window <- uv_data[uv_data$pdate <= sample_time + window &
                         uv_data$pdate >= sample_time - window, ]
  if (nrow(in_window) == 0) return(NA)
  in_window[which.min(abs(difftime(sample_time, in_window$pdate))), value_col]
}
n <- nrow(dataMerge)
for (i in seq_len(n)) {
  dataMerge$q[i]    <- nearest_uv(adaps_disch_in, dataMerge$dateTime[i], "disch")
  dataMerge$do[i]   <- nearest_uv(adaps_do_in,    dataMerge$dateTime[i], "do")
  dataMerge$cond[i] <- nearest_uv(adaps_cond_in,  dataMerge$dateTime[i], "cond")
  dataMerge$temp[i] <- nearest_uv(adaps_temp_in,  dataMerge$dateTime[i], "temp")
  dataMerge$turb[i] <- nearest_uv(adaps_turb_in,  dataMerge$dateTime[i], "turb")
}
# Cache the raw unit-value data and the merged sample table for later reuse.
save(adaps_disch_in,adaps_do_in,adaps_temp_in,adaps_turb_in,adaps_cond_in,dataMerge,file="AustinDataWawa.RData")
siteNo <- "04087120"
library(GSqwsr)
# Decimal year and first-harmonic seasonal terms used as candidate
# explanatory variables in the regressions below.
dataMerge$decYear <- getDecYear(dataMerge$dateTime)
dataMerge$sinDY <- sin(dataMerge$decYear*2*pi)
dataMerge$cosDY <- cos(dataMerge$decYear*2*pi)
# Keep only rows with positive discharge -- presumably because discharge is
# log-transformed in the regression formulas (TODO confirm).
dataMerge <- dataMerge[dataMerge$q>0,]
##############################################################################
# Regression for Computed Chloride
# Build a calibration data set (sampled chloride paired with discharge and
# seasonal terms), run GSqwsr's censored step-wise regression, and write
# diagnostic plots plus a model summary under pathToSave.
data_sub <- dataMerge[!is.na(dataMerge$result00940Cl),]
#data_sub <- data_sub[which(data_sub$dateTime<strptime("2011-10-01",format="%Y-%m-%d")),]
#choose columns to keep for analysis
keepVars <- names(dataMerge)[-which(names(dataMerge) %in% c("site","rptlev31616Fec","result31616Fec","remark31616Fec","remark00530TSS","result00530TSS","rptlev00530TSS","result00665TP","remark00665TP","rptlev00665TP","rptlev50468Ec","remark50468Ec","result50468Ec","do","cond","temp","turb"))]
keepAll <- keepVars
data_sub <- data_sub[which(names(data_sub) %in% keepAll)]
# drop samples with no concurrent discharge measurement
data_sub <- data_sub[!is.na(data_sub$q),]
data_sub <- data_sub[,c("dateTime","remark00940Cl","result00940Cl","rptlev00940Cl","q","decYear","sinDY","cosDY")]
#data_sub$result00940Cl <- log(data_sub$result00940Cl)
#data_sub$rptlev00940Cl <- log(data_sub$rptlev00940Cl)
# set necessary site information and inputs to step-wise regression;
# importQW builds the censored-data object GSqwsr expects (value + remark +
# reporting level) -- NOTE(review): confirm the "tons"/"Unk" metadata slots
# against the importQW documentation.
#library(GSqwsr)
data_sub_cens <- importQW(data_sub,c("q","decYear","sinDY","cosDY"),"result00940Cl","remark00940Cl","","rptlev00940Cl","User","tons","Unk","","00940","CompCl")
#data_sub_cens <- importQW(data_sub,c("intensity","I5","I10","I60","ARF1","rain","duration","peakDisch","decYear"),"TPLoad","remark","",0.005,"User","tons","Unk","","00665","TPLoading")
siteName <- "MenomoneeWawa"
siteINFO <- readNWISsite(siteNo)
# name of value column in data_sub_cens object
investigateResponse <- "CompCl"
# choose 'normal' or 'lognormal' distribution for data
transformResponse <- "lognormal"
pathToSave <- paste("/Users/jlthomps/Documents/R/MMSD/",siteName,sep="")
#################################################################################################
#Kitchen sink: step-wise search over the full candidate formula, selected by BIC
predictVariables <- names(data_sub_cens)[-which(names(data_sub_cens) %in% investigateResponse)]
predictVariables <- predictVariables[which(predictVariables != "datetime")]
predictVariables <- predictVariables[which(predictVariables != "decYear")]
kitchenSink <- createFullFormula(data_sub_cens,investigateResponse)
returnPrelim <- prelimModelDev(data_sub_cens,investigateResponse,kitchenSink,
                               "BIC", #Other option is "AIC"
                               transformResponse)
steps <- returnPrelim$steps
modelResult <- returnPrelim$modelStuff
modelReturn <- returnPrelim$DT.mod
colnames(steps) <- c("step","BIC","Deviance","Resid.Dev","Resid.Df","Correlation","Slope","RMSE","PRESS","scope","response")
#Save plotSteps to file (one PDF device per plot set):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/plotStepsMMSD.R")
source("/Users/jlthomps/Desktop/git/GLRIBMPs/analyzeStepsGLRI.R")
pdf(paste(pathToSave,"/",investigateResponse,"_plotSteps.pdf",sep=""))
plotStepsMMSD(steps,data_sub_cens,transformResponse)
dev.off()
pdf(paste(pathToSave,"/",investigateResponse,"_analyzeSteps.pdf",sep=""))
analyzeStepsGLRI(steps, investigateResponse,siteINFO, xCorner = 0.01)
dev.off()
#################################################################################################
#####################################################
# Print summary in console (and save it to a text file):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/summaryPrintoutGLRI.R")
fileName <- paste(pathToSave,"/", investigateResponse,"Summary_2.txt", sep="")
summaryPrintoutGLRI(modelReturn, siteINFO, saveOutput=TRUE,fileName)
#####################################################
##############################################################################
# Regression for Computed Fecal Coliform
# Build a calibration data set (sampled fecal coliform paired with discharge
# and seasonal terms), run GSqwsr's censored step-wise regression, and write
# diagnostic plots plus a model summary under pathToSave.
data_sub <- dataMerge[!is.na(dataMerge$result31616Fec),]
# Exclude right-censored (">") results, which the regression cannot use.
# BUG FIX: the original 'data_sub$remark31616Fec != ">"' subsetting turned
# rows with an NA remark code (ordinary uncensored samples) into all-NA rows
# that were later silently discarded; keep those rows explicitly.
data_sub <- data_sub[is.na(data_sub$remark31616Fec) | data_sub$remark31616Fec != ">",]
#choose columns to keep for analysis
keepVars <- names(dataMerge)[-which(names(dataMerge) %in% c("site","rptlev00940Cl","result00940Cl","remark00940Cl","remark00530TSS","result00530TSS","rptlev00530TSS","result00665TP","remark00665TP","rptlev00665TP","rptlev50468Ec","remark50468Ec","result50468Ec","do","cond","temp","turb"))]
keepAll <- keepVars
data_sub <- data_sub[which(names(data_sub) %in% keepAll)]
# drop samples with no concurrent discharge measurement
data_sub <- data_sub[!is.na(data_sub$q),]
data_sub <- data_sub[,c("dateTime","remark31616Fec","result31616Fec","rptlev31616Fec","q","decYear","sinDY","cosDY")]
# importQW builds the censored-data object GSqwsr expects (value + remark +
# reporting level) -- NOTE(review): confirm the "tons"/"Unk" metadata slots
# against the importQW documentation.
data_sub_cens <- importQW(data_sub,c("q","decYear","sinDY","cosDY"),"result31616Fec","remark31616Fec","","rptlev31616Fec","User","tons","Unk","","31616","CompFec")
siteName <- "MenomoneeWawa"
siteINFO <- readNWISsite(siteNo)
# name of value column in data_sub_cens object
investigateResponse <- "CompFec"
# choose 'normal' or 'lognormal' distribution for data
transformResponse <- "lognormal"
pathToSave <- paste("/Users/jlthomps/Documents/R/MMSD/",siteName,sep="")
# Kitchen sink: step-wise search over the full candidate formula, selected by BIC
predictVariables <- names(data_sub_cens)[-which(names(data_sub_cens) %in% investigateResponse)]
predictVariables <- predictVariables[which(predictVariables != "datetime")]
predictVariables <- predictVariables[which(predictVariables != "decYear")]
kitchenSink <- createFullFormula(data_sub_cens,investigateResponse)
returnPrelim <- prelimModelDev(data_sub_cens,investigateResponse,kitchenSink,
                               "BIC", #Other option is "AIC"
                               transformResponse)
steps <- returnPrelim$steps
modelResult <- returnPrelim$modelStuff
modelReturn <- returnPrelim$DT.mod
colnames(steps) <- c("step","BIC","Deviance","Resid.Dev","Resid.Df","Correlation","Slope","RMSE","PRESS","scope","response")
# Save step-wise diagnostic plots to PDF (one device per plot set):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/plotStepsMMSD.R")
source("/Users/jlthomps/Desktop/git/GLRIBMPs/analyzeStepsGLRI.R")
pdf(paste(pathToSave,"/",investigateResponse,"_plotSteps.pdf",sep=""))
plotStepsMMSD(steps,data_sub_cens,transformResponse)
dev.off()
pdf(paste(pathToSave,"/",investigateResponse,"_analyzeSteps.pdf",sep=""))
analyzeStepsGLRI(steps, investigateResponse,siteINFO, xCorner = 0.01)
dev.off()
# Print summary in console (and save it to a text file):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/summaryPrintoutGLRI.R")
fileName <- paste(pathToSave,"/", investigateResponse,"Summary_2.txt", sep="")
summaryPrintoutGLRI(modelReturn, siteINFO, saveOutput=TRUE,fileName)
##############################################################################
# Regression for Computed TSS
# Same calibration pattern as the chloride section, for total suspended
# solids (parameter 00530).
data_sub <- dataMerge[!is.na(dataMerge$result00530TSS),]
#data_sub <- data_sub[which(data_sub$dateTime<strptime("2011-10-01",format="%Y-%m-%d")),]
#choose columns to keep for analysis
keepVars <- names(dataMerge)[-which(names(dataMerge) %in% c("site","rptlev00940Cl","result00940Cl","remark00940Cl","remark31616Fec","result31616Fec","rptlev31616Fec","result00665TP","remark00665TP","rptlev00665TP","rptlev50468Ec","remark50468Ec","result50468Ec","do","cond","temp","turb"))]
keepAll <- keepVars
data_sub <- data_sub[which(names(data_sub) %in% keepAll)]
# drop samples with no concurrent discharge measurement
data_sub <- data_sub[!is.na(data_sub$q),]
data_sub <- data_sub[,c("dateTime","remark00530TSS","result00530TSS","rptlev00530TSS","q","decYear","sinDY","cosDY")]
#data_sub$result00530TSS <- log(data_sub$result00530TSS)
#data_sub$rptlev00530TSS <- log(data_sub$rptlev00530TSS)
# set necessary site information and inputs to step-wise regression;
# importQW builds the censored-data object GSqwsr expects -- NOTE(review):
# confirm the "tons"/"Unk" metadata slots against the importQW documentation.
#library(GSqwsr)
data_sub_cens <- importQW(data_sub,c("q","decYear","sinDY","cosDY"),"result00530TSS","remark00530TSS","","rptlev00530TSS","User","tons","Unk","","00530","CompTSS")
#data_sub_cens <- importQW(data_sub,c("intensity","I5","I10","I60","ARF1","rain","duration","peakDisch","decYear"),"TPLoad","remark","",0.005,"User","tons","Unk","","00665","TPLoading")
siteName <- "MenomoneeWawa"
siteINFO <- readNWISsite(siteNo)
# name of value column in data_sub_cens object
investigateResponse <- "CompTSS"
# choose 'normal' or 'lognormal' distribution for data
transformResponse <- "lognormal"
pathToSave <- paste("/Users/jlthomps/Documents/R/MMSD/",siteName,sep="")
#################################################################################################
#Kitchen sink: step-wise search over the full candidate formula, selected by BIC
predictVariables <- names(data_sub_cens)[-which(names(data_sub_cens) %in% investigateResponse)]
predictVariables <- predictVariables[which(predictVariables != "datetime")]
predictVariables <- predictVariables[which(predictVariables != "decYear")]
kitchenSink <- createFullFormula(data_sub_cens,investigateResponse)
returnPrelim <- prelimModelDev(data_sub_cens,investigateResponse,kitchenSink,
                               "BIC", #Other option is "AIC"
                               transformResponse)
steps <- returnPrelim$steps
modelResult <- returnPrelim$modelStuff
modelReturn <- returnPrelim$DT.mod
colnames(steps) <- c("step","BIC","Deviance","Resid.Dev","Resid.Df","Correlation","Slope","RMSE","PRESS","scope","response")
#Save plotSteps to file (one PDF device per plot set):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/plotStepsMMSD.R")
source("/Users/jlthomps/Desktop/git/GLRIBMPs/analyzeStepsGLRI.R")
pdf(paste(pathToSave,"/",investigateResponse,"_plotSteps.pdf",sep=""))
plotStepsMMSD(steps,data_sub_cens,transformResponse)
dev.off()
pdf(paste(pathToSave,"/",investigateResponse,"_analyzeSteps.pdf",sep=""))
analyzeStepsGLRI(steps, investigateResponse,siteINFO, xCorner = 0.01)
dev.off()
#################################################################################################
#####################################################
# Print summary in console (and save it to a text file):
source("/Users/jlthomps/Desktop/git/GLRIBMPs/summaryPrintoutGLRI.R")
fileName <- paste(pathToSave,"/", investigateResponse,"Summary_2.txt", sep="")
summaryPrintoutGLRI(modelReturn, siteINFO, saveOutput=TRUE,fileName)
#####################################################
##############################################################################
# Step-wise regression for Computed Total Phosphorus (pcode 00665)
data_sub <- dataMerge[!is.na(dataMerge$result00665TP), ]
#data_sub <- data_sub[which(data_sub$dateTime<strptime("2011-10-01",format="%Y-%m-%d")),]
# Choose columns to keep for the analysis: drop site metadata, the other
# constituents (fecal coliform, TSS, E. coli) and the continuous sensor
# fields. (Entries duplicated in the original exclusion list were removed;
# the %in% test is unaffected by duplicates.)
keepVars <- names(dataMerge)[-which(names(dataMerge) %in% c(
  "site",
  "rptlev31616Fec", "result31616Fec", "remark31616Fec",
  "remark00530TSS", "result00530TSS", "rptlev00530TSS",
  "rptlev50468Ec", "remark50468Ec", "result50468Ec",
  "do", "cond", "temp", "turb"))]
keepAll <- keepVars
data_sub <- data_sub[names(data_sub) %in% keepAll]
# Discharge (q) is a required predictor; drop samples without it.
data_sub <- data_sub[!is.na(data_sub$q), ]
data_sub <- data_sub[, c("dateTime", "remark00665TP", "result00665TP",
                         "rptlev00665TP", "q", "decYear", "sinDY", "cosDY")]
#data_sub$result00665TP <- log(data_sub$result00665TP)
#data_sub$rptlev00665TP <- log(data_sub$rptlev00665TP)
# Set necessary site information and inputs to the step-wise regression.
#library(GSqwsr)
# Build the censored-data (qw) object: result/remark/reporting-level columns
# define the (possibly censored) response; q, decYear, sinDY and cosDY are
# the candidate predictors.
data_sub_cens <- importQW(data_sub, c("q", "decYear", "sinDY", "cosDY"),
                          "result00665TP", "remark00665TP", "",
                          "rptlev00665TP", "User", "tons", "Unk", "",
                          "00665", "CompTP")
#data_sub_cens <- importQW(data_sub,c("intensity","I5","I10","I60","ARF1","rain","duration","peakDisch","decYear"),"TPLoad","remark","",0.005,"User","tons","Unk","","00665","TPLoading")
siteName <- "MenomoneeWawa"
# NOTE(review): siteNo must already exist in the workspace (set upstream).
siteINFO <- readNWISsite(siteNo)
# Name of the value column in the data_sub_cens object.
investigateResponse <- "CompTP"
# Choose 'normal' or 'lognormal' distribution for the response.
transformResponse <- "lognormal"
pathToSave <- paste0("/Users/jlthomps/Documents/R/MMSD/", siteName)
#################################################################################################
# Kitchen sink: full candidate formula built from all predictors.
predictVariables <- names(data_sub_cens)[-which(names(data_sub_cens) %in% investigateResponse)]
predictVariables <- predictVariables[predictVariables != "datetime"]
predictVariables <- predictVariables[predictVariables != "decYear"]
kitchenSink <- createFullFormula(data_sub_cens, investigateResponse)
returnPrelim <- prelimModelDev(data_sub_cens, investigateResponse, kitchenSink,
                               "BIC", # Other option is "AIC"
                               transformResponse)
steps <- returnPrelim$steps
modelResult <- returnPrelim$modelStuff
modelReturn <- returnPrelim$DT.mod
colnames(steps) <- c("step", "BIC", "Deviance", "Resid.Dev", "Resid.Df",
                     "Correlation", "Slope", "RMSE", "PRESS", "scope",
                     "response")
# Save plotSteps and analyzeSteps diagnostics to PDF files.
source("/Users/jlthomps/Desktop/git/GLRIBMPs/plotStepsMMSD.R")
source("/Users/jlthomps/Desktop/git/GLRIBMPs/analyzeStepsGLRI.R")
pdf(paste0(pathToSave, "/", investigateResponse, "_plotSteps.pdf"))
plotStepsMMSD(steps, data_sub_cens, transformResponse)
dev.off()
pdf(paste0(pathToSave, "/", investigateResponse, "_analyzeSteps.pdf"))
analyzeStepsGLRI(steps, investigateResponse, siteINFO, xCorner = 0.01)
dev.off()
#################################################################################################
#####################################################
# Print model summary in console and save it to file.
source("/Users/jlthomps/Desktop/git/GLRIBMPs/summaryPrintoutGLRI.R")
fileName <- paste0(pathToSave, "/", investigateResponse, "Summary_2.txt")
summaryPrintoutGLRI(modelReturn, siteINFO, saveOutput = TRUE, fileName)
#####################################################
##############################################################################
# Step-wise regression for Computed E. coli (pcode 50468)
data_sub <- dataMerge[!is.na(dataMerge$result50468Ec), ]
# Drop right-censored (">") remarks and non-positive counts (a lognormal
# response requires strictly positive values).
data_sub <- data_sub[data_sub$remark50468Ec != ">", ]
data_sub <- data_sub[data_sub$result50468Ec > 0, ]
#data_sub <- data_sub[which(data_sub$dateTime<strptime("2011-10-01",format="%Y-%m-%d")),]
# Choose columns to keep for the analysis: drop site metadata, the other
# constituents (fecal coliform, TSS, TP) and the continuous sensor fields.
# (Entries duplicated in the original exclusion list were removed;
# the %in% test is unaffected by duplicates.)
keepVars <- names(dataMerge)[-which(names(dataMerge) %in% c(
  "site",
  "rptlev31616Fec", "result31616Fec", "remark31616Fec",
  "remark00530TSS", "result00530TSS", "rptlev00530TSS",
  "result00665TP", "remark00665TP", "rptlev00665TP",
  "do", "cond", "temp", "turb"))]
keepAll <- keepVars
data_sub <- data_sub[names(data_sub) %in% keepAll]
# Discharge (q) is a required predictor; drop samples without it.
data_sub <- data_sub[!is.na(data_sub$q), ]
data_sub <- data_sub[, c("dateTime", "remark50468Ec", "result50468Ec",
                         "rptlev50468Ec", "q", "decYear", "sinDY", "cosDY")]
#data_sub$result50468Ec <- log(data_sub$result50468Ec)
#data_sub$rptlev50468Ec <- log(data_sub$rptlev50468Ec)
# Set necessary site information and inputs to the step-wise regression.
#library(GSqwsr)
# Build the censored-data (qw) object: result/remark/reporting-level columns
# define the (possibly censored) response; q, decYear, sinDY and cosDY are
# the candidate predictors.
data_sub_cens <- importQW(data_sub, c("q", "decYear", "sinDY", "cosDY"),
                          "result50468Ec", "remark50468Ec", "",
                          "rptlev50468Ec", "User", "tons", "Unk", "",
                          "50468", "CompEc")
#data_sub_cens <- importQW(data_sub,c("intensity","I5","I10","I60","ARF1","rain","duration","peakDisch","decYear"),"TPLoad","remark","",0.005,"User","tons","Unk","","00665","TPLoading")
siteName <- "MenomoneeWawa"
# NOTE(review): siteNo must already exist in the workspace (set upstream).
siteINFO <- readNWISsite(siteNo)
# Name of the value column in the data_sub_cens object.
investigateResponse <- "CompEc"
# Choose 'normal' or 'lognormal' distribution for the response.
transformResponse <- "lognormal"
pathToSave <- paste0("/Users/jlthomps/Documents/R/MMSD/", siteName)
#################################################################################################
# Kitchen sink: full candidate formula built from all predictors.
predictVariables <- names(data_sub_cens)[-which(names(data_sub_cens) %in% investigateResponse)]
predictVariables <- predictVariables[predictVariables != "datetime"]
predictVariables <- predictVariables[predictVariables != "decYear"]
kitchenSink <- createFullFormula(data_sub_cens, investigateResponse)
returnPrelim <- prelimModelDev(data_sub_cens, investigateResponse, kitchenSink,
                               "BIC", # Other option is "AIC"
                               transformResponse)
steps <- returnPrelim$steps
modelResult <- returnPrelim$modelStuff
modelReturn <- returnPrelim$DT.mod
colnames(steps) <- c("step", "BIC", "Deviance", "Resid.Dev", "Resid.Df",
                     "Correlation", "Slope", "RMSE", "PRESS", "scope",
                     "response")
# Save plotSteps and analyzeSteps diagnostics to PDF files.
source("/Users/jlthomps/Desktop/git/GLRIBMPs/plotStepsMMSD.R")
source("/Users/jlthomps/Desktop/git/GLRIBMPs/analyzeStepsGLRI.R")
pdf(paste0(pathToSave, "/", investigateResponse, "_plotSteps.pdf"))
plotStepsMMSD(steps, data_sub_cens, transformResponse)
dev.off()
pdf(paste0(pathToSave, "/", investigateResponse, "_analyzeSteps.pdf"))
analyzeStepsGLRI(steps, investigateResponse, siteINFO, xCorner = 0.01)
dev.off()
#################################################################################################
#####################################################
# Print model summary in console and save it to file.
source("/Users/jlthomps/Desktop/git/GLRIBMPs/summaryPrintoutGLRI.R")
fileName <- paste0(pathToSave, "/", investigateResponse, "Summary_2.txt")
summaryPrintoutGLRI(modelReturn, siteINFO, saveOutput = TRUE, fileName)
#####################################################
|
e89d2003be049683afbacf3602b3e5bb8e1688b1 | b2936989935e5ca30e62e349ec6a925697624d92 | /Quiz/Quiz1.R | 1b7a634a53b6acae1450150e8094acae97abeda6 | [] | no_license | FloraC/Coursera-RProgramming | 3348b7ecb003aa734edb2ad1fe83c78b93033e30 | 377aaaf09364ea8c339e3ca8771c93d60272c789 | refs/heads/master | 2021-01-18T07:40:49.642322 | 2015-05-15T07:13:10 | 2015-05-15T07:13:10 | 33,906,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,862 | r | Quiz1.R | ## Quiz1 Review
# Quiz 1 review (Coursera R Programming)

# Vector addition and recycling ---------------------------------------------
x1 <- 1:4
y1 <- 2:3
z1 <- 2
Ans1_1 <- x1 + y1   # y1 is recycled over x1
Ans1_1
class(Ans1_1)
Ans1_2 <- x1 + z1   # scalar recycled over the vector
Ans1_2
class(Ans1_2)

# Set all elements of x that fall in 1:5 to zero via logical subsetting.
# NOTE(review): 'x' is never created in this script; this line assumes e.g.
# x <- c(3, 5, 1, 10, 12, 6) already exists in the workspace.
x[x %in% 1:5] <- 0
x

# Unzip the quiz archive and read the CSV it contains -----------------------
Q1data <- read.csv(unzip("rprog-data-quiz1_data.zip"))

# Column names of the dataset
names(Q1data)

# First two rows of the data frame
Q1data[1:2, ]

# Number of observations (rows)
nr <- nrow(Q1data)
nr

# Last two rows, shown two equivalent ways
tail(Q1data, 2)
Q1data[(nr - 1):nr, ]

# Value of Ozone in the 47th row
OR47 <- Q1data$Ozone[47]
OR47

# Number of missing values in the Ozone column
cOzone <- Q1data$Ozone
sum(is.na(cOzone))

# Mean of the Ozone column, excluding missing values
mean(cOzone, na.rm = TRUE)

# Mean Solar.R for rows where Ozone > 31 and Temp > 90
cSolar <- Q1data$Solar.R
cSolarCal <- cSolar[(cOzone > 31) & (Q1data$Temp > 90)]
mean(cSolarCal, na.rm = TRUE)

# Mean Temp when Month == 6
cTemp <- Q1data$Temp
cMonth <- Q1data$Month
cTempCal <- cTemp[cMonth == 6]
mean(cTempCal, na.rm = TRUE)

# Maximum Ozone value in May (Month == 5)
cOzoneCal <- cOzone[cMonth == 5]
max(cOzoneCal, na.rm = TRUE)
7b47076b051d8928999ceb241d0468969691e926 | 393554988b25cb0b7f1a25956c84520f7f1ca2ae | /sample1.R | d5bde793def12a2b0328aa10bfcf6cf1f10dd5e0 | [] | no_license | debajyoti7/R_wordCloud | fb1b233bee4d5d96af6c6d7280bfecd239f4fed7 | 45f046712cd08f579f7507e20cbbb6b8d88e6ac8 | refs/heads/master | 2021-01-10T12:52:11.294252 | 2015-10-16T09:42:08 | 2015-10-16T09:42:08 | 44,367,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 419 | r | sample1.R | library(tm)
# Build a tm corpus from all documents in the local "poems/" directory.
txt <- Corpus(DirSource("poems/"))
#txt <- tm_map(txt, stripWhitespace)
# Standard text cleanup: lower-case everything, drop English stopwords and
# punctuation, then stem each document to merge word variants.
txt <- tm_map(txt, content_transformer(tolower))
txt <- tm_map(txt, removeWords, stopwords("english"))
txt <- tm_map(txt, removePunctuation)
txt <- tm_map(txt, stemDocument)
library(wordcloud)
# Plot up to 100 most frequent terms; fixed order (most frequent in the
# centre), 35% of words rotated, Dark2 palette for frequency bins.
wordcloud(txt, scale=c(5,0.5), max.words = 100, random.order=FALSE,
          rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
|
24ad1b8fa8c5e1f540bcc570d2888051ddd0219c | 719e18d76fd6580449c1eddae055aefe0f4dc73e | /R/Summarizing.R | 10b38885cce56b3701e84291509e0d24f2dc4d14 | [] | no_license | RIVA-ISC/openeddy | 4c4a73d6fcaa2d0867f941133f3194e071751ff1 | e53fa95be1a0f238baecef10e76c4e16e02463c4 | refs/heads/master | 2022-12-30T06:29:47.466582 | 2020-10-14T14:35:39 | 2020-10-14T14:35:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,812 | r | Summarizing.R | #' Time series summarization
#'
#' Utilities that simplify aggregation of data and their uncertainties over
#' defined time intervals.
#'
#' \code{agg_mean} and \code{agg_sum} compute mean and sum over intervals
#' defined by \code{format} and/or \code{breaks} for all columns.
#'
#' \code{agg_fsd} and \code{agg_DT_SD} estimate aggregated mean and summed
#' uncertainties over defined time periods for \code{REddyProc} package
#' gap-filling and daytime-based flux partitioning outputs, respectively. The
#' uncertainty aggregation accounts for autocorrelation among records. It is
#' performed only for autodetected columns with appropriate suffixes (see
#' further). Note that uncertainty products of \code{agg_fsd} and
#' \code{agg_DT_SD} are reported as standard deviations (\code{SD}) and require
#' further correction to represent uncertainty bounds for given confidence
#' interval (e.g. \code{SD * 1.96} for 95\% confidence level).
#'
#' The summarizations are done on a data frame \code{x} with required timestamp
#' column (\code{x$timestamp}) of class \code{"POSIXt"}. With exception of
#' \code{agg_mean}, the timestamp must form regular sequence without \code{NA}s
#' due to time resolution estimation.
#'
#' Change of aggregation interval can be achieved through \code{breaks} and
#' \code{format} arguments.
#'
#' The data frame \code{x} can be \link[=cut.POSIXt]{cut} to custom intervals
#' using argument \code{breaks}. Note that labels are constructed from the
#' left-hand end of the intervals and converted to \code{"POSIXct"} class. This
#' can be useful when aggregating e.g. half-hourly data over hourly
#' (\code{breaks = "60 mins"}) or three-day (\code{breaks = "3 days"})
#' intervals.
#'
#' The formatting of the timestamp (original or after cutting) using
#' \code{format} is another (preferable) way to change aggregation intervals.
#' For example changing original \code{"POSIXt"} time format (\code{"\%Y-\%m-\%d
#' \%H:\%M:\%S"}) to \code{"\%Y-\%m-\%d"}, \code{"\%W_\%y"}, \code{"\%m-\%y"} or
#' \code{"\%Y"} will result in daily, weekly, monthly or yearly aggregation
#' intervals, respectively. Note that improper \code{format} can repress
#' expected effect of \code{breaks}.
#'
#' \code{agg_fsd} and \code{agg_DT_SD} require certain columns with defined
#' suffixes in order to evaluate uncertainty correctly. These columns are a
#' product of \code{REddyProc} package gap-filling and flux partitioning methods
#' and are documented here:
#' \url{https://www.bgc-jena.mpg.de/bgi/index.php/Services/REddyProcWebOutput}.
#' Detailed description of uncertainty aggregation is available here:
#' \url{https://github.com/bgctw/REddyProc/blob/master/vignettes/aggUncertainty.md}.
#'
#' \code{agg_fsd} requires columns with suffixes \code{_fall}, \code{_orig},
#' \code{_fqc} and \code{_fsd} for each variable.
#'
#' \code{agg_DT_SD} requires corresponding columns with \code{\link{regexp}}
#' patterns \code{"^NEE_.*_orig$"}, \code{"^NEE_.*_fqc$"}, \code{"^Reco_DT_"},
#' \code{"^GPP_DT_"}, \code{"^Reco_DT_.*_SD$"} and \code{"^GPP_DT_.*_SD$"}.
#'
#' @section Unit Conversion: In case of aggregation using \code{sum}, i.e.
#' \code{agg_sum}, \code{agg_fsd} and \code{agg_DT_SD}, appropriate unit
#' conversion can be applied to columns defined by \code{quant}, \code{power}
#' and \code{carbon} arguments.
#'
#' @section Sign Correction: Although the sign convention used for measured NEE
#' (Net Ecosystem Exchange) denotes negative fluxes as CO2 uptake, summed NEE
#' is typically reported with the opposite sign convention and is assumed to
#' converge to NEP (Net Ecosystem Production), especially over longer
#' aggregation intervals. Similarly, estimated negative GPP (Gross Primary
#' Production) typically denotes carbon sink but should be corrected to
#' positive values if summed over a time period.
#'
#' \code{agg_sum} automatically detects all NEE and GPP columns in \code{x}
#' using regular expressions and changes their sign. For GPP columns, sign
#' change is performed only if mean GPP < 0 (sign convention autodetection).
#' Note that it assumes that average GPP signal denotes carbon sink and it
#' could fail if such sink is missing or negligible (e.g. urban measurements).
#' In cases when NEE or its uncertainty is summed (\code{agg_sum} or
#' \code{agg_fsd}), NEE is renamed to NEP.
#'
#' @section References: Bayley, G. and Hammersley, J., 1946. The "Effective"
#' Number of Independent Observations in an Autocorrelated Time Series.
#' Supplement to the Journal of the Royal Statistical Society, 8(2), 184-197.
#' doi: \url{https://doi.org/10.2307/2983560}
#'
#' Zieba, A. and Ramza, P., 2011. Standard Deviation of the Mean of
#' Autocorrelated Observations Estimated with the Use of the Autocorrelation
#' Function Estimated From the Data. Metrology and Measurement Systems, 18(4),
#' 529-542. doi: \url{https://doi.org/10.2478/v10178-011-0052-x}
#'
#' @param x A data frame with required timestamp column (\code{x$timestamp}) of
#' class \code{"POSIXt"}.
#' @param format A character string specifying \code{x$timestamp} formatting for
#' aggregation through internal \code{\link{strftime}} function.
#' @param breaks A vector of cut points or number giving the number of intervals
#' which \code{x$timestamp} is to be cut into or an interval specification,
#' one of \code{"sec"}, \code{"min"}, \code{"hour"}, \code{"day"},
#' \code{"DSTday"}, \code{"week"}, \code{"month"}, \code{"quarter"} or
#' \code{"year"}, optionally preceded by an integer and a space, or followed
#' by \code{"s"}.
#' @param agg_per A character string providing the time interval of aggregation
#' that will be appended to units (e.g. \code{"hh-1"}, \code{"week-1"} or
#' \code{"month-1"}).
#' @param quant A character vector listing variable names that require
#' conversion from quantum to energy units before aggregation.
#' @param power A character vector listing variable names that require
#' conversion from power to energy units before aggregation.
#' @param carbon A character vector listing variable names that require
#' conversion from CO2 concentration to C mass flux units before aggregation.
#' @param tz A character string specifying the time zone to be used for the
#' conversion. System-specific (see \code{\link{as.POSIXlt}} or
#' \code{\link{timezones}}), but \code{""} is the current time zone, and
#' \code{"GMT"} is UTC. Invalid values are most commonly treated as UTC, on
#' some platforms with a warning.
#' @param ... Further arguments to be passed to the internal
#' \code{\link{aggregate}} function.
#'
#' @return \code{agg_mean} and \code{agg_sum} produce a data frame with
#' attributes varnames and units assigned to each respective column.
#'
#' \code{agg_fsd} and \code{agg_DT_SD} produce a list with two data frames
#' \code{mean} and \code{sum} with attributes varnames and units assigned to
#' each respective column or \code{NULL} value if required columns are not
#' recognized.
#'
#' @seealso \code{\link{aggregate}}, \code{\link{as.POSIXlt}},
#' \code{\link{cut.POSIXt}}, \code{\link{mean}}, \code{\link{regexp}},
#' \code{\link{strftime}}, \code{\link{sum}}, \code{\link{timezones}},
#' \code{\link{varnames}}
#'
#' @examples
#' \dontrun{
#' library(REddyProc)
#' DETha98 <- fConvertTimeToPosix(Example_DETha98, 'YDH', Year = 'Year',
#' Day = 'DoY', Hour = 'Hour')[-(2:4)]
#' EProc <- sEddyProc$new('DE-Tha', DETha98,
#' c('NEE', 'Rg', 'Tair', 'VPD', 'Ustar'))
#' names(DETha98)[1] <- "timestamp"
#' DETha98$timestamp <- DETha98$timestamp - 60*15
#' agg_mean(DETha98, "%m-%y")
#' agg_mean(DETha98, "%m-%y", na.rm = TRUE)
#' (zz <- agg_sum(DETha98, "%m-%y", agg_per = "month-1"))
#' units(zz, names = TRUE)
#'
#' EProc$sMDSGapFillAfterUstar('NEE', uStarTh = 0.3, FillAll = TRUE)
#' for (i in c('Tair', 'Rg', 'VPD')) EProc$sMDSGapFill(i, FillAll = TRUE)
#' results <- cbind(DETha98["timestamp"], EProc$sExportResults())
#' agg_fsd(results, "%m-%y", agg_per = "month-1")
#' EProc$sSetLocationInfo(LatDeg = 51.0, LongDeg = 13.6, TimeZoneHour = 1)
#' EProc$sGLFluxPartition(suffix = "uStar")
#' results <- cbind(DETha98["timestamp"], EProc$sExportResults())
#' agg_DT_SD(results, "%m-%y", agg_per = "month-1")}
agg_mean <- function(x, format, breaks = NULL, tz = "GMT", ...) {
  # Validate input: a data frame with named columns and a 'POSIXt' timestamp.
  cols <- names(x)
  if (!is.data.frame(x) || is.null(cols)) {
    stop("'x' must be of class data.frame with colnames")
  }
  if (!"timestamp" %in% cols) stop("missing 'x$timestamp'")
  if (!inherits(x$timestamp, "POSIXt")) {
    stop("'x$timestamp' must be of class 'POSIXt'")
  }
  # Optionally coarsen the timestamp to custom intervals first.
  if (!is.null(breaks)) {
    x$timestamp <- as.POSIXct(cut(x$timestamp, breaks = breaks), tz = tz)
  }
  # 'format' defines the final aggregation interval; factor levels built
  # from unique() keep the original chronological order of intervals.
  stamp <- strftime(x$timestamp, format = format, tz = tz)
  x$timestamp <- factor(stamp, levels = unique(stamp))
  # Average every non-timestamp column within each interval, then carry
  # over the openeddy varnames/units attributes and mark columns as means.
  vars <- x[names(x) != "timestamp"]
  out <- aggregate(vars, list(Intervals = x$timestamp), mean, ...)
  openeddy::varnames(out) <- c("Intervals", openeddy::varnames(vars))
  openeddy::units(out) <- c("-", openeddy::units(vars))
  names(out) <- c("Intervals", paste0(names(out[-1]), "_mean"))
  out
}
#' @rdname agg_mean
agg_sum <- function(x, format, agg_per = NULL, breaks = NULL,
                    quant = grep("^PAR|^PPFD|^APAR", names(x), value = TRUE),
                    power = grep("^GR|^Rg|^SW|^SR|^LW|^LR|^Rn|^NETRAD|^H|^LE",
                                 names(x), value = TRUE),
                    carbon = grep("^NEE|^GPP|^Reco", names(x), value = TRUE),
                    tz = "GMT", ...) {
  # Validate input: a data frame with named columns and a gap-free, regular
  # 'POSIXt' timestamp (the time resolution derived from it drives the unit
  # conversions below).
  x_names <- names(x)
  if (!is.data.frame(x) || is.null(x_names)) {
    stop("'x' must be of class data.frame with colnames")
  }
  if (!"timestamp" %in% x_names) stop("missing 'x$timestamp'")
  if (!inherits(x$timestamp, "POSIXt")) {
    stop("'x$timestamp' must be of class 'POSIXt'")
  }
  if (any(is.na(x$timestamp))) stop("NAs in 'x$timestamp' not allowed")
  if (any(diff(as.numeric(x$timestamp)) !=
          mean(diff(as.numeric(x$timestamp))))) {
    stop("x$timestamp does not form regular sequence")
  }
  tres <- max(diff(as.numeric(x$timestamp))) # Time resolution in secs
  # Define aggregation intervals: optional custom cut, then formatting.
  # Factor levels keep the original chronological order of intervals.
  if (!is.null(breaks)) {
    x$timestamp <- as.POSIXct(cut(x$timestamp, breaks = breaks), tz = tz)
  }
  x$timestamp <- strftime(x$timestamp, format = format, tz = tz)
  x$timestamp <- factor(x$timestamp, levels = unique(x$timestamp))
  # Change sign in all NEE variables (NEE is renamed to NEP below)
  NEE_cols <- names(x) %in% grep("NEE", names(x[carbon]), value = TRUE)
  NEE <- names(x)[NEE_cols]
  x[NEE_cols] <- -x[NEE_cols]
  # Perform sign correction in case the mean GPP is negative
  # (sign convention autodetection; assumes average GPP denotes carbon sink)
  GPP <- grep("GPP", x_names, value = TRUE)
  sign_cor <- if (length(GPP)) {
    sapply(x[GPP], function(x) mean(x, na.rm = TRUE) < 0)
  } else FALSE
  x[GPP][sign_cor] <- -x[GPP][sign_cor]
  cat("Sign correction (x -> -x):\n")
  # BUGFIX: combine both name vectors before pasting; the former
  # paste(NEE, GPP[sign_cor], collapse = ", ") recycled the two vectors
  # element-wise (and padded zero-length args with ""), garbling the report
  # instead of listing all corrected variables separated by commas.
  cat(if (length(c(NEE, GPP[sign_cor])) > 0) paste(
    c(NEE, GPP[sign_cor]), collapse = ", ") else "None", "\n\n")
  cat("Unit conversion\n===============\n")
  # conversion from quantum to radiometric units and to energy units
  # - from umol+1s-1m-2 to MJ+1hh-1m-2 (hh = half-hour)
  quant <- quant[quant %in% names(x)]
  x[quant] <- x[quant] * tres * 1e-6 / 4.57 # 4.57 Thimijan and Heins (1983)
  energy_units <- "MJ m-2"
  if (length(quant) > 0) {
    cat("Quantum to energy (", openeddy::units(x[quant])[1],
        " -> ", trimws(paste(energy_units, agg_per)), "):\n\n",
        paste(quant, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(x[quant]) <- rep(energy_units, ncol(x[quant]))
  # conversion from power to energy units
  # - from W m-2 to MJ+1hh-1m-2
  power <- power[power %in% names(x)]
  x[power] <- x[power] * tres * 1e-6
  if (length(power) > 0) {
    cat("Power to energy (", openeddy::units(x[power])[1],
        " -> ", trimws(paste(energy_units, agg_per)), "):\n\n",
        paste(power, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(x[power]) <- rep(energy_units, ncol(x[power]))
  # conversion from mean CO2 concentration flux to integrated mass flux of C
  # - from umol+1s-1m-2 to g(C)+1hh-1m-2
  carbon <- carbon[carbon %in% names(x)]
  x[carbon] <- x[carbon] * tres * 12e-6
  carbon_units <- "g(C) m-2"
  if (length(carbon) > 0) {
    cat("CO2 concentration to C mass flux (",
        openeddy::units(x[carbon])[1],
        " -> ", trimws(paste(carbon_units, agg_per)), "):\n\n",
        paste(carbon, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(x[carbon]) <- rep(carbon_units, ncol(x[carbon]))
  # simplified from sum(length(...)) - length() is already a scalar
  if (length(c(quant, power, carbon)) == 0)
    cat("No variables available for conversion\n")
  # Rename relevant NEE variables to NEP (their sign was flipped above)
  names(x)[NEE_cols] <- gsub("NEE", "NEP", names(x)[NEE_cols])
  # Sum every non-timestamp column per interval and carry over the openeddy
  # varnames/units attributes; append the aggregation period to the units.
  out <- aggregate(x[names(x) != "timestamp"],
                   list(Intervals = x$timestamp), sum, ...)
  openeddy::varnames(out) <- c("Intervals",
                               openeddy::varnames(x[names(x) != "timestamp"]))
  openeddy::units(out) <- c("-", openeddy::units(x[names(x) != "timestamp"]))
  if (!is.null(agg_per)) openeddy::units(out)[-1] <-
    trimws(paste(openeddy::units(out)[-1], agg_per))
  names(out) <- c("Intervals", paste0(names(out[-1]), "_sum"))
  return(out)
}
#' @rdname agg_mean
agg_fsd <- function(x, format, agg_per = NULL, breaks = NULL,
                    quant = grep("^PAR|^PPFD|^APAR", names(x), value = TRUE),
                    power = grep("^GR|^Rg|^SW|^SR|^LW|^LR|^Rn|^NETRAD|^H|^LE",
                                 names(x), value = TRUE),
                    carbon = grep("^NEE", names(x), value = TRUE),
                    tz = "GMT") {
  # Validate input: a data frame with named columns and a gap-free, regular
  # 'POSIXt' timestamp (time resolution derived below drives unit conversion).
  x_names <- names(x)
  if (!is.data.frame(x) || is.null(x_names)) {
    stop("'x' must be of class data.frame with colnames")
  }
  if (!"timestamp" %in% x_names) stop("missing 'x$timestamp'")
  if (!inherits(x$timestamp, "POSIXt")) {
    stop("'x$timestamp' must be of class 'POSIXt'")
  }
  if (any(is.na(x$timestamp))) stop("NAs in 'x$timestamp' not allowed")
  if (any(diff(as.numeric(x$timestamp)) !=
          mean(diff(as.numeric(x$timestamp))))) {
    stop("x$timestamp does not form regular sequence")
  }
  tres <- max(diff(as.numeric(x$timestamp))) # Time resolution in secs
  # Define aggregation intervals: optional custom cut, then formatting.
  # Factor levels keep the original chronological order of intervals.
  if (!is.null(breaks)) {
    x$timestamp <- as.POSIXct(cut(x$timestamp, breaks = breaks), tz = tz)
  }
  x$timestamp <- strftime(x$timestamp, format = format, tz = tz)
  x$timestamp <- factor(x$timestamp, levels = unique(x$timestamp))
  # Pair the REddyProc columns: '_fall' (gap-filled) and '_orig' (measured)
  # values of each variable; '_orig' columns are reordered to match '_fall'.
  fall_names <- grep("_fall$", names(x), value = TRUE) # no fall for GPP & Reco
  fall <- x[fall_names]
  names(fall) <- gsub("_fall", "", names(fall))
  orig_names <- grep("_orig$", names(x), value = TRUE) # no orig for GPP & Reco
  orig <- x[orig_names]
  names(orig) <- gsub("_orig", "", names(orig))
  orig <- orig[order(match(names(orig), names(fall)))] # order columns
  if (!identical(names(fall), names(orig))) stop(
    "'x' columns with suffix '_orig' not fitting '_fall' columns"
  )
  # Nothing to aggregate if no gap-filled variables were recognized.
  if (ncol(fall) == 0) return(NULL)
  # Model residuals (measured - gap-filled) carry the autocorrelation
  # structure used to estimate the effective number of observations.
  resid <- orig - fall
  autoCorr <- lapply(resid, lognorm::computeEffectiveAutoCorr)
  fqc_names <- grep("_fqc$", names(x), value = TRUE)
  fqc_names <- fqc_names[
    !fqc_names %in% grep("^Ustar|^GPP", fqc_names, value = TRUE)]
  # order fqc_names
  fqc_names <- fqc_names[order(match(
    gsub("_fqc", "", fqc_names), names(fall)))]
  if (!identical(gsub("_fqc", "", fqc_names), names(fall))) stop(
    "'x' columns with suffix '_fqc' not fitting '_fall' columns"
  )
  # Effective number of observations per interval and variable, accounting
  # for autocorrelation (Bayley & Hammersley 1946; Zieba & Ramza 2011).
  resid_l <- split(resid, x$timestamp)
  l <- list()
  for (i in seq_along(resid_l)) l[[i]] <-
    mapply(lognorm::computeEffectiveNumObs, res = resid_l[[i]],
           effAcf = autoCorr, MoreArgs = list(na.rm = TRUE))
  nEff <- as.data.frame(do.call(rbind, l))
  # '_fsd' columns hold the per-record gap-filling uncertainty (SD);
  # reorder them to match the '_fall' columns as well.
  fsd_names <- grep("_fsd$", names(x), value = TRUE)
  fsd <- x[fsd_names]
  names(fsd) <- gsub("_fsd", "", names(fsd))
  fsd <- fsd[order(match(names(fsd), names(fall)))] # order columns
  fsd_names <- names(fsd) # save the ordered variant - used later
  if (!identical(fsd_names, names(fall))) stop(
    "'x' columns with suffix '_fsd' not fitting '_fall' columns"
  )
  # SD considered only for measured records (fqc == 0)
  for (i in seq_along(fsd)) fsd[which(x[, fqc_names[i]] != 0), i] <- NA
  # Mean of squared SDs per interval (drop = FALSE keeps empty intervals).
  agg_SD <- aggregate(fsd, by = list(Intervals = x$timestamp), function(x)
    if (all(is.na(x))) NA_real_ else mean(x^2, na.rm = TRUE), drop = FALSE)
  openeddy::varnames(agg_SD["Intervals"]) <- "Intervals"
  openeddy::units(agg_SD["Intervals"]) <- "-"
  # SD of the aggregated mean: sqrt(mean squared SD / (nEff - 1));
  # undefined (NA) when the effective number of observations is <= 1.
  res_SD <- as.data.frame(mapply(function(SD, nEff)
    sqrt(SD / ifelse(nEff <= 1, NA_real_, nEff - 1)),
    SD = agg_SD[-1], nEff = nEff, SIMPLIFY = FALSE))
  names(res_SD) <- paste0(fsd_names, "_fsd")
  openeddy::varnames(res_SD) <- names(res_SD)
  openeddy::units(res_SD) <- openeddy::units(fsd)
  res_mean <- res_sum <- cbind(agg_SD["Intervals"], res_SD)
  # Compute sums as mean * nTot
  nTot <- unname(sapply(resid_l, nrow))
  res_sum[-1] <- as.data.frame(lapply(res_mean[-1], function(x) x * nTot))
  cat("Unit conversion\n===============\n")
  # conversion from quantum to radiometric units and to energy units
  # - from umol+1s-1m-2 to MJ+1hh-1m-2 (hh = half-hour)
  quant <- quant[quant %in% names(res_sum)]
  res_sum[quant] <- as.data.frame(lapply(res_sum[quant], function(x)
    x * tres * 1e-6 / 4.57)) # 4.57 Thimijan and Heins (1983)
  energy_units <- "MJ m-2"
  if (length(quant) > 0) {
    cat("Quantum to energy (", openeddy::units(res_sum[quant])[1],
        " -> ", trimws(paste(energy_units, agg_per)), "):\n\n",
        paste(quant, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(res_sum[quant]) <- rep(energy_units, ncol(res_sum[quant]))
  # conversion from power to energy units
  # - from W m-2 to MJ+1hh-1m-2
  power <- power[power %in% names(res_sum)]
  res_sum[power] <- as.data.frame(lapply(res_sum[power], function(x)
    x * tres * 1e-6))
  if (length(power) > 0) {
    cat("Power to energy (", openeddy::units(res_sum[power])[1],
        " -> ", trimws(paste(energy_units, agg_per)), "):\n\n",
        paste(power, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(res_sum[power]) <- rep(energy_units, ncol(res_sum[power]))
  # conversion from mean CO2 concentration flux to integrated mass flux of C
  # - from umol+1s-1m-2 to g(C)+1hh-1m-2
  carbon <- carbon[carbon %in% names(res_sum)]
  res_sum[carbon] <- as.data.frame(lapply(res_sum[carbon], function(x)
    x * tres * 12e-6))
  carbon_units <- "g(C) m-2"
  if (length(carbon) > 0) {
    cat("CO2 concentration to C mass flux (",
        openeddy::units(res_sum[carbon])[1],
        " -> ", trimws(paste(carbon_units, agg_per)), "):\n\n",
        paste(carbon, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(res_sum[carbon]) <- rep(carbon_units, ncol(res_sum[carbon]))
  if (sum(length(c(quant, power, carbon))) == 0)
    cat("No variables available for conversion\n")
  # Rename relevant NEE variables to NEP
  NEE_cols <-
    names(res_sum) %in% grep("NEE", names(res_sum[carbon]), value = TRUE)
  names(res_sum)[NEE_cols] <- gsub("NEE", "NEP", names(res_sum)[NEE_cols])
  # Mark aggregation type in names; append the aggregation period to units.
  names(res_mean)[-1] <- paste0(names(res_mean[-1]), "_mean")
  names(res_sum)[-1] <- paste0(names(res_sum[-1]), "_sum")
  if (!is.null(agg_per)) openeddy::units(res_sum)[-1] <-
    trimws(paste(openeddy::units(res_sum)[-1], agg_per))
  out <- list(mean = res_mean, sum = res_sum)
  return(out)
}
#' @rdname agg_mean
agg_DT_SD <- function(x, format, agg_per = NULL, breaks = NULL,
                      carbon = grep("^Reco|^GPP", names(x), value = TRUE),
                      tz = "GMT") {
  # Aggregate the uncertainty (SD) of daytime-partitioned (DT) Reco and GPP
  # columns of 'x' over intervals defined by 'format' (and optionally
  # 'breaks'), using an effective number of observations derived from the
  # autocorrelation of the NEE model residuals (lognorm package).
  # Returns list(mean = ..., sum = ...) of data frames, or NULL when no
  # matching '^NEE_.*_orig$' columns exist.
  #
  # Args:
  #   x:       data frame with a regular, gap-free POSIXt 'timestamp' column
  #            plus NEE_*_orig, NEE_*_fqc, Reco_DT_*(_SD), GPP_DT_*(_SD) columns.
  #   format:  strftime() format string labelling the aggregation intervals.
  #   agg_per: optional unit suffix appended to the units of the sums.
  #   breaks:  optional cut() breaks applied to 'timestamp' before formatting.
  #   carbon:  columns converted from CO2 concentration flux to C mass flux.
  #   tz:      time zone used when labelling intervals.

  # --- validate input: regular POSIXt timestamp without NAs ---------------
  x_names <- names(x)
  if (!is.data.frame(x) || is.null(x_names)) {
    stop("'x' must be of class data.frame with colnames")
  }
  if (!"timestamp" %in% x_names) stop("missing 'x$timestamp'")
  if (!inherits(x$timestamp, "POSIXt")) {
    stop("'x$timestamp' must be of class 'POSIXt'")
  }
  if (any(is.na(x$timestamp))) stop("NAs in 'x$timestamp' not allowed")
  if (any(diff(as.numeric(x$timestamp)) !=
          mean(diff(as.numeric(x$timestamp))))) {
    stop("x$timestamp does not form regular sequence")
  }
  tres <- max(diff(as.numeric(x$timestamp))) # Time resolution in secs
  # --- turn timestamps into interval labels (factor keeps original order) -
  if (!is.null(breaks)) {
    x$timestamp <- as.POSIXct(cut(x$timestamp, breaks = breaks), tz = tz)
  }
  x$timestamp <- strftime(x$timestamp, format = format, tz = tz)
  x$timestamp <- factor(x$timestamp, levels = unique(x$timestamp))
  # --- extract original (non-gap-filled) NEE columns ----------------------
  orig_names <- grep("^NEE_.*_orig$", names(x), value = TRUE)
  orig <- x[orig_names]
  names(orig) <- gsub("^NEE_|_orig$", "", names(orig))
  if (ncol(orig) == 0) return(NULL)
  # Order following columns according to orig
  DT_names <- grep("_DT_", names(x), value = TRUE)
  Reco_names <- grep("Reco", DT_names, value = TRUE)
  Reco_SD <- x[grep("_SD$", Reco_names, value = TRUE)]
  Reco <- x[Reco_names[!Reco_names %in% names(Reco_SD)]]
  names(Reco_SD) <- gsub("^Reco_DT_|_SD$", "", names(Reco_SD))
  # NOTE(review): strips the prefix from names(Reco_SD), which was already
  # renamed above, so this effectively copies names(Reco_SD) onto Reco.
  # Relies on the _SD and non-SD column sets matching one-to-one; the
  # identical() check further below guards against mismatches.
  names(Reco) <- gsub("^Reco_DT_", "", names(Reco_SD))
  # order Reco(_SD) columns
  Reco_SD <- Reco_SD[order(match(names(Reco_SD), names(orig)))]
  Reco <- Reco[order(match(names(Reco), names(orig)))]
  GPP_names <- grep("GPP", DT_names, value = TRUE)
  GPP_SD <- x[grep("_SD$", GPP_names, value = TRUE)]
  GPP <- x[GPP_names[!GPP_names %in% names(GPP_SD)]]
  names(GPP_SD) <- gsub("^GPP_DT_|_SD$", "", names(GPP_SD))
  # NOTE(review): same pattern as for Reco above (uses names(GPP_SD)).
  names(GPP) <- gsub("^GPP_DT_", "", names(GPP_SD))
  # order GPP(_SD) columns
  GPP_SD <- GPP_SD[order(match(names(GPP_SD), names(orig)))]
  GPP <- GPP[order(match(names(GPP), names(orig)))]
  # All four column sets must describe the same suffixes in the same order.
  if (!all(sapply(list(names(Reco_SD), names(GPP), names(GPP_SD)),
                  identical, names(Reco)))) {
    stop("'x' columns '^GPP_DT_' and '^Reco_DT_' not fitting")
  }
  orig <- orig[names(Reco_SD)]
  if (!identical(names(Reco_SD), names(orig))) stop(
    "'x' columns '^NEE_.*_orig$' not fitting '^Reco_DT_.*_SD$' columns"
  )
  if (ncol(orig) == 0) return(NULL)
  # --- effective number of observations per interval ----------------------
  # Residuals of the DT partitioning: observed NEE minus modelled (Reco - GPP).
  resid_DT <- orig - (Reco - GPP)
  autoCorr <- lapply(resid_DT, lognorm::computeEffectiveAutoCorr)
  resid_l <- split(resid_DT, x$timestamp)
  l <- list()
  for (i in seq_along(resid_l)) l[[i]] <-
    mapply(lognorm::computeEffectiveNumObs, res = resid_l[[i]],
           effAcf = autoCorr, MoreArgs = list(na.rm = TRUE))
  # One row per interval, one column per NEE suffix.
  nEff_DT <- as.data.frame(do.call(rbind, l))
  # SD considered only for measured NEE records (fqc == 0)
  fqc_names <- grep("^NEE_.*_fqc$", names(x), value = TRUE)
  # order fqc_names
  fqc_names <- fqc_names[order(match(
    gsub("^NEE_|_fqc", "", fqc_names), names(orig)))]
  if (!identical(gsub("^NEE_|_fqc", "", fqc_names), names(Reco_SD))) stop(
    "'x' columns '^NEE_.*_fqc$' not fitting '^Reco_DT_.*_SD$' columns"
  )
  # fqc == 0: measured data; fqc != 0 gap-filled (should be excluded)
  for (i in seq_along(Reco_SD)) Reco_SD[which(x[, fqc_names[i]] != 0), i] <- NA
  for (i in seq_along(GPP_SD)) GPP_SD[which(x[, fqc_names[i]] != 0), i] <- NA
  # --- aggregate: interval mean of squared SDs, then sqrt(./(nEff - 1)) ---
  # drop = FALSE keeps intervals with no data (yield NA rows).
  agg_Reco_SD <-
    aggregate(Reco_SD, by = list(Intervals = x$timestamp), function(x)
      if (all(is.na(x))) NA_real_ else mean(x^2, na.rm = TRUE), drop = FALSE)
  openeddy::varnames(agg_Reco_SD["Intervals"]) <- "Intervals"
  openeddy::units(agg_Reco_SD["Intervals"]) <- "-"
  # Intervals with nEff <= 1 cannot provide an SD estimate -> NA.
  res_Reco_SD <- as.data.frame(mapply(function(SD, nEff)
    sqrt(SD / ifelse(nEff <= 1, NA_real_, nEff - 1)),
    SD = agg_Reco_SD[-1], nEff = nEff_DT, SIMPLIFY = FALSE))
  names(res_Reco_SD) <- paste0("Reco_DT_", names(Reco_SD), "_SD")
  openeddy::varnames(res_Reco_SD) <- names(res_Reco_SD)
  openeddy::units(res_Reco_SD) <- openeddy::units(Reco_SD)
  agg_GPP_SD <-
    aggregate(GPP_SD, by = list(Intervals = x$timestamp), function(x)
      if (all(is.na(x))) NA_real_ else mean(x^2, na.rm = TRUE), drop = FALSE)
  res_GPP_SD <- as.data.frame(mapply(function(SD, nEff)
    sqrt(SD / ifelse(nEff <= 1, NA_real_, nEff - 1)),
    SD = agg_GPP_SD[-1], nEff = nEff_DT, SIMPLIFY = FALSE))
  names(res_GPP_SD) <- paste0("GPP_DT_", names(GPP_SD), "_SD")
  openeddy::varnames(res_GPP_SD) <- names(res_GPP_SD)
  openeddy::units(res_GPP_SD) <- openeddy::units(GPP_SD)
  res_mean <- res_sum <- cbind(agg_GPP_SD["Intervals"], res_Reco_SD, res_GPP_SD)
  # Compute sums as mean * nTot
  nTot <- unname(sapply(resid_l, nrow))
  res_sum[-1] <- as.data.frame(lapply(res_mean[-1], function(x) x * nTot))
  cat("Unit conversion\n===============\n")
  # conversion from mean CO2 concentration flux to integrated mass flux of C
  # - from umol+1s-1m-2 to g(C)+1hh-1m-2
  # (12e-6 g(C) per umol CO2; tres converts per-second rates to per-record sums)
  carbon <- carbon[carbon %in% names(res_sum)]
  res_sum[carbon] <- as.data.frame(lapply(res_sum[carbon], function(x)
    x * tres * 12e-6))
  carbon_units <- "g(C) m-2"
  if (length(carbon) > 0) {
    cat("CO2 concentration to C mass flux (",
        openeddy::units(res_sum[carbon])[1],
        " -> ", trimws(paste(carbon_units, agg_per)), "):\n\n",
        paste(carbon, collapse = ", "),
        "\n-------------------------------------------------------\n", sep = "")
  }
  openeddy::units(res_sum[carbon]) <- rep(carbon_units, ncol(res_sum[carbon]))
  if (length(carbon) == 0)
    cat("No variables available for conversion\n")
  # Suffix output columns so means and sums are distinguishable when merged.
  names(res_mean)[-1] <- paste0(names(res_mean[-1]), "_mean")
  names(res_sum)[-1] <- paste0(names(res_sum[-1]), "_sum")
  if (!is.null(agg_per)) openeddy::units(res_sum)[-1] <-
    trimws(paste(openeddy::units(res_sum)[-1], agg_per))
  out <- list(mean = res_mean, sum = res_sum)
  return(out)
}
|
3fb89be2f6926bca9cfc1c356059c7eec247db9f | 13756dddacf2e8c3c3355ad32adcf94700576a8e | /R/timeStack.R | 0880cc06b269ba600e82d90771296f92e3b014ab | [
"Apache-2.0"
] | permissive | mengluchu/bfastspatial2 | 6e96b2b2af96448626b03cdffc584e28aa95936e | 1254e661e33404d8b2ad3e5d830f864204b33a4f | refs/heads/master | 2016-09-06T00:06:39.757295 | 2014-12-10T09:51:48 | 2014-12-10T09:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,433 | r | timeStack.R |
#' @title Creates a time stack of Landsat layers
#'
#' @description Creates a stack of Landsat layers, reordering them chronologically
#'
#'
#'
#' @param x character. dir containing the files to be stacked or character list (the files). IN the former case, it is recommended to use the \code{pattern} argument
#' @param pattern See \link{list.files}
#' @param orderChrono Logical. Should layers in the output object be orderred chronologically. If set to FALSE, layer order will be alphabetical.
#' @param ... Arguments to be passed to \link{writeRaster}. If specifying a filename, it is strongly recommended to also set a datatype.
#' @author Loic Dutrieux
#' @examples
#' # 1 - Produce individual VI layers (using processLandsatBatch())
#' # Get the directory where the Landsat archives are stored
#' dir <- system.file('external', package='bfastSpatial')
#'
#' # Set the location of output and intermediary directories (everything in tmpdir in that case)
#' srdir <- dirout <- file.path(rasterOptions()$tmpdir, 'bfmspatial')
#' dir.create(dirout, showWarning=FALSE)
#' processLandsatBatch(x=dir, pattern=glob2rx('*.zip'), outdir=dirout, srdir=srdir, delete=TRUE, vi='ndvi', mask='fmask', keep=0, overwrite=TRUE)
#'
#' # Visualize one of the layers produced
#' list <- list.files(dirout, pattern=glob2rx('*.grd'), full.names=TRUE)
#'
#'
#' # Stack the layers
#' stackName <- file.path(dirout, 'stack/stackTest.grd')
#' dir.create(file.path(dirout, 'stack'))
#' s <- timeStack(x=dirout, pattern=glob2rx('*.grd'), filename=stackName, datatype='INT2S')
#'
#' plot(s)
#'
#'
#'
#'
#'
#' @import stringr
#' @import raster
#' @export
#'
timeStack <- function(x, pattern=NULL, orderChrono = TRUE, ...) {
    # x: either a single directory path (expanded via list.files) or a
    # character vector of file paths to stack.
    if(!is.character(x)){
        stop('x must be a character (directory) or a list of characters')
    }
    if (length(x) == 1){
        x <- list.files(x, pattern=pattern, full.names=TRUE)
    }
    # Landsat scene identifier embedded in each filename: sensor code
    # (LT4/LT5/LE7/LC8) followed by 13 digits. Defined once here instead of
    # repeating the literal (was duplicated in the original implementation).
    scenePattern <- '(LT4|LT5|LE7|LC8)\\d{13}'
    if(orderChrono){
        # Order files on characters 4-16 of the scene ID (the 13 digits).
        sceneID <- str_extract(string=basename(x), scenePattern)
        x <- x[order(substr(sceneID, 4, 16))]
    }
    s <- stack(x)
    # Attach acquisition dates (via getSceneinfo) as the z-dimension,
    # in the (possibly reordered) file order.
    time <- getSceneinfo(str_extract(string=basename(x), scenePattern))$date
    s <- setZ(x=s, z=time)
    if(hasArg(filename)) {
        # A filename was supplied through '...': persist the stack to disk.
        out <- writeRaster(s, ...)
        return(out)
    }
    return(s)
}
|
14d62e19c1a7cd1137393894f8a506d79a9c2be4 | e797f4d4383c5db9c78372e264ba2e66cd8e5859 | /webtext2.R | 93f2c6f6426781e11992c4c483b4a3c9237de272 | [] | no_license | vmsmith/WebTextAnalysis | f782e8f2fbc90a8b22f570ac0cb4939545ed8c55 | 7dd5bee32324e240aed041482ba4ef10beea1c27 | refs/heads/master | 2020-12-02T23:56:45.753783 | 2017-07-11T14:49:58 | 2017-07-11T14:49:58 | 95,964,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 897 | r | webtext2.R | # Set up the environment --------------------------------------------------
library(rvest)
library(magrittr)
library(selectr)
# Read in the page --------------------------------------------------------
# Parse the local HTML file into an xml_document for CSS-selector scraping.
webtext2 <- read_html("1_Data/webtext2.html")
webtext2
# Select by "id" ----------------------------------------------------
# Whole table element selected by its id attribute.
wt2nodes <- webtext2 %>% html_nodes("#music_table")
wt2nodes[[1]]
# All table rows within the table.
wt2nodes <- webtext2 %>% html_nodes("#music_table tr")
wt2nodes
wt2nodes[[1]]
# NOTE(review): identical to the selection two lines above (exploratory
# console transcript; bare expressions print the node sets interactively).
wt2nodes <- webtext2 %>% html_nodes("#music_table tr")
wt2nodes
wt2nodes[[2]]
# Table cells restricted to rows carrying the "american" class.
wt2nodes <- webtext2 %>% html_nodes("#music_table .american td")
wt2nodes
html_text(wt2nodes[c(1:5)])
html_text(wt2nodes[c(1, 6)])
# Exercise ----------------------------------------------------------------
# Extract all the titles from the book table
# Extract all the information about "Everyday Italian
61a0f61c16b703d16da20b0d0406488b9730d45f | 01b73f895eaf0fdbe48fdc9d79d20989cbd4e78d | /112. Data Visualization with ggplot2 (Part 1)/New Text Document.R | b27fd65f954704f2ff4d02a3d712af8ee5a1127d | [] | no_license | mohammadshadan/DC-R | 77070d9dc10c5d6a052ae19e915652886e877773 | 05569ac7a7d616820944098b394139255e6a87e8 | refs/heads/master | 2021-10-01T19:12:41.525055 | 2018-11-28T19:15:57 | 2018-11-28T19:15:57 | 108,579,271 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 464 | r | New Text Document.R | d <- read.delim("clipboard")
head(d)
i <- read.delim("clipboard")
head(i)
i$diff <- i$Sunrise - i$Fajr
str(i)
i$Sunrise <- as.numeric(i$Sunrise)
i$Fajr <- as.numeric(i$Fajr)
i$diff
white <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv", sep=';')
head(white)
red <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep=';')
head(red)
|
fda2fc8da843464b4eb8a038facc8dbbcca29ce7 | 42686fcc79e9fe3af36a942e293551707effa548 | /CampMyTrip.R | 111302afb92a3346a3894fa5f8f63c42c7f900dc | [] | no_license | jescoyle/CampMyTrip | 8934132fdef022691fd828ffdf944e1442fdfdce | 256474184260ad0e5e331ec893259c3ac7a7f1b5 | refs/heads/master | 2021-01-10T09:05:41.342603 | 2016-02-01T02:28:24 | 2016-02-01T02:28:24 | 50,749,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,512 | r | CampMyTrip.R | ### This script visulaizes campground data from Reserve America and finds a preliminary itinerary for a West Coast road trip
# Load packages
library(XML) # for queries to campsites API
library(jsonlite) # for queries to Google Distance API
library(rgdal) # for spatial data
library(sp) # for spatial data
library(maps) # for US map
library(maptools) # for manipulating map
library(rgeos) # for buffer
library(ggmaps)
# Define directories
working_dir = 'C:/Users/jrcoyle/Documents/SF Jobs/Data Incubator/CampMySite/'
# Load campground data
camps = read.csv('campgrounds_US.csv')
# Define variables
camps_key = ''
camps_api_loc = 'http://api.amp.active.com/camping/campgrounds?'
sites_api_loc = 'http://api.amp.active.com/camping/campsites?'
goog_api_loc = 'https://maps.googleapis.com/maps/api/distancematrix/json?'
goog_dir_api_loc = 'https://maps.googleapis.com/maps/api/directions/json?'
goog_api_key = ''
my_proj = '+proj=aeqd +lat_0=40 +lon_0=-97 +units=km'
### Visualize campground locations ###
# Some campgrounds are missing coordinates
# Exclude for now. TO DO: USE GOOGLE LOOKUP TO GET COORDS
camps = subset(camps, !is.na(longitude))
# Some campgrounds have errors in their coordinates - fix them
subset(camps, longitude < -180)
camps[camps$facilityID==110452,'longitude'] = -121.86 # typo
subset(camps, longitude > 0)
camps[camps$longitude >0, 'longitude'] = -camps[camps$longitude >0, 'longitude'] # wrong sign
subset(camps, longitude > -66) # typos: TO DO: USE GOOGLE LOOKUP TO RESOLVE, EXCLUDE FOR NOW
# Convert camps to spatial data
camps_sp = camps
coordinates(camps_sp) = c('longitude','latitude')
proj4string(camps_sp) = CRS('+proj=longlat')
plot(camps_sp)
# Projected (azimuthal equidistant, km units) copy for distance/overlay work.
camps_eqd = spTransform(camps_sp, CRS(my_proj))
# Some campgrounds clearly in wrong place.
# For now, exclude using polygon of USA
usa = map('world', 'usa', fill=T)
usa_sp = map2SpatialPolygons(usa, usa$names, CRS('+proj=longlat'))
usa_sp = spTransform(usa_sp, CRS(my_proj))
usa_buffer = gBuffer(usa_sp, byid=F, width=20) # Use a buffer 20km wider than outline
inUS = over(camps_eqd, usa_buffer) # Check which camps in this polygon
# BUGFIX: subset the projected object itself (the original assigned
# subset(camps_sp, ...) here, silently replacing camps_eqd with lon-lat
# points; downstream code plots camps_eqd on the projected basemap).
camps_eqd = subset(camps_eqd, !is.na(inUS))
camps_sp = subset(camps_sp, !is.na(inUS))
camps = subset(camps, !is.na(inUS))
plot(camps_sp)
# Load map of interstates: from http://www.nws.noaa.gov/geodata/catalog/transportation/html/interst.htm
roads = readOGR('interstates','in101503')
proj4string(roads) = CRS('+proj=longlat')
roads_eqd = spTransform(roads, CRS(my_proj))
## Calculate number of campgrounds within 25km of each interstate
# A function that finds all camps within a certain distance (rad) of a point (pt)
# rad : distance to search (in km)
# pt : c(lon, lat)
# dat : spatial data in lon-lat with camps
# Return the subset of 'dat' (spatial points in lon-lat) lying within
# 'rad' kilometres (great-circle) of the point 'pt' = c(lon, lat).
find_camps = function(pt, rad, dat){
	# Great-circle distance in km from every point of 'dat' to 'pt'.
	km_to_pt <- spDistsN1(coordinates(dat), pt, longlat = TRUE)
	is_near <- km_to_pt < rad
	dat[is_near, ]
}
# Go through each line segment and calculate the number of camps within 25 km of each point that defines the segment
nlines = length(roads@lines)
ncamps = sapply(1:nlines, function(i){
this_line = roads@lines[[i]]
# Find midpoints of line segments
these_points = coordinates(this_line)[[1]]
npoints = nrow(these_points)
if(npoints>1){
mid_points = these_points[1:(npoints-1),] + (these_points[2:npoints,] - these_points[1:(npoints-1),])/2
mid_points = matrix(mid_points, ncol=2)
} else {
mid_points = these_points
}
# Search for camps within 25 km of midpoints
found_camps = apply(mid_points, 1, function(pt) find_camps(pt, 25, camps_sp))
sapply(found_camps, nrow)
})
# Create spatial lines data frame for plotting
line_list = sapply(1:nlines, function(i){
this_line = roads@lines[[i]]
these_points = coordinates(this_line)[[1]]
linelist = sapply(1:(nrow(these_points)-1), function(j) Lines(Line(these_points[j:(j+1),]), paste(i,j,sep='_')))
})
sl = SpatialLines(unlist(line_list, recursive=F), CRS('+proj=longlat'))
df = data.frame(ncamps=unlist(ncamps))
sldf = SpatialLinesDataFrame(sl, df, match.ID=F )
sldf_eqd = spTransform(sldf, CRS(my_proj))
## Create map
library(lattice)
# Read in better outline of North America
nam = readOGR('C:/Users/jrcoyle/Documents/UNC/GIS shape files/N Am Outline','na_base_Lambert_Azimuthal')
nam_eqd = spTransform(nam, CRS(my_proj))
# Define colors and cuts
mycol = read.csv('C:/Users/jrcoyle/Documents/UNC/Projects/blue2red_10colramp.txt')
mycol = apply(mycol,1,function(x) rgb(x[1],x[2],x[3],maxColorValue=256))
mycol = mycol[10:1]
use_col = c('#000000', colorRampPalette(c(mycol))(5))
use_col = c('#000000','#640603','#820D07','#B93712','#D7531A','#F37323')
use_col = c('#000000', colorRampPalette(c('darkred','orange'))(5))
colcuts = c(0,1,2, 5, 10, 20, max(df$ncamps))
sldf_eqd$ncamps_fact = cut(sldf_eqd$ncamps, colcuts, include.lowest=T, right=F)
# Define plot window and clip data
bbox_ll = SpatialPoints(matrix(c(-125, -65, 22, 48), nrow=2), CRS('+proj=longlat'))
bbox_eqd = spTransform(bbox_ll, CRS(my_proj))
pdf('campsites_25km.pdf', height=6, width=9)
trellis.par.set(axis.line=list(col=NA))
spplot(sldf_eqd, 'ncamps_fact', panel=function(x,y,subscripts,...){
sp.polygons(nam_eqd, fill='grey80', col='white', lwd=1)
sp.points(camps_eqd, pch=2, col='grey30', cex=.5)
panel.polygonsplot(x,y,subscripts,...)
}, lwd=3, col.regions=use_col,
colorkey=list(labels=list(labels=c('0','1','2-4','5-9','10-19','20-34'), cex=1), height=.3),
xlim=coordinates(bbox_eqd)[,1], ylim=coordinates(bbox_eqd)[,2]
)
dev.off()
### Find campgrounds between two locations and summarize their desirability ###
# Define location from which to search
# Coordinates are c(lon, lat); the Google API expects "lat,lon", hence the
# [2], [1] index order in the queries below.
my_loc = c(-122.508545,37.762216)
dest_loc = c(-123.264885,44.578915)
stop_loc = c(-122.581029,41.847379)
# Find distance between my_loc and destination without stopover
dist_query = paste0(goog_api_loc, 'origins=', my_loc[2], ',', my_loc[1],
                    '&destinations=', dest_loc[2], ',', dest_loc[1],
                    '&key=', goog_api_key)
dists = fromJSON(dist_query)
# Distance Matrix response: rows = origins, elements = destinations;
# column 1 holds distance, column 2 duration.
straight_dist = dists$rows$elements[[1]][1]$distance$value
straight_time = dists$rows$elements[[1]][2]$duration$value
# Define maximum search distance
maxD = 100
# Find potential campgrounds within max distance
cg = find_camps(stop_loc, maxD, camps_sp)
results = data.frame()
# For each campground, calculate distances between campground, origin and destination
for(i in 1:nrow(cg)){
	cg_loc = coordinates(cg)[i,]
	# Two origins (home and destination), one destination (the campground).
	dist_query = paste0(goog_api_loc, 'origins=', my_loc[2], ',', my_loc[1], '|',
		dest_loc[2], ',', dest_loc[1], '&destinations=', cg_loc[2], ',', cg_loc[1],
		'&key=', goog_api_key)
	dists = fromJSON(dist_query)
	# NOTE(review): the inner 'i' (1:2, one per origin) shadows the outer
	# campground index 'i' — intentional here, but easy to misread.
	DandT = t(sapply(1:2, function(i){
		c(dists$rows$elements[[i]][1]$distance$value, dists$rows$elements[[i]][2]$duration$value)
	}))
	# Row 1 = from home, row 2 = from destination.
	new_row = data.frame(distTo=DandT[1,1], timeTo=DandT[1,2], distFrom=DandT[2,1], timeFrom=DandT[2,2])
	results = rbind(results, new_row)
}
cg = cbind(cg, results)
# Calculate time and distance added to trip by staying at campground
cg$distAdd = cg$distTo + cg$distFrom - straight_dist
cg$timeAdd = cg$timeTo + cg$timeFrom - straight_time
## Plot campgrounds on map colored by time and distance added to travel
colorby = factor(cg$contractType)
use_col = c('cornflowerblue','orange')
pdf('campgrounds_homeToinlaws.pdf', height=4, width=4)
par(mar=c(4,4,1,1))
# timeAdd/60 -> minutes, distTo/1000 -> km (assumes API seconds/metres —
# TODO confirm against the Distance Matrix response units).
plot(I(timeAdd/60) ~ I(distTo/1000), data=cg, pch=21, bg=use_col[colorby],
	las=1, xlab='Distance from home (km)', ylab='Time added to travel (min)')
legend('topleft', levels(colorby), pch=16, col=use_col, bty='n')
legend('topleft', c('',''), pch=1, bty='n')
dev.off()
e8e059d1812ac59ba12aea0dc941986dd2befcd5 | ffe6df4e4c8f95e942601b2e0904d0a1b2d48d60 | /R/utils-tables.R | aa5a0d03d5be2bfc726576435fc0caba6797527c | [
"MIT"
] | permissive | hamilton-institute/hamiltonCovid19 | 55eea1643c1fc527a84181769d21f707bf559d19 | 1629ff8a525bfc9661efe56c1c23858f424e55b5 | refs/heads/master | 2023-07-07T12:42:06.557058 | 2021-08-05T19:28:48 | 2021-08-05T19:28:48 | 288,237,889 | 2 | 0 | NOASSERTION | 2020-11-20T21:28:16 | 2020-08-17T17:02:27 | R | UTF-8 | R | false | false | 674 | r | utils-tables.R | summaryTab_table <- function(tab) {
  # Per-column specs: alignments and minimum widths, paired positionally.
  # NOTE(review): assumes 'tab' has exactly three columns (one per entry
  # below); set_names() errors otherwise — confirm against callers.
  col_spec <- purrr::map2(
    c("left", "left", "center"),
    c(75, 78, 75),
    ~ reactable::colDef(align = .x, minWidth = .y, maxWidth = 200)
  )
  col_spec <- purrr::set_names(col_spec, names(tab))
  # Build the reactable widget; 'columns' overrides defaultColDef per column.
  reactable::reactable(
    tab,
    defaultPageSize = 10,
    searchable = TRUE,
    pagination = TRUE,
    rownames = TRUE,
    highlight = TRUE,
    height = 580,
    paginationType = "simple",
    showPageInfo = FALSE,
    defaultColDef = reactable::colDef(
      align = "left",
      format = reactable::colFormat(separators = TRUE),
      maxWidth = 40,
      style = "font-size: 11px;"
    ),
    columns = col_spec
  )
}
e4811b69a8c072701e3d965928fba241b7286632 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RSEIS/examples/PLOT.MATN.Rd.R | 9ab01e9cdf8021a54b0a89b601d8509cce9ab8bf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 456 | r | PLOT.MATN.Rd.R | library(RSEIS)
### Name: PLOT.MATN
### Title: plot a matrix of several seismograms
### Aliases: PLOT.MATN
### Keywords: hplot
### ** Examples
# Synthesize ten sinusoids (10-100 Hz, random amplitudes) sampled at
# 1000 Hz over 0-6 s (6001 samples), then plot them with PLOT.MATN.
dt <- 0.001
t <- seq(0, 6, by=0.001)
thefreqs <- seq(from=10, to=100, by=10)
theamps <- runif(length(thefreqs))
# Build the length(t) x length(thefreqs) signal matrix in one vectorized
# call instead of growing it with cbind() inside a loop (which copies the
# whole matrix on every iteration). mapply() simplifies to a matrix here.
x <- mapply(function(f, a) a * sin(2 * pi * f * t), thefreqs, theamps)
PLOT.MATN(x, dt = dt)
|
ee34f57aa21d981fa5b6e4cdcb14aed73798e4b9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fmri/examples/plot.fmridata.Rd.R | 395131f261aab09c0d910dfda2d2a37ca642cdca | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 181 | r | plot.fmridata.Rd.R | library(fmri)
### Name: plot.fmridata
### Title: I/O functions
### Aliases: plot.fmridata plot.fmrisegment
### Keywords: hplot iplot
### ** Examples
## Not run: plot(pvalue)
|
360cbd34b6de7c296e30a0855049f47b97de249c | 9c9263977da97b2a97555910c01d6774c2beee9b | /server.R | 7582ccef50a64ca1f99937195f3f9d7351520bd9 | [] | no_license | laderast/shinyTutorial | 9c6cc3f9c994dd323d0967d5f4c4c0c1838471a6 | 4a571b3f5932ecd0247cc053183affca8a9c4c37 | refs/heads/master | 2021-01-01T04:35:01.683965 | 2016-05-24T18:19:28 | 2016-05-24T18:19:28 | 59,597,417 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 687 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(ggplot2)
library(dplyr)
##paste data loading code here
##can also paste it into global.R if you want
##ui.R to also be able to access it (can be useful for column names
##in select boxes, etc.)
# Server skeleton. 'laceScores' is expected to be defined by the
# data-loading code pasted above or in global.R (see file header comments).
shinyServer(function(input, output) {
  #laceData is a "reactive" version of laceScores that we can filter
  #ignore this for tasks 0-2
  laceData <- reactive({
    # Keep only rows with L <= 10; re-evaluates when laceScores changes.
    out <- laceScores %>%
      filter(L <= 10)
    return(out)
  })
  # Placeholder plot output; rendering code goes inside renderPlot().
  output$lacePlot <- renderPlot({
    #paste plotting code here
  })
})
|
56561065c5396a95616b67612dea3d36bbdc535e | 61ef69ad3a6cf4deb8103a39c07e83f2916ee245 | /text_analysis.R | dc46b300a4409a7bec05eaf7dd22320abab54e4f | [] | no_license | paulvanderlaken/2018_WIDM | 4c5c8628c42652e23415f3854cf098834bba4457 | 26a3676b5d50388492fb1703d2ceab18dd2c5ac1 | refs/heads/master | 2021-05-14T01:52:43.090685 | 2018-01-07T15:37:05 | 2018-01-07T15:37:05 | 116,577,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,427 | r | text_analysis.R | ### 2018/06/01
### WIE IS DE MOL
### TWITTER ANALYSIS
### PART 3: TEXT ANALYSIS
### PAULVANDERLAKEN.COM
### SETUP
pkg <- c("here", "tidyverse", "tidytext", "tm", "patchwork")
sapply(pkg, function(x){
if(!x %in% installed.packages()) install.packages(x)
library(x, character.only = TRUE)
return(x)
})
source(here("standard_file.R"))
# Replacement string used to blank out every matched pattern below.
censor = " "
# Clean tweet text into 'text_clean' (kept alongside the original 'text'),
# and record each user's tweet count. str_replace_all patterns run in
# order, each tagged inline with what it strips.
tweets %>%
  group_by(screen_name) %>%
  mutate(user_tweets = n()) %>%
  ungroup() %>%
  mutate(text_clean = str_replace_all(text, "https://t.co/[:alnum:]+|http://[:alnum:]+", censor) %>% # links
           str_replace_all("(:)([:punct:]|[A-z])+", censor) %>% # smileys
           str_replace_all("(&|#)", censor) %>% # hashtags
           str_replace_all("(@|@)", censor) %>% # mentions
           str_replace_all("[^[:ascii:]]", censor) %>% # unicode stuff
           str_replace_all("[^[:alnum:]-\\s]", censor) %>% # punctuation except -
           str_replace_all("\\s\\s", censor)
  ) ->
  tweets
# Tokenize on whitespace (custom regex keeps hyphenated words intact,
# matching the cleaning above which preserves '-').
tweets %>%
  unnest_tokens(word, text_clean, drop = FALSE, token = "regex", pattern = "\\s") ->
  tokens
top_size = 30
tokens %>%
group_by(word) %>%
count() %>%
ungroup() %>%
arrange(desc(n)) %>%
anti_join(y = tibble(word = stopwords("nl")), by = "word") %>%
filter(nchar(word) > 2) %>%
top_n(top_size) %>%
filter(!word %in% c("widm", "widm2018", "wieisdemol")) %>%
ggplot(aes(x = reorder(word, n), y = n)) +
annotation_custom(rast) +
geom_bar(aes(alpha = n, fill = factor(word %in% kan)), stat = "identity") +
coord_flip() +
theme_light() +
scale_fill_manual(values = c("grey", "green")) +
scale_alpha_continuous(range = c(0.6, 1)) +
theme(legend.position = "none") +
labs(y = NULL, x = NULL) ->
plot_words
plot_words +
labs(title = paste("Top", top_size, "meest gebruikte woorden"),
subtitle = subtitle_data_date,
y = "Aantal tweets", x = NULL,
caption = "paulvanderlaken.com")
save_ggplot("woorden_gebruikt", width = 3.5, height = 7)
tokens %>%
mutate(split = ifelse(created_at < afl$afl1$start, 1, 2)) %>%
group_by(split, word) %>%
count() %>%
group_by(split) %>%
arrange(desc(n)) %>%
anti_join(y = tibble(word = stopwords("nl")), by = "word") %>%
filter(!word %in% c("widm", "widm2018", "wieisdemol")) %>%
filter(nchar(word) > 2) %>%
top_n(top_size, n) %>%
ungroup() ->
tokens_split
tokens_split %>%
mutate(split = factor(split,
labels = c("Voorafgaand aan aflevering 1",
"Tijdens/na aflevering 1"),
ordered = TRUE)) %>%
ggplot(aes(x = reorder_within(word, n, split), y = n)) +
annotation_custom(rast) +
geom_bar(aes(alpha = n, fill = factor(word %in% kan)), stat = "identity") +
coord_flip() +
theme_light() +
scale_x_reordered() +
scale_fill_manual(values = c("grey", "green")) +
scale_alpha_continuous(range = c(0.6, 1)) +
facet_wrap(~ split, scales = "free_y") +
theme(legend.position = "none",
strip.background = element_rect(fill = "white"),
strip.text = element_text(colour = "black", size = 11)) +
labs(x = NULL, y = NULL) ->
plot_words_split
plot_words_split +
labs(title = paste("Top", top_size, "meest gebruikte woorden"),
subtitle = subtitle_data_date,
y = "Aantal tweets", x = NULL,
caption = "paulvanderlaken.com")
save_ggplot("woorden_gebruikt_split_afl1", width = 6, height = 7)
|
842925a886f98db6a7029c7f7d37a689e9d031b6 | 865cecdd703505d582b91242f9c37c0bed740095 | /R/ensemblgtf.R | 9172571945116a28d88b7fc0a7ae788dd76fac01 | [] | no_license | csiu/CEMTscripts | 4fb78442e540e2ff6e95b734b8f96043e3509e74 | 74baea3a0a59f4e21405c96a91d65d7a36764048 | refs/heads/master | 2021-08-16T16:44:42.236037 | 2017-11-20T05:19:39 | 2017-11-20T05:19:39 | 71,932,230 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 911 | r | ensemblgtf.R | #' Protein coding transcripts
#'
#' List of protein coding transcripts from Homo sapiens
#' on chromosomes 1-22, X and Y. Data is
#' obtained from Ensembl gene set annotations GR37.75
#' @format
#' An object of class GRanges of length 81732.
#' This number refers to the number of
#' protein_coding transcripts on chromosome 1-22,X,Y.
#' These transcripts also map to 20154 ensembl gene ids.
#' @source Downloaded from \url{http://www.ensembl.org/info/data/ftp/index.html}
#' @examples
# ensembl_gtf <- "Homo_sapiens.GRCh37.75.gtf"
#' ## Object produce by importing the data & applying filters
#' ensemblgtf <- rtracklayer::import(ensembl_gtf)
#' ensemblgtf <-
#' ensemblgtf[ensemblgtf$source=="protein_coding" &
#' ensemblgtf$type=="transcript"]
#' ensemblgtf <-
#' keepSeqlevels(ensemblgtf, value = c(1:22,"X","Y"))
#'
#' save(ensemblgtf, file="data/ensemblgtf.RData")
"ensemblgtf"
|
c4c38d293e2a71c0cf8ce5f59299b154ea0ce200 | a391230c74fe8abb2ce577ed76d07bef2c87ecb8 | /man/dataKNMI.Rd | df2662addd371d633fa403574fea38b5dec3f34c | [] | no_license | cran/tailDepFun | 9f490074ce7a5f6beba9ab9748d96cdf1d3e04f0 | d9625211ef0f628d7352578a1582a03db6570708 | refs/heads/master | 2021-06-09T15:05:33.434450 | 2021-06-03T10:00:02 | 2021-06-03T10:00:02 | 54,787,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,184 | rd | dataKNMI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Other.R
\name{dataKNMI}
\alias{dataKNMI}
\title{Wind speeds in the Netherlands.}
\format{
dataKNMI$data is a matrix with 672 rows and 22 columns, dataKNMI$loc is a matrix with 22 rows
and 2 columns.
}
\source{
KNMI
}
\description{
Daily maximal speeds of wind gusts, measured in 0.1 m/s. The data are observed at
22 inland weather stations in the Netherlands. Only the summer months are presented
here (June, July, August). Also included are the Euclidian coordinates of the 22
weather stations, where a distance of 1 corresponds to 100 kilometers.
}
\examples{
data(dataKNMI)
n <- nrow(dataKNMI$data)
locations <- dataKNMI$loc
x <- apply(dataKNMI$data, 2, function(i) n/(n + 0.5 - rank(i)))
indices <- selectGrid(cst = c(0,1), d = 22, locations = locations, maxDistance = 0.5)
EstimationBR(x, locations, indices, k = 60, method = "Mestimator", isotropic = TRUE,
covMat = FALSE)$theta
}
\references{
Einmahl, J.H.J., Kiriliouk, A., Krajina, A., and Segers, J. (2016). An M-estimator of spatial tail dependence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(1), 275-298.
}
|
2adeffd90bd5860d86c620e713f658ea71e41727 | 4a6f88bb4547c0e3258358fda6fe9d6a43c75271 | /plot4.R | 47705729c745d27f55da691a67e569656612874a | [] | no_license | mausha/ExData_Plotting1 | c207770c821e94aa5cff47c0c0079b3a523a92a4 | 2ef4a05006b8ac009ce26a76a78c204af586297f | refs/heads/master | 2021-01-05T09:55:14.123903 | 2020-02-17T04:31:25 | 2020-02-17T04:51:47 | 240,983,073 | 0 | 0 | null | 2020-02-16T23:29:28 | 2020-02-16T23:29:27 | null | UTF-8 | R | false | false | 6,579 | r | plot4.R | ###############################################################################
# This is an R script that creates the fourth plot (plot4.png)
# required for the Coursera Exploratory Data Analysis course,
# week-1 assignment:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
###############################################################################
###############################################################################
# Load all required libraries.
library(dplyr)
library(lubridate)
###############################################################################
# 0) Load all of the required data from the data set.
# download files
if (!file.exists("exdata_data_household_power_consumption.zip")) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile = "exdata_data_household_power_consumption.zip")
unzip("exdata_data_household_power_consumption.zip")
}
###############################################################################
# Loading the data:
# When loading the dataset into R, please consider the following:
# - The dataset has 2,075,259 rows and 9 columns.
# - We will only be using data from the dates 2007-02-01 and 2007-02-02.
# - You may find it useful to convert the Date and Time variables to Date/Time
# classes in R using the strptime() and as.Date() functions.
# - Note that in this dataset missing values are coded as ?.
power_data <- as_tibble(read.csv("household_power_consumption.txt", header=TRUE,
skip=0, stringsAsFactors = FALSE, sep = ";"))
# Column names:
# Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;
# Sub_metering_1;Sub_metering_2;Sub_metering_3
# Convert the values in the Date column from character & filter to 2 required days.
power_data <- mutate(power_data, Date = dmy(Date))
power_data <- filter(power_data,
Date == ymd("2007-02-01") | Date == ymd("2007-02-02"))
# Convert the remaining column values from character.
# The raw file codes missing values as "?", so as.numeric() turns those
# entries into NA (with a coercion warning); the complete.cases() filter
# below then drops such rows. hms() parses Time into a lubridate Period,
# which is later added to Date (power_data$Date + power_data$Time) to get
# a plottable date-time axis.
power_data <- mutate(power_data,
                     Time = hms(Time),
                     Global_active_power = as.numeric(Global_active_power),
                     Global_reactive_power = as.numeric(Global_reactive_power),
                     Voltage = as.numeric(Voltage),
                     Global_intensity = as.numeric(Global_intensity),
                     Sub_metering_1 = as.numeric(Sub_metering_1),
                     Sub_metering_2 = as.numeric(Sub_metering_2),
                     Sub_metering_3 = as.numeric(Sub_metering_3))
# Filter out missing data.
power_data <- filter(power_data, complete.cases(power_data))
###############################################################################
# Making Plots:
# Our overall goal here is simply to examine how household energy
# usage varies over a 2-day period in February, 2007. Your task is to
# reconstruct the following plots below, all of which were constructed using the
# base plotting system.
#
# For each plot you should:
# - Construct the plot and save it to a PNG file with a width of 480 pixels and
# a height of 480 pixels.
# - Name each of the plot files as plot1.png, plot2.png, etc.
###############################################################################
###############################################################################
###############################################################################
# Construct plot4:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
# Open the png file graphic device with the required size.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# Setup to plot a 2x2 matrix of plots, filling in by column
par(mfcol = c(2, 2))
###############################################################################
# Top Left Plot: copy of plot2 code except no units on Y label
plot(power_data$Date + power_data$Time,
power_data$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power",
main = "")
###############################################################################
# Bottom Left Plot: copy of plot3 code
# Construct the initial plot with Sub_metering_1 line graph
plot(power_data$Date + power_data$Time,
power_data$Sub_metering_1,
type = "l",
xlab = "",
ylab = "Energy sub metering",
main = "")
# Add Sub_metering_2 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_2, col = "red")
# Add Sub_metering_3 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_3, col = "blue")
# Add a legend at the top right corner of the plot
legend("topright",
lty = 1,
col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
###############################################################################
# Top Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Voltage
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Voltage,
type = "l",
xlab = "datetime",
ylab = "Voltage",
main = "")
###############################################################################
# Bottom Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Global_reactive_power
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Global_reactive_power,
type = "l",
xlab = "datetime",
ylab = "Global_reactive_power",
main = "")
# Close the png graphic device.
dev.off()
###############################################################################
############################# End of file. #######################
############################################################################### |
b85a6d07dfa57fd10d20288fc76aa66815a51c2d | 3688bf629f77329a46522a91b975ef401ed6f278 | /01-06_reading-xlsx-files.R | 7eeade477d0aafcca406c072e0c7f821adf89a35 | [] | no_license | mareq/coursera_data-science_03-getting-and-cleaning-data_lectures | 922f15ba9e940bccc89b4782d86d49d8a40fd4a2 | d32789b5c2985cdd6767c0b1741614942779ff63 | refs/heads/master | 2016-09-06T06:32:31.778377 | 2015-02-21T00:17:17 | 2015-02-21T00:17:17 | 31,090,284 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 412 | r | 01-06_reading-xlsx-files.R | datadir <- "data"
# Move into the folder that holds the example spreadsheet.
setwd(datadir)

# The xlsx package reads and writes Excel workbooks.
library(xlsx)

# Read the first worksheet, treating the first row as column names.
camera_data <- read.xlsx("cameras.xlsx", sheetIndex=1, header=TRUE)
head(camera_data)

# Read only a rectangular subset: columns 2-3 of rows 1-4.
subset_cols <- 2:3
subset_rows <- 1:4
camera_subset <- read.xlsx("cameras.xlsx", sheetIndex=1, colIndex=subset_cols, rowIndex=subset_rows)
print(camera_subset)

# Writing counterparts:
#write.xlsx
#write.xlsx2 # this one's faster
#XLConnect package has more options for writing and manipulating Excel files
|
c78ba12b3f2c07e2cd82886f711ad1080ae6e8a9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pauwels2014/examples/add_noise.Rd.R | 6db07bd71eea8e09a84720885ca73b5380367694 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 645 | r | add_noise.Rd.R | library(pauwels2014)
### Name: add_noise
### Title: Noise generative process for the simulations
### Aliases: add_noise
### Keywords: Model specific functions

### ** Examples

data(experiment_list1)
data(observables)

## Build the knowledge object holding the correct parameter values
knobj <- generate_our_knowledge(transform_params)

## Simulate a full time course, then keep only the observed time points
## (rows) and the observed species (columns) for the "mrnaLow" observable
full_course <- simulate_experiment(
	knobj$global_parameters$true_params_T,
	knobj,
	experiment_list1$nothing
)
obs_spec <- observables[["mrnaLow"]]
tempCourse <- full_course[
	knobj$global_parameters$tspan %in% obs_spec$reso,
	obs_spec$obs
]

## Add noise to the time course matrix
add_noise(tempCourse)
|
f567e820c7e3673210fda8e8730cd72e58eb4848 | 258241cdac524bbdeafb21f857a13b90b7e3d149 | /R/geom_timeline_label.R | 0e6387a649f14f6a424ce71facb9d3db07ed2df6 | [] | no_license | tcoopermont/eqtools | 19396a730b0da82c595f11302c6a00015f35cc2c | a166fbfb480429ba8e1dc5718c21b56f9f969d10 | refs/heads/master | 2021-01-01T06:02:01.072807 | 2017-09-27T16:33:47 | 2017-09-27T16:33:47 | 97,335,061 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,168 | r | geom_timeline_label.R |
library(scales)
library(ggplot2)
library(grid)
library(dplyr)
# Panel drawing function for GeomTimelineLabel.
#
# Keeps at most `n_max` rows per panel (the largest by `size`), then draws,
# for each remaining point, a short vertical connector segment plus the
# point's label text rotated 45 degrees above it.
#
# @param data         Layer data for one panel (x, y, label, size, n_max).
# @param panel_scales Panel scale information supplied by ggplot2.
# @param coord        Coordinate system used to transform the data.
# @return A grid gTree combining the connector segments and label grobs.
draw_panel_function <- function(data, panel_scales, coord) {
  # Restrict annotation to the n_max largest events (by size) when n_max
  # is set; an NA n_max means "label everything".
  one_max <- data$n_max[1]
  if (!is.na(one_max)) {
    # Modernised from the deprecated SE verbs mutate_()/arrange_().
    data <- dplyr::mutate(data, size = as.numeric(size)) %>%
      dplyr::arrange(dplyr::desc(size)) %>%
      dplyr::slice(1:one_max)
  }

  # Transform data coordinates into normalised panel coordinates.
  # (The original also assigned coords <- coord$data first; that value was
  # immediately overwritten, so the dead assignment is removed.)
  coords <- coord$transform(data, panel_scales)

  # Connector: a 0.07 npc vertical segment from each point; the label sits
  # at the top of the connector, left-justified and rotated 45 degrees.
  # TODO(review): should the 0.07 connector length come in as a parameter?
  connLine <- grid::segmentsGrob(coords$x, coords$y, coords$x, coords$y + 0.07)
  labelTxt <- grid::textGrob(coords$label, coords$x, coords$y + 0.07,
                             just = "left", rot = 45)
  grid::gTree(children = grid::gList(connLine, labelTxt))
}
# ggproto object backing geom_timeline_label().
# - required_aes: every layer must map `x` and `label`.
# - default_aes: point styling defaults; n_max = NA means "label all points"
#   (draw_panel_function only subsets when n_max is non-NA).
# - draw_key: reuse the standard point legend key.
# - draw_panel: the custom panel renderer (connector line + rotated text).
GeomTimelineLabel <- ggproto("GeomTimelineLabel", Geom,
                        required_aes = c("x","label"),
                        default_aes = aes(shape = 19, lwd = 2,colour = "black",
                                          fill = "black",alpha = 0.9,stroke = 1,n_max = NA),
                        draw_key = draw_key_point,
                        draw_panel = draw_panel_function
)
#' geom_timeline_label
#'
#' This function generates annotation to accompany `geom_timeline` plot objects.
#' The name of the earthquake location is shown above the timeline point and a
#' line connecting the name with the point is drawn.
#'
#' @inheritParams geom_timeline
#'
#' @details aes parameters act similar to `geom_label`
#' aes:
#' \itemize{
#'   \item{x: a Date object}
#'   \item{y: single integer or a factor. (points will be grouped by y) example: COUNTRY}
#'   \item{n_max: integer max number of labels sorted by size}
#'   \item{size: numeric data field used to limit the number of labels}
#'   \item{label: character text to be displayed over points}
#' }
#'
#' @return A ggplot2 layer object that can be added to a plot with `+`.
#'
#' @examples
#' \dontrun{
#' ggplot(quakesFiltered, aes(x = DATE, y = COUNTRY,
#'                            xmin = as.Date("2000-1-1", "%Y-%m-%d"),
#'                            xmax = as.Date("2015-12-31", "%Y-%m-%d"))) +
#'   geom_timeline(aes(size = EQ_PRIMARY)) +
#'   geom_timeline_label(aes(size = EQ_PRIMARY))
#' }
#'
# NOTE(review): consider @importFrom grid textGrob segmentsGrob for the
# package NAMESPACE; the file currently relies on library(grid).
#'
#' @export
geom_timeline_label <- function(mapping = NULL, data = NULL, stat = "identity",
                          position = "identity", show.legend = NA,
                          na.rm = FALSE, inherit.aes = TRUE, ...) {
  # Thin wrapper: everything is delegated to ggplot2::layer() with the
  # custom GeomTimelineLabel geom.
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomTimelineLabel,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
|
d461bea97590affea27586bfd78532d442c4792a | d78f04a654c9c7c4213f1d277579a7851658e9ec | /R/RcppExports.R | 012cf365c8bd993051580b73f3170f91a41dfc35 | [
"MIT"
] | permissive | const-ae/einsum | 0c4653a479b3250fe1fbb11e15e91715f615155a | caae57d2b17e2d2b43ffe175237dd8bd7fb323c2 | refs/heads/master | 2023-05-04T15:43:34.332269 | 2021-05-28T08:23:48 | 2021-05-28T08:23:48 | 274,106,728 | 6 | 1 | NOASSERTION | 2021-05-14T09:19:01 | 2020-06-22T10:25:21 | R | UTF-8 | R | false | false | 351 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Auto-generated R wrapper: forwards its arguments unchanged to the compiled
# routine registered as `_einsum_einsum_impl_fast` via .Call().  Per the
# header, do not edit by hand — edit the C++ source and re-run
# Rcpp::compileAttributes() instead.
einsum_impl_fast <- function(lengths_vec, array_vars_list, not_result_vars_vec, result_vars_vec, arrays) {
    .Call(`_einsum_einsum_impl_fast`, lengths_vec, array_vars_list, not_result_vars_vec, result_vars_vec, arrays)
}
|
905196bb13c55a0d42548a937fd4dc474ec80847 | cac5b2fa0e551581716c778d6448c137d37dfeda | /w3/hclust.R | b20d245dd632c8bbe6436976ba586d7e1e18759b | [] | no_license | xiaohan2012/coursera-data-analysis | 786f14a59e8b0f31207934f65d46c40b693b59c3 | b474bfe49385b54f2fc05b4796c08f59a38a9e69 | refs/heads/master | 2021-01-10T01:18:44.778973 | 2013-02-27T12:21:09 | 2013-02-27T12:21:09 | 8,455,293 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 291 | r | hclust.R | set.seed(1234)
# Simulate 12 points in three loose clusters: `mean` is recycled across the
# 12 draws, so points 1,4,7,10 come from cluster 1, points 2,5,8,11 from
# cluster 2, and points 3,6,9,12 from cluster 3.
x <- rnorm(12, mean=c(1,2,3), sd=0.2)
y <- rnorm(12, mean=c(1,2,1), sd=0.2)
# Scatter the points coloured by their true cluster and number them.
plot(x, y, col=rep(c(1,2,3),4), pch=19)
text(x+0.05, y+0.05, labels=as.character(1:12))
# Hierarchical clustering on the pairwise distances, then the dendrogram.
# (Renamed dist.matrix/hclust.obj: `<-` instead of `=`, and no dots in
# non-S3 names.)
coords <- data.frame(x=x, y=y)
dist_matrix <- dist(coords)
hclust_obj <- hclust(dist_matrix)
plot(hclust_obj)
|
067d7a891cab18ab448f912e7413a44de40fe72d | 7ba5a802f9d02cd075c78e2c1c302c34bdfab48f | /man/transform_df_fixed.Rd | 9dabcb3aa6310000c565b32872226a1964e3f341 | [] | no_license | nliulab/AutoScore | 8b2e23be56d9f75504341df6c2f004d31d985ac4 | 6eeb658441a47560b841545862ce667b8861e6a7 | refs/heads/master | 2023-07-19T23:11:57.036461 | 2023-07-16T02:28:40 | 2023-07-16T02:28:40 | 201,586,484 | 25 | 6 | null | 2019-08-31T01:26:44 | 2019-08-10T05:51:07 | null | UTF-8 | R | false | true | 584 | rd | transform_df_fixed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common.R
\name{transform_df_fixed}
\alias{transform_df_fixed}
\title{Internal function: Categorizing continuous variables based on cut_vec (AutoScore Module 2)}
\usage{
transform_df_fixed(df, cut_vec)
}
\arguments{
\item{df}{dataset(training, validation or testing) to be processed}
\item{cut_vec}{fixed cut vector}
}
\value{
Processed \code{data.frame} after categorizing based on fixed cut_vec
}
\description{
Internal function: Categorizing continuous variables based on cut_vec (AutoScore Module 2)
}
|
aa2aa6688ff3908bb83d6251fe37f79d4ce24a60 | 8a97255cb66455dbef0cf01864a3b334cf20a66b | /BucketVarComb/memoryMangment.R | b64c03845ed96eb77635746e14feb48efc13e840 | [] | no_license | AshutoshAgrahari/R_Practice | c56bbb3c0893e101305f150c0b74045f24cf5a44 | 4c31ce94f130b363f894177a1505ccac290547e0 | refs/heads/master | 2020-03-19T17:51:05.826260 | 2020-01-25T10:34:55 | 2020-01-25T10:34:55 | 136,781,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 594 | r | memoryMangment.R | # Source: http://adv-r.had.co.nz/memory.html
# NOTE(review): rm(list = ls()) wipes the user's workspace when sourced;
# kept because this is a scratch walk-through of the Advanced R memory notes.
rm(list = ls())
library(pryr)
# Sizes of a few sample objects.
object_size(1:10)
object_size(mean)
object_size(mtcars)
# Memory footprint of integer vectors of length 0..50.
# (The original computed `sizes` twice; the first pass stored the vectors
# themselves instead of their sizes and was immediately overwritten, so
# that dead assignment is removed.)
sizes <- sapply(0:50, function(n) object_size(seq_len(n)))
plot(0:50, sizes, xlab = "Length", ylab = "Size (bytes)", type = "s")
# Same data with the 40-byte vector overhead subtracted, plus guide lines
# at common allocation sizes and a 4-bytes-per-element reference slope.
plot(0:50, sizes - 40, xlab = "Length", ylab = "Bytes excluding overhead", type = "n")
abline(h = 0, col = "grey80")
abline(h = c(8, 16, 32, 48, 64, 128), col = "grey80")
abline(a = 0, b = 4, col = "grey90", lwd = 4)
lines(sizes - 40, type = "s")
# Session-level memory diagnostics.
mem_used()
gcinfo(TRUE)
# WARNING: gctorture() forces very frequent garbage collection and makes
# everything that follows extremely slow; for memory-bug debugging only.
gctorture()
|
3180f90019f9e3f6e10a62ebf068961d2484fa37 | d9c64fa3b49039807f51d3b666c08ff586f8f67a | /Plot3.R | ce7692995d6d7a6c7e0c0459225b80d4f4febd14 | [] | no_license | LarissaVaans/ExData_Plotting1 | b2e1143d276f745db78ff2bfe38bc427ca4174da | 6a913f9b1d74dbf27cd8ae442d16f129f868f0ac | refs/heads/master | 2021-01-22T16:53:24.516722 | 2015-11-08T11:11:51 | 2015-11-08T11:11:51 | 45,738,312 | 0 | 0 | null | 2015-11-07T13:51:49 | 2015-11-07T13:51:49 | null | UTF-8 | R | false | false | 868 | r | Plot3.R | png(filename = "Plot3.png", width = 480, height = 480)
# Read the semicolon-separated power file as character so no column is
# coerced prematurely, then parse the Date column.
raw <- read.csv2("household_power_consumption.txt", colClasses = "character")
raw$Date <- as.Date(raw$Date, "%d/%m/%Y")
# Keep the two target days and convert the sub-metering columns to numeric.
epcdata <- raw[raw$Date == "2007-02-01" | raw$Date == "2007-02-02",]
for (meter in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  epcdata[[meter]] <- as.numeric(epcdata[[meter]])
}
epcdata$newdate <- as.POSIXct(paste(epcdata$Date, epcdata$Time))
# Black line for meter 1, then overlay meters 2 and 3 in red and blue.
plot(epcdata$newdate, epcdata$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", col = "black")
lines(epcdata$newdate, epcdata$Sub_metering_2, col = "red")
lines(epcdata$newdate, epcdata$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
a925c2281818f5b916638abbbf51804e85046fce | 251d08baff87c53b3d31d7b09016931fef821c4a | /src/02_modLimma.R | 89922bf2b8a186f9262979c1e69e36a659c29c00 | [
"MIT"
] | permissive | nhejazi/neurodevstat | db98217a63309886975335c0fec691a85056e7b6 | 8039afeec5842f8fda4bdfcaf146a3e61b4447e0 | refs/heads/master | 2021-05-01T01:19:39.257767 | 2016-11-16T06:42:26 | 2016-11-16T06:42:26 | 69,637,755 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,114 | r | 02_modLimma.R | # fit linear models to each gene using voom with simple design matrix
# Gene-wise linear models on the voom-transformed data (simple design),
# followed by empirical-Bayes moderation of the variances.
vfit_simple <- limma::lmFit(v_simple)
vfit_simple <- limma::eBayes(vfit_simple)
# Full (number = Inf), unsorted results for the "type" coefficient with
# BH-adjusted p-values and confidence intervals.
tt1 <- limma::topTable(vfit_simple,
                       coef = which(colnames(design_simple) == "type"),
                       adjust.method = "BH", number = Inf,
                       sort.by = "none", confint = TRUE)
# clean up topTable output to generate results tables:
# back-transform log quantities with exp() and keep only the newly created
# columns (those not already present in tt1, via the %ni% helper).
# NOTE: the mutate() column order is load-bearing — it fixes the output
# CSV's column order.
tt_out1 <- tt1 %>%
  dplyr::mutate(
    geneID = geneIDs,
    lowerCI = exp(CI.L),
    FoldChange = exp(logFC),
    upperCI = exp(CI.R),
    pvalue = I(P.Value),
    fdrBH = I(adj.P.Val)
  ) %>%
  dplyr::select(which(colnames(.) %ni% colnames(tt1)))
data.table::fwrite(data.table(data.frame(tt_out1)),
                   file = paste0(proj_dir, "/results/ttLimma_simplemod.csv"))
# fit linear models to each gene using voom with the full design matrix
# (same pipeline as above, different design).
vfit_full <- limma::lmFit(v_full)
vfit_full <- limma::eBayes(vfit_full)
tt2 <- limma::topTable(vfit_full,
                       coef = which(colnames(design_full) == "type"),
                       adjust.method = "BH", number = Inf,
                       sort.by = "none", confint = TRUE)
# clean up topTable output to generate results tables
tt_out2 <- tt2 %>%
  dplyr::mutate(
    geneID = geneIDs,
    lowerCI = exp(CI.L),
    FoldChange = exp(logFC),
    upperCI = exp(CI.R),
    pvalue = I(P.Value),
    fdrBH = I(adj.P.Val)
  ) %>%
  dplyr::select(which(colnames(.) %ni% colnames(tt2)))
data.table::fwrite(data.table(data.frame(tt_out2)),
                   file = paste0(proj_dir, "/results/ttLimma_fullmod.csv"))
# Scratch code for inspecting/re-shaping the full-model results table.
#test <- fread(paste0(proj_dir, "/results/ttLimma_fullmod.csv"))
#test_clean <- test %>%
#  dplyr::arrange(., fdrBH) %>%
#  dplyr::mutate(
#    geneID = geneID,
#    FoldChange = I(FoldChange),
#    pvalueRaw = I(pvalue),
#    pvalueAdj = I(fdrBH)
#  ) %>%
#  dplyr::select(which(colnames(.) %in% c("geneID", "FoldChange", "pvalueRaw",
#                                         "pvalueAdj")))
#data.table::fwrite(test_clean, file = paste0(proj_dir, "/resultSubmit.tsv"),
#                  sep = "\t")
c96fd7d1fb1f4e3e8aa064f665a7c9e1a2919674 | 5dbc2ca4a6b12839bdebe5e8658b02671f078225 | /man/rapport-helpers.Rd | e93c37a1b7b87f3a111e740e5bf341abde09946c | [] | no_license | abhik1368/rapport | 2b15c817a49b30c60be787218c433eaad4788e79 | 5749d564588b9b3dc6f0f8ed4d6c8d18026b502a | refs/heads/master | 2021-01-17T19:57:51.066661 | 2014-12-16T12:37:05 | 2014-12-16T12:37:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,060 | rd | rapport-helpers.Rd | \name{rapport-helpers}
\alias{rapport-helpers}
\title{\emph{rapport helpers}}
\description{
\code{rapport} package comes with bunch of helper functions
that make your template writing and report creation easier,
although most of these helpers were migrated to the
\code{rapportools} package.
}
\details{
\emph{Export helpers}
\itemize{ \item \code{\link{rapport.docx}} \item
\code{\link{rapport.html}} \item \code{\link{rapport.odt}}
\item \code{\link{rapport.pdf}} \item
\code{\link{rapport.export}} }
Please load the \code{rapportools} package if you would use
any of the below functions in the \code{.GlobalEnv}, or
simply add \code{rapportools} to the required packages
section in your template file. That latter is a lot cleaner
solution.
\emph{General purpose helpers}
\itemize{ \item \code{\link{adj.rle}} \item
\code{\link{alike.integer}} \item \code{\link{capitalise}}
\item \code{\link{catn}} \item \code{\link{fml}} \item
\code{\link{is.boolean}} \item \code{\link{is.empty}} \item
\code{\link{is.number}} \item \code{\link{is.string}} \item
\code{\link{is.tabular}} \item \code{\link{is.variable}}
\item \code{\link{messagef}} \item \code{\link{pct}} \item
\code{\link{stopf}} \item \code{\link{tocamel}} \item
\code{\link{trim.space}} \item \code{\link{vgsub}} \item
\code{\link{warningf}} }
\emph{Summary statistics}
\itemize{ \item \code{\link{rp.desc}} \item
\code{\link{rp.freq}} }
\emph{Univariate descriptive statistics}
\itemize{ \item \code{\link{rp.iqr}} \item
\code{\link{rp.max}} \item \code{\link{rp.mean}} \item
\code{\link{rp.median}} \item \code{\link{rp.min}} \item
\code{\link{rp.missing}} \item \code{\link{rp.percent}}
\item \code{\link{rp.range}} \item \code{\link{rp.sd}}
\item \code{\link{rp.se.mean}} \item \code{\link{rp.sum}}
\item \code{\link{rp.valid}} \item \code{\link{rp.var}} }
\emph{Miscellaneous stats helpers}
\itemize{ \item \code{\link{htest}} \item
\code{\link{htest.short}} \item \code{\link{kurtosis}}
\item \code{\link{skewness}} \item
\code{\link{lambda.test}} \item \code{\link{rp.outlier}} }
}
|
5117c2d3ba642857c3156f7554a39ad930aca055 | 01b1446adcc5612fe9a1dd49172a87c59200882b | /man/summary.egf.Rd | cb9e5693f9351ba7741315ae31ec35e4b28702c3 | [] | no_license | davidearn/epigrowthfit | de5f046c123aecff7ca4b88d484e438b25e5c8cf | 36aac5d2b33c064725434bf298ac008e3929f9d6 | refs/heads/master | 2022-09-30T14:35:07.931181 | 2022-09-18T21:28:28 | 2022-09-18T21:28:28 | 250,906,109 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,815 | rd | summary.egf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary.egf}
\alias{summary.egf}
\title{Summarize a model object}
\usage{
\method{summary}{egf}(object, ...)
}
\arguments{
\item{object}{An \code{"\link{egf}"} object.}
\item{...}{Unused optional arguments.}
}
\value{
A list inheriting from class \code{"egf_summary"}, with elements:
\item{fitted}{
A numeric matrix. Each column is the result of applying
\code{\link{summary.default}} to a numeric vector listing
the fitted values of a top level nonlinear model parameter.
Fitted values are obtained via \code{\link[=fitted.egf]{fitted}}.
}
\item{convergence}{
An integer code returned by the optimizer, with 0 indicating successful
convergence within the specified absolute or relative tolerance.
}
\item{value, gradient}{
Numeric vectors giving the value and gradient of the negative log
likelihood function at the parameter vector returned by the optimizer.
}
\item{hessian}{
A logical flag indicating whether the Hessian matrix of the negative log
likelihood function at the parameter vector returned by the optimizer is
positive definite.
\code{NA} means that the matrix was not computed by \code{\link{egf}},
either because \code{\link{egf}} was not called with \code{se = TRUE},
or because an error was thrown during computation.
In the first case, \code{object$sdreport} is \code{NULL}.
In the second case, it is a \code{"\link[=try]{try-error}"} object
preserving the error message.
}
}
\description{
Summarizes fitted values of top level nonlinear model parameters
and gathers diagnostic information that can be used to quickly
assess convergence of the optimizer.
}
\examples{
object <- egf_cache("egf-1.rds")
zz <- egf_cache("summary-egf-1.rds", summary(object))
str(zz)
}
|
bdf8a9c4f13939e98066b2f9b96e51ea6b65d4bb | 4de6571e0ff033cc724517e0b59c562fd6388524 | /model_nowy.R | 195dad96a39b99ff609f2b10c469bb9512c27097 | [] | no_license | szymonsocha/econometrics-data-processing-bachelor-thesis | 65793b2ece97f9b13d9c01e43a35f87bbe21d1a2 | 70e73513de96197bf6bb2a957ee158b65bb162be | refs/heads/main | 2023-06-07T08:36:32.177348 | 2021-07-06T22:45:59 | 2021-07-06T22:45:59 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 7,385 | r | model_nowy.R | library("lmtest")
library("sandwich")
library("stargazer")
library(rcompanion)
library("car")
options(scipen=999)
setwd("C:\\Users\\szymo\\Desktop\\Nauka\\Licencjat")
dane = read.csv(file = "Dane\\licencjat dane\\merged.csv", sep=",", header = TRUE)
# Drop the countries that had missing data.
unique(dane$country_region)
dane = dane[!dane$country_region == 'Cyprus',]
dane = dane[!dane$country_region == 'Luxembourg',]
dane = dane[!dane$country_region == 'Russia',]
dane = dane[!dane$country_region == 'Ukraine',]
dane = dane[!dane$country_region == 'South Korea',]
# Check which countries are missing observations
# (prints the row count and the name for every remaining country).
for (val in unique(dane$country_region)) {
  x = dane[dane$country_region == val,]
  print(length(x$data))
  print(unique(x$country_region))
}
remove(x)
View(dane[dane$country_region == 'Bulgaria',])
# Shift the dependent variable so it can be log-transformed
# (presumably +46 makes the minimum of `zmiana` positive — TODO confirm).
dane$zmiana_log <- dane$zmiana + 46
# Change the base level of the weekday, month and country factors.
dane$weekday = relevel(factor(dane$weekday), "monday")
dane$month = relevel(factor(dane$month), "April")
dane$country_region = relevel(factor(dane$country_region), "Poland")
#############################
# MODELS
#############################
# Pooled model with country dummies plus mobility and weekday/month controls.
model_dumm = lm(zmiana~as.factor(dane$country_region)+workplaces_percent_change_from_baseline+as.factor(dane$weekday)+as.factor(dane$month),
           data=dane)
summary(model_dumm)
# Same specification with a log-transformed (shifted) dependent variable.
model_dumm_log = lm(log(zmiana_log)~as.factor(dane$country_region)+workplaces_percent_change_from_baseline+as.factor(dane$weekday)+as.factor(dane$month),
                data=dane)
summary(model_dumm_log)
######
# main model (no country dummies)
model = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane$weekday)+as.factor(dane$month),
           data=dane)
summary(model)
######
# Log dependent variable with the mobility regressor squared.
model_log = lm(log(zmiana_log)~I(workplaces_percent_change_from_baseline^2)+as.factor(dane$weekday)+as.factor(dane$month),
               data=dane)
summary(model_log)
#############################
# TESTS
#############################
resettest(model, power=2:3, type="fitted")
# p-value = 0.06155
# only this model passes RESET; adding squares or a logarithm hurts the model
car::vif(model) # fine
plot(model) # also looks fine
bptest(model)
# p-value = 0 — unfortunately, heteroskedasticity
# Therefore apply White's robust covariance matrix.
coeftest(model, vcov.=vcovHC(model, type = "HC0"))
model_robust = (coeftest(model, vcov.=vcovHC(model, type = "HC0")))
#stargazer(model_dumm, model, model_robust, type="html", df=FALSE, out="C:\\Users\\szymo\\Desktop\\star_dumm2.doc")
stargazer(model, model_dumm, model_robust, type="text", df=FALSE)
# Country fixed effects fail the RESET test, so the two models (fixed vs
# plain) are compared to see whether the estimate changes (it does not).
# Ladder of nested specifications, each reported with White (HC0) robust SEs.
# model1: workplace mobility only.
model1 = lm(zmiana~workplaces_percent_change_from_baseline,
            data=dane)
coeftest(model1, vcov.=vcovHC(model1, type = "HC0"))
model_robust1 = (coeftest(model1, vcov.=vcovHC(model1, type = "HC0")))
# model2: + month dummies.
model2 = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane$month),
            data=dane)
coeftest(model2, vcov.=vcovHC(model2, type = "HC0"))
model_robust2 = (coeftest(model2, vcov.=vcovHC(model2, type = "HC0")))
# model3: + weekday dummies.
model3 = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane$month)+as.factor(dane$weekday),
            data=dane)
coeftest(model3, vcov.=vcovHC(model3, type = "HC0"))
model_robust3 = (coeftest(model3, vcov.=vcovHC(model3, type = "HC0")))
# model4: + country dummies (full specification).
model4 = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane$month)+as.factor(dane$weekday)+as.factor(dane$country_region),
            data=dane)
coeftest(model4, vcov.=vcovHC(model4, type = "HC0"))
model_robust4 = (coeftest(model4, vcov.=vcovHC(model4, type = "HC0")))
stargazer(model_robust1, model_robust2, model_robust3, model_robust4, type="text", df=FALSE)
#stargazer(model_robust1, model_robust2, model_robust3, model_robust4, type="html", df=FALSE, out="C:\\Users\\szymo\\Desktop\\star_fix.doc")
########
# TESTS
########
plot(log(dane$zmiana), dane$workplaces_percent_change_from_baseline)
# NOTE: model1 is re-fitted identically here (redundant but harmless).
model1 = lm(zmiana~workplaces_percent_change_from_baseline,
            data=dane)
plot(model1)
min(dane$workplaces_percent_change_from_baseline)
# Specification (RESET) and heteroskedasticity (Breusch-Pagan) tests.
resettest(model1, power=2:3, type="fitted")
resettest(model2, power=2:3, type="fitted")
resettest(model3, power=2:3, type="fitted")
resettest(model4, power=2:3, type="fitted")
bptest(model1)
bptest(model2)
bptest(model3)
bptest(model4)
###
# Split the sample by income group and re-centre each country factor on a
# within-group reference country.
dane_high = dane[dane$income == 'high',]
dane_mid_high = dane[dane$income == 'upper_middle',]
dane_mid_low = dane[dane$income == 'lower_middle',]
dane_high$country_region = relevel(factor(dane_high$country_region), "Poland")
dane_mid_high$country_region = relevel(factor(dane_mid_high$country_region), "Turkey")
dane_mid_low$country_region = relevel(factor(dane_mid_low$country_region), "Egypt")
# Full specification (mobility + month/weekday/country dummies) estimated
# separately within each income group, with White (HC0) robust SEs.
model_high = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane_high$month)+as.factor(dane_high$weekday)+as.factor(dane_high$country_region),
               data=dane_high)
coeftest(model_high, vcov.=vcovHC(model_high, type = "HC0"))
model_robust_high = (coeftest(model_high, vcov.=vcovHC(model_high, type = "HC0")))
model_mid_high = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane_mid_high$month)+as.factor(dane_mid_high$weekday)+as.factor(dane_mid_high$country_region),
               data=dane_mid_high)
# BUG FIX: the original printed coeftest(model4, vcov.=vcovHC(model_mid_high, ...)),
# pairing model4's coefficients with a differently-sized covariance matrix;
# it should inspect model_mid_high, as the sibling calls above/below do.
coeftest(model_mid_high, vcov.=vcovHC(model_mid_high, type = "HC0"))
model_robust_mid_high = (coeftest(model_mid_high, vcov.=vcovHC(model_mid_high, type = "HC0")))
model_mid_low = lm(zmiana~workplaces_percent_change_from_baseline+as.factor(dane_mid_low$month)+as.factor(dane_mid_low$weekday)+as.factor(dane_mid_low$country_region),
               data=dane_mid_low)
coeftest(model_mid_low, vcov.=vcovHC(model_mid_low, type = "HC0"))
model_robust_mid_low = (coeftest(model_mid_low, vcov.=vcovHC(model_mid_low, type = "HC0")))
stargazer(model_robust_high, model_robust_mid_high, model_robust_mid_low, type="text", df=FALSE)
#stargazer(model_robust_high, model_robust_mid_high, model_robust_mid_low, type="html", df=FALSE, out="C:\\Users\\szymo\\Desktop\\star_income.doc")
###
# Tests: RESET (specification) and Breusch-Pagan (heteroskedasticity).
resettest(model_high, power=2:3, type="fitted")
resettest(model_mid_high, power=2:3, type="fitted")
resettest(model_mid_low, power=2:3, type="fitted")
bptest(model_high)
bptest(model_mid_high)
bptest(model_mid_low)
# Jarque-Bera normality tests on the residuals of every model.
library("tseries")
jarque.bera.test(model1$residuals)
jarque.bera.test(model2$residuals)
jarque.bera.test(model3$residuals)
jarque.bera.test(model4$residuals)
jarque.bera.test(model_high$residuals)
jarque.bera.test(model_mid_high$residuals)
jarque.bera.test(model_mid_low$residuals)
library(rcompanion)
plotNormalHistogram(model_mid_low$residuals) # these plots must go into the thesis
boxplot(model1$residuals)
plot(model1, which = 5)
###
unique(dane_mid_low$country_region)
###
# Descriptive statistics of `srednia` by income group.
mean(dane_high$srednia)
mean(dane_mid_high$srednia)
mean(dane_mid_low$srednia)
min(dane_high$srednia)
min(dane_mid_high$srednia)
min(dane_mid_low$srednia)
max(dane_high$srednia)
max(dane_mid_high$srednia)
max(dane_mid_low$srednia)
29aafc2c34469fbeb184fdcd18a6d3ec924415aa | 3d4635d0c22ae2812db525482f19c3b986e29a03 | /add_columns.R | f6cfd7300f7f346af8ad1adc8d25d0a39cf0222e | [] | no_license | anniecasey/camCompare | b02a0dfd3228a2b35550a04abb2e17299469f4ad | ad6d36dcb4d201f6282f33d93fd8b89ae2332472 | refs/heads/master | 2021-07-23T03:51:17.157170 | 2017-11-02T21:51:36 | 2017-11-02T21:51:36 | 99,351,505 | 0 | 0 | null | 2017-08-07T09:52:40 | 2017-08-04T14:35:53 | null | UTF-8 | R | false | false | 1,219 | r | add_columns.R | #' Adding to Dataset
#'
#' Adds a column for properly formatted datetime, and a column for ST.Dec
#' (the start time expressed as a decimal), then reduces the data to one row
#' per visit with its start and end datetimes.
#'
#' @param file The metadata file containing your camera trap data. NOTE: Make sure your file has the following columns: "Year", "Month", "Day", "Hour", "Minute", "Second"
#' @param colname Column name passed through to the helper functions
#'   \code{final_start}, \code{final_end} and \code{species_list}.
#'
#' @return A data frame with one row per visit and the columns
#'   \code{Species}, \code{Date_and_Time}, \code{ST.Dec} and \code{End_Time}.
#'
#' @author Annie Casey, \email{annie.casey15@gmail.com}
#' @keywords metadata species indexes
#'
#' @examples
#'
#' @export
add_columns <- function(file, colname)
{
  start_list <- final_start(file, colname)
  end_list <- final_end(file, colname)
  df <- as.data.frame(file)
  df$Species <- species_list(file, colname)
  # Compute the datetime vector once; the original rebuilt the full vector
  # inside the loop below on every iteration, making this step O(n^2).
  n <- nrow(file)
  datetimes <- date_time(file, seq_len(n))
  df$Date_and_Time <- datetimes
  # Decimal start times, preallocated instead of grown element by element.
  timecol <- vector("list", n)
  for (i in seq_len(n))
  {
    timecol[[i]] <- decimate_time(datetimes[i])
  }
  df$ST.Dec <- as.numeric(timecol)
  endtimes_data <- df[end_list, "Date_and_Time"] # List of Datetimes according to indexes
  new_dataset <- df[start_list, c("Species", "Date_and_Time", "ST.Dec")] # dataframe according to indexes
  new_dataset$End_Time <- endtimes_data
  new_dataset
}
# Save into new excel file
# library(xlsx)
# write.xlsx(x = new_dataset, file = "43_Species_Visits(2013).xlsx")
|
d61d8253119523def3f89d8b8f423eb980c3bed8 | 893ba5093a71361ef12c3fe1f88ea71f56731b80 | /code/01_subset_data_to_any_state.R | b7faf3b100b5381cbaf96cae27731a1c00580be9 | [] | no_license | tswayd/analyzing_covid_mobility_dataset | ce5f9476d7f515faa3f5bbb907aef193d757fe31 | 821a3dc0536bd196f6245f10bbc1b575bc33f73e | refs/heads/master | 2023-01-11T01:42:59.789149 | 2020-11-17T08:00:28 | 2020-11-17T08:00:28 | 296,526,930 | 0 | 0 | null | 2020-11-03T01:20:57 | 2020-09-18T05:54:27 | HTML | UTF-8 | R | false | false | 571 | r | 01_subset_data_to_any_state.R | # Script to read in Apple mobility data from a csv file
# Subsets Apple mobility CSV data down to the rows for one particular US
# state and writes that subset out to a new csv file.
# Helper functions live in the code/functions directory.
# Delong Tsway
# dttsway@dons.usfca.edu
# September 13, 2020

# bring the subsetting function into scope
source("code/functions/subset_mobility_data_to_state.R")

# exercise the function on the September 12th data dump
subset_mobility_data_to_state(
  input_file_name = "data/raw_data/applemobilitytrends-2020-09-12.csv",
  state_to_subset = "Massachusetts")
|
060f02008053ad4209a3ca41d78e41d3c9b97720 | c0e766a6a57e3c5c32f8b0afe130b8df66e6dbf9 | /rsellShiny/R/infoBoxT.R | 38fce77b6ef347c9784b27d8891aa142199377f0 | [] | no_license | t2tech-corp/Rsell-Packages | b450fec180754aa9cf0cf3ab6b369c74c57b7e70 | 047a2348650e5a2ee0bc52500824a34f16167056 | refs/heads/main | 2023-03-03T00:22:15.006720 | 2021-02-14T22:58:12 | 2021-02-14T22:58:12 | 329,474,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,112 | r | infoBoxT.R | #' Custom Infobox
#'
#' This function generates a custom Infobox. Built from shinydashboard infoBox.
#'
#' @param title Title text shown at the top of the box (required).
#' @param value Optional value line; omitted from the box when NULL.
#' @param subtitle Optional subtitle paragraph; omitted when NULL.
#' @param caption Currently unused by the function body (kept in the
#'   signature for backward compatibility).
#' @param icon Icon shown beside the content; defaults to
#'   shiny::icon("bar-chart").
#' @param color Suffix for the "bg-" CSS class (applied to the whole box
#'   when fill = TRUE, otherwise only to the icon area).
#' @param width Bootstrap column width; the "col-sm-<width>" class is added
#'   unless width is NULL.
#' @param href Optional link target; when non-NULL the whole box is wrapped
#'   in an anchor tag.
#' @param fill If TRUE the background colour class is applied to the entire
#'   box, otherwise only to the icon.
#' @return An HTML \code{div} tag containing the info-box markup.
#' @export
#' @examples
#' # infoBoxT("Sales", value = 42, subtitle = "This week")
#'
infoBoxT <- function (title, value = NULL, subtitle = NULL, caption = NULL, icon = shiny::icon("bar-chart"), color = "#ffffff", width = 4, href = NULL, fill = FALSE) {
  # CSS class used for the coloured background.
  colorClass <- paste0("bg-", color)
  boxContent <- div(class = "info-box",
                    class = if (fill) colorClass,
                    span(class = "info-box-icon",
                         class = if (!fill) colorClass, icon),
                    div(class = "info-box-content",
                        span(class = "info-box-text", title),
                        if (!is.null(value)) span(class = "info-box-number", value),
                        if (!is.null(subtitle)) p(subtitle))
                    )
  # Wrap the whole box in a link when a target URL is supplied.
  if (!is.null(href))
    boxContent <- a(href = href, boxContent)
  # Attach the Bootstrap column class (unless width is NULL) and return.
  div(class = if (!is.null(width))
    paste0("col-sm-", width), boxContent)
}
|
1979c364c46dcd998d0243e9177c0615ebe6e2fb | e4614050f43d7a5fdf9bbffbb45bdf37e25930d6 | /EPC_OAC.R | 275959f9eaba76070b544020639eb98a68d67905 | [] | no_license | sgetalbo/EPC_data | 5ea29acbe9cc368328fca33d1479d84a1d09aa8b | e93c1b520bc573422e46b907532c68d91bcd0218 | refs/heads/master | 2020-06-05T08:31:07.190295 | 2019-06-17T16:08:45 | 2019-06-17T16:08:45 | 192,376,956 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,112 | r | EPC_OAC.R | library(data.table)
library(dplyr)
library(magrittr)
library(ggplot2)
# EPC certificates: just select the postcode and current energy efficiency
# columns (4 and 8 of the cleaned extract).
epc <- fread("./99_New_Data/05_The_EUC_data/epc_full_clean.csv", na.strings = c(" ", ""), select = c(4,8))
# NSPL lookup: just select postcode and OA classification code (cols 1, 33).
NSPL <- fread("./99_New_Data/NSPL_MAY_2018_UK/Data/NSPL_MAY_2018_UK.csv" , select = c(1, 33))
########## EPC energy efficiency by OAC group ########
# Strip spaces from postcodes so they match the EPC POSTCODE format, and
# keep only the first two characters of the OAC code (the OAC group).
NSPL$pcd <- gsub(' ', '', NSPL$pcd)
NSPL$oac11 <- substr(NSPL$oac11, 1 , 2)
epc_oac <- merge(epc, NSPL, by.x="POSTCODE", by.y = "pcd", all.x = TRUE)
# Mean current energy efficiency per OAC group.
epc_oac %<>%
  group_by(oac11) %>%
  summarise(mean_cee = round(mean(CURRENT_ENERGY_EFFICIENCY),digits = 2))
# The first character of the group code identifies the supergroup.
epc_oac$SPRGRP <- substring(epc_oac$oac11, 1, 1)
epc_oac <- as.data.frame(epc_oac)
epc_oac$oac11 <- tolower(epc_oac$oac11)
epc_oac %<>%
  filter(oac11 != "")
### Fix labels: attach the human-readable OAC group names.
group_labels <- fread("99_New_Data/05_The_EUC_data/Output Area Classification/Tables/Cluster_Names.csv", select = 2, na.strings = c(" ", ""))
group_labels %<>%
  filter(Group != "NA")
group_labels$grp_ltr <- substring(group_labels$Group, 1, 2) # numbers represent start and end position.
epc_oac <- merge(epc_oac, group_labels, by.x = "oac11", by.y = "grp_ltr")
names(epc_oac)[3] <- "Supergroup"
### Plot EPC / OAC group
p <-ggplot(epc_oac, aes(x=Group, y=mean_cee, fill=Supergroup)) +
  geom_bar(stat="identity") +
  theme_minimal() +
  xlab("OAC group") +
  ylab("Mean Current Energy Efficiency Score") +
  ggtitle("Average current energy efficiency rating by OAC group") +
  theme(axis.text.x = element_text(angle = 90))
  #theme(legend.position = "none") +
p
### EPC energy efficiency by IMD decile ###
# NOTE(review): OA_PC is not defined anywhere in this script (the postcode
# lookup above is called NSPL) — confirm where OA_PC is created before running.
epc_lsoa <- merge(epc, OA_PC[,c(1,4)], by.x="POSTCODE", by.y="PCD7", all.x = TRUE)
IMD <- fread("./99_New_Data/05_The_EUC_data/IMD_deciles.csv", select = c(1:6))
# Keep only the decile rows for the overall IMD index.
IMD %>%
  filter(Measurement == "Decile") %>%
  filter(`Indices of Deprivation` == "a. Index of Multiple Deprivation (IMD)") -> IMD_D
IMD_D <- as.data.table(IMD_D)
epc_imdD <- merge(epc_lsoa[,c(2,3)], IMD_D[,c(1,3)], by.y ="GeographyCode", by.x="LSOA11CD", allow.cartesian = TRUE)
# Mean current energy efficiency per IMD decile.
epc_imdD %<>%
  group_by(Value) %>%
  summarise(mean_cee = round(mean(CURRENT_ENERGY_EFFICIENCY), digits = 2))
p2 <- ggplot(epc_imdD, aes(x=Value, y=mean_cee, fill=Value)) +
  geom_bar(stat = "identity") +
  theme_minimal()
p2
### Correlation between EPC energy efficiency score and IMD score ###
# Same filtering but keeping the raw IMD scores instead of deciles.
IMD %>%
  filter(Measurement == "Score") %>%
  filter(`Indices of Deprivation` == "a. Index of Multiple Deprivation (IMD)") -> IMD_S
IMD_S <- as.data.table(IMD_S)
epc_imdS <- merge(epc_lsoa, IMD_S[,c(1,5)], by.y ="GeographyCode", by.x="LSOA11CD")
# Per-LSOA mean efficiency (mutate keeps one row per certificate).
epc_imdS %<>%
  group_by(LSOA11CD) %>%
  mutate(mean_cee = round(mean(CURRENT_ENERGY_EFFICIENCY), digits = 2))
p3 <- ggplot(epc_imdS, aes(x=Value, y=mean_cee)) +
  geom_point(shape=0) + # 0 = hollow squares
  geom_smooth(method=lm) # Add linear regression line
p3
mod_p3 <- lm(Value ~ mean_cee, epc_imdS)
### Geographically weighted regression
# Next steps stepwise regression and backward elimination using the OLS
cd8d9e2ef0f0b6e98c8c73340092d80956cc114f | c54f442cb02f4ec3845ddc9c36294cfc7a84b30d | /code/pilot/nc/01_geocode.R | 9afea8dee03e2cb1be85874229719cb12ad7dec0 | [] | no_license | ktmorris/blm_turnout | ba8a24bc28fb0e08a49ac16d978a925ab59de39b | 7211f01c197a73182acd8bdbcb541a0fa470da6f | refs/heads/master | 2023-07-03T23:02:59.440156 | 2021-08-04T14:29:35 | 2021-08-04T14:29:35 | 315,758,514 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 853 | r | 01_geocode.R |
# 01_geocode.R -- load the North Carolina voter roll, keep active/inactive
# registrants, clean street addresses, geocode, and cache the result.
# Depends on project helpers clean_streets() and geocode() defined elsewhere.

# Read only the columns needed downstream; fill = T tolerates ragged rows.
roll <- fread("E:/rolls/north_carolina/roll_full_20210101.csv",
select = c("county_desc", "voter_reg_num", "ncid", "status_cd",
"voter_status_reason_desc", "last_name", "first_name", "midl_name",
"house_num", "street_dir", "street_name", "street_type_cd",
"res_city_desc", "state_cd", "zip_code", "race_code", "ethnic_code",
"party_cd", "sex_code", "age", "registr_dt", "cancellation_dt"), fill = T, header = T)

# Keep Active ("A") and Inactive ("I") registrants only.
actives <- filter(roll, status_cd %in% c("A", "I"))

# Free the full roll immediately -- it is large.
rm(roll)

# Project helper: presumably normalizes the listed address components into a
# single clean street string -- TODO confirm against its definition.
actives <- clean_streets(actives, c("house_num", "street_dir", "street_name",
"street_type_cd"))

# geocode() appears to expect columns named state/city/zip, hence the rename.
actives <- geocode(rename(actives, state = state_cd, city = res_city_desc, zip = zip_code))

# Cache the geocoded roll for later scripts.
saveRDS(actives, "temp/nc_geo.rds")
cb107ae54d7a7b805f4e267dcbfef1e03f0e57bf | cfa49ce0eaf2f77fc403ea72521eef30f975a1ce | /tests/testthat/test-get_wormsid.R | e8649c17f900c516a8a7b243814438b033312974 | [
"MIT"
] | permissive | vanearranz/taxizesoap | a03f00c691d3189c84fe5ab33eb0ea8a5b85c188 | ba1dc20d76e0e7ad6c69c674a5ce6e943c5a3ca2 | refs/heads/master | 2021-10-07T16:46:25.821098 | 2021-06-01T21:53:11 | 2021-06-01T21:53:11 | 224,568,757 | 0 | 0 | NOASSERTION | 2019-11-28T04:19:04 | 2019-11-28T04:19:03 | null | UTF-8 | R | false | false | 906 | r | test-get_wormsid.R | context("get_wormsid")
test_that("get_wormsid basic usage works", {
  skip_on_cran()

  # A single, valid species name should resolve to exactly one WoRMS id.
  res <- get_wormsid(searchterm = "Salvelinus fontinalis", verbose = FALSE)

  expect_is(res, "wormsid")
  expect_is(res[1], "character")
  expect_equal(length(res), 1)
  # The match attribute records how the id was obtained.
  expect_equal(attr(res, "match"), "found")
})
test_that("get_wormsid works for multiple name inputs", {
  skip_on_cran()

  # Vectorized input: one id per supplied name, in order.
  ids <- get_wormsid(c("Salvelinus fontinalis","Pomacentrus brachialis"), verbose = FALSE)

  expect_is(ids, "wormsid")
  for (i in 1:2) expect_is(ids[i], "character")
  expect_equal(length(ids), 2)
  # Both names are known species, so both should be found.
  expect_equal(attr(ids, "match"), c("found", "found"))
})
test_that("get_wormsid fails well", {
  skip_on_cran()

  # Omitting the required search term must raise the standard R error.
  expect_error(get_wormsid(), "argument \"searchterm\" is missing")

  # A nonsense name should yield NA rather than an error.
  no_match <- get_wormsid("asdfafasdfs", verbose = FALSE)
  expect_true(is.na(no_match))
})
|
7ac3ba80420a626511749127533a2a13df75e85c | 8f058b2d07c414246eaef2530dbaafd39cac7452 | /dq5-people3-diversity-inclusion-maeva/shinyapp/ui.R | c33500cc2669f701552f990c679a7d7673e99901 | [] | no_license | zylstraa/People3Diversity | 9f4ddb246bd59b5435be973aa58623f1ce5bcf0f | ada548a3e787cfdd9339d812fb9d28239dac0621 | refs/heads/main | 2023-06-27T08:04:44.013653 | 2021-07-16T14:53:19 | 2021-07-16T14:53:19 | 365,837,565 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,354 | r | ui.R | # All libraries are imported in global.R
# Source codes that defines the Home Page
# homePage variable is defined here
source("pages/homePage.r") # -> homePage
# Source codes that defines the Analysis Page
# analysisPage variable
source("pages/analysisPage.r") # -> analysisPage
# Source codes that defines the Age Page
# agePage variable
#source("pages/agePage.r") # -> agePage
# Source codes that defines the Education Page
# educationPage variable
#source("pages/educationPage.r") # -> educationPage
# Source codes that defines the Race and Ethnicity Page
# racePage variable
#source("pages/racePage.r") # -> racePage
# Source codes that defines the Upload Data Page
# uploadDataPage variable
#source("pages/uploadDataPage.r") # -> uploadDataPage
# Source codes that defines the DataSources Page
# dataSourcesPage variable
source("pages/creditsPage.r") # -> creditsPage
# Putting the UI Together
shinyUI(
# Using Navbar with custom CSS
navbarPage(title=tags$a(href='https://people3.co',
tags$img(src='img/people3logo.png', height='30', width='140')),
theme="styles.css", # Shiny will look in the www folder for this
homePage,
#agePage,
#educationPage,
#racePage,
analysisPage,
#uploadDataPage,
creditsPage)
) |
6ca870d151a5cfce2363ad5cea6abcc88819019c | 3a13e815d25ac2a82b41a918dfb973d96d5b98ab | /R/imlplots.R | 26f855140e8384825162c8ea7eaa14d109d54b12 | [] | no_license | slds-lmu/imlplots | 6ddf32d966be17b83a17d826c418165347f90080 | cccff1609aae1b72df1a0709f060414c4364c601 | refs/heads/master | 2021-10-23T19:47:50.329478 | 2018-09-19T10:27:31 | 2018-09-19T10:27:31 | 128,178,323 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 38,937 | r | imlplots.R | #' Interactive Plots for Interpretable Machine Learning
#' @description
#' The function \code{imlplots()} creates an interactive shiny based dashboard
#' for visualizing the effects of statistical models.
#' The utilization of mlr (Machine Learning in R) is necessary.
#' For more infos go to \url{https://github.com/mlr-org}
#'
#' There are three types of plots: Partial Dependence Plots (PDP), Individual Conditional
#' Expectation (ICE) plots and Accumulated Local Effects (ALE) plots.
#' @param data Input data frame.
#' Has to contain exactly the same variables as the training data.
#' @param task The mlr task the models were being trained on,
#' e.g. \code{iris.task = makeClassifTask(data = iris, target = "Species")}.
#' Classification and regression tasks are supported.
#' @param models A list of mlr trained models, e.g. \code{list(rf.mod, glm.mod)}. \cr
#' You can provide differently tuned models of the same learner by assigning
#' a unique ID to the learner, e.g.
#' \code{makeLearner("regr.randomForest", id = "ownId")}
#'
#' @param model.check A string. A model check is performed upon initialization,
#' whether the provided models can be used to properly predict. \cr
#' 'all.features' iteratively checks all model/feature combinations. 'sample.feature' randomly
#' selects a single feature from the feature space and checks all models with
#' it.
#'
#' @examples
#' tsk = makeRegrTask(data = boston, target = "medv")
#' mod.rf = train("regr.randomForest", task = tsk)
#' mod.glm = train("regr.glm", task = tsk)
#' imlplots(boston, tsk, list(mod.rf, mod.glm))
#'
#' @note
#' The plots display combinations of different inputs and outputs/ predictions.
#' Therefore they are highly sensitive to the trained and provided models.
#'
#' The variable of interest provides variations of different inputs, while all other
#' variables are held constant. You can look at how the predictions change,
#' if you had provided different test data, by either filtering/ subsetting
#' the data or manually setting a variable to a fixed value for all observations.
#'
#' The function performs a basic check upon initialization,
#' whether the provided models can be used to properly predict.
#' If the check fails, it is recommended to manually test the model with the
#' \code{marginalPrediction()} function of the mmpf package.
#'
#' @author Julia Fried, Tobias Riebe, Christian Scholbeck; in cooperation with
#' the working group for computational statistics at
#' Ludwigs-Maximilians-University Munich.
#'
#' @references
#'
#' Apley (2016). "Visualizing the Effects of Predictor Variables in Black Box Supervised
#' Learning Models"
#'
#' Bischl et. al (2016). "mlr: Machine Learning in R." Journal of Machine Learning
#' Research, 17(170), pp.
#'
#' Friedman, J.H. (2001). "Greedy Function Approximation: A Gradient Boosting
#' Machine." Annals of Statistics 29: 1189 - 1232.
#'
#' Goldstein et al. (2013). "Peeking Inside the Black Box: Visualizing Statistical Learning with Plots of
#' Individual Conditional Expectation"
#'
#' Jones (2017). "mmpf: Monte-Carlo Methods for Prediction Functions." The R Journal.
#' @export
imlplots = function(data, task, models, model.check = "all.features") {
if (!(is.vector(models))) {models = list(models)}
assertDataFrame(data)
assertClass(task, classes = "Task")
lapply(models, FUN = function(elem) assertClass(elem, class = "WrappedModel"))
learner.models = lapply(models, function(x) x[["learner.model"]])
learner.models.names = lapply(models, function(x) x[["learner"]][["id"]])
target = getTaskDesc(task)$target
task.type = getTaskDesc(task)$type
if (task.type == "regr") {
target.levels = levels(data[[target]])
} else {
target.levels = NULL
}
features = names(data)[!names(data) %in% target]
features.numeric = features[sapply(data[!names(data) %in% target], is.numeric)]
features.factor = features[sapply(data[!names(data) %in% target], is.factor)]
do.call(
modelCheck,
list(data = data, models = models, features = features, model.check))
# basic check whether provided models throw error when using
# marginalPrediction()
app.ui = dashboardPage(
shinyjs::useShinyjs(),
header = dashboardHeader(
title = "LMU Data Science Innovationslabor",
titleWidth = 350,
tags$li(
class = "dropdown",
actionButton(
"reload", "Reload application",
width = "100%",
icon("refresh"),
style = "font-size: 16px; color: #fff; background-color: #337ab7;
border-color: #2e6da4; padding: 13px"))),
sidebar = dashboardSidebar(disable = TRUE),
body = dashboardBody(
h2("Interactive plots for interpretable machine learning"),
fluidRow(
tabBox(
width = 12,
height = NULL,
tabPanel(title = "Data", {
# data table with filters
fluidRow(
column(
width = 12,
box(
width = NULL,
status = "primary",
title = "Data used for plotting ICE curves",
selectInput("data_selection_mode",
"Select observations to sample from",
choices = c(
"Plot all sampled observations",
paste("Plot individual observations",
"(only influences computations of ICE and PDP curves)")
)
),
div(style = "overflow-x: scroll",
DT::dataTableOutput("table")
)
)
)
)
}),
tabPanel(
# plot settings and preview
title = "Settings",
fluidRow(
column(
width = 3,
# left column with plot settings
box(
title = "Plot settings",
width = NULL,
status = "primary",
selectInput("gfx.package", "Select graphics package",
choices = c("ggplot2",
"plotly (resource intensive)")
),
selectInput("models", "Choose predictive model",
choices = learner.models.names,
selected = 1, multiple = FALSE),
selectInput(
"plot_type",
"Choose plot type",
choices = c(
"Individual Conditional Expectation",
"Partial Dependence",
"Accumulated Local Effects"),
selected = 1, multiple = FALSE),
conditionalPanel(
condition =
"input.plot_type ==
'Individual Conditional Expectation'",
selectInput(
"iceplot_mode", "Ice plot mode",
choices = c("Regular",
"Centered"),
selected = "Regular")
),
conditionalPanel(
condition =
"input.plot_type == 'Individual Conditional Expectation'
& input.iceplot_mode == 'Centered'",
uiOutput("iceplot_center")
),
conditionalPanel(
condition =
"input.plot_type == 'Accumulated Local Effects'",
selectInput(
"aleplot_mode", "ALE Plot Mode",
choices = c("Main Effects",
"Second Order Effects"),
selected = "ALE Main Effects")
),
selectInput(
"var", "Variable of interest",
choices = features,
selected = NULL, multiple = FALSE),
conditionalPanel(
condition =
"input.plot_type == 'Accumulated Local Effects' &
input.aleplot_mode == 'Second Order Effects'",
uiOutput("ale_interaction")
),
uiOutput("checkbox"), # checkbox for adjusting the data
uiOutput("knots"),
uiOutput("lines")
),
box(
title = "Adjust feature values",
width = NULL,
uiOutput("sliders"),
uiOutput("selectors")
)
),
column(
width = 9,
# right column with plot preview
fluidRow(
style = "position:fixed; width:70%;",
uiOutput("plot")
)
)
)
),
tabPanel(
title = "Plots",
tabsetPanel(
tabPanel(
title = "Zoomed Plot",
fluidRow(
column(
width = 12,
box(
width = NULL,
height = "600px",
status = "primary",
uiOutput("zoomed_plot")
)
)
)
),
tabPanel(
title = "Scatterplot",
fluidRow(
column(
width = 12,
box(
width = NULL,
status = "primary",
h2("Filtered data"),
uiOutput("scatter_filtered"),
HTML('<hr style="color: purple;">'),
h2("Unfiltered data"),
uiOutput("scatter_unfiltered")
)
)
)
)
)
),
tabPanel(
title = "Learner Summary",
fluidRow(
column(
width = 12,
box(
width = NULL,
status = "primary",
verbatimTextOutput("learner_summary")
)
)
)
)
)
)
)
)
app.server = function(input, output, session) {
# --------------------------------------------------------------------------
# reactive values
df = reactiveValues(
# reactive values for current (adjusted) data frame, available features
# and prediction values
values.adj = data, features = NULL, pred = NULL,
table.rows.selected = NULL)
selected = reactiveValues(
# reactive values only for selected values
knots = 30, lines = 30)
# --------------------------------------------------------------------------
# ui outputs
output$iceplot_center = renderUI({
knot.values = df$pred[, 1, with = FALSE]
# sampled values appearing on the horizontal axis in ICE and PDP
selectInput(
"iceplot_center_x",
"Select horizontal axis value to center ICE curves around
(depends on knots)",
choices = knot.values, selected = selected$iceplot.center.x)
})
output$ale_interaction = renderUI({
variable.options = features[!features %in% c(selected$var)]
selectInput("ale_interaction_var",
"ALE interaction variable",
choices = variable.options)
})
output$checkbox = renderUI({
# checkbox for adjustable features
checkboxGroupInput("checks", "Select adjustable features",
choices = df$features.unused)
})
sliderList = reactive({
# list of strings with input$[feature] for numeric features
lapply(selected$features.numeric, FUN = function(feature) {
text = paste("input$", feature, sep = "")})
})
selectorList = reactive({
# list of strings with input$[feature] for factor features
lapply(selected$features.factor, FUN = function(feature) {
text = paste("input$", feature, sep = "")})
})
featureSliders = reactive({
# make reactive list of slider input expressions (for numeric variables)
# by parsing strings from slider.list
# use this function to capture inputs from feature sliders
lapply(sliderList(), FUN = function(x) eval(parse(text = x)))
})
featureSelectors = reactive({
# make reactive list of selector input expressions (for factor variables)
# by parsing strings from selector.list
# use this function to capture inputs from feature selectors
lapply(selectorList(), FUN = function(x) eval(parse(text = x)))
})
observeEvent({
# rendering feature sliders for numeric features
selected$features.numeric},
{
output$sliders = renderUI({
if (is.null(selected$features.numeric)) {
} else {
sliders = lapply(1:length(selected$features.numeric), function(i) {
input.name = selected$features.numeric[i]
min = min(data[[input.name]])
max = max(data[[input.name]])
decimal.places = max(getDecimalPlaces(min), getDecimalPlaces(max))
step.length = createSliderStepLength(decimal.places)
mean = mean(data[[input.name]])
sliderInput(
inputId = input.name,
label = input.name,
min = as.numeric(min),
max = as.numeric(max),
value = mean,
step = step.length,
sep = "")
})
do.call(tagList, sliders)
# create HTML tags for all sliders
}
})
}
)
observeEvent({
# render feature selectors
selected$features.factor},
{
output$selectors = renderUI({
if (is.null(selected$features.factor)) {
} else {
selectors = lapply(1:length(selected$features.factor), function(i) {
input.name = selected$features.factor[i]
factor.levels = levels(data[[input.name]])
selectInput(input.name, input.name, choices = factor.levels)})
do.call(tagList, selectors)
# create HTML tags for all selectors
}
})
}
)
output$knots = renderUI({
if (is.null(selected$knots)) {
selected$knots = 30
}
# setting to 30 upon initialization; setting init value in selected
# does not work
if (selected$plot.type == "ale") {
sliderInput(
"knots",
"Number of intervals into which the predictor range is divided",
min = 1,
max = 100,
value = selected$knots,
step = 1)
} else {
sliderInput(
"knots",
"Number of knots for each line",
min = 1,
max = nrow(df$values.filtered),
value = selected$knots,
step = 1)
}
})
output$lines = renderUI({
if (is.null(selected$lines)) {
selected$lines = 30
}
# setting to 30 upon initialization; setting init value in selected
# does not work
if (selected$data.selection.mode == "individual" ||
selected$plot.type == "ale") {
selected$lines = 10
# makes lines have full width
return(invisible(NULL))
} else {
sliderInput(
"lines",
"Number of individual observations (lines) to sample from data",
min = 1,
max = nrow(df$values.filtered),
value = selected$lines,
step = 1)
}
})
output$learner_summary = renderPrint({
capture.output(selected$model)
})
output$table = DT::renderDataTable({
shiny::req(!is.null(selected$datatable.select))
DT::datatable(
df$values.adj,
filter = list(position = "top", clear = TRUE, plain = TRUE),
selection = selected$datatable.select
)},
server = TRUE
)
# --------------------------------------------------------------------------
# plot outputs and event reactive functions
output$scatter_unfiltered_basic = renderPlot({
# render ggplot2 object of unfiltered scatter plot
scatterPlotUnfiltered()},
width = 800,
height = 400
)
output$scatter_unfiltered_plotly = renderPlotly({
# render plotly object of unfiltered scatter plot
p = plotly_build(scatterPlotUnfiltered())
p$elementId = NULL
p$x$layout$width = 800
p$x$layout$height = 400
p$width = NULL
p$height = NULL
return(p)
})
# both ggplot2 and plotly versions are ready to be rendered upon
# changing the UI selections; lazy loading keeps one version from not
# being rendered
output$scatter_filtered_basic = renderPlot({
# render ggplot2 object of filtered scatter plot
scatterPlotFiltered()},
width = 800,
height = 400
)
output$scatter_filtered_plotly = renderPlotly({
# render plotly object of filtered scatter plot
p = plotly_build(scatterPlotFiltered())
p$elementId = NULL
p$x$layout$width = 800
p$x$layout$height = 400
p$width = NULL
p$height = NULL
return(p)
})
# both ggplot2 and plotly versions are ready to be rendered upon
# changing the UI selections; lazy loading keeps one version from not
# being rendered
output$scatter_unfiltered = renderUI({
# decide which rendered unfiltered scatter plot to display in UI based on
# selected$gfx.package
if (selected$gfx.package == "plotly") {
plotlyOutput("scatter_unfiltered_plotly")
} else if (selected$gfx.package == "ggplot2") {
plotOutput("scatter_unfiltered_basic")
}
})
output$scatter_filtered = renderUI({
# decide which rendered filtered scatter plot to display in UI based on
# selected$gfx.package
if (selected$gfx.package == "plotly") {
plotlyOutput("scatter_filtered_plotly")
} else if (selected$gfx.package == "ggplot2") {
plotOutput("scatter_filtered_basic")
}
})
scatterPlotUnfiltered = eventReactive({
# plot function for unfiltered scatter plot
selected$table.rows
selected$plot.type
selected$aleplot.mode
selected$gfx.package
selected$var
selected$ale.interaction},
ignoreNULL = FALSE,
{
if (selected$plot.type == "ale" &&
selected$aleplot.mode == "Second Order Effects" &&
selected$gfx.package == "plotly") {
scatterPlot3D(
data = data, target = target,
var = c(selected$var, selected$ale.interaction),
highlighted = selected$table.rows)
} else {
scatterPlot(
data = data, target = target,
var = selected$var,
highlighted = selected$table.rows)
}
}
)
scatterPlotFiltered = eventReactive({
# plot function for filtered scatter plot
df$values.filtered
selected$table.rows
selected$plot.type
selected$aleplot.mode
selected$gfx.package
selected$var
selected$ale.interaction},
ignoreNULL = FALSE,
{
if (selected$plot.type == "ale" &&
selected$aleplot.mode == "Second Order Effects" &&
selected$gfx.package == "plotly") {
scatterPlot3D(
data = df$values.filtered, target = target,
var = c(selected$var, selected$ale.interaction),
highlighted = selected$table.rows)
} else {
scatterPlot(
data = df$values.filtered, target = target,
var = selected$var,
highlighted = selected$table.rows)
}
})
output$iml_plotly_plot = renderPlotly({
# rendering plotly version of imlplot output
p = plotly_build(imlPlot())
p$elementId = NULL
p$x$layout$width = 800
p$x$layout$height = 400
p$width = NULL
p$height = NULL
return(p)
})
output$iml_basic_plot = renderPlot({
# rendering ggplot2 version of imlplot output
imlPlot()},
width = 800,
height = 400
)
output$plot = renderUI({
# decide which plot to display in UI based on selected$gfx.package
if (selected$gfx.package == "plotly") {
plotlyOutput("iml_plotly_plot")
} else if (selected$gfx.package == "ggplot2") {
plotOutput("iml_basic_plot")
}
})
output$zoomed_plotly_plot = renderPlotly({
# render plotly version of zoomed imlplot
p = plotly_build(imlPlot())
p$elementId = NULL
p$x$layout$width = 1200
p$x$layout$height = 600
p$width = NULL
p$height = NULL
return(p)
})
output$zoomed_basic_plot = renderPlot({
# render ggplot2 version of zoomed imlplot
imlPlot()},
width = 1200,
height = 600
)
output$zoomed_plot = renderUI({
# decide which zoomed plot version to display in UI based on
# selected$gfx.package
if (selected$gfx.package == "plotly") {
plotlyOutput("zoomed_plotly_plot")
} else if (selected$gfx.package == "ggplot2") {
plotOutput("zoomed_basic_plot")
}
})
imlPlot = eventReactive({
# plots the predicted values by calling predefined plot functions with
# current reactive values
df$pred
df$values.filtered
selected$table.rows
selected$data.selection.mode
selected$plot.type
selected$iceplot.center.x
selected$gfx.package
selected$var
selected$ale.interaction
selected$knots
selected$lines},
ignoreInit = FALSE,
ignoreNULL = FALSE,
{
shiny::req(!is.null(df$pred))
shiny::req((selected$var %in% names(df$pred)) ||
"error" %in% df$pred)
shiny::req(selected$lines)
if (!"error" %in% df$pred) {
shiny::req(
!(TRUE %in% apply(df$pred, MARGIN = 2,
function(column) {NA %in% column})))
}
withProgress(
message = "Rendering plot..",
detail = "Please wait.",
min = 0, max = 100, value = 100,
{
if (nrow(df$values.filtered) == 0) {
plot = placeholderPlot()
return(plot)
} else if (
((selected$data.selection.mode == "individual") &&
(is.null(selected$table.rows)) &&
(!selected$plot.type == "ale"))) {
plot = placeholderPlot()
return(plot)
} else {
if (task.type == "regr") {
if (selected$plot.type == "ice") {
plot = regrIcePlot(
pred = df$pred,
var = selected$var,
target = target,
knots = selected$knots,
lines = selected$lines,
centered = selected$centered,
center.x = selected$iceplot.center.x)
return(plot)
} else if (selected$plot.type == "pdp") {
plot = regrPartialDependencePlot(
pred = df$pred,
var = selected$var,
target = target,
knots = selected$knots)
return(plot)
} else if (selected$plot.type == "ale") {
plot = regrAlePlot(
data = df$pred,
target = target,
var1 = selected$var,
var2 = selected$ale.interaction,
knots = selected$knots,
gfx.package = selected$gfx.package)
return(plot)
}
} else if (task.type == "classif") {
if (selected$plot.type == "ice") {
plot = classifIcePlot(
pred = df$pred,
var = selected$var,
knots = selected$knots,
lines = selected$lines,
centered = selected$centered,
center.x = selected$iceplot.center.x)
return(plot)
} else if (selected$plot.type == "pdp") {
plot = classifPartialDependencePlot(
pred = df$pred,
var = selected$var,
target = target,
knots = selected$knots)
return(plot)
} else if (selected$plot.type == "ale") {
plot = classifAlePlot(
data = df$pred,
target = target,
target.levels = target.levels,
var1 = selected$var,
var2 = selected$ale.interaction)
return(plot)
}
}
}
}
) # ending: withProgress(...)
})
# --------------------------------------------------------------------------
# Observers
observeEvent({
# observer for calculating predictions
df$values.filtered
selected$table.rows
selected$data.selection.mode
selected$iceplot.center.x
selected$centered
selected$model
selected$plot.type
selected$var
selected$ale.interaction
selected$knots
selected$lines},
{
shiny::req(nrow(df$values.filtered) > 0)
shiny::req(!(TRUE %in% apply(df$values.filtered, MARGIN = 2,
function(column) {NA %in% column})))
if (selected$plot.type == "ale") {
# use ALEPlot::ALEPlot(..) to predict for ale
shiny::withProgress(
message = "Calculating Predictions..",
detail = "Please wait.",
min = 0, max = 100, value = 100,
{
df$pred = makePredictionsAle(
data = df$values.filtered,
target = target,
model = selected$model,
var1 = selected$var,
var2 = selected$ale.interaction,
knots = selected$knots,
task.type = task.type)
}
)
} else {
# (selected$plot.type == "ice" || selected$plot.type == "pdp")
shiny::req(nrow(df$values.filtered) >= selected$knots)
shiny::req(nrow(df$values.filtered) >= selected$lines)
if (selected$data.selection.mode == "individual") {
# mmpf::marginalPrediction(...) marginalizes only over selected
# observations
shiny::req(!is.null(selected$table.rows))
shiny::req(selected$table.rows %in% as.numeric(
row.names(df$values.filtered)))
} else {}
shiny::withProgress(
message = "Calculating predictions..",
detail = "Please wait.",
min = 0, max = 100, value = 100,
{
prediction = makePredictionsIce(
data = df$values.filtered,
var = selected$var,
model = selected$model,
knots = selected$knots,
lines = selected$lines,
task.type = task.type,
selected.rows = selected$table.rows,
data.selection.mode = selected$data.selection.mode)
}
)
if (selected$centered == TRUE) {
# shiny::req(
# selected$iceplot.center.x %in% prediction[, 1, with = FALSE])
shiny::req(!is.null(selected$iceplot.center.x))
shiny::req(!is.na(selected$iceplot.center.x))
shiny::req(selected$var %in% names(prediction))
shiny::withProgress(
message = "Centering predictions..",
detail = "Please wait.",
min = 0, max = 100, value = 100,
{
df$pred = centerPredictions(
predictions = prediction,
center.x = selected$iceplot.center.x,
var = selected$var)
}
)
} else if (selected$centered == FALSE) {
df$pred = prediction
}
}
}
)
observeEvent({
# reload button action on top right corner
input$reload},
{
session$reload()
}
)
#
# observeEvent({
# # line sampling not necessary when individual observations are selected
# # or in ale plot mode
# selected$data.selection.mode
# selected$plot.type},
# {
# req(!is.null(selected$data.selection.mode))
# req(!is.null(selected$plot.type))
# if (selected$data.selection.mode == "individual") {
# shinyjs::disable("lines")
# } else {
# shinyjs::enable("lines")
# }
# }
# )
observeEvent({
input$iceplot_mode
selected$plot.type},
ignoreNULL = FALSE,
{
if (input$iceplot_mode == "Centered" && selected$plot.type == "ice") {
selected$centered = TRUE
} else {
selected$centered = FALSE
}
}
)
observeEvent({
input$gfx.package},
{
if (input$gfx.package == "ggplot2") {
selected$gfx.package = "ggplot2"
} else if (input$gfx.package == "plotly (resource intensive)") {
selected$gfx.package = "plotly"
}
}
)
observeEvent({
selected$plot.type},
{
if (task.type == "classif" && selected$plot.type == "ale") {
updateSelectInput(
session = session,
inputId = "aleplot_mode",
label = "For classification ALE plots only main effects are
supported",
choices = "Main Effects")
}
selected$ale.interaction = NULL
})
observeEvent({
# select model for marginal prediction function based on selected string
# in UI
input$models},
{
for (i in 1:length(learner.models.names)) {
model = learner.models[[i]]
if (input$models == learner.models.names[[i]]) {
selected$model = model
} else {}
}
}
)
observeEvent({
input$ale_interaction_var
selected$plot.type
input$aleplot_mode
selected$gfx.package},
ignoreNULL = FALSE,
{
if (selected$plot.type == "ale" &&
input$aleplot_mode == "Second Order Effects" &&
selected$gfx.package == "ggplot2") {
selected$ale.interaction = input$ale_interaction_var
} else if (selected$plot.type == "ale" &&
input$aleplot_mode == "Second Order Effects" &&
selected$gfx.package == "plotly") {
selected$ale.interaction = input$ale_interaction_var
} else {
selected$ale.interaction = NULL
}
}
)
observeEvent({
# reset the reactive data values when variable of interest changes
input$var},
{
df$values.adj = data
}
)
observeEvent({
# differentiates checked input features into numeric and factor features;
# important, because factor features are adjusted with selectors and need
# to be provided the right values when adjusting
input$checks},
ignoreNULL = FALSE,
{
numerics = c()
factors = c()
if (is.null(input$checks)) {
selected$features.numeric = NULL
selected$features.factor = NULL
} else {
for (elem in input$checks) {
if (elem %in% features.numeric) {
numerics = c(numerics, elem)
} else if (elem %in% features.factor) {
factors = c(factors, elem)
}
}
selected$features.numeric = numerics
selected$features.factor = factors
}
}
)
observeEvent({
# output$knots is a dynamic server side rendered UI and could be caught
# in endless loop when setting init value to old reactive value;
# hence the input is disabled upon init so that UI can finish rendering
input$knots},
{
shinyjs::disable("knots")
selected$knots = input$knots
}
)
observeEvent({
# output$lines is a dynamic server side rendered UI and could be caught
# in endless loop when setting init value to old reactive value;
# hence the input is disabled upon init so that UI can finish rendering
input$lines},
{
shinyjs::disable("lines")
selected$lines = input$lines
}
)
observeEvent({
# set reactive values to UI values
input$var
input$plot_type
input$aleplot_mode
input$data_selection_mode
input$checks
input$iceplot_center_x},
ignoreNULL = FALSE,
{
selected$var = input$var
selected$aleplot.mode = input$aleplot_mode
selected$features = input$checks
selected$iceplot.center.x = input$iceplot_center_x
if (input$data_selection_mode == "Plot all sampled observations") {
selected$data.selection.mode = "sampling"
} else if (input$data_selection_mode ==
paste("Plot individual observations",
"(only influences computations of ICE and PDP curves)")
) {
selected$data.selection.mode = "individual"
}
if (input$plot_type == "Individual Conditional Expectation") {
selected$plot.type = "ice"
} else if (input$plot_type == "Partial Dependence") {
selected$plot.type = "pdp"
} else if (input$plot_type == "Accumulated Local Effects") {
selected$plot.type = "ale"
}
}
)
  # Configure DT row-selection behaviour and record which table rows feed
  # the plots, depending on the chosen data-selection mode.
  observeEvent({
    selected$data.selection.mode
    input$table_rows_all
    input$table_rows_selected},
    ignoreNULL = FALSE,
    {
      if (selected$data.selection.mode == "sampling") {
        # row clicking is disabled; plots use every filtered row
        selected$datatable.select = list(mode = "none")
        df$table.rows.filtered = input$table_rows_all
        # use all filtered observations from data table
        selected$table.rows = NULL
      } else if (selected$data.selection.mode == "individual") {
        # only the selected observations in data are marginalized over,
        # so sampling observations/lines is not necessary
        selected$datatable.select = list(mode = "multiple")
        df$table.rows.filtered = input$table_rows_all
        # use all filtered observations from data table
        selected$table.rows = input$table_rows_selected
        # row indices of selected observations in data table
      }
    }
  )
  # Recompute which features remain available for manual adjustment: the
  # target, the variable of interest and (when ALE is active) the ALE
  # interaction variable must never be user-adjustable.
  observeEvent({
    # available features for adjusting the data cannot contain variable
    # of interest and (if ale selected) ale interaction variable
    selected$var
    selected$ale.interaction},
    ignoreNULL = FALSE,
    {
      df$features.unused = names(data)[!names(data) %in% c(
        target, selected$var,
        selected$ale.interaction)]
    }
  )
  # proxy lets the server manipulate the already-rendered DT table
  proxy = DT::dataTableProxy("table")
  # clear any row selection whenever the set of filtered rows changes
  observeEvent({
    df$table.rows.filtered},
    {
      DT::selectRows(proxy, list())
    }
  )
  # keep the filtered view of the (possibly user-adjusted) data in sync
  observeEvent({
    df$table.rows.filtered
    df$values.adj},
    ignoreNULL = FALSE,
    ignoreInit = FALSE,
    {
      df$values.filtered = df$values.adj[df$table.rows.filtered, ]
    }
  )
observeEvent({
# capture slider and selector values
# set reactive values to NULL if input contains NA or NULL values
# this sometimes happens when the input value is evaluated before the UI
# has finished rendering
featureSliders()
featureSelectors()},
{
if ((TRUE %in% lapply(featureSliders(), is.null)) ||
(TRUE %in% lapply(featureSliders(), is.na))) {
selected$values.numeric = NULL
} else {
selected$values.numeric = unlist(
featureSliders(),
function(x) return(as.numeric(x)))
}
# now do the same with selectors
if ((TRUE %in% lapply(featureSelectors(), is.null)) ||
(TRUE %in% lapply(featureSelectors(), is.na))) {
selected$values.factor = NULL
} else {
selected$values.factor = unlist(
featureSelectors(),
function(x) return(as.numeric(x)))
}
}
)
  # Write slider values into the working copy of the data: every selected
  # numeric feature is overwritten column-wide with its slider value, and
  # all unselected features are reset to the original data.
  observeEvent({
    # update numeric feature data when user adjusts sliders
    selected$values.numeric
    selected$features.numeric},
    ignoreNULL = FALSE,
    {
      if (!(is.null(selected$values.numeric))) {
        # NOTE(review): 1:length(...) iterates over c(1, 0) for zero-length
        # input; seq_along() would be safer here
        for (i in 1:length(selected$features.numeric)) {
          numeric.feature = selected$features.numeric[i]
          numeric.value = selected$values.numeric[i]
          df$values.adj[numeric.feature] = numeric.value
        }
      }
      unselected.features = df$features.unused[
        !df$features.unused %in% c(selected$features)]
      df$values.adj[unselected.features] = data[unselected.features]
    }
  )
  # Same as above for factor features: each selected factor column is
  # overwritten with the chosen level, re-factored with the original levels.
  observeEvent({
    # update factor feature data when user adjusts selectors
    selected$values.factor
    selected$features.factor},
    ignoreNULL = FALSE,
    {
      if (!(is.null(selected$features.factor))) {
        # NOTE(review): 1:length(...) misbehaves for zero-length input;
        # seq_along() would be safer here
        for (j in 1:length(selected$features.factor)) {
          factor.feature = selected$features.factor[j]
          factor.value = selected$values.factor[j]
          factor.levels = levels(data[[factor.feature]])
          df$values.adj[factor.feature] = factor(
            x = factor.value,
            levels = factor.levels)
        }
      }
      unselected.features =
        df$features.unused[!df$features.unused %in% c(selected$features)]
      df$values.adj[unselected.features] = data[unselected.features]
    }
  )
} # end server
shinyApp(ui = app.ui, server = app.server)
}
|
d8143725270eadc29514464bd6190d686e429bdf | c76d2e7847ce3b2a875d962678de6538ffffecf9 | /man/multiplot.Rd | a8804b76fecfb744a8421775b4743745bb288b83 | [] | no_license | sbujarski/SpPack | 67b54e11a20f7bd33813f2cd1b963c08f277e6f2 | c4365a8835783ce1b7efca421dc21fcf039972d4 | refs/heads/master | 2022-12-15T13:37:13.253135 | 2020-09-20T01:32:54 | 2020-09-20T01:32:54 | 112,982,817 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 632 | rd | multiplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiplot.R
\name{multiplot}
\alias{multiplot}
\title{Printing Multiple Plots in Grid}
\arguments{
\item{...}{Series of plots}
\item{cols}{number of columns (defaults to 1)}
\item{layout}{specifiable layout}
}
\description{
Prints several ggplot objects together in a single grid layout. \cr
}
\examples{
Data <- data.frame(x1=seq(1:10), y1=runif(10), x2=rnorm(n=10), y2=runif(10))
p1 <- ggplot(Data, aes(x=x1, y=y1)) + geom_point() + SpTheme()
p2 <- ggplot(Data, aes(x=x2, y=y2)) + geom_point() + SpTheme()
multiplot(p1, p2, cols=2)
}
\keyword{plotting}
|
95e7391c1562e5d907a6009d6ff7e68cd6afcc9d | 70a2ee912d76d6686aa461f1ae1059abfa589599 | /man/run_simulation.Rd | 0c03165a725197018ab20588d7d7018612a280fe | [] | no_license | jiyingz/clinicalMPI | fa497f468880ef792c8c28e642e6a98411072990 | 0f92d15a9ae9a15cccd50f84a05ac998052ea2da | refs/heads/master | 2023-08-03T08:36:45.745238 | 2021-06-02T22:12:08 | 2021-06-02T22:12:08 | 263,835,191 | 0 | 1 | null | 2023-07-22T18:50:43 | 2020-05-14T06:35:24 | R | UTF-8 | R | false | true | 4,118 | rd | run_simulation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_simulation.R
\name{run_simulation}
\alias{run_simulation}
\title{Run Simulation}
\usage{
library(stats)
true_probs = matrix(data = c(0.95, 0.0, 0.05,
0.5, 0.4, 0.1,
0.2, 0.65, 0.15,
0.1, 0.4, 0.5,
0.05, 0.0, 0.95), nrow = 5, ncol = 3, byrow = TRUE)
dose_levels = c(60,80,100,120,140)
starting_dose = 100
PF = 0.2
PF_tolerance = 0.05
eta = 0.8
PT = 0.2
PT_tolerance = 0.05
zeta = 0.8
#cohort size is 3, max sample size is 24, using defaults
results = run_simulation(true_probs, dose_levels, starting_dose, PF, PF_tolerance, eta, PT, PT_tolerance, zeta)
}
\arguments{
\item{true_probs}{A Dx3 matrix containing the true chances of treatment futility, efficacy, and toxicity, where D = # of dose levels}
\item{dose_levels}{A length D vector containing dose levels to be included in the trial. Can be in index or true value form (e.g. could be either c(1,2,3,4,5) or c(40,60,80,100,120)).}
\item{starting_dose}{Which dose to start at, must be a value within \code{dose_levels}}
\item{PF}{Target tolerable futility probability}
\item{PF_tolerance}{Half-length of futility equivalence interval (epsilon_F)}
\item{eta}{Upper futility threshold beyond which invokes the futility rule}
\item{PT}{Target tolerable toxicity probability}
\item{PT_tolerance}{Half-length of toxicity equivalence interval (epsilon_T)}
\item{zeta}{Upper toxicity threshold beyond which invokes the safety rule}
\item{cohort_size}{How many new patients to test in each round of the trial}
\item{max_samp_size}{The maximum number of subjects for the entire trial (default = 24)}
\item{num_sims}{How many trials to perform in simulation (default = 1000)}
\item{filter}{Posterior sum threshold to determine dose availability for final selection (pf+pt <= filter) (default = 0.5)}
\item{seed}{A random seed (default = 111)}
}
\value{
A list containing:
\itemize{
\item prop_time_selected -- Percent of trials where each dose was selected as the final dose recommendation
\item times_early_stopping -- How many trials stopped before reaching maximum sample size
\item mean_patients_perdose -- Mean number of patients allocated to each dose level
\item min_patients_perdose -- Minimum number of patients allocated to each dose level
\item max_patients_perdose -- Maximum number of patients allocated to each dose level
}
}
\description{
Run simulations to get performance characteristics with given parameters. Performance outcomes are averaged over 1000 simulation trials by default.
}
\details{
In order to run simulations, decision tables must be pre-specified (e.g. by using the \code{make_decision_table()} function) and loaded into the working environment.
All decision tables must be named in the format "decision_table#", where # = dose sample size.
For simulations the cohort size must be a fixed value, although this may not necessarily be true in real-life trials.
Simulations are meant to give an idea of how variable trial results may be given the chosen parameters, and is not to be taken as a statistical guarantee of any sort.
Results depend on the assumptions that
\enumerate{
\item the "true" probabilities of futility and toxicity are valid, and
\item no unexpected cohort additions or dropouts occur throughout the trial.
}
These assumptions are difficult to adhere to in real-life scenarios, and so the actual performance characteristics may differ from those outputted by the simulation.
In other words, take the simulation performance results with a grain of salt. However, the closer the real-life trial adheres to the ideal simulation conditions,
and the more sure we are about the "true" chances of toxicity and futility, the closer the actual outcome will adhere to what is expected from simulation results!
For example, if simulation results show that the correct dose is chosen in 73\% of trials, then in one real-life trial there is a 73\% chance that the trial outcomes will point to a truly acceptable dose.
}
|
804e644655b3552679c0b629e063081b9574ddf3 | 5cb2f48f447529cb8beea212395dd661182258c8 | /01-r-basics/demo-speed-vector-loop.R | 95ceba67479131ce23dd964a1c2b8db98f36af19 | [
"CC0-1.0"
] | permissive | etnetera-activate/r-minicourse | 44013056cbd8573e38bc27d8bf4724e3bf33f1b0 | 91998bc638a0ba902a8c6188fc03e43bba4f97d8 | refs/heads/master | 2021-06-27T01:52:30.832342 | 2020-10-30T12:30:03 | 2020-10-30T12:30:03 | 42,094,789 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,169 | r | demo-speed-vector-loop.R |
# Compare the performance of a vectorised operation against the same work
# done in a loop.
# Helper that measures the wall-clock time of fun(x) in seconds (not
# strictly necessary -- system.time() exists -- but this is a demo).
zmer <- function(x, fun) {
  started <- as.numeric(Sys.time())
  fun(x)
  as.numeric(Sys.time()) - started
}
# Vectorised variant: one sqrt() call on the whole vector. Wrapped in
# invisible() to match the original, where the trailing assignment made the
# return value invisible.
vektorove <- function(x) {
  invisible(sqrt(x))
}
# Loop variant: applies sqrt() element by element; the results are discarded
# because only the timing matters. Returns NULL invisibly.
cyklem <- function(x) {
  # seq_along() instead of 1:length(x): correct for zero-length input,
  # where 1:length(x) would wrongly iterate over c(1, 0)
  for (i in seq_along(x)) {
    sqrt(x[i])
  }
}
# Apply-family variant: vapply() guarantees one double per input element.
hromadne <- function(x) {
  vapply(x, sqrt, FUN.VALUE = numeric(1))
}
# empty data frame that will collect one timing row per method and size
data <- data.frame()
# and off we go: benchmark vectors of length 10^1 .. 10^7
for (mocnina in 1:7) {
  message(sprintf("Calculating for 10^%d",mocnina))
  max = 10 ^ mocnina;
  a = 1:max
  tv <- zmer(a,vektorove)
  tf <- zmer(a,cyklem)
  th <- zmer(a,hromadne)
  # NOTE(review): growing a data frame with rbind() in a loop is O(n^2);
  # acceptable here because there are only 21 rows in total
  data <-
    rbind(data,data.frame(list(
      N = mocnina,time = tv,method = "vector"
    )))
  data <-
    rbind(data,data.frame(list(
      N = mocnina,time = tf,method = "cyclus"
    )))
  data <-
    rbind(data,data.frame(list(
      N = mocnina,time = th,method = "sapply"
    )))
}
data
library(ggplot2)
# one line per method: runtime against the exponent of the vector length
ggplot(data,aes(x = N,y = time,col = method)) + geom_line()
|
452c89e90d34621ab39c62a350bede7fc93dc923 | a329d887c850b4dabe78ea54b8bf3587136f7571 | /tests/testthat/test_data.R | fb33326c361316f9a16ffdf6d1e5566200fcf71f | [
"MIT"
] | permissive | mllg/compboost | 2838ab72b1505305136fa8fbf752a62f886303ec | f493bb92050e27256f7937c82af6fa65e71abe67 | refs/heads/master | 2020-04-06T09:45:45.958662 | 2018-11-13T09:53:05 | 2018-11-13T09:53:05 | 157,355,755 | 0 | 0 | NOASSERTION | 2018-11-13T09:39:14 | 2018-11-13T09:39:13 | null | UTF-8 | R | false | false | 637 | r | test_data.R | context("Data works")
test_that("data objects works correctly", {
  X = as.matrix(1:10)
  # constructors must run without printing, messages or warnings
  expect_silent({ data.source = InMemoryData$new(X, "x") })
  expect_silent({ data.target = InMemoryData$new() })
  expect_equal(data.source$getData(), X)
  expect_equal(data.source$getIdentifier(), "x")
  # a default-constructed target holds a 1x1 zero matrix and an empty id
  expect_equal(data.target$getData(), as.matrix(0))
  expect_equal(data.target$getIdentifier(), "")
  # constructing the factory writes the transformed data (x^3, no
  # intercept) and the source identifier into the target as a side effect
  expect_silent({ lin.factory = BaselearnerPolynomial$new(data.source, data.target,
    list(degree = 3, intercept = FALSE)) })
  expect_equal(data.target$getData(), X^3)
  expect_equal(data.target$getIdentifier(), "x")
}) |
e4f9d2cb85fef2ab0b256675a67938e1027337b4 | 69c09257bdf31c54ec9897a15b28d931212591cf | /R/SamplePopulation.R | 1a4a6c99dcfc8433eb2b693e60ab9b3b140aeb0b | [] | no_license | ohdsi-studies/SmallSampleEstimationEvaluation | 9a503cf67bd5c502e27435074f0d78c41dab70ba | 881d9d81b456ed20dbe2e28a98361ca715e305d9 | refs/heads/master | 2023-07-23T11:35:47.458471 | 2023-07-14T13:12:00 | 2023-07-14T13:12:00 | 318,160,286 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,985 | r | SamplePopulation.R | # Copyright 2023 Observational Health Data Sciences and Informatics
#
# This file is part of SmallSampleEstimationEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Draw random subsets of CohortMethod populations
#'
#' For every CohortMethodData file in \code{sourceCmFolder} (file names
#' matching "CmData"), either draws a single random sample of
#' \code{sampleSize} subjects (when \code{numberOfSamples} is NULL), or
#' partitions the population into \code{numberOfSamples} equally-sized,
#' non-overlapping samples. Results are written below \code{sampleFolder};
#' an existing \code{sampleFolder} acts as a done-marker and makes the
#' function return immediately.
#'
#' @param sourceCmFolder  Folder holding the source CohortMethodData files.
#' @param sampleFolder    Output folder (skipped entirely if it exists).
#' @param numberOfSamples Number of disjoint samples per file, or NULL.
#' @param sampleSize      Size of the single sample when
#'                        \code{numberOfSamples} is NULL.
#' @param seed            Random seed, for reproducibility.
#' @export
samplePopulation <- function(sourceCmFolder,
                             sampleFolder,
                             numberOfSamples = NULL,
                             sampleSize = NULL,
                             seed = 123) {
  # an existing output folder means the work was already done
  if (dir.exists(sampleFolder)) {
    return()
  }
  set.seed(seed)
  dir.create(sampleFolder)
  # Write the subset of cmData given by sampleRowIds to outputFileName,
  # carrying the meta data along and appending a "Random sample" row to
  # the attrition table.
  subsetCmData <- function(cmData, sampleRowIds, outputFileName) {
    sampleCmData <- Andromeda::andromeda()
    sampleCmData$cohorts <- cmData$cohorts %>%
      filter(.data$rowId %in% sampleRowIds)
    sampleCmData$covariates <- cmData$covariates %>%
      filter(.data$rowId %in% sampleRowIds)
    sampleCmData$outcomes <- cmData$outcomes %>%
      filter(.data$rowId %in% sampleRowIds)
    # reference tables are not subject-level; copy them unchanged
    sampleCmData$covariateRef <- cmData$covariateRef
    sampleCmData$analysisRef <- cmData$analysisRef
    metaData <- attr(cmData, "metaData")
    metaData$populationSize <- length(sampleRowIds)
    # treatment is 0/1, so its sum is the number of target subjects
    targetSize <- sum(pull(sampleCmData$cohorts, .data$treatment))
    metaData$attrition <- rbind(
      metaData$attrition,
      tibble(
        description = "Random sample",
        targetPersons = targetSize,
        comparatorPersons = length(sampleRowIds) - targetSize,
        targetExposures = targetSize,
        comparatorExposures = length(sampleRowIds) - targetSize
      )
    )
    attr(sampleCmData, "metaData") <- metaData
    class(sampleCmData) <- class(cmData)
    CohortMethod::saveCohortMethodData(sampleCmData, outputFileName)
  }
  if (is.null(numberOfSamples)) {
    # one random sample of sampleSize subjects per input file
    cmDataFiles <- list.files(sourceCmFolder, "CmData")
    for (cmDataFile in cmDataFiles) {
      cmData <- CohortMethod::loadCohortMethodData(file.path(sourceCmFolder, cmDataFile))
      rowIds <- cmData$cohorts %>%
        pull(.data$rowId)
      # TODO: need to handle case when original is smaller than requested sample size
      sampleRowIds <- sample(rowIds, sampleSize, replace = FALSE)
      subsetCmData(
        cmData = cmData,
        sampleRowIds = sampleRowIds,
        outputFileName = file.path(sampleFolder, cmDataFile)
      )
    }
  } else {
    # partition each population into numberOfSamples disjoint samples,
    # written to sub-folders Sample_1 ... Sample_<numberOfSamples>
    cmDataFiles <- list.files(sourceCmFolder, "CmData")
    for (cmDataFile in cmDataFiles) {
      cmData <- CohortMethod::loadCohortMethodData(file.path(sourceCmFolder, cmDataFile))
      rowIds <- cmData$cohorts %>%
        pull(.data$rowId)
      # TODO: need to handle case when original is smaller than requested sample size
      # Create equally-sized non-overlapping random samples without replacement:
      # NOTE(review): rnd is redrawn for every file, so sample membership is
      # not aligned across files -- confirm this is intended
      rnd <- runif(length(rowIds))
      breaks <- quantile(rnd, (1:(numberOfSamples - 1)) / numberOfSamples)
      breaks <- unique(c(0, breaks, 1))
      sampleId <- as.integer(as.character(cut(rnd,
        breaks = breaks,
        labels = 1:(length(breaks) - 1)
      )))
      pb <- txtProgressBar(style = 3)
      for (i in 1:numberOfSamples) {
        sampleRowIds <- rowIds[sampleId == i]
        sampleSubFolder <- file.path(sampleFolder, sprintf("Sample_%d", i))
        if (!file.exists(sampleSubFolder)) {
          dir.create(sampleSubFolder)
        }
        subsetCmData(
          cmData = cmData,
          sampleRowIds = sampleRowIds,
          outputFileName = file.path(sampleSubFolder, cmDataFile)
        )
        setTxtProgressBar(pb, i / numberOfSamples)
      }
      close(pb)
    }
  }
}
|
0d16cf056c42571643e197844088df0d19775407 | 39f4df1f5c2faadbdf366d65ede30aa5edba3497 | /man/writeCSV.Rd | 325acee47d909a369a6d657b3381e5c269cf9e68 | [] | no_license | cran/kutils | a19c69b6730548aa849ca841291d95de92dd3863 | 53ada7e4308f456a0a109955ecfd9122f6263aba | refs/heads/master | 2023-07-10T17:11:29.010416 | 2023-06-26T21:40:02 | 2023-06-26T21:40:02 | 77,183,151 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,307 | rd | writeCSV.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{writeCSV}
\alias{writeCSV}
\title{Write CSV files with quotes same as MS Excel 2013 or newer}
\usage{
writeCSV(x, file, row.names = FALSE)
}
\arguments{
\item{x}{a data frame}
\item{file}{character string for file name}
\item{row.names}{Default FALSE for row.names}
}
\value{
the return from write.table, using revised quotes
}
\description{
R's write.csv inserts quotes around all elements in a character
vector (if quote = TRUE). In contrast, MS Excel CSV export no
longer inserts quotation marks on all elements in character
variables, except when the cells include commas or quotation
marks. This function generates CSV files that are, so far as we
know, exactly the same "quoted style" as MS Excel CSV export
files.
}
\details{
This works by manually inserting quotation marks where necessary and
turning FALSE R's own method to insert quotation marks.
}
\examples{
set.seed(234)
x1 <- data.frame(x1 = c("a", "b,c", "b", "The \"Washington, DC\""),
x2 = rnorm(4), stringsAsFactors = FALSE)
x1
fn <- tempfile(pattern = "testcsv", fileext = ".csv")
writeCSV(x1, file = fn)
readLines(fn)
x2 <- read.table(fn, sep = ",", header = TRUE, stringsAsFactors = FALSE)
all.equal(x1,x2)
}
\author{
Paul Johnson
}
|
dee391f156a7689e31c93d6da5726adc941f153e | 7a0cc1a29da34a761327a45f506c4b097cd33bd8 | /man/MassPool.Rd | 682bba039f274d0008a7747ae732c2270eb63e29 | [
"CC0-1.0"
] | permissive | USFWS/AKaerial | 3c4d2ce91e5fac465a38077406716dd94c587fc8 | 407ccc5bf415d8d5ed0d533a5148693926306d27 | refs/heads/master | 2023-07-19T22:04:12.470935 | 2023-07-14T19:50:31 | 2023-07-14T19:50:31 | 254,190,750 | 0 | 1 | CC0-1.0 | 2023-07-14T19:50:33 | 2020-04-08T20:14:41 | R | UTF-8 | R | false | true | 1,110 | rd | MassPool.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MassReport.R
\name{MassPool}
\alias{MassPool}
\title{Pool data from multiple observers across all MBM surveys where appropriate}
\usage{
MassPool()
}
\description{
MassPool will selectively apply the \code{\link{PoolData}} function to all known survey, year, and observer
combinations that need pooling.
}
\details{
Pooling data files becomes necessary when observers and pilots switch seats or
are replaced during a survey. The resulting files are generally pooled by seat side (left or right).
MassPool currently applies the \code{\link{PoolData}} function to the following data files:
\itemize{
\item 1986 YKG files from observers C. Lensink and K. Bollinger are pooled into a right front seat file.
Lensink observed from 6-2-1986 through 6-22-1986 with the exception of 6-13-1986. Bollinger observed on 7 design transects on 6-13-1986.
W. Butler was the only pilot for the 1986 YKG survey.
\item 1988 CRD files
}
}
\references{
\url{https://github.com/USFWS/AKaerial}
}
\author{
Charles Frost, \email{charles_frost@fws.gov}
}
|
b27537301ebf37e7252b16ced9b2a1b47f41c31f | 46067bee5b325b74c70d7e4c946ddd90e32844f8 | /Africa_Soil/knn.R | 1967555f1de0e9fa76dc1a11126a3ee1f1b6191f | [] | no_license | mpricope/kaggle | 7a11120155865716c8f230e53a1fc6fd27fceecb | 269eebbe8d2a34caf7a0bd444321863655094a1e | refs/heads/master | 2016-09-05T23:38:14.649838 | 2015-10-31T09:41:14 | 2015-10-31T09:41:14 | 24,673,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,516 | r | knn.R | library(ridge)
library(RSofia)
#load(paste('data/input',p,'RData',sep='.'))
# f.RData is expected to provide: myData (vars, mNames, otherVars and CM --
# presumably a correlation/association matrix, confirm), train and test
load('data/f.RData')
#usedCols <- c(myData$mNames)
# candidate predictor columns
usedCols <- c(myData$otherVars,myData$mNames)
#usedCols <- myData$otherVars
#thold <- list(SOC=0,pH=0,Ca=0,P=0,Sand=0)
#thold <- list(SOC=0.3,pH=0.1,Ca=0.15,P=0.05,Sand=0.3)
# per-target threshold for keeping a predictor
thold <- list(SOC=0.05,pH=0.05,Ca=0.05,P=0.05,Sand=0.05)
result <- data.frame(PIDN = test$PIDN)
# fit one model per target variable and store its test predictions
for (i in myData$vars) {
  # keep only predictors whose CM entry for target i exceeds the threshold
  tmf <- c()
  for (j in usedCols) {
    if (myData$CM[[i,j]] > thold[[i]]) {
      tmf <- c(tmf,j)
    }
  }
  print(paste(i,length(tmf),sep=":"))
  tmE <- paste(i,paste(tmf,collapse=' + '),sep = ' ~ ')
  xtrain <- as.matrix(train[,tmf])
  ytrain <- as.vector(train[,i])
  #
  print ('Fitting Model')
  #x <- parse_formula(formula(tmE),train)
  # sofia() reads training data in svmlight format from a temporary file
  tmp <- tempfile()
  write.svmlight(ytrain, xtrain, tmp)
  #write.svmlight(x$labels, x$data, tmp)
  fit <- sofia(tmp,iterations = 1e+05, learner_type="romma")
  unlink(tmp)
  #fit <- sofia(formula(tmE),train,scaling="none",iterations=10, learner_type="sgd-svm")
  # fit <- svm(xtrain,ytrain,type="eps-regression",
  #            kernel="radial basis",cost=10000)
  print("Prediction")
  # xtest <- test[i,]
  ypred = predict(fit,newdata=as.matrix(test[,tmf]), prediction_type = "linear")
  result[,i] <- ypred
}
colnames(result) <- c('PIDN',myData$vars)
# NOTE(review): unusual file extension; possibly meant '.RData' -- confirm
save(result,file='data/result.rdg.CData')
#[1] 0.07624523 0.11366035 0.11834555 0.81061377 0.12388556
#[1] 0.08729766 0.14411501 0.11366048 0.81696271 0.10589954 |
d55da65a086b63ea2a510e68284c21599c54009a | a8959c9bf467ae24e62eadfca114d79b9f2b6239 | /memo.R | 3aecf6d4ade774b64695f201fd149f70377291d2 | [] | no_license | ryushy/R-Training | 44f080832761f4c0ea267e0aeb66ddd4cce5d817 | 632dd9ad866c89f4ab3e10aa5028e01fdf3eccb2 | refs/heads/master | 2022-04-14T11:54:40.965821 | 2020-04-12T14:27:23 | 2020-04-12T14:27:23 | 227,976,555 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 154 | r | memo.R | items = c("stringr","lubridate","jsonlite","readxl","tidyverse")
# Echo each package name (simple progress indicator when run as a script).
for (item in items) {
  print(item)
}
# install.packages() is vectorised: one call installs every package in
# `items` and resolves the repository index only once.
install.packages(items)
|
12caa06ba5828973b3428e25e145fccaa2cda53f | b091f838853235ec8871301346af84de77bf2067 | /R/RcppExports.R | 07516e6ca16c389f9e6f91eff9d6d652c619bc4c | [] | no_license | ypan1988/lapacker | e3cd322ab8976bdd65d8d0c17d6846f58e15b709 | 62e5c30da4a7f369ce16efc0454230fd81147c62 | refs/heads/master | 2020-04-29T02:49:54.446526 | 2019-11-21T22:52:40 | 2019-11-21T22:52:40 | 175,785,075 | 0 | 0 | null | 2019-03-15T09:03:30 | 2019-03-15T09:03:30 | null | UTF-8 | R | false | false | 410 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#'@title Example: using lapacke_dgels
#'@description Solve linear systems using lapacke_dgels
#'@examples
#'## Expected output:
#'example_lapacke_dgels()
#'@export
example_lapacke_dgels <- function() {
invisible(.Call('_lapacker_example_lapacke_dgels', PACKAGE = 'lapacker'))
}
|
89d3f8475991c8edfcb2eabcd4ab8e38c055c8fb | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.business.applications/man/chime_delete_attendee.Rd | f9c9d5565697e6dab0e4898193bfd0157d86daa5 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,002 | rd | chime_delete_attendee.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chime_operations.R
\name{chime_delete_attendee}
\alias{chime_delete_attendee}
\title{Deletes an attendee from the specified Amazon Chime SDK meeting and
deletes their JoinToken}
\usage{
chime_delete_attendee(MeetingId, AttendeeId)
}
\arguments{
\item{MeetingId}{[required] The Amazon Chime SDK meeting ID.}
\item{AttendeeId}{[required] The Amazon Chime SDK attendee ID.}
}
\value{
An empty list.
}
\description{
Deletes an attendee from the specified Amazon Chime SDK meeting and
deletes their \code{JoinToken}. Attendees are automatically deleted when a
Amazon Chime SDK meeting is deleted. For more information about the
Amazon Chime SDK, see \href{https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html}{Using the Amazon Chime SDK} in
the \emph{Amazon Chime Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$delete_attendee(
MeetingId = "string",
AttendeeId = "string"
)
}
}
\keyword{internal}
|
dec85196b5f408662805aa56f66eb9d7fb3f3950 | 55491b74836b08710bb23a4ce3a2c13be3d96c9a | /wordsintransition/R/imitation_matches.R | c9adc04d84e2abdb3688a491b9d170e5d699e127 | [] | no_license | lupyanlab/words-in-transition | 2fed5a549694f99921d8ffb52f94802e906e8017 | 7086b9fa57e3739420ef866d3ff23d162ee5ab49 | refs/heads/master | 2021-04-09T17:26:18.262940 | 2017-05-25T12:35:32 | 2017-05-25T12:35:32 | 52,300,498 | 0 | 0 | null | 2016-04-12T05:05:16 | 2016-02-22T19:44:38 | Python | UTF-8 | R | false | false | 698 | r | imitation_matches.R | #' Recode survey type for modeling.
#'
#' Builds a lookup table mapping each survey type ("between", "same",
#' "within") onto treatment-contrast indicator columns and joins it onto
#' \code{frame}. When \code{frame} is missing, the lookup table itself is
#' returned.
#'
#' @param frame Optional data frame with a \code{survey_type} column.
#' @return The lookup table, or \code{frame} with the contrast columns
#'   joined on.
#' @import dplyr
#' @export
recode_survey_type <- function(frame) {
  # tibble() replaces the deprecated dplyr::data_frame()
  survey_map <- tibble(
    survey_type = c("between", "same", "within"),
    # Treatment contrasts
    same_v_between = c(1, 0, 0),
    same_v_within = c(0, 0, 1)
  )
  if (missing(frame)) return(survey_map)
  left_join(frame, survey_map)
}
#' Recode generation to have a meaningful 0 value.
#'
#' Where generation_1 == 0, generation == 1.
#'
#' @param frame Optional data frame with a \code{generation} column.
#' @return The lookup table, or \code{frame} with \code{generation_1}
#'   joined on.
#' @import dplyr
#' @export
recode_generation <- function(frame) {
  # tibble() replaces the deprecated dplyr::data_frame(); later columns may
  # reference earlier ones, exactly as data_frame() allowed
  generation_map <- tibble(
    generation = 1:8,
    generation_1 = generation - 1
  )
  if (missing(frame)) return(generation_map)
  frame %>% left_join(generation_map)
}
|
7b9eb8a601528c6aa0b1360a6203e7f155b6c131 | e2960a88261010e9ef8aabdb04f4e2efb9a489d4 | /man/result_names.Rd | b3b09f5f9fe85a468db222a74f41b26252905116 | [] | no_license | const-ae/proDA | 0625f4a697afa2e4dc5ef465523318a66503f9ec | 56615c9c7366e382507a62427d0d80c1d9108a98 | refs/heads/master | 2023-04-15T01:23:21.907625 | 2022-11-01T15:20:16 | 2022-11-01T15:20:16 | 184,236,748 | 11 | 8 | null | 2022-10-02T10:48:50 | 2019-04-30T09:51:48 | R | UTF-8 | R | false | true | 536 | rd | result_names.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{result_names}
\alias{result_names}
\title{Get the result_names}
\usage{
result_names(fit, ...)
}
\arguments{
\item{fit}{the fit to get the result_names from}
\item{...}{additional arguments used by the concrete implementation}
}
\value{
a character vector
}
\description{
Get the result_names
}
\examples{
syn_data <- generate_synthetic_data(n_proteins = 10)
fit <- proDA(syn_data$Y, design = syn_data$groups)
result_names(fit)
}
|
980723f49da4f7355d840ef4afbf959eb0cd4ede | 6f07dccb7e29b191dde05af2c0d7eca21a521c60 | /labwork/dilution_samples.R | 7b69600d8bcf0c379f58eb022c6b1fb209cb983e | [] | no_license | squisquater/bees | 8f0619235cc0c9b126e97d71fa88675ae6384ddb | 0a93371f52528e2f2c2a4c572a3e9e5a5262ee6e | refs/heads/master | 2022-11-29T07:02:59.160548 | 2020-08-11T05:48:35 | 2020-08-11T05:48:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,523 | r | dilution_samples.R | library(dplyr)
# dilution concentrations
# goal = concentrations of 0.25 to 0.5 ng/uL
# original qubit concentration reads:
# (one vector per extraction batch, values in ng/uL)
x1 <- c(2.94, 4.84, 2.90, 4.25)
x2 <- c(3.01, 1.19, 1.98, 3.03, 1.94, 6.10, 3.48, 4.84, 8.25, 4.69, 5.65, 3.46)
x3 <- c(5.50, 7.20, 5.00, 15.20, 4.93, 7.90, 4.42, 3.84, 11.20, 5.00, 5.95, 6.25,
        10.10, 4.95, 6.75, 4.35, 4.57, 4.52, 2.90, 5.70, 5.25, 7.80, 12.10, 2.61)
x4 <- c(3.36, 2.73, 5.15, 14.10, 3.14, 5.35, 5.75, 4.15, 2.40, 3.13, 2.82, 5.50,
        3.69, 2.59, 2.81, 3.95, 5.85, 3.67, 4.29, 4.32, 4.78, 3.61, 1.64)
# keep all batches together so they can be processed with lapply()
x_all <- list(x1=x1, x2=x2, x3=x3, x4=x4)
# new concentration = (x * volume_x) / (volume_x + volume_buffer)
# Buffer volume (uL) to mix with vol_x uL of stock at x ng/uL so that the
# diluted sample ends up at `target` ng/uL.
buffer2add = function(x, vol_x, target) {
  dilution.factor = x / target
  vol_x * (dilution.factor - 1)
}
# DNA and buffer volumes (uL) needed to dilute stock at x ng/uL down to
# `target` ng/uL in roughly `vol_tot` uL total, never pipetting less than
# 3 uL of DNA.
bufferAndDNA2add = function(x, vol_tot, target){
  # smallest whole-uL DNA volume giving ~vol_tot at the target concentration
  dna.vol = pmax(ceiling(vol_tot * target / x), 3)
  # matching buffer volume, reported to 3 decimal places
  buf.vol = round(buffer2add(x = x, vol_x = dna.vol, target = target), digits = 3)
  c(DNA = dna.vol, buffer = buf.vol)
}
# Volume of DNA stock (uL) to add to `vol_b` uL of buffer so the mix ends
# up at `target` ng/uL, given stock concentration `x` ng/uL; rounded to
# 2 decimals. Derived from: target = (x * vol_x) / (vol_x + vol_b).
DNA2add = function(x, vol_b, target) {
  round((target * vol_b)/(x - target), 2)
}
# one-off calculation for a single sample
DNA2add(x = 1.18, vol_b = 63.267, target = 0.45)
# how much buffer should I add to 5 nanoliters of each library to get 0.35 ng/uL concentration?
#v5 = lapply(x_all, function(x) buffer2add(x = x, vol_x = 5, target = 0.35))
#v3 = lapply(x_all, function(x) buffer2add(x = x, vol_x = 3, target = 0.35))
#v10 = lapply(x_all, function(x) buffer2add(x = x, vol_x = 10, target = 0.35))
# how much buffer and DNA should I add to get ~40 uL at 0.5 ng/uL concentration?
# without having to pipette less than 3uL at any point
v = lapply(x_all, function(x) t(sapply(x, function(i)
  bufferAndDNA2add(x = i, vol_tot = 40, target = 0.5))))
# sample IDs for each extraction batch (one file per batch)
ids = lapply(1:4, function(i) read.table(paste0("../labwork/ids2extract_", i)))
extract = lapply(1:4, function(i) rep(i, nrow(ids[[i]])))
extractN = lapply(1:4, function(i) 1:nrow(ids[[i]]))
# combine batch number, within-batch index, sample ID, measured
# concentration and the computed DNA/buffer volumes into one table
d = data.frame(extract = unlist(extract), N = unlist(extractN),
               ids = do.call(rbind, ids)$x, ng_uL = unlist(x_all),
               do.call(rbind, v))
rownames(d) <- NULL
# total volume per well (DNA + buffer), rounded to the nearest uL
d$tot = round(d$DNA + d$buffer)
write.table(d, "../labwork/dilution0.5_numbers_extract1-4.txt",
            row.names = F, col.names = T, sep = "\t", quote = F)
# first measurements were off, I did an initial dilution to get samples in the ~ 5-10ng/uL range
# and then re-measured DNA concentrations to do a final dilution down to 0.5 concentration
new <- read.table("../labwork/dilution_new_QUBIT_first_dilution_extract_1-4.txt",
header = T, stringsAsFactors = F)
new$DNA_2_add = 5
new$buffer_2_add = buffer2add(x = new$Qubit_HS_ng_uL, vol_x = new$DNA_2_add, target = 0.5)
new$"DNA:buffer_D1" = paste(new$DNA_added, new$buffer_added, sep = ":")
#new$"DNA:buffer_D0.5" = paste(new$DNA_2_add, new$buffer_2_add, sep = ":")
write.table(new[, c("extract", "N", "ids", "DNA:buffer_D1", "Qubit_HS_ng_uL", "DNA_2_add", "buffer_2_add")], "../labwork/dilution0.5_numbers_extract1-4_NEW.txt",
row.names = F, col.names = T, sep = "\t", quote = F)
# one at a time
lane5 <- c(5.76, 8.86, 5.68, 4.8, 4.52, 5.16, 5.8, 6.56, 4.48, 5.32, 7.32, 7.84, 5.84, 7.88, 5.80, 16.6, 6.48, 8.2, 6.72, 7.88, 6.4, 5.84, 7.76, 7.48, 5.3, 7.2, 5.72, 8.04, 9.28, 5.68, 5.48, 9.36, 5.52, 4.96, 4.48, 6.16, 6.0, 6.44, 7.08, 7.84, 6.28, 7.28, 6.12, 5.64, 6.8, 7.48, 2.68, 7.48, 6.64, 6.52, 6.36, 7.08, 6.16, 2.78, 3.75, 2.63)
data.frame(undil_conc = lane5, t(sapply(lane5, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.375))))
redo5 <- data.frame(ID = c("12-9", "12-10", "12-11", "13-7", "13-10", "13-15", "13-17", "13-24"),
dilA = c(0.584, 9.76, 0.696, 0.576, 0.592, 0.572, 0.724, 0.588))
data.frame(redo5, t(sapply(redo5$dilA, function(x)
bufferAndDNA2add(x = x, vol_tot = 100, target = 0.5)))) %>%
dplyr::arrange(desc(dilA))
lane2 <- c(3.20, 3.31, 2.72, 2.89, 3.4, 2.68, 2.87, 3.06, 2.11, 2.72, 2.11, 3.51, 3.64, 2.18, 2.97, 2.14, 3.58, 2.69, 3.22, 4.40, 3.30, 2.63, 2.31, 2.94, 2.7, 2.34, 2.15, 3.12, 3.58, 2.67, 1.99, 2.82, 3.18, 2.44, 1.97, 3.11, 2.13, 2.18, 2.46, 1.71, 2.6, 2.27, 2.09, 4.08, 2.72, 2.51, 4.52, 3.39, 2.72, 2.1, 2.44, 3.19, 1.8, 2.0, 1.39, 1.59)
data.frame(undil_conc = lane2, t(sapply(lane2, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.375))))
lane3 <- c(3.65, 1.64, 4.46, 3.9, 2.63, 3.98, 3.1, 3.22, 2.66, 5.36, 3.67, 3.7, 3.48, 3.52, 3.81, 1.54, 2.63, 3.95, 3.36, 4.28, 3.03, 2.24, 4.04, 4.2, 3.2, 2.68, 3.21, 2.81, 3.16, 4.72, 3.12, 3.38, 2.93, 3.03, 3.91, 3.46, 2.55, 5.48, 3.68, 2.98, 3.82, 3.77, 1.42, 3.46, 4.28, 3.18, 2.93, 3.17, 2.68, 3.68, 3.17, 2.59, 4.64, 4.32, 1.83, 2.51)
data.frame(undil_conc = lane3, t(sapply(lane3, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.375))))
lane4 <- c(3.75, 2.16, 3.75, 3.5, 2.86, 2.46, 2.57, 3.4, 2.97, 3.01, 2.64, 1.96, 2.47, 3.69, 2.94, 5.04, 3.34, 2.96, 2.79, 2.82, 3.0, 2.96, 3.35, 3.06, 2.96, 3.16, 4.04, 3.06, 3.26, 3.47, 4.44, 2.7, 3.57, 2.54, 2.06, 3.03, 2.93, 2.83, 2.2, 1.32, 2.02, 2.58, 2.25, 2.04, 3.25, 3.22, 2.78, 2.98, 2.53, 2.45, 1.94, 2.05, 2.63, 3.58, 2.33, 1.55)
data.frame(undil_conc = lane4, t(sapply(lane4, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.375))))
# lane 3 was too high at dilution A, consistently about double what I wanted, so I'm making a dilution B:
lane3_A2B <- c(0.808, 0.808, 0.872, 1.34, 0.868, 0.796, 0.732, 0.780, 1.06, 0.696, 0.808,
0.748, 0.668, 0.868, 0.764, 0.984, 0.864, 0.856, 0.792, 0.764, 0.748, 0.892,
0.756, 0.764, 0.816, 0.936, 0.828, 0.836, 0.78, 0.832, 0.868, 0.968, 0.788,
0.864, 0.884, 0.712, 0.752, 0.596, 0.868, 0.84, 0.832, 0.812, 0.976, 0.74,
0.7, 0.972, 0.804, 0.884, 0.812, 0.884, 0.812, 0.884, 0.888, 0.94, 0.828, 0.876)
data.frame(undil_conc = lane3_A2B, t(sapply(lane3_A2B, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.45)))) %>%
bind_cols(data.frame(lane = 3,
extract = c(rep(7, 18), rep(8, 24), rep(9,12), rep(11,2)),
n = c(7:24, 1:24, 1:12, 29, 31)),
.)
# two I want to redo dilB at lower volume because of a very small chance I mixed them up going from A->B dilution on the first pass
bufferAndDNA2add(x = 0.696, vol_tot = 45, target = 0.45) # 7-16 dilB->dilC
bufferAndDNA2add(x = 0.808, vol_tot = 75, target = 0.45) # 7-17 dilA->dilB
# first set of qubit didn't seem to work so I redid it for lane 4 using my HS qubit
# all similar concentration, so I'm just adding 200uL buffer
# and calculating how much undiluted DNA sample to add
lane4_redo <- c(10.2, 7.96, 10.3, 9.0, 10.2, 7.96, 8.56, 11, 9.32, 9.76, 8.32, 7.6, 8.4, 9.92, 8.92, 11.6, 11.3, 9.24, 8.8, 11.3, 8.84, 8.96,
9.64, 8.88, 9.32, 10.8, 10.1, 10, 9.92, 9.76, 10.9, 8.8, 10.2, 9.0, 7.24, 9.48, 9.32, 7.84, 7.32, 5.68, 6.56, 7.84, 7.96, 7.48,
9.04, 9.32, 8.88, 9.24, 8.76, 8.84, 7.52, 8.28, 7.72, 11.1, 9.04, 6.8)
data.frame(undil = lane4_redo,
DNA2add = sapply(lane4_redo, function(x) DNA2add(x = x, vol_b = 200, target = 0.4)))
bufferAndDNA2add(x = 0.424, vol_tot = 100, target = 0.34)
c4 <- c(0.278, 0.285, 0.277, 0.269, 0.279)
v4 <- c(10.58, 8.16, 11.11, 15.15, 12.5)
x4 <- c4*(200+v4)/v4
data.frame(undil = x4,
DNA2add = sapply(x4, function(x) DNA2add(x = x, vol_b = 200, target = 0.34)))
lane2_redo <- c(12.5, 9.4, 7.68, 8.96, 9.6, 10.9, 11.5, 8.88, 6.56, 10.5, 6.92, 10, 10.8, 7.2, 8.16, 8.16, 9.56, 10.4, 9.24, 11.7, 10.4, 8.16, 7.2, 9.48, 7.88, 7.68, 6.76, 8.92, 9.68, 7.84, 6.68, 8.6, 9.28, 7.92,
7.64, 8.72, 7.04, 7.32, 7.52, 5.12, 7.96, 7.2, 7.04, 10.4, 9.08, 7.4, 10.8, 8.92, 8.28, 6.48, 7.04, 9.88, 6.24, 7.04, 5, 4.92)
data.frame(undil = lane2_redo,
DNA2add = sapply(lane2_redo, function(x) DNA2add(x = x, vol_b = 200, target = 0.4)))
id2 <- c("5-7", "5-16", "6-19", "7-6")
c2 <- c(0.283, 0.296, 0.266, 0.288)
v2 <- c(7.21, 10.31, 12.05, 12.05)
x2 <- c2*(200+v2)/v2
data.frame(id = id2,
undil = x2,
DNA2add = sapply(x2, function(x) DNA2add(x = x, vol_b = 200, target = 0.34)))
high2 = data.frame(id = c("5-21", "6-4", "6-20", "6-23"),
dilB = c(0.408, 0.424, 0.436, 0.436))
t(sapply(high2$dilB, function(x) bufferAndDNA2add(x = x, vol_tot = 100, target = 0.38)))
bufferAndDNA2add(x = .416, vol_tot = 100, target = 0.38)
|
f234d4c6ce7a45aeb3d41923ca029fbfc712d1b4 | f5920b419edbdc99dc6147e68a4965cbc587167d | /prepare_data.R | dacd420fe68ff928bccd133d4bd32fe6ab58abea | [
"MIT"
] | permissive | dradisavljevic/FootballStatistics | b1f6523c5e95873f2bfa9b697f5fd43cbf46b2c6 | c4093c8009a016f7e11785da2f20ad6f6950efa7 | refs/heads/master | 2020-06-06T07:31:28.125041 | 2019-07-21T09:29:08 | 2019-07-21T09:29:08 | 192,659,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,448 | r | prepare_data.R | GetDateFromString <- function(date) {
# Convert date from string to a YYYY-MM-DD format.
#
# Args:
# date: string value of the date to be processed
#
# Returns:
# Date object in correct format
date <- gsub("\\.", "/", date)
date <- dmy(date)
return(date)
}
CapitalizeWords <- function(words) {
  # Capitalize the first letter of each space-separated word, mirroring
  # Python's str.title() for simple inputs.
  #
  # Args:
  #   words: string to be capitalized (coerced with as.character)
  #
  # Returns:
  #   String with the first letter of every word upper-cased
  tokens <- strsplit(as.character(words), " ", fixed = TRUE)[[1]]
  first_letters <- toupper(substring(tokens, 1, 1))
  remainders <- substring(tokens, 2)
  paste0(first_letters, remainders, collapse = " ")
}
MergeClubAndCityNames <- function(df) {
  # Merges names of the football clubs with the name of cities that they
  # play in. This is in order to avoid bundling clubs with same names, but
  # from different cities together.
  #
  # Args:
  #   df: dataframe with HostID, Host, HostCity, GuestID and Guest columns
  #
  # Returns:
  #   Dataframe with club name columns changed to the format of ClubName (City)
  teamIDVector <- c()
  teamNameVector <- c()
  # Build a HostID -> "Name (City)" lookup.  by() is used purely for its
  # side effects: the <<- assignments grow the two vectors in this
  # function's environment, one entry per previously-unseen HostID.
  # NOTE(review): only *host* appearances seed the lookup; a team that never
  # hosts a match would keep its bare name below -- confirm every team hosts
  # at least once per season.
  by(df, 1:nrow(df), function(row) {
    if (!(row$HostID) %in% teamIDVector){
      teamIDVector[length(teamIDVector)+1] <<- row$HostID
      teamNameVector[length(teamNameVector)+1] <<-
        paste(row$Host, " (",row$HostCity, ")", sep="")
    }
  })
  # Rewrite Host and Guest columns using the lookup.
  # NOTE(review): seq(1, length(teamIDVector)) evaluates to c(1, 0) when the
  # lookup is empty (zero-row df); seq_along() would be the safe form.
  for (i in seq(1,length(teamIDVector))){
    df$Host <- ifelse(df$HostID==teamIDVector[i],
                      as.character(teamNameVector[i]),
                      as.character(df$Host))
    df$Guest <- ifelse(df$GuestID==teamIDVector[i],
                       as.character(teamNameVector[i]),
                       as.character(df$Guest))
  }
  return (df)
}
RemoveWhiteSpace <- function(df) {
  # Collapse runs of internal whitespace to single spaces and strip leading
  # and trailing whitespace from the club-name and league-name columns.
  #
  # Args:
  #   df: dataframe containing seasonal league information
  #
  # Returns:
  #   Dataframe whose Host, Guest and League columns contain no extra
  #   whitespace; all other columns are untouched.
  clean <- function(values) {
    squeezed <- gsub("\\s+", " ", values)
    trimws(squeezed, "both", whitespace = "[ \t\r\n]")
  }
  for (column in c("Host", "Guest", "League")) {
    df[[column]] <- clean(df[[column]])
  }
  return (df)
}
RoundUpSeason <- function(df) {
  # Reduce a "YYYY-YYYY" season label to its numeric end year, changing the
  # Season column format from YYYY-YYYY to YYYY.
  #
  # Args:
  #   df: dataframe containing seasonal league information
  #
  # Returns:
  #   Dataframe whose Season column holds the numeric end year only
  end_year <- substr(df$Season, 6, 9)  # characters 6-9 of "YYYY-YYYY"
  df$Season <- as.numeric(end_year)
  df
}
ReplaceMissingDates <- function(df) {
  # Replaces the missing date values from the dataframe by replacing them with
  # assumed new values. Assumption is that every matchday is played exactly 7
  # days later than that which came before it.
  #
  # Args:
  #   df: dataframe with Date, Season, Matchday and Level columns
  #
  # Returns:
  #   Dataframe with date column filled in
  missingValuesRow <- df[is.na(df$Date),]
  missingDates <- c()
  # Pass 1: for each missing row, borrow the date of any non-missing fixture
  # from the same season / matchday / level ([1] picks the first match).
  # NOTE(review): seq(1, nrow(missingValuesRow)) is c(1, 0) when there are no
  # missing rows, so this loop would index row 1 and row 0 -- seq_len() would
  # be the safe form; confirm callers never pass a fully-dated frame.
  for (i in seq(1,nrow(missingValuesRow))){
    missingDates[i] <- df$Date[!is.na(df$Date)
                               & df$Season == missingValuesRow$Season[i]
                               & df$Matchday == missingValuesRow$Matchday[i]
                               & df$Level == missingValuesRow$Level[i]][1]
  }
  # Fill the NAs one at a time; [1] always targets the first still-missing
  # slot, so the fill order matches the order missingDates was built in.
  for (i in seq(1,length(missingDates))){
    df$Date[is.na(df$Date)][1] <- as.Date(missingDates[i], origin=lubridate::origin)
  }
  # Pass 2: rows still missing had no dated fixture in their matchday at all.
  # Anchor on a dated fixture from the previous matchday and extrapolate at
  # 7 days per matchday.
  missingValuesRow <- df[is.na(df$Date),]
  if (dim(missingValuesRow)[1] != 0) {
    matchday <- missingValuesRow$Matchday[1]-1
    missingDate <- df$Date[!is.na(df$Date)
                           & df$Season == missingValuesRow$Season[1]
                           & df$Matchday+1 == missingValuesRow$Matchday[1]
                           & df$Level == missingValuesRow$Level[1]][1]
    iterations <- dim(missingValuesRow)[1]
    for (i in seq(1,iterations)){
      df$Date[is.na(df$Date)][1] <- as.Date(missingDate, origin=lubridate::origin)+7*(df$Matchday[is.na(df$Date)][1]-matchday)
    }
  }
  return(df)
}
|
5ac623dc1663c8de89444ca94c6efe91b7361b0e | 34b1ab46a70fe81143874a40d6493c0254f1e5c9 | /rjags/bugs book/sp2_linreg_with_out_dclone_mixture.R | 41f0745da17c874ffddcf703262d3ac81f2e82d1 | [] | no_license | yama1968/Spikes | 5f974a20812dbd88f789cabf7720826d358f8e76 | 498b0cacfc23627ecee743f012a6fda6451cda7f | refs/heads/master | 2021-06-06T00:33:33.637745 | 2020-11-14T18:49:25 | 2020-11-14T18:49:25 | 29,531,065 | 2 | 0 | null | 2020-11-12T21:13:21 | 2015-01-20T13:29:35 | Jupyter Notebook | UTF-8 | R | false | false | 2,843 | r | sp2_linreg_with_out_dclone_mixture.R |
library('dclone')
library('rjags')
# prepare data
N <- 2000
x <- (1:N)/N
set.seed(1234)
epsilon <- rnorm(N, 0, 0.1)
outliers <- rnorm(N, 0 , 1) * rbinom(N, 1, 0.05)
y <- 2 * x + 1 + epsilon
z <- y + outliers
# model
m <- NA
cluster <- NA
chains <- 4
nodes <- 4
cluster <- makeCluster(spec = nodes,
type = "SOCK")
system.time (m <- jags.parfit(cl = cluster,
model = 'sp2_linreg_with_out_mixture.bug',
data = list('x' = x,
'y' = y,
'z' = z,
'N' = N),
n.chains = chains,
n.adapt = 1000,
params = c('a1', 'b1', 'sigma1',
'a2', 'b2', 'sigma2',
'a3', 'b3', 'sigma',
'pClust'),
n.iter = 10000))
summary(m)
stopCluster(cluster)
Parallel computation in progress
utilisateur système écoulé
1.369 1.136 940.400
> summary(m)
#
# Iterations = 2001:12000
# Thinning interval = 1
# Number of chains = 4
# Sample size per chain = 10000
#
# 1. Empirical mean and standard deviation for each variable,
# plus standard error of the mean:
#
# Mean SD Naive SE Time-series SE
# a1 1.99990 0.002208 1.104e-05 1.104e-05
# a2 2.00010 0.002326 1.163e-05 1.156e-05
# a3 2.00056 0.002336 1.168e-05 1.224e-05
# b1 2.00703 0.007738 3.869e-05 3.891e-05
# b2 2.01154 0.008109 4.055e-05 3.991e-05
# b3 2.00454 0.008069 4.035e-05 4.341e-05
# pClust[1] 0.72832 0.396251 1.981e-03 4.662e-05
# pClust[2] 0.27168 0.396251 1.981e-03 4.662e-05
# sigma[1] 0.29415 0.339304 1.697e-03 2.724e-04
# sigma[2] 0.68017 0.341890 1.709e-03 4.659e-04
# sigma1 0.09900 0.001594 7.970e-06 7.969e-06
# sigma2 0.07763 0.001846 9.232e-06 9.232e-06
#
# 2. Quantiles for each variable:
#
# 2.5% 25% 50% 75% 97.5%
# a1 1.99555 1.99842 1.99990 2.00137 2.00425
# a2 1.99558 1.99852 2.00010 2.00167 2.00464
# a3 1.99592 1.99900 2.00057 2.00216 2.00509
# b1 1.99184 2.00184 2.00701 2.01227 2.02217
# b2 1.99564 2.00612 2.01152 2.01699 2.02744
# b3 1.98875 1.99912 2.00457 2.00998 2.02034
# pClust[1] 0.03494 0.71337 0.95481 0.95978 0.96703
# pClust[2] 0.03297 0.04022 0.04519 0.28663 0.96506
# sigma[1] 0.09637 0.09880 0.10034 0.23928 0.98040
# sigma[2] 0.09733 0.51316 0.83613 0.90202 1.02824
# sigma1 0.09594 0.09790 0.09897 0.10006 0.10220
# sigma2 0.07408 0.07637 0.07760 0.07886 0.08133
|
06b1083a97ac87eab1efc570b1af6c9413b6dfba | 1785ec29e940032068233d4ab160220c1eb02e38 | /syn-diversity/tidyData.R | ee32a761d1000e4b4765a7e6c307ccc11ca25037 | [] | no_license | Upward-Spiral-Science/data | bc374c0a24dcaad2a69158b8bf6326c6317aced6 | 135d683207334d4f405de53fa505c9f3ad809e4a | refs/heads/master | 2021-01-10T15:21:54.114367 | 2016-11-21T17:31:18 | 2016-11-21T17:31:18 | 51,005,215 | 0 | 3 | null | 2016-11-21T17:31:18 | 2016-02-03T14:33:34 | null | UTF-8 | R | false | false | 2,544 | r | tidyData.R | ###
### This script takes the data set and creates tidy dataset
### in RData and csv formats.
###
### Jesse Leigh Patsolic <jpatsol1@jhu.edu>
### 2016
### S.D.G
library(data.table)  # library() fails loudly when missing; require() does not

## Read the per-pivot feature matrix (6 features per channel) and locations.
feat <- fread("synapsinR_7thA.tif.Pivots.txt.2011Features.txt",
              showProgress=FALSE)
loc <- fread("synapsinR_7thA.tif.Pivots.txt",showProgress=FALSE)

## Setting the channel names
channel <- c('Synap1','Synap2','VGlut1t1','VGlut1t2','VGlut2','Vglut3',
             'psd','glur2','nmdar1','nr2b','gad','VGAT',
             'PV','Gephyr','GABAR1','GABABR','CR1','5HT1A',
             'NOS','TH','VACht','Synapo','tubuli','DAPI')
## Setting the channel types
channel.type <- c('ex.pre','ex.pre','ex.pre','ex.pre','ex.pre','in.pre.small',
                  'ex.post','ex.post','ex.post','ex.post','in.pre','in.pre',
                  'in.pre','in.post','in.post','in.post','in.pre.small','other',
                  'ex.post','other','other','ex.post','none','none')
nchannel <- length(channel)
nfeat <- ncol(feat) / nchannel  # features per channel (expected: 6)

## Creating factor variables for channel and channel type sorted properly
ffchannel <- (factor(channel.type,
                     levels= c("ex.pre","ex.post","in.pre","in.post","in.pre.small","other","none"), ordered = TRUE
                     ))
fchannel <- as.numeric(factor(channel.type,
                              levels= c("ex.pre","ex.post","in.pre","in.post","in.pre.small","other","none")
                              ))
ford <- order(fchannel)  # channel order, grouped by synapse type
exType <- factor(c(rep("ex",11),rep("in",6),rep("other",7)),ordered=TRUE)
exCol <- exType; levels(exCol) <- c("#197300","#990000","mediumblue")
exCol <- as.character(exCol)

## Name feature columns channel-major: Synap1_F0..F5, Synap2_F0..F5, ...
fname <- as.vector(sapply(channel,function(x) paste0(x,paste0("_F",0:5))))
names(feat) <- fname

## BUG FIX: the original line was `fcol <- rep(ccol, each=6)`, but `ccol` is
## never defined in this script, which aborted the run.  `fcol` is unused
## below, so the line is disabled; if per-column colours are ever needed,
## `rep(exCol, each = 6)` (one colour per feature column) is the likely intent.
# fcol <- rep(exCol, each = 6)

## Reorder columns feature-major: for each feature F0..F5, take that feature's
## column from every channel, with channels sorted by type (ford).
f <- lapply(1:6,function(x){seq(x,ncol(feat),by=nfeat)[ford]})
featF <- lapply(f,function(x){subset(feat,select=x)})
data01 <- data.table(Reduce(cbind, featF))

## Attribute table describing every column of data01.
at <- data.table(names = names(data01),
                 feature = factor(paste0('F',rep(0:5,each=nchannel)), ordered = TRUE),
                 marker = factor(channel[ford], levels = channel[ford],ordered = TRUE) ,
                 type0 = exType,
                 type1 = ffchannel[ford],
                 keep.rownames = TRUE)
## BUG FIX: was `setkey(attribs, ...)`, but no object named `attribs` exists;
## the attribute table built above is called `at`.
setkey(at, names, feature, marker)
attributes(data01)$attr <- at

## BUG FIX: system.time() takes a single expression; the original passed two
## write.csv() calls separated by a newline, which is a parse error.  Braces
## make them one compound expression.
system.time({
  write.csv(data01, file = "cleanData.csv")
  write.csv(t(at), file = "dataAttributes.csv")
})
system.time(
  save(data01, file = "cleanDataWithAttributes.RData")
)
# Time:
## Working status:
### Comments:
####Soli Deo Gloria
82d99539bbaada68e5b91af83faf5c5291e4626a | 38cddc874cf10f65b6bd93e410582b9c17c8b2da | /_drake.R | 6a63f8ce5688aed588693d555046fae3e4175caf | [
"MIT"
] | permissive | the-Hull/2019-feature-selection | 2dea0f0c37d83b1324b53715d2da1691185259ed | e64808007a6961a72cd2b73142acbb9ac11fc3ed | refs/heads/master | 2021-04-05T14:26:08.056114 | 2020-03-16T09:04:11 | 2020-03-16T09:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,185 | r | _drake.R | # load packages ----------------------------------------------------------------
# Load project-wide package set and workflow libraries.
source("code/99-packages.R")
library("drake")
library("magrittr")
suppressMessages(library("R.utils"))
# load mlr extra learner -------------------------------------------------------
# NOTE(review): sourcing an unpinned file from a raw GitHub master branch is
# not reproducible -- consider pinning to a commit hash or vendoring the file.
source("https://raw.githubusercontent.com/mlr-org/mlr-extralearner/master/R/RLearner_regr_ranger_mtry_pow.R")
# Plans ------------------------------------------------------------------------
# project
# learners_plan = code_to_plan("code/05-modeling/project/learner.R")
# resampling_plan = code_to_plan("code/05-modeling/project/resamp.R")
# param_set_plan = code_to_plan("code/05-modeling/project/param-set.R")
# tune_ctrl_plan = code_to_plan("code/05-modeling/project/tune-ctrl.R")
# tuning_plan = code_to_plan("code/05-modeling/project/tune.R")
# train_plan = code_to_plan("code/05-modeling/project/train.R")
# task_plan = code_to_plan("code/05-modeling/project/task.R")
# Source all helper functions and the plan definitions used below.
sourceDirectory("R")
# FIXME: This regex ignores the "project" folder temporarily
sourceDirectory("code")
# # Combine all ----------------------------------------------------------------
# plan_project = bind_plans(data_plan, download_plan, hyperspectral_plan, learners_plan,
# resampling_plan, param_set_plan, tune_ctrl_plan, train_plan,
# tuning_plan, task_plan, reports_plan_project, sentinel_plan
# )
# Chain every sub-plan (defined in the sourced files above) into the full
# paper workflow, in pipeline order: download -> processing -> preprocessing
# -> tasks/EDA -> tuning setup -> benchmark -> train -> feature importance.
plan_paper = bind_plans(download_data_plan,
                        hyperspectra_processing_plan,
                        sentinel_processing_plan,
                        data_preprocessing_plan,
                        tasks_plan,
                        filter_eda_plan,
                        param_sets_plan,
                        learners_plan,
                        filter_wrapper_plan,
                        resampling_plan,
                        tune_ctrl_plan,
                        tune_wrapper_plan,
                        benchmark_plan,
                        train_plan,
                        feature_imp_plan
)
# Dispatch parallel workers through SLURM via clustermq.
options(clustermq.scheduler = "slurm",
        clustermq.template = "~/papers/2019-feature-selection/slurm_clustermq.tmpl")
# plan_project %<>% dplyr::mutate(stage = as.factor(stage))
# plan_paper %<>% dplyr::mutate(stage = as.factor(stage))
# project ----------------------------------------------------------------------
# drake_config(plan_paper,
# verbose = 2, lazy_load = "eager",
# console_log_file = "log/drake.log",
# caching = "worker",
# template = list(log_file = "log/worker%a.log", n_cpus = 4, memory = "12G", job_name = "paper2"),
# # prework = list(
# # #uote(future::plan(future::multisession, workers = 25))#,
# # #quote(future::plan(future.callr::callr, workers = 4))
# # quote(parallelStart(
# # mode = "multicore", cpus = ignore(25)))
# # ),
# prework = list(quote(set.seed(1, "L'Ecuyer-CMRG")),
# quote(parallelStart(mode = "multicore", cpus = 4, level = "mlr.resample"))
# ),
# garbage_collection = TRUE, jobs = 55, parallelism = "clustermq", lock_envir = FALSE,
# keep_going = TRUE
# )
# paper -----------------------------------------------------------------------
# not running in parallel because mclapply gets stuck sometimes
# Final drake configuration: 40 clustermq (SLURM) jobs, each worker using 4
# cores for mlr's multicore parallelism.  The commented `targets` lines are
# alternative target subsets kept for reruns.
drake_config(plan_paper,
             # targets = c("bm_vi_task_svm_borda_mbo", "bm_vi_task_xgboost_borda_mbo",
             #             "bm_vi_task_rf_borda_mbo"),
             # targets = c("bm_hr_task_corrected_xgboost_borda_mbo", "bm_hr_task_corrected_xgboost_cmim_mbo",
             #             "bm_hr_task_corrected_rf_mrmr_mbo", "bm_hr_task_corrected_xgboost_mrmr_mbo",
             #             "bm_hr_task_corrected_svm_carscore_mbo"),
             targets = "benchmark_no_models_new",
             #targets = c("vi_task", "hr_task", "nri_task", "hr_nri_vi_task", "hr_nri_task", "hr_vi_task"),
             verbose = 2,
             lazy_load = "eager",
             packages = NULL,
             console_log_file = "log/drake.log",
             caching = "master",
             # Per-job SLURM resources filled into slurm_clustermq.tmpl.
             template = list(log_file = "log/worker%a.log", n_cpus = 4,
                             memory = 6000, job_name = "paper2",
                             partition = "all"),
             # prework = quote(future::plan(future::multisession, workers = 4)),
             # Run on every worker before targets: load packages, seed the
             # parallel-safe RNG, start mlr's multicore backend.
             prework = list(quote(load_packages()),
                            #quote(future::plan(callr, workers = 4)),
                            quote(set.seed(1, "L'Ecuyer-CMRG")),
                            quote(parallelStart(mode = "multicore",
                                                cpus = 4,
                                                #level = "mlr.resample",
                                                mc.cleanup = TRUE,
                                                mc.preschedule = FALSE))
             ),
             garbage_collection = TRUE, jobs = 40, parallelism = "clustermq",
             keep_going = TRUE, recover = TRUE, lock_envir = TRUE, lock_cache = FALSE
)
|
ab1f4821a93318b4a0eae12d6acd673a3e3a90dc | bd0fb3536e67f94f44b1a534d0e67208f1afa90d | /metabolics/result_analysis/post_glmnet_result_subset_transctips.R | 2fb2fa447a7406cef669a91fdf30968f3896e4ff | [] | no_license | csbio123/stat_learning_code | d3a4d1f7f63ad148f74fdfc0c181c2a7caf9d2d2 | fc353ef9568b1c2ef7f1bee690c055399bbf15be | refs/heads/master | 2018-09-22T23:50:15.119031 | 2018-07-07T09:44:10 | 2018-07-07T09:44:10 | 111,194,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,873 | r | post_glmnet_result_subset_transctips.R | #P value analysis conrad
pval_anal <- function(path, top=0, type, all_genes_names){
  # Aggregate per-run p-value files, rank genes by how often they were called
  # positive, and optionally write the top `top` genes (or a random subset of
  # the positive genes when `type == "random"`) to a file under `path`.
  #
  # Args:
  #   path:            directory holding tab-separated "pvalues*" files
  #                    (NOTE(review): list.files() treats the pattern as a
  #                    regex, so "pvalues*" matches "pvalue" plus zero or
  #                    more "s" -- confirm the intended glob)
  #   top:             number of genes to write out (0 = rank only, no file)
  #   type:            "random" selects a random subset instead of the head
  #   all_genes_names: universe of gene names used to filter the ranking
  filenames <- list.files(path, pattern = "pvalues*", full.names = TRUE)
  files = sapply(filenames, read.csv, sep="\t", simplify = FALSE)
  p_value_results = do.call(rbind, files)
  # Contingency table: per gene, how many runs called it negative / positive.
  positive_freq = table(p_value_results[c("Genes","positive_binary")])
  positive_freq = cbind(positive_freq, positive_freq[,2]/length(files))
  genes = levels(droplevels(p_value_results[,1]))
  # Mean positive_p_value per gene.
  # BUG FIX: the original subset was `p_value_results[which(genes == x), ]`,
  # which indexed rows by the position of x in the *levels* vector (always a
  # single, unrelated row).  Subset by the Genes column instead.
  probs = sapply(unique(genes), function(x) {
    sub = p_value_results[p_value_results$Genes == x, ]
    if (nrow(sub) > 1) {
      # BUG FIX: mean() on a one-column data.frame warns and returns NA;
      # extract the column as a vector with [[ ]] before averaging.
      p = mean(sub[["positive_p_value"]])
    } else {
      p = sub[["positive_p_value"]]
    }
    return(p)
  }, simplify = FALSE)
  probs = do.call(rbind, probs)
  probs = cbind(probs, rownames(probs))  # col 2: gene name, for lookups below
  # BUG FIX: this reordering was computed but never assigned in the original,
  # so it had no effect on the ranking used for top_genes.
  positive_freq = positive_freq[order(positive_freq[,1], positive_freq[,3], decreasing = TRUE), ]
  top_genes = rownames(positive_freq)
  # Binomial likelihood of seeing k positive calls in length(files) runs given
  # the gene's mean positive p-value.
  # BUG FIX: the original iterated over 0:length(top_genes), producing a bogus
  # index 0; seq_along() covers exactly the valid indices.
  like = sapply(seq_along(top_genes), function(x){
    p = probs[top_genes[x] == probs[,2], ]
    p = as.numeric(p[1])
    k = as.numeric(positive_freq[x,2])
    likelihood = dbinom(k, as.numeric(length(files)), prob = p)
    return(likelihood)
  }, simplify = FALSE)
  like = do.call(rbind, like)
  positive_freq = cbind(positive_freq, like)
  # Prepend |positives - negatives| and sort on it (final ranking).
  positive_freq = cbind(abs(positive_freq[,2] - positive_freq[,1]), positive_freq)
  positive_freq = positive_freq[order(positive_freq[,1], decreasing = TRUE),]
  if (top > 0) {
    # Keep only genes present in the supplied universe, then pick the subset.
    positive_genes = intersect(as.character(rownames(positive_freq)), as.character(all_genes_names))
    subset_genes = head(positive_genes, as.numeric(top))
    print(subset_genes)
    out_name = '/remove_top_'
    if (type == "random") {
      print("random stuff")
      subset_genes = sample(positive_genes, as.numeric(top))
      out_name = paste0(out_name, "_random_")
      print(subset_genes)
    }
    write.table(file=paste(path, out_name, top , '.csv', sep=""), subset_genes, quote = FALSE, row.names = FALSE, col.names = FALSE)
  }
}
read_data <- function(my_dir, input_filename){
  # Load the imputation RData file and repackage its dotted-name objects into
  # a list with underscore-style names.
  #
  # Args:
  #   my_dir:         directory containing the RData file
  #   input_filename: name of the RData file to load
  #
  # Returns:
  #   Named list of the objects restored from the RData file.
  #
  # NOTE(review): setwd() changes the working directory for the whole session
  # and is never restored -- a side effect callers may not expect.
  setwd(my_dir)
  # load() restores the saved objects (hba1c.descrip, GX.sva, ...) into this
  # function's environment; the return value is only their names.
  imputation_data_generated = load(input_filename)
  data = list(hba1c_descrip=hba1c.descrip, GX_sva=GX.sva, gene_symbols=gene_symbols, random_seed=.Random.seed, Tri_revert=TriG.revert,
              svmod_bmi_catg_sv=svmod.bmi.catg.sv, svmod_hba1c_catg=svmod.hba1c.catg, t_expr_fep=t.expr.fep, fglu_revert=fglu.revert,
              Age_revert=Age.revert, Height_revert=Height.revert,
              svmod_hba1c_catg_sv=svmod.hba1c.catg.sv, pump_imp=pump.imp, gx_prejoin_1=gx.prejoin.1, pheno_sva=pheno.sva, hba1c_revert=hba1c.revert,
              imp_out=imp.out, imp_out_all_list=imp.out.all.list,
              BMI_revert=BMI.revert, svobj_hba1c_catg=svobj.hba1c.catg, svmod_bmi_catg=svmod.bmi.catg, svobj_bmi_catg=svobj.bmi.catg, pheno_mini=pheno.mini,
              test_obj=test.obj, imp_list_Tall=imp.list.Tall)
  return(data)
}
# commandArgs() is built in; it lets you drive this script from the shell.
##### USE AS FOLLOWS #####:
# Rscript p_value_analysis.R /users/spjtcoi/brc_scratch/project_tomi/conrad/reanalyse/drug_naive/all_transcripts 476
args <- commandArgs(trailingOnly = TRUE)  # trailingOnly drops R's own launch arguments
print(paste0('Input directory for p value analysis:', args[1]))
# NOTE(review): the three assignments below are hard-coded debug values that
# override whatever was passed on the command line -- remove for real runs.
args[1] = "/Users/ti1/Documents/conrad/results/default_data/all_features/"
args[2] = 10
args[3] = "ranasddom"  # NOTE(review): looks like a typo for "random" -- confirm
# BUG FIX: the original path was written "Google\ Drive" -- "\ " is not a
# valid escape in an R string and made this file unparseable; spaces inside
# R string literals need no escaping.
input_data = read_data("/Users/ti1/Google Drive/raw data/", "imputation_data_generated.RData")
my_genes = rownames(input_data$GX_sva)
if (length(args) == 3) {
  # args[1] = input path, args[2] = number of top genes, args[3] = selection type
  pval_anal(path = args[1], top = args[2], type = args[3], all_genes_names = my_genes)
}
|
e05a2ea1788dffbf7e153c8863dcb4893a7a16ae | 18666d876d23e05ab7b365adb01755ded173c071 | /Cluster_Analysis.R | 51998bdbd96b3d45eb7ccad3a10e7f063112f991 | [] | no_license | Allan-pixe/Cluster_Analysis | 8f01f69c2d44ac115f6ea2e9a50d4e73ac3f7db9 | c5cd96ce7d20911673a6bc0228151bc3c2379357 | refs/heads/master | 2021-09-15T01:30:06.175882 | 2018-05-23T12:35:27 | 2018-05-23T12:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,763 | r | Cluster_Analysis.R | #install packages
# NOTE(review): install.packages() runs on every execution and needs network
# access; consider guarding with requireNamespace("Rserve", quietly = TRUE).
install.packages("Rserve")
library(Rserve)
Rserve(args='--vanilla')  # start an Rserve daemon (used here to feed Tableau)
# set WD
# NOTE(review): setwd() to a user-specific path makes the script non-portable.
setwd("~/Desktop/Pharma_Hackathon")
# import the dataset
cosmetics = read.csv("pharma2.csv")
# Observe the dataset
str(cosmetics)
# Subset the products sold/profit/profitmargin data
# (column 2 paired with columns 9, 12 and 13 respectively)
sold = cosmetics[,c(2,9)]
profit = cosmetics[,c(2,12)]
profitmargin = cosmetics[,c(2,13)]
# Plot subset data
plot(sold, main = "Products Sold", pch =20, cex =2)
plot(profit, main = "Profit", pch =20, cex =2)
plot(profitmargin, main = "Margin", pch =20, cex =2)
# Perform K-Means with 2 clusters
set.seed(7)  # fixed seed so cluster assignments are reproducible
km1 = kmeans(sold, 2, nstart=100)
# Plot results
plot(sold, col =(km1$cluster +1) , main="K-Means result with 2 clusters", pch=20, cex=2)
# NOTE(review): km1a/km1b reuse the RNG state advanced by km1 rather than a
# fresh set.seed() -- still deterministic across whole-script runs.
km1a = kmeans(profit, 2, nstart=100)
plot(profit, col =(km1a$cluster +1) , main="K-Means result with 2 clusters", pch=20, cex=2)
km1b = kmeans(profitmargin, 2, nstart=100)
plot(profitmargin, col =(km1b$cluster +1) , main="K-Means result with 2 clusters", pch=20, cex=2)
# Check for the optimal number of clusters given the data
# (elbow method: total within-group sum of squares for k = 1..15)
mydata <- sold
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata,
                                     centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares",
     main="Assessing the Optimal Number of Clusters with the Elbow Method",
     pch=20, cex=2)
mydataa <- profit
wssa <- (nrow(mydataa)-1)*sum(apply(mydataa,2,var))
for (i in 2:15) wssa[i] <- sum(kmeans(mydataa,
                                      centers=i)$withinss)
plot(1:15, wssa, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares",
     main="Assessing the Optimal Number of Clusters with the Elbow Method",
     pch=20, cex=2)
mydatab <- profitmargin
wssb <- (nrow(mydatab)-1)*sum(apply(mydatab,2,var))
for (i in 2:15) wssb[i] <- sum(kmeans(mydatab,
                                      centers=i)$withinss)
plot(1:15, wssb, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares",
     main="Assessing the Optimal Number of Clusters with the Elbow Method",
     pch=20, cex=2)
# Perform K-Means with the optimal number of clusters identified from the Elbow method
set.seed(7)
km2 = kmeans(sold, 4, nstart=100)
km2a = kmeans(profit, 4, nstart=100)
km2b = kmeans(profitmargin, 4, nstart=100)
# Examine the result of the clustering algorithm
km2
km2a
km2b
# Plot results
plot(sold, col =(km2$cluster +1) , main="K-Means result with 4 clusters (Sold)", pch=20, cex=2)
plot(profit, col =(km2a$cluster +1) , main="K-Means result with 4 clusters (Profit)", pch=20, cex=2)
plot(profitmargin, col =(km2b$cluster +1) , main="K-Means result with 4 clusters (Margin)", pch=20, cex=2)
d5151bc93f0cbf9730c64c7485d6a9f6979e77e0 | 09be257589c57f0b2c031cfaf2bcb52767dad40a | /R/make-newdata.R | 058820e458af27de70d29b3283710a7546218633 | [
"MIT"
] | permissive | adibender/mgcvtools | f0591ec98c0c3882dfa9bd5586c59f8642dacbbd | 77cc237b6a5a62521939a956c2db3dea614592e8 | refs/heads/master | 2021-03-30T17:01:52.090147 | 2019-01-04T12:31:10 | 2019-01-04T12:31:10 | 75,950,156 | 0 | 1 | MIT | 2019-01-04T12:31:11 | 2016-12-08T15:29:32 | R | UTF-8 | R | false | false | 5,044 | r | make-newdata.R | #' Extract information of the sample contained in a data set
#'
#' Given a data set and grouping variables, this function returns mean values
#' for numeric variables and modus for characters and factors. Usually
#' this function should not be called directly but will rather be called
#' as part of a call to \code{make_newdata}.
#'
#' @rdname sample_info
#' @param x A data frame (or object that inherits from \code{data.frame}).
#' @importFrom stats median
#' @return A data frame containing sample information (for each group).
#' If applied to an object of class \code{ped}, the sample means of the
#' original data is returned.
#' Note: When applied to a \code{ped} object, that doesn't contain covariates
#' (only interval information), returns data frame with 0 columns.
#'
#' @export
#' @keywords internal
sample_info <- function(x) {
  # S3 generic: dispatches to sample_info.<class of x>(x).
  UseMethod("sample_info", x)
}
#' @inheritParams sample_info
#' @import checkmate dplyr
#' @importFrom purrr compose
#' @export
#' @rdname sample_info
sample_info.data.frame <- function(x) {
  cn <- colnames(x)
  # Column-wise summaries: mean for numeric columns ...
  num <- summarize_if (x, .predicate = is.numeric, ~mean(., na.rm = TRUE))
  # ... and `modus` (a helper defined elsewhere in this package) for all
  # non-numeric columns.  compose("!", is.numeric) negates the predicate.
  fac <- summarize_if (x, .predicate = compose("!", is.numeric), modus)
  # Columns present in both summaries are the grouping variables of x (each
  # summarize_if keeps the groups); join on them and restore the grouping.
  nnames <- intersect(names(num), names(fac))
  if (length(nnames) != 0) {
    suppressMessages(
      x <- left_join(num, fac) %>% grouped_df(vars = lapply(nnames, as.name))
    )
  } else {
    # No grouping: just glue the two one-row summaries side by side.
    x <- bind_cols(num, fac)
  }
  # Restore the original column order.
  return(select(x, one_of(cn)))
}
#' Create a data frame from all combinations of data frames
#'
#' Works like \code{\link[base]{expand.grid}} but for data frames.
#'
#' @importFrom dplyr slice bind_cols combine
#' @importFrom purrr map map_lgl map2 transpose cross
#' @importFrom checkmate test_data_frame
#' @param ... Data frames that should be combined to one data frame.
#' Elements of first df vary fastest, elements of last df vary slowest.
#' @examples
#' combine_df(
#' data.frame(x=1:3, y=3:1),
#' data.frame(x1=c("a", "b"), x2=c("c", "d")),
#' data.frame(z=c(0, 1)))
#' @export
#' @keywords internal
combine_df <- function(...) {
  dots <- list(...)
  if (!all(sapply(dots, test_data_frame))) {
    stop("All elements in ... must inherit from data.frame!")
  }
  # Row indices per data frame; empty frames are dropped before crossing.
  ind_seq <- map(dots, ~ seq_len(nrow(.x)))
  not_empty <- map_lgl(ind_seq, ~ length(.x) > 0)
  # cross() builds every combination of row indices (first df varies
  # fastest); transpose + combine yields one index vector per data frame.
  ind_list <- ind_seq[not_empty] %>% cross() %>% transpose() %>% map(combine)
  # Expand each data frame by its index vector, then bind columns together.
  map2(dots[not_empty], ind_list, function(.x, .y) slice(.x, .y)) %>%
    bind_cols()
}
#' Construct a data frame suitable for prediction
#'
#' Given a data set, returns a data set that can be used
#' as \code{newdata} argument in a call to \code{predict} and similar functions.
#'
#' @rdname newdata
#' @aliases make_newdata
#' @inheritParams sample_info
#' @param ... Covariate specifications (expressions) that will be evaluated
#' by looking for variables in \code{x} (or \code{data}). Must be of the form \code{z = f(z)}
#' where \code{z} is a variable in the data set \code{x} and \code{f} a known
#' function that can be usefully applied to \code{z}. See examples below.
#' @import dplyr
#' @importFrom checkmate assert_data_frame assert_character
#' @importFrom purrr map cross_df
#' @details Depending on the class of \code{x}, mean or modus values will be
#' used for variables not specified in ellipsis. If x is an object that inherits
#' from class \code{ped}, useful data set completion will be attempted depending
#' on variables specified in ellipsis.
#' @examples
#' library(dplyr)
#' iris %>% make_newdata()
#' iris %>% make_newdata(Sepal.Length=c(5))
#' iris %>% make_newdata(Sepal.Length=seq_range(Sepal.Length, 3),
#' Sepal.Width=c(2, 3))
#' iris %>% make_newdata(Sepal.Length=seq_range(Sepal.Length, 3),
#' Species=unique(Species), Sepal.Width=c(2, 3))
#' # mean/modus values of unspecified variables are calculated over whole data
#' iris %>% make_newdata(Species=unique(Species))
#' iris %>% group_by(Species) %>% make_newdata()
#' # You can also pass a part of the data sets as data frame to make_newdata
#' purrr::cross_df(list(Sepal.Length = c(0, 500, 1000),
#' Species = c("setosa", "versicolor"))) %>%
#' make_newdata(x=iris)
#' @export
make_newdata <- function(x, ...) {
  # S3 generic: dispatches to make_newdata.<class of x>(x, ...).
  UseMethod("make_newdata", x)
}
#' @inherit make_newdata
#' @importFrom purrr map_lgl is_atomic
#' @importFrom lazyeval f_eval
#' @rdname newdata
#' @export
make_newdata.default <- function(x, ...) {
  assert_data_frame(x, all.missing = FALSE, min.rows = 2, min.cols = 1)
  orig_names <- names(x)
  # Capture the covariate specifications unevaluated, then evaluate each in
  # the context of x.  NOTE(review): quos() returns rlang quosures which are
  # passed to lazyeval::f_eval -- presumably relying on quosures being
  # formulas; confirm this combination on current rlang versions.
  expressions <- quos(...)
  expr_evaluated <- map(expressions, f_eval, data = x)
  # construct data parts depending on input type:
  # atomic results are crossed into a grid, data-frame results are combined
  # row-wise via combine_df().
  lgl_atomic <- map_lgl(expr_evaluated, is_atomic)
  part1 <- expr_evaluated[lgl_atomic] %>% cross_df()
  part2 <- do.call(combine_df, expr_evaluated[!lgl_atomic])
  ndf <- combine_df(part1, part2)
  # Columns not specified by the caller are filled with sample means/modi.
  rest <- x %>% select(-one_of(c(colnames(ndf))))
  if (ncol(rest) > 0) {
    si <- sample_info(rest) %>% ungroup()
    ndf <- combine_df(si, ndf)
  }
  # Return columns in the original order.
  ndf %>% select(one_of(orig_names))
}
|
f7636ee33f6aa6c0abb18a78cec37b827d70fe4b | 7a3176649ff51c775607d290fa6b929e8c327521 | /LogisticRegressionCalculateMetrics.R | 8c1cbdd24955bf956a3e22230717bd28a0422412 | [] | no_license | Santosh-Sah/Logistic_Regression_R | bf5cf73af20196992ebe9ea8620ebf1f6821e93f | 0b88169ff7db1d65c811eaf306384d481d4afc07 | refs/heads/master | 2022-06-04T17:09:11.209157 | 2020-04-30T16:02:02 | 2020-04-30T16:02:02 | 260,256,015 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 608 | r | LogisticRegressionCalculateMetrics.R | source("LogisticRegressionUtils.R")
# Evaluate the fitted logistic-regression model on the held-out testing set.
# (Assignment operators unified to `<-`; the original mixed `=` and `<-`.)

# Fitted model — read for completeness; the confusion matrix below only needs
# the saved predictions.
logisticRegressionModel <- readRDS("LogisticRegressionModel.RDS")

# Held-out testing set; column 3 holds the observed 0/1 outcome.
logisticRegressionTestingSet <- readRDS("LogisticRegressionTestingSet.RDS")

# Predicted probabilities for the testing set.
logisticRegressionPredictedValuesForTestingSet <- readRDS("LogisticRegressionPredictedValuesForTestingSet.RDS")

# Confusion matrix: observed outcome vs. prediction thresholded at 0.5.
logisticRegressionConfusionMatrix <- table(logisticRegressionTestingSet[, 3], logisticRegressionPredictedValuesForTestingSet > 0.5)
logisticRegressionConfusionMatrix
#   FALSE TRUE
# 0    59    5
# 1     9   27
|
a117bdd82816b94f2a7af46c3cad239af882143a | ea3435d66f8cbebb4a46981386f5afe3b67e4d00 | /man/ordinal.Rd | b14a383d511a32ecf6cd1d64ea82d873c91733eb | [] | no_license | ramnathv/intellidate | 0ae1eb4c4514513a9af91b6dc7eec56bdbe00641 | 497df967e67d7f691721b162fe169503abc21519 | refs/heads/master | 2020-06-02T05:13:47.819698 | 2011-12-15T15:04:42 | 2011-12-15T15:04:42 | 2,976,112 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 214 | rd | ordinal.Rd | \name{ordinal}
\alias{ordinal}
\title{Converts a number to its ordinal equivalent (e.g. 12 => 12th)}
\usage{
ordinal(number)
}
\description{
This function is a port of the ruby code at
http://goo.gl/JTZCm
}
|
ffdf3610d012e60280e13868ab99df7cdf581eb1 | d1e5a7fef70b4b01b8fb5dca666d872edf792f8f | /tgExplore5.R | 2007ac9027e064b36faba3b6edcd12262f821466 | [] | no_license | gking2224/StatInfCourseProject | fba1de5408340a840cb0f317517313bba8bd62a6 | 79291bb3336f87b4ee51713c32ea8bb0f1b126bc | refs/heads/master | 2020-03-30T22:53:25.690052 | 2015-02-01T22:40:57 | 2015-02-01T22:40:57 | 28,970,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 120 | r | tgExplore5.R | ## @knitr tgExplore5
# Density of ToothGrowth$len, filled by supplement type (supp).
# NOTE(review): `shape = dose` is mapped but geom_density() has no shape
# aesthetic, so it has no visual effect — presumably `linetype = dose` or a
# facet was intended; confirm before relying on this plot.
g5 <- ggplot(data = ToothGrowth, aes(x = len, fill=supp, shape = dose))+ geom_density()
print(g5)
fdef0d56f8afc98527e04550697f3c37e5973237 | 45f6da0f2c07e2fb4e8e13a510653630216c00eb | /Code/Other/stationaryIntergrated.R | 13185157c594443ac910fa15815c0462b1897222 | [] | no_license | arijoh/DataDrivenForecastModels | b13cd87bbc781c76290d58bd0ea5dbcd179f0f12 | 5926ca3fb47b5cd280c994017f6fc28cc59011e9 | refs/heads/master | 2023-02-09T07:00:04.460417 | 2021-01-03T18:22:38 | 2021-01-03T18:22:38 | 283,324,215 | 1 | 0 | null | 2020-12-10T02:13:00 | 2020-07-28T20:47:16 | null | UTF-8 | R | false | false | 471 | r | stationaryIntergrated.R |
t <- 100
# Length of the simulated series (mirrors the `t` set at the top of the
# script; defined locally so this section is self-contained and does not
# shadow base::t inside the loop).
n_steps <- 100

# Simulate a pure random walk: x[i] = x[i-1] + e[i], with e ~ N(0, 1).
x <- e <- rnorm(n = n_steps, mean = 0, sd = 1)
for (i in 2:n_steps) {
  x[i] <- x[i - 1] + e[i]
}

# Save the level vs. first-difference plots side by side as EPS.
# BUG FIX: the device must be opened *before* plotting — the original called
# postscript() after the plots and dev.off() immediately afterwards, so the
# EPS file was created empty.
out_file <- "../Figures/eps/Differentiation.eps"
dir.create(dirname(out_file), recursive = TRUE, showWarnings = FALSE)
setEPS()
postscript(file = out_file, onefile = TRUE, horizontal = TRUE, width = 5, height = 5)
par(mfrow = c(1, 2))
plot.ts(x, xlab = "Time", ylab = "Y(t)", main = "Random walk, I = 0", cex.main = 1)
plot.ts(diff(x), xlab = "Time", ylab = "Y(t)", main = "Random walk, I = 1", cex.main = 1)
dev.off()

# Autocorrelation of the level (slowly decaying) vs. the differenced series.
plot(acf(x, plot = FALSE))
plot(acf(diff(x), plot = FALSE))
|
a013261f728cdb03ea719176c2bc25522795eea3 | f7c205369b307ef300fabbeb59bfd9043c73bad8 | /pl3 h.R | 744c72fa928e33c51f0d75316690705a793ef32a | [] | no_license | b62ropaa/R | 26ba444f38cd00ef9ab7ee33692df00d662e92ca | 218fa8327a8bb234fc4b47e2b3951b7a39da8a6b | refs/heads/master | 2020-04-13T03:20:36.251013 | 2019-09-09T10:54:14 | 2019-09-09T10:54:14 | 162,928,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 147 | r | pl3 h.R | #ejercicio para poner la tabla de multiplicar de un numero introducido del 1 al 9
# Exercise: print the multiplication table (1 through 9) for a number read
# from standard input.
t<-scan()
for (multiplier in 1:9){
  product <- t * multiplier
  cat(t, "*", multiplier, "=", product, "\n")
}
|
93a5da9d11da6bfa17e14d0217fd199a530fc2e6 | 57daf5402daf6b34a8ca4a23298eb837169e54e4 | /ch3/3-2-3.R | 0c73354696776905dd1293432a92815703931336 | [] | no_license | albertgoncalves/rethinker | 9c0e15553fa1f3e7723f129648492c091e57dcb2 | 2dd3c01dfc314097b7cb32102d11b66a056b56c0 | refs/heads/master | 2020-04-13T13:53:54.231916 | 2019-06-08T13:21:34 | 2019-06-08T13:21:34 | 163,245,793 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 749 | r | 3-2-3.R | #!/usr/bin/env Rscript
source("../src/rethinking.R")
source("3-1.R")
loss_f = function(posterior, p_grid) {
f = function(d) {
return(sum(posterior * abs(d - p_grid)))
}
return(f)
}
if (sys.nframe() == 0) {
n = 25000
nn = 10000
p_grid = seq(from=0, to=1, length.out=n)
prior = rep(1, n)
likelihood = dbinom(3, size=3, prob=p_grid)
posterior = std_posterior(likelihood * prior)
samples = posterior_samples(p_grid, posterior, nn)
print(p_grid[which.max(posterior)])
print(chainmode(samples, adj=0.01))
print(mean(samples))
print(median(samples))
print(sum(posterior * abs(0.5 - p_grid)))
loss = vapply(p_grid, loss_f(posterior, p_grid), 0)
print(p_grid[which.min(loss)])
}
|
49a4640b70bb435a3d7345b533333c76dff1d122 | 9c63e407b9aa6083d4c83202e82c90a9224b99a4 | /brouillon.R | f030c13245b45a77ed914cd6b7bb944a84100498 | [
"MIT"
] | permissive | AnisBouhadida/MedEnviR | 5f6f24128ce65ba25c2acaf1df761adf6aa575e8 | 20c82392c4afad2587b6c503ff71946f25d1d462 | refs/heads/master | 2020-04-12T17:01:11.856268 | 2019-01-08T15:48:59 | 2019-01-08T15:48:59 | 162,631,923 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,834 | r | brouillon.R | # rechercher doublons
# Check for duplicated rows in the waste (dechet) data.
test0 <- duplicated(data_dechet)
length(test0)
# TRUE when the data contain at least one duplicated row.
# BUG FIX: the original for-loop overwrote `dup` on every iteration, so it
# only ever reflected whether the *last* row was a duplicate.
dup <- any(test0)
######
# Quick overview of the data, then keep only complete rows.
skim(data_dechet)
data_dechet <- na.omit(data_dechet)
data_dechet_clean <- data_dechet[complete.cases(data_dechet),]
# Build the cleaned INSEE table *before* summarising it (the original called
# skim(data_INSEE_clean) one line before data_INSEE_clean was created).
data_INSEE_clean <- data_INSEE[complete.cases(data_INSEE),]
skim(data_INSEE_clean)
# Entrepot de donnees:
ED_faitRepartitionPoluant <- data.frame() %>% tbl_df()
ED_faitRepartitionPoluant <- select(ED_dimensionDechet,id_dim_dechet)
ED_faitRepartitionPoluant <-add_column(ED_faitRepartitionPoluant,id_fait=round(runif(nrow(ED_faitRepartitionPoluant), min=200, max=7000)))
ED_faitRepartitionPoluant <- select(ED_dimensionRadioActivite,id_dim_radio)%>% bind_cols(ED_faitRepartitionPoluant)
ED_faitRepartitionPoluant <- select(ED_dimensionProducteurDechet,id_dim_producteur)%>% bind_cols(ED_faitRepartitionPoluant)
test <-select(ED_dimensionGeo,INSEE_COM,id_dim_geo) %>% tbl_df()
test1 <- inner_join(test, data_dechet, by = c("INSEE_COM" = "CODE.INSEE")) %>%
drop_na() %>% distinct(INSEE_COM,NOM.DU.SITE,DESCRIPTION.PHYSIQUE,VOLUME.EQUIVALENT.CONDITIONNE, .keep_all = TRUE)
test2 <- inner_join(test1, ED_dimensionDechet, by = NULL, copy = FALSE) %>%
drop_na() %>% distinct(INSEE_COM,NOM.DU.SITE,DESCRIPTION.PHYSIQUE,VOLUME.EQUIVALENT.CONDITIONNE, .keep_all = TRUE)
test3 <- inner_join(test2, ED_faitRepartitionPoluant, by = c("id_dim_dechet"), copy = FALSE) %>%
drop_na() %>% distinct(INSEE_COM,NOM.DU.SITE,DESCRIPTION.PHYSIQUE,VOLUME.EQUIVALENT.CONDITIONNE, .keep_all = TRUE)
test4 <- select(test3,id_dim_dechet,id_dim_producteur, id_dim_radio, id_dim_geo) %>%
tbl_df() %>% add_column(id_fait=round(runif(5633, min=200, max=7000)))
#essai avec leaflet----
re_temp<- ED_faitRepartitionPoluant %>%
left_join(ED_dimensionDechet) %>%
left_join(ED_dimensionProducteurDechet) %>%
left_join(ED_dimensionGeo)
dep.sf<-st_transform(dep, "+proj=longlat +datum=WGS84") #transformation dans le mode polygon accepté par Leaflet
couleurs <- colorNumeric("YlOrRd", dep.sf$ratio, n = 10) #palette couleur
pal <- colorBin("YlOrRd", domain = dep.sf$ratio)
leaflet() %>% #attention dans cette partie "()" du re_temp enlevée
addLegend(data=dep.sf, #légende à mettre en premier sinon ne sait plus quelle carte prendre
pal = pal,
values=~dep.sf$ratio,
opacity = 0.7,
title = "Incidence/100.000 hab.") %>%
addMeasure( #addin pour faire des mesures sur la carte
position = "bottomleft",
primaryLengthUnit = "meters",
primaryAreaUnit = "sqmeters",
activeColor = "#3D535D",
completedColor = "#7D4479")%>%
addEasyButton(easyButton( #bouton zoom réinitialiser à vérifier si marche lorsque choix de région
icon="fa-globe", title="Zoom to France", #sinon changer titre
onClick=JS("function(btn, map){ map.setZoom(5); }"))) %>%
addEasyButton(easyButton(
icon="fa-crosshairs", title="Locate Me",
onClick=JS("function(btn, map){ map.locate({setView: true}); }")))%>%
addMiniMap(
tiles = providers$Esri.WorldStreetMap,
toggleDisplay = TRUE)%>%
addProviderTiles(providers$CartoDB.Positron) %>%
addMarkers(data=re_temp,
~as.numeric(lng),
~as.numeric(lat),
clusterOptions = markerClusterOptions(),
popup = paste(
"<b>Site : ", re_temp$`NOM DU SITE`, "</b><br/>",
"<b>Activité en Bq : ", re_temp$`ACTIVITE ( Bq)`,"</b> <br/>",
"Quantité en VEC :", re_temp$`VOLUME EQUIVALENT CONDITIONNE`, "<br/>",
"Groupe de déchet :", re_temp$`GROUPE DE DECHETS`, "<br/>"),
label = ~ as.character(`NOM_COM`),
icon= makeIcon(iconUrl = "./img/radioactif.png", iconWidth = 50, iconHeight = 50))%>%
addPolygons(data= dep.sf, color = "#444444", weight = 1, smoothFactor = 0.5,
fillColor = ~pal(ratio),
opacity = 1.0, fillOpacity = 0.7,
dashArray = "3",# limite en pointillé
label = str_c(dep.sf$NOM_DEPT),
labelOptions = labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto"),
# pour la surbrillance du polygone lorsque souris dessus:
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE))
# Pour l'affichage d'une seule region:
dep_idf <- data_departement %>% filter(NOM_REG=='ILE-DE-FRANCE')
choroLayer(dep_idf,
var = 'ratio',
method = 'quantile', # méthode de discrétisation
col = carto.pal("orange.pal", 9),
border = "black",
lwd = 1.5,
legend.values.rnd = 1,
legend.pos = 'left',
legend.title.txt = 'Incidence/100000 hab.')
layoutLayer(title = paste("Incidence en 2016 de la maladie X et répartition des déchets radioactif en",
str_c(dep_idf$NOM_REG)),
sources = "Sources : ",
author = "Auteur : ",
scale = 0,
frame = FALSE,
col = "#688994")
dechet_IDF.coor <- dechet %>% select(lng,lat,NOM_REG) %>% filter(NOM_REG=="ILE-DE-FRANCE") %>% drop_na() %>%
st_as_sf(coords = c("lng", "lat"), crs = 4326) %>% st_transform(crs = 2154)
plot(dechet_IDF.coor, pch = 20, add = TRUE, col="darkblue")
#code pour les polygones sur shiny carte poluants
#addPolygons(data= data_departement.sf,color = "#444444", weight = 1, smoothFactor = 0.5,
# opacity = 1.0, fillOpacity = 0.1,
# highlightOptions = highlightOptions(color = "white", weight = 2,
# bringToFront = TRUE))%>% |
3bff4373f2f1bc965ad39a1341391f2cd3f8d431 | 7d32ff022cf3cb5a5fdc3bb33556956bc54f4555 | /propernoun_based_scoring.R | 2a69143e9734fcf9ec89095e10203b20a20d0a97 | [] | no_license | shashankgarg1/text-summarisation | 6388743ef3913f569181489d9ea000568d5a9582 | c4a863a2399f92847ed8aff53c07132e7d16981e | refs/heads/master | 2021-01-22T09:13:11.621721 | 2016-10-03T17:39:24 | 2016-10-03T17:39:24 | 69,893,545 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,998 | r | propernoun_based_scoring.R | propernoun_based_scoring<-function(corpus, qwerty){
print("propernoun_based_scoring")
OriginalDocument <- as.String(corpus[[qwerty]])
a1 <- annotate(OriginalDocument, Simple_Para_Token_Annotator(blankline_tokenizer))
a1 <- annotate(OriginalDocument, Maxent_Sent_Token_Annotator(), a1)
a1 <- annotate(OriginalDocument, Maxent_Word_Token_Annotator(), a1)
a1 <- annotate(OriginalDocument, Maxent_POS_Tag_Annotator(), a1)
##meta(corpus[[1]], tag = "paragraph") <- a1[a1$type == "paragraph"]
##meta(corpus[[1]], tag = "sentence") <- a1[a1$type == "sentence"]
##meta(corpus[[1]], tag = "word") <- a1[a1$type == "word"]
spans_of_sentences <- as.Span(a1[a1$type == "sentence"])
spans_of_words <- as.Span(a1[a1$type == "word"])
a2 <- a1[a1$type == "word"]
scoring <- c()
j<-1
sent_len<-c()
for(i in 1 : length(spans_of_sentences))
{
dummy<-0
while(spans_of_words[j]$end<spans_of_sentences[i]$end)
{
dummy<-dummy+1
j<-j+1
}
sent_len<-c(sent_len,dummy)
}
##print(sent_len)
total_words<-length(spans_of_words)
temp<-c()
for(i in 1:length(spans_of_words))
{
if((a2[i]$features[[1]]$POS == "NNP") || (a2[i]$features[[1]]$POS == "NNPS"))
{temp<-c(temp,OriginalDocument[spans_of_words[i]])
## print(OriginalDocument[spans_of_words[i]])
}
}
temp<-as.String(temp)
vecSource <- VectorSource(temp)
cp<- VCorpus(vecSource)
text<-as.String(cp[[1]])
tf <- TermDocumentMatrix(cp, control = list(removePunctuation = TRUE, stopwords = FALSE, stemming = FALSE, wordLengths = c(1, Inf)))
mat <- inspect(tf)
terms <- dimnames(mat)$Terms
for(i in 1 : length(spans_of_sentences)){
dummy <- 0
cnt <- 0
for(j in 1 : length(terms))
{
if(gregexpr(terms[j], OriginalDocument[spans_of_sentences[i]], ignore.case = TRUE, fixed = TRUE)[[1L]] != - 1)
{
temp <- length(c(terms[j], OriginalDocument[spans_of_sentences[i]], ignore.case = TRUE, fixed = TRUE)[[1L]])
dummy <- dummy + temp;
}
}
##print(dummy)
scoring <- c(scoring, dummy / sent_len[i])
}
scoring
} |
fb8ed1fe397363343d34c429d231c14b32cc7257 | 35bfba0d62c8bd5497e9ad68d587ba4496331182 | /scripts/validation/Verify_Forecast_SPI12_case_studies.R | 2c3d3812eafc52dd256bf5f6f5e7d5fd357a78a6 | [] | no_license | marcoturco/DROP | 14469e39eae5827ddc28c3403122a26b32c2d5d2 | 445bb86879a967cc8cb4af1a828218d045ce14e7 | refs/heads/master | 2022-07-07T10:26:11.037488 | 2020-05-18T17:06:03 | 2020-05-18T17:06:03 | 235,117,037 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,434 | r | Verify_Forecast_SPI12_case_studies.R | rm(list = ls())
graphics.off()
gc()
library(sp)
library(maptools) # loads sp library too
library(RColorBrewer) # creates nice color schemes
library(classInt) # finds class intervals for continuous variables
library(fields)
library(maps)
library(s2dverification)
source("/Users/marco/Dropbox/estcena/scripts/BSC/R/common/ColorBarM.R")
## fix parameters
dir_oss = '/Users/marco/Documents/dati/obs'
dir_out = '/Users/marco/Documents/output/obs_uncertainties'
datasets = c('GPCP',
'CAMS_OPI',
'CHIRPS',
'CPC',
'GPCC',
'JRA55',
'PRECL',
'ERA5',
'NCEP',
'MERRA2')
# casi = c('ASIA',
# 'Australia',
# 'USA',
# 'AMAZONIA',
# 'AFRICA',
# 'EUROPE')
casi = c('AFRICA','USA')
for (icaso in 1:length(casi)) {
caso = casi[icaso]
if (caso == "Australia") {
# 2006-12 Australia
#http://www.abs.gov.au/ausstats/abs@.nsf/a9ca4374ed453c6bca2570dd007ce0a4/ccc8ead2792bc3c7ca2573d200106bde!OpenDocument
lon1 = 110
lon2 = 155
lat1 = -45
lat2 = -10
anno_case = 2006
} else if (caso == "USA") {
lon1 = -170
lon2 = -45
lat1 = 5
lat2 = 85
anno_case = 2012
lon_pt=-101.25
lat_pt=41.25
} else if (caso == "AMAZONIA") {
lon1 = -90
lon2 = -30
lat1 = -60
lat2 = 20
anno_case = 2015
} else if (caso == "AFRICA") {
lon1 = -20
lon2 = 55
lat1 = -40
lat2 = 40
anno_case = 1984
lon_pt=26.25
lat_pt=13.75
} else if (caso == "EUROPE") {
lon1 = -15
lon2 = 60
lat1 = 30
lat2 = 75
anno_case = 2003
} else if (caso == "ASIA") {
lon1 = 35
lon2 = 145
lat1 = 0
lat2 = 80
anno_case = 2011
} else {
print('dataset not known')
}
data(wrld_simpl)
anni = 1981:2015
mesi = rep(1:12, length(anni))
## load dati
load(file.path(
dir_oss,
#paste("GPCPv2_3/SPI12_GPCP_1981_2017.RData", sep = "")
paste("MSWEP/SPI12_MSWEP_1981_2016.RData", sep = "")
))
#dum = get(tolower(extreme_index))
#erai = dum[, , seq(mese, length(anni_rean) * 12, 12)]
#rm(dum)
obs = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(dir_oss, "GPCPv2_3/lon_GPCP_1981_2017.RData"))
load(file.path(dir_oss, "GPCPv2_3/lat_GPCP_1981_2017.RData"))
## load data
pred = array(data = NA, dim = c(dim(spi12)[1], dim(spi12)[2], dim(obs)[3], 10))
load(file.path(
paste(dir_oss, "/GPCPv2_3/SPI12_GPCP_1981_2016.RData", sep = "")
))
pred[, , , 1] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(dir_oss, "/CAMS_OPI/SPI12_CAMS_OPI_1981_2016.RData", sep = "")
))
pred[, , , 2] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(
"/Users/marco/Documents/dati/SPIF/data/SPI12_CHIRPS_1981_2016.RData",
sep = ""
)
))
pred[, , , 3] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(
dir_oss,
"/CPC_GLOBAL_PRECIP/SPI12_CPC_1981_2016.RData",
sep = ""
)
))
pred[, , , 4] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(dir_oss, "/GPCCv2018/SPI12_GPCC_1981_2016.RData", sep = "")
))
pred[, , , 5] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(dir_oss, "/JRA55/SPI12_JRA55_1981_2016.RData", sep = "")
))
pred[, , , 6] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(dir_oss, "/PRECL/SPI12_PRECL_1981_2016.RData", sep = "")
))
pred[, , , 7] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(paste(
dir_oss, "/ERA5/SPI12_ERA5_1981_2016.RData", sep = ""
)))
pred[, , , 8] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(paste(
dir_oss, "/NCEP/SPI12_NCEP_1981_2016.RData", sep = ""
)))
pred[, , , 9] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
paste(
"/Users/marco/Documents/dati/MERRA2/SPI12_MERRA2_1981_2016.RData",
sep = ""
)))
pred[, , , 10] = spi12[, , seq(12, dim(spi12)[3], 12)]
load(file.path(
dir_out,
paste("/SPI12_ENS_1981_2016.RData", sep = "")
))
ni = length(lon)
nj = length(lat)
## select case study
ilon = which(lon > lon1 & lon < lon2)
ilat = which(lat > lat1 & lat < lat2)
anno_for = which(anni == anno_case)
image.plot(lon[ilon], lat[ilat], obs[ilon, ilat , anno_for])
plot(wrld_simpl, add = TRUE)
sid = obs[ilon, ilat , anno_for]
lonsid = lon[ilon]
latsid = lat[ilat]
idx = which(sid[, ] == min(sid, na.rm = TRUE), arr.ind = T)
lonsid[idx[1, 1]]
latsid[idx[1, 2]]
sid[idx[1, 1], idx[1, 2]] = -5
image.plot(lon[ilon], lat[ilat], sid)
plot(wrld_simpl, add = TRUE)
spei6obs = obs[ilon, ilat, anno_for]
obs_drought = spei6obs*NA
obs_drought[(spei6obs > -0.5)] = 0
obs_drought[(spei6obs <= -0.5) & (spei6obs > -0.8)] = 1
obs_drought[(spei6obs <= -0.8) & (spei6obs > -1.3)] = 2
obs_drought[(spei6obs <= -1.3) & (spei6obs > -1.6)] = 3
obs_drought[(spei6obs <= -1.6) & (spei6obs > -2)] = 4
obs_drought[(spei6obs <= -2)] = 5
brk <- seq(-1, 5, length.out = 7)
pal.1 = colorRampPalette(c("yellow", "red", "black"), space = "rgb")
col = pal.1(length(brk) - 1)
col[1] = "#C0C0C0"
image.plot(lon[ilon], lat[ilat], obs_drought,zlim=c(-1,5))
plot(wrld_simpl, add = TRUE)
## plot obs level
postscript(
file.path(
dir_out,
paste("SPI12_obs_", anno_case, "_level_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
obs_drought,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk,
cols = col,
axelab =
F,
filled.continents = FALSE,
drawleg = F,
colNA = "white"
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBar(
brks = brk,
cols = col,
vert = T,
cex = 1
)
dev.off()
## plot obs spi
brk_new = c(-2,-1.6,-0.8,-0.5, 0, 0.5, 0.8, 1.3, 2)
brk2 = union(-1e+05, brk_new)
brk2 = union(brk2, 1e+05)
col <- (colorRampPalette(brewer.pal(11, "BrBG"))(10))
image.plot(lon[ilon], lat[ilat], spei6obs)
plot(wrld_simpl, add = TRUE)
postscript(
file.path(
dir_out,
paste("spi12_obs_", anno_case, "_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
spei6obs,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk2,
cols = col,
axelab =
F,
filled.continents = FALSE,
drawleg = F,
colNA = "white"
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBarM(
brks = brk2,
cols = col,
vert = T,
cex = 1,
labs = seq(2, length(brk2) - 1, 1)
)
dev.off()
## Plot Probability Moderate Drought
spei6espfor = pred[ilon, ilat, anno_for,]
for_drought_prob = array(data = NA, dim = c(dim(spei6espfor)[1], dim(spei6espfor)[2]))
for (i in 1:dim(spei6espfor)[1]) {
for (j in 1:dim(spei6espfor)[2]) {
aux = spei6espfor[i, j,]
for_drought_prob[i, j] = sum(aux[!is.na(aux)] <= -0.8) / length(!is.na(aux))
}
}
points <- expand.grid(lon[ilon], lat[ilat])
data(wrld_simpl)
pts = SpatialPoints(points, proj4string = CRS(proj4string(wrld_simpl)))
ii <- !is.na(over(pts, wrld_simpl))
inout = ii[, 1]
dim(inout) <- c(nrow(obs[ilon, ilat,]), ncol(obs[ilon, ilat,]))
inout[inout == 0] = NA
for_drought_prob = inout * for_drought_prob
image.plot(lon[ilon], lat[ilat], for_drought_prob)
plot(wrld_simpl, add = TRUE)
brk_prob <- seq(0, 1, length.out = 6)
#pal.1=colorRampPalette(c("yellow","red","black"), space="rgb")
#col_prob=pal.1(length(brk_prob)-1)
col_prob <-
(colorRampPalette(brewer.pal(length(brk_prob), "YlOrBr"))(length(brk_prob) -
1))
col_prob[1] = "#C0C0C0"
for_drought_prob[for_drought_prob == 0] = 0.1
postscript(
file.path(
dir_out,
paste("SPI12_prob_", anno_case, "_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
for_drought_prob,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk_prob,
cols = col_prob,
axelab =
F,
colNA = "white",
filled.continents = FALSE,
drawleg = F
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBar(
brks = brk_prob,
cols = col_prob,
vert = T,
cex = 1
)
dev.off()
## Plot ensemble mean
aux = spi[ilon, ilat, seq(12, dim(spi12)[3], 12)]
mean_pred=aux[,,anno_for]
# mean_pred = spi(spei6espfor, c(1, 2), mean, na.rm = TRUE))
pvalue=mean_pred*NA
pvalue[lon_pt==lon[ilon],lat_pt==lat[ilat]]=0
brk_new = c(-2,-1.6,-0.8,-0.5, 0, 0.5, 0.8, 1.3, 2)
brk2 = union(-1e+05, brk_new)
brk2 = union(brk2, 1e+05)
col <- (colorRampPalette(brewer.pal(11, "BrBG"))(10))
image.plot(lon[ilon], lat[ilat], spei6obs)
plot(wrld_simpl, add = TRUE)
postscript(
file.path(
dir_out,
paste("spi12_ens_mean_", anno_case, "_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
mean_pred,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk2,
cols = col,
axelab =
F,
labW = F,
filled.continents = FALSE,
drawleg = F,
colNA = "white",
dots = pvalue <= 0.05
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBarM(
brks = brk2,
cols = col,
vert = T,
cex = 1,
labs = seq(2, length(brk2) - 1, 1)
)
dev.off()
## Plot spread
sd_pred = apply(spei6espfor, c(1, 2), sd, na.rm = TRUE)
sd_pred[sd_pred>=2]=2
cols_sd <- brewer.pal(5, "BuPu")
brk_sd <- seq(0, 2, length.out = 6)
image.plot(lon[ilon], lat[ilat], sd_pred)
plot(wrld_simpl, add = TRUE)
postscript(
file.path(
dir_out,
paste("spi12_ens_spread_", anno_case, "_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
sd_pred,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk_sd,
cols = cols_sd,
axelab =
F,
labW = F,
filled.continents = FALSE,
drawleg = F,
colNA = "white"
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBarM(
brks = brk_sd,
cols = cols_sd,
vert = T,
cex = 1,
labs = seq(2, length(brk2) - 1, 1)
)
dev.off()
## warning level
load(paste0(dir_out, "/SPI_TRAF_LIG_12_DROP_1981_2016.RData"))
aux = spi_tl[ilon, ilat, seq(12, dim(spi12)[3], 12)]
aux=aux[,,anno_for]
image.plot(lon[ilon], lat[ilat], aux)
plot(wrld_simpl, add = TRUE)
cols_tl=c('#FFFF00','#FFA500','#FF0000')
brk_tl <- seq(1, 4, length.out = 4)
postscript(
file.path(
dir_out,
paste("SPI12_warning_level_", anno_case, "_", caso, ".eps", sep = "")
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
aux,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk_tl,
cols = cols_tl,
axelab =
F,
colNA = "white",
labW = F,
filled.continents = FALSE,
drawleg = F
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBar(
brks = brk_tl,
cols = cols_tl,
vert = T,
cex = 1
)
dev.off()
for (idata in 1:length(datasets)) {
#for (idata in 1:1) {
dataset = datasets[idata]
#spei6espfor = pred[ilon, ilat, anno_for, ]
mean_pred = pred[ilon, ilat, anno_for, idata]
image.plot(lon[ilon], lat[ilat], mean_pred)
plot(wrld_simpl, add = TRUE)
postscript(
file.path(
dir_out,
paste(
"spi12_ens_mean_",
anno_case,
"_",
caso,
"_",
dataset,
".eps",
sep = ""
)
),
paper = "special",
width = 11,
height = 7,
horizontal = T
)
layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(11, 1.5))
par(oma = c(1, 1, 4, 1))
tit <- ('')
PlotEquiMap(
mean_pred,
lon[ilon],
lat[ilat],
toptitle = '',
sizetit = 0.8,
brks = brk2,
cols = col,
axelab =
F,
labW = F,
filled.continents = FALSE,
drawleg = F,
colNA = "white"
)
title(tit, line = 0.5, outer = T)
ce = 1.4
ColorBarM(
brks = brk2,
cols = col,
vert = T,
cex = 1,
labs = seq(2, length(brk2) - 1, 1)
)
dev.off()
}
}
|
c73c555594ce1d1219883ea824eaf392df46c6b5 | 49ff0bc7c07087584b907d08e68d398e7293d910 | /st_gpr/st_gpr_core_code/stgpr/r_functions/utilities/db_tools.r | 1e689c8797e904618a290b9689118a1cb0d6f0db | [] | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | db7963836c9ce9cec3ca8da3a4645c4203bf1352 | 4219ee6b1fb122c9706078e03dd1831f24bdaa04 | refs/heads/master | 2023-07-30T07:05:28.802523 | 2021-09-27T12:11:17 | 2021-09-27T12:11:17 | 297,317,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,882 | r | db_tools.r | ###########################################################
### Author: Patrick Liu
### Date: 1/26/2015
### Project: ST-GPR
### Purpose: Database utility functions
###########################################################
###################
### Setting up ####
###################
library(data.table)
library(stringr)
library(plyr)
library(ini)
library(RMySQL)
library(parallel)
####################################################################################################################################################
# Table of Contents
####################################################################################################################################################
## Base
## query
## Pulls
## get_best_lvid
## get_location_hierarchy
## get_gbd_round_id
####################################################################################################################################################
# Base
####################################################################################################################################################
# Mapping from decomp step name to decomp_step_id for GBD 2020 (round 7).
gbd_2020_decomp_step_id <- list(
  step1 = 16, step2 = 17, step3 = 18, step4 = 19, step5 = 20, iterative = 15
)

# Mapping from decomp step name to decomp_step_id for GBD 2019 (round 6).
gbd_2019_decomp_step_id <- list(
  step1 = 1, step2 = 2, step3 = 3, step4 = 4, step5 = 5,
  iterative = 7, usa_re = 14
)

# Resolve a decomp step name + GBD round to its decomp_step_id.
#
# Rounds 7 and 6 use the per-round lookup tables above; rounds 5 through 2
# each have a single fixed id regardless of step. Unknown rounds or steps now
# raise an informative error (previously an unrecognised round failed with an
# obscure "object 'step_map' not found" error).
#
# @param step Character decomp step name, e.g. "step1" or "iterative".
# @param gbd_round_id Integer GBD round id (2-7).
# @return Integer decomp_step_id.
decomp_step_id_from_decomp_step <- function(step, gbd_round_id){
  if (gbd_round_id == 7) {
    step_map <- gbd_2020_decomp_step_id
  } else if (gbd_round_id == 6) {
    step_map <- gbd_2019_decomp_step_id
  } else if (gbd_round_id == 5) {
    return(9L)
  } else if (gbd_round_id == 4) {
    return(10L)
  } else if (gbd_round_id == 3) {
    return(11L)
  } else if (gbd_round_id == 2) {
    return(12L)
  } else {
    stop("Unknown gbd_round_id: ", gbd_round_id, call. = FALSE)
  }
  step_id <- step_map[[step]]
  if (is.null(step_id)) {
    stop("Unknown decomp step '", step, "' for gbd_round_id ", gbd_round_id,
         call. = FALSE)
  }
  as.integer(step_id)
}
# Run a SQL query against a database defined in the team's .odbc.ini file.
#
# @param query Character SQL statement to execute.
# @param conn_def Name of the connection definition (ini section) to use.
# @return A data.table with the query result.
query <- function(query, conn_def) {
  cluster_obdc_path <- "/ihme/cc_resources/credentials/.odbc.ini"
  # Fall back to the J-drive credentials file when not on the cluster.
  # BUG FIX: the fallback branch previously used `return()`, which exited
  # query() with the raw ini contents instead of running the SQL statement.
  odbc <- if (file.exists(cluster_obdc_path)) {
    read.ini(cluster_obdc_path)
  } else {
    read.ini("/home/j/temp/central_comp/credentials/.odbc.ini")
  }
  conn <- dbConnect(RMySQL::MySQL(),
                    host = odbc[[conn_def]]$server,
                    username = odbc[[conn_def]]$user,
                    password = odbc[[conn_def]]$password)
  # Guarantee the connection is closed even if the query errors.
  on.exit(dbDisconnect(conn), add = TRUE)
  dbGetQuery(conn, query) %>% data.table
}
####################################################################################################################################################
# Pulls
####################################################################################################################################################
# Look up the currently active ("best") location_set_version_id for a
# location set, GBD round, and (optionally) decomp step.
#
# @param location_set_id Integer location set id.
# @param gbd_round_id Integer GBD round id.
# @param decomp_step Optional decomp step name; NULL is passed through as a
#   literal SQL NULL argument.
# @return The active location_set_version_id (scalar).
get_best_lvid <- function(location_set_id, gbd_round_id, decomp_step = NULL){
  host <- "cod"
  decomp_step_id <- if (is.null(decomp_step)) {
    "NULL"  # literal SQL NULL
  } else {
    decomp_step_id_from_decomp_step(decomp_step, gbd_round_id)
  }
  q <- sprintf(
    "SELECT shared.active_location_set_decomp_step_version(%s,%s,%s)",
    location_set_id, gbd_round_id, decomp_step_id
  )
  query(q, host) %>% unlist() %>% unname()
}
# This is 10x faster than the shared function version and I'm not sure
# how often it's used/called so I don't want to make big performance changes
# right away
# Pull the location hierarchy metadata rows for the best (active)
# location_set_version of the given location set / round / decomp step.
#
# @return A data.table of shared.location_hierarchy_history rows.
get_location_metadata <- function(location_set_id, gbd_round_id, decomp_step = NULL) {
  host <- "cod"
  # Resolve the active version id first, then fetch its hierarchy rows.
  lsvid <- get_best_lvid(
    location_set_id = location_set_id,
    gbd_round_id = gbd_round_id,
    decomp_step = decomp_step
  )
  df <- query(
    sprintf('SELECT * FROM shared.location_hierarchy_history WHERE location_set_version_id=%i', lsvid),
    host
  )
  # assert existing location_set
  if (nrow(df) == 0) {
    stop("Locations dataframe is empty! Make sure your location_set_id and gbd_round_id are legit.")
  }
  df[]
}
# Vectorizable function that returns 1 if a location_id is a standard location; 0 otherwise
# Since standard locations didn't exist before GBD 2019, earlier rounds are handled differently
# Return 1L if a location is a "standard location" for the given GBD round,
# 0L otherwise. From GBD 2019 (round 6) onward membership is defined by the
# standard location set (`standard_location_ids`); earlier rounds use
# national level (3) plus a small fixed list of subnational ids.
is_standard_location <- function(location_id, level, gbd_round_id, standard_location_ids) {
  is_std <- if (gbd_round_id > 5) {
    location_id %in% standard_location_ids
  } else {
    legacy_standard_ids <- c(4749, 4636, 434, 433)
    level == 3 | location_id %in% legacy_standard_ids
  }
  as.integer(is_std)
}
# Build the full location hierarchy for a location set: the metadata rows
# plus one numeric `level_k` column per depth (parsed from
# path_to_top_parent) and a 0/1 `standard_location` indicator.
get_location_hierarchy <- function(location_set_id = 22, gbd_round_id = 7, decomp_step = NULL, standard_location_set_id = 101) {
  # Hierarchy rows for the best location_set_version of this set/round/step.
  df <- get_location_metadata(
    location_set_id = location_set_id,
    gbd_round_id = gbd_round_id,
    decomp_step = decomp_step
  )

  # Standard locations only exist as a set from GBD 2019 (round 6) onward.
  std_locs <- if (gbd_round_id > 5) {
    get_location_metadata(
      location_set_id = standard_location_set_id,
      gbd_round_id = gbd_round_id
    )[, location_id]
  } else {
    NULL
  }

  # Expand path_to_top_parent ("1,64,102,...") into numeric
  # level_0 .. level_max columns and bind them onto the metadata.
  max_depth <- max(df$level)
  hierarchy <- str_split_fixed(df$path_to_top_parent, ",", max_depth + 1) %>% data.table
  hierarchy <- hierarchy[, lapply(.SD, as.numeric)]
  setnames(hierarchy, names(hierarchy), paste0("level_", seq(0, max_depth)))
  df <- cbind(df, hierarchy)

  # Flag standard locations; rounds before GBD 2019 use the legacy rule
  # baked into is_standard_location().
  df[, standard_location := mapply(
    is_standard_location, location_id, level,
    MoreArgs = list(gbd_round_id = gbd_round_id, standard_location_ids = std_locs)
  )]
  df[]
}
#####################################################################################################################################################
# Map a location_set_version_id to its gbd_round_id.
#
# Used only to pull old (GBD 2017 and earlier) ST-GPR data; should be
# deleted once a new GPR Viz exists.
get_gbd_round_id <- function(location_set_version_id){
  host <- "cod"
  # The version table only stores the round *year* (gbd_round), so fetch
  # that first...
  round_year <- query(
    paste0("SELECT gbd_round FROM shared.location_set_version WHERE location_set_version_id = ", location_set_version_id),
    host
  )
  # ...then translate the year into its gbd_round_id.
  query(
    paste0("SELECT gbd_round_id FROM shared.gbd_round WHERE gbd_round = ", round_year),
    host
  ) %>% as.integer()
}
|
df1f0a38b0d84b3af7656e1e5e6f129723ddb877 | 73424cecc7c40787af4abd1c39704acea6f4d35a | /server.R | df3aeb8f473cd6a385de62035c3639f24073eba6 | [] | no_license | gu-stat/dadoscoronavirus | baa4536ca21533321ad9275a75e17ff2d6ffd010 | 1338bf6f1e85c2d2209bd14b9fde9e16f6957599 | refs/heads/master | 2021-10-21T11:23:50.148634 | 2021-10-20T21:55:29 | 2021-10-20T21:55:29 | 251,186,609 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,615 | r | server.R | # ************************************************************************* ----
# Server ----
# ************************************************************************* ----
#
# Shiny server for a COVID-19 Brazil dashboard. Builds reactive data sets at
# municipality, state and national level from the brasil.io feed
# (`dados_originais_br_io()` is defined elsewhere in the app), renders the
# location selector and date labels, and delegates each panel to a module.
function(input, output, session) {
  # |_ Data ====================================================================
  # \___ Municipality-level data -----------------------------------------------
  # Clean the raw brasil.io extract: rename columns, derive the state number
  # (first two digits of the IBGE municipality code), map it to state name and
  # region, and apply manual corrections to Ceará death counts (see below).
  dados_selecionados_cidade <- reactive({
    dados_originais_br_io() %>%
      tibble::as_tibble() %>%
      rename(
        "uf" = "state",
        "municipio" = "city",
        "cod_municipio" = "city_ibge_code"
      ) %>%
      mutate(
        dia = as.Date(date),
        uf_num = substr(cod_municipio, 1, 2),
        casos_confirmados = as.numeric(confirmed),
        mortes_confirmadas = as.numeric(deaths),
        confirmed_per_100k_inhabitants = round(
          as.numeric(confirmed_per_100k_inhabitants), 2
        )
      ) %>%
      mutate(
        # uf_num -> state name (IBGE two-digit state codes)
        uf_nome = case_when(
          uf_num == 11 ~ 'Rondônia',
          uf_num == 12 ~ 'Acre',
          uf_num == 13 ~ 'Amazonas',
          uf_num == 14 ~ 'Roraima',
          uf_num == 15 ~ 'Pará',
          uf_num == 16 ~ 'Amapá',
          uf_num == 17 ~ 'Tocantins',
          uf_num == 21 ~ 'Maranhão',
          uf_num == 22 ~ 'Piauí',
          uf_num == 23 ~ 'Ceará',
          uf_num == 24 ~ 'Rio Grande do Norte',
          uf_num == 25 ~ 'Paraíba',
          uf_num == 26 ~ 'Pernambuco',
          uf_num == 27 ~ 'Alagoas',
          uf_num == 28 ~ 'Sergipe',
          uf_num == 29 ~ 'Bahia',
          uf_num == 31 ~ 'Minas Gerais',
          uf_num == 32 ~ 'Espírito Santo',
          uf_num == 33 ~ 'Rio de Janeiro',
          uf_num == 35 ~ 'São Paulo',
          uf_num == 41 ~ 'Paraná',
          uf_num == 42 ~ 'Santa Catarina',
          uf_num == 43 ~ 'Rio Grande do Sul',
          uf_num == 50 ~ 'Mato Grosso do Sul',
          uf_num == 51 ~ 'Mato Grosso',
          uf_num == 52 ~ 'Goiás',
          uf_num == 53 ~ 'Distrito Federal'
        ),
        # uf_num -> macro-region (first digit of the state code determines it)
        regiao = case_when(
          uf_num == 11 ~ 'Norte',
          uf_num == 12 ~ 'Norte',
          uf_num == 13 ~ 'Norte',
          uf_num == 14 ~ 'Norte',
          uf_num == 15 ~ 'Norte',
          uf_num == 16 ~ 'Norte',
          uf_num == 17 ~ 'Norte',
          uf_num == 21 ~ 'Nordeste',
          uf_num == 22 ~ 'Nordeste',
          uf_num == 23 ~ 'Nordeste',
          uf_num == 24 ~ 'Nordeste',
          uf_num == 25 ~ 'Nordeste',
          uf_num == 26 ~ 'Nordeste',
          uf_num == 27 ~ 'Nordeste',
          uf_num == 28 ~ 'Nordeste',
          uf_num == 29 ~ 'Nordeste',
          uf_num == 31 ~ 'Sudeste',
          uf_num == 32 ~ 'Sudeste',
          uf_num == 33 ~ 'Sudeste',
          uf_num == 35 ~ 'Sudeste',
          uf_num == 41 ~ 'Sul',
          uf_num == 42 ~ 'Sul',
          uf_num == 43 ~ 'Sul',
          uf_num == 50 ~ 'Centro-Oeste',
          uf_num == 51 ~ 'Centro-Oeste',
          uf_num == 52 ~ 'Centro-Oeste',
          uf_num == 53 ~ 'Centro-Oeste'
        )
      ) %>%
      select(
        dia,
        is_last,
        regiao,
        uf_nome,
        uf,
        uf_num,
        municipio,
        cod_municipio,
        place_type,
        casos_confirmados,
        mortes_confirmadas,
        confirmed_per_100k_inhabitants,
        estimated_population_2019
      ) %>%
      arrange(uf_num, cod_municipio, dia) %>%
      # Manual fix: Ceará deaths missing in the feed on 2020-04-04.
      # Source: https://dev.org.br/api/casos-ceara-por-dia
      # NOTE(review): is_last is compared as the string "True" — presumably the
      # feed ships it as text, not logical; verify against the raw data.
      mutate(
        mortes_confirmadas = case_when(
          is_last == "True" & cod_municipio == 2304400 & mortes_confirmadas == 0 ~ 17,
          is_last == "True" & cod_municipio == 2304285 & mortes_confirmadas == 0 ~ 1,
          is_last == "True" & cod_municipio == 2312205 & mortes_confirmadas == 0 ~ 1,
          is_last == "True" & cod_municipio == 2306900 & mortes_confirmadas == 0 ~ 1,
          is_last == "True" & cod_municipio == 2313401 & mortes_confirmadas == 0 ~ 1,
          TRUE ~ mortes_confirmadas
        )
      )
  })
  # \___ State-level data ------------------------------------------------------
  # Keep only state rows and derive daily counts from the cumulative series.
  # fcn_dia_uf() is a project helper defined elsewhere — presumably it computes
  # per-day differences within each state; confirm against its definition.
  dados_estados <- reactive({
    dados_selecionados_cidade() %>%
      filter(place_type == "state") %>%
      mutate(
        confirmados_dia = fcn_dia_uf(., variavel = "casos_confirmados"),
        mortes_dia = fcn_dia_uf(., variavel = "mortes_confirmadas"),
        # Format the per-100k rate as Brazilian-style text ("1.234,56")
        confirmed_per_100k_inhabitants = prettyNum(
          round(confirmed_per_100k_inhabitants, 2),
          big.mark = ".",
          decimal.mark = ","
        )
      )
  })
  # \___ National (Brazil) data ------------------------------------------------
  # Aggregate the state series to a single national series.
  dados_brasil <- reactive({
    # National population = sum of one population value per state.
    # NOTE(review): distinct(estimated_population_2019) deduplicates by VALUE,
    # not by state — if two states ever had identical populations one would be
    # dropped from the sum. Safer would be distinct over (uf, population).
    populacao_2019 <-
      dados_estados() %>%
      group_by(dia, uf) %>%
      summarise(
        estimated_population_2019 = mean(estimated_population_2019)
      ) %>%
      ungroup() %>%
      distinct(estimated_population_2019) %>%
      sum()
    dados_estados() %>%
      group_by(dia) %>%
      summarise(
        confirmados_dia = sum(confirmados_dia, na.rm = TRUE),
        mortes_dia = sum(mortes_dia, na.rm = TRUE)
      ) %>%
      mutate(
        # Rebuild cumulative series from the summed daily counts
        casos_confirmados = cumsum(confirmados_dia),
        mortes_confirmadas = cumsum(mortes_dia),
        confirmed_per_100k_inhabitants = casos_confirmados/populacao_2019*100000,
        confirmed_per_100k_inhabitants = prettyNum(
          round(confirmed_per_100k_inhabitants, 2),
          big.mark = ".",
          decimal.mark = ","
        )
      )
  })
  # Data for the currently selected location: the national series when
  # "Brasil" is chosen, otherwise the selected state's series.
  dados <- reactive({
    req(input$localidade)
    if (input$localidade == "Brasil") {
      tmp_casos <- dados_brasil()
    } else {
      tmp_casos <-
        dados_estados() %>%
        filter(uf_num == input$localidade)
    }
    tmp_casos
  })
  # |_ Inputs ==================================================================
  # \__ Location selector ------------------------------------------------------
  # Dropdown of "Brasil" plus every state, grouped by region; option values
  # are the numeric state codes (uf_num), labels are the state names.
  output$selecao_localidade <- renderUI({
    tmp_locais <-
      dados_selecionados_cidade() %>%
      select(regiao, uf_nome, uf_num) %>%
      distinct() %>%
      arrange(regiao, uf_nome, uf_num)
    selectInput(
      inputId = "localidade",
      label = h4("Localidade:"),
      choices = c(
        "Brasil",
        with(
          tmp_locais,
          split(
            setNames(
              tmp_locais$uf_num,
              tmp_locais$uf_nome
            ),
            regiao
          )
        )
      ),
      multiple = FALSE,
      selectize = TRUE
    )
  })
  # Outputs ====================================================================
  # \__ "About" markdown -------------------------------------------------------
  # Render the Rmd to HTML once (cached on disk) and include it in the UI.
  output$markdown_sobre <- renderUI({
    sobre_html <- "./www/S03_Modulos/M05_Secao_Sobre/sobre.html"
    if (!file.exists(sobre_html)) {
      rmarkdown::render(
        input = "./www/S03_Modulos/M05_Secao_Sobre/sobre.rmd",
        output_format = html_document(self_contained = TRUE),
        output_file = "sobre.html"
      )
    }
    shiny::includeHTML(sobre_html)
  })
  # \__ "Key questions" markdown -----------------------------------------------
  # Same render-once-then-include pattern as the About section.
  output$markdown_questoes <- renderUI({
    questoes_html <- "./www/S03_Modulos/M05_Secao_Sobre/questoes.html"
    if (!file.exists(questoes_html)) {
      rmarkdown::render(
        input = "./www/S03_Modulos/M05_Secao_Sobre/questoes.rmd",
        output_format = html_document(self_contained = TRUE),
        output_file = "questoes.html"
      )
    }
    shiny::includeHTML(questoes_html)
  })
  # \__ Dates ------------------------------------------------------------------
  # \___ Observation dates -----------------------------------------------------
  # Distinct observation dates, newest first (so row 1 is the latest date).
  data_range <- reactive({
    dados_selecionados_cidade() %>%
      select(dia) %>%
      distinct() %>%
      arrange(desc(dia))
  })
  #data_comeco <- ymd(data_range$dia[dim(data_range)[1]])
  data_final <- reactive(ymd(data_range()$dia[1]))
  #data_comeco_br <- format(as.Date(data_comeco), "%d/%m/%Y")
  # Latest date formatted Brazilian-style (dd/mm/yyyy) for display
  data_final_br <- reactive(format(as.Date(data_final()), "%d/%m/%Y"))
  # \\____ Last observation ####
  output$ultima_observacao <- renderUI({
    h5(paste0("Dados observados até ", data_final_br()))
  })
  # \\____ Last update ####
  # The scraping of the brasil.io update timestamp is disabled below;
  # the shown date is currently hard-coded.
  output$ultima_atualizacao <- renderUI({
    # tmp_texto_atualizacao_br <-
    #   read_html("https://brasil.io/dataset/covid19/caso") %>%
    #   html_nodes(., "p") %>%
    #   str_subset(., "Importação dos dados feita em") %>%
    #   str_split(., "\n") %>%
    #   unlist()
    #
    # tmp_posicao_data <-str_which(tmp_texto_atualizacao_br, "Importação")
    #
    # data_atualizacao_br <-
    #   tmp_texto_atualizacao_br[tmp_posicao_data] %>%
    #   str_squish() %>%
    #   str_remove(., "Importação dos dados feita em")
    data_atualizacao_br <- "03/04/2020"
    h5(paste0("Dados atualizados em ", data_atualizacao_br))
  })
  # |_ Modules =================================================================
  # Each panel is a Shiny module defined elsewhere; reactives are passed
  # unevaluated so the modules control when they fire.
  # \__ Panel - Summary --------------------------------------------------------
  # \\____ Case info boxes ####
  callModule(
    module = boxesModule,
    id = "caixas-info",
    dados_analise = dados
  )
  # \\____ Map ####
  callModule(
    module = mapaModule,
    id = "mapas",
    local = reactive(input$localidade),
    dados_estados = dados_estados,
    dados_selecionados_cidade = dados_selecionados_cidade,
    data_final = data_final
  )
  # \\____ Chart - Most affected ####
  callModule(
    module = graficoMAModule,
    id = "grafico-mais-afetados",
    dados_analise = dados,
    local = reactive(input$localidade),
    dados_estados = dados_estados,
    dados_selecionados_cidade = dados_selecionados_cidade
  )
  # \\____ Chart - Cumulative totals ####
  callModule(
    module = graficoTAModule,
    id = "grafico-total-acumulado",
    dados_analise = dados,
    local = reactive(input$localidade)
  )
  # \\____ Chart - New cases per day ####
  callModule(
    module = graficoNCModule,
    id = "grafico-novos-casos",
    dados_analise = dados,
    local = reactive(input$localidade)
  )
  # # \__ Painel - Sobre ---------------------------------------------------------
  #
  # callModule(
  #   module = sobreModule,
  #   id = "sobre"
  # )
  #
  # # \__ Painel - Previsao ------------------------------------------------------
  #
  # callModule(
  #   module = previsaoModule,
  #   id = "previsao",
  #   dados_brasil = dados_brasil,
  #   data_final = data_final
  # )
  # \__ Panel - Raw data -------------------------------------------------------
  callModule(
    module = dadosModule,
    id = "dados-brutos",
    dados_brasil = dados_brasil,
    dados_estados = dados_estados,
    dados_selecionados_cidade = dados_selecionados_cidade,
    dados_originais_br_io = dados_originais_br_io
  )
  # \__ Panel - Sources --------------------------------------------------------
  callModule(
    module = fontesModule,
    id = "fontes"
  )
}
# ************************************************************************* ####
# FIM ####
# **************************************************************************** #
|
20f68b4a3397e8f8c35e83c8962c11cba1aca440 | 0841838ba8723e94b37a1514409a5a9767cbf181 | /SpatialR21_project/code/aim2_analyses/spat21_astmh_visualizations_2019.R | 0ea3968cde3f6a0463625250002ed946fc8e1518 | [] | no_license | kelseysumner/taylorlab | cfa2358b5c552e7853b111de12940983d081de6a | 8801f5d32b7f81f2a66b3efd763cc18c5d35f42b | refs/heads/master | 2021-08-07T03:55:06.004801 | 2021-06-20T21:29:08 | 2021-06-20T21:29:08 | 150,612,627 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 31,466 | r | spat21_astmh_visualizations_2019.R | # ----------------------------------------- #
# Create aim 2 visualizations for astmh #
# Mozzie Phase 1 #
# CSP data #
# October 31, 2019 #
# K. Sumner #
# ----------------------------------------- #
# color pallete: http://colorbrewer2.org/#type=qualitative&scheme=Set1&n=5
# symptomatic (red): #b2182b
# asymptomatic (green): #238443
# mosquitoes (blue): #2166ac
# no infection (grey): #A9A9A9
#### --------- load packages ----------------- ####
library(tidyverse)
library(devtools)
library(streamgraph)
library(lubridate)
library(ggalluvial)
library(gridExtra)
#### ---------- read in the data sets ---------- ####
# NOTE(review): all inputs are absolute paths on the analyst's machine;
# re-point them (or use here::here()) before running elsewhere.
# read in the merged csp abdomen edgelist ready for the multilevel models
csp_abdomens = read_rds("Desktop/clean_ids_haplotype_results/CSP/model data set/spat21_csp_abdomen_sharing_final_model_data_31OCT2019.rds")
# read in the merged anopheles mosquito data set
anoph_merged_data = read_rds("/Users/kelseysumner/Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Mosquito data/clean data/merged_data/spat21_mosquito_anopheles_merged_data_18JAN2019.RDS")
# read in the csp haplotype data
# load in the data set (the haplotypes after chimeras have been removed and haplotypes censored - seqtab_final.rds)
csp_haplotypes <- read_rds("Desktop/clean_ids_haplotype_results/CSP/spat21_CSP_haplotype_table_censored_final_version_with_moi_and_ids_CLEANVERSION_30SEPT2019.rds")
# read in the full human data set
final_data = read_rds("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Human data/spat21_clean_human_files/merged_files/final merged data/spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_1OCT2019.rds")
#### -------- make visualization 1 --------- ####
## make a plot of the anopheles mosquitoes using the stream graph plot
# set up the data set: monthly counts of collected mosquitoes by a
# collapsed abdominal-status category (Gravid/Half Gravid merged, etc.)
mosquito_data = anoph_merged_data %>%
  select(collection_date,abdominal_status) %>%
  mutate(value=rep(1,nrow(anoph_merged_data)), month_date = floor_date(collection_date, "month"),
         new_abdominal_status = ifelse(abdominal_status=="Gravid" | abdominal_status == "Half Gravid", "Gravid", ifelse(
           abdominal_status == "Blood Fed","Blood Fed",ifelse(
             abdominal_status == "Un-identified" | abdominal_status == "Undetermined", "Undetermined", "Unfed")))) %>%
  group_by(month_date,new_abdominal_status) %>%
  tally(wt=value)
# make the plot (interactive = F renders a static streamgraph)
mosquito_plot = mosquito_data %>%
  streamgraph("new_abdominal_status","n","month_date", offset="zero", interactive = F) %>%
  sg_fill_brewer("RdBu")
mosquito_plot
## make a plot of the mosquito infection status in abdomens using the stream graph plot
# set up the data set: monthly counts of abdomens by Pf PCR status,
# dropping records with missing status.
# NOTE(review): value = rep(1, length(!is.na(...))) gives the full column
# length either way (length() ignores the logical values); it happens to work
# because the filter above already removed the NAs — rep(1, n()) is clearer.
mosquito_data_infected = anoph_merged_data %>%
  filter(!(is.na(pf_pcr_infection_status_sample_level_a))) %>%
  select(collection_date,pf_pcr_infection_status_sample_level_a) %>%
  mutate(value=rep(1,length(!(is.na(pf_pcr_infection_status_sample_level_a)))), month_date = floor_date(collection_date, "month")) %>%
  group_by(month_date,pf_pcr_infection_status_sample_level_a) %>%
  tally(wt=value)
# relevel the data so "positive" is the first (bottom) stream layer
mosquito_data_infected$pf_pcr_infection_status_sample_level_a = relevel(mosquito_data_infected$pf_pcr_infection_status_sample_level_a,"positive")
# make the plot
mosquito_plot_infected = mosquito_data_infected %>%
  streamgraph("pf_pcr_infection_status_sample_level_a","n","month_date", offset="zero", interactive = F) %>%
  sg_fill_manual(values = c("#A9A9A9","#2166ac"))
mosquito_plot_infected
# Color key (repeated from the file header):
# symptomatic (red): #b2182b
# asymptomatic (green): #238443
# mosquitoes (blue): #2166ac
# no infection (grey): #A9A9A9
#### -------- make visualization 2 --------- ####
# Goal: a dot plot of each pfcsp haplotype over calendar months, sized by
# number of samples and colored by number of households it appeared in.
# merge in human sample dates with the csp_haplotypes data set
dates_df = final_data %>%
  select(sample_name_dbs,sample_id_date)
csp_haplotype_merge = left_join(csp_haplotypes,dates_df, by = "sample_name_dbs")
# check the merge (interactive sanity checks — expect empty setdiff)
setdiff(csp_haplotypes$sample_name_dbs,csp_haplotype_merge$sample_name_dbs)
length(which(is.na(csp_haplotype_merge$sample_id_date)))
# fix dates: repair two mosquito IDs missing their A/H (abdomen/head) prefix
anoph_merged_data$sample_id_abdomen[which(anoph_merged_data$sample_id_mosquito=="K01 00030")] = "K01 A00030"
anoph_merged_data$sample_id_head[which(anoph_merged_data$sample_id_mosquito=="K01 00030")] = "K01 H00030"
anoph_merged_data$sample_id_abdomen[which(anoph_merged_data$sample_id_mosquito=="K01 00047")] = "K01 A00047"
anoph_merged_data$sample_id_head[which(anoph_merged_data$sample_id_mosquito=="K01 00047")] = "K01 H00047"
# now merge in mosquito collection dates (abdomens, then heads)
dates_df_ab = anoph_merged_data %>%
  select(sample_id_abdomen,collection_date) %>%
  rename(sample_name_dbs = sample_id_abdomen)
dates_df_he = anoph_merged_data %>%
  select(sample_id_head,collection_date) %>%
  rename(sample_name_dbs = sample_id_head)
csp_haplotype_merge = left_join(csp_haplotype_merge,dates_df_ab,by="sample_name_dbs")
csp_haplotype_merge = left_join(csp_haplotype_merge,dates_df_he,by="sample_name_dbs")
# check the merge; coalesce the three date columns (human, abdomen, head)
# into one. ifelse() strips the Date class, hence the as_date() afterwards.
colnames(csp_haplotype_merge)
csp_haplotype_merge$date_all = ifelse(!(is.na(csp_haplotype_merge$sample_id_date)),csp_haplotype_merge$sample_id_date,ifelse(
  !(is.na(csp_haplotype_merge$collection_date.x)),csp_haplotype_merge$collection_date.x,csp_haplotype_merge$collection_date.y))
csp_haplotype_merge$date_all = as_date(csp_haplotype_merge$date_all)
# Interactive spot-check of the coalesced dates (View() opens the data viewer)
csp_haplotype_merge %>%
  select(sample_id_date,collection_date.x,collection_date.y,date_all,sample_name_dbs) %>%
  View()
# create a new variable that is just the month ("m-yyyy")
csp_haplotype_merge$month = paste0(lubridate::month(csp_haplotype_merge$date_all),"-",lubridate::year(csp_haplotype_merge$date_all))
table(csp_haplotype_merge$month, useNA = "always")
# make a HH_ID variable: the household code is the token before the first
# space or hyphen in the sample name (mosquito vs human ID formats differ).
# NOTE(review): 1:nrow() loop would misbehave on an empty frame; seq_len()
# is the safer idiom if this is ever refactored.
HH_ID = rep(NA,nrow(csp_haplotype_merge))
for (i in 1:nrow(csp_haplotype_merge)) {
  if (str_detect(csp_haplotype_merge$sample_name_dbs[i]," ")){
    HH_ID[i] = str_split(csp_haplotype_merge$sample_name_dbs[i]," ")[[1]][1]
  }
  if (str_detect(csp_haplotype_merge$sample_name_dbs[i],"-")){
    HH_ID[i] = str_split(csp_haplotype_merge$sample_name_dbs[i],"-")[[1]][1]
  }
}
table(HH_ID, useNA = "always")
csp_haplotype_merge$HH_ID = HH_ID
# only keep the haplotype columns, location, and month
colnames(csp_haplotype_merge)
csp_haplotype_merge = csp_haplotype_merge %>%
  select(-c(sample_type,haplotype_number,haplotype_reads,sample_id_date,collection_date.x,collection_date.y,MiSeq.ID,Run,sample_name_dbs,date_all))
colnames(csp_haplotype_merge)
# create a data frame summarizing each haplotype and the months it is present
# trying gathering the code to long format (one row per sample x haplotype)
long_csp_merged = gather(data=csp_haplotype_merge, "haplotype","readdepth", -month,-HH_ID)
# remove all rows with reads_present equal to 0 (haplotype absent)
long_csp_merged = long_csp_merged[-which(long_csp_merged$readdepth == 0),]
# summarize the new data set by month: samples per haplotype per month
month_summary = long_csp_merged %>%
  group_by(month,haplotype) %>%
  summarize(n_samples=n())
# summarize the new data set by location: distinct households per
# haplotype per month (grouping by HH_ID makes n_distinct 1 per group,
# then the sum over HH_ID groups counts distinct households)
location_summary = long_csp_merged %>%
  group_by(month,haplotype,HH_ID) %>%
  summarise(n_1 = n_distinct(HH_ID)) %>%
  select(month,haplotype,n_1) %>%
  summarise(n_households=sum(n_1,na.rm=T))
# merge the month and location summaries
merged_summary = left_join(month_summary,location_summary,by=c("month","haplotype"))
# check the output against direct counts for one month
length(which(csp_haplotype_merge$month == "1-2018" & csp_haplotype_merge$H1 > 0))
length(which(csp_haplotype_merge$month == "1-2018" & csp_haplotype_merge$H10 > 0))
unique(csp_haplotype_merge[which(csp_haplotype_merge$month == "1-2018" & csp_haplotype_merge$H1 > 0),c("HH_ID")])
unique(csp_haplotype_merge[which(csp_haplotype_merge$month == "1-2018" & csp_haplotype_merge$H10 > 0),c("HH_ID")])
# set order for x-axis for months (chronological, June 2017 - July 2018)
table(merged_summary$month, useNA = "always")
month_order = c("6-2017","7-2017","8-2017","9-2017","10-2017","11-2017","12-2017","1-2018","2-2018","3-2018","4-2018","5-2018","6-2018","7-2018")
merged_summary <- within(merged_summary, month <- factor(month, levels=month_order))
# set order for y-axis based on how many months each haplotype is present
months_hap_present_summary = long_csp_merged %>%
  group_by(haplotype,month) %>%
  summarise(n_present_1 = n_distinct(month)) %>%
  select(haplotype,n_present_1) %>%
  summarise(n_present = sum(n_present_1,na.rm=T))
haplotype_order = months_hap_present_summary[order(months_hap_present_summary$n_present),]
merged_summary <- within(merged_summary, haplotype <- factor(haplotype, levels=haplotype_order$haplotype))
# make a figure of the csp haplotypes present over time across all samples (regardless if human or mosquito)
csp_month_plot = ggplot(merged_summary, aes(x=month, y=haplotype, size=n_samples, color=n_households)) +
  geom_point() +
  scale_colour_gradient(low = "#fcbba1", high = "#67000d") +
  labs(x = "Month and year",y="Haplotype", color = "Number of households", size = "Number of samples") +
  theme_bw()
csp_month_plot
# export the plot (disabled)
# ggsave(csp_month_plot, filename="/Users/kelseysumner/Desktop/spat21_aim2_csp_month_plot.png", device="png",
#  height=35, width=11.2, units="in", dpi=500)
#### ------ now make figure 3 - the alluvial plot ------- ####
# make a plot of how malaria infection status changes over time (from having an asymptomatic or symptomatic infection to having no infection during that month)
# select variables you need for human data; keep only asymptomatic /
# no-infection monthly-visit rows and rename the exposure column
colnames(final_data)
human_data = final_data %>%
  filter(main_exposure_primary_case_def == "asymptomatic infection" | main_exposure_primary_case_def == "no infection") %>%
  dplyr::select(visit_type,sample_id_date,sample_name_final,sample_name_dbs,age_cat_baseline,unq_memID,village_name,HH_ID,main_exposure_primary_case_def,main_outcome_primary_case_def,pfr364Q_std_combined,age_all_baseline) %>%
  rename(infection_status = main_exposure_primary_case_def)
# create a new variable that is just the month ("m-yyyy")
human_data$month = paste0(lubridate::month(human_data$sample_id_date),"-",lubridate::year(human_data$sample_id_date))
table(human_data$month, useNA = "always")
# cut down the data set to just the variables of interest:
# one row per participant x month x status, with counts
plot_human_data = human_data %>%
  select(infection_status,month,unq_memID) %>%
  group_by(month,infection_status,unq_memID) %>%
  summarize(n=n())
# within each month, convert counts to proportions of participants
plot_human_data_withperc = plot_human_data %>%
  group_by(month) %>%
  mutate(perc_n=n/sum(n))
# set order for x-axis for months
table(plot_human_data_withperc, useNA = "always")
month_order = c("6-2017","7-2017","8-2017","9-2017","10-2017","11-2017","12-2017","1-2018","2-2018","3-2018","4-2018","5-2018","6-2018","7-2018")
plot_human_data_withperc <- within(plot_human_data_withperc, month <- factor(month, levels=month_order))
# reorder so asymptomatic infections are on the bottom of each stratum
plot_human_data_withperc$infection_status = relevel(as.factor(plot_human_data_withperc$infection_status),"no infection")
# now make an alluvial plot (ggalluvial) of how infection status changes
# over time; alluvia track individual participants (unq_memID)
figure3_plot = ggplot(plot_human_data_withperc,
       aes(x = month, stratum = infection_status, alluvium = unq_memID,
           y = perc_n,
           fill = infection_status, label = infection_status)) +
  geom_flow(na.rm=T,alpha=0.25) +
  geom_stratum() +
  scale_fill_manual(values=c("#A9A9A9","#238443")) +
  theme_bw() +
  xlab("Month")+
  ylab("Proportion of participants")+
  labs(fill="Infection status")
figure3_plot
ggsave(figure3_plot, filename="/Users/kelseysumner/Desktop/figure3_plot_alluvial.png", device="png",
       height=5.25, width=11, units="in", dpi=500)
#### -------- make a plot of symptomatic infections over time ------- ####
# make a data set of just sick-visit rows; a visit is counted as a
# symptomatic infection whenever the primary outcome is non-missing
symptomatic_df = final_data %>%
  filter(visit_type == "monthly and sick visit" | visit_type== "sick visit") %>%
  select(c(unq_memID,sample_id_date,sample_name_final,HH_ID,village_name,main_outcome_primary_case_def)) %>%
  mutate(symp_infection = ifelse(!(is.na(main_outcome_primary_case_def)),"symptomatic infection","no infection"))
table(symptomatic_df$symp_infection, useNA = "always")
# create a new variable that is just the month ("m-yyyy")
symptomatic_df$month = paste0(lubridate::month(symptomatic_df$sample_id_date),"-",lubridate::year(symptomatic_df$sample_id_date))
table(symptomatic_df$month, useNA = "always")
# cut down the data set to just the variables of interest:
# monthly counts by infection status
plot_human_data_symp = symptomatic_df %>%
  select(symp_infection,month,unq_memID) %>%
  group_by(month,symp_infection) %>%
  summarize(n=n())
# set order for x-axis for months
table(plot_human_data_symp, useNA = "always")
month_order = c("6-2017","7-2017","8-2017","9-2017","10-2017","11-2017","12-2017","1-2018","2-2018","3-2018","4-2018","5-2018","6-2018","7-2018")
plot_human_data_symp <- within(plot_human_data_symp, month <- factor(month, levels=month_order))
# make a stacked bar plot of the symptomatic infections tested over time
plot4 = ggplot(data = plot_human_data_symp,aes(x=month,y=n,fill=symp_infection)) +
  geom_bar(stat="identity")+
  scale_fill_manual(values=c("#A9A9A9","#b2182b")) +
  theme_bw() +
  xlab("Month")+
  ylab("Number of participants")+
  labs(fill="Infection status")
plot4
# export the plot
ggsave(plot4, filename="/Users/kelseysumner/Desktop/figure4_plot_stackedsymp.png", device="png",
       height=5.25, width=11, units="in", dpi=500)
#### ------ make a plot of the mois for humans and mosquito abdomens for csp ------ ####
### create histograms of moi (multiplicity of infection = haplotype count)
### subset by sample type / symptom status
# Human infections only (asymptomatic exposure or symptomatic outcome),
# collapsed into a single aim2_exposure label per sample.
human_data_exposure = final_data %>%
  filter(main_exposure_primary_case_def == "asymptomatic infection" | main_outcome_primary_case_def == "symptomatic infection") %>%
  dplyr::select(visit_type,sample_id_date,sample_name_final,sample_name_dbs,age_cat_baseline,unq_memID,village_name,HH_ID,main_exposure_primary_case_def,main_outcome_primary_case_def,pfr364Q_std_combined,age_all_baseline) %>%
  mutate(aim2_exposure = ifelse(is.na(main_exposure_primary_case_def),as.character(main_outcome_primary_case_def),as.character(main_exposure_primary_case_def))) %>%
  dplyr::select(-main_exposure_primary_case_def,-main_outcome_primary_case_def,-visit_type)
# merge in symptomatic info with the haplotype data set
merge_hap_human_data = left_join(csp_haplotypes,human_data_exposure,by="sample_name_dbs")
# check the merge (interactive sanity checks)
setdiff(csp_haplotypes$sample_name_dbs,merge_hap_human_data$sample_name_dbs)
setdiff(human_data_exposure$sample_name_dbs,merge_hap_human_data$sample_name_dbs)
length(which(is.na(merge_hap_human_data$sample_id_date)))
merge_hap_human_data %>%
  filter(is.na(merge_hap_human_data$sample_id_date)) %>%
  select(sample_name_dbs,pfr364Q_std_combined,aim2_exposure,haplotype_reads) %>%
  View()
# create a summarized data frame of the number of samples at each MOI for csp
# for humans asymptomatic
csp_human_df_asymp <- merge_hap_human_data %>%
  filter(!(is.na(haplotype_number)) & sample_type == "Human" & aim2_exposure == "asymptomatic infection") %>%
  group_by(haplotype_number) %>%
  summarise(n=n())
csp_human_df_asymp$haplotype_number = as.numeric(csp_human_df_asymp$haplotype_number)
sum(csp_human_df_asymp$n)
# for humans symptomatic
csp_human_df_symp <- merge_hap_human_data %>%
  filter(!(is.na(haplotype_number)) & sample_type == "Human" & aim2_exposure == "symptomatic infection") %>%
  group_by(haplotype_number) %>%
  summarise(n=n())
csp_human_df_symp$haplotype_number = as.numeric(csp_human_df_symp$haplotype_number)
sum(csp_human_df_symp$n)
# for abdomens (mosquito samples; symptom status not applicable)
csp_abdomen_df <- csp_haplotypes %>%
  filter(!(is.na(haplotype_number)) & sample_type=="Abdomen") %>%
  group_by(haplotype_number) %>%
  summarise(n=n())
csp_abdomen_df$haplotype_number = as.numeric(csp_abdomen_df$haplotype_number)
sum(csp_abdomen_df$n)
# make csp moi figures by sample type (shared axes so panels are comparable)
# for human samples asymptomatic
csp_human_title_asymp <- expression(paste(italic("pfcsp"), ": Asymptomatic humans"))
csp_human_plot_asymp = ggplot() +
  geom_bar(data=csp_human_df_asymp,aes(x=haplotype_number,y=n), alpha=0.95,stat="identity",fill="#ff7f00") +
  labs(x="Number of haplotypes", y="Number of samples", title= csp_human_title_asymp, pch=18) +
  theme_bw() +
  scale_x_continuous(breaks=c(0,5,10,15,20), limits=c(0,20)) +
  scale_y_continuous(breaks=c(0,60,120,180,240,300,360), limits=c(0,320)) +
  theme(plot.title = element_text(size = 26, face = "bold", hjust = 0.5), text = element_text(size=25))
csp_human_plot_asymp
# for human samples symptomatic
csp_human_title_symp <- expression(paste(italic("pfcsp"), ": Symptomatic humans"))
csp_human_plot_symp = ggplot() +
  geom_bar(data=csp_human_df_symp,aes(x=haplotype_number,y=n), alpha=0.95,stat="identity",fill="#e31a1c") +
  labs(x="Number of haplotypes", y="Number of samples", title= csp_human_title_symp, pch=18) +
  theme_bw() +
  scale_x_continuous(breaks=c(0,5,10,15,20), limits=c(0,20)) +
  scale_y_continuous(breaks=c(0,60,120,180,240,300,360), limits=c(0,320)) +
  theme(plot.title = element_text(size = 26, face = "bold", hjust = 0.5), text = element_text(size=25))
csp_human_plot_symp
# for abdomen samples
csp_abdomen_title <- expression(paste(italic("pfcsp"), ": Mosquito abdomens"))
csp_abdomen_plot = ggplot() +
  geom_bar(data=csp_abdomen_df,aes(x=haplotype_number,y=n), alpha=0.95,stat="identity",fill="#fdd0a2") +
  labs(x="Number of haplotypes", y="Number of samples", title= csp_abdomen_title, pch=18) +
  theme_bw() +
  scale_x_continuous(breaks=c(0,5,10,15,20), limits=c(0,20)) +
  scale_y_continuous(breaks=c(0,60,120,180,240,300,360), limits=c(0,320)) +
  theme(plot.title = element_text(size = 26, face = "bold", hjust = 0.5), text = element_text(size=25))
csp_abdomen_plot
# put all three csp moi plots on the same grid
figure2_csp_subset_moi = gridExtra::grid.arrange(csp_human_plot_asymp,csp_human_plot_symp,csp_abdomen_plot,ncol=3)
# export the figure
ggsave(figure2_csp_subset_moi, filename="/Users/kelseysumner/Desktop/figure2_csp_subset_moi.png", device="png",
       height=10.5, width=17, units="in", dpi=400)
# calculate median values (summary() prints median MOI per group)
# for csp
csp_asymp = merge_hap_human_data %>%
  filter(aim2_exposure=="asymptomatic infection")
summary(csp_asymp$haplotype_number)
csp_symp = merge_hap_human_data %>%
  filter(aim2_exposure == "symptomatic infection")
summary(csp_symp$haplotype_number)
csp_mosq = merge_hap_human_data %>%
  filter(sample_type == "Abdomen")
summary(csp_mosq$haplotype_number)
#### -------- figure 6: make layered histogram of human-mosquito pairs and haplotype sharing ------- ####
## make a plot of the haplotypes shared across human-mosquito pairs
# make a data set of the number of pairs that shared at least 1 haplotype across households
csp_abdomen_shared_pairs_in_HH = csp_abdomens %>%
  filter(haps_shared > 0) %>%
  group_by(village_name,HH_ID) %>%
  summarize(number_pairs_with_sharing = n())
# make a data set of the number of Pf+ infections in humans detected across each household
# NOTE(review): the next two data sets appear to be swapped relative to their
# comments — this one pulls from final_data (humans) but is named *_mosq, and
# the one below pulls from anoph_merged_data (mosquitoes) but is named
# *_humans. The plots below use them consistently with the SOURCE (humans =
# final_data-derived), but the names/comments should be reconciled.
pf_infection_in_HH_mosq = final_data %>%
  filter(pf_pcr_infection_status == "positive") %>%
  group_by(HH_ID) %>%
  summarize(number_infection=n())
# make a data set of the number of Pf+ infections in mosquito abdomens detected across each household
pf_infection_in_HH_humans = anoph_merged_data %>%
  filter(pf_pcr_infection_status_sample_level_a == "positive") %>%
  group_by(HH_ID) %>%
  summarize(number_infection=n())
# make a layered plot of the pairs over time
# layer1: pairs with at least one shared haplotype, per household
layered1_plot = ggplot() +
  geom_bar(data=csp_abdomen_shared_pairs_in_HH,aes(x=HH_ID,y=number_pairs_with_sharing), alpha=0.95,stat="identity",fill="grey") +
  labs(x="Households", y="Number of pairs with at least 1 shared haplotype", title= "Human-mosquito abdomen haplotype pairs", pch=18) +
  theme_bw() +
  theme(plot.title = element_text(size = 20, face = "bold", hjust = 0.5))
layered1_plot
# layer2: titled "Human infections" but plots the final_data-derived counts
# stored (confusingly) in pf_infection_in_HH_humans — see NOTE above
layered2_plot = ggplot() +
  geom_bar(data=pf_infection_in_HH_humans,aes(x=HH_ID,y=number_infection), alpha=0.95,stat="identity",fill="#e31a1c") +
  labs(x="Households", y="Number of Pf positive infections", title= "Human infections", pch=18) +
  theme_bw() +
  theme(plot.title = element_text(size = 20, face = "bold", hjust = 0.5))
layered2_plot
# layer3: titled "Mosquito abdomen infections" but plots pf_infection_in_HH_mosq
layered3_plot = ggplot() +
  geom_bar(data=pf_infection_in_HH_mosq,aes(x=HH_ID,y=number_infection), alpha=0.95,stat="identity",fill="#fdd0a2") +
  labs(x="Households", y="Number of Pf positive infections", title= "Mosquito abdomen infections", pch=18) +
  theme_bw() +
  theme(plot.title = element_text(size = 20, face = "bold", hjust = 0.5))
layered3_plot
# stack the three panels vertically
figure6_layer_plot = gridExtra::grid.arrange(layered1_plot,layered2_plot,layered3_plot,nrow=3)
# export the figure (disabled)
# ggsave(figure6_layer_plot, filename="/Users/kelseysumner/Desktop/figure2_csp_subset_moi.png", device="png",
#       height=10.5, width=17, units="in", dpi=400)
#### ----- figure 7: dot plot of number of haplotypes shared in human-mosquito pairs ------- ####
## make a plot of the outcome of the number of haplotypes shared across human-mosquito pairs
# set up the data set for a dot plot
# NOTE(review): dot_plot_df is computed but never used by the plot below,
# which draws directly from csp_abdomens.
dot_plot_df <- csp_abdomens %>%
  group_by(haps_shared) %>%
  summarise(n=n())
# make figures of the number of haps shared, one dot per pair, by village
dot_plot = ggplot(csp_abdomens, aes(x = factor(village_name), fill = factor(village_name), y = haps_shared)) +
  geom_dotplot(binaxis = "y", stackdir = "center",alpha=0.8,dotsize=0.5)+
  labs(y="Number of haplotypes shared", x="Village name", pch=18) +
  theme_bw() +
  theme(legend.position = "none", text = element_text(size=25)) +
  scale_fill_manual(values=c("#c7e9b4","#41b6c4"))
dot_plot
# export the plot
ggsave(dot_plot, filename="/Users/kelseysumner/Desktop/figure7_dot_plot.png", device="png",
       height=8, width=18, units="in", dpi=400)
#### ------- make a plot with the haplotype sharing outcome across a range of definitions for a successful transmission event --------- ####
# look at the number of human-mosquito pairs sharing across symptomatic status
# with different definitions for a transmission event: one summary per
# threshold (>=1 ... >=5 shared haplotypes), tagged with a hap_range label.
# at least 1 haplotype shared = transmission event
csp_abdomens_1hap = csp_abdomens %>%
  group_by(aim2_exposure) %>%
  summarize(number_of_pairs = n(), number_with_a_shared_hap = length(which(haps_shared>0)), prop_shared = (number_with_a_shared_hap/number_of_pairs)*100) %>%
  mutate(hap_range = "1 or more haplotypes")
# at least 2 haplotypes shared = transmission event
csp_abdomens_2hap = csp_abdomens %>%
  group_by(aim2_exposure) %>%
  summarize(number_of_pairs = n(), number_with_a_shared_hap = length(which(haps_shared>1)), prop_shared = (number_with_a_shared_hap/number_of_pairs)*100) %>%
  mutate(hap_range = "2 or more haplotypes")
# at least 3 haplotypes shared = transmission event
csp_abdomens_3hap = csp_abdomens %>%
  group_by(aim2_exposure) %>%
  summarize(number_of_pairs = n(), number_with_a_shared_hap = length(which(haps_shared>2)), prop_shared = (number_with_a_shared_hap/number_of_pairs)*100) %>%
  mutate(hap_range = "3 or more haplotypes")
# at least 4 haplotypes shared = transmission event
csp_abdomens_4hap = csp_abdomens %>%
  group_by(aim2_exposure) %>%
  summarize(number_of_pairs = n(), number_with_a_shared_hap = length(which(haps_shared>3)), prop_shared = (number_with_a_shared_hap/number_of_pairs)*100) %>%
  mutate(hap_range = "4 or more haplotypes")
# at least 5 haplotypes shared = transmission event
csp_abdomens_5hap = csp_abdomens %>%
  group_by(aim2_exposure) %>%
  summarize(number_of_pairs = n(), number_with_a_shared_hap = length(which(haps_shared>4)), prop_shared = (number_with_a_shared_hap/number_of_pairs)*100) %>%
  mutate(hap_range = "5 or more haplotypes")
# stack the five threshold summaries into one long data frame for plotting
hap_sharing_combined_df = rbind(csp_abdomens_1hap,csp_abdomens_2hap,csp_abdomens_3hap,csp_abdomens_4hap,csp_abdomens_5hap)
hap_sharing_combined_df = data.frame(hap_sharing_combined_df)
# use the binom package to compute an exact confidence interval
library(binom)
# create empty CI columns
hap_sharing_combined_df$lower_ci = rep(NA,nrow(hap_sharing_combined_df))
hap_sharing_combined_df$upper_ci = rep(NA,nrow(hap_sharing_combined_df))
# Fill the confidence limits row by row. This replaces ten hand-written
# copies of the same computation (5 thresholds x 2 exposure groups) and
# works for any number of rows.
for (i in seq_len(nrow(hap_sharing_combined_df))) {
  full = binom.confint(hap_sharing_combined_df$number_with_a_shared_hap[i],
                       hap_sharing_combined_df$number_of_pairs[i])
  # Row 5 of binom.confint's default (methods = "all") output is kept, as in
  # the original code. NOTE(review): confirm which CI method row 5
  # corresponds to (rows are ordered by method name); selecting by method
  # name instead of position would be more robust.
  hap_sharing_combined_df$lower_ci[i] = full$lower[5]
  hap_sharing_combined_df$upper_ci[i] = full$upper[5]
}
# split into an asymp and symp data sets for the plot
asymp_data = hap_sharing_combined_df %>% filter(aim2_exposure == "asymptomatic infection")
symp_data = hap_sharing_combined_df %>% filter(aim2_exposure == "symptomatic infection")
asymp_data$hap_range = as.factor(asymp_data$hap_range)
symp_data$hap_range = as.factor(symp_data$hap_range)
# make plots of how the number of pairs changes over time
# Lines: % of pairs meeting each transmission-event definition
# (orange = asymptomatic, red = symptomatic); ribbons: the matching CIs.
# NOTE(review): the ribbons map x to the integer positions
# 1:length(hap_range) while the lines map x to the factor itself; this
# relies on the factor levels being in plotting order -- confirm intended.
csp_abdomen_hap_pairs_plot = ggplot() +
geom_line(data=asymp_data,aes(x=hap_range,y=prop_shared,group=1),cex=1.5,col="#ff7f00") +
geom_ribbon(data=asymp_data,aes(x=1:length(hap_range),ymin = lower_ci*100, ymax = upper_ci*100),alpha=0.2,fill="#ff7f00") +
geom_line(data=symp_data,aes(x=hap_range,y=prop_shared,group=1),cex=1.5,col="#e31a1c") +
geom_ribbon(data=symp_data,aes(x=1:length(hap_range),ymin = lower_ci*100, ymax = upper_ci*100),alpha=0.2,fill="#e31a1c") +
theme_bw() +
xlab("Number of haplotypes that signified a successful transmission event") +
ylab("Percentage of pairs with successful transmission event")
csp_abdomen_hap_pairs_plot
# export the plot
ggsave(csp_abdomen_hap_pairs_plot, filename="/Users/kelseysumner/Desktop/csp_abdomen_hap_pairs_over_time.png", device="png",
height=5, width=8, units="in", dpi=400)
#### -------- make a plot of the number of samples within each haplotype ----- ####
# make separate data sets for humans and mosquitoes
# (columns 4:301 hold the per-haplotype read counts)
human_haps = csp_haplotypes %>%
filter(sample_type=="Human")
human_haps = human_haps[,c(4:301)]
abdomen_haps = csp_haplotypes %>%
filter(sample_type=="Abdomen")
abdomen_haps = abdomen_haps[,c(4:301)]
# summarize the number of samples within each haplotype for the human samples
# colSums replaces the original per-column loops:
#   colSums(x > 0, na.rm = TRUE) == length(which(x[,k] > 0)) per column
#   colSums(x, na.rm = TRUE)     == sum(x[,k], na.rm = TRUE)  per column
# unname() keeps the data.frame row names as 1..n, exactly as before.
haplotype.names = seq_len(ncol(human_haps))
haplotypes_in_samples = unname(colSums(human_haps > 0, na.rm = TRUE))
total_reads_in_samples = unname(colSums(human_haps, na.rm = TRUE))
human_hap_summary = data.frame("haplotype_ids" = haplotype.names, "haplotypes_across_samples" = haplotypes_in_samples, "total_reads_across_samples" = total_reads_in_samples)
# summarize the number of samples within each haplotype for the mosquito abdomen samples
haplotype.names = seq_len(ncol(abdomen_haps))
haplotypes_in_samples = unname(colSums(abdomen_haps > 0, na.rm = TRUE))
total_reads_in_samples = unname(colSums(abdomen_haps, na.rm = TRUE))
abdomen_hap_summary = data.frame("haplotype_ids" = haplotype.names, "haplotypes_across_samples" = haplotypes_in_samples, "total_reads_across_samples" = total_reads_in_samples)
# order both summaries by descending human sample counts so the two figures
# share the same x-axis ordering
hap_order = order(-human_hap_summary$haplotypes_across_samples)
human_hap_summary = human_hap_summary[hap_order,]
abdomen_hap_summary = abdomen_hap_summary[hap_order,]
human_hap_summary$haplotype_ids = factor(human_hap_summary$haplotype_ids, levels=human_hap_summary$haplotype_ids[order(-human_hap_summary$haplotypes_across_samples)])
abdomen_hap_summary$haplotype_ids = factor(abdomen_hap_summary$haplotype_ids, levels=abdomen_hap_summary$haplotype_ids[order(-human_hap_summary$haplotypes_across_samples)])
# make plot of human haplotypes
# bar height = number of human samples carrying each haplotype, ordered by
# descending human sample counts (set above)
human_hap_plot = ggplot() +
geom_bar(data=human_hap_summary,aes(x=haplotype_ids,y=haplotypes_across_samples),alpha=0.8,fill="#e31a1c",stat = "identity") +
theme_bw() +
xlab("Haplotype ID") +
ylab("Number of samples") +
ggtitle("Human samples") +
theme(plot.title = element_text(size = 26, face = "bold", hjust = 0.5), text = element_text(size=25),axis.text.x=element_blank(),
axis.ticks.x=element_blank())
human_hap_plot
# make plot of mosquito abdomen haplotypes
# same x-axis ordering as the human panel so the two are comparable
abdomen_hap_plot = ggplot() +
geom_bar(data=abdomen_hap_summary,aes(x=haplotype_ids,y=haplotypes_across_samples),alpha=0.8,fill="#fdd0a2",stat = "identity") +
theme_bw() +
xlab("Haplotype ID") +
ylab("Number of samples") +
ggtitle("Mosquito samples") +
theme(plot.title = element_text(size = 26, face = "bold", hjust = 0.5), text = element_text(size=25),axis.text.x=element_blank(),
axis.ticks.x=element_blank())
abdomen_hap_plot
# put both csp moi plots on same grid
# NOTE(review): these are the per-haplotype sample-count plots, not MOI
# plots -- confirm the comment/figure naming.
figure_number_samples_in_haplotypes = gridExtra::grid.arrange(human_hap_plot,abdomen_hap_plot,nrow=2)
# export the figure
ggsave(figure_number_samples_in_haplotypes, filename="/Users/kelseysumner/Desktop/figure_number_samples_in_haplotypes.png", device="png",
height=10.5, width=17, units="in", dpi=400)
|
f98f7c248dcbf62904cbc04a90ef18439e25ae22 | 83959b9f644e2ed0573e1ccd12332d0ced6781bd | /data_cleaning/CCH2_scripts_data/split_by_herb_PI.R | 27dcf0aad3fd01071abbc47574d971aa8abb336d | [] | no_license | devgamble/nemo_herb | fbf118da36e98d9048a311be0afd609ea83f16f5 | a0c70835093bc4c30ea6179340c7c14f8ed7f731 | refs/heads/master | 2023-01-21T17:48:04.231498 | 2023-01-15T02:14:44 | 2023-01-15T02:14:44 | 227,468,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,245 | r | split_by_herb_PI.R | ###############
## Splitting up georeferenced records by herbarium for PI scoring (CCH2)
###############
#
#Using the georeferenced data sheets with lat/long=NA observations removed, split up the data by herbarium for Phenological Index scoring
#Load Packages
library(stringr)
library(magrittr)
library(tidyverse)
library(here)
#Don't need to recombine herbarium splits since the same splits will be used for georeferencing
#Be sure records tagged for removal are deleted before creating new csv for PI scoring
#
###UPDATE: New splits specified AT THE BOTTOM of this script. Many cch2 duplicates removed from `combine_cch1_cch2_V2.R`
#
#Record Starting and ending number of records (accounts for removal)!
##Filter out observations that will not be included
# Every herbarium split goes through the same pipeline:
#   1. read the georeferenced split,
#   2. drop records tagged for removal (remove_obs == "y") -- skipped for
#      UCR, as in the original code,
#   3. add empty phenology-index (PI) scoring columns,
#   4. reorder the columns,
#   5. normalise the datum (GEOLocate-georeferenced records become WGS84),
#   6. drop the remove_obs flag (again skipped for UCR).
# The six copy-pasted pipelines are collapsed into one helper; the per-split
# global data frames (nemo_cch2_*_g / nemo_cch2_*_p) are kept unchanged for
# any downstream code that inspects them.
add_pi_columns <- function(df, drop_removed = TRUE) {
  if (drop_removed) {
    df <- df %>% filter(!(remove_obs %in% "y"))
  }
  df <- df %>%
    mutate(scored_by = NA,
           notes = NA,
           plant = NA,
           Total = NA,
           buds_1 = NA,
           flowers_2 = NA,
           spent_3 = NA,
           immature_4 = NA,
           mature_5 = NA) %>%
    select(specimen_number:DOY, sub_sp, repro, county:habitat, elev_m, lat:long, error_dist_m, georef_by, georef_notes, datum, references, source, everything()) %>%
    # Adds WGS84 datum for all records where GEOLocate was used.
    # NOTE(review): readr usually reads empty cells as NA, so
    # georef_by != '' may evaluate to NA (falling through to the other
    # branches) -- confirm this matches the intended behaviour.
    mutate(datum = case_when(georef_by != '' | datum == "WGS84" ~ "WGS84",
                             datum == "NAD83" | datum == "NAD 83" ~ "NAD83",
                             datum == "NAD27" ~ "NAD27"))
  if (drop_removed) {
    df <- df %>% select(c(-remove_obs))
  }
  df
}
# read one georeferenced split from the georef_splits folder
read_georef_split <- function(file_name) {
  read_csv(here::here("data_cleaning", "CCH2_scripts_data", "georef_splits", file_name))
}
# save an updated split to the 'splits_for_PI' folder
write_pi_split <- function(df, file_name) {
  write_csv(df, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", file_name), na = "")
}
#Block A-C
nemo_cch2_AC_g <- read_georef_split("nemo_cch2_A-C_V2.csv")
nemo_cch2_AC_p <- add_pi_columns(nemo_cch2_AC_g)
write_pi_split(nemo_cch2_AC_p, "nemo_cch2_AC_p.csv")
#Block D-L
nemo_cch2_DL_g <- read_georef_split("nemo_cch2_D-L_V2.csv")
nemo_cch2_DL_p <- add_pi_columns(nemo_cch2_DL_g)
write_pi_split(nemo_cch2_DL_p, "nemo_cch2_DL_p.csv")
#Block O-SD
nemo_cch2_OS_g <- read_georef_split("nemo_cch2_O-SD_V2.csv")
nemo_cch2_OS_p <- add_pi_columns(nemo_cch2_OS_g)
write_pi_split(nemo_cch2_OS_p, "nemo_cch2_OS_p.csv")
#Block SF-UC
nemo_cch2_SU_g <- read_georef_split("nemo_cch2_SF-UC_V2.csv")
nemo_cch2_SU_p <- add_pi_columns(nemo_cch2_SU_g)
write_pi_split(nemo_cch2_SU_p, "nemo_cch2_SU_p.csv")
#Block UCR
# NOTE(review): the original did not filter remove_obs for UCR (presumably
# no records were tagged) -- confirm.
nemo_cch2_UCR_g <- read_georef_split("nemo_cch2_UCR_V2.csv")
nemo_cch2_UCR_p <- add_pi_columns(nemo_cch2_UCR_g, drop_removed = FALSE)
write_pi_split(nemo_cch2_UCR_p, "nemo_cch2_UCR_p.csv")
#Block UCSB
nemo_cch2_UCSB_g <- read_georef_split("nemo_cch2_UCSB_V2.csv")
nemo_cch2_UCSB_p <- add_pi_columns(nemo_cch2_UCSB_g)
write_pi_split(nemo_cch2_UCSB_p, "nemo_cch2_UCSB_p.csv")
########################
#This chunk added after running `combine_cch1_cch2_V2.R`
##
##########
# NEW SPLITS for PI Scoring
##########
##
#
# Re-derive the herbarium splits from the combined/deduplicated data set,
# keeping only CCH2-sourced records; each split is selected by matching the
# herbarium prefix of specimen_number.
# NOTE(review): only some alternatives are anchored with ^ (e.g. "^AHUC"
# but "BSCA"); the unanchored ones match anywhere in specimen_number --
# confirm this is intended.
nemo_all_1_cch2 <- read_csv(here::here("data_cleaning", "nemo_all_2.csv")) %>%
filter(source == "cch2")
#1238 cch2 obs, 530 cch1 obs
#AC
cch2_AC_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "^AHUC|BSCA|CAS|COLL|CSUS"))
write_csv(cch2_AC_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_AC_pi.csv"), na = "")
#23 obs
#DL
cch2_DL_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "DAV|HSC|IRVC|JEPS|LOB"))
write_csv(cch2_DL_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_DL_pi.csv"), na = "")
#236 obs
#UPDATE 235 after rm based on bad ClimateNA data
#OS
cch2_OS_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "OBI|RSA|SBBG|SD00|SDSU"))
write_csv(cch2_OS_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_OS_pi.csv"), na = "")
#327 obs
#SU
# ("^UC1|...|^UC9" is equivalent to the shorter "^UC[1-9]")
cch2_SU_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "SFV|^UC1|^UC2|^UC3|^UC4|^UC5|^UC6|^UC7|^UC8|^UC9"))
write_csv(cch2_SU_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_SU_pi.csv"), na = "")
#376 obs
#UPDATE 375 after rm based on bad ClimateNA data
#UCR
cch2_UCR_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "UCR0"))
write_csv(cch2_UCR_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_UCR_pi.csv"), na = "")
#199 obs
#UCSB
cch2_UCSB_pi <- nemo_all_1_cch2 %>%
filter(str_detect(specimen_number, "UCSB"))
write_csv(cch2_UCSB_pi, here("data_cleaning", "CCH2_scripts_data", "splits_for_PI", "nemo_cch2_UCSB_pi.csv"), na = "")
#77 obs
|
# Split `text` on the (regex) separator `split`.
# output = "All" returns every piece; otherwise `output` is used as an
# index (scalar or vector) into the pieces.
# Fix: the original `if (output == "All")` fails (error in R >= 4.2) when
# `output` is a vector of indices; identical() makes the check scalar-safe
# and generalizes `output` to vectors such as c(1, 3).
split.code <- function(text, split, output = "All") {
  pieces <- unlist(strsplit(text, split))
  if (identical(output, "All")) {
    pieces
  } else {
    pieces[output]
  }
}
d64bbdf91d978f6aff5418b48a8a741da95c9c9f | 3ad3ce5f38d636b649abd6e2e8741d482d8f6d72 | /man/Genewise.Rd | 3ef97ec4e8cc9be3a5a236fd01e47c4e2c2719b4 | [] | no_license | cran/pubmed.mineR | 279ce024df5b7913885ec96ad906226aa78633ce | 7612a2d68f503794f8dee8479e8b388949e4b70d | refs/heads/master | 2021-12-09T18:39:44.578225 | 2021-11-26T14:50:03 | 2021-11-26T14:50:03 | 17,919,888 | 5 | 6 | null | null | null | null | UTF-8 | R | false | false | 985 | rd | Genewise.Rd | \name{Genewise}
\alias{Genewise}
\title{To Search the number of abstracts for Genes}
\description{\code{Genewise} reports the number of abstracts for given gene(s) name(s)}
\usage{
Genewise(object, gene)
}
\arguments{
\item{object}{An S4 object of class Abstracts}
\item{gene}{a character input of gene name(HGNC approved symbol)}
}
\details{This function reports the number of abstracts containing the query gene term(s) [HGNC approved symbols]; the result is saved in a text file "dataout.txt". Genewise() reports the numbers of abstracts only. The abstracts themselves for the corresponding gene names can be obtained using searchabsL() and searchabsT().}
\value{Genewise will return an R object containing the abstracts for the given gene, and write a text file named "dataout.txt" containing the number of abstracts}
\author{S. Ramachandran, Jyoti Rani}
\examples{
\dontrun{Genewise(x, "TLR4")}
## here 'x' contains the S4 object of Abstracts.
}
\keyword{Function}
|
7a31da2829f59e8a0073a5e64f8bb3fb05e0b2e1 | 42c19dd6b25367211b2ebd27536ad19d0f8d79b3 | /R/generate_data.R | e9e30edfbdb809241c4970d67ea3bb50d522853b | [
"MIT"
] | permissive | StevenGolovkine/simulater | 610bbdce545cbbe0bbfcacb8148740c4d0da78e3 | 26b50620d520e4a27867cd0ec7859a587212c30e | refs/heads/main | 2023-04-10T22:40:48.219961 | 2022-03-19T17:10:02 | 2022-03-19T17:10:02 | 386,667,875 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,313 | r | generate_data.R | # -----
# Generate some data
# -----

#' @title Generate realistic datasets
#' @description This function generates realistic irregularly sampled functional
#'  dataset given mean and covariance functions.
#'
#' @param n Number of curves to generate.
#' @param m Mean number of observation points per curve.
#' @param model_mean \code{\link[glmnet]{glmnet}} model for the mean curve.
#' @param covariance Matrix for the covariance surface.
#' @param model_noise Object of class 'gam' from the function `learn_noise`.
#' @param lambda Value of the penalty parameter for the mean curve.
#' @param ti Sampling points of each curves, default=NULL. When NULL, the
#'  sampling points are drawn uniformly on (0, 1).
#' @param grid Common grid for the curves, default=seq(0, 1, length.out = 101).
#' @param p Uncertainty for the number of observation per curve, default=0.2.
#' @param k Multiplicative factor for the noise variance, default=1.
#'
#' @return List containing \code{n} entries. Each of the entries represents a
#'  simulated curve as another list with four entries:
#'  \itemize{
#'   \item \strong{$t} the sampling points.
#'   \item \strong{$x} the observed (noisy) points.
#'   \item \strong{$x_true} the observed points without noise.
#'   \item \strong{$x_grid} the noise-free values on the common \code{grid}.
#'  }
#'
#' @details The data are generated as
#'
#' \deqn{X = \mu + \Sigma u + \epsilon,}
#'
#' where \eqn{\mu} is the mean function, \eqn{\Sigma} is the square-root of the
#' covariance matrix, \eqn{u} and \eqn{\epsilon} are random normal variables.
#' Heteroscedasticity is allowed using the \code{model_noise} parameter.
#'
#' @examples
#' \dontrun{
#' if(interactive()){
#'  attach(powerconsumption)
#'  mod <- learn_mean(df = powerconsumption, k = 50)
#'  cov <- learn_covariance(powerconsumption, 'lm')
#'  coefs <- learn_noise(df = powerconsumption)
#'  df <- generate_data(n = 10, m = 40, model_mean = mod, covariance = cov,
#'                      model_noise = coefs, lambda = exp(-3.5),
#'                      ti = NULL, grid = seq(0, 1, length.out = 101),
#'                      p = 0.2, k = 1)
#'  }
#' }
#' @seealso
#'  \code{\link[purrr]{map}},\code{\link[purrr]{map2}}
#'  \code{\link[stats]{Uniform}},\code{\link[stats]{Normal}}
#'  \code{\link[MASS]{mvrnorm}}
#' @rdname generate_data
#' @export
#' @importFrom purrr map pmap
#' @importFrom stats runif rnorm
#' @importFrom MASS mvrnorm
#' @importFrom magrittr %>%
generate_data <- function(n, m, model_mean, covariance, model_noise, lambda,
                          ti = NULL, grid = seq(0, 1, length.out = 101),
                          p = 0.2, k = 1){
# When no sampling points are supplied, draw the number of points per curve
# uniformly from floor((1-p)m)..floor((1+p)m), then the points themselves
# uniformly on (0, 1).
if(is.null(ti)){
mi <- sample(floor((1 - p) * m):floor((1 + p) * m), n, replace = TRUE)
ti <- mi %>% purrr::map(~ sort(stats::runif(.x)))
}
# Augment each curve's sampling points with the common grid so mean and
# covariance are evaluated at both in a single multivariate draw.
ti_c <- ti %>% purrr::map(~ sort(c(.x, grid)))
# NOTE: k = 50 here is an argument of predict_mean, not the noise factor
# `k` of this function.
mui <- ti_c %>% purrr::map(~ predict_mean(.x, model_mean, lambda, k = 50))
covi <- ti_c %>% purrr::map(~ predict_covariance(.x, covariance))
# One multivariate-normal draw per curve over the augmented points, split
# back into observed points and grid points by membership.
# NOTE(review): the pmap arguments m and c shadow the outer `m` and base::c
# inside the anonymous function; also, the %in% split assumes the sampled
# points never coincide with grid points (almost surely true for runif
# draws) -- a tie would break the split.
list(ti, ti_c, mui, covi) %>%
purrr::pmap(function(tt, tt_c, m, c) {
x <- MASS::mvrnorm(1, m, c)
list(
t = tt,
x = x[tt_c %in% tt],
x_grid = x[!(tt_c %in% tt)]
)
}) %>%
# Add heteroscedastic Gaussian noise at the observed points only; the
# variance comes from the learned noise model, scaled by `k`.
purrr::map(function(x) {
noise <- sqrt(k * as.vector(predict_noise(x$t, x$x, model_noise)))
list(
t = x$t,
x = x$x + noise * stats::rnorm(length(x$t)),
x_true = x$x,
x_grid = x$x_grid
)
})
}
|
865109760b022b0429b2ce9930e7b2d088d131a4 | aeaa6e8300c98130b981887c4f7c317563d64cfc | /eu.R | 5783c65d93d4f8bb2eea9dac14ab8f2071fcf16e | [
"Apache-2.0"
] | permissive | vz-risk/veris_scripts | e6ef83c40c8ab6b6dc99d6ce1bcf920c82f2847c | a73f2eb85baa3b519098608990781dd00dd66909 | refs/heads/master | 2020-12-29T02:34:35.530443 | 2016-02-13T01:20:33 | 2016-02-13T01:20:33 | 39,145,471 | 12 | 5 | null | null | null | null | UTF-8 | R | false | false | 3,704 | r | eu.R | DESCRIPTION <- "Data Amount, Actions, and Attributes for EU states from 2013 back."
# Set static variables
# data: VERIS incident data file; load() below provides the `vz` data frame
data <- "./data-full.rda"
# victim.country.* indicator columns for the EU member states
countries <- c('victim.country.AT',
'victim.country.BE',
'victim.country.BG',
'victim.country.HR',
'victim.country.CY',
'victim.country.CZ',
'victim.country.DK',
'victim.country.EE',
'victim.country.FI',
'victim.country.FR',
'victim.country.DE',
'victim.country.GR',
'victim.country.HU',
'victim.country.IE',
'victim.country.IT',
'victim.country.LV',
'victim.country.LT',
'victim.country.LU',
'victim.country.MT',
'victim.country.NL',
'victim.country.PL',
'victim.country.PT',
'victim.country.RO',
'victim.country.SK',
'victim.country.SI',
'victim.country.ES',
'victim.country.SE',
'victim.country.GB')
# load libraries
library(data.table)
library(verisr)
library(dplyr)
library(tidyr)
# load the data
load(data)
# Create a European Union indicator column so we can filter on it.
# apply(.., 1, any) is TRUE when any member-state column is TRUE for the
# row (and NA for rows with NAs but no TRUE), which is exactly what the
# original ifelse(any(x == T), T, F) construction computed.
vz.EU <- vz %>% select(match(countries, names(vz)))
vz$victim.country.EU <- apply(vz.EU, 1, any)
# Evaluate the number of records per year (printed for inspection only)
vz %>% filter(victim.country.EU == TRUE) %>% group_by(timeline.incident.year) %>% summarize(count = n()) %>% arrange(desc(timeline.incident.year))
# subset to incidents from 2013 and earlier in EU victim countries
vz_pre_2014 <- vz %>% filter(timeline.incident.year <= 2013) %>% filter(victim.country.EU == TRUE)
# Break Down Actions
# getenum() (verisr) cross-tabulates victim country against each action
# enumeration; rows with zero counts (column x, presumably the count) are
# dropped.
eu_action <- vz_pre_2014 %>% getenum('victim.country', 'action') %>% filter(x!=0)
eu_malware <- vz_pre_2014 %>% getenum('victim.country', 'action.malware.variety') %>% filter(x!=0)
eu_hacking <- vz_pre_2014 %>% getenum('victim.country', 'action.hacking.variety') %>% filter(x!=0)
eu_social <- vz_pre_2014 %>% getenum('victim.country', 'action.social.variety') %>% filter(x!=0)
eu_misuse <- vz_pre_2014 %>% getenum('victim.country', 'action.misuse.variety') %>% filter(x!=0)
eu_error <- vz_pre_2014 %>% getenum('victim.country', 'action.error.variety') %>% filter(x!=0)
# Break Down Attributes
eu_attribute <- vz_pre_2014 %>% getenum('victim.country', 'attribute') %>% filter(x!=0)
eu_availability <- vz_pre_2014 %>% getenum('victim.country', 'attribute.availability.variety') %>% filter(x!=0)
eu_confidentiality_variety <- vz_pre_2014 %>% getenum('victim.country', 'attribute.confidentiality.data.variety') %>% filter(x!=0)
# Attach a country label per incident: melt the EU indicator columns and
# keep the TRUE one for each row.
# NOTE(review): this assumes exactly one EU country indicator is TRUE per
# incident; a multi-country incident would misalign the result -- confirm.
vz_pre_2014$row <- row.names(vz_pre_2014)
temp <- vz_pre_2014 %>% select(match(c(countries, c('row')), names(vz_pre_2014))) %>%
gather(country, value, -row) %>%
filter(value) %>%
select(row, country)
temp$row <- as.numeric(temp$row)
# NOTE(review): select(country) returns a one-column data frame, so
# `country` becomes a data-frame column inside vz_pre_2014; temp$country
# would give a plain vector -- confirm which is intended.
vz_pre_2014$country <- temp %>% arrange(row) %>% select(country)
rm(temp)
# Do the same aggregation through 'gather' for amounts
# (total records lost per country and data variety)
amt_cols <- grep('attribute.confidentiality.data.amount', names(vz_pre_2014), value=T)
eu_records <- vz_pre_2014 %>% select(match(c(amt_cols, c('country')), names(vz_pre_2014))) %>%
gather(data.variety, amount, -country) %>%
group_by(country, data.variety) %>%
summarize(amount = sum(amount, na.rm=T))
# persist all summary tables for downstream reporting
save(eu_records, eu_confidentiality_variety, eu_availability, eu_attribute, eu_error, eu_misuse, eu_social, eu_hacking, eu_malware, eu_action, file="./eu.rda")
893413ca0ffd82f38bb23211c6765ba8ac4cb540 | 28c70d7950a6e34b8670501873b990736f3cba5e | /dSMFseqAnalysis_1aln.R | 90be23c8260b75fc31a8432bde64581971dfd2ff | [] | no_license | jsemple19/20170323_dSMF_N2 | b388486b55a9776f558edabdfbe8eea96ac7bebf | cd30a994d75fa9085d674cf23a0e3d0729ee1163 | refs/heads/master | 2020-12-06T19:16:41.683528 | 2017-06-27T13:02:13 | 2017-06-27T13:02:13 | 95,531,676 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,281 | r | dSMFseqAnalysis_1aln.R | library(QuasR)
library("BSgenome.Celegans.UCSC.ce11")
# collect citations for packages used
packageBib<-toBibtex(c(citation("QuasR"),citation("BSgenome.Celegans.UCSC.ce11")))
# NOTE(review): setwd() to an absolute home-directory path makes this
# script machine-specific; all relative paths below depend on it.
setwd("~/Documents/MeisterLab/sequencingData/20170323_dSMF_N2/dSMFseq_scripts")
source('./callAllCs.r') #to call all Cs
source('./useful_functionsV1.r') #load the ranges
#library("BSgenome.Ecoli.NCBI.20080805") # delete this line??
# make clusters, needed to run in parallel
# (3 workers; passed to qAlign and qQCReport below)
cluObj=makeCluster(3)
#setup directory structure: create the output folders next to this
#script's parent directory if they do not exist yet.
#  aln   -> alignments
#  tmp   -> trimmomatic quality trimmed reads (also the QuasR cache)
#  rds   -> saved R objects
#  plots -> figures
#  bed   -> BED files
# The five copy-pasted if/dir.create blocks collapse into one loop.
for (subdir in c("aln", "tmp", "rds", "plots", "bed")) {
  if (!dir.exists(file.path("..", subdir))) {
    dir.create(file.path("..", subdir))
  }
}
path='../'
my.alignmentsDir=paste(path,'aln/',sep='')
tmp=paste(path,'tmp/',sep='')
#load the experiments
# sampleList.txt: tab-separated table; the columns FileName1, FileName2 and
# SampleName are used below -- presumably one row per paired-end sample.
seqExp=read.table(paste0(path,'rawData/sampleList.txt'),sep='\t',header=T)
#create the QuasR Aln file
# sample sheet pointing at the raw fastq files in rawData/
samples=as.data.frame(cbind(FileName1=paste(path,"rawData/",seqExp$FileName1,sep=''),
FileName2=paste(path,"rawData/",seqExp$FileName2,sep=''),
SampleName=as.character(seqExp$SampleName)))
###########################
#trim the low quality bases
###########################
# Run Trimmomatic in paired-end mode on every sample: adapter clipping
# (TruSeq3-PE) plus quality trimming, writing paired/unpaired outputs into
# ../tmp/.  sl() is presumably defined in the sourced useful_functionsV1.r.
# (The unused spID variable and the stale debug comment were removed.)
for(i in sl(samples[,1])){
  #clip the low quality bases #remove adapters
  # NOTE(review): consider shQuote() around the paths if sample names
  # could ever contain spaces or shell metacharacters.
  system(paste0(
    'java -jar /home/jenny/Trimmomatic-0.32/trimmomatic-0.32.jar PE ',
    samples$FileName1[i], ' ', samples$FileName2[i], ' ',
    '../tmp/', samples$SampleName[i], '_forward_paired.fq.gz ',
    '../tmp/', samples$SampleName[i], '_forward_unpaired.fq.gz ',
    '../tmp/', samples$SampleName[i], '_reverse_paired.fq.gz ',
    '../tmp/', samples$SampleName[i], '_reverse_unpaired.fq.gz ',
    'ILLUMINACLIP:TruSeq3-PE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36'
  ))
}
# QuasR sample sheet: the quality-trimmed *paired* reads for each sample
AlnInput=as.data.frame(cbind(
  FileName1=paste(tmp,samples$SampleName,'_forward_paired.fq.gz',sep=''),
  FileName2=paste(tmp,samples$SampleName,'_reverse_paired.fq.gz',sep=''),
  SampleName=as.character(samples$SampleName)))
write.table(AlnInput,'QuasR_input.txt',quote=FALSE,row.names=FALSE,sep='\t')
###########################
#Align the full length fragments
###########################
# extra bowtie parameters, appended via alignmentParameter below
QuasRdef='-k 2 --best --strata'
# Paired-end, undirectional bisulfite alignment against ce11; BAMs are
# written to my.alignmentsDir, intermediates to the tmp cache.
NOMEproj=qAlign(sampleFile=paste(path,"dSMFseq_scripts/QuasR_input.txt",sep=''),
genome="BSgenome.Celegans.UCSC.ce11",
#auxiliaryFile="/work2/gschub/Juliane/scripts/Altuna/auxFile.forQuasR.txt",
aligner="Rbowtie",
paired="fr",
bisulfite="undir",
projectName="dSMF_N2",
alignmentsDir=my.alignmentsDir,
clObj=cluObj ,
alignmentParameter=paste('-e 150 -X 600 ',QuasRdef,sep=''),
cacheDir = tmp)
#QC of the alignment
# NOTE(review): the report filename embeds a hard-coded date (130616)
qQCReport(NOMEproj,'QC_QualityTrimmed_130616.pdf',clObj=cluObj)
# record where the aligned files ended up
# NOTE(review): append=T means repeated runs keep growing QuasR_Aligned.txt
alignments1=as.data.frame(alignments(NOMEproj)[[1]])
write.table(alignments1,'QuasR_Aligned.txt',quote=F,col.names=F,row.names=F,sep='\t',append=T)
|
bb783120bde512d5e1ee885e7055bffbf7545474 | 29585dff702209dd446c0ab52ceea046c58e384e | /modeest/R/asselin.R | 2f913c768795e33f679bc03fb860f19248b6d7c3 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,864 | r | asselin.R | # Author : P. Poncet
#! check the na.contiguous() function -- I think it does exactly what is
#! needed here (translated from the original French note)

# Asselin de Beauville-style mode estimator: repeatedly histogram the
# (tail-trimmed) sample, keep the dominant "modal chain" of bins, and
# recurse on the values inside it until the chain is stable; return the
# median of the final chain.
asselin <-
function(x,
bw = NULL, # bw = 1 gives a long modal chain; bw < 1 is stricter
...)       # further arguments passed to quantile()
{
if (is.null(bw)) bw <- 1
nx <- length(x)
# maximum number of histogram bins, growing with log(sample size)
kmax <- floor(ifelse(nx < 30, 10, 15)*log(nx))
y <- sort(x)
ok1 <- FALSE
# Outer loop: shrink y to the current modal chain until it stabilises
while (!ok1) {
ny <- length(y)
if (ny==1) return(y)
qy <- quantile(y, probs = c(0.1, 0.25, 0.5, 0.75, 0.9), names = FALSE, ...)
# trim the tails: keep values within 3*delta of the 10%/90% quantiles,
# where delta is the smaller of the two outer quantile gaps
delta <- min(qy[5] - qy[4], qy[2] - qy[1])
a <- qy[1] - 3*delta
b <- qy[5] + 3*delta
yab <- y[y>=a & y <= b]
k <- kmax
ok2 <- FALSE
# Inner loop: decrease the bin count k until a single modal chain remains
# (or k reaches 3, where the two candidate chains are compared directly)
while (!ok2) {
#hy <- hist(yab, breaks = k, plot = FALSE);b <- hy$breaks;n <- c(hy$counts, 0)
# equal-width bin edges and counts (trailing 0 pads the last edge)
b <- seq(from = min(yab), to = max(yab), length = k+1)
n <- c(tabulate(findInterval(yab, b[-(k+1)])), 0)
N <- sum(n)
# v flags the bins whose count reaches the average count N/k
v <- as.numeric(n >= N/k)
## Beginning of the first chain
w <- which.max(v)
v2 <- v[w:(k+1)]
## End of the first chain
w2 <- which.min(v2) + w - 1
v3 <- v[w2:(k+1)]
## Length of the first chain
nc <- sum(n[w:(w2-1)])
## There exists another chain, and the first chain has only one element
if (any(v3==1) & nc==1) {
if (k > 3) {
k <- k-1
} else if (k==3) {
if (n[3] > 1) {
w <- 3
w2 <- 4
}
ok2 <- TRUE
} else {
stop("k < 3")
}
## There exists another chain, and the first chain has more than one element
} else if (any(v3==1) & nc > 1) {
if (k > 3) {
k <- k-1
### In this case, w = 1 necessarily
} else if (k==3) {
if (n[3] > 1) {
# compare a spread score for each chain (product of successive gaps,
# scaled by chain length); the chain with the smaller score is kept
p1 <- (1/n[1])*prod(diff(yab[yab >= b[w] & yab <= b[w2]])) # here, n[1] = length(first chain)
p2 <- (1/n[3])*prod(diff(yab[yab >= b[3] & yab <= b[4]])) # and n[3] = length(second chain)
if (p1 > p2) {
w <- 3
w2 <- 4
}
}
ok2 <- TRUE
} else {
stop("k < 3")
}
## There is no other chain: the modal chain is found!
} else if (!any(v3==1)) {
ok2 <- TRUE
}
}
## Update 'nc'
nc <- sum(n[w:(w2-1)])
#cat("Modal chain length = ", nc, "\n")
# d is the (absolute) Bowley quartile-skewness coefficient
d <- abs((qy[4] + qy[2] - 2*qy[3])/(qy[4] - qy[2]))
nc2 <- ny*(1-d)
#cat("d = ", d, "\n")
# keep only the values inside the selected modal chain
y <- yab[yab >= b[w] & yab <= b[w2]]
# stop when the chain kept everything, or when it is short enough
# relative to the bw-scaled acceptance threshold
if (nc == ny) {
ok1 <- TRUE
} else if (nc <= ifelse(nx < 30, nx/3, bw*nc2)) {
ok1 <- TRUE
} else {
ok1 <- FALSE
}
}
return(median(y))
}
|
4b92ff31357af26270dcc4cd5f96e2c06d8859ba | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/orientlib/examples/vector-classes.Rd.R | a1b03038f8b2488f4684888d798d946f97bb4768 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 326 | r | vector-classes.Rd.R | library(orientlib)
### Name: vector-classes
### Title: Orientation classes
### Aliases: eulerzyx-class eulerzxz-class rotvector-class quaternion-class
### skewvector-class vector-classes
### Keywords: classes
### ** Examples
x <- eulerzyx(0,pi/4,0)
x
eulerzxz(x)
rotmatrix(x)
rotvector(x)
quaternion(x)
skewvector(x)
|
908212b38b8d8230f35e7f690b5314f188afe8ca | e86279ca1dd11714fa8008119070fe1f8ffc09a1 | /nhmrcData/man/nhmrcRegion.Rd | 5c45a49d3885ed1ddfcdbcc4d4f6c06f7de76ff2 | [] | no_license | neilfws/politics | ce54fc128bcc9d24e56d253adb8dbc6592a03b8f | f5b97ef18424fd9ecb0738e254c06dc2e74c0dff | refs/heads/master | 2020-07-02T07:52:42.811420 | 2018-09-07T05:02:27 | 2018-09-07T05:02:27 | 74,316,422 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 608 | rd | nhmrcRegion.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nhmrcRegion.R
\docType{data}
\name{nhmrcRegion}
\alias{nhmrcRegion}
\title{NHMRC Expenditure by Region.}
\format{Data frame with columns
\describe{
\item{calendar_year}{Calendar year}
\item{state}{State or territory}
\item{value}{Expenditure for state or territory}
}}
\source{
https://www.nhmrc.gov.au/_files_nhmrc/file/grants/dataset/2015/2._summary_tables_2000_-_2015.xlsx
}
\usage{
nhmrcRegion
}
\description{
Summary of expenditure by state/territory and year.
}
\examples{
nhmrcRegion \%>\% glimpse()
}
\keyword{datasets}
|
caf64ee1e5ac5215dc8ef746898828ac88fc3c9b | 8b79e777c876ef1a24dcbb8c037293d46fd4d653 | /iowa_tweets/Untitled.R | 1c2aba88adb8986cd6f6b425c699d04e66629b60 | [
"MIT"
] | permissive | anjalimbhatt/sicss-culturalvariation | 8bf2148fb373f79b171601d7151e0e8c483a196d | 37839c1fca17b37232c9bef82bc9b895e426b50b | refs/heads/master | 2021-01-17T16:44:08.054507 | 2017-06-30T20:03:39 | 2017-06-30T20:03:39 | 95,474,175 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,361 | r | Untitled.R | get_our_places <- function(lat, lon, location, radius, apikey, pause_length){
lat = as.numeric(lat)
lon = as.numeric(lon)
MAX_PAGES <- 3
current.page = 1
token = "first"
all.places = data.frame(place.id = NA,
name = NA,
types = NA,
latitude = NA,
longitude = NA,
location = NA)
# first page
while(!is.null(token) & current.page < (MAX_PAGES + 1)) {
#print("hi")
page = list(google_places(radius = radius,
location = c(lat, lon),
key = apikey))
if(is.null(names(page))) {page = purrr::flatten(page)}
Sys.sleep(runif(1, 0, pause_length))
places = as.data.frame(cbind(place.id = page$results$place_id,
name = unlist(page$results$name),
types = page$results$types))
if(current.page == 1){
all.places = places
print(all.places)
} else {
all.places = bind_rows(all.places, places)
}
token = page$next_page_token
current.page = current.page + 1
print(current.page)
}
all.places
}
|
5c124029ca5d41b058f46f40ca1c6971bef95d34 | 1eede178f9f3a065d31fc12eb3393690a09a9a50 | /man/dot-extract_ranger_rules.Rd | d0fdd68b79b7e1bc884b9cc45b32d1b8add87661 | [
"MIT"
] | permissive | Hobbeist/varppRule | 86db8a984ec5cfb319edf93203ea198c0735584c | b1ecc6f188e4d4cfe27b2caeacd449d8783b8d7d | refs/heads/main | 2023-06-24T16:54:30.051550 | 2021-07-28T07:53:57 | 2021-07-28T07:53:57 | 317,065,570 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 492 | rd | dot-extract_ranger_rules.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{.extract_ranger_rules}
\alias{.extract_ranger_rules}
\title{Extract rules from ranger trees}
\usage{
.extract_ranger_rules(rf_results)
}
\arguments{
\item{rf_results}{the results fro mthe ranger tree generation within the varpp function}
}
\value{
a named vector of rules
}
\description{
This function returns rules based on the decision trees built in ranger. It depends on the function varpp
}
|
f9a555fdba81d5db0e6a39f8c7fb1cd155667489 | e8fc5fed9ea3cfd8b1cf1d513373ac85c2870e9a | /man/blast_parser.Rd | 60c9c45d0551d432139a8bf37e6f23c75c65c206 | [
"Apache-2.0"
] | permissive | phac-nml/plasmidprofiler | 53c06bcf7175bd3ed8821bb7e91aa08dd53c8145 | 3fb0e7692ffba4521b6bcdcd29f38b065cc2a571 | refs/heads/master | 2022-06-24T03:18:08.374418 | 2022-05-30T18:47:50 | 2022-05-30T18:47:50 | 81,258,002 | 6 | 3 | Apache-2.0 | 2022-05-30T18:47:50 | 2017-02-07T21:49:56 | R | UTF-8 | R | false | true | 691 | rd | blast_parser.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_related.R
\name{blast_parser}
\alias{blast_parser}
\title{Blast Results Parser Function}
\usage{
blast_parser(blast.results)
}
\arguments{
\item{blast.results}{Blast results loaded from read_blast or Global Env}
}
\value{
Blast table with pID adjusted by ratio of hit length to query length (larger as denominator)
}
\description{
Loads the imported blast results, extracts the desired columns, creates a new column containing the
ratio of hit length to query length (with the larger value as the denominator), and adjusts pID by this ratio. Any AMR results are removed from the returned data frame.
}
\examples{
\dontrun{
blast_parser(blastdata)
}
}
|
d52225723466baa965ad6312236c10fe59679485 | c1e70cdcdd6cba2a99b92525523d95742a7cfbd8 | /man/plotPosts.Rd | 950fbcf82a79fc145aee0f817b951e973edbba30 | [] | no_license | luiarthur/rcommon | 5fb2b1d6b1b884111396d699121631c3ad89c288 | 372595270399640670a88a169cc3ed9461c032b7 | refs/heads/master | 2021-06-24T09:12:27.902109 | 2021-01-13T17:19:55 | 2021-01-13T17:19:55 | 69,061,449 | 3 | 2 | null | null | null | null | UTF-8 | R | false | true | 307 | rd | plotPosts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotPost.R
\name{plotPosts}
\alias{plotPosts}
\title{plot posteriors}
\usage{
plotPosts(M, cnames = colnames(M), ...)
}
\description{
plot posteriors
}
\examples{
M <- matrix(rnorm(1000),ncol=4)
colnames(M) <- 1:4
plotPosts(M)
}
|
60fc7fbe6b9c9edd576c2e9980cb58ad44f7bb1b | 49b5e4e853a5f33934dd6ecfdc529da1d8cae88d | /man/compareTests.Rd | 49de883822ca9dea200b38f61eae372f453e85f1 | [] | no_license | davetgerrard/kanute | c452dfed9650888e1285232a13d2ed6cfa7cdd70 | 5a933db3b52ee448a7c9e081d2019e6827330073 | refs/heads/master | 2020-05-18T12:50:51.185802 | 2015-02-09T16:20:40 | 2015-02-09T16:20:40 | 29,590,609 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 900 | rd | compareTests.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/compareTests.R
\name{compareTests}
\alias{compareTests}
\title{compareTests}
\usage{
compareTests(x, softwares = names(x), versions = lapply(x, names),
ref.software = NULL, ref.versions = NULL, mapped.tests = NULL,
show.all.tests = is.null(mapped.tests))
}
\arguments{
\item{x}{param description}
\item{softwares}{param description}
\item{versions}{param description}
\item{ref.software}{param description}
\item{ref.versions}{param description}
\item{mapped.tests}{param description}
\item{show.all.tests}{param description}
}
\value{
some comparison results
}
\description{
Compares Tests.
}
\details{
The current version takes a library of test results from compileTestResults(). A generic function could be written that takes the result from above as an argument, or that accepts all the data and runs the above too.
}
|
09fb6e14a3544bc10e2a3e957b2da3a65c6aac51 | b634345d659f11ee4b8ec617891410f07ffc4819 | /R/colSums.R | a0d778b982014a214324147214f0dbf3eda0c4df | [] | no_license | cran/arrayhelpers | e7f5e6df5d968210a2c31881d050bfa6f4400a96 | c45a11e6828c933306c64daf0e7898d33c22e688 | refs/heads/master | 2021-01-17T14:53:08.472934 | 2020-02-04T15:10:09 | 2020-02-04T15:10:09 | 17,694,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,860 | r | colSums.R | ##' @include arrayhelpers.R
## From base/R/colSums.R
.colSums <- function (x, na.rm = FALSE, dims = 1L, drop = TRUE) {
if (length (dim (x)) < 2)
x <- as.matrix (x)
z <- base::colSums (x = x, na.rm = na.rm, dims = dims)
if (! drop){
d <- dim (x)
d [1L : dims] <- 1L
dn <- dimnames (x)
dn [1L : dims] <- list (NULL)
z <- structure (z, .Dim = d, .Dimnames = lon (dn))
}
z
}
.unclasscolSums <- function (x, ...) {
colSums (unclass (x), ...)
}
.test (.colSums) <- function (){
ao <- array (1:24, 4:2)
for (d in 1 : 2){
default <- base::colSums (a, dims = d)
drop <- colSums (a, dims = d, drop = TRUE)
nodrop <- colSums (a, dims = d, drop = FALSE)
checkEquals (default, drop, sprintf ("base version ./. drop = TRUE, dim = %i", d))
checkEqualsNumeric (c (default), c (nodrop), sprintf ("drop = TRUE ./. FALSE, dim = %i", d))
dd <- dim (default)
if (is.null (dd)) dd <- length (default)
checkEquals (dim (nodrop) [-(1 : d)], dd, sprintf ("result dimensions, d = %i", d))
checkTrue (all (sapply (dimnames (nodrop) [1 : d], is.null)))
checkEquals (dimnames (nodrop) [(d + 1) : ndim (nodrop)],
dimnames (a) [(d + 1) : ndim (a) ])
nodrop <- colSums (ao, dims = d, drop = FALSE)
checkEquals (dimnames (nodrop) [(d + 1) : ndim (nodrop)],
dimnames (ao) [(d + 1) : ndim (ao) ])
}
}
## TODO: Tests for AsIs, matrix
.colMeans <- function(x, na.rm = FALSE, dims = 1L, drop = TRUE){
if (length (dim (x)) < 2)
x <- as.matrix (x)
z <- base::colMeans (x, na.rm = na.rm, dims = dims)
if (! drop){
d <- dim (x)
d [1L : dims] <- 1L
dn <- dimnames (x)
dn [1L : dims] <- list (NULL)
z <- structure (z, .Dim = d, .Dimnames = lon (dn))
}
z
}
.unclasscolMeans <- function (x, ...) {
colMeans (unclass (x), ...)
}
.rowSums <- function(x, na.rm = FALSE, dims = 1L, drop = TRUE) {
if (length (dim (x)) < 2)
x <- as.matrix (x)
z <- base::rowSums (x, na.rm = na.rm, dims = dims)
if (! drop){
d <- dim (x)
d [(dims + 1L) : length (d)] <- 1L
dn <- dimnames (x)
dn [(dims + 1L) : length (dn)] <- list (NULL)
z <- structure (z, .Dim = d, .Dimnames = lon (dn))
}
z
}
.unclassrowSums <- function (x, ...) {
rowSums (unclass (x), ...)
}
.rowMeans <- function(x, na.rm = FALSE, dims = 1L, drop = TRUE)
{
if (length (dim (x)) < 2)
x <- as.matrix (x)
z <- base::rowMeans (x, na.rm = na.rm, dims = dims)
if (! drop){
d <- dim (x)
d [(dims + 1L) : length (d)] <- 1L
dn <- dimnames (x)
dn [(dims + 1L) : length (dn)] <- list (NULL)
z <- structure (z, .Dim = d, .Dimnames = lon (dn))
}
z
}
.unclassrowMeans <- function (x, ...) {
rowMeans (unclass (x), ...)
}
.test (.rowSums) <- function (){
a <- array (1:24, 4:2)
for (d in 1 : 2){
default <- base::rowSums (a, dims = d)
drop <- rowSums (a, dims = d, drop = TRUE)
nodrop <- rowSums (a, dims = d, drop = FALSE)
checkEquals (default, drop, sprintf ("base version ./. drop = TRUE, dim = %i", d))
checkEquals (c (default), c (nodrop), sprintf ("drop = TRUE ./. FALSE, dim = %i", d))
dd <- dim (default)
if (is.null (dd)) dd <- length (default)
checkEquals (dim (nodrop) [1 : d], dd, sprintf ("result dimensions, d = %i", d))
}
}
.test (.rowMeans) <- function (){
a <- array (1:24, 4:2)
for (d in 1 : 2){
default <- base::rowMeans (a, dims = d)
drop <- rowMeans (a, dims = d, drop = TRUE)
nodrop <- rowMeans (a, dims = d, drop = FALSE)
checkEquals (default, drop, sprintf ("base version ./. drop = TRUE, dim = %i", d))
checkEquals (c (default), c (nodrop), sprintf ("drop = TRUE ./. FALSE, dim = %i", d))
dd <- dim (default)
if (is.null (dd)) dd <- length (default)
checkEquals (dim (nodrop) [1 : d], dd, sprintf ("result dimensions, d = %i", d))
}
}
.test (.colMeans) <- function (){
a <- array (1:24, 4:2)
for (d in 1 : 2){
default <- base::colMeans (a, dims = d)
drop <- colMeans (a, dims = d, drop = TRUE)
nodrop <- colMeans (a, dims = d, drop = FALSE)
checkEquals (default, drop, sprintf ("base version ./. drop = TRUE, dim = %i", d))
checkEquals (c (default), c (nodrop), sprintf ("drop = TRUE ./. FALSE, dim = %i", d))
dd <- dim (default)
if (is.null (dd)) dd <- length (default)
checkEquals (dim (nodrop) [-(1L : d)], dd, sprintf ("result dimensions, d = %i", d))
}
}
##' @noRd
setGeneric ("colSums")
##' @noRd
setGeneric ("colMeans")
##' @noRd
setGeneric ("rowSums")
##' @noRd
setGeneric ("rowMeans")
##' Row and column sums and means for numeric arrays.
##'
##' These functions extend the respective base functions by (optionally) preserving the shape of the
##' array (i.e. the summed dimensions have length 1).
##'
##' @param x an array of two or more dimensions, containing numeric, complex, integer or logical
##' values, or a numeric data frame.
##' @param na.rm logical indicating treatment of missing values
##' @param dims integer: Which dimensions are regarded as \sQuote{rows} or \sQuote{columns} to sum
##' over. For \code{row*}, the sum or mean is over dimensions \code{dims + 1, \dots}; for \code{col*}
##' it is over dimensions \code{1 : dims}.
##' @param ... the \code{signature = "AsIs"} methods hand on all parameters
##' @param drop If \code{FALSE}, the number of dimensions is retained: the length of the dimensions
##' that are summed or averaged is set to 1. \code{TRUE} yield the same behaviour as
##' \code{\link[base]{colSums}}
##' @return like \code{\link[base]{colSums}} if \code{drop = TRUE}, otherwise an array where the
##' summed dimensions have length 1.
##' @author Claudia Beleites
##' @seealso \code{\link[base]{colSums}}
##' @keywords array algebra arith
##' @docType methods
##' @rdname colSums
##' @export
##'
##' @examples
##' a <- array (1 : 24, 4 : 2)
##' a
##'
##' rowSums (a)
##' rowSums (a, drop = FALSE)
##'
##' colSums (a)
##' colSums (a, drop = FALSE)
##'
##' colSums (a, dim = 2)
##' colSums (a, dim = 2, drop = FALSE)
##'
setMethod ("colSums", signature = signature (x = "matrix"), .colSums)
# colSums.matrix <- .colSums # I still get base::colSums :-(
##' @rdname colSums
##' @export
colSums.AsIs <- .unclasscolSums
# setMethod ("colSums", signature = signature (x = "AsIs"), .unclasscolSums)
##' @rdname colSums
##' @export
setMethod ("colSums", signature = signature (x = "array"), .colSums)
##' @rdname colSums
##' @export
setMethod ("colMeans", signature = signature (x = "matrix"), .colMeans)
##' @rdname colSums
##' @export
colMeans.AsIs <- .unclasscolMeans
##setMethod ("colMeans", signature = signature (x = "AsIs"), .unclasscolMeans)
##' @rdname colSums
##' @export
setMethod ("colMeans", signature = signature (x = "array"), .colMeans)
##' @rdname colSums
##' @export
setMethod ("rowSums", signature = signature (x = "matrix"), .rowSums)
##' @rdname colSums
##' @export
rowSums.AsIs <- .unclassrowSums
#setMethod ("rowSums", signature = signature (x = "AsIs"), .unclassrowSums)
##' @rdname colSums
##' @export
setMethod ("rowSums", signature = signature (x = "array"), .rowSums)
##' @rdname colSums
##' @export
setMethod ("rowMeans", signature = signature (x = "matrix"), .rowMeans)
##' @rdname colSums
##' @export
rowMeans.AsIs <- .unclassrowMeans
##setMethod ("rowMeans", signature = signature (x = "AsIs"), .unclassrowMeans)
##' @rdname colSums
##' @export
setMethod ("rowMeans", signature = signature (x = "array"), .rowMeans)
testAsIs <- function (){
methods <- c("colSums", "colMeans", "rowSums", "rowMeans")
for (fn in methods){
f <- get (fn)
for (d in 1L : 2L)
checkEquals (f (a, dims = d), f (I (a), dims = d), msg = sprintf ("AsIs: %s, dims = %i", fn, d))
}
}
|
ef0d09db440c3366121439ffe3af65d028a4ca7d | 0f17bbb804fd000348fb001806216622cf3aae4b | /LT2D/man/round.lik.Rd | b75e4c4ea8f5287da5c2da3655ef6ed72f602081 | [] | no_license | calliste-fagard-jenkin/LT2D-work | 858ec0bdcd3790e4a3d66acc10f40675b0f8dcdd | e1a4c72ab0888ab2f831f8f24b94b58a24f07947 | refs/heads/master | 2022-09-08T04:20:25.201070 | 2022-09-05T12:06:37 | 2022-09-05T12:06:37 | 147,014,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 976 | rd | round.lik.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2DLTfunctions mixture.r
\name{round.lik}
\alias{round.lik}
\title{negative log likelihood for rounded data}
\usage{
\method{round}{lik}(rounded, pi.x, logphi, rmin, ymax, hr, b, w)
}
\arguments{
\item{pi.x}{character ; user-chosen density model}
\item{logphi}{numeric vector ; parameters for pi.x}
\item{rmin}{numeric ; value below which radial distances have been
rounded to 0}
\item{ymax}{numeric ; maximum forward distance at which we can detect animals}
\item{hr}{character ; user-chosen hazard rate}
\item{b}{numeric vector ; parameters for hr}
\item{w}{numeric ; perpendicular truncation distance}
\item{x}{numeric ; perpendicular distance of animal from the transect}
\item{undrounded}{numeric ; the number of observations in the data set with
radial distance <= rmin}
}
\value{
numeric ; negative log likelihood of the rounded data
}
\description{
Negative log likelihood for rounded data
}
|
7be39b0e9b0651b81260d1d3d213592948258b1e | c6fcb9d0c9bd3b243f62eb73a30152f7cd46587f | /man/plotHigherOrderSequence.Rd | 2a6b6f16a062270a8bed9fc1066601dbcc939c07 | [] | no_license | shazanfar/scHOT | e94d30b6bc54b6e05d16d8abf6b06305bc02f0be | b937f48890c3bf81a9ca5a1fcc96376b9b0c013e | refs/heads/master | 2023-06-13T04:36:15.623158 | 2023-05-30T05:35:04 | 2023-05-30T05:35:04 | 235,541,754 | 10 | 1 | null | 2023-05-30T05:35:06 | 2020-01-22T09:43:09 | R | UTF-8 | R | false | true | 2,209 | rd | plotHigherOrderSequence.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scHOT_plot.R
\name{plotHigherOrderSequence}
\alias{plotHigherOrderSequence}
\title{plotHigherOrderSequence}
\usage{
plotHigherOrderSequence(
scHOT,
gene,
positionType = NULL,
branches = NULL,
positionColData = NULL
)
}
\arguments{
\item{scHOT}{A scHOT object with higherOrderSequence in scHOT_output slot}
\item{gene}{is either a logical vector matching rows of entries
in wcorsList, or a character of a gene}
\item{positionType}{A string indicates the position type,
either trajectory or spatial}
\item{branches}{A character indicating the column name of colData that stores
the branch information (for the trajectory type of data)}
\item{positionColData}{A vector indicating the column names of colData
that store the position information (for the spatial type of data)}
}
\value{
\code{ggplot} object with line plots
}
\description{
the plotHigherOrderSequence function plots weighted higher order
statistic vectors (stored in higherOrderSequence) as line plots
}
\examples{
data(liver)
scHOT_traj <- scHOT_buildFromMatrix(
mat = liver$liver_branch_hep,
cellData = list(pseudotime = liver$liver_pseudotime_hep),
positionType = "trajectory",
positionColData = "pseudotime")
scHOT_traj
plotColouredExpression(scHOT_traj, c("Cdt1","Top2a"), n = 5)
scHOT_traj <- scHOT_addTestingScaffold(scHOT_traj,
t(as.matrix(c("Cdt1", "Top2a"))))
scHOT_traj <- scHOT_setWeightMatrix(scHOT_traj,
positionColData = c("pseudotime"),
positionType = "trajectory",
nrow.out = NULL,
span = 0.25)
scHOT_traj <- scHOT_calculateGlobalHigherOrderFunction(scHOT_traj,
higherOrderFunction =
weightedSpearman,
higherOrderFunctionType =
"weighted")
scHOT_traj <- scHOT_calculateHigherOrderTestStatistics(scHOT_traj,
higherOrderSummaryFunction =
sd)
slot(scHOT_traj, "scHOT_output")
plotHigherOrderSequence(scHOT_traj, c("Cdt1_Top2a"))
}
|
0c9471b75f421954a3065b9b362f01113c3a44d2 | ecc70a299e288f8e918d35e1d224de70584cb3fc | /man/mscale_derivative.Rd | 79dac21e0b7fdb3a72bbb0f324ad5f0993749272 | [] | no_license | cran/pense | 3688619d950a64e9ba63d252397717a807326f51 | 5db123d0f391536b1ba8b659e3103f9601342eb9 | refs/heads/master | 2023-02-18T19:27:26.789833 | 2023-02-07T07:12:32 | 2023-02-07T07:12:32 | 108,246,999 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,337 | rd | mscale_derivative.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{mscale_derivative}
\alias{mscale_derivative}
\alias{max_mscale_derivative}
\alias{max_mscale_grad_hess}
\title{Compute the Gradient and Hessian of the M-Scale Function}
\usage{
mscale_derivative(
x,
bdp = 0.25,
order = 1,
cc = consistency_const(bdp, "bisquare"),
opts = mscale_algorithm_options()
)
max_mscale_derivative(
x,
grid,
n_change,
bdp = 0.25,
cc = consistency_const(bdp, "bisquare"),
opts = mscale_algorithm_options()
)
max_mscale_grad_hess(
x,
grid,
n_change,
bdp = 0.25,
cc = consistency_const(bdp, "bisquare"),
opts = mscale_algorithm_options()
)
}
\arguments{
\item{x}{numeric values. Missing values are verbosely ignored.}
\item{bdp}{desired breakdown point (between 0 and 0.5).}
\item{order}{compute the gradient (\code{order=1}) or the gradient and the
Hessian (\code{order=2}).}
\item{cc}{cutoff value for the bisquare rho function.
By default, chosen to yield a consistent estimate for the
Normal distribution.}
\item{opts}{a list of options for the M-scale estimation algorithm,
see \code{\link[=mscale_algorithm_options]{mscale_algorithm_options()}} for details.}
\item{grid}{a grid of values to replace the first 1 - \code{n_change} elements in \code{x}.}
\item{n_change}{the number of elements in \code{x} to replace with each value in \code{grid}.}
}
\value{
a vector of derivatives of the M-scale function, one per element in \code{x}.
a vector with 4 elements:
\enumerate{
\item the maximum absolute value of the gradient,
\item the maximum absolute value of the Hessian elements,
\item the M-scale associated with 1., and
\item the M-scale associated with 2.
}
the maximum absolute derivative over the entire grid.
}
\description{
Compute the derivative (gradient) or the Hessian of the M-scale function
evaluated at the point \code{x}.
Compute the maximum derivative of the M-scale function with respect to each element over
a grid of values.
Compute the maximum element in the gradient and Hessian of the M-scale
function with respect to each element over a grid of values.
}
\section{Functions}{
\itemize{
\item \code{max_mscale_derivative()}: maximum of the gradient
\item \code{max_mscale_grad_hess()}: maximum of the gradient and hessian
}}
\keyword{internal}
|
617c44bd7a67198f5c8f6c96c06da49a92a7ead6 | f317887c7d83e62235ba2cf19065dcef9244f645 | /R/add_refmark.bare.R | a2fc11c1deebcdca6bf4101e393ddac44706a1c0 | [] | no_license | rrprf/tablesgg | 3fec64842266f8a7f28e29899d31c673b5dad09c | 1a60f894869326b34eff1804c9378a1c05e78a79 | refs/heads/master | 2023-05-07T14:12:05.102317 | 2021-06-03T14:45:34 | 2021-06-03T14:45:34 | 318,291,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,088 | r | add_refmark.bare.R | #===== Source file: ../add_refmark.r on 2021-06-02
#-----
add_refmark <- function(text, textspec, mark, side, raise)
{
stopifnot(length(textspec) == length(text))
is_math <- (textspec == "plotmath") # text already includes plotmath
is_mkdn <- (textspec == "markdown") # text already includes markdown/HTML
is_plain <- !(is_math | is_mkdn)
# Whether markdown should be used as the default for raised reference marks:
default_mkdn <- tablesggOpt("allowMarkdown")
if (raise && !default_mkdn && any(grepl("\\n", text[is_plain]))) stop(
"Newlines found in entry text will not display correctly when a reference ",
"mark is added using 'plotmath'. Consider enabling markdown instead.")
if (side == "before") {
if (raise) {
text[is_math] <- paste0('phantom()^paste("', mark, '")*', text[is_math])
text[is_mkdn] <- paste0('<sup>', mark, '</sup>', text[is_mkdn])
if (default_mkdn) {
text[is_plain] <- paste0('<sup>', mark, '</sup>', text[is_plain])
} else {
text[is_plain] <- paste0('phantom()^paste("', mark, '")*paste("',
text[is_plain], '")')
}
} else {
text[is_math] <- paste0('paste("', mark, '")*', text[is_math])
text[!is_math] <- paste0(mark, text[!is_math])
}
} else {
if (raise) {
text[is_math] <- paste0(text[is_math], '*phantom()^paste("', mark, '")')
text[is_mkdn] <- paste0(text[is_mkdn], '<sup>', mark, '</sup>')
if (default_mkdn) {
text[is_plain] <- paste0(text[is_plain], '<sup>', mark, '</sup>')
} else {
text[is_plain] <- paste0('paste("', text[is_plain],
'")*phantom()^paste("', mark, '")')
}
} else {
text[is_math] <- paste0(text[is_math], '*paste("', mark, '")')
text[!is_math] <- paste0(text[!is_math], mark)
}
}
if (raise) { # need to update 'textspec' for "plain" entries
textspec[is_plain] <- { if (default_mkdn) "markdown" else "plotmath" }
}
dim(textspec) <- dim(text)
list(text=text, textspec=textspec)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.