blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70c739066ad5e3c82770f0fb4aa0d955458ed923
|
ab79177ad95b0e89d70210a3478b91f98cdb6b30
|
/tests/testthat/test_fmriglm.R
|
f7f87b498ea1c279f8509588dec65423f432f5b2
|
[] |
no_license
|
bbuchsbaum/fmrireg
|
93e69866fe8afb655596aa23c6f9e3ca4004a81c
|
2dd004018b3b7997e70759fc1652c8d51e0398d7
|
refs/heads/master
| 2023-05-10T17:01:56.484913
| 2023-05-09T14:38:24
| 2023-05-09T14:38:24
| 18,412,463
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,516
|
r
|
test_fmriglm.R
|
# Parallel workers for model fitting; 21 is machine-specific — TODO confirm.
options(mc.cores=21)
# Example face-perception design shipped with the fmrireg package.
facedes <- read.table(system.file("extdata", "face_design.txt", package = "fmrireg"), header=TRUE)
# Model repetition number as a categorical factor.
facedes$repnum <- factor(facedes$rep_num)
#' Write a random binary mask volume to a temporary NIfTI file.
#'
#' @param d Integer vector of volume dimensions, e.g. c(10, 10, 10).
#' @param perc Probability threshold: each voxel is 1 when its uniform draw
#'   exceeds `perc`. Defaults to 0.5, matching the previous behavior where
#'   `perc` was accepted but silently ignored.
#' @return Path to the written .nii file.
gen_mask_file <- function(d, perc = 0.5) {
  # Bernoulli mask over all voxels (the unused `arr` scratch array is gone).
  vals <- ifelse(runif(prod(d)) > perc, 1, 0)
  vol <- NeuroVol(vals, NeuroSpace(d))
  fname <- paste0(tempfile(), ".nii")
  write_vol(vol, fname)
  fname
}
#' Write `nscans` random 4D volumes to temporary NIfTI files.
#'
#' @param d Integer vector of 4D dimensions, e.g. c(10, 10, 10, 320).
#' @param nscans Number of scan files to generate.
#' @return List of file paths, one per generated scan.
gen_fake_dataset <- function(d, nscans) {
  # Preallocate the result list; seq_len() is safe for nscans == 0.
  onames <- vector(length=nscans, mode="list")
  for (i in seq_len(nscans)) {
    arr <- array(rnorm(prod(d)), d)
    bspace <- neuroim2::NeuroSpace(dim=d)
    vec <- neuroim2::NeuroVec(arr, bspace)
    fname <- paste0(tempfile(), ".nii")
    write_vec(vec, fname)
    # [[ ]] assigns the element itself rather than splicing a sublist.
    onames[[i]] <- fname
  }
  onames
}
## test that latent and fmri_mem_dataset of same underlying latent dataset produce the same betas
# Smoke test: fit a chunkwise GLM on pure-noise in-memory scans (one
# 10x10x10x244 volume per run) and check a model object comes back.
test_that("can construct and run a simple fmri glm from in memory dataset", {
# One random scan per run in the face design.
scans <- lapply(1:length(unique(facedes$run)), function(i) {
arr <- array(rnorm(10*10*10*244), c(10,10,10, 244))
bspace <- neuroim2::NeuroSpace(dim=c(10,10,10,244))
neuroim2::NeuroVec(arr, bspace)
})
# Random logical mask keeping roughly half the 10x10x10 voxels.
mask <- neuroim2::LogicalNeuroVol(array(rnorm(10*10*10), c(10,10,10)) > 0, neuroim2::NeuroSpace(dim=c(10,10,10)))
#scans <- list.files("test_data/images_study/epi/", "rscan0.*nii", full.names=TRUE)
dset <- fmri_mem_dataset(scans=scans,
mask=mask,
TR=1.5,
event_table=facedes)
# Repetition-number regressors convolved with the default HRF.
mod <- fmri_lm(onset ~ hrf(repnum), block = ~ run, dataset=dset, durations=0, strategy="chunkwise", nchunks=4)
expect_true(!is.null(mod))
})
# Same in-memory fit, plus a single pairwise contrast, comparing runwise,
# equal-meta-weighted, and chunkwise strategies for agreement.
test_that("can construct and run a simple fmri glm from in memory dataset and one contrast", {
scans <- lapply(1:length(unique(facedes$run)), function(i) {
arr <- array(rnorm(10*10*10*244), c(10,10,10, 244))
bspace <- neuroim2::NeuroSpace(dim=c(10,10,10,244))
neuroim2::NeuroVec(arr, bspace)
})
mask <- neuroim2::LogicalNeuroVol(array(rnorm(10*10*10), c(10,10,10)) > 0, neuroim2::NeuroSpace(dim=c(10,10,10)))
#scans <- list.files("test_data/images_study/epi/", "rscan0.*nii", full.names=TRUE)
dset <- fmri_mem_dataset(scans=scans,
mask=mask,
TR=1.5,
event_table=facedes)
# NOTE(review): assigned with <<- so the contrast set is visible when the
# hrf() formula is evaluated (apparently in the global env) — confirm.
con <<- contrast_set(pair_contrast( ~ repnum == 1, ~ repnum == 2, name="rep2_rep1"))
mod1 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0)
mod1a <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0,
meta_weighting="equal")
mod2 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0,
strategy="chunkwise", nchunks=10)
expect_true(!is.null(mod1))
expect_true(!is.null(mod1a))
expect_true(!is.null(mod2))
# One contrast -> one estimate column; same voxel count across strategies.
expect_equal(ncol(mod1$result$contrasts$estimate), 1)
expect_equal(ncol(mod1a$result$contrasts$estimate), 1)
expect_equal(ncol(mod2$result$contrasts$estimate), 1)
expect_equal(nrow(mod1$result$contrasts$estimate), nrow(mod2$result$contrasts$estimate))
# Runwise and chunkwise contrast estimates should agree almost exactly.
c1 <- cor(mod1$result$contrasts$estimate[,1], mod2$result$contrasts$estimate[,1])
expect_true(c1> .97)
})
# Single-column matrix dataset (one "voxel"): two contrasts should give a
# two-column contrast estimate matrix under both fitting strategies.
test_that("can construct and run a simple fmri glm from a matrix_dataset with 1 column", {
# One noise time series recycled across the 6 runs of 244 scans each.
vals <- rep(rnorm(244),6)
dset <- matrix_dataset(as.matrix(vals),TR=1.5, run_length=rep(244,6), event_table=facedes)
c1 <- pair_contrast( ~ repnum == 1, ~ repnum == 2, name="rep2_rep1")
c2 <- pair_contrast( ~ repnum == 3, ~ repnum == 4, name="rep3_rep4")
# NOTE(review): <<- makes the set visible to the hrf() formula — confirm.
con <<- contrast_set(c1,c2)
mod1 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0)
mod2 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0,
strategy="chunkwise", nchunks=1)
expect_true(!is.null(mod1))
expect_equal(ncol(mod1$result$contrasts$estimate), 2)
expect_equal(ncol(mod2$result$contrasts$estimate), 2)
})
# 100-column matrix dataset with a synthetic 4-level letter factor and a
# mix of pair and unit contrasts; checks stats() and beta extraction run.
test_that("fmri glm for multivariate matrix and complex contrast ", {
vals <- do.call(cbind, lapply(1:100, function(i) rnorm(244*6)))
# Drop null events and slow trials, then attach a shuffled letter factor.
fd <- subset(facedes, null == 0 & rt < 2)
fd$letter <- sample(factor(rep(letters[1:4], length.out=nrow(fd))))
dset <- matrix_dataset(vals,TR=1.5, run_length=rep(244,6), event_table=fd)
# NOTE(review): <<- keeps the set visible to the hrf() formula — confirm.
cset <<- contrast_set(pair_contrast( ~ letter %in% c("a", "b"),
~ letter %in% c("c", "d"),
name="abcd_efgh"),
pair_contrast( ~ letter %in% c("a", "c"),
~ letter %in% c("b", "d"),
name="ijkl_mnop"),
unit_contrast(~ letter, "letter"))
#c3 <- unit_contrast(~ letter, "letter")
# bmod <- baseline_model(basis="constant", degree=1, intercept="none", sframe=dset$sampling_frame)
mod1 <- fmri_lm(onset ~ hrf(letter, contrasts=cset),
#baseline_model=bmod,
block = ~ run, dataset=dset, durations=0, nchunks=1,strategy="chunkwise")
# Exercise the accessors; the values themselves are noise.
zz <- stats(mod1, "contrasts")
betas <- mod1$result$betas$estimate
expect_true(!is.null(mod1))
})
# Two-column matrix dataset; two contrasts -> two contrast columns from
# both the runwise and chunkwise strategies.
test_that("can construct and run a simple fmri glm from a matrix_dataset with 2 columns", {
vals <- cbind(rep(rnorm(244),6), rep(rnorm(244),6))
dset <- matrix_dataset(as.matrix(vals),TR=1.5, run_length=rep(244,6), event_table=facedes)
c1 <- pair_contrast( ~ repnum == 1, ~ repnum == 2, name="rep2_rep1")
c2 <- pair_contrast( ~ repnum == 2, ~ repnum == 3, name="rep3_rep2")
# NOTE(review): <<- keeps the set visible to the hrf() formula — confirm.
con <<- contrast_set(c1,c2)
mod1 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0)
mod2 <- fmri_lm(onset ~ hrf(repnum, contrasts=con), block = ~ run, dataset=dset, durations=0,
strategy="chunkwise", nchunks=1)
expect_true(!is.null(mod1))
expect_true(!is.null(mod2))
expect_equal(ncol(mod1$result$contrasts$estimate), 2)
expect_equal(ncol(mod2$result$contrasts$estimate), 2)
})
# Two hrf() terms with subset= and prefix= arguments: repetitions 1/2 and
# 3/4 modeled as separate terms; 2 levels x 2 terms = 4 beta columns.
test_that("can construct and run a simple fmri glm two terms and prefix args", {
vals <- cbind(rep(rnorm(244),6), rep(rnorm(244),6))
dset <- matrix_dataset(as.matrix(vals),TR=1.5, run_length=rep(244,6), event_table=facedes)
mod1 <- fmri_lm(onset ~ hrf(repnum, subset=repnum %in% c(1,2), prefix="r12")+
hrf(repnum, subset=repnum %in% c(3,4), prefix="r34"),
block = ~ run, dataset=dset, durations=0)
expect_true(!is.null(mod1))
#expect_true(!is.null(mod2))
expect_equal(ncol(mod1$result$betas$estimate), 4)
#expect_equal(ncol(mod2$result$contrasts$estimate()), 2)
})
# Video design: one-vs-all contrasts per video level, separate Encod and
# Recall terms, fit with runwise and two chunkwise configurations.
test_that("can run video fmri design with matrix_dataset", {
des <- read.table(system.file("extdata", "video_design.txt", package = "fmrireg"), header=TRUE)
# Seven runs of 320 scans each.
events <- rep(320,7)
sframe <- sampling_frame(rep(320, length(events)), TR=1.5)
# Event/baseline models built here only to check they construct; the fits
# below specify their formulas directly.
evmod <- event_model(Onset ~ hrf(Video, Condition, basis="spmg1"),
block = ~ run, sampling_frame=sframe, data=des)
bmod <- baseline_model(basis="bs", degree=4, sframe=sframe)
fmod <- fmri_model(evmod, bmod)
dset <- matrix_dataset(matrix(rnorm(320*7*100), 320*7, 100),TR=1.5, run_length=rep(320,7), event_table=des)
#conset <- fmrireg::one_against_all_contrast(levels(des$Video), "Video")
# Build one "this video vs all others" pair contrast per video level.
# NOTE(review): <<- keeps the set visible to the hrf() formulas — confirm.
conset <<- do.call("contrast_set", lapply(levels(factor(des$Video)), function(v) {
f1 <- as.formula(paste("~ Video == ", paste0('"', v, '"')))
f2 <- as.formula(paste("~ Video != ", paste0('"', v, '"')))
pair_contrast(f1, f2, name=paste0(v, "_vsall"))
}))
res1 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run, dataset=dset,
strategy="runwise")
res2 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run, dataset=dset,
strategy="chunkwise", nchunks=12)
res3 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run, dataset=dset,
strategy="chunkwise", nchunks=1)
# All strategies should produce betas and contrast estimates.
expect_true(!is.null(coef(res1)))
expect_true(!is.null(coef(res2)))
expect_true(!is.null(coef(res3)))
expect_true(!is.null(coef(res1, "contrasts")))
expect_true(!is.null(coef(res2, "contrasts")))
expect_true(!is.null(coef(res3, "contrasts")))
})
# Same video design, but scans live on disk (fmri_dataset over generated
# NIfTI files) and are fit chunkwise only.
test_that("can run video fmri design with fmri_file_dataset", {
library(neuroim2)
des <- read.table(system.file("extdata", "video_design.txt", package = "fmrireg"), header=TRUE)
events <- rep(320,7)
sframe <- sampling_frame(rep(320, length(events)), TR=1.5)
# Seven random 10x10x10x320 scans plus a random mask, written to tempfiles.
scans <- gen_fake_dataset(c(10,10,10,320), 7)
maskfile <- gen_mask_file(c(10,10,10))
dset <- fmri_dataset(scans, maskfile,TR=1.5, rep(320,7), base_path="/", mode="bigvec", event_table=as_tibble(des))
# Built only to check construction; fits below use explicit formulas.
evmod <- event_model(Onset ~ hrf(Video, Condition, basis="spmg1"),
block = ~ run, sampling_frame=sframe, data=des)
bmod <- baseline_model(basis="bs", degree=4, sframe=sframe)
fmod <- fmri_model(evmod, bmod)
# Reset, then rebuild the global one-vs-all contrast set (<<- so the
# hrf() formulas can find it — confirm).
conset <<- NULL
conset <<- do.call("contrast_set", lapply(levels(factor(des$Video)), function(v) {
f1 <- as.formula(paste("~ Video == ", paste0('"', v, '"')))
f2 <- as.formula(paste("~ Video != ", paste0('"', v, '"')))
pair_contrast(f1, f2, name=paste0(v, "_vsall"))
}))
res2 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run,
dataset=dset,
strategy="chunkwise", nchunks=22)
res3 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run, dataset=dset,
strategy="chunkwise", nchunks=1)
expect_true(!is.null(coef(res2)))
expect_true(!is.null(coef(res3)))
expect_true(!is.null(coef(res2, "contrasts")))
expect_true(!is.null(coef(res3, "contrasts")))
})
# Latent dataset: PCA-compress file-backed scans into a LatentNeuroVec,
# fit fmri_latent_lm (plain, bootstrap, AR1), and also run a voxelwise
# fmri_lm on the same files to exercise both code paths.
test_that("can run video fmri design with latent_dataset", {
#library(multivarious)
des <- read.table(system.file("extdata", "video_design.txt", package = "fmrireg"), header=TRUE)
events <- rep(320,7)
sframe <- sampling_frame(rep(320, length(events)), TR=1.5)
scans <- gen_fake_dataset(c(10,10,10,320), 7)
vecs <- lapply(scans, read_vec)
maskfile <- gen_mask_file(c(10,10,10))
mask <- read_vol(maskfile)
# Time x voxel matrices of in-mask series, runs stacked row-wise.
mats <- lapply(vecs, function(v) series(v, mask!=0))
mat <- do.call(rbind, mats)
# PCA basis (488 components) with no preprocessing.
pres <- multivarious::pca(mat, ncomp=488, preproc=multivarious::pass())
lvec <- neuroim2::LatentNeuroVec(pres$s, pres$v, add_dim(space(mask), nrow(mat)),
mask=mask)
ldset <- latent_dataset(lvec, 1.5, run_length=rep(320,7), des)
evmod <- event_model(Onset ~ hrf(Video, Condition, basis="spmg1"),
block = ~ run, sampling_frame=sframe, data=des)
# One-vs-all contrasts for the Encod term and the rec_-prefixed Recall
# term; <<- so the hrf() formulas can resolve them — confirm.
conset <<- NULL
conset <<- do.call("contrast_set", lapply(levels(factor(des$Video)), function(v) {
f1 <- as.formula(paste("~ Video == ", paste0('"', v, '"')))
f2 <- as.formula(paste("~ Video != ", paste0('"', v, '"')))
pair_contrast(f1, f2, name=paste0(v, "_vsall"))
}))
conset2 <<- do.call("contrast_set", lapply(levels(factor(des$Video)), function(v) {
f1 <- as.formula(paste("~ rec_Video == ", paste0('"', v, '"')))
f2 <- as.formula(paste("~ rec_Video != ", paste0('"', v, '"')))
pair_contrast(f1, f2, name=paste0("rec_", v, "_vsall"))
}))
res2 <- fmrireg:::fmri_latent_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec", contrasts=conset2),
block= ~ run,
autocor="none", dataset=ldset)
# Reconstructed (voxel-space) standard errors and stats from the latent fit.
se1 <- standard_error.fmri_latent_lm(res2, "contrasts", recon=TRUE)
con1 <- stats.fmri_latent_lm(res2, "contrasts", recon=TRUE)
dset <- fmri_dataset(scans, maskfile,TR=1.5, rep(320,7), base_path="/", event_table=des)
# NOTE(review): res2 is reassigned below, discarding the first fit.
res2 <- fmrireg:::fmri_latent_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec", contrasts=conset2),
block= ~ run,
autocor="none", bootstrap=TRUE, nboot=50, dataset=ldset)
res2a <- fmrireg:::fmri_latent_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec", contrasts=conset2),
block= ~ run,
autocor="ar1", dataset=ldset)
res3 <- fmrireg:::fmri_lm(Onset ~ hrf(Video, subset=Condition=="Encod", contrasts=conset) +
hrf(Video, subset=Condition=="Recall", prefix="rec"), block= ~ run,
strategy="chunkwise", nchunks=1, dataset=dset)
se2 <- standard_error(res3, "contrasts")
con2 <- stats(res3, "contrasts")
expect_true(!is.null(se2))
expect_true(!is.null(con2))
})
# test_that("a one-run, one-contrast linear model analysis", {
# df1 <- subset(imagedes,run==1)
# df1 <- subset(df1, !is.na(onsetTime))
#
# df1$sdur <- scale(df1$duration)[,1]
#
# dmat <- matrix(rnorm(400*100), 400, 100)
# md <- matrix_dataset(dmat, TR=1.5, run_length=400, event_table=df1)
# con <- contrast_set(contrast( ~ Thorns - Massage, name="Thorns_Massage"))
# mod <- fmri_lm(onsetTime ~ hrf(imageName, subset = !is.na(onsetTime), contrasts=con), ~ run, dataset=md, durations=sdur)
#
# })
# test_that("a two-run, one contrast linear model analysis", {
# df1 <- subset(imagedes,run %in% c(1,2))
# df1 <- subset(df1, !is.na(onsetTime))
#
# df1$sdur <- scale(df1$duration)[,1]
#
# dmat <- matrix(rnorm(800*100), 800, 100)
# md <- matrix_dataset(dmat, TR=1.5, run_length=c(400,400), event_table=df1)
# con <- contrast_set(contrast( ~ Thorns - Massage, name="Thorns_Massage"))
# mod <- fmri_lm(onsetTime ~ hrf(imageName, contrasts=con), ~ run, dataset=md, durations=sdur)
#
#
# })
# test_that("can load and run a simple config file", {
# config <- read_fmri_config("test_data/images_study/config.R")
# dset <- fmri_dataset(config$scans, config$mask, config$TR,
# config$run_length,
# config$event_table,
# config$aux_table,
# base_path=config$base_path)
#
# frame <- sampling_frame(dset$run_length, config$TR)
# mod <- fmri_model(config$event_model, config$baseline_model, config$design, dset$aux_table,
# basis=HRF_SPMG1, dset$runids, dset$run_length, config$TR, drop_empty=TRUE)
#
#
# mod <- fmri_glm(config$event_model,
# dataset=dset, durations=0)
# })
|
0704e4d368ca0018f2be60e59a10218b85f0c9c3
|
3b6e33423205f0766e050f5e215b44c69b730210
|
/experiment/ModelBSP-Subsequence.R
|
422a4a760d08e5f169d6c4b58f147d3c288aca8f
|
[
"MIT"
] |
permissive
|
marcosamaris/BSPGPU
|
f200b920fa28255ecebfc3bc801a7a948656d192
|
41e6e8cf836fdc2b501e48ce58de7d752daf2926
|
refs/heads/master
| 2021-01-22T04:57:44.867505
| 2015-11-17T13:14:27
| 2015-11-17T13:14:27
| 26,322,065
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,666
|
r
|
ModelBSP-Subsequence.R
|
############ Maximum Subsequence GLobal Memory #################
### BSP MULTILEVEL MEMORY
# Predicts GPU kernel times for the Maximum Subsequence problem with a
# BSP-style multilevel-memory model and compares them to measured times.
# The palette of color
cbbPalette <- gray(1:6/ 8)#c("red", "blue", "darkgray", "orange","black","brown", "lightblue","violet")
# Problem sizes 2^17 .. 2^29 elements.
Nn <- 17:29;
N <- 2^Nn;
# Kernel launch configuration (shared by every GPU below).
gridsize <- 32;
blocksize <- 128
numberthreads <- gridsize * blocksize;
# Elements per block / per thread, vectors over all sizes in N.
N_perBlock <- N/gridsize;
N_perThread <- N_perBlock/blocksize;
# Model latencies in cycles — assumed identical for all GPUs; TODO confirm.
latencySharedMemory <- 5; #Cycles per processor
latencyGlobalMemory <- latencySharedMemory* 100; #Cycles per processor
latencyL1 <- latencySharedMemory; #Cycles per processor
latencyL2 <- latencyGlobalMemory*0.5; #Cycles per processor
# Placeholders for the 12 measured mean runtimes per GPU (filled below).
SubSeqMax_gt630 <- 1:12;
SubSeqMax_gtx660 <- 1:12;
SubSeqMax_gtx680 <- 1:12;
SubSeqMax_Titan <- 1:12;
SubSeqMax_Tesla <- 1:12;
SubSeqMax_Tesla_k40 <- 1:12;
##### GeForce GT-630 #####
# Mean measured runtime over 10 repetitions for each of the 12 sizes.
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/gt-630/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_gt630[i]<- mean(temp[i,1:10])
}
# Device specs: core count and clock.
PCoresNumber_GPU <- 96;
clockFrequency_GPU <- 1620; # Mhz
#bandWidth_GT630 <- memoryClockRate_GT630 * memoryBusWidth_GT630 *2; # MB/s
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
# Fractions of reads assumed served by the L1/L2 caches on this device.
L1Effect <- 0.1*reads
#L2Effect <- c(0.1, 0.15, 0.1, 0.05, 0.05, 0.05, 0.1, 0.05, 0.025, 0 ,0,0,0)*reads
L2Effect <- 0.1*reads
# Per-device calibration factor (effective fraction of peak throughput).
W <- 1.1
# Global- and shared-memory communication costs, in cycles.
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
# Predicted kernel time (ms) and speedup = predicted / measured.
timeKernel_timeKernel_SubSeqMax_630 <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_630 <- timeKernel_timeKernel_SubSeqMax_630[1:12]/SubSeqMax_gt630;
##### GeForce GTX-660 #####
# Measured means, device specs, and model prediction (same recipe as GT-630).
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/gtx-660/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_gtx660[i] <- mean(temp[i,1:10])
}
PCoresNumber_GPU <- 960;
clockFrequency_GPU <- 1058; # Mhz
#bandWidth_GT630 <- memoryClockRate_GT630 * memoryBusWidth_GT630 *2; # MB/s
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
# No modeled L1 effect for this device.
L1Effect <- 0
L2Effect <- 0.1*reads
# Per-device calibration factor.
W <- .65
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
timeKernel_timeKernel_SubSeqMax_660 <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_660 <- timeKernel_timeKernel_SubSeqMax_660[1:12]/SubSeqMax_gtx660;
##### GeForce GTX-680 #####
# Measured means, device specs, and model prediction (same recipe as above).
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/gtx-680/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_gtx680[i]<- mean(temp[i,1:10])
}
PCoresNumber_GPU <- 1536;
clockFrequency_GPU <- 1006; # Mhz
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
L1Effect <- 0
L2Effect <- 0.1*reads
# W <- .76
# NOTE(review): W is commented out here, so the value assigned in the
# previous device section silently carries over — confirm intentional.
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
timeKernel_timeKernel_SubSeqMax_680 <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_680 <- timeKernel_timeKernel_SubSeqMax_680[1:12]/SubSeqMax_gtx680;
##### GeForce GTX-Titan #####
# Measured means, device specs, and model prediction (same recipe as above).
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/gtx-Titan/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_Titan[i]<- mean(temp[i,1:10])
}
PCoresNumber_GPU <- 2688;
clockFrequency_GPU <- 876; # Mhz
#bandWidth_GT630 <- memoryClockRate_GT630 * memoryBusWidth_GT630 *2; # MB/s
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
L1Effect <- 0
L2Effect <- 0.1*reads
# W <- .55
# NOTE(review): W is commented out, so the last assigned value carries
# over from an earlier section — confirm intentional.
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
timeKernel_timeKernel_SubSeqMax_Titan <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_Titan <- timeKernel_timeKernel_SubSeqMax_Titan[1:12]/SubSeqMax_Titan;
##### Tesla K-20 #####
# Measured means, device specs, and model prediction (same recipe as above).
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/tesla-k20/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_Tesla[i]<- mean(temp[i,1:10])
}
PCoresNumber_GPU <- 2496;
clockFrequency_GPU <- 706; # Mhz
#bandWidth_GT630 <- memoryClockRate_GT630 * memoryBusWidth_GT630 *2; # MB/s
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
L1Effect <- 0
L2Effect <- 0.1*reads
#W <- 0.63
# NOTE(review): W is commented out, so the last assigned value carries
# over from an earlier section — confirm intentional.
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
timeKernel_timeKernel_SubSeqMax_Tesla <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_Tesla <- timeKernel_timeKernel_SubSeqMax_Tesla[1:12]/SubSeqMax_Tesla;
##### Tesla K-40 #####
# Measured means, device specs, and model prediction (same recipe as above).
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/tesla-k40/")
temp <- as.matrix(read.table("./SubSeqMax.txt", sep="\t", header=F,fill = TRUE))
for (i in 1:12){
SubSeqMax_Tesla_k40[i]<- mean(temp[i,1:10])
}
PCoresNumber_GPU <- 2880;
clockFrequency_GPU <- 745; # Mhz
#bandWidth_GT630 <- memoryClockRate_GT630 * memoryBusWidth_GT630 *2; # MB/s
flopsTheoreticalpeakGPU <- clockFrequency_GPU * PCoresNumber_GPU ; # Mflops/second
#Cycles operations per Thread operations
timeComputationKernel <- 100*numberthreads * N_perThread;
# MODEL Multi_BSP_GPUModelPredictionTime Miliseconds
reads <- numberthreads*N_perThread;
L1Effect <- 0
L2Effect <- 0.1*reads
#W <- 0.52
# NOTE(review): W is commented out, so the last assigned value carries
# over from an earlier section — confirm intentional.
CommGM <- ((numberthreads*N_perThread - L1Effect - L2Effect + numberthreads*5)*latencyGlobalMemory + L1Effect*latencyL1 + L2Effect*latencyL2);
CommSM <- (numberthreads*N_perThread + numberthreads*5)*latencySharedMemory
timeKernel_timeKernel_SubSeqMax_Tesla_k40 <- ( W^-1*(timeComputationKernel + CommGM + CommSM)/(flopsTheoreticalpeakGPU*10^6))*10^3;
SpeedupSubSeqMax_Tesla_k40 <- timeKernel_timeKernel_SubSeqMax_Tesla_k40[1:12]/SubSeqMax_Tesla_k40;
############ Difference between predicted and measured SubSequenceMaximum ##############
# Plot the predicted/measured speedup ratio for sizes 2^20..2^28 on all
# six GPUs and save it as a PNG.
dataN <- 4:12
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/")
png(filename="./SubSeqMax-GPU.png", width=800, height=600)
par(mar=c(1, 4, 2, 1) + 0.1)
layout(rbind(1,2), heights=c(15,1)) # put legend on bottom 1/8th of the chart
plot(N[dataN], SpeedupSubSeqMax_630[dataN], type="l", log="x", lty = 1, lwd=c(7.5,7.5), xaxt="n",
     ylim=c(0.7, 1.2), xlim=c(1048576, 268435456),
     col=cbbPalette[1], ylab = " ", cex.axis=3.5, cex.lab=3.5,cex.main=3.5,
     xlab = " ", main = paste(" ", sep=""));
points(N[dataN], SpeedupSubSeqMax_630[dataN], col = cbbPalette[1], type = "p", pch=20,cex = 3.5)
lines(N[dataN], SpeedupSubSeqMax_660[dataN], col = cbbPalette[2], lty = 2,lwd=c(7.5,7.5));
points(N[dataN], SpeedupSubSeqMax_660[dataN], col = cbbPalette[2], pch=21,cex = 3.5);
lines(N[dataN], SpeedupSubSeqMax_680[dataN], col = cbbPalette[3], lty = 3,lwd=c(7.5,7.5));
points(N[dataN], SpeedupSubSeqMax_680[dataN], col = cbbPalette[3], pch=22,cex = 3.5);
lines(N[dataN], SpeedupSubSeqMax_Titan[dataN], col = cbbPalette[4], lty = 4,lwd=c(7.5,7.5));
points(N[dataN], SpeedupSubSeqMax_Titan[dataN], col = cbbPalette[4], pch=23,cex = 3.5);
lines(N[dataN], SpeedupSubSeqMax_Tesla[dataN], col = cbbPalette[5], lty = 5,lwd=c(7.5,7.5));
points(N[dataN], SpeedupSubSeqMax_Tesla[dataN], col = cbbPalette[5], pch=24,cex = 3.5);
lines(N[dataN], SpeedupSubSeqMax_Tesla_k40[dataN], col = cbbPalette[6], lty = 6,lwd=c(5,5));
points(N[dataN], SpeedupSubSeqMax_Tesla_k40[dataN], col = cbbPalette[6], pch=25,cex = 3.5);
#axis(1, 2^pow, cex.axis = 1.5,lwd=c(5,5));
# axis(1, at = c(N[dataN]), labels = paste('2^',log2(c(N[dataN])),sep="") , cex.axis=1)
#axis(2, at=c(SpeedupMatSumGmUnSP_GT630));
grid()
par(mar=c(2, 2, 0, 0))
plot.new()
# Fixed legend label typo: "Tesla-K0" -> "Tesla-K40" (matches the device
# actually plotted, SubSeqMax_Tesla_k40).
legend('center',
       col=c(cbbPalette[1:6]), lty=c(1:6),pch=c(20:25), lwd=c(5,5), cex=1, ncol=6,bty ="n",
       legend=c("GT-630", "GTX-660", "GTX-680","GTX-Titan", "Tesla-K20","Tesla-K40"))
dev.off()
# Plot the measured runtimes themselves (log-log axes) for the same sizes
# on all six GPUs and save as a PNG.
setwd("/home/marcos/Dropbox/Doctorate/Results/2015/BSPGPU/experiment/")
png(filename="./SubSeqMax-GPU-Times.png", width=800, height=600)
par(mar=c(1, 4, 2, 1) + 0.1)
layout(rbind(1,2), heights=c(15,1)) # put legend on bottom 1/8th of the chart
plot(N[dataN], SubSeqMax_gt630[dataN], type="l", log="xy", lty = 1, lwd=c(5,5), xaxt="n",
ylim=c(0.1, max(SubSeqMax_gt630[dataN])), xlim=c(1048576, 268435456),
col=cbbPalette[1], ylab = " ", cex.axis=2.5, cex.lab=3,cex.main=3.5,
xlab = " ", main = paste(" ", sep=""));
points(N[dataN], SubSeqMax_gt630[dataN], col = cbbPalette[1], type = "p", pch=20,cex = 3.5)
lines(N[dataN], SubSeqMax_gtx660[dataN], col = cbbPalette[2], lty = 2,lwd=c(7.5,7.5));
points(N[dataN], SubSeqMax_gtx660[dataN], col = cbbPalette[2], pch=21,cex = 3.5);
lines(N[dataN], SubSeqMax_gtx680[dataN], col = cbbPalette[3], lty = 3,lwd=c(5,5));
points(N[dataN], SubSeqMax_gtx680[dataN], col = cbbPalette[3], pch=22,cex = 3.5);
lines(N[dataN], SubSeqMax_Titan[dataN], col = cbbPalette[4], lty =4,lwd=c(7.5,7.5));
points(N[dataN], SubSeqMax_Titan[dataN], col = cbbPalette[4], pch=23,cex = 3.5);
lines(N[dataN], SubSeqMax_Tesla[dataN], col = cbbPalette[5], lty = 5,lwd=c(7.5,7.5));
points(N[dataN], SubSeqMax_Tesla[dataN], col = cbbPalette[5], pch=24,cex = 3.5);
lines(N[dataN], SubSeqMax_Tesla_k40[dataN], col = cbbPalette[6], lty = 6,lwd=c(7.5,7.5));
points(N[dataN], SubSeqMax_Tesla_k40[dataN], col = cbbPalette[6], pch=25,cex = 3.5);
#axis(1, 2^pow, cex.axis = 1.5,lwd=c(5,5));
# X axis labeled as powers of two.
axis(1, at = c(N[dataN]), labels = paste('2^',log2(c(N[dataN])),sep="") , cex.axis=1)
#axis(2, at=c(SpeedupMatSumGmUnSP_GT630));
grid()
# par(mar=c(2, 2, 0, 0))
# plot.new()
# legend('center',
# lty=1, col=c(cbbPalette[1:6]), pch=c(20:25), lwd=c(5,5), cex=2.2, ncol=6,bty ="n",
# legend=c("GT-630", "GTX-660", "GT-680","GTX-Titan", "Tesla-K20", "Tesla-K40"))
dev.off()
|
95a54fef7a012ed466d6a24614ff443509332be8
|
1c1e0c617214a4e13304e47df84167a646e94935
|
/ExploratoryDataAnalysis/assignment1/plot3.R
|
6ef1bf92996fafb171b927f59e01ef2731cd0fb8
|
[] |
no_license
|
kn2m/datasciencecoursera
|
667e0710c939518438a9d4982c76f3b4f12ff41d
|
1455050fe5983f35969732c86d9c2c83bad23902
|
refs/heads/master
| 2016-09-05T11:04:29.116509
| 2015-06-07T07:46:46
| 2015-06-07T07:46:46
| 25,230,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
plot3.R
|
#get data if needed
# Read the full household power dataset; "?" marks missing values.
power <- read.table(file="./household_power_consumption.txt", header=TRUE, sep=";", na="?")
# Combine date + time into a POSIXlt timestamp before overwriting Date.
power$Time <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S") #convert to time class
power$Date <- as.Date(power$Date, "%d/%m/%Y") #convert to date class
# Keep only the two analysis days, then free the full table.
powersub <- subset(power, Date %in% as.Date(c("2007-02-01", "2007-02-02"))) #subset data
remove(power)
#create plot 3
#' Plot the three energy sub-metering series and save them to plot3.png.
#'
#' @param data Data frame with a time-class `Time` column and numeric
#'   `Sub_metering_1/2/3` columns; defaults to the `powersub` subset above.
plot3 <- function(data=powersub) {
  #name file & set parameters
  png(file="plot3.png", width=480, height=480)
  #create plot — uses the `data` argument (the original ignored it and
  #read the global `powersub` directly, so passing other data had no effect)
  plot(data$Time, data$Sub_metering_1,
       type="l", col="black", ylab="Energy sub metering", xlab="")
  lines(data$Time, data$Sub_metering_2,
        col="red")
  lines(data$Time, data$Sub_metering_3,
        col="blue")
  legend("topright", col=c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1)
  dev.off()
}
# Render the plot with the default (powersub) data.
plot3()
|
10e21ed480ee8598027545ebde48bf6bf939d0ca
|
c275a9710945be41621d3981d8c78fc7a6e74873
|
/R/plot.R
|
f357a1eccfce56e90fb4799c82a312899c564e2e
|
[] |
no_license
|
mhu48/pudms
|
e60bbc1a359e191a28787d2debfa3ccb6394e40c
|
1564b34f6c241b9e02f814ac73d0cbd12bfc3112
|
refs/heads/master
| 2023-02-06T01:36:16.598004
| 2020-12-29T22:53:18
| 2020-12-29T22:53:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
r
|
plot.R
|
#' Plot the ROC curve corresponding to the optimal prior for a vpudms fit
#'
#' @param x A `vpudms.fit` object carrying `py1`, `py1.opt`, and `roc_curves`.
#' @param y Ignored; required by the `plot` generic signature.
#' @param ... Ignored.
#' @export
#' @method plot vpudms.fit
plot.vpudms.fit <- function(x, y, ...) {
  # Position of the optimal prior in the fitted grid. Take the first match:
  # the original `x$roc_curves[[idx]]` errored if which() returned >1 index.
  idx <- which(x$py1 == x$py1.opt)[1]
  rocplot(x$roc_curves[[idx]], x$py1.opt)
}
|
1aa0bb38a869c3f841cc9e3ab95e55233173a904
|
707dc4af84fb0269217e4d611ba3c55ee953df89
|
/practice.R
|
d0025b2abc08560bf96a9376bfd875e80add2ef9
|
[] |
no_license
|
solayman34/intro_to_r
|
82544c917299b8363799502cdff83f6821acd203
|
57e5b35ac5d5a308ec623b213583173a9c1171e3
|
refs/heads/master
| 2020-07-04T10:58:14.887074
| 2016-11-17T23:39:29
| 2016-11-17T23:39:29
| 74,071,055
| 0
| 0
| null | 2016-11-17T21:56:59
| 2016-11-17T21:56:59
| null |
UTF-8
|
R
| false
| false
| 179
|
r
|
practice.R
|
library(ggplot2)
library(dplyr)
# Loads a `suicides` data frame from the workspace image.
load('suicides.rdata')
# Keep an untouched backup. Plain assignment suffices: data frames are
# copy-on-modify in R. (The original called data.table::copy(), but
# data.table is never loaded here, so that call would error.)
all_suicides <- suicides
suicides <- suicides %>%
  group_by(year, state, means) %>%
  # NOTE(review): mutate() keeps every row and repeats the group total in
  # `deaths`; if one row per group was intended, summarise() is the fix —
  # confirm with the author. The result also remains grouped, as before.
  mutate(deaths = sum(deaths))
|
0ab16f67febe071dc5bf4ed349c4e4ef0a44e164
|
13358b75a418e71fa66078bb84832428707e656e
|
/R/getSEcoverage.R
|
3180396083f5743bb26eacd53af4e51efa12b419
|
[] |
no_license
|
vjcitn/groupAct2018
|
dc2acbb0a6afd96fbcd730c6c5067282e168c9ba
|
901d3a2f0573ee4785fbb29628800eb24c970e06
|
refs/heads/master
| 2020-03-23T20:04:35.036796
| 2018-07-23T14:49:55
| 2018-07-23T14:49:55
| 142,019,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 684
|
r
|
getSEcoverage.R
|
#' retrieve a covr report for SummarizedExperiment
#' @import covr
#' @export
getSEcoverage <- function() {
  # get(load(...)) returns the single object stored in the shipped .rda
  # (load() returns the object's name, which get() then resolves).
  get(load(system.file("coverageObjs/SE_covr.rda", package="groupAct2018")))
}
#' retrieve a covr report for DESEQ2
#' @export
getDESEQ2coverage <- function() {
  # get(load(...)) returns the single object stored in the shipped .rda.
  get(load(system.file("coverageObjs/DESEQ2_covr.rda", package="groupAct2018")))
}
#' retrieve a covr report for tximport
#' @export
getTximportCoverage <- function() {
  # get(load(...)) returns the single object stored in the shipped .rda.
  get(load(system.file("coverageObjs/tximport_covr.rda", package="groupAct2018")))
}
#' retrieve a covr report for scran
#' @export
getScranCoverage <- function() {
  # Roxygen title fixed (was a copy-paste of the tximport description).
  get(load(system.file("coverageObjs/scran_coverage.rda", package="groupAct2018")))
}
|
478bb37494cae5741170d5fcd0f7c73aa5dec902
|
a44911027ee6ec4612407db8b32b6428c6f61474
|
/Data/KNI/Archive/wind 2.R
|
68ed5e6c5480dc3db0c2b004abb6daac8a3ea479
|
[] |
no_license
|
OVazquezVillegas/Complex-knowledge-repository
|
8db607994ebff801a9573a3e8ebf37a685d89b15
|
7b28cc886121dbcae1797cf2ccaaed03d79bccb9
|
refs/heads/main
| 2023-08-15T07:13:01.282231
| 2021-10-07T14:57:42
| 2021-10-07T14:57:42
| 414,311,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
wind 2.R
|
###################################
#### #####
#### Calculating KCI #####
#### #####
###################################
# Computes the Knowledge Complexity Index and relatedness measures for the
# wind-energy cluster data using the EconGeo package.
setwd("~/OneDrive - UNIVERSIDAD NACIONAL AUTÓNOMA DE MÉXICO/MSc Innovation Sciences/Thesis/Data/KNI")
library(dplyr)
library(igraph)
# ==================== #
# Data preparation # ----------------------------------------------------------------
# ==================== #
## Loading files
wind.2 <- read.delim("wind 2.txt")
library(EconGeo)
# convert matrix
wind.2.matrix <- get.matrix(wind.2)
# compute the LQ with the 'binary' argument set to 'TRUE'
wind.2.RCA <- location.quotient(wind.2.matrix, binary = TRUE)
# compute KCI
# NOTE(review): this assignment shadows EconGeo's KCI() function with the
# result; any later KCI(...) call in this session would fail — consider
# renaming the variable.
KCI <- KCI(wind.2.matrix, RCA = T)
View(KCI)
# compute relatedness
c_nuts = co.occurrence(wind.2.matrix) # relatedness between nuts
c_clusters = co.occurrence(t(wind.2.matrix)) #relatedness between clusters
r = relatedness(c_clusters)
r[r<1] = 0 #defining thresholds
r[r>1] = 1
# NOTE(review): entries exactly equal to 1 are untouched by the two
# threshold assignments above — confirm that is intended.
g = graph_from_adjacency_matrix(r, mode = "undirected") #knowledge space
plot(g,layout=layout.fruchterman.reingold, vertex.size = 0.2, vertex.label.cex = 0.1)
# compute relatedness density
mat = RCA(wind.2.matrix, binary = T) #matrix of comparative advtange
c1 = co.occurrence(t(mat)) #co-occurrence between clusters
r2 = relatedness(c1) #relatedness between clusters
r2[r2<1] = 0 #defining thresholds
r2[r2>1] = 1
rd = relatedness.density(mat, r2)
rd_list = get.list(rd)
|
29d18237442d3ac23e0d1d4118649cfc463be55f
|
54cf3cbbf1bd7d7561bbdc91c6ed2731f06863ba
|
/testOfMVaR.R
|
c37e5d6a4c9e2d01989d61399fa38cbd5bf3e30a
|
[] |
no_license
|
mcastagnaa/ODBCtest
|
037994e96330424dec448e1d4955b8d4c26c0d88
|
6f93ad94b992033fa51ecbeb9623f82022914222
|
refs/heads/master
| 2020-05-26T21:45:43.637906
| 2015-07-06T09:30:25
| 2015-07-06T09:30:25
| 38,609,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,059
|
r
|
testOfMVaR.R
|
# Standalone sanity check: do the top-n risk contributors per date line up
# with the performance set's asset codes?
rm(list =ls(all=TRUE))
perfSet <- readRDS("perfSet")
finalSet <- readRDS("riskSet_allData")
# Define top n risk contributors
n <- 1
# Average holdings per date (assumes one perfSet row per date — confirm).
typicalPortSize <- nrow(finalSet)/nrow(perfSet)
# Weighted beta per holding.
finalSet$WBeta <- finalSet$Weight*finalSet$Beta
#change this to perform different sorting
##############################
selector <- finalSet$Beta
##############################
#order the risk contributors by date
# TRUE spelled out (T is reassignable and unsafe as a literal).
finalSet <- finalSet[order(finalSet$pDate, selector, decreasing = TRUE),]
#pick the top n
# ave(..., FUN = seq_along) only uses the pDate grouping and row
# positions — selector's (stale, pre-reorder) values are never read, so
# this correctly numbers rows within each date of the reordered table.
riskSet <-
  finalSet[ave(selector, finalSet$pDate, FUN = seq_along) <= n, ]
#saveRDS(riskSet, file="riskSet")
#uniquePerfComp <- unique(perfSet[,c("assetName", "assetCode")])
uniqueRiskComp <- unique(riskSet[,c("AssetId", "Descr")])
# Ensure the asset-code column is character before in-place replacement.
perfSet[3] <- lapply(perfSet[3], as.character)
# Expand 6-character perf codes to the full risk-set AssetId on prefix match.
for(code in uniqueRiskComp$AssetId) {
  perfSet$assetCode[perfSet$assetCode == substr(code, 1,6)] <- code
}
combSet <- merge(perfSet, riskSet, by.x = "pDate" , by.y = "pDate")
combSet$Test <- combSet$assetCode == combSet$AssetId
# Share of dates where the top risk contributor matches the perf asset.
# Renamed from `match` to avoid shadowing base::match().
match_rate <- sum(combSet$Test)/nrow(perfSet)
print(match_rate)
|
586dbfb92585f5db5b4335a6ab4e8b049aa1c304
|
a2a59e1b3f41868d7e1206cbd54381e60340d303
|
/main.R
|
8e651c0c6bff3f33b14ee8dabaa680da88f754af
|
[] |
no_license
|
jakub-tomczak/HerringsLengthAnalysis
|
cf1180730e76a274c5798e2d73c153b7e87f8390
|
7951dc23caf22e8d8d3a3e2bbd5e9dc5e51e213c
|
refs/heads/master
| 2020-08-21T07:50:20.098513
| 2019-12-14T23:36:57
| 2019-12-14T23:36:57
| 216,113,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,287
|
r
|
main.R
|
# Herring length analysis pipeline: load and clean the data, explore the
# variables, then train and compare several regression models.
#
# NOTE(review): in the original script `analysedHerringsData` (L1039-40) and
# `predictionData` (L1049) were referenced before they were created, which
# fails with "object not found" on a clean run; the affected sections have
# been moved below the definitions they depend on.
source("src/functions.R")

herringsFilename <- 'files/herrings.csv'
rawData <- loadData(herringsFilename)

####################################################
################ data transform ####################
####################################################
herringsData <- transformData(rawData)
herringsDataYears <- addYearsColumns(herringsData)
variablesAnalysis.lengthAll(herringsDataYears)
variablesAnalysis.lengthByYear(herringsDataYears)
variablesAnalysis.lengthByYear_(herringsDataYears)

# convert column recr from factor to numeric values
herringsData$recr <- as.numeric(herringsData$recr)

####################################################
################## data summary ####################
####################################################
dataSummary(herringsData)

####################################################
############### correlation analysis ###############
####################################################
variableAnalysis.drawCorrelationPlot(herringsData)
columnsToRemove <- variableAnalysis.getTooCorrelatedColumns(herringsData)
print(columnsToRemove)
# totaln is dropped manually on top of the automatically detected columns
columnsToRemove <- c(columnsToRemove, "totaln")
analysedHerringsData <- variableAnalysis.removeColumns(herringsData, columnsToRemove)

####################################################
############# variables analysis ###################
####################################################
# (moved here: requires analysedHerringsData from the correlation step)
variableAnalysis.drawDifferentCharts(analysedHerringsData)
variableAnalysis.drawHistograms(analysedHerringsData)

####################################################
############ herrings length animation #############
####################################################
animation.run()

####################################################
################# models training ##################
####################################################
predictionData <- predictions.prepareData(analysedHerringsData)

# feature-selection ranking (moved here: requires predictionData$train);
# sample size is 5% of the raw data rows
importantFeatures <- featureSelection.rankImportance(predictionData$train, floor(dim(rawData)[1] * .05))
print(importantFeatures)

methods <- list(
  list(name = "linear regression", fun = predictions.lm),
  list(name = "lasso", fun = predictions.lasso),
  list(name = "ridge", fun = predictions.ridge),
  list(name = "ElasticNet", fun = predictions.elasticNet),
  list(name = "knn", fun = predictions.knn),
  list(name = "extreme gradient boosting", fun = predictions.xGradientBoosting))
methodsWithPredictions <- predictions.runMethods(methods, predictionData)

predictionCharts <- lapply(methodsWithPredictions, function(x)
{
  predictions.predictionChart(x$predictions, x$name, predictionData)
})
plot_grid(plotlist = predictionCharts)

importancesCharts <- lapply(methodsWithPredictions, function(x)
{
  predictions.importanceCharts(x)
})
plot_grid(plotlist = importancesCharts)

stats <- predictions.aggregateStats(methodsWithPredictions)
predictions.plotStats(stats, "RMSE", "RMSE")
predictions.plotStats(stats, "R.Squared", "R^2")
predictions.plotStats(stats, "Time", "Czas wykonywania [s]")

####################################################
### second analysis, only with chosen attributes ###
####################################################
# top5 best attributes (based on all algorithms)
bestAttributes <- c("sst", "nao", "fbar", "sal", "cfin1", "length")
methods <- list(
  list(name = "linear regression", fun = predictions.lm),
  list(name = "lasso", fun = predictions.lasso),
  list(name = "ridge", fun = predictions.ridge),
  list(name = "ElasticNet", fun = predictions.elasticNet),
  list(name = "extreme gradient boosting", fun = predictions.xGradientBoosting))
bestAttributesData <- herringsData[, bestAttributes]
bestPredictionData <- predictions.prepareData(bestAttributesData)
methodsWithPredictionsBestData <- predictions.runMethods(methods, bestPredictionData)
bestImportancesCharts <- lapply(methodsWithPredictionsBestData, function(x)
{
  predictions.importanceCharts(x)
})
plot_grid(plotlist = bestImportancesCharts)
bestStats <- predictions.aggregateStats(methodsWithPredictionsBestData)
predictions.plotStats(bestStats, "RMSE", "RMSE")
predictions.plotStats(bestStats, "R.Squared", "R^2")
predictions.plotStats(bestStats, "Time", "Czas wykonywania [s]")

####################################################
###  third analysis, only with feature selected  ###
####################################################
# remind features selection analysis
print(importantFeatures)
featureSelection <- names(importantFeatures$fit$coefficients[2:6])
print(featureSelection)
# "fbar" "sal" "sst" "cfin2" "recr"
methods <- list(
  list(name = "linear regression", fun = predictions.lm),
  list(name = "lasso", fun = predictions.lasso),
  list(name = "ridge", fun = predictions.ridge),
  list(name = "ElasticNet", fun = predictions.elasticNet),
  list(name = "extreme gradient boosting", fun = predictions.xGradientBoosting))
selectedAttributes <- herringsData[, c(featureSelection, 'length')]
selectedPredictionData <- predictions.prepareData(selectedAttributes)
methodsWithPredictionsSelectedData <- predictions.runMethods(methods, selectedPredictionData)
bestImportancesCharts <- lapply(methodsWithPredictionsSelectedData, function(x)
{
  predictions.importanceCharts(x)
})
plot_grid(plotlist = bestImportancesCharts)
selectedAttrStats <- predictions.aggregateStats(methodsWithPredictionsSelectedData)
predictions.plotStats(selectedAttrStats, "RMSE", "RMSE")
predictions.plotStats(selectedAttrStats, "R.Squared", "R^2")
predictions.plotStats(selectedAttrStats, "Time", "Czas wykonywania [s]")

####################################################
## length by year vs best and selected attributes ##
####################################################
mean_by_year <- herringsDataYears %>%
  select(c(bestAttributes, "year", "cfin2")) %>%
  group_by(year) %>%
  summarise_all(mean)
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "sst")
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "nao")
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "fbar")
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "sal")
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "cfin1")
# cfin2 is being picked by rfe
variablesAnalysis.lengthByYearVsVariable(mean_by_year, "cfin2")
|
f909d97ce63b326d0bfee90c224d7009286a2dd7
|
294e36fef96337698265224df79603842e6f242b
|
/Q3.R
|
02cb3cda296816d29c2755d339e9807003a586c0
|
[] |
no_license
|
rizwanhaidar/Data-Visualisation-probability-and-statistic-Assignments
|
5b3c6d43fe2061b87e16d749c8a0a39f1cf67b4b
|
c82bf519597408b7ed32b0345b3851765854bc2d
|
refs/heads/master
| 2022-11-14T21:08:00.835527
| 2020-07-02T05:32:16
| 2020-07-02T05:32:16
| 242,201,248
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
Q3.R
|
# Q3: 3-D pie chart of robot locomotion types (plotrix provides pie3D).
library(plotrix)

robot_labels <- c('Legs', 'Wheels', 'Both', 'Neither')
robot_counts <- c(63, 20, 8, 15)
slice_colors <- c('bisque2', 'darksalmon', 'lightyellow3', 'lightcoral')

# Draw the exploded 3-D pie chart of the robot counts.
pie3D(robot_counts,
      labels = robot_labels,
      explode = 0.09,
      main = "Pie Chart of Robots",
      col = slice_colors)
|
d0cd5366da8be9f88e6c361b95b70d5eed778719
|
3f14dbf3fe52796815b2a898cceeeebad084e01b
|
/R/allele.predictors.R
|
5b5978638a610a2a7999cbcca3284744c70b8b00
|
[] |
no_license
|
IFIproteomics/E2Predictor
|
27ad22c3cd3e47ca5eeb1fc2e05610033317c399
|
ee8208b13547fcaf25c11802459e453088053b36
|
refs/heads/master
| 2020-04-06T07:10:44.952674
| 2016-09-12T09:27:38
| 2016-09-12T09:27:38
| 65,803,735
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,328
|
r
|
allele.predictors.R
|
#' predictNeoEpitopes predicts the most likely alleles for ligands
#'
#' @param ligands a data frame, which should contain at least two variables: Sequence, cell.line
#' @param predictor.value indicates which value(s) select the best prediction
#'   between different alleles: "rank" (percentile rank) and/or "ic50" (nM)
#' @param seq.min.length minimum peptide length (residues) to predict
#' @param seq.max.length maximum peptide length (residues) to predict
#'
#' @return netMHCpredictions data.frame containing, per peptide and cell line,
#'   the best-scoring allele prediction(s), labelled via the `predictor` column
#'
#' @export
#'
predictNeoEpitopes <- function(ligands, predictor.value = c("rank", "ic50"), seq.min.length = 8, seq.max.length = 14){

  # do for each cell line present in ligands data frame (in most cases there is just one, but...)
  cell.lines = unique(ligands$cell.line)

  predict.cellline <- function(cl){
    # BUG FIX: the parameter used to be named `cell.line`, so inside
    # dplyr::filter() it was shadowed by the `cell.line` COLUMN and the
    # condition `cell.line == cell.line` compared the column with itself
    # (always TRUE), silently disabling the per-cell-line filter.
    ligs.cellline <- ligands %>%
      filter(cell.line == cl &
               nchar(Sequence) >= seq.min.length &
               nchar(Sequence) <= seq.max.length)

    message(paste("Predicting with netMHC 4.0 the alleles of ligands from ", cl, sep=" "))
    predictions.cellline <- netMHCpan.alleles(ligs.cellline$Sequence, cl)
    predictions.cellline$cell.line <- cl

    return(predictions.cellline)
  }

  predictions <- do.call("rbind", lapply(cell.lines, predict.cellline))
  predictions <- predictions %>% group_by(cell.line, Peptide)
  predictions$predictor <- NA

  # Best allele per (cell line, peptide): lowest percentile rank / lowest IC50.
  predictions_min.rank <- predictions %>% top_n(1, desc(Rank))
  predictions_min.rank$predictor <- "rank"

  predictions_min.ic50 <- predictions %>% top_n(1, desc(nM))
  predictions_min.ic50$predictor <- "ic50"

  # Empty template with the right columns and grouping (the filter can never match).
  predictions_min <- predictions %>% filter(cell.line == "IonlyWantAnEmptyDataFrame")

  if("rank" %in% predictor.value){
    predictions_min <- rbind(predictions_min, predictions_min.rank)
  }
  if("ic50" %in% predictor.value){
    predictions_min <- rbind(predictions_min, predictions_min.ic50)
  }

  return(predictions_min)
}
#' exec.netMHCpan executes the command netMHCpan for a set of peptides of SAME length and ONE allele (a single prediction)
#'
#' @param allele allele name (defined as of E2Predictor.Config$hla_alleles)
#' @param peptides character vector of peptide sequences, all of the same length
#' @param outputPath directory for the temporary FASTA input and the netMHCpan
#'   xls output ("test.xls"); created if missing
#'
#' @return the parsed netMHCpan prediction table with an added `allele` column
#'
exec.netMHCpan <- function(allele, peptides, outputPath = file.path(E2Predictor.Config$working.path, "temp")){

  len_pep <- nchar(peptides[1])

  #check peptide length is the same for all sequences
  check_length <- unique(sapply(peptides, nchar))
  if(length(check_length) > 1){
    stop("All peptides supplied to E2Predictor::exec.netMHCpan function must have the same length!")
  }

  mkdir(outputPath)

  # Strip '*' and ':' from the allele name (e.g. A*02:01 -> A0201), the
  # format netMHCpan expects on the command line.
  allele_txt <- gsub("\\*", "", allele)
  allele_txt <- gsub(":", "", allele_txt)

  # Write the peptides as a FASTA file by redirecting the message stream
  # (sink type = "message") into the file: one ">i" header per peptide.
  output.file <- file.path(outputPath, "netmhcInput.fasta")
  zz <- file(output.file, open = "wt")
  sink(zz, type = "message")
  for(i in 1:length(peptides))
  {
    message(paste0(">", i))
    message(peptides[i])
  }
  sink(type = "message")  # restore the message stream before closing the file
  close(zz)

  # Build the netMHCpan invocation; results are written to <outputPath>/test.xls.
  netMHCpan.command <- paste0(E2Predictor.Config$netMHCpath,
                              " -xls -xlsfile ", file.path(outputPath, "test.xls"),
                              " -l " , len_pep,
                              " -a HLA-", allele_txt,
                              " -f ", output.file
  )

  message( paste0("Predicting ", len_pep, "-mers for allele ", allele))
  dd <- system(netMHCpan.command, wait = TRUE, ignore.stdout = T)

  # skip = 1 drops the banner line netMHCpan puts above the real header.
  netMHC.prediction <- readr::read_tsv(file.path(outputPath, "test.xls"), skip = 1)
  netMHC.prediction$allele <- allele

  return(netMHC.prediction)
}
# Run netMHCpan for every allele configured for `cell.line`, over all
# peptides in `peptides`. Because netMHCpan is invoked once per peptide
# length, the peptides are grouped by length first; the per-run tables are
# row-bound into a single prediction data frame.
netMHCpan.alleles <- function(peptides, cell.line){
  # Alleles configured for this cell line.
  alleles <- E2Predictor.Config$hla_alleles[cell.line][[1]]

  # Distinct peptide lengths present in the input, in increasing order.
  lengths.present <- sort(unique(nchar(peptides)))

  predict.for.length <- function(len){
    same.length.peps <- peptides[nchar(peptides) == len]
    # One netMHCpan run per allele for this peptide length.
    do.call("rbind", lapply(alleles, exec.netMHCpan, same.length.peps))
  }

  do.call("rbind", lapply(lengths.present, predict.for.length))
}
|
9da06e908264cc275cd5640fe12a42fd040e8b32
|
603bb2c32e69c8aaf72d14c503411160c63944b3
|
/eseguiScript.R
|
5474f18715e85b9fcd2c3379891ee8f536cc1ebc
|
[
"MIT"
] |
permissive
|
flavioaic/LongTermPlanningDISD
|
ba3697d32d3c1a6dd48ab743dbfb6f1c8f860277
|
59ec9011dc129be8c437f25ec17b05a53915d5dd
|
refs/heads/master
| 2021-01-25T10:21:40.930682
| 2018-02-26T17:50:46
| 2018-02-26T17:50:46
| 123,350,191
| 0
| 0
| null | 2018-02-28T22:31:16
| 2018-02-28T22:31:15
| null |
UTF-8
|
R
| false
| false
| 469
|
r
|
eseguiScript.R
|
# Pipeline driver: runs the analysis modules in dependency order.
# NOTE(review): the backslash-escaped paths are Windows-specific; forward
# slashes or file.path() would be portable.

# Module 1 - database interpretation (loads the series from the db)
source("InterpretazioneDB\\caricamento_serie_da_db.R", echo = TRUE)
# builds the routes, step 1
source("AnalisiDatabaseDataMining\\rotte2.R", echo = TRUE)
# builds the routes, step 2 (linear-regression analysis: flights vs routes)
source("AnalisiDatabaseDataMining\\AnalisiRegressioneLineare\\voli_vs_rotte.R", echo = TRUE)
# clustering
source("ClusterAnalysis\\clustering_aerei_vs_km.R", echo = TRUE)
# analysis derived from the clustering (time-series analysis)
source("NuovaAnalisiDaCluster\\time_series_analysis_from_clusters.R", echo = TRUE)
|
c618a12847b10916e37a87b1b28f314445ec99f8
|
432ae33415269ec9ec4d22dce53f5375422c138f
|
/plot1.R
|
8ee22bea7661a75c9e4f7943ef66e5763cccd61e
|
[] |
no_license
|
davijeo89/ExData_Plotting1
|
74482c0f78f41dce1ee8bb6fb4d25d1378a2e067
|
2f010515fdd28e6d93a832bda5be0ebea46b0b62
|
refs/heads/master
| 2021-01-18T00:14:14.683023
| 2015-11-08T01:09:43
| 2015-11-08T01:09:43
| 45,704,103
| 0
| 0
| null | 2015-11-06T19:55:22
| 2015-11-06T19:55:22
| null |
UTF-8
|
R
| false
| false
| 393
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
power_data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
power_data$Date <- as.Date(power_data[, 1], format = "%d/%m/%Y")

# Keep only the two days of interest (character dates are coerced to Date
# by the comparison operators).
in_window <- power_data$Date >= "2007-02-01" & power_data$Date <= "2007-02-02"
two_days <- power_data[in_window, ]

png(file = "plot1.png")
# Column 3 was read as non-numeric (missing-value markers in the raw file),
# so it is coerced through character before the histogram.
hist(as.numeric(as.character(two_days[, 3])),
     breaks = 12,
     col = "red",
     main = "Global Active Power",
     ylab = "Frequency",
     xlab = "Global Active Power [kW]")
dev.off()
|
923b88da922e2622947e56e40d3073bda4fe8669
|
2f6f49557b57baeaaf4993d5f1cfcf72c2e797f5
|
/installs_for_sangerseqR.R
|
520e05c54c06db4e7976ed8d12c7335fe87f0bcc
|
[] |
no_license
|
James-Kitson/OPM_nest_associates
|
b91292a39144c59eb01a66395da80f68048f8e61
|
822d4a96c33216e7cfcb80ba9200e64e5bb8ed7d
|
refs/heads/master
| 2021-01-19T09:03:50.124650
| 2017-02-17T16:15:26
| 2017-02-17T16:15:26
| 82,082,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 468
|
r
|
installs_for_sangerseqR.R
|
## One-off installer for the R dependencies of the sangerseqR-based analysis:
## CRAN packages, Bioconductor packages, and sangeranalyseR from GitHub.
## Requires network access; run interactively once.

## clear workspace
## NOTE(review): rm(list = ls()) wipes the caller's workspace -- tolerable in
## a one-off install script, but discouraged in reusable code.
rm(list=ls())

# CRAN packages
# ("parallel" is commented out -- presumably because it ships with base R)
#install.packages("parallel")
install.packages("ape")
install.packages("reshape2")
install.packages("phangorn")
install.packages("stringi")
install.packages("stringr")

# Bioconductor packages (biocLite is the legacy, pre-BiocManager installer)
source("https://bioconductor.org/biocLite.R")
biocLite("DECIPHER")
biocLite("Biostrings")
biocLite("sangerseqR")

# GitHub package: sangeranalyseR (installed via devtools)
install.packages("devtools")
library(devtools)
install_github("roblanf/sangeranalyseR")
library(sangeranalyseR)
|
e8b300399143c5d449df71cab14dcd779100cb77
|
1f9b2393f1ad1408b5f41723d76b93a6c3817640
|
/productorapp/R/spinnerbox.R
|
0fb601e84d9b18d2e3ab193d41845da2e268e7ef
|
[] |
no_license
|
dscheel42/productor
|
259d8bf31da3e12dd3153af5c4739c06c8348990
|
413387ed6b35e503777820ffeb47e8f625d36064
|
refs/heads/master
| 2022-12-03T12:04:35.841905
| 2020-07-29T21:21:18
| 2020-07-29T21:21:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 111
|
r
|
spinnerbox.R
|
#' Display an output inside a box with a loading spinner
#'
#' Wraps the given Shiny output in `withSpinner()` and places it in a `box()`.
#'
#' @param output a Shiny output object to display
#' @param width box width in grid columns (defaults to 12, the full row)
#' @return the box containing the spinner-wrapped output
#' @export spinnerbox
spinnerbox <- function(output, width = 12) {
  spinning_output <- withSpinner(output)
  box(spinning_output, width = width)
}
|
aca97539e03512b199b535a8e884e4ea8b5fa359
|
9634fe37b7cc53537d85e456886d842f8233401b
|
/R/robust_variation.R
|
63047426a656f62bbc7425f49c7c82a9167ffb1e
|
[] |
no_license
|
MoseleyBioinformaticsLab/manuscript.ICIKendallTau
|
685ee22b6f46a54c611bd92f7377ada0161ee23b
|
8fd33d7a52432ff4a4b89d14c899b13c3050837a
|
refs/heads/main
| 2023-08-03T21:02:02.111442
| 2023-07-25T19:36:40
| 2023-07-25T19:36:40
| 429,873,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
robust_variation.R
|
# Summarise the median and MAD of `med_cor` for every
# (method, sample_class, keep_num) combination in `median_cor_df`.
calculate_variation <- function(median_cor_df){
  grouped_df <- dplyr::group_by(median_cor_df, method, sample_class, keep_num)
  # suppressMessages() silences dplyr's regrouping message from summarise().
  suppressMessages(
    dplyr::summarise(grouped_df, median = median(med_cor), mad = mad(med_cor))
  )
}
# Absolute difference in the median and MAD statistics between two sample
# classes, computed per (method, keep_num) combination and returned in long
# format (one row per difference type).
#
# `median_stats`: output of calculate_variation() -- one row per
#   (method, sample_class, keep_num) with `median` and `mad` columns.
# `groups`: which sample classes to compare; defaults to every class present.
#
# NOTE(review): `median[1] - median[2]` assumes exactly two classes survive
# the filter, in a stable order; with more classes the extras are silently
# ignored, and with fewer the result is NA -- confirm callers pass two groups.
calculate_differences = function(median_stats, groups = NULL){
  if (is.null(groups)) {
    groups = unique(median_stats$sample_class)
  }
  median_stats %>%
    dplyr::filter(sample_class %in% groups) %>%
    dplyr::group_by(method, keep_num) %>%
    dplyr::summarise(med_diff = abs(median[1] - median[2]), mad_diff = abs(mad[1] - mad[2])) %>%
    tidyr::pivot_longer(cols = c(med_diff, mad_diff),
                        names_to = "which_diff",
                        values_to = "diff")
}
|
f8af4e066076f3852f765f4778d2da75221d7889
|
33dcb93db307d9c524b20476104fa1109570351b
|
/man/fac_set.Rd
|
31d843b06c1aacdd0bda94c77ef84473654f6654
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw07-chenchenguo
|
2e5d93c1dbf06803533327e3257c38282aa869b0
|
45eb5588ed25c18002ce38512142b11b26535e1d
|
refs/heads/master
| 2020-04-05T09:20:05.706634
| 2018-11-13T03:40:35
| 2018-11-13T03:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 335
|
rd
|
fac_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fac_set.R
\name{fac_set}
\alias{fac_set}
\title{Setting levels as it appears in data}
\usage{
fac_set()
}
\arguments{
\item{one}{factor}
}
\value{
the same factor, with its levels reordered to match the order in which they first appear in the data
}
\description{
Setting levels as it appears in data
}
|
27d79ab20f04697369d51b4f821c7cf4d7a211b2
|
2c263c502c2b46a067ad8af056a29e29980b6aa8
|
/Final Codes/Data cleaning.R
|
c1e4242b903cd7d66461b02547dac662fd44bd8e
|
[] |
no_license
|
shivamnegi92/Prudential-Life-Insurance-Assessment--Machine-Learning
|
9e58aa69b9a726199f5575f2b7b69b10c99ea1b9
|
61de86df3af746de48d0f7a6c8f1e52ad3e8dfeb
|
refs/heads/master
| 2021-09-07T21:05:42.409606
| 2018-03-01T05:09:50
| 2018-03-01T05:09:50
| 108,802,485
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,846
|
r
|
Data cleaning.R
|
# Pre-processing and cleaning of the life-insurance dataset: drop sparse
# columns, impute missing values, one-hot encode categoricals, reduce
# dimensionality with PCA, and split into train/test sets.
#
# NOTE(review): several undefined-variable references in the original
# (data_cntg, merged_data, final_pca_data, Filtered_Data_new) have been fixed
# to the variables this script actually creates, and the feature subsets now
# come from the imputed `dataclean1` rather than the unimputed `mydata`
# (otherwise the mean-imputation was discarded and prcomp() received NAs).

# installing all packages
# (NOTE: install.packages() in a script re-installs on every run; consider
# running these once interactively instead)
install.packages("forecast")
install.packages("ade4")
install.packages("forecastHybrid")
install.packages("e1071")

library(forecast)
library(ade4)
library(e1071)
library(ggplot2)
library("rpart")
library("rpart.plot")

# Loading our dataset into R (`dataset` is assumed to already be in the
# workspace, e.g. via an import tool -- TODO confirm)
mydata <- dataset

#1. Pre-processing and cleaning the data
# Removing columns which are having more than 60% data value as null
dataclean1 <- mydata[, -which(colMeans(is.na(mydata)) > .6)]
mydata$Id <- NULL

# Verifying the columns which are removed
removed_columns <- mydata[, which(colMeans(is.na(mydata)) > 0.6)]
# Family_Hist_5 Medical_History_10 Medical_History_15 Medical_History_24 Medical_History_32

# Finding columns having missing values
missingvalues <- c(unlist(lapply(dataclean1, function(x) any(is.na(x)))))
View(missingvalues)
# Employment_Info_1 Employment_Info_4 Employment_Info_6 Insurance_History_5
# Family_Hist_2 Family_Hist_3 Family_Hist_4 Medical_History_1

# Filling missing values with mean values for the obtained columns
dataclean1$Employment_Info_1[is.na(dataclean1$Employment_Info_1)] <- mean(dataclean1$Employment_Info_1, na.rm = TRUE)
dataclean1$Employment_Info_4[is.na(dataclean1$Employment_Info_4)] <- mean(dataclean1$Employment_Info_4, na.rm = TRUE)
dataclean1$Employment_Info_6[is.na(dataclean1$Employment_Info_6)] <- mean(dataclean1$Employment_Info_6, na.rm = TRUE)
dataclean1$Insurance_History_5[is.na(dataclean1$Insurance_History_5)] <- mean(dataclean1$Insurance_History_5, na.rm = TRUE)
dataclean1$Family_Hist_2[is.na(dataclean1$Family_Hist_2)] <- mean(dataclean1$Family_Hist_2, na.rm = TRUE)
dataclean1$Family_Hist_4[is.na(dataclean1$Family_Hist_4)] <- mean(dataclean1$Family_Hist_4, na.rm = TRUE)
dataclean1$Medical_History_1[is.na(dataclean1$Medical_History_1)] <- mean(dataclean1$Medical_History_1, na.rm = TRUE)
dataclean1$Family_Hist_3[is.na(dataclean1$Family_Hist_3)] <- mean(dataclean1$Family_Hist_3, na.rm = TRUE)

# Encoding categorical variables into numerical dummies using 1-to-C coding.
# NOTE(review): Medical_History_1 was mean-imputed above, so it may carry one
# fractional "level" into the dummy coding -- confirm this is acceptable.
Categorical_data <- dataclean1[, c("Medical_History_1","Product_Info_1", "Product_Info_2", "Product_Info_3", "Product_Info_5", "Product_Info_6", "Product_Info_7", "Employment_Info_2", "Employment_Info_3", "Employment_Info_5", "InsuredInfo_1", "InsuredInfo_2", "InsuredInfo_3", "InsuredInfo_4", "InsuredInfo_5", "InsuredInfo_6", "InsuredInfo_7", "Insurance_History_1", "Insurance_History_2", "Insurance_History_3", "Insurance_History_4", "Insurance_History_7", "Insurance_History_8", "Insurance_History_9", "Family_Hist_1", "Medical_History_2", "Medical_History_3", "Medical_History_4", "Medical_History_5", "Medical_History_6", "Medical_History_7", "Medical_History_8", "Medical_History_9", "Medical_History_11", "Medical_History_12", "Medical_History_13", "Medical_History_14", "Medical_History_16", "Medical_History_17", "Medical_History_18", "Medical_History_19", "Medical_History_20", "Medical_History_21", "Medical_History_22", "Medical_History_23", "Medical_History_25", "Medical_History_26", "Medical_History_27", "Medical_History_28", "Medical_History_29", "Medical_History_30", "Medical_History_31", "Medical_History_33", "Medical_History_34", "Medical_History_35", "Medical_History_36", "Medical_History_37", "Medical_History_38", "Medical_History_39", "Medical_History_40", "Medical_History_41")]
converted_data <- acm.disjonctif(Categorical_data)

# Continuous variables (from the imputed data)
Continuous_data <- dataclean1[c("Product_Info_4", "Ins_Age", "Ht", "Wt", "BMI", "Employment_Info_1", "Employment_Info_4", "Employment_Info_6", "Insurance_History_5", "Family_Hist_2", "Family_Hist_4")]

# Dummy (medical keyword) variables
data_dummy <- dataclean1[c("Medical_Keyword_1","Medical_Keyword_2","Medical_Keyword_3","Medical_Keyword_4","Medical_Keyword_5","Medical_Keyword_6","Medical_Keyword_7","Medical_Keyword_8","Medical_Keyword_9","Medical_Keyword_10","Medical_Keyword_11","Medical_Keyword_12","Medical_Keyword_13","Medical_Keyword_14","Medical_Keyword_15","Medical_Keyword_16","Medical_Keyword_17","Medical_Keyword_18","Medical_Keyword_19", "Medical_Keyword_20", "Medical_Keyword_21", "Medical_Keyword_22", "Medical_Keyword_23","Medical_Keyword_24", "Medical_Keyword_25", "Medical_Keyword_26", "Medical_Keyword_27", "Medical_Keyword_28", "Medical_Keyword_29","Medical_Keyword_30", "Medical_Keyword_31", "Medical_Keyword_32", "Medical_Keyword_33","Medical_Keyword_34", "Medical_Keyword_35","Medical_Keyword_36", "Medical_Keyword_37", "Medical_Keyword_38", "Medical_Keyword_39", "Medical_Keyword_40", "Medical_Keyword_41", "Medical_Keyword_42", "Medical_Keyword_43", "Medical_Keyword_44", "Medical_Keyword_45","Medical_Keyword_46", "Medical_Keyword_47","Medical_Keyword_48")]

# Merging the categorical, continuous and dummy data into the final dataset
# (fixed: was `data_cntg`, which does not exist)
Merged_data <- data.frame(c(converted_data, Continuous_data, data_dummy))

# Performing dimensionality reduction with PCA
pcadata <- prcomp(Merged_data)  # fixed: was `merged_data` (undefined)
summary(pcadata)
plot(pcadata)                   # fixed: was `final_pca_data` (undefined)

# After plotting and manual observation of the PCA components, we chose the
# first 116 components, which carry ~90% of the variance
reduced_pca_data <- data.frame(pcadata$x[, 1:116])

# Append the response variable to the dataset
final_data <- data.frame(c(reduced_pca_data, mydata[c("Response")]))
final_data$Response <- as.numeric(final_data$Response)

# Dividing our dataset into train (80%) and test (20%) sets
# (fixed: was `Filtered_Data_new`, which does not exist)
set.seed(123)
index <- sample(seq_len(nrow(final_data)), size = floor(0.8 * nrow(final_data)))
train_data <- final_data[index, ]
test_data <- final_data[-index, ]
|
92e266174cb31ebfdfd5a2eb97596abf43464efc
|
495a06460a2a3d55be2de4a707bfea492a88fb59
|
/program/leave-vs-leave.R
|
a4142e9aa3225d19d095d6a2eaf8616fc39b10bb
|
[] |
no_license
|
mlmagniette/Key_ingredients_Differential_Analysis_RNAseq
|
582bb95ec5e4df5751e5153cc5596f8f42a67594
|
b0f401551b2c0d2c617828c8ca94fbdb43a43d00
|
refs/heads/master
| 2020-05-29T16:09:16.095448
| 2016-09-19T06:31:33
| 2016-09-19T06:31:33
| 59,275,079
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
r
|
leave-vs-leave.R
|
# Differential-expression analysis of the leave-vs-leave (full-H0) comparison:
# runs every DE method on the Arabidopsis count table, then collects the genes
# any method declares differentially expressed at FDR <= 0.05.
origin <- getwd()

source("DiffAnalysis.limmavoom.R")
source("DiffAnalysis.edgeR.R")
source("DiffAnalysis.glm.edgeR.R")
source("DiffAnalysis.DESeq.R")
source("DiffAnalysis.DESeq2.R")

## data
# (fixed: `h=T` relied on partial argument matching; spelled out as header = TRUE)
dat <- read.table("../real_datasets/Arabidopsis_dataset.txt", header = TRUE)
countTable <- dat[, c(4, 6, 5, 7)]
rownames(countTable) <- dat$ID

## Differential analysis (each method writes its results into this directory)
setwd("../real_datasets/full_H0_dataset/Leave_vs_Leave_dataset")
DiffAnalysis.limmavoom(countTable)
DiffAnalysis.limmavoom(countTable, filter = FALSE)
DiffAnalysis.edgeR(countTable)
DiffAnalysis.edgeR(countTable, filter = FALSE)
DiffAnalysis.glm.edgeR(countTable)
DiffAnalysis.glm.edgeR(countTable, filter = FALSE)
DiffAnalysis.DESeq(countTable)
DiffAnalysis.DESeq2(countTable)
DiffAnalysis.DESeq2(countTable, filter = FALSE)

# Collect the per-method "*Comple*" result files written above.
# NOTE(review): `ls` is not portable to Windows; list.files(pattern = "Comple")
# would be the portable equivalent.
file <- system("ls *Comple*", intern = TRUE)

alpha <- 0.05
# Genes declared DE (padj <= alpha) by any method.
# (fixed: the vector was previously grown with append() inside a loop)
de_hits <- lapply(file, function(f){
  res <- read.table(f, sep = "\t", header = TRUE)
  rownames(res)[which(res$padj <= alpha)]
})
DE.FDR <- as.character(unlist(de_hits))

## 17 genes are removed
write.table(unique(DE.FDR), file = "../../removeAGI.txt", sep = "\n",
            quote = FALSE, col.names = FALSE, row.names = FALSE)
setwd(origin)
|
8eb79ceb33428a7be68be173d85d1548c0971ba3
|
eb62b4e11c2fabee75b4dcfbe5ab2e11a64450b9
|
/man/search_variants.Rd
|
81d975158f5d5702d5fc75f9e9316a5b7fa4e8b9
|
[] |
no_license
|
cran/Rga4gh
|
af195a0de5a16298c01d9c0692715ec1e582bf5b
|
76cd4974e4a1d9fc6dcebe3c46c27f4f164b75e6
|
refs/heads/master
| 2020-12-24T10:57:01.533892
| 2016-11-07T21:07:40
| 2016-11-07T21:07:40
| 73,116,702
| 0
| 1
| null | 2017-02-08T20:45:22
| 2016-11-07T20:12:44
|
R
|
UTF-8
|
R
| false
| true
| 1,971
|
rd
|
search_variants.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{search_variants}
\alias{search_variants}
\title{Search for Variants}
\usage{
search_variants(client, variant_set_id, call_set_ids = NULL,
reference_name = NULL, start = NULL, end = NULL, page_size = NULL,
page_token = NULL)
}
\arguments{
\item{client}{A ga4gh_client object}
\item{variant_set_id}{The ID of the Variant Set to search within}
\item{call_set_ids}{A list of Call Set IDs. Only return variant calls which belong to call sets
with these IDs. If unspecified, return all variants and no variant call objects.}
\item{reference_name}{Only return variants on this reference.}
\item{start}{The beginning of the window (0-based, inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference length.}
\item{end}{The end of the window (0-based, exclusive) for which overlapping variants should be returned.}
\item{page_size}{Specifies the maximum number of results to return in a single page.
If unspecified, the client default will be used.}
\item{page_token}{The continuation token, which is used to page through
large result sets. To get the next page of results, set this parameter to
the value of nextPageToken from the previous response.}
}
\description{
Search for Variants
}
\examples{
ref_client <- ga4gh_client("http://1kgenomes.ga4gh.org", api_location = "")
\dontrun{
library(magrittr)
## Find a dataset to search in
datasets <- ref_client \%>\% search_datasets() \%>\% content()
d_id <- datasets$datasets[[1]]$id
## Find a variant set to search in
variant_sets <- ref_client \%>\% search_variant_sets(d_id) \%>\% content()
vs_id <- variant_sets$variantSets[[1]]$id
## Search for variants in the variant set
variants <- ref_client \%>\% search_variants(vs_id, reference_name = "1") \%>\%
content()
variants$variants[[1]]
}
}
|
0eb5f38692d3f9fbc30665fdabfcb9e19f8b5ab0
|
17456a986046b7762673ca2f77bcc68c33108e88
|
/man/streambugs.write.sys.def.Rd
|
9492a866381534e95bfce8e32808be5f7bf6fe3d
|
[] |
no_license
|
cran/streambugs
|
1616a27c952f788df0cd9def1656676ba46adb9e
|
a9e608ff6575e9cab9148b8be6a61389eecc7933
|
refs/heads/master
| 2023-02-02T20:42:30.709825
| 2023-01-27T14:00:02
| 2023-01-27T14:00:02
| 112,500,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 766
|
rd
|
streambugs.write.sys.def.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streambugs_aux.r
\name{streambugs.write.sys.def}
\alias{streambugs.write.sys.def}
\title{Write system definition of the streambugs ODE model}
\usage{
streambugs.write.sys.def(sys.def, file = NA)
}
\arguments{
\item{sys.def}{system definition generated by the function
\code{\link{streambugs.get.sys.def}}}
\item{file}{file name}
}
\description{
Write system definition of the streambugs ODE model into a human-readable
text file.
}
\examples{
m <- streambugs.example.model.toy()
sys.def <- streambugs.get.sys.def(y.names=m$y.names, par=m$par, inp=m$inp)
file.name <- tempfile(m$name, fileext=".dat")
streambugs.write.sys.def(sys.def, file.name)
file.show(file.name, delete.file=TRUE)
}
|
4724a92776de9566d77b1fb2373df55cbfada211
|
bd9e04f73b0026b6be309d67ee78f786ef3d32f4
|
/man/convert_gene_id.Rd
|
db1f10119c2ccba0055e3eb38c8802f1ceb03f0d
|
[
"MIT"
] |
permissive
|
12379Monty/Larson_cfRNA_DarkChannelBiomarkers
|
b3ccc4d4664c5457611735c69c1b3aa9ddf67e57
|
8d891283a41a004b7e759076f66d4573e02aedb8
|
refs/heads/master
| 2023-02-24T08:30:11.663742
| 2021-01-29T17:26:08
| 2021-01-29T17:26:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 483
|
rd
|
convert_gene_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_gene_id.R
\name{convert_gene_id}
\alias{convert_gene_id}
\title{Convert the gene_id to gene_symbols}
\usage{
convert_gene_id(data)
}
\arguments{
\item{data}{a data frame for the expression matrix. The first column is the
feature name and each column corresponds to a sample.}
}
\value{
the data whose first column is converted to gene symbol.
}
\description{
Convert the gene_id to gene_symbols
}
|
4f76525b67e6975755dfcb6ee211245d51d86cf7
|
4d07eecae0429dc15066b34fbe512b8ff2ae53ea
|
/mds-writeup/figs/wt2-mds.R
|
b044c5ec74b4bfa3f8b3ba9d1007cb4c01e1d8d8
|
[] |
no_license
|
distanceModling/phd-smoothing
|
7ff8ba7bace1a7d1fa9e2fcbd4096b82a126c53c
|
80305f504865ce6afbc817fff83382678864b11d
|
refs/heads/master
| 2020-12-01T09:31:24.448615
| 2012-03-27T18:35:45
| 2012-03-27T18:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
wt2-mds.R
|
# wt2 MDS diagram: embed within-boundary distances of the wt2 region into
# 2-D and 3-D Euclidean space via classical multidimensional scaling.
source("mds.R")  # provides create_distance_matrix() (and loads its dependencies)

# Boundary polygon vertices
bnd <- read.csv("wt2-verts.csv",header=FALSE)
names(bnd)<-c("x","y")

# data: keep only the points flagged as inside the region
gendata <- read.csv("wt2truth.csv",header=TRUE)
gendata<- list(x=gendata$x[gendata$inside==1],
               y=gendata$y[gendata$inside==1],
               z=gendata$z[gendata$inside==1])

# Drop rows where x, y AND z are all NA.
# NOTE(review): with `&`, rows where only some coordinates are NA survive --
# if the intent was to drop rows with ANY missing value, `|` would be needed;
# confirm against the data.
na.ind<-!(is.na(gendata$x)&is.na(gendata$y)&is.na(gendata$z))
gendata<- list(x=gendata$x[na.ind],
               y=gendata$y[na.ind],
               z=gendata$z[na.ind])

# attempt to get around the inside bug: negate both the boundary and the
# points, then re-test point-in-region with inSide() on the mirrored geometry
bnd.neg<-list(x=-bnd$x,y=-bnd$y)
onoff<-inSide(bnd.neg,-gendata$x,-gendata$y)
gendata<- list(x=gendata$x[onoff],
               y=gendata$y[onoff],
               z=gendata$z[onoff])

# find the within-region distance matrix between all retained points
D<-create_distance_matrix(gendata$x,gendata$y,bnd)

# 2d case: classical MDS into 2 dimensions, plotted beside the original map
mds2<-cmdscale(D,eig=TRUE,x.ret=TRUE,k=2)
pdf("wt2-2d-proj.pdf",width=5,height=2.5)
## plot it: left = original coordinates with boundary, right = MDS projection
par(mfrow=c(1,2),las=1,mar=c(4,4,1.8,1.5),cex.axis=0.5,cex.lab=0.75)
plot(gendata$x,gendata$y,asp=1,pch=19,cex=0.2,xlab="x",ylab="y")
lines(bnd,lwd=2)
plot(mds2$points[,1],mds2$points[,2],asp=1,pch=19,cex=0.2,xlab="x*",ylab="y*")
dev.off()

# 3d case: pairwise axis views of the 3-D embedding (PDF output left disabled)
mds3<-cmdscale(D,eig=TRUE,x.ret=TRUE,k=3)
#pdf("wt2-3d-proj.pdf",width=6,height=2)
## plot it
par(mfrow=c(1,3),las=1,mar=c(4,4,1.8,1.5))#,cex.axis=0.3)
plot(mds3$points[,1],mds3$points[,2],asp=1,pch=19,cex=0.3,xlab="x*",ylab="y*")
plot(mds3$points[,2],mds3$points[,3],asp=1,pch=19,cex=0.3,xlab="y*",ylab="z*")
plot(mds3$points[,1],mds3$points[,3],asp=1,pch=19,cex=0.3,xlab="x*",ylab="z*")
#dev.off()
|
5f6d8c24b73794c55674635194ca78316f03b0a4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NRejections/examples/resample_resid.Rd.R
|
8ce5cd566ea7e77e3faf758da0234e33c44186d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
resample_resid.Rd.R
|
# Auto-extracted usage example for NRejections::resample_resid.
library(NRejections)
### Name: resample_resid
### Title: Resample residuals for OLS
### Aliases: resample_resid
### ** Examples
# Fit OLS of each outcome in Ys on exposure X, adjusting for covariates C,
# on the built-in `attitude` data; the result carries residuals ($resid)
# and coefficient estimates ($b) used by the resampling step below.
samp.res = dataset_result( X = "complaints",
C = c("privileges", "learning"),
Ys = c("rating", "raises"),
d = attitude,
center.stats = FALSE,
bhat.orig = NA, # bhat.orig is a single value now for just the correct Y
alpha = 0.05 )
# Draw B = 20 residual resamples from the fitted model, in parallel on 2 cores.
resamps = resample_resid( X = "complaints",
C = c("privileges", "learning"),
Ys = c("rating", "raises"),
d = attitude,
alpha = 0.05,
resid = samp.res$resid,
bhat.orig = samp.res$b,
B=20,
cores = 2)
|
7aed99c0ad1b534519017b5c9fcdc5061bb1f8a2
|
abf5d4febfcbf9fc3e482bfb1b9d2b6beffab12d
|
/inst/examples/geoquery.R
|
5f7063378f720ad9781bceb4a273e40b187b85d2
|
[
"MIT"
] |
permissive
|
jrnold/UKCensusAPI
|
d7fb489a166f81b4d84821910713b2ea0abe8c06
|
7523bca9454ecd7cc1d670861adb6c134a52ce74
|
refs/heads/master
| 2021-07-03T15:23:31.965972
| 2017-09-22T21:23:17
| 2017-09-22T21:23:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
geoquery.R
|
# Re-target a predefined Nomisweb census query (table KS401EW) from Leeds at
# MSOA resolution to the City of London at OA resolution, then download it.
library("UKCensusAPI")
cacheDir = "/tmp/UKCensusAPI"
# Here's a predefined query using Leeds at MSOA resolution,
# but we want to change the geographical area and refine the resolution
# NOTE(review): `table` shadows base::table(); harmless here but easy to trip on.
table = "KS401EW"
table_internal = "NM_618_1"
queryParams = list(
date = "latest",
RURAL_URBAN = "0",
MEASURES = "20100",
CELL = "7...13",
geography = "1245710558...1245710660,1245714998...1245714998,1245715007...1245715007,1245715021...1245715022",
select = "GEOGRAPHY_CODE,CELL,OBS_VALUE"
)
api = instance(cacheDir)
# Define the new region and resolution
coverage = c("City of London")
resolution = 299 # OA - see NomiswebApi.py
# Modify the query: look up the LAD code(s) for the coverage area, then
# expand them to geography codes at the requested resolution.
coverageCodes = getLADCodes(api, coverage)
queryParams["geography"] = geoCodes(api, coverageCodes, resolution)
# Fetch the new data (network call; results are cached in cacheDir)
KS401EW = getData(api, table, table_internal, queryParams)
|
f8142ac070433ce682034a3f4911c8edb4e45db8
|
15b0e0513e6206bb50c8fe6be4abbb16793eb560
|
/man/mapsources.Rd
|
e10447dbb876747826788c94a77aabc754ead0fa
|
[
"MIT"
] |
permissive
|
terminological/arear
|
2566759ae77342a79a28b2a0d1cb389da337194b
|
98cece8079c4470e029c966c6b6d103797207eba
|
refs/heads/main
| 2023-06-07T16:02:46.919877
| 2023-05-26T14:49:17
| 2023-05-26T14:49:17
| 340,773,310
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 982
|
rd
|
mapsources.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mapsources}
\alias{mapsources}
\title{Locations of UK and international shapefiles relevant to COVID}
\format{
A list with:
\describe{
\item{source}{the human readable location of the map}
\item{url}{the web location of the downloadable map shapefile}
\item{mapName}{the name of the map contained in the shapefile (which can contain multiple maps)}
\item{codeCol}{the name of the shapefile column containing the code of the area}
\item{nameCol}{the name of the shapefile column containing the name of the area}
\item{altCodeCol}{the name of the shapefile column containing the an alternative code for the area}
\item{simplify}{should the map be simplified when loaded?}
\item{license}{license terms}
}
}
\usage{
mapsources
}
\description{
A list of URLs to get maps, and metadata about the maps in the shapefiles and the column labelling.
}
\keyword{datasets}
|
73f3c8065fe0f186cf1305832fe65087e93cab13
|
103cefcd0a90175d953b11b1a13a6c76adb28aef
|
/analyses/traits/results_SeedMass_plot.R
|
522a638506829930fcd6792e33946711ae53a821
|
[] |
no_license
|
lizzieinvancouver/ospree
|
8ab1732e1245762194db383cdea79be331bbe310
|
9622af29475e7bfaa1b5f6697dcd86e0153a0a30
|
refs/heads/master
| 2023-08-20T09:09:19.079970
| 2023-08-17T10:33:50
| 2023-08-17T10:33:50
| 44,701,634
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,857
|
r
|
results_SeedMass_plot.R
|
# rm(list=ls())
# options(stringsAsFactors = FALSE)
## Load libraries
# library(rstan)
# require(shinystan)
# library(hdrcde) ## better quantiles
# ## Set seed
# set.seed(202109)
#
# # Specify if this code should be run on Midge or on your own computer.
# MidgeFlag <- FALSE
#
# if(MidgeFlag == TRUE){
# traitsData1 <- read.csv("../../data/Ospree_traits/try_bien_nodups_1.csv", stringsAsFactors = FALSE)
# traitsData2 <- read.csv("../../data/Ospree_traits/try_bien_nodups_2.csv", stringsAsFactors = FALSE)
# ospree <- read.csv("../../data/Ospree_traits/bbstan_allspp.utah.csv", stringsAsFactors = FALSE, header = TRUE)
# posterior_sm <- extract(readRDS(file = "../../data/Ospree_traits/SeedMass_log10_stanfit.RDS"))
# } else{
# traitsData1 <- read.csv("input/try_bien_nodups_1.csv", stringsAsFactors = FALSE)
# traitsData2 <- read.csv("input/try_bien_nodups_2.csv", stringsAsFactors = FALSE)
# ospree <- read.csv("input/bbstan_allspp_utah_37spp.csv", stringsAsFactors = FALSE, header = TRUE)
posterior_sm <- rstan::extract(readRDS(file = "output/SeedMass_log10_stanfit_37spp_wp.RDS"))
# posterior_smOld <- extract(readRDS(file = "output/SeedMass_log10_stanfit.RDS"))
#
# }
#
traitsData <- rbind(traitsData1,traitsData2)
#
traitors.sp <- c("Acer_pensylvanicum", "Acer_pseudoplatanus","Acer_saccharum","Aesculus_hippocastanum","Alnus_glutinosa","Alnus_incana","Betula_papyrifera","Betula_pendula","Betula_populifolia","Betula_pubescens","Corylus_avellana","Fagus_grandifolia","Fagus_sylvatica","Fraxinus_excelsior","Fraxinus_nigra","Hamamelis_virginiana","Juglans_cinerea","Juglans_regia","Populus_grandidentata","Populus_tremula","Prunus_avium","Prunus_padus","Prunus_pensylvanica","Prunus_persica","Prunus_serotina","Quercus_alba","Quercus_coccifera","Quercus_ellipsoidalis","Quercus_ilex","Quercus_petraea","Quercus_robur","Quercus_rubra","Quercus_shumardii","Quercus_velutina","Rhamnus_cathartica","Sorbus_aucuparia","Ulmus_pumila")
# # traitors.26 <- c("Acer_pensylvanicum", "Acer_pseudoplatanus", "Acer_saccharum", "Aesculus_hippocastanum", "Alnus_glutinosa", "Alnus_incana", "Betula_pendula", "Betula_populifolia", "Corylus_avellana", "Fagus_grandifolia","Fagus_sylvatica", "Fraxinus_excelsior", "Juglans_regia", "Populus_tremula", "Prunus_padus", "Prunus_serotina", "Quercus_alba", "Quercus_coccifera", "Quercus_ilex", "Quercus_petraea", "Quercus_robur", "Quercus_rubra", "Quercus_velutina", "Rhamnus_cathartica", "Sorbus_aucuparia", "Ulmus_pumila")
#
# # Subset data to traitors species list
traitsData <- subset(traitsData, traitsData$speciesname %in% traitors.sp)
# Seed mass trait only
seedData <- traitsData[traitsData$traitname == "seed mass",]
#
# # Read Ospree data and subset
# ospree$speciesname <- paste(ospree$genus, ospree$species, sep = "_")
# ospreeData <- subset(ospree, ospree$speciesname %in% traitors.sp)
#
# ospreeData <- subset(ospree, ospree$speciesname %in% traitors.sp)
# #ospreeData.26 <- subset(ospree, ospree$speciesname %in% traitors.26)
#
# # Exclude 12_bien as a study due to one data point
# aggregate(seedData$traitvalue, by = list(seedData$datasetid), FUN = length) # check
# ## aggregate(seedData$traitvalue, by = list(seedData$datasetid, seedData$speciesname), FUN = length)
# ### Subset
seedData <- subset(seedData, !(seedData$datasetid == "12_bien"))
#
# # Sorted species and study list
# Sorted species/study lists. NOTE(review): the sorted order of `specieslist`
# is assumed to match the species indexing of the posterior arrays (it is used
# below via which(specieslist == plot.sp[i])) -- confirm against the Stan fit.
# `studylist` appears unused in the rest of this script.
specieslist <- sort(unique(seedData$speciesname))
studylist <- sort(unique(seedData$datasetid))
## Obtain mean effect of forcing, chilling, photoperiod, interaction
# Per-species posterior-mean slopes: apply over MARGIN = 2 collapses the
# posterior draws, leaving one value per species (these are VECTORS, not scalars).
forceeff <- apply(posterior_sm$betaForceSp, MARGIN = 2, FUN = mean)
chilleff <- apply(posterior_sm$betaChillSp, MARGIN = 2, FUN = mean)
photoeff <- apply(posterior_sm$betaPhotoSp, MARGIN = 2, FUN = mean)
mugrandeff <- apply(posterior_sm$mu_grand_sp, MARGIN = 2, FUN = mean)
# Scalar trait-by-cue interaction effects (posterior means; values noted inline).
betaTraitForceeff <- mean(posterior_sm$betaTraitxForce) #-1.306722
betaTraitChilleff <- mean(posterior_sm$betaTraitxChill) #-2.57087
betaTraitPhotoeff <- mean(posterior_sm$betaTraitxPhoto) # -0.6908688
# forceeff.26 <- apply(posterior_smOld$betaForceSp, MARGIN = 2, FUN = mean)
# chilleff.26 <- apply(posterior_smOld$betaChillSp, MARGIN = 2, FUN = mean)
# photoeff.26 <- apply(posterior_smOld$betaPhotoSp, MARGIN = 2, FUN = mean)
# mugrandeff.26 <- apply(posterior_smOld$mu_grand_sp, MARGIN = 2, FUN = mean)
# betaTraitForceeff.26 <- mean(posterior_smOld$betaTraitxForce) #-1.799758
# betaTraitChilleff.26 <- mean(posterior_smOld$betaTraitxChill) #-3.269759
# betaTraitPhotoeff.26 <- mean(posterior_smOld$betaTraitxPhoto) #-0.6908688
## Species to plot and other plotting parameters
plot.sp <- c("Alnus_incana", "Aesculus_hippocastanum")
col.sp <- c( rgb(149 / 255, 216 / 255, 64 / 255, alpha = 0.9), rgb(72 / 255, 38 / 255, 119 / 255, alpha = 0.8))
col1.sp <- c( rgb(149 / 255, 216 / 255, 64 / 255, alpha = 0.2),rgb(72 / 255, 38 / 255, 119 / 255, alpha = 0.14))
col2.sp <- c( rgb(149 / 255, 216 / 255, 64 / 255, alpha = 0.5),rgb(72 / 255, 38 / 255, 119 / 255, alpha = 0.4))
#pdf(file = "figures/results_seedmass_37spp_ac.pdf", width = 15, height = 5)
## Plotting
### Forcing
#par(mar = c(5, 5, 2, 2), mfrow = c(1,3))
xrange <- seq(-2.5, 2.5, by = 0.25)
# Adjust budburst day for chilling/photoperiod to isolate the forcing effect
# (used only to set the y-axis range of the plot below).
ospreeBB <- ospreeData
ospreeBB$forceadj1 <- ospreeBB$response.time
# NOTE(review): BUG -- `chilleff` and `photoeff` are species-length vectors
# (see the which(specieslist == ...) indexing used in the per-species loop
# further down), so the RHS here has length nspecies and only its first
# element is stored in the scalar slot, with a recycling warning. The intent
# was probably mean(chilleff) / mean(photoeff) for this overall adjustment.
for(j in 1:nrow(ospreeBB)){
ospreeBB$forceadj1[j] = ospreeBB$response.time[j] - chilleff * ospreeBB$chill.z[j] - photoeff * ospreeBB$photo.z[j]
}
plot(NA, xlim = c(min(xrange), max(xrange)), ylim = c(min(ospreeBB$forceadj1), max(ospreeBB$forceadj1)),
xlab = expression("Forcing (z-scored"*~degree*C*")"), ylab = "Day of budburst",
bty = "n",
xaxt = "n",
yaxt = "n",
cex.lab = 1.2)
axis(side = 1, at = seq(min(xrange), max(xrange), by = .5), tcl = -.5, cex.axis = 0.9)
axis(side = 2, at = seq(round(min(ospreeBB$forceadj1),0), round(max(ospreeBB$forceadj1),0), by = 20), tcl = -.5, las = 1, cex.axis = 0.9)
mtext(side = 3, text = "Seed Mass", adj = 0, cex = 1.2)
## Add species to plot
for(i in 1:length(plot.sp)){
stor1 <- matrix(NA, ncol = length(xrange), nrow = 4000)
stor2 <- matrix(NA, ncol = length(xrange), nrow = 4000)
for(k in 1:4000){
stor1[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$alphaForceSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
stor2[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$betaForceSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
}
temp1.hdr <- apply(stor1, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
temp2.hdr <- apply(stor2, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
polygon(x = c(xrange, rev(xrange)), y = c(temp1.hdr[1, ], rev(temp1.hdr[2, ])), col = col1.sp[i], border = NA)
polygon(x = c(xrange, rev(xrange)), y = c(temp2.hdr[1, ], rev(temp2.hdr[2, ])), col = col2.sp[i], border = NA)
}
for(i in 1:length(plot.sp)){
ospree.temp <- subset(ospreeData, ospreeData$speciesname == plot.sp[i])
## Add adjusted columns
ospree.temp$forceadj1 <- ospree.temp$response.time
for(j in 1:nrow(ospree.temp)){
ospree.temp$forceadj1[j] = ospree.temp$response.time[j] - chilleff[which(specieslist == plot.sp[i])] * ospree.temp$chill.z[j] - photoeff[which(specieslist == plot.sp[i])] * ospree.temp$photo.z[j]
}
points(forceadj1 ~ jitter(force.z, factor = 0.75), data = ospree.temp, pch = 21, col = "black", bg = col.sp[i], cex = 1)
}
my.label <- paste("d", ".", sep="")
put.fig.letter(label=my.label, location= "topleft", font=2)
# legend("topleft", legend = c(expression(paste("Acquisitive (", italic("Populus tremula"), ")")),
# expression(paste("Conservative (", italic("Aesculus hippocastanum"), ")")), expression(paste("Trait effect", " = 0", " (50% interval)", sep = "")),
# expression(paste("Full model", " (50% interval)"))),
# col = c("black", "black", rgb(0, 0, 0, alpha = 0.18), rgb(0, 0, 0, alpha = 0.85)), pt.bg = c(col.sp, NA, NA),
# inset = 0.02, pch = c(21, 21, 15, 15), cex = 0.85, bty = "n")
# dev.off()
#pdf(file = "figures/results_seedmass_chilling_37spp_ac.pdf", width = 7, height = 6)
## Plotting
### Chilling
# par(mar = c(5, 5, 2, 2))
xrange <- seq(-2, 5, by = 0.25)
# Adjust budburst day for forcing/photoperiod to isolate the chilling effect
# (intended to set the y-axis range of the chilling plot below).
ospreeBB <- ospreeData
ospreeBB$chilladj1 <- ospreeBB$response.time
# NOTE(review): BUG (copy-paste) -- this loop iterates over and assigns into
# `ospree.temp`, a leftover object from the forcing section, instead of
# `ospreeBB`. As a result ospreeBB$chilladj1 is never adjusted and the plot's
# ylim uses raw response.time. It also has the same vector-recycling problem
# as the forcing section (`forceeff`/`photoeff` are species-length vectors).
for(j in 1:nrow(ospree.temp)){
ospree.temp$chilladj1[j] = ospree.temp$response.time[j] - forceeff * ospree.temp$force.z[j] - photoeff * ospree.temp$photo.z[j]
}
plot(NA, xlim = c(min(xrange), max(xrange)), ylim = c(min(ospreeBB$chilladj1), max(ospreeBB$chilladj1)),
xlab = expression("Chilling (z-scored"*~degree*C*")"), ylab = "Day of budburst",
bty = "n",
xaxt = "n",
yaxt = "n",
cex.lab = 1.2)
axis(side = 1, at = seq(min(xrange), max(xrange), by = 1), tcl = -.5, cex.axis = 0.9)
axis(side = 2, at = seq(round(min(ospreeBB$chilladj1),0), round(max(ospreeBB$chilladj1)), by = 20), tcl = -.5, las = 1, cex.axis = 0.9)
mtext(side = 3, text = "Seed Mass", adj = 0, cex = 1.2)
## Add species to plot
for(i in 1:length(plot.sp)){
stor1 <- matrix(NA, ncol = length(xrange), nrow = 4000)
stor2 <- matrix(NA, ncol = length(xrange), nrow = 4000)
for(k in 1:4000){
stor1[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$alphaChillSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
stor2[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$betaChillSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
}
temp1.hdr <- apply(stor1, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
temp2.hdr <- apply(stor2, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
polygon(x = c(xrange, rev(xrange)), y = c(temp1.hdr[1, ], rev(temp1.hdr[2, ])), col = col1.sp[i], border = NA)
polygon(x = c(xrange, rev(xrange)), y = c(temp2.hdr[1, ], rev(temp2.hdr[2, ])), col = col2.sp[i], border = NA)
}
for(i in 1:length(plot.sp)){
ospree.temp <- subset(ospreeData, ospreeData$speciesname == plot.sp[i])
## Add adjusted columns
ospree.temp$chilladj1 <- ospree.temp$response.time
for(j in 1:nrow(ospree.temp)){
ospree.temp$chilladj1[j] = ospree.temp$response.time[j] - forceeff[which(specieslist == plot.sp[i])] * ospree.temp$force.z[j] - photoeff[which(specieslist == plot.sp[i])] * ospree.temp$photo.z[j]
}
points(chilladj1 ~ jitter(chill.z, factor = 0.75), data = ospree.temp, pch = 21, col = "black", bg = col.sp[i], cex = 1)
}
my.label <- paste("e", ".", sep="")
put.fig.letter(label=my.label, location= "topleft", font=2)
# legend("topleft", legend = c(expression(paste("Acquisitive (", italic("Populus tremula"), ")")),
# expression(paste("Conservative (", italic("Aesculus hippocastanum"), ")")), expression(paste("Trait effect", " = 0", " (50% interval)", sep = "")),
# expression(paste("Full model", " (50% interval)"))),
# col = c("black", "black", rgb(0, 0, 0, alpha = 0.18), rgb(0, 0, 0, alpha = 0.85)), pt.bg = c(col.sp, NA, NA),
# inset = 0.02, pch = c(21, 21, 15, 15), cex = 0.85, bty = "n")
#dev.off()
#pdf(file = "figures/results_seedmass_photoperiod_37spp_ac.pdf", width = 7, height = 6)
## Plotting
### Photoperiod
#par(mar = c(5, 5, 2, 2))
xrange <- seq(-1.5, 2.5, by = 0.25)
# Adjust budburst day for forcing/chilling to isolate the photoperiod effect
# (intended to set the y-axis range of the photoperiod plot below).
ospreeBB <- ospreeData
ospreeBB$photoadj1 <- ospreeBB$response.time
# NOTE(review): BUG (copy-paste) -- same as the chilling section: the loop
# fills `ospree.temp` (stale object from the previous section) instead of
# `ospreeBB`, so ospreeBB$photoadj1 stays equal to response.time, and the
# species-length `forceeff`/`chilleff` vectors are recycled to one element.
for(j in 1:nrow(ospree.temp)){
ospree.temp$photoadj1[j] = ospree.temp$response.time[j] - forceeff * ospree.temp$force.z[j] - chilleff * ospree.temp$chill.z[j]
}
plot(NA, xlim = c(min(xrange), max(xrange)), ylim = c(min(ospreeBB$photoadj1), max(ospreeBB$photoadj1)),
xlab = "Photoperiod (z-scored hours)", ylab = "Day of budburst",
bty = "n",
xaxt = "n",
yaxt = "n",
cex.lab = 1.2)
axis(side = 1, at = seq(min(xrange), max(xrange), by = 0.5), tcl = -.5, cex.axis = 0.9)
axis(side = 2, at = seq(round(min(ospreeBB$photoadj1),0), round(max(ospreeBB$photoadj1)), by = 20), tcl = -.5, las = 1, cex.axis = 0.9)
mtext(side = 3, text = "Seed Mass", adj = 0, cex = 1.2)
## Add species to plot
for(i in 1:length(plot.sp)){
stor1 <- matrix(NA, ncol = length(xrange), nrow = 4000)
stor2 <- matrix(NA, ncol = length(xrange), nrow = 4000)
for(k in 1:4000){
stor1[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$alphaPhotoSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
stor2[k, ] <- rnorm(n = length(xrange), mean = posterior_sm$alphaPhenoSp[k, which(specieslist == plot.sp[i])] + posterior_sm$betaPhotoSp[k, which(specieslist == plot.sp[i])] * xrange, sd = posterior_sm$sigmapheno_y[k])
}
temp1.hdr <- apply(stor1, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
temp2.hdr <- apply(stor2, MARGIN = 2, FUN = function(X) hdrcde::hdr(X, prob = c(50))$hdr[1, ])
polygon(x = c(xrange, rev(xrange)), y = c(temp1.hdr[1, ], rev(temp1.hdr[2, ])), col = col1.sp[i], border = NA)
polygon(x = c(xrange, rev(xrange)), y = c(temp2.hdr[1, ], rev(temp2.hdr[2, ])), col = col2.sp[i], border = NA)
}
for(i in 1:length(plot.sp)){
ospree.temp <- subset(ospreeData, ospreeData$speciesname == plot.sp[i])
## Add adjusted columns
ospree.temp$photoadj1 <- ospree.temp$response.time
for(j in 1:nrow(ospree.temp)){
ospree.temp$photoadj1[j] = ospree.temp$response.time[j] - forceeff[which(specieslist == plot.sp[i])] * ospree.temp$force.z[j] - chilleff[which(specieslist == plot.sp[i])] * ospree.temp$chill.z[j]
}
points(photoadj1 ~ jitter(photo.z, factor = 0.75), data = ospree.temp, pch = 21, col = "black", bg = col.sp[i], cex = 1)
}
legend("topright", legend = c(expression(paste("Acquisitive (", italic("Alnus incana"), ")")),
expression(paste("Conservative (", italic("Aesculus hippocastanum"), ")")),
expression(paste("Trait effect", " = 0", " (50% interval)", sep = "")),
expression(paste("Full model", " (50% interval)"))),
col = c("black", "black", rgb(0, 0, 0, alpha = 0.18), rgb(0, 0, 0, alpha = 0.85)), pt.bg = c(col.sp, NA, NA),
inset = 0.02, pch = c(21, 21, 15, 15), cex = 1, bty = "n")
my.label <- paste("f", ".", sep="")
put.fig.letter(label=my.label, location= "topleft", font=2)
#dev.off()
# pdf("figures/seedmass_prior_post_dist_narrow.pdf", width = 15, height = 25)
# par(mfrow = c(4,4))
# #plot priors against posterior_sms
# h1 <- hist(rnorm(1000, -15,10), col=rgb(1,0,1,1/4))
# hist(posterior_sm$muForceSp,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000, -15,10), col = rgb(1,0,1,1/4))
# hist(posterior_sm$muChillSp,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000, -15,10), col = rgb(1,0,1,1/4))
# hist(posterior_sm$muPhotoSp,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000, log10(100),0.5), col = rgb(1,0,1,1/4))
# hist(posterior_sm$mu_grand,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000,40,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$muPhenoSp,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000, 0,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$betaTraitxForce,add=T,col=rgb(0,0,1,1/4))
#
# h1 <- hist(rnorm(1000, 0,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$betaTraitxChill,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 0,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$betaTraitxPhoto,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 1,1), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigma_sp,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000,0.5,0.1), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigma_study,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 0.2,0.1), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigma_traity,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 5,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigmaForceSp,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 5,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigmaChillSp,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 5,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigmaPhotoSp,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 10,2), col = rgb(1,0,1,1/4))
# hist(posterior_sm$sigmaPhenoSp,col=rgb(0,0,1,1/4),add=T)
#
# h1 <- hist(rnorm(1000, 2,2), col = rgb(1,0,1,1/4), xlim = c(-6,20))
# hist(posterior_sm$sigmapheno_y,add=T,col=rgb(0,0,1,1/4))
#
# dev.off()
|
91cd1829f13683fb8d0c61625191a51ff3e6308c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/metafolio/examples/count_quasi_exts.Rd.R
|
2d9832f65289362df84f716aee2bb953730aa1ac
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
r
|
count_quasi_exts.Rd.R
|
library(metafolio)
### Name: count_quasi_exts
### Title: Take 'meta_sim' output objects and count quasi extinctions
### Aliases: count_quasi_exts
### ** Examples
## Not run:
##D set.seed(1)
##D w_plans <- list()
##D w_plans[[1]] <- c(5, 1000, 5, 1000, 5, 5, 1000, 5, 1000, 5)
##D w_plans[[2]] <- c(5, 5, 5, 1000, 1000, 1000, 1000, 5, 5, 5)
##D w_plans[[3]] <- c(rep(1000, 4), rep(5, 6))
##D w_plans[[4]] <- rev(w_plans[[3]])
##D plans_name_sp <- c("Full range of responses", "Most stable only",
##D "Lower half", "Upper half")
##D n_trials <- 50 # number of trials at each n conservation plan
##D n_plans <- 4 # number of plans
##D num_pops <- c(2, 4, 8, 16) # n pops to conserve
##D w <- list()
##D for(i in 1:n_plans) { # loop over number conserved
##D w[[i]] <- list()
##D for(j in 1:n_trials) { # loop over trials
##D w[[i]][[j]] <- matrix(rep(625, 16), nrow = 1)
##D w[[i]][[j]][-sample(1:16, num_pops[i])] <- 5
##D }
##D }
##D arma_env_params <- list(mean_value = 16, ar = 0.1, sigma_env = 2, ma = 0)
##D
##D x_arma_sp <- run_cons_plans(w, env_type = "arma", env_params = arma_env_params)
##D count_quasi_exts(x_arma_sp$plans_port, quasi_thresh = 200)
## End(Not run)
|
acb14900d3a589aa026db9eae07460e9df81d799
|
980ba508e74a2bae7724f32b74451453956ce61a
|
/functions/Screeplots.R
|
1f3ed42253ff2e5df9f46c5c6b4171a7e46403f4
|
[] |
no_license
|
admash/aJIVE_NOWAC_code
|
1727a4e571e9637b47c43f172d2e9a59e3a5d5bd
|
16baeeb7335187225b0693a5e48a181047bb495b
|
refs/heads/master
| 2023-04-25T07:46:14.848379
| 2021-05-03T08:52:59
| 2021-05-03T08:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 419
|
r
|
Screeplots.R
|
# Scree-plot helper: for each matrix in `data`, compute its singular values
# and plot them (minus the first, dominant one) against their index.
#
# NOTE(review): the name shadows the stats::screeplot() S3 generic; it is
# kept unchanged for backward compatibility with existing callers.
#
# Args:
#   data:  a list of numeric matrices (each is passed to svd()).
#   title: plot title forwarded to ggplot2::labs().
# Side effects: prints one ggplot per matrix; the return value is not useful.
screeplot <- function(data, title = '') {
  for (l in seq_along(data)) {                       # seq_along: safe for empty lists
    singular.values <- svd(data[[l]])[['d']]
    # Drop the first (largest) singular value and keep at most the next 49.
    # The original hard-coded `[2:50]`, which padded the vector with NA
    # whenever a matrix had fewer than 50 singular values.
    singular.values <- head(singular.values[-1], 49)
    index <- seq_along(singular.values)
    print(ggplot(as.data.frame(cbind(index, singular.values)),
                 aes(x = index, y = singular.values)) +
            geom_point() +
            geom_line() +
            labs(y = 'singular value', x = 'index', title = title))
  }
}
|
23c071233da7f56e68528d5233bac919188be59f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CompR/R/function_annexe.R
|
714dbb1190b3d7ef5402bf717e29d4f33029c17d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,450
|
r
|
function_annexe.R
|
# Weighted sum: multiplies each element of `X` by the matching weight in
# `poids` and totals the result (usual R recycling rules apply).
somme <- function(X, poids) {
  sum(X * poids)
}
# Pairwise contribution matrices, one per criterion.
#
# For each criterion k (column of `Pinoyau`) the result's element k is the
# nprod x nprod matrix whose [i, j] entry is
#     Pinoyau[i, k] / (Pinoyau[i, k] + Pinoyau[j, k]),
# i.e. the Bradley-Terry-style relative weight of product i in the pair
# (i, j) under criterion k.
#
# Args:
#   Pinoyau: numeric matrix, one row per product, one column per criterion.
# Returns:
#   A list of length ncol(Pinoyau) of nprod x nprod numeric matrices.
contrindiv <- function(Pinoyau)
{
  ncrit <- ncol(Pinoyau)
  nprod <- nrow(Pinoyau)
  # [1 | I] selector is loop-invariant, so build it once (the original
  # rebuilt it on every iteration): (u %*% stacked)[i, j] = p[j] + p[i].
  u <- cbind(rep(1, nprod), diag(1, nprod))
  res <- vector("list", ncrit)   # preallocate instead of growing the list
  for (k in seq_len(ncrit))
  {
    p <- Pinoyau[, k]
    # Stack p^T on top of a matrix whose every column is p, so the product
    # with u yields the pairwise denominators p[i] + p[j].
    stacked <- rbind(t(p), matrix(p, nrow = nprod, ncol = nprod))
    denom <- u %*% stacked
    num <- t(matrix(p, nrow = nprod, ncol = nprod))   # num[i, j] = p[j]
    res[[k]] <- t(num / denom)                        # [i, j] = p[i] / (p[i] + p[j])
  }
  return(res)
}
# One EM-style update of the latent-class mixing proportions `lambda`.
#
# Args:
#   Pi:     list of length Tcla; Pi[[t]] is the preference matrix
#           (products x criteria) for latent class t, fed to contrindiv().
#   Data:   S4 object with slots @Paircomp (one list per criterion of
#           per-subject pairwise-comparison matrices) and @Cons (the
#           subjects; slot names suggest "consumers" -- confirm).
#   lambda: current vector of class mixing proportions, length Tcla.
# Returns:
#   list(lambdanouv = updated mixing proportions (length Tcla),
#        Zhtnouv    = subjects x classes posterior membership matrix,
#        lvrnouv    = observed-data log-likelihood at the current parameters).
estimlambda<-function(Pi,Data,lambda)
{
Tcla<-length(Pi)
ncrit<-length(Data@Paircomp)
nsujet<-length(Data@Cons)
puissance<-vector("list")
for (t in 1:Tcla)
{
# pairwise contribution matrices for class t, one per criterion
contrindivt<-contrindiv(Pi[[t]])
puissancet<-vector("list")
for (k in 1:ncrit)
{
base<-as.matrix(contrindivt[[k]])
Matbase<-lapply(Data@Paircomp[[k]],as.matrix)
# per-subject likelihood contribution for criterion k: the product over
# cells of base^count (contri(X, A) returns A^X elementwise)
puissancet[[k]]<-simplify2array(lapply(lapply(Matbase,contri,base),prod))
}
# combine criteria by multiplying per-subject likelihoods, computed as
# exp(sum(log(.))), then weight the class by its current lambda[t]
puissance[[t]]<-exp(rowSums(log(simplify2array(puissancet))))
puissance[[t]]<-puissance[[t]]*lambda[t]
}
# observed-data log-likelihood: sum over subjects of log(sum over classes)
lvrnouv<-sum(log(rowSums(simplify2array(puissance))))
# E-step: posterior class memberships; M-step: new mixing proportions
Zhtnew<-(simplify2array(puissance))/(rowSums(simplify2array(puissance)))
lambdanew<-colSums(Zhtnew)/nsujet
resu<-list(lambdanouv=lambdanew,Zhtnouv=Zhtnew,lvrnouv=lvrnouv)
return(resu)
}
# Elementwise power: raises each entry of `A` to the corresponding entry of
# `X` (standard R recycling applies). estimlambda() uses this to accumulate
# per-comparison likelihood contributions base^count before taking prod().
contri <- function(X, A) A^X
|
6c6e93fa5d8f2b3d875393831e210f436160247f
|
db5539ed692b76179486d44db061599e5b3f37bb
|
/plot1.R
|
89741bcaa270dcb17f32f6a7a7bdf5f97607aa31
|
[] |
no_license
|
Bluntgiantpanda/exploratory_analysis_project1
|
01c9596160789ce7036691d1ab2935ffc55bd939
|
12e35f83dfb4e3e619228bb4f2bfa7a58aa000c4
|
refs/heads/master
| 2021-01-12T21:09:40.593445
| 2016-07-24T19:36:35
| 2016-07-24T19:36:35
| 64,076,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
plot1.R
|
# Read the (pre-downloaded) power-consumption table, keep the rows for
# 1-2 Feb 2007, and save a histogram of global active power to plot1.png.
# NOTE(review): choose.files() is Windows-only; file.choose() is the portable
# equivalent. Also `T`/`F` should be spelled TRUE/FALSE, and `data` shadows
# the base function utils::data().
data <- read.table(choose.files(), header= T, sep = ";", stringsAsFactors = F, dec = ".")
names(data)
newdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"),] # only extract the rows between 1/2/2007 and 2/2/2007
str(newdata)
# NOTE(review): as.numeric() silently turns any non-numeric entries
# (presumably missing-value markers in the raw file) into NA with a warning.
globalactivepower <- as.numeric(newdata$Global_active_power)
png("plot1.png", width = 480, height = 480)
hist(globalactivepower, col = "red", main = "Global Active Power", xlab = " Global Active Power(kilowatts)")
dev.off()
|
fd96992cd78a471832f797a9b851dac0fe0e49c5
|
523b71e8c9f9c792ae04ebcc32def70cc5b73dd7
|
/biolog/script/20181210_dataprep.R
|
9adf455599049fcc8400cbe34f09c3e632e410a4
|
[] |
no_license
|
padpadpadpad/alex_biolog_sequencing
|
10c622a923782a0b9378c7145dc58f35d947f018
|
cb7c96d169c8aecc87accc4a25d2cfae738ac751
|
refs/heads/master
| 2021-01-20T01:31:00.909879
| 2019-09-17T09:19:26
| 2019-09-17T09:19:26
| 101,292,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,274
|
r
|
20181210_dataprep.R
|
# prepping biolog files
rm(list = ls())
# load packages
library(ggplot2)
library(dplyr)
library(tidyr)
library(viridis)
library(ggridges)
library(lme4)
library(patchwork)
# figure path
path_fig <- 'biolog/figs'
# extra functions
# Read one Biolog plate sheet from an Excel file and return it in long format.
#
# The sheet is assumed to be a plate grid whose first column holds the row
# labels (renamed `x_1` by janitor::clean_names()) and whose columns 2-13
# hold the 12 plate columns. Each cell becomes one row with a
# "<rowlabel>_<colnumber>" well id.
#
# Args:
#   x:    path to the .xlsx file.
#   type: 'raw_data' (default) adds a `time` column taken from the file's
#         modification time; 'meta_data' renames the value column `od` to
#         `treatment` instead.
# Returns: a data frame with columns file, well, od (+ time for raw plates;
#          od becomes treatment for metadata plates).
# NOTE(review): `type` is not validated with match.arg(); any other string
# silently falls through and returns the columns without `time`.
read_plate <- function(x, type = c('raw_data', 'meta_data')){
if(missing(type)){type <- 'raw_data'}
# read -> clean names -> keep the 13 plate columns -> reshape to long
temp <- readxl::read_excel(x) %>%
janitor::clean_names() %>%
dplyr::select(1:13) %>%
tidyr::gather(., 'well', 'od', 2:13) %>%
dplyr::mutate(., well = readr::parse_number(well),
well = paste(x_1, well, sep = '_'),
file = basename(tools::file_path_sans_ext(x))) %>%
dplyr::select(file, well, od)
# raw plates get a timestamp from the file's modification time
if(type == 'raw_data'){temp <- dplyr::mutate(temp, time = file.mtime(x))}
# metadata plates: the 'od' column actually holds treatment/substrate labels
if(type == 'meta_data'){temp <- rename(temp, treatment = od)}
return(temp)
}
# source extra functions
source('biolog/script/functions.R')
# load in data ####
# list all files
files <- list.files('biolog/data/20181203', full.names = TRUE)
# NOTE(review): dead statement -- this single-plate read is immediately
# overwritten by the map_df() call below; looks like leftover debugging code.
d <- MicrobioUoE::bind_biolog_all('biolog/data/20181203/T6_8.xlsx', sheets = 'Sheet1')
# read in all files
d <- purrr::map_df(files, MicrobioUoE::bind_biolog_all, sheets = 'Sheet1')
# read in metadata for substrates
meta <- read_plate('biolog/data/biolog_ecoplate_metadata.xlsx', type = 'meta_data') %>%
select(., -file) %>%
rename(substrate=treatment)
d <- merge(d, meta, by = 'well')
# read in metadata for treatments
meta2 <- read.csv('biolog/data/20181203_metadata.csv', stringsAsFactors = FALSE) %>%
gather(., set, sample, starts_with('set')) %>%
merge(., read.csv('biolog/data/20181203_metadata_treatment.csv', stringsAsFactors = FALSE), by = 'sample', all.x = TRUE) %>%
mutate(., evolved = case_when(sample == 'M9' ~ 'control',
grepl('anc', sample) ~ 'ancestor',
TRUE ~ evolved),
population = ifelse(is.na(population), evolved, population))
# bind all pieces together
d <- separate(d, file, c('tp', 'plate'), sep = '_') %>%
mutate(., set = case_when(readr::parse_number(well) %in% 1:4 ~ 'set_1',
readr::parse_number(well) %in% 5:8 ~ 'set_2',
readr::parse_number(well) %in% 9:12 ~ 'set_3'))
d <- merge(d, meta2, by = c('set', 'plate'))
# voila
# save this out
write.csv(d, 'biolog/data/20181203_processed.csv', row.names = FALSE)
# data and metadata together
d <- merge(d, meta, by = 'id')
# load in ancestral data
d_ancest <- MicrobioUoE::bind_biolog_all('biolog/data/20170124_Ancestors_gn2biolog.xlsx', sheets = 'Sheet1') %>%
mutate(id = id + 50,
pop = 13)
meta_ancest <- data.frame(id = 51:58, treatment = 'wild_type', stringsAsFactors = FALSE)
d_ancest <- merge(d_ancest, meta_ancest, by = 'id') %>%
filter(., id > 54)
d <- bind_rows(d, d_ancest) %>%
filter(., id != 49)
# which columns are substrates
Carb_cols <- colnames(d)[grepl('X', colnames(d))]
# stack
d_stack <- gather_(d, 'C_source', 'OD', Carb_cols) %>%
mutate(., C_source = as.numeric(gsub('X', '', C_source)))
# filter out blank
blank <- filter(d_stack, id == 50) %>%
rename(., blank = OD) %>%
select(., blank, C_source)
d_stack <- filter(d_stack, id != 50) %>%
merge(., blank, by = 'C_source') %>%
mutate(., OD_cor = OD - blank)
d_noWT <- filter(d_stack, treatment != 'wild_type')
WT <- filter(d_stack, treatment == 'wild_type') %>%
rename(., WT = OD_cor) %>%
select(., C_source, WT)
# add column for ranked mean OD_cor
d_noWT <- group_by(d_noWT, C_source) %>%
mutate(mean_OD = mean(OD_cor)) %>%
ungroup() %>%
merge(., WT, by = 'C_source') %>%
mutate(., rank = dense_rank(desc(mean_OD)))
d_stack2 <- group_by(d_stack, C_source) %>%
mutate(mean_OD = mean(OD_cor)) %>%
ungroup() %>%
merge(., WT, by = 'C_source') %>%
mutate(., rank = dense_rank(desc(mean_OD)))
# plot performance across wells, ranked by best performance
plot1a <- group_by(d_stack, id) %>%
arrange(., desc(OD_cor)) %>%
mutate(., rank = 1:96) %>%
ggplot(.) +
geom_line(aes(rank, OD_cor, group = id, col = treatment), alpha = 0.25) +
stat_summary(aes(rank, OD_cor, col = treatment, group = treatment), fun.y = mean, geom = 'line') +
scale_color_viridis(discrete = TRUE) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
theme(legend.position = c(0.9, 0.8)) +
ylab('optical density') +
xlab('substrate rank') +
ggtitle('Substrate rank across populations') +
guides(col = guide_legend(override.aes = list(alpha = 1)))
# plot performance across well, without ranking by best performance
plot1b <- ggplot(d_stack) +
geom_line(aes(C_source, OD_cor, group = id, col = treatment), alpha = 0.25) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
theme(legend.position = 'none') +
ylab('optical density') +
xlab('substrate') +
ggtitle('Performance across substrates') +
scale_color_viridis(discrete = TRUE)
plot1 <- gridExtra::grid.arrange(plot1a, plot1b, ncol = 1)
ggsave(file.path(path_fig, 'performance_plot.pdf'), plot1, height = 10, width = 10)
ggsave(file.path(path_fig, 'performance_plot2.pdf'), plot1a + facet_wrap(~treatment) + scale_color_manual(values = rep('black', 3)), height = 5, width = 12)
# Calculate phenotypic variance ####
# Take all the distance matrices of each pairwise combination of clones in each treatment
# average Euclidean distance
pop_dists_df <- filter(d, treatment %in% c('comm', 'no_comm')) %>%
group_by(., treatment) %>%
do(pop_dists(x = .)) %>%
data.frame() %>%
rename(., clone_i = row, clone_j = col)
# create average phenotypic diversity per population
# V_P = mean pairwise Euclidean distance between clones within a treatment
# this is similar to a PCA and PCoA and betadisper()?
V_P <- group_by(pop_dists_df, treatment) %>%
summarise(., V_P = mean(value)) %>%
data.frame()
# only two points here!!! (one V_P value per treatment)
# calculate V_G - genotypic variance ####
# variance of OD across each genotype averaged over all the environments
V_G <- group_by(d_stack, treatment, C_source) %>%
summarise(V_G = var(OD_cor)) %>%
data.frame()
# population-level V_G: mean of the per-substrate variances within a treatment
V_G_pop <- group_by(V_G, treatment) %>%
summarise(., V_G = mean(V_G)) %>%
data.frame()
# calculate V_E - environmental variance
# average variance of each clone across all the environments
V_E <- group_by(d_stack, treatment, id) %>%
summarise(V_E = var(OD_cor)) %>%
data.frame()
V_E_pop <- group_by(V_E, treatment) %>%
summarise(., V_E = mean(V_E)) %>%
data.frame()
# analyses
# Genotypic variance: mixed model with carbon source as a random intercept
mod_vg <- lmer(V_G ~ treatment + (1|C_source), V_G)
lsmeans::lsmeans(mod_vg, pairwise ~ treatment)
# NOPE (no significant treatment differences)
# Environmental variance (the original comment said "Phentypic", but the model
# below is fit to V_E, the per-clone environmental variance)
mod_pg <- lm(V_E ~ treatment, V_E)
lsmeans::lsmeans(mod_pg, pairwise ~ treatment)
# plot genotypic and environmental variance across treatments ####
# plot V_G and V_E ####
# boxplot + white median crossbar + jittered raw points, one panel per variance
V_G_plot <- ggplot(V_G, aes(treatment, V_G)) +
geom_boxplot(aes(fill = treatment, col = treatment), outlier.shape = NA, width = 0.5, position = position_dodge(width = 0.55)) +
stat_summary(position = position_dodge(width = 0.55), geom = 'crossbar', fatten = 0, color = 'white', width = 0.4, fun.data = function(x){ return(c(y=median(x), ymin=median(x), ymax=median(x)))}) +
geom_point(aes(treatment, V_G, col = treatment), shape = 21, fill ='white', position = position_jitter(width = 0.1)) +
ylab('genotypic variance') +
xlab('Treatment') +
scale_x_discrete(labels = c('Community', 'No Community', 'Wild Type')) +
theme_bw() +
theme(legend.position = 'none') +
ggtitle(expression(Genotypic~variance~(V[G]))) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
ylim(c(0, 0.46))
V_E_plot <- ggplot(V_E, aes(treatment, V_E)) +
geom_boxplot(aes(col = treatment, fill = treatment), outlier.shape = NA, width = 0.5, position = position_dodge(width = 0.55)) +
stat_summary(position = position_dodge(width = 0.55), geom = 'crossbar', fatten = 0, color = 'white', width = 0.4, fun.data = function(x){ return(c(y=median(x), ymin=median(x), ymax=median(x)))}) +
geom_point(aes(treatment, V_E, col = treatment), shape = 21, fill ='white', position = position_jitter(width = 0.2)) +
ylab('environmental variance') +
xlab('Treatment') +
scale_x_discrete(labels = c('Community', 'No Community', 'Wild Type')) +
theme_bw() +
theme(legend.position = 'none') +
ggtitle(expression(Environmental~variance~(V[E]))) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE)
# plot: save the two variance panels side by side
plot2 <- gridExtra::grid.arrange(V_G_plot, V_E_plot, ncol = 2)
ggsave(file.path(path_fig, 'geno_enviro_var_plot.pdf'), plot2, height = 5, width = 10)
# plot all carbon sources ####
# y-axis labels: map substrate rank back to the substrate name
y_axis_change <- group_by(d_stack2, rank) %>%
summarise(C_source = unique(C_source)) %>%
pull(C_source)
# ridgeline densities of OD per ranked substrate; black points mark the WT value
ggplot(d_stack2) +
geom_density_ridges2(aes(x = OD_cor, y = factor(rank), fill = treatment, col = treatment), alpha = 0.5, rel_min_height = 0.01) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
geom_point(aes(x = WT, y = factor(rank)), size = 0.5) +
theme_bw() +
scale_y_discrete(labels = y_axis_change) +
ylab('Carbon source') +
xlab('Optical Density')
ggsave(file.path(path_fig, 'crazy_ggjoy_plot.pdf'), last_plot(), height = 12, width = 6)
# Calculate G x E interaction for each population ####
# see Barrett et al. 2005 Am Nat and Venail et al. 2008 Nature
# 1. calculate responsiveness - indicates differences in the environmental variances and thus measures diversity of resource exploitation strategies (specialists and generalists)
# sum (V_Gj - V_Gi)^2/(2*n_genotypes(n-genotypes - 1))
# create dataframe for standard deviation per clone across environments
# NOTE(review): this uses the raw OD column, whereas earlier sections use
# OD_cor — confirm which is intended
d_sd <- group_by(d_stack, treatment, pop, id) %>%
summarise(., sd_E = sd(OD)) %>%
data.frame()
# create 2 copies of this for merging later (one keyed by clone_j, one by clone_i)
sd_j_clone <- rename(d_sd, clone_j = id, sd_j = sd_E)
sd_i_clone <- rename(d_sd, clone_i = id, sd_i = sd_E)
# create every pairwise combination of 1:n (clones/genotypes) for each population;
# keep only clone_j > clone_i so each unordered pair appears once
d_R <- group_by(d_sd, treatment, pop) %>%
do(data.frame(expand.grid(clone_j = .$id, clone_i = .$id))) %>%
ungroup() %>%
filter(., clone_j > clone_i) %>%
merge(., sd_j_clone, by = c('clone_j', 'treatment', 'pop')) %>%
merge(., sd_i_clone, by = c('clone_i', 'treatment', 'pop'))
# calculate R for each pairwise combination:
# squared difference of the two SDs, normalised by n*(n-1)
d_R <- group_by(d_R, treatment, pop) %>%
mutate(., R_comb = (sd_j - sd_i)^2/(length(unique(clone_i))*(length(unique(clone_i))-1))) %>%
ungroup()
# calculate responsiveness for each population
# sum of all the pairwise combinations
d_R_pop <- group_by(d_R, treatment, pop) %>%
summarise(., R_pop = sum(R_comb)) %>%
data.frame()
# Plot responsiveness (one point per population)
r_plot <- ggplot(d_R_pop, aes(treatment, R_pop)) +
geom_point(aes(treatment, R_pop, col = treatment), size = 3) +
ylab('responsiveness') +
xlab('Treatment') +
theme_bw() +
theme(legend.position = 'none') +
ggtitle('(a) Responsiveness') +
scale_colour_viridis(discrete = TRUE)
# not significantly different
# summary(lm(R_pop ~ treatment, d_R_pop))
# calculate inconsistency ####
# Inconsistency (see Venail et al. 2008): combines the pairwise SDs with the
# pairwise Pearson correlation of performance profiles — low correlation
# between two clones' profiles means high inconsistency.
# NOTE(review): clone id 50 is excluded here; assumed to be a known problem
# clone — confirm with the data provenance.
d <- filter(d, id != 50)
# prep data for calculating correlations
d_pearson <- group_by(d, treatment, pop) %>%
  do(pop_cor(x = ., id = 'id', rows_delete = c('treatment', 'id', 'sheet', 'pop'))) %>%
  data.frame()
# merge the pairwise correlations onto the responsiveness dataframe and compute
# each pair's inconsistency contribution i = sd_j * sd_i * (1 - r) / (n*(n-1))
d_Inconsist <- merge(d_R, d_pearson, by = c('treatment', 'pop', 'clone_j', 'clone_i')) %>%
  mutate(., i = (sd_j*sd_i*(1-pear_cor))/(length(unique(clone_i))*(length(unique(clone_i))-1)))
# population-level inconsistency: sum of the pairwise contributions,
# plus the mean pairwise correlation for reference
d_I_pop <- group_by(d_Inconsist, treatment, pop) %>%
  summarise(., I_pop = sum(i),
            pear_pop = mean(pear_cor)) %>%
  data.frame()
# plot inconsistency (one point per population)
I_plot <- ggplot(d_I_pop, aes(treatment, I_pop)) +
  geom_point(aes(treatment, I_pop, col = treatment), size = 3) +
  ylab('Inconsistency') +
  xlab('Treatment') +
  theme_bw() +
  theme(legend.position = 'none') +
  ggtitle('(b) Inconsistency') +
  scale_color_viridis(discrete = TRUE)
p_V_by_G <- r_plot + I_plot
ggsave(file.path(path_fig, 'responsiveness.pdf'), p_V_by_G, height = 5, width = 10)
# FIX: the model previously used `treat`, which is not a column of d_I_pop
# (the grouping variable is `treatment`), so lm() errored with
# "object 'treat' not found".
summary(lm(I_pop ~ treatment, d_I_pop))
# try a pca ####
# build a unique clone identifier "treatment_id" for use as row names
d <- unite(d, 'id2', c(treatment, id), sep = '_', remove = FALSE)
# data set ready for PCA (clones 49 and 50 excluded)
d2 <- filter(d, id != 50 & id != 49)
# keep only the substrate OD columns (all start with 'X')
d_PCA <- d2 %>%
select(., starts_with('X'))
row.names(d_PCA) <- d2$id2
# create matrix of pairwise Euclidean distances between clones
Euclid_mat <- dist(d_PCA)
# get variables for PCA (grouping metadata aligned with d_PCA rows)
d_vars <- select(d2, id, id2, treatment) %>%
mutate(., treatment = as.factor(treatment))
row.names(d_vars) <- d2$id2
PCA <- prcomp(d_PCA)
biplot(PCA)
# quick and dirty beta disper model
mod_adonis <- vegan::adonis(d_PCA ~ treatment, d_vars) # yes they have different centroids
mctoolsr::calc_pairwise_permanovas(Euclid_mat, d_vars, 'treatment')
# NOTE(review): this recomputes the same distance matrix built above — redundant
Euclid_mat <- dist(d_PCA)
# multivariate dispersion: distance of each clone to its treatment centroid
mod <- vegan::betadisper(Euclid_mat, d_vars$treatment)
anova(mod)
TukeyHSD(mod)
# get betadisper dataframes
betadisper_dat <- get_betadisper_data(mod)
# do some transformations on the data: eigenvalues as a proportion of the total
betadisper_dat$eigenvalue <- mutate(betadisper_dat$eigenvalue, percent = eig/sum(eig))
# add convex hull points (hull closed by repeating the first vertex)
betadisper_dat$chull <- group_by(betadisper_dat$eigenvector, group) %>%
do(data.frame(PCoA1 = .$PCoA1[c(chull(.$PCoA1, .$PCoA2), chull(.$PCoA1, .$PCoA2)[1])],
PCoA2 = .$PCoA2[c(chull(.$PCoA1, .$PCoA2), chull(.$PCoA1, .$PCoA2)[1])])) %>%
data.frame()
# combine centroid and eigenvector dataframes to plot spokes from each centroid
betadisper_lines <- merge(select(betadisper_dat$centroids, group, PCoA1, PCoA2), select(betadisper_dat$eigenvector, group, PCoA1, PCoA2), by = c('group'))
# PCoA ordination: centroids (large), clones (small), hulls and centroid spokes
# NOTE(review): the axis percentages in the labels below are hardcoded —
# confirm they match betadisper_dat$eigenvalue$percent for this dataset
ggplot() +
geom_point(aes(PCoA1, PCoA2, col = group), betadisper_dat$centroids, size = 4) +
geom_point(aes(PCoA1, PCoA2, col = group), betadisper_dat$eigenvector) +
geom_path(aes(PCoA1, PCoA2, col = group), betadisper_dat$chull ) +
geom_segment(aes(x = PCoA1.x, y = PCoA2.x, yend = PCoA2.y, xend = PCoA1.y, col = group), betadisper_lines) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
ylab('PCoA Axis 2 [10.3%]') +
xlab('PCoA Axis 1 [36.1%]') +
scale_color_viridis('', discrete = TRUE) +
#coord_fixed(sqrt(betadisper_dat$eigenvalue$percent[2]/betadisper_dat$eigenvalue$percent[1])) +
coord_fixed() +
theme(legend.position = 'top') +
ggtitle('PCoA across treatments') +
guides(col = guide_legend(ncol = 8))
ggsave(file.path(path_fig, 'PCoA_across_treatments.pdf'), last_plot(), height = 5, width = 7)
# distance plot: distance to treatment centroid, per treatment
ggplot(betadisper_dat$distances, aes(group, distances, fill = group, col = group)) +
geom_boxplot(outlier.shape = NA, width = 0.5, position = position_dodge(width = 0.55)) +
stat_summary(position = position_dodge(width = 0.55), geom = 'crossbar', fatten = 0, color = 'white', width = 0.4, fun.data = function(x){ return(c(y=median(x), ymin=median(x), ymax=median(x)))}) +
geom_point(shape = 21, fill ='white', position = position_jitterdodge(dodge.width = 0.55, jitter.width = 0.2)) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
scale_color_viridis('', discrete = TRUE, labels = c('Community', 'No Community', 'Wild Type')) +
scale_fill_viridis('', discrete = TRUE, labels = c('Community', 'No Community', 'Wild Type')) +
ylab('Distance to centroid') +
xlab('') +
scale_x_discrete(labels = c('Community', 'No Community', 'Wild Type'))
# linear model: does dispersion (distance to centroid) differ by treatment?
mod <- lm(distances ~ group, betadisper_dat$distances)
summary(mod)
|
f77859085ce6b809fc8233974af48816ac9d2116
|
c8c6861c5b847a7a7c22402537cf616780dcbfe8
|
/historical_stock_data.R
|
f5772fdee673ccde09fb674cc34ee1ba2a885fa0
|
[] |
no_license
|
rafiqd/StockData
|
3c7e630ba93a248fbbaad303a8171a0ae613a249
|
2d1f82d4ddd78ed739ce9eb1a863dd89a71f4a50
|
refs/heads/master
| 2021-01-23T16:18:22.829229
| 2017-06-04T04:57:15
| 2017-06-04T05:02:20
| 93,291,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,469
|
r
|
historical_stock_data.R
|
library(RCurl)
library(XML)
library(ggplot2)
# Scrape the historical price table for `symbol` from the NASDAQ website and
# return it as a data.frame with columns Date, Open, High, Low, Close/Last,
# volume. One row per trading day.
# NOTE(review): this depends on the exact HTML structure of the
# nasdaq.com/symbol/<sym>/historical page (div#historicalContainer table);
# if the site layout changes, the xpath and the regex cleanup below break.
get_historical_data <- function(symbol){
url <- sprintf("https://www.nasdaq.com/symbol/%s/historical",
symbol)
html <- getURL(url, followlocation = TRUE)
doc = htmlTreeParse(html, useInternalNodes = TRUE)
# one XML node per table row
text <- xpathSApply(doc, "//div[@id='historicalContainer']//table/tbody//tr")
data_list = list()
i <- 1
for(t in text){
t <- as(t, "character" )
# strip whitespace/entities and row tags, then turn cell boundaries into commas
x <- gsub(" |[\r\n]|[\n]|( )|<tr>|</tr>", "", t)
x <- gsub("</td><td>", ",", x)
x <- gsub("<td>|</td>", "", x)
# an all-empty row (",,,,,") is a spacer row on the page — skip it
if(x == ",,,,,")
next()
x <- strsplit(x, ",")
# cells are: date, open, high, low, close, volume
dat <- data.frame(as.Date(x[[1]][1], "%m/%d/%Y"),
as.numeric(x[[1]][2]),
as.numeric(x[[1]][3]),
as.numeric(x[[1]][4]),
as.numeric(x[[1]][5]),
as.numeric(x[[1]][6])
)
data_list[[i]] <- dat
i <- i + 1
}
# stack the per-row data.frames and name the columns
big_data = do.call(rbind, data_list)
colnames(big_data) <- c("Date", "Open", "High", "Low", "Close/Last", "volume")
return(big_data)
}
# Compute Wilder's Relative Strength Index (RSI) from daily closing prices.
#
# Args:
#   dat:   data.frame with at least a "Date" column and a "Close/Last" column
#          of closing prices; rows may arrive in any order (sorted by Date here).
#   start: 1-based row index at which the first RSI value is produced. The
#          first (start - 1) price changes seed the initial average gain/loss,
#          so the RSI period is (start - 1) days (default 15 -> 14-day RSI).
#
# Returns: `dat` sorted by Date with three extra columns appended:
#   "Average Gain", "Average Loss" and "RSI". Rows before `start` hold 0
#   placeholders (no RSI is defined for them).
get_rsi <- function(dat, start = 15){
  dat <- dat[order(dat$Date), ]
  n <- nrow(dat)
  period <- start - 1  # number of price changes in the RSI window
  # Preallocate to full length instead of growing the vectors inside the loop
  # (the original started with rep(0, start - 1) and grew element by element).
  avg_loss <- numeric(n)
  avg_gain <- numeric(n)
  rsi <- numeric(n)
  # Guard against too little data: the original `start:nrow(dat)` counted
  # DOWN when nrow(dat) < start and indexed out of range.
  if (n >= start) {
    for (i in start:n) {
      if (i == start) {
        # Seed the averages with the simple mean gain/loss over the first window.
        first_avg_gain <- 0
        first_avg_loss <- 0
        for (j in 2:start) {
          diff <- dat[j, "Close/Last"] - dat[j - 1, "Close/Last"]
          if (diff > 0) {
            first_avg_gain <- first_avg_gain + diff
          } else {
            first_avg_loss <- first_avg_loss + (diff * -1)
          }
        }
        avg_loss[i] <- first_avg_loss / period
        avg_gain[i] <- first_avg_gain / period
        rs <- avg_gain[i] / avg_loss[i]
        rsi[i] <- 100 - (100 / (1 + rs))
        next()
      }
      current_gain <- 0
      current_loss <- 0
      current_delta <- dat[i, "Close/Last"] - dat[i - 1, "Close/Last"]
      if (current_delta > 0) {
        current_gain <- current_delta
      } else {
        current_loss <- current_delta * -1
      }
      # Wilder smoothing: previous average weighted by (period - 1) plus the
      # current gain/loss, divided by the period. (`period - 1` is `start - 2`
      # in the original, for the reason explained there: the first data point
      # has no gain/loss, so `start` is one higher than the period.)
      avg_loss[i] <- (avg_loss[i - 1] * (period - 1) + current_loss) / period
      avg_gain[i] <- (avg_gain[i - 1] * (period - 1) + current_gain) / period
      rs <- avg_gain[i] / avg_loss[i]
      rsi[i] <- 100 - (100 / (1 + rs))
    }
  }
  full_dat <- cbind(dat, "Average Gain" = avg_gain, "Average Loss" = avg_loss, "RSI" = rsi)
  return(full_dat)
}
# Plot the RSI column of `dat` against Date.
#
# Args:
#   dat:       data.frame with "Date" and "RSI" columns (e.g. from get_rsi()).
#   days_back: requested days of history; capped at 51 because only ~51 rows
#              of history are scraped.
#
# Returns: a ggplot object (from qplot), or NULL invisibly when days_back is
#   out of range.
plot_rsi <- function(dat, days_back = 51){
  # FIX: the original `if` had no braces, so `return()` executed
  # unconditionally and the plot was never drawn. The early return now only
  # happens when days_back exceeds the available history.
  if (days_back > 51) {
    print("Not enough data to go further back")
    return(invisible(NULL))
  }
  qplot(Date, RSI, data = dat)
}
# Entry point: scrape AMZN price history, compute the 14-day RSI and plot it.
main <- function(){
  historical_data <- get_historical_data("AMZN")
  rsi_dat <- get_rsi(historical_data, 15)
  # drop the warm-up rows that only seed the RSI averages
  sub_setted <- rsi_dat[15:nrow(rsi_dat), ]
  # FIX: the original called plot_rsi(dat), but `dat` is not defined in this
  # scope ("object 'dat' not found"); plot the subset that was just computed.
  plot_rsi(sub_setted)
}
|
5d28a1332865eece8f0b4de9a46737d4b666a3f9
|
ed8d071495fc93ea0f724b5bb6d23961342e3c0d
|
/man/cyclic_fit.Rd
|
81e9bd55e18b77ca5d40d2ac1f5c86282573eb75
|
[
"MIT"
] |
permissive
|
staggelab/spibayes
|
0f456c8b260f6473a4e015dcdfccea5996f85a76
|
364024c1facc8598f50f7d68f0a9425f1ea188ee
|
refs/heads/master
| 2023-04-12T19:33:01.296625
| 2020-11-12T16:43:06
| 2020-11-12T16:43:06
| 254,717,440
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 908
|
rd
|
cyclic_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_run.R
\name{cyclic_fit}
\alias{cyclic_fit}
\title{Run Cyclic model}
\usage{
cyclic_fit(
spi_input,
n_chains = 1,
iter = 1000,
cores = 1,
engine = "sample",
output_dir = getwd()
)
}
\arguments{
\item{engine}{options are sample (full MCMC), variational (variational approximation of posterior), optimize (Stan's LBFGS algorithm),}
\item{data}{Dataframe with the underlying data. Columns must include variable names}
\item{spline_type}{List of spline types. Either cc or cr are accepted. Names must agree with knot_loc}
\item{knot_loc}{List of knot locations}
}
\value{
The fitted model object produced by the chosen Stan engine.
}
\description{
Fits the cyclic (seasonal) spline model to the prepared SPI input using Stan.
Fitting can be done by full MCMC sampling, by a variational approximation of
the posterior, or by optimization, as selected via \code{engine}.
}
|
6ddb4609e95497871921cb8c4b605358d65e6c8e
|
3fe120aa90b0038d20249c62c9a289606a869994
|
/R/Figure_1.R
|
596fae4443ce5ab7c548eab731b749189c332d69
|
[] |
no_license
|
bradduthie/RandomMatrixStability
|
571aa3664336ec0d138adddaea3bd10ad7151ced
|
cb445a765119e235e144277614bfdf19709e78e7
|
refs/heads/master
| 2022-11-20T14:33:56.175466
| 2022-11-15T22:00:54
| 2022-11-15T22:00:54
| 125,352,540
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,209
|
r
|
Figure_1.R
|
#' Find a stabilised system
#'
#' Searches random matrices (M) for one whose complex system is unstable
#' before the addition of variation in component response rate (gamma), but
#' stable after its addition. Response rates are bimodal: half the components
#' get 1.95 and half get 0.05; the "no variation" case uses their mean.
#'
#'@return If successful, a list that includes a matrix (A0) that is unstable in
#'the absence of component response rate variation and a matrix (A1) that is
#'identical to A0 but includes variation in component response rate and is
#'stable. Returns NULL (invisibly) if no such matrix is found within `iters`
#'attempts.
#'@param S Size of the complex system (must be even, as half the components
#'receive each response rate)
#'@param C Connectedness of complex system (probability an off-diagonal
#'interaction is non-zero)
#'@param Osd Standard deviation of interaction strength
#'@param iters Number of random matrices to simulate
#'@examples
#'find_bgamma(S = 200, C = 0.05, Osd = 0.4, iters = 2);
#'@export
find_bgamma <- function(S = 200, C = 0.05, Osd = 0.4, iters = 10000){
  while (iters > 0) {
    # random interaction strengths, thinned to connectance C
    A_dat <- rnorm(n = S * S, mean = 0, sd = Osd)
    A_mat <- matrix(data = A_dat, nrow = S)
    C_dat <- rbinom(n = S * S, size = 1, prob = C)
    C_mat <- matrix(data = C_dat, nrow = S, ncol = S)
    A_mat <- A_mat * C_mat
    # bimodal component response rates: half fast (1.95), half slow (0.05)
    gammas <- c(rep(1.95, S / 2), rep(0.05, S / 2))
    mu_gam <- mean(gammas)
    diag(A_mat) <- -1  # self-regulation
    A1 <- gammas * A_mat  # with response-rate variation
    A0 <- mu_gam * A_mat  # without (all components at the mean rate)
    # stability criterion: all eigenvalue real parts negative
    A0_r <- Re(eigen(A0, only.values = TRUE)$values)
    A1_r <- Re(eigen(A1, only.values = TRUE)$values)
    if (max(A0_r) >= 0 && max(A1_r) < 0) {
      # A0 unstable, A1 stabilised by the rate variation: done.
      # (The original also had an unreachable `break` after this return.)
      return(list(A0 = A0, A1 = A1))
    }
    iters <- iters - 1
  }
  # No qualifying matrix found within the iteration budget.
  invisible(NULL)
}
#' Plot eigenvalue distributions before and after response-rate variation
#'
#' Produces a two panel plot for a matrix (M) before and after the addition of
#' varying component response rate (gamma).
#'
#'@return A plot as in Figure 1 of the manuscript, showing distribution of
#'eigenvalues before and after the addition of variation in component response
#'rate
#'@param A0 Matrix (M) before addition of varying component response rate
#'@param A1 Matrix (M) after addition of varying component response rate
#'@examples
#'load(bi_pr_st);
#'plot_Fig_1(A0 = A0, A1 = A1);
#'@export
plot_Fig_1 <- function(A0, A1){
S_val <- dim(A0)[1];
# eigenvalues of both matrices, split into real and imaginary parts
A0_e <- eigen(A0)$values;
A0_r <- Re(A0_e);
A0_i <- Im(A0_e);
A1_e <- eigen(A1)$values;
A1_r <- Re(A1_e);
A1_i <- Im(A1_e);
# off-diagonal elements of A0 (diagonal blanked so it is excluded from sd)
A0_vm <- A0;
diag(A0_vm) <- NA;
A0vec <- as.vector(t(A0_vm));
A0vec <- A0vec[is.na(A0vec) == FALSE];
# same for A1
A1_vm <- A1;
diag(A1_vm) <- NA;
A1vec <- as.vector(t(A1_vm));
A1vec <- A1vec[is.na(A1vec) == FALSE];
# indices of the first and second halves of A1's off-diagonal elements
# (corresponding to the fast- and slow-responding component rows)
fhalf <- 1:(0.5*length(A1vec));
shalf <- (0.5*length(A1vec)+1):length(A1vec);
# panel (a): eigenvalues of A0 with the circular-law circle overlaid
par(mfrow = c(1, 2), mar = c(0.5, 0.5, 0.5, 0.5), oma = c(5, 5, 0, 0));
plot(A0_r, A0_i, xlim = c(-3.7, 0.3), ylim = c(-2, 2), pch = 4, cex = 0.7,
xlab = "", ylab = "", cex.lab = 1.5, cex.axis = 1.5, asp = 1);
# circle of radius sqrt(S)*sd centred on the mean diagonal element
vl <- seq(from = 0, to = 2*pi, by = 0.001);
A0x0 <- sqrt(S_val) * sd(A0vec) * cos(vl) + mean(diag(A0));
A0y0 <- sqrt(S_val) * sd(A0vec) * sin(vl);
text(x = -3.5, y = 2.25, labels = "a", cex = 2);
points(x = A0x0, y = A0y0, type = "l", lwd = 3, col = "dodgerblue4");
points(A0_r, A0_i, pch = 4, cex = 0.7);
# panel (b): eigenvalues of A1
plot(A1_r, A1_i, xlim = c(-3.7, 0.3), ylim = c(-2, 2), pch = 4, cex = 0.7,
xlab = "", ylab = "", cex.lab = 1.5, cex.axis = 1.5, asp = 1,
col = "dodgerblue4", yaxt = "n");
# per-half circles are computed but their overlays are deliberately
# commented out below
vl <- seq(from = 0, to = 2*pi, by = 0.001);
A0x1a <- sqrt(0.5*S_val) * sd(A1vec[fhalf]) * cos(vl) +
mean(diag(A1)[1:(0.5*S_val)]);
A0y1a <- sqrt(S_val) * sd(A1vec[fhalf]) * sin(vl);
#points(x = A0x1a, y = A0y1a, type = "l", lwd = 3, col = "grey");
A0x1b <- sqrt(0.5*S_val) * sd(A1vec[shalf]) * cos(vl) +
mean( diag(A1)[( (0.5*S_val) + 1 ):S_val] );
A0y1b <- sqrt(0.5*S_val) * sd(A1vec[shalf]) * sin(vl);
#points(x = A0x1b, y = A0y1b, type = "l", lwd = 3, col = "grey");
points(A1_r[1:S_val], A1_i[1:S_val],pch = 4, cex = 0.7, col = "firebrick");
text(x = -3.5, y = 2.25, labels = "b", cex = 2);
# shared axis labels in the outer margin
mtext(side = 1, "Real", outer = TRUE, line = 3, cex = 2);
mtext(side = 2, "Imaginary", outer = TRUE, line = 2.5, cex = 2);
}
#' Find stable bimodal systems
#'
#' Produces random matrices and determines whether or not they are stable using
#' eigenanalysis given two different component response rates of 1.95 and 0.05
#' over some number of iterations.
#'
#'@return A table showing whether matrices are unstable (0) or stable (1) before
#'(column 1) and after (column 2) including variation in component response rate
#'in eigenanalysis for some number (rows) of random matrices. Rows are filled
#'from the last row upward as the iteration counter decreases.
#'@param S Size of the complex system (must be even)
#'@param C Connectedness of complex system
#'@param Osd Standard deviation of interaction strength
#'@param iters Number of random matrices to simulate
#'@examples
#'dat <- stab_bgamma(iters = 4);
#'@export
stab_bgamma <- function(S = 200, C = 0.05, Osd = 0.4, iters = 10000){
  ress <- matrix(data = 0, nrow = iters, ncol = 2)
  while (iters > 0) {
    # random interaction matrix with connectance C and self-regulation -1
    A_dat <- rnorm(n = S * S, mean = 0, sd = Osd)
    A_mat <- matrix(data = A_dat, nrow = S)
    C_dat <- rbinom(n = S * S, size = 1, prob = C)
    C_mat <- matrix(data = C_dat, nrow = S, ncol = S)
    A_mat <- A_mat * C_mat
    # bimodal component response rates and their mean
    gammas <- c(rep(1.95, S / 2), rep(0.05, S / 2))
    mu_gam <- mean(gammas)
    diag(A_mat) <- -1
    A1 <- gammas * A_mat  # with response-rate variation
    A0 <- mu_gam * A_mat  # without
    # A system is stable iff the largest real part of its eigenvalues is
    # negative; imaginary parts (computed but unused in the original) and the
    # progress print have been dropped.
    if (max(Re(eigen(A0, only.values = TRUE)$values)) < 0) {
      ress[iters, 1] <- 1
    }
    if (max(Re(eigen(A1, only.values = TRUE)$values)) < 0) {
      ress[iters, 2] <- 1
    }
    iters <- iters - 1
  }
  ress
}
# Add a comment at the end.
|
5ac12134f4ef07eab86718d32ed6b0526cebbbac
|
1efb93841792888abf660cc06107e6f405f08c63
|
/metaanalysis.R
|
24f6459ecfe128a805ec8ceb36a403d10184de23
|
[] |
no_license
|
sarraahr/Thesis_supplementary_material
|
804c2243041cb08b690ff8278650ed7972a11d29
|
04cdf1818b4864c85397537b42a11ead48826e95
|
refs/heads/main
| 2023-07-04T08:02:33.032601
| 2021-07-31T17:13:11
| 2021-07-31T17:13:11
| 391,414,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,857
|
r
|
metaanalysis.R
|
# NOTE(review): '/...' is a placeholder path left in by the author — set the
# real project directory (or drop setwd and use relative paths) before running.
setwd('/...')
library(metafor)
library(tidyverse)
library(outliers)
library(PerformanceAnalytics)
#----PREPARE DATA----
# read the data set (one row per effect size / study arm)
# NOTE: the name `data` shadows base R's data() function for the session
data <- read.csv(
"meta-dataset.csv",
sep = ",",
dec = ".")
# inspect data
glimpse(data)
# inspect and visualize important values: effect size, SD and SE distributions
summary(data$Percentage_IntegrativePotential_achieved)
hist(data$Percentage_IntegrativePotential_achieved, breaks= 100)
summary(data$SD_meta)
hist(data$SD_meta, breaks = 100)
summary(data$SE)
hist(data$SE, breaks = 100)
# add impasse scores relative to unit of analysis
# (impasse counts divided by the number of units analysed in each study)
data$perc_impasses_total <- data$Number.of.total.Impasses / data$N..unit.of.analysis..total.
data$perc_impasses_partial <- data$Number.of.partial.Impasses / data$N..unit.of.analysis..total.
#----MULTILEVEL META ANALYSIS - MEAN EFFECT SIZE----
# generate the null model: three-level random-effects model with effect sizes
# (ID) nested within papers (PaperNo), no moderators
full.model <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
test = "t",
method = "REML")
# calculate the amount of heterogeneity (I^2 per level)
# NOTE(review): var.comp() is not provided by the packages loaded above —
# presumably it comes from the dmetar package; confirm and load it explicitly
i2 <- var.comp(full.model)
summary(i2)
plot(i2) # plot heterogeneity
# generate a forest plot (caterpillar: studies ordered by observed effect)
forest(full.model, header=TRUE,
slab = data$ID,
order="obs",
showweights = TRUE,
alim = c(0,2),
ilab.xpos=-1, ylim=c(0,119))
# draw points once more to make them more visible
points(sort(full.model$yi), full.model$k:1, pch=19, cex=0.2)
#----MULTILEVEL META ANALYSIS - CONFLICT STRENGTH----
# model with Conflict Strength as a single moderator
full.model_mod <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ ConflictStrength,
test = "t",
method = "REML")
# plot the model as a regression (bubble) plot
regplot(full.model_mod, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved")
# summary statistics including goodness of fit
# (calls the S3 methods directly; summary(full.model_mod) etc. would suffice)
summary.rma(full.model_mod)
# confidence interval
confint.rma.mv(full.model_mod)
# weights
weights.rma.mv(full.model_mod)
# pseudo R^2 value compared to the null model
# (proportional reduction in the summed variance components)
r_squared_mod <- (sum(full.model$sigma2) - sum(full.model_mod$sigma2)) / sum(full.model$sigma2)
#----MULTILEVEL META ANALYSIS - CONFLICT STRENGTH FULL MODEL----
# correlation matrix to investigate multicollinearity among candidate moderators
data[,c("ConflictStrength",
"Performance.based.incentive",
"X..Number.of.distributive.issues",
"X..Number.of.issues",
"perc_impasses_total")] %>% chart.Correlation()
# generate a full Conflict Strength model:
# ConflictStrength x incentive interaction, plus issue count and impasse rate
full.model_all <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ ConflictStrength *
Performance.based.incentive +
X..Number.of.issues +
perc_impasses_total,
test = "t",
method = "REML")
# compare pseudo r^2 from null model and full Conflict Strength model
r_squared_all <- (sum(full.model$sigma2) - sum(full.model_all$sigma2)) / sum(full.model$sigma2)
# plot the regression model as a bubble plot, moderators plotted can be changed via the mod argument
regplot(full.model_all,mod = 2, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved")
# summary statistics including goodness of fit
summary.rma(full.model_all)
# confidence interval
confint.rma.mv(full.model_all)
# weights
weights.rma.mv(full.model_all)
#----DILEMMA STRENGTHS----
# model with Dilemma Strength Dg as main moderator
full.model_Dg <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ DilemmaStrength_Dg,
test = "t",
method = "REML")
# plot the model as a regression plot
regplot(full.model_Dg, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved")
# compare pseudo r^2 from null model and Dg model
r_squared_Dg <- (sum(full.model$sigma2) - sum(full.model_Dg$sigma2)) / sum(full.model$sigma2)
# generate a full Dg model
# control for distributive issues as dilemma strengths do not account for them
full.model_Dg_all <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ DilemmaStrength_Dg *
Performance.based.incentive +
X..Number.of.issues +
X..Number.of.distributive.issues +
perc_impasses_total,
test = "t",
method = "REML")
# compare pseudo r^2 from null model and full Dg model
r_squared_Dg_all <- (sum(full.model$sigma2) - sum(full.model_Dg_all$sigma2)) / sum(full.model$sigma2)
# model with Dilemma Strength Dr as main moderator
full.model_Dr <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ DilemmaStrength_Dr,
test = "t",
method = "REML")
# plot the regression model
regplot(full.model_Dr, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved")
# compare pseudo r^2 from null model and Dr model
r_squared_Dr <- (sum(full.model$sigma2) - sum(full.model_Dr$sigma2)) / sum(full.model$sigma2)
# generate a full Dr model (same moderator set as the full Dg model)
full.model_Dr_all <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = data,
random = ~ 1 | PaperNo/ID,
mods = ~ DilemmaStrength_Dr *
Performance.based.incentive +
X..Number.of.issues +
X..Number.of.distributive.issues +
perc_impasses_total,
test = "t",
method = "REML")
# compare pseudo r^2 from null model and full Dr model
r_squared_Dr_all <- (sum(full.model$sigma2) - sum(full.model_Dr_all$sigma2)) / sum(full.model$sigma2)
#----WITHOUT OUTLIERS----
# identify studies with an outlying standard deviation
boxplot.stats(data$SD_meta)$out # 6 studies identified
# remove outliers and save a new test data set
# NOTE(review): 0.2805700 is a hardcoded cutoff presumably taken from the
# boxplot.stats() output for this specific dataset — it will be wrong if the
# data change; consider computing the fence programmatically
testdata_wo <- subset(data, SD_meta < 0.2805700)
# generate a null model without outliers (same three-level structure as before)
full.model_wo <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = testdata_wo,
random = ~ 1 | PaperNo/ID,
test = "t",
method = "REML")
# investigate variance (heterogeneity per level)
i2_wo <- var.comp(full.model_wo)
summary(i2_wo)
# plot a forest (caterpillar) to eyeball heterogeneity and effect sizes
forest(full.model_wo, header=TRUE,
slab = testdata_wo$ID,
order="obs",
showweights = TRUE,
ilab.xpos=-1, ylim=c(0,113))
# compare pseudo r^2 from null models with and without outliers
r_squared_wo <- (sum(full.model$sigma2) - sum(full.model_wo$sigma2)) / sum(full.model$sigma2)
# generate a model with Conflict Strength as the main predictor
full.model_CSwo <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = testdata_wo,
mods = ~ ConflictStrength,
random = ~ 1 | PaperNo/ID,
test = "t",
method = "REML")
# plot a regression model
regplot(full.model_CSwo, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved")
# compare pseudo r^2 from null model and CS model without outliers
r_squared_CSwo <- (sum(full.model$sigma2) - sum(full.model_CSwo$sigma2)) / sum(full.model$sigma2)
# generate a full model (same moderators as the full model on the full data)
full.model_all_wo <- rma.mv(yi = Percentage_IntegrativePotential_achieved,
V = Variance,
slab = PaperNo,
data = testdata_wo,
random = ~ 1 | PaperNo/ID,
mods = ~ ConflictStrength *
Performance.based.incentive +
X..Number.of.issues +
perc_impasses_total,
test = "t",
method = "REML")
# plot a regression model
regplot(full.model_all_wo, label = TRUE, labsize= 0.6, offset= 1,
ylab = "Percentage of Integrative Potential achieved", mod = 2)
# compare pseudo r^2 from null model and full model without outliers
r_squared_all_wo <- (sum(full.model$sigma2) - sum(full.model_all_wo$sigma2)) / sum(full.model$sigma2)
|
e5179eab8d22e108a855086df706cc7d71005d63
|
e3ce3ad557ebd51429ed7acfea936723149a8d4c
|
/R/sof.hosaki.R
|
9a8ea2e804a3af97e2e50131977e079b0f7edb95
|
[] |
permissive
|
jakobbossek/smoof
|
87512da9d488acfe3a7cc62aa3539a99e82d52ba
|
d65247258fab57d08a5a76df858329a25c0bb1b8
|
refs/heads/master
| 2023-03-20T02:05:12.632661
| 2023-03-08T13:59:27
| 2023-03-08T13:59:27
| 22,465,741
| 32
| 27
|
BSD-2-Clause
| 2022-01-21T10:02:19
| 2014-07-31T10:39:43
|
R
|
UTF-8
|
R
| false
| false
| 1,395
|
r
|
sof.hosaki.R
|
#' Hosaki Function
#'
#' Two-dimensional test function \eqn{f} with
#' \deqn{f(\mathbf{x}) = (1 - 8 \mathbf{x}_1 + 7 \mathbf{x}_1^2 - 7/3 \mathbf{x}_1^3 + 1/4 \mathbf{x}_1^4)\mathbf{x}_2^2e^{-\mathbf{x}_2}}
#' subject to \eqn{0 \leq \mathbf{x}_1 \leq 5} and \eqn{0 \leq \mathbf{x}_2 \leq 6}.
#'
#' @references G. A. Bekey, M. T. Ung, A Comparative Evaluation of Two Global
#' Search Algorithms, IEEE Transaction on Systems, Man and Cybernetics, vol. 4,
#' no. 1, pp. 112- 116, 1974.
#'
#' @template ret_smoof_single
#' @export
makeHosakiFunction = function() {
makeSingleObjectiveFunction(
name = "Hosaki Function",
id = "hosaki_2d",
# objective: direct transcription of the formula in the roxygen block above
fn = function(x) {
assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
(1 - 8 * x[1] + 7 * x[1]^2 - 7 * x[1]^3 / 3 + 0.25 * x[1]^4) * x[2]^2 * exp(-x[2])
},
# search space: x1 in [0, 5], x2 in [0, 6]
par.set = makeNumericParamSet(
len = 2L,
id = "x",
lower = c(0, 0),
upper = c(5, 6),
vector = TRUE
),
# reads the "tags" attribute attached to this generator below; the
# attribute is set before the generator is ever called, so this resolves
tags = attr(makeHosakiFunction, "tags"),
# known global optimum at (4, 2) with value ~ -2.3458
global.opt.params = c(4, 2),
global.opt.value = -2.3458
)
}
# generator metadata consumed by the smoof package machinery
class(makeHosakiFunction) = c("function", "smoof_generator")
attr(makeHosakiFunction, "name") = c("Hosaki")
attr(makeHosakiFunction, "type") = c("single-objective")
attr(makeHosakiFunction, "tags") = c("single-objective", "continuous", "differentiable", "non-separable", "non-scalable", "multimodal")
|
2d5373cce82be3f45fa4a316c7c2739dcc52db41
|
abaf4eeb0c3b490b9ec499591fbbdeaf09e2e138
|
/man/test.loom.Rd
|
e05ad53dc28a5ef5824681e4451cab260aefba9b
|
[] |
no_license
|
drisso/LoomExperiment
|
ab0b0eb876907236b399ab1802129fecf44903a7
|
99268dd36b2520d41dfdba816103d9772e3718e4
|
refs/heads/master
| 2021-09-08T12:47:10.678887
| 2018-03-09T19:56:23
| 2018-03-09T19:56:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 297
|
rd
|
test.loom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_loom.R
\docType{data}
\name{test.loom}
\alias{test.loom}
\title{test.loom}
\format{An hdf5 file in the .loom format}
\description{
A simple loom file with dummy data used for testing purposes.
}
\keyword{datasets}
|
6882356ef257d8f1a7f4eedc4c44bcc795f10426
|
8fb835ddd09b3c4d9a82341e9ce4786df1185c3a
|
/R/redundant.R
|
7b493d96d3fc9384ee6d2d2d3446deaf7b3ff7d8
|
[] |
no_license
|
cran/cna
|
45d270414ce1e8773476bc61804cfde9cf9f4f7d
|
5cb4a6902ad3c08e0cce3a20feaf5eb8a118e297
|
refs/heads/master
| 2023-08-16T22:57:16.661350
| 2023-08-10T18:00:11
| 2023-08-10T19:30:27
| 19,706,878
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,671
|
r
|
redundant.R
|
# User-facing entry point: strip blanks from the condition strings, then hand
# off to the internal .redund() generic. Note .redund() takes x first so that
# S3 dispatch happens on x (configTable, cti, or NULL/default).
redundant <- function(cond, x = NULL, simplify = TRUE){
  .redund(x, noblanks(cond), simplify)
}
# ==== .redund() ====
# Switch order of first 2 args to provide dispatching on x
# Generic function; methods exist for the default case (matrix/data.frame/NULL),
# for 'configTable', and for 'cti'
.redund <- function(x, cond, ...) UseMethod(".redund")
# ==== Default Method (for matrix or data.frame) ====
# Builds an auxiliary configTable from cond (and x, if supplied) and delegates
# to the configTable method.
# x: configTable-like object or NULL (in which case full.ct is derived from cond)
# value: configTable, mv if original is mv, cs else
.redund.default <- function(x, cond, simplify = TRUE, ...){
  if (!is.null(x)) {
    x <- auxConfigTable(cond, x)
  } else {
    # no data supplied: derive the table from the conditions alone; for
    # multi-value conditions this is usually not enough information
    x <- auxConfigTable(cond)
    if (attr(x, "type") == "mv")
      message("redundant() with cond of type \"mv\" usually requires explicit specification of x")
  }
  .redund.configTable(x, cond, simplify = simplify, full = FALSE)
}
# ==== Method for class 'configTable' ====
# Function suited for interactive use: validates that every condition is an
# asf/csf in standard form before delegating to the 'cti' method.
.redund.configTable <- function(x, cond, simplify = TRUE, full = FALSE, ...){
  if (length(cond) == 0L) return(logical(0))
  cti <- ctInfo(x)
  cond_types <- .qcondType(cond, colnames(cti$scores), cti$type,
                           stdComplex.multiple.only = FALSE)
  bad <- !(cond_types %in% c("stdAtomic", "stdComplex"))
  if (any(bad)) {
    stop("Invalid condition(s):\n",
         paste0(" ", cond[bad], collapse = "\n"),
         "\nredundant() expects valid asf or csf in standard form.",
         call. = FALSE)
  }
  if (useCtiList(cti)) cti <- ctiList(cti, cond)
  .redund(cti, cond, simplify = simplify, full = full)
}
# ==== Method for class 'cti' ====
# identifies the asf that are redundant within some csf
# x cti
# cond character vector with the csf
# simplify output matrix instead of list if all csf have the same number of asf
# value: A list of logical vectors (lengths corresponding to the number of asf),
# or a matrix if simplify=TRUE and all csf have the same number of asf
# NOTE(review): the lazy default `qc_full` is never referenced in this body,
# so its default expression is never evaluated (forcing it eagerly would fail,
# since `sc` only exists inside the function) — candidate for removal.
.redund.cti <- function(x, cond, simplify = TRUE, full = FALSE, names = TRUE,
qc_full = qcond_csf(cond, sc, flat = TRUE), ...){
# expand to the full configuration table unless the caller says it already is
if (!full) x <- full.ct(x, cond = cond)
sc <- x$scores
# split each csf into its component asf and deduplicate across all csf
asfs <- extract_asf(cond)
uasfs <- unique(unlist(asfs))
# for each csf, indices of its asf within the deduplicated vector
hmatches <- happly(asfs, match, table = uasfs)
# evaluate each unique asf over the score matrix as a logical column
qc <- qcond_asf(uasfs, sc, force.bool = TRUE)
mode(qc) <- "logical"
# per csf: run the C redundancy check on the columns of its asf
out <- lapply(hmatches, function(x) C_redund(qc[, x, drop = FALSE]))
names(out) <- cond
# collapse to a matrix when every csf contributed the same number of asf
if (simplify && length(ul <- unique(lengths(out, use.names = FALSE))) == 1L){
nms <- names(out)
out <- matrix(unlist(out, use.names = FALSE), ncol = ul, byrow = TRUE)
if (names) rownames(out) <- nms
}
out
}
|
cde6fa9f36be4ae6283c2a3b9a40578a7b9a310a
|
990bf4ec6404fb77557dfe1fc65841b327fd426d
|
/man/geobon_get.Rd
|
f884f18c58c58f5acf9d6eed7765eefe7132b685
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
VLucet/rgeobon
|
83a42d0b824c5ec4f2f9ea6934230fba039a432d
|
247a522330c5a4c30c7341d66c0e0789e527a179
|
refs/heads/master
| 2023-03-21T18:04:38.711471
| 2021-03-13T19:35:34
| 2021-03-13T19:35:34
| 273,297,516
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
geobon_get.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geobon_get.R
\name{geobon_get}
\alias{geobon_get}
\title{Get specific GEOBON EBV record}
\usage{
geobon_get(id = NULL, ebv_class = NULL, ebv_name = NULL)
}
\arguments{
\item{id}{(integer) ID of the dataset}
\item{ebv_class}{(character string) The EBV class of the dataset(s)}
\item{ebv_name}{(character string) The EBV name of the dataset(s)}
}
\value{
a \code{tibble} with the record(s) matching the request.
}
\description{
Retrieves a specific GEOBON EBV record. Note that only one of the three options
can be specified at a time.
}
\examples{
geobon_get(id = 2)
geobon_get(ebv_class = "Community composition")
geobon_get(ebv_name = "Taxonomic diversity")
}
|
ba95d1727b1eaa16f4500b9ad1bf7d13f4622413
|
adbc26a052ea437a933c2dd2e9cb45a216d90946
|
/Calibration.R
|
5f97af66af97af0a4a90eb5ad1eee4169a957e91
|
[] |
no_license
|
jamarabe/Math5
|
2bba471ee315eda379252cb4b990209b39408bf3
|
a96d32c1197d6b21a92d8a1b7647455c5d5521d4
|
refs/heads/main
| 2023-08-27T13:18:28.091914
| 2021-11-01T21:36:00
| 2021-11-01T21:36:00
| 423,619,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
Calibration.R
|
#####
# Define the Q matrix: maps each of the 57 items to exactly one of 4 dimensions
Qmatrix <- matrix(0, nrow = 57, ncol = 4)
# item numbers loading on each dimension (together they cover items 1..57 once)
dim.1 <- c(1, 2, 3, 4, 5, 6, 9, 15, 19, 32, 33, 41, 42, 45, 49, 53, 55, 56, 57)
dim.2 <- c(7, 11, 13, 16, 17, 18, 20, 22, 23, 24, 27, 35, 37, 39, 40, 43, 54)
dim.3 <- c(8, 28, 29, 30, 34, 36, 44, 51, 52)
dim.4 <- c(10, 12, 14, 21, 25, 26, 31, 38, 46, 47, 48, 50)
# set each item's loading on its dimension to 1, one dimension at a time
Qmatrix[dim.1, 1] <- 1
Qmatrix[dim.2, 2] <- 1
Qmatrix[dim.3, 3] <- 1
Qmatrix[dim.4, 4] <- 1
Qmatrix
#####
# Unidimensional Rasch calibration
mod.uni.R <- TAM::tam.mml(scored.strat_4_it)
# Unidimensional 2-PL calibration
mod.uni.2PL <- TAM::tam.mml.2pl( scored.strat_4_it, irtmodel = "2PL" )
# Unidimensional 3-PL calibration
mod.uni.3PL <- TAM::tam.mml.3pl(resp = , irtmodel)
# Consecutive Rasch
mod.cons.R.1 <- TAM::tam.mml(scored.strat_4_it[,dim.1])
mod.cons.R.2 <- TAM::tam.mml(scored.strat_4_it[,dim.2])
mod.cons.R.3 <- TAM::tam.mml(scored.strat_4_it[,dim.3])
mod.cons.R.4 <- TAM::tam.mml(scored.strat_4_it[,dim.4])
# Consecutive 2-PL
mod.cons.2PL.1 <- TAM::tam.mml.2pl( scored.strat_4_it[,dim.1], irtmodel = "2PL" )
mod.cons.2PL.2 <- TAM::tam.mml.2pl( scored.strat_4_it[,dim.2], irtmodel = "2PL" )
mod.cons.2PL.3 <- TAM::tam.mml.2pl( scored.strat_4_it[,dim.3], irtmodel = "2PL" )
mod.cons.2PL.4 <- TAM::tam.mml.2pl( scored.strat_4_it[,dim.4], irtmodel = "2PL" )
# Consecutive 3-PL
mod.cons.3PL.1
mod.cons.3PL.2
mod.cons.3PL.3
mod.cons.3PL.4
# Multidimensional Rasch calibration
mod.mult.R <- TAM::tam.mml(scored.strat_4_it, Q = Qmatrix)
# Multidimensional 2-PL calibration
mod.mult.2PL <- TAM::tam.mml.2pl(scored.strat_4_it, irtmodel = "2PL" , Q = Qmatrix)
# Multidimensional 3-PL calibration
mod.mult.3PL
|
1aa02bb590ac4bfdc67e2969337b8e1717396585
|
edf2d3864db8751074133b2c66a7e7995a960c6b
|
/man/adjacency_knn.Rd
|
d0598dd08b5ac485063aa396ad657606991cccf9
|
[] |
no_license
|
jkrijthe/RSSL
|
78a565b587388941ba1c8ad8af3179bfb18091bb
|
344e91fce7a1e209e57d4d7f2e35438015f1d08a
|
refs/heads/master
| 2023-04-03T12:12:26.960320
| 2023-03-13T19:21:31
| 2023-03-13T19:21:31
| 7,248,018
| 65
| 24
| null | 2023-03-28T06:46:23
| 2012-12-19T21:55:39
|
R
|
UTF-8
|
R
| false
| true
| 609
|
rd
|
adjacency_knn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LaplacianKernelLeastSquaresClassifier.R
\name{adjacency_knn}
\alias{adjacency_knn}
\title{Calculate knn adjacency matrix}
\usage{
adjacency_knn(X, distance = "euclidean", k = 6)
}
\arguments{
\item{X}{matrix; input matrix}
\item{distance}{character; distance metric used in the \code{dist} function}
\item{k}{integer; Number of neighbours}
}
\value{
Symmetric binary adjacency matrix
}
\description{
Calculates symmetric adjacency: objects are neighbours is either one of them is in the set of nearest neighbours of the other.
}
|
ac7071da0d15b49dca7d2ebaa557d756c4402941
|
6a6cd2d4604796f13b728b0294400a63d28d806c
|
/Caso1/completos.R
|
4de95ef38d47d648da26f7f5f596cc6c167eb783
|
[] |
no_license
|
ArianEscobar/Programacion_Actuarial_III
|
49be34251df8f12e4b5caeb360495370b60209c8
|
4d9fec7f98e6decd4f7520334fe9feb0f7aae52e
|
refs/heads/master
| 2021-01-09T05:59:49.221776
| 2017-06-01T14:35:36
| 2017-06-01T14:35:36
| 80,865,970
| 0
| 2
| null | null | null | null |
ISO-8859-10
|
R
| false
| false
| 1,056
|
r
|
completos.R
|
setwd("C:/Users/Naira/Documents/GitHub/Programacion_Actuarial_III//Caso1/specdata")
completos <- function(directorio,id=1:332){
num<- c()
num2<-c()
for(i in id){
if(i>=1 && i<10){NombreArchivo<-paste("00", i, ".csv", sep="")}
else { if(i>=10 && i<100){NombreArchivo<-paste("0", i, ".csv", sep="")}
else {if(i>=100 && i<=332){NombreArchivo<-paste( i, ".csv", sep="")}} }
LeerArchivo <- read.csv(NombreArchivo)
columnas<-LeerArchivo[,2:3]#extraemos las columnas 2 y 3
y<-complete.cases(columnas) #vectorlogico con los casos completos
comp<-columnas[y,] #extrae de las dos columas los datos completos
filas<-nrow(comp) #cuenta el numero de monitores completos
num<-c(num,i) #en este vector vamos almacenando los idīs
num2<-c(num2,filas)#en este vector almacenamos los numero casos completos
}
tabla<-data.frame(id=num,nobs=num2)
tabla
}
#probamos la funcion con
completos("spectdata",5:8)
|
a7a152ce02f9f31e15b7086cfaa62e0d581087fc
|
05ac3ce77d44e177c591eb931448b2a602aeb603
|
/Scripts/to_be_loaded_packages.R
|
7150b424af82204fec658d100bd78f37c850bc5e
|
[] |
no_license
|
JasperMCDenissen/2022_Denissen_ELI_CMIP6_Nat_Clim_Change
|
1ce01deaed62fe9f53e3602ef37c69ea6369df2f
|
78c7450341747e1d10e84c106d35530787c3118e
|
refs/heads/main
| 2023-04-12T17:35:15.585380
| 2023-02-27T20:57:50
| 2023-02-27T20:57:50
| 452,419,867
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
to_be_loaded_packages.R
|
# load packages
library(fields)
library(ncdf4)
library(lubridate)
library(raster)
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(Hmisc)
library(maptools)
library(egg)
library(grid)
library(MuMIn)
library(relaimpo)
library(akima)
library(ggpubr)
library(precrec)
library(pdp)
library(purrr)
library(EnvStats)
library(viridis)
library(gtable)
library(abind)
library(ppcor)
library(ncmeta)
library(doParallel)
library(foreach)
library(snow)
library(rgdal)
library(cowplot)
library(tidyverse)
library(gridExtra)
library(spatstat)
|
188cb52071de274f0adb835b89fadf7006afb4b9
|
c8ee0d4306cc6d0aabaecaec53b3c4634e506045
|
/golchi resources/MonGP_mod/R/plots.R
|
ebc69cba94b6862890fec1a4e7b19c739ab857b1
|
[] |
no_license
|
561GH/masters
|
b6944977e2075b09ce5613edc689cfadc2e77ef2
|
2e10312e66c770ad90f16c13c2cbc8072b9b150b
|
refs/heads/master
| 2021-03-27T20:13:34.600061
| 2017-11-14T22:42:24
| 2017-11-14T22:42:24
| 90,776,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,560
|
r
|
plots.R
|
par_density = function(obj, par) {
if (par == 'l') {
df0 = as.data.frame(obj$l)
df0 = melt(df0)
names(df0) = c('step', 'l')
L = length(levels(df0$step))
levels(df0$step) = 1:L
select_step = floor(seq(1, L, length = 5))
df0 = df0 %>%
filter(step %in% select_step)
p = ggplot(df0, aes(x = l, fill = step)) +
geom_density(alpha = .5, color = 'grey') + scale_fill_brewer()
}
if (par == 'sig2') {
df0 = as.data.frame(obj$sig2)
df0 = melt(df0)
names(df0) = c('step', 'sig2')
L = length(levels(df0$step))
levels(df0$step) = 1:L
select_step = floor(seq(1, L, length = 5))
df0 = df0 %>%
filter(step %in% select_step)
p = ggplot(df0, aes(x = sig2, fill = step)) +
geom_density(alpha = .5, color = 'grey') + scale_fill_brewer()
}
return(p)
}
#' Plot sample paths
#'
#'
#' @param obj output of SMC_MonGP
#' @export
#' @return ggplott object
#'
sample_path = function(obj) {
L = dim(obj$l)[2]
N = dim(obj$l)[1]
dimnames(obj$y) = list(sample = 1:N, step = 1:L, point = obj$newx)
df0 <- as.data.frame.table(obj$y)
names(df0)[4] = 'y'
select_step = floor(seq(1, L, length = 5))
df0 = df0 %>%
filter(step %in% select_step) %>%
group_by(point, step) %>%
summarise(mean = mean(y), lower = quantile(y, .025), upper = quantile(y, .975))
p = ggplot(df0, aes(x = as.numeric(as.character(point)), y = mean, fill = step)) + geom_line() +
geom_ribbon(aes(ymin=lower, ymax=upper), alpha=0.2) + scale_fill_brewer() + xlab('x') + ylab('y')
return(p)
}
|
6b84b86d80762bdb24fc8f09e6e9590a99969d56
|
a1a702de311f4ff1671b27215421a7fb0677a3c9
|
/man/postr_observed.Rd
|
f7a33676f59a0252268d57db91fb02a38ff59e14
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
josherrickson/postr
|
f16b1c943ca02380d0d99956fc2487fd57a6688e
|
10c594b409d07bbe7f6f69834bacbb9ffeaeeda2
|
refs/heads/master
| 2021-06-29T04:15:00.806107
| 2020-11-07T01:03:37
| 2020-11-07T01:03:37
| 182,087,218
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 348
|
rd
|
postr_observed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/observed.R
\name{postr_observed}
\alias{postr_observed}
\alias{pr_observed}
\title{Return observed values}
\usage{
postr_observed(model)
pr_observed(model)
}
\arguments{
\item{model}{A supported model}
}
\value{
Observed values
}
\description{
Return observed values
}
|
ec4f2c22fac83470c75444c333cb501ab341de0a
|
4097d74a1c1645d25397e7a13f20b78da11d7acb
|
/R/modelPlot.r
|
9bab5508feb5a45c347ad7f41f94bda330a4dbdf
|
[] |
no_license
|
robintrayler/modifiedBChron
|
553d85db0a0a3153e9f90bf76d15c66f9d463416
|
10b8704c3e543a53b27daeb11cee033b0f50bb9d
|
refs/heads/master
| 2023-04-06T04:54:44.579936
| 2023-03-31T23:38:17
| 2023-03-31T23:38:17
| 144,030,282
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,650
|
r
|
modelPlot.r
|
#' This function generates two age model plots given the output of an ageModel run.
#'
#' @param model Output of the \code{ageModel} function
#' @param agePredictOutput Output of the agePredict function. If specified, points and error bars will be added to the age model plots for the predicted points
#' @param scale Scaling factor for age PDFs
#' @param predictLabels c('ages','ids','both', NA) should predicted ages and names for for points be displayed? Defaults to display both. Set to NA for no labels
#' @param type c('PDF', contour') Shound probability be displayed as likelihood input PDFs or as contours of posterior probability. Defaults to PDF
#' @param legend c('color', 'adjacent', NA) type of legend to be drawn. color draws color coded boxes for each sample, adjacent displays sample names next to each PDF. NA omits the legend.
#' @param ... Optional arguments to be passed to plot (xlim, ylim, xlab, ylab, main)
#' @export
#
modelPlot <- function(model,
agePredictOutput = NA,
predictLabels = 'both',
scale = 1,
type = 'PDF',
legend = 'color',
...){
##-------------------------------------------------------------------------
## create a scaling factor and color ramp for likelihood PDFs
scl <- (diff(range(model$predictPositions)) / ncol(model$thetas)) / max(model$nAges) * scale
colsPal <- colorRampPalette(c('#d7191c',
'#fdae61',
'#ffffbf',
'#abd9e9',
'#2c7bb6'))
cols <- colsPal(ncol(model$thetas))
##-------------------------------------------------------------------------
## get a list of optional arguments
ex <- list(...)
##-------------------------------------------------------------------------
## assign some default arguments if things arent specified
## default x limits
if(is.null(ex$xlim)){ex$xlim = as.numeric(c(model$HDI[3, 1],
model$HDI[1, length(model$predictPositions)]))}
## default y limits
if(is.null(ex$ylim)){ex$ylim = c(min(model$predictPositions),
max(model$predictPositions) + scl)}
## default x label
if(is.null(ex$xlab)){ex$xlab = 'Age'}
## default y label
if(is.null(ex$ylab)){ex$ylab = 'Position'}
## required defaults
ex$tcl = 0.25
ex$x = 1
ex$y = 1
ex$type = 'n'
args <- ex
##-------------------------------------------------------------------------
## open up a blank plot
do.call('plot', args)
graphics::grid()
##-------------------------------------------------------------------------
## draw a polygon for the confidence interval
polygon(x = c(model$HDI[1, ],
rev(model$HDI[3, ])),
y = c(model$predictPositions,
rev(model$predictPositions)),
col = rgb(0,0,0,.25),
border = NA)
##-------------------------------------------------------------------------
## add a line for the median
lines(y = model$predictPositions,
x = model$HDI[2, ],
lwd = 2)
##-------------------------------------------------------------------------
## add polygons for each likelihood PDF
if(type == 'PDF'){
for(i in ncol(model$thetas):1){
polygon(y = model$likelihoods[, i] / max(model$likelihoods[, i]) * scl + model$masterPositions[i],
x = model$ageGrid,
border = NA,
col = cols[i])
}
}
##-------------------------------------------------------------------------
if(type == 'contour'){
for(n in 1:ncol(model$thetas)){
x <- MASS::kde2d(model$thetas[model$burn:model$MC, n],
jitter(model$positionStore[model$burn:model$MC, n], amount = 0.01),
n = 100)
x$z <- x$z/max(x$z)
#image(x,add = T,col = colorRampPalette(c(rgb(0,0,0,0),rainbow(ncol(model$thetas),alpha = .5)[n]),alpha = 1)(10))
contour(x,
add = T,
nlevels = 5,
drawlabels = F,
col = cols[n])
}
}
##-------------------------------------------------------------------------
if(all(!is.na(agePredictOutput))){
l <- nrow(agePredictOutput$HDI)
for(i in 1:l){
arrows(x0 = agePredictOutput$HDI[i, 2],
y0 = agePredictOutput$HDI[i, 1],
x1 = agePredictOutput$HDI[i, 4],
y1 = agePredictOutput$HDI[i, 1],
length = 0.025,
angle = 90,
code = 3,
lwd = 2,
col = rgb(0.97, 0.46, 0.43, .5))
points(agePredictOutput$HDI[i, 3],
agePredictOutput$HDI[i, 1],
pch = 21,
bg = rgb(0.97, 0.46, 0.43, .5))
if(!is.na(predictLabels)){
if(predictLabels == 'ages' | predictLabels == 'both'){
minus <- as.numeric(round(agePredictOutput$HDI[i, 3] - agePredictOutput$HDI[i, 2], 3))
plus <- as.numeric(round(agePredictOutput$HDI[i, 4] - agePredictOutput$HDI[i, 3], 3))
median <- as.numeric(round(agePredictOutput$HDI[i, 3],3))
text(x = agePredictOutput$HDI[i, 4],
y = agePredictOutput$HDI[i, 1],
paste(median, '+', plus,'/ -',minus), cex = 0.6,
pos = 2)
}
if(predictLabels == 'ids' | predictLabels == 'both'){
text(x = agePredictOutput$HDI[i, 2],
y = agePredictOutput$HDI[i, 1],
labels = agePredictOutput$HDI$ids[i], cex = 0.6,
pos = 4)
}
}
}
}
##-------------------------------------------------------------------------
legend('bottomright',
lwd = c(2,NA),
pch = c(NA, 15),
legend = c('median',paste(paste(model$probability*100,'%',sep = ''),'HDI')),
col = c('black', rgb(0,0,0,.5)),
bty = 'n')
##-------------------------------------------------------------------------
if(!is.na(legend)){
if(legend == 'color'){
legend('topleft',
legend = rev(model$ids),
fill = rev(cols),
bty = 'n',
cex = .75,
ncol = 4)
}
if(legend == 'adjacent'){
for(i in 1:length(model$ids)){
x <- model$ageGrid[which(cumsum(model$likelihoods[, i]) > .01 & cumsum(model$likelihoods[, i]) < .02)[1]]
y <- model$masterPositions[i]
t <- model$ids[i]
text(x = x, y = y, labels = t, pos = 4, cex = 0.6)
}
}
}
##-------------------------------------------------------------------------
}
|
d7b3e617b1b580b42a7ca9b84d34943ac5521f77
|
1224064889c5743eae03082ed64d48dae632052d
|
/vaja4.R
|
3a4c93614429ace165f1e30204995765e05a0a8c
|
[] |
no_license
|
LampicJ15/Financni-praktikum
|
720ef38e72168bad7f48846902c38368eeabe7ed
|
0e87edd912c682e5fb511282e6e738c853a0f77a
|
refs/heads/master
| 2021-09-01T14:22:08.239137
| 2017-12-22T10:18:59
| 2017-12-22T10:18:59
| 111,736,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,183
|
r
|
vaja4.R
|
#VAJA4
library(dplyr)
library(Quandl)
#1.naloga
#a.)
podatki <- Quandl("LBMA/GOLD", collapse="monthly", start_date="2012-12-07")[c(1,6)]#uvozi podatke o zaključnih tečajih bitcoina iz Quandl-a
podatki <- podatki[-1,] #zbrišem eno vrstico
podatki <- podatki[c(60:1),] #obrnem podatke za pravi vrstni red datumov
#b.)
t.series1 <- ts(podatki[2],start=c(2012,12), frequency=12)
graf.zlato <- ts.plot(t.series1,
xlab='Leto',
ylab ='Vrednost v evrih',
main = 'Vrednost zlata',
col ="cornflowerblue",
lwd = 3)
#2.naloga
#a.)
G <- function(vrsta,k){
glajene.vrednosti <- c()
for (i in 1:(length(vrsta)-k)){
glajene.vrednosti[i] <- sum(vrsta[i:(k+i-1)])/k
}
zac_leto <- ceiling(2012 + k/12)
zac_mesec <- (k/12 - floor(k/12)) * 12
zglajena_vrsta <- ts(glajene.vrednosti, start = c(zac_leto, zac_mesec), frequency = 12)
return(zglajena_vrsta)
}
#b.)
glajena.ts7 <- G(t.series1,7)
#napoved
napoved <- function(vrsta, k){
return(sum(vrsta[(length(vrsta)-k+1):length(vrsta)])/k)
}
napoved(t.series1,7) #1095.52
#c.)
graf.zlato2 <- ts.plot(t.series1,glajena.ts7,
main ="Drseče povprečje k=7",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
#d.)
SKN <- function(vrsta, glajena.vrsta, k){
l <- length(vrsta)
napaka <- 0
for (i in (k+1):l){
napaka <- napaka + (vrsta[i] - glajena.vrsta[i-k])^2
}
return (napaka/(l-k))
}
#Srednja kvadratična napaka
SKN(t.series1,glajena.ts7,7) #4261,148
#e)
#red glajenja k=14
glajena.ts14 <-G(t.series1,14)
napoved(t.series1,14)
SKN(t.series1,glajena.ts14,14)
#red glajenja k=30
glajena.ts30 <-G(t.series1,30)
napoved(t.series1,30)
SKN(t.series1,glajena.ts30,30)
par(mfrow(c(2,2)))
graf.zlato2 <- ts.plot(t.series1,glajena.ts7,
main ="Drseče povprečje k=7",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
graf.zlato14 <- ts.plot(t.series1,glajena.ts14,
main ="Drseče povprečje k=14",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
graf.zlato30 <- ts.plot(t.series1,glajena.ts30,
main ="Drseče povprečje k=30",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
#3.naloga
#a.)
EG <- function(vrsta, alpha){
dolzina <- length(vrsta)
zglajene_vrednosti <- vrsta[1]
for (i in 2:dolzina){
zglajene_vrednosti[i] <- alpha*vrsta[i] + (1-alpha)*zglajene_vrednosti[i-1]
}
zglajena_vrsta <- ts(zglajene_vrednosti, start = c(2013,1), frequency = 12)
return(zglajena_vrsta)
}
#b.)
#alpha <- 0,75
eksponentno.glajena <- EG(t.series1,0.75)
last(eksponentno.glajena) #napoved
graf.zlatoE <- ts.plot(t.series1,eksponentno.glajena,
main ="Eksponentno glajenje alpha=0.75",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
#c.)
SKN.E <-function(vrsta, alpha){
dolzina <- length(vrsta)
napaka <- 0
glajena <- EG(vrsta, alpha)
for (i in 1:(dolzina-1)){
napaka <- napaka + (vrsta[i+1] - glajena[i+1])^2
}
return(napaka/(dolzina-1))
}
optimal.alpha <- optimize(SKN.E, c(0,1), vrsta = t.series1)
#d.)alpha optimalna
eksponentno.glajena2 <- EG(t.series1,optimal.alpha$minimum)
last(eksponentno.glajena2) #napoved
graf.zlatoE2 <- ts.plot(t.series1,eksponentno.glajena2,
main ="Eksponentno glajenje alpha=0.75",
xlab='Leto',
ylab ='Vrednost v evrih',
col =c("cornflowerblue","red"),
lwd = 3)
legend('bottomright',
legend = c('vrsta', 'glajena vrsta'),
col =c("cornflowerblue","red"),
lwd = 1:1,
bty = 'n')
|
a0f5d615626d3053ca9906bc0399997281723de8
|
e1765730475b01c631e466f6a2eb265761d277e9
|
/R/subsample.R
|
0c1794ec8a937d1e8ea2e40b5c5645abf310e2b5
|
[] |
no_license
|
cran/sgee
|
37ac3ad4077b2c14e6eb5bc4203cd5c0e895e2fe
|
0b1213c5a15ee40e670650b3d9ba7643aee7196a
|
refs/heads/master
| 2021-01-11T04:39:03.313718
| 2018-01-08T17:34:38
| 2018-01-08T17:34:38
| 71,120,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,471
|
r
|
subsample.R
|
################################################################################
##
## R package sgee by Gregory Vaughan, Kun Chen, and Jun Yan
## Copyright (C) 2016-2018
##
## This file is part of the R package sgee.
##
## The R package sgee is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published
## by the Free Software Foundation, either version 3 of the License, or
## any later version (at your option). See the GNU General Public License
## at <http://www.gnu.org/licenses/> for details.
##
## The R package sgee is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##
################################################################################
#' subsample
#'
#' Internal function to execute the subsampling component of
#' the stochastic stagewise approach. If a user provides a \code{stochastic}
#' value between 0 and 1, it is assumed that some proportion of subsampling
#' is desired. The \code{samplingDistCalculation} function calculates the
#' distribution of the clusters and the \code{subsample} function uses that
#' distribution to draw the actual subsample.
#'
#' @param sampleDist A vector whose length is equal to the number of clusters
#' that indicates the probability of sampling each cluster
#' @param sampleSize A scalar value indicating how larger of a subsample is
#' being drawn
#' @param withReplacement A logical value indicating whether the
#' subsampling is beign done with or without replacement
#' @param clusterIDs A vector of all of the UNIQUE cluster IDs
#' @param clusterID A vector of length equal to the number of observations
#' indicating which cluster each observation is in
#'
#'
#' @return A list with two variables: \code{subSampleIndicator}, which
#' indicates which observations are in the current subsample, and
#' \code{clusterIDCurr}, which indicates the clusterID for the subsample.
#'
#' @note Internal function.
#'
#' While most of the subsample can be determined from the
#' \code{subSampleIndicator}, the \code{clusterIDCurr} value has to be
#' constructed inside the \code{subsample function} as the way the cluster
#' IDs is handled is different depending o n whether we are sampling with
#' or without replacement.
#'
#'
#' @author Gregory Vaughan
#' @keywords internal
subsample <- function(sampleDist,
sampleSize,
withReplacement,
clusterIDs,
clusterID){
IDsSample <- sample( clusterIDs, sampleSize, prob = sampleDist, replace = withReplacement)
## special counters need to be set up
## to sample clusters with replacement
## properly
if(withReplacement){
subSampleIndicator <- numeric(0)
clusterIDCurr <- numeric(0)
counter <- 0
for(id in IDsSample){
counter <- counter + 1
subSampleIndicator <- c(subSampleIndicator, which(clusterID == id))
clusterIDCurr <- c(clusterIDCurr, rep(counter, sum(clusterID == id)))
}
} else{
subSampleIndicator <- clusterID %in% IDsSample
clusterIDCurr <- clusterID[subSampleIndicator]
}
subsampleOutput <- list(subSampleIndicator = subSampleIndicator,
clusterIDCurr = clusterIDCurr)
subsampleOutput
}
|
a0e3ee0c9d5e1b5bbeaf53683041d5187221397a
|
9978cd1445ca2c95301169faadc6c1337649132a
|
/tests/testthat/test_generateEnc.R
|
9093968876b7df466171e25e68db34c60bdac0c8
|
[] |
no_license
|
cran/wyz.code.rdoc
|
f9e607ab38b89c62340388ad353d07fa553fb4b9
|
983f4cc651fe63f909a03caaa1089ca7254a49a0
|
refs/heads/master
| 2021-10-08T20:22:59.419778
| 2021-10-06T06:00:02
| 2021-10-06T06:00:02
| 213,606,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
test_generateEnc.R
|
context("generateEnc")
a <- generateEnc(list(list(text = 'Français', ascii = 'Francais')))
test_that("generateEnc", {
expect_true(is.list(a))
expect_length(a, 1)
expect_equal(a[[1]], "\\enc{Français}{Francais}")
})
|
2b125674c880aa36deec95fe83c826fbc0a9b8b5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/oce/examples/summary-oce-method.Rd.R
|
24726ee0a5367a5990b8df196f4eadee5144098f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
summary-oce-method.Rd.R
|
library(oce)
### Name: summary,oce-method
### Title: Summarize an oce Object
### Aliases: summary,oce-method
### ** Examples
o <- new("oce")
summary(o)
|
a548deee10724f5ff10567e4a8f7b499b57f26e8
|
44a71491f4ebc032aaabf236f9740d149c84cafd
|
/Chapter_10/Chp_10_Example_4.R
|
f815870d0fb93f5a831276b339d1446dadb6daa2
|
[] |
no_license
|
artofstat/RCode
|
82ae8f7b3319888d3a5774fe2bcafbae3ed17419
|
8e8d55d1ac4bc111e5d14798f59033d755959ae5
|
refs/heads/main
| 2023-03-22T03:17:45.284671
| 2022-08-15T18:09:08
| 2022-08-15T18:09:08
| 503,484,584
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
Chp_10_Example_4.R
|
#############################################################
## R code to accompany the textbook
## Statistics: The Art & Science of Learning from Data
## by A. Agresti, C. Franklin and B. Klingenberg
## 5th Edition, Pearson 2021
## Web: ArtofStat.com
## Copyright: Bernhard Klingenberg
############################################################
####################
### Chapter 10 ###
### Example 4 ###
####################
###############################################################
## Confidence Interval for the Difference of Two Proportions ##
###############################################################
# Reading in data for the first proportion
x1 <- 347
n1 <- 11535
phat1 <- x1 / n1
# Reading in data for the second proportion
x2 <- 327
n2 <- 14035
phat2 <- x2 / n2
# To compute the mean of the difference
mean <- phat1 - phat2
# To compute the standard error of the difference
se <- sqrt((phat1 * (1 - phat1) / n1) + (phat2 * (1 - phat2) / n2))
# To find the zscore for a 95% confidence interval
zscore <- qnorm(0.975)
# To compute a 95% confidence interval for the difference of two proportions
mean + c(-1, 1) * zscore * se
# Alternatively, you can also use the `prop.test()` function
prop.test(c(347, 327), c(11535, 14035), correct = FALSE)
|
3345e121967f4287409f21278139b59b7a7830bb
|
1332d8b68c6b86c2be5ed064473794f9f808d16a
|
/binomial/man/bin_probability.Rd
|
0593c424e7495dd22819079e7cca92d7a62e0364
|
[] |
no_license
|
stat133-sp19/hw-stat133-mallikakolar
|
e7ffe1bbd22c2b9e2d466c12bdc3685db88af096
|
bc99e849eebe2c233455053271aed6f3fae8c541
|
refs/heads/master
| 2020-04-28T14:45:55.706460
| 2019-05-02T22:54:45
| 2019-05-02T22:54:45
| 175,348,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 604
|
rd
|
bin_probability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main-functions.R
\name{bin_probability}
\alias{bin_probability}
\title{bin_probability}
\usage{
bin_probability(success, trials, prob)
}
\arguments{
\item{int}{integer success}
\item{int}{integer trials}
\item{int}{integer prob}
}
\value{
binomial probability
}
\description{
calculated binomial probability of having k successes in n trials
}
\examples{
bin_probability(success = 2, trials = 5, prob = 0.5)
bin_probability(success = 0:2, trials = 5, prob = 0.5)
bin_probability(success = 55, trials = 100, prob = 0.45)
}
|
1d33b12d21b294d25f491db0df1aedc30479d78e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MVB/examples/unilps.Rd.R
|
61ba9e7bba372c777119fdd26dc8541143ce5626
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
unilps.Rd.R
|
library(MVB)
### Name: unilps
### Title: univariate model fitting with lasso penalty
### Aliases: unilps
### ** Examples
n <- 100
p <- 4
x <- matrix(rnorm(n * p, 0, 4), n, p)
eta <- x
pr <- exp(eta) / (1+ exp(eta))
res <- rbinom(n, 1, pr)
fit <- unilps(res ~ x - 1, family = 'binomial')
|
1ace358c4cc2e5a1b094f44e1c7f72b69a81acfd
|
660bbad7d5e31395180ef0c30b8c79d9d1176f4c
|
/real/AML/analysis.R
|
20831d08614903fe24380b84a2e572605b06202f
|
[] |
no_license
|
Lei-D/sparsePC
|
e9ed663c5a9cd4f6b64e29e5e00bab71498b595f
|
5503e09c9ad49d33857554e8c5db5cf496ecf0c3
|
refs/heads/master
| 2021-06-19T19:58:22.654037
| 2021-01-20T07:37:32
| 2021-01-20T07:37:32
| 152,515,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
analysis.R
|
source("~/Desktop/Lei/AIMER2/github/sparsePC_reg_real.R")
# read in data
expressionData = read.table("~/Desktop/Lei/AIMER/AML/rawdata/Bullinger2004_expression_data.txt",
sep = ",")
survivalData = read.table("~/Desktop/Lei/AIMER/AML/rawdata/Bullinger2002_survival_data.txt",
sep = ",")
# transform covariates and response
Xtot = t(expressionData)
Xtot = scale(Xtot, center = T, scale = T)
Ytot = log(survivalData[,1]+1)
# set lambda values
lambda.min = 0
lambda.max = 1
nlambda = 10
lambda = lambda.max * log10(seq(1, 10, length.out = nlambda)) +
lambda.min * (1 - log10(seq(1, 10, length.out = nlambda)))
lambda = sort(lambda, decreasing = T)
# suffPCR
set.seed(8372)
suffPCR = sparsePC.reg.real.CV(X = Xtot, Y = Ytot, d = 3,
lambda = lambda, Kfold = 5,
maxnvar = ncol(Xtot),
screening = TRUE)
# save output
save(suffPCR, file = "~/Desktop/Lei/AIMER2/ISMB/real/AML/suffPCR.RData")
|
e928d94a3966a54fd21e34a1fb8786a108b9bf44
|
c952a238bed44ef7e9b5714c832bad745449abc5
|
/plot6.R
|
a31ff525f55d949fbbdda8f7f35455d88bb34c31
|
[] |
no_license
|
kevinroche22/EDAProject2
|
5080c257d1b4bc8e2837b9c5c2e4705dd6b10916
|
172d161ceaeee21a8e0a34370c313559527e16fb
|
refs/heads/main
| 2023-07-06T16:19:53.083680
| 2021-08-09T16:31:58
| 2021-08-09T16:31:58
| 394,334,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,767
|
r
|
plot6.R
|
## Load Packages
library(tidyverse)
library(ggplot2)

## Load Data
NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")

#############################################################################
# Compare emissions from motor vehicle sources in Baltimore City with       #
# emissions from motor vehicle sources in Los Angeles County, California.   #
# Which city has seen greater changes over time in motor vehicle emissions? #
#############################################################################

## Save as png
#png("plot6.png", width = 460, height = 480)

# Lookup table mapping fips code -> county label.  Named `fips_df` (not
# `fips`) so it cannot collide with the NEI `fips` column inside dplyr's
# data mask below.
fips_df <- data.frame(fips = c("06037", "24510"), county = c("Los Angeles", "Baltimore"))

# Case-insensitive match catches vehicle, Vehicle, VEHICLE, etc.
sccMV <- SCC[grep("(?i)vehicle", SCC$EI.Sector), "SCC"]

# BUG FIX: the original wrote `fips %in% fips`; under data masking both
# sides resolved to the NEI column, so the county filter was always TRUE and
# every county was aggregated (the later merge() happened to discard the
# extras).  Filter against the lookup table's codes explicitly.
MVEmissions <- NEI %>%
  filter(SCC %in% sccMV & fips %in% fips_df$fips) %>%
  group_by(fips, year) %>%
  summarize(Emissions = sum(Emissions))

# Attach the human-readable county names (joins on the shared "fips" column).
MVEmissions <- merge(MVEmissions, fips_df)

MVEmissions %>% ggplot(aes(x = factor(year), y = Emissions, fill = factor(year), label = round(Emissions, 2))) +
  geom_bar(stat = "identity") +
  facet_wrap(. ~ county) +
  geom_smooth(method = "lm", aes(group = county), se = FALSE, col = "black") +
  theme_bw() +
  xlab("Year") +
  ylab("Annual Emissions") +
  ggtitle(expression('PM'[2.5]*' Emissions from Vehicles in Baltimore and LA 1999 - 2008')) +
  guides(fill=guide_legend(title='Year'))
#dev.off()

##########
# Answer #
##########
# Baltimore's vehicle emissions are about 1/10 that of LA's. Baltimore's
# emissions have been declining since 1999 while LA's rose from 1999-2005
# before declining in 2008.
|
b753c1dce5373cd24a5da7f5046e0bb0deb45c2f
|
8630045b7197bbcbc42bf58f77284d6b8fd39d50
|
/R/MulticoreParam-class.R
|
db0247b8acafdee5433c84f837980d1a4f793ea3
|
[] |
no_license
|
Feigeliudan01/BiocParallel
|
6356b01f4e08db1794571de5869d44f05e5135f9
|
e46d9535938923b84a98378a025cdd1402c67794
|
refs/heads/master
| 2020-03-21T05:56:59.170113
| 2018-05-26T18:07:09
| 2018-05-26T18:17:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,159
|
r
|
MulticoreParam-class.R
|
### =========================================================================
### MulticoreParam objects
### -------------------------------------------------------------------------
## Number of workers to use for a forked (multicore) cluster.  Delegates to
## the package-internal .snowCores() helper (defined elsewhere in this
## package) with the "multicore" backend.
multicoreWorkers <- function()
    .snowCores("multicore")
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
## Internal reference-class generator for MulticoreParam objects.
## Inherits every field and method from SnowParam and adds nothing of its
## own; the class exists so multicore params are dispatchable as their own
## S4/RefClass type.
.MulticoreParam <- setRefClass("MulticoreParam",
    contains="SnowParam",
    fields=list(),
    methods=list()
)
## User-facing constructor for MulticoreParam (fork-based parallel backend).
##
## Assembles the argument list for a FORK-type cluster and delegates to the
## internal .MulticoreParam() generator.  Forked clusters are unavailable on
## Windows, hence the warning there.  'catch.errors' is retained only for
## backward compatibility and emits a deprecation warning when supplied
## explicitly.  Remaining arguments mirror SnowParam's fields (task
## chunking, error policy, logging, result directories, manager address).
MulticoreParam <- function(workers=multicoreWorkers(), tasks=0L,
                           catch.errors=TRUE, stop.on.error=TRUE,
                           progressbar=FALSE, RNGseed=NULL, timeout= 30L * 24L * 60L * 60L,
                           log=FALSE, threshold="INFO", logdir=NA_character_,
                           resultdir=NA_character_, jobname = "BPJOB",
                           manager.hostname=NA_character_, manager.port=NA_integer_, ...)
{
    if (.Platform$OS.type == "windows")
        warning("MulticoreParam() not supported on Windows, use SnowParam()")
    if (!missing(catch.errors))
        warning("'catch.errors' is deprecated, use 'stop.on.error'")
    ## Cluster spec: worker count plus FORK type; extra args are forwarded
    ## to the cluster constructor verbatim.
    args <- c(list(spec=workers, type="FORK"), list(...))
    ## The cluster itself is created lazily (.NULLcluster() placeholder).
    .MulticoreParam(.clusterargs=args, cluster=.NULLcluster(),
                    .controlled=TRUE, workers=as.integer(workers),
                    tasks=as.integer(tasks),
                    catch.errors=catch.errors, stop.on.error=stop.on.error,
                    progressbar=progressbar,
                    RNGseed=RNGseed, timeout=timeout,
                    log=log, threshold=threshold, logdir=logdir,
                    resultdir=resultdir, jobname=jobname,
                    hostname=manager.hostname, port=manager.port)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Methods - control
###
## Setter for the worker count on a MulticoreParam.  The new count is
## mirrored into both the param's `workers` field and the stored cluster
## spec, after checking it against the machine's available cores.
## (The argument must be named `value` -- R's replacement-function
## convention -- so only internals are renamed here.)
setReplaceMethod("bpworkers", c("MulticoreParam", "numeric"),
    function(x, value)
{
    n <- as.integer(value)
    limit <- .snowCoresMax("multicore")
    if (n > limit) {
        stop("'value' exceeds ", limit,
             " available workers; see ?multicoreWorkers")
    }
    x$workers <- n
    x$.clusterargs$spec <- n
    x
})
## Whether work can be scheduled with this param.  Forked (multicore)
## clusters do not exist on Windows, so scheduling is possible on every
## other platform.  Simplified from an if/else returning TRUE/FALSE to the
## equivalent boolean expression.
setMethod("bpschedule", "MulticoreParam",
    function(x)
{
    .Platform$OS.type != "windows"
})
|
f034b5c43cfc6f5d1a6183ec1dfc36f8298a2226
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VaRES/examples/lfr.Rd.R
|
4808bc8e7eaf60fb4d85414b4c98535b6f4bdc3a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
lfr.Rd.R
|
## Example usage of the linear failure rate (LFR) distribution from the
## VaRES package: density, distribution function, value-at-risk and
## expected shortfall, evaluated at ten random points.
library(VaRES)
### Name: lfr
### Title: Linear failure rate distribution
### Aliases: dlfr plfr varlfr eslfr
### Keywords: Value at risk, expected shortfall

### ** Examples

# Ten uniform draws on (0, 1) to use as evaluation points / levels.
x <- runif(10, min = 0, max = 1)
dlfr(x)    # density
plfr(x)    # distribution function
varlfr(x)  # value at risk
eslfr(x)   # expected shortfall
|
3e3248c2db60f16b0507cf83275e0193fdd1561c
|
2419bdaa9a452b9c420760335cb04d95067cd05a
|
/Postprocessing/North_East/Cluster_script_ne_full_predictions_smaller.R
|
69bd1959b2f135ac7d72a68c386da0f834395363
|
[
"BSD-3-Clause"
] |
permissive
|
RachelKillick/Daily_benchmarks
|
856f2df4d98bc768cf24752bd11a0179f3b78a98
|
5867ec606b8b5c825f1f6fe9151fe11d2ad47a84
|
refs/heads/master
| 2021-08-18T02:44:58.686402
| 2021-06-09T15:58:46
| 2021-06-09T15:58:46
| 137,796,182
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
r
|
Cluster_script_ne_full_predictions_smaller.R
|
# Hopefully verging on my final model in the Northeast:

# Load the necessary packages first:
library(mgcv)
library(akima)
library(fields)

# Load the necessary dataframes and other R things:
load('modelnecoast5gf3other.RData')
load('loesssurf1.RData')
load('NEcoast1relocor_1714.RData')

# Create my predictions - Allow 1Gb:
pred5other = predict.gam(modelnecoast5gf3other, newdata = NEcoast1relocor, type = "response")
save(pred5other, file = 'pred5other_full_small.RData')

# Add gamma noise onto the predictions.
# The gamma shape parameter is the same for every prediction from this model:
shmodel5other = 1 / modelnecoast5gf3other$sig2

# PERF FIX: the original ran a scalar while-loop calling rgamma(1, ...)
# ~4.5 million times with per-iteration data.frame indexing, which is
# extremely slow.  rgamma() is vectorised over its scale argument, so all
# deviates can be drawn in one call.  Each row keeps mean = pred5other[i]
# (scale = mean / shape), exactly as in the original loop.
n_pred = 4540640  # number of prediction points -- assumed equal to length(pred5other); TODO confirm
pred5othergam = as.data.frame(
  matrix(rgamma(n_pred, shape = shmodel5other, scale = pred5other / shmodel5other),
         nrow = n_pred, ncol = 1))

save(pred5other, pred5othergam, file = 'preds_ne_11714_relocation_full_smaller.RData')
|
9e69501f080d718dd6d4ef3328565410631c6b21
|
61180649c781ca23ee434754577acea001eb4bc0
|
/man/gettfvalid.Rd
|
ff2b32e6c058c73920db128a173a939c371a2061
|
[] |
no_license
|
malexan/fclhs
|
87e0b4c9b86eb1c954644bbdb699699677d4163b
|
f93e69dd96bbd15bdbc68a5c52db5fe0dde3a0fa
|
refs/heads/master
| 2020-05-31T15:48:22.614152
| 2015-08-09T07:59:08
| 2015-08-09T07:59:08
| 27,025,796
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
rd
|
gettfvalid.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/faostatdb.R
\name{gettfvalid}
\alias{gettfvalid}
\title{Returns data from FAOSTAT.TF_VALID}
\usage{
gettfvalid(reporter = NULL, year = NULL, partner = NULL, flow = NULL,
fcl = NULL, ...)
}
\description{
Returns data from FAOSTAT.TF_VALID
}
|
458c46c79f66174e426ae53666608ee638ecb724
|
b3905a249f9bfbd2af1bdb3f4fbabadae16fa70f
|
/cleaner_succotash_sims/succotash_diff_init/muscle_succ_diff_init.R
|
e926860f8a2f5434d0994c3abcd2384586d8fe9f
|
[] |
no_license
|
Feigeliudan01/sim_code
|
13b146c330a66d90b45c66f18656aa55e50ec733
|
488d538529040c8c26a739ed1d7dfc1af9cf9182
|
refs/heads/master
| 2021-04-15T17:58:03.145322
| 2016-10-12T21:01:39
| 2016-10-12T21:01:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,733
|
r
|
muscle_succ_diff_init.R
|
library(Rmpi)
library(snow)
## Run one simulation replicate: generate a count dataset, fit two
## SUCCOTASH models that differ only in how they are initialised (null-MLE
## vs random), and return performance summaries for both fits.
##
## new_params     : named list for this replicate (current_seed, Nsamp,
##                  nullpi, poisthin); appended to current_params.
## current_params : shared simulation settings (Ngene, tissue, path, ...).
##
## Returns a numeric vector of length 12: pi0, MSE, AUC, scale_val, llike
## and null_llike for each of the two initialisations.
one_rep <- function(new_params, current_params) {
  # Defines datamaker_counts_only(); path is relative to the worker's
  # working directory -- TODO confirm it resolves on the cluster.
  source("../code/datamaker_only_counts.R")
  args_val <- append(current_params, new_params)
  set.seed(new_params$current_seed)
  d_out <- datamaker_counts_only(args_val)
  which_null <- d_out$meta$null
  # NOTE(review): half_null is computed but never used below -- left in
  # place as in the original.
  half_null <- which_null
  half_null[half_null == 1][sample(1:sum(which_null), size = sum(which_null) / 2)] <- 0
  # True effect sizes: zero for null genes, simulated log2 fold change otherwise.
  beta_true <- rep(0, length = args_val$Ngene)
  beta_true[!which_null] <- d_out$meta$true_log2foldchange
  # Design matrix (intercept + treatment) and log2 counts response.
  X <- as.matrix(model.matrix(~d_out$input$condition))
  colnames(X) <- c("Intercept", "Treatment")
  Y <- t(log2(as.matrix(d_out$input$counts + 1)))
  # Number of surrogate variables estimated by the "be" method of sva.
  num_sv <- sva::num.sv(t(Y), mod = X, method = "be")
  start.time <- proc.time()
  # Fit 1: initialise z and the variance scale at their null-MLE values.
  succ_nullmle <- succotashr::succotash(Y = Y, X = X, k = num_sv,
                                        two_step = TRUE,
                                        z_init_type = "null_mle",
                                        var_scale_init_type = "null_mle",
                                        optmethod = "em")
  # Fit 2: identical model, random initialisation.
  succ_random <- succotashr::succotash(Y = Y, X = X, k = num_sv,
                                       two_step = TRUE,
                                       z_init_type = "random",
                                       var_scale_init_type = "random",
                                       optmethod = "em")
  # NOTE(review): tot.time is recorded but not returned.
  tot.time <- proc.time() - start.time
  # Collect per-gene estimates and per-fit scalars for both fits.
  postmean_df <- data.frame(succ_nullmle = succ_nullmle$betahat,
                            succ_random = succ_random$betahat)
  pi0vec <- c(succ_nullmle = succ_nullmle$pi0,
              succ_random = succ_random$pi0)
  scale_val_vec <- c(succ_nullmle = succ_nullmle$scale_val,
                     succ_random = succ_random$scale_val)
  llike_vec <- c(succ_nullmle = succ_nullmle$llike,
                 succ_random = succ_random$llike)
  null_llike_vec <- c(succ_nullmle = succ_nullmle$null_llike,
                      succ_random = succ_random$null_llike)
  lfdr_df <- data.frame(succ_nullmle = succ_nullmle$lfdr,
                        succ_random = succ_random$lfdr)
  nmeth <- length(pi0vec)
  # Mean squared error of the posterior means against the true betas.
  mse <- colMeans((postmean_df - beta_true) ^ 2)
  # AUC of lfdr as a classifier of null status; undefined (NA) when every
  # gene is null (nullpi == 1).
  auc <- rep(NA, nmeth)
  if(args_val$nullpi < 1) {
    for (index in 1:nmeth) {
      auc[index] <- pROC::roc(predictor = lfdr_df[, index], response = which_null)$auc
    }
  }
  return_vec <- c(pi0vec, mse, auc, scale_val_vec, llike_vec, null_llike_vec)
  return(return_vec)
}
## Simulation driver: build the per-replicate parameter grid, run one_rep()
## in parallel over it, and write summary tables.
itermax <- 100
## these change between replicates
Nsamp_seq <- c(5, 10, 20)
nullpi_seq <- c(0.5, 0.9, 1)
par_vals <- expand.grid(list(1:itermax, Nsamp_seq, nullpi_seq))
colnames(par_vals) <- c("current_seed", "Nsamp", "nullpi")
# Poisson thinning is only applied when some genes are non-null.
par_vals$poisthin <- TRUE
par_vals$poisthin[abs(par_vals$nullpi - 1) < 10 ^ -10] <- FALSE
# Convert each grid row into the named list one_rep() expects.
par_list <- list()
for (list_index in 1:nrow(par_vals)) {
  par_list[[list_index]] <- list()
  for (inner_list_index in 1:ncol(par_vals)) {
    par_list[[list_index]][[inner_list_index]] <- par_vals[list_index, inner_list_index]
    names(par_list[[list_index]])[inner_list_index] <- colnames(par_vals)[inner_list_index]
  }
}

## these do not change
args_val <- list()
args_val$log2foldsd <- 1
args_val$tissue <- "muscle"
args_val$path <- "../../../data/gtex_tissue_gene_reads/"
args_val$Ngene <- 1000
args_val$log2foldmean <- 0
args_val$skip_gene <- 0

## ## If on your own computer, use this
library(parallel)
cl <- makeCluster(detectCores()-1)
sout <- t(parSapply(cl = cl, par_list, FUN = one_rep, current_params = args_val))
stopCluster(cl)

## ## on RCC, use this
## np <- mpi.universe.size() - 1
## cluster <- makeMPIcluster(np)
## sout <- t(snow::parSapply(cl = cluster, X = par_list, FUN = one_rep, current_params = args_val))
## stopCluster(cluster)
## mpi.exit()

# NOTE(review): saved with a ".Rd" extension, which is normally reserved
# for R documentation files -- ".RData" was probably intended; verify
# before changing, since downstream scripts may load this exact name.
save(sout, file = "sout_succ_diff_init.Rd")

# 12 result columns: 2 methods x {pi0, mse, auc, scale, llike, null_llike},
# matching the length-12 vector returned by one_rep().
colnames(sout) <- rep(c("succ_null_mle", "succ_random"), times = 6)
pi0_mat <- cbind(par_vals, sout[, 1:2])
mse_mat <- cbind(par_vals, sout[, 3:4])
auc_mat <- cbind(par_vals, sout[, 5:6])
scale_val_mat <- cbind(par_vals, sout[, 7:8])
llike_mat <- cbind(par_vals, sout[, 9:10])
null_llike_mat <- cbind(par_vals, sout[, 11:12])
# NOTE(review): the CSV names say "ruvash_alpha1" although this script fits
# SUCCOTASH with different initialisations -- likely copy-paste from a
# sibling script; confirm before relying on these file names.
write.csv(pi0_mat, file = "pi0_ruvash_alpha1.csv", row.names = FALSE)
write.csv(mse_mat, file = "mse_ruvash_alpha1.csv", row.names = FALSE)
write.csv(auc_mat, file = "auc_ruvash_alpha1.csv", row.names = FALSE)
write.csv(scale_val_mat, file = "scale_val_ruvash_alpha1.csv", row.names = FALSE)
write.csv(llike_mat, file = "llike_ruvash_alpha1.csv", row.names = FALSE)
write.csv(null_llike_mat, file = "null_llike_ruvash_alpha1.csv", row.names = FALSE)
|
a32893871d4e6c8d29cdd5382f9e7dee65b93f1d
|
d6c1e61d203ccec997f422899a0e1b4e8d5c7d92
|
/data wrangling.R
|
cd5fc794088121d392e8041fe08e1c8a6376c2c4
|
[] |
no_license
|
MuchenZhu/RShiny_Toll_Transactions
|
bc1e7fe093830f22a80107550413fa39b1382100
|
a69ab93869bb59f2ddc0be4c2f371aff0741beb0
|
refs/heads/master
| 2020-04-25T07:40:10.323207
| 2019-02-26T02:23:33
| 2019-02-26T02:23:33
| 172,621,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,584
|
r
|
data wrangling.R
|
#---Muchen Zhu Individual Project Data Preparation---
setwd("~/Desktop/programming-class/individualproject/Toll_Transactions")

library("tidyr")
library("dplyr")

# Read raw transactions and split the combined Location / Date columns.
mydf <- read.csv("Toll_Transactions_wrangled.csv")
mydf <- separate(mydf, Location, c("lat","long"), sep = ",")
mydf <- separate(mydf, Date, c('Date','Time'), sep = " ")
mydf <- mydf[-2]  # drop the Time column produced by the split above
mydf$Date <- as.Date(mydf$Date,"%m/%d/%Y")
mydf$lat <- as.numeric(mydf$lat)
mydf$long <- as.numeric(mydf$long)
str(mydf)

# Keep only internally consistent rows: transponder transactions are a
# subset of total transactions.
mydf <- filter(mydf, mydf$Total_Transactions >= mydf$Transponder_Transactions)

# Keep the six facilities of interest.  Idiomatic %in% replaces the
# original chain of == comparisons joined with | (equivalent under filter).
facilities <- c("BHT", "FMT", "FSK", "HMB", "JFK", "BAY")
mydf <- filter(mydf, mydf$Facility %in% facilities)

# Statistical mode: most frequent value of x.
Mode <- function(x) {
  ux <- unique(x)
  ux[which.max(tabulate(match(x, ux)))]}

unique(mydf$Facility)

# Per-facility subsets, used to find each facility's modal coordinates.
mydf1 <- filter(mydf,mydf$Facility == 'BHT')
mydf2 <- filter(mydf,mydf$Facility == 'FMT')
mydf3 <- filter(mydf,mydf$Facility == 'FSK')
mydf4 <- filter(mydf,mydf$Facility == 'HMB')
mydf5 <- filter(mydf,mydf$Facility == 'JFK')
mydf6 <- filter(mydf,mydf$Facility == 'BAY')

Mode(mydf1$lat)
Mode(mydf1$long)

# Keep only rows at each facility's modal latitude/longitude (again %in%
# replaces the original chained == / | comparisons).
lat_modes <- c(Mode(mydf1$lat), Mode(mydf2$lat), Mode(mydf3$lat),
               Mode(mydf4$lat), Mode(mydf5$lat), Mode(mydf6$lat))
long_modes <- c(Mode(mydf1$long), Mode(mydf2$long), Mode(mydf3$long),
                Mode(mydf4$long), Mode(mydf5$long), Mode(mydf6$long))
mydf <- filter(mydf, mydf$lat %in% lat_modes)
mydf <- filter(mydf, mydf$long %in% long_modes)
|
45129a0806b81b285d8fc7193c5bca76a5bdfa2e
|
3dfcad5e4ca29823a6e7899dcd22aaf7f5df971c
|
/R/getDeviceResolution.R
|
66cbb6a1e5f20c90dd54d4e887ebb0aa293534c8
|
[] |
no_license
|
HenrikBengtsson/aroma.core
|
f22c931029acf55f3ad2fdb6eb3bc2f0d2ba04e4
|
1bf20e2b09f4b8c0ca945dfb26fdf1902c187109
|
refs/heads/master
| 2022-12-01T09:01:49.295554
| 2022-11-15T18:29:17
| 2022-11-15T18:29:52
| 20,845,682
| 2
| 3
| null | 2018-04-21T02:06:48
| 2014-06-15T01:32:43
|
R
|
UTF-8
|
R
| false
| false
| 121
|
r
|
getDeviceResolution.R
|
## Resolution of the current graphics device in pixels per inch, optionally
## scaled: raster character size (px) divided by character size (inches).
setMethodS3("getDeviceResolution", "default", function(scale=1, ...) {
  scale * par("cra") / par("cin")
})
|
7cc39ffd4bf123119a2ba967ecd57b4d74a30d08
|
2e1a9e1d0d038293bc8dba83d0473e9dc2f7f93e
|
/stats/scripts/plos_one_twitter.R
|
ffe339d868854197c3da06de3bea18b6a959fab9
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
neostoic/plos_altmetrics_study
|
92821fa2634e75235f0fc5603b7ee7e25d298c91
|
5d4bd840763286c77cb834ef351e137eefb7946b
|
refs/heads/master
| 2020-12-25T08:29:47.940176
| 2011-03-22T15:00:48
| 2011-03-22T15:00:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,216
|
r
|
plos_one_twitter.R
|
options(width=250)
setwd("/home/jason/projects/Plos altmetrics study")

# load raw_events.txt
d <- read.csv("./datasets/raw_events.txt", sep="\t")

# get the tweets
bt <- d[d$eventType=="backtweets",]

# column for which journal (characters 17-20 of the DOI encode the journal)
bt$journal <- substr(bt$doi, 17, 20)

# frame for just Plos ONE, where we have much more data
bt.pone <- bt[bt$journal == "pone",]
# Re-encode to drop factor levels that no longer occur after subsetting.
# FIX: the original indexed with [1:nrow(bt.pone)], a no-op subscript --
# factor() alone already drops unused levels.
bt.pone$doi <- factor(bt.pone$doi)
bt.pone$creator <- factor(bt.pone$creator)

# remove tweets with negative latency (print the discarded proportion first)
nrow(bt.pone[bt.pone$latency <= 0,]) / nrow(bt.pone) # 3.5%
bt.pone = bt.pone[bt.pone$latency > 0,]

# look for evidence of a decay function
## should use nlm() to actually fit curve...
latency.days = bt.pone$latency / 86400
latency.hist <- hist(latency.days, breaks=max(latency.days), plot=FALSE)
# +1 so zero-count bins survive the log-log axes
plot(latency.hist$counts + 1, log="xy", xlab="latency in days", ylab="number of tweets")

# check out the distributions (as expected, mostly power-law)
creators <- rev(sort(table(bt.pone$creator)))
plot(creators, log="xy")
dois <- rev(sort(table(bt.pone$doi)))
plot(dois, log="xy")

# save the dataset
write.table(bt.pone, file="./datasets/plos_one_tweets.txt", row.names=FALSE, sep="\t")
|
04a4c821a773d3af7d25699aef8093badf633d06
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/trestletech/ArrestAnalysis/getRecords.R
|
3244ce6dbbf0c1c7bb5bb87b2c2e9321a8458e1c
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
getRecords.R
|
library(lubridate)
library(httr)
#' Itemize all days in the specified format between the start date and the
#' end date (inclusive), returned as formatted character strings.
itemizeDates <- function(startDate, endDate,
                         format="%Y-%m-%d") {
  days <- seq(from = as.Date(startDate, format = format),
              to   = as.Date(endDate, format = format),
              by   = "days")
  format(days, format)
}
#' Download historic weather for a range of days from Weather Underground.
#'
#' @param wu_key Weather Underground API key (defaults to options("wu_key")).
#' @param zip ZIP code to query.
#' @param startDate The date to start from in the format of YYYY-MM-DD
#' @param endDate The date to end on in the format of YYYY-MM-DD
#' @param outputFile RDS file the (partial) results are written to after
#'   every day, so an interrupted run keeps what it downloaded so far.
#' @param sleepInterval Seconds to pause between API calls (rate limiting).
#' @param format Date format of startDate/endDate.
#' @return A list whose first element is the zip code, plus one element per
#'   day; days whose download failed are skipped with a warning.
getWeather <- function(wu_key=options("wu_key")[[1]], zip, startDate, endDate, outputFile="weather.Rds", sleepInterval=7, format="%Y-%m-%d"){
  if (missing(wu_key) || is.null(wu_key)){
    stop("No Weather Underground API key provided")
  }
  allDays <- itemizeDates(startDate, endDate, format)
  pb <- txtProgressBar(min=0, max=length(allDays), initial=0, style=3)
  weather <- list(zip=zip)
  counter <- 0
  for (d in allDays){
    Sys.sleep(sleepInterval)
    # getHistoricWeather() is defined elsewhere -- presumably performs the
    # actual API request; verify it is loaded before calling getWeather().
    tryCatch(weather[[d]] <- getHistoricWeather(wu_key, zip=zip, date=d), error=function(e){warning("Error downloading weather ", d)})
    # Persist after each day so partial progress survives a crash.
    saveRDS(weather, file=outputFile)
    counter <- counter+1
    setTxtProgressBar(pb, counter)
  }
  close(pb)
  weather
}
|
c888ed8622c39dc890d477bbf56d9545f239ace3
|
eb38ef31796289e538b81139186bf86ef18e88f6
|
/R/kinplot.R
|
41116586cba1138043c49f8132df111a065c7b43
|
[] |
no_license
|
cran/kinfit
|
c1d22a473685c48550802f7f3ada9559e2027473
|
4ae9d56c94f3be2fc44fb48917b2eddc741b8bb1
|
refs/heads/master
| 2016-08-04T16:03:48.555799
| 2015-07-03T00:00:00
| 2015-07-03T00:00:00
| 17,696,895
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,218
|
r
|
kinplot.R
|
# $Id: kinplot.R 117 2011-06-14 08:52:14Z kati $
# Copyright (C) 2008-2013 Johannes Ranke
# Contact: mkin-devel@lists.berlios.de
# This file is part of the R package kinfit
# kinfit is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>
## Suppress the R CMD check NOTE about the free variable "x" used inside curve().
if(getRversion() >= '2.15.1') utils::globalVariables("x")

#' Plot kinetic data together with the fitted kinetic models.
#'
#' Observed parent data (kinobject$data, NAs removed) are plotted against
#' time and, for every model in kinobject$fits (SFO, FOMC, HS, DFOP), the
#' fitted decline curve is overlaid.  Models whose fit failed are listed in
#' the legend as "<model> model failed".
#'
#' kinobject : list with at least $data (columns t, parent), $fits (named
#'             list of nls fits or failed-fit objects) and $parent (name of
#'             the parent substance, used in the legend).
#' ylim      : either value may be "auto" (lower limit 0, upper limit the
#'             data maximum).
#' lpos      : legend position keyword passed to legend().
kinplot <- function(kinobject,
        main = "",
	xlab = "Time [days]", ylab = "Parent [% of applied radioactivity]",
	ylim = c("auto", "auto"),
	lpos = "topright")
{
	kindata <- na.omit(kinobject$data)
	kinfits <- kinobject$fits
	if (ylim[1] == "auto") ylim[1] <- 0
	if (ylim[2] == "auto") ylim[2] <- max(kindata$parent)
	ylim <- as.numeric(ylim)
	plot(kindata$t, kindata$parent,
		main = main,
		xlab = xlab,
		ylab = ylab,
		ylim = ylim
	)
	n.m <- length(kinfits)
	colors <- ltys <- 1:n.m
	names(colors) <- names(ltys) <- names(kinfits)
	ltext <- paste(kinobject$parent, "measured")
	for (kinmodel in names(kinfits))
	{
		m = kinfits[[kinmodel]]
		# FIX: use inherits() instead of class(m) == "nls".  class() can
		# return a vector of length > 1, making the == comparison fragile
		# (and an error as an if() condition in R >= 4.2).
		if (inherits(m, "nls")) {
			if (!"parent.0" %in% names(coef(m))) {
				# No explicit parent.0 coefficient: draw the curve from
				# model predictions over a fine time grid.
				switch(kinmodel,
					SFO = lines(
						t <- seq(min(kindata$t), max(kindata$t), length.out=500),
						predict(m,
							newdata = data.frame(t)),
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					FOMC = lines(
						t <- seq(min(kindata$t), max(kindata$t), length.out=500),
						predict(m,
							newdata = data.frame(t)),
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					HS = lines(
						t <- seq(min(kindata$t), max(kindata$t), length.out=500),
						predict(m,
							newdata = data.frame(t)),
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					DFOP = lines(
						t <- seq(min(kindata$t), max(kindata$t), length.out=500),
						predict(m,
							newdata = data.frame(t)),
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]])
				)
				ltext <- c(ltext, paste("Fitted", kinmodel, "model"))
			} else {
				# parent.0 present: evaluate the model function directly via
				# curve() using the fitted coefficients.
				switch(kinmodel,
					SFO = curve(SFO(x,
							coef(m)[["parent.0"]],
							coef(m)[["k"]]),
						from = min(kindata$t), to = max(kindata$t), add=TRUE,
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					FOMC = curve(FOMC(x,
							coef(m)[["parent.0"]],
							coef(m)[["alpha"]],
							coef(m)[["beta"]]),
						from = min(kindata$t), to = max(kindata$t), add=TRUE,
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					HS = curve(HS(x,
							coef(m)[["parent.0"]],
							coef(m)[["k1"]],
							coef(m)[["k2"]],
							coef(m)[["tb"]]),
						from = min(kindata$t), to = max(kindata$t), add=TRUE,
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]),
					DFOP = curve(DFOP(x,
							coef(m)[["parent.0"]],
							coef(m)[["k1"]],
							coef(m)[["k2"]],
							coef(m)[["g"]]),
						from = min(kindata$t), to = max(kindata$t), add=TRUE,
						col = colors[[kinmodel]],
						lty = ltys[[kinmodel]]))
				ltext <- c(ltext, paste("Fitted", kinmodel, "model"))
			}
		} else {
			# Failed fit: no curve, NA line type so the legend shows no line.
			ltext <- c(ltext, paste(kinmodel, "model failed"))
			ltys[[kinmodel]] <- NA
		}
	}
	legend(lpos, bty="n", inset = 0.05,
		legend = ltext,
		pch = c(1, rep(NA, n.m)),
		lty = c(NA, ltys),
		col = c(1, colors))
}
|
c5bdf1ff28d6660a8ba63868550acede29a121de
|
9ef498d146d1b59ac392e673f2f07c10e83224bd
|
/scripts/DefactoDescriptionSwaziland.R
|
e77671d3a7e356bfc74bced6fe0bbebe2b922a4b
|
[] |
no_license
|
ceh-2000/ABM
|
647cfbaecbfa0e7c413525b3cbbb63905ed169f4
|
a05793fe6e9b122c8c2b3cf7b168e39ca3a1200f
|
refs/heads/master
| 2023-01-13T23:39:16.354682
| 2020-11-24T04:53:37
| 2020-11-24T04:53:37
| 288,858,761
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,804
|
r
|
DefactoDescriptionSwaziland.R
|
## De facto settlement detection for Pigg's Peak (Eswatini/Swaziland):
## distribute a synthetic population from WorldPop rasters, estimate a
## kernel density surface, contour it into urban-area polygons, and plot
## each settlement's population density.
## NOTE(review): rm(list = ls()) and setwd() wipe/retarget the session and
## make the script non-portable -- acceptable for a class project, but
## avoid in shared code.
rm(list = ls(all = TRUE))
library(raster)
library(sf)
library(tidyverse)
library(maptools)
library(spatstat)
library(units)
setwd("~/Desktop/data-science/data")
# Get raster information for adm2
swz_pop20 <- raster("world_pop/swz_ppp_2020.tif")
# Get shapefile information for adm2
swz_adm2 <- read_sf("gadm36_SWZ_shp/gadm36_SWZ_2.shp")
# We only want the shape information for Pigg's Peak
pigg_adm2 <- swz_adm2 %>%
  filter(NAME_2 == "Pigg's Peak")
# We only want raster data for Pigg's Peak
swz_pop20_adm2 <- crop(swz_pop20, pigg_adm2)
swz_pop20_adm2 <- mask(swz_pop20_adm2, pigg_adm2)
# Get the population of Hhohho and round down for people
pop <- floor(cellStats(swz_pop20_adm2, "sum"))
pop
# Save as a PNG file
png("images/agg_pigg.png", width = 800, height = 800)
plot(swz_pop20_adm2, main = NULL)
plot(st_geometry(pigg_adm2), add = TRUE)
dev.off()
# Write `sf` object as a shapefile so spatstat can bool out
# NOTE(review): maptools (readShapeSpatial) has been retired from CRAN;
# sf/spatstat conversions (as.owin) are the supported route now -- verify
# this still runs on a current R installation.
st_write(pigg_adm2, "pigg_adm2.shp", delete_dsn = TRUE)
pigg_adm2_with_mtools <- readShapeSpatial("pigg_adm2.shp")
win <- as(pigg_adm2_with_mtools, "owin")
plot(win, main = NULL)
win
# Use spatial probability distribution to find places to put people
# `as.im` converts raster to pixel image and then distributes random points according
set.seed(5)
pigg_adm2_ppp <- rpoint(pop, f = as.im(swz_pop20_adm2), win = win)
# Save both as a PNG
png("images/pigg_random_people.png", width = 2000, height = 2000)
plot(win, main = NULL)
plot(pigg_adm2_ppp, cex = 0.09, add = TRUE)
dev.off()
# Create spatial probability density function or kernel density estimation is a three dimension version
# The likelihood cross-validated bandwidth (bw.ppl) is slow, so it was
# computed once and cached to disk; uncomment to recompute.
# bw <- bw.ppl(pigg_adm2_ppp)
# save(bw, file = "bw.RData")
load("Rdata/bw.RData")
pigg_density_image <- density.ppp(pigg_adm2_ppp, sigma = bw)
# Convert density image to spatial grid
Dsg <- as(pigg_density_image, "SpatialGridDataFrame")
# Convert back to image
Dim <- as.image.SpatialGridDataFrame(Dsg)
# Create polygon to contour our image and create initial settlements
Dcl <- contourLines(Dim, levels = 8e5) # Define arbitrarily at first
# Create Spatial Lines Data Frame
SLDF <- ContourLines2SLDF(Dcl, CRS("+proj=longlat +datum=WGS84 +no_defs"))
# Convert back to sf object which we know how to use
sf_multiline_obj <- st_as_sf(SLDF, sf)
# Plot density image and sf objects
png("images/pigg_lines_density_image.png", width = 2000, height = 2000)
plot(pigg_density_image, main = NULL)
plot(sf_multiline_obj, add = TRUE)
dev.off()
# Convert valid polygons on the inside (not touching adm1 boundaries) as polygons
png("images/pigg_inner_polygons.png", width = 2000, height = 2000)
inside_polys <- st_polygonize(sf_multiline_obj)
plot(st_geometry(inside_polys)) # Plot only internal polygons
dev.off()
# Now we want to handle the polygons that do not close
png("images/pigg_outer_polygons.png", width = 2000, height = 2000)
outside_lines <- st_difference(sf_multiline_obj, inside_polys) # get back the polygon lines that are not lines
plot(st_geometry(outside_lines)) # Plot only internal polygons and outer polygon lines
dev.off()
# Let's make our outer polygons by intersecting them with our adm1 and bounding them!
# Buffer width 0.0014 degrees -- chosen empirically for this map scale.
my_outer_polys <- st_buffer(outside_lines, 0.0014) %>%
  st_difference(pigg_adm2, .) %>%
  st_cast(., "POLYGON")
plot(st_geometry(my_outer_polys))
# Remove the polygon that is everything not in the contour line by filtering out the largest closed area
my_outer_polys$area <- as.numeric(st_area(my_outer_polys)) # Find areas of polygons
subpolys <- my_outer_polys %>%
  filter(area < 1e8) # CHANGE THIS NUMBER FOR MY DATA
# Get population of each newly created polygon and store in a new column into
subpolys_extract <- raster::extract(swz_pop20_adm2, subpolys, df = TRUE) # df = TRUE outputs as a data frame
subpolys_totals <- subpolys_extract %>%
  group_by(ID) %>%
  summarize(pop20 = sum(swz_ppp_2020, na.rm = TRUE))
subpolys <- subpolys %>%
  add_column(pop20 = subpolys_totals$pop20)
# Plot `subpolys` over the density function to check
png("images/subpolys.png", width = 1200, height = 1200)
plot(pigg_density_image, main = NULL)
plot(st_geometry(subpolys), border="white", add = TRUE)
dev.off()
# Remove places with tiny populations
subpolys_filtered <- subpolys %>%
  filter(pop20 > 10)
# New, population-filtered image
png("images/subpolys_filtered.png", width = 1200, height = 1200)
plot(pigg_density_image, main = NULL)
plot(st_geometry(subpolys_filtered), border="white", add = TRUE)
dev.off()
# Repeat with inner polygons to filter out small population polygons
inside_polys <- st_collection_extract(inside_polys, "POLYGON")
ips_extract <- raster::extract(swz_pop20, inside_polys, df = TRUE)
ips_totals <- ips_extract %>%
  group_by(ID) %>%
  summarize(pop20 = sum(swz_ppp_2020, na.rm = TRUE))
inside_polys <- inside_polys %>%
  add_column(pop20 = ips_totals$pop20)
inside_polys_filtered <- inside_polys %>%
  filter(pop20 > 10)
# Combine inner and outer polygons
uas <- st_union(inside_polys_filtered, subpolys_filtered)
# Convert to type polygon
urban_areas <- st_cast(uas, "POLYGON")
# Remove columns that we don't need; we only care about geometry
urban_areas[ ,1:19] <- NULL
# Plot urban areas
png("images/urban_areas.png", width = 1200, height = 1200)
plot(pigg_density_image, main = NULL)
plot(st_geometry(urban_areas), border="white", add = TRUE)
dev.off()
# Extract populations for urban_area polygons
uas_extract <- raster::extract(swz_pop20, urban_areas, df = TRUE)
uas_totals <- uas_extract %>%
  group_by(ID) %>%
  summarize(pop20 = sum(swz_ppp_2020, na.rm = TRUE))
# Add data to our sf object for urban areas
urban_areas <- urban_areas %>%
  add_column(pop20 = uas_totals$pop20)
# Only include the unique urban areas
urban_areas <- urban_areas %>%
  unique()
# Describe each new geometry's density
urban_areas <- urban_areas %>%
  mutate(area = st_area(urban_areas) %>%
           set_units(km^2)) %>%
  mutate(density = as.numeric(pop20 / area))
# Plot urban areas with population fill and density labels
ggplot() +
  theme_light() +
  geom_sf(data = pigg_adm2,
          size = 0.75,
          color = "gray50",
          fill = "pink",
          alpha = 0.3) +
  geom_sf(data = urban_areas,
          mapping = aes(fill = pop20),
          size = 0.45,
          alpha = 0.5) +
  geom_sf_text(
    data = urban_areas,
    mapping = aes(label = floor(density)),
    nudge_y = -0.001
  ) +
  scale_fill_gradient2(low = "blue", mid = "pink", high = "red", midpoint = 2500)+
  xlab("longitude") +
  ylab("latitude") +
  ggtitle("Pigg's Peak with De Facto Settlements") +
  theme(
    plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5),
    panel.background = element_rect(fill = "azure"),
    panel.border = element_rect(fill = NA),
    panel.grid = element_blank()
  )
ggsave("images/urban_areas_plot.png")
|
8b255d09b91637c112ce82d8550699a1b632368a
|
459749b2629cf7697105c3e1289b2b875a1d5822
|
/man/moveToGroup.Rd
|
5f6a6e94efc2fb54819e34e07ef1bcf27783fd04
|
[] |
no_license
|
GitBrianLaw/rcrunch
|
52d0be58a552c9609fb487fdb8de408d081d9577
|
f75deb52282175b6f9d4c69db954ee7ea8b89a9b
|
refs/heads/master
| 2021-01-09T08:05:51.008417
| 2018-01-25T22:08:16
| 2018-01-25T22:08:16
| 48,709,208
| 0
| 0
| null | 2016-02-10T19:20:37
| 2015-12-28T19:37:57
|
R
|
UTF-8
|
R
| false
| true
| 1,053
|
rd
|
moveToGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shoji-order.R
\name{moveToGroup}
\alias{moveToGroup}
\alias{moveToGroup<-}
\title{Move entities to a group}
\usage{
moveToGroup(x, value)
moveToGroup(x) <- value
}
\arguments{
\item{x}{VariableGroup}
\item{value}{Variable, VariableCatalog subset, or Dataset subset}
}
\value{
\code{x} with the entities in \code{value} appended to it. If the
containing order object has \code{duplicates=FALSE}, the entities will be "moved"
to this group. Otherwise, their references will be copied to the group.
}
\description{
Shoji entities can be placed into groups; this is mostly used for grouping
variables for display in the app, but it is technically possible for any of
the order catalogs. This function moves an entity to one of these groups.
}
\details{
The function has two versions: a regular function and a setter. They do the
same thing, but the setter is probably more succinct.
}
\examples{
\dontrun{
moveToGroup(ordering(ds)[["Demographics"]]) <- ds[c("gender", "age")]
}
}
|
3c1e17a352976be31ba6087c9525b107c2eaca61
|
2347ff63059616ee941ba140aaf12531d9ce9540
|
/ML-4-MLE.R
|
e6d6dce2c3467dc6b4ad46f21968494806578517
|
[] |
no_license
|
AshwinSarath/R
|
7bd6a50a824fd11188a519894c769ab556c09179
|
92d33d6b0a3092bdb74bdff0ba2eb66efe53bec4
|
refs/heads/master
| 2023-06-27T19:52:54.749273
| 2021-08-03T06:13:50
| 2021-08-03T06:13:50
| 223,977,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 935
|
r
|
ML-4-MLE.R
|
#EXP4
# MLE
#AIM: To estimate the parameter of the binomial distribution using maximum likelihood estimation in R
# To estimate the parameters of the normal distribution using maximum likelihood estimation in R

#(1) Normal distribution
set.seed(1001)  # fix the RNG so the "random" sample is reproducible
N <- 100
x <- rnorm(N, mean = 3, sd = 2)  # 100 draws from N(mean = 3, sd = 2)
x
mean(x)
sd(x)

# Negative log-likelihood of N(mu, sigma) for the sample x.
# FIX: use dnorm(..., log = TRUE) and sum directly instead of log(dnorm(...));
# the latter underflows to log(0) = -Inf for points far in the tails, which
# breaks the optimiser.  Mathematically the two are identical.
LL <- function(mu, sigma){
  -sum(dnorm(x, mu, sigma, log = TRUE))
}
LL
library(stats4)
# Unconstrained fit, then a box-constrained fit keeping sigma > 0.
mle(LL, start = list(mu = 1, sigma = 1))
mle(LL, start = list(mu = 1, sigma = 1), method = 'L-BFGS-B',
    lower = c(-Inf, 0), upper = c(Inf, Inf))

#(2)
# MLE for the binomial distribution
set.seed(22)
heads <- rbinom(1, 100, 0.5); heads

# Likelihood of observing `heads` successes in 100 trials at probability p.
likelihood <- function(p){
  dbinom(heads, 100, p)
}
# nlm() minimises, so negate the likelihood to maximise it.
# (Also fixes the "neg_liklihood" typo in the original local name.)
neg_likelihood <- function(m){
  dbinom(heads, 100, m) * (-1)
}
nlm(neg_likelihood, 0.5, stepmax = 0.5)
|
05cf51c7bb4c99d9ed8295053a39ba7ea9829c60
|
c5baacf45414864179c18c4878af5464e103ece8
|
/Lab18/Interfacing R and Python/python_from_r.r
|
37c9344a3f267163414a69a08494eae037c56419
|
[] |
no_license
|
VladimirShleyev/Method_R_doc
|
ed1cbbd9b59cc1cec445e87a9e5696f665f83065
|
85aa7c64e3816108f0e84a0ff1efa11cc8e37d3b
|
refs/heads/master
| 2023-07-16T00:29:57.871114
| 2021-09-03T12:13:19
| 2021-09-03T12:13:19
| 286,023,236
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,308
|
r
|
python_from_r.r
|
# Example: calling Python from R via {reticulate}, with {renv} for isolation.
renv::init() # initialise the project-local virtual environment
renv::install("reticulate") # install the reticulate library from CRAN
renv::snapshot() # snapshot the library versions used in this virtual environment
# the version list is pinned in the .lock file so the environment can be restored
# renv::restore() # command to roll back to the last good library state
# -------------------
library(reticulate)
use_python(python = Sys.which("python3"), required = TRUE)
# alias for Python's main module
py <- import_main()
# set Python-side variables directly from the R session
# interactive Regex tutorial - https://regex101.com/
py$pattern <- c("\\.*(телефон).*?([\\d-]+)")
# run the CLI script /py-script.py
result <- py_run_file("./Lab18/Interfacing R and Python/py_script.py")
result$data # inspect the components of the returned python object
result$find_phone
result$pattern
# ------------------
# we can also start a Python REPL from inside the R interpreter;
# note the console prompt: (>>) means the Python REPL, (>) means R
repl_python()
# py_list = [1,2,3,4,5] # create a python object - type this in the console!
# exit # leave the REPL - type this in the console!
# access the python object we just created from R:
# py$py_list
# Finally: individual Python lines can be executed inside an R script, and any
# Python data set can be imported into the R environment and used as R objects
py_run_string("print('Эта строка уже из Питона')", local = FALSE, convert = TRUE)
|
0b737a07cf1a6ab028f6d2bdda03696dff6e4b39
|
845c33e99e5a2475ae334ed00b12f08f7346c8b0
|
/man/seqfishplus.Rd
|
b8e0ccfd7bfe96b2502a0cbbf92117fd5a631a6f
|
[] |
no_license
|
cran/smfishHmrf
|
0236d9802a718b6477a8732e8a0130bdcdf7fda4
|
41aae463be028aeb53e34eb5e49a044af0472565
|
refs/heads/master
| 2023-01-05T12:54:43.094726
| 2020-11-03T11:20:02
| 2020-11-03T11:20:02
| 310,523,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 965
|
rd
|
seqfishplus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seqfishplus.R
\docType{data}
\name{seqfishplus}
\alias{seqfishplus}
\title{SeqFISHplus dataset}
\format{
A list containing the following fields: y, nei, blocks, damp, mu, sigma
\describe{
\item{y}{gene expression matrix}
\item{nei}{cell adjacency matrix}
\item{blocks}{vertex (or cell) update order; a list of vertex colors; cells marked with the same color are updated at once}
\item{damp}{dampening constants (length k, the number of clusters)}
\item{mu}{initialization (means). Means is a (i,k) matrix}
\item{sigma}{initialization (sigmas). Sigmas is a (i,j,k) 3D matrix. k is cluster id. (i,j) is covariance matrix}
}
}
\usage{
data(seqfishplus)
}
\description{
Data from SeqFISH experiment on SS cortex. This is a dataset with 523 cells and the expression of about 500 spatial genes
}
\examples{
data(seqfishplus)
}
\references{
\insertRef{Eng2019}{smfishHmrf}
}
\keyword{datasets}
|
f2ea79b3384b259b57d3af54991cf9a07835fb64
|
a0bfaeef32beb7fd99b611108fbbf1b9d2067bce
|
/ui.R
|
02d8b45ccf1141c7045fdec8a00b5863458b5702
|
[] |
no_license
|
HCH1/demo_fig_app1
|
7c1d3df492fd2a199bd65487a342d8b5a3a24bae
|
2d6132b7e7a987f41d92ddd5d50bb9ba66659e79
|
refs/heads/master
| 2020-03-30T22:21:08.900302
| 2018-10-05T08:33:27
| 2018-10-05T08:33:27
| 151,663,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
r
|
ui.R
|
# Shiny UI for the "auto-export allow layer list" app: upload LPO***.csv
# files, follow the linked checklist (diff check, Google Drive upload),
# preview an embedded image/video, and download the processed table.
library(shiny)
ui <- fluidPage(
titlePanel("Auto-export allow layer list app"),
sidebarLayout(
sidebarPanel(
textInput("text1", "what's ur Tech Variant?", value = "22FDX"),
fileInput("file1", "(1) Pls input 22FDX's LPO***.csv -> ALL",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
## raw HTML helpers in shiny:
## https://shiny.rstudio.com/articles/tag-glossary.html
tags$a(href="https://text-compare.com/", "(2) Suggest to check text_diff"),
## <a href="www.rstudio.com">Click here!</a>
tags$br(),
tags$br(),
tags$a(href="https://drive.google.com/open?id=1MKb-9hGF7S4KKJ16Cv54CuJrdXpNd_6G", "(3) Upload to google drive"),
tags$br(),
tags$br(),
tags$code("This text will be displayed as computer code."),
## <code>This text will be displayed as computer code.</code>
tags$br(),
tags$br(),
#https://shiny.rstudio.com/articles/tag-glossary.html
tags$img(src='myImage.png', align = "left"),
tags$br(),
tags$br(),
tags$video(src = "1005v2.mov", type = "video/mov", autoplay = NA, controls = NA, width = 700, height = 500),
## <video src="video.mp4" type="video/mp4" autoplay controls></video>
tags$br(),
tags$br(),
downloadButton("downloadData", "Download")
),
mainPanel(
tableOutput("DRC_grep")
)
)
)
####################################################end
####################################################end
#must run the command below separately
##runApp()
####################################################end
####################################################end
#Deploying applications
#http://docs.rstudio.com/shinyapps.io/getting-started.html#deploying-applications
# NOTE(review): the commented line below embeds a shinyapps.io account token and
# secret in source control - these credentials should be rotated and kept out of
# the repository (e.g. via environment variables).
#rsconnect::setAccountInfo(name='hch1', token='41CC9A84435E2E7CAE08CE65D932569D', secret='mEv+SQx+ACmrHLfl1qbGFdH/cc9EQj7lP8fymw3x')
##library(rsconnect)
##deployApp()
####################################################end
####################################################end
|
eb1e3af2e8116cee6a7da882b7bb1b35f66b4041
|
01ececa7c221357eaedf85a1c2b8414fd45302a2
|
/helperScripts/PutNewCodeHere.R
|
d8f65bce6c73c794f132b57fcb797a813ac4a503
|
[] |
no_license
|
sonejilab/cellexalvrR
|
d7e30f147d3d991e6858f50b11e90a31863e759c
|
c533136f59fa906e1c173a46cc4f2e36608c0204
|
refs/heads/master
| 2023-03-31T19:05:15.669831
| 2023-03-17T07:24:40
| 2023-03-17T07:24:40
| 133,559,720
| 4
| 2
| null | 2018-05-15T18:52:17
| 2018-05-15T18:52:15
| null |
UTF-8
|
R
| false
| false
| 64
|
r
|
PutNewCodeHere.R
|
# Stub: intended to compare `x` against a subset described by `linearSelection`
# and `cells`; no implementation yet (the empty body returns NULL).
CompareToSubset <- function (x, linearSelection, cells ) {
}
|
5f44bde49eb16269ffdd2e0d535ab87472e50523
|
37794cfdab196879e67c3826bae27d44dc86d7f7
|
/Chemistry/Proteins.Structure.R
|
8538aefb132509fb4ea742d2ee15b29a974e7c3a
|
[] |
no_license
|
discoleo/R
|
0bbd53a54af392ef53a6e24af85cec4f21133d17
|
e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff
|
refs/heads/master
| 2023-09-05T00:43:32.381031
| 2023-08-31T23:03:27
| 2023-08-31T23:03:27
| 213,750,865
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,257
|
r
|
Proteins.Structure.R
|
# this file:
# source("Proteins.Structure.R")
###########################
### Fi-Score
# - based on package Fiscore:
# https://github.com/AusteKan/Fiscore/blob/main/Fiscore/R/PDB_prepare.R;
# - specific code has been extracted as individual helper-functions,
# see functions below;
# - the initial code has been refactored substantially;
source("Proteins.Structure.FiScore.R")
########################
# Open & Clean PDB file:
# read.pdb: load a PDB file via bio3d and clean it (renumber residues, fix
# amino-acid names, strip waters, ligands and terminal artefacts).
#
# file_name:   path to the PDB file
# lim.protein: minimum chain length to count as a protein; sequences of this
#              length or shorter raise an error (set <= 0 to disable)
# ...:         forwarded to bio3d::read.pdb
#
# Errors when the file contains no amino-acid residues or only a peptide.
# Returns the cleaned bio3d pdb object.
read.pdb = function(file_name, lim.protein=5, ...) {
pdb_file_temp = bio3d::read.pdb(file_name, ...);
# clean file to remove terminal residues and ligand data
pdb_file_temp = bio3d::clean.pdb(pdb_file_temp,
consecutive = TRUE, force.renumber = FALSE, fix.chain = FALSE, fix.aa = TRUE,
rm.wat = TRUE, rm.lig = TRUE, rm.h = FALSE, verbose = FALSE);
# Sanity checks on the extracted sequence:
pdb.seq = bio3d::pdbseq(pdb_file_temp);
if(length(pdb.seq) == 0) { stop("The file has no amino acid residues!"); }
if(lim.protein > 0 && length(pdb.seq) <= lim.protein) {
stop("This is a peptide and not protein!");
}
return(pdb_file_temp);
}
### Structure
as.type.helix = function(type) {
# Map numeric PDB helix class codes (1-10) onto descriptive factor labels.
# Codes outside 1-10 become their own "Other: <code>" level.
seen = unique(type)
known = seen %in% 1:10
level.codes = c(1:10, seen[ ! known])
level.labels = c(
'Right-handed alpha helix',
'Right-handed omega helix',
'Right-handed pi helix',
'Right-handed gamma helix',
'Right-handed 310 helix',
'Left-handed alpha helix',
'Left-handed omega helix',
'Left-handed gamma helix',
'27 ribbon/helix helix',
'Polyproline helix',
paste0("Other: ", seen[ ! known])
)
out = factor(type, levels=level.codes)
levels(out) = level.labels
return(out)
}
as.type.sheet = function(type) {
# Translate PDB sheet "sense" codes into descriptive factor labels:
# 0 = first strand of a sheet (labelled parallel here), 1 = parallel,
# -1 = antiparallel; anything else becomes an "Other: <code>" level.
seen = unique(type)
known = seen %in% c(-1, 0, 1)
level.codes = c(0, 1, -1, seen[ ! known])
level.labels = c(
'Parallel sheet', # TODO: separate level "Start sheet"?
'Parallel sheet',
'Antiparalel sheet',
paste0("Other: ", seen[ ! known])
)
out = factor(type, levels=level.codes)
levels(out) = level.labels
return(out)
}
# features.pdb: extract secondary-structure annotations from a bio3d pdb
# object into a named list of data frames ("helix", "sheet", "turn"), adding
# a human-readable "Type" column via as.type.helix()/as.type.sheet().
# Returns an empty list when the object has no named attributes, and omits
# any section whose start field is NULL.
features.pdb = function(pdb) {
pdb_file = pdb;
pdb.nms = attributes(pdb_file)$names;
feature_list = list();
if(is.null(pdb.nms)) return(feature_list);
# Reference:
# https://www.wwpdb.org/documentation/file-format-content/format23/sect5.html
#
### TYPE OF HELIX
#
# TYPE OF HELIX CLASS NUMBER
# (COLUMNS 39 - 40)
# ---------------------------------------------------
# Right-handed alpha (default) 1
# Right-handed omega 2
# Right-handed pi 3
# Right-handed gamma 4
# Right-handed 310 5
# Left-handed alpha 6
# Left-handed omega 7
# Left-handed gamma 8
# 27 ribbon/helix 9
# Polyproline 10
#
#
### TYPE OF SHEET
#
# The sense indicates whether strand n is parallel (sense = 1)
# or anti-parallel (sense = -1) to strand n-1.
# Sense is equal to zero (0) for the first strand of a sheet.
#
### TURNS
#
# Turns include those sets of residues which form beta turns, i.e.,
# have a hydrogen bond linking (C- O)[i] to (N-H)[i + 3].
# Turns which link residue i to i+2 (gamma-bends) may also be included.
# Others may also be classified as turns.
# Helix:
if("helix" %in% pdb.nms) {
# test if attribute is not NULL:
if( ! is.null(pdb_file$helix$start)) {
helix_df = as.data.frame(pdb_file$helix);
type = as.vector(helix_df$type);
helix_df$Type = as.type.helix(type);
feature_list[["helix"]] = helix_df;
}
}
# Sheets:
if("sheet" %in% pdb.nms) {
# prepare sheet data frame;
# test if attribute is not NULL;
if( ! is.null(pdb_file$sheet$start)) {
sheet_df = as.data.frame(pdb_file$sheet);
type = as.vector(sheet_df$sense);
sheet_df$Type = as.type.sheet(type);
feature_list[["sheet"]] = sheet_df;
}
}
# Turns:
if("turn" %in% pdb.nms){
# prepare turn data frame;
# test if attribute is not NULL;
if( ! is.null(pdb_file$turn$start)) {
turn_df = as.data.frame(pdb_file$turn);
type = as.vector(turn_df$turnId);
turn_df$Type = type; # turn ids are kept as-is (no descriptive mapping)
feature_list[["turn"]] = turn_df;
}
}
return(feature_list);
}
# torsions.pdb: compute backbone dihedral angles for a bio3d pdb object and
# return them as a data frame with residue numbers (df_resno) and 3-letter
# residue names (df_res) parsed from the row names. Rows missing phi or psi
# (terminal residues) are dropped; the logical keep-mask is attached as the
# "complete" attribute so BFactor.pdb can align atoms to the same rows.
torsions.pdb = function(pdb) {
### Torsion angles
torsion_angles = bio3d::torsion.pdb(pdb);
# Extract torsion angle table:
pdb_df = torsion_angles$tbl;
# - keep only rows with a full (phi, psi) dihedral pair;
# NOTE: terminal residues do not contain all of the angles;
isComplete = stats::complete.cases(pdb_df[ , c("phi","psi")]);
pdb_df = pdb_df[isComplete, , drop=FALSE];
# Extract residue numbers from row names (e.g. "12.A.ALA"):
# Note: the vectorised code should be faster;
df_resno = as.numeric(stringr::str_extract(rownames(pdb_df), "[0-9]{1,}"));
# df_resno = as.numeric(sapply(rownames(pdb_df), function(x) {
# stringr::str_extract(x, "[0-9]{1,4}");
# }));
# Extract 3-letter residue names:
df_res = as.vector(stringr::str_extract(rownames(pdb_df), "[A-Z]{3}"));
# df_res = as.vector(sapply(rownames(pdb_df), function(x) {
# stringr::str_extract(x, "[A-Z]{3}");
# }));
# Append residue numbers and names as columns
pdb_df = cbind.data.frame(pdb_df, df_resno);
pdb_df = cbind.data.frame(pdb_df, df_res);
attr(pdb_df, "complete") = isComplete;
return(pdb_df);
}
# BFactor.pdb: append C-alpha B-factors to the torsion-angle data frame
# produced by torsions.pdb(), aligned via the "complete" attribute.
#
# pdb:       bio3d pdb object
# torsions:  optional precomputed torsions.pdb(pdb) result (recomputed if NULL)
# normalize: add a min-max normalised B_normalised column (via
#            MINMAX_normalisation_func)
#
# NOTE(review): when the row counts disagree, the mismatch branch is a no-op
# (the merge fallback is commented out), so B_factor is silently absent in
# that case - downstream code should verify the column exists.
BFactor.pdb = function(pdb, torsions=NULL, normalize=TRUE) {
### Torsion angles:
if(is.null(torsions)) { pdb_df = torsions.pdb(pdb); }
else pdb_df = torsions;
### B-factor extraction:
# Full data frame:
# - includes dihedral angles coordinates and residue info;
# Extract B-factor information for the C-alpha atoms, subset to the same
# rows kept by torsions.pdb (the "complete" mask).
isComplete = attr(pdb_df, "complete");
idCA = which(pdb$atom$elety == "CA")[isComplete];
# & (pdb$atom$resno %in% pdb_df$"df_resno") );
pdb_b = pdb$atom[idCA, c("resno","resid","b")];
# Adding B factor information
if(nrow(pdb_df) != nrow(pdb_b)) {
# TODO: ugly ERROR;
# - should be corrected by using isComplete;
# names(pdb_b)[3] = "B_factor";
# pdb_df = merge(pdb_df, pdb_b[, c("resno", "B_factor")],
# by.x = "df_resno", by.y = "resno");
} else {
pdb_df$B_factor = pdb_b$b;
}
if(all(pdb_df$B_factor == 0)) {
warning("All B-factors are 0 and the analysis will be limited");
}
### B-factor normalization
# - added as the B_normalised column;
if(normalize) {
pdb_df$B_normalised = MINMAX_normalisation_func(pdb_df$B_factor);
}
return(pdb_df);
}
### Fi-score
FiScore = function(pdb_df) {
# Fi-score per residue: phi * psi * normalised B-factor, scaled by the
# product of the phi and psi standard deviations across the whole protein.
#
# pdb_df: data frame with numeric columns phi, psi and B_normalised
# Returns a numeric vector (one score per row of pdb_df).
scale.div = stats::sd(pdb_df$psi) * stats::sd(pdb_df$phi)
scores = pdb_df$phi * pdb_df$psi * pdb_df$B_normalised / scale.div
return(scores)
}
### AA Indices
# from: data(bio3d::aa.index)
# hydrophob: collect every hydropathy/hydrophobicity scale from the
# bio3d::aa.index data set (descriptions matching "Hydropath"/"Hydrophob",
# case-insensitive) into one matrix, one scale per row.
hydrophob = function() {
tmp = sapply(bio3d::aa.index, function(x) grepl("(?i)Hydropath|Hydrophob", x$D))
I = lapply(bio3d::aa.index[tmp], function(x) x$I);
do.call(rbind, I);
}
# describe: print (when print = TRUE) and invisibly return the one-line
# description of every amino-acid index in bio3d::aa.index, wrapped to
# `width` characters with continuation lines indented by `indent.ext`.
describe = function(width = 0.9 * getOption("width"), indent.ext = 3, print=TRUE) {
txt = sapply(bio3d::aa.index, '[[', "D");
txt = paste0(names(txt), ": ", txt);
if(print) {
cat(strwrap(txt, width=width, exdent=indent.ext), sep = "\n");
}
invisible(txt);
}
####################
### Helper functions for the analysis
### MIN-MAX normalisation based on the input array
# input = numeric array;
# returns normalised array values;
### MIN-MAX normalisation based on the input array
# Rescale a numeric vector onto [0, 1]:
#   (x - min(x)) / (max(x) - min(x))
#
# array: numeric vector (e.g. raw B-factors).
# Returns a numeric vector of the same length as `array`.
#
# A constant input (max == min) has no spread to rescale, so every element
# maps to 0. This fixes two defects in the original: a constant non-zero
# vector divided by zero (yielding NaN - the old TODO), and an all-zero
# vector returned a length-1 scalar instead of a vector.
MINMAX_normalisation_func = function(array) {
rg = range(array)
rg.diff = rg[2] - rg[1]
if (rg.diff == 0) {
return(rep(0, length(array)))
}
return((array - rg[1]) / rg.diff)
}
|
e9b72f49cc17a8ad57e83c3fc7be2cede66d6513
|
540929ccef8ecc44bb1c2971f6fa605f4c603130
|
/code/covidmultistartparaV.R
|
cf8e8101f95685c8401a64066293d8e772e5a4f0
|
[] |
no_license
|
tahmid-usc/covidGP
|
46667ff5fcfd94365190e8d08eab9f17b4021197
|
44e8ab17307ca7a923df58ddd303e9187b315aa6
|
refs/heads/master
| 2022-12-01T12:00:13.246927
| 2020-08-13T22:36:23
| 2020-08-13T22:36:23
| 261,368,391
| 2
| 0
| null | 2020-05-14T09:18:11
| 2020-05-05T05:21:07
|
R
|
UTF-8
|
R
| false
| false
| 2,923
|
r
|
covidmultistartparaV.R
|
library(mvtnorm)
library(kernlab)
library(optimx)
# RBF kernel
# ker: RBF (Gaussian) self-covariance matrix of the inputs x, with
# length-scale l and signal standard deviation sigf. kernlab parameterises
# the RBF kernel by sigma = 1/l^2, hence the conversion below.
ker <- function(x, l, sigf) {
rbf <- rbfdot(sigma = 1/l^2)
return(sigf^2 * kernelMatrix(rbf, x = x))
}
# ker2: RBF cross-covariance matrix between input sets x and y, with the
# same (l, sigf) parameterisation as ker().
ker2 <- function(x, y, l, sigf) {
rbf <- rbfdot(sigma = 1/l^2)
return(sigf^2 * kernelMatrix(rbf, x = x, y = y))
}
mu <- function(t, a = 1, b0 = 1, b1 = 1, v = 1) {
# Generalised logistic growth curve evaluated (vectorised) at time(s) t:
#   a / (1 + b0 * exp(-b1 * t))^(v^2)
denom <- (1 + b0 * exp(-b1 * t))^(v^2)
a / denom
}
#--- Multistart grid: 3^7 starting points over the 7 hyperparameters
parseq <- c(.01,.1,1)
parmat <- expand.grid(parseq, parseq, parseq, parseq, parseq, parseq, parseq)
# Hyper.ms: estimate GP hyperparameters by minimising the negative log
# marginal likelihood from every start row of the global `parmat`
# (optimx::multistart, Nelder-Mead). The parameter vector is
# theta = (l, sigf, noise sd, a, b0, b1, v), matching ker() and mu().
# Returns the full multistart result table (one row per start); the caller
# selects the best row.
Hyper.ms <- function(x, y) {
marlik <- function(theta) {
x <- as.matrix(x)
n <- dim(x)[1]
#theta <- theta^2
k <- ker(x = x, l = theta[1], sigf = theta[2])
# negative log density of y under the GP prior N(mu(x; theta), K + noise^2 I)
-dmvnorm(x = y, mean = mu(x, a = theta[4], b0 = theta[5], b1 = theta[6], v = theta[7]), sigma = k + theta[3]^2 * diag(n), log = T)
}
hyp <- multistart(parmat=parmat, fn = marlik, method = 'Nelder-Mead',
control=list(maxit = 10000))
#print(hyp)
return(hyp)
}
fitlogistic <- function(n, tmin = 0, tmax = 7) {
# Simulate noisy logistic-growth data and fit it two ways:
#   (1) parametric nonlinear least squares (nls) on the generalised
#       logistic mean mu(), and
#   (2) a Gaussian-process posterior mean with an RBF kernel whose
#       hyperparameters are estimated by multistart marginal-likelihood
#       optimisation (Hyper.ms).
#
# n:          number of observation time points to simulate
# tmin, tmax: observation window
# Returns a list with the fitted curves, data and hyperparameters.
#simulate data: GP draw around the logistic mean plus iid noise
t <- seq(tmin, tmax, length.out = n)
mut <- mu(t, a = 1, b0 = 2, b1 = 1, v = 1)
covmat <- ker(t, l = 1.5, sigf = .05)
genY <- rmvnorm(1, mut, covmat)
genY <- genY + rnorm(n, 0, .06)
genY <- as.numeric(genY)
#estimate parameters: parametric nls fit, then GP hyperparameters
dt <- cbind(genY, t)
dt <- as.data.frame(dt)
nonlin <- nls(genY ~ a / (1 + b0 * exp(- b1 * t))^(v^2), data = dt, start = list(a = 1, b0 = 1, b1 = 1, v = 1))
nonlin.par <- coef(nonlin)
theta.ms <- Hyper.ms(x = t, y = genY)
# keep the multistart solution with the smallest objective value
theta <- theta.ms[which(theta.ms$value == min(theta.ms$value)),]
theta <- theta[1:7]
theta <- as.numeric(theta)
#fit on a dense prediction grid
tstar <- seq(-15, 15, length.out = 100)
mustar <- mu(tstar, a = 1, b0 = 2, b1 = .7, v = 1)
mu.pred <- mu(t, a = theta[4], b0 = theta[5], b1 = theta[6], v = theta[7])
mustar.pred <- mu(tstar, a = theta[4], b0 = theta[5], b1 = theta[6], v = theta[7])
n <- length(t)
nx <- length(tstar)
kx <- ker2(x = tstar, y = t, l = theta[1], sigf = theta[2])
kxx <- ker(x = tstar, l = theta[1], sigf = theta[2]) + theta[3]^2 * diag(nx)
k <- ker(x = t, l = theta[1], sigf = theta[2]) + theta[3]^2 * diag(n)
kinv <- chol2inv(chol(k))
# GP posterior mean: kernel-weighted residuals of the observations around
# the fitted prior mean, added back onto the prior mean on the grid.
posmu <- kx %*% (kinv %*% matrix(genY - mu.pred, ncol = 1))
# BUG FIX: the original read `posmu <- pred + posmu`, but `pred` is never
# defined; the prior mean on the prediction grid is `mustar.pred`
# (length nx, matching posmu).
posmu <- mustar.pred + posmu
return(list(mu.pred = mu.pred, mustar.pred = mustar.pred, posmu = posmu,
theta = theta, t = t, tstar = tstar, mustar = mustar, genY = genY))
}
# Demo: simulate/fit with 50 observations and overlay the true curve (black),
# the parametric nls fit (red) and the GP posterior mean (green).
lf <- fitlogistic(50)
plot(lf$tstar, lf$mustar, type = 'l', lwd = 3, col = 1, ylim = c(0, 1.1),
main = 'Parametric and GP based prediction of logistic growth curve', cex.main = 1.5)
lines(lf$tstar, lf$mustar.pred, lwd = 3, col = 2)
points(lf$t, lf$genY, pch = 16, cex =1.2, col = rgb(0,0,0,.2))
lines(lf$tstar, lf$posmu, lwd = 3, col = 3)
legend('bottomright', c('True', 'Parametric fit', 'Posterior mean'), lty = 1,
lwd = 3, col = 1:3, bty = 'n')
|
96f6b74c1dbc40c8f3f7114598cb7f116695bc8f
|
84af03713835e6ec0792f24e15265006e9e64eff
|
/R/geteegdata.R
|
aa6dd8827807ca07fb5f615af840898f4e8f3bc0
|
[] |
no_license
|
cran/eegkitdata
|
56848e2fb91623c1c186a1c2c6743f58376d5cd3
|
62a293d9cfa2332e2d41a240ba5ca32b9dfaad5e
|
refs/heads/master
| 2022-06-12T08:05:11.413203
| 2022-05-16T13:50:02
| 2022-05-16T13:50:02
| 23,852,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,897
|
r
|
geteegdata.R
|
# geteegdata: build a long-format data set from the UCI EEG database.
#
# indir:    directory containing the per-subject data directories; if none
#           are found, any files in indir are treated as .tar archives and
#           untarred first
# outdir:   where the combined data set is written (defaults to indir)
# cond:     experimental condition to keep - "S1", "S2m" (match) or "S2n"
#           (no-match); only the first element is used
# nt:       optional cap on matching trial files read per subject
# filename / filetype: output name and format (".rda", ".csv" or ".txt")
#
# Returns the combined data frame for ".rda" output; for ".csv"/".txt" the
# data is only written to disk.
geteegdata <-
function(indir, outdir = indir, cond = c("S1", "S2m", "S2n"), nt = NULL,
filename = "eegdata", filetype = c(".rda", ".csv", ".txt")){
###### Create data matrix from UCI EEG data
###### Nathaniel E. Helwig (helwig@umn.edu)
###### Last modified: May 16, 2022
### initial checks: validate filetype, cond, nt, and both directories
filetype=filetype[1]
if(is.na(match(filetype,c(".rda",".csv",".txt")))){stop("Incorrect filetype input.")}
cond=cond[1]
if(is.na(match(cond,c("S1","S2m","S2n")))){stop("Incorrect cond input.")}
if(is.null(nt)==FALSE){nt=as.integer(nt); if(nt<1){stop("Incorrect nt input.")}}
filename=as.character(filename)
ix=file.info(indir)
if(is.na(ix[1]) | ix[2]==FALSE){stop("Invalid input directory (indir is not a valid directory).")}
ox=file.info(outdir)
if(is.na(ox[1]) | ox[2]==FALSE){stop("Invalid output directory (outdir is not a valid directory).")}
### load list of file directories (and possibly untar)
alldir=list.dirs(indir,full.names=FALSE)[-1]
if(length(alldir)==0L){
alldir=list.files(indir)
lad=length(alldir)
cat(paste("untarring",lad,"files...\n"))
for(j in 1:length(alldir)){untar(paste(indir,alldir[j],sep=""),exdir=indir)}
alldir=list.dirs(indir,full.names=FALSE)[-1]
if(length(alldir)==0L){stop("Invalid input directory (no data directories).")}
}
### load all data: one pass per subject directory, one file per trial
eegdata=NULL
for(j in 1:length(alldir)){
cat(paste("subject:",alldir[j],"\n"))
# 4th character of the directory name encodes the subject group
# (presumably "a" = alcoholic / "c" = control - TODO confirm)
egroup=strsplit(as.character(alldir[j]),"")[[1]][4]
thedir=paste(indir,alldir[j],sep="") # directory
flist=list.files(path=thedir) # list files
flen=length(flist) # number of files
k=m=0; maxk=FALSE
while(k<flen & maxk==FALSE){
k=k+1
fn=paste(thedir,flist[[k]],sep="/")
# 4th header line of each trial file names the stimulus condition
einfo=scan(file=fn,what=character(),skip=3,nlines=1,quiet=TRUE)
condition=paste(einfo[2],einfo[3],sep="")
if(condition=="S1obj"){
condition="S1"
} else if(condition=="S2match"){
condition="S2m"
} else if(condition=="S2nomatch,"){
condition="S2n"
}
if(condition==cond){
eegtab=read.table(file=fn) # load data
colnames(eegtab)=c("trial","channel","time","voltage")
eegdata=rbind(eegdata,data.frame(subject=alldir[j],group=egroup,condition,eegtab))
# stop early once nt matching trials have been collected
if(is.null(nt)==FALSE){m=m+1; if(m==nt){maxk=TRUE}}
}
} # end while(k<flen & maxk==FALSE)
} # end for(j in 1:length(alldir))
### save data in the requested format
if(filetype==".rda"){
save(eegdata,file=paste(outdir,filename,filetype,sep=""))
return(eegdata)
} else if(filetype==".csv"){
write.csv(eegdata,file=paste(outdir,filename,filetype,sep=""))
} else {
write.table(eegdata,file=paste(outdir,filename,filetype,sep=""))
}
}
|
2d3b369beefe1492dd194536668cc21ad3184579
|
a11ed9642e3b20e27f0fefa69eeafe6acef43cae
|
/tests/testthat.R
|
b9edfcf3180f9750fe371036d75a1d77da0f6eba
|
[] |
no_license
|
jackingg/ChangbaiLiTools
|
2dd6f190d53b662822d56ca4cbd21a9ab9008f8f
|
df572c4939eea8b914c1e5b0da8c4d6ab2038cb9
|
refs/heads/master
| 2021-04-06T18:04:05.563721
| 2018-03-10T04:21:30
| 2018-03-10T04:21:30
| 124,169,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 74
|
r
|
testthat.R
|
# Package test entry point: runs all testthat tests for ChangbaiLiTools.
library(testthat)
library(ChangbaiLiTools)
test_check("ChangbaiLiTools")
|
9911dfb538db835a8efee8aee97b806c2547a327
|
16024c7bbfbbbc54160341360b7e60d46e76880c
|
/src/analyze_cell_features/invado_analysis_lib.R
|
08b96ba4cad2635acd29258f704b427c87c8d9d8
|
[] |
no_license
|
gomezlab/single_invado_analysis
|
d7fa19410f99947b4df40e486dfd21d3d0213c39
|
c05c7b48dc53c66da389917c4f4f76611cacff48
|
refs/heads/master
| 2021-01-18T22:36:06.795333
| 2019-07-30T15:40:03
| 2019-07-30T15:40:03
| 10,411,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,274
|
r
|
invado_analysis_lib.R
|
################################################################################
# invado_analysis_lib.R: the R functions associated with processing the
# invaodopodia data
################################################################################
# gather_invado_properties: per experiment directory, read the per-puncta
# time-series CSVs (area, local gel degradation, pre-birth degradation, edge
# distance), keep lineages alive for >= 60 minutes, and accumulate a data
# frame of per-lineage summary statistics and t-test results (t.test.error
# is the tryCatch fallback when a test cannot run). Optionally writes
# per-lineage diagnostic plots and saves the data frame to results.file
# under <dir>/invado_analysis.
#
# NOTE(review): the final return is outside the directory loop, so with
# multiple results_dirs only the LAST directory's data frame is returned
# (each directory's frame is still saved to disk when results.file is set).
gather_invado_properties <- function(results_dirs,time.spacing, conf.level = 0.95,
results.file = NA, build_plots=TRUE,
debug=FALSE) {
for (this_exp_dir in results_dirs) {
if (! is.na(results.file)) {
dir.create(file.path(this_exp_dir,'invado_analysis'),recursive=T);
}
all_props = list();
########################################################################
#Reading in raw data
########################################################################
data_folder = file.path(this_exp_dir,'lin_time_series');
area_data = read.table(file.path(data_folder, 'Area.csv'),
sep=",",header=F);
local_diff_data = read.table(file.path(data_folder,'Local_gel_diff.csv'),
sep=",",header=F);
local_diff_corrected_data = read.table(file.path(data_folder,'Local_diff_corrected.csv'),
sep=",",header=F);
pre_diff_data = read.table(file.path(data_folder,'Pre_birth_diff.csv'),
sep=",",header=F);
edge_dist_data = read.table(file.path(data_folder,'Centroid_dist_from_edge.csv'),
sep=",",header=F);
########################################################################
#Building the filter sets
########################################################################
# lifetime = number of non-NA frames times the frame interval
longevity_uncertain = rowSums(! is.na(area_data))*time.spacing;
#We only want to consider puncta that live for at least 60 minutes
longev_filter = longevity_uncertain >= 60;
overall_filt = longev_filter;
if (sum(overall_filt) == 0) {
print("None of the lineages passed the filtering step, returning from function with no output.");
return(all_props);
}
# birth/death are "observed" when the lineage is absent (NA) in the
# first/last frame, i.e. it did not run off either end of the movie
all_props$birth_observed = is.na(area_data[overall_filt,1]);
all_props$death_observed = is.na(area_data[overall_filt,dim(area_data)[2]]);
longevity = longevity_uncertain;
longevity[! (all_props$birth_observed & all_props$death_observed)] = NA;
all_props$lineage_nums = which(overall_filt)
all_props$experiment = rep(this_exp_dir,sum(overall_filt));
all_props$longevity_uncertain = longevity_uncertain[overall_filt]
all_props$longevity = longevity[overall_filt]
all_props$mean_area = rowMeans(area_data[overall_filt,],na.rm=T);
all_props$mean_edge_dist = rowMeans(edge_dist_data[overall_filt,],na.rm=T);
if (build_plots) {
pdf(file.path(this_exp_dir,'invado_analysis','local_degrade_plots.pdf'));
}
#analyzing each of the puncta in the filtered set to identify invadopodia
for (lin_num in which(overall_filt)) {
###############################################
# Local Diff
###############################################
local_diff = na.omit(as.numeric(local_diff_data[lin_num,]));
all_props$mean_local_diff = c(all_props$mean_local_diff, mean(local_diff))
all_props$max_local_diff = c(all_props$max_local_diff, max(local_diff));
all_props$min_local_diff = c(all_props$min_local_diff, min(local_diff));
stat_tests = list();
stat_tests$local_diff = tryCatch(
t.test(local_diff,conf.level=conf.level),
error = t.test.error);
all_props$low_conf_int = c(all_props$low_conf_int, stat_tests$local_diff$conf.int[1]);
all_props$high_conf_int = c(all_props$high_conf_int, stat_tests$local_diff$conf.int[2]);
all_props$p_value = c(all_props$p_value, stat_tests$local_diff$p.value);
###############################################
# Pre-birth
###############################################
pre_diff = na.omit(as.numeric(pre_diff_data[lin_num,]));
stat_tests$pre_diff = tryCatch(
t.test(pre_diff,conf.level=conf.level),
error = t.test.error);
all_props$mean_pre_diff = c(all_props$mean_pre_diff,mean(pre_diff));
all_props$mean_pre_diff_p_value = c(all_props$mean_pre_diff_p_value,
stat_tests$pre_diff$p.value);
###############################################
# Local Diff Corrected
###############################################
local_diff_corrected = na.omit(as.numeric(local_diff_corrected_data[lin_num,]));
stat_tests$local_diff_corrected = tryCatch(
t.test(local_diff_corrected,conf.level=conf.level),
error = t.test.error);
all_props$local_diff_corrected_p_value = c(all_props$local_diff_corrected_p_value,
stat_tests$local_diff_corrected$p.value);
all_props$mean_local_diff_corrected = c(all_props$mean_local_diff_corrected,
as.numeric(stat_tests$local_diff_corrected$estimate));
# smooth the corrected series and find when it first reaches (a) its
# average level and (b) 90% of its own maximum
time = seq(0,by=time.spacing,along.with=local_diff_corrected);
stat_tests$loess_model = loess(local_diff_corrected ~ time,span=0.5);
high_sample_time_points = seq(from=0,to=max(time),by=1);
loess_predictions = predict(stat_tests$loess_model,high_sample_time_points);
hit_average_time = which(loess_predictions >= stat_tests$local_diff_corrected$estimate)[1]
all_props$hit_average_time = c(all_props$hit_average_time, hit_average_time);
hit_max_time = which(loess_predictions >= max(loess_predictions)*0.9)[1]
all_props$hit_max_time = c(all_props$hit_max_time, hit_max_time);
###############################################
# Local Diff Corrected vs Pre Diff
###############################################
stat_tests$local_diff_pre_diff = tryCatch(
t.test(pre_diff,local_diff_corrected),
error = t.test.error);
all_props$local_pre_p_value = c(all_props$local_pre_p_value,
stat_tests$local_diff_pre_diff$p.value);
if (build_plots) {
all_three_sets = cbind(local_diff, pre_diff, local_diff_corrected);
build_single_invado_plot(all_three_sets,time.spacing,stat_tests,lin_num);
}
}
if (build_plots) {
graphics.off();
}
all_props = as.data.frame(all_props);
if (! is.na(results.file)) {
this_file = file.path(this_exp_dir,'invado_analysis',results.file);
if (! file.exists(dirname(this_file))) {
dir.create(dirname(this_file),recursive=TRUE);
}
save(all_props,file = this_file);
}
}
return(all_props);
}
# gather_all_puncta_summary: like gather_invado_properties but with no
# longevity filtering and no statistics - summarises EVERY detected puncta
# (longevity, observed birth/death, mean area, mean edge distance) and
# optionally saves the list to results.file. Returns the summary of the
# last directory processed.
#
# NOTE(review): the local-diff / pre-diff tables are read here but never
# summarised - only area and edge-distance feed the output.
gather_all_puncta_summary <- function(results_dirs,time.spacing,results.file=NA) {
for (this_exp_dir in results_dirs) {
all_props = list();
########################################################################
#Reading in raw data
########################################################################
data_folder = file.path(this_exp_dir,'lin_time_series');
area_data = read.table(file.path(data_folder, 'Area.csv'),
sep=",",header=F);
local_diff_data = read.table(file.path(data_folder,'Local_gel_diff.csv'),
sep=",",header=F);
local_diff_corrected_data = read.table(file.path(data_folder,'Local_diff_corrected.csv'),
sep=",",header=F);
pre_diff_data = read.table(file.path(data_folder,'Pre_birth_diff.csv'),
sep=",",header=F);
edge_dist_data = read.table(file.path(data_folder,'Centroid_dist_from_edge.csv'),
sep=",",header=F);
########################################################################
#Building the filter sets
########################################################################
all_props$longevity_uncertain = rowSums(! is.na(area_data))*time.spacing;
# birth/death observed = lineage absent in the first/last frame
all_props$birth_observed = is.na(area_data[,1]);
all_props$death_observed = is.na(area_data[,dim(area_data)[2]]);
all_props$longevity = all_props$longevity_uncertain;
all_props$longevity[! (all_props$birth_observed & all_props$death_observed)] = NA;
all_props$mean_area = rowMeans(area_data,na.rm=T);
all_props$mean_edge_dist = rowMeans(edge_dist_data,na.rm=T);
all_props$lineage_nums = 1:dim(area_data)[1];
all_props$experiment = rep(this_exp_dir,dim(area_data)[1]);
if (! is.na(results.file)) {
this_file = file.path(this_exp_dir,'invado_analysis',results.file);
if (! file.exists(dirname(this_file))) {
dir.create(dirname(this_file),recursive=TRUE);
}
save(all_props,file = this_file);
}
}
return(all_props);
}
# build_single_invado_plot: plot the three degradation time series for one
# puncta lineage (local diff, pre-birth diff, corrected diff) with the loess
# smooth of the corrected series, a 90%-of-max reference line, and - when
# Hmisc is available - confidence-interval error bars for each series.
#
# data_sets:    matrix with columns (local_diff, pre_diff, local_diff_corrected)
# time.spacing: minutes between frames (x-axis scaling)
# stat_tests:   list holding the t.test results and the fitted loess model
# lin_num:      lineage number, used as the plot title
build_single_invado_plot <- function(data_sets,time.spacing,stat_tests, lin_num) {
time_points = seq(from=0,by=time.spacing,along.with=data_sets[,1]);
par(bty='n', mar=c(4,4,2,0))
matplot(time_points, data_sets,
typ='l', lty=c(1,2,4), xlab='Time (min)', ylab='Difference Metric', main=lin_num,
lwd=2, xlim=c(0,max(time_points)*1.05))
high_sample_time_points = seq(from=0,to=max(time_points),by=1);
loess_predictions = predict(stat_tests$loess_model,high_sample_time_points);
points(high_sample_time_points,loess_predictions,col=rgb(0,0,1,alpha=0.5))
# horizontal reference at 90% of the smoothed maximum
lines(c(0,max(time_points)),rep(0.9*max(loess_predictions),2));
plot_limits = par("usr");
legend('topleft',c('Local Diff','Pre-birth Local Diff', 'Local Diff - Pre-birth Diff' ),
fill=c('black','red', 'green'))
segments(0,0,max(time_points),0,lty=4)
# BUG FIX: the original guard read `"Hmisc" %in% ... == FALSE`, which (since
# %in% binds tighter than ==) loaded and used Hmisc exactly when it was NOT
# installed, so library() would fail. Draw the error bars only when Hmisc
# is actually available.
if ("Hmisc" %in% rownames(installed.packages())) {
library(Hmisc);
errbar(max(time_points)*1.01, stat_tests$local_diff$estimate,
stat_tests$local_diff$conf.int[2], stat_tests$local_diff$conf.int[1], add=T)
errbar(max(time_points)*1.03, stat_tests$pre_diff$estimate,
stat_tests$pre_diff$conf.int[2], stat_tests$pre_diff$conf.int[1], add=T, col='red')
errbar(max(time_points)*1.05, stat_tests$local_diff_corrected$estimate,
stat_tests$local_diff_corrected$conf.int[2], stat_tests$local_diff_corrected$conf.int[1],
add=T, col='green')
}
# (commented-out overlay of the area trace, retained from the original)
# plot_props = par('usr');
#
# scale_factor = (plot_props[4]-plot_props[3])/max(only_area_data)
# lines(time_points,only_area_data*scale_factor+plot_props[3],col='purple')
#
# tick_labels = (axTicks(2) - plot_props[3])/scale_factor;
# axis(4,at=axTicks(2),labels=sprintf('%.3f',(axTicks(2) - plot_props[3])/scale_factor))
}
t.test.error <- function(e) {
# Fallback handler for tryCatch(t.test(...)): when the test cannot run
# (e.g. too few observations), report a degenerate result - an empty
# confidence interval (Inf, -Inf) and a non-significant p-value of 1.
fallback <- list()
fallback$conf.int <- c(Inf, -Inf)
fallback$p.value <- 1
fallback
}
build_filter_sets <- function(raw_data_set, conf.level = 0.99,min_mean_local_diff_corrected = NA) {
# Classify puncta as invadopodia: a lineage qualifies when both its local
# degradation and its pre-birth-corrected degradation are positive AND
# significant at alpha = 1 - conf.level. An optional minimum corrected
# degradation threshold further restricts the set.
alpha <- 1 - conf.level
local_pass <- raw_data_set$mean_local_diff > 0 &
raw_data_set$p_value < alpha
pre_pass <- raw_data_set$mean_local_diff_corrected > 0 &
raw_data_set$local_diff_corrected_p_value < alpha
filter_sets <- list(local_diff_filter = local_pass,
pre_diff_filter = pre_pass,
invado_filter = local_pass & pre_pass)
if (!is.na(min_mean_local_diff_corrected)) {
filter_sets$min_local_diff_corrected <-
raw_data_set$mean_local_diff_corrected > min_mean_local_diff_corrected
filter_sets$invado_filter <-
filter_sets$invado_filter & filter_sets$min_local_diff_corrected
}
filter_sets$not_invado_filter <- !filter_sets$invado_filter
return(filter_sets)
}
round_output_data_sets <- function(data_set) {
# Round the reported columns for human-readable CSV output:
# means to 1 decimal place, p-values to 2 significant figures.
one.dp <- c("mean_area", "mean_local_diff", "mean_local_diff_corrected")
for (col in one.dp) {
data_set[[col]] <- round(data_set[[col]], 1)
}
two.sig <- c("p_value", "local_diff_corrected_p_value")
for (col in two.sig) {
data_set[[col]] <- signif(data_set[[col]], 2)
}
return(data_set)
}
################################################################################
# Main Program
################################################################################
# Command-line entry point: expects key=value arguments (e.g. data_dir=...,
# time.spacing=...), which are assigned by name into the current scope, then
# runs the full summary + classification pipeline and writes the CSV outputs.
args = commandArgs(TRUE);
if (length(args) != 0) {
debug = FALSE;
#set a default time between images of 1 min
time.spacing = 1
#split out the arguments from the passed in parameters and assign variables
#in the current scope (bare flags become TRUE; key=value pairs keep the value)
for (this_arg in commandArgs()) {
split_arg = strsplit(this_arg,"=",fixed=TRUE)
if (length(split_arg[[1]]) == 1) {
assign(split_arg[[1]][1], TRUE);
} else {
assign(split_arg[[1]][1], split_arg[[1]][2]);
}
}
time.spacing = as.numeric(time.spacing);
print(paste("Working on:",data_dir));
print(paste("Time between images:",time.spacing));
if (exists('data_dir')) {
gather_all_puncta_summary(data_dir,time.spacing,results.file = file.path('all_puncta_summary.Rdata'));
exp_props = gather_invado_properties(data_dir,time.spacing,
results.file = file.path('models','puncta_props_corr.Rdata'));
if (dim(exp_props)[1] == 0) {
print("Didn't find any lineages to analyze, probably nothing long-lived enough.");
# NOTE(review): `return` at script top level is not valid R (there is no
# enclosing function to return from), so this early exit likely errors
# rather than exiting cleanly - consider quit() or restructuring.
return;
}
data_types_to_include = c('lineage_nums', 'longevity',
'hit_max_time','mean_area','mean_local_diff','p_value',
'mean_local_diff_corrected','local_diff_corrected_p_value');
filter_sets = build_filter_sets(exp_props);
# split lineages into invadopodia / non-invadopodia and round for output
invado_lineage_data = subset(exp_props, filter_sets$invado_filter,
select = data_types_to_include);
invado_lineage_data = round_output_data_sets(invado_lineage_data);
not_invado_lineage_data = subset(exp_props, filter_sets$not_invado_filter,
select = data_types_to_include);
not_invado_lineage_data = round_output_data_sets(not_invado_lineage_data);
write.table(invado_lineage_data, file.path(data_dir, 'invado_data.csv'),
row.names=F, col.names=T, sep=',')
write.table(not_invado_lineage_data, file.path(data_dir, 'not_invado_data.csv'),
row.names=F, col.names=T, sep=',')
write.table(invado_lineage_data$lineage_nums,
file.path(data_dir,'invado_analysis','invado_nums.csv'),
row.names=F, col.names=F, sep=',')
write.table(not_invado_lineage_data$lineage_nums,
file.path(data_dir,'invado_analysis','not_invado_nums.csv'),
row.names=F, col.names=F, sep=',')
}
}
|
31f93e7f354ad6ca8cd10670f26d7532321a94d8
|
9290006b5108e9fe9f22ea786ac421d1a6125ec4
|
/Combine_Data_IN/db_site_ingest.R
|
0897aa45e675da826bf3168b8f9d1714bfb62003
|
[
"MIT"
] |
permissive
|
lehostert/Kaskaskia-River-CREP-Monitoring-Data-Tools
|
82bedacdccf103ad7ccb866b833531a62adb1bd3
|
b55a8b1b199e463c4c7e946ef184e60f964a5d3f
|
refs/heads/master
| 2022-05-03T11:59:16.697581
| 2022-03-29T17:29:45
| 2022-03-29T17:32:59
| 161,382,841
| 0
| 0
|
MIT
| 2022-02-02T17:35:13
| 2018-12-11T19:24:38
|
R
|
UTF-8
|
R
| false
| false
| 3,702
|
r
|
db_site_ingest.R
|
# Setup: connect to the CREP Access database over ODBC and resolve the
# network share path for input files.
library(tidyverse)
library(odbc)
library(DBI)
# library(docstring)

# The research share mounts differently per OS: UNC root on Windows,
# /Volumes on macOS.
network_prefix <- if_else(as.character(Sys.info()["sysname"]) == "Windows", "//INHS-Bison.ad.uillinois.edu", "/Volumes")
network_path <- paste0(network_prefix,"/ResearchData/Groups/Kaskaskia_CREP")

### with odbc
odbcListDrivers() # to get a list of the drivers your computer knows about
# con <- dbConnect(odbc::odbc(), "Testing_Database")
con <- dbConnect(odbc::odbc(), "2019_CREP_Database")
options(odbc.batch_rows = 1) # must be 1 for MS Access batch writing/appending to tables
dbListTables(con) # to get the list of tables in the database
##### Locations 2019 2020 #####
# loc_19 <- readxl::read_xlsx(paste0(network_path, "/Data/Data_IN/SITES/Sites_2019.xlsx"))
# loc_20 <- readxl::read_xlsx(paste0(network_path, "/Data/Data_IN/SITES/Sites_2020.xlsx"))
#
# loc_1920 <- loc_19 %>%
# bind_rows(loc_20) %>%
# select(-c(Event_Date, Event_Year, Event_Month, Event_Day, Event_Purpose)) %>%
# mutate(Latitude = round(Latitude, digits = 5),
# Longitude = round(Longitude, digits = 5))
#
# loc_1920_unique <- unique(loc_1920)
#
# loc_1920_unique$Latitude[[49]] == loc_1920_unique$Latitude[[1]]
# loc_1920_unique$Latitude[[1]]
#
# #### pull in the Locations from DB ####
# loc_db <- as_tibble(tbl(con, "Established_Locations")) %>%
# select(-c(Established_Locations_ID))
#
# bm_fish_loc <- readxl::read_xlsx(path = "~/CREP/Data/Query/2021/CREP_Intern/INHS_CREP_Fish_Locations.xlsx")
# bm_all_loc <- read_csv(file = "~/CREP/Data/Query/2021/CREP_Intern/INHS_CREP_Site_Locations.csv")
# bm_all_loc_xl <- readxl::read_xlsx(path = "~/CREP/Data/Query/2021/CREP_Intern/INHS_CREP_Site_Locations.xlsx")
# bm_all_loc_update <- readxl::read_xlsx(path = "~/CREP/Data/Query/2021/CREP_Intern/INHS_CREP_DataQuery_20210915/INHS_CREP_Site_Locations.xlsx", na = "NA")
#
# loc_all20 <- bm_all_loc %>%
# select(PU_Gap_Code, Reach_Name, Site_Type, Latitude, Longitude, Stream_Name) %>%
# unique()
#
# loc_all20_update <- bm_all_loc_update %>%
# select(PU_Gap_Code, Reach_Name, Site_Type, Latitude, Longitude, Stream_Name) %>%
# unique()
#
# loc_fish20 <- bm_fish_loc %>%
# select(PU_Gap_Code, Reach_Name, Site_Type, Latitude, Longitude, Stream_Name) %>%
# unique()
#
# names(loc_db)
# names(loc_1920)
#
# new_locations <- setdiff(loc_1920, loc_db)
#
# loc_db <- loc_db %>% select(-c(PU_Code, Gap_Code))
# db_bm <- setdiff(loc_all20, loc_db)
#
# fish_db <- as_tibble(tbl(con, "Fish_Locations"))
#
# fish_db_missing <- fish_db %>%
# filter(is.na(Site_Type))
#
#
# loc_missing <- fish_db_missing %>%
# select(PU_Gap_Code, Reach_Name)
#
# all_locations_with_dup <- bind_rows("new" = new_locations , "db" = loc_db, .id = "source")
#
# map <- all_locations_with_dup %>%
# filter(Site_Type == "random") %>%
# select(-c(Stream_Name, source, PU_Code, Gap_Code, Site_Type)) %>%
# unique()
#
# names(map) <- stringr::str_to_lower(names(map))
#
# map_fish <- fish_df_random %>%
# left_join(map, by = c("pu_gap_code", "reach_name")) %>%
# select(1:5, 78:79, 6:77)
#
# write_csv(map_fish, path = "~/GitHub/Kaskaskia-River-CREP-Monitoring-Data-Tools/Combine_Data_IN/fish_locations_random_sites.csv")
#
#### pull in the Locations from DB ####
# Current established locations, minus the autonumber key so columns line up
# with the incoming CSV.
loc_db <- as_tibble(tbl(con, "Established_Locations")) %>%
  select(-c(Established_Locations_ID))

new_locations <- read_csv(file = paste0(network_path, "/Data/Data_IN/SITES/All_New_Sites_2021_kasky1038_removed.csv"))

# Interactive sanity checks on reach names before appending.
# NOTE(review): the second setdiff() overwrites the first, so only the
# db-not-in-new direction is kept in `diff` -- confirm whether both
# directions were meant to be retained.
diff <- setdiff(new_locations$Reach_Name, loc_db$Reach_Name)
diff <- setdiff(loc_db$Reach_Name, new_locations$Reach_Name)

# Append the new sites to the database table and close the connection.
dbAppendTable(con, name = "Established_Locations", new_locations)
dbDisconnect(con)
|
7079d894345872a3d0aeed40041b03477bd5d795
|
2e627e0abf7f01c48fddc9f7aaf46183574541df
|
/PBStools/man/uniqtows.Rd
|
fcaaf5ddad4b369b90de608279c37049220e9297
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pbs-software/pbs-tools
|
30b245fd4d3fb20d67ba243bc6614dc38bc03af7
|
2110992d3b760a2995aa7ce0c36fcf938a3d2f4e
|
refs/heads/master
| 2023-07-20T04:24:53.315152
| 2023-07-06T17:33:01
| 2023-07-06T17:33:01
| 37,491,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,805
|
rd
|
uniqtows.Rd
|
\name{uniqtows}
\alias{uniqtows}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
PJS -- Ensure Unique Tows
}
\description{
Ensure unique tows occur for each survey, collapse records as needed.
}
\usage{
uniqtows(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{\code{data.frame} -- survey data object.}
}
\details{
Function checks for more than one observation per tow by looking at unique combinations of \code{"year"} and \code{"set"}.
}
\value{
Revised (if needed) survey data object.
}
\author{
\href{mailto:paul@starrfish.net}{Paul J. Starr}, Chief Groundfish Scientist\cr
Canadian Groundfish Research and Conservation Society (CGRCS), Nanaimo BC
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Institute of Ocean Sciences (IOS), Sidney BC\cr
Last modified \code{Rd: 2019-12-13}
}
\note{
PJS maintains code in statistical software called \href{https://www.stata.com/}{STATA}.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
In package \pkg{PBStools}:\cr
\code{\link[PBStools]{calcBiom}},
\code{\link[PBStools]{doSynoptic}},
\code{\link[PBStools]{getLabels}},
\code{\link[PBStools]{keepAtts}},
\code{\link[PBStools]{plotIndex}},
\code{\link[PBStools]{prepGFsurv}},
\code{\link[PBStools]{restratify}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
\keyword{manip}
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
\concept{indices}
\concept{PJS}
% Use only one concept per line.
|
cb6176ebd16858c9a64c3e884533c1b47a0b0fb3
|
f3eb97cecdc4a514e4981980352eee9a01652a17
|
/time_series/random_walk.R
|
9cabf45c97bbd392874c2ee49f13fe117deb65e8
|
[] |
no_license
|
RyotaBannai/r
|
88ccc2791d769ed92f145d0710a77a59e7f6bed2
|
f861e8b632f885d2eae11a301eab1b017bd89866
|
refs/heads/master
| 2023-08-19T07:44:52.497319
| 2023-08-14T11:06:54
| 2023-08-14T11:06:54
| 166,720,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 889
|
r
|
random_walk.R
|
# Random walk ("drunkard's walk").
# A random walk is simply the cumulative sum of white noise, i.e. an
# integrated process.  Because successive values share the same summed
# noise terms, the series is strongly autocorrelated.

# BUG FIX: auto.arima() and forecast() below come from the forecast
# package, which was never loaded -- the script failed at model fitting.
library(forecast)

set.seed(1)
white.noise <- rnorm(n = 400)  # Gaussian white noise
random.walk <- cumsum(white.noise)

par(family = "HiraKakuProN-W3")  # font able to render the Japanese plot title
plot(random.walk, type = "l", main = "和分過程:ランダムウォーク")
acf(random.walk)  # confirm the series is autocorrelated

# Let auto.arima() search the full model space (no stepwise shortcut, no
# likelihood approximation), selecting by AIC.
model.RW <- auto.arima(
  random.walk,
  ic = "aic",
  trace = TRUE,
  stepwise = FALSE,
  approximation = FALSE
)

# 100-step forecast with reference lines: series mean and last observation.
plot(forecast(model.RW, h = 100), flwd = 5)
abline(h = mean(random.walk))
abline(h = random.walk[length(random.walk)], col = 2, lty = 2)
|
7842554fe9c765ae85c7fa6074f84948e6cbf539
|
228f43bdc34da185743bc696e9e4deabafedf051
|
/R/utils.R
|
27af9bb3d46cd63db58dbc1062d5980f97911523
|
[
"MIT"
] |
permissive
|
seandavi/BIRSBIO2020.scNMTseq.PLS
|
2f5b34e1b54fbc7e79c1ff821427b30bcbd21669
|
9b80860d7deb5068f2698514ec3a8117c7bcd5ec
|
refs/heads/master
| 2022-11-20T22:00:15.952855
| 2020-07-14T18:48:22
| 2020-07-14T18:48:22
| 281,267,958
| 0
| 0
| null | 2020-07-21T01:48:13
| 2020-07-21T01:48:12
| null |
UTF-8
|
R
| false
| false
| 1,462
|
r
|
utils.R
|
## ----------- Imports -----------
#' @import MultiAssayExperiment ggplot2 data.table reticulate
#' @import uwot nipals biomaRt
#' @import reshape2 grDevices
## ----------- all_identical -----------
## check if elements in a list are identical
#' @export
#' Check that every element of a list is identical.
#'
#' Walks consecutive pairs and stops, reporting the offending indices, at
#' the first mismatch; returns TRUE otherwise (including for empty and
#' single-element lists).
all_identical <- function(lst) {
  n_pairs <- length(lst) - 1
  for (idx in seq_len(max(n_pairs, 0))) {
    if (!identical(lst[[idx]], lst[[idx + 1]])) {
      stop(sprintf("not identical elements: %s and %s", idx, idx + 1), call. = FALSE)
    }
  }
  TRUE
}
## ----------- rbindListWithNames -----------
## base::rbind a named list of data.frames adding a new column
## indicating the name of the dataset in the list
#' rbind a named list of data frames, tagging rows with their origin
#'
#' @param lst A named list of data frames.
#' @param new_col Character, the name of the added column that records
#'   which list element each row came from.
#' @return A single data frame: all elements stacked row-wise, with
#'   `new_col` holding the originating element's name.
#'
#' @rdname utils
#' @export
rbindListWithNames <- function(lst, new_col = "dataset") {
  # Tag each element with its own list name, then stack everything.
  tagged <- lapply(names(lst), function(nm) {
    element <- lst[[nm]]
    element[, new_col] <- nm
    element
  })
  Reduce(rbind, tagged)
}
## ----------- named_list -----------
## create a named list from a character vector
## that can be used with apply family and
## return a named list
#' Create a named list from a character vector
#'
#' Each element of `char` becomes both a list element and its name, so the
#' result can be used with the apply family and yield a named output.
#'
#' @param char A character vector.
#' @return A list whose elements and names are the elements of `char`.
#' @export
named_list <- function(char) {
  # setNames() performs exactly the original as.list()/names<- two-step.
  setNames(as.list(char), char)
}
## ----------- ggplot color hue -----------
#' @export
#' Reproduce ggplot2's default discrete colour palette: n evenly spaced
#' hues on the HCL wheel at luminance 65 and chroma 100.
gg_color_hue <- function(n) {
  hue_seq <- seq(15, 375, length.out = n + 1)
  grDevices::hcl(h = hue_seq[seq_len(n)], l = 65, c = 100)
}
|
3a9c739fd8a699ba2d54b901bc3dc3e36eca2d02
|
8f8eac85cfbf8d3bc768318848ec964cb297b1cb
|
/casen/r-script/scripts/4_health_data.R
|
99f059f3efae8d6727326145061b8a655922521a
|
[] |
no_license
|
jnaudon/datachile-etl
|
5231a3762dd32f3f3def4d568fc63934d603cf8b
|
8fa577378d38f8d63f6dfdb00ed515bbb439f154
|
refs/heads/master
| 2023-03-23T00:36:35.698292
| 2019-03-23T03:30:16
| 2019-03-23T03:30:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,574
|
r
|
4_health_data.R
|
# Assemble comuna-level health-system membership from CASEN survey waves
# (2000-2015), harmonising each wave's raw coding into a shared id scheme.
try(dir.create("tidy_data"))
try(dir.create("tidy_data/health_system"))

comunas <- as_tibble(fread("~/GitHub/datachile-data/official_ids/2017_06_27_comunas_datachile_fixed.csv"))

casenclean_zip <- list.files(path = "fixed_comunas", recursive = T, full.names = T)
# Keep only archives whose filename mentions a year in 2000-2015.
healthclean_zip <- grep(casenclean_zip, pattern = paste(2000:2015, collapse = "|"), value = T)

# Extract every matching archive into temp/ ("-aos" skips existing files).
for (j in 1:length(healthclean_zip)) {
  try(
    system(paste("7z e -aos", healthclean_zip[[j]], "-oc:temp/"))
  )
}

healthclean_csv <- list.files(path = "temp", recursive = T, full.names = T)

# Survey waves, in file order: 2000, 2003, 2006, 2009, 2011, 2013, 2015.
years <- c(seq(2000, 2009, 3), seq(2011, 2015, 2))

# Crosswalk from each wave's raw health-system code (the survey column and
# its coding changed over time: s1, s17, s14, s12) to one health_system_id.
healthsystem_ids <- tibble(health_system = c(paste("Fonasa", LETTERS[1:4]), "Fonasa (no sabe grupo)", "Fuerzas Armadas, de Orden y Seguridad P\u00fablica", "Isapre", "Ninguno", "Otro sistema", "No sabe", "NaN"),
                           health_system_id = c(1:10, NA),
                           s1_00_03 = c(0:9, NA),
                           s1_06_09 = c(1:9,99, NA),
                           s17_11 = s1_06_09,
                           s14_13 = s1_06_09,
                           s12_15 = s1_06_09)

# Coarser grouping: the five Fonasa categories collapse into one group id.
healthsystemgroup_ids <- tibble(health_system_group = c(rep("Fonasa", 5), "Fuerzas Armadas, de Orden y Seguridad P\u00fablica", "Isapre", "Ninguno", "Otro sistema", "No sabe", "NaN"),
                                health_system_id = c(1:10, NA),
                                health_system_group_id = c(rep(1, 5), 2:6, NA))

# For each wave: select the wave-specific system column, translate it to
# health_system_id via the crosswalk, keep the expansion factors
# (expr/expc), and stash the result as casen_<year>.
for (t in 1:length(healthclean_csv)) {
  raw <- as_tibble(fread(healthclean_csv[[t]])) %>%
    mutate(year = years[[t]])

  if (years[[t]] <= 2003) {
    raw <- raw %>%
      select(year, comuna_datachile_id, s1, expr, expc) %>%
      left_join(healthsystem_ids %>% select(health_system_id, s1_00_03), by = c("s1" = "s1_00_03")) %>%
      select(-s1)
  }

  if (years[[t]] >= 2006 & years[[t]] <= 2009) {
    raw <- raw %>%
      select(year, comuna_datachile_id, s1, expr, expc) %>%
      left_join(healthsystem_ids %>% select(health_system_id, s1_06_09), by = c("s1" = "s1_06_09")) %>%
      select(-s1)
  }

  if (years[[t]] == 2011) {
    raw <- raw %>%
      select(year, comuna_datachile_id, s17, expr, expc) %>%
      left_join(healthsystem_ids %>% select(health_system_id, s17_11), by = c("s17" = "s17_11")) %>%
      select(-s17)
  }

  if (years[[t]] == 2013) {
    raw <- raw %>%
      select(year, comuna_datachile_id, s14, expr, expc) %>%
      left_join(healthsystem_ids %>% select(health_system_id, s14_13), by = c("s14" = "s14_13")) %>%
      select(-s14)
  }

  if (years[[t]] == 2015) {
    raw <- raw %>%
      select(year, comuna_datachile_id, s12, expr, expc) %>%
      left_join(healthsystem_ids %>% select(health_system_id, s12_15), by = c("s12" = "s12_15")) %>%
      select(-s12)
  }

  assign(paste0("casen_",years[[t]]), raw)
}

# Stack the per-wave tibbles (casen_2000 ... casen_2015) into one table,
# then drop the intermediates from the workspace.
healthsystem <- mget(ls(pattern = "casen_[0-9]")) %>%
  bind_rows()
rm(list = ls(pattern = "casen_"))

# healthsystem <- healthsystem %>%
#   group_by(year, comuna_datachile_id, health_system_id) %>%
#   summarise(n = sum(expr, na.rm = T))
#
# healthsystem %>%
#   group_by(year, health_system_id) %>%
#   summarise(n = sum(n, na.rm = T)) %>%
#   filter(health_system_id == 6)

healthsystem <- healthsystem %>%
  select(year, comuna_datachile_id, health_system_id, expc, expr)

fwrite(healthsystem_ids, "ids/healthsystem_ids.csv")
fwrite(healthsystemgroup_ids, "ids/healthsystemgroup_ids.csv")
fwrite(healthsystem, "tidy_data/health_system/healthsystem.csv")

# Quick check: which years still contain rows with no comuna id match.
healthsystem %>% filter(is.na(comuna_datachile_id)) %>% distinct(year)
|
d657979736f215469698dec5af9be556bdefde83
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/R/forecastqueryservice_interfaces.R
|
2103582f398ab1e042caa826128041e9894a13fe
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 2,402
|
r
|
forecastqueryservice_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
# NOTE(review): comments below are review annotations only; any lasting
# change must go through the make.paws generator.

#' @importFrom paws.common populate
#' @include forecastqueryservice_service.R
NULL

# Request shape for QueryForecast, populated from the caller's arguments.
.forecastqueryservice$query_forecast_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ForecastArn = structure(logical(0), tags = list(type = "string")), StartDate = structure(logical(0), tags = list(type = "string")), EndDate = structure(logical(0), tags = list(type = "string")), Filters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}

# Response shape for QueryForecast: a named map of {Timestamp, Value} series.
.forecastqueryservice$query_forecast_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Forecast = structure(list(Predictions = structure(list(structure(list(structure(list(Timestamp = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}

# Request shape for QueryWhatIfForecast.
.forecastqueryservice$query_what_if_forecast_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(WhatIfForecastArn = structure(logical(0), tags = list(type = "string")), StartDate = structure(logical(0), tags = list(type = "string")), EndDate = structure(logical(0), tags = list(type = "string")), Filters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}

# Response shape for QueryWhatIfForecast (same structure as QueryForecast).
.forecastqueryservice$query_what_if_forecast_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Forecast = structure(list(Predictions = structure(list(structure(list(structure(list(Timestamp = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
|
1c0122ab8b27534b003e68b1ca80f83de93f8ff9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MiDA/examples/MiNTreesAjust.Rd.R
|
51b5e492d6f416725c086c3a8f432d78785a696c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
MiNTreesAjust.Rd.R
|
library(MiDA)
### Name: MiNTreesAjust
### Title: Ajust number of trees parameter for fitting generalized boosted
### regression models
### Aliases: MiNTreesAjust
### ** Examples
#get gene expression and specimen data
data("IMexpression");data("IMspecimen")
#sample expression matrix and specimen data for binary classification,
#only "NORM" and "EBV" specimens are left
SampleMatrix<-MiDataSample(IMexpression, IMspecimen$diagnosis,"norm", "ebv")
SampleSpecimen<-MiSpecimenSample(IMspecimen$diagnosis, "norm", "ebv")
#Fitting, low tuning for faster running. Test ntrees
set.seed(1)
ClassRes<-MiNTreesAjust(SampleMatrix, SampleSpecimen, test.frac = 5, times = 3,
ntrees = c(10, 20), shrinkage = 1, intdepth = 2)
ClassRes[[1]] # train accuracy
ClassRes[[2]] # test accuracy
|
f839f4005a788aae6c695b798f51e4ee85dde19f
|
0e92fae9265aa15dc939b0a95d497888e8d946d3
|
/R_func/names.R
|
3705d54757d12f13d5eaf66e62a189ab3a46e938
|
[] |
no_license
|
qBioTurin/Multiple-Sclerosis
|
9b7d59924916098e6bad0910e8b5f2001ac4a006
|
f7732970a6cdfec750ab8242f31c281cf322c226
|
refs/heads/master
| 2022-11-13T04:28:26.523355
| 2020-07-06T13:56:57
| 2020-07-06T13:56:57
| 255,663,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
names.R
|
# Extract place names from the Petri-net place/transition listing: rows of
# the first column up to (but not including) the "#TRANSITION" marker.
Names <- read_table2("Net/MS_Model.PlaceTransition")
transStart <- which(Names[,1] == "#TRANSITION")
# Keep only the rows before the marker; unlist/unname so NAMES is a bare
# character vector with no element names.
NAMES = unlist(Names[1:(transStart-1),1])
NAMES = unname(NAMES)
saveRDS(NAMES, file = "./input/NAMES.RDS")
|
4325f3eb167987b32af4d876abc0cc453c5214a3
|
c12893fddae5e96acdfd8fcbc99ffde079a84857
|
/attic/08-feature-eng/rsrc/ames-encoding.R
|
094c8f1a461abc5acdf2764f9d02cab0c6789d90
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
Pizzaknoedel/lecture_i2ml
|
0ddb98f02f27377bf3de73235adb5025fe6630f5
|
a630c6608aa642d45439a0742511d47267a61c20
|
refs/heads/master
| 2023-02-24T19:10:23.075536
| 2021-01-31T11:43:20
| 2021-01-31T11:43:20
| 315,969,310
| 0
| 0
|
CC-BY-4.0
| 2021-01-31T11:43:21
| 2020-11-25T14:49:45
| null |
UTF-8
|
R
| false
| false
| 1,128
|
r
|
ames-encoding.R
|
# Compare categorical-encoding strategies (one-hot vs reference/dummy vs
# untouched factors) on the Ames housing data using mlr benchmarks.
library(tidyverse)
library(mlr)
library(mlrCPO)
library(parallelMap)

parallelStartSocket(7)

data = read.csv("data/ames_housing_extended.csv")

# Regression task on a handful of categorical predictors; cpoFixFactors
# normalises the factor levels.
task = data %>%
  select(SalePrice, MS.Zoning, Street, Lot.Shape, Land.Contour, Bldg.Type) %>%
  makeRegrTask(id = "None", target = "SalePrice") %>>% cpoFixFactors()

# Two encoded variants of the same task.
task1 = createDummyFeatures(task, method = "1-of-n")
task1$task.desc$id = "One-Hot"
task2 = createDummyFeatures(task, method = "reference")
task2$task.desc$id = "Dummy"

lrns = list(
  makeLearner(id = "Linear Regression", "regr.lm"),
  makeLearner(id = "Random Forest", "regr.ranger"))

# Shared 10-fold CV instance so every task/learner pair sees identical splits.
set.seed(1)
rin = makeResampleInstance(cv10, task1)
res = benchmark(lrns, list(task1, task2, task), rin, mae)

# Boxplot of MAE per encoding, faceted by learner.
# NOTE(review): the linear-regression run on the raw-factor task is
# excluded from the plot -- presumably not comparable; confirm intent.
pl = as.data.frame(res) %>%
  filter(task.id != "None" | learner.id != "Linear Regression") %>%
  ggplot(aes(y = mae, x = task.id)) +
  geom_boxplot() +
  facet_wrap(~learner.id) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 20, hjust = 1)) +
  ylab("Mean Absolute Error") +
  xlab("") + ggtitle("Ames House Price Prediction")

ggsave("figure_man/ames-encoding.png", width = 6, height = 5)
|
d8706deb0151f001e8135213bd9d35a8a6ebf681
|
681ea7c375e718bb0faabfed9a544918efaf08d6
|
/man/kproto_gower.Rd
|
dd0148dd3faebdc8ed81c41fe73fbbf8a215d013
|
[] |
no_license
|
cran/clustMixType
|
d434fa2577642ca3cdc8e6b93ed6d2b146939a4d
|
fd399a4c6fcba3f7073885323ab21c431fe7b8c1
|
refs/heads/master
| 2022-12-25T15:13:46.543557
| 2022-12-14T18:50:02
| 2022-12-14T18:50:02
| 52,676,831
| 6
| 8
| null | 2018-01-24T20:17:13
| 2016-02-27T16:44:43
|
R
|
UTF-8
|
R
| false
| true
| 5,213
|
rd
|
kproto_gower.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kprototypes_gower.R
\name{kproto_gower}
\alias{kproto_gower}
\title{k-Prototypes Clustering using Gower Dissimilarity}
\usage{
kproto_gower(
x,
k,
lambda = NULL,
iter.max = 100,
na.rm = "yes",
keep.data = TRUE,
verbose = TRUE
)
}
\arguments{
\item{x}{Data frame with both numerics and factors (also ordered factors are possible).}
\item{k}{Either the number of clusters, a vector specifying indices of initial prototypes, or a data frame of prototypes of the same columns as \code{x}.}
\item{lambda}{Parameter > 0 to trade off between Euclidean distance of numeric variables
and simple matching coefficient between categorical variables. Also a vector of variable specific factors is possible where
the order must correspond to the order of the variables in the data. In this case all variables' distances will be multiplied by
their corresponding lambda value.}
\item{iter.max}{Maximum number of iterations if no convergence before.}
\item{na.rm}{Character; passed from \code{\link{kproto}}. For \code{"no"}, observations where all variables are missing are assigned cluster membership \code{NA}.}
\item{keep.data}{Logical whether original should be included in the returned object.}
\item{verbose}{Logical whether information about the cluster procedure should be given. Caution: If \code{verbose=FALSE}, the reduction of the number of clusters is not mentioned.}
}
\value{
\code{\link{kmeans}} like object of class \code{\link{kproto}}:
\item{cluster}{Vector of cluster memberships.}
\item{centers}{Data frame of cluster prototypes.}
\item{lambda}{Distance parameter lambda. For \code{type = "gower"} only a vector of variable-specific weights is possible.}
\item{size}{Vector of cluster sizes.}
\item{withinss}{Vector of within cluster distances for each cluster, i.e. summed distances of all observations belonging to a cluster to their respective prototype.}
\item{tot.withinss}{Target function: sum of all observations' distances to their corresponding cluster prototype.}
\item{dists}{Matrix with distances of observations to all cluster prototypes.}
\item{iter}{Prespecified maximum number of iterations.}
\item{stdization}{List of standardized ranks for ordinal variables and and an additional element \code{num_ranges} with ranges of all numeric variables. Used by \code{\link{predict.kproto}}.}
\item{trace}{List with two elements (vectors) tracing the iteration process:
\code{tot.dists} and \code{moved} number of observations over all iterations.}
}
\description{
Internal function. Computes k-prototypes clustering for mixed-type data using Gower dissimilarity.
}
\details{
Internal function called by \code{\link{kproto}}. Note that there is no \code{nstart} argument.
Higher values than \code{nstart = 1} can be specified within \code{kproto} which will call \code{kproto_gower}
several times.
For Gower dissimilarity range-normalized absolute distances from the cluster median
are computed for the numeric variables (and for the ranks of the ordered factors respectively).
For factors simple matching distance is used as in the original k prototypes algorithm.
The prototypes are given by the median for numeric variables, the mode for factors and the level with the closest rank
to the median rank of the corresponding cluster.
In case of \code{na.rm = "no"}: for each observation variables with missings are ignored
(i.e. only the remaining variables are considered for distance computation).
In consequence for observations with missings this might result in a change of variable's weighting compared to the one specified
by \code{lambda}. Further note: For these observations distances to the prototypes will typically be smaller as they are based
on fewer variables.
}
\examples{
datasim <- function(n = 100, k.ord = 2, muk = 1.5){
clusid <- rep(1:4, each = n)
# numeric
mus <- c(rep(-muk, n),
rep(-muk, n),
rep(muk, n),
rep(muk, n))
x1 <- rnorm(4*n) + mus
# ordered factor
mus <- c(rep(-muk, n),
rep(muk, n),
rep(-muk, n),
rep(muk, n))
x2 <- rnorm(4*n) + mus
# ordered factor
quants <- quantile(x2, seq(0, 1, length.out = (k.ord+1)))
quants[1] <- -Inf
quants[length(quants)] <- Inf
x2 <- as.ordered(cut(x2, quants))
x <- data.frame(x1, x2)
return(x)
}
n <- 100
x <- datasim(n = n, k.ord = 10, muk = 2)
truth <- rep(1:4, each = n)
# calling the internal kproto_gower() directly
kgres <- kproto_gower(x, 4, verbose = FALSE)
# calling kproto gower via kproto:
kgres2 <- kproto(x, 4, verbose = FALSE, type = "gower", nstart = 10)
table(kgres$cluster, truth)
clprofiles(kgres, x)
}
\references{
\itemize{
\item Gower, J. C. (1971): A General Coefficient of Similarity and Some of Its Properties. {\emph{Biometrics, 27(4)}}, 857–871.
\doi{10.2307/2528823}.
\item Podani, J. (1999): Extending Gower's general coefficient of similarity to ordinal characters. {\emph{TAXON, 48}}, 331-340.
\doi{10.2307/1224438}.
}
}
\author{
\email{gero.szepannek@web.de}
}
\keyword{classif}
\keyword{cluster}
\keyword{multivariate}
|
1d114c5c7f23aebbdff99d78377565f77b91784d
|
ca1f3439c33953bf3ab0a6667849ec2f06e46fc1
|
/examples/South_African_Municipal_Population_Density/app.R
|
d330e388b6007766eff5628fb269305601e10982
|
[] |
no_license
|
artilery/shiny_leaflet_choropleth
|
1ed0fd346148c14bb3d28309674d8ae5b311a49e
|
9fbc0012ed0059bf465ef85b07b680ba8611e69b
|
refs/heads/master
| 2021-01-22T03:40:35.951569
| 2015-08-17T08:51:14
| 2015-08-17T08:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,876
|
r
|
app.R
|
library(shiny)
library(leaflet)
library(DT)

# Pre-built map objects loaded from the workspace: town_tj (TopoJSON),
# town_tj_spd (polygons with a DENSITY column) and town_binpal (a leaflet
# colour-palette function) -- confirm contents against R/v1.RData.
load("R/v1.RData")

ui <- bootstrapPage(

  # CSS class for the floating panels overlaid on the map.
  tags$head(tags$style(".floater { background-color: white;
padding: 8px; opacity: 0.7; border-radius: 6px;
box-shadow: 0 0 15px rgba(0,0,0,0.2); }")),

  title = "South African Demographics",

  # Full-width Leaflet map.
  leafletOutput(outputId = 'map1', width = "100%", height = "650px"),

  # Hover panel showing the municipality name and its population density.
  absolutePanel(
    right=0,top=70,width =275, class="floater",
    strong(h4("SA Population Density")),
    uiOutput('hoverInfo')
  ),

  absolutePanel( # zoom level
    left=20,top=720,width =200,
    textOutput(outputId='message5',container=span,inline=T)
  ),

  absolutePanel( # boundary coordinates
    left=240,top=720,width =610,
    textOutput(outputId='message6',container=span,inline=T)
  ),

  absolutePanel( # click coordinates
    left=850,top=720,width =300,
    textOutput(outputId='message3',container=span,inline=T)
  ),

  absolutePanel( # active-mouseover counter (message7, not a click readout)
    left=1200,top=720,width =100,
    textOutput(outputId='message7',container=span,inline=T)
  )
)
# Server: renders the choropleth, highlights the hovered municipality, and
# mirrors map events (hover, click, zoom, bounds) into text outputs.
server <- function(input, output, session) {

  # BUG FIX: the original called reactiveValues() seven separate times and
  # each call re-bound `v`, discarding the previous object -- only the last
  # (msg7 = 0) survived, and msg1..msg6 worked purely because unset
  # reactiveValues fields read as NULL.  A single call declares every
  # field explicitly.
  v <- reactiveValues(
    msg1 = NULL,  # municipality name under the cursor
    msg2 = NULL,  # population density under the cursor
    msg3 = NULL,  # click-coordinates text
    msg4 = NULL,  # unused; kept so output$message4 still renders empty
    msg5 = NULL,  # zoom-level text
    msg6 = NULL,  # view-bounds text
    msg7 = 0      # 1 while the cursor is over a polygon, 0 otherwise
  )

  # Track whether the cursor is currently over a municipality polygon.
  observeEvent(input$map1_topojson_mouseover, {
    v$msg7 <- 1
  })
  observeEvent(input$map1_shape_mouseout, {
    v$msg7 <- 0
  })

  # Base map: TopoJSON choropleth plus a density legend.
  output$map1 <- renderLeaflet({
    leaflet() %>%
      setView(zoom = 6, lng = 26, lat = -29) %>%
      addTiles() %>%
      addTopoJSON(
        topojson = town_tj, stroke = TRUE, color = "white", opacity = 1,
        fill = TRUE, weight = 1
      ) %>%
      addLegend(
        position = "bottomright",
        pal = town_binpal,
        opacity = 1,
        labFormat = labelFormat(big.mark = " "),
        values = town_tj_spd@data$DENSITY
      )
  })

  # Record the hovered municipality's name and density for the info panel.
  observeEvent(input$map1_topojson_mouseover, label = "town_message_event", {
    v$msg1 <- input$map1_topojson_mouseover$properties$MUNICNAME
    v$msg2 <- input$map1_topojson_mouseover$properties$DENSITY
  })

  # Outline the hovered municipality in white.  (Removed the unused
  # `single_data` local computed by the original.)
  observeEvent(input$map1_topojson_mouseover, label = "town_proxymap_event", {
    proxy1 <- leafletProxy(
      "map1", data = subset(town_tj_spd, town_tj_spd@data$MUNICNAME == v$msg1)
    )
    proxy1 %>%
      clearShapes() %>%
      addPolygons(stroke = TRUE, weight = 4, color = "#FFFFFF", opacity = 1,
                  smoothFactor = 1, fill = TRUE, fillOpacity = 0)
  })

  # Drop the highlight when the cursor leaves the polygon.
  observeEvent(input$map1_shape_mouseout, {
    proxy <- leafletProxy(mapId = 'map1')
    proxy %>%
      clearShapes()
  })

  # Hover panel: prompt while idle, name + density while hovering.
  output$hoverInfo <- renderUI({
    if (v$msg7 == 0) {
      return(
        div(
          paste("Hover over a Municipality")
        ))
    } else {
      return(
        div(
          strong(v$msg1),
          br(),
          span(round(v$msg2, 1), HTML("people/km<sup>2</sup>"))
        )
      )
    }
  })

  # Click coordinates: background clicks and polygon clicks feed the same text.
  observeEvent(input$map1_click, {
    v$msg3 <- paste(
      "Click coordinates: Lat", round(input$map1_click$lat, 3),
      "Long", round(input$map1_click$lng, 3))
  })
  observeEvent(input$map1_shape_click, {
    v$msg3 <- paste(
      "Click coordinates: Lat", round(input$map1_shape_click$lat, 3),
      "Long", round(input$map1_shape_click$lng, 3))
  })

  observeEvent(input$map1_zoom, {
    v$msg5 <- paste("Zoom level is", input$map1_zoom)
  })

  # View bounds, truncated to 6 characters per coordinate for display.
  observeEvent(input$map1_bounds, {
    v$msg6 <- paste(
      "View Bounds: lat", substr(paste(input$map1_bounds[1]), start = 1, stop = 6), "long",
      substr(paste(input$map1_bounds[4]), start = 1, stop = 6), "(topleft); lat",
      substr(paste(input$map1_bounds[3]), start = 1, stop = 6), "long",
      substr(paste(input$map1_bounds[2]), start = 1, stop = 6), "(bottomright)")
  })

  output$message3 <- renderText(v$msg3)  # click coordinates
  output$message4 <- renderText(v$msg4)  # unused placeholder
  output$message5 <- renderText(v$msg5)  # zoom level
  output$message6 <- renderText(v$msg6)  # boundary coordinates
  # BUG FIX: typo in the displayed text ("Actice" -> "Active").
  output$message7 <- renderText(paste("Active mouseover events =", v$msg7))
}

shinyApp(ui, server)
|
82d3aed03b4c4739c8b330e88157ece0f53a6e53
|
72183028eec7ba0efe2d626ab158f51d773618ec
|
/man/sdm_thresh.Rd
|
86e33a55e74cb8ce7fb441fd05ec1b46ee2d68b2
|
[] |
no_license
|
silasprincipe/sdmvis
|
35b56cc128f2b89b0231a251550066f773096141
|
ef7d1f7cf082da776730888a389dbd7e83f79d6b
|
refs/heads/main
| 2023-06-03T02:24:47.642458
| 2021-06-15T13:12:49
| 2021-06-15T13:12:49
| 373,350,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,544
|
rd
|
sdm_thresh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdm_thresh.R
\name{sdm_thresh}
\alias{sdm_thresh}
\alias{sdm_thresh,Raster-method}
\alias{sdm_thresh,data.frame-method}
\title{SDM Leaflet map with different thresholds}
\usage{
sdm_thresh(
sdm,
thresh,
tname = NULL,
pts = NULL,
pal = NULL,
crs = "standard",
cluster = FALSE,
simplify = TRUE,
thresh.color = NULL
)
\S4method{sdm_thresh}{Raster}(
sdm,
thresh,
tname = NULL,
pts = NULL,
pal = NULL,
crs = "standard",
cluster = FALSE,
simplify = TRUE,
thresh.color = NULL
)
\S4method{sdm_thresh}{data.frame}(
sdm,
thresh,
tname = NULL,
pts = NULL,
pal = NULL,
crs = "standard",
cluster = FALSE,
simplify = TRUE,
thresh.color = NULL
)
}
\arguments{
\item{sdm}{The SDM/ENM result. Should be in the Raster* format.}
\item{thresh}{A vector containing the thresholds to be applied (numeric values).}
\item{tname}{An optional character vector containing the names of the thresholds to be used in the legend. If not supplied, numerals will be used instead.}
\item{pts}{A data frame containing the presence or presence/absence points (optional). The first column should be longitude (x) and the sencond latitude (y). In the case of presence/absence data, an additional collumn should be provided, coded as 0 (absence) and 1 (presence).}
\item{pal}{Character string indicating the name of the continuous mode palette (see \link[sdmvis]{gen_pal}). If not supplied, the default will be used.}
\item{crs}{Enables to change the default projection used in the Leaflet package. For now, not functional.}
\item{cluster}{Should the points be clustered (i.e., aggregated)? Only valid if `pts` is supplied. Default is FALSE.}
\item{simplify}{Should the polygons be simplified? If TRUE, the output became lighter.}
\item{thresh.color}{Vector of color(s) to be used for the threshold(s) polygon(s)}
}
\value{
A Leaflet map.
}
\description{
Create a Leaflet map with the results of an SDM and draw how different thresholds will result.
This map can be explored interactively in the viewer, so its possible to have a better comprehension of the outcomes of your SDM.
}
\section{Functions}{
\itemize{
\item \code{sdm_thresh,Raster-method}: Method for Raster*
\item \code{sdm_thresh,data.frame-method}: Method for data frames
}}
\examples{
library(sdmvis)
# Load data
data("original_sdm")
data("pa_data")
# Plot
sdm_thresh(sdm = original_sdm[[1]],
thresh = -2,
tname = "TSS",
pts = pa_data)
}
|
45a9e376741f6593935504cdb4dc7f94252d65bd
|
23c09350187a1d88b914765a56d2b68e3158cdee
|
/official/explore.R
|
696158cee219c70185e5a83d9ecbde4210fb4d44
|
[] |
no_license
|
mdelhey/esther-data
|
dd01fbe6714ebdc49f2d7debf0ee6c478003483e
|
2e47d1daa42e62592ecf987278a7c025cfb3bf68
|
refs/heads/master
| 2020-04-15T23:56:52.904813
| 2013-08-20T23:20:42
| 2013-08-20T23:20:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,131
|
r
|
explore.R
|
### Esther exploratory data analysis
library(plyr)
# Load the data into one data set (harder than it sounds)
options(stringsAsFactors = FALSE)
sp12 <- read.csv("sp12.csv")
sp11 <- read.csv("sp11.csv")
sp10 <- read.csv("sp10.csv")
sp09 <- read.csv("sp09.csv")
sp08 <- read.csv("sp08.csv")
sp07 <- read.csv("sp07.csv")
sp06 <- read.csv("sp06.csv")
sp05 <- read.csv("sp05.csv")
fa12 <- read.csv("fa12.csv")
fa11 <- read.csv("fa11.csv")
fa10 <- read.csv("fa10.csv")
fa09 <- read.csv("fa09.csv")
fa08 <- read.csv("fa08.csv")
fa07 <- read.csv("fa07.csv")
fa06 <- read.csv("fa06.csv")
fa05 <- read.csv("fa05.csv")
fa04 <- read.csv("fa04.csv")
# Create two new variables: year and semester
fa12$year <- "2012-2013"
sp12$year <- "2011-2012"
fa11$year <- "2011-2012"
sp11$year <- "2010-2011"
fa10$year <- "2010-2011"
sp10$year <- "2009-2010"
fa09$year <- "2009-2010"
sp09$year <- "2008-2009"
fa08$year <- "2008-2009"
sp08$year <- "2007-2008"
fa07$year <- "2007-2008"
sp07$year <- "2006-2007"
fa06$year <- "2006-2007"
sp06$year <- "2005-2006"
fa06$year <- "2005-2006"
sp05$year <- "2004-2005"
fa05$year <- "2004-2005"
# Join the data sets
|
087aef8f4c2e010cc982a33ff1ce0193ac8be463
|
852beff699832333f51a9cbaf95e8ba9b7c972b4
|
/InstaRF/R/gaussian_blur.R
|
c6b9a0e576533b7eaaa2394ed9b145e94c8d3e9a
|
[] |
no_license
|
cheukman1207/InstaRF
|
468c2474f9b669e2ffe432e30a504aaf7c8dd74c
|
8e54a59a90ab1ddbafe6e9de0237f7891bec665a
|
refs/heads/master
| 2020-04-21T05:11:00.729163
| 2019-02-09T01:00:05
| 2019-02-09T01:00:05
| 169,333,325
| 0
| 0
| null | 2019-02-06T00:12:46
| 2019-02-06T00:12:45
| null |
UTF-8
|
R
| false
| false
| 288
|
r
|
gaussian_blur.R
|
#' Gaussian blur
#'
#' @param string, path to the input png file
#' @param string, path to the output png file
#' @return png file in the output path
#' @export
#'
#' @examples
#' gaussian_blur("input,png", "output.png")
gaussian_blur <- function() {
print("gaussian blur working")
}
|
2e3be784a60e3c8fb2dfe4ef00a342e5882ce78e
|
700d8121a4e3a9fc4c31e015db643758cb843569
|
/inst/registered/NCBI_assemblies/Drosophila_virilis.R
|
8e6578e69db82567a688582be12bf7d7a5672b90
|
[] |
no_license
|
Bioconductor/GenomeInfoDb
|
727c90f03c289f692999860a12077775f4d65317
|
9dba03f8d2a4f76732e2b12beac7c0ee3230a693
|
refs/heads/devel
| 2023-08-09T21:33:11.074781
| 2023-06-20T21:40:39
| 2023-06-20T21:40:39
| 102,149,975
| 14
| 15
| null | 2023-03-13T17:45:24
| 2017-09-01T20:19:20
|
R
|
UTF-8
|
R
| false
| false
| 290
|
r
|
Drosophila_virilis.R
|
ORGANISM <- "Drosophila virilis"
### List of assemblies by date.
ASSEMBLIES <- list(
list(assembly="dvir_caf1",
date="2006/07/12",
extra_info=c(strain="TSC#15010-1051.87"),
assembly_accession="GCA_000005245.1", # droVir3
circ_seqs=character(0))
)
|
1959d3bdf35d83fb9844565736ac8a592e11c81a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/beginr/examples/plotcolors.Rd.R
|
6868453f04f41ea438aecf6d3fa50dff3bde5e96
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 131
|
r
|
plotcolors.Rd.R
|
library(beginr)
### Name: plotcolors
### Title: A reminder for colors
### Aliases: plotcolors
### ** Examples
plotcolors()
|
fe0cbdb48a70a5742518f3cd5d3e6a4111c48aa3
|
5febc1e3f2dd766ff664f8e0ae79002072359bde
|
/man/scdb_mctnetwork.Rd
|
4c631afbc9a9f2ff667ba7aec85e697d75245fa5
|
[
"MIT"
] |
permissive
|
tanaylab/metacell
|
0eff965982c9dcf27d545b4097e413c8f3ae051c
|
ff482b0827cc48e5a7ddfb9c48d6c6417f438031
|
refs/heads/master
| 2023-08-04T05:16:09.473351
| 2023-07-25T13:37:46
| 2023-07-25T13:37:46
| 196,806,305
| 89
| 30
|
NOASSERTION
| 2023-07-25T13:38:07
| 2019-07-14T07:20:34
|
R
|
UTF-8
|
R
| false
| true
| 314
|
rd
|
scdb_mctnetwork.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scdb.r
\name{scdb_mctnetwork}
\alias{scdb_mctnetwork}
\title{scdb_mctnetwork - get a mctnetwork object}
\usage{
scdb_mctnetwork(id)
}
\arguments{
\item{id}{- id of mctnetwork}
}
\description{
scdb_mctnetwork - get a mctnetwork object
}
|
f7eecbb922c1bd4309110e6006a8ec078b23a2db
|
c6ab22e84df7b7dcf7a1ab462a99484237694fa7
|
/man/SpMAverage.Rd
|
900f62ebe4a3f5007d989967a89764a6aef29e28
|
[] |
no_license
|
Nisus-Liu/bbturns
|
d2f55950450851946987ed816b02f557bee5c471
|
84ea91b86568c060609c644bf2c6a4f022970740
|
refs/heads/master
| 2021-01-11T17:37:58.308645
| 2017-02-06T14:54:19
| 2017-02-06T14:54:19
| 79,809,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 313
|
rd
|
SpMAverage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpMAverage.R
\name{SpMAverage}
\alias{SpMAverage}
\title{Spencer moving average}
\usage{
SpMAverage(x)
}
\arguments{
\item{x}{numeric vector.}
}
\description{
return a 15 period moving average.
}
\examples{
x = c(1:20)
SpMAverage(x)
}
|
570d10f7466165556accc669c8bc560ed300c822
|
830f6de1a8cd3d852de79dc0c172bd7b534ee210
|
/Scripts/pseudotimeRyu.R
|
a4b9cdacb8b04e4f80a15e51e3256a855d939173
|
[] |
no_license
|
ahmadazim/scPlastidDiff
|
c823badc2d0cdb587841abd1f59d32f2e9246fbb
|
8d4cfa9d10296f77ba99e5dfce2af5f5246ca88e
|
refs/heads/master
| 2022-11-20T04:44:51.594138
| 2020-07-28T05:12:24
| 2020-07-28T05:12:24
| 283,099,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,883
|
r
|
pseudotimeRyu.R
|
##=======================================================================================
## Running pseudotime on Ryu dataset
## Ideas:
## Will try between meristem and other cell types to see if we can remove gap
## Try to find genes that are indicative of plastid and cell state
## Track the expression of such genes to see if it can suggest anything about cell state and plastid state
## Try to address question of synchronicity between cell and plastid dofferentiation
##=======================================================================================
# First, I installed docker on my VM by typing this into debian
# curl -fsSL https://get.docker.com | sh;
# sudo service docker start
# sudo usermod -a -G docker $USER
# install the following packages if needed...
library(Seurat)
library(dyno)
library(dplyr)
library(tidyverse)
#=======================================================
# First try between Meristem and Root Cap Cells
# Ended up not doing this because of gap -- ignore
#=======================================================
nonPlast <- row.names(NDpWTdata)[!is.element(row.names(NDpWTdata), gene.uncur)]
# Also removing mitochondrial genes
nonPlast <- nonPlast[!(nonPlast %in% as.character(genes$V2[27207:27416]))]
meriRC <- names(NDpWTdata@active.ident[NDpWTdata@active.ident == "Root Cap Cells" | NDpWTdata@active.ident == "Meristem"])
ptimeNoPlast.meriRC <- subset(NDpWTdata, features = nonPlast, cells = meriRC)
ptimeNoPlast.meriRC <- FindVariableFeatures(ptimeNoPlast.meriRC)
varGenes.meriRC <- VariableFeatures(ptimeNoPlast.meriRC)
allExp.meriRC <- Matrix::t(ptimeNoPlast.meriRC@assays$SCT@scale.data)
allCount.meriRC <- Matrix::t(ptimeNoPlast.meriRC$SCT@counts)
varExp.meriRC <- allExp.meriRC[,colnames(allExp.meriRC) %in% varGenes.meriRC]
varCount.meriRC <- allCount.meriRC[,colnames(allCount.meriRC) %in% varGenes.meriRC]
meriRCNoPlast.dyn <- wrap_expression(
expression = varExp.meriRC,
counts = varCount.meriRC
)
meriRCNoPlast.dyn <- add_grouping(meriRCNoPlast.dyn, ptimeNoPlast.meriRC@active.ident)
initials.meriRC <- names(ptimeNoPlast.meriRC@active.ident[ptimeNoPlast.meriRC@active.ident == "Meristem"])
meriRCNoPlast.dyn <- add_prior_information(meriRCNoPlast.dyn, start_id = initials.meriRC)
ptimemeriRC <- infer_trajectory(meriRCNoPlast.dyn, ti_scorpius(), verbose = TRUE)
ptimemeriRC.simp <- simplify_trajectory(ptimemeriRC)
png("./NDmeriRCTI.png", units = "in", width = 10, height = 9, res = 600)
plot_dimred(ptimemeriRC, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = ptimeNoPlast.meriRC@active.ident) + ggtitle("Slingshot TI Method used with Mersitem and Root Cap Cells")
dev.off()
png("./NDmeriRCTISimp.png", units = "in", width = 10, height = 9, res = 600)
plot_dimred(ptimemeriRC.simp, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.9, grouping = ptimeNoPlast.meriRC@active.ident) + ggtitle("Simplified Slingshot TI Method used with Meristem and Root Cap Cells")
dev.off()
#==================================================================
# Trying between Meristem and Endodermis
# The difference between this and the next section is that this
# pseudotime is run on the meristem and endodermis cells without
# first re-normalizing those cell separately
#===================================================================
# Assumed that you have the Seurat object named "NDpWTdata", which is all the wild-type (WTdata) cells from the Ryu dataset, worked with post-RSI (pWTdata), with No Doublets included in the data (NDpWTdata)
meriEndo <- names(NDpWTdata@active.ident[NDpWTdata@active.ident == "Endodermis" | NDpWTdata@active.ident == "Meristem"])
ptimeNoPlast.meriEndo <- subset(NDpWTdata, features = nonPlast, cells = meriEndo) # only including meristematic/endodermal cells "nonPlast" genes in pseudotime object (independent of plastid development)
ptimeNoPlast.meriEndo <- FindVariableFeatures(ptimeNoPlast.meriEndo)
varGenes.meriEndo <- VariableFeatures(ptimeNoPlast.meriEndo)
allExp.meriEndo <- Matrix::t(ptimeNoPlast.meriEndo@assays$SCT@scale.data) # grabbing scaled EXPRESSION matrix (genes as columns and cells as rows)
allCount.meriEndo <- Matrix::t(ptimeNoPlast.meriEndo$SCT@counts) # grabbing scaled COUNTS matrix (genes as columns and cells as rows)
varExp.meriEndo <- allExp.meriEndo[,colnames(allExp.meriEndo) %in% varGenes.meriEndo] # scaled expression with only 3000 variable features
varCount.meriEndo <- allCount.meriEndo[,colnames(allCount.meriEndo) %in% varGenes.meriEndo] # scaled counts with only 3000 variable features
# Creating (called "wrapping") the dynverse object
meriEndoNoPlast.dyn <- wrap_expression(
expression = varExp.meriEndo,
counts = varCount.meriEndo
)
meriEndoNoPlast.dyn <- add_grouping(meriEndoNoPlast.dyn, ptimeNoPlast.meriEndo@active.ident) # adding cell identity (endodermis or meristem) to dynverse object as a grouping
initials.meriEndo <- names(ptimeNoPlast.meriEndo@active.ident[ptimeNoPlast.meriEndo@active.ident == "Meristem"]) # list of cell names which should be starting point of trajectory
meriEndoNoPlast.dyn <- add_prior_information(meriEndoNoPlast.dyn, start_id = initials.meriEndo) # adding starting cell names to dynverse object
ptimemeriEndo <- infer_trajectory(meriEndoNoPlast.dyn, ti_scorpius(), verbose = TRUE) # actually inferring the trajectory with the SCORPIUS TI method
ptimemeriEndo.simp <- simplify_trajectory(ptimemeriEndo) # simplifying the trajectory (so we only have a beginning point and end point)
# Vizualizing the trjectories
png("./NDmeriEndoTI.png", units = "in", width = 10, height = 9, res = 600)
plot_dimred(ptimemeriEndo, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = ptimeNoPlast.meriEndo@active.ident) + ggtitle("SCORPIUS TI Method used with Mersitem and Endodermis (Original Embedding)")
dev.off()
##===========================================================================================
## Normalizing separate dataset of only meristematic and endodermal cells before pseudotime
##===========================================================================================
meriEndo.names <- names(NDpWTdata@active.ident[NDpWTdata@active.ident == "Endodermis" | NDpWTdata@active.ident == "Meristem"]) # endodermal and meristematic cells names
meriEndo.obj <- merge(WT1.obj, y = c(WT2.obj, WT3.obj), add.cell.ids = c("R1", "R2", "R3"), project = "plantRNA") # creating Seurat object with all Ryu data first
nonPgene <- row.names(meriEndo.obj)[which(row.names(meriEndo.obj) %in% gene.uncur == F)] # nonPgene are all genes not included in "gene.uncur" (list of uncurated plastid genes list)
nonPgene <- nonPgene[!(nonPgene %in% as.character(genes$V2[27207:27416]))] # also taking out all genes that start with MT- or CT- which are at the end of the gene list ("genes")
meriEndo.obj <- subset(meriEndo.obj, features = nonPgene, cells = meriEndo.names) # subsetting Seurat object with only meri/endo genes and non plastid genes
# Basic quality control and clustering workflow
meriEndo.obj[["percent.mt"]] <- PercentageFeatureSet(meriEndo.obj, pattern = "^MT-")
VlnPlot(meriEndo.obj, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
meriEndo.obj <- subset(meriEndo.obj, subset = nFeature_RNA > 2000 & nFeature_RNA < 8000 & percent.mt < 0.0075 & nCount_RNA < 75000)
meriEndo.obj <- SCTransform(meriEndo.obj, verbose = TRUE)
meriEndo.obj <- RunPCA(meriEndo.obj, verbose = TRUE)
meriEndo.obj <- RunTSNE(meriEndo.obj, verbose = TRUE)
meriEndo.obj <- RunUMAP(meriEndo.obj, dims = 1:30, verbose = TRUE)
meriEndo.obj <- FindNeighbors(meriEndo.obj, dims = 1:30, verbose = TRUE)
meriEndo.obj <- FindClusters(meriEndo.obj, verbose = TRUE, resolution = 0.1) # note the low resolution of 0.1 to only capture 2 clusters of meristem and endodermis
# Labeling meristem and endodermis clusters
newClusterIDs <- c("Meristem", "Endodermis")
names(newClusterIDs) <- levels(meriEndo.obj)
meriEndo.obj <- RenameIdents(meriEndo.obj, newClusterIDs)
# Vizualizing UMAP plot of meristem and endodermis cells
png("meriEndoUMAP.png", units = "in", width = 10, height = 8, res = 600)
DimPlot(meriEndo.obj, reduction = "umap", label = TRUE, label.size = 7, pt.size = 0.5)
dev.off()
##==============================================================================
## Actually running pseudotime between mersitem and endodermis
## For annotations, refer to lines 64-94
##==============================================================================
varGenes.x <- VariableFeatures(meriEndo.obj)
allExp.x <- Matrix::t(meriEndo.obj@assays$SCT@scale.data)
allCount.x <- Matrix::t(meriEndo.obj$SCT@counts)
varExp.x <- allExp.x[,colnames(allExp.x) %in% varGenes.x]
varCount.x <- all.Count.x[,colnames(all.Count.x) %in% varGenes.x]
xNoPlast.dyn <- wrap_expression(
expression = varExp.x,
counts = varCount.x
)
xNoPlast.dyn <- add_grouping(xNoPlast.dyn, meriEndo.obj@active.ident)
initials.x <- names(meriEndo.obj@active.ident[meriEndo.obj@active.ident == "Meristem"])
xNoPlast.dyn <- add_prior_information(xNoPlast.dyn, start_id = initials.x)
ptimemeriEndo.norm <- infer_trajectory(xNoPlast.dyn, ti_slingshot(), verbose = TRUE) # Using Slingshot TI method
ptimemeriEndo.norm.simp <- simplify_trajectory(ptimemeriEndo.norm)
# Getting pseudotime progression percentages from dyno object
slingProg <- ptimemeriEndo.norm.simp$progressions
slingProg <- slingProg[order(slingProg$percentage),]
# Vizulaizing simplified trajectory (slingshot TI method)
png("meriEndoNormSimp.png", units = "in", width = 10, height = 8, res = 600)
plot_dimred(ptimemeriEndo.norm.simp, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = meriEndo.obj@active.ident) + ggtitle("Slingshot TI Method used with Re-Normalized Meristem and Endodermis")
dev.off()
# Trying scorpius ti method
meriEndoPtime.scorpius <- infer_trajectory(xNoPlast.dyn, ti_scorpius(), verbose = TRUE)
png("meriEndoNormSCORPIUS.png", units = "in", width = 10, height = 8, res = 600) # Vizualizing trajectory
plot_dimred(meriEndoPtime.scorpius, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = meriEndo.obj@active.ident) + ggtitle("SCORPIUS TI Method used with Re-Normalized Meristem and Endodermis")
dev.off()
scorpProg <- meriEndoPtime.scorpius$progressions
scorpProg <- scorpProg[order(scorpProg$percentage),] # getting progression from scorpius TI method
# Trying embeddr ti method
meriEndoPtime.embeddr <- infer_trajectory(xNoPlast.dyn, ti_embeddr(), verbose = TRUE)
png("meriEndoNormEmbeddr.png", units = "in", width = 10, height = 8, res = 600) # Vizualizing trajectory
plot_dimred(meriEndoPtime.embeddr, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = meriEndo.obj@active.ident) + ggtitle("Embeddr TI Method used with Re-Normalized Meristem and Endodermis")
dev.off()
embProg <- meriEndoPtime.embeddr$progressions
embProg <- embProg[order(embProg$percentage),] # getting progression from Embeddr TI method
# Comparing scorpius to slingshot to embeddr by plotting cell ordering (x-axis) against its progression percentage (y-axis)
png("scorpVSslingVSembeddr.png", units = "in", width = 10, height = 8, res = 600)
plot(slingProg$percentage, col = "red", ylab = "Percentage Along Pseudotime Axis", xlab = "Cell Index", main = "Comparison of Pseudotime Progression with SCORPIUS versus Slingshot")
points(embProg$percentage, col = "navyblue")
points(scorpProg$percentage, col = "black")
legend(1050, 0.20, col = c("red", "navyblue", "black"), legend = c("Slingshot", "Embeddr", "SCORPIUS"), pch = c(16, 16, 16))
dev.off()
# CHOSE SCORPIUS AS TI INFERENCE METHOD --> most stable increasing along ptime
##================================================================================================
## Creating meriEndo.plast to...
## First, see if I can score cells based on previously identified proplastid genes
## Also, will serve as a re-normalized plastid clustering (did same with cell)
## Allowing me to analyze plastid gene expression independently of non-plastid gene expresion
##================================================================================================
meriEndoPlast <- merge(WT1.obj, y = c(WT2.obj, WT3.obj), add.cell.ids = c("R1", "R2", "R3"), project = "plantRNA") # first creating Seurat object with all Ryu data
meriEndoPlast <- subset(meriEndoPlast, features = gene.uncur, cells = meriEndo.names) # subset to only include meristematic/endodermal cells and only plastid genes ("gene.uncur")
meriEndoPlast[["percent.mt"]] <- PercentageFeatureSet(meriEndoPlast, pattern = "^MT-") # basic quality control, clustering workflow...
meriEndoPlast <- SCTransform(meriEndoPlast, verbose = TRUE)
meriEndoPlast <- RunPCA(meriEndoPlast, verbose = TRUE)
meriEndoPlast <- RunTSNE(meriEndoPlast, verbose = TRUE)
meriEndoPlast <- RunUMAP(meriEndoPlast, dims = 1:30, verbose = TRUE)
meriEndoPlast <- FindNeighbors(meriEndoPlast, dims = 1:30, verbose = TRUE)
meriEndoPlast <- FindClusters(meriEndoPlast, verbose = TRUE, resolution = 1)
# Vizualizing UMAP plot of meriEndoPlast (plastid classes among meri/endo cells)
png("meriEndoPlastUMAP.png", units = "in", width = 10, height = 8, res = 600)
DimPlot(meriEndoPlast, reduction = "umap", label = TRUE, label.size = 7, pt.size = 0.5)
dev.off()
# First plotting expression of 2 known proplastid genes against psuedotime
# THIS GOT ME NOWHERE SO YOU CAN SKIP TO LINE 258 IF YOU WANT
proplastMarkers <- c("LPA3","PAC") # only 2 known proplastid marker genes
meriEndoPlast <- CellCycleScoring(meriEndoPlast, s.features = proplastMarkers, g2m.features = amyloplastMarkers$gene, set.ident = TRUE) # Note that I am using amyloplast markers, but that odesnt really matter --> I just need to give another set of genes (again you can skip this section)
names(meriEndoPlast@meta.data)[names(meriEndoPlast@meta.data) == "S.Score"] <- "proplastScore"
names(meriEndoPlast@meta.data)[names(meriEndoPlast@meta.data) == "G2M.Score"] <- "amyloplastScore" # refer to comment on line 216
proplastScore <- as.data.frame(meriEndoPlast@meta.data[,"proplastScore"]) # scores for proplastid expression from CellCycleScoring()
row.names(proplastScore) <- row.names(meriEndoPlast@meta.data) # cell names as row.names
proplastScore$extra <- 0 # just to keep as data.frame and not list
scorpProg <- meriEndoPtime.scorpius$progressions
scorpProg <- scorpProg[order(scorpProg$percentage),] # just getting progression percentages
row.names(scorpProg) <- scorpProg$cell_id
proplastScore <- proplastScore[row.names(proplastScore) %in% row.names(scorpProg),] # only using cells common between proplastScore and scorpProg data frames
proplastScore$extra <- NULL
scorpProg <- scorpProg[row.names(scorpProg) %in% row.names(proplastScore),] # only using cells common between proplastScore and scorpProg data frames
scorpProg$cell_id <- NULL; scorpProg$from <- NULL; scorpProg$to <- NULL # removing columns that will not be used later
proplastScore$id <- row.names(proplastScore); row.names(proplastScore) <- NULL # making row names into a column ("id")
scorpProg$id <- row.names(scorpProg); row.names(scorpProg) <- NULL
linkage <- merge(scorpProg, proplastScore, by = "id") # merging progressions with expression of proplastid genes
colnames(linkage) <- c("id", "Pseudotime", "ProplastidScore")
plot(linkage$Pseudotime, linkage$ProplastidScore) # plotting ptime vs expression of the 2 proplastid markers
lines(smooth.spline(linkage$Pseudotime, linkage$ProplastidScore, spar = 0.95), lwd=4, col = 'red')
# For some reason, proplastid expression is not decreasing along pseudotime....
# Ideas: maybe check where paseudotime is actually starting and make sure it
# is at meristem. Also, find better marker genes as these could just be
# trashy genes for proplastid expression. Maybe look at plastid active
# differentiation genes. Could also be because I used amyloplast genes,
# which down-regulated some proplastid genes.
# Also, these genes really have nothing to do with proplastids; the were just differentially expressed --> find better genes
# CONSENSUS: bad work done. Try something else
# let's try something else to get better genes...
##=======================================================================================
## Using dynfeature to see cell-type features that change anywhere along the trajectory
##=======================================================================================
# THIS IS ONLY FOR CELL-TYPE GENES (no plastid genes yet -- just trying this method out and validating that it works)
overall_feature_importances <- dynfeature::calculate_overall_feature_importance(meriEndoPtime.scorpius, expression_source= t(meriEndo.obj@assays$SCT@scale.data)) # basically calculating how important each gene contributes to shift from meristem to endodermis along pseudotime
features <- overall_feature_importances %>% top_n(40, importance) %>% pull(feature_id) # getting top 40 genes that contribute to development from meristem to endodermis (most dramatic change in expression along ptime)
# plot_dimred(meriEndoPtime.scorpius) for trajectory again
png("TIheatmap.png", units = "in", width = 16, height = 10, res = 700) # plotting heatmap to show top 20 genes that changed dramatically over ptime from meristem to endodermis
dynplot::plot_heatmap(meriEndoPtime.scorpius, expression_source= t(meriEndo.obj@assays$SCT@scale.data)) #red represents high expression of gene while blue represents low expression
dev.off()
# After looking at top 20 important genes...
# meristem (beiginning) shows high expression of genes involved in differentiation and formation of cell parts
# endodermis (end) shows high expression of genes chracteristic to endodermis, especially casparian strip
# Next, see if I can do this same thing but based on plastid gene expression profiles
# Validating use of pseudotime to evaluate development by showing with cell type genes
# Will score genes from heatmap (red to blue as proplastid, blue to red as endodermis) and plot against pseudotime just for validation
endoCell <- c("DIR24","CASP1","PER72", "DIR18", "AT1G61590") # genes that go from blue to red along pseudotime
meriCell <- c("SMT3", "ATHB-20", "ACT7","AT3G44870","AGP4","PIP2-7","PIP2-2","AGP9","TUBA4","CYSD2","PER3","PER39","PME18", "PAP4", "PER45") # genes that go from red to blue along pseudotime
meriEndo.obj <- CellCycleScoring(meriEndo.obj, s.features = endoCell, g2m.features = meriCell, set.ident = TRUE) # scoring cells based on endo/meri genes
names(meriEndo.obj@meta.data)[names(meriEndo.obj@meta.data) == "S.Score"] <- "EndoCellGE" # renaming meta data columns
names(meriEndo.obj@meta.data)[names(meriEndo.obj@meta.data) == "G2M.Score"] <- "MeriCellGE"
endoScore <- as.data.frame(meriEndo.obj@meta.data[,"EndoCellGE"])
meriScore <- as.data.frame(meriEndo.obj@meta.data[,"MeriCellGE"])
cellScores <- cbind(endoScore, meriScore) # merging meri and endo scores
colnames(cellScores) <- c("endoScore", "meriScore")
cellScores$id <- row.names(meriEndo.obj@meta.data)
# scorpProg <- meriEndoPtime.scorpius$progressions
# scorpProg <- scorpProg[order(scorpProg$percentage),]
cellScorePtime <- merge(scorpProg, cellScores, by = "id") # merging scorpius prgressions wth endo/meri scores
png("./validatePtimeCell.png", units = "in", width = 10, height = 8, res = 600) # plotting scores versus pseudotime (again, just validation since I used cell-type genes)
plot(cellScorePtime$percentage, cellScorePtime$endoScore, main = "Cell-Type Gene Expression Along Pseudotime", ylab = "Cell-Type Gene Expression", xlab = "Progression Along Pseudotime", col = "navy blue", pch = 16, ylim = c(-4,4))
abline(lm(cellScorePtime$endoScore ~ cellScorePtime$percentage), lwd = 3, col = "blue", lty = 2)
points(cellScorePtime$percentage, cellScorePtime$meriScore, col = "dark red", pch = 16)
abline(lm(cellScorePtime$meriScore ~ cellScorePtime$percentage), lwd = 3, col = "red", lty = 2)
dev.off()
##===========================================================================================
## Using dynfeature to see plastid-type features that change anywhere along the trajectory
## Trying to find plastid genes that dramatically change in expresion
## somehere along the trajectory by feeding in plastid gene expression
## instead of cell type gene expression
##===========================================================================================
pExp <- t(meriEndoPlast@assays$SCT@scale.data) # sclaed expression of PLASTID genes from meriEndoPlast object
pExp <- pExp[row.names(pExp) %in% meriEndoPtime.scorpius$cell_ids,] # only using cell names that are in the pseudotime ordering
overall_pfeature_importances <- dynfeature::calculate_overall_feature_importance(meriEndoPtime.scorpius, expression_source= pExp) # see line 259
pfeatures <- overall_feature_importances %>% top_n(40, importance) %>% pull(feature_id) # see line 260
# plot_dimred(meriEndoPtime.scorpius)
png("TIheatmapPlastid.png", units = "in", width = 16, height = 10, res = 700)
dynplot::plot_heatmap(meriEndoPtime.scorpius, expression_source= pExp) #red represents high expression of gene while blue represents low expression
dev.off()
# Looking at proplastid and "endodermis-plastid" gene expression versus cell development (progression along pseudotime)
endoPlast <- c("NCED3", "NUDT17", "GSTF8", "E1-BETA-2") # genes that go from blue to red along pseudotime (endodermal plastid genes)
meriPlast <- c("PSP","GLT1","KAS1","CYP74A","BCCP2","CAC2","MOD1","PDH-E1 BETA","LOX4","PYD1","IPP1","BCCP1","DAD1","PPA6", "DRP1C", "AT2G31670") # genes that go from red to blue along pseudotime (proplastid genes)
meriEndoPlast <- CellCycleScoring(meriEndoPlast, s.features = endoPlast, g2m.features = meriPlast, set.ident = TRUE) # givng each cell a proplastid and endodermal score
names(meriEndoPlast@meta.data)[names(meriEndoPlast@meta.data) == "S.Score"] <- "EndoPlastGE"
names(meriEndoPlast@meta.data)[names(meriEndoPlast@meta.data) == "G2M.Score"] <- "MeriPlastGE"
endoScorePlast <- as.data.frame(meriEndoPlast@meta.data[,"EndoPlastGE"])
meriScorePlast <- as.data.frame(meriEndoPlast@meta.data[,"MeriPlastGE"])
plastScores <- cbind(endoScorePlast, meriScorePlast)
colnames(plastScores) <- c("endoScore", "meriScore")
plastScores$id <- row.names(meriEndoPlast@meta.data)
# scorpProg <- meriEndoPtime.scorpius$progressions
# scorpProg <- scorpProg[order(scorpProg$percentage),]
plastScorePtime <- merge(scorpProg, plastScores, by = "id") # merging scores and progressions
png("./ptimePlastid.png", units = "in", width = 10, height = 8, res = 600) # plotting plastid dev (pExp) vs cell dev (ptime)
plot(plastScorePtime$percentage, plastScorePtime$endoScore, main = "Plastid-Type Gene Expression Along Pseudotime", ylab = "Plastid-Type Gene Expression", xlab = "Progression Along Pseudotime", col = "navy blue", pch = 16)
abline(lm(plastScorePtime$endoScore ~ plastScorePtime$percentage), lwd = 3, col = "blue", lty = 2)
points(plastScorePtime$percentage, plastScorePtime$meriScore, col = "dark red", pch = 16)
abline(lm(plastScorePtime$meriScore ~ plastScorePtime$percentage), lwd = 3, col = "red", lty = 2)
dev.off()
summary(lm(plastScorePtime$endoScore ~ plastScorePtime$percentage)) # seeing if slopes are significant
summary(lm(plastScorePtime$meriScore ~ plastScorePtime$percentage))
##==============================================================================
## Plotting expression of single plastid genes along pseudotime
##==============================================================================
## Each stanza below: look up one gene's row in the SCT-scaled expression
## matrix, merge its per-cell expression with the pseudotime progressions
## (scorpProg), and write a scatter plot with a smoothing spline to PNG.
## NOTE(review): the row indices (571, 822, 251, 817, 316, 514) were copied
## by hand from the printed which() results. They silently break if the
## matrix changes; prefer using the which() result directly (see plotGEptime
## further down for a reusable version).
# NCED3
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "NCED3") # finding which row of expression matrix is this plastid gene
NCED3 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[571,]) # expression data for each cell
NCED3$ids <- row.names(NCED3) # making cell names as a column not row name
colnames(NCED3) <- c("GE", "id")
row.names(NCED3) <- NULL
ptNCED3 <- merge(NCED3, scorpProg, by = "id") # merging expression data with ptime progression
png("./NCED3.png", units = "in", width = 10, height = 6, res = 400) # plotting expression of genes versus ptime progression
plot(ptNCED3$percentage, ptNCED3$GE, pch = 16, col= "gray", main = "NCED3", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptNCED3$percentage, ptNCED3$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# IPP1 (annotations above applied below)
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "IPP1")
IPP1 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[822,])
IPP1$ids <- row.names(IPP1)
colnames(IPP1) <- c("GE", "id")
row.names(IPP1) <- NULL
ptIPP1 <- merge(IPP1, scorpProg, by = "id")
png("./IPP1.png", units = "in", width = 10, height = 6, res = 400)
plot(ptIPP1$percentage, ptIPP1$GE, pch = 16, col= "gray", main = "IPP1", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptIPP1$percentage, ptIPP1$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# LOX4
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "LOX4")
LOX4 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[251,])
LOX4$ids <- row.names(LOX4)
colnames(LOX4) <- c("GE", "id")
row.names(LOX4) <- NULL
ptLOX4 <- merge(LOX4, scorpProg, by = "id")
png("./LOX4.png", units = "in", width = 10, height = 6, res = 400)
plot(ptLOX4$percentage, ptLOX4$GE, pch = 16, col= "gray", main = "LOX4", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptLOX4$percentage, ptLOX4$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# KAS1
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "KAS1")
KAS1 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[817,])
KAS1$ids <- row.names(KAS1)
colnames(KAS1) <- c("GE", "id")
row.names(KAS1) <- NULL
ptKAS1 <- merge(KAS1, scorpProg, by = "id")
png("./KAS1.png", units = "in", width = 10, height = 6, res = 400)
plot(ptKAS1$percentage, ptKAS1$GE, pch = 16, col= "gray", main = "KAS1", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptKAS1$percentage, ptKAS1$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# NUDT17
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "NUDT17")
NUDT17 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[316,])
NUDT17$ids <- row.names(NUDT17)
colnames(NUDT17) <- c("GE", "id")
row.names(NUDT17) <- NULL
ptNUDT17 <- merge(NUDT17, scorpProg, by = "id")
png("./NUDT17.png", units = "in", width = 10, height = 6, res = 400)
plot(ptNUDT17$percentage, ptNUDT17$GE, pch = 16, col= "gray", main = "NUDT17", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptNUDT17$percentage, ptNUDT17$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# GSTF8
which(row.names(meriEndoPlast@assays$SCT@scale.data) == "GSTF8")
GSTF8 <- as.data.frame(meriEndoPlast@assays$SCT@scale.data[514,])
GSTF8$ids <- row.names(GSTF8)
colnames(GSTF8) <- c("GE", "id")
row.names(GSTF8) <- NULL
ptGSTF8 <- merge(GSTF8, scorpProg, by = "id")
png("./GSTF8.png", units = "in", width = 10, height = 6, res = 400)
plot(ptGSTF8$percentage, ptGSTF8$GE, pch = 16, col= "gray", main = "GSTF8", xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
lines(smooth.spline(ptGSTF8$percentage, ptGSTF8$GE, spar = 0.95), lwd=4, col = 'red')
dev.off()
# Reusable version of the per-gene stanzas above: plot one gene's scaled
# expression along pseudotime with a smoothing spline.
#
# Improvements over the original:
#  - the Seurat object and progressions table are parameters with defaults
#    referencing the same globals the original used (backward compatible:
#    plotGEptime("NCED3") behaves as before),
#  - the gene row is looked up once and validated, so a missing gene fails
#    with a clear error instead of a cryptic one downstream,
#  - the smoothing parameter is exposed (default 1, as in the original).
#
# Args:
#   gene         gene symbol to look up in row.names of the scaled matrix
#   seuratObj    Seurat object holding an SCT assay with @scale.data
#   progressions data frame with columns `id` (cell barcode) and `percentage`
#   spar         smoothing parameter forwarded to smooth.spline()
plotGEptime <- function(gene, seuratObj = meriEndoPlast, progressions = scorpProg, spar = 1){
  exprMat <- seuratObj@assays$SCT@scale.data
  rowIdx <- which(row.names(exprMat) == gene)
  if (length(rowIdx) == 0) {
    stop("Gene not found in scaled expression matrix: ", gene)
  }
  geneName <- as.data.frame(exprMat[rowIdx, ])
  geneName$ids <- row.names(geneName)
  colnames(geneName) <- c("GE", "id")
  row.names(geneName) <- NULL
  ptgeneName <- merge(geneName, progressions, by = "id")
  plot(ptgeneName$percentage, ptgeneName$GE, pch = 16, col = "gray", main = gene, xlab = "Pseudotime", ylab = "Gene Expression", cex = 1.1)
  lines(smooth.spline(ptgeneName$percentage, ptgeneName$GE, spar = spar), lwd = 4, col = 'red')
}
##========================================================================
## Checking if plastid genes are just "housekeeping" genes
## Are those genes changing dramatically just because they are being
## downregulated by the high expression of endodermal genes toward
## the end of the trajectory??
## Try to see how much other clusters express those genes....
##========================================================================
# Feature plots of the meristem-plastid signature genes across all clusters
png("./propGenesClust.png", units = "in", width = 20, height = 20, res = 1000)
FeaturePlot(NDpWTdata.psub, features = meriPlast)
dev.off()
# Feature plots of the endodermal-plastid signature genes across all clusters
png("./endoGenesClust.png", units = "in", width = 8, height = 8, res = 1000)
FeaturePlot(NDpWTdata.psub, features = endoPlast)
dev.off()
## Making a function to get expression of each a gene within a cluster
# Scaled expression matrix used by meanExp.byClust() below (genes x cells)
pGeneExp <- NDpWTdata.psub@assays$SCT@scale.data
# Mean scaled expression of `gene` within each cluster of NDpWTdata.psub.
# Relies on the globals `pGeneExp` (scaled genes-x-cells matrix) and
# `NDpWTdata.psub` (for cluster identities). Returns a data.frame with
# columns `ident` (cluster) and `average` (mean expression in that cluster).
meanExp.byClust <- function(gene){
  # Per-cell expression for the requested gene, keyed by cell barcode
  expr.df <- as.data.frame(pGeneExp[row.names(pGeneExp) == gene,])
  expr.df$ids <- row.names(expr.df)
  row.names(expr.df) <- NULL
  colnames(expr.df) <- c("Exp", "ids")
  # Cluster identity of every cell, keyed the same way
  clust.df <- as.data.frame(NDpWTdata.psub@active.ident)
  clust.df$ids <- row.names(clust.df)
  row.names(clust.df) <- NULL
  colnames(clust.df) <- c("ident", "ids")
  combined <- merge(clust.df, expr.df, by = "ids")
  # Average expression per cluster
  cluster.means <- combined %>%
    group_by(ident) %>%
    summarize(average = mean(Exp))
  as.data.frame(cluster.means)
}
# Build a clusters-x-genes table of mean expression for the meristem-plastid
# gene set, one column per gene, by repeatedly merging on cluster ident.
# NOTE(review): `ident = 1:13` hard-codes 13 clusters -- confirm this matches
# the actual number of clusters in NDpWTdata.psub. The seed columns
# (ident, Exp) are dropped at the end, so row order implicitly encodes the
# cluster mapping.
propGE <- data.frame(ident = 1:13, Exp = 0)
for(i in meriPlast){
x <- meanExp.byClust(i)
colnames(x) <- c("ident", i)
propGE <- merge(propGE, x, by = "ident")
}
propGE <- propGE[,-(1:2)]
# Same construction for the endodermal-plastid gene set
endoGE <- data.frame(ident = 1:13, Exp = 0)
for(i in endoPlast){
x <- meanExp.byClust(i)
colnames(x) <- c("ident", i)
endoGE <- merge(endoGE, x, by = "ident")
}
endoGE <- endoGE[,-(1:2)]
##============================================
## Comparing embeddings (original vs Re-...)
##============================================
# Rebuild the progressions table for the ORIGINAL embedding in the same
# `id`/`percentage` shape used above, so both embeddings can be plotted
# side by side against the plastid scores.
scorpProg.orig <- ptimemeriEndo$progressions
scorpProg.orig <- scorpProg.orig[order(scorpProg.orig$percentage),]
row.names(scorpProg.orig) <- scorpProg.orig$cell_id
scorpProg.orig$cell_id <- NULL; scorpProg.orig$from <- NULL; scorpProg.orig$to <- NULL
scorpProg.orig$id <- row.names(scorpProg.orig); row.names(scorpProg.orig) <- NULL
plastScorePtime.orig <- merge(scorpProg.orig, plastScores, by = "id")
# Side-by-side panels: original embedding (left) vs re-processed (right),
# each showing endodermal (blue) and meristem (red) scores with linear fits
png("./compareEmbeddings.png", units = "in", width = 21, height = 8, res = 600)
par(mfrow = c(1,2))
plot(plastScorePtime.orig$percentage, plastScorePtime.orig$endoScore, main = "Original Embedding", ylab = "Plastid-Type Gene Expression", xlab = "Progression Along Pseudotime", col = "navy blue", pch = 16, ylim = c(-2,3), cex = 1)
points(plastScorePtime.orig$percentage, plastScorePtime.orig$meriScore, col = "dark red", pch = 16, cex = 1)
abline(lm(plastScorePtime.orig$meriScore ~ plastScorePtime.orig$percentage), lwd = 3, col = "red", lty = 2)
abline(lm(plastScorePtime.orig$endoScore ~ plastScorePtime.orig$percentage), lwd = 3, col = "blue", lty = 2)
plot(plastScorePtime$percentage, plastScorePtime$endoScore, main = "Re-Normalized, Re-Scaled, Re-Clustered Embedding", ylab = "Plastid-Type Gene Expression", xlab = "Progression Along Pseudotime", col = "navy blue", pch = 16)
abline(lm(plastScorePtime$endoScore ~ plastScorePtime$percentage), lwd = 3, col = "blue", lty = 2)
points(plastScorePtime$percentage, plastScorePtime$meriScore, col = "dark red", pch = 16)
abline(lm(plastScorePtime$meriScore ~ plastScorePtime$percentage), lwd = 3, col = "red", lty = 2)
dev.off()
# Compare how gradually cells progress along pseudotime in each embedding
png("./compareEmbeddings_progression.png", units = "in", width = 10, height = 10, res = 600)
plot(sort(ptimemeriEndo$progressions$percentage), col = "navy blue", main = "\"Gradualness\" of Pseudotime Progression", xlab = "Cell Ordering", ylab = "Progression Along Pseudotime")
points(sort(plastScorePtime$percentage), col = "dark red")
legend("bottomright", legend = c("Original Embedding", "Re-Normalized, Re-Scaled, Re-Clustered Embedding"), pch = 16, col = c("blue", "red"))
dev.off()
# Heatmap of expression along the original trajectory, restricted to the
# trajectory's cells (note: reuses the name `x`, shadowing any earlier `x`)
x <- t(NDpWTdata.psub@assays$SCT@scale.data)
x <- x[row.names(x) %in% ptimemeriEndo$cell_ids,]
dynplot::plot_heatmap(ptimemeriEndo, expression_source= x)
## Running pseudotime with plastid genes only (2/15/20) RE-EMBEDDING
# NOTE(review): the section title says "plastid genes only" while the inline
# comment on the subset() call below says "non plastid genes" -- confirm which
# gene set `gene.uncur` actually is (it is defined elsewhere in the file).
meriEndo.names <- names(NDpWTdata@active.ident[NDpWTdata@active.ident == "Endodermis" | NDpWTdata@active.ident == "Meristem"]) # endodermal and meristematic cells names
meriEndoPlast.obj <- merge(WT1.obj, y = c(WT2.obj, WT3.obj), add.cell.ids = c("R1", "R2", "R3"), project = "plantRNA") # creating Seurat object with all Ryu data first
meriEndoPlast.obj <- subset(meriEndoPlast.obj, features = gene.uncur, cells = meriEndo.names) # subsetting Seurat object with only meri/endo genes and non plastid genes
# Basic quality control and clustering workflow
meriEndoPlast.obj[["percent.mt"]] <- PercentageFeatureSet(meriEndoPlast.obj, pattern = "^MT-")
VlnPlot(meriEndoPlast.obj, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
meriEndoPlast.obj <- subset(meriEndoPlast.obj, subset = nFeature_RNA < 480 & nCount_RNA < 2500)
meriEndoPlast.obj <- SCTransform(meriEndoPlast.obj, verbose = TRUE)
meriEndoPlast.obj <- RunPCA(meriEndoPlast.obj, verbose = TRUE)
meriEndoPlast.obj <- RunTSNE(meriEndoPlast.obj, verbose = TRUE)
meriEndoPlast.obj <- RunUMAP(meriEndoPlast.obj, dims = 1:30, verbose = TRUE)
meriEndoPlast.obj <- FindNeighbors(meriEndoPlast.obj, dims = 1:30, verbose = TRUE)
meriEndoPlast.obj <- FindClusters(meriEndoPlast.obj, verbose = TRUE, resolution = 0.5)
# Labeling meristem and endodermis clusters
# NOTE(review): this assumes FindClusters produced exactly two clusters at
# resolution 0.5; RenameIdents will error or mislabel if it did not.
newClusterIDs <- c("Meristem", "Endodermis")
names(newClusterIDs) <- levels(meriEndoPlast.obj)
meriEndoPlast.obj <- RenameIdents(meriEndoPlast.obj, newClusterIDs)
# Vizualizing UMAP plot of meristem and endodermis cells
DimPlot(meriEndoPlast.obj, reduction = "umap", label = TRUE, label.size = 7, pt.size = 0.5)
# Build expression/count matrices of the variable genes for trajectory inference
ptimePlast.meriEndo <- subset(meriEndoPlast.obj, features = gene.uncur)
ptimePlast.meriEndo <- FindVariableFeatures(ptimePlast.meriEndo)
varPlastGenes.meriEndo <- VariableFeatures(ptimePlast.meriEndo)
allPlastExp.meriEndo <- Matrix::t(ptimePlast.meriEndo@assays$SCT@scale.data)
allPlastCount.meriEndo <- Matrix::t(ptimePlast.meriEndo$SCT@counts)
varPlastExp.meriEndo <- allPlastExp.meriEndo[,colnames(allPlastExp.meriEndo) %in% varPlastGenes.meriEndo]
varPlastCount.meriEndo <- allPlastCount.meriEndo[,colnames(allPlastCount.meriEndo) %in% varPlastGenes.meriEndo]
# Creating (called "wrapping") the dynverse object
meriEndoPlast.dyn <- wrap_expression(
expression = varPlastExp.meriEndo,
counts = varPlastCount.meriEndo
)
# Attach cluster identities and the trajectory start cells (meristem)
meriEndoPlast.dyn <- add_grouping(meriEndoPlast.dyn, ptimePlast.meriEndo@active.ident)
meriEndoPlast.dyn <- add_prior_information(meriEndoPlast.dyn, start_id = meriNames)
# Infer the SCORPIUS trajectory on the plastid-gene re-embedding
ptimePlastMeriEndo <- infer_trajectory(meriEndoPlast.dyn, ti_scorpius(), verbose = TRUE)
ptimePlastMeriEndo.simp <- simplify_trajectory(ptimePlastMeriEndo)
# Visualizing the trajectory.
# FIX(review): the grouping previously used ptimeNoPlast.meriEndo@active.ident,
# i.e. the cluster identities of a *different* embedding whose cell set need
# not match this one (this object was additionally QC-filtered). Use the
# identities of the object this trajectory was actually built from -- the same
# grouping attached via add_grouping() above.
plot_dimred(ptimePlastMeriEndo, label_milestones = TRUE, color_cells = "grouping", size_cells = 1.5, grouping = ptimePlast.meriEndo@active.ident) + ggtitle("SCORPIUS TI Method used with Mersitem and Endodermis PLASTID GENES ONLY (Re-embedded)")
|
3307bbad388879e3ebb7a1120b329247142d9b49
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Probability_And_Statistics_For_Engineers_And_Scientists_by_Ronald_E._Walpole,_Raymond_H._Myers,_Sharon_L._Myers,_Keying_Ye/CH11/EX11.3/Ex11_3.R
|
0702fe556ca80384ead8849f59ecfb017044c84e
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 1,267
|
r
|
Ex11_3.R
|
# Chapter 11
# Example 11.3 page no. 404 from the pdf..
# Hypothesis Testing On Slope Of Regression Line..
# to test the hypothesis of beta1 slope of regression line..
# Null - beta1 = 1.0, alternative < 1.0
# Observed predictor/response pairs from the textbook example
x <- c(3,7,11,15,18,27,29,30,30,31,31,32,33,33,34,36,36,36,37,38,39,39,39,40,41,42,42,43,44,45,46,47,50)
y <- c(5,11,21,16,16,28,27,25,35,30,40,32,34,32,34,37,38,34,36,38,37,36,45,39,41,40,44,37,44,46,46,49,51)
pol <- data.frame(x,y)
# Fit the simple linear regression y = beta0 + beta1 * x
l <- lm(y~x,data = pol)
coefficients(l)
# One-sided p-value for H0: the `conum`-th coefficient of the fitted model
# `reg_m` equals `val`. Computes t = (estimate - val) / SE and returns the
# upper-tail probability of |t| -- i.e. always the smaller of the two
# one-sided tail areas, matching the direction of the observed deviation.
p_value <- function(reg_m,conum,val){
  coef_tab <- coef(summary(reg_m))
  estimate <- coef_tab[conum, 1]
  std_err <- coef_tab[conum, 2]
  t_stat <- (estimate - val) / std_err
  pt(abs(t_stat), df = reg_m$df.residual, lower.tail = FALSE)
}
# Report the one-sided test of H0: beta1 = 1.0 vs H1: beta1 < 1.0
cat("Since the p value is",p_value(l,2,1)," less than 0.05 suggesting strong evidence that beta1 < 1.0")
# an alternate method is to use the library car and the use linearHypothesis function, since this is one sided hypothesis
# we need to divide the result by 2 as the function does 2 sided hypothesis..
# If "car" package not installed can be installed by install.packages("car")
library(car)
# NOTE(review): dividing the linearHypothesis() result by 2 halves EVERY
# numeric column of the returned ANOVA-style table (Df, RSS, F, ...), not
# just the p-value. Only the halved Pr(>F) column is meaningful here.
linearHypothesis(l,hypothesis.matrix = c(0,1),rhs = 1)/2
print("Using linearHypothesis function also we get the same result and we can derive the same inference as before.")
|
c4d744a295c113caab8fa38f6b9bbdda9d93c0e4
|
ed9c3b1ae7b4bc58fb287ce22ecf1fedbdb0e7e5
|
/A-MDR.R
|
d56205ce718a483575c0619eaa32e6485e2bbb37
|
[] |
no_license
|
SJAndrews/MDR
|
ee4fdb8743546b58e25d1ca00b2c84a62aedcf51
|
359d0aea7c8b4c241b305585e5bb9af79017f7f3
|
refs/heads/master
| 2021-01-10T15:34:34.131580
| 2015-11-18T04:43:42
| 2015-11-18T04:43:42
| 46,376,297
| 0
| 0
| null | 2015-11-18T04:43:43
| 2015-11-17T21:23:05
|
R
|
UTF-8
|
R
| false
| false
| 5,597
|
r
|
A-MDR.R
|
##-----------------------------------------##
# Functions
##-----------------------------------------##
# Classify one genotypic combination as high (1) or low (0) risk (pi0, eq. p.3):
#   x = c(case_count, control_count) for the combination
#   z = vector of 0/1 disease labels for the whole sample
# High risk when the combination's case fraction meets or exceeds the
# overall case rate. (Returns NA if the case fraction is 0/0.)
pi0.func <- function(x, z){
  combo_case_rate <- x[1] / (x[1] + x[2])
  overall_case_rate <- sum(z) / length(z)
  ifelse(combo_case_rate >= overall_case_rate, 1, 0)
}
# Count the rows of `mat` whose first (k + 1) columns equal the corresponding
# entries of `vec`. Used by the MDR routine to tally phenotype + genotype
# combinations (column 1 is the phenotype, columns 2..k+1 the genotypes).
# (Adapted from package MDR.)
#
# FIX(review): the original seeded the candidate rows with `1:dim(mat)[1]`,
# which yields c(1, 0) for a zero-row matrix and then errors on out-of-bounds
# indexing; seq_len() returns integer(0) so an empty matrix now counts 0.
compare <- function (mat, vec, k)
{
  # Rows still matching after each successive column filter
  surviving <- seq_len(dim(mat)[1])
  for (col_idx in seq_len(k + 1)) {
    surviving <- surviving[mat[surviving, col_idx] == as.numeric(vec[col_idx])]
  }
  return(length(surviving))
}
##-----------------------------------------##
#data <- subset(mdr1, select = c(SNP.1, SNP.2, SNP.3, SNP.4, SNP.5, Response))
##-----------------------------------------##
# A-MDR Function
##-----------------------------------------##
# Multifactor Dimensionality Reduction with permutation-based p-values.
# `data`: SNP columns followed by a final 0/1 disease column. For every
# `loci`-way SNP interaction, genotype combinations are classified high/low
# risk (pi0.func), summarized into a 2x2 risk table, and pOR / pRR / chi-square
# statistics are computed; `n.repeat` label permutations give empirical
# p-values. `missing` is a sentinel fill value for preallocated outputs.
CMDR<-function(data=DATA,n.repeat=100,missing=999,loci=2,alpha=0.05, genotype = c(0,1,2)){
# Rows 1..n.repeat are permuted disease labels; row n.repeat+1 is the observed labels
PermData<-rbind(t(apply(matrix(data[,dim(data)[2]], n.repeat, dim(data)[1],byrow=TRUE),1,sample)),data[,dim(data)[2]]) #permuted data
interaction <- t(combn(dim(data)[2]-1, loci)) #all loci-way SNP index combinations
n_interaction <- dim(interaction)[1] #number of SNPxSNP interactions
group<-data[,dim(data)[2]] #observed disease labels (NOTE(review): unused below)
RefOR<-RefRR<-Refstat<-matrix(missing, nrow=n_interaction,ncol=n.repeat+1) #per-permutation results
pOR<-pRR<-pstat<-c(missing,n_interaction) #placeholders (overwritten inside the loop)
g <- length(genotype) #number of genotypes
geno <- list(genotype)
geno.comb <- expand.grid(rep(geno, loci)) #possible genotypic combinations
hr.lr <- as.data.frame(matrix(missing, nrow = n_interaction, ncol = (1 + g^loci))) #hr and lr genotypic combinations for interactions
hr.lr[,1] <- apply(interaction, 1, paste, collapse = "-") #interaction labels, e.g. "1-3"
colnames(hr.lr) <- c("interaction", apply(geno.comb, 1, paste, collapse = "-")) #names of genotypic combinations
#loop for running through permuted datasets (last iteration = observed labels)
for (i2 in 1:(n.repeat+1))
{
y<-PermData[i2,] #disease labels for this permutation (cases vs controls)
stat <-OR<-RR<-pstat<-pOR<-pRR<-rep(NULL,n_interaction) #reset per-permutation vectors
lowrisk<-highrisk<-matrix(999, nrow=n_interaction,ncol=2) #predisposing risk tables
colnames(lowrisk) = c('case', 'control'); colnames(highrisk) = c('case', 'control')
case <- cbind(rep(1, g^loci), expand.grid(rep(geno, loci))) #phenotype 1 + every genotype combo
ctrl <- cbind(rep(0, g^loci), expand.grid(rep(geno, loci))) #phenotype 0 + every genotype combo
counts <- matrix(0, dim(case)[1], 3); colnames(counts) <- c('case', 'ctrl', 'ratio') #k-way genotype combinations - hr/lr
#loop through GxG interactions to determine high/low risk genotypes
for (j1 in 1:n_interaction)
{
model <- interaction[j1, ] #the SNP columns in this interaction
part <- data[,c(model)]
part <- cbind(y, part) #phenotype first, then the interacting SNPs
counts[, 1] <- apply(case, 1, compare, mat = part, k = loci) #cases per genotype combo
counts[, 2] <- apply(ctrl, 1, compare, mat = part, k = loci) #controls per genotype combo
counts[, 3] <- apply(counts, 1, pi0.func, y) #high(1)/low(0) risk call per combo
hr.lr[j1, 2:ncol(hr.lr)] <- counts[,3] #NOTE(review): overwritten each i2; ends holding the observed-label calls
#predisposing risk table, pg4
#sum highrisk/lowrisk cases/controls
highrisk[j1,] <- c(sum(counts[counts[,3] == 1, 1], na.rm = TRUE), sum(counts[counts[,3] == 1, 2], na.rm = TRUE))
lowrisk[j1,] <- c(sum(counts[counts[,3] == 0, 1], na.rm = TRUE), sum(counts[counts[,3] == 0, 2], na.rm = TRUE))
OR[j1]<-highrisk[j1,1]*lowrisk[j1,2]/highrisk[j1,2]/lowrisk[j1,1] #pOR, pg3 eq1
RR[j1]<-highrisk[j1,1]/sum(highrisk[j1,])/lowrisk[j1,1]*sum(lowrisk[j1,]) #pRR, pg4 eq2
stat[j1]<-chisq.test(rbind(lowrisk[j1,],highrisk[j1,]),correct=FALSE)$statistic #pChi pg5 eq3
}
RefOR[,i2]<-OR #column i2 holds this permutation's pOR values
RefRR[,i2]<-RR #and pRR values
Refstat[,i2]<-stat #and chi-square statistics
}
# After the loop, stat/OR/RR hold the LAST iteration's values, i.e. those for
# the observed (unpermuted) labels -- compare them against the n.repeat
# permutation columns to get empirical p-values.
for (i3 in 1:length(stat))
{
pstat[i3]<-sum(Refstat[i3,1:n.repeat]>stat[i3])/n.repeat #pvalue for pChi
pOR[i3]<-sum(RefOR[i3,1:n.repeat]>OR[i3])/n.repeat #pvalue for pOR
pRR[i3]<-sum(RefRR[i3,1:n.repeat]>RR[i3])/n.repeat #pvalue for PRR
}
out <- list(data,interaction,OR,pOR,RR,pRR,stat,pstat,hr.lr)
names(out) <- c("data","interaction","OR","pOR","RR","pRR","stat","pstat","hr.lr")
out
#return(stat)
}
# Example invocation: single-locus analysis with 10 permutations.
# NOTE(review): requires `data` to be defined first (see the commented
# subset() example above the CMDR definition).
CMDR_output<-CMDR(data=data, loci = 1, n.repeat = 10)
CMDR_output
|
9b3791d2a45c805286af694dd49c5a197d6a533b
|
2785f694ee390cfe78b189e9956ff8c58fe68ac6
|
/R/est_pow_het.R
|
d2aa966c2a82f13c3f5738a8823b7651a3c54d9b
|
[] |
no_license
|
VanAndelInstitute/bifurcatoR
|
e8400afad194e41802f5156ba6a6184e6d8d1d5b
|
9346700eb392f494d79485c9734c2a1e54996219
|
refs/heads/main
| 2023-08-17T04:08:22.138059
| 2023-05-30T17:40:16
| 2023-05-30T17:40:16
| 452,341,124
| 6
| 5
| null | 2023-08-30T16:00:10
| 2022-01-26T15:59:38
|
R
|
UTF-8
|
R
| false
| false
| 578
|
r
|
est_pow_het.R
|
#' est_pow_het
#'
#' Monte Carlo power estimate for detecting heteroscedasticity between two
#' simulated groups using olsrr's F test (`ols_test_f`).
#'
#' @param n total sample size; the groups receive round(n * p) and
#'   round(n * abs(1 - p)) observations respectively
#' @param p proportion of observations in the first group
#' @param alpha significance level for rejecting homoscedasticity
#' @param x standard deviation of the second group (first group has sd 1)
#' @param nsim number of simulations
#'
#' @return the estimated power (rejection rate over nsim simulations),
#'   rounded to 4 decimal places
#'
#' @export
est_pow_het <- function(n,p,alpha,x,nsim){
  # Group sizes are invariant across simulations -- compute once
  n1 <- round(n * p)
  n2 <- round(n * abs(1 - p))
  # One rejection indicator (0/1) per simulated dataset.
  # seq_len(nsim) guards against the 1:0 footgun when nsim == 0;
  # vapply pins the return type (sapply could silently change shape).
  rejected <- vapply(seq_len(nsim), function(sim_idx) {
    sim_df <- data.frame(
      val = c(rnorm(n1, 0, 1), rnorm(n2, 0, x)),
      g = c(rep("x", n1), rep("y", n2))
    )
    # Reject homoscedasticity when the F test's p-value is below alpha
    as.numeric(ols_test_f(lm(val ~ g, sim_df))$p < alpha)
  }, numeric(1))
  power <- sum(rejected) / nsim
  return(round(power, 4))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.