blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a35874725ee4df6e2f7819b767351e68cc8e97b
|
9f347509f8cf393cb4d9c1363b87c7726104e63b
|
/other/TCGA.r
|
cfd28b6a22e8ceeedaf5587b1824feb7c89def05
|
[] |
no_license
|
cbg-ethz/nempi
|
c162eae3ea55e8c17d68899647856ebb54534df5
|
49f71a22e912e3681f1a37652c65fe20a25647dc
|
refs/heads/master
| 2023-05-27T04:07:39.080662
| 2023-04-25T15:25:29
| 2023-04-25T15:25:29
| 206,054,343
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40,598
|
r
|
TCGA.r
|
library(snowfall)
library(TCGAbiolinks)
## Resolve the set of TCGA projects to process.
## If the caller did not predefine `types`, pull every GDC project id and
## build `dont`, the set of non-TCGA program prefixes (e.g. "FM-") used to
## skip projects such as "FM-AD" in the main loop below; also allocate a
## per-project tumor/normal sample counter.
## NOTE(review): relies on the unexported TCGAbiolinks:::getGDCprojects().
if (is.null(types)) {
types <- TCGAbiolinks:::getGDCprojects()$project_id
## special "FM-AD"
dont <- paste0(unique(gsub("-.*", "", types)), "-")
dont <- dont[-grep("TCGA", dont)]
## per-project tumor/normal counts, filled at the end of the main loop
samplenr <- matrix(NA, length(types), 2)
rownames(samplenr) <- types
donr <- TRUE
snrcount <- 0
} else {
## caller-supplied types: skip-pattern matches nothing, counting disabled
dont <- "AGCT"
donr <- FALSE
}
## Seed matrix collecting per-project sample-class counts (one row is
## appended per project in the main loop) and the cache directory used for
## all intermediate .rda files.
sizemat <- matrix(0, nrow = 1, ncol = 2,
                  dimnames = list("", c("Tumor", "Normal")))
path <- "mutclust/"
## Main per-project pipeline: for every TCGA project in `types`, download
## (or load from cache) methylation, copy-number, expression, mutation and
## clinical data, turn expression into gene-wise log-likelihood ratios, and
## cache the combined result as <path><type>_final.rda.
## Globals used: types, dont, donr, snrcount, samplenr (setup above) and the
## caller-defined flags newmut, newllr, newsave, nmut.
for (type in types) {
  if (donr) {
    snrcount <- snrcount + 1
  }
  ## skip non-TCGA programs (e.g. "FM-AD")
  if (length(grep(paste(dont, collapse = "|"), type)) > 0) { next() }
  print(type)
  if (file.exists(paste0(path, type, "_final.rda")) & !newmut & !newllr & !newsave) {
    load(paste0(path, type, "_final.rda"))
  } else {
    summ <- TCGAbiolinks:::getProjectSummary(type)
    library(SummarizedExperiment)
    ## get methylation:
    if (file.exists(paste0(path, type, "_meth.rda"))) {
      load(paste0(path, type, "_meth.rda"))
    } else {
      data <- GDCquery(project = paste(type, sep = ""),
                       sample.type = "Primary Tumor",
                       data.category = "DNA methylation",
                       data.type = "Methylation beta value",
                       legacy = TRUE
                       )
      GDCdownload(data)
      data <- GDCprepare(data, summarizedExperiment = 0)
      ## map methy sites to genes:
      library(methyAnalysis)
      if (is.data.frame(data)) {
        meth2 <- as.matrix(data[, -(1:3)])
        rownames(meth2) <- gsub(";.*", "", data[, 1])
      } else {
        meth <- data@rowRanges
        library(TxDb.Hsapiens.UCSC.hg19.knownGene)
        probe2gene <- annotateDMRInfo(meth, 'TxDb.Hsapiens.UCSC.hg19.knownGene')
        meth2 <- assay(data)
        rownames(meth2) <- probe2gene$sigDMRInfo@elementMetadata@listData$GeneSymbol
      }
      meth <- meth2
      ## drop probes with any missing beta values
      meth <- meth[which(apply(meth, 1, function(x) return(any(is.na(x)))) == FALSE), ]
      ## collapse duplicated gene rows to their per-sample median
      methm <- meth[which(duplicated(rownames(meth)) == FALSE), ]
      count <- 0
      for (i in which(duplicated(rownames(meth)) == FALSE)) {
        count <- count + 1
        ## BUGFIX: look the gene name up in `meth` — the original used
        ## rownames(methm)[i], which runs out of range (methm has fewer
        ## rows than meth) and produced NA medians for later rows
        methm[count, ] <- apply(meth[which(rownames(meth) %in% rownames(meth)[i]), , drop = FALSE], 2, median)
      }
      meth <- methm
      meth <- meth[order(rownames(meth)), order(colnames(meth))]
      ## normalize barcodes to the patient+sample portion (drop vial "A")
      colnames(meth) <- gsub("A$", "", lapply(strsplit(colnames(meth), "-"), function(x) { y <- x[1:4]; y <- paste(y, collapse = "-"); return(y) }))
      ## collapse duplicated sample columns to their per-gene median
      methm <- meth[, which(duplicated(colnames(meth)) == FALSE)]
      for (i in which(duplicated(colnames(meth)) == TRUE)) {
        j <- which(colnames(methm) == colnames(meth)[i])
        ## BUGFIX: match on colnames(meth)[i] and take row-wise medians
        ## (margin 1), mirroring the expression dedup loop further below;
        ## the original indexed colnames(methm)[i] (wrong/out of range)
        ## and used margin 2, returning a vector of the wrong length
        methm[, j] <- apply(meth[, which(colnames(meth) %in% colnames(meth)[i]), drop = FALSE], 1, median)
      }
      meth <- methm
      meth <- meth[order(rownames(meth)), order(colnames(meth))]
      save(meth, file = paste0(path, type, "_meth.rda"))
    }
    print("meth done")
    ## get copy number variation:
    if (file.exists(paste0(path, type, "_cnv.rda"))) {
      load(paste0(path, type, "_cnv.rda"))
    } else {
      data <- getGistic(gsub("TCGA-", "", type), type = "thresholded")
      ## data <- GDCquery(project = paste(type, sep = ""),
      ## sample.type = "Primary solid Tumor",
      ## data.category = "Copy Number Variation",
      ## data.type = "Copy Number Segment",
      ## )
      ## GDCdownload(data)
      ## data <- GDCprepare(data)
      cnv <- data[, -(1:3)]
      cnv <- apply(cnv, c(1,2), as.numeric)
      rownames(cnv) <- data[, 1]
      colnames(cnv) <- gsub("A$", "", lapply(strsplit(colnames(cnv), "-"), function(x) { y <- x[1:4]; y <- paste(y, collapse = "-"); return(y) }))
      cnv <- cnv[order(rownames(cnv)), order(colnames(cnv))]
      save(cnv, file = paste0(path, type, "_cnv.rda"))
    }
    print("cnv done")
    ## get expression
    if (file.exists(paste0(path, type, "_query.rda"))) {
      load(paste0(path, type, "_query.rda"))
    } else {
      ## leukemia projects have no solid tumor samples
      if (length(grep("-AML$|-LAML$", type)) > 0) {
        sampletype <- c("Primary Blood Derived Cancer - Peripheral Blood", "Solid Tissue Normal")
      } else {
        sampletype <- c("Primary solid Tumor", "Solid Tissue Normal")
      }
      data <- GDCquery(project = paste(type, sep = ""),
                       sample.type = sampletype,
                       data.category = "Transcriptome Profiling",
                       data.type = "Gene Expression Quantification",
                       workflow.type = "HTSeq - Counts"
                       )
      GDCdownload(data)
      if (is.null(data)) {
        print("is null")
        next()
      }
      data <- GDCprepare(data)
      save(data, file = paste0(path, type, "_query.rda"))
    }
    ## process expression data:
    D <- assay(data)
    class <- data@colData@listData$definition
    print("gene expression done")
    ## get mutation
    if (file.exists(paste0(path, type, "_mut.rda"))) {
      load(paste0(path, type, "_mut.rda"))
    } else {
      type2 <- gsub(paste(paste0(unique(gsub("-.*", "", types)), "-"), collapse = "|"), "", type)
      library(data.table)
      ## download maf files from the four variant-calling pipelines
      GDCquery_Maf(tumor = type2, save.csv = TRUE, pipeline = "varscan2") # mutation file
      GDCquery_Maf(tumor = type2, save.csv = TRUE, pipeline = "muse") # mutation file
      GDCquery_Maf(tumor = type2, save.csv = TRUE, pipeline = "somaticsniper") # mutation file
      GDCquery_Maf(tumor = type2, save.csv = TRUE, pipeline = "mutect2") # mutation file
      mut <- list()
      count <- 1
      type3 <- gsub("-", "\\.", type)
      for (i in list.files("GDCdata")) {
        if (length(grep(type3, i)) > 0 & length(grep("csv", i)) > 0) {
          mut[[count]] <- fread(paste0("GDCdata/", i))
          count <- count + 1
        }
      }
      save(mut, file = paste0(path, type, "_mut.rda"))
    }
    ## clinical
    if (file.exists(paste0(path, type, "_clin.rda"))) {
      load(paste0(path, type, "_clin.rda"))
    } else {
      clinical <- GDCquery_clinic(project = type, type = "clinical")
      save(clinical, file = paste0(path, type, "_clin.rda"))
    }
    ## process mutation data: (https://cancer.sanger.ac.uk/cosmic/help/mutation/overview)
    if (file.exists(paste0(path, type, "_mut0.rda")) & !newmut) {
      load(paste0(path, type, "_mut0.rda"))
    } else {
      n <- nmut # try something to get all patients with at least one mutation
      library(snowfall)
      ## binary indicator row: was gene i mutated in each sample?
      countSamples <- function(i, mut.org, genes, mut.mat, coln) {
        i <- which(rownames(mut.mat) %in% genes[i])
        tmp <- mut.org[which(mut.org$Hugo_Symbol %in% rownames(mut.mat)[i]), coln]
        tmp2 <- mut.mat[i, ]
        tmp2[which(colnames(mut.mat) %in% tmp)] <- 1
        return(tmp2)
      }
      ## variant classification (e.g. "Silent") per gene/sample
      typeSamples <- function(i, mut.org, genes, type.mat, coln, coln2) {
        i <- which(rownames(type.mat) %in% genes[i])
        tmp <- mut.org[which(mut.org$Hugo_Symbol %in% rownames(type.mat)[i]), coln]
        tmp3 <- mut.org[which(mut.org$Hugo_Symbol %in% rownames(type.mat)[i]), coln2]
        tmp2 <- type.mat[i, ]
        tmp2[which(colnames(type.mat) %in% tmp)] <- tmp3
        return(tmp2)
      }
      ## gene frequency pooled over all pipelines
      biggl <- list()
      ## BUGFIX: iterate over all pipelines; the original
      ## `for (i in length(mut))` only visited the last element
      for (i in seq_along(mut)) {
        mutation <- mut[[i]]
        biggl[[i]] <- mutation$Hugo_Symbol
      }
      freq <- sort(table(unlist(biggl)), decreasing = TRUE)
      ## n == 0 keeps all genes, otherwise the n most frequently mutated
      if (n == 0) {
        allsub <- names(freq)
      } else {
        allsub <- names(freq)[1:n]
      }
      M <- Mtype <- list()
      for (i in 1:length(mut)) {
        mutation <- mut[[i]]
        mut.mat <- matrix(0, length(allsub), length(unique(mutation$Tumor_Sample_Barcode)))
        type.mat <- matrix("", length(allsub), length(unique(mutation$Tumor_Sample_Barcode)))
        colnames(type.mat) <- colnames(mut.mat) <- sort(unique(mutation$Tumor_Sample_Barcode))
        rownames(type.mat) <- rownames(mut.mat) <- allsub
        coln <- which(colnames(mutation) %in% "Tumor_Sample_Barcode")
        coln2 <- which(colnames(mutation) %in% "Variant_Classification")
        mut.org <- mutation[which(mutation$Hugo_Symbol %in% allsub), ]
        sfInit(parallel = TRUE, cpus = 4)
        sfExport("mut.mat", "coln")
        tmp <- sfLapply(as.list(1:length(allsub)), countSamples, mut.org, allsub, mut.mat, coln)
        sfStop()
        tmp <- do.call("rbind", tmp)
        rownames(tmp) <- allsub
        colnames(tmp) <- colnames(mut.mat)
        M[[i]] <- tmp
        sfInit(parallel = TRUE, cpus = 4)
        sfExport("type.mat", "coln2")
        tmp <- sfLapply(as.list(1:length(allsub)), typeSamples, mut.org, allsub, type.mat, coln, coln2)
        sfStop()
        tmp <- do.call("rbind", tmp)
        rownames(tmp) <- allsub
        colnames(tmp) <- colnames(mut.mat)
        Mtype[[i]] <- tmp
      }
      ## keep samples present in all pipelines
      ## NOTE(review): hard-codes exactly four maf files in `mut`
      samples <- intersect(intersect(intersect(colnames(M[[1]]), colnames(M[[2]])), colnames(M[[3]])), colnames(M[[4]]))
      M0 <- M[[1]][, which(colnames(M[[1]]) %in% samples)]
      Mtype0 <- Mtype[[1]][, which(colnames(Mtype[[1]]) %in% samples)]
      ## M0 sums the calls (0..4 supporting pipelines); Mtype0 concatenates
      ## the variant classifications
      for (i in 2:length(M)) {
        M0 <- M0 + M[[i]][, which(colnames(M[[i]]) %in% samples)]
        Mtype0 <- matrix(paste(Mtype0, Mtype[[i]][, which(colnames(Mtype[[i]]) %in% samples)]), nrow(Mtype0))
      }
      rownames(Mtype0) <- rownames(M0)
      colnames(Mtype0) <- colnames(M0)
      save(M0, Mtype0, file = paste0(path, type, "_mut0.rda"))
    }
    ## process expression data:
    D <- assay(data)
    class <- data@colData@listData$definition
    M <- M0
    Mtype <- Mtype0
    ## truncate barcodes to the patient+sample portion
    colnames(M) <- lapply(colnames(M), function(x) {
      y <- unlist(strsplit(x, "-"))
      y <- paste(y[1:4], collapse = "-")
      y <- unlist(strsplit(y, ""))
      y <- paste(y[1:(length(y)-1)], collapse = "")
      return(y)
    })
    colnames(D) <- lapply(colnames(D), function(x) {
      y <- unlist(strsplit(x, "-"))
      y <- paste(y[1:4], collapse = "-")
      y <- unlist(strsplit(y, ""))
      y <- paste(y[1:(length(y)-1)], collapse = "")
      return(y)
    })
    colnames(M) <- gsub("A$", "", lapply(strsplit(colnames(M), "-"), function(x) { y <- x[1:4]; y <- paste(y, collapse = "-"); return(y) }))
    M <- M[order(rownames(M)), order(colnames(M))]
    Mtype <- Mtype[order(rownames(Mtype)), order(colnames(Mtype))]
    print("mutation done")
    ## log odds:
    if (file.exists(paste0(path, type, "_llr.rda")) & !newllr) {
      load(paste0(path, type, "_llr.rda"))
    } else {
      library(edgeR)
      if (sum(class %in% "Solid Tissue Normal") < 10) {
        ## too few normal samples: score each gene against its own tumor
        ## distribution via quartile-based log ratios
        distrParNC <- function(i, data) {
          ## BUGFIX: center gene i by its own median; the original
          ## subtracted median(data[, i]) — the median of *sample* i —
          ## which ties gene i to an unrelated column (and can go out of
          ## bounds when ncol < nrow)
          data[i, ] <- data[i, ] - median(data[i, ])
          llrcol <- numeric(ncol(data))
          div0 <- quantile(data[i, ], 0.25)
          div1 <- quantile(data[i, ], 0.75)
          sigma <- sd(data[i, ])
          for (j in 1:ncol(data)) {
            if (data[i, j] <= 0) {
              llrcol[j] <- log2(div0/data[i, j])
            } else {
              llrcol[j] <- log2(data[i, j]/div1)
            }
          }
          return(llrcol)
        }
        highcounts <- which(apply(D, 1, median) >= 10)
        DC <- D[highcounts, ]
        ## BUGFIX: rownames(D) is a vector; the original two-index subset
        ## `rownames(D)[highcounts, ]` raises "incorrect number of
        ## dimensions"
        genenames <- rownames(D)[highcounts]
        nf <- calcNormFactors(DC)
        DC <- t(t(DC)/nf) # DC <- DC2
        ## this is very adventurous:
        ## DC <- t(scale(t(DC)))
        ## DC <- abs(DC)
        ## pc <- 1-min(DC)
        ## tmp <- log2(DC+pc)
        ## hist(tmp)
        sfInit(parallel = TRUE, cpus = 4)
        sfExport("DC")
        tmp <- do.call("rbind", sfLapply(1:nrow(DC),
                                         distrParNC, DC))
        colnames(tmp) <- colnames(DC)
        rownames(tmp) <- genenames
        sfStop()
        DF <- DC
      } else {
        ## enough normals: kernel-cdf log-likelihood ratio tumor vs normal
        library(ks)
        ## tail probability of x under the kernel cdf k
        mykcde <- function(x, k) {
          a <- which.min(abs(k$eval.points - x))
          b <- k$estimate[a]
          b <- min(b, 1-b)
          return(b)
        }
        distrParKs <- function(i, data, C) {
          llrcol <- numeric(ncol(data))
          ddistr <- list()
          dogenes <- unique(colnames(data))[which(!(unique(colnames(data)) %in% ""))]
          for (j in dogenes) {
            D <- which(colnames(data) %in% j)
            ddistr[[j]] <- kcde(data[i, D])
          }
          cdistr <- kcde(data[i, C])
          for (j in which(!(colnames(data) %in% ""))) {
            gene <- colnames(data)[j]
            llrcol[j] <- log2(mykcde(data[i, j], ddistr[[gene]])/mykcde(data[i,j], cdistr))
          }
          llrcol <- llrcol[-C]
          return(llrcol)
        }
        DN <- D[, which(class %in% "Solid Tissue Normal")]
        DT <- D[, which(class %in% "Primary solid Tumor")]
        DF <- cbind(DT, DN)
        C <- (ncol(DT)+1):ncol(DF)
        highcounts <- which(apply(DF, 1, median) >= 10)
        DF <- DF[highcounts, ]
        genenames <- rownames(DF)
        colnames(DF)[1:ncol(DT)] <- "P" # not knock-down specific!
        ## blank the normal-sample names so distrParKs treats them as
        ## controls
        colnames(DF)[grep(paste(unique(gsub("-.*", "", types)), collapse = "|"), colnames(DF))] <- ""
        nf <- calcNormFactors(DF)
        DF <- t(t(DF)/nf)
        sfInit(parallel = TRUE, cpus = 4)
        sfExport("DF", "C", "mykcde")
        sfLibrary(ks)
        tmp <- do.call("rbind", sfLapply(1:nrow(DF),
                                         distrParKs, DF, C))
        sfStop()
        colnames(tmp) <- colnames(DT)
      }
      save(tmp, DF, file = paste0(path, type, "_llr.rda"))
    }
    rownames(tmp) <- rownames(DF)
    par(mfrow=c(1,3))
    hist(tmp)
    ## zero out NA/Inf log ratios before downstream use
    tmp[which(is.na(tmp) | is.infinite(tmp))] <- 0
    hist(tmp)
    D <- tmp
    print("expression done")
    ## sd.glob <- sd(tmp)
    ## tmp <- tmp[-which(apply(tmp, 1, sd) < sd.glob), ]
    ## hist(tmp)
    ## prep clinical data:
    ## recode survival status (dead=1/alive=0) and tumor stage as numbers
    clinical[which(clinical$vital_status%in% "dead"), which(colnames(clinical) %in% "vital_status")] <- 1
    clinical[which(clinical$vital_status%in% "alive"), which(colnames(clinical) %in% "vital_status")] <- 0
    count <- 0
    for (stage in sort(unique(clinical$tumor_stage))) {
      clinical[which(clinical$tumor_stage%in% stage), which(colnames(clinical) %in% "tumor_stage")] <- count
      count <- count + 1
    }
    clinical$tumor_stage <- as.numeric(clinical$tumor_stage)
    ## censored patients: fall back to days to last follow-up
    clinical[which(is.na(clinical$days_to_death)), which(colnames(clinical) %in% "days_to_death")] <- clinical[which(is.na(clinical$days_to_death)), which(colnames(clinical) %in% "days_to_last_follow_up")]
    clinical$vital_status<- as.numeric(clinical$vital_status)
    print("clinical done")
    save(clinical, D, M, Mtype, DF, class, meth, cnv, file = paste0(path, type, "_final.rda"))
  }
  print(table(class))
  sizemat <- rbind(sizemat, table(class))
  rownames(sizemat)[nrow(sizemat)] <- type
  if (donr) {
    samplenr[snrcount, 1] <- sum(class %in% "Primary solid Tumor")
    samplenr[snrcount, 2] <- sum(class %in% "Solid Tissue Normal")
  }
}
## Hard stop: everything below this point is interactive/analysis scratch
## code and is not reached when the script is source()d top to bottom.
stop("done")
## summary barplot of tumor/normal counts per project (run manually)
samplenr2 <- samplenr[-which(is.na(samplenr[, 1]) == TRUE), ]
barplot(t(samplenr2[order(apply(samplenr2, 1, sum)), ]), horiz = 1, space = 1, las = 2)
## example driver: set flags and re-source this script for two projects
newllr <- 1; newmut <- 1; nmut <- 1; newsave <- 1; types <- c("TCGA-SKCM","TCGA-UVM"); source("~/Documents/testing/general/TCGA.r")
## bookkeeping: projects without "Primary solid Tumor" samples
nonesolid <- c("TCGA-LAML")
solidnonormal <- c()
## analysis:
## Binarize the per-gene aberration evidence for one project and build one
## gene x sample profile matrix per data type (mutation, methylation, CNV)
## on a common grid.
type <- "TCGA-BRCA"
path <- "mutclust/"
load(paste0(path, type, "_final.rda"))
## mutations: M holds the number of supporting variant callers (0..4);
## require at least 3, then remove silent mutations
M[which(M < 3)] <- 0
M[which(M > 0)] <- 1
M[grep("Silent", Mtype)] <- 0
M <- M[order(rownames(M)), order(colnames(M))]
## copy number: any non-zero GISTIC call counts as an aberration
## (the first assignment is a no-op kept for symmetry)
cnv[which(cnv == 0)] <- 0
cnv[which(cnv != 0)] <- 1
cnv <- cnv[order(rownames(cnv)), order(colnames(cnv))]
## methylation: beta value > 0.5 counts as methylated; order of the two
## assignments matters
meth[which(meth > 0.5)] <- 1
meth[which(meth <= 0.5)] <- 0
meth <- meth[order(rownames(meth)), order(colnames(meth))]
meth[is.na(meth)] <- 0
## empty profile matrix over the union of all genes/samples seen anywhere
P <- matrix(0, length(unique(c(rownames(M), rownames(cnv), rownames(meth)))), length(unique(c(colnames(M), colnames(cnv), colnames(meth)))))
rownames(P) <- sort(unique(c(rownames(M), rownames(cnv), rownames(meth))))
colnames(P) <- sort(unique(c(colnames(M), colnames(cnv), colnames(meth))))
colnames(P) <- gsub("A$", "", lapply(strsplit(colnames(P), "-"), function(x) { y <- x[1:4]; y <- paste(y, collapse = "-"); return(y) }))
P <- P[, which(duplicated(colnames(P)) == FALSE)]
## fill one copy of the grid per data type
P[which(rownames(P) %in% rownames(M)), which(colnames(P) %in% colnames(M))] <- M[which(rownames(M) %in% rownames(P)), which(colnames(M) %in% colnames(P))]
Pmut <- P
P <- Pmut*0
P[which(rownames(P) %in% rownames(meth)), which(colnames(P) %in% colnames(meth))] <- P[which(rownames(P) %in% rownames(meth)), which(colnames(P) %in% colnames(meth))] + meth[which(rownames(meth) %in% rownames(P)), which(colnames(meth) %in% colnames(P))]
Pmeth <- P
P <- Pmeth*0
P[which(rownames(P) %in% rownames(cnv)), which(colnames(P) %in% colnames(cnv))] <- P[which(rownames(P) %in% rownames(cnv)), which(colnames(P) %in% colnames(cnv))] + cnv[which(rownames(cnv) %in% rownames(P)), which(colnames(cnv) %in% colnames(P))]
Pcnv <- P
P <- Pmut+Pmeth+Pcnv
P2 <- P # full abberations including cnv and meth
## downstream inference uses mutations only
P <- Pmut
## Bailey et al Cell 2018
## candidate BRCA driver genes
goi <- c("MAP2K4", "GATA3", "GPS2", "TBX3", "PTPRD", "NCOR1", "CBFB", "CDKN1B") # BRCA
P <- P[which(rownames(P) %in% goi), ]
P[which(P > 1)] <- 1
## normalize each sample's profile to sum to one; all-zero columns give
## NaN and are reset below
P <- apply(P, 2, function(x) return(x/sum(x)))
P[is.na(P)] <- 0
## data imputation:
library(naturalsort)
library(nem)
library(cluster)
library(Rcpp)
library(Rgraphviz)
library(mnem)
source("~/Documents/mnem/R/mnems.r")
source("~/Documents/mnem/R/mnems_low.r")
sourceCpp("~/Documents/mnem/src/mm.cpp")
source("~/Documents/nempi/R/nempi_main.r")
source("~/Documents/nempi/R/nempi_low.r")
## Build the perturbation prior (Rho): genes x samples, padded with zero
## columns for expression samples without profile data, aligned to the
## columns of the log-ratio matrix D; then run nempi.
Rho <- cbind(P, matrix(0, nrow(P), sum(!(colnames(D) %in% colnames(P)))))
colnames(Rho) <- c(colnames(P), colnames(D)[which(!(colnames(D) %in% colnames(P)))])
Rho <- Rho[, colnames(D)]
## drop genes without any labelled sample
if (sum(apply(Rho, 1, sum) == 0) > 0) {
Rho <- Rho[-which(apply(Rho, 1, sum) == 0), ]
}
Rho[is.na(Rho)] <- 0
sum(apply(Rho, 2, sum) == 0)/ncol(Rho) # unlabelled
pdf("temp.pdf", width = 12, height = 6)
tmp <- Rho
colnames(tmp) <- NULL
epiNEM::HeatmapOP(tmp, col = "RdBu", Rowv = 0, bordercol = "transparent")
dev.off()
inc <- sort(apply(Rho, 1, sum))
## deduplicate expression columns (row-wise median over duplicates)
D2 <- D[which(apply(D, 1, median) != 0), ]
D3 <- D2[, which(duplicated(colnames(D2)) == FALSE)]
Rho <- Rho[, which(duplicated(colnames(D2)) == FALSE)]
for (i in which(duplicated(colnames(D2)) == TRUE)) {
j <- which(colnames(D3) %in% colnames(D2)[i])
D3[, j] <- apply(D2[, which(colnames(D2) %in% colnames(D2)[i]), drop = FALSE], 1, median)
}
D2 <- D3
## NOTE(review): assigns gene labels as column names, padding surplus
## columns with randomly sampled gene names — presumably the input format
## nempi expects; verify against the nempi documentation
colnames(D2) <- c(rownames(Rho), sample(rownames(Rho), ncol(D2)-nrow(Rho), replace = TRUE))
converged <- 10
## run and time the NEM-based perturbation inference
start <- Sys.time()
nempires <- nempi(D2, Gamma = Rho, full = TRUE, converged = converged)
end <- Sys.time()
print(end - start)
ures <- nempires
## count of likelihood decreases over iterations (expected: 0)
sum(ures$lls[2:length(ures$lls)] - ures$lls[1:(length(ures$lls)-1)] < 0)
pdf("temp.pdf", width = 12, height = 6)
epiNEM::HeatmapOP(ures$Gamma, bordercol = rgb(0,0,0,0), col = "RdBu")
#plot(ures, edgewidth = 30)
dev.off()
pdf("temp.pdf", width = 9, height = 6)
par(mfrow=c(2,3))
plotConvergence(ures, type = "b", col = "blue")
dev.off()
source("~/Documents/nempi/R/nempi_main.r")
## relabel expression columns with the perturbed gene set encoded in Rho
## (genes joined by "_"; unlabelled samples get "")
D4 <- D2
colnames(D4) <- apply(Rho, 2, function(x) {
Sgenes <- paste(sort(rownames(Rho)[which(x > 0)]), collapse = "_")
return(Sgenes)
})
## Cluster jobs: each `do` branch loads the cached nempi input, relabels
## the expression columns with the perturbed gene sets encoded in Rho,
## runs one competitor method and saves its result.
path <- ""
type <- "TCGA-BRCA"
if (do == 1) {
  ## support-vector-machine baseline
  library(e1071)
  load(paste0(path, type, "_nempi.rda"))
  D4 <- D2
  labels <- vapply(seq_len(ncol(Rho)), function(k) {
    paste(sort(rownames(Rho)[which(Rho[, k] > 0)]), collapse = "_")
  }, character(1))
  colnames(D4) <- labels
  svmres <- classpi(D4, full = TRUE, method = "svm")
  save(svmres, file = paste0("temp_", do, "_", as.numeric(Sys.time()), ".rda"))
}
## Neural-network baseline (nnet), cluster job `do == 2`.
if (do == 2) {
  library(nnet)
  load(paste0(path, type, "_nempi.rda"))
  D4 <- D2
  labels <- vapply(seq_len(ncol(Rho)), function(k) {
    paste(sort(rownames(Rho)[which(Rho[, k] > 0)]), collapse = "_")
  }, character(1))
  colnames(D4) <- labels
  nnres <- classpi(D4, full = TRUE, method = "nnet", MaxNWts = 50000, size = 5) # takes forever
  save(nnres, file = paste0("temp_", do, "_", as.numeric(Sys.time()), ".rda"))
}
## Random-forest multiple-imputation baseline (CALIBERrfimpute via mice),
## cluster job `do == 3`.
if (do == 3) {
  library(CALIBERrfimpute)
  load(paste0(path, type, "_nempi.rda"))
  D4 <- D2
  labels <- vapply(seq_len(ncol(Rho)), function(k) {
    paste(sort(rownames(Rho)[which(Rho[, k] > 0)]), collapse = "_")
  }, character(1))
  colnames(D4) <- labels
  ## samples x genes data frame with the class label as the last column;
  ## blank labels become NA and are imputed
  mfdata <- cbind(as.data.frame(t(D4)), colnames(D4))
  mfdata[which(mfdata == "", arr.ind = TRUE)] <- NA
  micedata <- mfdata
  colnames(micedata) <- paste0(LETTERS[1:ncol(micedata)], 1:ncol(micedata))
  miceres <- mice(micedata, method = c(rep('rfcont', ncol(micedata)-1), 'rfcat'), m = 2, maxit = 2)
  save(miceres, file = paste0("temp_", do, "_", as.numeric(Sys.time()), ".rda"))
}
## Random-forest baseline, cluster job `do == 4`.
if (do == 4) {
  library(e1071)
  load(paste0(path, type, "_nempi.rda"))
  D4 <- D2
  labels <- vapply(seq_len(ncol(Rho)), function(k) {
    paste(sort(rownames(Rho)[which(Rho[, k] > 0)]), collapse = "_")
  }, character(1))
  colnames(D4) <- labels
  rfres <- classpi(D4, full = TRUE, method = "randomForest")
  save(rfres, file = paste0("temp_", do, "_", as.numeric(Sys.time()), ".rda"))
}
## missForest imputation baseline, cluster job `do == 5`.
if (do == 5) {
  library(e1071)
  load(paste0(path, type, "_nempi.rda"))
  D4 <- D2
  labels <- vapply(seq_len(ncol(Rho)), function(k) {
    paste(sort(rownames(Rho)[which(Rho[, k] > 0)]), collapse = "_")
  }, character(1))
  colnames(D4) <- labels
  ## samples x genes data frame; blank labels become NA and are imputed
  mfdata <- cbind(as.data.frame(t(D4)), colnames(D4))
  mfdata[which(mfdata == "", arr.ind = TRUE)] <- NA
  library(missForest)
  mfimp <- missForest(mfdata)
  ## relabel with the imputed classes (last column of ximp)
  D4 <- D2
  colnames(D4) <- mfimp$ximp[, ncol(mfimp$ximp)]
  tmp <- mynem(D4, multi = TRUE)
  Gamma <- getGamma(D4)
  ## package the result in the same shape nempi returns
  ures <- list(
    Gamma = apply(Gamma, 2, function(x) return(x/sum(x))),
    res = list(adj = tmp$adj),
    null = TRUE,
    combi = 1
  )
  mfres <- ures
  save(mfres, file = paste0("temp_", do, "_", as.numeric(Sys.time()), ".rda"))
}
## k-nearest-neighbour labelling of the unlabelled samples.
library(class)
labelled <- colnames(D4) != ""
train <- t(D4[, which(labelled)])
test <- t(D4[, which(!labelled)])
knn0 <- 0  # set to 1 to add an explicit all-zero "NULL" training class
if (knn0) {
  train <- rbind(train, NULL = rep(0, ncol(train)))
  cl <- c(colnames(D4)[which(labelled)], "NULL")
} else {
  cl <- colnames(D4)[which(labelled)]
}
knnres <- knn(train, test, cl, prob = TRUE)
## assign predicted classes to the unlabelled columns
D3 <- D4
colnames(D3)[which(colnames(D3) %in% "")] <- as.character(knnres)
tmp <- mynem(D3, multi = TRUE)
Gamma <- getGamma(D3)
## package in the same shape nempi returns
ures <- list(
  Gamma = Gamma, # apply(Gamma, 2, function(x) return(x/sum(x)))
  res = list(adj = tmp$adj),
  null = TRUE,
  combi = 1
)
knnres <- ures
## Load the cached results (computed locally or on the cluster via the
## `do` branches above) and pick the method result to inspect.
path <- "mutclust/"; type <- "TCGA-BRCA"
load(paste0(path, type, "_nempi.rda"))
## ## load("~/Mount/Euler/temp_1_1573814974.40659.rda") # old
## load("~/Mount/Euler/temp_1_1574076694.65703.rda")
## ## load("~/Mount/Euler/temp_4_1573819581.9749.rda") # old
## load("~/Mount/Euler/temp_4_1574080528.67352.rda")
## ## load("~/Mount/Euler/temp_2_1573821112.2412.rda") # old
## load("~/Mount/Euler/temp_2_1574084503.12431.rda")
## load("~/Mount/Euler/temp_5_1574081415.91547.rda")
## ures <- rfres
## ures <- mfres
## ures <- svmres
## ures <- nnres
## ures <- knnres
ures <- nempires
## check against methylation and cnvs:
## heatmap of the inferred perturbation matrix
pdf("nempi_gamma.pdf", width = 12, height = 6)
tmp <- ures$Gamma
colnames(tmp) <- NULL
epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", colorkey = NULL)
dev.off()
## inferred network with gene names attached
pdf("nempi_phi.pdf", width = 6, height = 6)
Pgenes <- sort(unique(colnames(D2)))
adjtmp <- ures$res$adj
colnames(adjtmp) <- rownames(adjtmp) <- Pgenes
plotDnf(adjtmp, edgelwd = 2)
dev.off()
## cnv/meth enrichment:
## all competing methods for the evaluation loop below
methods <- list("NEM$\\pi$" = nempires, knn = knnres, mf = mfres, nn = nnres, rf = rfres, svm = svmres)
mutinc <- 1
Lall <- Lcnv <- Lmeth <- Lmut <- list()
## Evaluate each method's inferred perturbation matrix (Gamma) against the
## independent aberration evidence (mutation + CNV + methylation) using
## one-sided hypergeometric tests; also writes per-method heatmaps.
for (i in 1:length(methods)) {
if (i != 8) {
print(names(methods)[i])
ures <- methods[[i]]
newGamma <- ures$Gamma
} else {
## random baseline — NOTE(review): dead branch, `methods` has only six
## entries so i never reaches 8
print("random")
newGamma <- newGamma*0
newGamma[sample(1:length(newGamma), floor(0.45*length(newGamma)))] <- 1 # well that is included into the test...
}
hist(newGamma)
if (i == 1) {
## nempi only: propagate Gamma through the transitively closed network
rntmp <- rownames(newGamma); newGamma <- t(mytc(ures$res$adj))%*%newGamma; rownames(newGamma) <- rntmp
}
## combined aberration matrix, aligned to Rho's genes/samples
P <- Pmut+Pmeth+Pcnv
if (!mutinc) { # include mutations or not (0)
P[which(Pmut == 1)] <- 0
}
P <- P[which(rownames(P) %in% rownames(Rho)), which(colnames(P) %in% colnames(Rho))]
P <- P[order(rownames(P)), order(colnames(P))]
P[which(P > 1)] <- 1
Ptmp <- cbind(P, matrix(0, nrow(P), sum(!(colnames(Rho) %in% colnames(P)))))
colnames(Ptmp) <- c(colnames(P), colnames(Rho)[which(!(colnames(Rho) %in% colnames(P)))])
P <- Ptmp[, colnames(Rho)]
## fisher:
## method-specific threshold for binarizing Gamma
if (i %in% c(4,5)) {
cut <- 0.07
} else if (i == 6) {
cut <- 0.1
} else {
cut <- 1/8
}
newGamma[which(newGamma > cut)] <- 1
newGamma[which(newGamma <= cut)] <- 0
## agreement code: 2 = predicted & observed aberration, 1 = predicted
## only, -2 = observed only, 0 = neither
pmeth <- newGamma
pmeth[which(newGamma == 1 & P == 1)] <- 2
pmeth[which(newGamma == 0 & P == 1)] <- -2
if (!mutinc) { # include mutations or not (0)
pmeth[which(Rho > 0)] <- 0
}
colnames(pmeth) <- NULL
##pdf(paste0("FigS_", names(methods)[i], ".pdf"), height = 6, width = 12)
setEPS()
postscript(paste0("FigS_", names(methods)[i], ".eps"), height = 6, width = 12)
print(epiNEM::HeatmapOP(pmeth, bordercol = rgb(0,0,0,0), col = "RdBu", colorkey = NULL))
dev.off()
## enrichment against CNVs only
print("cnv")
P <- Pcnv
P <- P[which(rownames(P) %in% rownames(Rho)), which(colnames(P) %in% colnames(Rho))]
P <- P[order(rownames(P)), order(colnames(P))]
P[which(P > 1)] <- 1
Ptmp <- cbind(P, matrix(0, nrow(P), sum(!(colnames(Rho) %in% colnames(P)))))
colnames(Ptmp) <- c(colnames(P), colnames(Rho)[which(!(colnames(Rho) %in% colnames(P)))])
P <- Ptmp[, colnames(Rho)]
F <- matrix(c(sum(pmeth >= 1 & P == 1), sum(pmeth >= 1 & P == 0), sum(pmeth == -2 & P == 1), sum(pmeth == 0 & P == 0)), 2)
print(1 - phyper(F[1,1]-1, sum(F[, 1]), sum(F[, 2]), sum(F[1, ])))
Lcnv[[i]] <- F
## enrichment against methylation only
print("meth")
P <- Pmeth
P <- P[which(rownames(P) %in% rownames(Rho)), which(colnames(P) %in% colnames(Rho))]
P <- P[order(rownames(P)), order(colnames(P))]
P[which(P > 1)] <- 1
Ptmp <- cbind(P, matrix(0, nrow(P), sum(!(colnames(Rho) %in% colnames(P)))))
colnames(Ptmp) <- c(colnames(P), colnames(Rho)[which(!(colnames(Rho) %in% colnames(P)))])
P <- Ptmp[, colnames(Rho)]
F <- matrix(c(sum(pmeth >= 1 & P == 1), sum(pmeth >= 1 & P == 0), sum(pmeth == -2 & P == 1), sum(pmeth == 0 & P == 0)), 2)
print(1 - phyper(F[1,1]-1, sum(F[, 1]), sum(F[, 2]), sum(F[1, ])))
Lmeth[[i]] <- F
## enrichment against mutations only
print("mut")
P <- Pmut
P <- P[which(rownames(P) %in% rownames(Rho)), which(colnames(P) %in% colnames(Rho))]
P <- P[order(rownames(P)), order(colnames(P))]
P[which(P > 1)] <- 1
Ptmp <- cbind(P, matrix(0, nrow(P), sum(!(colnames(Rho) %in% colnames(P)))))
colnames(Ptmp) <- c(colnames(P), colnames(Rho)[which(!(colnames(Rho) %in% colnames(P)))])
P <- Ptmp[, colnames(Rho)]
F <- matrix(c(sum(pmeth >= 1 & P == 1), sum(pmeth >= 1 & P == 0), sum(pmeth == -2 & P == 1), sum(pmeth == 0 & P == 0)), 2)
print(1 - phyper(F[1,1]-1, sum(F[, 1]), sum(F[, 2]), sum(F[1, ])))
Lmut[[i]] <- F
## overall agreement table (all aberration types combined)
Fmat <- matrix(c(sum(pmeth == 2), sum(pmeth == 1), sum(pmeth == -2), sum(pmeth == 0)), 2)
Lall[[i]] <- Fmat
## print(fisher.test(Fmat, alternative = "greater"))
print("p-value")
print(1 - phyper(Fmat[1,1]-1, sum(Fmat[, 1]), sum(Fmat[, 2]), sum(Fmat[1, ])))
}
## Emit LaTeX table rows for the CNV, methylation and mutation contingency
## tables collected above, and for the combined table with p-values.
for (i in seq_along(methods)) {
  Fm <- Lcnv[[i]]
  cat(paste0(names(methods)[i], " & ", Fm[1, 1], " & ", Fm[2, 1], " & ", Fm[2, 2], " & ", Fm[1, 2], "\\\\\n"))
}
for (i in seq_along(methods)) {
  Fm <- Lmeth[[i]]
  cat(paste0(names(methods)[i], " & ", Fm[1, 1], " & ", Fm[2, 1], " & ", Fm[2, 2], " & ", Fm[1, 2], "\\\\\n"))
}
for (i in seq_along(methods)) {
  Fm <- Lmut[[i]]
  cat(paste0(names(methods)[i], " & ", Fm[1, 1], " & ", Fm[2, 1], " & ", Fm[2, 2], " & ", Fm[1, 2], "\\\\\n"))
}
## combined table with one-sided hypergeometric p-value (nn excluded)
for (i in seq_along(methods)) {
  if (names(methods)[i] == "nn") { next() }
  Fmat <- Lall[[i]]
  ptmp <- 1 - phyper(Fmat[1, 1] - 1, sum(Fmat[, 1]), sum(Fmat[, 2]), sum(Fmat[1, ]))
  if (ptmp == 0) {
    ptmp <- "$< 2.2\\times10^{-16}$"
  } else {
    ptmp <- paste0("$", signif(ptmp), "$")
  }
  cat(paste0(names(methods)[i], " & ", Fmat[1, 1], " & ", Fmat[2, 1], " & ", Fmat[2, 2], " & ", Fmat[1, 2], " & ", ptmp, "\\\\\n"))
}
## check correlation
## P4 <- apply(P3, 2, function(x) return(x/sum(x)))
## P4[is.na(P4)] <- 0
## cor(as.vector(newGamma), as.vector(P3))
##
## per-gene enrichment: one-sided Fisher test on each row of pmeth
cormat <- matrix(0, nrow(pmeth), 2)
fishres <- numeric(nrow(pmeth))
names(fishres) <- rownames(pmeth)
for (i in 1:nrow(pmeth)) {
Fmat <- matrix(c(sum(pmeth[i, ] == 2), sum(pmeth[i, ] == 1), sum(pmeth[i, ] == -2), sum(pmeth[i, ] == 0)), 2)
fishres[i] <- fisher.test(Fmat, alternative = "g")$p.value
cormat[i, ] <- c(sum(Fmat[1, ]), sum(Fmat[, 1]))
}
## GATA3 & PTPRD:
## co-occurrence of supported calls for two genes
## NOTE(review): rows 3 and 7 are assumed to be GATA3 and PTPRD in the
## row ordering of pmeth — verify against rownames(pmeth) before use
Fmat <- matrix(c(sum(pmeth[3, ] %in% c(2,-2) & pmeth[7, ] %in% c(2,-2)),
sum(pmeth[3, ] %in% c(1,0) & pmeth[7, ] %in% c(-2,2)),
sum(pmeth[3, ] %in% c(-2,2) & pmeth[7, ] %in% c(1,0)),
sum(pmeth[3, ] %in% c(1,0) & pmeth[7, ] %in% c(1,0))), 2)
1 - phyper(Fmat[1,1]-1, sum(Fmat[, 1]), sum(Fmat[, 2]), sum(Fmat[1, ]))
## pca:
## quick interactive visualizations of the inferred Gamma
pca <- princomp(D2)
col <- apply(newGamma, 2, sum)
col <- col/max(col)
K <- kmeans(t(newGamma), nrow(newGamma))
plot(pca$loadings[, 1:2], col = K$cluster)#rgb(col,0,0,1))
## tsne:
## NOTE(review): tsne() needs the tsne package, which is not loaded
## anywhere above — confirm it is attached before running
sne <- tsne(t(newGamma))
plot(sne, col = K$cluster)
## R profiling:
Rprof("temp.txt", line.profiling=TRUE)
## NOTE(review): `Gamma` and `combi` must already exist in the workspace
ures <- nempi(D2[1:20, ], Gamma = Gamma, complete = 1, full = TRUE, converged = converged, combi = combi)
Rprof(NULL)
summaryRprof("temp.txt", lines = "show")$sampling.time
head(summaryRprof("temp.txt", lines = "show")$by.self, 10)
##
## Paper figures: network and Gamma heatmaps for two result objects.
## NOTE(review): `uresn` and `uresf` are expected from the loaded .rda or
## the workspace — they are not created anywhere in this file; confirm
## before running.
type <- "TCGA-BRCA"
load(paste0(path, type, "_nempi.rda"))
pdf("Fig5.pdf", width = 10, height = 10)
plot(uresn, edgelwd = 2)
dev.off()
pdf("Fig6.pdf", width = 10, height = 5)
tmp <- uresn$Gamma
colnames(tmp) <- NULL
epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", colorkey = NULL)
dev.off()
## propagate Gamma through the transitively closed network (top panel) and
## compare with the input Gamma (bottom panel)
pdf("Fig7.pdf", width = 10, height = 10)
phitmp <- mytc(uresn$res$adj)
tmp <- t(phitmp)%*%uresn$Gamma
colnames(tmp) <- NULL
rownames(tmp) <- rownames(uresn$Gamma)
tmp2 <- tmp
colnames(tmp) <- NULL
tmp <- Gamma
colnames(tmp) <- NULL
tmp3 <- tmp
tmp4 <- tmp2
p1 <- epiNEM::HeatmapOP(tmp2, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
p2 <- epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
print(p1, position=c(0, .5, 1, 1), more=TRUE)
print(p2, position=c(0, 0, 1, .5))
## fraction of input perturbations recovered by the propagated Gamma
sum(tmp == 1 & tmp2 == 1)/sum(tmp == 1)
dev.off()
pdf("Fig8.pdf", width = 10, height = 10)
plot(uresf, edgelwd = 2)
dev.off()
pdf("Fig9.pdf", width = 10, height = 5)
tmp <- uresf$Gamma
colnames(tmp) <- NULL
epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", colorkey = NULL)
dev.off()
## same comparison for the second result object
pdf("Fig10.pdf", width = 10, height = 10)
phitmp <- mytc(uresf$res$adj)
tmp <- t(phitmp)%*%uresf$Gamma
colnames(tmp) <- NULL
rownames(tmp) <- rownames(uresf$Gamma)
tmp2 <- tmp
colnames(tmp) <- NULL
tmp <- Gamma
colnames(tmp) <- NULL
p1 <- epiNEM::HeatmapOP(tmp2, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
p2 <- epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
print(p1, position=c(0, .5, 1, 1), more=TRUE)
print(p2, position=c(0, 0, 1, .5))
sum(tmp == 1 & tmp2 == 1)/sum(tmp == 1)
dev.off()
## new figure:
## overlay: +1 where the propagated Gamma (threshold 1/6) agrees with a
## known perturbation, -1 where a known perturbation is missed
P <- t(mytc(uresf$res$adj))%*%uresf$Gamma
rownames(P) <- rownames(uresf$Gamma)
PM <- P
PM[which(PM > 1/6 & Gamma == 1)] <- P[which(PM > 1/6 & Gamma == 1)] + 1
PM[which(PM <= 1/6 & Gamma == 1)] <- P[which(PM <= 1/6 & Gamma == 1)] - 1
epiNEM::HeatmapOP(PM, bordercol = rgb(0,0,0,0), col = "RdYlBu", breaks = seq(-1,2,length.out=5), clusterx = tmp2)
## other plots:
pdf("temp.pdf", width = 10, height = 10)
plotDnf(c("M1=M4", "M2=M4", "M3=M5", "M1=M5"), edgelwd = 2)
dev.off()
M <- matrix(0, 5, 10)
rownames(M) <- paste0("M", 1:5)
colnames(M) <- paste0("S", 1:10)
M[1, 1:4] <- 1
M[2, c(2,7:9)] <- 1
M[3, 5:6] <- 1
M[3, 10] <- 1
phi <- matrix(0, 5, 5)
diag(phi) <- 1
phi[1, 1:5] <- 1
phi[2, 3] <- phi[4, 5] <- 1
## M <- t(phi)%*%M; M[M > 1] <- 1
rownames(M) <- paste0("M", 1:5)
pdf("temp.pdf", width = 8, height = 4)
epiNEM::HeatmapOP(M, Colv = 0, Rowv = 0, col = "RdBu", colorkey = NULL)
dev.off()
## check for mutation type (das führt zu nichts)
colnames(Mtype) <- unlist(lapply(strsplit(colnames(Mtype), "-"), function(x) {
y <- x[1:3]
y <- paste(c(y, "01"), collapse = "-")
return(y)
}))
checkgene <- "CBFB"
van <- intersect(colnames(Gamma)[which(PM[checkgene, ] < 0)], colnames(Mtype))
A <- sum(unlist(lapply(strsplit(Mtype[checkgene, van], " "), function(x) if ("Silent" %in% x) { return(1) } else { return(0) })))
B <- length(van) - A
van <- intersect(colnames(Gamma)[which(PM[checkgene, ] > 0)], colnames(Mtype))
C <- sum(unlist(lapply(strsplit(Mtype[checkgene, van], " "), function(x) if ("Silent" %in% x) { return(1) } else { return(0) })))
D <- length(van) - C
table(unlist(lapply(strsplit(Mtype[checkgene, van], " "), function(x) return(names(table(x))[which.max(table(x))]))))
## table(unlist(strsplit(as.vector(Mtype), " ")))
## pdf("Fig10.pdf", width = 10, height = 10)
## p1 <- epiNEM::HeatmapOP(tmp4, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
## p2 <- epiNEM::HeatmapOP(tmp3, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
## p3 <- epiNEM::HeatmapOP(tmp2, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
## p4 <- epiNEM::HeatmapOP(tmp, bordercol = rgb(0,0,0,0), col = "RdBu", clusterx = tmp2, colorkey = NULL)
## print(p1, position=c(0, .5, .5, 1), more=TRUE)
## print(p2, position=c(0, 0, .5, .5), more=TRUE)
## print(p3, position=c(.5, .5, 1, 1), more=TRUE)
## print(p4, position=c(.5, 0, 1, .5))
## sum(tmp == 1 & tmp2 == 1)/sum(tmp == 1)
## dev.off()
source("~/Documents/testing/R/nempi.r")
source("~/Documents/testing/R/nempi_low.r")
bsres <- unembs(D2, Gamma = Gamma, complete = 1, full = TRUE, converged = converged, combi = combi, bsruns = 10, bssize = 0.5)
pdf("temp.pdf", width = 10, height = 5)
epiNEM::HeatmapOP(bsres$Gamma, bordercol = rgb(0,0,0,0), col = "RdBu", colorkey = NULL)
dev.off()
pdf("temp.pdf", width = 20, height = 10)
par(mfrow=c(1,2))
tmp <- bsres$phi
tmp[which(tmp > 0)] <- 1
diag(tmp) <- 0
freqtop <- as.vector(t(bsres$phi)[which(lower.tri(bsres$phi) == TRUE)])/10
freqtop <- freqtop[which(freqtop != 0)]
tmptop <- tmp
tmptop[lower.tri(tmptop)] <- 0
tmptop <- adj2dnf(tmptop)
tmptop <- tmptop[grep("=", tmptop)]
plotDnf(tmptop, edgelwd = 2, edgelab = freqtop, freq = freqtop)
freqbot <- as.vector(t(bsres$phi)[which(upper.tri(bsres$phi) == TRUE)])/10
freqbot <- freqbot[which(freqbot != 0)]
tmpbot <- tmp
tmpbot[upper.tri(tmpbot)] <- 0
tmpbot <- adj2dnf(tmpbot)
tmpbot <- tmpbot[grep("=", tmpbot)]
plotDnf(tmpbot, edgelwd = 2, edgelab = freqbot, freq = freqbot)
dev.off()
##
source("testing/vignettes/TCGA_cluster.r")
pcares <- prcomp(tmp)
library(naturalsort)
library(nem)
library(cluster)
library(Rcpp)
source("~/Documents/mnem/R/mnems.r")
source("~/Documents/mnem/R/mnems_low.r")
sourceCpp("~/Documents/mnem/src/mm.cpp")
res <- mnem(tmp, starts = 10, search = "greedy", type = "cluster3", complete = 1, multi = 1)
tmp2 <- tmp
Rprof("temp.txt", line.profiling=TRUE)
res <- mnem(tmp2, starts = 10, search = "greedy", type = "cluster3", complete = 1, multi = 1, k = 2)
Rprof(NULL)
summaryRprof("temp.txt", lines = "show")$sampling.time
head(summaryRprof("temp.txt", lines = "show")$by.self)
## resk <- mnemk(tmp2, starts = 10, search = "estimate", type = "cluster3", complete = 1, multi = 1)
cluster <- apply(getAffinity(res$probs, mw = res$mw, complete = TRUE), 2, which.max)
par(mfrow=c(1,2))
plot(pcares$rotation[, 1:2], col = cluster)
names(cluster) <- gsub("-01$|-03$", "", colnames(M))
cluster <- cluster[which(names(cluster) %in% clinical$submitter_id)]
cluster <- cluster[order(names(cluster))]
clinical <- rbind(clinical, clinical[which(clinical[, 1] %in% names(cluster)[which(duplicated(names(cluster)))]), ])
clinical <- clinical[order(clinical[, 1]), ]
print(all(clinical[, 1] == names(cluster)))
fit <- survfit(Surv(days_to_death, vital_status) ~ cluster, clinical)
plot(fit, col = 1:length(table(cluster)), lty = 1:length(table(cluster)))
legend(max(clinical$days_to_death, na.rm = TRUE), 1, 1:length(table(cluster)), lty = 1:length(table(cluster)), col = 1:length(table(cluster)), xjust = 1, yjust = 1)
fit <- coxph(Surv(days_to_death, vital_status) ~ cluster + age_at_diagnosis, clinical)
print(fit)
fit <- coxph(Surv(days_to_death, vital_status) ~ cluster + age_at_diagnosis + tumor_stage, clinical)
print(fit)
fit <- coxph(Surv(days_to_death, vital_status) ~ cluster + tumor_stage, clinical)
print(fit)
fit <- coxph(Surv(days_to_death, vital_status) ~ cluster, clinical)
print(fit)
kres <- clustNEM(tmp, nem = 0, k = length(res$comp), nstart = 10)
## kres <- clustNEM(tmp, nem = 0, nstart = 10)
plot(pcares$rotation[, 1:2], col = kres$cluster)
kcluster <- kres$cluster
names(kcluster) <- gsub("-01$|-03$", "", colnames(M))
kcluster <- kcluster[which(names(kcluster) %in% clinical$submitter_id)]
kcluster <- kcluster[order(names(kcluster))]
fit <- survfit(Surv(days_to_death, vital_status) ~ kcluster, clinical)
plot(fit, col = 1:length(table(cluster)), lty = 1:length(table(cluster)))
legend(max(clinical$days_to_death, na.rm = TRUE), 1, 1:length(table(cluster)), lty = 1:length(table(cluster)), col = 1:length(table(cluster)), xjust = 1, yjust = 1)
fit <- coxph(Surv(days_to_death, vital_status) ~ kcluster + age_at_diagnosis, clinical)
print(fit)
fit <- coxph(Surv(days_to_death, vital_status) ~ kcluster, clinical)
print(fit)
|
a3f0e5b1a37f150dedfede52231cd003a983c551
|
24828ecc432eb0026a427bd1e6caa4e21887b1b8
|
/R/SCEI.R
|
00c8261d76af50ccdd9bf07aba44dc8b9ea3f814
|
[] |
no_license
|
cran/CompoundEvents
|
2d13b48c257a4a3d7b736968a690c265ed5cfa45
|
e32b63dabe629aafde2563c379e5e8bc74b335ac
|
refs/heads/master
| 2022-11-20T13:13:05.446657
| 2022-11-06T14:10:02
| 2022-11-06T14:10:02
| 243,953,107
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,870
|
r
|
SCEI.R
|
#'@title Standardized Compound Event Indicator (SCEI)
#'@description Compute SCEI based on monthly precipitation and temperature.
#'@param mp monthly precipitation
#'@param mt monthly temperature
#'@param ts time scale
#'@usage SCEI(mp,mt,ts)
#'@references Hao, Z. et al., 2019a. Statistical prediction of the severity of compound dry-hot events based on El Ni??o-Southern Oscillation. J. Hydrol., 572, 243-250.
#'@return The monthly SCEI series
#'@examples
#'mp=matrix(rnorm(120,0,1),ncol=1)
#'mt=matrix(rnorm(120,0,1),ncol=1)
#'ts=3; # ts<=12 otherwise you should revise line 98
#'nd<-SCEI(mp,mt,ts)
#'d=cbind(mp,mt,nd)
#'testd<-matrix(d, ncol=3,byrow=FALSE)
#write.table(testd,file="testd.txt", sep=" ",row.names=FALSE,col.names=FALSE,quote=FALSE)
#'@export
#Generate monthly data
#' Standardized Compound Event Indicator.
#'
#' Accumulates monthly precipitation (mp) and temperature (mt) over a
#' ts-month window, converts each calendar month's joint empirical
#' probability to a standard-normal score, and returns the flattened
#' monthly series (printing it as well, as the original did).
SCEI <- function(mp, mt, ts) {
  n_years <- length(mp) / 12  # number of years in the record
  # ts-month trailing means; the first ts-1 entries stay NA by construction.
  precip_acc <- matrix(data = NA, nrow = length(mp), ncol = 1)
  temp_acc <- matrix(data = NA, nrow = length(mt), ncol = 1)
  for (i in 1:(length(mp) - ts + 1)) {
    window <- i:(i + ts - 1)
    precip_acc[ts + i - 1] <- mean(mp[window])
    temp_acc[ts + i - 1] <- mean(mt[window])
  }
  # Reshape to year-by-month matrices (rows = years, columns = months).
  precip_ym <- matrix(precip_acc, ncol = 12, byrow = TRUE)
  temp_ym <- matrix(temp_acc, ncol = 12, byrow = TRUE)
  # Month-wise standardized scores accumulate here.
  scores <- matrix(data = NA, nrow = n_years, ncol = 12, byrow = TRUE)
  for (k in 1:12) {
    p_col <- precip_ym[, k]
    t_col <- temp_ym[, k]
    if (is.na(p_col[1])) {
      # First year is NA for early months when ts > 1: drop it before
      # computing the joint empirical distribution.
      jp <- CompoundEvents::Empdis2(p_col[2:n_years], t_col[2:n_years])
      scores[2:n_years, k] <- CompoundEvents::Empdis1(jp)
    } else {
      # Whole column is usable.
      jp <- CompoundEvents::Empdis2(p_col, t_col)
      scores[, k] <- CompoundEvents::Empdis1(jp)
    }
  }
  # Map empirical probabilities to standard-normal quantiles and flatten
  # back to a single chronological monthly series.
  scores <- stats::qnorm(scores)
  out <- matrix(t(scores), ncol = 1)
  print(out)  # NOTE(review): side effect retained from the original implementation
  return(out)
}
|
533eac77c596da46e49c1ad3659ffd5994048748
|
eba8579b34198209cb9d0dcce1889846f22e4195
|
/R/zzz.TwoPhaseInd.R
|
57fb78590313b8d99038373a2a3ecfe5599fad80
|
[] |
no_license
|
cran/TwoPhaseInd
|
c6753bb900626abc285cdb6ed3398c8609fc7785
|
d4627e4739683b98cb90be6b4f82ac70c0655c4c
|
refs/heads/master
| 2022-03-07T04:08:12.344439
| 2022-02-16T23:50:11
| 2022-02-16T23:50:11
| 17,718,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
zzz.TwoPhaseInd.R
|
## without NAMESPACE
##.First.lib <- function(lib, pkg) {
## library.dynam("TwoPhaseInd", pkg, lib)
##}
## with NAMESPACE
## Package load hook: load the compiled shared library shipped with the
## TwoPhaseInd package so its native routines are available.
.onLoad <- function(lib, pkg) {
  library.dynam("TwoPhaseInd", pkg, lib)
}
|
7b784ee612541783b5b57c4137b1d07449fedef3
|
4f928020512b4bf640347529246f69c635a60a5a
|
/R/get_cores_nb.R
|
785c8b96ddd0a911dff257e19981c32daf92d673
|
[] |
no_license
|
SticsRPacks/SticsOnR
|
af7ae9001ba58ebb4c798a4732ed7861588bf7b1
|
3a250b188e0b083c2c56a547c5abb23411c73da6
|
refs/heads/main
| 2023-07-26T18:32:25.163603
| 2023-07-13T07:57:57
| 2023-07-13T07:57:57
| 166,790,380
| 3
| 5
| null | 2023-07-13T07:57:59
| 2019-01-21T09:56:15
|
R
|
UTF-8
|
R
| false
| false
| 1,697
|
r
|
get_cores_nb.R
|
#' Getting the available number of cores for parallel calculations
#'
#' @param parallel Logical for performing parallel loop (TRUE) or not (FALSE)
#' @param required_nb Wanted number of cores
#' @param ... To pass additional arguments (i.e. cores_nb, fake machine cores)
#'
#' @return Available cores number tu use
#'
#' @keywords internal
#'
#' @noRd
#'
#' @examples
#'
#' n_cores <- get_cores_nb()
#'
#' n_cores <- get_cores_nb(parallel = TRUE)
#'
#' n_cores <- get_cores_nb(parallel = TRUE, required_nb = 4)
#'
#'
get_cores_nb <- function(parallel = FALSE, required_nb = NA, ...) {
  # Sequential execution always runs on a single core.
  if (!parallel) {
    return(1)
  }
  # Machine core count, or a fake count injected through `cores_nb`
  # in the dots (used by the tests).
  available <- get_cores(...)
  # Keep one core free for the rest of the system when possible.
  if (available >= 2) {
    available <- available - 1
  }
  # Honour the requested number only when it fits within the budget;
  # otherwise fall back to everything available.
  if (!base::is.na(required_nb) && required_nb <= available) {
    return(required_nb)
  }
  available
}
#' Detecting machine cores number
#'
#' @param ... To pass additional argument (for testing purpose)
#'
#' @return Total cores number
#'
#' @keywords internal
#'
#' @noRd
#'
#' @examples
#'
#' get_cores()
#'
#' get_cores(cores_nb = 4)
#'
#' @importFrom parallel detectCores
#'
get_cores <- function(...) {
  # A `cores_nb` entry in the dots overrides detection (testing hook).
  extras <- list(...)
  if ("cores_nb" %in% names(extras)) {
    return(extras$cores_nb)
  }
  # Otherwise report the machine's real core count.
  detectCores()
}
|
de3879af905e84076d42437530d6b53cf1ecb095
|
99f31a540238f3f15f74d4a37f7d3d2343d47b05
|
/R/class_settings.R
|
af5d3716b2bbf425c50f7b6ca86b346eb5f9b5f9
|
[
"MIT"
] |
permissive
|
Robinlovelace/targets
|
bd8b43e973d9bbeaedee558b08af5ebd3e774a66
|
a6bd17af91c472349a60f802e09168e2b3f6bca0
|
refs/heads/master
| 2022-12-14T16:52:03.539887
| 2020-09-18T15:31:33
| 2020-09-18T15:31:33
| 296,736,268
| 0
| 0
|
NOASSERTION
| 2020-09-18T21:48:38
| 2020-09-18T21:48:37
| null |
UTF-8
|
R
| false
| false
| 2,801
|
r
|
class_settings.R
|
# Construct a validated-by-convention settings record for a target.
# Derives `growth` (the pattern type: the first function symbol of the
# pattern expression, e.g. "map"/"cross", or "none" when pattern is NULL)
# and `dimensions` (the non-function symbols the pattern iterates over),
# then delegates storage to settings_new().
settings_init <- function(
  name = character(0),
  format = "rds",
  pattern = NULL,
  iteration = "vector",
  error = "stop",
  memory = "persistent",
  deployment = "remote",
  priority = 0,
  resources = list(),
  storage = "local",
  retrieval = storage
) {
  # %|||% supplies "none" when the pattern expression yields no symbol.
  growth <- all.vars(pattern, functions = TRUE, max.names = 1L) %|||% "none"
  dimensions <- all.vars(pattern, functions = FALSE)
  settings_new(
    name = name,
    format = format,
    growth = growth,
    dimensions = dimensions,
    iteration = iteration,
    error = error,
    memory = memory,
    deployment = deployment,
    priority = priority,
    resources = resources,
    storage = storage,
    retrieval = retrieval
  )
}
# Low-level settings constructor: forces every argument (so no promise
# captures a stale value) and returns the function's own environment,
# which then serves as a mutable record with one binding per field.
settings_new <- function(
  name = NULL,
  format = NULL,
  growth = NULL,
  dimensions = NULL,
  iteration = NULL,
  error = NULL,
  memory = NULL,
  deployment = NULL,
  priority = NULL,
  resources = NULL,
  storage = NULL,
  retrieval = NULL
) {
  force(name)
  force(format)
  force(growth)
  force(dimensions)
  force(iteration)
  force(error)
  force(memory)
  force(deployment)
  force(priority)
  force(resources)
  force(storage)
  force(retrieval)
  # The environment itself is the settings object.
  environment()
}
# Build the storage backend implied by the settings' format and resources.
settings_produce_store <- function(settings) {
  store_init(settings$format, settings$resources)
}
# Deep-copy a settings record. Because a settings object is the
# environment returned by settings_new(), copying every formal of
# settings_new() out of it and calling the constructor again yields an
# independent record with identical field values.
settings_clone <- function(settings) {
  fields <- names(formals(settings_new))
  do.call(settings_new, mget(fields, envir = settings))
}
# Validate the derived pattern fields: growth must be a scalar string in
# the allowed set, and a non-trivial pattern must name at least one target.
settings_validate_pattern <- function(growth, dimensions) {
  assert_scalar(growth)
  assert_chr(growth)
  assert_chr(dimensions)
  valid_growth <- c("none", "map", "cross")
  if (!growth %in% valid_growth) {
    throw_validate("pattern must be one of \"none\", \"map\", or \"cross\".")
  }
  if (growth != "none" && length(dimensions) < 1L) {
    throw_validate("pattern must accept at least one target")
  }
}
# Assert every invariant of a settings record; errors via the assert_*
# helpers on the first violation, returns invisibly on success.
settings_validate <- function(settings) {
  # The record must carry exactly the fields settings_new() creates.
  assert_correct_fields(settings, settings_new)
  assert_name(settings$name)
  assert_format(settings$format)
  settings_validate_pattern(settings$growth, settings$dimensions)
  assert_chr(settings$iteration)
  assert_in(settings$error, c("stop", "continue", "save"))
  assert_in(settings$memory, c("persistent", "transient"))
  assert_in(settings$deployment, c("local", "remote"))
  # Priority is a single number in [0, 1].
  assert_scalar(settings$priority)
  assert_ge(settings$priority, 0)
  assert_le(settings$priority, 1)
  assert_list(settings$resources)
  assert_in(settings$storage, c("local", "remote"))
  assert_in(settings$retrieval, c("local", "remote"))
  invisible()
}
|
a9488f4e67aae2b8530fde3acdd479f1af2815ac
|
04166df482f32bfb7bdec0be93b6a7138e47ef7e
|
/code_zebrafish/final_analysis/heatmaps_of_effect_sizes.R
|
e2c0fe6281375cb3a19148e374d821fea1d5c88a
|
[] |
no_license
|
jernejaMislej/all_code
|
3b25ce585d4deb28a42ebb4dccc3be2d6a8cb9ed
|
32d5fe67079e40cea47a5678b30b91de6d9e0ba8
|
refs/heads/master
| 2021-08-28T13:45:13.906582
| 2017-12-12T10:41:49
| 2017-12-12T10:41:49
| 105,473,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,720
|
r
|
heatmaps_of_effect_sizes.R
|
## Command-line driven script: args[1] = experimental condition,
## args[2] = data type; loads per-condition effect-size tables and
## prepares matrices/labels for the heatmaps drawn below.
args<-commandArgs(trailingOnly = TRUE)
condition<-args[1]
type<-args[2]
library(gplots)
## Map the condition code to a human-readable disease-state label.
state="hypoactive"
if(condition=="DarkApoHigh"){
	state="hyperactive type I"
}else if(condition=="DarkPTZ"){
	state="hyperactive type II"
}
## Collect effect-size ("all.txt") and p-value tables from the working dir
## and bind their columns together.
files<-list.files()
#files_vals<-files[-grep("all.txt|with_values.txt|p_values.txt",files)]
files_vals<-files[grep("all.txt",files)]
files_p_vals<-files[grep("p_values.txt",files)]
all_files_vals<-read.table(files_vals[1],header=TRUE)
for(fil_val in files_vals[-1]){
	all_files_vals<-cbind(all_files_vals,read.table(fil_val,header=TRUE))
}
all_files_p_vals<-read.table(files_p_vals[1],header=TRUE)
for(fil_val in files_p_vals[-1]){
	all_files_p_vals<-cbind(all_files_p_vals,read.table(fil_val,header=TRUE))
}
## Drop variables with any effect size > 4, then keep the (up to) 25
## variables with the largest total deviation from 1.
all_files_vals<-all_files_vals[apply(all_files_vals,2,function(x) length(x[x>4])==0)]
sum_all_files_vals<-round(apply(all_files_vals,2,function(x) sum(abs(x-1))),2)
max_vars<-min(25,length(sum_all_files_vals))
all_files_vals<-all_files_vals[,names(sum_all_files_vals[order(-sum_all_files_vals)])[1:max_vars]]
all_files_p_vals<-all_files_p_vals[,names(sum_all_files_vals[order(-sum_all_files_vals)])[1:max_vars]]
## Clean up row labels for plotting (strip prefixes, relabel conditions).
row_names_correct<-gsub("Group", "", rownames(all_files_vals))
row_names_correct<-gsub("Dark$", "healthy", row_names_correct)
row_names_correct<-gsub("Dark..*", state, row_names_correct)
row_names_correct<-gsub("Factor", "", row_names_correct)
row_names_correct<-gsub("micro", "u", row_names_correct)
row_names_correct<-gsub("PCAP814", "PCAP3", row_names_correct)
row_names_correct<-gsub("PCAP931", "PCAP4", row_names_correct)
rownames(all_files_vals)<-row_names_correct
rownames(all_files_p_vals)<-row_names_correct
## Convert p-values to significance symbols for heatmap cell annotations.
all_files_p_vals_sym<-all_files_p_vals
all_files_p_vals_sym[all_files_p_vals>0.05]<-"n.s"
all_files_p_vals_sym[all_files_p_vals<=0.05 & all_files_p_vals>0.01]<-"*"
all_files_p_vals_sym[all_files_p_vals<=0.01 & all_files_p_vals>0.001]<-"**"
all_files_p_vals_sym[all_files_p_vals<=0.001]<-"***"
## Plot margins depend on condition / data type (longer labels need more room).
right_margin<-18
if(condition!="DarkApoLow"){
	right_margin<-21
}
bottom_margin<-10
if(type=="B1"){
	bottom_margin<-13
}
#get the keys...stupid R
## One heatmap per concentration (with color key): rows matching the
## concentration or the control are selected by regex on the row labels.
#1_microM
all_files_vals_1<-all_files_vals[grep("1uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_1_microM_",condition,"_",type,"KEY.png"),width=1500,height=750)
heatmap.2(as.matrix(all_files_vals_1), col=greenred(100), key=T, keysize=0.75,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("1uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("1uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.2,cexCol=1.2, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key")
dev.off()
#3_microM
all_files_vals_3<-all_files_vals[grep("3uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_3_microM_",condition,"_",type,"KEY.png"),width=1500,height=750)
heatmap.2(as.matrix(all_files_vals_3), col=greenred(100), key=T, keysize=0.75,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("3uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("3uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.2,cexCol=1.2, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key")
dev.off()
#10_microM
all_files_vals_10<-all_files_vals[grep("10uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_10_microM_",condition,"_",type,"KEY.png"),width=1500,height=750)
heatmap.2(as.matrix(all_files_vals_10), col=greenred(100), key=T, keysize=0.75,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("10uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("10uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.2,cexCol=1.2, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key")
dev.off()
## All concentrations together, excluding time-effect rows.
#without time, all together
all_files_vals_no_time<-all_files_vals[-grep("Time",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/no_time/",condition,"_heatmap_all_",condition,"_",type,"KEY.png"),width=1500,height=750)
heatmap.2(as.matrix(all_files_vals_no_time), col=greenred(100), key=T, keysize=0.75,
          trace="none",dendrogram="none" ,labRow=row_names_correct[-grep("Time",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[-grep("Time",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1,cexCol=1.2, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key")
dev.off()
## Second pass: stretched-layout versions ("MAP" files) of the same plots,
## using an explicit lmat/lhei/lwid layout and larger margins. Each plot is
## wrapped in tryCatch so a single failing subset does not kill the script.
#get a more streched out map....stupid R
lwid = c(0.4,4)
lhei = c(0.01,2,0.01)
lmat = rbind(c(4,0),c(3,1),c(2,0))
right_margin<-22
if(condition!="DarkApoLow"){
	right_margin<-25
}
bottom_margin<-12
if(type=="B1"){
	bottom_margin<-16
}
#with time
#1_microM
all_files_vals_1<-all_files_vals[grep("1uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_1_microM_",condition,"_",type,"MAP.png"),width=1500,height=1000)
tryCatch({
heatmap.2(as.matrix(all_files_vals_1), col=greenred(100), key=T, keysize=0.75,lmat=lmat, lhei=lhei, lwid=lwid,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("1uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("1uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.5,cexCol=1.5, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key", notecex=1.2)
}, error=function(e){})
dev.off()
#3_microM
all_files_vals_3<-all_files_vals[grep("3uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_3_microM_",condition,"_",type,"MAP.png"),width=1500,height=1000)
tryCatch({
heatmap.2(as.matrix(all_files_vals_3), col=greenred(100), key=T, keysize=0.75,lmat=lmat, lhei=lhei, lwid=lwid,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("3uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("3uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.5,cexCol=1.5, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key", notecex=1.2)
}, error=function(e){})
dev.off()
#10_microM
all_files_vals_10<-all_files_vals[grep("10uM|Control",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/with_time/",condition,"_heatmap_10_microM_",condition,"_",type,"MAP.png"),width=1500,height=1000)
tryCatch({
heatmap.2(as.matrix(all_files_vals_10), col=greenred(100), key=T, keysize=0.75,lmat=lmat, lhei=lhei, lwid=lwid,
          trace="none",dendrogram="none" ,labRow=row_names_correct[grep("10uM|Control",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[grep("10uM|Control",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.5,cexCol=1.5, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key", notecex=1.2)
}, error=function(e){})
dev.off()
#without time, all together
all_files_vals_no_time<-all_files_vals[-grep("Time",rownames(all_files_vals)),]
png(paste0("/home/jerneja/git/zebrafish_action_sequence_project/results/output/heatmaps/no_time/",condition,"_heatmap_all_",condition,"_",type,"MAP.png"),width=1500,height=1000)
tryCatch({
heatmap.2(as.matrix(all_files_vals_no_time), col=greenred(100), key=T, keysize=0.75,lmat=lmat, lhei=lhei, lwid=lwid,
          trace="none",dendrogram="none" ,labRow=row_names_correct[-grep("Time",rownames(all_files_vals))],
          srtCol=40, cellnote=as.matrix(all_files_p_vals_sym[-grep("Time",rownames(all_files_vals)),]),notecol="gray60",margins=c(bottom_margin,right_margin),
          cexRow=1.1,cexCol=1.5, density.info="density", breaks=seq(0,3.5,length.out = 101),key.title ="Color key", notecex=1.2)
}, error=function(e){})
dev.off()
|
7cdbe349c42d592764f75ed3a43aa0155ba96bd5
|
3ca5d6ebe2907ae2b76ce11608985dbd672d6ed5
|
/man/add_restart.Rd
|
e6c6d6b627cecdf8f29d5e9b3a384ac9dd5b34d4
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wlandau-lilly/spiro
|
0d19f6b214f1ef4e8edb8b20056dbeda8139cc86
|
6fb80765f4afc0969ef7e2cb88fc07f4d6906564
|
refs/heads/master
| 2023-03-15T18:06:38.065027
| 2020-12-14T21:16:45
| 2020-12-14T21:16:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,268
|
rd
|
add_restart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spirograph.R
\name{add_restart}
\alias{add_restart}
\title{Add start and stop animation buttons to image.}
\usage{
add_restart(
input,
location = c(0.05, 0.95),
fill = "white",
color = "black",
radius = 20,
start_label = "Start",
stop_label = "Stop",
font_family = "inherit",
font_size = 14,
openfile = TRUE,
output = input
)
}
\arguments{
\item{input}{File name of .svg file to input}
\item{location}{a vector of coordinates for the start button}
\item{fill}{button fill color}
\item{color}{button text color}
\item{radius}{button radius}
\item{start_label}{start button text}
\item{stop_label}{stop button text}
\item{font_family}{button text font family}
\item{font_size}{button text font size}
\item{openfile}{Open file in default program for .svg format. Defaults to TRUE.}
\item{output}{File name of .svg file to output. Default is to overwrite the input file.}
}
\value{
output name
}
\description{
Add start and stop animation buttons to image.
}
\examples{
\dontshow{.old_wd <- setwd(tempdir())}
library(spiro)
spiro(fixed_radius = 3,
cycling_radius = 1,
file = "image_spin.svg") \%>\%
image_spin(rpm = 1)
\dontshow{setwd(.old_wd)}
}
|
4cacf373921ba09c4fae98a447304871c95333dd
|
017e1d3c8002e6b0835a97985168d6fb2bb652f0
|
/R/stats.R
|
b201c8f589d6783680a3610eb4ec7bbf92895387
|
[] |
no_license
|
wnk4242/Rcheatsheet
|
e38baa4b09713c931caaef64eee5505b2b3a17b8
|
70054150c84b00affe6f525ce0f900755dd3e919
|
refs/heads/master
| 2021-07-26T19:55:23.175155
| 2020-07-03T14:19:32
| 2020-07-03T14:19:32
| 196,735,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
stats.R
|
######Package code Statistical analysis######
#' Perform multiple regression
#' currently not using
# Interactive help screen: prints a colored (crayon) example of a multiple
# regression call, then returns control to the analysis-selection prompt.
mulreg<- function(){
  cat(rep('\n',1))
  cat(bold$blue('\n|perform multiple regression\n\n'))
  cat(bold$red('Example:\n'),'\t lm(y ~ x1 + x2 + ... + xi)\n\n')
  # INSERT EXTRA EXAMPLE CODE IF APPLICABLE #
  cat(rep('\n', 3))
  # Hand back to the menu loop defined elsewhere in the package.
  ask_stanalysis()
}
|
887c994f6c9e0d3eb1cbc0b92481365e090c6ea4
|
35e707c55cff22002c6c66f967837736e3e0a0d8
|
/R/SWIMw_helper.R
|
06aa36c199803d21d6eab39a3b12783783b033ef
|
[] |
no_license
|
spesenti/SWIM
|
9f4416b990e9bc82109b87b849ffd208f8bfe77f
|
4e9cd0b2b4e4ad36e0798b1d67fdcd03c2d7114a
|
refs/heads/master
| 2022-05-04T10:16:25.964880
| 2022-01-10T12:41:16
| 2022-01-10T12:41:16
| 185,445,679
| 5
| 2
| null | 2022-01-09T23:04:06
| 2019-05-07T17:08:21
|
HTML
|
UTF-8
|
R
| false
| false
| 1,680
|
r
|
SWIMw_helper.R
|
# helper functions
# Build a grid over (a, b): N equally spaced interior points plus ten
# logarithmically spaced points hugging each endpoint (so quantile
# functions with steep tails are sampled densely near the boundaries).
.ab_grid <- function(a, b, N){
  eps <- 0.002
  pad <- 10^(seq(from = -10, to = log10(eps), length.out = 10)) - 1e-11
  interior <- seq(from = a + eps, to = b - eps, length.out = N)
  c(a + pad, interior, b - rev(pad))
}
# Return the numerical inverse of f on [lower, upper]: the result is a
# function that root-finds f(x) = y, extending the interval if needed.
.inverse <- function(f, lower = -100, upper = 100){
  function(y) {
    gap <- function(x) f(x) - y
    stats::uniroot(gap, lower = lower, upper = upper, extendInt = 'yes')$root
  }
}
# Distortion risk measure: integrate the quantile function F_inv weighted
# by the distortion density gamma over the probability grid u.
.rm <- function(F_inv, gamma, u){
  return(.integrate(F_inv*gamma, u))
}
# Trapezoidal rule on an arbitrary (possibly non-uniform) grid x:
# average adjacent ordinates of f and weight by the interval widths.
.integrate <- function(f, x){
  left <- f[-length(f)]
  right <- f[-1]
  sum((left + right) / 2 * diff(x))
}
# Compute per-observation scenario weights from a change of measure.
# gY_fn / fY_fn are (unnormalized) densities under Q and P on y_grid;
# the Radon-Nikodym derivative dQ/dP is smoothed onto each data point
# with a Gaussian kernel of bandwidth hY, then normalized to mean 1.
.get_weights <- function(y_data, y_grid, gY_fn, fY_fn, hY){
  # Get dQ/dP
  g_val <- gY_fn(y_grid)
  # g.val[is.na(g.val)] <- 0
  # Normalize both densities so each integrates to 1 on the grid.
  g_val <- g_val/.integrate(g_val, y_grid)
  f_val <- fY_fn(y_grid)/.integrate(fY_fn(y_grid), y_grid)
  dQ_dP <- g_val / f_val
  # Get weights
  w <- vector()
  for(i in 1:length(y_data)){
    # Kernel-smoothed evaluation of dQ/dP at y_data[i].
    w <- c(w, .integrate(dQ_dP*stats::dnorm((y_grid - y_data[i])/hY)/hY, y_grid))
  }
  # Normalize weights
  # Rescale so the weights sum to the sample size (mean weight = 1).
  w <- w / sum(w) * length(y_data)
  return(list(w))
}
# Expected HARA utility of the quantile function F_inv on the grid u.
# sign(.)*abs(.)^eta extends the power to negative arguments (a plain
# `^` would yield NaN for a negative base with fractional eta).
.hara_utility<- function(a, b, eta, u, F_inv){
  # f = (1 - eta) / eta * (a * F_inv / (1 - eta) + b) ^ eta
  dummy = a * F_inv / (1 - eta) + b
  f = (1 - eta) / eta * sign(dummy) * abs(dummy) ^ eta
  return(.integrate(f, u))
}
# Invert the HARA first-order condition nu(g) = G_inv pointwise.
# nu(x) = x - lam * a * (a/(1-eta) * x + b)^(eta - 1); each component of
# the result solves nu(g_i) = G_inv[i] by root finding on
# (-b*(1-eta)/a, upper], the domain where the power term is defined.
#
# Fixes over the original: preallocates the result instead of growing it
# with append() (O(n^2)), uses seq_along() so a zero-length u yields a
# zero-length result instead of an invalid 1:0 loop, and replaces the
# undocumented `**` operator with `^`. Behavior is otherwise unchanged.
.utransform <- function(a, b, eta, u, G_inv, lam, upper){
  nu <- function(x) x - lam * a * (a / (1 - eta) * x + b)^(eta - 1)
  g <- numeric(length(u))
  for (i in seq_along(u)){
    g[i] <- stats::uniroot(function(x) nu(x) - G_inv[i],
                           lower = -b * (1 - eta) / a + 1e-10,
                           upper = upper)$root
  }
  g
}
|
5a5c05567937a3e828793a76c15bbbdc943eb0b7
|
50ed3fc59cbee279520f1d21269f17a87c691e8c
|
/plot4.R
|
87a101a45fab4a8d24a0226b6a3210000041654d
|
[] |
no_license
|
alexnsy/ExData_Plotting1
|
ede49e16b341267ed8d67122b8d6c4d8b315ce23
|
6655e1923453ed668d0ea9d0d61956d0e6d1c666
|
refs/heads/master
| 2021-01-18T07:34:23.870627
| 2015-10-10T16:39:54
| 2015-10-10T16:39:54
| 44,002,298
| 0
| 0
| null | 2015-10-10T08:38:58
| 2015-10-10T08:38:58
| null |
UTF-8
|
R
| false
| false
| 1,623
|
r
|
plot4.R
|
##Read the data file (semicolon-separated; "?" marks missing values)
data <- read.table("household_power_consumption.txt", header=TRUE, sep = ";",na.strings = "?")
## Subset the data to only data for the dates of interest 1/2/2007 and 2/2/2007
selectdata <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
## Format the Date
## NOTE(review): this result is discarded (not assigned), so Date stays a
## string; the strptime() below relies on that original "%d/%m/%Y" format,
## so assigning it here would break the datetime parsing.
as.Date(selectdata$Date, format = "%d/%m/%Y")
## Format the Time as a combination of Date and Time
selectdata$Time <- strptime(paste(selectdata$Date, selectdata$Time), "%d/%m/%Y %H:%M:%S")
## Set to plot to a png file, 2x2 panel layout
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
## 1st plot: Global Active Power vs Datetime
plot(selectdata$Time, selectdata$Global_active_power, xlab="datetime", ylab="Global Active Power", type="n")
lines(selectdata$Time, selectdata$Global_active_power)
## 2nd plot: Voltage vs Datetime
plot(selectdata$Time, selectdata$Voltage, xlab="datetime", ylab="Voltage", type="n")
lines(selectdata$Time, selectdata$Voltage)
## 3rd plot: Sub Metering 1/2/3 vs Datetime
plot(selectdata$Time, selectdata$Sub_metering_1, xlab="", ylab="Energy sub metering", type="n")
lines(selectdata$Time, selectdata$Sub_metering_1)
lines(selectdata$Time, selectdata$Sub_metering_2,col="red")
lines(selectdata$Time, selectdata$Sub_metering_3,col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"), lwd=c(1,1,1))
## 4th plot: Global Reactive Power vs Datetime
plot(selectdata$Time, selectdata$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="n")
lines(selectdata$Time, selectdata$Global_reactive_power)
## Close the graphics device
dev.off()
|
cf046ef8baf9ae8e4fc6a4feb49279106174c1ef
|
bd986e1216c71b4efcddf1c1a835030e524be04a
|
/tests/testthat/test-listReferenceBases.R
|
90c754161217e84b0d3b41e4eb0f4fdcf1b7f463
|
[] |
no_license
|
labbcb/GA4GHclient
|
43ac3a6b4bd9ab802ddff20bfc57ec0c2871c44c
|
ec3a6efba8c3e8698b467620dccf441d8419e335
|
refs/heads/master
| 2021-01-19T07:28:36.178878
| 2017-10-30T16:54:30
| 2017-10-30T16:54:30
| 68,452,125
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
test-listReferenceBases.R
|
context("listReferenceBases")
## Integration test against the public 1000 Genomes GA4GH server:
## fetches the first reference of the first reference set and checks the
## returned bases. Requires network access; skipped on Bioconductor.
test_that("listReferenceBases works", {
  skip_on_bioc()
  host <- "http://1kgenomes.ga4gh.org/"
  referenceSetId <- searchReferenceSets(host, nrows = 1)$id
  referenceId <- searchReferences(host, referenceSetId, nrows = 1)$id
  response <- listReferenceBases(host, referenceId, start = 1, end = 3000000)
  # The sequence comes back as a Biostrings BString of the requested span.
  expect_s4_class(response, "BString")
  expect_equal(length(response), 3000000)
})
|
2cac17b23c012037582533180fad9ecb7398d995
|
4af0f7862da50b8a20b8554260285ad32e3840cf
|
/scripts/ch05/geometric-distribution.r
|
427c96be475d9cf4e58d06d53043a2179ad0ad18
|
[] |
no_license
|
StefanoCiotti/MyProgectsFirst
|
aefd345971c5578dfbec7662d11c3f368d6c17b7
|
04794b634b9384da62ae6ba926fd59ca5a7d3d13
|
refs/heads/master
| 2020-04-03T16:35:38.941185
| 2018-10-30T14:05:55
| 2018-10-30T16:57:26
| 155,099,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
geometric-distribution.r
|
## Plot the geometric CDF for two success probabilities side by side.
PI <- c(0.3, 0.7) ; x <- 0 : 10
xl <- expression(italic(x))
par(mfrow = c(1, 2))
for(i in 1:2){
  # Cumulative probability P(X <= x) for success probability PI[i].
  distr <- pgeom(x, PI[i])
  yl <- bquote(italic(P(X<=x)~~~~~pi) == .(PI[i]))
  # Step plot, as befits a discrete distribution function.
  plot(x, distr, type = 's', lwd = 2,
       xlab = xl, ylab = yl)
}
|
0ade65780d0fbed266c13104c7f044f1f60ef610
|
c92e38443e8e4b8091eaf224d9fbc246e8a88179
|
/regression_code.R
|
429b42487ca10ff995ad564cacb2a1a8eea601b8
|
[
"MIT"
] |
permissive
|
NA-Dev/vino-verde-regression
|
d76b23d3722c22b5be800dce55a9d1d150432f27
|
3f987b49c930836d4596496bc5fae8fe62ca1fdc
|
refs/heads/main
| 2023-02-23T18:11:07.449809
| 2021-01-25T21:36:49
| 2021-01-25T21:36:49
| 332,880,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,014
|
r
|
regression_code.R
|
library(nnet)
library(ggplot2)
library(caret)
library(ROCR)
library(leaps)
library(dplyr)
library(hrbrthemes)
library(ggcorrplot)
library(tidyverse)
setwd("C:/Users/micha/Documents/STAT 6021/Project/Project 2/Stats-6021-Project-2")
########################### exploratory analysis #####################################
data <-read.csv(file = "wineQualityWhites_Reds_combind.csv")
datared <-read.csv(file = "wineQualityReds.csv")
datawhite <-read.csv(file = "wineQualityWhites.csv")
datared$X <- NULL
datawhite$X <- NULL
attach(data)
#force categorical columns to factors
wine_type <- factor(wine_type)
levels(wine_type) <- c("white", "red")
contrasts(wine_type)
#data exploration
#create quality histogram
ggplot(data, aes(quality, fill = wine_type)) +
geom_histogram(alpha = .4, position = 'identity', binwidth = 1)+
scale_x_continuous(breaks = seq(0,10,1), lim = c(0,10)) +labs(title = "Count of Wine by Quality")
#correlation
#white
data2 <- datawhite
data2$wine_type <- NULL
m <- cor(data2)
ggcorrplot(m, hc.order = TRUE, type = "lower",
lab = TRUE) +labs(title = "White Wine Correlation")
#reds
data2 <- datared
data2$wine_type <- NULL
m <- cor(data2)
ggcorrplot(m, hc.order = TRUE, type = "lower",
lab = TRUE) +labs(title = "Red Wine Correlation")
#quality vs alcohol
#reds
boxplot(datared$alcohol~datared$quality,
data=datawhite,
main="Quality vs Alcohol Content Red Wine",
xlab="Quality",
ylab="Alcohol Content",
col="red",
border="black"
)
#White
boxplot(datawhite$alcohol~datawhite$quality,
data=datawhite,
main="Quality vs Alcohol Content White Wine",
xlab="Quality",
ylab="Alcohol Content",
col="white",
border="black"
)
par(mfrow=c(2,2))
# need to check for outliers for the red and white data sets
#red wine
n<-length(datared$quality)
p<-11
# resultred <- lm(datared$quality~datared$fixed.acidity +
# datared$volatile.acidity + datared$citric.acid +
# datared$residual.sugar + datared$chlorides +
# datared$free.sulfur.dioxide + datared$total.sulfur.dioxide +
# datared$density + datared$pH + datared$sulphates +datared$alcohol)
resultred <- lm(datared$quality ~ datared$alcohol + datared$density + datared$sulphates + datared$volatile.acidity)
#leverage
lev<-lm.influence(resultred)$hat
sort(lev)
2*p/n
plot(lev, main="Red Wine Leverages", ylim=c(0,0.2))
abline(h=2*p/n, col="red")
lev[lev>2*p/n]
# Are there any influential observations based on DFITs?
##influential observations
DFFITS<-dffits(resultred)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
#1f Are there any influential observations based on Cooks
COOKS<-cooks.distance(resultred)
COOKS[COOKS>qf(0.5,p,n-p)]
plot(resultred, main="Red Wine", pch = 18, col='red' , which =c(4))
#white Wine outliers
n<-length(datawhite$quality)
p<-11
# resultwhite <- lm(datawhite$quality~datawhite$fixed.acidity +
# datawhite$volatile.acidity + datawhite$citric.acid +
# datawhite$residual.sugar + datawhite$chlorides +
# datawhite$free.sulfur.dioxide + datawhite$total.sulfur.dioxide +
# datawhite$density + datawhite$pH + datawhite$sulphates +datawhite$alcohol)
resultwhite <- lm(datawhite$quality ~ datawhite$alcohol + datawhite$pH + datawhite$sulphates + datawhite$density + datawhite$volatile.acidity + datawhite$residual.sugar )
#leverage
lev<-lm.influence(resultwhite)$hat
sort(lev)
2*p/n
plot(lev, main="White Wine Leverages", ylim=c(0,0.6))
abline(h=2*p/n, col="blue")
lev[lev>2*p/n]
# Are there any influential observations based on DFITs?
##influential observations
DFFITS<-dffits(resultwhite)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
#Are there any influential observations based on Cooks
COOKS<-cooks.distance(resultwhite)
COOKS[COOKS>qf(0.5,p,n-p)]
plot(resultwhite, main="White Wine", pch = 18, col='red' , which =c(4))
#outliers for wine type
par(mfrow=c(1,2))
resultType <- lm(data$quality ~ data$density + data$residual.sugar + data$total.sulfur.dioxide + data$volatile.acidity +
data$chlorides + data$sulphates + data$alcohol + data$free.sulfur.dioxide)
n<-length(data$quality)
p<-11
#leverage
lev<-lm.influence(resultType)$hat
sort(lev)
2*p/n
plot(lev, main="Wine Type Leverages", ylim=c(0,0.6))
abline(h=2*p/n, col="blue")
lev[lev>2*p/n]
# Are there any influential observations based on DFITs?
##influential observations
DFFITS<-dffits(resultType)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
# Are there any influential observations based on Cooks
COOKS<-cooks.distance(resultType)
COOKS[COOKS>qf(0.5,p,n-p)]
plot(resultType, main="White Wine", pch = 18, col='red' , which =c(4))
################## Red Wine Analysis of Quality ###################
wines_red <- read.csv('wineQualityReds.csv', header=TRUE, row.names=1)
wines_white <- read.csv('wineQualityWhites.csv', header=TRUE, row.names=1)
# Create a binomial class for quality (1 for Good and 0 for Bad)
wines_red$quality_class [wines_red$quality >= 0] <- 0 # Bad
wines_red$quality_class [wines_red$quality >= 6] <- 1 # Good
# Add levels and convert quality class to factor
levels(wines_red$quality_class) <- c('Good', 'Bad')
wines_red$quality_class <- as.factor(wines_red$quality_class)
contrasts(wines_red$quality_class)
# Evaluate model using validation
set.seed(111)
index <- sample.int(nrow(wines_red), floor(.70*nrow(wines_red)), replace = F)
train <-wines_red[index, ]
test <-wines_red[-index, ]
# Produce a logistic regression for quality class on training set
result_red <- glm(quality_class~alcohol+volatile.acidity+fixed.acidity+citric.acid+residual.sugar+chlorides+total.sulfur.dioxide+free.sulfur.dioxide+density+pH+sulphates, data=train, family="binomial")
summary(result_red)
## The summary tells us that some of the predictors are not significant in the presence of all the predictors.
## After validation of this model, we may try a reduced model
# Predictions on test set using logistic regression
preds<-round(predict(result_red, test, type="response"))
# Confusion matrix for predictions
confusion_table <- table(preds, test$quality_class)
n <- sum(confusion_table) # number of instances
diag <- diag(confusion_table)
accuracy <- sum(diag) / n
## Accuracy of the model is 0.7554859
TP <- confusion_table[2,2]
FP <- confusion_table[1,2]
FN <- confusion_table[2,1]
TN <- confusion_table[1,1]
false_pos <- FP/TN
false_negative <- FN/TP
precision <- TP/(TP+FP)
recall <- TP/(TP+FN)
## We will assess the predictors using box plots to decide which predictors
## should stay in the model
# Produce Box Plots
p1<-ggplot(aes(y = fixed.acidity, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p2<-ggplot(aes(y = volatile.acidity, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p3<-ggplot(aes(y = citric.acid, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p4<-ggplot(aes(y = residual.sugar, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p5<-ggplot(aes(y = chlorides, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p6<-ggplot(aes(y = free.sulfur.dioxide, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p7<-ggplot(aes(y = density, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p8<-ggplot(aes(y = total.sulfur.dioxide, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p9<-ggplot(aes(y = pH, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p10<-ggplot(aes(y = sulphates, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
p11<-ggplot(aes(y = alcohol, x = factor(quality_class), fill = factor(quality_class)), data = wines_red) + geom_boxplot()
require(gridExtra)
grid.arrange(p1, p2, p3, p4, ncol=2, nrow=2) # volatile acidity, citric acid
grid.arrange(p5, p6, p7, p8, ncol=2, nrow=2) # density, total.sulfur.dioxide
grid.arrange(p9, p10, p11, ncol=2, nrow=2) # alcohol, sulphates
grid.arrange(p2 ,p3, p7, p8, ncol=2, nrow=2)
grid.arrange(p10, p11, nrow=2)
## It looks like alcohol, density, sulphates, volatile acidity, citric acid and total sulfur dioxide may be significant.
## We will try a reduced model using only these predictors.
# Check VIF
library(faraway)
vif(train[,c(1:11)])
# Reduced model
result_red_reduced <- glm(quality_class~alcohol+volatile.acidity+chlorides+total.sulfur.dioxide+free.sulfur.dioxide+sulphates, data=train, family="binomial")
summary(result_red_reduced)
# Is this better than the full model?
deltaG2_partial<-result_red_reduced$deviance-result_red$deviance ##take difference in residual deviances between the two models
deltaG2_partial
1-pchisq(deltaG2_partial,5) ##df is 5 since we have 5 additional parameters in the bigger model
# Is this model useful? (likelihood-ratio test against the intercept-only model)
# BUG FIX: this is the red-wine section, but the original referenced
# result_white_reduced, which is not defined until the white-wine section below;
# the test must use the red reduced model fitted just above.
deltaG2<-result_red_reduced$null.deviance-result_red_reduced$deviance ##take difference between the null and residual deviance for our model
# BUG FIX: df was 5 but the reduced red model has 6 predictors beyond the
# intercept (alcohol, volatile.acidity, chlorides, total.sulfur.dioxide,
# free.sulfur.dioxide, sulphates).
1-pchisq(deltaG2,6) ##df is 6 since our model has 6 additional parameters other than the intercept
# Predict new values on test set using reduced model
preds<-round(predict(result_red_reduced, test, type='response'))
# Confusion matrix
confusion_table <- table(preds, test$quality_class)
n <- sum(confusion_table) # number of instances
diag <- diag(confusion_table)
accuracy <- sum(diag) / n # accuracy calculation
## Accuracy of the model is 0.7711599, slightly better than first
TP <- confusion_table[2,2]
FP <- confusion_table[1,2]
FN <- confusion_table[2,1]
TN <- confusion_table[1,1]
false_pos <- FP/TN
false_negative <- FN/TP
precision <- TP/(TP+FP)
recall <- TP/(TP+FN)
## The overall error is 0.2288401.
# Produce ROC Curve
rates<-prediction(preds, test$quality_class)
roc_result<-performance(rates,measure="tpr", x.measure="fpr")
# Plot ROC curve and overlay the diagonal line for random guessing
plot(roc_result, main="ROC Curve for Quality Class")
lines(x = c(0,1), y = c(0,1), col="red")
# Compute the AUC
auc<-performance(rates, measure = "auc")
auc@y.values[[1]]
################## White Wine Analysis of Quality ###################
# Create a binomial class for quality (1 for Good and 0 for Bad)
wines_white$quality_class [wines_white$quality >= 1] <- 0 # Bad
wines_white$quality_class [wines_white$quality >= 6] <- 1 # Good
# Add levels and convert quality class to factor
levels(wines_white$quality_class) <- c('Good', 'Bad')
wines_white$quality_class <- as.factor(wines_white$quality_class)
contrasts(wines_white$quality_class)
## The summary tells us that none of the predictors are significant in the presence of all the predictors.
## After validation of this model, we may try a reduced model
# Evaluate model using validation
set.seed(111)
index <- sample.int(nrow(wines_white), floor(.70*nrow(wines_white)), replace = F)
train <-wines_white[index, ]
test <-wines_white[-index, ]
# Produce a logistic regression for quality class regressed against all other variables
result_white <- glm(quality_class~volatile.acidity+fixed.acidity+citric.acid+residual.sugar+chlorides+total.sulfur.dioxide+free.sulfur.dioxide+density+pH+sulphates+alcohol, data=train, family="binomial")
summary(result_white)
# Predictions on test set using logistic regression
preds<-round(predict(result_white,test, type='response'))
# Confusion matrix
confusion_table <- table(preds, test$quality_class)
n <- sum(confusion_table) # number of instances
diag <- diag(confusion_table)
accuracy <- sum(diag) / n # accuracy calculation
## Accuracy of the model is 0.752809
TP <- confusion_table[2,2]
FP <- confusion_table[1,2]
FN <- confusion_table[2,1]
TN <- confusion_table[1,1]
false_pos <- FP/TN
false_negative <- FN/TP
precision <- TP/(TP+FP)
recall <- TP/(TP+FN)
# Produce Box Plots
p1<-ggplot(aes(y = fixed.acidity, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p2<-ggplot(aes(y = volatile.acidity, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p3<-ggplot(aes(y = citric.acid, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p4<-ggplot(aes(y = residual.sugar, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p5<-ggplot(aes(y = chlorides, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p6<-ggplot(aes(y = free.sulfur.dioxide, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p7<-ggplot(aes(y = density, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p8<-ggplot(aes(y = total.sulfur.dioxide, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p9<-ggplot(aes(y = pH, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p10<-ggplot(aes(y = sulphates, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
p11<-ggplot(aes(y = alcohol, x = factor(quality_class), fill = factor(quality_class)), data = wines_white) + geom_boxplot()
require(gridExtra)
grid.arrange(p1, p2, p3, p4, ncol=2, nrow=2) # volatile acidity, residual.sugar
grid.arrange(p5, p6, p7, p8, ncol=2, nrow=2) # density
grid.arrange(p9, p10, p11, ncol=2, nrow=2) #pH, sulphates, alchohol
grid.arrange(p2, p4, p7, p9, ncol=2, nrow=2)
grid.arrange(p10, p11, ncol=2)
# Reduced model
result_white_reduced <- glm(quality_class~volatile.acidity+residual.sugar+free.sulfur.dioxide+density+pH+sulphates+alcohol, data=train, family="binomial")
summary(result_white_reduced)
# Is this better than the full model?
deltaG2_partial2<-result_white_reduced$deviance-result_white$deviance ##take difference in residual deviances between the two models
deltaG2_partial2
1-pchisq(deltaG2_partial2,5) ##df is 5 since we have 5 additional parameters in the bigger model
# Is this model useful?
deltaG22<-result_white_reduced$null.deviance-result_white_reduced$deviance ##take difference between the null and residual deviance for our model
1-pchisq(deltaG22,7) ##df is 7 since our model has 7 additional parameters other than the intercept
## All of the predictors are significant in the presence of the other predictors.
# Predictions on test set using reduced model
preds<-round(predict(result_white_reduced,test, type='response'))
# Confusion matrix
confusion_table <- table(preds, test$quality_class)
n <- sum(confusion_table) # number of instances
diag <- diag(confusion_table)
accuracy <- sum(diag) / n
## Accuracy of the model is 0.7487232, not much change in accuracy from full model
## so choose simpler model.
TP <- confusion_table[2,2]
FP <- confusion_table[1,2]
FN <- confusion_table[2,1]
TN <- confusion_table[1,1]
false_pos <- FP/TN
false_negative <- FN/TP
precision <- TP/(TP+FP)
recall <- TP/(TP+FN)
# Produce the numbers associated with classification table
rates<-prediction(preds, test$quality_class)
# Produce ROC Curve
roc_result<-performance(rates,measure="tpr", x.measure="fpr")
# Plot ROC curve and overlay the diagonal line for random guessing
plot(roc_result, main="ROC Curve for Quality Class")
lines(x = c(0,1), y = c(0,1), col="red")
# Compute the AUC
auc<-performance(rates, measure = "auc")
auc@y.values[[1]]
#######################################################################################
#predict probability of "Good" quality for red wine
wineRed <- data.frame(alcohol = 13.1, volatile.acidity = .54, chlorides = 0.076, total.sulfur.dioxide=17, free.sulfur.dioxide=8, sulphates = .72 )
prob<-predict(result_red_reduced, wineRed, type="response")
prob
#predict probability of "Good" quality fo for white wine
wineWhite <- data.frame(volatile.acidity = .26, residual.sugar = 1.5, free.sulfur.dioxide=48, density = .9912, alcohol = 12.4 , pH = 3.54 , sulphates = .52)
prob2<-predict(result_white_reduced ,wineWhite, type="response")
prob2
################################## Plots by Type and Quality and Wine Type prediction #######################
data <- read.table("wineQualityWhites_Reds_combind.csv", header=TRUE, sep=",")
# Drop index column
data <- subset(data, select = -c(X))
# Dummy code wine_type as boolean, 1 = red, 0 = white
data$wine_type_orig <- as.factor(data$wine_type)
data$wine_type <- ifelse(data$wine_type == 'red', 1, 0)
# Create a binomial class for quality (1 for Good and 0 for Bad)
data$quality_class[data$quality >= 1] <- 0 # Bad
data$quality_class[data$quality >= 6] <- 1 # Good
data$quality_class_words <- ifelse(data$quality_class, 'Good', 'Bad')
# Add levels and convert quality class to factor
levels(data$quality_class) <- c('Good', 'Bad')
data$quality_class <- as.factor(data$quality_class)
contrasts(data$quality_class)
# Sample random 70% set
set.seed(111)
data.red <- data[data$wine_type == 1,]
data.white <- data[data$wine_type == 0,]
data.red.sampled <- sample.int(nrow(data.red), floor(.70*nrow(data.red)), replace = F)
data.white.sampled <- sample.int(nrow(data.white), floor(.70*nrow(data.white)), replace = F)
# Split test and train
data.train <- rbind(data.red[data.red.sampled,], data.white[data.white.sampled,])
data.test <- rbind(data.red[-data.red.sampled,], data.white[-data.white.sampled,])
attach(data.train)
# Measure correlations
cor(cbind(total.sulfur.dioxide,volatile.acidity,chlorides,fixed.acidity,sulphates,free.sulfur.dioxide,density,residual.sugar,pH,citric.acid,quality,alcohol))
#pairs(data.train, pch = 19, lower.panel=NULL)
library(ggplot2)
require(gridExtra)
# Plots by Type and Quality
ggplot(aes(y = total.sulfur.dioxide, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Total Sulfur Dioxide by Type and Quality") + xlab("Quality") + ylab("Total Sulfur Dioxide") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = volatile.acidity, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Volatile Acidity by Type and Quality") + xlab("Quality") + ylab("Volatile Acidity") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = fixed.acidity, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Fixed Acidity by Type and Quality") + xlab("Quality") + ylab("Fixed Acidity") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = pH, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("pH by Type and Quality") + xlab("Quality") + ylab("pH") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = citric.acid, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Citric Acid by Type and Quality") + xlab("Quality") + ylab("Citric Acid") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = chlorides, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Chlorides by Type and Quality") + xlab("Quality") + ylab("Chlorides") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = sulphates, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Sulphates by Type and Quality") + xlab("Quality") + ylab("Sulphates") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = free.sulfur.dioxide, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Free Sulfur Dioxide by Type and Quality") + xlab("Quality") + ylab("Free Sulphur Dioxide") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y =alcohol, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Alcohol by Type and Quality") + xlab("Quality") + ylab("Alcohol Content (%)") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y =density, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Density by Type and Quality") + xlab("Quality") + ylab("Density (g/mL)") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
ggplot(aes(y = residual.sugar, x = quality_class_words, fill = wine_type_orig), data = data) + geom_boxplot() +
ggtitle("Residual Sugar by Type and Quality") + xlab("Quality") + ylab("Residual Sugar") +
scale_fill_manual(values=c("#E66868", "#E3DD64"), name="Wine Type", breaks=c('red', 'white'), labels=c("Red", "White"))
# Create predictive model
result <- glm(wine_type~density+residual.sugar+total.sulfur.dioxide+volatile.acidity+chlorides+sulphates+alcohol+free.sulfur.dioxide+pH+citric.acid+fixed.acidity+quality, family=binomial, data=data.train)
summary(result)
library(faraway)
vif(data.train[,c(1:12)])
# Reduced Model - remove quality,fixed.acidity, citric.acid, pH
reduced <- glm(wine_type~density+residual.sugar+total.sulfur.dioxide+volatile.acidity+chlorides+sulphates+alcohol+free.sulfur.dioxide, family=binomial, data=data.train)
summary(reduced)
# Is this better than the full model?
deltaG2_partial2<-reduced$deviance-result$deviance ##take difference in residual deviances between the two models
deltaG2_partial2
1-pchisq(deltaG2_partial2,4) ##df is 4 since we have 4 additional parameters in the bigger model
# Is this model useful?
deltaG2<-reduced$null.deviance-reduced$deviance ##take difference between the null and residual deviance for our model
1-pchisq(deltaG2,8) ##df is 8 since our model has 8 additional parameters other than the intercept
vif(data.train[,c(2,4:8,10:11)])
### R_squared ####
r_2 <- 1-(reduced$deviance/reduced$null.deviance)
#### Full Model ####
library(ROCR)
# prediction
preds<-predict(result,newdata=data.test, type="response")
##produce the numbers associated with classification table
rates<-prediction(preds, data.test$wine_type)
##store the true positive and false positive rates
roc_result<-performance(rates,measure="tpr", x.measure="fpr")
##plot ROC curve and overlay the diagonal line for random guessing
plot(roc_result, main="ROC Curve for Wine Type")
lines(x = c(0,1), y = c(0,1), col="red")
##compute the AUC
auc<-performance(rates, measure = "auc")
auc
##confusion matrix. Actual values in the rows, predicted classification in cols
table(data.test$wine_type, preds>0.5)
table(data.test$wine_type, preds>0.7)
### Reduced Model ####
# prediction
preds2<-predict(reduced,newdata=data.test, type="response")
##produce the numbers associated with classification table
rates2<-prediction(preds2, data.test$wine_type)
##store the true positive and false positive rates
roc_result2<-performance(rates2,measure="tpr", x.measure="fpr")
##plot ROC curve and overlay the diagonal line for random guessing
plot(roc_result2, main="ROC Curve for Wine Type")
lines(x = c(0,1), y = c(0,1), col="red")
##compute the AUC
auc2<-performance(rates2, measure = "auc")
auc2
##confusion matrix. Actual values in the rows, predicted classification in cols
table(data.test$wine_type_orig, preds2>0.5)
confusion_table <- table(data.test$wine_type, preds2>0.7)
n <- sum(confusion_table) # number of instances
diag <- diag(confusion_table)
accuracy <- sum(diag) / n
TP <- confusion_table[2,2]
FP <- confusion_table[1,2]
FN <- confusion_table[2,1]
TN <- confusion_table[1,1]
false_pos <- FP/TN
false_negative <- FN/TP
precision <- TP/(TP+FP)
recall <- TP/(TP+FN)
wineRed <- data.frame(density=0.99235, residual.sugar=2.5, alcohol = 13.1, volatile.acidity = .54, chlorides = 0.076, total.sulfur.dioxide=17, free.sulfur.dioxide=8, sulphates = .72 )
#predict probability of "Red" type fo red wine
prob<-predict(reduced, wineRed, type="response")
prob
wineWhite <- data.frame(volatile.acidity = .26, residual.sugar = 1.5, total.sulfur.dioxide=143, chlorides=0.044, free.sulfur.dioxide=48, density = .9912, alcohol = 12.4, sulphates = .52)
#predict probability of "Red" type for white wine
prob2<-predict(reduced , wineWhite, type="response")
prob2
|
c523bf6e84bbe2b563180cb7e72c1cd3c348d71a
|
236cdc1ba4d23f14cbdcbd4a53e427506c4caf3f
|
/Scripts/OutlierRecall/all_problem_inspection.R
|
d45ee1c45889757d425a4106e57aa84d8856d2af
|
[
"MIT"
] |
permissive
|
Guliba/FRASER-analysis
|
775b30e079bcb8a50b91f6a8785a631182f076fd
|
3c125dc561de977b89a674e19b720cc72762b392
|
refs/heads/master
| 2023-03-23T03:12:38.876112
| 2020-08-27T05:28:59
| 2020-08-27T05:28:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
all_problem_inspection.R
|
#'---
#' title: Inspect p-value problems in FraseR fit
#' author: Ines Scheller
#' wb:
#' input:
#' - allOut: '`sm expand("Output/html/OutlierInjection/{dataset}/{psiType}/{delta}/{method}_fitProblems.html", dataset=config["datasets"], psiType=config["psiTypes"], delta=config["inj_deltas"], method="FraseR-1DecoderBatches")`'
#' output:
#' html_document
#'---
if(FALSE){
snakemake <- readRDS("./tmp/snakemake.RDS")
}
#+ source config
source("./src/r/config.R")
#+ input
allOutFiles <- snakemake@input$allOut
datasets <- snakemake@config$datasets
methods <- snakemake@config$methods
psiTypes <- snakemake@config$psiTypes
deltas <- snakemake@config$inj_deltas
#+ echo=FALSE, results="asis"
cat("<h1>Inspection of fit problems (FraseR) per dataset</h1><p>")
# Emit, for each dataset, raw-HTML links to every per-(psiType, delta, method)
# fit-problem report. Output goes through cat() so the rendered chunk inlines
# the HTML directly. The sapply() is used purely for its side effects; the
# captured return value is deliberately discarded into devNull.
devNull <- sapply(datasets, function(name){
    # BUG FIX: the per-dataset heading opened <h2> but closed with </h1>,
    # producing invalid HTML; close the matching h2 tag instead.
    cat(paste0("<h2>Dataset: ", name, "</h2><p>"))
    for(type in psiTypes){
        for(delta in deltas){
            for(method in methods){
                # One anchor per report, pointing into the OutlierInjection
                # output tree mirrored by the snakemake inputs.
                cat(paste0(
                    "</br>", "<a href='OutlierInjection/", name, "/", type, "/", delta, "/", method, "_fitProblems.html'>", name, " ", type, " ", delta, " ", method, " problems</a>"
                ))
            }
        }
    }
    cat("</br> </p>")
})
|
b9895795fa7e2bda67200acf9cd99f60a7b1583a
|
3cdea740d7e550b8041208d4a76b9945e364c0dd
|
/cachematrix.R
|
b61e5a12622464ac2011193fdbbc9aba9b9f0ae5
|
[] |
no_license
|
lizziepoje/ProgrammingAssignment2
|
14f78f629a34366f9e9fd39d5774d259cdeb2bfe
|
7017785c23a39f4e6efd37e6b7e681e26c564abe
|
refs/heads/master
| 2020-12-26T04:05:35.725452
| 2017-03-07T18:58:10
| 2017-03-07T18:58:10
| 64,761,069
| 0
| 0
| null | 2016-08-02T13:53:48
| 2016-08-02T13:53:47
| null |
UTF-8
|
R
| false
| false
| 1,830
|
r
|
cachematrix.R
|
## The first function, 'makeCacheMatrix' creates a special "matrix" object that can cache its inverse.
## It contains the following functions:
## set - sets the value of a matrix
## get - gets the value of a matrix
## setInverse - sets the cached value of the inverse of the matrix
## getInverse - gets the cached value of the inverse of the matrix
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   setMatrix  - store a new matrix (and invalidate the cache)
##   getMatrix  - return the stored matrix
##   setInverse - store a computed inverse in the cache
##   getInverse - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cache <- NULL

  # Store a new matrix; the cache is flushed because it no longer applies.
  setMatrix <- function(newMatrix) {
    x <<- newMatrix
    cache <<- NULL
  }

  # Return the stored matrix.
  # BUG FIX: original read `getMatrix<funcion(){`, a syntax error
  # (misspelled keyword and `<` instead of `<-`).
  getMatrix <- function() {
    x
  }

  # Store the inverse supplied by the caller (see cacheSolve).
  setInverse <- function(solve) {
    cache <<- solve
  }

  # Return the cached inverse (NULL if nothing has been cached).
  getInverse <- function() {
    cache
  }

  # BUG FIX: the returned list referenced the misspelled name `getMAtrix`.
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       setInverse = setInverse,
       getInverse = getInverse)
}
## The following function calculates the inverse of the special matrix created with the above function, However,
##it first checks to see if the inverse has already been calculated. If so, it returns the inverse
##from the cache and skips the computation. Otherwise, it calculates the inverse of the matrix and sets the
##inverse of the matrix in the cache via the setInverse function.
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix. If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  # Early exit: a non-NULL cache means the inverse was already computed.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: compute the inverse and remember it for next time.
  mat <- x$getMatrix()
  inv <- solve(mat)
  x$setInverse(inv)
  inv
}
|
a33d547b612a694fb6fa987a7382230811b49e37
|
46fed48f95847b2af4b4ddeac9a7529ba60a26f5
|
/code/climate-data/process_climate_data.R
|
fe892604ffd95fdd60ad485239dd140847eaf213
|
[] |
no_license
|
sauer3/Gabon-Climate-Exposure
|
82d4ed9befb1615c7631373cc6287fddf6d900b3
|
bc03d9163cfc6f19059d827865b927fa736e62b0
|
refs/heads/master
| 2022-11-20T00:59:28.572576
| 2020-07-21T23:27:17
| 2020-07-21T23:27:17
| 281,505,751
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
process_climate_data.R
|
# Convert values to Celsius and calculate the standard deviation for each monthly time series.
library(ecoclim)
library(raster)
library(dplyr)
# Function to convert K to C
kelvin_to_celsius <- function(ras, digits){
ras <- ras/10
c <- ras - 273.15
c <- round(c, digits)
return(c)
}
# Destination root for all processed time-series rasters (hard-coded local path).
output.dir <- "/Users/AuerPower/Dropbox/Research Projects/Gabon_Climate/data/climate/recent/timeseries/"
# All monthly mean-temperature rasters (stored as Kelvin*10 per the /10 in
# kelvin_to_celsius above).
files <- list.files("/Users/AuerPower/Dropbox/Research Projects/Gabon_Climate/data/climate/recent/timeseries/tmean", full.names = TRUE)
# convert to celcius and save
for(f in files){
  ras <- raster(f)
  celsius <- kelvin_to_celsius(ras, 2)
  # Write converted raster under tmean_c/ keeping the original file name.
  writeRaster(celsius, paste0(output.dir,"tmean_c/", basename(f)), overwrite=TRUE)
}
# Re-list inputs: converted temperature plus raw precipitation rasters.
files <- list.files(c("/Users/AuerPower/Dropbox/Research Projects/Gabon_Climate/data/climate/recent/timeseries/tmean_c",
                      "/Users/AuerPower/Dropbox/Research Projects/Gabon_Climate/data/climate/recent/timeseries/prec") , full.names = TRUE)
months <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12")
variables <- c("tmean", "prec")
# For each variable/month pair, compute the per-pixel standard deviation
# across the 1979-2013 series and write it as a GeoTIFF.
for (v in variables){
  for (m in months){
    # Month token padded with underscores so grepl below matches "_01_" in
    # file names without also matching year digits.
    m_char <- paste0("_", m, "_")
    name <- paste0(output.dir, "std/", "sd_", v, m_char, "1979_2013.tif")
    # Skip outputs that already exist (makes the script resumable).
    if(file.exists(name)) {
      next
    }
    # Subset first by variable, then by month, then stack and reduce.
    files_subset <- files[grepl(v, basename(files))]
    m_series <- files_subset[grepl(m_char, basename(files_subset))]
    m_rasters <- stack(m_series)
    std <- calc(m_rasters, sd)
    writeRaster(std, name, format="GTiff", overwrite=TRUE)
  }
}
|
188ec8bcfdd2dbd5fcd79aebebfbacc241eb0276
|
5a8f971da08a6d38496d594a229f5b4c9ff2f4e5
|
/man/generate_single_moclust.Rd
|
d0fe658af932056dc8d0840411a9525b35b7216b
|
[
"MIT"
] |
permissive
|
agapow/subtypr
|
9abe7cb28a6686d9c719dc71df145f7944ca852f
|
524494d3e083d058e7c58c0971fad6016a9b914a
|
refs/heads/master
| 2020-03-22T17:52:24.634052
| 2018-08-17T08:24:13
| 2018-08-17T08:24:13
| 140,421,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 944
|
rd
|
generate_single_moclust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.R
\name{generate_single_moclust}
\alias{generate_single_moclust}
\title{Generate simulated data}
\usage{
generate_single_moclust(X, partition, sd_signal = 0.5, sparse = FALSE,
percent_sparsity = 0.3)
}
\arguments{
\item{X}{A feature matrix, rows are patients, columns are features.}
\item{partition}{A partition of the samples.}
\item{sd_signal}{The standard deviation of the signal in each cluster.}
\item{sparse}{logical, to create sparsity in singular vector of Xsim.}
\item{percent_sparsity}{Percentage of sparsity in singular vector of Xsim.}
}
\value{
A simulated structured matrix
}
\description{
Generate simulated data as in 10.1021/acs.jproteome.5b00824 (DOI).
}
\details{
Considering a partition of the samples, the data will be modified to have a
structure corresponding to this partition.
}
\seealso{
DOI: 10.1021/acs.jproteome.5b00824
}
|
23d8175c6ecf54ecc0005ca3aeae525f82adab9f
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610129219-test.R
|
91d21667b1d5509748a9d1b858442c74549d5ac0
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
1610129219-test.R
|
# Fuzzer-captured regression input for grattan:::anyOutside (libFuzzer /
# valgrind harness). The integer vector x mixes large negative sentinels,
# -1/boundary values, and a long run of zeros.
testlist <- list(a = 0L, b = 0L, x = c(1769471L, -218959118L, -218959320L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -218959118L, -1L, -16318465L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Replay the captured argument list against the internal C++ entry point.
result <- do.call(grattan:::anyOutside,testlist)
# Log the structure of whatever comes back (harness only checks for crashes).
str(result)
|
84e60254a3d0f783902e2d0332c10d018cb1aee3
|
72f282d407002ce6d089368375724e1ec03714c2
|
/R/read_pkg_dependency_tree.r
|
9e0a22f3d5fb3e5c927ca293c844bd2413f5c191
|
[] |
no_license
|
gfleetwood/sansor
|
1f9963aea9ce430b9a952f5fff266c20b4db9b5a
|
101fe82b914748316f17d55552858921be9f826d
|
refs/heads/master
| 2022-12-16T11:44:18.294805
| 2022-11-18T04:50:13
| 2022-11-18T04:50:13
| 133,735,662
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 734
|
r
|
read_pkg_dependency_tree.r
|
#' @title Read Package Dependency Trees
#' @description Recursively collects the dependency tree of a package.
#' @param pack Name of the package whose dependencies are read.
#' @param dep_level Character vector of dependency fields to follow.
#'   Defaults to \code{c("Depends", "Imports", "LinkingTo")}.
#' @param available_packages Package database, as returned by
#'   \code{available.packages()}.
#' @return A character vector of package names: the direct dependencies of
#'   \code{pack} followed by their recursively collected dependencies.
#'   May contain duplicates.
#' @export
read_pkg_dependency_tree <- \(
  pack,
  dep_level = c("Depends", "Imports", "LinkingTo"),
  available_packages = available.packages()
) {
  # source; https://gist.github.com/johnrc/faaa796e4b1ac53b7848
  # ex: read_pkg_dependency_tree("dplyr")
  # Direct dependencies of `pack`.
  packages <- pack %>%
    package_dependencies(available_packages, which = dep_level) %>%
    unlist() %>%
    unname()
  # Recurse into each direct dependency and append the results, replacing the
  # original grow-by-c() loop (same traversal order, no quadratic copying).
  # NOTE(review): there is no cycle guard, so a circular dependency would
  # recurse forever; CRAN dependency graphs are acyclic in practice — confirm
  # before using on arbitrary package databases.
  child_deps <- unlist(lapply(
    packages, read_pkg_dependency_tree, dep_level, available_packages
  ))
  c(packages, child_deps)
}
|
e30b31748dd36436fe251f65dfe3d355a3be0c26
|
6ff4d7bfbdb0f44acf33239511002f03f4a8fbd0
|
/R/almevents.R
|
0c7b9a2709ba6411a3a878b5ce2c2e10389f86cd
|
[] |
no_license
|
imclab/alm
|
7d926f8f6d4cd02324722a3383e3cfa771f0f0b5
|
bee622f6eccfaeb45e1718f089a28e39b3277799
|
refs/heads/master
| 2021-01-17T19:22:37.321251
| 2013-11-07T05:06:17
| 2013-11-07T05:06:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,273
|
r
|
almevents.R
|
#' Retrieve PLoS article-level metrics (ALM) events.
#'
#' Events are the details of the metrics that are counted related to PLoS papers.
#'
#' @importFrom RCurl getCurlHandle getForm
#' @importFrom RJSONIO fromJSON
#' @importFrom reshape sort_df
#' @importFrom plyr compact rbind.fill
#' @param doi Digital object identifier for an article in PLoS Journals (character)
#' @param pmid PubMed object identifier (numeric)
#' @param pmcid PubMed Central object identifier (numeric)
#' @param mdid Mendeley object identifier (character)
#' @param url API endpoint, defaults to http://alm.plos.org/api/v3/articles (character)
#' @param months Number of months since publication to request historical data for.
#' See details for a note. (numeric)
#' @param days Number of days since publication to request historical data for.
#' See details for a note. (numeric)
#' @param source The source to get events data from. You can pass in a character
#' vector, like: \code{c("mendeley","crossref")}
#' @param key your PLoS API key, either enter, or loads from .Rprofile (character)
#' @param curl If using in a loop, call getCurlHandle() first and pass
#' the returned value in here (avoids unnecessary footprint)
#' @details You can only supply one of the parmeters doi, pmid, pmcid, and mdid.
#'
#' Query for as many articles at a time as you like. Though queries are broken
#' up in to smaller bits of 30 identifiers at a time.
#'
#' If you supply both the days and months parameters, days takes precedence,
#' and months is ignored.
#'
#' You can get events from many different sources. After calling almevents,
#' then index the output by the data provider you want. The options are:
#' bloglines, citeulike, connotea, crossref, nature, postgenomic, pubmed,
#' scopus, plos, researchblogging, biod, webofscience, pmc, facebook,
#' mendeley, twitter, wikipedia, and scienceseeker.
#'
#' Beware that some data source are not parsed yet, so there may be event data
#' but it is not provided yet as it is so messy to parse.
#'
#' See more info on PLOS's relative metrics event source here
#' \url{http://www.plosone.org/static/almInfo#relativeMetrics}
#' @return PLoS altmetrics as data.frame's.
#' @references See a tutorial/vignette for alm at
#' \url{http://ropensci.org/tutorials/alm_tutorial.html}
#' @examples \dontrun{
#' # For one article
#' out <- almevents(doi="10.1371/journal.pone.0029797")
#' names(out) # names of sources
#' # remove those with no data
#' out <- out[!out %in% c("sorry, no events content yet","parser not written yet")]
#' out[["pmc"]] # get the results for PubMed Central
#' out[["twitter"]] # get the results for twitter (boo, there aren't any)
#' out[c("twitter","crossref")] # get the results for two sources
#'
#' #
#' out <- alm(doi="10.1371/journal.pgen.1003471")
#' out[["wordpress"]]
#'
#' # Another example
#' out <- almevents(doi="10.1371/journal.pone.0001543")
#' # remove those with no data
#' out <- out[!out %in% c("sorry, no events content yet","parser not written yet")]
#' names(out)
#'
#' # Another example
#' out <- almevents(doi="10.1371/journal.pone.0035869")
#' # remove those with no data
#' out <- out[!out %in% c("sorry, no events content yet","parser not written yet")]
#' names(out)
#'
#' # Two doi's
#' dois <- c('10.1371/journal.pone.0001543','10.1371/journal.pone.0040117')
#' out <- almevents(doi=dois)
#' out[[1]]
#' out[[2]]
#' out[[1]][["figshare"]][[2]][[1]]
#'
#' # Specify a specific source
#' almevents(doi="10.1371/journal.pone.0035869", source="crossref")
#'
#' # Specify two specific sources
#' almevents(doi="10.1371/journal.pone.0035869", source=c("crossref","twitter"))
#'
#' # Figshare data
#' almevents(doi="10.1371/journal.pone.0069841")
#' }
#' @export
almevents <- function(doi = NULL, pmid = NULL, pmcid = NULL, mdid = NULL,
  url='http://alm.plos.org/api/v3/articles', months = NULL, days = NULL,
  source = NULL, key = NULL, curl = getCurlHandle())
{
  # Keep only the identifier argument that was actually supplied; exactly one
  # of doi/pmid/pmcid/mdid is allowed.
  id <- compact(list(doi=doi, pmid=pmid, pmcid=pmcid, mendeley=mdid))
  if(length(id)>1){ stop("Only supply one of: doi, pmid, pmcid, mdid") } else { NULL }
  key <- getkey(key)
  # The API takes multiple sources as one comma-separated string.
  if(is.null(source)){source2 <- NULL} else{ source2 <- paste(source,collapse=",") }
  # Inner worker: query the ALM API and parse the per-source event payloads.
  # Wrapped with plyr::failwith below so any failure yields NULL, not an error.
  parse_events <- function() {
    args <- compact(
      list(
        api_key = key, info = 'event', months = months,
        days = days, source = source2, type = names(id)
      )
    )
    # Branch on the number of identifiers: one request for a single id,
    # batched requests (50 ids each) for larger queries.
    if(length(id[[1]])==0){stop("Please provide a DOI")} else
      if(length(id[[1]])==1){
        # DOIs must be URL-encoded (the slash becomes %2F).
        if(names(id) == "doi") id <- gsub("/", "%2F", id)
        args2 <- c(args, ids = id[[1]])
        out <- getForm(url, .params = args2, curl = curl)
        ttt <- RJSONIO::fromJSON(out)
      } else
        if(length(id[[1]])>1){
          if(length(id[[1]])>50){
            # Split the ids into chunks of 50 and issue one request per chunk.
            slice <- function(x, n) split(x, as.integer((seq_along(x) - 1) / n))
            idsplit <- slice(id[[1]], 50)
            repeatit <- function(y) {
              if(names(id) == "doi"){
                id2 <- paste(sapply(y, function(x) gsub("/", "%2F", x)), collapse=",")
              } else
              {
                id2 <- paste(id[[1]], collapse=",")
              }
              args2 <- c(args, ids = id2)
              out <- getForm(url, .params = args2, curl = curl)
              ttt <- RJSONIO::fromJSON(out)
            }
            temp <- lapply(idsplit, repeatit)
            ttt <- do.call(c, temp)
          } else {
            if(names(id) == "doi") {
              id2 <- paste(sapply(id, function(x) gsub("/", "%2F", x)), collapse=",")
            } else
            {
              id2 <- paste(id[[1]], collapse=",")
            }
            args2 <- c(args, ids = id2)
            out <- getForm(url, .params = args2, curl = curl)
            ttt <- RJSONIO::fromJSON(out)
          }
        }
    # get just the events data (one list of sources per article)
    events <- lapply(ttt, function(x) x$sources)
    # Function to extract and parse events data for each source
    getevents <- function(x, label=NULL){
      # Parser code: one branch per data provider; each branch returns a
      # placeholder string when the source has no events yet.
      parsers <- function(y){
        if(y$name == "counter"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Monthly view counts by format.
            year <- as.numeric(sapply(y$events, `[[`, "year"))
            month <- as.numeric(sapply(y$events, `[[`, "month"))
            pdf_views <- as.numeric(sapply(y$events, `[[`, "pdf_views"))
            html_views <- as.numeric(sapply(y$events, `[[`, "html_views"))
            xml_views <- as.numeric(sapply(y$events, `[[`, "xml_views"))
            data.frame(year, month, pdf_views, html_views, xml_views)
          }
        } else if(y$name == "citeulike"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events
          }
        } else if(y$name == "crossref"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Flatten each citing-article record; contributors and ISSNs are
            # collapsed to "; "-separated strings so the rows bind cleanly.
            parsecrossref <- function(x) {
              if(is.null(x[[1]][["publication_type"]])){
                x[[1]][["publication_type"]] <- NA
              }
              if(!("contributors" %in% names(x[[1]]))){
                x[[1]][["contributors"]] <- list(contributor=NA)
                x[[1]]$issn <- paste(x[[1]]$issn, collapse="; ")
                data.frame(x[[1]])
              } else if(length(x[[1]]$contributors$contributor[[1]])>1){
                x[[1]]$contributors$contributor <-
                  paste(sapply(x[[1]]$contributors$contributor,
                    function(x) paste(x[1:2], collapse=" ")), collapse="; ")
                x[[1]]$issn <- paste(x[[1]]$issn, collapse="; ")
                data.frame(x[[1]])
              } else {
                x[[1]]$contributors$contributor <-
                  paste(x[[1]]$contributors$contributor[1:2], collapse=" ")
                x[[1]]$issn <- paste(x[[1]]$issn, collapse="; ")
                data.frame(x[[1]])
              }
            }
            ldply(y$events, parsecrossref)
          }
        } else if(y$name == "nature"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # One row per blog post, combining blog- and post-level fields.
            parsenature <- function(x){
              temp <- x$event
              blog_ <- data.frame(temp$blog[names(temp$blog) %in% c('title','url')])
              names(blog_) <- c('blog_title','blog_url')
              post_ <- data.frame(temp[names(temp) %in% c('title','num_words','url','percent_complex_words','created_at')])
              names(post_) <- c('post_percent_complex_words','post_created_at','post_title','post_url','post_num_words')
              cbind(blog_, post_)
            }
            ldply(y$events, parsenature)
          }
        } else if(y$name == "researchblogging"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            parserblogging <- function(w){
              temp <- w$event
              bloginfo <- data.frame(temp[names(temp) %in% c('post_title','blog_name','blogger_name','published_date','post_url')])
              if(length(temp$citations$citation[[1]])>1){
                citations <- paste(sapply(temp$citations$citation, function(z) z$doi), sep="", collapse=",")
              } else
              {
                citations <- temp$citations$citation$doi
              }
              cbind(bloginfo, citations)
            }
            if(length(y$events)==1){
              parserblogging(y$events)
            } else
            {
              do.call(rbind, lapply(y$events, parserblogging))
            }
          }
        } else if(y$name == "biod"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            if(length(y$events) > 1){
              do.call(rbind, lapply(y$events, data.frame))
            } else
            {
              y$events
            }
          }
        } else if(y$name == "pubmed"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          { sapply(y$events, function(x) x[c("event","event_url")]) }
        } else if(y$name == "facebook"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # NULL fields become the string "none" so data.frame() succeeds.
            parsefb <- function(x){
              x[sapply(x, is.null)] <- "none"
              data.frame(x)
            }
            if(names(y$events)[[1]]=="url"){
              parsefb(y$events)
            } else
            {
              lapply(y$events, parsefb)
            }
          }
        } else if(y$name == "mendeley"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Reader counts plus the discipline/country/status breakdowns,
            # stacked long with an .id column naming each table.
            parsemendeley <- function(x){
              readers <- data.frame(name="readers", value=x$stats$readers)
              disc <- ldply(x$stats$discipline, function(x) data.frame(x))[,-1]
              country <- ldply(x$stats$country, function(x) data.frame(x))
              status <- ldply(x$stats$status, function(x) data.frame(x))
              dfs <- list(readers = readers, discipline = disc, country = country, status = status)
              ldply(dfs)
            }
            parsemendeley(y$events)
          }
        } else if(y$name == "twitter"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            temp <- lapply(y$events, function(x) data.frame(t(data.frame(x[[1]]))))
            tempdf <- do.call(rbind, temp)
            row.names(tempdf) <- NULL
            tempdf
          }
        } else if(y$name == "wikipedia"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Mention counts keyed by language edition.
            df <- data.frame(y$events)
            df$lang <- row.names(df)
            names(df) <- c("values","lang")
            row.names(df) <- NULL
            df
          }
        } else if(y$name == "bloglines"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            parsebloglines <- function(x){
              temp <- data.frame(t(x$event))
              if(any(names(temp) %in% "author")==TRUE && any(names(temp) %in% "site_name")==TRUE)
              {
                temp2 <- temp[,c("site_name","author")]
              } else
              {
                temp2 <- data.frame(site_name=temp$site_name, author="none")
              }
              cbind(temp2, event_url=x$event_url)
            }
            ldply(y$events, parsebloglines)
          }
        } else if(y$name == "postgenomic"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            temp <- y$events[[1]]
            name <- temp$event$blog_name
            eventurl <- temp$event_url
            dois <- sapply(temp$event$citing, function(x) x$doi_id )
            list(blog_name=name, event_url=eventurl, dois=dois)
          }
        } else if(y$name == "scopus"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          { y$events[[1]] }
        } else if(y$name == "wos"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            if(length(y$events) > 1){
              ldply(y$events, function(x) data.frame(t(x)))
            } else
            {
              y$events
            }
          }
        } else if(y$name == "pmc"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Sort each monthly record's fields by name so columns line up,
            # then take the column labels from the first record.
            parsepmc <- function(x, names_){
              gg <- data.frame(x)
              gg$it <- row.names(gg)
              if(!names_){as.numeric(as.character(t(sort_df(gg, "it")[,-2])))} else
              { sort_df(gg, "it")[,-1] }
            }
            df <- data.frame(do.call(rbind, lapply(y$events, parsepmc, names_=FALSE)))
            names(df) <- parsepmc(y$events[[1]], TRUE)
            df
          }
        } else if(y$name == "connotea"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          { paste("parser not written yet") }
        } else if(y$name == "scienceseeker"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            parsesciseeker <- function(x){
              temp <- x$event
              info <- temp[c('title','author')]
              recommendations <- data.frame(t(sapply(temp$`ss:community`$`ss:recommendations`, function(x) x[[2]])))
              names(recommendations) <- c("user","editor")
              categories <- paste(sapply(temp$category, function(x) x[[1]]), collapse=",")
              cbind(info, recommendations, categories=categories, event_url=x$event_url)
            }
            ldply(y$events, parsesciseeker)
          }
        } else if(y$name == "relativemetric"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            # Returns a two-element list: reporting period metadata plus one
            # row per subject-area reference set.
            meta <- y$events[names(y$events) %in% c("start_date","end_date")]
            data <- do.call(rbind.fill,
              lapply(y$events$subject_areas, function(x)
                data.frame(x[[1]], t(data.frame(x[[2]])))
              )
            )
            row.names(data) <- NULL
            # names(data) <- c('reference_set','one','two','three','four','five','six','seven')
            list(meta=meta, data=data)
          }
        } else if(y$name == "f1000"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            data.frame(rbind(y$events), stringsAsFactors=FALSE)
          }
        } else if(y$name == "figshare"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events$items
          }
        } else if(y$name == "wordpress"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            lapply(y$events, function(x) do.call(c, x))
          }
        } else if(y$name == "pmceurope"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events
            # paster not written yet: raw payload returned as-is
          }
        } else if(y$name == "pmceuropedata"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events
            # parser not written yet: raw payload returned as-is
          }
        } else if(y$name == "openedition"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events
            # parser not written yet: raw payload returned as-is
          }
        } else if(y$name == "reddit"){
          if(length(y$events)==0){paste("sorry, no events content yet")} else
          {
            y$events
            # parser not written yet: raw payload returned as-is
          }
        }
      }
      # Run the parsers on each element
      datout <- lapply(x, parsers)
      # Assign names to each list element, taken from the API's own source
      # names unless an explicit `source` filter was supplied.
      if(is.null(label)){
        # names(datout) <- c("bloglines","citeulike","connotea","crossref","nature",
        #                    "postgenomic","pubmed","scopus","plos","researchblogging",
        #                    "biod","webofscience","pmc","facebook","mendeley","twitter",
        #                    "wikipedia","scienceseeker","relativemetric","f1000","figshare")
        names(datout) <- sapply(events[[1]], "[[", "name")
      } else
      {
        names(datout) <- label
      }
      return( datout )
    }
    # Actually get the events data, one parsed list per article
    temp <- lapply(events, getevents, label=source)
    # Return the data
    return( temp )
  }
  # failwith() converts any API/parsing error into a NULL result.
  safe_parse_events <- plyr::failwith(NULL, parse_events)
  finaldata <- safe_parse_events()
  # Single-article queries are unwrapped from their enclosing list.
  if(length(finaldata)>1){ return( finaldata )} else { finaldata[[1]] }
}
|
27ae3f8447b903fef61a10ec1fce9f99a5e83f53
|
8c48fa77cc61d876d0e76ec36aebce927615336c
|
/script.R
|
ee46dac181e7b54cd2cab4b3585232580a89705c
|
[] |
no_license
|
machio1985/DPC_Open_Data
|
58ca62cf7a052591394eeec33921b9acad0f219b
|
8a92c1f26bf2548d02949d6c6b21ffb129d8fe6e
|
refs/heads/main
| 2023-04-29T11:59:51.584882
| 2021-05-22T08:55:01
| 2021-05-22T08:55:01
| 369,570,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,175
|
r
|
script.R
|
# Libraries & setup ----
library(tidyverse)
library(stringi)
library(rvest)
library(readxl)
library(magrittr)
library(data.table)
# Create the input/output directories under the current working directory.
dir.create(file.path(getwd(), "01.input"))
dir.create(file.path(getwd(), "02.output"))
# functions ---------
# Download helper: fetch the file at URL `x` into the destination built from
# the globals `file_name` and `extension` (set by the surrounding script).
dl.dpc <- function(x) {
  dest <- str_c(file_name, extension)
  download.file(url = x, destfile = dest, quiet = TRUE)
}
# Clean up column names read from the Excel files: strip embedded newlines,
# footnote markers ("※1" etc.) and the "...N" suffixes that readxl appends to
# duplicated names. (Regexes kept verbatim from the original, which flagged
# them as dubious.) Uses rename_with(); the original rename_all(funs(...))
# relied on dplyr's deprecated funs() helper.
clean_name <- function(x) {
  x %>%
    rename_with(~ str_remove_all(.x, "\r\n")) %>%
    rename_with(~ str_remove_all(.x, "\n")) %>%
    rename_with(~ str_remove(.x, "※[0-9]")) %>%
    rename_with(~ str_remove_all(.x, "(\\...[0-9][0-9][0-9])")) %>%
    rename_with(~ str_remove_all(.x, "(\\...[0-9][0-9])")) %>%
    rename_with(~ str_remove_all(.x, "(\\...[0-9])"))
}
# Clean a vector of header-cell values: drop the first embedded "\r\n" and
# every "...N" suffix (3-, 2-, then 1-digit variants, in that order).
# (Regexes kept verbatim from the original, which flagged them as dubious.)
clean_sell <- function(x) {
  cleaned <- str_remove(x, "\r\n")
  cleaned <- str_remove_all(cleaned, "(\\...[0-9][0-9][0-9])")
  cleaned <- str_remove_all(cleaned, "(\\...[0-9][0-9])")
  str_remove_all(cleaned, "(\\...[0-9])")
}
# Fill merged-cell gaps in a one-column data frame of header fragments:
# empty strings become NA, NAs are filled downward from the value above,
# and any NA still remaining reverts to "".
fill.colnames <- function(x) {
  marked <- mutate(x, tmp = if_else(tmp == "", NA_character_, as.character(tmp)))
  filled <- fill(marked, tmp)
  mutate(filled, tmp = if_else(is.na(tmp), "", tmp))
}
# Column names span multiple header rows, so they are reassembled per file.
# This variant handles headers spanning 3 rows.
# The globals `file_name`, `extension` and `dir` must be set beforehand
# (extension: xlsx / xls). Reads the downloaded Excel file, rebuilds the
# headers, reshapes to long format, writes a CSV and deletes the Excel file.
make_data_skip2 <- function(){
  # Read the sheet three times, skipping 0/1/2 rows, to capture each header row.
  suppressMessages(tmp1<-read_excel(str_c(file_name,extension),skip=0))
  suppressMessages(tmp2<-read_excel(str_c(file_name,extension),skip=1))
  suppressMessages(tmp3<-read_excel(str_c(file_name,extension),skip=2))
  # Clean each header row, fill merged-cell gaps, then concatenate the three
  # fragments into one composite column name.
  suppressMessages(
    fix_colnames <- bind_cols(
      data.frame(tmp = colnames(tmp1) %>% clean_sell()) %>% fill.colnames(),
      data.frame(tmp = colnames(tmp2) %>% clean_sell()) %>% fill.colnames(),
      data.frame(tmp = colnames(tmp3) %>% clean_sell()) %>% fill.colnames()) %>%
      mutate(tmp=str_c(tmp...2,tmp...1,tmp...3,sep="_")) # concatenate the fragments
  )
  # Apply the rebuilt names, pivot to long format, keep only count columns
  # with actual values, and tag each row with its source file name.
  tmp3 <- tmp3 %>%
    set_colnames(fix_colnames$tmp) %>%
    rename_all(funs(str_remove_all(.,"(\\__)"))) %>%
    gather(手術,値,-c("告示番号","通番","施設名")) %>%
    filter(値!="-") %>%
    filter(str_detect(手術,"件数")) %>%
    mutate(file=str_remove_all(file_name,file.path(dir)))%>%
    mutate(file=str_remove_all(file,"/")) %>%
    mutate_all(funs(as.character(.)))
  write.csv(tmp3,str_c(file_name,".csv"),row.names = FALSE)
  file.remove(str_c(file_name,extension))
}
# Column names span multiple header rows, so they are reassembled per file.
# This variant handles headers spanning 4 rows; otherwise identical in shape
# to make_data_skip2 (relies on the same globals `file_name`, `extension`,
# `dir`).
make_data_skip3 <- function(){
  # Read the sheet four times, skipping 0-3 rows, to capture each header row.
  suppressMessages(tmp1<-read_excel(str_c(file_name,extension),skip=0))
  suppressMessages(tmp2<-read_excel(str_c(file_name,extension),skip=1))
  suppressMessages(tmp3<-read_excel(str_c(file_name,extension),skip=2))
  suppressMessages(tmp4<-read_excel(str_c(file_name,extension),skip=3))
  # Clean each header row, fill merged-cell gaps, then concatenate the four
  # fragments into one composite column name.
  suppressMessages(
    fix_colnames<-bind_cols(
      data.frame(tmp = colnames(tmp1) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp2) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp3) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp4) %>% clean_sell) %>% fill.colnames) %>%
      mutate(tmp=str_c(tmp...2,tmp...1,tmp...3,tmp...4,sep="_"))
  )
  # Apply the rebuilt names, pivot to long format, keep only count columns
  # with actual values, and tag each row with its source file name.
  tmp4 <- tmp4 %>%
    set_colnames(fix_colnames$tmp) %>%
    rename_all(funs(str_remove_all(.,"(\\___)"))) %>%
    gather(手術,値,-c("告示番号","通番","施設名")) %>%
    filter(値!="-") %>%
    filter(str_detect(手術,"件数")) %>%
    mutate(file=str_remove_all(file_name,file.path(dir)))%>%
    mutate(file=str_remove_all(file,"/"))%>%
    mutate_all(funs(as.character(.)))
  write.csv(tmp4,str_c(file_name,".csv"),row.names = FALSE)
  file.remove(str_c(file_name,extension))
}
# Column names span multiple header rows, so they are reassembled per file.
# This variant handles headers spanning 5 rows; otherwise identical in shape
# to make_data_skip2/3 (relies on the same globals `file_name`, `extension`,
# `dir`).
make_data_skip4 <- function(){
  # Read the sheet five times, skipping 0-4 rows, to capture each header row.
  suppressMessages(tmp1<-read_excel(str_c(file_name,extension),skip=0))
  suppressMessages(tmp2<-read_excel(str_c(file_name,extension),skip=1))
  suppressMessages(tmp3<-read_excel(str_c(file_name,extension),skip=2))
  suppressMessages(tmp4<-read_excel(str_c(file_name,extension),skip=3))
  suppressMessages(tmp5<-read_excel(str_c(file_name,extension),skip=4))
  # Clean each header row, fill merged-cell gaps, then concatenate the five
  # fragments into one composite column name.
  suppressMessages(
    fix_colnames<-bind_cols(
      data.frame(tmp = colnames(tmp1) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp2) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp3) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp4) %>% clean_sell) %>% fill.colnames,
      data.frame(tmp = colnames(tmp5) %>% clean_sell) %>% fill.colnames) %>%
      mutate(tmp=str_c(tmp...2,tmp...1,tmp...3,tmp...4,tmp...5,sep="_"))
  )
  # Apply the rebuilt names, pivot to long format, keep only count columns
  # with actual values, and tag each row with its source file name.
  tmp5 <- tmp5 %>%
    set_colnames(fix_colnames$tmp) %>%
    rename_all(funs(str_remove_all(.,"(\\____)"))) %>%
    gather(手術,値,-c("告示番号","通番","施設名")) %>%
    filter(値!="-") %>%
    filter(str_detect(手術,"件数")) %>%
    mutate(file=str_remove_all(file_name,file.path(dir)))%>%
    mutate(file=str_remove_all(file,"/"))%>%
    mutate_all(funs(as.character(.)))
  write.csv(tmp5,str_c(file_name,".csv"),row.names = FALSE)
  file.remove(str_c(file_name,extension))
}
# Combine every per-MDC CSV in the global `dir` into one long table,
# normalise the surgery labels, and write the yearly output CSV
# (02.output/DPC_<year>.csv). Relies on the globals `dir` and `year`.
make_list <- function(){
  file_list <- list.files(path = dir, full.names = TRUE)
  tmp <- lapply(file_list,fread)
  # Coerce every column to character, restore the count column to numeric,
  # and stack all files in one call — the original grew `dat` with
  # bind_rows() inside a loop, which is O(n^2) in copies.
  dat <- bind_rows(lapply(tmp, function(d) {
    d %>%
      mutate_all(as.character) %>%
      mutate(値=as.numeric(値))
  }))
  # Normalise full-width characters, strip spaces, derive year/MDC/report
  # type from the file name, split the composite surgery label, and map the
  # surgery codes to their human-readable labels.
  dat_fix <- dat %>%
    mutate(手術=stri_trans_nfkc(手術),
           手術=str_remove_all(手術," "),
           手術=str_remove_all(手術," "),
           年次=str_sub(file,-3,-1),
           MDC=str_sub(file,-5,-4),
           集計内容=str_sub(file,1,-10)) %>%
    separate(手術,c("診断群分類","診断群分類コード","件数","手術","処置"),sep="_") %>%
    select(-件数) %>%
    mutate(
      手術=case_when(
        手術=="99" ~ "手術なし",
        手術=="97" ~ "その他手術あり",
        str_detect(手術,"輸血以外") ~ "その他手術あり_輸血以外再掲",
        TRUE~手術))
  write.csv(dat_fix,str_c("02.output/DPC_",year,".csv",sep="") , row.names = FALSE)
}
### R01 #################################### -----------------------------------------------------------------
# Fiscal year R01 (2019): download the MHLW DPC open-data Excel files,
# convert each to a tidy CSV, and assemble the yearly output.
year<- "R01"
extension <- "xlsx"
dir <- paste(getwd(),"/01.input/DPC_",year,sep="")
dir.create(dir)
url <- "https://www.mhlw.go.jp/content/12404000/"
# Load data: facility overview table ------- -----------------------------------------------------------------
contents<-"/施設概要_"
file_name <- str_c(file.path(dir),contents,year)
dl.dpc(str_c(url,"000758182.xlsx"))
tmp <- read_excel(str_c(file_name,extension)) %>% clean_name() %>% filter(!is.na(都道府県))
write.csv(tmp,str_c("01.input/施設概要_",year,".csv"),row.names = FALSE)
file.remove(str_c(file_name,extension))
# Load data: counts by disease and surgery - -----------------------------------------------------------------
contents<-"/疾患別手術別集計_"
# One MHLW content id per MDC chapter, in MDC order.
file_list <- list("000758261.xlsx",
"000758264.xlsx",
"000758265.xlsx",
"000758266.xlsx",
"000758267.xlsx",
"000758269.xlsx",
"000758271.xlsx",
"000758272.xlsx",
"000758274.xlsx",
"000758275.xlsx",
"000758276.xlsx",
"000758277.xlsx",
"000758278.xlsx",
"000758279.xlsx",
"000758280.xlsx",
"000758281.xlsx",
# no file published for MDC17 this year
"000758282.xlsx"
)
for(i in c(1:17)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else if (i==17){
file_name <- str_c(file.path(dir),contents,"MDC",i+1,year) # MDC17 has no file, so slot 17 holds MDC18
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year)
}
dl.dpc(str_c(url,file_list[i]))
make_data_skip3()
Sys.sleep(10)
}
# Load data: counts by disease x surgery x procedure 1  ------------------------------------------------------
contents<-"/疾患別手術有無処置1別集計_"
file_list <- list("000758327.xlsx",
"000758332.xlsx",
"000758335.xlsx",
"000758336.xlsx",
"000758338.xlsx",
"000758347.xlsx",
"000758348.xlsx",
"000758349.xlsx",
"000758350.xlsx",
"000758351.xlsx",
"000758352.xlsx",
"000758353.xlsx",
"000758354.xlsx",
"000758355.xlsx",
"000758358.xlsx",
"000758359.xlsx",
"000758360.xlsx",
"000758361.xlsx")
for(i in c(1:18)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year) }
dl.dpc(str_c(url,file_list[i]))
make_data_skip4()
Sys.sleep(10)
}
# Load data: counts by disease x surgery x procedure 2  ------------------------------------------------------
contents<-"/疾患別手術有無処置2別集計_"
file_list <- list("000758381.xlsx",
"000758384.xlsx",
"000758385.xlsx",
"000758386.xlsx",
"000758387.xlsx",
"000758389.xlsx",
"000758390.xlsx",
"000758391.xlsx",
"000758395.xlsx",
"000758396.xlsx",
"000758398.xlsx",
"000758399.xlsx",
"000758400.xlsx",
"000758401.xlsx",
"000758403.xlsx",
"000758404.xlsx",
"000758416.xlsx",
"000758417.xlsx")
for(i in c(1:18)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year) }
dl.dpc(str_c(url,file_list[i]))
make_data_skip4()
Sys.sleep(10)
}
# Reshape and write the yearly output ------ -------------------------------------------------------------------
make_list()
### H30 #################################### -----------------------------------------------------------------
# Fiscal year H30 (2018): same pipeline as the R01 section above, then the
# two yearly outputs are concatenated into one combined CSV.
year <- "H30"
extension <- "xlsx"
dir <- paste(getwd(),"/01.input/DPC_",year,sep="")
dir.create(dir)
url <- "https://www.mhlw.go.jp/content/12404000/"
# Load data: facility overview table ------- -----------------------------------------------------------------
contents<-"/施設概要_"
file_name <- str_c(file.path(dir),contents,year)
dl.dpc(str_c(url,"000612770.xlsx"))
# Bug fix: the file is downloaded to str_c(file_name, extension), but the
# original read it back as str_c(file_name, ".xls") — a path that never
# exists. Read the same path dl.dpc wrote, as the R01 section does.
tmp <- read_excel(str_c(file_name,extension)) %>% clean_name() %>% filter(!is.na(都道府県))
write.csv(tmp,str_c("01.input/施設概要_",year,".csv"),row.names = FALSE)
file.remove(str_c(file_name,extension))
# Load data: counts by disease and surgery - -----------------------------------------------------------------
contents<-"/疾患別手術別集計_"
# NOTE(review): "000612849.xlsx" appears at both position 1 (MDC01) and
# position 16 (MDC16); MDC16 therefore receives MDC01's file. Likely a
# data-entry error in the original — verify the correct content id on the
# MHLW page before relying on MDC16 output.
file_list <- list("000612849.xlsx",
"000612850.xlsx",
"000612851.xlsx",
"000612852.xlsx",
"000612853.xlsx",
"000612855.xlsx",
"000612864.xlsx",
"000612865.xlsx",
"000612866.xlsx",
"000612867.xlsx",
"000612868.xlsx",
"000612869.xlsx",
"000612870.xlsx",
"000612871.xlsx",
"000612872.xlsx",
"000612849.xlsx",
"000612873.xlsx",
"000612874.xlsx")
for(i in c(1:18)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year)
}
dl.dpc(str_c(url,file_list[i]))
make_data_skip3()
Sys.sleep(10)
}
# Load data: counts by disease x surgery x procedure 1  ------------------------------------------------------
contents<-"/疾患別手術有無処置1別集計_"
file_list <- list("000612879.xlsx",
"000612881.xlsx",
"000613995.xlsx",
"000612891.xlsx",
"000612897.xlsx",
"000612901.xlsx",
"000612904.xlsx",
"000612905.xlsx",
"000612906.xlsx",
"000612909.xlsx",
"000612910.xlsx",
"000612911.xlsx",
"000612912.xlsx",
"000612915.xlsx",
"000612917.xlsx",
"000612918.xlsx",
"000612919.xlsx",
"000612921.xlsx")
for(i in c(1:18)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year)
}
dl.dpc(str_c(url,file_list[i]))
make_data_skip4()
Sys.sleep(10)
}
# Load data: counts by disease x surgery x procedure 2  ------------------------------------------------------
contents<-"/疾患別手術有無処置2別集計_"
# NOTE(review): "000612904.xlsx" (MDC07) also appears in the procedure-1
# list above — verify against the MHLW page.
file_list <- list("000612928.xlsx",
"000612929.xlsx",
"000612930.xlsx",
"000612931.xlsx",
"000612932.xlsx",
"000612934.xlsx",
"000612904.xlsx",
"000612940.xlsx",
"000612945.xlsx",
"000612947.xlsx",
"000612951.xlsx",
"000612953.xlsx",
"000612958.xlsx",
"000612959.xlsx",
"000612961.xlsx",
"000612963.xlsx",
"000612964.xlsx",
"000612966.xlsx")
for(i in c(1:18)){
cat("i:",i,"\n")
if(i < 10){
file_name <- str_c(file.path(dir),contents,"MDC0",i,year)
} else {
file_name <- str_c(file.path(dir),contents,"MDC",i,year)
}
dl.dpc(str_c(url,file_list[i]))
make_data_skip4()
Sys.sleep(10)
}
# Reshape and write the yearly output ------ -------------------------------------------------------------------
make_list()
# Combine the two yearly outputs ----
dat_dpc <- bind_rows(
  fread("02.output/DPC_H30.csv"),
  fread("02.output/DPC_R01.csv")
)
write.csv(dat_dpc,"02.output/DPC_open_dataH30_R01.csv",row.names = FALSE)
|
edec459ff80efd103d6189461f3e2510364f8c81
|
15a98789576de659ef0c9b32fdbf2cf3c01de5e1
|
/R/ab.gmu.r
|
d48a30133c659ede6116d318c2c923f0fe001929
|
[] |
no_license
|
cran/AIGIS
|
05a44c6ecfae102a65a3f015483f70408278ddf4
|
5d3c47c4000522e003ad7fe094f6f593e78141f1
|
refs/heads/master
| 2020-05-17T14:33:38.875727
| 2012-01-05T00:00:00
| 2012-01-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
ab.gmu.r
|
# Distribute interpolated damage totals onto a grid the shape of `maskobj`.
# For each selected grid index, the flattened cell id in gridgpcobj[[2]] is
# decoded into a (row, col) position and the corresponding scaled damage is
# written into the output matrix. Cells not covered stay NA.
#
# Args:
#   gridgpcobj: list; [[2]] holds flattened cell ids, [[3]] the selected
#     positions into [[2]] (and into the interpolated totals).
#   gws:        grid windows passed to dp.interp().
#   afracs:     per-cell area fractions multiplying the interpolated totals.
#   recvar, recvals, appdam, trimmedin, dr: forwarded to dp.interp().
#   maskobj:    matrix whose dimensions define the output grid.
# Returns: a nrow(maskobj) x ncol(maskobj) matrix of damages (NA elsewhere).
ab.gmu <- function(gridgpcobj = gridgpc, gws = gridinws, afracs, recvar = 1,
                   recvals = bgvals, appdam = FALSE, trimmedin = TRUE,
                   dr = damrats, maskobj = MASK) {
  # Interpolated totals per grid window, scaled by area fractions.
  gtots <- sapply(gws, dp.interp, recvar = recvar, recvals = recvals,
                  appdam = appdam, trimmedin = trimmedin, nobounds = TRUE, dr = dr)
  gdams <- afracs * gtots
  gstats <- matrix(NA, nrow = nrow(maskobj), ncol = ncol(maskobj))
  idx <- gridgpcobj[[3]]
  # seq_along() replaces 1:length(), which would iterate c(1, 0) on an
  # empty index vector.
  for (i in seq_along(idx)) {
    ki <- gridgpcobj[[2]][idx[i]]
    # Decode the flattened cell id (row-major with stride 79). The original
    # applied offsets in two steps (+4 - 3 and +59 - 55); the nets are
    # +1 and +4.
    gridi <- ki %/% 79 + 1
    gridj <- ki %% 79 + 4
    gstats[gridi, gridj] <- gdams[idx[i]]
  }
  return(gstats)
}
|
6a3ee6317b30b18cb42a908ec3a4fcd2f17f7bf5
|
d107b38a3a0e42320a3c9beadfdf0479c656bec7
|
/src/src_2016/003_wrangling_nonreporting_agency_data.R
|
1cf200d7cdae268aaab698711b5646b03cf72915
|
[] |
no_license
|
sefabey/fbi_hate_crimes_data_viz
|
8a1d75e12cc6c141fd3923e3f3c643eadc93a16b
|
fc7e603c9c894ae5a72e550d94ebc6cc30fc9cfc
|
refs/heads/master
| 2021-06-30T07:43:00.288198
| 2020-08-26T22:42:06
| 2020-08-26T22:42:06
| 140,181,181
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,045
|
r
|
003_wrangling_nonreporting_agency_data.R
|
# wrangling data from agencies that do not report hate crimes.
# Workflow: read the FBI table-14 agency list, keep agencies serving >10k
# people, then geocode them with ggmap. Google's geocoder intermittently
# returns NA (and has a daily quota), so each "iteration" below re-submits
# only the rows that are still missing lon/lat and checkpoints the result
# to a numbered CSV. Observation counts in the trailing comments record the
# state at the time the script was run.
# NOTE(review): `sensor=F` uses the reassignable shorthand F — should be
# FALSE; left as-is here since this is a comments-only pass.
library(tidyverse)
library(ggmap)
non_report_agencies <- read_csv('data/table_14_agency_hate_crime_reporting_by_state_2016.csv') %>%
  mutate(agency_with_state= paste(agency_name, state, sep = ", ")) #paste state names to agencies for unambiguous geocoding
table(non_report_agencies$agency_type) #cities, metropolitan counties, non metropolitan counties, other agencies, state police, tribal agencies, universities and colleged
# Split cities from non-cities, keep agencies with population > 10,000.
city_over_10k <- non_report_agencies %>%
  filter(agency_type=="Cities") %>%
  filter(population>10000) #2260 observations
not_city_over_10k <- non_report_agencies %>%
  filter(agency_type!="Cities") %>%
  filter(population>10000) #177 observations
over_10K <- rbind(city_over_10k, not_city_over_10k) #2437 observations
geocodeQueryCheck() #2500 geocoding queries remaining.
# Iteration 1: geocode everything once.
over_10K_geo <- over_10K %>%
  mutate_geocode(agency_with_state, sensor=F) #991 NAs still
over_10K_geo %>% write_csv('data/table_14_geo_001.csv') #save for future
# iteration 2: retry only the rows that failed, merge with the successes.
over_10K_002 <- read_csv('data/table_14_geo_001.csv') %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F) # query 991 rows where lon,lat is NA
over_10K_geo_001 <- read_csv('data/table_14_geo_001.csv') %>%
  filter(!is.na(lon))
over_10K_geo_002 <- rbind(over_10K_geo_001,over_10K_002 )
over_10K_geo_002 %>% filter(is.na(lon)) #362 Nas still
over_10K_geo_002 %>% write_csv("data/table_14_geo_002.csv") #save for the future
# iteration 3
over_10K_003 <- read_csv('data/table_14_geo_002.csv') %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F) #query remaining 362 rows where lon,lat is NA
over_10K_geo_003 <- read_csv('data/table_14_geo_002.csv') %>%
  filter(!is.na(lon))
over_10K_003 %>% nrow() #362
over_10K_geo_003 %>% nrow #2075
over_10K_geo_004 <- rbind(over_10K_003,over_10K_geo_003 )
over_10K_geo_004 %>% write_csv("data/table_14_geo_003.csv")
# iteration 4
over_10K_004 <- read_csv("data/table_14_geo_003.csv") %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F) #query remaining 110 rows
over_10K_geo_005 <- read_csv('data/table_14_geo_003.csv') %>%
  filter(!is.na(lon))
over_10K_004 %>% nrow() #110
over_10K_geo_005 %>% nrow #2327
over_10K_geo_006 <- rbind(over_10K_004,over_10K_geo_005 )
over_10K_geo_006 %>% write_csv("data/table_14_geo_004.csv")
# iteration 5
over_10K_005 <- read_csv("data/table_14_geo_004.csv") %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F) #query remaining 36 rows
over_10K_geo_007 <- read_csv('data/table_14_geo_004.csv') %>%
  filter(!is.na(lon))
over_10K_005 %>% nrow() #36
over_10K_geo_007 %>% nrow #2401
over_10K_geo_008 <- rbind(over_10K_005,over_10K_geo_007 )
over_10K_geo_008 %>% write_csv("data/table_14_geo_005.csv")
# iteration 6
over_10K_006 <- read_csv("data/table_14_geo_005.csv") %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F)#query remaining 12 rows
over_10K_geo_009 <- read_csv('data/table_14_geo_005.csv') %>%
  filter(!is.na(lon))
over_10K_006 %>% nrow() #12
over_10K_geo_009 %>% nrow #2425
over_10K_geo_010 <- rbind(over_10K_006,over_10K_geo_009 )
over_10K_geo_010 %>% write_csv("data/table_14_geo_006.csv")
# iteration 7
over_10K_007 <- read_csv("data/table_14_geo_006.csv") %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F)#query remaining 8 rows
over_10K_geo_011 <- read_csv('data/table_14_geo_006.csv') %>%
  filter(!is.na(lon))
over_10K_007 %>% nrow() #8
over_10K_geo_011 %>% nrow #2429
over_10K_geo_012 <- rbind(over_10K_007,over_10K_geo_011 )
over_10K_geo_012 %>% write_csv("data/table_14_geo_007.csv")
# iteration 8 (note: reuses the name over_10K_007 from iteration 7)
over_10K_007 <- read_csv("data/table_14_geo_007.csv") %>%
  filter(is.na(lon)) %>%
  select(-lon,-lat) %>%
  mutate_geocode(agency_with_state, sensor=F)#query remaining 2 rows
|
8096e4b8425acdf462ac499a3adcee7923c430e8
|
cd26830d83292aab743af6cbf46184a781d41fc6
|
/analysis/Misc scripts/aPrioriGenes-Corr-log.R
|
85c8cb15a1abc6b7218d5b7feff0b1ee32314f42
|
[] |
no_license
|
OwenLabMSU/bwteDE
|
5df1b95d6ced868dbfff5f7a5ebdf6c7636b8733
|
a79ffa5d979f7afa60409431a6e268031e56b93e
|
refs/heads/master
| 2023-08-31T10:48:59.035636
| 2020-12-03T01:40:45
| 2020-12-03T01:40:45
| 250,002,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,807
|
r
|
aPrioriGenes-Corr-log.R
|
#### aPrioriGenes-Corr ###
## Load packages and data
library(tidyverse)
library(edgeR)
# NOTE(review): absolute setwd() to a shared-drive path makes the script
# machine-specific; relative paths or here::here() would be more portable.
setwd("G:/Shared drives/RNA Seq Supershedder Project/BWTE DE manuscript")
## Load data
# Trinotate annotation report, RSEM isoform/gene count matrices, sample
# covariates, and the curated a priori target list (rows flagged include=yes).
annot <- read.delim("./extData/Trinotate.csv", header = TRUE, sep = "\t")
cnt.trans <- read.table("./extData/rsem.isoform.counts.matrix", header = TRUE)
cnt.gene <- read.table("./extData/rsem.gene.counts.matrix", header = TRUE)
covars <- read.csv("./extData/BWTE54_SSgroup_Raw_Pool.csv", header = TRUE)
targets <- read_delim("./extData/aPrioriTranscripts_V2.csv", delim = ",") %>%
filter(include == "Yes" | include == "yes")
## Clean data
# Four parallel pipelines: drop the excluded samples (birds 36 and 19, both
# tissues), rebuild compact gene/transcript IDs from the TRINITY row names,
# and split the counts by tissue (Bursa vs Ileum) at gene and isoform level.
cnt.bursa.gene <- cnt.gene %>%
select(-alignEstimateAbundance_BWTE_Ileum_36_S50,
-alignEstimateAbundance_BWTE_Bursa_36_S31,-alignEstimateAbundance_BWTE_Ileum_19_S35,
-alignEstimateAbundance_BWTE_Bursa_19_S14) %>%
rownames_to_column("gene") %>%
separate(gene, into = c(NA, "pt1", "pt2", "gene", NA)) %>%
unite(gene, pt1, pt2, gene) %>%
column_to_rownames("gene") %>%
select(contains("Bursa"))
cnt.ileum.gene <- cnt.gene %>%
select(-alignEstimateAbundance_BWTE_Ileum_36_S50,
-alignEstimateAbundance_BWTE_Bursa_36_S31,-alignEstimateAbundance_BWTE_Ileum_19_S35,
-alignEstimateAbundance_BWTE_Bursa_19_S14) %>%
rownames_to_column("gene") %>%
separate(gene, into = c(NA, "pt1", "pt2", "gene", NA)) %>%
unite(gene, pt1, pt2, gene) %>%
column_to_rownames("gene") %>%
select(contains("Ileum"))
# Transcript-level versions keep the isoform suffix in the ID.
cnt.bursa.trans <- cnt.trans %>%
select(-alignEstimateAbundance_BWTE_Ileum_36_S50,
-alignEstimateAbundance_BWTE_Bursa_36_S31,-alignEstimateAbundance_BWTE_Ileum_19_S35,
-alignEstimateAbundance_BWTE_Bursa_19_S14) %>%
rownames_to_column("transcript") %>%
separate(transcript, into = c(NA, "pt1", "pt2", "gene", "isoform")) %>%
unite(transcript, pt1, pt2, gene, isoform) %>%
column_to_rownames("transcript") %>%
select(contains("Bursa"))
cnt.ileum.trans <- cnt.trans %>%
select(-alignEstimateAbundance_BWTE_Ileum_36_S50,
-alignEstimateAbundance_BWTE_Bursa_36_S31,-alignEstimateAbundance_BWTE_Ileum_19_S35,
-alignEstimateAbundance_BWTE_Bursa_19_S14) %>%
rownames_to_column("transcript") %>%
separate(transcript, into = c(NA, "pt1", "pt2", "gene", "isoform")) %>%
unite(transcript, pt1, pt2, gene, isoform) %>%
column_to_rownames("transcript") %>%
select(contains("Ileum"))
# Drop the excluded birds from the covariates and strip the dash from the
# group labels (e.g. "I-1" -> "I1") so they match the labels used below.
covars <- covars %>%
filter(!bird %in% c("36", "19")) %>%
arrange(bird) %>%
mutate(group = str_remove(group, "-"))
# Rebuild matching gene_id / transcript_id columns in the annotation table.
annot <- annot %>%
separate(transcript_id, into = c(NA, "pt1", "pt2", "gene", "isoform")) %>%
unite(gene_id, pt1, pt2, gene, remove = FALSE) %>%
unite(transcript_id, pt1, pt2, gene, isoform)
#### Calculate log(CPM) and assemble master DFs ####
#Convert to DGEList object
dge.bursa.trans <- DGEList(counts=cnt.bursa.trans)
dge.bursa.gene <- DGEList(counts=cnt.bursa.gene)
dge.ileum.trans <- DGEList(counts=cnt.ileum.trans)
dge.ileum.gene <- DGEList(counts=cnt.ileum.gene)
#CPM and log-CPM
lcpm.bursa.trans <- cpm(dge.bursa.trans, log = TRUE)
lcpm.bursa.gene <- cpm(dge.bursa.gene, log = TRUE)
lcpm.ileum.trans <- cpm(dge.ileum.trans, log = TRUE)
lcpm.ileum.gene <- cpm(dge.ileum.gene, log = TRUE)
## Master lcpm tibs
# Trans
# Build one long table per level (transcript here, gene below): join the two
# tissues, keep only a priori targets, pivot to one row per feature/sample,
# attach covariates, then collapse the dpi1-5 titers into a per-bird mean.
lcpm.bursa.tmp <- lcpm.bursa.trans %>%
as_tibble(rownames = NA) %>%
rownames_to_column("transcript")
lcpm.ileum.tmp <- lcpm.ileum.trans %>%
as_tibble(rownames = NA) %>%
rownames_to_column("transcript")
lcpm.trans <- lcpm.bursa.tmp %>%
full_join(lcpm.ileum.tmp) %>%
# NOTE(review): replacing NAs with the string "0" can coerce affected
# columns to character; confirm numeric 0 was not intended here.
replace(., is.na(.), "0") %>%
filter(transcript %in% targets$transcript_id) %>%
pivot_longer(cols = contains("_"),
names_to = "sample",
values_to = "lcpm") %>%
separate(sample, into = c(NA, NA, "tissue", "bird", NA)) %>%
mutate(bird = as.integer(bird)) %>%
left_join(covars, by = "bird") %>%
mutate(levelGT = "transcript", identifier = transcript) %>%
select(identifier, levelGT, tissue, bird, lcpm, virus.sac, group, virus.dpi1, virus.dpi2, virus.dpi3, virus.dpi4, virus.dpi5) %>%
mutate(log.virus.sac = log10(virus.sac+1)) %>%
pivot_longer(cols = virus.dpi1:virus.dpi5,
names_to = "dpi",
values_to = "titer") %>%
group_by(identifier, bird, levelGT, tissue, lcpm, log.virus.sac, group) %>%
# NOTE(review): no +1 offset here, unlike the gene-level summarize below
# (log10(mean(titer + 1, ...))) — zero titers give -Inf at transcript
# level; confirm which form is intended.
summarize(log.meanTiter1to5 = log10(mean(titer, na.rm = TRUE)))
# Gene
lcpm.bursa.tmp <- lcpm.bursa.gene %>%
as_tibble(rownames = NA) %>%
rownames_to_column("gene")
lcpm.ileum.tmp <- lcpm.ileum.gene %>%
as_tibble(rownames = NA) %>%
rownames_to_column("gene")
# lcpm.all stacks gene- and transcript-level rows and normalizes the group
# factor (both control groups collapsed into "Ctl").
lcpm.all <- lcpm.bursa.tmp %>%
full_join(lcpm.ileum.tmp) %>%
replace(., is.na(.), "0") %>%
filter(gene %in% targets$gene_id) %>%
pivot_longer(cols = contains("_"),
names_to = "sample",
values_to = "lcpm") %>%
separate(sample, into = c(NA, NA, "tissue", "bird", NA)) %>%
mutate(bird = as.integer(bird)) %>%
left_join(covars, by = "bird") %>%
mutate(levelGT = "gene", identifier = gene) %>%
select(identifier, levelGT, tissue, bird, lcpm, virus.sac, group, virus.dpi1, virus.dpi2, virus.dpi3, virus.dpi4, virus.dpi5) %>%
mutate(log.virus.sac = log10(virus.sac+1)) %>%
pivot_longer(cols = virus.dpi1:virus.dpi5,
names_to = "dpi",
values_to = "titer") %>%
group_by(identifier, bird, levelGT, tissue, lcpm, log.virus.sac, group) %>%
summarize(log.meanTiter1to5 = log10(mean(titer + 1, na.rm = TRUE))) %>%
bind_rows(lcpm.trans) %>%
mutate(group = recode(group,
C1 = "Ctl",
C14 = "Ctl")) %>%
mutate(group = factor(group,
levels = c("Ctl", "I1", "I3", "I5", "I14")))
#### Gene query database ####
# Flatten the Trinotate annotation into one lookup table: split the caret-
# delimited BLASTX/BLASTP hits into gene name + function, and keep the first
# five backtick-delimited GO/Pfam entries per feature.
annot.all <- annot %>%
select(transcript_id,
gene_id,
sprot_Top_BLASTX_hit,
sprot_Top_BLASTP_hit,
gene_ontology_BLASTX,
gene_ontology_BLASTP,
Kegg,
eggnog,
Pfam) %>%
separate(sprot_Top_BLASTX_hit, into = c("sprot_geneName_BlastX", NA, NA, NA, NA, "sprot2", NA), "\\^") %>%
separate(sprot2, sep = "=", into = c(NA, "sprot_geneFunction_BlastX")) %>%
separate(sprot_geneFunction_BlastX, sep = ";", into = c("sprot_geneFunction_BlastX", NA)) %>%
separate(sprot_Top_BLASTP_hit, into = c("sprot_geneName_BlastP", NA, NA, NA, NA, "sprot2", NA), "\\^") %>%
separate(sprot2, sep = "=", into = c(NA, "sprot_geneFunction_BlastP")) %>%
separate(sprot_geneFunction_BlastP, sep = ";", into = c("sprot_geneFunction_BlastP", NA)) %>%
separate(gene_ontology_BLASTX, sep = "\\`", into = paste("GO_BlastX", 1:5, sep = "_"), extra = "drop", fill = "right") %>%
separate(gene_ontology_BLASTP, sep = "\\`", into = paste("GO_BlastP", 1:5, sep = "_"), extra = "drop", fill = "right") %>%
separate(Pfam, sep = "\\`", into = paste("Pfam", 1:5, sep = "_"), extra = "drop", fill = "right") %>%
as_tibble()
#### Analysis function ####
#' Correlate expression with log mean viral titer (dpi 1-5).
#'
#' For one feature (`target`) at one `targetLevel` ("gene"/"transcript") and
#' one `targetTissue` ("Bursa"/"Ileum"), runs cor.test(log.meanTiter1to5, lcpm)
#' on all birds and within each infection group. Reads the global `lcpm.all`.
#' Returns a 5-row tibble with columns Est, pval, target, group — the same
#' shape the original hand-unrolled version produced.
aprioriAnalysis.mean <- function(target, targetTissue, targetLevel, ...) {
  # library() inside the function so %dopar% workers have tidyverse attached.
  library(tidyverse)
  datSet <- lcpm.all %>%
    filter(levelGT == targetLevel,
           identifier == target,
           tissue == targetTissue)
  # One cor.test per subset instead of five copy-pasted blocks.
  groups <- c("all", "I1", "I3", "I5", "I14")
  rows <- lapply(groups, function(g) {
    sub <- if (g == "all") datSet else filter(datSet, group == g)
    ct <- cor.test(sub$log.meanTiter1to5, sub$lcpm)
    tibble(Est = unname(ct$estimate), pval = ct$p.value)
  })
  bind_rows(rows) %>%
    mutate(target = target, group = groups)
}
#' Correlate expression with log viral titer at sacrifice.
#'
#' Same contract as aprioriAnalysis.mean() but correlates lcpm against
#' log.virus.sac. Reads the global `lcpm.all`; returns a 5-row tibble with
#' columns Est, pval, target, group ("all", "I1", "I3", "I5", "I14").
aprioriAnalysis.sac <- function(target, targetTissue, targetLevel, ...) {
  # library() inside the function so %dopar% workers have tidyverse attached.
  library(tidyverse)
  datSet <- lcpm.all %>%
    filter(levelGT == targetLevel,
           identifier == target,
           tissue == targetTissue)
  # One cor.test per subset instead of five copy-pasted blocks.
  groups <- c("all", "I1", "I3", "I5", "I14")
  rows <- lapply(groups, function(g) {
    sub <- if (g == "all") datSet else filter(datSet, group == g)
    ct <- cor.test(sub$log.virus.sac, sub$lcpm)
    tibble(Est = unname(ct$estimate), pval = ct$p.value)
  })
  bind_rows(rows) %>%
    mutate(target = target, group = groups)
}
#### Run analysis loop ####
# Run both correlation analyses in parallel over every feature with non-zero
# lcpm variance, for each tissue x level combination (IG/IT/BT/BG).
library(doParallel)
cores=detectCores()
cl <- makeCluster(cores[1]-1) #not to overload your computer
registerDoParallel(cl)
targetTissue <- "Ileum" ## Bursa or Ileum
targetLevel <- "gene" ## gene or transcript
# Features whose lcpm actually varies (cor.test would fail on constants).
set <- lcpm.all %>%
filter(levelGT == targetLevel, tissue == targetTissue) %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
finalMatrix.mean.IG <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.IG = aprioriAnalysis.mean(z, targetTissue, targetLevel)
tmpMatrix.IG
}
finalMatrix.sac.IG <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.IG = aprioriAnalysis.sac(z, targetTissue, targetLevel)
tmpMatrix.IG
}
#### Run analysis loop ####
targetTissue <- "Ileum" ## Bursa or Ileum
targetLevel <- "transcript" ## gene or transcript
set <- lcpm.all %>%
filter(levelGT == targetLevel, tissue == targetTissue) %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
# NOTE(review): results.mean / results.sac are initialized here and in the
# blocks below but never used — likely leftovers from a pre-foreach version.
results.mean <- list()
results.sac <- list()
finalMatrix.mean.IT <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.IT = aprioriAnalysis.mean(z, targetTissue, targetLevel)
tmpMatrix.IT
}
finalMatrix.sac.IT <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.IT = aprioriAnalysis.sac(z, targetTissue, targetLevel)
tmpMatrix.IT
}
#### Run analysis loop ####
targetTissue <- "Bursa" ## Bursa or Ileum
targetLevel <- "transcript" ## gene or transcript
set <- lcpm.all %>%
filter(levelGT == targetLevel, tissue == targetTissue) %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
results.mean <- list()
results.sac <- list()
finalMatrix.mean.BT <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.BT = aprioriAnalysis.mean(z, targetTissue, targetLevel)
tmpMatrix.BT
}
finalMatrix.sac.BT <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.BT = aprioriAnalysis.sac(z, targetTissue, targetLevel)
tmpMatrix.BT
}
#### Run analysis loop ####
targetTissue <- "Bursa" ## Bursa or Ileum
targetLevel <- "gene" ## gene or transcript
set <- lcpm.all %>%
filter(levelGT == targetLevel, tissue == targetTissue) %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
results.mean <- list()
results.sac <- list()
finalMatrix.mean.BG <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.BG = aprioriAnalysis.mean(z, targetTissue, targetLevel)
tmpMatrix.BG
}
finalMatrix.sac.BG <- foreach(z = unique(set$identifier), .combine = rbind) %dopar% {
tmpMatrix.BG = aprioriAnalysis.sac(z, targetTissue, targetLevel)
tmpMatrix.BG
}
#stop cluster
stopCluster(cl)
## Recreate the "sets" for further analysis
# Same variance-filtered feature lists as above, one per tissue x level,
# kept under distinct names so they survive past the loop section.
set.BT <- lcpm.all %>%
filter(levelGT == "transcript", tissue == "Bursa") %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
set.BG <- lcpm.all %>%
filter(levelGT == "gene", tissue == "Bursa") %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
set.IT <- lcpm.all %>%
filter(levelGT == "transcript", tissue == "Ileum") %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
set.IG <- lcpm.all %>%
filter(levelGT == "gene", tissue == "Ileum") %>%
group_by(identifier) %>%
summarize(varLCPM = round(var(lcpm), 5)) %>%
filter(varLCPM > 0)
## Process DPI1-5 mean results
# For each tissue x level table: keep the all-birds correlations, FDR-adjust
# the p-values across those rows, and retain hits with adjusted p < 0.1.
finalMatrix.mean.BG.sig <- finalMatrix.mean.BG %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "mean.BG")
finalMatrix.mean.BT.sig <- finalMatrix.mean.BT %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "mean.BT")
finalMatrix.mean.IG.sig <- finalMatrix.mean.IG %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "mean.IG")
finalMatrix.mean.IT.sig <- finalMatrix.mean.IT %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "mean.IT")
## Process sacrifice day results
finalMatrix.sac.BG.sig <- finalMatrix.sac.BG %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "sac.BG")
finalMatrix.sac.BT.sig <- finalMatrix.sac.BT %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "sac.BT")
finalMatrix.sac.IG.sig <- finalMatrix.sac.IG %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "sac.IG")
finalMatrix.sac.IT.sig <- finalMatrix.sac.IT %>%
filter(group == "all") %>%
mutate(adj.p.value = p.adjust(pval, method='fdr', n = nrow(.))) %>%
filter(adj.p.value < 0.1) %>%
mutate(comparison = "sac.IT")
#### Bind everything up ####
# Stack all significant hits into one table per analysis type.
sigResults.mean <- bind_rows(finalMatrix.mean.BG.sig,
finalMatrix.mean.BT.sig,
finalMatrix.mean.IG.sig,
finalMatrix.mean.IT.sig)
sigResults.sac <- bind_rows(finalMatrix.sac.BG.sig,
finalMatrix.sac.BT.sig,
finalMatrix.sac.IG.sig,
finalMatrix.sac.IT.sig)
### Clean up and save ###
# Drop intermediates so save.image() keeps only the results of interest.
# NOTE(review): finalMatrix, finalMatrix.IG and tmpMatrix are never created
# in this script (the tmpMatrix.* temporaries live inside the foreach
# workers), so rm() will emit "object not found" warnings for them.
remove(
annot,
targets,
cnt.bursa.gene,
cnt.bursa.trans,
cnt.gene,
cnt.ileum.gene,
cnt.ileum.trans,
covars,
dge.bursa.gene,
dge.bursa.trans,
dge.ileum.gene,
dge.ileum.trans,
lcpm.bursa.gene,
lcpm.bursa.tmp,
lcpm.bursa.trans,
lcpm.ileum.gene,
lcpm.ileum.tmp,
lcpm.ileum.trans,
lcpm.trans,
annot.all,
cl,
cnt.trans,
finalMatrix,
finalMatrix.IG,
lcpm.all,
results.mean,
results.sac,
set,
set.BG,
set.BT,
set.IG,
set.IT,
tmpMatrix
)
save.image("aPrioriGenes-Corr-log.Rws")
|
b396401b1d30dc077b938f4fc5fbf9f8948e9702
|
4ada124cda1d123cebb27d7f89b5f4eb79a3821f
|
/JustATest.R
|
649d8efb2a137b7c4d997d2c88314e1a80778f96
|
[] |
no_license
|
mcbtBINF/TestRepo
|
6f5c6c538e23c3e635a42066f53fbc1b11b212e5
|
f488f4c83028202e5e20dfa988e7f907e18d1bb3
|
refs/heads/master
| 2020-03-16T20:15:16.743489
| 2018-05-14T13:42:13
| 2018-05-14T13:42:13
| 132,952,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
JustATest.R
|
# TODO: Add comment
#
# Author: mbrown67
###############################################################################
# Maybe it is working now
|
badac99b1f567b0535fa0f2b1d65098d4be04ab1
|
9e06252e613edcefaa4d7c569a3f18ab4ede85e4
|
/man/getAllVotes.Rd
|
17948260cd65bd71b5917655c5d8477789a61658
|
[] |
no_license
|
umatter/pvsR
|
b9be083c1224a96fdbc817b2c2749b9763284e7d
|
9ab57a5a67c0bbf9e0342ea37e14ea496d180df4
|
refs/heads/master
| 2021-01-19T08:41:25.275771
| 2021-01-05T06:38:13
| 2021-01-05T06:38:13
| 87,662,576
| 1
| 3
| null | 2021-01-05T06:33:50
| 2017-04-08T20:34:59
|
R
|
UTF-8
|
R
| false
| true
| 2,163
|
rd
|
getAllVotes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAllVotes.R
\name{getAllVotes}
\alias{getAllVotes}
\title{Get several votes}
\usage{
getAllVotes(actionId, batchsize=100, pause=0, backupfile="votes.list.Rdata")
}
\arguments{
\item{actionId}{a character string or list of character strings with the action ID(s) (see references for details)}
\item{batchsize}{numerical, indicating how many actionIds should be processed in one batch (defaults to 100).}
\item{pause}{numerical, indicating how long (in seconds) the download process should be paused after each batch (defaults to 0)}
\item{backupfile}{character string for the path/file-name of the Rdata-file where the data should be saved (batch-wise) during the download process (default: "votes.list.Rdata").}
}
\value{
A data frame with a row for each vote and columns with the following variables describing the vote:\cr votes.vote*.candidateId,\cr votes.vote*.candidateName,\cr votes.vote*.officeParties,\cr votes.vote*.action.
}
\description{
This function is essentially a wrapper around Votes.getBillActionVotes() specified for large amount of requests.
}
\details{
This functions splits large requests into several batches. The requests are then processed batch-wise and are saved on the local disc to make sure that not too much RAM is assigned to the pvsR task.
}
\examples{
# First, make sure your personal PVS API key is saved as an option
# (options("pvs.key" = "yourkey")) or in the pvs.key variable:
\dontrun{pvs.key <- "yourkey"}
# get all officials of a certain state
\dontrun{bill <- Votes.getBill("17623", separate=c("actions", "sponsors"))}
\dontrun{actionids <- bill$actions$actionId}
# get all votes on these actions
\dontrun{votes <- getAllVotes(actionids, batchsize=2)}
\dontrun{head(votes)}
}
\references{
http://api.votesmart.org/docs/Votes.html\cr
Use Votes.getBill() or Votes.getByOfficial() to get a list of action IDs.\cr
See also: Matter U, Stutzer A (2015) pvsR: An Open Source Interface to Big Data on the American Political Sphere. PLoS ONE 10(7): e0130501. doi: 10.1371/journal.pone.0130501
}
\author{
Ulrich Matter <ulrich.matter-at-unibas.ch>
}
|
c694cb745ae5059a1fc7e751e497e74c5224bfe5
|
a4e387f41f8df311e926d21a7500e9a9b990113d
|
/man/htmlCanvas.Rd
|
3a57281d835c3006dc54d028d4e47d03a49060c6
|
[] |
no_license
|
omegahat/RJSCanvasDevice
|
b3b249379911881495164d0c9b25e312c9438092
|
0f6a9db99da8f91afcb3a3423537516bf7801b5a
|
refs/heads/master
| 2020-04-05T23:24:42.114011
| 2012-02-22T01:54:38
| 2012-02-22T01:54:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,049
|
rd
|
htmlCanvas.Rd
|
\name{htmlCanvas}
\alias{htmlCanvas}
\title{Create HTML document to draw one or more plots}
\description{
This creates an HTML document that contains and displays
the generated plots on JavaScript canvases.
}
\usage{
htmlCanvas(file, dim = c(1000, 800),
template = system.file("template.html", package = "RJSCanvasDevice"),
canvasId = "canvas", ...)
}
\arguments{
\item{file}{the name of the file to which to write the HTML and
JavaScript content.}
\item{dim}{the dimensions of the JavaScript canvas on which the
resulting plot will be displayed.}
\item{template}{the name of the HTML file that serves as a template.}
\item{canvasId}{the identifier(s) (i.e. name(s)) of the canvases on
which to draw.}
\item{\dots}{additional arguments passed on to \code{htmlWrapup} which
writes the generated code to an HTML file.}
}
\details{
}
\value{
A reference to the C-level device.
}
\references{
R Internals Manual for details on graphics devices.
}
\author{
Duncan Temple Lang
}
\seealso{
\code{\link{jsCanvas}}
The RKMLDevice package
The FlashMXML device and package.
}
\examples{
# The basic version that creates one plot and draws it on the existing canvas
dev = htmlCanvas("simple.html")
plot(1:10)
dev.off()
# This changes the dimension of the existing canvas in the template
dev = htmlCanvas("simpleDim.html", dim = c(500, 500))
plot(1:10)
dev.off()
# This creates a new canvas, leaving the existing one there.
# We may want to remove it.
dev = htmlCanvas("simpleDim.html", dim = c(500, 500), canvasId = "plot")
plot(1:10)
dev.off()
# Creates two plots, draws the first one and uses the existing
# canvas.
dev = htmlCanvas("foo.html")
plot(1:10)
plot(density(rnorm(100)))
dev.off()
# Draw 2 plots, but use the same canvas. And change the dimension
# We draw the first plot when the document is loaded.
# The second can be drawn on the same canvas by calling the function
# rdraw2() at some point in time during the life of the document,
# e.g. on clicking a button.
dev = htmlCanvas("foo.html", dim = c(500, 400) )
plot(1:10)
plot(density(rnorm(100)))
dev.off()
# add a button to toggle between the 2 plots
# Define a variable to control which plot.
library(XML)
doc = htmlParse("foo.html")
b = getNodeSet(doc, "//body")[[1]]
addJSCode(doc, "var ctr = true;")
newXMLNode("input", attrs = c(type = "button",
onclick = "if(ctr) rdraw2(); else rdraw1(); ctr = !ctr;",
value = "Draw next plot"),
parent = b)
saveXML(doc, "foo.html")
# This specifies the dimensions for the two canvases
# and indicates that we want to plot onto each.
dev = htmlCanvas("foo.html", multiCanvas = TRUE,
dim = matrix(c(1000, 800,
500, 400), 2, 2, byrow = TRUE))
plot(1:10)
plot(density(rnorm(100)))
dev.off()
}
\keyword{graphics}
\keyword{device}
\keyword{dynamic}
|
f89cc0662668addf55a101ecbd2d1800b18846eb
|
369e2ee82082cd9e949796166e30726877f6a3b4
|
/plot3.R
|
57d0ed91e472a0ae930bc9911490d673a8350aba
|
[] |
no_license
|
FyzHsn/Exploratory-data-analysis-project-2
|
e006e5a16c84dd17d214360494ca668738f9b112
|
09e838a24e321af6a9990bdc5336f1e42fea0d41
|
refs/heads/master
| 2020-02-26T14:32:18.928280
| 2016-06-15T21:43:26
| 2016-06-15T21:43:26
| 61,048,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,050
|
r
|
plot3.R
|
## Download data
# NOTE(review): the zip is downloaded but never unzipped — the readRDS()
# calls below assume the archive was already extracted into data/.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
destfile <- "C:\\Users\\Windows\\Documents\\JHU_Data_Science\\Course_4\\Exploratory-data-analysis-project-2\\data.zip"
download.file(url, destfile)
## Read in data
# NEI = PM2.5 emissions records; SCC = source classification code lookup.
nei <- readRDS("~/JHU_Data_Science/Course_4/Exploratory-data-analysis-project-2/data/summarySCC_PM25.rds")
scc <- readRDS("~/JHU_Data_Science/Course_4/Exploratory-data-analysis-project-2/data/Source_Classification_Code.rds")
library(dplyr)
nei <- select(nei, -Pollutant)
## Change names to lowercase
names(nei) <- tolower(names(nei))
names(scc) <- tolower(names(scc))
## Plot 3a
# Total emissions per source type per year, drawn as lines over time.
library(ggplot2)
library(reshape2)
nei_melt <- melt(nei[, 3:5], id.vars = c("type", "year"))
head(nei_melt)
pollutiontype <- dcast(nei_melt, type + year ~ variable, fun.aggregate = sum)
head(pollutiontype)
ggplot(pollutiontype, aes(year, emissions)) + geom_line(aes(color = type))
## Plot 3b
# Same totals as bars, one panel per year.
ggplot(pollutiontype, aes(type)) + geom_bar(aes(weight = emissions)) + facet_grid(. ~ year)
|
dc8fd508546e4a1e7534a78d27049fdcb4ab4587
|
8f45c2f97fb32c96e1fd83b1494e73875af93b2d
|
/reseau de neurones.R
|
3a9cd3613f90955893461194ae81931bc45b1f1a
|
[] |
no_license
|
KeivanR/Digit-Recognition
|
eb62972bb5f940c1b2a8a36bc86c6047440bd332
|
bf3fa2d4bc35a78cda25535ebfeb2e2b527489ac
|
refs/heads/master
| 2021-05-02T17:55:50.612362
| 2018-02-07T18:46:07
| 2018-02-07T18:46:07
| 120,655,631
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,733
|
r
|
reseau de neurones.R
|
library(hier.part)
library(progress)
#' Neuron activation: inner product of inputs and weights passed through f.
#'
#' @param x numeric input vector (one activation per upstream neuron).
#' @param w numeric weight vector/column for this neuron.
#' @param f activation function, defaults to sigmoid().
#' @return f(<x, w>) as a plain scalar.
seuil <- function(x, w, f = sigmoid) {
  # x %*% w is already the inner product for vectors; sum() just collapses
  # the resulting 1x1 matrix to a scalar.
  # The original hard-threshold branch that followed the return() was
  # unreachable dead code and has been removed.
  f(sum(x %*% w))
}
# Logistic activation: maps any real x into (0, 1); vectorized over x.
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
# Forward propagation.
# x: input vector; w: full neuron-by-neuron weight matrix; couches: list of
# neuron index vectors, one per layer (couches[[1]] = inputs incl. bias,
# last element = output layer). Returns the activation vector for all neurons.
propa <- function(x,w,couches){
x <- as.numeric(as.character(x))
nombre_couches <- length(couches)
# Last index in the flattened layer list == total number of neurons.
nombre_neurones <- unlist(couches)[length(unlist(couches))]
a <- numeric(nombre_neurones)
# inputs
a[couches[[1]]] <- x
# hidden layers: first neuron of each layer is a bias fixed to 1; the rest
# get seuil() applied column-wise to the incoming weights.
for (lay in 2:(nombre_couches-1)) {
a[couches[[lay]][1]] <- 1
a[couches[[lay]][-1]] <- apply(w[couches[[lay-1]],couches[[lay]][-1]],2,seuil,x = a[couches[[lay-1]]])
}
# output layer: no bias neuron.
a[couches[[nombre_couches]]]<- apply(w[couches[[nombre_couches-1]],couches[[nombre_couches]]],2,seuil,x = a[couches[[nombre_couches-1]]])
return(a)
}
# Backpropagation: given activations a, weights w, learning rate v and the
# output error, returns the updated weight matrix.
retro_propa <- function(a,w,v,error,couches,convolution){
nombre_couches <- length(couches)
nombre_neurones <- unlist(couches)[length(unlist(couches))]
# d holds the per-neuron delta, seeded with the output error.
d <- numeric(nombre_neurones)
d[couches[[nombre_couches]]] <- error
# Propagate deltas backwards: sigmoid derivative a*(1-a) times the
# weighted sum of downstream deltas.
for (lay in (nombre_couches-1):2){
diff1 <- a[couches[[lay]]]*(1-a[couches[[lay]]])
d[couches[[lay]]] <- diff1*rowSums(t(t(w[couches[[lay]],couches[[lay+1]]])*d[couches[[lay+1]]]))
}
# Gradient step on all layer-to-layer weights from layer 2 upward.
for (lay in 3:nombre_couches){
w[couches[[lay-1]],couches[[lay]]] <- w[couches[[lay-1]],couches[[lay]]] + v*t(sapply(a[couches[[lay-1]]],function(x){x*d[couches[[lay]]]}))
}
# Input->first-hidden weights: in convolution mode only the pave() window
# of each hidden unit is updated.
# NOTE(review): weights are indexed by pave(i - length(couches[[1]])) but
# activations by pave(i) — the two windows differ by the input-layer
# offset; confirm which base is intended.
if (convolution) {
for (i in couches[[2]])
w[pave(i-length(couches[[1]])),i] <- w[pave(i-length(couches[[1]])),i] + v*t(sapply(a[pave(i)],function(x){x*d[i]}))
}
else w[couches[[1]],couches[[2]]] <- w[couches[[1]],couches[[2]]] + v*t(sapply(a[couches[[1]]],function(x){x*d[couches[[2]]]}))
return (w)
}
# Input indices of the a x b pixel window (receptive field) for hidden unit i
# in a p-wide image stored row-major: `a` consecutive indices, repeated at
# offsets 0, p, ..., (b-1)*p. Note: q is accepted but unused.
pave <- function(i, a = 4, b = 4, p = 28, q = 28) {
  j <- i - 1
  # Window origin: windows step `a` pixels horizontally; once a*j wraps past
  # the row width p, skip (b-1)*p so the next band starts below the previous.
  start <- a * j + 1 + (b - 1) * p * trunc(a * j / p)
  # Column-major flattening matches the original sapply()-based layout.
  as.vector(outer(start:(start + a - 1), p * (0:(b - 1)), `+`))
}
# Train a feed-forward network by online backpropagation.
# hidden: vector of hidden-layer sizes; v: learning rate; nb_tests: number of
# training samples to process (one pass, one sample per step); entrees/sorties:
# input and target matrices (one row per sample); winit: optional pre-trained
# weight matrix; convolution: restrict input->hidden weights to pave() windows.
# Returns list(weights, couches).
# NOTE(review): `precision` and `no_error` are never used.
reseau_de_neurones <- function(hidden, v=0.3, nb_tests = 1000, precision = 100,
entrees, sorties, winit=NULL, convolution = FALSE){
# Layer sizes: inputs + bias, hidden layers, outputs; couches[[k]] holds the
# global neuron indices of layer k.
tailles_couches <- c(dim(entrees)[2]+1,hidden,dim(sorties)[2])
couches <- list()
num <- 1
for (lay in 1:length(tailles_couches)){
couches[[lay]] <- num:(num+tailles_couches[lay]-1)
num <- num+tailles_couches[lay]
}
nombre_couches <- length(couches)
nombre_neurones <- sum(tailles_couches)
a <- numeric(nombre_neurones)
# Random uniform(-1, 1) initialization of all layer-to-layer weights.
w <- matrix(0,nrow = nombre_neurones, ncol = nombre_neurones)
for (lay in 2:nombre_couches){
for (i in couches[[lay-1]]){
for (j in couches[[lay]]){
w[i,j] <- runif(1,-1,1)
}
}
}
# Convolution mode: zero every input weight outside hidden unit i's window.
# NOTE(review): the logical index has length(couches[[1]]) < nrow(w) and is
# recycled over all rows; harmless here only because non-input rows of
# these columns are already zero.
if (convolution) {
for (i in couches[[2]]){
w[!couches[[1]]%in%pave(i-length(couches[[1]])),i] <- 0
}
}
if (!is.null(winit)) w <- winit
tests <- 1
no_error <- 0
# Prepend the constant bias input to every sample.
entrees <- cbind(1,entrees)
pct <- progress_bar$new(total = nb_tests)
# Online training: forward pass, output error, backprop update, per sample.
while (tests<nb_tests){
x <- entrees[tests,]
x[is.na(x)] <- 0
# print(paste('Inputs:',x[2],x[3]))
a <- propa(x,w,couches)
pred <- a[couches[[nombre_couches]]]
# print(paste("Predicted:",pred))
y <- sorties[tests,]
# print(paste('Expected:',y))
error <- y-pred
w <- retro_propa(a,w,v,error,couches,convolution)
tests <- tests+1
pct$tick()
}
return (list(weights = w, couches = couches))
}
# Scalar sign with sign(0) == 1. This deliberately masks base::sign(), whose
# documented value at 0 is 0 — downstream estim() relies on the +/-1 output.
sign <- function(x) {
  if (x < 0) -1 else 1
}
# Recursively back-estimate the activation of neuron n from a target output
# vector y: an output neuron (no outgoing weights) reads its target directly
# from y; any other neuron follows its strongest outgoing weight and returns
# 0 or 1 depending on the sign of weight * (downstream estimate - 0.5).
# Uses the script's custom sign() (sign(0) == 1), not base::sign.
estim <- function(net, y, n){
if (length(which(net$weights[n,]!=0))==0) return (y[n+1-net$couches[[length(net$couches)]][1]])
else{
i <- which.max(abs(net$weights[n,]))
return (0.5*(1+sign(net$weights[n,i]*(estim(net,y,i)-0.5))))
}
}
# Visualize what the network "thinks" digit nb looks like: build a one-hot
# target for nb, back-estimate every input pixel via estim(), and display it.
# Depends on globals: train, net, and show_digit() (defined elsewhere).
estimation <- function(nb){
y <- numeric(10)
y[nb+1]<-1
estim_nb<-sapply(1:dim(train)[2],estim,net=net,y=y)
show_digit(estim_nb)
}
# NOTE(review): this call appears before `net` is assigned further down the
# script, so it fails when the file is run top-to-bottom.
estim_nb <- estimation(0)
# Forward-propagate input x through `net` (bias 1 prepended) and return the
# output-layer activations truncated to a resolution of 1/prec.
prediction <- function(net, x, prec = 100) {
  inp <- c(1, x)
  out_idx <- net$couches[[length(net$couches)]]
  acts <- propa(inp, net$weights, net$couches)[out_idx]
  # Same arithmetic order as the original form to keep results bit-identical.
  (1 / prec) * trunc(acts * prec)
}
# Digit (0-9) whose output activation is highest for input x.
chosen <- function(net, x, prec = 100) {
  scores <- prediction(net, x, prec)
  (0:9)[which.max(scores)]
}
# Incoming-synapse submatrix for layer `couche`: the weight columns of that
# layer's neurons, restricted to source rows with a non-zero row sum.
synapses_entrants <- function(net, couche) {
  cols <- net$couches[[couche]]
  incoming <- net$weights[, cols]
  active_rows <- rowSums(incoming) != 0
  net$weights[active_rows, cols]
}
# MNIST training setup: scale pixels to [0, 1] and one-hot encode the labels.
# NOTE(review): reponses[i, train_y[i]] uses the label as a 1-based column
# index, while the rest of the script treats digits as 0-9 — confirm the
# label range of train_y (a 0 label would break this).
entrees <- train/255
reponses <- matrix(0,length(train_y),10)
for (i in 1:length(train_y)){
reponses[i,train_y[i]] <- 1
}
# Train a single 49-unit hidden layer in convolution mode (pave() windows).
net <- reseau_de_neurones(hidden = c(49), entrees = entrees, sorties = reponses, winit = NULL, nb_tests = 10000,convolution = TRUE)
# Interactive inspection loop: for each test image, show the digit with the
# actual vs predicted label, print the per-digit scores, and wait for Enter.
# NOTE(review): this masks base::try(); a different name would be safer.
# Depends on globals: test, test_y, net, and show_digit().
try <- function(){
k <- 1
while (k<length(test_y)){
pred <- data.frame(0:9,prediction(net = net,x = test[k,]/255,prec = 100000000))
colnames(pred) <- c("num","proba")
show_digit(test[k, ],xlab=paste("Actual :",as.numeric(as.character(test_y[k])),"\nPredicted :",pred$num[which.max(pred$proba)]))
print(pred)
print(pred$num[order(pred$proba,decreasing = T)][pred$proba[order(pred$proba,decreasing = T)]>0])
print(paste("predicted :",pred$num[which.max(pred$proba)]))
print(paste("actual :",as.numeric(as.character(test_y[k]))))
k <- k+1
readline(prompt="next")
}
}
# Step through predictions interactively, then compute test-set accuracy
# (fraction of the 10000 test digits predicted correctly).
try()
predicted <- apply(test,1,chosen,net = net,prec = 100000000)
length(which(predicted==test_y))/10000
############# APPLICATION TO TELECOM SITES ##############
# Build a site-membership adjacency matrix: one row per camera/group in cam2,
# one column per site, 1 where the site belongs to that group.
sites <- unique(unlist(cam2))
adjacence <- as.data.frame(matrix(0,length(cam2),length(sites)))
colnames(adjacence) <- sites
rownames(adjacence) <- names(cam2)
for (i in 1:length(names(cam2))){
adjacence[i,sites%in%cam2[[i]]]<-1
}
adjacence_pb <- adjacence[!rownames(adjacence)%in%c('cuves intermediaires ?'),]
# Oscillations
# Train a classifier on 500-sample fuel-level windows: class 1 = site labeled
# with 'oscillations' problems, class 2 = everything else.
# NOTE(review): reponses has 3 columns but column 3 is never set — confirm
# whether a third class was intended.
serveur <- TOM3
entrees <- t(serveur$fuel_cm_c[serveur$fuel_cm_c$Date>=as.POSIXct(d1)&serveur$fuel_cm_c$Date<=as.POSIXct(d2),])[-1,1:500]
sites_prob <- t(adja_regrouper(adjacence))
sites_prob <- sites_prob[match(row.names(entrees),sites_prob[,1]),]
reponses <- matrix(0,dim(entrees)[1],3)
for (i in 1:dim(entrees)[1]){
if (grepl('oscillations',sites_prob[sites_prob[,1]==row.names(entrees)[i],2])) reponses[i,1] <- 1
else reponses[i,2] <- 1
}
net_osc <- reseau_de_neurones(hidden = 502, entrees = entrees, sorties = reponses, winit = NULL, nb_tests = dim(entrees)[1])
# Test phase
# Score one site's series from the month preceding the training window.
dtest1 <- ymd(d1)%m+%months(-1)
dtest2 <- ymd(d2)%m+%months(-1)
x <- serveur$fuel_cm_c$`TBOE601`[serveur$fuel_cm_c$Date>=as.POSIXct(dtest1)&serveur$fuel_cm_c$Date<=as.POSIXct(dtest2)][1:500]
x <- x/max(x)
x1 <- data.frame(x,x)
afficher(x1,date = serveur$fuel_cm_c$Date[serveur$fuel_cm_c$Date>=as.POSIXct(dtest1)&serveur$fuel_cm_c$Date<=as.POSIXct(dtest2)][1:500])
prediction(net_osc,x,prec = 1000000)
|
d3cf4870200c5ab39916c9665b20bea1a77699fb
|
37cc63b9708638db1fd1e01c0b3e52013654986c
|
/Transcript_Based/analysis/Brendan/Intial Analysis/02_Variance/02_remove unexpressed.R
|
65a164727bb82c2d675de4b6f99da12369aed7b2
|
[] |
no_license
|
brendan4/sravandevanathan
|
e7082bd5892ccc5cf679132aaa06c88abe4c17fc
|
00c54737e09ea29b517a2d008a420748af43d9b7
|
refs/heads/master
| 2022-03-26T08:02:45.299082
| 2019-12-13T21:09:21
| 2019-12-13T21:09:21
| 170,389,554
| 0
| 0
| null | 2019-02-12T20:52:38
| 2019-02-12T20:52:37
| null |
UTF-8
|
R
| false
| false
| 1,308
|
r
|
02_remove unexpressed.R
|
#' Filter out essentially-unexpressed (near-constant) rows.
#'
#' @param dataset data frame whose first column is an identifier and whose
#'   remaining columns are numeric expression values (FPKM); NAs count as 0.
#' @return the rows of `dataset` whose expression standard deviation passes
#'   log(sd + 1e-4) >= -8, i.e. rows with non-trivial variance.
remove.unexpressed <- function(dataset) {
  keep <- logical(nrow(dataset))
  for (x in seq_len(nrow(dataset))) {
    # Original code iterated `1:ncol(dataset)-1`, which by operator
    # precedence is 0:(ncol-1): the y = 0 pass read the identifier column
    # and wrote to the no-op index values[0]. Fixed to use the data
    # columns only (everything but column 1).
    values <- unlist(dataset[x, -1], use.names = FALSE)
    values[is.na(values)] <- 0
    keep[x] <- log(sd(values) + 0.0001) >= -8
  }
  dataset[keep, ]
}
#l2_ACAGTG removed
expressed.genesN = remove.unexpressed(GeneAbundance)
expressed.transN = remove.unexpressed(Transcripts)
# Analysis after first filter: drop L6_unmatched + L3_ATCACG
# NOTE(review): this boxplot uses expressed.genes, which is only assigned
# further down — fails if the script is run top-to-bottom.
boxplot(log2(na.omit(expressed.genes)+ 0.1),
names=colnames(expressed.genes), las=2, ylab="log2(FPKM)",
main="Distribution of FPKMs for all libraries")
summary(expressed.genesN)
#both unmatched removed L3_ATCACG removed
# NOTE(review): the "cutoff at -7" comment does not match the function,
# which hard-codes a -8 cutoff for both calls.
expressed.genesN = remove.unexpressed(GeneAbundance) # cutoff at -7
expressed.transN = remove.unexpressed(Transcripts) # cutoff at -8
expressed.genes <- expressed.genesN
expressed.trans <- expressed.transN
rm(expressed.genesN)
rm(expressed.transN)
rm(GeneAbundance)
rm(Transcripts)
setwd("C:/Users/brendan/Documents/sravandevanathan/")
write.table(expressed.genes, "expressed.genes.tab")
write.table(expressed.trans, "expressed.trans.tab")
|
686608964bc89028dc5aece7b4b9d5eea178c439
|
359be36e71f612fa1f3511b1be01aa5c72010879
|
/PS1/PS1_R_Programming_2021.R
|
34088a39654e576239cc0990b84e268b7591a145
|
[] |
no_license
|
algorsky/ZOO540
|
c0192f344784cb2254fdbd7d705c9975672b7086
|
c1081a119d9542a2f62767634aff6cbd74ca2338
|
refs/heads/master
| 2023-08-10T21:40:43.507260
| 2021-09-29T19:54:43
| 2021-09-29T19:54:43
| 405,999,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,370
|
r
|
PS1_R_Programming_2021.R
|
# This problem set introduces you to some of the basic mechanics of R. It is based on Peng (2016) R Programming for Data Science; Chapt headings below correspond to chapter in this book. You can download the book at http://leanpub.com/rprogramming.
# Although PS1 isn't due until 19 September, please start on it immediately so that you are prepared for lectures.
# For the homework, ONLY TURN IN THE R CODE THAT YOU USED. Start with the code from PS1_R_Programming_2021.R and add to it anything you need. If you are adding to my code, identify you new code (so I can find it) by placing it between marker rows #~~~~~~~~~~~~~~~~~~~~~~~~~~~. There is no need for you to write down detailed answers, since we will discuss everything in class. Do write down questions that you have, though.
# As a departure from the norm for later problem sets, EVERYBODY SHOULD TURN IN A COPY OF THEIR R CODE. I know that I will likely get replicate version from those of you i the same group. Still, I want to make sure everybody goes through all of the steps of submitting R code.
# Due: 19Sep21 (midnight)
#~~~~~~~~~~~~~~~~
library(dplyr)
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 5. Nuts and Bolts
#################################################################
#1. Create a 2 x 5 matrix containing 1:10, with 1:5 on the first row and 6:10 on the second row. (Bonus: Come up with two or more ways to do this.)
#~~~~~~~~~~~~~~~~
#First way
m<- matrix(1:10,nrow = 2, ncol = 5)
# Second Way
m2 <- 1:10
dim(m2) <- c(2,5)
#Third Way
m3<- as.matrix(rbind(1:5,6:10))
#~~~~~~~~~~~~~~~~
#2. Label the rows of your matrix "A" and "B", and the columns "a", ..."e".
#~~~~~~~~~~~~~~~~
rownames(m) <- c("A", "B")
colnames(m) <- c("a", "b", "c", "d","e")
#~~~~~~~~~~~~~~~~
#3. Convert the values of your matrix to double.
#~~~~~~~~~~~~~~~~
m <- as.double(m)
dim(m)<- c(2,5)
rownames(m) <- c("A", "B")
colnames(m) <- c("a", "b", "c", "d","e")
#~~~~~~~~~~~~~~~~
#4. Create a data.frame named "df" with a column named "site" with values 1:5 as factors and a column named "value" with values 100, 200, ..., 500. Then give it row.names "a", ..., "e".
#~~~~~~~~~~~~~~~~
site<- as.factor(1:5)
value<- c(100, 200, 300, 400, 500)
df<-cbind(site, value)
rownames(df)<- c("a", "b", "c", "d","e")
df<-data.frame(df)
#~~~~~~~~~~~~~~~~
#5. Rename the variables (columns) in df "site.cat" and "x".
#~~~~~~~~~~~~~~~~
colnames(df) <- c("site.cat", "X")
#~~~~~~~~~~~~~~~~
#6. Convert df to a list and extract the first element of the list by number and then by name ("site").
#~~~~~~~~~~~~~~~~
dflist<- unlist(as.list(df))
dflist[6]
dflist[1]
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 6. Uploading data
#################################################################
# Metadata for "grouse_data.csv"
# These data are simulated to have similar characteristics as the original, real data.
# ROUTE
# IDs for 50 roadside routes.
# STATION
# IDs for each survey station, with up to 8 STATIONS per ROUTE.
# LAT
# X coordinate of survey station. UTM zone 15N NAD83 datum.
# LONG
# Y coordinate of survey station. UTM zone 15N NAD83 datum.
# WIND
# Wind speed (km/hour) recorded at 1.4m above ground at the end of each observation.
# TEMP
# Temperature (°C) recorded at 1.4m above ground at the end of each observation.
# GROUSE
# Detection/non-detection of Ruffed Grouse (1 = detected, 0 = not detected).
#7. Create a data.frame called d by reading data from file = "grouse_data.csv". Make the variables "ROUTE" and "STATION" factors, and "GROUSE" integers
#~~~~~~~~~~~~~~~~
d<- read.csv('PS1/grouse_data.csv')
d$ROUTE<- as.factor(d$ROUTE)
d$STATION<- as.factor(d$STATION)
d$GROUSE<- as.integer(d$GROUSE)
summary(d)
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 10. Subsetting data
#################################################################
#8. Select the subset of d with (a) ROUTE = 1, (b) LAT greater than mean(LAT), (c) LAT greater than median(LAT)
#~~~~~~~~~~~~~~~~
# (a)
#Base R
d[d$ROUTE == 1,]
#dplyr
d%>%
filter(ROUTE == 1)
# (b)
#Base R
d[d$LAT > mean(d$LAT),]
#dplyr
d%>%
filter(LAT > mean(LAT))
# (c)
#Base R
d[d$LAT > median(d$LAT),]
#dplyr
d%>%
filter(LAT > median(LAT))
#~~~~~~~~~~~~~~~~
#9. Create a new variable "NORTHERN" that is TRUE for the more northerly half of the stations and FALSE otherwise.
#~~~~~~~~~~~~~~~~
d1<- d %>%
mutate(NORTHERN = ifelse(LAT > mean(LAT), "TRUE", "FALSE"))
#~~~~~~~~~~~~~~~~
#10. How many different routes are there in d?
#~~~~~~~~~~~~~~~~
#50 different routes
#~~~~~~~~~~~~~~~~
#11. Construct a matrix d.matrix that has the same information as d. Compute the number of routes using d.matrix and the unique() function.
#~~~~~~~~~~~~~~~~
d.matrix <- data.matrix(d1)
length(unique(d.matrix[,1]))
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 11. Vectorized Operations
#################################################################
#12. Create a new variable "QUADRANT" that divides the stations into four groups, with values 0 for the northwest group, 1 for the northeast group, 2 for the southwest group, and 3 for the southeast group.
#~~~~~~~~~~~~~~~~
d2<- d1%>%
mutate(QUADRANT = ifelse(STATION == 0, "northwest",
ifelse(STATION == 1, "northeast",
ifelse(STATION == 2, "southwest", "southeast"))))
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 12. Date and times
#################################################################
# You should read this chapter to be aware these features exist.
#################################################################
# Chapt 13. Managing dataframes
#################################################################
library(dplyr)
#13. Select columns of d that start with "L" (i.e., LAT and LONG). Do this both using and not using dplyr.
#~~~~~~~~~~~~~~~~
d.L <- select(d2, starts_with("L"))
d.L.base<- d2[, c("LAT", "LONG")]
data.frame(LAT = d$LAT, LONG=d$LONG)
#~~~~~~~~~~~~~~~~
#14. Select rows of d that contain the highest 50% of the values of LAT. Do this both using and not using dplyr. Do you see any differences in the data.frame that is produced?
#~~~~~~~~~~~~~~~~
d.LAT50 <- d %>%
filter(LAT > mean(LAT))
d.LAT50.base<- d[d$LAT>median(d$LAT),]
#~~~~~~~~~~~~~~~~
#15. Select rows of d that contain ROUTE > 45. This is much trickier than you might think!
#~~~~~~~~~~~~~~~~
d.RT45 <- d %>%
filter(as.numeric(as.character(ROUTE)) > 45)
#~~~~~~~~~~~~~~~~
#16. Reorder rows by LAT to create a new data.frame d.LAT. Then reorder d.LAT by ROUTE
#~~~~~~~~~~~~~~~~
d.LAT <- d%>%
arrange(LAT)%>%
arrange(ROUTE)
d.LAT.base<- d[order(d$ROUTE),]
#~~~~~~~~~~~~~~~~
#17. Rename the column in d from ROUTE.num to num.ROUTE. Do this both using and not using dplyr.
#~~~~~~~~~~~~~~~~
#dplyr
d.rename<- d%>%
rename(ROUTE.num = ROUTE)%>%
rename(num.ROUTE = ROUTE.num)
#base R
names(d)[1]<- "ROUTE.num"
names(d)[1]<- "num.ROUTE"
names(d)[1]<- "ROUTE"
#~~~~~~~~~~~~~~~~
#18. Add a variable to d called exp.GROUSE than contains exp(GROUSE). Do this both using and not using dplyr.
#~~~~~~~~~~~~~~~~
#dplyr
d<- d%>%
mutate(exp.GROUSE = exp(GROUSE))
# base R
d$exp.GROUSE <- exp(d$GROUSE)
#~~~~~~~~~~~~~~~~
#19. Create a data.frame called d.ROUTE that contains the mean value of GROUSE for each ROUTE. Do this both using and not using dplyr. Bonus: create a new data.frame d.NA which is the same as d but with d$ROUTE[1] <- NA. Then perform the same operation as you just did for d.
#~~~~~~~~~~~~~~~~
d.ROUTE<- d%>%
group_by(ROUTE)%>%
summarize(GROUSE = mean(GROUSE))
d.ROUTE.base<- aggregate(GROUSE~ROUTE, data = d, FUN = mean)
#BONUS
d.NA<- d
d.NA[1,] <- NA
d.NA$num.ROUTE <- as.numeric(d.NA$ROUTE)
d.NA<- d.NA %>%
group_by(ROUTE, num.ROUTE)%>%
summarize(GROUSE = mean(GROUSE))
#~~~~~~~~~~~~~~~~
#20. Perform the same operation as in #19 using piping in dplyr.
#~~~~~~~~~~~~~~~~
d.ROUTE<- d%>%
group_by(ROUTE)%>%
summarize(GROUSE = mean(GROUSE))
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 14. Control Structures
#################################################################
# The tasks here should really be done as in the previous questions. Here, I'm using the same tasks to illustrate the basics of control structures. These basic structures I'm illustrating are very common, and you will see them repeatedly throughout the course.
#21. Create a data.frame called d.ROUTE that contains the mean value of GROUSE for each ROUTE using a for loop (i.e., group_by() and aggregate() aren't allowed). This involves first creating a data.frame and then looping through the values of ROUTE to fill in values. I've given you the first line creating the data.frame
#~~~~~~~~~~~~~~~~
d$num.ROUTE<- as.numeric(d$ROUTE)
d.ROUTE.loop <- data.frame(ROUTE = unique(d$num.ROUTE))
for(i in 1:length(unique(d$ROUTE))){
d.ROUTE.loop$GROUSE[i] <- mean(d$GROUSE[d$ROUTE == i])
}
print(d.ROUTE.loop)
# d.ROUTE<- data.grame(ROUTE = unique(d$num.ROUTE))
# for(i.ROUTE in unique(d$num.ROUTE)){
# d.ROUTE$GROUSE[d.ROUTE$ROUTE == i.ROUTE] <- mean(d$GROUSE[d$num.ROUTE == i.ROUTE])
#}
#Checking if correct
d.average<- d%>%
group_by(as.factor(ROUTE))%>%
summarise(mean = mean(GROUSE))
#~~~~~~~~~~~~~~~~
#22. Create a data.frame called d.ROUTE that contains the mean value of GROUSE for each ROUTE using a while loop (i.e., group_by() and aggregate() aren't allowed).
#~~~~~~~~~~~~~~~~
d.ROUTE.while <- data.frame(ROUTE = unique(d$num.ROUTE))
i <- 0
while(i <nrow(d.ROUTE.while)){
i <- i + 1
d.ROUTE.while$GROUSE[i] <- mean(d$GROUSE[d$num.ROUTE == d.ROUTE.while$ROUTE[i]])
}
#View the results
cbind(aggregate(GROUSE ~ROUTE, data = d, FUN = mean), d.ROUTE.while)
abs(sum(aggregate(GROUSE ~ num.ROUTE, data = d, FUN = mean)$GROUSE != d.ROUTE.while$GROUSE))
#~~~~~~~~~~~~~~~~
#23. Create a data.frame called d.ROUTE.NA from d.NA that contains the mean value of GROUSE for each ROUTE using a for loop, in which the ROUTE with an NA is given the value NaN. You will need the is.na() function for this.
#~~~~~~~~~~~~~~~~
d.ROUTE.NA<- data.frame(ROUTE = unique(d.NA$ROUTE))
for(i in 1:length(unique(d.NA$num.ROUTE))){
x <- d.NA$GROUSE[d.NA$num.ROUTE[i]]
if((is.na(x)) == TRUE){
d.ROUTE.NA$GROUSE[d.ROUTE.NA$ROUTE == i] <- NaN
}else{
d.ROUTE.NA$GROUSE[d.ROUTE.NA$ROUTE == i] <- x
}
}
#~~~~~~~~~~~~~~~~
#################################################################
# Chapt 15. Functions
#################################################################
#24. Write a function locate_station() that returns the LAT and LONG for a ROUTE and STATION in d. Note you are best to use num.ROUTE, not ROUTE.
# Look up the LAT/LONG coordinates of one survey station.
# route/station are matched against data$num.ROUTE and data$STATION;
# returns a data.frame with columns LAT and LONG (one row per match).
locate_station <- function(route, station, data){ #define what you put into it
  is.target <- data$num.ROUTE == route & data$STATION == station
  data[is.target, c("LAT", "LONG")]
}
locate_station(route = 30, station = 3, data = d)
# It is also possible to write the following, but there are risks.
# Same lookup as locate_station(), but reads the data.frame `d` from the
# global environment instead of taking it as a parameter. Kept as a teaching
# example of why relying on globals is risky: the result silently changes
# whenever `d` changes, and the function breaks if `d` is absent.
locate_station_bad<- function(route, station){
return(d[d$num.ROUTE == route & d$STATION == station, c("LAT", "LONG")])
}
locate_station_bad(route = 30, station = 3)
#SCOPING RULES: excludes the data file. Not good for 300 lines of code
#25. Write a function distance_station() that returns the Euclidean distance between a specified ROUTE and STATION in d and all other routes and stations. Don't bother about converting to meters -- just calculate distance in terms of latitude and longitude values (ignoring that in Wisconsin, a degree latitude is not equal to a degree longitude).
# Euclidean distance (in raw coordinate units) from the focal station,
# identified by (route, station), to every station in `data`.
# Returns a numeric vector with one entry per row of `data`.
distance_station<- function(route, station, data){
  focal <- locate_station(route = route, station = station, data = data)
  delta.lat <- focal$LAT - data$LAT
  delta.long <- focal$LONG - data$LONG
  sqrt(delta.lat^2 + delta.long^2)
}
distance_station(route = 30, station = 3, data = d)
# 26. Write a function plot_station() that plots the location of stations, with the size of the point for each station decreasing with increasing distance from a focal station specified as input to the function.
library(ggplot2)
# Plot station locations, drawing stations closer to the focal
# (route, station) with larger points.
# BUG FIX: the original called ggplot(data = d), silently plotting the
# global data.frame `d` instead of the `data` argument.
plot_station <- function(route, station, data){
  # Distance from the focal station to every station in `data`.
  dist.to.focal <- distance_station(route = route, station = station, data = data)
  # rank of negated distance: nearest station gets the largest rank,
  # and therefore the largest plotted point.
  closeness <- rank(-dist.to.focal)
  return(ggplot(data = data) + geom_point(aes(x = LONG, y = LAT, size = closeness), shape = 1))
}
plot_station(route = 40, station = 3, data = d)
#################################################################
# Chapt 16. Scoping rules in R
#################################################################
# You should read this chapter to be warned. However, I'm going to try to make sure all the code you use in class is properly scoped. The bottom line is that it is often best to define variables in the definition of a function.
# 27. Do you have any questions about scoping?
#################################################################
# Chapt 17. Standards
#################################################################
# These are good principles (but don't expect me to always follow them).
#################################################################
# Chapt 18. Loop functions
#################################################################
# 28. Use tapply() to create the same date.frame as aggregate(GROUSE ~ num.ROUTE, data=d, FUN=mean). You can also try this using ROUTE rather than num.ROUTE, and you will see some problems.
aggregate<- aggregate(GROUSE ~ num.ROUTE, data=d, FUN=mean)
t.apply<- tapply(d$GROUSE,d$num.ROUTE, mean)
print(t.apply)
# 29. First, add columns to data.frame d which give the distance of each station from two locations (e.g., ROUTE = 1, STATION = 1; and ROUTE = 40, STATION = 1). Use apply() to create an additional column that is the sum of these distances.
#not sure I quite understand the question
# 30. Use apply() and split() to create the same date.frame as aggregate(GROUSE ~ num.ROUTE, data=d, FUN=mean). You can also try this using ROUTE rather than num.ROUTE, and you will see some problems.
aggregate(GROUSE ~ num.ROUTE, data=d, FUN=mean)
grouse<- as.matrix(split(d, d$num.ROUTE))
str(grouse)
apply.dataframe<- apply(grouse$GROUSE, 1, FUN = mean)
#################################################################
# Chapts 19/20/21. Regular expressions, Debugging and Profiling
#################################################################
# You should read these chapters to be aware these features exist.
#################################################################
# Chapt 22. Simulation
#################################################################
# 31. Plot a histogram of 1000 values simulated from a Gaussian distribution. Then plot the probability density function to compare them. You will need the hist() function (with the freq=F option).
x <- rnorm(1000)
hist(x)
hist(x, freq = FALSE)
# 32. Plot a histogram of 1000 values simulated from a Poisson distribution. Then plot the probability density (or mass) function to compare them. You will need the hist() function (with the freq=F option).
y = rpois(1000, 2)
hist(y, freq = FALSE)
#don't worry about process more about inference of the process
|
3be615dfbf1b68fdededc0a2e64acc346e0cef52
|
55b688ae97badb55a279006a5a417d80226ea329
|
/man/newNode.Rd
|
865d9d56db9af03fda564b8528b2adb40d85a95e
|
[] |
no_license
|
cran/mitre
|
8799236b6fd45dc701734dd4bc8c51baa9e83d45
|
b6eaec6277544232bb19b6d7b03db902533def64
|
refs/heads/master
| 2023-05-01T15:12:00.775487
| 2021-05-21T06:20:03
| 2021-05-21T06:20:03
| 317,820,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,588
|
rd
|
newNode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mitre.R
\name{newNode}
\alias{newNode}
\title{Create an empty node}
\usage{
newNode()
}
\value{
data.frame
}
\description{
\code{id} : The id of the node unique value for all standard elements.
\code{label} : The label is the piece of text shown in or under the node, depending on the shape.
\code{group} : When not undefined, the group of node(s)
\code{type} : Used as subgroup to classify different object from
\code{value} : When a value is set, the nodes will be scaled using the options in the scaling object defined above.
\code{title} : Title to be displayed when the user hovers over the node. The title can be an HTML element or a string containing plain text or HTML.
\code{standard} : The id of the standard
\code{shape} : The shape defines what the node looks like. The types with the label inside of it are: ellipse, circle, database, box, text. The ones with the label outside of it are: image, circularImage, diamond, dot, star, triangle, triangleDown, square and icon.
\code{color} : Color for the node.
\code{hidden} : When true, the node will not be shown. It will still be part of the physics simulation though!
\code{mass} : Default to 1. The "barnesHut" physics model (which is enabled by default) is based on an inverted gravity model. By increasing the mass of a node, you increase it's repulsion. Values lower than 1 are not recommended.
\code{description} : Description could include extra information or nested data which include other columns from original data frame observation.
}
|
a1ecec7291603c523fe93b8611fe781f19b917d3
|
1f58edef5e26cc1d6aad896b38a5d27273be2728
|
/genomics/wga-stat.R
|
c3c7980ea8ca391f5101de85eaa4084903fe77b8
|
[] |
no_license
|
lambertonlabs/schisto-scripts
|
28022acdbbe59bf009c6ba3071b1e6e3fec421b4
|
215f0899900995e2ca206fc221c12c6fe06dfd09
|
refs/heads/master
| 2020-04-27T18:07:15.893353
| 2019-03-28T15:47:22
| 2019-03-28T15:47:22
| 174,554,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 696
|
r
|
wga-stat.R
|
#r script for performing basic summary stats on schisto WGS
#if possible, use VCFtools and then ggplot to generate your nice manhattan plots
#adegenet doesn't tolerate large SNP sets very well, and popgenome a) won't analyse multiple chromosomes and b) is very finicky about inputs
#install external pkgs
# NOTE(review): unconditional install.packages() re-installs on every run;
# consider guarding with requireNamespace() or doing installs interactively.
install.packages("PopGenome")
install.packages("adegenet")
install.packages("vcfR")
install.packages("poppr")
#load basically all popular popgen modules
library(PopGenome)
library(adegenet)
library(vcfR)
library(poppr)
#load data and convert to object
#setwd to dir containing vcfs
setwd("~/Projects/Schisto/GENOMIC/VCF/popgenome/")
# Read the VCF and convert it to a genind object for adegenet-style analysis.
snp <- read.vcfR("CF.vcf")
x <- vcfR2genind(snp)
|
584751c2c6077a91ff560b3e5cf127694e2d570c
|
bf706f52c5d1fc9f560f4cdb9327ce3eb6e10992
|
/aeqd.R
|
b135d03540599a5f131e093cb21c1d84bb28273a
|
[] |
no_license
|
RationShop/tornado_r
|
d52ac4d2208a8d2542e415862c625cd3c2a475d6
|
7e68cab9a3d5c8d35ea515ab6a420856a20a637f
|
refs/heads/master
| 2016-09-06T18:22:26.546648
| 2014-01-23T05:01:59
| 2014-01-23T05:01:59
| 14,630,731
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,691
|
r
|
aeqd.R
|
# project lat-lon values to azimuthal equidistant projection
# to be consistent with Brooks et al 2003 who use a 80-km grid
# and then plot this grid along with state boundaries
# some useful links
# http://r-sig-geo.2731867.n2.nabble.com/Re-projecting-rasters-projectRaster-td5777721.html
# http://stackoverflow.com/questions/17214469/r-crop-raster-data-and-set-axis-limits
# http://stackoverflow.com/questions/10763421/r-creating-a-map-of-selected-canadian-provinces-and-u-s-states
# https://stat.ethz.ch/pipermail/r-sig-geo/2007-December/002939.html
# http://stackoverflow.com/questions/15634882/why-the-values-of-my-raster-map-change-when-i-project-it-to-a-new-crs-projectra
# http://www.nceas.ucsb.edu/scicomp/usecases/createrasterimagemosaic
# http://stackoverflow.com/questions/11891908/how-to-overlay-global-map-on-filled-contour-in-r-language
library(raster)
library(maptools)
library(rgdal)
library(maps)
# lat-lon bounds of the lower 48 states
lat_seq <- c(20, 50)
lon_seq <- c(-125, -65)
ll_df <- expand.grid(lat = lat_seq, lon = lon_seq, KEEP.OUT.ATTRS = TRUE)
# lat-lon and azimuthal equidistant projection info
ll_proj <- "+proj=longlat +datum=WGS84"
ae_proj <- "+proj=aeqd +lat_0=35 +lon_0=-95 +units=m"
# Function to project from geographic to aeqd. Input is a data frame and the name of the columns associated with lon and lat, and the input and output projection info for CRS.
# Project point locations from one CRS to another.
# in_df:            data.frame containing the coordinate columns.
# lon_col, lat_col: names of the longitude and latitude columns.
# in_proj/out_proj: proj4 strings for the input and output CRS.
# Returns a SpatialPointsDataFrame in the output projection.
Fn_Get_Projected_Locs <- function(in_df, lon_col, lat_col, in_proj, out_proj) {
  # wrap the coordinates in a spatial object tagged with the input CRS
  spdf <- SpatialPointsDataFrame(coords = in_df[, c(lon_col, lat_col)],
                                 data = in_df,
                                 proj = CRS(in_proj))
  # reproject with rgdal's spTransform and return the result
  spTransform(spdf, CRS(out_proj))
}
# Use above to identify the bounds of the 80-km grid and the coordinates.
ae_locs <- Fn_Get_Projected_Locs(ll_df, "lon", "lat", ll_proj, ae_proj)
# set the 80-km grid resolution and dimensions in aeqd
aegrid_res <- 80000 # raster resolution in meters
aegrid_bounds <- apply(ae_locs@coords, 2, range)
aegrid_xcoords <- seq(aegrid_bounds[1, "lon"], aegrid_bounds[2, "lon"], aegrid_res)
aegrid_ycoords <- seq(aegrid_bounds[1, "lat"], aegrid_bounds[2, "lat"], aegrid_res)
aeX <- length(aegrid_xcoords)
aeY <- length(aegrid_ycoords)
# Function to compute the euclidean distance between 2 points
# Vectorized Euclidean distance between points (x1, y1) and (x2, y2).
Fn_Compute_Distance <- function(y1, x1, y2, x2) {
  dy <- y1 - y2
  dx <- x1 - x2
  sqrt(dy * dy + dx * dx)
}
# matrices used in distance calcs
xindx_mat <- matrix(rep(c(1:aeX), aeY), nrow = aeY, byrow = TRUE)
yindx_mat <- matrix(rep(c(1:aeY), aeX), nrow = aeY, byrow = FALSE)
aegrid_res_km <- aegrid_res / 1000 # grid resolution in km
# calculate distance matrix
dist_mat <- aegrid_res_km * Fn_Compute_Distance(yindx_mat, xindx_mat, 1, 1)
# flip the matrix from S-N to N-S to counteract "raster" package behavior
dist_mat <- dist_mat[c(nrow(dist_mat):1), ]
usa_rast <- raster(dist_mat,
xmn = min(aegrid_xcoords),
xmx = max(aegrid_xcoords),
ymn = min(aegrid_ycoords),
ymx = max(aegrid_ycoords),
crs = ae_proj)
# map of the lower 48 in aeqd
usa_map <- map("state", xlim = range(lon_seq), ylim = range(lat_seq), plot = FALSE)
usa_map <- map2SpatialLines(usa_map)
proj4string(usa_map) <- CRS(ll_proj)
usa_map <- spTransform(usa_map, CRS(ae_proj))
# output
png("aeqd_raster.png", width = ncol(usa_rast)*10, height = nrow(usa_rast)*10)
plot(usa_rast, axes = FALSE)
plot(usa_map, add = TRUE)
contour(usa_rast, add = TRUE, axes = FALSE)
garbage <- dev.off()
|
65876eac9b11b63dde852c4e30948e665162d292
|
1f83c31d62d266921955c1d2fadb4535ced8c388
|
/cachematrix.R
|
2d62a737d7cec416b4a40cceaa3c24d709f412de
|
[] |
no_license
|
woafn/ProgrammingAssignment2
|
432b35861680b15a6e3dd343f4a3db13f7a7c4eb
|
6ab26f250b48c9eb78a5bbc89819de23cee90b33
|
refs/heads/master
| 2020-12-25T23:19:21.002012
| 2014-10-27T21:34:45
| 2014-10-27T21:34:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,102
|
r
|
cachematrix.R
|
## -----------------------------------------
## R Programming
## https://class.coursera.org/rprog-008
## Programming assignment 2
##
## To save resources, use a cached copy of a matrix's
## inverse instead of calculating the inverse each time
##
## makeCacheMatrix(maxtrix)
## -- creates an object with functions to store & retrieve
## -- the cached matrix inverse
##
## cacheSolve (makeCacheMatrix-List, ...)
## -- Uses object created in makeCacheMatrix to calculate or retrieve
## -- the matrix inverse
##
##
## Code is heavily based on the sample provided at:
## https://class.coursera.org/rprog-008/human_grading/view/courses/972581/assessments/3/submissions
##
## makeCacheMatrix (x=matrix)
## Create object that stores a matrix and its inverse, so that the
## inverse can be retrieved from cache instead of being recalculated
## See paired function cacheSolve()
## -----------------------------------------
## Returns an object based on the passed matrix
## with functions to:
## set: Sets the base matrix for the object
## getmatrix: Retrieves the base matrix for the object
## setinv: Caches the inverse of the matrix
## getinv: Returns the inverse of the matrix from the cache
## mname: Name of the matrix in the original environment
# Build a list "object" wrapping matrix `x` so its inverse can be cached;
# paired with cacheSolve(). Returned components:
#   set:       replace the underlying matrix and clear the cached inverse
#   getmatrix: return the underlying matrix
#   setinv:    store a computed inverse in the cache
#   getinv:    return the cached inverse (NULL until setinv() is called)
#   mname:     name of the matrix variable in the caller's environment
makeCacheMatrix <- function(x = matrix()) {
#message("In makeCacheMatrix")
# cached inverse; NULL means "not computed yet"
minv <- NULL
# record the *name* of the argument expression so cacheSolve() can later
# check whether the original variable changed in the caller's environment
mname <- deparse(substitute(x))
## set parameters for the object; replacing the matrix invalidates the cache
set <- function(newval) {
x <<- newval
minv <<- NULL
}
## return the underlying matrix
getmatrix <- function ( ) x
##set the cached inverse of the underlying matrix
setinv <- function(inv) minv <<- inv
## return the inverse
getinv <- function() minv
#originalMatrix <- x
list(set=set, getmatrix=getmatrix, setinv=setinv, getinv=getinv, mname=mname)
}
## cacheSolve (x, ...)
## Use list object from makeCacheMatrix()
## Checks to see if a cache of the target matrix exists
## in object x; if so, uses the cached value of the inverse
## See paired function makeCacheMatrix()
##
## Parameters:
## x List of type created by makeCacheMatrix()
## -----------------------------------------
# Return the inverse of the matrix wrapped by `x` (a list created by
# makeCacheMatrix()), computing it with solve() only when no valid cached
# copy exists. Warns and refreshes the wrapper if the original matrix
# variable changed since the wrapper was created.
cacheSolve <- function(x, ...) {
# check if the matrix has changed
# variable passed to this call
cachedObject <- deparse(substitute(x))
# if it's changed, then re-run the make object using the
# current value of the matrix
# NOTE(review): eval(parse(text = ...)) re-resolves the matrix *by name*;
# this is fragile (fails if the original variable was renamed or removed).
# Storing the matrix itself would be safer -- kept as-is for the course.
if(!(identical(x$getmatrix(), eval(parse(text=x$mname))))) {
message(paste(c("WARNING: Matrix", x$mname, "has changed since initiation of object", cachedObject), collapse=" "))
message(paste(c("Updating object", cachedObject), collapse=" "))
x$set(eval(parse(text=x$mname)))
}
# check if inverse is already stored in the object
# if so, return cached value
localvalue <- x$getinv()
if (!is.null(localvalue)) {
message(paste(c("Using cached value of inverse of ",x$mname), sep=""))
return(localvalue)
}
#if it's not recorded, set inverse and return calculated value
locinv <- solve(x$getmatrix())
x$setinv(locinv)
locinv
}
|
2fa104a197cf30be3e5eda878bef60f871fa10ef
|
bcca39b5027094d9151b2b2baef6fa7fb4f9fad4
|
/matrix_convert.r
|
fd5ad9c3756c78284d4d35daade89cfa0a6b2529
|
[] |
no_license
|
TZstatsADS/Fall2016-proj4-KayLim
|
94684cb7004b6068df80b299149e49f76143c619
|
9a0d20069744f1927f55f2b33ce9e1d22fb361ea
|
refs/heads/master
| 2020-12-24T08:41:46.471683
| 2016-12-01T13:31:46
| 2016-12-01T13:31:46
| 73,326,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
matrix_convert.r
|
#convert strings to matrix
# Build a document-term matrix from a character vector and drop documents
# that contain no terms (all-zero rows).
# FIX: removed the dead local `text <- as.vector(text_string)`, which was
# never used.
# NOTE(review): create_matrix() comes from an external text-mining package
# not visible in this file -- confirm it is loaded before calling.
matrix_convert <- function(text_string, language) {
  matx <- create_matrix(text_string, language = language)
  # apply() is kept (rather than rowSums) because the exact matrix class
  # returned by create_matrix() is not visible here.
  rowTotals <- apply(matx, 1, sum)
  matx[rowTotals > 0, ]
}
|
3a365bfe026331ab6dd809cbce8b99d65d8d7b39
|
2fc262b8029463664f2505217f74c116c6d7c1df
|
/model.R
|
b4eee18f71ce4a1590a5ff84aef34b968a4968c1
|
[] |
no_license
|
paranjapeved/FITS
|
2f07ac0abb7f2abe8f42f8b3e7182984621eac8f
|
3dd941c84b8e03c9569c1890fb49839f728581cd
|
refs/heads/master
| 2020-05-06T20:16:20.062641
| 2019-04-08T21:22:11
| 2019-04-08T21:22:11
| 180,230,346
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
model.R
|
# Fit a logistic regression predicting td_min_b from all other columns.
# NOTE(review): assumes train_data.csv is in the working directory and that
# td_min_b is a binary outcome -- confirm with the data source.
train <- read.csv('train_data.csv')
model <- glm(td_min_b ~.,family=binomial(link='logit'),data=train)
|
82c6fb1bbbf0610a22087237732f7575f93b55f7
|
a6fd85360b899ee98d79a4080c05bcefc01f009b
|
/SourceCode/DataCreation/CleanIndividualData/OldDataClean/MakeAmcData.R
|
2b134826c2939c875658b0a68835741b84c596d1
|
[] |
no_license
|
FGCH/amcData
|
951f4aeb9ab217e40509a9657efc1f5a934443a4
|
2209173c3011aa17aac95a36006f1a1d22304048
|
refs/heads/master
| 2020-05-15T01:17:35.288389
| 2014-05-12T09:41:56
| 2014-05-12T09:41:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,418
|
r
|
MakeAmcData.R
|
#########
# Make amcData Data Set
# Christopher Gandrud
# Updated 7 November 2012
#########

# Install required packages
## Code based on https://gist.github.com/3710171
## See also http://bit.ly/PbabKd
doInstall <- FALSE # Change to FALSE if you don't want packages installed.
toInstall <- c("WDI", "countrycode", "devtools", "reshape", "gdata", "xtable")
if (doInstall) {
  install.packages(toInstall, repos = "http://cran.us.r-project.org")
}

# Run the individual clean-up scripts in order. Each source() may leave the
# working directory somewhere else, so it is reset before every call.
cleanDir <- "/git_repositories/amcData/SourceCode/DataCreation/CleanIndividualData/"
cleanScripts <- c("AddAMCFull.R", "AddLVFullCrisisYears.R", "AddLV.R",
                  "AddDPIVariables.R", "AddUDSVariable.R", "AddWorldBank.R",
                  "AddDreherIMF.R")
for (cleanScript in cleanScripts) {
  setwd(cleanDir)
  source(cleanScript)
}

# Create merged data set
## Country-Year data
setwd("/git_repositories/amcData/SourceCode/DataCreation/Merge/")
source("MergeSurvival.R")

# Tidy workspace: keep only the merged country-year data
rm(list = setdiff(ls(), "amcCountryYear"))
|
9ce3cc7b66a47a6af5488f9aeb4b1bbb495eb5dc
|
4b0ef5b828dff774361172d387849befceef90ef
|
/clase7b.R
|
912507047bf1c9b77755302f6fe48b3718a0dbf3
|
[] |
no_license
|
ricardomayerb/ico8306
|
dcf10532d301a888505a307329da06fd2992702d
|
677bad4b0cd61dd08238ab5dddace0a8602f2568
|
refs/heads/master
| 2020-07-05T10:10:38.358611
| 2020-04-06T03:45:34
| 2020-04-06T03:45:34
| 202,618,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,721
|
r
|
clase7b.R
|
library(tidyverse)
# Problem
# If there are twelve cars crossing a bridge per minute on average,
# find the probability of having seventeen or more cars crossing
# the bridge in a particular minute.
#
# Solution
# The probability of having sixteen or less cars crossing the bridge
# in a particular minute is given by the function ppois.
ppois(16, lambda=12) # lower tail
# [1] 0.89871
# Hence the probability of having seventeen or more cars crossing the bridge in a minute is in the upper tail of the probability density function.
ppois(16, lambda=12, lower=FALSE) # upper tail
# 0.10129
# Answer
# If there are twelve cars crossing a bridge per minute on average,
# the probability of having seventeen or more cars crossing the bridge
# in a particular minute is 10.1%.
# What is the probability of selecting x = 14 red marbles
# from a sample of k = 20 taken from an urn containing
# m = 70 red marbles and n = 30 green marbles?
# probability
x = 14
k = 20
m = 70
n = 30
dhyper(x = x, m = m, n = n, k = k)
k * m / (m + n)
k * m / (m + n) * (m + n - k) / (m + n) * n / (m + n - 1)
# options(scipen = 999, digits = 2) # sig digits
density = dhyper(x = 1:20, m = m, n = n, k = k)
data.frame(red = 1:20, density) %>%
mutate(red14 = ifelse(red == 14, "x = 14", "other")) %>%
ggplot(aes(x = factor(red), y = density, fill = red14)) +
geom_col() +
geom_text(
aes(label = round(density,2), y = density + 0.01),
position = position_dodge(0.9),
size = 3,
vjust = 0
) +
labs(title = "PMF of X = x Red Balls",
subtitle = "Hypergeometric(k = 20, M = 70, N = 30)",
x = "Number of red balls (x)",
y = "Density")
x = 14
m = 7000
n = 3000
k = 20
d_binom <- dbinom(x = 1:20, size = k, prob = m / (m + n))
df_binom <- data.frame(x = 1:20, Binomial = d_binom)
p <- ggplot(df_binom, aes(x = x, y = Binomial)) +
geom_col()
d_hyper_100 <- dhyper(x = 1:20, m = 70, n = 30, k = k)
d_hyper_250 <- dhyper(x = 1:20, m = 175, n = 75, k = k)
d_hyper_500 <- dhyper(x = 1:20, m = 350, n = 150, k = k)
d_hyper_1000 <- dhyper(x = 1:20, m = 700, n = 300, k = k)
df_hyper = data.frame(x = 1:20,
Hyper_100 = d_hyper_100,
Hyper_250 = d_hyper_250,
Hyper_500 = d_hyper_500,
Hyper_1000 = d_hyper_1000)
df_hyper_tidy <- gather(df_hyper, key = "dist", value = "density", -c(x))
p +
geom_line(data = df_hyper_tidy, aes(x = x, y = density, color = dist)) +
labs(title = "Hypergeometric Distribution Appoximation to Binomial",
subtitle = "Hypergeometric approaches Binomial as population size increases.",
x = "Number of successful observations (x)",
y = "Density")
|
574e08135a2f7d0eba97274c725dac3b64f6e817
|
12fdf5bc55b6f41436fb7ab598f7707857fbf292
|
/seminars/seminar05/Seminar05 R Script.R
|
0dc624613ed1778a5561e75d7ea98513ca18635e
|
[] |
no_license
|
vivianlac/seminars
|
6f8e93dbc2b8402814aa211d05f40979f632c381
|
e29b28de19396f82ad614781badae416f4119131
|
refs/heads/master
| 2021-01-12T00:05:06.774192
| 2017-02-06T08:40:40
| 2017-02-06T08:40:40
| 78,670,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
Seminar05 R Script.R
|
# load packages
library(lattice)
library(ggplot2)
library(reshape2)
# load data to be analyzed
prDat <- read.table("./data/GSE4051_data.tsv.txt")
str(prDat, max.level = 0)
prDes <- readRDS("./data/GSE4051_design.rds")
str(prDes)
# write function that takes probeset IDs as input and gives output as data.frame
library(plyr)
(luckyGenes <- c("1419655_at","1438815_at"))
# Keep only the rows for the chosen probesets, then reshape to long format:
# one row per (sample, gene) with the expression value in gExp.
prepareData <- subset(prDat, rownames(prDat) %in% luckyGenes)
prepareData <- data.frame(gExp = as.vector(t(as.matrix(prepareData))),
gene = factor(rep(rownames(prepareData), each = ncol(prepareData)),
levels = luckyGenes))
# NOTE(review): suppressWarnings() here hides any recycling warnings from
# combining prDes with the reshaped data -- confirm row counts line up.
jDat <- suppressWarnings(data.frame(prDes, prepareData))
str(jDat)
head(jDat)
tail(jDat)
# generate figure to check if function is working:
stripplot(gExp ~ devStage | gene, jDat,
group = gType, jitter.data = TRUE,
auto.key = TRUE, type = c('p', 'a'), grid = TRUE)
stripplot(gExp ~ devStage | gene, jDat, pch = 17, cex = 3,
group = gType, jitter.data = TRUE,
auto.key = TRUE, type = c('p', 'a'), grid = TRUE)
# make stripplot using ggplot2:
# NOTE(review): this last expression rebuilds the data.frame but discards
# the result -- the ggplot2 version appears unfinished.
suppressWarnings(data.frame(prDes, prepareData))
|
f08f0a81223cdd82e5ad65edbeff8e2e75e513cf
|
97aafdeb042b59c18d311c25cc71d5103f23e12c
|
/R/SingleDataSearch.R
|
bd474685b6a9481cc00d18807470a05915f221c5
|
[] |
no_license
|
rlaqudahr88/AI_Nick
|
83cc18cf588b7bb4348c61efa0dac1e93c787d90
|
8f431d6a4bbaf2db863087e42970083d1d23190d
|
refs/heads/master
| 2022-12-11T07:20:57.155186
| 2020-09-08T02:46:15
| 2020-09-08T02:46:15
| 258,415,203
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 4,251
|
r
|
SingleDataSearch.R
|
## Exploring single-variable (univariate) data
# 1) Nature of data: categorical (qualitative) vs continuous (quantitative)
#    Categorical: gender, blood type, color, yes/no -> arithmetic is meaningless
#    Continuous: weight, height, temperature, number of children -> arithmetic applies
# 2) Classification by number of variables
#    Univariate data: a single variable
#    Multivariate data: two or more variables
# 1. Building a frequency distribution table
favorite <- c('WINTER', 'SUMMER','SPRING','SUMMER','SUMMER','FALL','FALL',
'SUMMER','SPRING','SPRING')
favorite
# Compute the frequency table
table(favorite)
# Print proportions (ratio or percentage in decimal)
table(favorite)/(length(favorite))
# 2. Bar chart
ds <- table(favorite)
ds
# NOTE(review): 'faorite' typo below is inside a plot-title string literal.
barplot(ds, main='faorite season')
# 2-1. Reordering the frequency table (by position, then by name)
ds[c(2,3,1,4)]
ds[c('SPRING','SUMMER','FALL','WINTER')]
table(favorite)[c('SPRING','SUMMER','FALL','WINTER')]
ds.new <- table(favorite)[c('SPRING','SUMMER','FALL','WINTER')]
barplot(ds.new, main='favorite season')
# 3. Pie chart
ds
pie(ds, main='favorite season')
# 4. Categorical data coded as numbers (1=green, 2=red, 3=blue)
favorite.color <- c(2,3,2,1,1,
2,2,1,3,2,
1,3,2,1,2)
ds <- table(favorite.color)
ds
# 1) Plain bar chart
barplot(ds, main='favorite color')
# 2) Bar chart with specified colors
colors <- c('green','red','blue')
names(ds) <- colors
ds
barplot(ds, main='favorite color', col=colors)
# 3) Pie chart with specified colors
pie(ds, main='favorite color', col=colors)
# adding percentage labels to the pie slices
pct <- round(ds/sum(ds)*100)
lbls <- colors
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # add % to labels
pie(ds,labels=lbls, main='favorite color', col=colors)
ds
## Exploring continuous data
weight <- c(60,62,64,65,68,69)
weight
# same data plus one outlier (120)
weight.heavy <- c(weight, 120)
weight.heavy
# Mean
mean(weight)
mean(weight.heavy)
# Median
median(weight)
median(weight.heavy)
# Trimmed mean (drop top/bottom 20%)
mean(weight, trim=0.2)
mean(weight.heavy, trim=0.2)
# Quartiles
mydata <- c(60,62,64,65,68,69,120)
quantile(mydata)
# Deciles: quantiles in 10% steps
quantile(mydata, (0:10)/10)
summary(mydata) # summary() gives all of this at once
# Dispersion
var(mydata) # variance
# standard deviation
sd(mydata)
# range of values
range(mydata)
# difference between max and min
diff(range(mydata))
# Histogram
cars
dist <- cars[,2] #distance
dist
hist(dist, #data
main='Histogram for Distance', #재목
xlab = 'distance', #x label
ylab= 'Frequency', # y label
border='blue', # bar outer color
col='green', # bar inner color
las=1, # rotate x y axis label(0~3)
breaks=5) # number of x axis bars
# Boxplot
boxplot(dist, main='자동차 제동거리')
# 박스 안이 50% 그안에 라인이 중간값
# 박스 아래 줄이 25%. 박스 위줄이 75%
boxplot.stats(dist)
# $stats: 정상법위 자료의 4분위수에 해당하는 값 표시
# $n : 자료에 있는 관측 값의 개수, 총 50개의
# 관측값을 저장하고 있다.
# $conf: 중앙값에 관련된 신뢰구간을 의미
# $out: 특이값의 목록 outlier
# 상자그림 그룹
iris
boxplot(Petal.Length~Species, data = iris,
main='품종별 꽃잎의 길이')
boxplot(iris$Petal.Length~iris$Species,
main='품종별 꽃잎의 길이')
# 한 화면에 그래프 여러개 출력하기
# 3개로 화면 분할
par(mfrow=c(1,3))
mtcars
barplot(table(mtcars$carb), main='Barplot of Carburetors',
xlab='#of carburetors',
ylab='frequency',
col='blue')
barplot(table(mtcars$cyl), main='Barplot of Cylinder',
xlab='#of cylinder',
ylab='frequency',
col='red')
barplot(table(mtcars$gear), main='Barplot of Gear',
xlab='#of gear',
ylab='frequency',
col='green')
par(mfrow=c(1,1)) # 가상화면 분할 해제
|
e85e94ab439495e985b9a37a450fea87720cfcbf
|
0198bd016fc0867660639ff7b5979c088e42c6c7
|
/man/Fylkesgrense.Rd
|
96920d8462c0927cbc48c485546520ca579ab2cc
|
[
"CC-BY-4.0"
] |
permissive
|
hmalmedal/N5000
|
62f4f046ccbd28e8d3e5a05dacab8f755a6c5950
|
6b7e55001998e3ea05fcb26c564069d6862d7a63
|
refs/heads/master
| 2023-05-12T12:08:37.036517
| 2019-02-21T19:12:57
| 2023-05-01T08:11:06
| 171,929,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,850
|
rd
|
Fylkesgrense.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{Fylkesgrense}
\alias{Fylkesgrense}
\title{Fylkesgrense}
\format{
\if{html}{\out{<div class="sourceCode">}}\preformatted{Simple feature collection with 158 features and 3 fields
Geometry type: LINESTRING
Dimension: XY
Bounding box: xmin: -86015.82 ymin: 6466105 xmax: 627600.6 ymax: 7721400
Projected CRS: ETRS89 / UTM zone 33N
# A tibble: 158 × 4
målemetode nøyaktighet oppdateringsdato grense
* <int> <int> <date> <LINESTRING [m]>
1 64 100000 2021-12-28 (143290.4 6830767, 141653.5 6829327, 139303.…
2 64 100000 2021-12-28 (124979.4 6858917, 124085.4 6855031, 125761.…
3 64 100000 2021-12-28 (101382.4 6881630, 101772.4 6880181, 101808.…
4 64 100000 2021-12-28 (100036.1 6898667, 101629.6 6897947, 101915.…
5 64 100000 2021-12-28 (79950.67 6628972, 79146.73 6630043, 76664.6…
6 64 100000 2021-12-28 (76948.77 6608219, 78328.76 6609894, 82572.3…
7 64 100000 2021-12-28 (84901.58 6599239, 81988.83 6600346, 80121.6…
8 64 100000 2021-12-28 (89234.78 6573105, 88666.04 6574933, 87216.3…
9 64 100000 2021-12-28 (98433.63 6557152, 97449.71 6557064, 96930.8…
10 64 100000 2021-12-28 (114974.8 6551406, 114540.3 6552118, 113162.…
# ℹ 148 more rows
# ℹ Use `print(n = ...)` to see more rows
}\if{html}{\out{</div>}}
}
\source{
\code{Basisdata_0000_Norge_25833_N5000AdministrativeOmrader_GML.gml}
}
\usage{
Fylkesgrense
}
\description{
Fylkesgrense
}
\author{
© \href{https://kartverket.no/}{Kartverket}
}
\keyword{datasets}
|
c12f415bb51cf8bf1bbdd6ce81b905c4c8877169
|
5c7c92ac57086091e529d2101e340ca327916166
|
/man/computePersistence.Rd
|
23702d2baf3df4010535e5ba7ee651d2f7e04471
|
[
"MIT"
] |
permissive
|
nesscoder/TimeCycle
|
2dbb83709e23e7ce8dae4272734d63dea77c6331
|
01a321a0720a141980c4b33121be7fef02790fbf
|
refs/heads/master
| 2023-06-09T20:08:26.075191
| 2021-06-29T01:01:21
| 2021-06-29T01:01:21
| 244,984,693
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,629
|
rd
|
computePersistence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computePersistence.R
\name{computePersistence}
\alias{computePersistence}
\title{Computes Persistence Scores For a Data.Frame of Time-Series Across Multiple Lags}
\usage{
computePersistence(
data,
minLag = 2,
maxLag = 5,
cores = parallel::detectCores() - 2,
laplacian = T
)
}
\arguments{
\item{data}{a \code{data.frame} of \code{numeric} gene expression over time (row = genes \emph{x} col = ZT times).}
\item{minLag}{a \code{numeric} specifying the min lag to check in the 3-D embedding. Default is \code{2}.}
\item{maxLag}{a \code{numeric} specifying the max lag to check in the 3-D embedding. Default is \code{5}.}
\item{cores}{a \code{numeric} specifying the number of parallel cores to use. Default number of cores is \code{parallel::detectedCores() - 2}.}
\item{laplacian}{a \code{logical} scalar. Should the Laplacian Eigenmaps be used for dimensionality reduction? Default \code{TRUE}.}
}
\value{
a \code{vector} of the median persistence score across lags (minLag to maxLag) for each gene in data
}
\description{
Takes a \code{data.frame} of numeric gene expression over time (genes X ZT times) and computes the persistence score using \code{\link{getPersistence}}.
For a given gene, each lag (min to max) is used to transform the expression into a 3-D embedded space via time-delay embedding.
A non-linear dimension reduction technique (laplacian eigenmaps) is used to transfrom the 3-D embedding to a 2-D embedding.
Finally, the persistence score of the 2-D embedding is calculated via persistence homology.
The median persistence score across all lags (min to max) for each gene is returned as a numeric vector.
For more details see TimeCycle's vignette:
\code{vignette("TimeCycle")}.
}
\references{
{
\itemize{
\item Wadhwa RR, Williamson DFK, Dhawan A, Scott JG. (2018). "TDAstats: R pipeline for computing persistent homology in topological data analysis." \emph{Journal of Open Source Software}. 2018; 3(28): 860. doi:\href{https://doi.org/10.21105/joss.00860}{[10.21105/joss.00860]}
\item Bauer U. (2019). "Ripser: Efficient computation of Vietoris-Rips persistence barcodes." \emph{arXiv}: 1908.02518.
}
}
}
\seealso{
\itemize{
\item \code{\link[TDAstats]{calculate_homology}} for Persistence Homology calculation.
\item \code{\link{buildTakens_ndim}} for for generating time-delay embedding.
\item \code{\link{computeLaplacianEmbedding}} for 3-D to 2-D laplacian eigenmaps dimension reduction.
\item \code{\link{getPersistence}} for use on a single gene expression time-series.
}
}
|
4cdda6db082999ef74dfdcd2e3d2337bfa4a177d
|
c88dbe917c3a5503f4ada75ba47d393d091a99f1
|
/Seasonality_TTB.R
|
16540bdeaef6bcf11e791fe1c92c6fd0347e6503
|
[] |
no_license
|
HuckleyLab/ThermalStress
|
b618f220cf7a13369a1c1a42d1ee58f32d7f8085
|
14efd38a85e7db2a9719f18497e4c92e354f342d
|
refs/heads/master
| 2022-07-14T10:13:30.286060
| 2022-06-29T19:15:29
| 2022-06-29T19:15:29
| 188,100,526
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,979
|
r
|
Seasonality_TTB.R
|
#------------------------------
#HADCRUT4
#land and sea
library(raster)
require(rgdal)
library(ncdf4)
library(maps)
setwd("/Volumes/GoogleDrive/Team Drives/TrEnCh/Projects/ThermalStress/data/HadCRUT4/")
cru<-nc_open('absolute.nc')
print(cru)
#extract temperature data
temp=ncvar_get(cru,"tem")
#lons
lons= ncvar_get(cru,"lon") #get info about long
#lats
lats= ncvar_get(cru,"lat") #get info about latitude
#month
temp1= temp[,,2]
#calculate SD across months
tempsd= apply(temp, c(1,2), sd)
#to raster
seas <- raster(t(tempsd), xmn=min(lons), xmx=max(lons), ymn=min(lats), ymx=max(lats), crs=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs+ towgs84=0,0,0"))
#plot
tempr <- flip(seas, direction='y')
plot(seas)
#add country outline
map('world', fill = FALSE, col = "grey", add=TRUE)
#---------------------
#relate thermal breath to latitude
#Load GlobTherm
setwd("/Volumes/GoogleDrive/Team Drives/TrEnCh/Projects/ThermalStress/data/CTlimits/")
tol.gt= read.csv('GlobalTherm_upload_10_11_17.csv')
#species with CTmin and CTmax
tol.gt= tol.gt[ which(tol.gt$max_metric=='ctmax' & tol.gt$min_metric=='ctmin'),]
#make tmin numeric
tol.gt$tmin= as.numeric(as.character(tol.gt$tmin))
#Thermal tolerance breadth
tol.gt$TTB= tol.gt$Tmax - tol.gt$tmin
#extract SD
tol.gt$SD= extract(seas, tol.gt[,c('long_max','lat_max')] )
#partition marine and terrestrial
tol.gt$habitat= 'terrestrial'
tol.gt$habitat[which(tol.gt$Order %in% c('Perciformes','Decapoda','Cypriniformes','Cyprinodontiformes','Kurtiformes','Laminariales','Mugiliformes','Osmeriformes','Characiformes','Myliobatiformes','Salmoniformes') )]= 'marine'
##separate into terrestrial and marine, mostly terrestiral
#tol.gt.ter= tol.gt[tol.gt$habitat=='terrestrial',]
#tol.gt.marine= tol.gt[tol.gt$habitat=='marine',]
#============================
#-------------------------
#DATA from Buckley et al. MOVEMENT ANALYSIS
#Seasonality data
setwd("/Volumes/GoogleDrive/Team Drives/TrEnCh/Projects/ThermalStress/data/CRU_Movement/")
tseas<- read.csv("TempSeasonality3.csv")
#T SEASONALITY
#Make spatial points data frame
xy.sp= cbind(tseas$lon, tseas$lat)
xy.cc= coordinates(xy.sp)
bbox(xy.sp)
#Make Spatial pixels data frame
#grd1 <- SpatialPixelsDataFrame(points=xy.sp, data = dat50[,10:13], tolerance=0.1, proj4string=CRS("+proj=longlat +proj=lcc"))
grd1 <- SpatialPixelsDataFrame(points=xy.sp, data = tseas[,4:5], tolerance=0.1)
#Plot SD*100
spplot(grd1, "SD")
#extract values
sdr= raster(grd1, values=TRUE)
plot(sdr)
#extract SD
tol.gt$SDm= extract(sdr, tol.gt[,c('long_max','lat_max')] )
#--------------------------
#Plot and fit models
#plot relationship with SD
ggplot(tol.gt, aes(SD, TTB, color=Class, shape=habitat)) +geom_point()+facet_wrap(~habitat)
#plot relationship with SD from movement analysis
ggplot(tol.gt, aes(SDm, TTB, color=Class)) +geom_point()
#with absolute latitude
ggplot(tol.gt, aes(abs(lat_max), TTB, color=Class, shape=habitat)) +geom_point()
#with absolute latitude
ggplot(tol.gt, aes(abs(lat_max), TTB, color=Class, shape=habitat)) +geom_point()
#plot terrestrial or marine
ggplot(tol.gt, aes(abs(lat_max), TTB)) +geom_point()+facet_wrap(~habitat)+geom_smooth(method='lm',se=FALSE)
#fit models
mod1= lm(TTB~SD*habitat, data=tol.gt)
mod1= lm(TTB~SDm, data=tol.gt)
mod1= lm(TTB~abs(lat_max)*habitat, data=tol.gt)
mod1= lm(TTB~abs(lat_max), data=tol.gt[tol.gt$habitat=='terrestrial',])
mod1= lm(TTB~abs(lat_max), data=tol.gt[tol.gt$habitat=='marine',])
summary(mod1)
#======================
#Fit Huey data
setwd("/Volumes/GoogleDrive/Shared Drives/TrEnCh/Projects/ThermalStress/data/CTlimits/")
tol.h= read.csv('Hueyetal2009.csv', na.strings ='-9999')
#estimate TTB
tol.h$TTB= tol.h$CTmax - tol.h$CTmin
#extract SD
tol.h$SDm= extract(sdr, tol.h[,c('Long','Lat')] )
#estimate Topt as percent
tol.h$ToptPer= (tol.h$newTopt-tol.h$CTmin)/(tol.h$CTmax-tol.h$CTmin)
#--------------------------
#Plot and fit models
#plot relationship with SD from movement analysis
ggplot(tol.h, aes(SDm, TTB, color=Family)) +geom_point()
#with absolute latitude
ggplot(tol.h, aes(AbsLat, TTB, color=Family)) +geom_point()
#plot position of Topt
ggplot(tol.h, aes(AbsLat, newTopt, color=Family)) +geom_point()
ggplot(tol.h, aes(AbsLat, ToptPer, color=Family)) +geom_point()
#Topt as percent of TTB
mod1=lm(ToptPer ~AbsLat , dat=tol.h)
#-------------------------
#Fit TPC
#Performance Curve Function from Deutsch et al. 2008
TPC= function(T,topt,sigma, ctmax){
F=rep(NA, length(T))
ind=which(T<=topt)
F[ind]= exp(-((T[ind]-topt)/(2*sigma))^2)
ind=which(T>topt)
F[ind]= 1- ((T[ind]-topt)/(topt-ctmax))^2
return(F)
}
#==========================================
#SUMMARY FUNCTION
#TTB, based on GlobTherm
#terrestrial
TTB.terr= function(AbsLat) 29.15383 + 0.19897 * AbsLat
#marine
TTB.mar= function(AbsLat) 0.6945813 + 0.0020061 * AbsLat
#TTB= 26.25588 + 0.09722 * AbsLat
#Topt as percent of TTB, based on Huey data
ToptPer= function(AbsLat) 0.6945813 + 0.0020061 * AbsLat
|
1d64209da2bd037e2bd9fa18f09e1f0082574770
|
80828838119e3ff7343549adb90d225e6b6f06d1
|
/demo/npsymtest_npRmpi.R
|
e67a1308c54231f755a1e677484cee53aed3b918
|
[] |
no_license
|
cran/npRmpi
|
49131c9b187b8bddcf63523b30affa22658ae09e
|
1ebad7da39e8ec4bf30775bcc2aea34ef5a87fec
|
refs/heads/master
| 2016-09-05T17:44:47.216211
| 2014-06-27T00:00:00
| 2014-06-27T00:00:00
| 17,697,956
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,743
|
r
|
npsymtest_npRmpi.R
|
## Make sure you have the .Rprofile file from npRmpi/inst/ in your
## current directory or home directory. It is necessary.
## To run this on systems with OPENMPI installed and working, try
## mpirun -np 2 R CMD BATCH npsymtest_npRmpi. Check the time in the
## output file foo.Rout (the name of this file with extension .Rout),
## then try with, say, 4 processors and compare run time.
## Initialize master and slaves.
mpi.bcast.cmd(np.mpi.initialize(),
caller.execute=TRUE)
## Turn off progress i/o as this clutters the output file (if you want
## to see search progress you can comment out this command)
mpi.bcast.cmd(options(np.messages=FALSE),
caller.execute=TRUE)
## Generate some data and broadcast it to all slaves (it will be known
## to the master node)
mpi.bcast.cmd(set.seed(42),
caller.execute=TRUE)
## A function to create a time series
ar.series <- function(phi,epsilon) {
m <- length(epsilon)
series <- numeric(m)
series[1] <- epsilon[1]/(1-phi)
for(i in 2:m) {
series[i] <- phi*series[i-1] + epsilon[i]
}
return(series)
}
n <- 2500
## Stationary persistent symmetric time-series
yt <- ar.series(0.5,rnorm(n))
mpi.bcast.Robj2slave(yt)
## A simple example of the test for symmetry
t <- system.time(mpi.bcast.cmd(output <- npsymtest(yt,
boot.num=399,
boot.method="geom",
method="summation"),
caller.execute=TRUE))
output
cat("Elapsed time =", t[3], "\n")
## Clean up properly then quit()
mpi.bcast.cmd(mpi.quit(),
caller.execute=TRUE)
|
c06e9991bbf6ac326eb821de63d0a47c630133ee
|
33a30dafc83b22ccf434c5ce164de96f82ebc65d
|
/prepare_group_file_for_EPACTS_Rpart.R
|
c8022c8fff84b99d12d4d9960cc89c5f62c773e5
|
[] |
no_license
|
moutsian/lm12_Scripts
|
5f2205d933692c8e77534859927c1c54606d8adb
|
5c32faf2b8c46ca81d2a447bf209689f36fd712e
|
refs/heads/master
| 2021-09-13T13:09:02.797716
| 2018-04-30T09:45:33
| 2018-04-30T09:45:33
| 104,744,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,369
|
r
|
prepare_group_file_for_EPACTS_Rpart.R
|
#prepare_group_file_for_EPACTS_Rpart.R
chrom=21
for(chrom in 1:22){
print(paste("chrom:",chrom,sep=""))
varfile=paste("tili.i3.site_QCplus_binom.sample_QC.annot.vcf.",chrom,".variants",sep="")
annofile=paste("tili.i3.site_QC.sample_QC.vcf.annot.",chrom,".annot.fc.tmp",sep="")
outfile=paste("tili.i3.site_QC.sample_QC.",chrom,".annot.fc.genes.epacts",sep="")
var_table=read.table(varfile,stringsAsFactors=F)
anno_table=read.table(annofile,stringsAsFactors=F)
together=merge(anno_table,var_table,by.x="V1",by.y="V3")
# We can do this both for genes and for transcripts. I will do it for genes first.
# Note that I am not applying any MAF threshold here at present.
genes=unique(together[,4])
epacts_table=matrix(ncol=2,nrow=length(genes),"")
epacts_table[,1]=genes
for(i in 1:dim(together)[1]){
if(i%%1000==0){print(paste(i,"out of",dim(together)[1]))}
idx=which(genes==together[i,4])
tmp=epacts_table[idx,2]
epacts_table[idx,2]=paste(tmp," ",together[i,2],"_",together[i,17],"/",together[i,18],sep="")
}
#since we have entries by transcript there will be multiple entries per gene. Thus, unique and sort after you are done
test=lapply(epacts_table[,2],function(x) paste(sort(unique(unlist(strsplit((x),split=" ")))),collapse=' '))
epacts_table=cbind(genes,test)
write.table(epacts_table,outfile,col.names=F,row.names=F,quote=F,sep="\t")
}
#END
|
7ccf896e2ac254798c0cc3e90a3ff43d86c42087
|
55f30a3d624323b427103540af0275e790c68b4f
|
/man/GetCrecheData.Rd
|
a9799cbb6fa86c2de79b5d68ae2185cc88cf6138
|
[] |
no_license
|
bromsk/NETNCoastalBirds
|
6ff6833af11d33801816863c7665909e9e307bdd
|
8dc7ca151efe76393330905a098f241f2132b921
|
refs/heads/master
| 2020-07-14T04:22:00.523164
| 2019-08-22T17:30:27
| 2019-08-22T17:30:27
| 155,464,003
| 0
| 0
| null | 2018-10-30T22:23:27
| 2018-10-30T22:23:26
| null |
UTF-8
|
R
| false
| true
| 1,198
|
rd
|
GetCrecheData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetCrecheData.R
\name{GetCrecheData}
\alias{GetCrecheData}
\title{Return creche surveys from database}
\usage{
GetCrecheData(x, ODBC_connect = TRUE, export = FALSE)
}
\arguments{
\item{x}{Denote in parentheses to return df}
\item{ODBC_connect}{Should the function connect to the Access DB? The default (TRUE) is to
try to connect using the Windows ODBC manager. If the connection is not available or not desired,
the function can return the saved data from the package.
Note the saved data may not be up-to-date.}
\item{export}{Should the incubation data be exported as a csv file and RData object?
(This argument is used to regenerate the RData for the package.)}
}
\description{
This function connects to the backend of NETN's Coastal Bird Access DB
and returns the raw creche survey data of COEI
}
\details{
This function returns the raw AMOY survey data as a \code{data.frame}.
}
\section{Warning}{
User must have Access backend entered as 'NETNCB' in Windows ODBC manager.
(If ODBC_connect = TRUE).
}
\examples{
creche <- GetCrecheData(x)
}
\seealso{
\url{ https://www.nps.gov/im/netn/coastal-birds.htm}
}
|
8d35b7f647cf8d39a8981cb20941564faea99472
|
58c02f3060b1e3bd9961744ba286d82146867b37
|
/R/maybestep.R
|
0db35ff601e2852dc1b484eb90f1353e4b862c6f
|
[
"MIT"
] |
permissive
|
dfalbel/maybestep
|
093938c2d2f3d75db8b44a1b2da3c8003ac445a4
|
4160fedf8fb48a5ece29f1408995abc0fb700df6
|
refs/heads/main
| 2023-08-08T04:08:59.946841
| 2021-09-15T13:24:31
| 2021-09-15T13:24:31
| 406,755,311
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,620
|
r
|
maybestep.R
|
#' Allows tuning the presence of a step.
#'
#' @inheritParams recipes::step_normalize
#' @param step_fn A recipe step function that will be added to `recipe`.
#' @param ... Additional arguments passed to `step_fn`.
#' @param use Whether to include `step_fn` or not into the `recipe`.
#'
#'
#' @examples
#' library(recipes)
#' df <- data.frame(x = letters[1:4], y = runif(length(letters[1:4])))
#' rec <- recipe(y ~ x, data = df) %>%
#' step_maybe(step_dummy, use = TRUE, all_nominal_predictors())
#'
#' rec %>% prep() %>% bake(df)
#'
#' @importFrom recipes bake prep add_step step rand_id tunable
#' @importFrom tune tune_args
#' @export
step_maybe <- function(
recipe,
step_fn,
use,
...,
trained = FALSE,
skip = FALSE,
id = rand_id("maybestep")
) {
original_step <- step_fn(recipe, ..., trained = trained,
skip = skip, id = id)
original_step <- original_step$steps[[length(original_step$steps)]]
add_step(
recipe,
step_maybe_new(
step = original_step,
use = use,
role = original_step$role,
trained = trained,
skip = skip,
id = id
)
)
}
step_maybe_new <- function(step, use, role, trained, skip, id) {
step(
subclass = "maybe",
step = step,
use = use,
trained = trained,
skip = skip,
id = id,
role = role
)
}
#' @export
prep.step_maybe <- function(x, training, info = NULL) {
x$trained <- TRUE
if (!x$use) return(x)
x$step <- prep(x$step, training = training, info = info)
x
}
#' @export
bake.step_maybe <- function(object, new_data, ...) {
if (!object$use) return(new_data)
bake(object$step, new_data, ...)
}
#' @export
tunable.step_maybe <- function(x, ...) {
dplyr::bind_rows(
tunable(x$step),
tibble::tibble(
name = c("use"),
call_info = list(list(pkg = "maybestep", fun = "use_step")),
source = "recipe",
component = "step_maybe",
component_id = x$id
)
)
}
#' A dials parameter to be used with [step_maybe()].
#' @param values the possible values (TRUE or FALSE by default.)
#' @seealso [step_maybe()]
#' @export
use_step <- function(values = c(FALSE, TRUE)) {
dials::new_qual_param(
type = "logical",
values = values,
default = dials::unknown(),
label = c("use_step" = "Using the selected step"),
finalize = NULL
)
}
#' @export
tune_args.step_maybe <- function(object, full = FALSE, ...) {
args <- NextMethod()
dplyr::bind_rows(args, tune::tune_args(object$step))
}
#' @export
merge.step_maybe <- function(x, y, ...) {
x$step <- merge(x$step, y, ...)
NextMethod()
}
|
86ad41558162421cdc4c52d826fb38585903cbd5
|
16f830f62c6b319652a5a0ae382e02435d05c4ef
|
/man/FIM.Rd
|
0d55dade83e9e01f9c8ac92c4ff064cbb943df82
|
[] |
no_license
|
zhonghualiu/MRCIP
|
641c3314e3ef713d32a760fda40f54eb8f58d4df
|
4568fa94b7a45eace168a1b2efa5cf718f6e2cec
|
refs/heads/main
| 2023-01-05T12:26:01.522258
| 2020-11-04T12:28:43
| 2020-11-04T12:28:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
rd
|
FIM.Rd
|
\name{FIM}
\alias{FIM}
\title{The estiamted Fisher infomation matrix for the paramters in MRCIP}
\usage{
FIM(sj_Gamma, sj_gamma, theta, w=NA)
}
\description{
This function provides an estiamted Fisher infomation matrix for the paramters in the MRCIP model.
}
\arguments{
\item{sj_Gamma}{A vector containing the estiamted standard errors of the estimated genentic effects on the outcome.}
\item{sj_gamma}{A vector containing the estiamted standard errors of the estimated genentic effects on the exposure.}
\item{theta}{The estimates of the unknown parameters in MRCIP.}
\item{w}{The weights for the genetic variants.}
}
|
d9f6356ea1f3ab9ea05ef5777ba5158a95a5a4ec
|
2948fc466c0935a4d4d07b8dfc79ee564034aac6
|
/replications/Rastogi_2016/R/members_or_not.R
|
01ec439d5b447f14b936735ac09a9cf8e697a860
|
[
"MIT"
] |
permissive
|
micheledinanni/Psychometric-tools-benchmark
|
5a0f8514e1a144787039c62f184a1af7d4fc5283
|
f9074c77c2a6151051a59853c19ce79ade276da7
|
refs/heads/master
| 2020-05-04T08:43:24.907430
| 2019-04-17T09:38:59
| 2019-04-17T09:38:59
| 179,052,341
| 1
| 1
| null | 2019-04-02T10:16:48
| 2019-04-02T10:16:48
| null |
UTF-8
|
R
| false
| false
| 1,893
|
r
|
members_or_not.R
|
rm(list=ls())
source(file="connectTodb.R")
query="select * from cc_project_members"
prc_memb<-dbGetQuery(con,query)
query="select * from pr_project_members"
cc_memb<-dbGetQuery(con,query)
query="select * from cc_not_project_members"
prc_nm<-dbGetQuery(con,query)
query="select * from pr_not_project_members"
cc_nm<-dbGetQuery(con,query)
rm(con,query)
all_memb<-rbind(prc_memb,cc_memb)
all_n_memb<-rbind(prc_nm,cc_nm)
all_members<-aggregate(cbind(O,C,E,A,N)~user_id,data=all_memb,mean,trim=0.2)
all_not_members<-aggregate(cbind(O,C,E,A,N)~user_id,data=all_n_memb,mean,trim=0.2)
#######################
# Visualization
s_r12<-rbind(cbind(all_members,type="1"),cbind(all_not_members,type="2"))
par(mfrow=c(1,5))
boxplot(O~type,data=s_r12,notch=T,outline=F,names=c("M","NM"),xlab="O")
boxplot(C~type,data=s_r12,notch=T,outline=F,names=c("M","NM"),xlab="C")
boxplot(E~type,data=s_r12,notch=T,outline=F,names=c("M","NM"),xlab="E")
boxplot(A~type,data=s_r12,notch=T,outline=F,names=c("M","NM"),xlab="A")
boxplot(N~type,data=s_r12,notch=T,outline=F,names=c("M","NM"),xlab="N")
dev.off()
#calcolo la deviazione standard dei campioni
sd(all_members$O)
sd(all_not_members$O)
sd(all_members$C)
sd(all_not_members$C)
sd(all_members$E)
sd(all_not_members$E)
sd(all_members$A)
sd(all_not_members$A)
sd(all_members$N)
sd(all_not_members$N)
#eseguo il t-test per valori non appaiati
t.test(all_members$O,all_not_members$O)
t.test(all_members$C,all_not_members$C)
t.test(all_members$E,all_not_members$E)
t.test(all_members$A,all_not_members$A)
t.test(all_members$N,all_not_members$N)
#calcolo la d di Cohen per calcolare la differenza standardizzata fra le due medie campionarie
library(lsr)
cohensD(all_members$O,all_not_members$O)
cohensD(all_members$C,all_not_members$C)
cohensD(all_members$E,all_not_members$E)
cohensD(all_members$A,all_not_members$A)
cohensD(all_members$N,all_not_members$N)
|
d2071182aad3e1f144d6df56ef3cb592ae6ce73a
|
54c69f7d2e1c24e1d2d2390664990c6d04fb60ba
|
/man/barycentric.Rd
|
4a898589391f6a773cc2650f5de28be931c92c78
|
[] |
no_license
|
SigbertIngress/ingressWeb
|
f682798edc252ff91bbc35951084369709dc4870
|
2a381b7ae7f1082d1f6d56a5f1901445753cca1f
|
refs/heads/master
| 2021-04-26T22:34:34.212218
| 2018-03-18T15:13:05
| 2018-03-18T15:13:05
| 123,955,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,282
|
rd
|
barycentric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barycentric.R
\name{barycentric}
\alias{barycentric}
\title{barycentric}
\usage{
barycentric(x, y, xtri, ytri, tolerance = 1e-08)
}
\arguments{
\item{x}{numeric(n): x coordinates}
\item{y}{numeric(n): y coordinates}
\item{xtri}{numeric(3): x coordinates of triangle vertices}
\item{ytri}{numeric(3): y coordinates of triangle vertices}
\item{tolerance}{numeric: if the barycentric coordinates are computed thena coordinate between 0 and \code{tolerance} will be set to zero (default: \code{1e-8})}
}
\value{
numeric(n,3): barycentric coordinates for x-y coordinates
}
\description{
cmoputes barycentric coordinates for x-y coordinates related to x-y triangle coordinates
}
\details{
Barycentric are useful for different purposes:
\enumerate{
\item If all barycentric coordinates are positive then the point is inside the triangle otherwise outside.
\item If two coordinates are zero andd one coordinate is one then the point is one of the vertices of the triangle.
\item For an interior point the barycentric coordinates give the proportion of the area if you subtringulate with the trinangle with this point.
}
}
\examples{
x <- random2(15)
barycentric(x[,1], x[,2], x[13:15,1], x[13:15,2])
}
|
9268303114b95666447d09d0bb2c03923cb9e8da
|
fd98f57aeee26f6cc13ea7a7cd4e2e58e108cd07
|
/tests/testthat/test-check-col-names.R
|
4e2ebeec0611b04ec31dfcad0c490e182453d030
|
[
"MIT"
] |
permissive
|
pitviper6/dccvalidator
|
2b6314a652eb983370586613b6e1341ac0879e7d
|
7273ef372106a7d4bfc634003282b542ba32fb3f
|
refs/heads/master
| 2023-04-05T10:32:47.947642
| 2021-03-31T15:38:25
| 2021-03-31T15:38:25
| 353,089,893
| 0
| 0
|
NOASSERTION
| 2021-03-30T18:31:37
| 2021-03-30T17:43:19
| null |
UTF-8
|
R
| false
| false
| 7,820
|
r
|
test-check-col-names.R
|
context("test-check-col-names.R")
syn <- attempt_instantiate()
test_that("check_col_names returns condition object when check passes", {
template <- data.frame(x = 1, y = 1)
dat <- data.frame(x = 5:10, y = 5:10)
result <- check_col_names(dat, names(template))
expect_true(inherits(result, "check_pass"))
})
test_that("check_col_names returns condition object when check fails", {
template <- data.frame(x = 1, y = 1, z = 1)
dat <- data.frame(x = 5:10, y = 5:10)
result <- check_col_names(dat, names(template))
expect_true(inherits(result, "check_fail"))
})
test_that("check_col_names returns missing columns in the data", {
template <- data.frame(x = 1, y = 1, z = 1)
dat <- data.frame(x = 5:10, y = 5:10)
result <- check_col_names(dat, names(template))
expect_equal(result$data, "z")
})
test_that("get_template fails when not logged in to Synapse", {
skip_if(is.null(syn))
syn$logout()
reticulate::py_capture_output(
expect_error(
get_template("syn12973252", syn)
),
type = "stderr"
)
})
attempt_login(syn)
test_that("check_cols_individual works for individual columns", {
skip_if_not(logged_in(syn = syn))
cols <- get_template("syn12973254", syn, version = 1)
full_col <- data.frame(matrix(ncol = length(cols)))
colnames(full_col) <- cols
incomplete_col <- full_col[, !names(full_col) %in% "yearsEducation"]
expect_true(
inherits(
check_cols_individual(
full_col,
id = "syn12973254",
syn = syn,
version = 1
),
"check_pass"
)
)
expect_true(
inherits(
check_cols_individual(
incomplete_col,
id = "syn12973254",
syn = syn,
version = 1
),
"check_fail"
)
)
})
test_that("check_cols_individual returns invalid columns in condition object", {
skip_if_not(logged_in(syn = syn))
cols <- get_template("syn12973254", syn = syn, version = 1)
full_col <- data.frame(matrix(ncol = length(cols)))
colnames(full_col) <- cols
incomplete_col <- full_col[, !names(full_col) %in% "yearsEducation"]
expect_equal(
check_cols_individual(
incomplete_col,
id = "syn12973254",
syn = syn,
version = 1
)$data,
"yearsEducation"
)
})
test_that("check_cols_biospecimen works for biospecimen columns", {
skip_if_not(logged_in(syn = syn))
biosp_names <- get_template("syn12973252", syn = syn, version = 4)
full_col_biosp <- data.frame(matrix(ncol = length(biosp_names)))
colnames(full_col_biosp) <- biosp_names
incomplete_col_biosp <- full_col_biosp[, !names(full_col_biosp) == "organ"]
expect_true(
inherits(
check_cols_biospecimen(
full_col_biosp,
id = "syn12973252",
syn = syn,
version = 4
),
"check_pass"
)
)
expect_true(
inherits(
check_cols_biospecimen(
incomplete_col_biosp,
id = "syn12973252",
syn = syn,
version = 4
),
"check_fail"
)
)
})
test_that("check_cols_biospecimen returns invalid columns in condition obj.", {
skip_if_not(logged_in(syn = syn))
biosp_names <- get_template("syn12973252", syn = syn, version = 4)
full_col_biosp <- data.frame(matrix(ncol = length(biosp_names)))
colnames(full_col_biosp) <- biosp_names
incomplete_col_biosp <- full_col_biosp[, !names(full_col_biosp) == "organ"]
expect_equal(
check_cols_biospecimen(
incomplete_col_biosp,
id = "syn12973252",
syn = syn,
version = 4
)$data,
"organ"
)
})
test_that("check_cols_biospecimen can get drosophila template", {
skip_if_not(logged_in(syn = syn))
drosophila_names <- get_template("syn20673251", syn = syn, version = 1)
drosophila_data <- data.frame(matrix(ncol = length(drosophila_names)))
colnames(drosophila_data) <- drosophila_names
expect_true(
inherits(
check_cols_biospecimen(
drosophila_data,
id = "syn20673251",
syn = syn,
version = 1
),
"check_pass"
)
)
expect_true(
inherits(
check_cols_biospecimen(
drosophila_data,
id = "syn12973252",
syn = syn,
version = 4
),
"check_fail"
)
)
})
test_that("check_cols_assay works for assay columns", {
skip_if_not(logged_in(syn = syn))
rnaseq_names <- get_template("syn12973256", syn = syn, version = 2)
full_col_assay <- data.frame(matrix(ncol = length(rnaseq_names)))
colnames(full_col_assay) <- rnaseq_names
incomplete_col_assay <- full_col_assay[, !names(full_col_assay) == "RIN"]
expect_true(
inherits(
check_cols_assay(
full_col_assay,
id = "syn12973256",
syn = syn,
version = 2
),
"check_pass"
)
)
expect_true(
inherits(
check_cols_assay(
incomplete_col_assay,
id = "syn12973256",
syn = syn,
version = 2
),
"check_fail"
)
)
})
test_that("check_cols_assay returns invalid columns within condition object", {
skip_if_not(logged_in(syn = syn))
rnaseq_names <- get_template("syn12973256", syn = syn, version = 2)
full_col_assay <- data.frame(matrix(ncol = length(rnaseq_names)))
colnames(full_col_assay) <- rnaseq_names
incomplete_col_assay <- full_col_assay[, !names(full_col_assay) == "RIN"]
expect_equal(
check_cols_assay(
incomplete_col_assay,
id = "syn12973256",
syn = syn,
version = 2
)$data,
"RIN"
)
})
test_that("check_cols_manifest works for manifest columns", {
skip_if_not(logged_in(syn = syn))
cols <- get_template("syn20820080", syn = syn, version = 3)
dat <- data.frame(matrix(ncol = length(cols)))
names(dat) <- cols
incomplete <- dat[, !names(dat) %in% "parent"]
expect_true(
inherits(
check_cols_manifest(dat, id = "syn20820080", version = 3, syn = syn),
"check_pass"
)
)
expect_equal(
check_cols_manifest(
incomplete,
id = "syn20820080",
syn = syn,
version = 3
)$data,
"parent"
)
})
# get_template() only supports xlsx and csv files; any other file type
# (here syn17039045) should raise an error. Python-side stderr output from the
# Synapse client is captured so it does not pollute the test log.
test_that("get_template errors for files that are not xlsx or csv", {
skip_if_not(logged_in(syn = syn))
reticulate::py_capture_output(
expect_error(
get_template("syn17039045", syn = syn)
),
type = "stderr"
)
})
# Both supported formats should yield the same column-name vector.
test_that("get_template can read in excel and csv templates", {
skip_if_not(logged_in(syn = syn))
csv <- get_template("syn18384877", syn = syn, version = 1)
xlsx <- get_template("syn18384878", syn = syn, version = 1)
expect_equal(csv, c("a", "b", "c"))
expect_equal(xlsx, c("a", "b", "c"))
})
# The version argument should select the corresponding Synapse file version
# (version 2 of this template has an extra column "d").
test_that("get_template can get different version of a template", {
skip_if_not(logged_in(syn = syn))
xlsx1 <- get_template("syn18384878", syn = syn, version = 1)
xlsx2 <- get_template("syn18384878", syn = syn, version = 2)
expect_equal(xlsx1, c("a", "b", "c"))
expect_equal(xlsx2, c("a", "b", "c", "d"))
})
# A data frame valid for version 2 of the biospecimen template should pass,
# while validation against version 3 reports the four columns added in v3.
test_that("wrapper functions for specific template gets the correct version", {
skip_if_not(logged_in(syn = syn))
dat <- data.frame(
individualID = 1,
specimenID = 1,
organ = 1,
tissue = 1,
BrodmannArea = 1,
tissueWeight = 1,
nucleicAcidSource = 1,
cellType = 1
)
expect_true(
inherits(
check_cols_biospecimen(dat, id = "syn12973252", syn = syn, version = 2),
"check_pass"
)
)
expect_equal(
check_cols_biospecimen(
dat,
id = "syn12973252",
syn = syn,
version = 3
)$data,
c("samplingDate", "sampleStatus", "tissueVolume", "fastingState")
)
})
# All check_cols_* helpers should return NULL (not error) on NULL input.
test_that("check_cols functions handle NULL input", {
expect_null(check_col_names(NULL))
expect_null(check_cols_manifest(NULL))
expect_null(check_cols_individual(NULL))
expect_null(check_cols_biospecimen(NULL))
expect_null(check_cols_assay(NULL))
})
|
14ee050fd332f37ea4bee84aadbaa94e672b4337
|
d473a271deb529ed2199d2b7f1c4c07b8625a4aa
|
/Stats_and_Math/DescriptiveStats_Outliers.R
|
7b53359cb9353a2aebe12e8a9c5d8607a7b43f5a
|
[] |
no_license
|
yangboyubyron/DS_Recipes
|
e674820b9af45bc71852ac0acdeb5199b76c8533
|
5436e42597b26adc2ae2381e2180c9488627f94d
|
refs/heads/master
| 2023-03-06T05:20:26.676369
| 2021-02-19T18:56:52
| 2021-02-19T18:56:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,831
|
r
|
DescriptiveStats_Outliers.R
|
# Delosh, Jason Predict 401 SEC60 Data Analysis 1 R Code
# Load packages with library() (errors loudly when missing, unlike require(),
# which only returns FALSE and lets the script continue broken).
library(ggplot2)
library(moments)
library(fBasics)
library(gridExtra)
# a) read abalones file into R (space-separated values)
ab <- read.csv(file.path("abalones.csv"), sep=" ")
# b) basic inspection of the data
mydata <- data.frame(ab)   # set ab as dataframe
str(mydata)                # review mydata structure
head(mydata)               # review header data
tail(mydata)               # review tail data
sum(is.na(mydata))         # look for null values in df
# c) create calculated columns (col header ends with 'c' to indicate calculated)
mydata$VOLUMEc <- mydata$LENGTH*mydata$DIAM*mydata$HEIGHT
# Bug fix: refer to VOLUMEc explicitly. The original 'mydata$VOLUME' only
# worked through $ partial matching (there is no VOLUME column in the data).
mydata$RATIOc <- mydata$SHUCK/mydata$VOLUMEc
# 1a) descriptive statistics for the metric columns
cols <- c(2:7,9:10) # the columns that contain metric data
# using the fBasics package, report selected descriptive statistics
round(basicStats(mydata[,cols])[c('Minimum','1. Qu','Median',
                                  'Mean','3. Qu','Maximum',
                                  'Stdev'),],3)
# 1b) frequency summaries of the categorical variables
summary(mydata$SEX)   # summary data for sex var
summary(mydata$CLASS) # summary data for class var
# table grouped by sex and class; add min, max and sum to the margins
tbl1 <- table(mydata$SEX,mydata$CLASS)
addmargins(tbl1,c(2,1), FUN = list(list(Min = min, Max = max,Sum=sum), Sum = sum))
# barplot of sex frequency by class
barplot((tbl1)[c(2,1,3),], # re-orders the table rows to align with the legend
        legend.text=c('Infant','Female','Male'),
        main='Abalone Sex Frequencies by Class',
        ylab='Frequency',
        xlab='Age Class',
        beside=TRUE,
        col=c('light green','red','light blue'),
        names.arg=c('A1','A2','A3','A4','A5','A6'))
# 1c) random sample of 200 observations for a scatterplot matrix
# (the original called set.seed(123) twice in a row; the duplicate was a no-op)
set.seed(123)
work <- mydata[sample(nrow(mydata),200,replace=FALSE),]
head(work)       # check first few rows
plot(work[,2:6]) # scatterplot matrix of the continuous variables (cols 2-6)
#2a)
# Scatterplot of whole weight against volume (columns 9 = VOLUMEc, 5 = WHOLE)
plot(mydata[,c(9,5)],
main='Abalone Whole weight vs. Volume',
xlab='Volume (cm^3)',
ylab='Whole (g)')
#2b)
# Plot shuck versus whole and add an abline whose slope is the maximum
# observed shuck-to-whole ratio
m<-max(mydata$SHUCK/mydata$WHOLE) # used as slope of line
plot(mydata$WHOLE, mydata$SHUCK,
main='Abalone Shuck weight vs. Whole weight',
xlab='Whole (g)',ylab='Shuck (g)')
# NOTE(review): the legend hard-codes 0.56; it is not recomputed from m
legend('topleft',legend=('Line = y=0.56x+0'))
abline(a=0, b=m) #a= y-intercept, b=slope
#3a)
# 3x3 panel: histograms (row 1), boxplots (row 2) and normal Q-Q plots (row 3)
# of the shuck/volume ratio, one column per sex (I, F, M).
# The shared max of RATIOc is used so all panels get comparable axis limits.
par(mfrow=c(3,3))
hist(mydata$RATIOc[mydata$SEX=='I'],
include.lowest=TRUE, right=TRUE,
main='Infant Abalone Ratio',
xlab='Shuck to Volume Ratio (g/cm^3)',
ylab='Frequency',
col='light green',
xlim=c(0.00,max(mydata$RATIOc)))
hist(mydata$RATIOc[mydata$SEX=='F'],
main='Female Abalone Ratio',
xlab='Shuck to Volume Ratio (g/cm^3)',
ylab='Frequency',
col='red',
xlim=c(0.0,max(mydata$RATIOc)))
hist(mydata$RATIOc[mydata$SEX=='M'],
include.lowest=TRUE, right=TRUE,
main='Male Abalone Ratio',
xlab='Shuck to Volume Ratio (g/cm^3)',
ylab='Frequency',
col='light blue',
xlim=c(0.00,max(mydata$RATIOc)))
# Boxplots with the conventional 1.5*IQR whisker range and notches
boxplot(mydata$RATIOc[mydata$SEX=='I'],
main='Infant Abalone Ratio',
col='light green',
range=1.5,
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
notch=TRUE)
boxplot(mydata$RATIOc[mydata$SEX=='F'],
main='Female Abalone Ratio',
col='red',
range=1.5,
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
notch=TRUE)
boxplot(mydata$RATIOc[mydata$SEX=='M'],
main='Male Abalone Ratio',
col='light blue',
range=1.5,
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
notch=TRUE)
# Normal Q-Q plots with reference lines to judge normality of each group
qqnorm(mydata$RATIOc[mydata$SEX=='I'],
main='Infant Abalone Ratio',
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
col='light green')
qqline(mydata$RATIOc[mydata$SEX=='I'])
qqnorm(mydata$RATIOc[mydata$SEX=='F'],
main='Female Abalone Ratio',
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
col='red')
qqline(mydata$RATIOc[mydata$SEX=='F'])
qqnorm(mydata$RATIOc[mydata$SEX=='M'],
main='Male Abalone Ratio',
ylab='Shuck to Volume Ratio (g/cm^3)',
ylim=c(0.0,max(mydata$RATIOc)),
col='light blue')
qqline(mydata$RATIOc[mydata$SEX=='M'])
# Restore the default single-panel layout
par(mfrow=c(1,1))
# Determine if there are any extreme outliers in the shuck/volume ratio.
# First calculate the max ratio for each sex group.
IARmax <- max(mydata$RATIOc[mydata$SEX=='I'])
FARmax <- max(mydata$RATIOc[mydata$SEX=='F'])
MARmax <- max(mydata$RATIOc[mydata$SEX=='M'])
# A group's max is an "extreme outlier" if it exceeds Q3 + 3.0*IQR of its
# own group.
# Bug fix: MAReol previously compared the INFANT group's max (SEX=='I')
# against the male threshold; it now uses the male max. The precomputed
# *ARmax values are reused so each comparison is guaranteed to use the
# matching group.
IAReol <- IARmax >= quantile(mydata$RATIOc[mydata$SEX=='I'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='I'])
FAReol <- FARmax >= quantile(mydata$RATIOc[mydata$SEX=='F'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='F'])
MAReol <- MARmax >= quantile(mydata$RATIOc[mydata$SEX=='M'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='M'])
# 1.5*IQR (outlier) and 3.0*IQR (extreme outlier) thresholds and counts for
# each sex; the plain outlier count excludes the extreme ones.
IARolV <- quantile(mydata$RATIOc[mydata$SEX=='I'],0.75) +
  1.5*IQR(mydata$RATIOc[mydata$SEX=='I'])
IAReolV <- quantile(mydata$RATIOc[mydata$SEX=='I'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='I'])
SumIAReolV <- sum(mydata$RATIOc[mydata$SEX=='I'] > IAReolV)
SumIARolV <- sum(mydata$RATIOc[mydata$SEX=='I'] > IARolV) - SumIAReolV
FARolV <- quantile(mydata$RATIOc[mydata$SEX=='F'],0.75) +
  1.5*IQR(mydata$RATIOc[mydata$SEX=='F'])
FAReolV <- quantile(mydata$RATIOc[mydata$SEX=='F'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='F'])
SumFAReolV <- sum(mydata$RATIOc[mydata$SEX=='F'] > FAReolV)
SumFARolV <- sum(mydata$RATIOc[mydata$SEX=='F'] > FARolV) - SumFAReolV
MARolV <- quantile(mydata$RATIOc[mydata$SEX=='M'],0.75) +
  1.5*IQR(mydata$RATIOc[mydata$SEX=='M'])
MAReolV <- quantile(mydata$RATIOc[mydata$SEX=='M'],0.75) +
  3.0*IQR(mydata$RATIOc[mydata$SEX=='M'])
SumMAReolV <- sum(mydata$RATIOc[mydata$SEX=='M'] > MAReolV)
SumMARolV <- sum(mydata$RATIOc[mydata$SEX=='M'] > MARolV) - SumMAReolV
# Data frame summarising the outlier check.
# FALSE in 'Extreme OL' means the group's max is not an extreme outlier.
a <- c('Infant','Female','Male')
b <- c(IARmax,FARmax,MARmax)
c <- c(IAReol,FAReol,MAReol)
d <- c(IARolV,FARolV,MARolV)
e <- c(SumIARolV,SumFARolV,SumMARolV)
f <- c(IAReolV,FAReolV,MAReolV)
g <- c(SumIAReolV,SumFAReolV,SumMAReolV)
h <- data.frame(a,round(b,3),c,round(d,3),e,round(f,3),g)
colnames(h) <- c('Sex','Max Ratio','Extreme OL','OL Val','OL Count',
                 'Extreme OL Val', 'Extreme OL Count')
h
#4)
# Plot volume and whole weight as a function of class (boxplots) and of ring
# count (scatterplots); grid.arrange combines the 4 graphs in one image.
grid.arrange(
ggplot(mydata,aes(x=mydata$CLASS,y=mydata$VOLUMEc))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Volume (cm^3)')+xlab('Class'),
ggplot(mydata,aes(x=mydata$CLASS,y=mydata$WHOLE))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Whole (g)') + xlab('Class'),
ggplot(mydata,aes(x=mydata$RINGS,y=mydata$VOLUMEc))+
geom_point(size=2, show.legend=FALSE, aes(colour=mydata$RINGS))+
scale_colour_gradient(low='red',high='blue')+
xlab('Rings')+ylab('Volume (cm^3)'),
ggplot(mydata,aes(x=mydata$RINGS,y=mydata$WHOLE))+
geom_point(size=2, show.legend=FALSE, aes(colour=mydata$RINGS))+
scale_colour_gradient(low='red',high='blue')+
xlab('Rings')+ylab('Whole (g)'),
nrow=2,top='Abalone Volume and Whole: by Class and Ring Count')
#5a)
# Mean volume per sex/class combination, reshaped into a 3x6 matrix
# (rows follow the factor-level order of SEX: F, I, M)
VolMean<-aggregate(VOLUMEc~SEX+CLASS,data=mydata,mean)
VolMat<-matrix(round(VolMean$VOLUMEc,2),3)
colnames(VolMat)<-c('A1','A2','A3','A4','A5','A6')
rownames(VolMat)<-c('Female','Infant','Male')
VolMat
# Same summary for the shuck/volume ratio
RatMean<-aggregate(RATIOc~SEX+CLASS,data=mydata,mean)
RatMat<-matrix(round(RatMean$RATIOc,4),3)
colnames(RatMat)<-c('A1','A2','A3','A4','A5','A6')
rownames(RatMat)<-c('Female','Infant','Male')
RatMat
#5b)
# Line plots of the group means across age classes, one line per sex
grid.arrange(
ggplot(data=VolMean,aes(x=CLASS, y=VOLUMEc, group=SEX, colour=SEX))+
geom_line(size=1,show.legend=FALSE)+geom_point(size=2.5,show.legend=FALSE)+
ggtitle('Mean Volume vs. Class \n Grouped by Sex'),
ggplot(data=RatMean,aes(x=CLASS, y=RATIOc, group=SEX, colour=SEX))+
geom_line(size=1)+geom_point(size=2.5)+
ggtitle('Mean Ratio vs. Class \n Grouped by Sex'),
ncol=2)
# Save the working data set and the sex/class frequency table.
# Fix: write.csv() always uses "," as separator and ignores a user-supplied
# 'sep' with a warning, so the sep argument has been removed here;
# write.table() does honour sep = " ".
write.csv(mydata, file='mydataused.csv')
write.table(tbl1,'sex_class_freq_tbl.csv',sep=" ")
|
b25832fe5950998f1e06b74e8a3283abc04a7688
|
0a0acb21d3ddecef48ccd3ce57d15300c128a6f7
|
/R/run.epiConv.R
|
c9440fb7848c067d550b3c71251846834d5cf2f3
|
[
"MIT"
] |
permissive
|
sparsepenn/epiConv
|
5f8b2ebd1cfcd4c59de602eaa9a40844d0d55b96
|
f9703b9a16432f212dc707dde0b9705b01ff3bab
|
refs/heads/master
| 2023-07-05T13:43:04.938353
| 2021-08-08T10:46:29
| 2021-08-08T10:46:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,795
|
r
|
run.epiConv.R
|
#' Calculate similarities between cells
#'
#' Computes the epiConv similarity matrix between all pairs of cells from a
#' peak-by-cell matrix. Stabilization parameters are estimated once on a
#' random 500 + 500 cell subsample and then reused while the full matrix is
#' filled block by block.
#'
#' @param mat Matrix object that contains the peak by cell matrix.
#' @param lib_size Library size of single cells (one value per column of mat).
#' @param nbootstrap Number of bootstraps performed.
#' @param nsample Number of peaks sampled in each bootstrap.
#' @param bin Block size: the similarity matrix is computed in bin x bin
#'   blocks to avoid very long vectors. It does not affect the results.
#' @param inf A small value used to replace -Inf.
#' @param ... Arguments passed to big.matrix when creating the similarity matrix. You can specify a backup file through "backingfile" and "descriptorfile". If not specified, the function will create an in-memory backup file.
#'
#' @examples
#' \dontrun{
#' Smat<-run.epiConv(mat=mat,
#'                   lib_size=colSums(mat),
#'                   nbootstrap=15,
#'                   nsample=floor(nrow(mat)*0.2),
#'                   bin=5000,
#'                   inf=(-8),
#'                   backingfile="backupfile",
#'                   descriptorfile="backupfile.descriptor")
#'}
#'
#' @return Return a similarity matrix (a big.matrix of size
#'   ncol(mat) x ncol(mat)).
run.epiConv<-function(mat,lib_size,nbootstrap,nsample,bin=5000,inf=(-8),...){
# One vector of sampled peak indices per bootstrap replicate
sample_matrix<-lapply(1:nbootstrap,function(x) sample(1:nrow(mat),size=nsample))
# TF-IDF normalisation of the peak-by-cell matrix
mat<-tfidf.norm(mat,lib_size=lib_size)
####calculate pars#########
# Estimate the stabilization parameters on two disjoint random subsets of
# 500 cells each (assumes ncol(mat) >= 1000 -- TODO confirm).
cell_sample<-500
temp<-sample(1:ncol(mat),cell_sample*2)
retain1<-sort(temp[1:cell_sample])
retain2<-sort(temp[(cell_sample+1):(cell_sample*2)])
Smat_small<-epiConv.matrix(mat1=mat[,retain1],
mat2=mat[,retain2],
sample_matrix=sample_matrix,
inf=inf)
adjust_pars<-stablization.pars(Smat=Smat_small,
lib_size=lib_size[c(retain1,retain2)])
# Free the temporary similarity matrix before allocating the full one
rm(Smat_small)
gc()
####calculate pars#########
# Full cell-by-cell similarity matrix, possibly file-backed (see ...)
Smat<-big.matrix(nrow=ncol(mat),ncol=ncol(mat),init=0,...)
# NB: '1:ceiling(ncol(mat)/bin)-1' parses as '(1:k) - 1', i.e. this iterates
# over 0-based block indices 0,...,k-1.
for(i in 1:ceiling(ncol(mat)/bin)-1){
row_index<-(i*bin+1):min((i*bin+bin),ncol(mat))
# Inner loop '(i+1):k - 1' = 'i:(k-1)', so it starts at the diagonal block
for(j in (i+1):ceiling(ncol(mat)/bin)-1){
col_index<-(j*bin+1):min((j*bin+bin),ncol(mat))
# Progress message for the current pair of cell ranges
aa<-paste0(min(row_index),"-",max(row_index))
bb<-paste0(min(col_index),"-",max(col_index))
cat("Calculating similarities between cells",aa,"vs",bb,fill=T)
temp<-epiConv.matrix(mat1=mat[,row_index],
mat2=mat[,col_index],
sample_matrix=sample_matrix,
inf=inf,
lib_size1=lib_size[row_index],
lib_size2=lib_size[col_index],
adjust_pars=adjust_pars)
Smat[row_index,col_index]<-temp
# Mirror off-diagonal blocks; the diagonal block (i == j) is written once
if(i!=j){
Smat[col_index,row_index]<-t(temp)
}
rm(temp)
gc()
}
}
return(Smat)
}
|
5bf2a16271d51daf4aab9fe591abe04a3611ba6e
|
6fc3f0f6935967f151de13f65fd9b2591cb97d04
|
/run_analysis.R
|
28c7ce3f922f7c53e569deea01bdad2c9c119c7d
|
[] |
no_license
|
fffeng16/getting_and_cleaning_data_project
|
f31ee4ac2c7d53669b1796f7f789d4294504ef4a
|
0072c54df973e88a7fa662af2cf526fe7cc79acd
|
refs/heads/master
| 2021-01-10T11:27:45.897731
| 2016-03-26T02:14:40
| 2016-03-26T02:14:40
| 54,757,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,456
|
r
|
run_analysis.R
|
# Tidy-data pipeline for the UCI HAR (smartphone accelerometer) dataset:
# download, merge train/test, keep mean/std measurements, label activities,
# and write per-subject/activity averages to tidy.txt.
library(dplyr)
# 1. download the raw dataset if it does not already exist in the working directory
filename <- "getdata-projectfiles-UCI HAR Dataset.zip"
if(!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, filename, method="curl")
}
if(!file.exists("UCI HAR Dataset")){
unzip(filename)
}
# 2. loads both the training and test datasets,
train_subject <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names="subject")
train_features <- read.table("UCI HAR Dataset/train/X_train.txt")
train_activity <- read.table("UCI HAR Dataset/train/y_train.txt", col.names="activity_label")
test_subject <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names="subject")
test_features <- read.table("UCI HAR Dataset/test/X_test.txt")
test_activity <- read.table("UCI HAR Dataset/test/y_test.txt", col.names="activity_label")
# 3. merge the training and the test data sets
# (columns: subject, activity_label, then the 561 feature columns).
train <- cbind(train_subject, train_activity, train_features)
test <- cbind(test_subject, test_activity, test_features)
data <- rbind(train, test)
# 4. Load the features data and use that to extract only the measurements
# on the mean and standard deviation for each measurement.
features <- read.table("UCI HAR Dataset/features.txt")
featuresUsed <- grep("mean\\(\\)|std\\(\\)", features$V2) + 2 #first 2 cols are subject and activity res.
# NB: selection by numeric column positions (relies on the cbind order above)
dataUsed <- select(data, subject, activity_label, featuresUsed)
# 5. Load the activity data to use descriptive activity names
# to name the activities (instead of label) in the data set
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", stringsAsFactors=FALSE,
col.names=c("label", "activity"))
dataUsed <- mutate(dataUsed, activity = activity_labels[match(activity_label, activity_labels$label), 2])
dataUsed <- select(dataUsed, -activity_label)
# Move the activity column (appended as col 68 by mutate) next to subject
dataUsed <- dataUsed[ , c(1,68,2:67)]
# 6. Appropriately label the data set with descriptive variable names.
newColNames <- as.character(features$V2[grep("mean\\(\\)|std\\(\\)", features$V2)])
names(dataUsed)[3:68] <- newColNames
# 7. Create a tidy dataset that consists of the average value of each variable
# for each subject and activity pair.
# NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
# this works with the dplyr version the script was written for.
dataTidy <- group_by(dataUsed, subject, activity) %>%
summarise_each(funs(mean))
write.table(dataTidy, "tidy.txt", row.names = FALSE, quote = FALSE)
|
fdc0da0309c92c79cb3a22eb7085fbf5574973a7
|
15a2c84db55c9ee237a6426bef7712810bc120d2
|
/Meetup_2019_05_30/5_cleaned_capital_effect/home_location_5.R
|
b6d0eed05289136faf8cd067cc1b4640936c5e3f
|
[] |
no_license
|
rladies/meetup-presentations_budapest
|
a7c782c6dd160a14d6579c989dc35ba4202e6637
|
f1d2ee524f2adff1bc77bfcb775d21ef4a9f95eb
|
refs/heads/master
| 2022-04-16T17:07:12.847074
| 2020-04-13T14:18:31
| 2020-04-13T14:18:31
| 82,724,566
| 15
| 26
| null | 2019-06-04T05:28:38
| 2017-02-21T20:36:49
|
HTML
|
UTF-8
|
R
| false
| false
| 1,837
|
r
|
home_location_5.R
|
# Exploration of client home-city data: data checks, population summaries,
# capital-city effect and an industry comparison. The helper functions
# (get_rows_with_missing_value, summarize_population, plot_* etc.) are
# defined in the sourced R files below.
library("tidyverse")
library("plotly")
purrr::walk(list.files("5_cleaned_capital_effect/R", full.names = TRUE), source)
# load data ---------------------------------------------------------------
data_path <- file.path(rprojroot::find_root('clean-r-code-student.Rproj'), "data")
# NA is valid country code, stands for Namibia, so should not be read as NA
countries <- read_csv(file.path(data_path, "countries.csv"), na = "")
home_cities <- read_csv(file.path(data_path, "home_cities_frequent.csv.gz"), na = "")
# data preparation -----------------------------------------------------
# Sanity checks on the raw tables (missing values, inconsistent coordinates)
get_rows_with_missing_value(countries)
get_rows_with_missing_value(home_cities)
get_cities_with_multiple_coords(home_cities)
# data exploration --------------------------------------------------------
glimpse_extreme_regions(home_cities, countries, country_code, city)
glimpse_extreme_regions(home_cities, countries, country_code)
# Cities with at least 1000 contacts, plotted by location
home_cities %>%
summarize_population(country_code, city, long, lat) %>%
filter(num_contact >= 1000) %>%
plot_city_populations()
home_cities %>%
summarize_population(country_code) %>%
plot_country_populations(countries)
# capital city effect -----------------------------------------------------
# Share of each country's population living in its largest cities
home_cities %>%
summarize_population(country_code, city) %>%
get_population_share_of_top_cities() %>%
plot_population_share(countries)
# industry comparison based on spread of clients --------------------------
clients <- read_csv(file.path(data_path, "clients.csv"))
get_rows_with_missing_value(clients)
# Density of the number of distinct countries per client, split by industry
home_cities %>%
group_by(client_id) %>%
summarize(num_country = n_distinct(country_code)) %>%
inner_join(clients, by = "client_id") %>%
ggplot(aes(x = num_country, color = industry)) +
geom_density() +
theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
|
2d002adbe5cee7e93d089601528bea7f2de0071b
|
e686d1cc1b59365c5589dbf9cbe2ce625f5a336c
|
/man/get_pref.Rd
|
3b852a8e120a6c4d501b1500e2e80b3756a605d2
|
[] |
no_license
|
systats/hrscraper
|
73399a6fb5f7086eb652d9dbb3f7d1f122778855
|
b69e81a743c1961c7c034f9de9b02704cb568bb2
|
refs/heads/master
| 2020-03-18T00:00:23.994618
| 2018-05-19T15:22:24
| 2018-05-19T15:22:24
| 134,073,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 258
|
rd
|
get_pref.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_info.R
\name{get_pref}
\alias{get_pref}
\title{get_pref}
\usage{
get_pref(html)
}
\arguments{
\item{html}{params}
}
\value{
df
}
\description{
get self reported preferences
}
|
d807bdd902fe0e45aee302fb8be18d10f9d7df39
|
93482447fb7e1fb0c9786c08bfd24c31f9a85930
|
/plot2.R
|
9102223bd53bda0025bcbdd4368f3f0efc191bab
|
[] |
no_license
|
hfconamore/Exploratory-Data-Analysis---Project-Electric-Power-Consumption-Plotting
|
db835e548ba58abadc42e736142cc6dd1297617c
|
48caeb0e2426c6e0809c8db540cc130167316c4c
|
refs/heads/master
| 2021-06-02T13:30:39.620345
| 2016-09-05T22:53:48
| 2016-09-05T22:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,932
|
r
|
plot2.R
|
# Course assignment: plot Global Active Power over 2007-02-01/02 from the
# UCI household power consumption file and save it as plot2.png.
setwd("/Users/Feng/Documents/AfterGraduationStudy/DataScienceSpecialization/04_ExploratoryDataAnalysis/ExploratoryDataAnalysis_HW_Project/ExploratoryDataAnalysis_W1_Project")
################ Data Manipulation ################
con <- file('household_power_consumption.txt', open = "r")
filtered_lines <- vector()
i <- 1
#' Read the data from just those dates rather than reading in the entire dataset
#' and subsetting to those dates.
# If the connection is open it is read from its current position.
# The regex keeps lines starting with "1/2/2007" or "2/2/2007" (d/m/yyyy).
while (length(oneLine <- readLines(con, n = 1, warn = FALSE)) > 0) {
if (grepl('^([12]/2/2007)', oneLine)) {
filtered_lines[i] <- oneLine
i <- i + 1
}
}
close(con)
# Separate each observation by ";" and create a data frame to store the data.
# nrow = 2880 assumes 2 days x 24 h x 60 min of one-per-minute observations
# -- TODO confirm against the raw file.
filtered_list <- sapply(filtered_lines, strsplit, ";")
filtered_data2 <- data.frame(matrix(unlist(filtered_list), nrow = 2880, byrow = T),
stringsAsFactors = FALSE)
# Column names come from the header line of the raw file
names(filtered_data2) <- unlist(strsplit(readLines("household_power_consumption.txt", n = 1), split = ";"))
# Convert the measurement columns (3-9) from character to numeric
for (i in 3:9) {
filtered_data2[, i] <- as.numeric(filtered_data2[, i])
}
colSums(sapply(filtered_data2, function(x) x == "?"))
# The result shows that no missing value exists in the subset of the data.
# Generate the x axis data.
library(lubridate)
filtered_data2$Date_Time <- dmy_hms(paste(filtered_data2$Date, filtered_data2$Time))
################ Plotting ################
png(filename = "plot2.png")
# Plot the data but hide x tick marks in the first place.
plot(filtered_data2$Date_Time, filtered_data2$Global_active_power, type = "l",
xlab = "", xaxt="n", ylab = "Global Active Power (kilowatts)")
# Add back the customized x tick marks.
axis(side = 1,
at = c(min(filtered_data2$Date_Time), median(filtered_data2$Date_Time), max(filtered_data2$Date_Time)),
labels = c("Thu", "Fri", "Sat"), tick = TRUE)
dev.off()
|
e723c537dc21e0ca52203cbfbd85c5ddea8d400e
|
8e5a091c398a487a454b768f7c59066cbb22d115
|
/code/zzz_archive_code/p2z_seasonalSplit.R
|
e1959de4b3b6986b7f978ec7d56d8b85537ece65
|
[] |
no_license
|
adamnicholasprice/DryingRegimes
|
945d1b43ced2a04960cf09bde91a781157a53c04
|
c108e3f18099c520639338d58a4933045e0de9ea
|
refs/heads/master
| 2021-11-12T10:46:11.838180
| 2021-05-12T03:06:52
| 2021-05-12T03:06:52
| 235,688,274
| 1
| 1
| null | 2021-10-30T11:07:47
| 2020-01-22T23:27:47
|
R
|
UTF-8
|
R
| false
| false
| 2,683
|
r
|
p2z_seasonalSplit.R
|
#####################################################################
##
## Script name:
##
## Author: Adam N. Price
##
## Date Created: 2020-04-17
##
## Copyright (c) Adam N. Price, 2020
## Email: adnprice@ucsc.edu
##
############################# Description ##########################
##
## Devide pead2zero metric into seasons.
##
##
############################# Packages #############################
library(lubridate)
library(dplyr)
library(doParallel)
library(foreach)
############################# Code ################################
# Functions
# Summary statistics of a numeric vector, returned in a fixed order:
# mean, count, max, min, coefficient of variation (sd/mean), sd.
# mean/sd/cv ignore NAs; length/max/min intentionally do not (as before).
statCalc <- function(x){
  avg <- mean(x, na.rm = TRUE)
  dev <- sd(x, na.rm = TRUE)
  c(avg, length(x), max(x), min(x), dev/avg, dev)
}
# Read one peak2z csv and compute seasonal (DJF/MAM/JJA/SON) summary
# statistics of the peak-to-zero length for that site.
# lubridate/dplyr are loaded and statCalc is redefined inside the function so
# that it is self-contained when shipped to parallel workers via %dopar%.
readP2Z <- function(file){
library(lubridate)
library(dplyr)
# Worker-local copy of the statistics helper: mean, n, max, min, cv, sd
statCalc <- function(x){
output = c(
mean(x,na.rm = TRUE),
length(x),
max(x),
min(x),
sd(x,na.rm = TRUE)/mean(x,na.rm = TRUE),
sd(x,na.rm = TRUE)
)
return(output)
}
data = read.csv(file)
# Site id = 4th token when splitting the path on '/' or '_'
# (assumes paths like '../data/peak2z/<site>_... .csv' -- TODO confirm)
site = strsplit(file,split = '/|_')[[1]][4]
# Winter spans the year boundary: December (12) plus January-February (1-2)
winter = c(data[between(lubridate::month(data$date),12,13),]$peak2z_length,data[between(lubridate::month(data$date),1,2),]$peak2z_length)
spring = data[between(lubridate::month(data$date),3,5),]$peak2z_length
summer = data[between(lubridate::month(data$date),6,8),]$peak2z_length
fall = data[between(lubridate::month(data$date),9,11),]$peak2z_length
# One-column data frame: site id followed by 6 stats per season (25 values);
# the caller transposes it into a single result row.
temp = data.frame(c(site,statCalc(winter),statCalc(spring),statCalc(summer),statCalc(fall)))
return(temp)
}
##### Run Functions
# Process every peak2z csv in parallel and assemble one row per site.
files <- list.files('../data/peak2z', full.names = TRUE, pattern = '*csv')
# Get the number of cores and start a cluster (leave one core free).
cores <- parallel::detectCores() - 1
cl <- parallel::makeCluster(cores)
# Bug fix: the cluster must be registered as the foreach backend; without
# registerDoParallel(), %dopar% warns and silently runs SEQUENTIALLY while
# the cluster sits idle.
doParallel::registerDoParallel(cl)
# Run readP2Z on each file and row-bind the transposed per-site results.
# (The previous dead preallocation of 'output' was removed: foreach builds
# and returns the combined result itself, and it was mis-sized anyway.)
output <- foreach(i = seq_along(files), .combine = rbind) %dopar%
  as.data.frame(t(readP2Z(files[i])))
colnames(output) <- c("site_no"
                      ,'p2z_mean_djf','p2z_count_djf','p2z_max_djf','p2z_min_djf','p2z_cv_djf','p2z_sd_djf'
                      ,'p2z_mean_mam','p2z_count_mam','p2z_max_mam','p2z_min_mam','p2z_cv_mam','p2z_sd_mam'
                      ,'p2z_mean_jja','p2z_count_jja','p2z_max_jja','p2z_min_jja','p2z_cv_jja','p2z_sd_jja'
                      ,'p2z_mean_son','p2z_count_son','p2z_max_son','p2z_min_son','p2z_cv_son','p2z_sd_son')
# Stop the cluster
parallel::stopCluster(cl)
# Clean up the data: replace textual non-finite placeholders with NA
output[output == "NaN"] <- NA
output[output == "-Inf"] <- NA
output[output == "Inf"] <- NA
write.csv(output,file = '../data/p2z_seasonal.csv')
|
3a104c8f1556a13ff9582093979279946999568d
|
ab418aac86f7e1495a7ac1e03d2439809c95467b
|
/src/6f_plot.R
|
842ceb4f67969588e2bee248a198b69626ca8306
|
[] |
no_license
|
rivas-lab/covid19_HGI
|
e6d63923a6dcfd67aaf7dd2b35dcb6ed851edb73
|
92b1e3e98aee73d93090c97741c4943801a788d9
|
refs/heads/master
| 2023-04-14T22:18:33.727148
| 2021-04-29T21:50:28
| 2021-04-29T21:50:28
| 331,125,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,695
|
r
|
6f_plot.R
|
# Command-line driven comparison of UKB follow-up GWAS results against HGI
# meta-analysis results for one phenotype; writes side-by-side p-value and
# beta scatter plots as png/pdf.
fullargs <- commandArgs(trailingOnly=FALSE)
args <- commandArgs(trailingOnly=TRUE)
# Path of this script, recovered from the --file= argument of Rscript
script.name <- normalizePath(sub("--file=", "", fullargs[grep("--file=", fullargs)]))
suppressWarnings(suppressPackageStartupMessages({
library(tidyverse)
library(data.table)
}))
####################################################################
# Example values for interactive use:
# clumpp <- 1e-5
# HGI_cc <- 'B2'
# HGI_sx <- 'eur_leave_ukbb_23andme'
# pheno <- 'INI30130'
HGI_cc <- args[1]
HGI_sx <- args[2]
clumpp <- as.numeric(args[3])
pheno <- args[4]
####################################################################
# Shared parameters (data_d, UKB_cal_f, ukb_phe_info, chr3_chemokine_pos,
# remove_half_window, repo_fig_d, ...) and plotting helpers come from here.
source(file.path(dirname(script.name), '0_parameters.sh'))
source(file.path(dirname(script.name), '0_parameters_chr3_chemokine_pos.sh'))
source(file.path(dirname(script.name), 'plot_functions.R'))
####################################################################
# e.g. 1e-05 -> "1e-5" (drop the zero-padding in the exponent)
clumpp_str <- str_replace(sprintf('%.0e', clumpp), '0', '')
# Phenotype ID -> short-name lookup (ukb_phe_info is presumably a file path
# from the sourced parameters -- TODO confirm)
ukb_phe_info %>% fread(select=c('GBE_ID', 'GBE_short_name')) -> ukb_phe_info_df
setNames(ukb_phe_info_df$GBE_short_name, ukb_phe_info_df$GBE_ID) -> ukb_phe_info_named_l
# Input file paths, built from the placeholder template and CLI arguments
HGI_sumstats_f <- file.path(
data_d, 'plink_format_UKB_cal',
UKB_cal_f %>%
str_replace('@@HGI_case_control@@', HGI_cc) %>%
str_replace('@@HGI_suffix@@', HGI_sx)
)
clump_f <- file.path(
data_d, 'UKB_PRS_clump',
basename(HGI_sumstats_f) %>%
str_replace('.tsv.gz$', str_replace(sprintf('.clump%.0e.clumped.gz', clumpp), '0', ''))
)
UKB_sumstats_f <- file.path(
data_d, 'UKB_PRS_PheWAS_follow_up_GWAS',
sprintf('ukb.%s.glm.gz', pheno)
)
# LD-clumped variant IDs; both summary-stat tables are restricted to these
clump_f %>% fread() %>% pull(SNP) -> clumped_vars
ukb_cal_annotation_f %>%
fread(colClasses = c('#CHROM'='character')) %>%
rename('CHROM'='#CHROM') -> ukb_cal_annotation_df
UKB_sumstats_f %>% fread(colClasses = c('#CHROM'='character')) %>%
rename('CHROM'='#CHROM') %>% filter(ID %in% clumped_vars) -> UKB_sumstats_df
HGI_sumstats_f %>% fread(colClasses = c('#CHROM'='character')) %>%
rename('CHROM'='#CHROM') %>% filter(ID %in% clumped_vars) -> HGI_sumstats_df
# Join UKB and HGI stats per variant; log10P is computed by splitting the
# p-value string at 'e' so tiny p-values don't underflow to 0.
inner_join(
UKB_sumstats_df %>%
separate(P, c('P_base', 'P_exp'), sep='e', remove=F, fill='right') %>%
replace_na(list(P_exp='0')) %>% mutate(log10P = log10(as.numeric(P_base)) + as.numeric(P_exp)) %>%
select(CHROM, POS, ID, BETA, SE, P, log10P),
HGI_sumstats_df %>%
separate(P, c('P_base', 'P_exp'), sep='e', remove=F, fill='right') %>%
replace_na(list(P_exp='0')) %>% mutate(log10P = log10(as.numeric(P_base)) + as.numeric(P_exp)) %>%
select(CHROM, POS, ID, BETA, SE, P, log10P),
by=c('CHROM', 'POS', 'ID'),
suffix=c('_UKB', '_HGI')
) %>%
left_join(
ukb_cal_annotation_df %>% select(ID, REF, ALT, SYMBOL),
by='ID'
) %>% mutate(
# Flag variants within the chr3 chemokine-receptor window
is_in_chr3_chemokine_region = (
CHROM == '3' &
((chr3_chemokine_pos - remove_half_window) <= POS) &
(POS <= (chr3_chemokine_pos + remove_half_window))
)
) -> df
# Scatter plots (helpers defined in plot_functions.R)
df %>% p_HGIpval_vs_UKBpval() -> p_pval
df %>% p_HGIbeta_vs_UKBbeta() -> p_beta
out_f <- file.path(
repo_fig_d, 'UKB_PRS_PheWAS_follow_up_GWAS', HGI_sx, HGI_cc, sprintf(
'HGIrel5_%s_%s.clump%s.%s.png',
HGI_cc, HGI_sx, clumpp_str, pheno
)
)
# Create the two-level output directory if needed
if(!dir.exists(dirname(dirname(out_f)))) dir.create(dirname(dirname(out_f)))
if(!dir.exists(dirname(out_f))) dir.create(dirname(out_f))
# Combine both panels under a shared title and save as png and pdf
gridExtra::arrangeGrob(
p_pval, p_beta, ncol=2,
top=grid::textGrob(sprintf(
"Comparison of GWAS associations (%s, %s, clump p1: %s)\n%s (%s)",
HGI_cc, HGI_sx, clumpp_str,
ukb_phe_info_named_l[[pheno]], pheno
),gp=grid::gpar(fontsize=20))
) -> g
ggsave(file=out_f, g, width=16, height=8)
ggsave(file=str_replace(out_f, '.png$', '.pdf'), g, width=16, height=8)
|
bffbcbf30f497fcacab43632873d68d97ebde83a
|
c26fb000f0f7070559c869b6913677edb584c50d
|
/code/functions_eval.R
|
c399e35c248a5fa21bdd0597eecdad55c5f655c6
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
benediktschulz/probabilistic_solar_forecasting
|
9f50fa7aef3a79c2c3368cee6cc2860aa8486219
|
999dfe30b8528429a8c36bcd5c86a6e5bba779b0
|
refs/heads/main
| 2023-04-14T03:52:35.901436
| 2023-02-02T08:39:17
| 2023-02-02T08:39:17
| 556,637,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,548
|
r
|
functions_eval.R
|
## Functions for evaluation of postprocessed forecasts ##
#### Import ####
# Import basic functions
source(paste0(getwd(), "/functions_basic.R"))
#### Coverage ####
# Calculate coverage of a central prediction interval
fn_cover <- function(x, alpha = 0.1){
###-----------------------------------------------------------------------------
###Input
#x.......PIT values (n vector)
#alpha...Significance level (probability)
#........Default: 0.1 -> 10% -> 90% prediction interval
###-----------------------------------------------------------------------------
###Output
#res...Coverage in percentage
###-----------------------------------------------------------------------------
#### Coverage calculation ####
res <- mean((alpha/2 <= x) & (x <= (1 - alpha/2)))
# Output as percentage
return(100*res)
}
#### Brier score ####
# Brier score for given distribution or ensemble
brier_score <- function(f, y, t = 0, distr = "ens", t_distr = 0){
  ###-----------------------------------------------------------------------------
  ###Input
  #f.........distr == par. distr.: Matrix with location and scale of forecast distribution (n x n_par matrix)
  #..........distr == "ens": Ensemble forecasts (n x n_ens matrix)
  #..........distr == "p" (or elsewise): Probability forecasts of exceeding t (n vector)
  #y.........Observations (n vector)
  #t.........Brier Score Threshold (non-negative scalar)
  #..........Default: 0
  #distr.....Forecast type (specific distribution or ensemble) (string)
  #..........Default: "ens" -> Ensemble
  #t_distr...Threshold for censored or truncated distribution (Scalar)
  #..........Default: 0
  ###-----------------------------------------------------------------------------
  ###Output
  #res...Brier Scores of n forecasts (n vector)
  ###-----------------------------------------------------------------------------

  #### Calculation ####
  ## Brier Score w.r.t. event of exceeding threshold t
  ## p_t = 1 - F(t). BS_t (F, y) = (p_t - 1(y > t))^2 = (F(t) - 1(y <= t))^2
  # Calculate F(t) depending on the forecast type
  if(distr == "ens"){
    # Empirical CDF of the ensemble evaluated at t
    if(is.vector(f)){ f <- mean(f <= t) }
    else{ f <- rowMeans(f <= t) }
  }
  # Truncated logistic
  else if(distr == "tlogis"){ f <- (t > t_distr)*crch::ptlogis(q = t,
                                                               location = f[,1],
                                                               scale = f[,2],
                                                               left = t_distr) }
  # Truncated normal
  else if(distr == "tnorm"){ f <- (t > t_distr)*crch::ptnorm(q = t,
                                                             mean = f[,1],
                                                             sd = f[,2],
                                                             left = t_distr) }
  # Probability forecasts: f holds P(Y > t), convert to F(t) = 1 - P(Y > t).
  # Bug fix: previously the exceedance probability was used directly as F(t),
  # giving (p_t - 1(y <= t))^2, which matches neither formula above.
  else{ f <- 1 - f }

  # Calculate Brier Score
  res <- (f - (y <= t))^2

  # Return score
  return(res)
}
#### Pinball loss ####
# Quantile score / pinball loss for given distribution or ensemble
pinball_loss <- function(f, y, alpha = 0.95, distr = "ens", t_distr = 0){
  ###-----------------------------------------------------------------------------
  ###Input
  #f.........distr == par. distr.: Matrix with location and scale of forecast distribution (n x n_par matrix)
  #..........distr == "ens": Ensemble forecasts (n x n_ens matrix)
  #..........distr == "q" (or elsewise): Quantile forecasts at level alpha (n vector)
  #y.........Observations (n vector)
  #alpha.....Quantile level (probability)
  #..........Default: 0.95 -> 95%
  #distr.....Forecast type (specific distribution or ensemble) (string)
  #..........Default: "ens" -> Ensemble
  #t_distr...Threshold for censored or truncated distribution (Scalar)
  #..........Default: 0
  ###-----------------------------------------------------------------------------
  ###Output
  #res...Quantile scores / pinball losses of n forecasts (n vector)
  ###-----------------------------------------------------------------------------

  #### Calculation ####
  ## Pinball loss of quantile forecast q_f at level alpha
  ## q_f = Q(alpha). PL (q_f, y) = (q_f - y)*(1(y <= q_f) - alpha)
  # Derive the alpha-quantile forecast q_f from the given forecast type
  if(distr == "ens"){
    # Empirical alpha-quantile of the ensemble (type 8 estimator)
    if(is.vector(f)){
      q_f <- quantile(x = f, probs = alpha, type = 8)
    }
    else{
      q_f <- sapply(seq_len(nrow(f)), function(i) quantile(x = f[i,],
                                                           probs = alpha,
                                                           type = 8))
    }
  }
  else if(distr == "tlogis"){
    # Quantile of a left-truncated logistic distribution
    q_f <- crch::qtlogis(p = alpha,
                         location = f[,1],
                         scale = f[,2],
                         left = t_distr)
  }
  else if(distr == "tnorm"){
    # Quantile of a left-truncated normal distribution
    q_f <- crch::qtnorm(p = alpha,
                        mean = f[,1],
                        sd = f[,2],
                        left = t_distr)
  }
  else{
    # f already holds the quantile forecasts
    q_f <- f
  }

  # Pinball loss
  (q_f - y)*((y <= q_f) - alpha)
}
#### Interval score ####
# Interval score of central prediction interval based on upper and lower boundaries
interval_score <- function(l, u, y, alpha = 0.1){
  ###-----------------------------------------------------------------------------
  ### Input
  # l.......Lower boundaries of the prediction interval (n vector)
  # u.......Upper boundaries of the prediction interval (n vector)
  # y.......Observations (n vector)
  # alpha...1 - level of the prediction interval; default 0.1 -> 90% interval
  ###-----------------------------------------------------------------------------
  ### Output
  # Interval scores of the n forecasts (n vector)
  ###-----------------------------------------------------------------------------

  # Interval width plus linear penalties for observations outside [l, u]
  width <- u - l
  pen_below <- 2/alpha*(l - y)*(y < l)
  pen_above <- 2/alpha*(y - u)*(y > u)

  width + pen_below + pen_above
}
#### BQN: Bernstein Quantile function ####
# Function that calculates quantiles for given coefficients
bern_quants <- function(alpha, q_levels){
  ###-----------------------------------------------------------------------------
  ### Input
  # alpha......Coefficients of the Bernstein basis (n x (p_degree + 1) matrix,
  # ...........or a vector for a single forecast)
  # q_levels...Quantile levels (n_q vector)
  ###-----------------------------------------------------------------------------
  ### Output
  # Quantile forecasts for the given coefficients (n x n_q matrix)
  ###-----------------------------------------------------------------------------

  # Degree of the Bernstein polynomials: one less than the coefficient count
  p_degree <- if(is.vector(alpha)){ length(alpha) - 1 } else { ncol(alpha) - 1 }

  # Basis polynomials evaluated at the quantile levels; the j-th basis value
  # at level p equals the binomial pmf dbinom(j, p_degree, p)
  basis <- sapply(0:p_degree, dbinom, size = p_degree, prob = q_levels)

  # Quantiles: coefficient-weighted sums of the basis polynomial values
  if(length(q_levels) == 1){ alpha %*% basis } else { alpha %*% t(basis) }
}
#### Evaluation of ensemble forecasts ####
# Function to calculate evaluation measures of scores
fn_scores_ens <- function(ens, y, alpha = 0.1, skip_evals = NULL, scores_ens = TRUE){
  ###-----------------------------------------------------------------------------
  ###Input
  #ens..........Ensemble data for prediction (n x n_ens matrix)
  #y............Observations for prediction (n vector)
  #alpha........1 - Level of prediction interval (probability)
  #.............Default: 0.1 -> 90% prediction interval
  #skip_evals...Skip the given evaluation measures (string vector)
  #.............Default: NULL -> Calculate all
  #scores_ens...Should scores of ensemble forecasts be calculated? (logical)
  #.............Default: TRUE
  ###-----------------------------------------------------------------------------
  ###Output
  #scores_ens...Data frame (n rows) with the columns below, minus skip_evals:
  #......rank...Rank of the observation within the ensemble (ties broken at
  #.............random, so rank is not deterministic for tied values)
  #......crps...CRPS of the ensemble forecasts
  #......logs...Log-Score of the ensemble forecasts
  #......lgt....Length of the central (1 - alpha) prediction interval
  #......e_md...Bias of the median forecast (median - y)
  #......e_me...Bias of the mean forecast (mean - y)
  #.............Returns FALSE when the scores_ens argument is FALSE.
  ###-----------------------------------------------------------------------------
  #### Initiation ####
  # scoringRules supplies crps_sample() and logs_sample()
  library(scoringRules)

  # Nothing to calculate if scores are not requested
  if(!scores_ens){ return(FALSE) }

  # Coerce a single ensemble given as a vector to a 1-row matrix
  if(is.vector(ens)){ ens <- matrix(data = ens,
                                    nrow = 1) }

  # Number of forecast cases and ensemble size
  n <- nrow(ens)
  n_ens <- ncol(ens)

  # Result data frame (columns listed in skip_evals are dropped at the end)
  scores_ens <- data.frame(rank = numeric(length = n),
                           crps = numeric(length = n),
                           logs = numeric(length = n),
                           lgt = numeric(length = n),
                           e_me = numeric(length = n),
                           e_md = numeric(length = n))

  #### Calculation ####
  # Rank of the observation among observation + ensemble members
  # Fix: spell out 'ties.method' instead of relying on partial argument matching
  if(is.element("rank", colnames(scores_ens))){
    scores_ens[["rank"]] <- apply(cbind(y, ens), 1, function(x){ rank(x, ties.method = "random")[1] }) }

  # CRPS of the raw ensemble
  if(is.element("crps", colnames(scores_ens))){
    scores_ens[["crps"]] <- crps_sample(y = y,
                                        dat = ens) }

  # Log-Score of the raw ensemble
  if(is.element("logs", colnames(scores_ens))){
    scores_ens[["logs"]] <- logs_sample(y = y,
                                        dat = ens) }

  # Length of the central (1 - alpha) prediction interval
  if(is.element("lgt", colnames(scores_ens))){
    # If the alpha/2 and 1 - alpha/2 quantiles coincide with order statistics
    # of the ensemble, read them off the sorted members directly
    if( ((((n_ens + 1)*alpha/2) %% 1) == 0) &
        ((((n_ens + 1)*(1-alpha/2)) %% 1) == 0) ){
      # Indices of the corresponding order statistics
      i_lgt <- (n_ens + 1)*c(alpha/2, 1-alpha/2)

      # Get quantiles
      q_lgt <- t(apply(ens, 1, sort))[,i_lgt]

      # Transform back to a matrix if a single forecast case collapsed it
      if(n == 1){ q_lgt <- matrix(data = q_lgt,
                                  nrow = 1) }

      # Interval length is the range of the two quantiles
      scores_ens[["lgt"]] <- apply(t(apply(q_lgt, 1, range)), 1, diff)
    }
    # Quantiles are not included: interpolate via the quantile function
    else{
      # Type 8, as suggested in ?quantile (bias observed for linearly pooled
      # aggregation with the default type)
      scores_ens[["lgt"]] <- apply(ens, 1, function(x)
        diff(quantile(x = x,
                      probs = c(alpha/2, 1-alpha/2),
                      type = 8)) )
    }
  }

  # Bias of the median forecast
  if(is.element("e_md", colnames(scores_ens))){
    scores_ens[["e_md"]] <- apply(ens, 1, median) - y }

  # Bias of the mean forecast
  if(is.element("e_me", colnames(scores_ens))){
    scores_ens[["e_me"]] <- rowMeans(ens) - y }

  #### Output ####
  # Drop the evaluation measures listed in skip_evals
  scores_ens <- as.data.frame(scores_ens[,!is.element(colnames(scores_ens), skip_evals), drop = FALSE])

  # Return output
  return(scores_ens)
}
#### Evaluation of parametric distributional forecasts ####
# Function for prediction based on the distributional parameters #
fn_scores_distr <- function(f, y, distr = "tlogis", alpha = 0.1,
                            skip_evals = NULL){
  ###-----------------------------------------------------------------------------
  ###Input
  #f............Parameters of forecast distribution (n x n_par matrix);
  #.............column 1 is location/mean, column 2 is scale/sd
  #y............Observations (n vector)
  #distr........Parametric distribution ("tlogis", "tnorm", "norm")
  #.............Default: (zero-)truncated logistic
  #alpha........1 - Level of prediction interval (probability)
  #.............Default: 0.1 -> 90% prediction interval
  #skip_evals...Skip the following evaluation measures (string vector)
  #.............Default: NULL -> Calculate all
  ###-----------------------------------------------------------------------------
  ###Output
  #scores_pp...Data frame (n rows) with the columns below, minus skip_evals:
  #......pit....PIT values of the distributional forecasts
  #......crps...CRPS of the forecasts
  #......logs...Log-Score of the forecasts
  #......lgt....Length of the central (1 - alpha) prediction interval
  #......e_md...Bias of the median forecast (median - y)
  #......e_me...Bias of the mean forecast (mean - y; not implemented for "tnorm")
  ###-----------------------------------------------------------------------------
  #### Initiation ####
  # scoringRules supplies the crps_*/logs_* functions used below
  library(scoringRules)

  # Input checks; kept as print() diagnostics to preserve existing behavior
  # (NOTE(review): stop() would make invalid input fail fast)
  # Fix: the message string had an unbalanced quote ('norm. -> 'norm'.)
  if(!is.element(distr, c("tlogis", "tnorm", "norm"))){
    print("Chosen 'distr' not available. Choose 'tlogis', 'tnorm' or 'norm'.") }
  # Fix: check '<= 0' so the condition matches the "Non-positive" message
  if(is.element(distr, c("tlogis", "tnorm", "norm")) & any(f[,2] <= 0)){ print("Non-positive scale forecast!") }

  #### Data preparation ####
  # Number of predictions
  n <- nrow(f)

  # Result data frame (columns listed in skip_evals are dropped at the end)
  scores_pp <- data.frame(pit = numeric(length = n),
                          crps = numeric(length = n),
                          logs = numeric(length = n),
                          lgt = numeric(length = n),
                          e_me = numeric(length = n),
                          e_md = numeric(length = n))

  #### Prediction and score calculation ####
  # Forecasts depending on distribution
  if(distr == "tlogis"){ # logistic truncated at zero
    # Calculate PIT values
    if(is.element("pit", colnames(scores_pp))){
      scores_pp[["pit"]] <- crch::ptlogis(q = y,
                                          location = f[,1],
                                          scale = f[,2],
                                          left = 0) }

    # Calculate CRPS of forecasts
    if(is.element("crps", colnames(scores_pp))){
      scores_pp[["crps"]] <- crps_tlogis(y = y,
                                         location = f[,1],
                                         scale = f[,2],
                                         lower = 0) }

    # Calculate Log-Score of forecasts
    if(is.element("logs", colnames(scores_pp))){
      scores_pp[["logs"]] <- logs_tlogis(y = y,
                                         location = f[,1],
                                         scale = f[,2],
                                         lower = 0) }

    # Calculate length of (1 - alpha) % prediction interval
    if(is.element("lgt", colnames(scores_pp))){
      scores_pp[["lgt"]] <- crch::qtlogis(p = (1 - alpha/2),
                                          location = f[,1],
                                          scale = f[,2],
                                          left = 0) - crch::qtlogis(p = alpha/2,
                                                                    location = f[,1],
                                                                    scale = f[,2],
                                                                    left = 0) }

    # Calculate bias of median forecast
    if(is.element("e_md", colnames(scores_pp))){
      scores_pp[["e_md"]] <- crch::qtlogis(p = 0.5,
                                           location = f[,1],
                                           scale = f[,2],
                                           left = 0) - y }
    # Closed-form alternative for the truncated-logistic median:
    # scores_pp[["e_md"]] <- (f[,1] + f[,2]*log(1 + 2*exp(- f[,1]/f[,2]))) - y

    # Calculate bias of mean forecast (closed-form mean of the truncated logistic)
    if(is.element("e_me", colnames(scores_pp))){
      scores_pp[["e_me"]] <- (f[,1] - f[,2]*log(1 - plogis(- f[,1]/f[,2])))/(1 - plogis(- f[,1]/f[,2])) - y }
  }
  else if(distr == "tnorm"){ # normal truncated at zero
    # Calculate PIT values
    if(is.element("pit", colnames(scores_pp))){
      scores_pp[["pit"]] <- crch::ptnorm(q = y,
                                         mean = f[,1],
                                         sd = f[,2],
                                         left = 0) }

    # Calculate CRPS of forecasts
    if(is.element("crps", colnames(scores_pp))){
      scores_pp[["crps"]] <- crps_tnorm(y = y,
                                        location = f[,1],
                                        scale = f[,2],
                                        lower = 0) }

    # Calculate Log-Score of forecasts
    if(is.element("logs", colnames(scores_pp))){
      scores_pp[["logs"]] <- logs_tnorm(y = y,
                                        location = f[,1],
                                        scale = f[,2],
                                        lower = 0) }

    # Calculate length of (1 - alpha) % prediction interval
    if(is.element("lgt", colnames(scores_pp))){
      scores_pp[["lgt"]] <- crch::qtnorm(p = (1 - alpha/2),
                                         mean = f[,1],
                                         sd = f[,2],
                                         left = 0) - crch::qtnorm(p = alpha/2,
                                                                  mean = f[,1],
                                                                  sd = f[,2],
                                                                  left = 0) }

    # Calculate bias of median forecast
    if(is.element("e_md", colnames(scores_pp))){
      scores_pp[["e_md"]] <- crch::qtnorm(p = 0.5,
                                          mean = f[,1],
                                          sd = f[,2],
                                          left = 0) - y }

    # # Calculate bias of mean forecast
    # scores_pp[["e_me"]] <- #TODO: mean of the truncated normal not implemented
  }
  else if(distr == "norm"){ # normal
    # Calculate PIT values
    if(is.element("pit", colnames(scores_pp))){
      scores_pp[["pit"]] <- pnorm(q = y,
                                  mean = f[,1],
                                  sd = f[,2]) }

    # Calculate CRPS of forecasts
    if(is.element("crps", colnames(scores_pp))){
      scores_pp[["crps"]] <- crps_norm(y = y,
                                       location = f[,1],
                                       scale = f[,2]) }

    # Calculate Log-Score of forecasts
    if(is.element("logs", colnames(scores_pp))){
      scores_pp[["logs"]] <- logs_norm(y = y,
                                       location = f[,1],
                                       scale = f[,2]) }

    # Calculate length of (1 - alpha) % prediction interval
    if(is.element("lgt", colnames(scores_pp))){
      scores_pp[["lgt"]] <- qnorm(p = (1 - alpha/2),
                                  mean = f[,1],
                                  sd = f[,2]) - qnorm(p = alpha/2,
                                                      mean = f[,1],
                                                      sd = f[,2]) }

    # Calculate bias of median forecast
    if(is.element("e_md", colnames(scores_pp))){
      scores_pp[["e_md"]] <- qnorm(p = 0.5,
                                   mean = f[,1],
                                   sd = f[,2]) - y }

    # Calculate bias of mean forecast
    # Fix: guard added for consistency with all other measures (previously the
    # column was filled unconditionally and only dropped by skip_evals later)
    if(is.element("e_me", colnames(scores_pp))){
      scores_pp[["e_me"]] <- f[,1] - y }
  }

  #### Output ####
  # Drop the evaluation measures listed in skip_evals
  scores_pp <- as.data.frame(scores_pp[,!is.element(colnames(scores_pp), skip_evals), drop = FALSE])

  # Return
  return(scores_pp)
}
|
c0889b819cc023f251d0dc268897f2d40ac88151
|
ef01bab1215f822fe415021c73c2b915fdd787ba
|
/01_data_preparation/step3_merge_datasets/step39_correct_observations_with_multiple_simultaneous_changes.R
|
f80aff2f2f2059ae4ae48de4b7497ae89fc3fa41
|
[] |
no_license
|
nvkov/MA_Code
|
b076512473cf463e617ed7b24d6553a7ee733155
|
8c996d3fdbbdd1b1b84a46f84584e3b749f89ec3
|
refs/heads/master
| 2021-01-17T02:48:40.082306
| 2016-09-25T18:53:53
| 2016-09-25T18:53:53
| 58,817,375
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,750
|
r
|
step39_correct_observations_with_multiple_simultaneous_changes.R
|
# Step 39: correct observations with multiple simultaneous changes.
# Operates in place on the data.table 'vendors' loaded from step 38 and saves
# the corrected table at the end.
rm(list=ls())
library("stringi")
library("data.table")

# NOTE(review): absolute Windows path -- breaks on other machines; consider a
# relative path or project-level configuration.
load("C:/Users/Nk/Documents/Uni/MA/Pkw/MobileDaten/generatedData/Merged_data/df_merge_after_step38.RData")

# For observations with exactly one unique price change but several recorded
# changes, compute per duplicate group: first change date, last observation
# date, and the lagged mileage (previous row's Kilometer within the group).
vendors<- vendors[n_unique_changes_price==1 & n_changes_total>1,
                  `:=`(firstDate=min(cars_lastChange),
                       lastDate=max(cars_lastDate),
                       KM_lag=data.table::shift(Kilometer, 1, NA, "lag")),
                  by=.(valuePrice, car_ID, vendor_ID, Erstzulassung,
                       Typ, prices_firstDate, prices_lastDate, Farbe)]

# Same bookkeeping for rows whose car ID belongs to a pool (n_carID_pool > 1),
# grouped by a coarser key without registration/color/price dates.
vendors<- vendors[n_carID_pool>1,
                  `:=`(firstDate=min(cars_lastChange),
                       lastDate=max(cars_lastDate),
                       KM_lag=data.table::shift(Kilometer, 1, NA, "lag")),
                  by=.(valuePrice, car_ID, vendor_ID, Typ)]

# The first row of each group has no lag -> fall back to its own mileage
vendors$KM_lag[is.na(vendors$KM_lag)]<-vendors$Kilometer[is.na(vendors$KM_lag)]
# Mileage difference to the previous observation (negative = non-monotone)
vendors$KM_monotonicity<- vendors$Kilometer-vendors$KM_lag

# Minimum mileage difference per group; a negative KM_mon_check flags groups
# whose recorded mileage decreases over time.
vendors<- vendors[n_unique_changes_price==1 & n_changes_total>1,
                  KM_mon_check:=min(KM_monotonicity),
                  by=.(valuePrice, car_ID, vendor_ID, Erstzulassung,
                       Typ, prices_firstDate, prices_lastDate, Farbe)]
vendors<- vendors[n_carID_pool>1,
                  KM_mon_check:=min(KM_monotonicity),
                  by=.(valuePrice, car_ID, vendor_ID, Typ)]

# Key numbers -------------------------------------------------------------
# Counts printed for manual inspection: duplicate observations, groups with
# non-monotone mileage, and rows in car-ID pools.
nrow(vendors[vendors$n_unique_changes_price==1 & vendors$n_changes_total>1,])
nrow(vendors[vendors$KM_mon_check<0,])
nrow(vendors[vendors$n_carID_pool>1,])

# Keep only latest observations for doublets ------------------------------
# Blank out every doublet row that is not the group's latest observation,
# then drop the blanked rows (identified via the now-NA vendor_ID).
vendors[!is.na(vendors$firstDate) & vendors$lastDate!=vendors$cars_lastDate,]<- NA
vendors<- vendors[!is.na(vendors$vendor_ID),]
# Backdate the surviving row's change timestamp to the group's first date
vendors$cars_lastChange[!is.na(vendors$firstDate)]<- vendors$firstDate[!is.na(vendors$firstDate)]

# Keep only realistic observations for car ID pool ------------------------
# Drop pooled rows whose car record postdates the first price observation
vendors[n_carID_pool>1 & cars_lastDate> prices_firstDate]<- NA
vendors<- vendors[!is.na(vendors$vendor_ID),]

# Look at car ID pools ----------------------------------------------------
# Exploratory analysis of the car-ID pools, kept for reference.
# # Select only vendors with repeating prices:
# vendors_carID_pool<- vendors[vendors$car_ID_pool>1,]
# vendors_carID_pool<- vendors_carID_pool[,duplics:=.N,
#                                         by=.(valuePrice, car_ID)]
#
# vendors_carID_pool<- vendors_carID_pool[,diff_types:=length(unique(Typ)),
#                                         by=.(valuePrice, car_ID, Typ)]
#
# vendors_carID_pool<- vendors_carID_pool[,diff_Erstzulassung:=length(unique(Erstzulassung)),
#                                         by=.(valuePrice, car_ID, Typ)]
#
# vendors_carID_pool<- vendors_carID_pool[,rows:=.N,
#                                         by=.(valuePrice, car_ID, Typ)]
#
#
# vendors_carID_pool<- vendors_carID_pool[vendors_carID_pool$duplics>1,]
#
#
# vendors_carID_pool<- vendors_carID_pool[ ,`:=`(firstDate=min(cars_lastChange),
#                                                lastDate=max(cars_lastDate),
#                                                KM_lag=data.table::shift(Kilometer, 1, NA, "lag")),
#                                          by=.(valuePrice, car_ID, Typ)]
#
#
# vendors_carID_pool<- vendors_carID_pool[prices_firstDate>=cars_lastDate,]
#
# vendors

# Save dataset ------------------------------------------------------------
#save(vendors_carID_pool, file="C:/Users/Nk/Documents/Uni/MA/Pkw/MobileDaten/generatedData/Merged_data/carIDpool.RData")
save(vendors, file="C:/Users/Nk/Documents/Uni/MA/Pkw/MobileDaten/generatedData/Merged_data/df_merge_after_step39.RData")
|
b3be5f76c0292f6c427b12360fd98486c6b5ce12
|
024b84112a8ebad3f859d5a395f4eb772034088b
|
/stats/api-stats.R
|
92810a226fdc26ef68238503dc1ad04b5340bf6c
|
[] |
no_license
|
ImgBotApp/ghtorrent.org
|
af39acc19d99bfdee771c28d9b2cc9a3f49180b4
|
3754cce4dbff5b082f431a7033cba49596cdab38
|
refs/heads/master
| 2021-01-22T05:32:47.711386
| 2017-08-29T09:12:28
| 2017-08-29T09:12:28
| 102,281,307
| 2
| 0
| null | 2017-09-03T17:20:11
| 2017-09-03T17:20:11
| null |
UTF-8
|
R
| false
| false
| 2,627
|
r
|
api-stats.R
|
# api-stats.R: summarize GHTorrent API client response times and event counts
# from log-derived data files; writes PNG plots into the working directory.
#
# Run this to create the data file
# cat */log.txt |grep APIClient|grep -v WARN |perl -lape 's/\[([T0-9-:.]*).*\] DEBUG.*\[([0-9.]*)\].*Total: ([0-9]*) ms/$1 $2 $3/'|cut -f2,3,4 -d' '|ruby -ne 'BEGIN{require "time"}; t,i,d=$_.split(/ /); print Time.parse(t).to_i," ", i, " ", d;' |egrep -v "#" >data.txt
library(ggplot2)
library(sqldf)
# NOTE(review): library() is preferred over require() for hard dependencies
require(scales)

# data.txt columns: ts (epoch seconds), ip (client, as factor), ms (resp. time)
data <- read.csv("data.txt", sep=" ", colClasses = c("integer", "factor", "integer"))
# Filter out data older than 3 days
data <- subset(data, ts > (as.numeric(Sys.time()) - 3 * 86400))
data$ts <- as.POSIXct(data$ts, origin = "1970-01-01")
summary(data$ms)

# Response-time distribution per client IP, on a log scale
p <- ggplot(data) + aes(x = ip, y = ms) + scale_y_log10() + geom_boxplot() + theme(axis.text.x = element_text(angle = 90, hjust = 1))
png("resp-ip-boxplot.png")
print(p)
dev.off()

# Total num requests per IP
aggregate(ms ~ ip, data = data, length)
# Mean time per IP
aggregate(ms ~ ip, data = data, mean)

# Mean response time per 10-minute bin
data$timebin <- cut.POSIXt(data$ts, breaks = "10 mins")
mean.interval <- aggregate(ms ~ timebin, data = data, mean)
mean.interval$timebin <- as.POSIXct(mean.interval$timebin, origin = "1970-01-01")
p <- ggplot(mean.interval) + aes(x = timebin, y = ms) + geom_line() + scale_x_datetime() +
  xlab('time') + ylab('Mean API resp in ms') + ggtitle('Mean API response time timeseries (10 min intervals)')
png("api-resp.png")
print(p)
dev.off()

# Request counts per 30-minute bin with a smoothed trend line
data$timebin <- cut.POSIXt(data$ts, breaks = "30 mins")
count.interval <- aggregate(ms ~ timebin, data = data, length)
count.interval$timebin <- as.POSIXct(count.interval$timebin, origin = "1970-01-01")
p <- ggplot(count.interval) + aes(x = timebin, y = ms) + geom_line() + scale_x_datetime() + scale_y_continuous(labels = comma) +
  stat_smooth(method = "loess", formula = y ~ x^2, size = 2, alpha = 0)+xlab('time') + ylab('Num API calls') + ggtitle('Num API calls per timeslot (30 mins interval)')
png("num-reqs.png")
print(p)
dev.off()

# events.txt columns: ts (epoch seconds), event (type, as factor)
events <- read.csv("events.txt", sep=" ", colClasses = c("integer", "factor"))
# Filter out data older than 3 days
events <- subset(events, ts > (as.numeric(Sys.time()) - 3 * 86400))
events$ts <- as.POSIXct(events$ts, origin = "1970-01-01")
summary(events$ts)

# Daily event counts per event type (aggregated via SQL over the data frame)
events$timebin <- cut.POSIXt(events$ts, breaks = "1 day")
groupped <- sqldf("select timebin,event,count(*) as number from events group by timebin,event")
p <- ggplot(groupped) + aes(x = timebin, y = number, fill = event) +
  scale_y_continuous(labels = comma) +
  geom_bar(stat = "identity", position="dodge") +
  xlab('day') + ylab('Num events') +
  ggtitle('Number of events processed per day')
png("events-per-day.png")
print(p)
dev.off()
|
0c4bb99241202a9732283ba52991b26a51052531
|
e915d387e526e1269571b8d5153331c21f62f492
|
/man/transfer-class.Rd
|
8121ec302f3b512c4bb28a6e67364d93bba1d38a
|
[] |
no_license
|
jmcurran/tfer
|
fb5c38cb7e09d70c015512f2ff879e68545d8883
|
9581203a3034821545434eb42a96e4c7c32fc161
|
refs/heads/master
| 2021-01-20T03:29:11.740950
| 2015-02-24T17:09:31
| 2015-02-24T17:09:31
| 31,270,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,427
|
rd
|
transfer-class.Rd
|
\name{transfer-class}
\Rdversion{1.1}
\docType{class}
\alias{transfer-class}
\alias{[,transfer,ANY,missing,missing-method}
\alias{Compare,transfer,numeric-method}
\alias{parameters,transfer-method}
\alias{plot,transfer-method}
\alias{show,transfer-method}
\alias{summary,transfer-method}
\alias{tprob,transfer,missing-method}
\alias{tprob,transfer,numeric-method}
\title{Class "transfer"}
\description{
An S4 class.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("transfer", ...)}.
}
\section{Slots}{
\describe{
\item{\code{para}:}{Object of class \code{"numeric"} }
\item{\code{Y}:}{Object of class \code{"numeric"} }
}
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "transfer", i = "ANY", j = "missing", drop = "missing")}: ... }
\item{Compare}{\code{signature(e1 = "transfer", e2 = "numeric")}: ... }
\item{parameters}{\code{signature(object = "transfer")}: ... }
\item{plot}{\code{signature(x = "transfer")}: ... }
\item{show}{\code{signature(object = "transfer")}: ... }
\item{summary}{\code{signature(object = "transfer")}: ... }
\item{tprob}{\code{signature(object = "transfer", x = "missing")}: ... }
\item{tprob}{\code{signature(object = "transfer", x = "numeric")}: ... }
}
}
\author{
TingYu Huang
}
\examples{
showClass("transfer")
}
\keyword{classes}
|
fda045f162f98e1d23da2973f4df892f721ceaf2
|
a3851712c0aebf9dc4f513b30e00bfd6d72b31fa
|
/R/typeofSwitch.R
|
5d02fd84d7a418220845aec66dabead0938c0197
|
[] |
no_license
|
duncantl/NativeCodeAnalysis
|
d0476d2865e491bbde5b39b6e56ca81b02b647af
|
60ebb6a29960f747d88f5baa1b0dc00074a3c754
|
refs/heads/master
| 2021-11-24T03:07:04.453351
| 2021-11-22T14:41:03
| 2021-11-22T14:41:03
| 211,151,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,040
|
r
|
typeofSwitch.R
|
findSEXPTypesFromSwitch =
#
# given a parameter (class Argument) p, find where it is used and if any of these are TYPEOF() calls.
# For these TYPEOF() calls, find any switch statements that use this value and see what cases they handle.
#
# Returns a list with one element per switch statement found (each the vector
# of case values from getSwitchValues()), or integer(0) when p never reaches a
# TYPEOF() call or no TYPEOF() result feeds a switch.
#
# Connect to testType() in testType.R
#
function(p)
{
    # All instructions that use the parameter p
    u = getAllUsers(p)
    # Which of these uses are calls to TYPEOF?
    isTypeof = sapply(u, function(x) is(x, "CallBase") && is(cf <- getCalledFunction(x), "Function") && getName(cf) == "TYPEOF")
    if(!any(isTypeof))
        return(integer())

    # Instructions that consume the TYPEOF() results
    u2 = unlist(lapply(u[isTypeof], getAllUsers))
    # Keep only the switch instructions among them
    w = sapply(u2, is, "SwitchInst")
    if(!any(w))
        return(integer())

    # Case values handled by each switch statement
    ans = lapply(u2[w], getSwitchValues)

       # Check to see if the default branch for each switch leads to an error block
       # If not, then implicitly handles other types of R objects but in the same way.
       # sapply(u2[w], function(x) leadsToError(x[[2]]))
       #
    ans
}
getSwitchValues =
    # Extract the case values of a switch instruction: operands hold
    # (case value, target) pairs starting at index 3, so every second
    # operand up to length(sw) - 1 is a case value.
function(sw)
{
   case_ops <- seq(3L, length(sw) - 1L, by = 2L)
   sapply(sw[ case_ops ], getValue)
}
|
6b39992dc390091f97965c8ff4faa0b8096eb622
|
8403b972f889cea691634290304d86e073cc6af7
|
/SDM II - HW2/HW2_P3.R
|
6aa450133f0cdb3dda35a16cb89083b606ceded4
|
[] |
no_license
|
rohithx/statistical-data-mining
|
6f1867ae3a7bb39ed52aa7d85251131c6a570339
|
378fdfe59b5c16bfa1b180bd43dcb7bab5cb2b7e
|
refs/heads/main
| 2023-05-13T12:48:52.897500
| 2021-06-07T21:24:05
| 2021-06-07T21:24:05
| 373,328,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 770
|
r
|
HW2_P3.R
|
###################
#Written by Rohith#
###################
# Cluster seed measurements and compare the groupings to the true labels.
# NOTE(review): rm(list=ls()) and interactive choose.files() are discouraged
# in scripts; kept for compatibility with the original workflow.
rm(list=ls())
data <- read.delim(choose.files())
data_y <- data             # keep a copy that retains the Seed.Group labels
data_x <- data
data_x$Seed.Group <- NULL  # feature matrix without the label column
# BUG FIX: was scale(data), which re-included the label column and discarded
# the Seed.Group removal above
data_x <- scale(data_x)
###################
#a) Hierarchical clustering on the standardized features
# BUG FIX: was dist(data), i.e. unscaled data including the label column
d <- dist(data_x, method = "euclidean")
#Clustering using single, average and complete linkage
hc1 <- hclust(d, method = "single")
hc2 <- hclust(d, method = "average")
hc3 <- hclust(d, method = "complete")
plot(hc1, hang = -1)
plot(hc2, hang = -1)
plot(hc3, hang = -1)
#Grouping the data: cut each dendrogram into 3 clusters
hc_x <- cutree(hc1, 3)
hc_y <- cutree(hc2, 3)
hc_z <- cutree(hc3, 3)
# Cross-tabulate cluster assignments against the true seed groups
# BUG FIX: was table(data_x, data_y$Seed.Group), which tabulated raw features
table(hc_x, data_y$Seed.Group)
table(hc_y, data_y$Seed.Group)
table(hc_z, data_y$Seed.Group)
#Kmeans with 3 centers and 10 random starts
km <- kmeans(data_x, centers = 3, nstart = 10)
# BUG FIX: plot() without arguments errors; show the features colored by cluster
plot(data_x, col = km$cluster)
table(km$cluster, data_y$Seed.Group)
# Adjusted Rand index between k-means clusters and the true labels.
# NOTE(review): adj.rand.index() requires library(fossil) or library(pdfCluster),
# neither of which is loaded here -- confirm which one is intended.
# BUG FIX: 'seeddata' was undefined; use data_y
adj.rand.index(km$cluster, as.numeric(data_y$Seed.Group))
|
3bac54dec9651679178b990fbeb246a14f2066f7
|
660fbe7a68f3b0b887ae07a1584f8bbd7b3eaff1
|
/man/listMarketBook.Rd
|
0ae03582ffad2f51aac7fe354340c6ece7d24d7c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
christopherok/abettor
|
2670e79edd27b19ab2206be3f662af297a39438b
|
d4d71903ffa4eff1569f33d342ab5d7dacf46b66
|
refs/heads/master
| 2020-07-14T09:20:05.291515
| 2015-12-14T21:12:02
| 2015-12-14T21:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,019
|
rd
|
listMarketBook.Rd
|
% Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/listMarketBook.R
\name{listMarketBook}
\alias{listMarketBook}
\title{Return listMarketBook data}
\usage{
listMarketBook(marketIds, priceData, sslVerify = TRUE)
}
\arguments{
\item{marketIds}{String. The market identification number of the required
event. IDs can be obtained via \code{\link{listMarketCatalogue}}, Required.
No default.}
\item{priceData}{String. Supports five price data types, one of which must be
specified. Valid price data types are SP_AVAILABLE, SP_TRADED,
EX_BEST_OFFERS, EX_ALL_OFFERS and EX_TRADED. Must be upper case. See note
below explaining each of these options. Required. No default.}
\item{sslVerify}{Boolean. This argument defaults to TRUE and is optional. In
some cases, where users have a self signed SSL Certificate, for example
they may be behind a proxy server, Betfair will fail login with "SSL
certificate problem: self signed certificate in certificate chain". If this
error occurs you may set sslVerify to FALSE. This does open a small
security risk of a man-in-the-middle intercepting your login credentials.}
}
\value{
Response from Betfair is stored in listMarketBook variable, which is
then parsed from JSON as a list. Only the first item of this list contains
the required event type identification details.
}
\description{
\code{listMarketBook} returns pricing data for the selected market.
}
\details{
\code{listMarketBook} returns pricing data for the selected market. It is
also possible to filter price returns based on either those currently
available or the volume currently available in the Starting Price (SP)
market.
}
\section{Notes on \code{priceData} options}{
There are five options for this
argument and one of them must be specified. All upper case letters must be
used. \describe{ \item{SP_AVAILABLE}{Amount available for the Betfair
Starting Price (BSP) auction.} \item{SP_TRADED}{Amount traded in the
Betfair Starting Price (BSP) auction. Zero returns if the event has not yet
started.} \item{EX_BEST_OFFERS}{Only the best prices available for each
runner.} \item{EX_ALL_OFFERS}{EX_ALL_OFFERS trumps EX_BEST_OFFERS if both
settings are present} \item{EX_TRADED}{Amount traded in this market on the
Betfair exchange.}}
}
\section{Note on \code{listMarketBookOps} variable}{
The
\code{listMarketBookOps} variable is used to firstly build an R data frame
containing all the data to be passed to Betfair, in order for the function
to execute successfully. The data frame is then converted to JSON and
included in the HTTP POST request.
}
\examples{
\dontrun{
# Return all prices for the requested market. This actual market ID is
unlikely to work and is just for demonstration purposes.
listMarketBook(marketIds = "1.116700328", priceData = "EX_ALL_OFFERS")
}
}
\seealso{
\code{\link{loginBF}}, which must be executed first. Do NOT use the
DELAY application key. The DELAY application key does not support price data.
}
|
f02c386da2f82b89b9f5c320cfd5a17a9609c4ff
|
48c532d4e600ca143d39d9e9be50f42763e9418f
|
/Modulo02 - Google Trends - volumen/trends.r
|
54c155f4fcefded773ccc88ef5079fdc521638a9
|
[] |
no_license
|
mat4os/saber
|
5b7ae408739c365cf9d613a628a86406d91f81a2
|
3c43d89bbe5e4336cd6998326f3388af02febcd7
|
refs/heads/master
| 2021-01-11T07:31:13.238675
| 2016-09-29T07:15:35
| 2016-09-29T07:15:35
| 69,275,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 731
|
r
|
trends.r
|
# Exploratory Google Trends queries via gtrendsR.
# NOTE(review): install.packages() at the top of a script re-installs on every
# run; install interactively once and keep only the library() calls here.
install.packages("gtrendsR")
install.packages("XML")
library(gtrendsR)
library(XML)

# SECURITY(review): plaintext Google credentials committed to source control --
# rotate them and load from environment variables or a config file instead.
user <- "losmat4os@gmail.com"
psw <- "vivarbet1"
gconnect(user, psw)

# 7-day trend volume for a sample query
a <- gtrends("Nintendo", res = "7d")
a[7]
a$trend
sum(a$trend$nintendo.)

# Extract the second column by its dynamically obtained name.
colname <- colnames(a$trend)[2]
# BUG FIX: a$trend$colname looks up a column literally named "colname"
# (returning NULL); use [[ ]] when the name is stored in a variable.
a$trend[[colname]]
a$trend[6]

#* @get /score
# NOTE(review): lee_trends()/calcula_scoring() are not defined in this file --
# presumably sourced elsewhere (plumber endpoint); confirm before running.
trends <- lee_trends('Un perro verde')
trends
calcula_scoring(trends)

# Strip dots from a company name before querying
ticker <- 'Apple Inc.'
ticker <- gsub('\\.', '', ticker)
ticker

pp <- gtrends('E. I. du Pont de Nemours and Company', res = "7d", geo = "US")

# Retry the same query, swallowing errors with a marker message.
# BUG FIX: the former finally block contained the placeholder `cleanup-code`,
# which evaluates `cleanup - code` on two undefined names and always errors;
# there is no resource to clean up, so the finally clause is removed.
result <- tryCatch({
  pp <- gtrends('E. I. du Pont de Nemours and Company', res = "7d", geo = "US")
}, error = function(e) {
  print('pepe')
})
|
dcf4200e39f0386a07e504022de799019dcccab5
|
77a1d43f1e231eed951fa32f733be76eb43dd8ec
|
/tests/testthat/test-app.R
|
6b7b67eed81c6a56fff7862d95f17257e73fedf7
|
[
"MIT"
] |
permissive
|
ThinkR-open/clientapp
|
be7213dedb137baab104b12e154641541d9849a1
|
eccbcbc4bab3b7cf3a1830802984fe9d19a4e58e
|
refs/heads/master
| 2020-03-31T02:29:42.572013
| 2018-10-06T18:31:53
| 2018-10-06T18:31:53
| 151,826,425
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 813
|
r
|
test-app.R
|
# Smoke tests for the clientapp Shiny application.
context("Application")
library(shiny)

# The UI builder should return a shiny tag object.
# NOTE(review): expect_is() is deprecated in testthat 3e; expect_s3_class()
# is the modern equivalent.
test_that("app ui", {
  ui <- clientapp:::app_ui()
  expect_is(ui, "shiny.tag")
})

# Server-side test kept for reference; it predates the module refactor.
# need to be updated to work with ShinyModule... PR welcome
# test_that("app server", {
#   session <- as.environment(list(
#     sendCustomMessage = function(type, message) {
#       session$lastCustomMessage = list(type = type, message = message)
#     },
#     sendInputMessage = function(inputId, message) {
#       session$lastInputMessage = list(id = inputId, message = message)
#     }
#   ))
#
#   input <- as.environment(list())
#   output <- as.environment(list())
#   serv <- clientapp:::app_server(input = input,
#                                  output = output,
#                                  session = session)
#   expect_is(serv, "shiny.render.function")
# })
|
3ea206db21e680be4ca46b43be86cfcc6a7ad86a
|
61591677da804b25ab037fb14c02252986a6eca4
|
/hw2/hw2.R
|
62f9e7cb30318adcad0ce8d51668603711315d7b
|
[] |
no_license
|
franciszxlin/MarketMicrostructureAlgorithmicTrading
|
5d2212fb24dcb3476e6008943f90f04e5187cafa
|
9f3298b67e3a65483ff88f37e246a78acc4e3220
|
refs/heads/master
| 2021-07-07T12:44:51.135848
| 2017-10-02T08:40:03
| 2017-10-02T08:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,519
|
r
|
hw2.R
|
library(rkdb)
h1<-open_connection('172.19.10.167',6005)
h2<-open_connection('172.19.10.167',7005)
# Problem 1
# See what instruments are in different classes
execute(h1, 'select inst by class from instinfo')
# Class:Inst picked
tline=seq(-7,16,0.25)
# AG: ZW
zw<-execute(h2, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`ZW;15]")
plot(tline,zw$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='Wheat (ZW) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# EN: CL
cl<-execute(h2, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`CL;15]")
plot(tline,cl$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='Crude Oil (CL) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# EQ: NQ
nq<-execute(h2, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`NQ;15]")
plot(tline,nq$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='E-mini NASDAQ 100 (NQ) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# FX: 6B
b6<-execute(h2, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`6B;15]")
plot(tline,b6$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='British Pound (6B) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# IR: ZB
zb<-execute(h1, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`ZB;15]")
plot(tline,zb$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='US Treaury (ZB) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# MT: GC
gc<-execute(h2, "{[bd;ed;inst;minintvl] select v from 0^(([] tbin:distinct minintvl xbar (-7:00 + til 1381)) lj select last v by tbin from update v:v%(ed-bd) from select v:sum siz by tbin from update tbin:minintvl xbar rcvtime.minute from select from trade where date within (bd;ed), sym2inst[sym]=inst)} [2017.07.31;2017.09.01;`GC;15]")
plot(tline,gc$v/1000, xlab='', ylab='Thousands of lots in 15 minutes',main='GOLD (GC) 2017-07-31 to 2017-09-01',axes=FALSE,type='o',pch=0)
axis(1,at=seq(-6,16,2))
axis(2)
abline(h=0)
# The causes for spikes at mornings before noon and afternoon before closes are traders trade heavily on these contracts
# NQ equity contract has a regular U shape during the day
# CL crude oil also has a U shape
# GC are heavily traded during the day
# ZB treasury bond also has a distinct U shape
# ZW wheat contracts are heavily traded in the mroning and afternoon when agricultural news come out
# Problem 2
# Part (a)
ge_expir<-execute(h1, "{[bd;ed;inst] select m2expir, prct:(v%sum(v))*100 from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`GE]")
plot(ge_expir$m2expir,ge_expir$prct,xlab='Months to expiration',ylab='Percent of total volume',main='Eurodollar (GE) 2017-07-31 to 2017-09-01',type='o',axes=FALSE)
axis(1)
axis(2)
abline(h=0)
# Part (b) # Entropy
# Extreme Case 1: Only a single p = 1 and all others zero
p1<-1
w<-(-p1*log(p1))
n_eff<-exp(w)
n_eff
# In this case, the effective entropy number is 1, which is not spread out distribution at all.
# Extreme Case 2: All pj=1/N. For simplicity, we will use N=4 to demostrate
N<-4
pvec<-numeric(N)
pvec<-c(1/N,1/N,1/N,1/N)
w<-sum(-pvec*log(pvec))
n_eff<-exp(w)
n_eff
# In this case, the effective entropy number is 4(N), which is very spread out distribution.
# Find the 2 most actively traded interest rate instruments and 5 most actively traded non-interest rate instruments
execute(h1,"{[bd;ed] 3 # `volume xdesc select volume:sum siz by inst from update inst:sym2inst[sym] from select from trade where date within (bd;ed)}[2017.07.31;2017.09.01]")
# The two most actively traded interest rate instruments are ZN (10 yr note) and GE.
execute(h2, "{[bd;ed] 6 # `volume xdesc select volume:sum siz by inst from update inst:sym2inst[sym] from select from trade where date within (bd;ed)}[2017.07.31;2017.09.01]")
# The five most actively traded non-interest rate instruments are ES, CL, NQ, GC, and 6E(Euro FX)
# A function to compute effective entropy number given a vector of distribution
# Input: vec: a vector of distribution
# Output: the effetive tntropy number of the distribution
enp<-function(vec)
{
w<-sum(-vec*log(vec))
enp<-exp(w)
return(enp)
}
# compute the effetive entropy number for ZN
znv<-execute(h1, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`ZN]")
enp_zn<-enp(znv$prct)
enp_zn
# The effective entropy number of ZN distribution is 1.542818
# compute the effetive entropy number for GE
gev<-execute(h1, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`GE]")
enp_ge<-enp(gev$prct)
enp_ge
# The effective entropy number of GE distribution is 16.00225
# compute the effetive entropy number for ES
esv<-execute(h2, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`ES]")
enp_es<-enp(esv$prct)
enp_es
# The effective entropy number of ES distribution is 1.023106
# compute the effetive entropy number for CL
clv<-execute(h2, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`CL]")
enp_cl<-enp(clv$prct)
enp_cl
# The effective entropy number of CL distribution is 2.783646
# compute the effetive entropy number for NQ
nqv<-execute(h2, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`NQ]")
enp_nq<-enp(nqv$prct)
enp_nq
# The effective entropy number of NQ distribution is 1,027616
# compute the effetive entropy number for GC
gcv<-execute(h2, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`GC]")
enp_gc<-enp(gcv$prct)
enp_gc
# The effective entropy number of GC distribution is 1.138405
# compute the effetive entropy number for 6E
e6v<-execute(h2, "{[bd;ed;inst] select m2expir, prct:v%sum(v) from select v:sum siz by m2expir from update m2expir:(sym2expir[sym]-min(sym2expir[sym])) from select from trade where date within (bd;ed), sym2inst[sym]=inst}[2017.07.31;2017.09.01;`6E]")
enp_e6<-enp(e6v$prct)
enp_e6
# The effective entropy number of 6E distribution is 1.109962
# IR products: The GE(Eurodollars) has 16 a large effective entropy number: more than a few maturities are active at one time.
# Non-IR products: The CL(crude oil) has 2.78 effective entropy number: more than 20 maturities are active at one time
# Problem 3
# Date range
# Trading-day date strings (yyyy.mm.dd, kdb+ style) covering 2017.07.31
# through 2017.09.01: the last July day, all 31 August days, and the
# first September day -- 33 dates total.
# sprintf("%02d", ...) zero-pads the day number, replacing the original
# grow-in-a-loop with paste() branching on single- vs double-digit days.
date_range <- c("2017.07.31", sprintf("2017.08.%02d", 1:31), "2017.09.01")
ir_range<-execute(h1,"`inst xasc select distinct inst from (update inst:sym2inst[sym] from select from trade where date=2017.08.01) where not null inst")
nir_range<-execute(h2,"`inst xasc select distinct inst from (update inst:sym2inst[sym] from select from trade where date=2017.08.01) where not null inst")
length(ir_range$inst)
length(nir_range$inst)
ir_mat<-matrix(0,nrow=22,ncol=33)
nir_mat<-matrix(0,nrow=66,ncol=33)
for (i in 1:33)
{
d<-date_range[i]
for (j in 1:11)
{
inst<-ir_range$inst[j]
s<-execute(h1,paste("{[d;inst] select last sym from `v xasc select v:sum siz by sym from select from trade where date=d, sym2inst[sym]=inst}[", d, "; `", inst,"]",sep=""))
sym<-s$sym
r<-execute(h1, paste("{[d;s] select r:(avg (bsiz+asiz))%(avg siz) from aj[`seq; select from trade where date=d, sym=s; select from quote where date=d, sym=s]}[", d, "; `", sym, "]", sep=""))
e<-execute(h1, paste("{[d;s] select eta:(ch-dch)%dch from select sum dch, sum ch from update dch:differ sign from update sign:signum del from update del:deltas prc from select from (update ch:differ prc from select from trade where date=d, sym=s) where ch=1}[", d, ";`", sym, "]", sep=""))
ir_mat[2*j-1,i]<-r$r
ir_mat[2*j,i]<-e$eta
}
for (k in 1:33)
{
inst<-nir_range$inst[k]
s<-execute(h2,paste("{[d;inst] select last sym from `v xasc select v:sum siz by sym from select from trade where date=d, sym2inst[sym]=inst}[", d, "; `", inst,"]",sep=""))
sym<-s$sym
r<-execute(h2, paste("{[d;s] select r:(avg (bsiz+asiz))%(avg siz) from aj[`seq; select from trade where date=d, sym=s; select from quote where date=d, sym=s]}[", d, "; `", sym, "]", sep=""))
e<-execute(h2, paste("{[d;s] select eta:(ch-dch)%dch from select sum dch, sum ch from update dch:differ sign from update sign:signum del from update del:deltas prc from select from (update ch:differ prc from select from trade where date=d, sym=s) where ch=1}[", d, ";`", sym, "]", sep=""))
nir_mat[2*k-1,i]<-r$r
nir_mat[2*k,i]<-e$eta
}
}
ir_rm<-rowMeans(ir_mat, na.rm=TRUE)
nir_rm<-rowMeans(nir_mat, na.rm=TRUE)
inst_name<-c(ir_range$inst,nir_range$inst)
irnir_stat<-c(ir_rm, nir_rm)
x<-numeric(length(inst_name))
y<-numeric(length(inst_name))
for (i in 1:length(inst_name))
{
x[i]<-irnir_stat[2*i-1]
y[i]<-irnir_stat[2*i]
}
plot(x,y, main="2017.07-31 to 2017.09-01", xlab="Average quote size / average aggressive trade size", ylab="Reversion parameter", axes=FALSE)
text(x,y,inst_name)
axis(1, at=c(2,5,10,20,50,100,200,500,1000,2000))
axis(2, at=c(0.1,0.2,0.5,1))
|
a2739e539f80a038edb8d221873fa764c39d4c77
|
1a647915c7614faacf2ef26b2b63f8d458de08cb
|
/Inference modeling/Q1.R
|
dcbcf3aea5f20e66c201efdcec7368c7c352aed5
|
[] |
no_license
|
asajini/data-viz-R
|
414e236d5210b61f7d0fc50d169125448abf2cc8
|
3e96fb095148c7121700f77b49b2c7a82fc4d2af
|
refs/heads/master
| 2022-03-28T09:08:06.578660
| 2020-01-26T07:10:38
| 2020-01-26T07:10:38
| 198,112,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
Q1.R
|
# Brexit poll exercise: expectation and standard error of the "Remain"
# count, the sample proportion, and the spread for a sample of N voters.
library(tidyverse)
options(digits = 3)
library(dslabs)
data(brexit_polls)

p <- 0.481 # official proportion voting "Remain"
d <- 2 * p - 1 # official spread, i.e. p - (1 - p)
N <- 1500 # sample size

# Expected total number of voters in the sample choosing "Remain": E[X] = N * p
Es <- N * p
Es

# Standard error of the total number choosing "Remain": sqrt(N * p * (1 - p))
SE <- sqrt(N * p * (1 - p))
SE

# Standard error of X-hat, the sample proportion of "Remain" voters
sqrt(p * (1 - p) / N)

# Expected value of the spread (equals d defined above)
p - (1 - p)

# Standard error of the spread: twice the SE of the proportion
2 * sqrt(p * (1 - p) / N)
|
4a28c931a352b00809a7cabb95fa7a2ac38ec8b4
|
3146b8b5a297450b01b1db28935971cdd6e2453c
|
/Rscripts/compare_score_definitions.R
|
3e43aeba22cc63470be810929d246e820936175f
|
[
"MIT"
] |
permissive
|
csoneson/annotation_problem_txabundance
|
8f80bf193cba9ef21570d6512294ee01a2c53e58
|
748b78b67b5b06a990586483a05d748920fff2d4
|
refs/heads/master
| 2021-10-08T16:40:53.795263
| 2018-12-14T17:46:22
| 2018-12-14T17:46:22
| 102,369,466
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,949
|
r
|
compare_score_definitions.R
|
################################################################################
## ##
## Compare gene scores calculated with different functions (g) ##
## ##
## Inputs: ##
## * combcovrds: object with junction coverage information for all methods ##
## (output from combine_scaled_coverages.R) ##
## * outrds: output file ##
## ##
## Outputs: ##
## * Plots comparing the JCC scores ##
## ##
################################################################################
## Parse key=value arguments passed on the command line (e.g.
## combcovrds='...'). Each argument is evaluated as R code, which defines
## the variables used below (combcovrds, outrds).
## NOTE(review): eval(parse(...)) executes arbitrary command-line text as
## code; acceptable for a trusted pipeline, but worth knowing.
args <- (commandArgs(trailingOnly = TRUE))
for (i in 1:length(args)) {
eval(parse(text = args[[i]]))
}
## Echo the resolved input/output paths for the pipeline log.
print(combcovrds)
print(outrds)
## Load data-manipulation and plotting packages quietly.
suppressPackageStartupMessages({
library(dplyr)
library(GGally)
library(cowplot)
})
## Define weight functions
## Weight functions g(omega), where omega is the fraction of uniquely
## mapping reads at a junction. NA omega is treated as fully unique
## (weight 1). Each function maps a numeric vector of omegas to a
## numeric vector of weights in [0, 1].
## Vectorized with ifelse()/rep() instead of the original per-element
## sapply(): identical values, but faster and type-stable (sapply()
## returns list() on length-0 input, breaking downstream arithmetic).
glist <- list(
  # Hard threshold: weight 1 for omega >= 0.75 (or NA), else 0.
  gthr0.75 = function(omega) {
    ifelse(is.na(omega) | omega >= 0.75, 1, 0)
  },
  # Smooth sigmoid centered at omega = 0.7 with slope 25.
  gsigmoid = function(omega) {
    ifelse(is.na(omega), 1, 1 / (1 + exp(-(25 * (omega - 0.7)))))
  },
  # Piecewise linear: 0 below 0.6, then rising linearly to 1 at omega = 1.
  gpwlinear = function(omega) {
    ifelse(is.na(omega), 1, ifelse(omega < 0.6, 0, 2.5 * omega - 1.5))
  },
  # Identity weight (NA -> 1).
  glinear = function(omega) {
    ifelse(is.na(omega), 1, omega)
  },
  # Constant weight 1 for every junction (no down-weighting).
  gconstant = function(omega) {
    rep(1, length(omega))
  }
)
## Define help function for calculating score
# JCC-style junction score for one gene.
#
# @param uniqreads Numeric vector: uniquely mapping reads per junction.
# @param mmreads Numeric vector: multi-mapping reads per junction.
# @param predcovs Numeric vector: predicted coverage per junction.
# @param g Weight function applied to omega, the unique-read fraction.
# @param beta Exponent applied to the scaling factor w1.
# @return Single numeric: relative weighted deviation between scaled
#   predicted coverage and observed unique reads, to 2 significant digits.
junction_score <- function(uniqreads, mmreads, predcovs, g, beta = 1) {
  omega <- uniqreads / (uniqreads + mmreads)
  ## If there are no multi-mapping reads, all reads are considered unique
  ## (this also replaces the 0/0 = NaN case when both counts are zero).
  omega[mmreads == 0] <- 1
  ## Hoisted: the original evaluated g(omega) four times; compute the
  ## weights once and reuse them.
  w <- g(omega)
  w1 <- (sum(w * uniqreads) / sum(w * predcovs)) ^ beta
  ## w1 can be NA/NaN if all weights are zero (not enough uniquely mapping
  ## reads for any junction), and Inf if sum(w * predcovs) is zero while
  ## sum(w * uniqreads) is not. In both cases fall back to no scaling.
  w1[is.na(w1)] <- 1
  w1[!is.finite(w1)] <- 1
  signif(sum(abs(w1 * w * predcovs - w * uniqreads)) / sum(w * uniqreads), 2)
}
## Define corresponding help function for calculating scaled coverages
# Scaled predicted junction coverage (companion to junction_score()).
# Computes the same scaling factor w1 as junction_score(), but returns
# the scaled predicted coverages instead of the summary score.
#
# @inheritParams: same arguments as junction_score().
# @return Numeric vector w1 * predcovs, same length as predcovs.
scaled_coverage <- function(uniqreads, mmreads, predcovs, g, beta = 1) {
  omega <- uniqreads / (uniqreads + mmreads)
  omega[mmreads == 0] <- 1  # no multi-mappers => all reads counted unique
  w <- g(omega)  # hoisted: the original evaluated g(omega) twice
  w1 <- (sum(w * uniqreads) / sum(w * predcovs)) ^ beta
  ## Fall back to w1 = 1 (no scaling) when the ratio is NA/NaN or Inf,
  ## mirroring junction_score().
  w1[is.na(w1)] <- 1
  w1[!is.finite(w1)] <- 1
  w1 * predcovs
}
## Read combined coverage file
## (output of combine_scaled_coverages.R); combcov$junctions holds
## per-junction read counts and predicted coverages per gene/method.
combcov <- readRDS(combcovrds)
junccov <- combcov$junctions
## Calculate score
## For each gene x method group, compute the JCC score and the scaled
## predicted coverage under each of the five weight functions in glist
## (all with beta = 1); column names encode the weight function used.
junccov <- junccov %>%
dplyr::group_by(gene, method) %>%
dplyr::mutate(score_gthr0.75_b1 =
junction_score(uniqreads, mmreads, pred.cov,
g = glist$gthr0.75, beta = 1),
score_gsigmoid_b1 =
junction_score(uniqreads, mmreads, pred.cov,
g = glist$gsigmoid, beta = 1),
score_glinear_b1 =
junction_score(uniqreads, mmreads, pred.cov,
g = glist$glinear, beta = 1),
score_gconstant_b1 =
junction_score(uniqreads, mmreads, pred.cov,
g = glist$gconstant, beta = 1),
score_gpwlinear_b1 =
junction_score(uniqreads, mmreads, pred.cov,
g = glist$gpwlinear, beta = 1)) %>%
dplyr::mutate(scaled.cov_gthr0.75_b1 =
scaled_coverage(uniqreads, mmreads, pred.cov,
g = glist$gthr0.75, beta = 1),
scaled.cov_gsigmoid_b1 =
scaled_coverage(uniqreads, mmreads, pred.cov,
g = glist$gsigmoid, beta = 1),
scaled.cov_glinear_b1 =
scaled_coverage(uniqreads, mmreads, pred.cov,
g = glist$glinear, beta = 1),
scaled.cov_gconstant_b1 =
scaled_coverage(uniqreads, mmreads, pred.cov,
g = glist$gconstant, beta = 1),
scaled.cov_gpwlinear_b1 =
scaled_coverage(uniqreads, mmreads, pred.cov,
g = glist$gpwlinear, beta = 1)) %>%
dplyr::ungroup()
## Add score to gene table
## Scores are constant within a gene/method group, so distinct() leaves
## one row per group before left-joining onto the gene-level table.
genecov <- combcov$gene
genecov <- dplyr::left_join(genecov,
junccov %>% dplyr::select(gene, method, score_gthr0.75_b1,
score_gsigmoid_b1, score_glinear_b1,
score_gconstant_b1, score_gpwlinear_b1) %>%
dplyr::distinct(),
by = c("gene", "method"))
# Lower-panel function for GGally::ggpairs(): scatter plot with a faint
# y = x reference line so deviations between two scores stand out.
lowerfun <- function(data, mapping) {
  panel <- ggplot(data = data, mapping = mapping)
  panel <- panel + geom_abline(slope = 1, intercept = 0, alpha = 0.3)
  panel + geom_point(alpha = 0.3, size = 0.5)
}
## Pairs plot of gene scores
## Compare the five score definitions pairwise for Salmon, restricted to
## genes with more than 25 uniquely mapping junction reads; the lower
## panels use lowerfun() to overlay a y = x reference line.
gp <- ggpairs(genecov %>% dplyr::filter(method == "Salmon" & uniqjuncreads > 25) %>%
dplyr::select(score_gthr0.75_b1, score_gsigmoid_b1, score_gpwlinear_b1,
score_glinear_b1, score_gconstant_b1) %>%
dplyr::rename(gthr0.75_b1 = score_gthr0.75_b1,
gsigmoid_b1 = score_gsigmoid_b1,
gpwlinear_b1 = score_gpwlinear_b1,
glinear_b1 = score_glinear_b1,
gconstant_b1 = score_gconstant_b1),
lower = list(continuous = lowerfun)) +
theme_bw() + xlab("JCC score") + ylab("JCC score")
gp
## Illustration of weight functions
## Evaluate each g on a fine grid over [0, 1] and facet one panel per
## weight function, in the same order as the pairs plot.
x <- seq(0, 1, length.out = 1000)
df1 <- do.call(dplyr::bind_rows, list(
data.frame(x = x, y = glist$gthr0.75(x), g = "gthr0.75_b1", stringsAsFactors = FALSE),
data.frame(x = x, y = glist$gsigmoid(x), g = "gsigmoid_b1", stringsAsFactors = FALSE),
data.frame(x = x, y = glist$gpwlinear(x), g = "gpwlinear_b1", stringsAsFactors = FALSE),
data.frame(x = x, y = glist$glinear(x), g = "glinear_b1", stringsAsFactors = FALSE),
data.frame(x = x, y = glist$gconstant(x), g = "gconstant_b1", stringsAsFactors = FALSE)
)) %>% dplyr::mutate(g = factor(g, levels = c("gthr0.75_b1", "gsigmoid_b1", "gpwlinear_b1",
"glinear_b1", "gconstant_b1")))
gg <- ggplot(df1, aes(x = x, y = y)) + geom_path() + facet_wrap(~ g, ncol = 1) +
theme_bw() + xlab(expression(omega)) + ylab(expression(g(omega)))
gg
## Combine both panels into a single figure; the png filename is derived
## from the output rds path by swapping the extension.
png(gsub("rds$", "png", outrds), width = 10, height = 7, unit = "in", res = 400)
cowplot::plot_grid(gg, ggmatrix_gtable(gp), nrow = 1,
labels = c("A", "B"), rel_widths = c(0.3, 1))
dev.off()
## The rds output only marks completion for the workflow manager; the
## real result is the png figure.
saveRDS(NULL, file = outrds)
date()
sessionInfo()
|
158fbf2b87b51731d3cd9aa00fdf1f4fe27b2e6c
|
f4fffe026383f8f681c8b2ef2e7b2ec0f8143688
|
/man/ggBivServer.Rd
|
072a9e7eee45c5f9df28f148bbebd426dc9cb5f7
|
[] |
no_license
|
DavisVaughan/romic
|
436da67b077937d1c13af9701d39a00f083e6694
|
f3470f5cd42b6ee8322db3f10a1c254766a5dc3e
|
refs/heads/master
| 2023-05-08T05:44:49.460728
| 2021-05-18T13:10:03
| 2021-05-18T13:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 673
|
rd
|
ggBivServer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module_ggbiv.R
\name{ggBivServer}
\alias{ggBivServer}
\title{ggBivariate Server}
\usage{
ggBivServer(id, tomic, plot_table, return_brushed_points = FALSE)
}
\arguments{
\item{id}{An ID string that corresponds with the ID used to call the module's
UI function.}
\item{tomic}{Either a \code{tidy_omic} or \code{triple_omic} object}
\item{plot_table}{table containing the data to be plotted}
\item{return_brushed_points}{Return values selected on the plot}
}
\value{
a tomic_table if return_brushed_points is TRUE, and 0 otherwise
}
\description{
Server components for the ggBivariate module.
}
|
83bfe4453db87c043a3f9ec9dce9f9e90fe2ab35
|
7ca0ee6639ecad09598c5c58d058ccda00884f6b
|
/data-raw/clean_delta.R
|
f36d787cf7ab1e35013b3ced60ac37577d482b4e
|
[] |
no_license
|
FlowWest/Explore-SIT-Model
|
edcf404fa66be7c2d7284b992dded66a9bfdda9f
|
7f47fc2a5f9e3a0c5bbb03e11f2f5331354105aa
|
refs/heads/master
| 2021-06-20T21:59:30.842006
| 2017-07-25T21:29:37
| 2017-07-25T21:29:37
| 94,383,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,708
|
r
|
clean_delta.R
|
## Clean raw delta inputs from the SIT model into tidy rds files under data/.
library(tidyverse)
library(stringr)
# cleaning input delta data from sit model
## Inflow: columns are years 1970-1990; reshape to long (year, flow) and
## drop 1990 before saving.
delta_inflow <- read_rds('data-raw/delta_inflow.rds')
names(delta_inflow) <- 1970:1990
delta_inflow %>%
tidyr::gather(year, flow) %>%
dplyr::filter(year != 1990) %>%
write_rds('data/delta_inflow.rds')
## Recode the two watershed labels to human-readable names (other labels
## become NA via case_when's default).
inputs <- read_rds('data-raw/delta_inputs.rds') %>%
mutate(watershed = case_when(
Watershed == 'N.Delta' ~ 'North Delta',
Watershed == 'SC.Delta' ~ 'South Delta'
))
inputs %>%
select(watershed, high_pred = High.pred, contact_points = contct.pts) %>%
write_rds('data/delta_inputs.rds')
## Temperatures arrive as one column per month (temp.1 ... temp.12);
## pivot long and turn the column suffix into a numeric month.
inputs %>%
select(watershed, temp.1:temp.12) %>%
gather(month, temperature, -watershed) %>%
mutate(month = as.numeric(str_replace(month, 'temp.', ''))) %>%
arrange(watershed, month) %>%
write_rds('data/delta_temperature.rds')
prop_div <- read_rds('data-raw/delta_prop_diversions.rds')
glimpse(prop_div)
# extract prop diverted, north and south delta have same prop diverted
## Diversions are indexed as [, year, region] 3-d arrays (presumably
## month x year x region -- confirm against the raw data); flatten each
## region's 21 years into long vectors, then rebuild a tidy
## (watershed, year, month, value) tibble. 252 = 12 months * 21 years.
tot_div <- read_rds('data-raw/delta_total_diversions.rds')
total_div <- c(map(1:21, ~tot_div[, ., 1]) %>% unlist() , map(1:21, ~tot_div[, ., 2]) %>% unlist())
tibble(watershed = c(rep('North Delta', 252), rep('South Delta', 252)),
year = rep(rep(1970:1990, each = 12), 2),
month = rep(1:12, 42),
tot_div = total_div) %>%
write_rds('data/delta_tot_div.rds')
prop_diverted <- c(map(1:21, ~prop_div[, ., 1]) %>% unlist() , map(1:21, ~prop_div[, ., 2]) %>% unlist())
tibble(watershed = c(rep('North Delta', 252), rep('South Delta', 252)),
year = rep(rep(1970:1990, each = 12), 2),
month = rep(1:12, 42),
prop_div = prop_diverted) %>%
write_rds('data/delta_prop_div.rds')
|
b2ee8fffc3148362c698033e3fe470b0ca5da04a
|
a59b0019cd455e5c8c59263d5248b388eb235257
|
/man/fixef.gam.Rd
|
2e58efd79b666f395e854673f9c67b4dc5cfc267
|
[
"MIT"
] |
permissive
|
dill/gratia
|
4df529f5e636a0139f5c355b52a2924bebf7aca4
|
26c3ece0e6a6298ab002b02019b0ea482d21dace
|
refs/heads/master
| 2023-04-08T18:35:18.730888
| 2023-03-20T12:52:33
| 2023-03-20T12:52:33
| 160,169,115
| 0
| 0
|
NOASSERTION
| 2018-12-03T09:54:30
| 2018-12-03T09:54:30
| null |
UTF-8
|
R
| false
| true
| 929
|
rd
|
fixef.gam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/effects.R
\name{fixef.gam}
\alias{fixef.gam}
\alias{fixef.gamm}
\alias{fixef.lm}
\alias{fixef.glm}
\alias{fixed_effects}
\alias{fixed_effects.default}
\title{Extract fixed effects estimates from a fitted GAM}
\usage{
\method{fixef}{gam}(object, ...)
\method{fixef}{gamm}(object, ...)
\method{fixef}{lm}(object, ...)
\method{fixef}{glm}(object, ...)
fixed_effects(object, ...)
\method{fixed_effects}{default}(object, ...)
}
\arguments{
\item{object}{a fitted GAM}
\item{...}{arguments passed to other methods}
}
\description{
Extract fixed effects estimates from a fitted GAM
}
\examples{
load_mgcv()
# run example if lme4 is available
if (require("lme4")) {
data(sleepstudy, package = "lme4")
m <- gam(Reaction ~ Days + s(Subject, bs = "re") +
s(Days, Subject, bs = "re"),
data = sleepstudy, method = "REML")
fixef(m)
}
}
|
7f6eacc767f831ed3886330965abe7d247638847
|
de6cd80dab1c5cd752e2b8ea639ba1662fb4af48
|
/polar_cord_plot.R
|
021c4866808679c077f5381fdf3232cf10cdc5b1
|
[] |
no_license
|
smart-patrol/Useful_Code
|
6204c4843a633c6071681ad7878edb976e946ea1
|
bc28e7feba147dd19fd0318f2578318201312364
|
refs/heads/master
| 2021-06-25T05:36:41.011122
| 2017-09-07T01:09:21
| 2017-09-07T01:09:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 741
|
r
|
polar_cord_plot.R
|
## Polar-coordinate ("coxcomb") bar chart of percentages pasted from the
## clipboard (e.g. copied out of Excel).
rm(list = ls(all = TRUE)) # NOTE(review): clears the whole global environment; avoid in shared scripts
library(RODBC)
library(dplyr)
library(ggplot2)
library(reshape2)
library(XLConnect)
library(ggthemr)
ggthemr('flat')
#devtools::install_github('ggthemr', 'cttobin')
## Read a tab-separated table from the clipboard.
## NOTE(review): the "clipboard" connection is Windows-only.
read.excel <- function(header=TRUE,...) {
read.table("clipboard",sep="\t",header=header,...)
}
dat=read.excel()
str(dat)
## Convert the Percent fraction to a 0-100 scale for the labels.
## Assumes the pasted data has columns `spec` and `Percent` -- confirm
## against the source spreadsheet.
dat$per <- dat$Percent * 100
## One bar per spec, bent into polar coordinates, labelled with the
## rounded percentage; legend and y axis are suppressed.
ggplot(dat, aes(x = spec, y = per, fill = spec)) +
geom_bar(stat = "identity") +
coord_polar() + labs(x="", y="" ) +
geom_text(aes(label = paste0(round(per, 1), "%")),
size = 16, hjust = 1) +
theme(legend.position="none", text = element_text(size=28),
axis.text.y = element_blank(),
axis.ticks = element_blank() )
# scale_fill_brewer(palette = 1)
|
ce73b176591f8f2820aa13bafefb3c0fd5bc9269
|
904355e00b661fe5f8b42c055dec3bd3bc16f000
|
/ShinyAppTutorials/media/app.R
|
e53bced64be16ec15fcd4ec5b0bd0218fb88f317
|
[] |
no_license
|
mobatusi/R_projects
|
eb6462f4cc3d5f3cfae38bb4c71e494fe2d85a32
|
5d6b0b26219548aa1705fd56e98542ef59124d56
|
refs/heads/master
| 2020-04-04T08:13:53.972846
| 2018-11-04T18:14:43
| 2018-11-04T18:14:43
| 155,776,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 889
|
r
|
app.R
|
## Minimal Shiny demo: one navbar tab per media type (local image, local
## video, online pdf embedded via iframe, and plain text).
library(shiny)
## No reactive logic needed; all content is static.
server = function(input, output){
}
ui = navbarPage("Integration of different media types",
tabPanel("Image sourced locally",
tags$img(src="logo.png", width ="100px", height = "100px")),
tabPanel("Video sourced locally",
tags$video(src="WestWave-EEG.mp4", type = "video/mp4", controls = T,
width ="900px", height = "800px")),
tabPanel("Pdf sourced online, Iframe",
tags$iframe(style="height:600px; width:100%; scrolling=yes",
src="https://cran.r-project.org/web/packages/shiny/shiny.pdf")),
tabPanel("Text as .txt",
includeText("mytxt.txt"))
)
## NOTE(review): local assets (logo.png, the mp4) are typically served
## from the app's www/ directory -- confirm the deployment layout.
shinyApp(ui = ui, server = server)
|
7e6c0b4b5116abc456da21e8fc0ff9b12d617c8b
|
747eb6826bc0111b151bb156a6a3d41acec60038
|
/Code/Data Cleaning/Uniting databases/Gouping.R
|
82f288be17d89d7ffed659ccafbc892d0afa4cd2
|
[] |
no_license
|
andreschprr/Violent-Extremism-and-Social-Cohesion
|
aab084664f61be5fb59ebcba8303107934aabef1
|
e4076aa438a0f0a5a6e1598e85ca2394a7b938cf
|
refs/heads/master
| 2021-02-06T14:20:54.628401
| 2020-03-13T08:03:32
| 2020-03-13T08:03:32
| 243,921,226
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,990
|
r
|
Gouping.R
|
## Merge Afrobarometer and WVS round/country aggregates, build composite
## vertical (VSC_*) and horizontal (HSC_*) social-cohesion indices as
## row means of their component questions (ignoring missing items),
## then combine with ACLED data and write the outputs.
survey_qs <- merge(afro_groupedby, wvs_2groupedby, all.x = TRUE, by = c("ROUND", "COUNTRY"))
## NOTE(review): this VSC_UG is overwritten a few lines below by the
## rowMeans(..., na.rm = TRUE) version; unlike this plain average, that
## version tolerates a missing component. This line is dead code.
survey_qs$VSC_UG <- (survey_qs$VSC_UG1 + survey_qs$VSC_UG2) / 2
## Vertical social-cohesion sub-indices.
survey_qs$VSC_CA <- rowMeans(survey_qs[,c("VSC_CA1","VSC_CA2","VSC_E026","VSC_E026B","VSC_E028B","VSC_G021")], na.rm = TRUE)
survey_qs$VSC_DE <- rowMeans(survey_qs[,c("VSC_DE1","VSC_DE2","VSC_DE3","VSC_E233B")], na.rm = TRUE)
survey_qs$VSC_PP <- rowMeans(survey_qs[,c("VSC_PP1","VSC_PP2","VSC_PP3","VSC_PP4","VSC_PP5")], na.rm = TRUE)
survey_qs$VSC_GQ <- rowMeans(survey_qs[,c("VSC_GQ1","VSC_GQ2","VSC_GQ3","VSC_GQ4","VSC_GQ5","VSC_GQ6","VSC_E069_06","VSC_E069_17")], na.rm = TRUE)
survey_qs$VSC_UG <- rowMeans(survey_qs[,c("VSC_UG1","VSC_UG2")], na.rm = TRUE)
survey_qs$VSC_SP <- rowMeans(survey_qs[,c("VSC_SP1","VSC_SP2")], na.rm = TRUE)
## Horizontal social-cohesion sub-indices.
survey_qs$HSC_CI <- rowMeans(survey_qs[,c("HSC_CI1","HSC_CI2","HSC_G020")], na.rm = TRUE)
survey_qs$HSC_CS <- rowMeans(survey_qs[,c("HSC_CS1","HSC_CS2","HSC_A098","HSC_A099","HSC_A100","HSC_A101","HSC_A102","HSC_A103"
,"HSC_A104","HSC_A105","HSC_A106","HSC_A106B","HSC_A106C")], na.rm = TRUE)
survey_qs$HSC_IM <- rowMeans(survey_qs[,c("HSC_IM2","HSC_IM3","HSC_IM4")], na.rm = TRUE)
survey_qs$HSC_ST <- rowMeans(survey_qs[,c("HSC_A165","HSC_G007_18_B","HSC_G007_35_B","HSC_G007_36_B","HSC_H001")], na.rm = TRUE)
## Aggregate indices: unweighted means of the sub-indices above.
survey_qs$VSC <- rowMeans(survey_qs[,c("VSC_CA","VSC_DE","VSC_PP","VSC_GQ","VSC_UG","VSC_SP")], na.rm = TRUE)
survey_qs$HSC <- rowMeans(survey_qs[,c("HSC_CI","HSC_CS","HSC_IM","HSC_ST")], na.rm = TRUE)
## Keep identifiers, the indices, and the violent-extremism (VE_*) items.
indices <- survey_qs %>%
select(ROUND, COUNTRY,
VSC_CA, VSC_DE, VSC_PP, VSC_GQ, VSC_UG, VSC_SP, VSC,
HSC_CI, HSC_CS, HSC_IM, HSC_ST, HSC,
VE3, VE4, VE1_PV, VE1_Tens, VE1_Terr, VE1_BH, VE1_all, VE2_pnsh, VE_H007, VE_F114_03)
write_csv(indices, "indices.csv")
## Inner merge with the ACLED round-level table on all shared column
## names (merge() default), then write the combined dataset.
merged_data <- merge(indices, acled_groupedby_round)
write_csv(merged_data, "merged_data.csv")
write_csv(wvs, "wvs_best.csv")
|
8b7582b348611d2818f6bb1a586800394f2f1568
|
4e853052f2468cbc7b5aaa8441c71f125fe3c44f
|
/Gretchen R modeling/ZOld/agecumulative.R
|
533f25f62ce03f3964249c6c29a604af7c55ce3a
|
[] |
no_license
|
flaxter/nims
|
316ff30e4e4c487cbacb9d818b2d3ee324dac4ac
|
438d22a874b0d9dfa70924b280d2750c1cff37a6
|
refs/heads/master
| 2021-01-10T22:07:33.484281
| 2010-09-02T13:43:47
| 2010-09-02T13:43:47
| 744,457
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
agecumulative.R
|
# Collapse per-threshold proportions into cumulative proportions per
# study/age-group/sex stratum.
#
# Rows with thresh2 == 99 are already cumulative and are copied through
# unchanged; otherwise, for each row, the proportions of all consecutive
# following rows that share the same survey, age range (agel/ageh), and
# sex are summed into that row's entry of cump.
#
# NOTE(review): `female` is read from the enclosing/global environment,
# not passed as an argument -- callers must have it defined and aligned
# with the other vectors. `year` and `thresh1` are accepted but unused.
#
# @param survey  study identifier per row
# @param year    unused
# @param thresh1 unused
# @param thresh2 threshold code; 99 flags an already-cumulative row
# @param agel    lower age bound per row
# @param ageh    upper age bound per row
# @param p       proportion per row
# @return numeric vector cump, same length as p
agecumulative<- function(survey,year,thresh1,thresh2,agel,ageh,p)
{
i<-1
N<-length(p)
cump<-c(NA)
while(i <= N)
{
if(thresh2[i] == 99) # already cumulative
{
cump[i]<-p[i]
i<-i+1
} else {
istart<-i
# Remember the stratum of the first row in this run; the inner loop
# accumulates p over all following rows that still match it.
studyid<-survey[i]
startage<-agel[i]
endage<-ageh[i]
sexid<-female[i]
cump[istart]<-0
while(i<= N & thresh2[i] <= 99 & studyid == survey[i] &
startage == agel[i] & endage == ageh[i] &
sexid == female[i])
{
cump[istart]<-cump[istart]+p[i]
i<-i+1
}
# Back up to the row after istart so every row in the run gets its own
# cumulative sum (the run is intentionally re-scanned per starting row).
i<-istart+1 # so can repeat for next round
}
}
return(cump)
}
|
a8d918279ee717edc27034abacfce5d0b6903182
|
adf73a99ea29d5b56a1499849cc3609498e80558
|
/man/GO.mouse.Rd
|
48883b43ba7e6370b8a324113320c04b33b66021
|
[] |
no_license
|
woodhaha/EGAD
|
ac31565b9f8bd334b73c4b261bb9b5b29379f594
|
082caa9723859f9300d37c622bab1292c2ee63c8
|
refs/heads/master
| 2021-01-20T01:19:04.417337
| 2016-04-29T22:00:25
| 2016-04-29T22:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
GO.mouse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GO.mouse.R
\docType{data}
\name{GO.mouse}
\alias{GO.mouse}
\title{GO - mouse}
\format{A data frame with 2086086 rows and 4 variables:
\describe{
\item{name}{gene name or symbol}
\item{entrezID}{Entrez gene identifier}
\item{GO}{Gene Ontology (GO) term identifier}
\item{evidence}{GO evidence code for the gene--term association}
}}
\source{\url{http://geneontology.org/}}
\description{
A dataset of the gene GO associations
}
|
7ab79ae50b789facb350355551d114f1f77bc44e
|
69236e01a49f22152f82a1bb0df251efc4002466
|
/covid19/www/2_load_functions/Estimate_Rt.R
|
be89593e4b4223400e838819e753965e1d7b531b
|
[
"MIT"
] |
permissive
|
CHAD-Analytics/CHAD
|
9f31315a497f476b7049ecbf244002a96c334877
|
0519e801c034b895fd1565a72f135395f0083e0f
|
refs/heads/master
| 2023-02-01T15:29:07.516415
| 2020-12-12T22:57:46
| 2020-12-12T22:57:46
| 258,328,663
| 14
| 7
|
MIT
| 2020-05-12T14:17:55
| 2020-04-23T20:57:07
|
R
|
UTF-8
|
R
| false
| false
| 649
|
r
|
Estimate_Rt.R
|
# Rough effective reproduction number (Rt) for a set of counties:
# the ratio of the 5-day average of new cases to the 14-day average.
#
# Reads the global `CovidConfirmedCases` table (wide format: county
# metadata in the first columns, then one cumulative-case column per
# date -- columns 5+ are treated as the date series).
#
# @param IncludedCounties data frame with a FIPS column selecting counties
# @return Rt rounded to 2 decimals, or the string "Undefined for Region"
#   when the 14-day average is zero.
#   NOTE(review): mixed numeric/character return type -- callers must
#   handle both; the value is also returned invisibly because the last
#   expression is an assignment.
Estimate_Rt <- function(IncludedCounties){
#Find counties in radius
CovidCountiesCases<-subset(CovidConfirmedCases, CountyFIPS %in% IncludedCounties$FIPS)
#Compute cumulative cases in selected counties
## Columns 5+ are the per-date cumulative counts; sum across counties.
CumDailyCovid<-colSums(CovidCountiesCases[,5:length(CovidCountiesCases)])
#5-day and 14-day averages
## Differences of cumulative totals give new cases over each window.
len = length(CumDailyCovid)
cases5day = CumDailyCovid[len] - CumDailyCovid[len-5]
cases14day = CumDailyCovid[len] - CumDailyCovid[len-14]
avg5 = cases5day/5
avg14 = cases14day/14
if (avg14 == 0){
Rt = "Undefined for Region"
} else{
Rt = round(avg5/avg14,2)
}
}
|
15bdf7bc832b21eb257dd11cce25a919f40cf049
|
809fd0e2b1536bec26ea4e19ecefeb99f36fb32a
|
/workout01/code/make-shots-data-script.R
|
2f319f8f17365e4e57fcbf27fac66c070c129429
|
[] |
no_license
|
Lin2879/stat133-workout-01
|
595a320ec502f214fadc277af2868daf8e38b957
|
023f8af523dd44d3e0bcd3954bb352d401e7fbc7
|
refs/heads/master
| 2020-04-28T20:34:10.488772
| 2019-03-14T05:35:12
| 2019-03-14T05:35:12
| 175,549,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,360
|
r
|
make-shots-data-script.R
|
##################################################
## title: Data Preparation
## description: add player name and minute attributes, recode shot_made_flag,
##              write one summary text file per player, then stack all
##              players into a single csv and summarise it.
## inputs: the data csv files of all GSW players (../data/<slug>.csv)
## output: per-player summary text files, shots-data-summary.txt and
##         ../data/shots-data.csv
##################################################
## Players to process: csv file slug -> display name used in `name` column.
players <- list(
  "stephen-curry"  = "Stephen Curry",
  "kevin-durant"   = "Kevin Durant",
  "draymond-green" = "Draymond Green",
  "andre-iguodala" = "Andre Iguodala",
  "klay-thompson"  = "Klay Thompson"
)
player_tables <- vector("list", length(players))
names(player_tables) <- names(players)
for (slug in names(players)) {
  ## Read the raw shot data for this player.
  tbl <- read.csv(file.path("../data", paste0(slug, ".csv")),
                  stringsAsFactors = FALSE)
  ## New column "name" identifying the player.
  tbl$name <- players[[slug]]
  ## Recode shot_made_flag to more descriptive values.
  tbl$shot_made_flag[tbl$shot_made_flag == "n"] <- "shot_no"
  tbl$shot_made_flag[tbl$shot_made_flag == "y"] <- "shot_yes"
  ## Minute of the game in which the shot occurred.
  tbl$minute <- (tbl$period - 1) * 12 + tbl$minutes_remaining
  ## Reorder columns exactly as the original script did:
  ## name first, original cols 1-6, minute, original cols 7-13.
  tbl <- tbl[, c(14, 1, 2, 3, 4, 5, 6, 15, 7, 8, 9, 10, 11, 12, 13)]
  ## Per-player summary file. BUG FIX: the original stacked five sink()
  ## diversions and popped only one, leaving the console diverted at script
  ## end; each diversion is now closed immediately after use.
  sink(file.path("../output", paste0(slug, "-summary.txt")))
  print(summary(tbl))
  sink()
  player_tables[[slug]] <- tbl
}
## Stack the tables into one single data frame (original player order kept).
dat <- do.call(rbind, player_tables)
rownames(dat) <- NULL  # do.call(rbind, <named list>) prefixes row names
## Export the assembled table.
write.csv(dat, '../data/shots-data.csv')
## Summary of the combined data frame, with its sink closed afterwards.
sink('../output/shots-data-summary.txt')
print(summary(dat))
sink()
|
7207f49b7613afd17971c0f4d66830b34346c032
|
a811448359f042a43c0ed22dc40d9917e19332a4
|
/man/hi_there.Rd
|
b7ec0772daa5fcaba1838d19c0e23a927bf772b7
|
[] |
no_license
|
lwjohnst86/template.package
|
1e9369b12078eed24a09c5d7d0ac507c52b8e02b
|
3e8edb5ba457fab93692f63eefb50ee90cd63ba5
|
refs/heads/master
| 2021-06-30T05:02:56.469981
| 2017-09-18T22:46:06
| 2017-09-18T22:46:06
| 103,998,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 344
|
rd
|
hi_there.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function.R
\name{hi_there}
\alias{hi_there}
\title{A friendly greeting to a friendly person.}
\usage{
hi_there(name)
}
\arguments{
\item{name}{A name}
}
\value{
Hi there name!
}
\description{
A friendly greeting to a friendly person.
}
\examples{
hi_there("Tim")
}
|
99a0e8eda2e313c0e62f5c7e8d56b4a7e4278b6a
|
f68ae1944a60f207c2acb2cc647a16f364a30880
|
/R/get.mutect2.data.R
|
e05ba36dce5ccece43301703eea25e55d479c942
|
[] |
no_license
|
rdeborja/ShlienLab.Core.SSM
|
e8b4541bd8b8d4da16bdfc9616d20c92b2574742
|
aefe9d007dcb2c0cd901091197eebaac8c8195e2
|
refs/heads/master
| 2021-01-21T10:35:00.224321
| 2017-05-18T13:52:42
| 2017-05-18T13:52:42
| 91,699,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
get.mutect2.data.R
|
get.mutect2.data <- function(path='.', pattern='hg19_multianno.txt$') {
  # Read and row-bind all MuTect2 ANNOVAR-annotated files found under `path`.
  #
  # Args:
  #   path:    root directory searched recursively for annotation files.
  #   pattern: filename regex identifying annotated MuTect2 output files.
  # Returns:
  #   A single data frame containing all successfully-read files, with
  #   column names from ShlienLab.Core.SSM::get.mutect2.annotated.header().
  files <- list.files(path = path, pattern = pattern, recursive = TRUE,
                      full.names = TRUE)
  # Read each file; unreadable or empty files yield NULL and are dropped.
  # (The original used `class(x) == 'try-error'`; inherits() is the robust
  # way to detect a try() failure, and lapply avoids the 1:length(files)
  # trap when no files match.)
  tables <- lapply(files, function(f) {
    tmp <- try(
      read.table(
        file = f,
        header = FALSE,
        as.is = TRUE,
        sep = '\t',
        quote = "\"",
        skip = 1
      ),
      silent = TRUE
    )
    if (inherits(tmp, 'try-error')) NULL else tmp
  })
  tables <- Filter(Negate(is.null), tables)
  # rev() preserves the original row order, which prepended each newly read
  # file (i.e. later-read files appear first in the result).
  data <- do.call(rbind, rev(tables))
  if (is.null(data)) {
    data <- data.frame()
  }
  colnames(data) <- ShlienLab.Core.SSM::get.mutect2.annotated.header()
  return(data)
}
|
01c8877be619629bf1851fc893928c0b57131fc5
|
6cab63f138aa37eaeb2366284ee4001496356fa1
|
/2013/Woody2013/Scripts/Old Broken Scripts/Plot.Info.Tree_Script.R
|
a5ae36397b2ddd8a55f5ff30397b92b51146dc3d
|
[] |
no_license
|
dackerly/PepperwoodVegPlots
|
a5e5c6ddbcd20a1d5a514c0edf169d3606817cdd
|
4cbe62ab9c12c5dd266ffe1cf40a16155b341e6f
|
refs/heads/master
| 2023-08-09T00:45:48.425990
| 2023-07-24T22:33:34
| 2023-07-24T22:33:34
| 36,311,558
| 8
| 9
| null | 2019-03-27T18:27:42
| 2015-05-26T17:17:19
|
R
|
UTF-8
|
R
| false
| false
| 13,191
|
r
|
Plot.Info.Tree_Script.R
|
# June 11, 2013 MFO
# Intention of script is to pull plot characters (SW lat/long, slope, aspect) out of 'PlotInfo'
# csv and combine with tree data by plot;
# Columns:
# Plot.ID, UTM.E (SW corner), UTM.N(SW corner), Slope (field collected), Aspect (field collected)
# Tree.Species, Tree.Number(per species, DBH > 1cm), Basal.Area_cm2(per species, DBH>1cm),
#Total.Basal.Area_cm2 (per plot, all species combined, DBH > 1cm), Percent.Cover (per species)
options(stringsAsFactors=FALSE)
#Setting this option makes sure that character strings aren't treated as factors.
# Pick the working directory by user so the same script runs on both machines.
if (Sys.getenv("USER")=='meaganoldfather') setwd("/Users/meaganoldfather/Dropbox/PepperwoodVegPlots/Database/2013/Woody2013/Data/OriginalCSV/PlotInfo/") else setwd("/Users/david/Documents/Projects/Dropbox/PepperwoodVegPlots_2013/Database/2013/Woody2013Data/OriginalCSV/PlotInfo/")
# In order to use the same script with multiple users; Final folder is 'PlotInfo'
file.list <-dir(paste(getwd(),"/", sep=''))
file.list # list of file names of csv's in 'PlotInfo'
strlen <- nchar(file.list[1]) # strlen is the length of all of the file names
# NOTE(review): assumes every file name has the same length and the plot code
# occupies characters strlen-10 .. strlen-4 (e.g. "...PPW1301.csv") -- confirm.
plot.list <- substr(file.list,strlen-10,strlen-4) # extracts plot numbers from file name vector
plot.list # list of plot names
# Read 5 rows after skipping 5 from each csv -- presumably the per-plot
# header block holding coordinates/slope/aspect; verify against a raw file.
plot.data<-lapply(file.list, read.csv, skip=5, nrows=5, header=F)
head(plot.data)
plot.data[[1]] # return first element in plot.data (PPW1301)
# Pre-size the per-plot attribute table (one row per csv file).
plot.info<-data.frame("Plot.ID"=numeric(length(plot.data)),
"UTM.E"=numeric(length(plot.data)),
"UTM.N"=numeric(length(plot.data)),
"Slope"=numeric(length(plot.data)),
"Aspect"=numeric(length(plot.data)))
plot.info$Plot.ID<-plot.list
# Copy fixed header-cell positions out of each plot's csv block.
# NOTE(review): 1:length(plot.data) iterates 1:0 if no files were found;
# seq_along() would be safer.
for (i in 1:length(plot.data)){
plot.info$UTM.E[i]<-plot.data[[i]][1,2]
plot.info$UTM.N[i]<-plot.data[[i]][2,2]
plot.info$Slope[i]<-plot.data[[i]][3,5]
plot.info$Aspect[i]<-plot.data[[i]][3,8]
}
plot.info
# Slope and aspect are missing for plot PPW1302 and not applicable for PPW1329 (on crest)
# NOTE(review): rows 2 and 29 are hard-coded positions; this silently corrupts
# the wrong plots if the file ordering ever changes -- match on Plot.ID instead.
plot.info$Slope[2]<-NA # changes slope for plot 1302 to NA
plot.info$Aspect[2]<-NA # same for 1302 aspect
plot.info$Slope[29]<-NA # changes slope for plot 1329 to NA
plot.info$Aspect[29]<-NA # same for 1329 aspect
plot.info
# Now go into woody plot data in order to pull out the tree species present in each plot,
# the number of tree of that species, and the total cover of each species in each plot
if (Sys.getenv("USER")=='meaganoldfather') setwd("/Users/meaganoldfather/Dropbox/PepperwoodVegPlots/Database/2013/Woody2013/Data/OriginalCSV/Woody/") else setwd("/Users/david/Documents/Projects/Dropbox/PepperwoodVegPlots_2013/Database/2013/Woody2013")
# final folder is 'Woody'
file.list <-dir(paste(getwd(),"/", sep='')) # file list is a character vector of the names of woody csv file
strlen <- nchar(file.list[1]) # strlen is the length of all of the file names
plot.list <- substr(file.list,strlen-10,strlen-4) # extracts plot numbers from file name vector
# One data frame per plot; first 3 rows of each woody csv are skipped.
mega.data<-lapply(file.list, read.csv,skip=3)
names(mega.data) <- plot.list
# Make all the lists of tree vectors
trees<-vector("list", length(mega.data))
names(trees) <- plot.list
tree.cover.by.plot<-vector("list", length(mega.data))
names(tree.cover.by.plot) <- plot.list
trees.clean<-vector("list", length(mega.data))
names(trees.clean)<-plot.list
# And for saplings
saplings<-vector("list", length(mega.data))
names(saplings) <- plot.list
sapling.cover.by.plot<-vector("list", length(mega.data))
names(sapling.cover.by.plot) <- plot.list
#saplings.clean<-vector("list", length(mega.data))
#names(saplings.clean)<-plot.list
# Per-plot pass: standardize columns, extract live trees/saplings, and build
# per-species count and basal-area tables.
for (i in 1:length(mega.data)) #iterates through the whole list of data.frames corresponding to the number of plots worth of .csv files entered
{
Plot<-plot.list[i] #Pulls the plot out of the plot.list variable made above
mega.data[[i]]<-cbind(Plot=Plot, mega.data[[i]]) #Inserts a plot column before the rest of the columns in the data.frame
colnames(mega.data[[i]])<-c("Plot", "Quad", "Type", "TreeNum", "Species", "Confidence", "Dead.Stump", "SA.Stump.Height_cm", "SA.Stump.BD_cm", "SA.Branch.Num", "DBH_cm", "X_cm", "Y_cm", "Notes") #Sets column names of data.frame to the names defined above
mega.data[[i]]<-mega.data[[i]][,1:14]
# NOTE(review): `mega.data` is a list, so `mega.data$Species` is NULL and each
# of the next four replacements is a silent no-op (logical(0) index). They
# were presumably meant to operate on `mega.data[[i]]`; the oak lumping is
# redone later on Plot.Info.All near the bottom of the script -- confirm.
mega.data[(mega.data$Species=="QUELOB"), "Species"]<-"QUEDEC"
mega.data[(mega.data$Species=="QUEWIS"), "Species"]<-"QUEDEC"
mega.data[(mega.data$Species=="QUEDOU"), "Species"]<-"QUEDEC"
mega.data[(mega.data$Species=="QUEKEL"), "Species"]<-"QUEDEC"
# Changes QUELOB, QUEDOU, QUEKEL, QUEWIS individuals into QUEDEC
# Live trees only (Type contains 'TR', Dead.Stump empty/NA); keep id columns
# 1:5 plus DBH (column 11).
trees[[i]]<-mega.data[[i]][intersect(grep('TR',mega.data[[i]]$Type),(which(is.na(mega.data[[i]]$Dead.Stump)|(mega.data[[i]]$Dead.Stump=='')))), c(1:5,11)] # modified version of command below because 1302 had TR listed as ' TR' so wasn't matching with first condition
head(trees[[i]])
trees[[i]]<-trees[[i]][order(trees[[i]]$Species),] #alphabetizes according to Species column
trees.clean[[i]]<-subset(trees[[i]], subset=(!is.na(trees[[i]][,6]))) # removes trees with NA for DBH
tree.species<-unique(trees.clean[[i]]$Species) #tree.species is defined as a vector of the unique values of the species
tree.cover.by.plot[[i]]<-as.data.frame(matrix(NA, nrow=length(tree.species), ncol=4))
names(tree.cover.by.plot[[i]])<-c("Plot", "Species", "Number","Basal.Area_cm2")
if(length(tree.species)>0)
{
tree.cover.by.plot[[i]][,1]<-Plot # First column is the Plot
tree.cover.by.plot[[i]][,2]<-tree.species # Second column is the Tree Species
for(j in 1:length(tree.species))
{
tree.cover.by.plot[[i]]$"Number"[j] <- length(which(trees.clean[[i]]$Species==tree.species[j]))
# Basal area = sum over stems of pi/4 * DBH^2 (circle area from diameter).
tree.cover.by.plot[[i]]$"Basal.Area_cm2"[j]<-sum(pi*(1/4)*(as.numeric(trees.clean[[i]][trees.clean[[i]]$Species==tree.species[j],6]))^2) # area calculated with diameter
}
}
# Live saplings (Type contains 'SA'); keep id columns plus stump height,
# basal diameter and branch count (columns 8:10).
saplings[[i]]<-mega.data[[i]][intersect(grep('SA',mega.data[[i]]$Type),(which(is.na(mega.data[[i]]$Dead.Stump)|(mega.data[[i]]$Dead.Stump=='')))), c(1:5,8:10)]
saplings[[i]]<-saplings[[i]][order(saplings[[i]]$Species),]
sapling.species<-unique(saplings[[i]]$Species)
sapling.cover.by.plot[[i]]<-as.data.frame(matrix(NA, nrow=length(sapling.species), ncol=2))
names(sapling.cover.by.plot[[i]])<-c("Plot", "Species")
if(length(sapling.species)>0)
{
Plot<-plot.list[i]
sapling.cover.by.plot[[i]][,1]<-Plot
sapling.cover.by.plot[[i]][,2]<-sapling.species
for(j in 1:length(sapling.species))
{
sapling.cover.by.plot[[i]]$Number[j]<-sum(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],8]))
# NOTE(review): sapling area uses column 7 (stump height?) times pi/4 * col8^2
# (branch count?) -- the column semantics look suspect; verify against the
# csv layout before trusting these numbers.
sapling.cover.by.plot[[i]]$Basal.Area_cm2[j]<-sum(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],7])*pi/4*(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],8]))^2)
}
}
}
head(tree.cover.by.plot)
# Stack the per-plot species tables and attach plot attributes.
tree.cover<-do.call(rbind, tree.cover.by.plot)
head(tree.cover)
head(plot.info)
colnames(tree.cover)[1]<-"Plot.ID" # change the name of the first column form 'Plot' to 'Plot.ID'
Plot.Info.Tree<- merge(tree.cover, plot.info, by="Plot.ID") # merges tree cover and plot info by the Plot ID column
Plot.Info.Tree
total.area<-aggregate(tree.cover$Basal.Area_cm2~tree.cover$Plot.ID, FUN=sum) # sums tree cover of all species by plot
colnames(total.area)[1]<- "Plot.ID"
colnames(total.area)[2]<-"Total.Basal.Area_cm2"
total.area
Plot.Info.Tree<-merge(Plot.Info.Tree, total.area, by="Plot.ID") # add column in Plot.Info.Tree that is the total basal area of all tree species in each plot
Plot.Info.Tree$Percent.Cover<-round((Plot.Info.Tree$Basal.Area_cm2/Plot.Info.Tree$Total.Basal.Area_cm2)*100,2)
# To swap columns order
Plot.Info.Tree<-cbind(Plot.Info.Tree[,1], Plot.Info.Tree[,5:8], Plot.Info.Tree[,2], Plot.Info.Tree[,3:4], Plot.Info.Tree[,9:10])
colnames(Plot.Info.Tree)[1]<-"Plot.ID"
colnames(Plot.Info.Tree)[6]<- "Species"
Plot.Info.Tree$Type<-'TR'
Plot.Info.Tree
#write.csv(Plot.Info.Tree, file="/Users/meaganoldfather/Dropbox/PepperwoodVegPlots/Database/2013/Woody2013/Outputs/Plot.Info.Tree.csv")
# Now do the same thing for the saplings and merge the two so that each species in each plot has 2 rows (unless) the species
# was only recorded as a tree or sapling
head(plot.info)
# NOTE(review): this loop is a near-verbatim copy of the sapling half of the
# first loop above; consider factoring into a function to avoid drift.
mega.data<-lapply(file.list, read.csv,skip=3) # need to recall mega.data so things do not go haywire
names(mega.data) <- plot.list
for (i in 1:length(mega.data)) #iterates through the whole list of data.frames corresponding to the number of plots worth of .csv files entered
{
Plot<-plot.list[[i]] #Pulls the plot out of the plot.list variable made above
mega.data[[i]]<-cbind(Plot=Plot, mega.data[[i]]) #Inserts a plot column before the rest of the columns in the data.frame
colnames(mega.data[[i]])<-c("Plot", "Quad", "Type", "TreeNum", "Species", "Confidence", "Dead.Stump", "SA.Stump.Height_cm", "SA.Stump.BD_cm", "SA.Branch.Num", "DBH_cm", "X_cm", "Y_cm", "Notes") #Sets column names of data.frame to the names defined above
mega.data[[i]]<-mega.data[[i]][,1:14]
saplings[[i]]<-mega.data[[i]][intersect(grep('SA',mega.data[[i]]$Type),(which(is.na(mega.data[[i]]$Dead.Stump)|(mega.data[[i]]$Dead.Stump=='')))), c(1:5,8:10)]
saplings[[i]]<-saplings[[i]][order(saplings[[i]]$Species),]
sapling.species<-unique(saplings[[i]]$Species)
sapling.cover.by.plot[[i]]<-as.data.frame(matrix(NA, nrow=length(sapling.species), ncol=2))
names(sapling.cover.by.plot[[i]])<-c("Plot", "Species")
if(length(sapling.species)>0)
{
Plot<-plot.list[i]
sapling.cover.by.plot[[i]][,1]<-Plot
sapling.cover.by.plot[[i]][,2]<-sapling.species
for(j in 1:length(sapling.species))
{
sapling.cover.by.plot[[i]]$Number[j]<-sum(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],8]))
sapling.cover.by.plot[[i]]$Basal.Area_cm2[j]<-sum(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],7])*pi/4*(as.numeric(saplings[[i]][saplings[[i]]$Species==sapling.species[j],8]))^2)
}
}
}
# # # # # # # # # # # # # # # # # # # # # #
# Stack sapling tables and attach plot attributes (mirrors the tree section).
sapling.cover<-do.call(rbind, sapling.cover.by.plot)
head(sapling.cover)
colnames(sapling.cover)[1]<-"Plot.ID" # change the name of the first column form 'Plot' to 'Plot.ID'
Plot.Info.Sapling<- merge(sapling.cover, plot.info, by="Plot.ID") # merges tree cover and plot info by the Plot ID column
Plot.Info.Sapling
total.area.sap<-aggregate(sapling.cover$Basal.Area_cm2~sapling.cover$Plot.ID, FUN=sum)
colnames(total.area.sap)[1]<- "Plot.ID"
colnames(total.area.sap)[2]<-"Total.Basal.Area_cm2"
total.area.sap
Plot.Info.Sapling<-merge(Plot.Info.Sapling, total.area.sap, by="Plot.ID") # add column in Plot.Info.Tree that is the total basal area of all tree species in each plot
Plot.Info.Sapling$Percent.Cover<-round((Plot.Info.Sapling$Basal.Area_cm2/Plot.Info.Sapling$Total.Basal.Area_cm2)*100,2)
# To swap columns order
Plot.Info.Sapling<-cbind(Plot.Info.Sapling[,1], Plot.Info.Sapling[,5:8], Plot.Info.Sapling[,2], Plot.Info.Sapling[,3:4], Plot.Info.Sapling[,9:10])
colnames(Plot.Info.Sapling)[1]<-"Plot.ID"
colnames(Plot.Info.Sapling)[6]<- "Species"
Plot.Info.Sapling$Type<-'SA'
head(Plot.Info.Sapling)
head(Plot.Info.Tree)
# Now try to combine the tree and sapling data....
Plot.Info.All<-rbind(Plot.Info.Tree, Plot.Info.Sapling)
head(Plot.Info.All) #Yay!
# Plot attempts to view relationship between sapling abundace and tree abundance & sapling cover and tree cover
# Also might be very interesting to bring in seedling/juvenile counts
Sapling.Species<-unique(Plot.Info.Sapling$Species) # missing QUELOB, QUEDOU, QUEKEL, QUEWIS --> MAKE ALL QUEDEC? Yes- in loop (not done)
Tree.Species<-unique(Plot.Info.Tree$Species) # missing CEOCUN, UNK21, UNK28, UNK30,UNK32, TORCAL, UNK47
# Lump the deciduous oaks into QUEDEC on the combined table (this is where
# the lumping actually takes effect; see the no-op note in the first loop).
Plot.Info.All[(Plot.Info.All$Species=="QUELOB"), "Species"]<-"QUEDEC"
Plot.Info.All[(Plot.Info.All$Species=="QUEWIS"), "Species"]<-"QUEDEC"
Plot.Info.All[(Plot.Info.All$Species=="QUEDOU"), "Species"]<-"QUEDEC"
Plot.Info.All[(Plot.Info.All$Species=="QUEKEL"), "Species"]<-"QUEDEC"
match(Sapling.Species, Tree.Species)
match(Tree.Species, Sapling.Species)
Species.Both<-c("QUEGAR", "PSEMEN", "QUEAGR", "UMBCAL", "AESCAL", "QUEDEC", "HETARB", "ARBMEN", "ARCMAN", "AMOCAL",
"FRACAL", "BACPIL", "QUEBER", "NOTDEN", "ADEFAS")
# Drop species recorded only as trees or only with unknown codes.
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="CEOCUN"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="UNK21"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="UNK28"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="UNK30"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="UNK32"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="TORCAL"))
Plot.Info.All<-subset(Plot.Info.All, subset=(Plot.Info.All$Species!="UNK47"))
par(mfrow=c(1,1))
par(ask=TRUE)
i=1
# NOTE(review): this plotting loop is unfinished -- plot() and points() are
# called with no arguments and will error if executed.
for (i in 1:length(Species.Both)) {
plot()
points()
}
#############
oaks<-c("QUELOB", "QUEWIS", "QUEDOU", "QUEKEL")
# But also want to sum them...
# NOTE(review): unfinished -- aggregate() has no arguments and will error.
aggregate()
|
05e45c81fc6b2a10e2a4e926b188084a7058ded7
|
9b59d5c886ddf9ca154449cd622c78e2c39f5d77
|
/Cell DET calculations.R
|
ae7c4f223b54aa2e6306fb6703f0172b3d33579b
|
[] |
no_license
|
sealavi/Alavi-et-al.-A-quantitative-framework-for-identifying-patterns-of-route-use-in-animal-movement-data
|
eef65851a9fe9587e2045fcc7b58af3abd7869d9
|
c35bd5b6ae8f0a6b1601fecce21a92a3d813426b
|
refs/heads/main
| 2023-08-25T14:27:20.201720
| 2021-11-02T21:35:54
| 2021-11-02T21:35:54
| 423,683,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,109
|
r
|
Cell DET calculations.R
|
# Setup: load the gridded route data and the packages used below.
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific.
setwd("C:/Users/avining/Downloads")
tracks <- read.csv("route_data_subset_gridded.csv")
library(dplyr) # filter(), mutate()
library(spatstat) # as.im(), connected() used inside get_cell_DET()
# Compute a per-cell determinism (DET) table from a sequence of visited grid
# cells, using a recurrence-plot construction: a binary matrix marks every
# pair of time points that revisit the same cell, and contiguous marked runs
# (labelled via spatstat's connected components) are counted as "repeats"
# when they span at least `minl` unique cells.
#
# Args:
#   x:    sequence of cell identifiers (coerced to numeric).
#   minl: minimum number of unique cells a repeated sub-sequence must contain
#         to be counted as a repeat.
# Returns:
#   A data frame with one row per cell: visits, recursions, repeats, and
#   DET = repeats / recursions.
get_cell_DET <- function(x, minl){
x = as.numeric(x)
#Depending on the dataset it may be desirable to filter out consecutive visits
#to the same flower. See function below and delete '#' in the line below to use
#x = filterout(Ldata = x) #already done
#-----set up matrix resembling a recurrence plot, where a 1 indicates a repeat
#-----visit and 0 indicates the absence of a repeat.
#if (length(unique(x)) < minl) return(NA)
#if (length(x) <= 3*minl) return(NA)
#if (length(unique(x)) == length(x)) return(0)
det1 = matrix(cbind(rep(x,length(x))),nrow=length(x))
tdet = t(det1)
# det[i, j] == 1 exactly when x[i] == x[j] (a recurrence).
det = ((det1 - tdet) == 0) * 1
#set the main diagonal equal to zero so it won't be included in the calculation
diag(det) = 0
#Use spatstat package to create a 'countour map' of the matrix,
#which assigns all sets of contiguous 1's a unique number
yi <- as.im(det)
ycOut <- connected(yi, background = 0)
yc <- ycOut$v
#Depending on the dataset it may be desirable to filter out diagonals perpendicular to #the main diagonal. Code is provided for the 'removeperpdiag' function below.
#Delete "#" from the line below to filter out perpendicular diagonals
#yc = removeperpdiag(yc,minl)
#Note: this code may take several minutes to run for very long sequences
#---- filter out short repeats: a 'trapline' should include more unique resources
#---- than the minimum cutoff (minl)
#make an alternative DET matrix that contains the resource IDs
det2 = matrix(rep(x,nrow(det)),nrow=nrow(det),byrow=TRUE)*det
# Division by 2 relies on the recurrence matrix being symmetric, so each
# recurrence appears once above and once below the diagonal.
recCounts <- as.data.frame(table(det2)/2) #get number of times each cell occurs in the top corner of the matrix
#make a dataframe with the number of times each resource appears in a diagonal
# Flattens both label and id matrices column-major; rows align elementwise.
listofseq = data.frame(group = yc[1:length(yc)], seq=det2[1:length(det2)])
#how many unique resources are in the diagonal
uniquevisits = rowSums((table(listofseq)>0)*1)
#only count diagonals with at least 'minl' number of unique resources
longenough = (uniquevisits >= minl)*table(yc)
listof_longenoug_seq <- listofseq[listofseq$group %in% names(longenough[! longenough == 0]),]
repCounts <- as.data.frame(table(listof_longenoug_seq$seq)/2)
cellDET <- as.data.frame(table(x))
colnames(recCounts) <- c("cell", "recursions")
colnames(repCounts) <- c("cell", "repeats")
colnames(cellDET) <- c("cell", "visits")
# Outer merges keep cells that have visits but no recursions/repeats.
cellDET <- merge(cellDET, recCounts, by = "cell", all = TRUE)
cellDET <- merge(cellDET, repCounts, by = "cell", all = TRUE)
cellDET$repeats[is.na(cellDET$repeats)] <- 0
cellDET <- mutate(cellDET, DET = repeats/recursions)
return(cellDET)
}
# Compute the per-cell DET table for each individual's movement track.
# BUG FIX: the original iterated over levels(tracks$ID). Under R >= 4.0,
# read.csv() returns character columns (stringsAsFactors = FALSE), so
# levels() is NULL and 1:length(levels(...)) becomes 1:0, breaking the loop.
# sort(unique(as.character(...))) reproduces the factor-level ordering for
# both factor and character ID columns.
track_ids <- sort(unique(as.character(tracks$ID)))
cellDETs <- vector("list", length = length(track_ids))
names(cellDETs) <- track_ids
for (i in seq_along(track_ids)) {
  # Collapse consecutive repeats of the same grid cell before scoring.
  sequence <- rle(filter(tracks, ID == track_ids[i])$cell)$values
  cellDETs[[i]] <- get_cell_DET(sequence, minl = 3)
}
|
dc5160e30e304be6bb6ffea6280c56509725328b
|
e0c4ae966b0dad0b1ad003c3d896842132c2e104
|
/FigureProd/MaxWindDG.R
|
6aea0c24efc1f42615ed44bb99b7b7d2cc8b5a05
|
[] |
no_license
|
alexSamalot19/M.S.-Paper
|
134e1454ae81e1ef9c11ea42d2306eaa933bd25d
|
70f214172c25f002b07892c6dfac8483668a78aa
|
refs/heads/master
| 2020-03-09T19:50:56.555008
| 2018-04-12T19:55:51
| 2018-04-12T19:55:51
| 128,968,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,761
|
r
|
MaxWindDG.R
|
# Makes WRF and PP PDF with outputs from: F:\KrIC2\DIR1Sph.m or *Exp.m
#
# Reads the maximum-wind outputs for five prediction methods, trims each to
# its (pre-computed) empirical 95% band, and draws comparison density plots
# and boxplots. Refactored to remove the five-fold copy-paste of the read /
# trim / tag steps; final variable names and all plot strings are unchanged.
# Read one "Max<name>Out.csv" file and echo its summary (exploratory).
read_max_out <- function(name) {
  dat <- read.csv(paste0("F:\\KrIC2\\maps\\Max", name, "Out.csv"), header = FALSE)
  print(summary(dat))
  dat
}
WRFOutIn   <- read_max_out("WRF")
ObsOutIn   <- read_max_out("Obs")
R1ExpOutIn <- read_max_out("R1Exp")
R1SphOutIn <- read_max_out("R1Sph")
KalOutIn   <- read_max_out("Kal")
# Keep values strictly inside (lo, hi). The cutoffs below were read off the
# printed 2.5%/97.5% quantiles, so they are hard-coded, data-derived numbers:
# re-derive them if the input files change.
trim_open <- function(v, lo, hi) {
  kept <- v[v > lo]
  kept[kept < hi]
}
print(quantile(WRFOutIn$V1, c(0.025, 0.975))) # DtmnDataRang
WRFOut95 <- trim_open(WRFOutIn$V1, 3.64, 15.43)
print(summary(WRFOut95))
print(quantile(ObsOutIn$V1, c(0.025, 0.975)))
ObsOut95 <- trim_open(ObsOutIn$V1, 1.7882, 14.4)
print(head(ObsOut95))
print(quantile(R1ExpOutIn$V1, c(0.025, 0.975)))
R1ExpOut95 <- trim_open(R1ExpOutIn$V1, 3.516, 14.545)
print(head(R1ExpOut95))
print(quantile(R1SphOutIn$V1, c(0.025, 0.975)))
R1SphOut95 <- trim_open(R1SphOutIn$V1, 3.573, 14.76)
print(head(R1SphOut95))
print(quantile(KalOutIn$V1, c(0.025, 0.975)))
KalOut95 <- trim_open(KalOutIn$V1, 3.689, 15.736)
print(head(KalOut95))
library(ggplot2)
library(scales)
# One-column data frame ("Wind") tagged with its prediction method.
make_wind_df <- function(values, label) {
  data.frame(Wind = values, Estimation = label)
}
# Row order (Kal, OBS, R1Exp, R1Sph, WRF) matches the original rbind().
WRFOutlengths <- rbind(
  make_wind_df(KalOut95, 'Kal'),
  make_wind_df(ObsOut95, 'OBS'),
  make_wind_df(R1ExpOut95, 'R1Exp'),
  make_wind_df(R1SphOut95, 'R1Sph'),
  make_wind_df(WRFOut95, 'WRF')
)
head(WRFOutlengths)
#PDF's>>
RMSEplot<-ggplot(WRFOutlengths,aes(Wind,fill=Estimation))+geom_density(alpha=0.2)
RMSEplot+ xlab("Wind Speed (m/s)") + ylab("Density") + scale_y_continuous(labels=percent) +theme(panel.background = element_rect(fill = 'white'),
axis.title.x = element_text(size=15),axis.title.y = element_text(size=15),
axis.text.x = element_text(size=15),axis.text.y = element_text(size=15),
legend.text = element_text(size=15),
legend.title = element_text(size=15)) + scale_fill_discrete(name="Prediction\nMethod",
breaks=c("Kal","OBS","R1Exp", "R1Sph", "WRF"),
labels=c("KF","Obs","R2(Exp)", "R2(Sph)","WRF"))
#ggplot(ABSlengths,aes(AbsBias,fill=Estimation))+geom_density(alpha=0.2)
# #<<
#Boxplots>>
boxplot(WRFOutlengths$Wind~WRFOutlengths$Estimation, main="Final Model Output WS AP 107 Storms",
ylab="Wind Speed (m/s)", col=(c("indianred1","gold","palegreen","chocolate","lightskyblue1")),
names=c("KF","Obs","R2(Exp)", "R2(Sph)","WRF"))
# #<<
# Scatter comparisons of raw (untrimmed) model outputs.
plot(WRFOutIn$V1,R1ExpOutIn$V1)
plot(KalOutIn$V1,R1ExpOutIn$V1)
|
51fedaf4421d63f918c724a925ede3a3c06467ef
|
bb7010d536d6ba5fe95d706622900ec85206ab48
|
/R/getMeta.R
|
3305baad13fb7fda8fc10abd72f913d6097de19f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
andreagrioni/SomaDataIO
|
0c214efaa79e2977e57e052801f40ed4e66f47db
|
100cee4da1e70979b8f577d0bf7c2684f5e9434b
|
refs/heads/master
| 2023-01-05T10:11:40.630765
| 2020-10-23T22:26:15
| 2020-10-23T22:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,321
|
r
|
getMeta.R
|
#' Get Meta Data Field Names
#'
#' Return a character vector of field names of the meta data
#' for an ADAT, or `soma.data` object.
#'
#' @inheritParams getFeatures
#' @return A character vector of ADAT meta data names
#'   or an integer number corresponding to the length of the
#'   feature names (if `n = TRUE`).
#' @author Stu Field
#' @examples
#' meta.vec <- getMeta(example_data)
#' head(meta.vec, 20)
#' getMeta(example_data, n = TRUE)
#'
#' # test data.frame and character S3 methods
#' identical(getMeta(example_data), getMeta(names(example_data))) # TRUE
#' @importFrom usethis ui_stop
#' @export
getMeta <- function(x, n = FALSE) UseMethod("getMeta")
#' Fallback for objects with no matching S3 method.
#' @noRd
#' @export
getMeta.default <- function(x, n) {
  usethis::ui_stop("Couldn't find a S3 method for this object: {class(x)}.")
}
#' Dispatch on the column names of a data frame.
#' @noRd
#' @export
getMeta.data.frame <- function(x, n = FALSE) getMeta(names(x), n = n)
#' @noRd
#' @export
getMeta.soma_adat <- getMeta.data.frame
#' @noRd
#' @export
getMeta.list <- getMeta.data.frame
#' S3 getMeta method for matrix
#' @noRd
#' @export
getMeta.matrix <- function(x, n = FALSE) getMeta(colnames(x), n = n)
#' S3 getMeta method for character
#'
#' Meta fields are the names that are NOT sequence (feature) names.
#' @noRd
#' @export
getMeta.character <- function(x, n = FALSE) {
  is_meta <- !is.seq(x)
  if ( n ) {
    return(sum(is_meta))
  }
  x[is_meta]
}
|
66319f074bf893e143b7788ec60a9da10423afb8
|
9c88b1d54cce40a6ad64c6b1518f28567c8bba9b
|
/run_analysis.R
|
0a074b815e11f776adaecca90072353c99f89162
|
[] |
no_license
|
PaoloLuciano/GettingAndCleaningData_CourseProject
|
afb45ec841cf5b58202319350815c582adad57d4
|
b4de137ad7e0b3fcf1e0dd8802e9e24982690f14
|
refs/heads/master
| 2016-09-06T01:13:08.097157
| 2015-04-26T02:33:03
| 2015-04-26T02:33:03
| 34,594,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,662
|
r
|
run_analysis.R
|
#
# Data Science Specialization - Getting and Cleaning Data
#
# Coursera Project
#
# March 2015
#
# In this project we collect and clean data from a study performed in UCI for wearable computing.
# The data was obtained from: http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
#---------------------------------
#0. Preamble
#-------------------------------------------------------------------------------------------------------
library(dplyr)
library(reshape2)
#1. Getting Data
#-------------------------------------------------------------------------------------------------------
# First we get the data and extract the files.
# For simplicity, both the .zip and the extracted files will be in the same dir
if (!file.exists("UCI HAR Dataset")) { # UCI HAR Dataset must be a folder
  dir.create("UCI HAR Dataset")
  url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(url, destfile = "UCI HAR Dataset/UCI HAR Dataset.zip", mode = "wb")
  unzip("UCI HAR Dataset/UCI HAR Dataset.zip", overwrite = TRUE)
}
#2. Reading the relevant data
#-------------------------------------------------------------------------------------------------------
#2.1 Reading measurments
X_test <- read.table("UCI HAR Dataset/test/X_test.txt") #Test Set - Test Values
Y_test <- read.table("UCI HAR Dataset/test/y_test.txt") #Test Labels - Activity labels
S_test <- read.table("UCI HAR Dataset/test/subject_test.txt") #Test Subjects - Test participants
X_train <- read.table("UCI HAR Dataset/train/X_train.txt") #Training Set - Train Values
Y_train <- read.table("UCI HAR Dataset/train/y_train.txt") #Training Label - Activity labels
S_train <- read.table("UCI HAR Dataset/train/subject_train.txt") #Training Subjects - Test participants
#2.2 Reading Labels
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt") #Labels
features_labels <- read.table("UCI HAR Dataset/features.txt") #Measurments
#3. Merging the data and naming everyhting
#-------------------------------------------------------------------------------------------------------
#3.1 Naming and clasifying data
names(activity_labels) <- c("number", "activity")
names(features_labels) <- c("id", "features")
#Activities
names(Y_test) <- "act_number"
names(Y_train) <- "act_number"
#Subjects
names(S_train) <- "subject"
names(S_test) <- "subject"
#3.2 Puting pieces together
#Since merge unorders the data we need to keep track of the original indexes and then return the data
#to its original order once merged.
# BUG FIX: the original merge() calls used `sort = F, )` -- `F` is a
# reassignable alias (use FALSE) and the trailing comma passed an empty
# argument; both are corrected below. seq_len() replaces c(1:length(...)),
# which misbehaves on empty input.
Y_test$id <- seq_len(nrow(Y_test))
Y_test <- merge(Y_test, activity_labels, by.x = "act_number", by.y = "number", sort = FALSE)
Y_test <- Y_test[order(Y_test$id), ]
Y_test <- Y_test[c("act_number", "activity")]
#Analogous to the train set
Y_train$id <- seq_len(nrow(Y_train))
Y_train <- merge(Y_train, activity_labels, by.x = "act_number", by.y = "number", sort = FALSE)
Y_train <- Y_train[order(Y_train$id), ]
Y_train <- Y_train[c("act_number", "activity")]
#Naming Features
names(X_test) <- features_labels$features
names(X_train) <- features_labels$features
#Creating 2 data frames
test <- cbind(S_test, Y_test, X_test)
train <- cbind(S_train, Y_train, X_train)
#Creating a final data frame with everything on it.
data <- rbind(test, train)
#Revising the data Frame
#NOTE: We can't arrange the data frame, the way we would like to because of a bug detecting repeated columns in the arrange func (dplyr).
data[1:100, c(1, 2, 3)]
data[10290:10299, c(1, 2, 3)] #We only revise the first 3 relevant columns
#Saving the full un-ordered data set for future references and removing all that we don't need to free RAM
write.csv(data, file = "full_data_set.csv")
rm(S_test)
rm(S_train)
rm(X_test)
rm(X_train)
rm(Y_test)
rm(Y_train)
rm(test, train)
rm(activity_labels, features_labels)
#4. Obtaining relevant mean and sd columns
#-------------------------------------------------------------------------------------------------------
#4.1 Getting the indexes of the mean and standard deviation columns
std_index <- grep("std", names(data))
mean_index <- grep("mean", names(data))
index <- sort(c(std_index, mean_index))
#We want to include the first three columns of the data set that contain the participants and the activity
index <- c(1, 2, 3, index)
#4.2 Thinning the big data set
data.1 <- data[, index]
#4.3 Ordering
#Having eliminated the problematic columns we can order our data.
#Since we are dealing with 30 participants each with 6 activities it would be natural to arrange it in that order
data.1 <- arrange(data.1, subject, act_number)
write.csv(data.1, file = "relevant_data.csv")
#5. Summarizing the data
#-------------------------------------------------------------------------------------------------------
#5.1 Melting the data according to the features
#We extract the columns to melt (everything after the first three id columns).
features_names <- names(data.1)
features_names <- features_names[4:length(features_names)]
# BUG FIX: the melt() argument is `measure.vars`; the original relied on
# partial matching with `measure.var`.
clean_data <- melt(data.1, id = c("subject", "activity"), measure.vars = features_names)
#5.2 Summarising to obtain the mean, giving our final tidy data set
clean_data <- summarise(group_by(clean_data, subject, activity, variable), mean_features = mean(value))
#5.3 Creating the output files
write.csv(clean_data, file = "clean_data.csv")
# BUG FIX: the write.table() argument is `row.names` (the original used the
# partially-matched `row.name`).
write.table(clean_data, file = "clean_data.txt", row.names = FALSE)
|
303201778074bcff6960f9d74aa5f356175427f3
|
163ceeb94d49b70d43cd707cbc5de03164a1ce50
|
/tests/testthat/test-rollmean.R
|
2f61393359d7cc675a5e9cc4eeafd699893e50a6
|
[] |
no_license
|
privefl/bigutilsr
|
e8cce921638d1327a1038f6ac9b237eae9ca87de
|
bb760d109193d2163e869d9d231a8fdcba2ac96e
|
refs/heads/master
| 2022-12-27T01:39:56.076386
| 2022-12-20T14:36:53
| 2022-12-20T14:36:53
| 199,856,656
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
test-rollmean.R
|
# Unit tests for rollmean(): a rolling-mean smoother parameterised by a
# window size. Covers the identity case, smoothing behaviour, input
# validation, and two fixed-point/limit cases.
context("test-rollmean")

test_that("rollmean() works", {
  vals <- rnorm(100)
  # Window size 0 must return the input unchanged.
  expect_equal(rollmean(vals, 0), vals)
  # Wider windows smooth more: column variance decreases as size grows.
  smoothed <- sapply(c(0, 1, 3, 5, 10, 20), function(w) rollmean(vals, w))
  expect_false(is.unsorted(rev(apply(smoothed, 2, var))))
  # Out-of-range window sizes are rejected with informative errors.
  expect_error(rollmean(vals, 50), "Parameter 'size' is too large.")
  expect_error(rollmean(vals, -1), "Parameter 'size' must be positive.")
  # A constant series is a fixed point of the smoother.
  expect_equal(rollmean(rep(1, 100), 20), rep(1, 100))
  # An alternating 0/1 series averages to ~0.5 under a wide window.
  expect_equal(rollmean(rep(c(0, 1), 100), 50), rep(0.5, 200), tolerance = 1e-2)
})
|
70bf6db380dd89fb1f545c9e0999b052d3a12178
|
fda540791ba58168598b8320571356a565f9faf1
|
/libs/2_enoe.R
|
49f04dbecfa8d623995da7c3f36e040a12777795
|
[] |
no_license
|
monzalo14/conciliacion
|
5c3e1272090d3575552ab9b58b5b514ab9cfe58f
|
5e4670ec32026a85f5bedd0f01decee1cec01394
|
refs/heads/master
| 2021-01-12T08:58:27.217523
| 2017-05-04T07:16:47
| 2017-05-04T07:16:47
| 76,738,998
| 1
| 3
| null | 2017-02-15T18:40:55
| 2016-12-17T18:05:28
|
R
|
UTF-8
|
R
| false
| false
| 8,603
|
r
|
2_enoe.R
|
# Process the raw extended ENOE (Mexican labour-force survey) extract:
# recode questionnaire items (p3*, p6*, p7*, p9*, p10*) into readable labels,
# reconstruct monthly income, flag panel UPMs, and save the processed data.
# Assumes columns such as `salario` and `tweet`-style survey fields exist in
# the raw RDS — TODO confirm against the upstream extraction step.
library(rformat)

df <- readRDS("../data/enoe_ampliado_raw.rds")

df <- df %>%
  # Section 3: job characteristics (boss, pay, contract type/duration, UPM id).
  dplyr::mutate(
    p3a = as.character(p3a),
    tiene_jefe = ifelse(p3a == "1", "si", ifelse(p3a == "2", "no", NA)),
    p3h = as.character(p3h),
    recibe_pago = ifelse(p3h == "1", "si", ifelse(p3h == "9", "ns", "no")),
    p3k1 = as.character(p3k1),
    p3k2 = as.character(p3k2),
    contrato = ifelse(p3k1 == "2", "base", ifelse(p3k1 == "9", "ns",
                 ifelse(p3k2 == "1", "temporal.0a2m",
                 ifelse(p3k2 == "2", "temporal.2a6m",
                 ifelse(p3k2 == "3", "temporal.6a12m",
                 ifelse(p3k2 == "4", "a.termino", NA))))
    )),
    contrato.base = ifelse(p3k1 == "2", "si", NA),
    contrato.temporal = ifelse(p3k1 == "1", "si", NA),
    temporal.duracion = ifelse(p3k2 == "1", "temporal.0a2m",
                        ifelse(p3k2 == "2", "temporal.2a6m",
                        ifelse(p3k2 == "3", "temporal.6a12m",
                        ifelse(p3k2 == "4", "a.termino", NA)))),
    # Zero-pad the sampling-unit id to 7 characters for later joins.
    upm = stringr::str_pad(as.character(upm), width = 7, pad = "0", side = "left")
  ) %>%
  dplyr::mutate_each_(., funs(as.character), vars = c(paste0("p3l", c(1:5, 9)), paste0("p3m", c(1:9)))) %>%
  # Section 3 continued: benefits (p3l*, p3m*), firm size (p3q), job start (p3r).
  dplyr::mutate(
    r3l.aguinaldo = ifelse(p3l1 == "1", "si", "no"),
    r3l.vacaciones.pagadas = ifelse(p3l2 == "2", "si", "no"),
    r3l.utilidades = ifelse(p3l3 == "3", "si", "no"),
    r3l.ninguna.anteriores = ifelse(p3l4 == "4", "si", "no"),
    r3l.nada = ifelse(p3l5 == "5", "si", "no"),
    r3m.credito.viv = ifelse(p3m1 == "1", "si", "no"),
    r3m.guarderia = ifelse(p3m2 == "2", "si", "no"),
    r3m.maternidad.pat = ifelse(p3m3 == "3", "si", "no"),
    r3m.fondo.retiro = ifelse(p3m4 == "4", "si", "no"),
    r3m.seguro.vida = ifelse(p3m5 == "5", "si", "no"),
    r3m.seguro.gmm = ifelse(p3m6 == "6", "si", "no"),
    r3m.prestamos.caja = ifelse(p3m7 == "7", "si", "no"),
    r3m.ninguna.anteriores = ifelse(p3m8 == "8", "si", "no"),
    r3m.no.sabe = ifelse(p3m9 == "9", "si", "no"),
    p3q = as.character(p3q),
    r3q.num.trab = ifelse(p3q == "01", "t001",
                   ifelse(p3q == "02", "t002a005",
                   ifelse(p3q == "03", "t006a010",
                   ifelse(p3q == "04", "t011a015",
                   ifelse(p3q == "05", "t016a020",
                   ifelse(p3q == "06", "t021a030",
                   ifelse(p3q == "07", "t031a050",
                   ifelse(p3q == "08", "t051a100",
                   ifelse(p3q == "09", "t101a250",
                   ifelse(p3q == "10", "t251a500",
                   ifelse(p3q == "11", "t501omas", "no.sabe"
                   ))))))))))),
    r3q.num.trab.chico = ifelse(p3q %in% paste0("0", 1:4), "t01a15",
                         ifelse(p3q %in% c(paste0("0", 5:9), "10", "11"), "t16omas", NA)),
    p3r = as.character(p3r),
    r3r.inicio = ifelse(p3r == "1", "este.año",
                 ifelse(p3r == "2", "año.pasado",
                 ifelse(p3r == "3", "antes.año.pasado", "no.sabe")))
  ) %>% # me salto a subordinado
  # Section 7: how the worker is paid.
  dplyr::mutate(
    p7 = as.character(p7),
    r7.simple = ifelse(p7 %in% c("7", "9"), "no", "si"),
    r7.toda = ifelse(p7 == "1", "vender",
              ifelse(p7 == "2", "presta.servicios",
              ifelse(p7 == "3", "tierra.o.cria",
              ifelse(p7 == "4", "propinas.comision.destajo",
              ifelse(p7 == "5", "asalariado",
              ifelse(p7 == "6", "ayuda.negocio", NA
              ))))))
  ) %>% # me salto a antescedentes laborales
  dplyr::mutate_each_(., funs(as.character), vars = c(paste0("p9n", c(1:6, 9)), paste0("p10_", c(1:4, 9)), paste0("p10a", c(1:4, 9)))) %>%
  # Sections 9/10: work-history indicator recodes.
  dplyr::mutate(
    r9n1 = ifelse(p9n1 == "1", "si", "no"),
    r9n2 = ifelse(p9n2 == "2", "si", "no"),
    r9n3 = ifelse(p9n3 == "3", "si", "no"),
    r9n4 = ifelse(p9n4 == "4", "si", "no"),
    r9n5 = ifelse(p9n5 == "5", "si", "no"),
    r9n6 = ifelse(p9n6 == "6", "si", "no"),
    r10_1 = ifelse(p10_1 == "1", "si", "no"),
    r10_2 = ifelse(p10_2 == "2", "si", "no"),
    r10_3 = ifelse(p10_3 == "3", "si", "no"),
    r10_4 = ifelse(p10_4 == "4", "si", "no"),
    r10a1 = ifelse(p10a1 == "1", "si", "no"),
    r10a2 = ifelse(p10a2 == "2", "si", "no"),
    r10a3 = ifelse(p10a3 == "3", "si", "no"),
    r10a4 = ifelse(p10a4 == "4", "si", "no")
  ) %>%
  # Section 6: income. p6b1 = pay period, p6b2 = reported amount
  # (999998/999999 are missing codes).
  dplyr::mutate(
    p6b1 = as.character(p6b1),
    p6b2 = as.character(p6b2),
    p6b2 = ifelse(p6b2 != "999999" & p6b2 != "999998", as.numeric(p6b2), NA),
    ingreso.mensualizado = ifelse(p6b1 == "1", p6b2,
                           ifelse(p6b1 == "2", p6b2 * 2,
                           ifelse(p6b1 == "3", p6b2 * 4,
                           ifelse(p6b1 == "4", p6b2 * 30, NA)))),
    ingreso.mensual = p6b2,
    ocupado = ifelse(clase1 == 1 & clase2 == 1, 1, 0),
    p6_9 = as.character(p6_9),
    p6a3 = as.character(p6a3),
    p6c = as.numeric(as.character(p6c)),
    r6c = p6c,
    r6b1 = p6b1,
    r6b2 = p6b2,
    # BUG FIX: the innermost ifelse() was missing its `no` argument, which
    # errors at runtime whenever any element of the test is FALSE. NA is the
    # natural `no` here (income not recoverable from these items).
    ingreso.mensual.recup1 = ifelse(!is.na(p6b2), p6b2,
                             ifelse(ocupado == 0, 0,
                             ifelse(p6_9 == "09" | p6a3 == "3", 0, NA)))) %>%
  data.frame(.) %>%
  # Recover income from the minimum-wage bracket (p6c) when unreported.
  # `salario` (the reference minimum wage) comes from the raw data —
  # TODO confirm it is a per-row column.
  dplyr::mutate(
    ingreso.mensual.recup2 = ifelse(p6c == "1", 0.5 * salario,
                             ifelse(p6c == "2", 1 * salario,
                             ifelse(p6c == "3", 1.5 * salario,
                             ifelse(p6c == "4", 2.5 * salario,
                             ifelse(p6c == "5", 4 * salario,
                             ifelse(p6c == "6", 7.5 * salario,
                             ifelse(p6c == "7", 10 * salario, NA
                             ))))))),
    ingreso.mensual.recup = ifelse(is.na(ingreso.mensual.recup1), ingreso.mensual.recup2, ingreso.mensual.recup1),
    sm = ifelse(p6c %in% c("1", "2"), "sm00a01",
         ifelse(p6c == "3", "sm01a02",
         ifelse(p6c == "4", "sm02a03",
         ifelse(p6c == "5", "sm03a05",
         ifelse(p6c == "6", "sm05a10",
         ifelse(p6c == "7", "sm10ymas", NA)))))),
    p7g1 = as.character(p7g1),
    p7g2 = as.character(p7g2),
    p7gcan = as.character(p7gcan),
    ingreso.secun.efectivo = ifelse(p7g1 == "1", as.numeric(p7gcan), NA),
    ingreso.secun = as.numeric(p7gcan)
  )
#sum(is.na(df2$p6b2))/nrow(df2) + sum(df2$p6b2 == df2$ingocup, na.rm =T)/nrow(df2) == 1
#source("../../lib/crea_cortes_eco.R")
#df <- corte.economico.gpo(datos = dplyr::rename(df, factor = fac), grupo = "per", variable.ref = "ingreso.mensual", pesos = "factor", cortes = 10, nombre = "dec", prefijo = "d")
#print(names(df))
#df <- df[, -grep("^p[0-9]", names(df))]
#
# Flag sampling units (UPMs) that belong to the panel / 2014 / 2015 samples.
load("../intercensal/datos/upms.rdata")
df$upm_panel <- FALSE
df$upm_panel[df$upm %in% upms$panel] <- TRUE
df$upm_14 <- FALSE
df$upm_14[df$upm %in% upms$a2014] <- TRUE
df$upm_15 <- FALSE
df$upm_15[df$upm %in% upms$a2015] <- TRUE

# Coerce factors to character and the expansion factor to numeric.
fac2char <- function(x){as.character(x)}
df <- dplyr::mutate_each(df, funs(fac2char), est, t_loc)
df$fac <- as.numeric(df$fac)

library(Hmisc)

# Total monthly income = main + secondary job (secondary treated as 0 when NA).
df <- data.frame(df) %>%
  dplyr::mutate(fac = as.numeric(fac),
                ingreso.mensual.total = ifelse(is.na(ingreso.secun), ingreso.mensual, ingreso.mensual + ingreso.secun),
                ingreso.mensual.recup.total = ifelse(is.na(ingreso.secun), ingreso.mensual.recup, ingreso.mensual.recup + ingreso.secun)) %>%
  data.frame()

# Save the full processed data, then a slim version without raw p* items.
saveRDS(df, "../data/enoe_ampliado_todo.rds")
df <- df[, -grep("^p[0-9]", names(df))]
saveRDS(df, "../data/enoe_ampliado.rds")
|
7e52aaed3203813cdd8e10936b524ff23312d36d
|
d9ee4c89fa85ee69ee6d3a6f34035924fc7472e4
|
/h2o-r/tests/testdir_demos/runit_demo_tk_cm_roc.R
|
6654ef46f45c60158ab3c4760eb094de8161f31c
|
[
"Apache-2.0"
] |
permissive
|
mrgloom/h2o-3
|
838c298d257d893202e5cba8b55c84d6f5da1c57
|
3f00bf9e8e6aeb3f249301f20694076db15b7d5e
|
refs/heads/master
| 2021-01-15T21:34:32.995372
| 2015-08-20T02:06:09
| 2015-08-20T05:52:14
| 41,108,114
| 1
| 0
| null | 2015-08-20T16:56:36
| 2015-08-20T16:56:34
| null |
UTF-8
|
R
| false
| false
| 3,501
|
r
|
runit_demo_tk_cm_roc.R
|
#----------------------------------------------------------------------
# Tom's demonstration example.
#
# Purpose: Split Airlines dataset into train and validation sets.
# Build model and predict on a test Set.
# Print Confusion matrix and performance measures for test set
#----------------------------------------------------------------------
# Source setup code to define myIP and myPort and helper functions.
# If you are having trouble running this, just set the condition to FALSE
# and hardcode myIP and myPort.
if (TRUE) {
  # Set working directory so that the source() below works.
  setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
  if (FALSE) {
    setwd("/Users/tomk/0xdata/ws/h2o-dev/h2o-r/tests/testdir_demos")
  }
  source('../h2o-runit.R')
  options(echo=TRUE)
  filePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
  testFilePath <- normalizePath(h2o:::.h2o.locate("smalldata/airlines/AirlinesTest.csv.zip"))
} else {
  # Manual-configuration branch (stops immediately; edit below to use it).
  stop("need to hardcode ip and port")
  myIP = "127.0.0.1"
  myPort = 54321
  library(h2o)
  PASS_BANNER <- function() { cat("\nPASS\n\n") }
  # NOTE(review): in this branch filePath points at the *Test* zip and
  # testFilePath at the *Train* zip — swapped relative to the branch above;
  # confirm which assignment is intended.
  filePath <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTest.csv.zip"
  testFilePath <-"https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/AirlinesTrain.csv.zip"
}
h2o.startLogging()

# Train a GBM on an 80/20 split of the airlines data, then score a held-out
# test file and report confusion matrix / AUC / precision / accuracy.
# `conn` is an h2o connection supplied by the doTest() harness.
check.demo_cm_roc <- function(conn) {
  #uploading data file to h2o
  air <- h2o.importFile(conn, filePath, "air")
  #Constructing validation and train sets by sampling (20/80)
  #creating a column as tall as airlines(nrow(air))
  s <- h2o.runif(air)    # Useful when number of rows too large for R to handle
  air.train <- air[s <= 0.8,]
  air.valid <- air[s > 0.8,]
  myX = c("Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek" )
  myY="IsDepDelayed"
  #gbm
  air.gbm <- h2o.gbm(x = myX, y = myY, distribution = "multinomial", training_frame = air.train, ntrees = 10,
                     max_depth = 3, learn_rate = 0.01, nbins = 100, validation_frame = air.valid)
  print(air.gbm)
  print("Variable Importance")
  print(air.gbm@model$variable_importances)
  print("AUC: ")
  p <- h2o.performance(air.gbm, air.valid)
  print(p@metrics$AUC)
  #RF
  # air.rf <- h2o.randomForest(x=myX,y=myY,data=air.train,ntree=10,depth=20,seed=12,importance=T,validation=air.valid, type = "BigData")
  # print(air.rf)
  #uploading test file to h2o
  air.test <- h2o.importFile(conn,testFilePath,destination_frame="air.test")
  model_object <- air.gbm #  air.rf #air.glm  air.gbm air.dl
  #predicting on test file
  pred <- predict(model_object,air.test)
  head(pred)
  perf <- h2o.performance(model_object,air.test)
  #Building confusion matrix for test set
  # FIXME - these require work
  h2o.confusionMatrix(perf)
  h2o.auc(perf)
  h2o.precision(perf)
  h2o.accuracy(perf)
  #perf@metrics$AUC
  #Plot ROC for test set
  #FIXME
  plot(perf,type="roc")
  # Dead interactive snippet kept for reference (never executes).
  if (FALSE) {
    h <- h2o.init(ip="mr-0xb1", port=60024)
    df <-h2o.importFile(h, "/home/tomk/airlines_all.csv")
    nrow(df)
    ncol(df)
    head(df)
    myX <- c("Origin", "Dest", "Distance", "UniqueCarrier", "Month", "DayofMonth", "DayOfWeek")
    myY <- "IsDepDelayed"
    air.glm <- h2o.glm(x = myX, y = myY, training_frame = df, family = "binomial", nfolds = 10, alpha = 0.25, lambda = 0.001)
    air.glm@model$confusion
  }
  testEnd()
}

doTest("Airlines CM and ROC", check.demo_cm_roc)
|
5c4437b22728ad504dafdc25f23734aa7cfdb7f4
|
534ef77188d98c1db98c5ead80042cf1e7d89b25
|
/plot1.R
|
95c87cb397ad11e36bc592b5087856a9c2dcc97c
|
[] |
no_license
|
bjerva/ExData_Plotting1
|
83818969da605a5347df5173b25cf0ab8f60ee97
|
e87a873921aebb1f4840b21e9a89447747062f21
|
refs/heads/master
| 2021-01-18T09:49:29.769284
| 2014-08-06T14:57:43
| 2014-08-06T14:57:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02,
# written straight to plot1.png (480x480).

# Load the raw power data; '?' marks missing values; Date/Time stay character.
power <- read.table(
  "household_power_consumption.txt", header = TRUE, sep = ";",
  na.strings = "?",
  colClasses = c("character", "character", rep("numeric", 7))
)

# Keep only the two target dates (dd/mm/yyyy strings).
power <- power[power$Date == "1/2/2007" | power$Date == "2/2/2007", ]

# Global active power as a plain numeric vector.
gap <- as.numeric(power$Global_active_power)

# Render the histogram to PNG.
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(gap, xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power", col = "red")
dev.off()
|
de9dfc439a8cda9dd8027c34afd5dec452236828
|
0d2190a6efddb7167dee3569820724bfeed0e89c
|
/R Package Creation/PBTools/man/designLattice.Rd
|
155cf1aa2404efa66c2556e7db045c9bd27d1e10
|
[] |
no_license
|
djnpisano/RScriptLibrary
|
6e186f33458396aba9f4151bfee0a4517d233ae6
|
09ae2ac1824dfeeca8cdea62130f3c6d30cb492a
|
refs/heads/master
| 2020-12-27T10:02:05.719000
| 2015-05-19T08:34:19
| 2015-05-19T08:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
rd
|
designLattice.Rd
|
\name{designLattice}
\alias{designLattice}
\alias{designLattice.default}
\title{Randomization for Lattice Design}
\description{Generates a randomization for a lattice design.}
\usage{
designLattice(generate, r = 2, trial = 1, numFieldRow = 1, serpentine = FALSE, file = NULL)
}
\arguments{
\item{generate}{a list}
\item{r}{numeric}
\item{trial}{numeric}
\item{numFieldRow}{numeric}
\item{serpentine}{logical}
\item{file}{NULL}
}
\author{Alaine A. Gulles}
\keyword{ design }
|
e40c8e75ed55aa3ab3f4f2de6b12f827f0e4ef04
|
2af70233e10a64dd3aaf4461c6c7c5da0f55df3d
|
/Pre-processing/netcdf_mask.R
|
a3f7efd20b2d9fe59908a77612fd390b723748f8
|
[
"Apache-2.0"
] |
permissive
|
iffylaw/EQTEC-old
|
ac4100852d281c423b1dac1d671f2290f818ad69
|
3d2dcdf840e7f16843bb4479bc3e1359f2fa653b
|
refs/heads/master
| 2021-01-15T08:35:40.768905
| 2015-06-02T09:54:07
| 2015-06-02T09:54:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
netcdf_mask.R
|
#!/usr/bin/env Rscript
# Date: 27/04/2015
# Author: lihui luo (luolh@lzb.ac.cn)
# Mask a NetCDF file (with a time dimension) using a shapefile buffer:
# crop to the buffer extent, rasterize the buffer, set cells outside it
# to NA, and write the result as "<prefix><date>-masking.nc".
library(raster)
library(ncdf)
library(rgdal)

rm(list = ls()) # clear objects
graphics.off()  # close graphics windows

# Command-line parameters: Args[1] = file prefix, Args[2] = file date.
Args <- commandArgs(trailingOnly=TRUE);
# BUG FIX: the script consumes Args[1] AND Args[2] (see below), and its own
# message asks for two inputs, but the original check required exactly one.
if(length(Args) != 2) {
  message("netcdf_mask.R requires file_prefix file_Date as input. Terminating"); quit()
}

# Reading the shapefile
buffer30km_shp <- readOGR("QTP_railway_highway_buffer30km.shp", layer=basename("QTP_railway_highway_buffer30km"))

# Getting the spatial extent of the shapefile
e <- extent(buffer30km_shp)

# Reading the raster you want to crop with time dimension
china_raster <- brick(paste(Args[1],Args[2],".nc",sep=""), values=TRUE)

# Cropping the raster to the shapefile spatial extent
china_raster.crop <- crop(china_raster, e, snap="out")

# Dummy raster with a spatial extension equal to the cropped raster,
# but full of NA values
buffer30km_crop <- setValues(china_raster.crop, NA)

# Rasterize the catchment boundaries, with NA outside the catchment boundaries
buffer30km_shp.r <- rasterize(buffer30km_shp, buffer30km_crop)

# Putting NA values in all the raster cells outside the shapefile boundaries
china_raster.masked <- mask(x=china_raster.crop, mask=buffer30km_shp.r)

# Create new netcdf file with masking.
# BUG FIX: removed the stray empty argument in paste() (runtime error:
# "argument 2 is empty") and the garbled zunit string literal that
# duplicated the varname expression.
writeRaster(china_raster.masked, paste(Args[1], Args[2], "-masking.nc", sep=""), "CDF",
            zname="time",
            zunit="unknown",  # TODO confirm intended time unit (original value was garbled)
            varname=substr(Args[1], 1, 4), overwrite=TRUE)
|
976d374f9c1a1b3977a62f4ea99fe3b1429b4952
|
813b88e59f26eb338b70af3c0e8bd76c00ced4c9
|
/r/load_data.R
|
9ca45db05e62ca0835622774a60e241833440b93
|
[] |
no_license
|
dgreiss/covid_dashboard
|
e54f1f8efcb05b284057c9f35ec59fbe3c406501
|
78cc4c1769337b64e1f14d5bc9b00c68fc26e402
|
refs/heads/main
| 2022-12-25T19:10:18.316395
| 2020-10-08T14:49:29
| 2020-10-08T14:49:29
| 302,368,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
load_data.R
|
# Load Ontario COVID-19 testing data, derive daily (non-cumulative) series,
# and build a formatted summary table of the most recent days.
# NOTE(review): relies on uncumulate() (project helper, presumably a lagged
# difference) and rollmean() (presumably zoo) loaded elsewhere — confirm.
data <- read_csv("https://data.ontario.ca/dataset/f4f86e54-872d-43f8-8a86-3892fd3cb5e6/resource/ed270bb8-340b-41f9-a7c6-e8ef587e6d11/download/covidtesting.csv")

# Rename the columns we use to snake_case.
data <-
  data %>%
  rename(
    reported_date = `Reported Date`,
    total_cases = `Total Cases`,
    deaths = Deaths,
    number_of_patients_hospitalized_with_covid_19 = `Number of patients hospitalized with COVID-19`,
    number_of_patients_in_icu_with_covid_19 = `Number of patients in ICU with COVID-19`
  )

# data <-
#   data %>%
#   janitor::clean_names()

# Drop the early low-count period.
data <-
  data %>%
  filter(total_cases > 20)

# Daily deltas from the cumulative columns, a 7-day trailing mean of deaths,
# and a day index counted from the first retained date.
cases <-
  data %>%
  mutate(
    new_cases = uncumulate(total_cases),
    new_deaths = uncumulate(deaths),
    new_hospital = uncumulate(number_of_patients_hospitalized_with_covid_19),
    new_icu = uncumulate(number_of_patients_in_icu_with_covid_19),
    mean_deaths_7 = rollmean(new_deaths, 7, fill = NA, align = "right"),
    day = as.numeric(reported_date) - min(as.numeric(reported_date))
  ) %>%
  arrange(reported_date)

cases <-
  cases %>%
  filter(!is.na(total_cases))

# Display table: the six most recent days, newest first, with formatted counts.
cases_summary <-
  cases %>%
  select(
    reported_date, total_cases, deaths, new_cases, new_deaths
  ) %>%
  tail() %>%
  arrange(desc(reported_date)) %>%
  mutate(
    reported_date = as.character(reported_date),
    total_cases = scales::comma(total_cases, accuracy = 1),
    deaths = scales::comma(deaths, accuracy = 1),
    new_cases = scales::comma(new_cases, accuracy = 1),
    new_deaths = scales::comma(new_deaths, accuracy = 1)
  ) %>%
  rename(
    " " = reported_date,
    `Total Cases` = total_cases,
    `Total Deaths` = deaths,
    `New Cases` = new_cases,
    `New Deaths` = new_deaths
  )
|
2c0ff5eaa2387e9267b6de1ec217116703542ec9
|
f4bee1dd67ea46b82a73df0b70a4f8a8e5def702
|
/Project/plot4.R
|
6eb5cf136ff64a65d465b00639ef40cc2b6f61d6
|
[] |
no_license
|
jacintod/ExData_Plotting1
|
17a7969006b751b0cc938a353a4c8fb48be9ab4d
|
a0495d8fdd37d4b371456f32f1b71d84a58a8f08
|
refs/heads/master
| 2021-01-22T11:20:09.013688
| 2017-05-29T10:33:47
| 2017-05-29T10:33:47
| 92,680,784
| 0
| 0
| null | 2017-05-28T18:56:26
| 2017-05-28T18:56:26
| null |
UTF-8
|
R
| false
| false
| 1,998
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of power-consumption plots (global active power, voltage,
# sub-metering, global reactive power) for 2007-02-01/02, saved as plot4.png.
# Print the current working directory
print(getwd())

# Declare our working variables
file.name <- "./Data/household_power_consumption.txt"
url <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip.file <- "./Data/data.zip"

# Check if the data is downloaded and download when applicable
if (!file.exists(file.name)) {
  download.file(url, destfile = zip.file)
  unzip(zip.file)
  file.remove(zip.file)
  print("Downloaded the source file for plotting..")
}

## Getting full dataset ('?' marks missing values)
powerDT <- read.csv(file.name, header=T, sep=';', na.strings="?",
                    nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
powerDT$Date <- as.Date(powerDT$Date, format="%d/%m/%Y")

## Subsetting the data to the two target days
subsetData <- subset(powerDT, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

# Remove the orginal dataset to optimize the excection of the script
rm(powerDT)

# Convert column that we will use to correct class (combined date-time)
convertedDatesDT <- paste(as.Date(subsetData$Date), subsetData$Time)
subsetData$Datetime <- as.POSIXct(convertedDatesDT)

# 2x2 layout with shared outer margin for the four panels.
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(subsetData, {
  plot(Global_active_power~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l",
       ylab="Voltage (volt)", xlab="")
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l",
       ylab="Global Rective Power (kilowatts)",xlab="")
})

## Saving to file (copy the on-screen device to PNG, then close it)
dev.copy(png, file="./Project/plot4.png", height=480, width=480)
dev.off()
|
57c1fee46b8511f507127ed1ff4f65b19689bd8f
|
959154050a0fb3fcf44b12a3cddbc529ef1d955f
|
/getting_tweets/translate.R
|
c6c70a2314f7f5ae605197b76cb54db0cd8a0984
|
[] |
no_license
|
watsonwanda/col_res_proj
|
320409a925d4ad171e06f81b04f347e30d0e2ebc
|
5d4f35a5788639aad1ddf9eaf11c292e735204e1
|
refs/heads/master
| 2022-04-12T05:32:50.971071
| 2016-05-02T18:43:26
| 2016-05-02T18:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,540
|
r
|
translate.R
|
# Translate non-English tweets to English via the Microsoft Translator API
# (translateR), language by language, saving progress after each language.
library(textcat)
library(dplyr)
library(translateR)

# Loads MicrosoftClientId / MicrosoftSecret into the session.
source("getting_tweets/define_keys.R")
load("data/lang_index.rda")

# Collapse greek sub-variants into a single "greek" label.
index$lang <- gsub("(^greek(.*))","greek",index$lang)

# Keep only supported languages with enough text to translate.
foreign <- index %>%
  filter(lang %in% c("french",
                     "german",
                     "spanish",
                     "greek",
                     "italian",
                     "swedish",
                     "norwegian",
                     "portuguese") &
           nchar(text) > 10
  )

# Map detected-language names to Microsoft Translator language codes.
lang_table <- data.frame(
  lang=c(
    "french",
    "german",
    "spanish",
    "greek",
    "italian",
    "swedish",
    "norwegian",
    "portuguese",
    "dutch"
  ),
  microsoft=c(
    "fr",
    "de",
    "es",
    "el",
    "it",
    "sv",
    "no",
    "pt",
    "nl"
  )
)

foreign <- foreign[order(foreign$lang),]

foreign_merge <- foreign %>%
  left_join(lang_table)

foreign_merge$translation <- NA

languages <- unique(foreign_merge$microsoft)

# Translate one language at a time, replacing the affected rows and saving
# after each batch so a crash loses at most one language's work.
for(i in languages) {
  # NOTE(review): on a fresh run translation is all NA, and grepl(NA) is
  # FALSE, so this filter selects zero rows — it only re-selects rows whose
  # previous attempt failed with "TranslateApiExceptionMethod". This looks
  # like a retry script; confirm whether a first pass should also match NA
  # (e.g. is.na(translation) | grepl(...)).
  language <- filter(foreign_merge,microsoft==i & grepl("TranslateApiExceptionMethod",translation))
  print(length(language$lang))
  # Strip punctuation before sending text to the API.
  language$translation <- translate(
    content.vec = gsub("[[:punct:]]","",language$text),
    microsoft.client.id=MicrosoftClientId,
    microsoft.client.secret=MicrosoftSecret,
    source.lang=i,
    target.lang="en"
  )
  # Swap the re-translated rows back into the master table and checkpoint.
  old_foreign <- foreign_merge[!(foreign_merge$tweet_id %in% language$tweet_id),]
  foreign_merge <- rbind(old_foreign,language)
  save(foreign_merge,file="data/foreign_index.rda")
  print(i)
}
|
f01bfa73d07ed062cc51c9cfe9e4e0bf56972df7
|
26151b679705ae098e79fab6c98ca68c093a4f09
|
/Fundamentos_de_DS/Practica Semana 1/Algebra_2-Carteras_Matricial.R
|
f9cc35293d3eef149b8139569ad7d962ba962497
|
[] |
no_license
|
octadelsueldo/Master_DS_CUNEF
|
2808e5f5fbf564c7c3fdf209d699056ecd75a8af
|
354f9d4e43cbcf9d47edd85cfcb5010b417a8b3a
|
refs/heads/main
| 2023-08-21T22:54:16.712139
| 2021-10-06T22:33:32
| 2021-10-06T22:33:32
| 414,352,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
Algebra_2-Carteras_Matricial.R
|
#-------------------------------------------------------
# MDS - Fundamentals of Algebra
# Two-asset portfolio statistics via matrix calculus
#-------------------------------------------------------

# Annualised volatilities and the correlation between the two assets.
s1 <- 0.12
s2 <- 0.18
r12 <- 0.45

# Expected returns of each asset.
m1 <- 0.05
m2 <- 0.08

# Covariance matrix built from the shared off-diagonal covariance term.
covar <- r12 * s1 * s2
V <- rbind(c(s1^2, covar),
           c(covar, s2^2))
V

# Vector of mean returns.
m <- c(m1, m2)

# Portfolio weights (sum to one).
w <- c(2/5, 3/5)

# Expected portfolio return: w' m.
ER <- t(w) %*% m
ER

# Portfolio variance w' V w, and volatility expressed in percent.
VR <- t(w) %*% V %*% w
VR
sqrt(VR)*100
|
0dc383aa52f0a73f3be6826170a5a81f0aa11992
|
70e48467da9729ab758287b8749e8743d7855969
|
/run_analysis.R
|
9291f56c7f2d75cea797f350324b66f24d175ac5
|
[] |
no_license
|
scullduggery/GettingCleaningDataWk4Project
|
5ba14581361222e0916fdb57a2dbdb99749055de
|
991441d34bdf19e27470e5a351981e7f976ec041
|
refs/heads/master
| 2020-03-22T04:12:34.555923
| 2018-07-02T18:51:29
| 2018-07-02T18:51:29
| 139,481,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,156
|
r
|
run_analysis.R
|
###############################################################
## Script to download file on data collected on the accelerometers from the Samsung Galaxy S smartphones.
## Clean and tidy the data and then summarise the data in a tidy data set output.
###############################################################

## Check if "reshape2" package already installed, if not then install it
ReshapePackage <- c("reshape2")
NewPackage <- ReshapePackage[!(ReshapePackage %in% installed.packages()[,"Package"])]
if(length(NewPackage)>0) {
  install.packages("reshape2")
}

## Load library reshape
library(reshape2)

## Download and unzip the dataset:
## NOTE(review): method="curl" requires curl on the PATH (non-Windows);
## confirm the target platforms.
filename <- "GettingDataCourseProjectData.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists(filename)){
  download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}

#################################################################
## Loading Data into R
#################################################################

## Load in Training Data
TrainSubject <- read.table("UCI HAR Dataset/train/subject_train.txt")
TrainActivity <- read.table("UCI HAR Dataset/train/Y_train.txt")
TrainValues <- read.table("UCI HAR Dataset/train/X_train.txt")

## Load in Test Data
TestSubject <- read.table("UCI HAR Dataset/test/subject_test.txt")
TestActivity <- read.table("UCI HAR Dataset/test/y_test.txt")
TestValues <- read.table("UCI HAR Dataset/test/X_test.txt")

## Load in Features, Don't allow conversion to factors
Features <- read.table("UCI HAR Dataset/features.txt", as.is = TRUE)

## Load in Activities
Activities <- read.table("UCI HAR Dataset/activity_labels.txt")
colnames(Activities) <- c("ActivityId", "ActivityLabel")

#################################################################
## Part 1 - Merge the Training and Test data together
#################################################################

# concatenate individual data tables to make single data table
AllData <- rbind(
  cbind(TrainSubject, TrainValues, TrainActivity),
  cbind(TestSubject, TestValues, TestActivity)
)

# To save memory remove individual data tables
rm(TrainSubject, TrainValues, TrainActivity,
   TestSubject, TestValues, TestActivity)

# assign column names
colnames(AllData) <- c("Subject", Features[, 2], "Activity")

#################################################################
## Part 2 - Extract only the measurements on the mean and
## standard deviation for each measurement.
#################################################################

## Identify Columns Wanted and remove all others
## NOTE(review): grepl is case-sensitive here, so "mean" also matches
## meanFreq columns but not "Mean" in angle() variables — confirm intended.
ColumnsToKeep <- grepl("Subject|Activity|mean|std", colnames(AllData))
AllData <- AllData[, ColumnsToKeep]

###############################################################
## Part 3 - Use descriptive activity names to name the activities in the data set
###############################################################

## Replace activity values with Factor labels from activity_labels.txt
AllData$Activity <- factor(AllData$Activity ,
                           levels = Activities[, 1], labels = Activities[, 2])

###############################################################
## Part 4 - Appropriately labels the data set with descriptive variable names.
###############################################################

## Identify Column Names
AllDataCols <- colnames(AllData)

# remove brackets
AllDataCols <- gsub("[\\(\\)]", "", AllDataCols)

## Change where BodyBody occurs in field names to just Body
AllDataCols <- gsub("BodyBody", "Body", AllDataCols)

## Change abbreviations and add Capitals for easier reading
AllDataCols <- gsub("^t", "TimeDomain", AllDataCols)
AllDataCols <- gsub("Acc", "Accelerometer", AllDataCols)
AllDataCols <- gsub("Gyro", "Gyroscope", AllDataCols)
AllDataCols <- gsub("Mag", "Magnitude", AllDataCols)
AllDataCols <- gsub("Freq", "Frequency", AllDataCols)
AllDataCols <- gsub("^f", "FrequencyDomain", AllDataCols)
AllDataCols <- gsub("mean", "Mean", AllDataCols)
AllDataCols <- gsub("std", "StandardDeviation", AllDataCols)

## Change Column names within Data Set to those cleaned
colnames(AllData) <- AllDataCols

###############################################################
## Part 5 - Create a second, independent tidy data set with
## the average of each variable for each activity and each subject.
###############################################################

## Firstly Change "Subject" into factor variable
AllData$Subject <- as.factor(AllData$Subject)

## "Melt" the dataset so that it can be used within a dcast function
AllData.melted <- melt(AllData, id = c("Subject", "Activity"))

## Apply dcast function with formula to calculate mean
AllData.mean <- dcast(AllData.melted, Subject + Activity ~ variable, mean)

## Create text file from AllData.mean within working directory
write.table(AllData.mean, "TidyData.txt", row.names = FALSE, quote = FALSE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.