blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad69932eed6aff5e64db7657be8cd1dbce5761ed
|
c3542f507d35c5fc3f7c266b3552e6ef13765253
|
/Schritte/3_interaktiv.R
|
985883c6cd14e92efa316c3dba30577212ebbfc6
|
[] |
no_license
|
helge-baumann/leaflet_germany
|
fc59689e86d4061f776fdf3f849160d5d956ead1
|
6d1638c61cb10b67c9144b09e3329ad62c428ed6
|
refs/heads/main
| 2023-02-21T03:47:10.560170
| 2021-01-22T15:19:49
| 2021-01-22T15:19:49
| 331,964,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,988
|
r
|
3_interaktiv.R
|
# Load background data -----
# Assumption about the file path: see funktionen/funktionen_karten.R
geo <- load_shapefile(path=NULL, level=ebene, leaflet=T)
geo <- merge(geo, dat, by=key) # for `dat` see 1_read.R; joins attribute data onto the shapes
# Colour palette, keyed to the grouping variable `gruppe`
colfunc <- colorFactor(farben(col_i), domain=geo$gruppe, ordered=T)
geo$color <- colfunc(geo$gruppe)
# State borders (loaded now, drawn later on top of the map)
if(ebene %in% c("KRS", "AMR")) {
geo_land <- load_shapefile(path=NULL, level="LAN", leaflet=T)
}
geo$hinweis <- NA # placeholder for additional notes from the source data
# Draw the map -------
# Build the leaflet figure: base map, title, subtitle, legend, logo,
# source line, user note, and the coloured polygons themselves.
fig <- leaflet_basis(geo) %>%
leaflet_text("Knapper Titel", size=20) %>%
leaflet_text(lat=55.25,
"Untertitel, in dem etwas mehr Information steht", size=15) %>%
leaflet_legend() %>%
leaflet_logo() %>%
leaflet_quelle(Text="Quelle: <br> Autor der Quelle") %>%
leaflet_anmerkung(Text="Mehr Informationen erhalten Sie per Mausklick auf die Länder!") %>%
leaflet_polygons(
variablen=c("Gesamt", "Männer", "Frauen"),
headers=c("Kategorie I", "Kategorie II", "Kategorie III"),
k=3 # decimal places -- presumably for the popup values; confirm in leaflet_polygons()
)
# State borders (only sensible for districts / labour-market regions)
if(ebene %in% c("KRS", "AMR")) {
fig <- fig %>%
addPolygons(
data = geo_land,
color = "white", weight = 2, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 1.0,
fill = F
)
}
# Apply the leaflet style sheet and export to HTML ----
fig$dependencies <- list(
htmlDependency(
name = "leaflet_hbs"
,version = "1.0.0"
# if local file use file instead of href below
# with an absolute path
,src = paste0(getwd(),"/CSS/Leaflet-Stil/")
,stylesheet = "rstudio_leaflet_helge.css"
)
)
# Save the widget; selfcontained=F keeps assets in files next to the HTML
saveWidget(
frameableWidget(fig),
file = paste0(getwd(), "/Output/Karten/", name_i, ".html"),
selfcontained=F
)
# Ship the WSI logo alongside the HTML output
file.copy(
from="./CSS/WSI_Abbinder_RGB.jpg",
to="./Output/Karten"
)
|
7c51e18a7024fdb1299a5f2977b031bf3faa6cd8
|
5c4494b116a8a14b58d923fd471fca996dc29ad7
|
/R/split.R
|
9fa6944995e3382ad9fd5a30968a12f0ca28c16d
|
[] |
no_license
|
chenx-bob/simplifyGO
|
ba7a9ae10772e24f5371b7ad3ed9e8097dade542
|
13644aaabb19ee37ec2f569b66d2e23ca2b69dc4
|
refs/heads/master
| 2022-04-19T23:25:52.518629
| 2020-04-17T11:42:09
| 2020-04-17T11:42:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,781
|
r
|
split.R
|
# Cut a dendrogram into clusters based on a per-node score attribute.
#
# dend:   a stats::dendrogram whose internal nodes carry a numeric
#         attribute named `field` (default "score2").
# cutoff: a node is collapsed (height set to 0) when neither it nor its
#         two direct children reach this score.
# plot:   if TRUE, plot the tree with internal nodes marked by score.
#
# Returns a character vector of cluster labels, one per leaf: leaves that
# end up connected through zero-height nodes fall into the same cluster.
#
# NOTE(review): relies on `edit_node` (external helper that applies a
# function to every node, passing its index path) and `colorRamp2`
# (circlize) -- neither is defined in this file.
cut_dend = function(dend, cutoff = 0.8, field = "score2", plot = FALSE) {
# Scores of the two direct children of a node; -Inf for a leaf so that
# a leaf alone never argues for splitting.
children_score = function(dend, field) {
if(is.leaf(dend)) {
-Inf
} else {
d1 = dend[[1]]
d2 = dend[[2]]
c(attr(d1, field), attr(d2, field))
}
}
# Do not split a node when its own score is below the cutoff AND both
# children also fall below the cutoff.
dont_split = function(dend, field, cutoff) {
s = attr(dend, field)
if(s >= cutoff) {
return(FALSE)
} else {
s_children = children_score(dend, field)
all(s_children < cutoff)
}
}
# if the top node itself should not be split, the whole tree collapses
# into a single cluster: zero every height and return early
if(dont_split(dend, field, cutoff)) {
dend2 = dendrapply(dend, function(d) {
attr(d, "height") = 0
d
})
if(plot) {
plot(dend)
box()
}
return(dend2)
}
# Collapse every node that should not be split by zeroing its height.
dend2 = edit_node(dend, function(d, index) {
if(dont_split(d, field, cutoff)) {
attr(d, "height") = 0
}
d
})
## make sure all sub-nodes having height 0 if the node is 0 height
is_parent_zero_height = function(index) {
h = sapply(seq_along(index), function(i) {
attr(dend2[[ index[1:i] ]], "height")
})
any(h == 0)
}
dend2 = edit_node(dend2, function(d, index) {
if(is_parent_zero_height(index)) {
attr(d, "height") = 0
attr(d, "nodePar") = NULL
}
d
})
# Optional diagnostic plot: surviving internal nodes get a marker --
# pch 16 (dot) when the score exceeds the cutoff, pch 4 (cross)
# otherwise -- coloured on a blue/yellow/red ramp over scores 0.5-1.
if(plot) {
col_fun = colorRamp2(c(0.5, 0.75, 1), c("blue", "yellow", "red"))
dend = edit_node(dend, function(d, index) {
if(is.null(index)) {
# NULL index appears to denote the root -- TODO confirm edit_node's convention
if(!is.leaf(d)) {
if(attr(dend2, "height") > 0) {
s = attr(d, field)
attr(d, "nodePar") = list(pch = ifelse(s > cutoff, 16, 4), cex = 0.5, col = col_fun(s))
}
}
} else {
if(!is.leaf(d)) {
if(attr(dend2[[index]], "height") > 0) {
s = attr(d, field)
attr(d, "nodePar") = list(pch = ifelse(s > cutoff, 16, 4), cex = 0.5, col = col_fun(s))
}
}
}
return(d)
})
plot(dend)
box()
}
# Clusters = leaves joined through zero-height nodes; cut just above 0.
as.character(cutree(as.hclust(dend2), h = 0.1))
}
|
4475b2a049e55eba7d4caf0a958e483401c7c358
|
48423d753b948fe9b9ff7abc138743fe1e4ef50e
|
/cachematrix.R
|
0a05441400984bc2fc487fc742b48423a67ca517
|
[] |
no_license
|
noics02b/ProgrammingAssignment2
|
e8370afb0c6360c61a1aa04beb34b9188c4f726c
|
de457332fd07e02ed2770f81174d0c94b2bb53eb
|
refs/heads/master
| 2020-12-11T04:19:57.531381
| 2014-10-25T11:43:26
| 2014-10-25T11:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,821
|
r
|
cachematrix.R
|
## Assignment #2 by Winston A. Sumalia
##
## makeCacheMatrix builds a "cache-aware" matrix wrapper: a list of four
## accessor functions closed over the stored matrix `x` and a cached
## inverse (initially NULL):
##   set(y)      - replace the stored matrix and invalidate the cache
##   get()       - return the stored matrix
##   setinv(inv) - write a computed inverse into the cache
##   getinv()    - read the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      inverse <<- NULL  # stored matrix changed, so any cached inverse is stale
    },
    get = function() x,
    setinv = function(inv) inverse <<- inv,
    getinv = function() inverse
  )
}
## The cacheSolve function computes for the inverse returned from makeCacheMatrix. If the matrix inverse has already been calculated, then this function no longer recalculates and retrieves the inverse from cache instead.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On a cache hit the stored inverse is returned directly (announced via a
## message). On a miss the inverse is computed with solve(), memoized via
## x$setinv(), and returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse of the stored matrix and memoize it.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
e1632040f6b26de09e313dd1b20b07b53b5dc928
|
017592988b877cf6f74cd3b7a763e9fde9cd7a0e
|
/BCC_DEA.R
|
6a881056129aeb59df92cc12b3e9cb9c90d08262
|
[] |
no_license
|
njpates/R_Code
|
8942195648bd57908324c925162fca5dd6e7b79f
|
b8db53a23adc91b0b80136514d8deee2bc8782ea
|
refs/heads/master
| 2021-01-15T15:39:09.697712
| 2016-08-31T18:12:32
| 2016-08-31T18:12:32
| 56,538,736
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 927
|
r
|
BCC_DEA.R
|
# BCC (variable-returns-to-scale) DEA efficiency scores via linear programming.
library(lpSolve)

# Input data: column 1 = DMU labels, columns 2-3 = inputs, columns 4-5 = outputs.
data <- read.csv("DEAsample3.csv")
labels <- data[, 1]
In <- data[, 2:3]
Out <- data[, 4:5]

# Compute the input-oriented BCC efficiency score (theta) for every DMU.
#
# In     data frame / matrix of inputs, one row per DMU
# Out    data frame / matrix of outputs, one row per DMU
# labels vector of DMU names used to name the result
#
# Returns a named numeric vector of efficiency scores.
BCC_DEA <- function(In, Out, labels) {
  # Number of DMUs taken from the argument; the original used nrow(data),
  # which silently depended on a global and broke for other inputs.
  n <- nrow(In)
  theta <- rep(0, times = n)
  # Hoist loop-invariant conversions out of the loop.
  inputs <- as.matrix(In)
  outputs <- as.matrix(Out)
  inputs1 <- t(inputs)    # one column per DMU
  outputs1 <- t(outputs)  # one column per DMU
  for (i in seq_len(n)) {
    # Decision vector: (theta, lambda_1, ..., lambda_n).
    Inputs <- cbind(inputs1[, i], -inputs1)                     # theta*x_i - X lambda >= 0
    Outputs <- cbind(rep(0, times = nrow(outputs1)), outputs1)  # Y lambda >= y_i
    Const <- rbind(
      c(0, rep(1, times = n)),                                  # sum(lambda) >= 1 (VRS row)
      Outputs,
      Inputs,
      cbind(rep(0, times = nrow(inputs1)), diag(ncol(inputs1))) # lambda >= 0
    )
    # Right-hand sides: 1 for the convexity row, y_i for the output rows,
    # 0 for the remaining (input and nonnegativity) rows.
    rel <- c(1, outputs[i, ], rep(0, times = nrow(Const) - 1 - ncol(outputs)))
    dir <- rep(">=", times = nrow(Const))
    obj <- c(1, rep(0, times = ncol(Const) - 1))
    # lp(direction, objective.in, const.mat, const.dir, const.rhs)
    theta[i] <- lp("min", obj, Const, dir, rel)$solution[1]
  }
  names(theta) <- labels
  theta
}

BCC_DEA(In, Out, labels)
# NOTE(review): the following two lines error in the original script --
# `optx_ex` is never defined and `Const` is local to BCC_DEA. Commented out
# rather than deleted.
# cbind(optx_ex, BCC_DEA(In, Out, labels))
# Const
|
c6776dbb9ec457b66154850034081ebc37cfb45e
|
78a29329d6aab0c6e047666653c7ab442ddfd8fc
|
/python/exomedepth_patch/function/exomeDepthPipe.R
|
334916f8fbf4cac571d9989d7e3d1b84674f2ea3
|
[] |
no_license
|
pzweuj/practice
|
3a17f37236f930ea68f514df3b9aa772a8ced52f
|
7a51d1924addaf926ca6e727aee8c35902af3dd4
|
refs/heads/master
| 2023-07-07T09:16:41.049571
| 2023-06-25T09:46:10
| 2023-06-25T09:46:10
| 113,008,473
| 10
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,702
|
r
|
exomeDepthPipe.R
|
#!/usr/bin/env Rscript
# 20211012
# pzw
# CNV-calling pipeline built on ExomeDepth: counts reads per BED region
# across all BAM files in a directory, then calls CNVs for each sample
# using an automatically selected set of the other samples as reference.
library(ExomeDepth)
library(getopt)
# data(Conrad.hg19)
# data(exons.hg19)
# Command-line arguments
# args <- commandArgs(TRUE)
spec <- matrix(
c(
"bed", "b", 1, "character", "bed file, need 4 columns without header",
"input", "i", 1, "character", "bam file directory",
"output", "o", 1, "character", "output directory"
),
byrow=TRUE, ncol=5
)
opt <- getopt(spec)
# All three options are mandatory; print usage and quit otherwise.
if (is.null(opt$input) || is.null(opt$output) || is.null(opt$bed)) {
cat(paste(getopt(spec, usage=TRUE), "\n"))
q()
}
# Load the BED file (chromosome, start, end, name; no header row)
bedFilePath <- opt$bed
bedFile <- read.table(bedFilePath, head=FALSE)
names(bedFile) <- c("chromosome", "start", "end", "name")
bedFile <- as.data.frame(bedFile)
# Collect the BAM files from the input directory
bamFilePath <- opt$input
bamFile <- list.files(bamFilePath, pattern="*.bam$")
bamFile <- file.path(bamFilePath, bamFile)
# Output directory (created if missing)
outputPath <- opt$output
if(!(dir.exists(outputPath))) {
dir.create(outputPath)
}
# Count reads per BED region for every BAM file
my.counts <- getBamCounts(bed.frame=bedFile,
bam.files=bamFile,
include.chr=FALSE
)
my.counts.dafr <- as(my.counts, "data.frame")
# Keep only the per-sample count columns (one column per .bam file)
exomeCount.mat <- as.matrix(my.counts.dafr[, grep(names(my.counts.dafr), pattern="*.bam")])
# Annotation (disabled)
# genes.GRanges.hg19 <- GenomicRanges::GRanges(
# seqnames=exons.hg19$chromosome,
# IRanges::IRanges(start=exons.hg19$start, end=exons.hg19$end),
# names=exons.hg19$name
# )
# Call CNVs sample by sample
nSamples <- ncol(exomeCount.mat)
for (i in 1:nSamples) {
# Choose the best reference set for sample i from all other samples
my.choice <- select.reference.set(
test.counts=exomeCount.mat[, i],
reference.counts=exomeCount.mat[, -i],
bin.length=(my.counts.dafr$end - my.counts.dafr$start)/1000, n.bins.reduced=10000
)
# Aggregate the chosen reference samples into one reference profile
my.reference.selected <- apply(X=exomeCount.mat[, my.choice$reference.choice, drop=FALSE], MAR=1, FUN=sum)
all.exons <- new("ExomeDepth", test=exomeCount.mat[, i],
reference=my.reference.selected,
formula="cbind(test, reference) ~ 1"
)
# Call CNVs for sample i over the BED regions
all.exons <- CallCNVs(x=all.exons,
transition.probability=10^-4,
chromosome=my.counts.dafr$chromosome,
start=my.counts.dafr$start,
end=my.counts.dafr$end,
name=my.counts.dafr$exon
)
# all.exons <- AnnotateExtra(x=all.exons, reference.annotation=Conrad.hg19.common.CNVs,
# min.overlap=0.5, column.name="Conrad.hg19"
# )
# all.exons <- AnnotateExtra(x=all.exons, reference.annotation=genes.GRanges.hg19,
# min.overlap=0.0001, column.name="exons.hg19"
# )
# One tab-separated output file per sample, named after the count column
output.file <- paste(outputPath, "/", colnames(exomeCount.mat)[i], ".txt", sep="")
write.table(file=output.file, x=all.exons@CNV.calls, row.names=FALSE, quote=FALSE, sep="\t")
}
|
6903fe7795d02d82754c0e59ba6a366420781e2b
|
fdef19b8a214d9e5a96d3c5e558fb8dc3ba066bf
|
/analysis_test_copd.R
|
856712797d2534156698fb365fe64adcaed2f887
|
[] |
no_license
|
ragnvalds/Granheim_COPD
|
b7e722f4aa57ac86e1fbf104cddeb5912cfd296d
|
957a944e673c2d71e5826aa51023514b0bb1e7e2
|
refs/heads/master
| 2021-01-02T10:57:54.885286
| 2020-02-14T10:31:44
| 2020-02-14T10:31:44
| 239,589,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,796
|
r
|
analysis_test_copd.R
|
####analysis of qPCR copd testrun####
## qPCR import
library(qpcrpal); library(dplyr); library(qpcR); library(readxl); library(ggplot2); library(stringr); library(tidyr)
### Prepare a batch of imported data
batch <- prepare_batch("./data/exports/", equipment = "quant", skip = 21) %>%
model_qpcr()
## Perform model comparisons
model.tests <- test_models(batch)
# Plot best models
# NOTE(review): this plot is repeated verbatim a few lines below; kept both
# to preserve behavior, but one of the two is probably redundant.
data.frame(table(model.tests$best.model, model.tests$target)) %>%
ggplot(aes(Var2, Freq, fill = Var1)) + geom_bar(stat = "identity") + coord_flip()
# Best model per target are stored for modeling with best model
# Plot best model per target
data.frame(table(model.tests$best.model, model.tests$target)) %>%
ggplot(aes(Var2, Freq, fill = Var1)) + geom_bar(stat="identity") + coord_flip()
# Best model per target are stored for modeling with best model
best.models <- data.frame(target = names(apply(table(model.tests$best.model, model.tests$target), 2, which.max)),
model = as.character(row.names(table(model.tests$best.model, model.tests$target))[apply(table(model.tests$best.model, model.tests$target), 2, which.max)]))
## load data with best model
qpcrbatch <- prepare_batch("./data/exports/", equipment = "quant", skip = 21)
results <- list()
# Loop through all targets in best.models data frame
for(i in 1:nrow(best.models)){
results[[i]] <- qpcrbatch %>%
model_qpcr(model = eval(parse(text = as.character(best.models[i,2]))), replicate = FALSE) %>% # use the best model in each model_qpcr
analyze_models() # analyze models for cpD2
}
# combine all results and str split id variables
# bind_rows() replaces rbind_all(), which was deprecated and later removed
# from dplyr.
qpcrdat <- bind_rows(results)
id.var <- str_split_fixed(qpcrdat$ID, "_", 5)
colnames(id.var) <- c("subject", "cdna", "timepoint", "gene","leg")
qpcrdat <- cbind(id.var, qpcrdat[,-1])
## estimate efficiencies ##
efficiencies <- list()
# use the same loop to analyze efficiencies
for(i in 1:nrow(best.models)){
efficiencies[[i]] <- qpcrbatch %>%
model_qpcr(model = eval(parse(text = as.character(best.models[i,2]))), replicate = FALSE) %>%
analyze_efficiency(method = "cpD2", model = "linexp", offset = -3)
}
# combine results and use str split to extract id variables
# (bind_rows() again replaces the removed rbind_all())
efficiencies <- bind_rows(efficiencies)
id.var <- str_split_fixed(efficiencies$ID, "_", 5)
colnames(id.var) <- c("subject", "cdna", "timepoint", "gene","leg")
efficiencies <- cbind(id.var, efficiencies[,-1])
efficiencies %>%
filter(eff > 1.5 & eff < 2.5)%>% # remove outliers from efficiency estimation
#separate(target, into = c("target", "cDNA"), sep = "_") %>%
group_by(gene)%>%
summarise(efficiency = mean(eff, na.rm = TRUE),
max.eff = max(eff, na.rm = TRUE),
min.eff = min(eff, na.rm = TRUE),
sd.eff = sd(eff, na.rm = TRUE))%>%
ggplot(aes(gene, efficiency)) + geom_point() +
coord_flip()
#MYHC2a is removed in this step
effs <- efficiencies %>%
filter(eff > 1.5 & eff < 2.5)%>% # remove outliers from efficiency estimation
#separate(target, into = c("target", "cdna"), sep = "_") %>%
group_by(gene)%>%
summarise(eff = mean(eff, na.rm = TRUE))
## Extract information on replicates
# NOTE(review): str_split with simplify=TRUE can return several columns if
# `gene` contains "_", but only one column name is assigned -- confirm the
# gene labels never contain underscores.
replicates <- data.frame(str_split(qpcrdat$gene, "_", simplify = TRUE))
colnames(replicates) <- c("gene")
qpcrdat <- cbind(replicates, qpcrdat[, -4])
head(qpcrdat)
qpcrdat
## Combine all qPCR parameters in the study to a data.frame containing all replicates
qpcrdat.replicates <- qpcrdat %>%
inner_join(effs, by = "gene") %>%
dplyr::select(subject, gene, cdna, cpD2, eff.y) %>%
mutate(cq = cpD2,
eff = eff.y) %>%
ungroup() %>%
data.frame()
# Relative abundance from efficiency and quantification cycle: Ra = eff^-cq
qdat <- qpcrdat.replicates %>%
mutate(Ra = eff^-cq)
#make model#
m1 <- lm(Ra ~ gene + cdna + subject, data = qdat)
library(emmeans); library(ggplot2)
estimates <- emmeans(m1, specs = ~ "gene|cdna+subject")
estimates %>%
data.frame() %>%
ggplot(aes(cdna, emmean, color = subject, group = gene)) +
geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
width = 0.2, position = position_dodge(width = 0.2)) +
geom_point(position = position_dodge(width = 0.2)) +
geom_line(position = position_dodge(width = 0.2)) +
facet_grid(gene ~ ., scales = "free" )
####expression of cq values####
qdat_analysis <- qdat %>%
dplyr::select(subject, gene, cdna, cq) %>%
group_by(cdna, gene) %>%
summarise(m = mean(cq, na.rm = TRUE),
s = sd(cq, na.rm = TRUE))
# Mean +/- SD of cq per cDNA/gene combination
qdat_analysis %>%
ggplot(aes(cdna, m, color = gene)) +
geom_errorbar(aes(ymin = m-s, ymax = m+s),
width = 0.2,
position = position_dodge(width = 0.2)) +
geom_point(position = position_dodge(width = 0.2)) +
geom_line(position = position_dodge(width = 0.2)) +
facet_grid(gene ~ ., scales = "free" )
|
0fbe5d4e396be8823623dfb532721e004290161e
|
3bed450a6c2394d7b95fe86397170d01e4c7af6d
|
/archived R/mixpoisson-functions.R
|
a6dd128397d54e6bf01e8eb3b871ea13bdcb8837
|
[] |
no_license
|
jlivsey/countsFun
|
4f2d06a9e1f3c176c98a204b81d79ec9a884c98f
|
e2dca09f1878d94955649536a420d1fc63e90f2a
|
refs/heads/master
| 2023-04-07T00:21:30.626480
| 2023-03-01T04:34:58
| 2023-03-01T04:34:58
| 234,409,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,157
|
r
|
mixpoisson-functions.R
|
# #---------inverse cdf of mixpoisson series---------#
# qmixpois = function(y, p, lam1, lam2){
# yl = length(y)
# x = rep(0,yl)
# for (n in 1:yl){
# while(pmixpois(x[n], p, lam1, lam2) <= y[n]){ # R qpois would use <y; this choice makes the function right-continuous; this does not really matter for our model
# x[n] = x[n]+1
# }
# }
# return(x)
# }
#
#
# #---------cdf of mixpoisson series---------#
# pmixpois = function(x, p, lam1, lam2){
# y = p*ppois(x,lam1) + (1-p)*ppois(x,lam2)
# return(y)
# }
#
#
# #---------pdf of mixpoisson series---------#
# dmixpois = function(x, p, lam1, lam2){
# y = p*dpois(x,lam1) + (1-p)*dpois(x,lam2)
# return(y)
# }
#
#
# #---------generate mixpoisson series---------#
# rmixpois = function(n, p, lam1, lam2){
# u = runif(n)
# x = rpois(n,lam1)*(u<=p) + rpois(n,lam2)*(u>p)
# return(x)
# }
#
#
# #---------simulate negbin series with prob of success p---------#
# sim_mixedPoisson = function(n, ARMAmodel,p,lam1,lam2){
# #====================================================================================#
# # PURPOSE Simulate NegBin series with ARMA structure. See relation (1)
# # in https://arxiv.org/pdf/1811.00203.pdf
# #
# # INPUT
# # n series length
# # ARMAmodel list with ARMA parameters
# # r,p Marginal Parameters
# #
# # Output
# # X Neg bin series
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date April 2020
# # Version 3.6.3
# #====================================================================================#
#
# z = arima.sim(model = list(ar=ARMAmodel[[1]], ma=ARMAmodel[[2]]), n = n); z = z/sd(z)
# # The third argument of qnbinom is the prob of failure
# X = qmixpois(pnorm(z), p, lam1, lam2)
# return(X)
# }
#
#
# #---------Hermitte Coefficients for all k---------#
# HermCoefMixedPoisson <- function(p, lam1,lam2, N, nHC){
# #====================================================================================#
# # PURPOSE Compute all Hermite Coefficients. See relation (21) in
# # https://arxiv.org/pdf/1811.00203.pdf
# #
# # INPUT
# # r,p Marginal parameters
# # maxCoef number of coefficients to return. Default = 20
# # N truncation of relation (21)
# # nHC number of Hermitte coefficients
# #
# # Output
# # HC All Hermite coeficients
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date April 2020
# # Version 3.6.3
# #====================================================================================#
#
# h = 1:19 #check me
# HC = rep(NA, length(h)) # storage
# for(i in h) {
# HC[i] <- HermCoefMixedPoisson_k(p,lam1,lam2 , k = i, N)
# }
#
# return(HC)
#
# }
#
#
# #---------Hermitte Coefficients for one k---------#
# HermCoefMixedPoisson_k <- function(p, lam1, lam2, k, N){
# #====================================================================================#
# # PURPOSE Compute kth Hermite Coefficient. See relation (21) in
# # https://arxiv.org/pdf/1811.00203.pdf
# #
# # INPUT
# # r,p Marginal parameters
# # k index of Hermite coefficient
# #
# # Output
# # HC_k kth hermite coeficient
# #
# # Authors Stefanos Kechagias, James Livsey
# # Date January 2020
# # Version 3.6.1
# #====================================================================================#
#
# # function for kth Hermite Polynomial
# her <- function(x){
# evalHermPolynomial(k-1, x)
# }
#
# # # truncation numbe: check me
# # N <- which(round(pnbinom(1:1000, r,p), 7) == 1)[1]
# # if(is.na(N)){
# # N=1000
# # }
# # N = max(sapply(unique(p),function(x)which(round(pnbinom(1:1000, r,x), 7) == 1)[1]))
#
# # compute terms in the sum of relation (21) in
# terms <- exp((-qnorm( pmixpois(0:max(N), p, lam1,lam2) )^2)/2) *
# her(qnorm( pmixpois(0:max(N), p, lam1,lam2) ))
#
# terms[is.nan(terms)]=0
#
# # take the sum of all terms
# HC_k <- sum(terms) / (sqrt(2*pi) * factorial(k))
# return(HC_k)
# }
#
#
# #---------Covariance matrix---------#
# CovarMixedPoisson = function(n, p, lam1,lam2, AR, MA, N, nHC){
# #====================================================================================#
# # PURPOSE Compute the covariance matrix of a NegBin series.
# #
# # INPUT
# # r,p Marginal parameters
# # AR,MA ARMA parameters
# # n size of the matrix
# # N truncation for relation (21)
# # nHC number of Hermitte coefficents to be computed
# #
# # Output
# # GAMMA covariance matrix ofcount series
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date April 2020
# # Version 3.6.3
# #====================================================================================#
#
# # Hermite coeficients--relation (21) in https://arxiv.org/pdf/1811.00203.pdf
# HC = HermCoefMixedPoisson(p,lam1,lam2,N, nHC)
#
# # ARMA autocorrelation function
# if(!length(AR)){arma.acf <- ARMAacf(ma = MA, lag.max = n)}
# if(!length(MA)){arma.acf <- ARMAacf(ar = AR, lag.max = n)}
# if(length(AR) & length(MA)){arma.acf <- ARMAacf(ar = AR, ma = MA, lag.max = n)}
#
# # Autocovariance of count series--relation (9) in https://arxiv.org/pdf/1811.00203.pdf
# gamma_x = CountACVF(h = 0:(n-1), myacf = arma.acf, g = HC)
#
# # Final toeplitz covariance matrix--relation (56) in https://arxiv.org/pdf/1811.00203.pdf
# GAMMA = toeplitz(gamma_x)
# return(GAMMA)
# }
#
#
# #---------Gaussian Likelihood function---------#
# GaussLogLikMP = function(theta, data, ARMAorder, MaxCdf, nHC){
# #====================================================================================#
# # PURPOSE Compute Gaussian log-likelihood for Mixed-Poisson series
# #
# # INPUT
# # theta parameter vector containing the marginal and ARMA parameters
# # data count series
# # ARMAorder ordeer of ARMA model
# # MaxCdf cdf will be computed up to this number (for light tails cdf=1 fast)
# # nHC number of HC to be computed
# #
# #
# # Output
# # loglik Gaussian log-likelihood
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date April 2020
# # Version 3.6.3
# #====================================================================================#
#
# # retrieve parameters and sample size
# p = theta[1]
# lam1 = theta[2]
# lam2 = theta[3]
# nparms = length(theta)
# nMargParms = nparms - sum(ARMAorder)
# if(ARMAorder[1]>0){
# AR = theta[(nparms-ARMAorder[1]+1):(nMargParms + ARMAorder[1]) ]
# }else{
# AR = NULL
# }
#
# if(ARMAorder[2]>0){
# MA = theta[ (nMargParms+ARMAorder[1]+1) : length(theta)]
# }else{
# MA = NULL
# }
# n = length(data)
#
# # compute truncation of relation (21)
# N <- which(round( pmixpois(1:MaxCdf, p,lam1,lam2) , 7) == 1)[1]
#
#
# if(length(N)==0 |is.na(N) ){
# N = MaxCdf
# }
#
# #Select the mean value used to demean--sample or true?
# MeanValue = p*lam1 + (1-p)*lam2
#
# # Compute the covariance matrix--relation (56) in https://arxiv.org/pdf/1811.00203.pdf
# GAMMA = CovarMixedPoisson(n, p, lam1, lam2, AR, MA, N, nHC)
#
# # Compute the logdet and the quadratic part
# logLikComponents = EvalInvQuadForm(GAMMA, as.numeric(data), MeanValue)
#
# # final loglikelihood value
# out = 0.5*logLikComponents[1] + 0.5*logLikComponents[2]
#
# # the following will match the above if you subtract N/2*log(2*pi) and don't multiply with 2
# # out = -2*dmvnorm(as.numeric(data), rep(lam, n), GAMMA, log = TRUE)
# return(out)
# }
#
#
# #---------wrapper to fit Gaussian Likelihood function---------#
# FitGaussianLikMP = function(x0, X, LB, UB, ARMAorder, MaxCdf, nHC){
# #====================================================================================#
# # PURPOSE Fit the Gaussian log-likelihood for Mixed Poisson series
# #
# # INPUT
# # x0 initial parameters
# # X count series
# # LB parameter lower bounds
# # UB parameter upper bounds
# # ARMAorder order of the udnerlying ARMA model
# # MaxCdf cdf will be computed up to this number (for light tails cdf=1 fast)
# # nHC number of HC to be computed
# #
# # OUTPUT
# # All parameter estimates, standard errors, likelihood value
# #
# # NOTES I may comment out se in cases where maximum is achieved at the boundary
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date April 2020
# # Version 3.6.3
# #====================================================================================#
# optim.output <- optimx(par = x0,
# fn = GaussLogLikMP,
# data = X,
# ARMAorder = ARMAorder,
# MaxCdf = MaxCdf,
# nHC = nHC,
# method = "L-BFGS-B",
# hessian = TRUE,
# lower = LB,
# upper = UB
# )
#
# nparms = length(x0)
# ParmEst = matrix(0,nrow=1,ncol=nparms)
# se = matrix(NA,nrow=1,ncol=nparms)
# loglik = rep(0,1)
#
# # save estimates, loglik and standard errors
# ParmEst[,1:nparms] = c(optim.output$p1,optim.output$p2,optim.output$p3,optim.output$p4)
# loglik = optim.output$value
#
# # compute hessian
# H = gHgen(par = ParmEst,
# fn = GaussLogLikMP,
# data = X,
# ARMAorder = ARMAorder,
# MaxCdf = MaxCdf,
# nHC = nHC
# )$Hn
#
# se[,1:nparms] = sqrt(abs(diag(solve(H))))
#
# All = cbind(ParmEst, se, loglik)
# return(All)
#
# }
#
#
# #---------initial estimates via pmle and reversion
# ComputeInitMixedPoissonAR = function(x,n,nHC,LB,UB){
# #---------------------------------#
# # Purpose: Method of Moment Initial estimates for MixPois AR(1)
# #
# #
# #
# # Authors Stefanos Kechagias, James Livsey, Vladas Pipiras
# # Date June 2020
# # Version 3.6.3
# #---------------------------------#
#
# # pmle for marginal parameters
# MixPois_PMLE <- pmle.pois(x,2)
#
# pEst = MixPois_PMLE[[1]][1]
# l1Est = MixPois_PMLE[[2]][1]
# l2Est = MixPois_PMLE[[2]][2]
#
#
# # correct estimates if they are outside the feasible region
# if(pEst<LB[1]){pEst = 1.1*LB[1]}
# if(pEst>UB[1]){pEst = 0.9*UB[1]}
#
# if(l1Est<LB[2]){l1Est = 1.1*LB[2]}
# if(l2Est<LB[3]){l2Est = 1.1*LB[3]}
#
#
# # compute thetaEst using reversion as in IYW
# initParms = ComputeInitMixedPoissonARterm(x, pEst, l1Est, l2Est, n, nHC, LB, UB)
#
# return(initParms)
#
# }
#
#
# #--------obtain initial estimate for AR term using acf and reversion
# ComputeInitMixedPoissonARterm = function(x, pEst, l1Est, l2Est, N, nHC, LB, UB){
#
# # compute Hermite coefficients
# g.coefs = HermCoefMixedPoisson(pEst, l1Est, l2Est, N, nHC)
#
# # Compute acf of count series at lag 0--(mfg of mixed Pois = mix of Pois mgfs )
# MixedPoissonVar = pEst*(l1Est^2 + l1Est) + (1-pEst)*(l2Est^2 + l2Est) -
# (pEst*l1Est + (1-pEst)*l2Est)^2
#
# # compute link coeffcients
# link.coefs <- link_coefs(g.coefs, MixedPoissonVar)
#
# # compute Inverse Link coefficients of f^-1: gam.z --> gam.x
# inv.link.coefs <- reversion(link.coefs)
#
# # sample acf of count data
# gam.x <- acf(x = x, lag.max = 30, plot = FALSE, type = "correlation")$acf
#
# # compute gamma Z thru reversion
# gam.z <- power_series(gam.x[,,1], inv.link.coefs)
# gam.z = gam.z/gam.z[1]
#
# phiEst = gam.z[2]
#
# # correct if I am outside the boundaries
# if(phiEst<LB[4]){phiEst = 1.1*LB[4]}
# if(phiEst>UB[4]){phiEst = 0.9*UB[4]}
#
# InitEstimates = c(pEst, l1Est, l2Est, phiEst)
# return(InitEstimates)
# }
|
412eee054c77d4ebaa6c5bf7c92458603fb8df93
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/iMediate/examples/fimle.lnl.Rd.R
|
9aa184ae697e79b662dc8f090ab5e85be46a2919
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
fimle.lnl.Rd.R
|
# Auto-extracted example code for iMediate::fimle.lnl (full-information
# maximum-likelihood mediation estimates with linear M- and Y-models).
library(iMediate)
### Name: fimle.lnl
### Title: Full Information Maximum Likelihood Estimates in Linear M-model
### and Linear Y-model
### Aliases: fimle.lnl
### Keywords: estimates
### ** Examples
# Example data: job-search intervention study shipped with `mediation`
data("jobs", package = "mediation")
# Mediator model (job_seek) and outcome model (depress2), no interaction
fit.M <- lm(job_seek ~ treat + econ_hard + sex + age, data=jobs)
fit.Y <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data=jobs)
fimle.lnl(fit.M, fit.Y, "treat", rho=0.2)
# Same models with a treatment-by-mediator interaction and a larger rho
fit.M <- lm(job_seek ~ treat + econ_hard + sex + age , data=jobs)
fit.Y <- lm(depress2 ~ treat*job_seek+ econ_hard + sex + age , data=jobs)
fimle.lnl(fit.M, fit.Y, "treat", rho=0.5)
|
fcc6cab3487d7fc762def3efede51e58d017deaf
|
476639c612f10f610b59a14e1d29022b8cae0086
|
/man/fit_topt_VJs.Rd
|
0183fb2d682c440f96878be47e91c2c859a7ee70
|
[] |
no_license
|
jstinzi/plantecowrap
|
4b81090370e51804a6590385ca866cb6478f0e0b
|
3fcd4cdb4a29055e130ea0daed4728e5d5813120
|
refs/heads/master
| 2020-08-05T05:09:24.392516
| 2020-04-27T14:29:18
| 2020-04-27T14:29:18
| 212,407,836
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,331
|
rd
|
fit_topt_VJs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_topt_VJs.R
\name{fit_topt_VJs}
\alias{fit_topt_VJs}
\title{Fitting multiple temperature response curves}
\usage{
fit_topt_VJs(
data,
group,
varnames = list(Vcmax = "Vcmax", Jmax = "Jmax", Tleaf = "Tleaf"),
limit_jmax = 1e+05,
limit_vcmax = 1e+05,
...
)
}
\arguments{
\item{data}{Dataframe with multiple temperature response curves for Vcmax
(maximum rubisco carboxylation capacity in umol m-2 s-1) and Jmax (maximum
photosynthetic electron transport to CO2 fixation in umol m-2 s-1).}
\item{group}{Grouping variable to use, e.g. Plant ID}
\item{varnames}{Variable names. Reassigns variable names to account for
different spellings of Vcmax, Jmax, and Tleaf}
\item{limit_jmax}{Upper limit to Jmax values for fitting. Defaults to
100,000 umol m-2 s-1 as this is the "nonsense output" from fitaci. Ensures
that these points are not fit.}
\item{limit_vcmax}{Upper limit to Vcmax values for fitting. Defaults to
100,000 umol m-2 s-1.}
\item{...}{Arguments to be passed on to minpack.lm::nlsLM via fit_topt_VJ().
See ?nlsLM for details.}
}
\value{
fit_topt_VJs fits multiple Vcmax and Jmax temperature responses
using the optimum temperature response model from Medlyn et al. 2002.
REFERENCE
Medlyn BE, Dreyer E, Ellsworth D, Forstreuter M, Harley PC,
Kirschbaum MUF, Le Roux X, Montpied P, Strassemeyer J, Walcroft A,
Wang K, Loutstau D. 2002. Temperature response of parameters of a
biochemically based model of photosynthesis. II. A review of
experimental data. Plant Cell Environ 25:1167-1179
}
\description{
Fitting multiple temperature response curves
}
\examples{
\donttest{
#Read in data
data <- read.csv(system.file("extdata", "example_2.csv",
package = "plantecowrap"), stringsAsFactors = FALSE)
#Fit ACi Curves then fit temperature responses
fits <- fitacis2(data = data,
varnames = list(ALEAF = "A",
Tleaf = "Tleaf",
Ci = "Ci",
PPFD = "PPFD",
Rd = "Rd",
Press = "Press"),
group1 = "Grouping",
fitTPU = FALSE,
fitmethod = "bilinear",
gm25 = 10000,
Egm = 0)
#Extract coefficients
outputs <- acisummary(data, group1 = "Grouping", fits = fits)
#Plot curve fits
for (i in 1:length(fits)) {
plot(fits[[i]])
}
#Separate out grouping variable
outputs <- separate(outputs, col = "ID", c("Treat", "Block"), sep = "_")
#Fit the Topt model from Medlyn et al. 2002 for all individuals
#Output is a list of lists for each individual
#There is also a fit_topt_VJ for single temperature response
#fitting
out <- fit_topt_VJs(data = outputs,
group = "Block", #this grouping variable is for
#each individual
varnames = list(Vcmax = "Vcmax",
Jmax = "Jmax",
Tleaf = "Tleaf"),
limit_jmax = 100000,
limit_vcmax = 100000)
#Let's get the parameters out into a single data frame
pars <- get_t_pars(out)
#Let's get the graphs out into a list
#You can get a graph using: graph[1]
graphs <- get_t_graphs(out)
}
}
|
849ba7c44f5b7ea26b11d398b4d680cf24f917cb
|
005fd58207cfba90a5ae54577d36b55ff3905280
|
/OldCode/Phenology_GLMMER.R
|
538eb6c522a3cf50938215af6f4b11030d17f140
|
[] |
no_license
|
audhalbritter/Phenology
|
ae8b4ce130213a6e9da5872e9c5c8c28ed0b8ad7
|
d1c2cbd73610306363686198c385a1305bc2a09c
|
refs/heads/master
| 2020-12-07T19:20:38.625764
| 2018-09-16T12:26:36
| 2018-09-16T12:26:36
| 66,484,125
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,530
|
r
|
Phenology_GLMMER.R
|
####################################
# Bayesian Phenology Model
####################################
#install.packages(c("rjags", "coda", "INLA", "rgdal"), repos = c(getOption("repos"), "http://www.math.ntnu.no/inla/R/stable"))
# drop levels and convert factors in integer
# Manual conversion of grouping variables to ordered factors plus integer
# index columns (needed later for JAGS indexing).
# NOTE(review): `pheno` is reassigned from `pheno.long` by the dplyr pipeline
# immediately below, so this manually built `pheno.dat` appears to be dead
# code kept for reference -- confirm before removing.
pheno.dat <- pheno.long # rename data set
# Temperature
# Destination temperature level: character -> factor (levels "1","2") -> integer index
pheno.dat$destT_level <- as.character(pheno.dat$destT_level)
pheno.dat$destTemp <- factor(pheno.dat$destT_level, levels=c("1", "2"))
pheno.dat$destTempIndex <- as.integer(pheno.dat$destTemp)
# Precipitation
# Destination precipitation level: character -> factor (levels "2","3","4") -> integer index
pheno.dat$destP_level <- as.character(pheno.dat$destP_level)
pheno.dat$destPrec <- factor(pheno.dat$destP_level, levels = c("2", "3", "4"))
pheno.dat$destPrecIndex <- as.integer(pheno.dat$destPrec)
# Species
# Species: factor with levels in order of first appearance, plus integer index
pheno.dat$species <- as.character(pheno.dat$species)
sp <- unique(pheno.dat$species)
pheno.dat$species <- factor(pheno.dat$species, levels = sp)
pheno.dat$speciesIndex <- as.integer(pheno.dat$species)
pheno <- pheno.dat
# Transform variables ------------------------------------------------------
# Builds the analysis data set from pheno.long: numeric distance covariates,
# factor-coded climate levels (destination and origin) with integer indices
# for JAGS, then subsets to peak flowering recorded as day of year.
# FIX: mutate_each()/funs() are defunct in current dplyr; across() is the
# supported, behavior-equivalent replacement for these column-wise coercions.
pheno <- pheno.long %>%
  # transplant-distance covariates need to be numeric
  mutate(across(c(From1To2Temp, From2To3Temp, From1To2Prec, From2To3Prec,
                  From3To4Prec), as.numeric)) %>%
  # destination climate levels and species as character before factoring
  mutate(across(c(destT_level, destP_level, species), as.character)) %>%
  mutate(destTemp = factor(destT_level, levels = c("1", "2"))) %>%
  mutate(destPrec = factor(destP_level, levels = c("2", "3", "4"))) %>%
  mutate(species = factor(species)) %>%
  # integer codes used for indexing in the JAGS models
  mutate(destTempIndex = as.integer(destTemp)) %>%
  mutate(destPrecIndex = as.integer(destPrec)) %>%
  mutate(speciesIndex = as.integer(species)) %>%
  # origin climate levels (note: species is re-coerced to character here,
  # as in the original pipeline)
  mutate(across(c(Temperature_level, Precipitation_level, species),
                as.character)) %>%
  mutate(Temp = factor(Temperature_level, levels = c("1", "2"))) %>%
  mutate(Prec = factor(Precipitation_level, levels = c("1", "2", "3", "4"))) %>%
  mutate(TempIndex = as.integer(Temp)) %>%
  mutate(PrecIndex = as.integer(Prec))
# Subset Data: peak flowering, measured in day of year
pheno <- pheno %>% filter(pheno.stage =="f", pheno.var == "peak", pheno.unit == "doy")
#rm(list=ls())
library(rjags)
### First flowering ~ T * P + 1To2T + 2To3P + 3To4P + (1|species)
# Write the JAGS model (DESTINATION climate levels) to a text file:
# a Poisson GLMM on the phenology response with a species random intercept,
# cumulative ("ordinal") coefficients for the destination temperature and
# precipitation levels, and linear terms for the transplant-distance
# covariates. The model string below is passed verbatim to JAGS.
sink('GLMME_PhenologyDestination.txt')
cat("
model {
### EFFECT PRIORS
# Random effects
# Species random effects
invSpeciesEffectVar ~ dgamma(0.0001, 0.0001)
for(speciesIter in 1:numSP){
speciesEffect[speciesIter] ~ dnorm(0, invSpeciesEffectVar)
}
# Fixed Effects
# Intercept coefficient
intercept ~ dnorm(0,0.0001)
From1To2TempCoef ~ dnorm(0, 0.0001)
From2To3PrecCoef ~ dnorm(0, 0.0001)
From3To4PrecCoef ~ dnorm(0, 0.0001)
# Ordinal coefficients for temperature levels
tempCoeffs[1] <- 0
for(tempLevelIter in 2:numTemps){
tempCoeffs[tempLevelIter] ~ dnorm(0, 0.0001)
}
# Ordinal coefficients for precipitation levels
precCoeffs[1] <- 0
for(precLevelIter in 2:numPrecs){
precCoeffs[precLevelIter] ~ dnorm(0,0.0001)
}
### LIKELIHOOD
for(dataIter in 1:numSamples) {
log(mu[dataIter]) <- intercept + sum(tempCoeffs[1:destTemp[dataIter]]) + sum(precCoeffs[1:destPrec[dataIter]]) + From1To2Temp[dataIter]*From1To2TempCoef + From2To3Prec[dataIter]*From2To3PrecCoef + From3To4Prec[dataIter]*From3To4PrecCoef + speciesEffect[speciesIndex[dataIter]]
pheno.var[dataIter] ~ dpois(mu[dataIter])
}
}
", fill = TRUE)
sink()
# 2) Set up a list that contains all the necessary data (here, including parameters of the prior distribution)
# Factors (destTemp/destPrec) are passed directly; JAGS receives their
# integer level codes, which the cumulative-sum terms above index into.
Data = list(pheno.var = pheno$value, numSamples = length(pheno$value), numTemps = nlevels(pheno$destTemp), numPrecs = nlevels(pheno$destPrec), numSP = nlevels(pheno$species), destTemp = pheno$destTemp, destPrec = pheno$destPrec, From1To2Temp = pheno$From1To2Temp, From2To3Prec = pheno$From2To3Prec, From3To4Prec = pheno$From3To4Prec, speciesIndex = pheno$speciesIndex)
# 3) Specify a function to generate inital values for the parameters
#inits.fn <- function() list(intercept = rnorm(1), invSpeciesEffectVar = exp(rnorm(1)))
# Compile the model and run the MCMC for an adaptation (burn-in) phase
jagsModel <- jags.model(file= "GLMME_PhenologyDestination.txt", data=Data, n.chains = 3, n.adapt= 5000)
#init = inits.fn,
# Specify parameters for which posterior samples are saved
para.names <- c("intercept","From1To2TempCoef","From2To3PrecCoef", "From3To4PrecCoef")
# Continue the MCMC runs with sampling
Samples <- coda.samples(jagsModel, variable.names = para.names, n.iter = 5000)
# Trace and density plots of the monitored parameters
plot(Samples)
# convergence check: Gelman-Rubin diagnostic across the 3 chains
gelman.diag(Samples, multivariate = FALSE)
gelman.plot(Samples)
# Statistical summaries of the posterior sample for p => compare to MLE
summary(Samples)
# Second model: identical Poisson GLMM but using the ORIGIN climate levels
# (Temp/Prec) instead of the destination levels.
# NOTE(review): this reassigns `Data` and `jagsModel`, overwriting the
# destination-model objects created above.
sink('GLMME_PhenologyOrigin.txt')
cat("
model {
### EFFECT PRIORS
# Random effects
# Species random effects
invSpeciesEffectVar ~ dgamma(0.0001, 0.0001)
for(speciesIter in 1:numSP){
speciesEffect[speciesIter] ~ dnorm(0, invSpeciesEffectVar)
}
# Fixed Effects
# Intercept coefficient
intercept ~ dnorm(0,0.0001)
From1To2TempCoef ~ dnorm(0, 0.0001)
From2To3PrecCoef ~ dnorm(0, 0.0001)
From3To4PrecCoef ~ dnorm(0, 0.0001)
# Ordinal coefficients for temperature levels
tempCoeffs[1] <- 0
for(tempLevelIter in 2:numTemps){
tempCoeffs[tempLevelIter] ~ dnorm(0, 0.0001)
}
# Ordinal coefficients for precipitation levels
precCoeffs[1] <- 0
for(precLevelIter in 2:numPrecs){
precCoeffs[precLevelIter] ~ dnorm(0,0.0001)
}
### LIKELIHOOD
for(dataIter in 1:numSamples) {
log(mu[dataIter]) <- intercept + sum(tempCoeffs[1:Temp[dataIter]]) + sum(precCoeffs[1:Prec[dataIter]]) + From1To2Temp[dataIter]*From1To2TempCoef + From2To3Prec[dataIter]*From2To3PrecCoef + From3To4Prec[dataIter]*From3To4PrecCoef + speciesEffect[speciesIndex[dataIter]]
pheno.var[dataIter] ~ dpois(mu[dataIter])
}
}
", fill = TRUE)
sink()
# 2) Set up a list that contains all the necessary data (here, including parameters of the prior distribution)
Data = list(pheno.var = pheno$value, numSamples = length(pheno$value), numTemps = nlevels(pheno$Temp), numPrecs = nlevels(pheno$Prec), numSP = nlevels(pheno$species), Temp = pheno$Temp, Prec = pheno$Prec, From1To2Temp = pheno$From1To2Temp, From2To3Prec = pheno$From2To3Prec, From3To4Prec = pheno$From3To4Prec, speciesIndex = pheno$speciesIndex)
# Compile the model and run the MCMC for an adaptation (burn-in) phase
jagsModel <- jags.model(file= "GLMME_PhenologyOrigin.txt", data=Data, n.chains = 3, n.adapt= 5000)
|
c10c97c936bcc008edc46c270df6fc1210fc700f
|
7946b84034a7dd7e3d5b4d69db88373d58185789
|
/man/ellipsoid_cluster_plot_3d.Rd
|
297eaa8d6aa11a122c9a4c3c8426ff00b711d180
|
[] |
no_license
|
luismurao/ntbox
|
3839a7a346b390850d9d1dc77cbd50cb32a52d00
|
220112e32c53ceef4f5f1bcdb7daed6b755604bf
|
refs/heads/master
| 2023-07-09T05:55:01.119706
| 2023-07-08T18:01:47
| 2023-07-08T18:01:47
| 79,830,037
| 7
| 9
| null | 2020-07-21T00:42:51
| 2017-01-23T17:39:41
|
R
|
UTF-8
|
R
| false
| true
| 2,218
|
rd
|
ellipsoid_cluster_plot_3d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ellipsoid_cluster_plot_3d.R
\name{ellipsoid_cluster_plot_3d}
\alias{ellipsoid_cluster_plot_3d}
\title{Function to plot clusters in 3-dimensions.}
\usage{
ellipsoid_cluster_plot_3d(
niche_data,
cluster_ids,
x,
y,
z,
mve = FALSE,
ellips = TRUE,
level = 0.975,
alpha,
grupos,
vgrupo,
cex1 = 0.25
)
}
\arguments{
\item{niche_data}{A data.frame or matrix containing niche variables}
\item{cluster_ids}{Cluster ids}
\item{x}{variable on x-axes}
\item{y}{variable on y-axes}
\item{z}{variable on z-axes}
\item{mve}{A logical value. If TRUE a minimum volume ellipsoid will be computed using
the function \code{\link[MASS]{cov.rob}} of the \pkg{MASS} package. If False the covariance matrix of the input data will be used.}
\item{ellips}{Ellipsoid plot of each cluster}
\item{level}{proportion of points inside the ellipsoid.}
\item{alpha}{Transparency level of ellipsoid plots}
\item{grupos}{Logical. Show ids of a geographic grouping variable}
\item{vgrupo}{A vector containing the levels of the geographic grouping variable}
\item{cex1}{Point size}
}
\value{
Returns a list of the ellipsoid metadata for each cluster.
}
\description{
Plot cluster data in 3D by modeling them as an Ellipsoid.
}
\details{
The output of this function is the same of the \code{\link[ntbox]{cov_center}} function.
}
\examples{
\dontrun{
cluster_data <- read.csv(system.file("extdata",
"nichekmeansCluster.csv",
package = "ntbox"))
ellipsoid_clusters <- ellipsoid_cluster_plot_3d(niche_data = cluster_data[,c("bio5","bio6","bio12")],
cluster_ids = cluster_data$cluster,
x = "bio5",y="bio6",z="bio12",mve = T,
ellips = T,alpha = 0.25,
grupos = T,vgrupo =cluster_data$cluster,
cex1 = 1,level = 0.975)
# Print metadata for the Minimum Volume Ellipsoid that belongs to cluster 1
print(ellipsoid_clusters$cluster_n_1)
}
}
|
92b6fbc5e779c2038801ec455953d45c49fea729
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rnightlights/examples/getPolyFnameRDS.Rd.R
|
08a26f38196490c5293a1eb97d311a7457c7a735
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
getPolyFnameRDS.Rd.R
|
# Auto-extracted example for Rnightlights::getPolyFnameRDS.
library(Rnightlights)
### Name: getPolyFnameRDS
### Title: Get the filename of the polygon zip file as downloaded from
###     <URL: http://GADM.ORG>
### Aliases: getPolyFnameRDS
### ** Examples
# NOTE(review): the header documents getPolyFnameRDS but the example calls
# the unexported getPolyFnameZip -- confirm which function is intended.
Rnightlights:::getPolyFnameZip("KEN")
#returns "path/to/"
|
31c3cb2be39e8e7e458469e5505a96a677d111b0
|
ecb91b4e873fc01abda8b16a314e8f05a6aae2d7
|
/man/geom_bgimage.Rd
|
b6a39faa6c78fd52cc32f36d64a6cd5ff88617c2
|
[] |
no_license
|
cran/ggimage
|
0d1f6ac6aeb2b010d92c629a9b4b096e46bbf7f1
|
a2f90b69b18263d7593fd1fb1dda36c67ed8b259
|
refs/heads/master
| 2023-06-22T23:12:01.059798
| 2023-06-19T04:10:02
| 2023-06-19T12:47:37
| 82,685,702
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 342
|
rd
|
geom_bgimage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_bgimage.R
\name{geom_bgimage}
\alias{geom_bgimage}
\title{geom_bgimage}
\usage{
geom_bgimage(image)
}
\arguments{
\item{image}{image file}
}
\value{
ggplot
}
\description{
add image as background to plot panel.
}
\author{
Guangchuang Yu
}
|
4b6e7f0a07f108625a6aa20eb2f16c78a1bee16e
|
6778d5ab56c3215d5b840cea14fb13a61042adf6
|
/stemr/man/stem_inference_ode.Rd
|
34d6ad767d45d04c4290e973fd15aa2a62f942a7
|
[] |
no_license
|
popoloni/stemr
|
c6b54d1be6c2a30102153c4570ec6d73b1976c3e
|
4dc0de5c4083d786cb9d80125f2b4f953715cc1a
|
refs/heads/master
| 2021-04-16T21:20:14.119358
| 2020-03-09T23:50:38
| 2020-03-09T23:50:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,271
|
rd
|
stem_inference_ode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stem_inference_ode.R
\name{stem_inference_ode}
\alias{stem_inference_ode}
\title{Approximate Bayesian inference for a stochastic epidemic model via
deterministic trajectory matching.}
\usage{
stem_inference_ode(
stem_object,
iterations,
priors,
mcmc_kernel,
t0_kernel,
thin_params,
thin_latent_proc,
initialization_attempts = 500,
ess_args = NULL,
print_progress = 0,
status_filename = "ODE",
messages
)
}
\arguments{
\item{stem_object}{stochastic epidemic model object with model dynamics, the
measurement process, and a dataset.}
\item{iterations}{number of MCMC iterations}
\item{priors}{a list of named functions for computing the prior density as
well as transforming parameters to and from their estimation scales. The
functions should have the following names: "prior_density",
"to_estimation_scale", "from_estimation_scale". The prior_density function
must take two vectors as arguments, the model parameters (excluding initial
compartment volumes and t0) on their natural scales, and the model
parameters on their estimation scales. The functions for converting between
parameter scales should take vector of parameters as an argument, returning
a transformed vector (the function call has the form:
\code{transformed_vector <- conversion_function(original_vector)}).}
\item{mcmc_kernel}{list containing the mcmc_kernel method, proposal
covariance matrix, and an external pointer for the compiled mcmc_kernel
function}
\item{t0_kernel}{output of \code{t0_kernel}, specifying the RWMH transition
mcmc_kernel for t0 and the truncated normal distribution prior.}
\item{thin_params}{thinning interval for posterior parameter samples,
defaults to 1}
\item{thin_latent_proc}{thinning interval for latent paths, defaults to
ceiling(iterations/100)}
\item{initialization_attempts}{}
\item{ess_args}{}
\item{print_progress}{prints progress every n iterations, defaults to 0 for
no printing}
\item{messages}{should status messages be generated in an external text file?}
}
\value{
list with parameter posterior samples and MCMC diagnostics
}
\description{
Approximate Bayesian inference for a stochastic epidemic model via
deterministic trajectory matching.
}
|
39a0311729f19f9d3b865725508020ebdf281505
|
e405d48e8413d2d182aa662be327e9995105acfd
|
/tests/testthat.R
|
91f7561b3d56fbcd292623d18a270de1a729314b
|
[
"MIT"
] |
permissive
|
joebrew/tango
|
53615bf0098c0ab3a5ac24637814ce90b8c4d22b
|
5ae9c0115d1d9887b2a43462c6217d2d1c2a47a2
|
refs/heads/master
| 2021-05-18T17:54:21.877607
| 2020-04-07T08:41:36
| 2020-04-07T08:41:36
| 251,347,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: loads the package under test and runs
# every test file under tests/testthat/.
library(testthat)
library(tango)
test_check("tango")
|
e6cbf8d6ef1dad71a3ae16ce562e05a088a067df
|
5e832862b2e36be6ba27e874e98499bc399de699
|
/inst/NEWS.Rd
|
193712a16f33d8c6dad2c26bbfe25d497cd364cc
|
[] |
no_license
|
dmgatti/DOQTL
|
c5c22306053ddbd03295207702827cf2a715bb70
|
a1a4d170bf5923ca45689a83822febdb46ede215
|
refs/heads/master
| 2021-01-17T02:08:27.831277
| 2019-05-24T19:22:35
| 2019-05-24T19:22:35
| 13,506,518
| 15
| 12
| null | 2019-02-27T13:46:31
| 2013-10-11T18:33:24
|
R
|
UTF-8
|
R
| false
| false
| 3,717
|
rd
|
NEWS.Rd
|
% -*- coding: utf-8 -*-
\name{NEWS}
\title{News for Package \pkg{DOQTL}}
\encoding{UTF-8}
\section{CHANGES IN VERSION 1.5.3}{
\subsection{CHANGES}{
\itemize{
\item Added get.pgw() to allow users to get genome-wide p-values for LOD or -log10(p-values).
}
}
}
\section{CHANGES IN VERSION 1.5.0}{
\subsection{CHANGES}{
\itemize{
\item Major changes to the signficance thresholds. We now produce
separate autosomal and X chromosome thresholds. This means that the
return value of scanone.perm() is a 2 column matrix (or array).
      The columns contain the autosomal and X Chr maximum statistics, respectively.
There is a new function called get.sig.thr() that implements the
method of Broman et.al., Genetics, 2006 for calculating X chromosome
thresholds.
}
}
}
\section{CHANGES IN VERSION 1.1.8}{
\subsection{CHANGES}{
\itemize{
      \item Fixed bug in X chromosome mapping. We now always require sex as an additive covariate.
}
}
}
\section{CHANGES IN VERSION 1.1.6}{
\subsection{CHANGES}{
\itemize{
\item Moved MUGAExampleData and doMPI to "suggests" field in the DESCRIPTION file.
\item Changed haplotype reconstruction to work in Euclidean (X,Y) space.
\item Replaced mclust() with pamk() in initial genotype clustering at each marker.
Pamk is faster and the EM algorithm seems to produce equivalent haplotype
reconstructions.
}
}
}
\section{CHANGES IN VERSION 1.1.3}{
\subsection{NEW FEATURES}{
\itemize{
\item Added support for HS rat haplotype reconstruction.
\item Added support parallel whole genome association mapping.
}
}
}
\section{CHANGES IN VERSION 1.07}{
\subsection{NEW FEATURES}{
\itemize{
\item Added \code{scanone.assoc} to perform genome wide association mapping.
}
}
\subsection{CHANGES}{
\itemize{
\item Fixed bug in \code{assoc.map} where the SDPs were not in the correct order.
}
}
}
\section{CHANGES IN VERSION 1.07}{
\subsection{NEW FEATURES}{
\itemize{
\item Added \code{calc.genoprob2} to run forward/backward algorithm once cluster parameters have been estimated.
}
}
\subsection{CHANGES}{
\itemize{
\item Change \code{assoc.map} to use the Sanger VCF file rather than our own custom SNP file.
}
}
}
\section{CHANGES IN VERSION 1.06}{
\subsection{NEW FEATURES}{
\itemize{
\item Added limited support for association mapping in Heterogeneous Stock mice.
}
}
\subsection{CHANGES}{
\itemize{
\item \code{calc.genoprob.intensity} Fixed bug in which probs were not written out on the X chromosome when mapping with males only.
}
}
}
\section{CHANGES IN VERSION 0.99.1}{
\subsection{NEW FEATURES}{
\itemize{
\item Added limited support for Heterogeneous Stock mice.
\item \code{assoc.plot} Added strain distribution patterns above the association mapping plot.
}
}
\subsection{CHANGES}{
\itemize{
\item \code{rankZ} Fixed bug relating to NA values.
}
}
}
\section{CHANGES IN VERSION 0.99.0}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{read.vcf} reads Sanger SNP VCF files.
\item \code{assoc.map} imputes Sanger SNPs onto DO genomes and performs association mapping.
\item Fixed bug in \code{kinship.probs} in which kinship per chromosome was not calculated correctly.
\item Improved gene layout algorithm in \code{gene.plot}.
}
}
\subsection{CHANGES}{
\itemize{
\item \code{scanone} returns p-values and -log10(p-values).
\item \code{doqtl.plot} plots either LOD or -log10(p-values).
}
}
}
|
9168970eed022a07e7eb783a9b6a3abaab55994d
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ClustVarLV/R/crit_init.R
|
45b7b7e1faba60dd6701479e9e02096a5e50145a
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,424
|
r
|
crit_init.R
|
crit_init <-
function(method,X,EXTr,Xr,EXTu,Xu)
### Initial clustering criterion (each variable = one group) for the hierarchy.
###
### method : 1 = covariance-based CLV criterion, 2 = "local" criterion
###          (standard deviation / norm based)
### X      : (n x p) data matrix of the variables to be clustered
### EXTr   : 1 if external data Xr (n x q) associated with the observations
###          is used, 0 otherwise
### Xr     : external data matrix for the observations (used when EXTr == 1)
### EXTu   : 1 if external data Xu associated with the variables is used,
###          0 otherwise
### Xu     : external data for the variables (not read directly here; the
###          EXTu == 1 branches only use X)
###
### Returns a numeric vector of length p, one criterion value per variable.
### NOTE: behavior is unchanged from the original; the growing-vector loops
### were replaced by type-stable vapply() calls (same values, same order).
{
  n <- nrow(X)
  p <- ncol(X)
  # Flag missing values once; external-data branches cannot handle NAs.
  valmq <- anyNA(X)
  # method 1
  if (method == 1) {
    if ((EXTu == 0) && (EXTr == 0)) {
      # per-variable variance
      crit <- apply(X, 2, var, na.rm = TRUE)
    }
    if ((EXTu == 0) && (EXTr == 1)) {
      # squared covariances with the external variables, summed per column
      XrX <- t(Xr) %*% X
      crit <- apply(XrX^2 / (n - 1), 2, sum, na.rm = TRUE)
    }
    if ((EXTu == 1) && (EXTr == 0)) {
      # per-variable variance (vapply replaces the original c()-growing loop)
      crit <- vapply(seq_len(p),
                     function(k) var(X[, k], na.rm = TRUE),
                     numeric(1))
    }
  }
  # method 2
  if (method == 2) {
    if ((EXTu == 0) && (EXTr == 0)) {
      #crit<-apply(X,2,var,na.rm=TRUE) # version RSA ck=xbark
      crit <- apply(X, 2, sd, na.rm = TRUE) # version CommStat ck normalized
    }
    if ((EXTu == 0) && (EXTr == 1)) {
      if (valmq) stop("The matrix X contains missing values. Use a X matrix without missing value for CLV with external data")
      # norm of each variable's covariance vector with the external data
      px <- sqrt(diag(tcrossprod(t(X) %*% Xr)))
      crit <- px / (n - 1)
    }
    if ((EXTu == 1) && (EXTr == 0)) {
      if (valmq) stop("The matrix X contains missing values. Use a X matrix without missing value for CLV with external data")
      # per-variable (uncentered) norm; as.numeric drops the 1x1-matrix
      # result of crossprod(), matching the original flattening via c()
      crit <- vapply(seq_len(p),
                     function(k) sqrt(as.numeric(crossprod(X[, k])) / (n - 1)),
                     numeric(1))
    }
  }
  return(crit)
}
|
47559dd6b3908a6e4427a59ae13aa09c64f5d12a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BinaryEPPM/examples/foodstamp.grouped.Rd.R
|
584f2a78ae7515495bcfeae72c4395fea536d80c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 267
|
r
|
foodstamp.grouped.Rd.R
|
# Auto-extracted example for the BinaryEPPM::foodstamp.grouped dataset.
library(BinaryEPPM)
### Name: foodstamp.grouped
### Title: Participation in the federal food stamp program as a list not a
###   data frame.
### Aliases: foodstamp.grouped
### Keywords: datasets
### ** Examples
# Load the dataset from the package and print it.
data(foodstamp.grouped)
print(foodstamp.grouped)
|
c2a81dfe33e33c4b2430054134c282483c1c970e
|
89c14aa2c3d9341081b9ff6b560d93e97796ead6
|
/R/NetSim.R
|
cb65416b75d4c4d8494e6cd5dcf9a08763a4154d
|
[] |
no_license
|
AkandaAshraf/netsim
|
338af794b507062fde99ef297a750070bbf88a3e
|
8ea1310f9e186912a157dee3c9b83fa6bf436b2d
|
refs/heads/master
| 2021-01-19T02:26:37.633643
| 2017-02-09T11:23:58
| 2017-02-09T11:23:58
| 78,456,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50,860
|
r
|
NetSim.R
|
#' This function allows you to generate three different types of networks with the same number of vertices and edges
#' @param ScaleFreePowerRange A vector containing powers for scale-free networks, e.g. c(1.5,1.75)
#' @param SmallWorldProbability A vector containing rewiring probabilities for small-world networks, e.g. c(0.5,0.6)
#' @param VerticesVector A vector containing the numbers of vertices used to generate all three network types (note that the number of edges is calculated so that edges and vertices match across all three network types; see the documentation for further information), e.g. c(10,100)
#' @param SampleSize Integer defining how many samples will be generated for each network type
#' @param edgesEachScaleFree Integer defining how many edges are added in each step of the scale-free networks
#' @param savingDir A string with the directory where graph objects and plots are saved; the directory must be created beforehand
#' @param plotGraph If "Y", line plots will also be created (NOTE(review): the documented default "y" is lowercase but the code compares against "Y" case-sensitively -- confirm the intended case)
#' @param GenDegreeDist If TRUE, degree-distribution plots are additionally generated for each network type
#' @import igraph
#' @import ggplot2
#' @export
gen_graphs<- function(ScaleFreePowerRange,SmallWorldProbability,VerticesVector,SampleSize,edgesEachScaleFree,savingDir,plotGraph="y",GenDegreeDist)
{
#require('igraph')
# Start the clock!
ptm <- proc.time()
print("generating Scalefree networks(preferential attachment, 2 edges each step) and calculating different properties")
# EdgesVector = (VerticesVector * 2)-3
EdgesVector = ((VerticesVector*edgesEachScaleFree)-((edgesEachScaleFree*(edgesEachScaleFree+1))/2))
NeiSmallWorld = edgesEachScaleFree
numberOFDelforSmallWorld = (VerticesVector*(edgesEachScaleFree-NeiSmallWorld))+((edgesEachScaleFree*(edgesEachScaleFree+1))/2)
print(numberOFDelforSmallWorld)
##numberOFDelforSmallWorld = ((edgesEachScaleFree*(edgesEachScaleFree+1))/2)
##print(numberOFDelforSmallWorld)
####Generate Networks
for(power in ScaleFreePowerRange)
{
ScaleFreeGraphsEdges = lapply(1:SampleSize,function(i) BAGraphScalefreePropertiesEdges(VectorVertices = VerticesVector,p1=power,p2=power,PstepSize=0.25,EdgesEachStep = edgesEachScaleFree))
print(paste("Serializing and saving Network objects Object:Scalefree",power,sep = ""))
saveRDS(ScaleFreeGraphsEdges, file = paste(savingDir,"Scalefree",power,sep = ""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
}
print("generating and calculating different properties of random graph networks(gnm) with same number of edges as in Scalefree ")
RandomGraphsEdges = lapply(1:SampleSize,function(i) ERGraphRandomPropertiesGNM2(VectorVertices = VerticesVector,VectorEdges = EdgesVector,PstepSize=1))
print(paste("Serializing and saving Network objects Object: RandomGraphsEdges"))
saveRDS(RandomGraphsEdges, file =paste(savingDir,"randomGraphEdgeComparisn",sep=""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
print("generating small World Networks and calculating different properties lattice Dimention-1, Nei-2, also deleting randomly selected edges to match with other two")
for(Probability in SmallWorldProbability)
{
SmallWorldEdges = lapply(1:SampleSize,function(i) SmallWorldGraphPropertiesEdges(SizeVector = VerticesVector,latticeNei = NeiSmallWorld,latticeDim = 1,numberOfEdgesDelRandomly =numberOFDelforSmallWorld,p1=Probability,p2=Probability,PstepSize =1 ))
print(paste("Serializing and saving Network objects Object:SmallWorldEdges",Probability,sep = ""))
saveRDS(SmallWorldEdges, file = paste(savingDir,"SmallWorld",Probability,sep = ""), ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
}
#############
if(plotGraph=="Y")
{
# require('ggplot2')
########Plotting
DfGlobalEdgeVertices<-NULL
DfEdgeGlobalClusteringCoefficent<-NULL
DfGlobalEdgeCentralityBetweenessMean<-NULL
DfGlobalEdgeCentralityClosenessMean<-NULL
DfGlobalEdgeCentralityDegreeMean<-NULL
DfGlobalEdgeAvgGeodesicPath<-NULL
print("loading serialized object-randomGraphEdgeComparisn from hdd")
randomGraphEdgeComparisn = readRDS(paste(savingDir,"randomGraphEdgeComparisn",sep =""), refhook = NULL)
#testing
if(GenDegreeDist)
degreeDistributionPlot(givenObjects=randomGraphEdgeComparisn,SavingDir=savingDir,graphtype="random")
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
dfRandomTemp=NULL
for (powersf in ScaleFreePowerRange) {
for (probabilitysw in SmallWorldProbability) {
NameScaleFree <- paste("Scalefree",powersf,sep ="")
NameSmallWorld <- paste("SmallWorld",probabilitysw,sep ="")
NameFileName <- paste("sc",powersf,"sw",probabilitysw,sep ="")
print(paste("loading serialized object from hdd:",NameScaleFree))
Scalefree = readRDS(paste(savingDir,NameScaleFree,sep =""), refhook = NULL)
if(GenDegreeDist)
degreeDistributionPlot(givenObjects=Scalefree,SavingDir=savingDir,graphtype=NameScaleFree)
NameScaleFree <- paste("SF (alpha=",powersf,")",sep ="")
print(paste("loading serialized object from hdd:",NameSmallWorld))
SmallWorldEdges = readRDS(paste(savingDir,NameSmallWorld,sep =""), refhook = NULL)
if(GenDegreeDist)
degreeDistributionPlot(givenObjects=SmallWorldEdges,SavingDir=savingDir,graphtype=NameSmallWorld)
NameSmallWorld <- paste("SW (P=",probabilitysw,")",sep ="")
dfScaleFreeMultiPlot = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
dfScaleFreeMultiPlot = cbind(dfScaleFreeMultiPlot,networkType=NameScaleFree)
DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfScaleFreeMultiPlot)
dfSmallWorldMultiPlot = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
dfSmallWorldMultiPlot = cbind(dfSmallWorldMultiPlot,networkType=NameSmallWorld)
DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfSmallWorldMultiPlot)
dfScaleFreeMultiPlot = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
dfScaleFreeMultiPlot = cbind(dfScaleFreeMultiPlot,networkType=NameScaleFree)
DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfScaleFreeMultiPlot)
dfSmallWorldMultiPlot = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
dfSmallWorldMultiPlot = cbind(dfSmallWorldMultiPlot,networkType=NameSmallWorld)
DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfSmallWorldMultiPlot)
#print(paste("generating and saving plots for:",NameFileName))
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
dfRandomTemp=NULL
#small world
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
dfRandomTemp=NULL
dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
dfRandomTemp=NULL
}
}
p1 <- ggplot(DfEdgeGlobalClusteringCoefficent, aes(`Number of Edges`,`Global Clustering Coefficent`, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfEdgeGlobalClusteringCoefficent, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeGlobalClusteringCoef.pdf",sep=""), width = 20, height = 20, units = "cm")
p1 <- ggplot(DfGlobalEdgeVertices, aes(`Number of Edges`,Vertices, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfGlobalEdgeVertices, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeVertices.pdf"), width = 20, height = 20, units = "cm")
p1 <- ggplot(DfGlobalEdgeCentralityDegreeMean, aes(`Number of Edges`,`Mean Degree Centrality`, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfGlobalEdgeCentralityDegreesMean, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeMeanDegreeCentrality.pdf",sep=""), width = 20, height = 20, units = "cm")
p1 <- ggplot(DfGlobalEdgeCentralityBetweenessMean, aes(`Number of Edges`,`Mean Betweeness Centrality`, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfGlobalEdgeCentralityBetweenessMean, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeMeanBetweenessCentrality.pdf",sep=""), width = 20, height = 20, units = "cm")
p1 <- ggplot(DfGlobalEdgeCentralityClosenessMean, aes(`Number of Edges`,`Mean Closeness Centrality`, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfGlobalEdgeClosenessMean, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeMeanClosnessCentrality.pdf",sep=""), width = 20, height = 20, units = "cm")
p1 <- ggplot(DfGlobalEdgeAvgGeodesicPath, aes(`Number of Edges`,`Avg Geodesic Path`, group = networkType,
colour = networkType)) + geom_line(size = 1)
#p1 + geom_text(data = DfGlobalEdgeAvgGeodesicPath, aes(label = networkType), hjust = 0.7, vjust = 1)
ggsave(paste(savingDir,"EdgeAvgGeodesicPath.pdf",sep=""), width = 20, height = 20, units = "cm")
}
print("Completed!( Number of Edges = Number of Vertices*2-3, this is to match the number of vertices in all three networks) ")
# Stop the clock
print("user time:execution of the code, system time:CPU ,elapsed time: total")
proc.time() - ptm
}
#'
#'
#' This function allows you to generate comparison plots for three different types of networks with the same number of vertices and edges
#' @param ScaleFreePowerRange A vector containing powers for scale-free networks, e.g. c(1.5,1.75)
#' @param SmallWorldProbability A vector containing probabilities for small-world networks, e.g. c(0.5,0.6)
#' @param plot Type of plot (string input "point" or "line")
#' @param savingDir A string with the directory where to save graph plots. This function requires the network properties to be generated and saved in that folder beforehand by the graph generation function
#' @param GenDegreeDist Logical; if TRUE, degree-distribution plots are also generated for each loaded network
#' @import igraph
#' @import ggplot2
#' @export
gen_plots<- function(ScaleFreePowerRange,SmallWorldProbability,plot=c("point","line"),savingDir,GenDegreeDist)
{
  ## Draw comparison plots of global network properties (clustering
  ## coefficient, vertex count, mean degree/betweenness/closeness centrality,
  ## average geodesic path) against the number of edges for three network
  ## types: Erdos-Renyi random, Barabasi-Albert scale-free and Watts-Strogatz
  ## small-world graphs.  The serialized property objects must already exist
  ## in `savingDir` (written with saveRDS by the generation step).
  ##
  ## ScaleFreePowerRange  : numeric vector of scale-free powers, e.g. c(1.5, 1.75)
  ## SmallWorldProbability: numeric vector of rewiring probabilities, e.g. c(0.5, 0.6)
  ## plot                 : "point" or "line" -- the geom used for every plot
  ## savingDir            : directory holding the serialized objects; the PDF
  ##                        plots are written there as well
  ## GenDegreeDist        : if TRUE, additionally write degree-distribution
  ##                        plots for every loaded network
  ##
  ## Returns the proc.time() delta (timing of the whole run).
  # BUGFIX: the default used to be C("point","line") -- stats::C() is the
  # contrasts helper, not concatenation.  match.arg() resolves the
  # c("point","line") default to "point" and validates explicit values.
  plot <- match.arg(plot)
  # Start the clock!
  ptm <- proc.time()
  print("generating Scalefree networks(preferential attachment, 2 edges each step) and calculating different properties")
  ########Plotting
  # One accumulator data frame per plotted property; rows for every network
  # type are rbind-ed in so a single ggplot can draw all series together.
  DfGlobalEdgeVertices<-NULL
  DfEdgeGlobalClusteringCoefficent<-NULL
  DfGlobalEdgeCentralityBetweenessMean<-NULL
  DfGlobalEdgeCentralityClosenessMean<-NULL
  DfGlobalEdgeCentralityDegreeMean<-NULL
  DfGlobalEdgeAvgGeodesicPath<-NULL
  print("loading serialized object-randomGraphEdgeComparisn from hdd")
  randomGraphEdgeComparisn = readRDS(paste(savingDir,"randomGraphEdgeComparisn",sep =""), refhook = NULL)
  if(GenDegreeDist)
    degreeDistributionPlot(givenObjects=randomGraphEdgeComparisn,SavingDir=savingDir,graphtype="random")
  # ---- random-graph series: one temporary data frame per property ----
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfRandomTemp)
  dfRandomTemp=NULL
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfRandomTemp)
  dfRandomTemp=NULL
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
  dfRandomTemp=NULL
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
  dfRandomTemp=NULL
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
  dfRandomTemp=NULL
  dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(randomGraphEdgeComparisn,x="NumberOfEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
  dfRandomTemp = cbind(dfRandomTemp,networkType="randomGraph")
  DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
  dfRandomTemp=NULL
  # ---- scale-free / small-world series for every (power, probability) pair ----
  for (powersf in ScaleFreePowerRange) {
    for (probabilitysw in SmallWorldProbability) {
      NameScaleFree <- paste("Scalefree",powersf,sep ="")
      NameSmallWorld <- paste("SmallWorld",probabilitysw,sep ="")
      NameFileName <- paste("sc",powersf,"sw",probabilitysw,sep ="")
      print(paste("loading serialized object from hdd:",NameScaleFree))
      Scalefree = readRDS(paste(savingDir,NameScaleFree,sep =""), refhook = NULL)
      if(GenDegreeDist)
        degreeDistributionPlot(givenObjects=Scalefree,SavingDir=savingDir,graphtype=NameScaleFree)
      # From here on the name variables hold the legend labels.
      NameScaleFree <- paste("SF (alpha=",powersf,")",sep ="")
      print(paste("loading serialized object from hdd:",NameSmallWorld))
      SmallWorldEdges = readRDS(paste(savingDir,NameSmallWorld,sep =""), refhook = NULL)
      if(GenDegreeDist)
        degreeDistributionPlot(givenObjects=SmallWorldEdges,SavingDir=savingDir,graphtype=NameSmallWorld)
      NameSmallWorld <- paste("SW (P=",probabilitysw,")",sep ="")
      # NOTE: scale-free property frames use column "NumberofEdges" (lower-case
      # "o") while random/small-world frames use "NumberOfEdges"; the x=
      # arguments below must match the producing functions' column names.
      dfScaleFreeMultiPlot = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
      dfScaleFreeMultiPlot = cbind(dfScaleFreeMultiPlot,networkType=NameScaleFree)
      DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfScaleFreeMultiPlot)
      dfSmallWorldMultiPlot = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="GlobalClusteringCoefficent",xlabel = "Number of Edges",ylabel = "Global Clustering Coefficent")
      dfSmallWorldMultiPlot = cbind(dfSmallWorldMultiPlot,networkType=NameSmallWorld)
      DfEdgeGlobalClusteringCoefficent<-rbind(DfEdgeGlobalClusteringCoefficent,dfSmallWorldMultiPlot)
      dfScaleFreeMultiPlot = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
      dfScaleFreeMultiPlot = cbind(dfScaleFreeMultiPlot,networkType=NameScaleFree)
      DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfScaleFreeMultiPlot)
      dfSmallWorldMultiPlot = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="Vertices",xlabel = "Number of Edges",ylabel = "Vertices")
      dfSmallWorldMultiPlot = cbind(dfSmallWorldMultiPlot,networkType=NameSmallWorld)
      DfGlobalEdgeVertices<-rbind(DfGlobalEdgeVertices,dfSmallWorldMultiPlot)
      # scale-free
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
      DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
      DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
      DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(Scalefree,x="NumberofEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameScaleFree)
      DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
      dfRandomTemp=NULL
      # small world
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityBetweenessMean",xlabel = "Number of Edges",ylabel = "Mean Betweeness Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
      DfGlobalEdgeCentralityBetweenessMean<-rbind(DfGlobalEdgeCentralityBetweenessMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityClosenessMean",xlabel = "Number of Edges",ylabel = "Mean Closeness Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
      DfGlobalEdgeCentralityClosenessMean<-rbind(DfGlobalEdgeCentralityClosenessMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="CentralityDegreeMean",xlabel = "Number of Edges",ylabel = "Mean Degree Centrality")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
      DfGlobalEdgeCentralityDegreeMean<-rbind(DfGlobalEdgeCentralityDegreeMean,dfRandomTemp)
      dfRandomTemp=NULL
      dfRandomTemp = Plot2dListOfRandomGraphPropertiesMean(SmallWorldEdges,x="NumberOfEdges",y="AvgGeodesicPath",xlabel = "Number of Edges",ylabel = "Avg Geodesic Path")
      dfRandomTemp = cbind(dfRandomTemp,networkType=NameSmallWorld)
      DfGlobalEdgeAvgGeodesicPath<-rbind(DfGlobalEdgeAvgGeodesicPath,dfRandomTemp)
      dfRandomTemp=NULL
    }
  }
  if(plot=="line")
  {
    # BUGFIX (all ggsave calls): pass plot = p1 explicitly.  The plots are
    # assigned, never printed, so ggsave()'s default last_plot() would not
    # refer to p1 inside this function.
    p1 <- ggplot(DfEdgeGlobalClusteringCoefficent, aes(`Number of Edges`,`Global Clustering Coefficent`, group = networkType,
                                                       colour = networkType)) + geom_line(size = 1)
    ggsave(paste(savingDir,"EdgeGlobalClusteringCoef.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeVertices, aes(`Number of Edges`,Vertices, group = networkType,
                                           colour = networkType)) + geom_line(size = 1)
    # BUGFIX: sep="" added -- paste()'s default sep=" " put a space in the file name.
    ggsave(paste(savingDir,"EdgeVertices.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityDegreeMean, aes(`Number of Edges`,`Mean Degree Centrality`, group = networkType,
                                                       colour = networkType)) + geom_line(size = 1)
    ggsave(paste(savingDir,"EdgeMeanDegreeCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityBetweenessMean, aes(`Number of Edges`,`Mean Betweeness Centrality`, group = networkType,
                                                           colour = networkType)) + geom_line(size = 1)
    ggsave(paste(savingDir,"EdgeMeanBetweenessCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityClosenessMean, aes(`Number of Edges`,`Mean Closeness Centrality`, group = networkType,
                                                          colour = networkType)) + geom_line(size = 1)
    ggsave(paste(savingDir,"EdgeMeanClosnessCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeAvgGeodesicPath, aes(`Number of Edges`,`Avg Geodesic Path`, group = networkType,
                                                  colour = networkType)) + geom_line(size = 1)
    ggsave(paste(savingDir,"EdgeAvgGeodesicPath.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
  }
  if(plot=="point"){
    p1 <- ggplot(DfEdgeGlobalClusteringCoefficent, aes(`Number of Edges`,`Global Clustering Coefficent`, group = networkType,
                                                       colour = networkType)) + geom_point(size = 1)
    ggsave(paste(savingDir,plot,"EdgeGlobalClusteringCoef.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeVertices, aes(`Number of Edges`,Vertices, group = networkType,
                                           colour = networkType)) + geom_point(size = 1)
    # BUGFIX: sep="" added (see line-plot branch).
    ggsave(paste(savingDir,plot,"EdgeVertices.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityDegreeMean, aes(`Number of Edges`,`Mean Degree Centrality`, group = networkType,
                                                       colour = networkType)) + geom_point(size = 1)
    ggsave(paste(savingDir,plot,"EdgeMeanDegreeCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityBetweenessMean, aes(`Number of Edges`,`Mean Betweeness Centrality`, group = networkType,
                                                           colour = networkType)) + geom_point(size = 1)
    ggsave(paste(savingDir,plot,"EdgeMeanBetweenessCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeCentralityClosenessMean, aes(`Number of Edges`,`Mean Closeness Centrality`, group = networkType,
                                                          colour = networkType)) + geom_point(size = 1)
    ggsave(paste(savingDir,plot,"EdgeMeanClosnessCentrality.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
    p1 <- ggplot(DfGlobalEdgeAvgGeodesicPath, aes(`Number of Edges`,`Avg Geodesic Path`, group = networkType,
                                                  colour = networkType)) + geom_point(size = 1)
    ggsave(paste(savingDir,plot,"EdgeAvgGeodesicPath.pdf",sep=""), plot = p1, width = 20, height = 20, units = "cm")
  }
  print("Completed!( Number of Edges = Number of Vertices*2-3, this is to match the number of vertices in all three networks) ")
  # Stop the clock
  print("user time:execution of the code, system time:CPU ,elapsed time: total")
  proc.time() - ptm
}
createDataFrameforPlotting<-function(dfObj,name)
{
  ## Append a constant `graphType` column (value `name`, recycled to every
  ## row) to a property data frame so that several network types can be
  ## combined into one plot.
  ##
  ## dfObj: data.frame of per-graph properties
  ## name : network-type label (character scalar)
  ##
  ## Returns the augmented data frame.
  ## BUGFIX: the original body's last expression was an assignment, so the
  ## result was returned invisibly; returning cbind() directly makes the
  ## value visible without changing it.
  cbind(dfObj, graphType = name)
}
DelRandomEdge <- function(graphObj)
{
  ## Delete one uniformly chosen random edge from an igraph graph and return
  ## the modified graph.
  ##
  ## graphObj: an igraph graph object
  ## Returns the graph with one fewer edge.
  # The edge list is an (edges x 2) matrix of endpoint vertex ids;
  # nrow() is the edge count (the original divided length() by 2).
  edgeList = get.edgelist(graphObj)
  numEdges = nrow(edgeList)
  # igraph helper: draw a single random index from 1..numEdges.
  rs <- sample_seq(1, numEdges, 1)
  SelectedEdge = edgeList[rs, ]
  # igraph accepts an edge specified by name as "v1|v2".
  EdgeforDel = paste(SelectedEdge[1], "|", SelectedEdge[2], sep = "")
  # NOTE: two no-op gsize() calls (leftover debugging) were removed here.
  graphObj <- delete_edges(graphObj, EdgeforDel)
  return(graphObj)
}
#library(igraph)
#test1 = erdos.renyi.game(n=10000,p=50000, type = "gnm", directed = FALSE,loops = FALSE)
#test2 = erdos.renyi.game(n=10000,p=50000, type = "gnm", directed = FALSE,loops = FALSE)
#degree(test1)
#degree(test2)
#degreeDist<-1
#degreeDist<-degree(test1,normalized = FALSE)
#h1=hist(degreeDist)
#h1$density
#h2$density
#hMeanDen<-hist(degreeDist)
#hMeanDen$density = (h1$density+h2$density)/2
#h1$breaks
#h2$breaks
#hMeanDen$breaks
#h1$counts
#h2$counts
#hMeanDen$counts
#plot(hMeanDen)
#degreeDist<-degree(test2,normalized = FALSE)
#h2=hist(degreeDist)
#degreeDist<-degree(test,normalized = TRUE)
#hist(degreeDist)
#list(degreeDist)
#plot(degreeDist)
#plot(degreeDist,log="xy",ylim=c(.01,10), bg="black",pch=21, xlab="Degree", ylab="Cumulative Frequency")
degreeDistributionPlot<- function(givenObjects,SavingDir,graphtype)
{
  ## Write a degree-distribution plot (histogram on the density scale with a
  ## kernel-density overlay) for every graph sample held in `givenObjects`.
  ## Files are saved as JPEGs under <SavingDir>/DegreeDist/<graphtype>/.
  ##
  ## givenObjects: list of samples; givenObjects[[i]][[1]] is the per-graph
  ##               property data frame (includes Vertices and an edge-count
  ##               column), givenObjects[[i]][[2]] the list of per-graph
  ##               degree vectors.
  ## SavingDir   : base output directory.
  ## graphtype   : label used for the sub-directory and file names.
  totalNumberofSamples = length(givenObjects)
  # BUGFIX: paths were built with hard-coded "\\" separators (Windows-only);
  # file.path() keeps the function portable.
  distDir <- file.path(SavingDir, "DegreeDist")
  typeDir <- file.path(distDir, graphtype)
  dir.create(distDir, showWarnings = FALSE)
  dir.create(typeDir, showWarnings = FALSE)
  # seq_len() instead of 1:n so empty input yields zero iterations.
  for (i in seq_len(totalNumberofSamples)) {
    SampleProperties = as.data.frame(givenObjects[[i]][[1]])
    SampleCentralityAll = givenObjects[[i]][[2]]
    NetWithHowManyDiffVertices <- length(SampleProperties[[1]])
    for (j in seq_len(NetWithHowManyDiffVertices)) {
      VerticesSize <- SampleProperties[["Vertices"]][j]
      # BUGFIX: scale-free property frames name the edge column
      # "NumberofEdges" (lower-case "o"); fall back so the file name keeps
      # the edge count for those objects too.
      EdgesCol <- if (!is.null(SampleProperties[["NumberOfEdges"]])) "NumberOfEdges" else "NumberofEdges"
      EdgesSize <- SampleProperties[[EdgesCol]][j]
      Degree <- as.vector(SampleCentralityAll[[j]])
      dframe <- as.data.frame(Degree)
      # Histogram with density instead of count on the y-axis, overlaid
      # with a transparent kernel-density estimate.
      gplot <- ggplot(dframe, aes(x = Degree)) +
        geom_histogram(aes(y = ..density..),
                       binwidth = .5,
                       colour = "black", fill = "white") +
        geom_density(alpha = .2, fill = "#FF6666")
      ggsave(plot = gplot,
             filename = paste("sample", i, graphtype, "v", VerticesSize, "E", EdgesSize, ".jpg", sep = ""),
             path = typeDir)
    }
  }
}
# Generate one Erdos-Renyi G(n, m) random graph per (vertices, edges) pair --
# pairs are taken positionally from VectorVertices / VectorEdges -- and
# record global properties of each graph.
#
# Parameters actually used by this body: VectorVertices, VectorEdges.
# NOTE(review): n1, p1, n2, p2, PstepSize and sampleSize are accepted but
# never referenced below -- presumably kept so the signature mirrors the
# sibling property functions; confirm before relying on them.
#
# Returns a 3-element list:
#   [[1]] data.frame, one row per graph: Vertices, NumberOfEdges,
#         GlobalClusteringCoefficent, CentralityDegreeMean,
#         CentralityClosenessMean, CentralityBetweenessMean, AvgGeodesicPath
#   [[2]] list of per-graph degree vectors
#   [[3]] list of per-graph closeness vectors
ERGraphRandomPropertiesGNM2<- function(n1=NULL,p1=NULL,n2=NULL,p2=NULL,VectorVertices=NULL,VectorEdges=NULL,PstepSize,sampleSize)
{
# Measure other properties of the graphs:
#- Average geodesic/shortest path, global clustering coefficient, degree centrality(degree, closeness, betweeness)
# Accumulators, initialised to the scalar 1 and grown per graph at index
# `counter1`.  The *List variables are coerced from vector to list on the
# first `x[i] = list(...)` assignment.  DegreeDist and the Min/Max vectors
# are filled but not part of the returned value.
Probability<-1
Vertices<-1
NumberOfEdges<-1
DegreeDist<-1
CentralityDegreeMean<- 1
CentralityClosenessMean<-1
CentralityBetweenessMean<- 1
CentralityDegreeMin<- 1
CentralityClosenessMin<-1
CentralityBetweenessmin<- 1
CentralityDegreeMax<- 1
CentralityClosenessmax<-1
CentralityBetweenessmax<- 1
CentralityDegreeList<-1
names(CentralityDegreeList) = "Degree"
CentralityClosenessList<-1
names(CentralityClosenessList) = "Closeness"
CentralityBetweenessList<-1
names(CentralityBetweenessList) = "Estimate_betweenness"
GlobalClusteringCoefficent <-1
AvgGeodesicPath<- 1
NumberOfEdges<- 1
counter1 = 1
for (n in VectorVertices)
{
# e is the edge count paired (by position) with vertex count n.
e=VectorEdges[[counter1]]
# With type = "gnm" the `p` argument of erdos.renyi.game holds the edge
# count m, not a probability.
erRandGraph = erdos.renyi.game(n=n,p=e, type = "gnm", directed = FALSE,loops = FALSE)
#calculation of properties from graph
Degree = degree(erRandGraph, normalized = FALSE)
Closeness = closeness.estimate(erRandGraph, cutoff = -1)
Estimate_betweenness = estimate_betweenness(erRandGraph, cutoff = -1)
Mean_distance = mean_distance(erRandGraph, directed = FALSE)
Transitivity = transitivity(erRandGraph, type = c("global"), vids = NULL, weights = NULL)
# Post calculation of properties
CentralityDegreeList[counter1]=list(Degree)
CentralityClosenessList[counter1] = list(Closeness)
CentralityBetweenessList[counter1] = list(Estimate_betweenness)
CentralityDegreeMean[[counter1]] = mean(Degree)
CentralityClosenessMean[[counter1]] = mean(Closeness)
CentralityBetweenessMean[[counter1]] = mean(Estimate_betweenness) #cutoff:The maximum path length to consider when calculating the betweenness. If zero or negative then there is no such limit.
CentralityDegreeMin[[counter1]] = min(Degree)
CentralityClosenessMin[[counter1]] = min(Closeness)
CentralityBetweenessmin[[counter1]] = min(Estimate_betweenness)
CentralityDegreeMax[[counter1]] = max(Degree)
CentralityClosenessmax[[counter1]] = max(Closeness)
CentralityBetweenessmax[[counter1]] = max(Estimate_betweenness)
AvgGeodesicPath[[counter1]] = Mean_distance
GlobalClusteringCoefficent[[counter1]] = Transitivity
#Graph Info
Vertices[[counter1]]<-n
NumberOfEdges[[counter1]]<-e
#Next graph counter increament
counter1 = counter1+1;
}
#return(CentralityDegreeList)
return(list(data.frame(Vertices,NumberOfEdges,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath),CentralityDegreeList,CentralityClosenessList));
#return(data.frame(Vertices,Probability,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath, check.rows = FALSE));
}
# Generate Barabasi-Albert preferential-attachment graphs (igraph sample_pa,
# EdgesEachStep edges added per step) and record global properties of each.
# Vertex counts come from n1:n2 when VectorVertices is NULL, otherwise from
# VectorVertices; for every vertex count the power sweeps from p1 to p2 in
# steps of PstepSize.
#
# NOTE(review): sampleSize is accepted but never used in this body.
# NOTE(review): the two branches below differ only in the vertex-count
# source; their loop bodies are duplicated verbatim.
#
# Returns a 3-element list:
#   [[1]] data.frame, one row per graph: Vertices, NumberofEdges (note the
#         lower-case "o" -- consumers must use this exact column name),
#         Probability, GlobalClusteringCoefficent, CentralityDegreeMean,
#         CentralityClosenessMean, CentralityBetweenessMean, AvgGeodesicPath
#   [[2]] list of per-graph degree vectors
#   [[3]] list of per-graph closeness vectors
BAGraphScalefreePropertiesEdges<- function(n1=NULL,p1,n2=NULL,p2,PstepSize,VectorVertices=NULL, sampleSize,EdgesEachStep=1)
{
# Measure other properties of the graphs:
#- Average geodesic/shortest path, global clustering coefficient, degree centrality(degree, closeness, betweeness)
# Accumulators, initialised to the scalar 1 and grown per graph at index
# `counter1`.  The *List variables are coerced from vector to list on the
# first `x[i] = list(...)` assignment.  The Min/Max vectors are filled but
# not part of the returned value.
Probability<-1
Vertices<-1
CentralityDegreeMean<- 1
CentralityClosenessMean<-1
CentralityBetweenessMean<- 1
CentralityDegreeMin<- 1
CentralityClosenessMin<-1
CentralityBetweenessmin<- 1
CentralityDegreeMax<- 1
CentralityClosenessmax<-1
CentralityBetweenessmax<- 1
CentralityDegreeList<-1
names(CentralityDegreeList) = "Degree"
CentralityClosenessList<-1
names(CentralityClosenessList) = "Closeness"
CentralityBetweenessList<-1
names(CentralityBetweenessList) = "Estimate_betweenness"
GlobalClusteringCoefficent <-1
AvgGeodesicPath<- 1
NumberofEdges<- 1
counter1 = 1
# Branch 1: vertex counts taken from the contiguous range n1:n2.
if(is.null(VectorVertices)){
for (n in n1:n2)
{
# `flag` is assigned but never read (leftover from earlier logic).
flag = 0;
p=p1;
# Sweep the preferential-attachment power from p1 to p2 in PstepSize steps.
repeat {
if(p>p2)
{
break
}
Scalefree = sample_pa(n, power = p, m = EdgesEachStep, out.dist = NULL, out.seq = NULL,
out.pref = FALSE, zero.appeal = 1, directed = FALSE,
algorithm = "psumtree", start.graph = NULL)
##print(paste(" probablity ",p))
# #print(paste(" vertices ",n))
#calculation of properties from graph
Degree = degree(Scalefree, normalized = FALSE)
Closeness = closeness.estimate(Scalefree, cutoff = -1)
Estimate_betweenness = estimate_betweenness(Scalefree, cutoff = -1)
Mean_distance = mean_distance(Scalefree, directed = FALSE)
Transitivity = transitivity(Scalefree, type = c("global"), vids = NULL, weights = NULL)
Edges_get= gsize(Scalefree)
# Post calculation of properties
#print(paste(" Edges:",Edges_get))
CentralityDegreeList[counter1]=list(Degree)
CentralityClosenessList[counter1] = list(Closeness)
CentralityBetweenessList[counter1] = list(Estimate_betweenness)
CentralityDegreeMean[[counter1]] = mean(Degree)
CentralityClosenessMean[[counter1]] = mean(Closeness)
CentralityBetweenessMean[[counter1]] = mean(Estimate_betweenness) #cutoff:The maximum path length to consider when calculating the betweenness. If zero or negative then there is no such limit.
CentralityDegreeMin[[counter1]] = min(Degree)
CentralityClosenessMin[[counter1]] = min(Closeness)
CentralityBetweenessmin[[counter1]] = min(Estimate_betweenness)
CentralityDegreeMax[[counter1]] = max(Degree)
CentralityClosenessmax[[counter1]] = max(Closeness)
CentralityBetweenessmax[[counter1]] = max(Estimate_betweenness)
AvgGeodesicPath[[counter1]] = Mean_distance
GlobalClusteringCoefficent[[counter1]] = Transitivity
#Graph Info
Vertices[[counter1]]<-n
Probability[[counter1]]<-p
NumberofEdges[[counter1]]<- Edges_get
#Next graph counter increament
counter1 = counter1+1;
p= p+PstepSize;
}
}
}
# Branch 2: vertex counts taken from VectorVertices.  Body duplicated from
# branch 1; note the redundant extra brace pair wrapping the loop body.
else{
for (n in VectorVertices)
{
{
flag = 0;
p=p1;
repeat {
if(p>p2)
{
break
}
Scalefree = sample_pa(n, power = p, m = EdgesEachStep, out.dist = NULL, out.seq = NULL,
out.pref = FALSE, zero.appeal = 1, directed = FALSE,
algorithm = "psumtree", start.graph = NULL)
##print(paste(" probablity ",p))
# #print(paste(" vertices ",n))
#calculation of properties from graph
Degree = degree(Scalefree, normalized = FALSE)
Closeness = closeness.estimate(Scalefree, cutoff = -1)
Estimate_betweenness = estimate_betweenness(Scalefree, cutoff = -1)
Mean_distance = mean_distance(Scalefree, directed = FALSE)
Transitivity = transitivity(Scalefree, type = c("global"), vids = NULL, weights = NULL)
Edges_get= gsize(Scalefree)
# Post calculation of properties
#print(paste(" Edges:",Edges_get))
CentralityDegreeList[counter1]=list(Degree)
CentralityClosenessList[counter1] = list(Closeness)
CentralityBetweenessList[counter1] = list(Estimate_betweenness)
CentralityDegreeMean[[counter1]] = mean(Degree)
CentralityClosenessMean[[counter1]] = mean(Closeness)
CentralityBetweenessMean[[counter1]] = mean(Estimate_betweenness) #cutoff:The maximum path length to consider when calculating the betweenness. If zero or negative then there is no such limit.
CentralityDegreeMin[[counter1]] = min(Degree)
CentralityClosenessMin[[counter1]] = min(Closeness)
CentralityBetweenessmin[[counter1]] = min(Estimate_betweenness)
CentralityDegreeMax[[counter1]] = max(Degree)
CentralityClosenessmax[[counter1]] = max(Closeness)
CentralityBetweenessmax[[counter1]] = max(Estimate_betweenness)
AvgGeodesicPath[[counter1]] = Mean_distance
GlobalClusteringCoefficent[[counter1]] = Transitivity
#Graph Info
Vertices[[counter1]]<-n
Probability[[counter1]]<-p
NumberofEdges[[counter1]]<- Edges_get
#Next graph counter increament
counter1 = counter1+1;
p= p+PstepSize;
}
}
}
}
#return(CentralityDegreeList)
return(list(data.frame(Vertices,NumberofEdges,Probability,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath),CentralityDegreeList,CentralityClosenessList));
#return(data.frame(Vertices,Probability,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath, check.rows = FALSE));
}
SmallWorldGraphPropertiesEdges<- function(latticeDim,size1 = NULL,size2 = NULL,latticeNei,p1,p2,PstepSize,SizeVector = NULL,numberOfEdgesDelRandomly=NULL)
{
# Measure other properties of the graphs:
#- Average geodesic/shortest path, global clustering coefficient, degree centrality(degree, closeness, betweeness)
Probability<-1
Vertices<-1
NumberOfEdges<- 1
CentralityDegreeMean<- 1
CentralityClosenessMean<-1
CentralityBetweenessMean<- 1
CentralityDegreeMin<- 1
CentralityClosenessMin<-1
CentralityBetweenessmin<- 1
CentralityDegreeMax<- 1
CentralityClosenessmax<-1
CentralityBetweenessmax<- 1
CentralityDegreeList<-1
names(CentralityDegreeList) = "Degree"
CentralityClosenessList<-1
names(CentralityClosenessList) = "Closeness"
CentralityBetweenessList<-1
names(CentralityBetweenessList) = "Estimate_betweenness"
GlobalClusteringCoefficent <-1
AvgGeodesicPath<- 1
counter1 = 1
if(!is.null(SizeVector))
{
for (n in SizeVector)
{
flag = 0;
p=p1;
repeat {
if(p>p2)
{
break
}
smallWorldtest <- sample_smallworld(dim = latticeDim,size=n, nei=latticeNei, p=p)
if(numberOfEdgesDelRandomly>0 && !is.null(numberOfEdgesDelRandomly))
{
for (delNum in 1:numberOfEdgesDelRandomly) {
smallWorldtest = DelRandomEdge(smallWorldtest)
}
}
#calculation of properties from graph
##print(paste(" probablity ",p))
##print(paste(" vertices ",n))
Edges_get= gsize(smallWorldtest)
#print(paste(" Edges:",Edges_get))
Degree = degree(smallWorldtest, normalized = FALSE)
Closeness = closeness.estimate(smallWorldtest, cutoff = -1)
Estimate_betweenness = estimate_betweenness(smallWorldtest, cutoff = -1)
Mean_distance = mean_distance(smallWorldtest, directed = FALSE)
Transitivity = transitivity(smallWorldtest, type = c("global"), vids = NULL, weights = NULL)
# Post calculation of properties
CentralityDegreeList[counter1]=list(Degree)
CentralityClosenessList[counter1] = list(Closeness)
CentralityBetweenessList[counter1] = list(Estimate_betweenness)
CentralityDegreeMean[[counter1]] = mean(Degree)
CentralityClosenessMean[[counter1]] = mean(Closeness)
CentralityBetweenessMean[[counter1]] = mean(Estimate_betweenness) #cutoff:The maximum path length to consider when calculating the betweenness. If zero or negative then there is no such limit.
CentralityDegreeMin[[counter1]] = min(Degree)
CentralityClosenessMin[[counter1]] = min(Closeness)
CentralityBetweenessmin[[counter1]] = min(Estimate_betweenness)
CentralityDegreeMax[[counter1]] = max(Degree)
CentralityClosenessmax[[counter1]] = max(Closeness)
CentralityBetweenessmax[[counter1]] = max(Estimate_betweenness)
AvgGeodesicPath[[counter1]] = Mean_distance
GlobalClusteringCoefficent[[counter1]] = Transitivity
#Graph Info
Vertices[[counter1]]<-n
Probability[[counter1]]<-p
NumberOfEdges[[counter1]]<-Edges_get
#Next graph counter increament
counter1 = counter1+1;
p= p+PstepSize;
}
}
}
else
{
for (n in size1:size2)
{
flag = 0;
p=p1;
repeat {
if(p>p2)
{
break
}
smallWorldtest <- sample_smallworld(dim = latticeDim,size=n, nei=latticeNei, p=p)
if(numberOfEdgesDelRandomly>0 && !is.null(numberOfEdgesDelRandomly))
{
for (delNum in 1:numberOfEdgesDelRandomly) {
smallWorldtest = DelRandomEdge(smallWorldtest)
}
}
#calculation of properties from graph
##print(paste(" probablity ",p))
##print(paste(" vertices ",n))
Edges_get= gsize(smallWorldtest)
##print(paste(" Edges:",Edges_get))
Degree = degree(smallWorldtest, normalized = FALSE)
Closeness = closeness.estimate(smallWorldtest, cutoff = -1)
Estimate_betweenness = estimate_betweenness(smallWorldtest, cutoff = -1)
Mean_distance = mean_distance(smallWorldtest, directed = FALSE)
Transitivity = transitivity(smallWorldtest, type = c("global"), vids = NULL, weights = NULL)
# Post calculation of properties
CentralityDegreeList[counter1]=list(Degree)
CentralityClosenessList[counter1] = list(Closeness)
CentralityBetweenessList[counter1] = list(Estimate_betweenness)
CentralityDegreeMean[[counter1]] = mean(Degree)
CentralityClosenessMean[[counter1]] = mean(Closeness)
CentralityBetweenessMean[[counter1]] = mean(Estimate_betweenness) #cutoff:The maximum path length to consider when calculating the betweenness. If zero or negative then there is no such limit.
CentralityDegreeMin[[counter1]] = min(Degree)
CentralityClosenessMin[[counter1]] = min(Closeness)
CentralityBetweenessmin[[counter1]] = min(Estimate_betweenness)
CentralityDegreeMax[[counter1]] = max(Degree)
CentralityClosenessmax[[counter1]] = max(Closeness)
CentralityBetweenessmax[[counter1]] = max(Estimate_betweenness)
AvgGeodesicPath[[counter1]] = Mean_distance
GlobalClusteringCoefficent[[counter1]] = Transitivity
#Graph Info
Vertices[[counter1]]<-n
Probability[[counter1]]<-p
NumberOfEdges[[counter1]]<-Edges_get
#Next graph counter increament
counter1 = counter1+1;
p= p+PstepSize;
}
}
}
#return(CentralityDegreeList)
return(list(data.frame(Vertices,Probability,NumberOfEdges,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath),CentralityDegreeList,CentralityClosenessList));
#return(data.frame(Vertices,Probability,GlobalClusteringCoefficent,CentralityDegreeMean,CentralityClosenessMean,CentralityBetweenessMean,AvgGeodesicPath, check.rows = FALSE));
}
savePlot <- function(nameX, nameY, plotType, ParamFileNameDir) {
  # Save the most recently displayed ggplot to
  # "<dir>/<dir><X><Y><type>.pdf" at 20 x 20 cm.
  #
  # nameX, nameY: axis/variable labels; whitespace and backticks are
  #   stripped so they can be embedded safely in a file name.
  # plotType: short tag identifying the kind of plot.
  # ParamFileNameDir: output directory; its name is also reused as the
  #   file-name prefix (matching the original naming scheme).
  ParamFileName <- ParamFileNameDir
  # Nested gsub replaces the previous stringr::str_replace_all call with a
  # base-R equivalent (a literal backtick has no regex meaning, and
  # fixed = TRUE makes the intent explicit).
  XnameString <- gsub("`", "", gsub("[[:space:]]", "", nameX), fixed = TRUE)
  YnameString <- gsub("`", "", gsub("[[:space:]]", "", nameY), fixed = TRUE)
  fileName <- paste0(XnameString, YnameString, plotType, ".pdf")
  # ggsave() writes the last plot by default; only the path is built here.
  ggsave(paste0(ParamFileName, "/", ParamFileName, fileName),
         width = 20, height = 20, units = "cm")
}
Plot2dListOfRandomGraphPropertiesMean <- function(givenObject, x, y, xlabel, ylabel) {
  # Average the columns `x` and `y` across repeated graph samples that were
  # generated with the same parameters, and return the two averaged columns
  # as a data frame whose columns are renamed to `xlabel` and `ylabel`.
  #
  # givenObject: one of two layouts produced by the graph generators:
  #   (a) a plain list of data frames (one per sample), or
  #   (b) a list of per-sample results where element [[i]][[1]] is the
  #       summary data frame.
  # x, y: names of the columns to average.
  # xlabel, ylabel: column names for the returned data frame.
  #
  # Returns a two-column data.frame, or NULL when `givenObject` matches
  # neither layout.
  #
  # Changes vs. original: removed the leftover debug `cat("test")`, the
  # unused zCounter accumulator, and the commented-out scratch code.
  n_samples <- length(givenObject)
  x_total <- 0
  y_total <- 0
  if (!is.null(dim(givenObject[[1]]))) {
    # Layout (a): each list element is itself a data frame.
    for (sample_df in givenObject) {
      x_total <- x_total + sample_df[[x]]
      y_total <- y_total + sample_df[[y]]
    }
  } else if (is.null(dim(givenObject))) {
    # Layout (b): the summary data frame is nested one level down.
    for (i in seq_len(n_samples)) {
      sample_df <- as.data.frame(givenObject[[i]][[1]])
      x_total <- x_total + sample_df[[x]]
      y_total <- y_total + sample_df[[y]]
    }
  } else {
    # Unrecognised layout: preserve the original's NULL result.
    return(NULL)
  }
  result <- data.frame(x_total / n_samples, y_total / n_samples)
  # Assign labels directly so arbitrary strings (spaces etc.) survive intact.
  names(result) <- c(xlabel, ylabel)
  result
}
|
ed96309dc89e048dbf22f16167891573d5fb42f9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rubias/examples/mixture_draw.Rd.R
|
dc77724323f1e3fca648a406035f5145e8a1633b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 378
|
r
|
mixture_draw.Rd.R
|
# Auto-extracted example script for the internal rubias function mixture_draw.
library(rubias)
### Name: mixture_draw
### Title: Separate a chosen proportion of a reference dataset into a
### mixture with known population proportions
### Aliases: mixture_draw
### Keywords: internal
### ** Examples
# Draw one set of mixing proportions (one per reporting unit) from a
# Dirichlet whose concentration vector is the per-unit sample count of the
# bundled `alewife` reference data.
rhos <- as.vector(gtools::rdirichlet(1, table(alewife$repunit)))
# Split the reference data into a simulated mixture of N = 100 individuals
# with the drawn proportions. NOTE(review): min_remaining presumably bounds
# the fraction of each collection that must stay in the reference set --
# confirm against ?mixture_draw.
cross_val <- mixture_draw(D = alewife, rhos = rhos, N = 100, min_remaining = .005)
|
6efe81f4b98eba9ba0a98e1f3ac2bfe96cb771dd
|
c486604d9335890f984a425eb9bab70aabfd8c66
|
/Rfiles/fyp.R
|
cd9e712ced2b2a81d2b537c5398d55383531d711
|
[] |
no_license
|
Colin303/An-Analysis-of-the-Dublin-rental-market
|
4c0a0f5365fcd5c2033d02b9cc4d8699e199dd76
|
0ee878cd253010b2736630e2a4c95779007c7104
|
refs/heads/master
| 2021-04-14T02:19:08.251063
| 2020-03-25T19:35:16
| 2020-03-25T19:35:16
| 249,202,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,720
|
r
|
fyp.R
|
#final year project
#install.packages("pxR")
#install.packages("dplyr")
#install.packages("tree")
library(tree)
library(pxR)
library(dplyr)
library(ggplot2)
library(plotly)
?read.px
# Read the CSO rent-index table (PC-Axis format) and convert to a data frame.
df <- read.px("ria02.px")
df <- as.data.frame(df)
?select
# Keep the rent-index columns and restrict to Dublin locations.
dublin <- df %>% select(Number.of.Bedrooms, Property.Type, Location, Year, value) %>% filter(grepl('Dublin', Location))
str(dublin)
# NOTE(review): `clean` is not defined until several lines below -- this call
# fails if the script is run top to bottom.
levels(clean$Property.Type)
# Count missing values per column.
sapply(dublin, FUN=function(x) sum(is.na(x)))
# Restrict to 2017 observations and drop unused factor levels / NA rows.
dublin2017 <- dublin %>% select(Number.of.Bedrooms, Property.Type, Location, Year, value) %>% filter(grepl('2017', Year))
sapply(dublin2017, FUN=function(x) sum(is.na(x)))
dublin2017 <- droplevels.data.frame(dublin2017)
clean <- na.omit(dublin2017)
str(clean)
clean$Location <- as.character(clean$Location)
# Rename columns to short lower-case names.
names <- c("bedrooms", "prop", "location", "year", "value")
colnames(clean) <- names
str(clean)
# Fit and plot a regression tree predicting rent value from the other columns.
tree = tree(value~., clean)
summary(tree)
plot(tree)
text(tree, pretty=0)
install.packages("rattle")
library(rattle)
# NOTE(review): `fit` is only created two lines below -- asRules(fit) fails
# here on a clean run.
asRules(fit)
library(rpart)
fit <- rpart(value~., data = clean)
# Cross-validate tree size and prune to 3 terminal nodes.
cv.tree = cv.tree(tree)
plot(cv.tree$size, cv.tree$dev, type = 'b')
prune.tree = prune.tree(tree, best=3)
plot(prune.tree)
text(prune.tree, pretty=1)
summary(prune.tree)
?aggregate
library(dplyr)
# NOTE(review): `col` is never defined above, so remove(col) errors.
remove(col)
# Mean rent value per property type and location. NOTE(review): the columns
# were renamed to "prop"/"location" above, so these original names no longer
# exist in `clean` -- verify which column names are intended.
agg <- clean %>% group_by(Property.Type, Location) %>% summarise(round(mean(value), 2))
#------------------------------------------------------------------------------------
#daft city cleaning
#daft city is a new dataset from daft with all dublin areas, incl new columns
daft_city <- read.csv(file = "daft_city.csv", sep = ",", stringsAsFactors = FALSE)
?read.csv
# Drop the first two columns (presumably row indices -- TODO confirm).
daft_city <- daft_city[, c(-1, -2)]
#check for NAs
sapply(daft_city, FUN=function(x) sum(is.na(x)))
#overviews have 69 Nas (looks exlusively studio apartment to rent are missing this data)
#now to deal with price
# Strip currency symbols and thousand separators from the price strings.
daft_city$price<- gsub("€", "", daft_city$price)
daft_city$price<- gsub(",", "", daft_city$price)
class(daft_city$price)
# NOTE(review): '[Per weekPer month]' is a character CLASS, removing those
# letters anywhere in the string, not the literal phrases; it happens to work
# here because prices are otherwise digits. The c(1,4) multiplier converts
# weekly listings to an approximate monthly figure (x4).
daft_city$price <- as.numeric(gsub('[Per weekPer month]','',daft_city$price)) * c(1,4)[grepl('Per week',daft_city$price) + 1]
#changed weekly to monthly and changed to numeric
class(daft_city$price)
#looking at graphs, data looks non-normal
hist(daft_city$price)
boxplot(daft_city$price)
qqnorm(daft_city$price)
#shapiro test confirms this
shapiro.test(daft_city$price)
#removing row 39 cause its messed up
daft_city<- daft_city[-c(39),]
#now looking at another column
head(daft_city$overviews)
library(tidyr)
# Placeholder column names used by tidyr::separate below.
furnished <- "furnished"
beds <- "beds"
daft_city$beds <- beds
daft_city$furnished <- furnished
#splitting the overviews column into its separate parts
daft_city<- separate(daft_city, overviews, furnished, sep = ",", remove = F, convert = T)
#now I have a furnished column f
#remove it from the original column and split again
daft_city$overviews<- gsub("Furnished,", "", daft_city$overviews)
daft_city$overviews<- gsub("Unfurnished,", "", daft_city$overviews)
daft_city$overviews<- gsub("Furnished or unfurnished,", "", daft_city$overviews)
daft_city<- separate(daft_city, overviews, beds, sep = ",", remove = F, convert = T)
#removes everything before the comma, leaving only number of bathrooms
daft_city$overviews<- gsub('.*\\,', "", daft_city$overviews)
#looking at beds column, i want to remove the (double / single) values
head(daft_city$beds)
daft_city$beds<- gsub('\\(.*', "", daft_city$beds)
#making a backup from this point..
backup <- daft_city
#remove non-numbers
daft_city$beds<- gsub(' ', "", daft_city$beds)
daft_city$beds<- gsub("[^0-9.-]", "", daft_city$beds)
class(daft_city$beds)
#now changing beds to factors
str(daft_city$beds)
daft_city$beds <- as.factor(daft_city$beds)
str(daft_city$beds)
levels(daft_city$beds)
head(daft_city$beds)
#now to do the same for bathrooms
#change colname overviews to bathrooms
names(daft_city)[names(daft_city) == 'overviews'] <- 'bathrooms'
#removing non numeric
head(daft_city$bathrooms)
daft_city$bathrooms<- gsub(' ', "", daft_city$bathrooms)
daft_city$bathrooms<- gsub("[^0-9.-]", "", daft_city$bathrooms)
head(daft_city$bathrooms)
#now changing bathroom to factors
str(daft_city$bathrooms)
daft_city$bathrooms <- as.factor(daft_city$bathrooms)
str(daft_city$bathrooms)
levels(daft_city$bathrooms)
head(daft_city$bathrooms)
#now to make furnished a factor
daft_city$furnished<- gsub('\\(', "", daft_city$furnished)
daft_city$furnished<- gsub('4 Bedrooms 1 single', "", daft_city$furnished)
daft_city$furnished <- as.factor(daft_city$furnished)
levels(daft_city$furnished)
#same for dwelling_type
#rename to dwelling
names(daft_city)[names(daft_city) == 'dwelling_type'] <- 'dwelling'
daft_city$dwelling <- as.factor(daft_city$dwelling)
levels(daft_city$dwelling)
#rename city_center_distance to distance, change to numeric
names(daft_city)[names(daft_city) == 'city_center_distance'] <- 'distance'
class(daft_city$distance)
daft_city$distance <- as.numeric(daft_city$distance)
hist(daft_city$distance)
boxplot(daft_city$distance)
#removing row 139 because long + lat is completely wrong and massive outlier for city center distance
# NOTE(review): the comment says row 139 but row 138 is zeroed and removed --
# confirm which row index is intended.
daft_city[138,] <- 0
daft_city <- daft_city[-138,]
#row 955 has Na for distance, looking at its co-ordinates, looks like it should be about a value of 2.
# Impute the single remaining NA distance with the hand-estimated value 2.
daft_city$distance[is.na(daft_city$distance)]<-2
#looking good, last to deal with address
str(daft_city)
backup <- daft_city
str(daft_city)
#graphs ---------------------------------------------------------
hist(daft_city$price, col = "blue", main = "Histogram of price")
hist(daft_city$distance, col = "red")
# NOTE(review): as.numeric() on a factor yields the internal level codes, not
# the label values -- as.numeric(as.character(x)) is usually intended.
daft_city$bathrooms <- as.numeric(daft_city$bathrooms)
hist(daft_city$bathrooms, col = "green")
daft_city$beds <- as.numeric(daft_city$beds)
# NOTE(review): this plots bathrooms again -- presumably beds was intended.
hist(daft_city$bathrooms, col = "yellow")
?boxplot
boxplot(daft_city$price, main = "Boxplot of Price")
boxplot(daft_city$distance, main = "Boxplot of distance")
boxplot(daft_city$beds, main = "Boxplot of beds")
boxplot(daft_city$bathrooms, main = "Boxplot of bathrooms")
daft_city$beds <- as.factor(daft_city$beds)
counts <- table(daft_city$beds)
barplot(counts, col = "red", main = "Barchart of Beds")
counts <- table(daft_city$bathrooms)
barplot(counts, col = "blue", main = "Barchart of Bathrooms")
#correlations
library(ggplot2)
# Scatter of price against distance from the city centre.
ggplot(data = daft_city, aes(x = distance, y = price)) +
geom_point()
# Pairwise correlations between the four numeric columns.
corDF = data.frame(daft_city$price, daft_city$distance, daft_city$beds, daft_city$bathrooms)
cor(corDF)
library("corrplot")
corVis <- cor(corDF)
corrplot(corVis, method = "number")
corrplot(corVis, method = "color")
# splitting daft_city into different areas, dublin 2, dublin 4, dublin 24 etc
#splitting daft into zipcodes..
# Each subset keeps the same columns and filters on a postal-code substring of
# the address. Single-digit codes (Dublin 1, 2, 6) need explicit exclusions so
# that two-digit codes (Dublin 18, 20, 6W, ...) are not matched as well.
daft <- daft_city
daftdublin3 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 3', addr))
daftdublin4 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 4', addr))
daftdublin5 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 5', addr))
daftdublin6 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 6', addr)) %>% filter(!grepl('Dublin 6W', addr))
daftdublin6w <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 6W', addr))
daftdublin7 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 7', addr))
daftdublin8 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 8', addr))
daftdublin9 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 9', addr))
daftdublin1 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 1', addr)) %>% filter(!grepl('Dublin 18', addr)) %>% filter(!grepl('Dublin 16', addr)) %>% filter(!grepl('Dublin 15', addr)) %>% filter(!grepl('Dublin 14', addr)) %>% filter(!grepl('Dublin 13', addr)) %>% filter(!grepl('Dublin 12', addr))%>% filter(!grepl('Dublin 11', addr))%>% filter(!grepl('Dublin 10', addr)) %>% filter(!grepl('Dublin 17', addr)) %>% filter(!grepl('Dublin 19', addr))
daftdublin10 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 10', addr))
daftdublin11 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 11', addr))
daftdublin12 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 12', addr))
daftdublin13 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 13', addr))
daftdublin14 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 14', addr))
daftdublin15 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 15', addr))
daftdublin16 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 16', addr))
daftdublin17 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 17', addr))
daftdublin18 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 18', addr))
daftdublin2 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 2', addr)) %>% filter(!grepl('Dublin 20', addr)) %>% filter(!grepl('Dublin 22', addr)) %>% filter(!grepl('Dublin 24', addr))
daftdublin20 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 20', addr))
daftdublin22 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 22', addr))
daftdublin24 <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Dublin 24', addr))
#non post code dublin locations, known as "other".
daftdublinco <- daft %>% select(price, addr,longitude, latitude, bathrooms, beds, furnished, distance, dwelling) %>% filter(grepl('Co. Dublin', addr))
# Base barplot of dwelling types across the full dataset.
counts <- table(daft_city$dwelling)
barplot(counts, main="Types of Dwellings")
# NOTE(review): the dangling `counts$` below merges with the following line at
# parse time (counts$library(plotly)) and errors at run time -- it looks like
# an unfinished edit and should probably be deleted.
counts$
library(plotly)
#Dwellings all data ------------------------------------
# Interactive bar chart of dwelling counts; the first bar is highlighted red.
d <- summary(daft_city$dwelling)
p <- plot_ly(daft_city, x = names(d), y = d, type = 'bar',
marker = list(color = c('rgba(222,45,38,0.8)', 'rgba(204,204,204,1)',
'rgba(204,204,204,1)', 'rgba(204,204,204,1)')))%>%
layout(title = "Dwellings All Data",
xaxis = list(title = "Types"),
yaxis = list(title = "Amount"))
p
#dwellings dublin other ------------------------------------
d2 <- summary(daftdublinco$dwelling)
p2 <- plot_ly(daftdublinco, x = names(d2), y = d2, type = 'bar',
marker = list(color = c('rgba(204,204,204,1)', 'rgba(204,204,204,1)',
'rgba(222,45,38,0.8)', 'rgba(204,204,204,1)')))%>%
layout(title = "Dwellings Dublin Other",
xaxis = list(title = "Types"),
yaxis = list(title = "Amount"))
p2
#dwellings dublin 2 ------------------------------------
d3 <- summary(daftdublin2$dwelling)
p3 <- plot_ly(daftdublin2, x = names(d3), y = d3, type = 'bar',
marker = list(color = c('rgba(222,45,38,0.8)', 'rgba(204,204,204,1)',
'rgba(204,204,204,1)', 'rgba(204,204,204,1)')))%>%
layout(title = "Dwellings Dublin 2",
xaxis = list(title = "Types"),
yaxis = list(title = "Amount"))
p3
#dwelling comparison Dublin 2 / Dublin other
data <- data.frame(names(d3), d3, d2)
p4 <- plot_ly(data, x = ~names.d3., y = d3, type = 'bar', name = 'Dublin 2', marker = list(color = 'rgb(49,130,189)')) %>%
add_trace(y = ~d2, name = 'Dublin Other', marker = list(color = 'rgb(204,204,204)')) %>%
layout(xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""),
margin = list(b = 100),
barmode = 'group')
p4
#------------ comparison of North side
# Bed-count distributions for each north-side postal district.
s1 <- summary(daftdublin1$beds)
s3 <- summary(daftdublin3$beds)
s5 <- summary(daftdublin5$beds)
s7 <- summary(daftdublin7$beds)
s9 <- summary(daftdublin9$beds)
s11 <- summary(daftdublin11$beds)
s13 <- summary(daftdublin13$beds)
s15 <- summary(daftdublin15$beds)
s17 <- summary(daftdublin17$beds)
north <- data.frame(names(s1), s3, s5, s7, s9, s11, s13, s15, s17, d2)
p5 <- plot_ly(north, x = ~names.s1., y = s1, type = 'bar', name = 'Dublin 1', marker = list(color = 'rgb(49,130,189)')) %>%
add_trace(y = ~s3, name = 'Dublin 3', marker = list(color = "green")) %>%
add_trace(y = ~s5, name = 'Dublin 5', marker = list(color = "purple")) %>%
add_trace(y = ~s7, name = 'Dublin 7', marker = list(color = "orange")) %>%
add_trace(y = ~s9, name = 'Dublin 9', marker = list(color = "yellow")) %>%
add_trace(y = ~s11, name = 'Dublin 11', marker = list(color = "pink")) %>%
add_trace(y = ~s13, name = 'Dublin 13', marker = list(color = "red")) %>%
add_trace(y = ~s15, name = 'Dublin 15', marker = list(color = "black")) %>%
add_trace(y = ~s17, name = 'Dublin 17', marker = list(color = "gray")) %>%
layout(title = "North side beds",
xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""),
margin = list(b = 100),
barmode = 'group')
p5
#Comparison of south side -----------------------------------------------------
s2 <- summary(daftdublin2$beds)
s4 <- summary(daftdublin4$beds)
s6 <- summary(daftdublin6$beds)
s6w <- summary(daftdublin6w$beds)
s8 <- summary(daftdublin8$beds)
s10 <- summary(daftdublin10$beds)
s12 <- summary(daftdublin12$beds)
s14 <- summary(daftdublin14$beds)
s16 <- summary(daftdublin16$beds)
s18<- summary(daftdublin18$beds)
s20<- summary(daftdublin20$beds)
s22<- summary(daftdublin22$beds)
s24<- summary(daftdublin24$beds)
south <- data.frame(names(s2), s2, s4, s6, s6w, s8, s10, s12, s14, s16, s18, s20, s22, s24)
# NOTE(review): no trace is added for s22 (Dublin 22) in this chart or the two
# comparison charts below, although it is computed and included in `south`.
# NOTE(review): "cream" and "ginger" are not standard R colour names -- verify
# that plotly accepts them.
p6 <- plot_ly(south, x = ~names.s2., y = s2, type = 'bar', name = 'Dublin 2', marker = list(color = 'rgb(49,130,189)')) %>%
add_trace(y = ~s4, name = 'Dublin 4', marker = list(color = "green")) %>%
add_trace(y = ~s6, name = 'Dublin 6', marker = list(color = "purple")) %>%
add_trace(y = ~s6w, name = 'Dublin 6w', marker = list(color = "orange")) %>%
add_trace(y = ~s8, name = 'Dublin 8', marker = list(color = "yellow")) %>%
add_trace(y = ~s10, name = 'Dublin 10', marker = list(color = "pink")) %>%
add_trace(y = ~s12, name = 'Dublin 12', marker = list(color = "red")) %>%
add_trace(y = ~s14, name = 'Dublin 14', marker = list(color = "black")) %>%
add_trace(y = ~s16, name = 'Dublin 16', marker = list(color = "brown")) %>%
add_trace(y = ~s18, name = 'Dublin 18', marker = list(color = "olive")) %>%
add_trace(y = ~s20, name = 'Dublin 20', marker = list(color = "cream")) %>%
add_trace(y = ~s24, name = 'Dublin 24', marker = list(color = "ginger")) %>%
layout(title = "South side beds",
xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""),
margin = list(b = 100),
barmode = 'group')
p6
#comparison of sides: red = south, blue = north --------------------------
#side by side
both <- north
both <- cbind(north, south)
p7 <- plot_ly(both, x = ~names.s1., y = s2, type = 'bar', name = 'Dublin 2', marker = list(color = "red")) %>%
add_trace(y = ~s4, name = 'Dublin 4', marker = list(color = "red")) %>%
add_trace(y = ~s6, name = 'Dublin 6', marker = list(color = "red")) %>%
add_trace(y = ~s6w, name = 'Dublin 6w', marker = list(color = "red")) %>%
add_trace(y = ~s8, name = 'Dublin 8', marker = list(color = "red")) %>%
add_trace(y = ~s10, name = 'Dublin 10', marker = list(color = "red")) %>%
add_trace(y = ~s12, name = 'Dublin 12', marker = list(color = "red")) %>%
add_trace(y = ~s14, name = 'Dublin 14', marker = list(color = "red")) %>%
add_trace(y = ~s16, name = 'Dublin 16', marker = list(color = "red")) %>%
add_trace(y = ~s18, name = 'Dublin 18', marker = list(color = "red")) %>%
add_trace(y = ~s20, name = 'Dublin 20', marker = list(color = "red")) %>%
add_trace(y = ~s24, name = 'Dublin 24', marker = list(color = "red")) %>%
add_trace(y = ~s1, name = 'Dublin 1', marker = list(color = "blue")) %>%
add_trace(y = ~s3, name = 'Dublin 3', marker = list(color = "blue")) %>%
add_trace(y = ~s5, name = 'Dublin 5', marker = list(color = "blue")) %>%
add_trace(y = ~s7, name = 'Dublin 7', marker = list(color = "blue")) %>%
add_trace(y = ~s9, name = 'Dublin 9', marker = list(color = "blue")) %>%
add_trace(y = ~s11, name = 'Dublin 11', marker = list(color = "blue")) %>%
add_trace(y = ~s13, name = 'Dublin 13', marker = list(color = "blue")) %>%
add_trace(y = ~s15, name = 'Dublin 15', marker = list(color = "blue")) %>%
add_trace(y = ~s17, name = 'Dublin 17', marker = list(color = "blue")) %>%
layout(title = "Comparison",
xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""),
margin = list(b = 100),
barmode = 'group')
p7
#comparison combined w/ Dublin other ---------------------------------------------------------------------
p8 <- plot_ly(both, x = ~names.s1., y = s2, type = 'bar', name = 'Dublin 2', marker = list(color = "red")) %>%
add_trace(y = ~s1, name = 'Dublin 1', marker = list(color = "blue")) %>%
add_trace(y = ~s4, name = 'Dublin 4', marker = list(color = "red")) %>%
add_trace(y = ~s3, name = 'Dublin 3', marker = list(color = "blue")) %>%
add_trace(y = ~s5, name = 'Dublin 5', marker = list(color = "blue")) %>%
add_trace(y = ~s6, name = 'Dublin 6', marker = list(color = "red")) %>%
add_trace(y = ~s6w, name = 'Dublin 6w', marker = list(color = "red")) %>%
add_trace(y = ~s7, name = 'Dublin 7', marker = list(color = "blue")) %>%
add_trace(y = ~s8, name = 'Dublin 8', marker = list(color = "red")) %>%
add_trace(y = ~s9, name = 'Dublin 9', marker = list(color = "blue")) %>%
add_trace(y = ~s10, name = 'Dublin 10', marker = list(color = "red")) %>%
add_trace(y = ~s11, name = 'Dublin 11', marker = list(color = "blue")) %>%
add_trace(y = ~s12, name = 'Dublin 12', marker = list(color = "red")) %>%
add_trace(y = ~s13, name = 'Dublin 13', marker = list(color = "blue")) %>%
add_trace(y = ~s14, name = 'Dublin 14', marker = list(color = "red")) %>%
add_trace(y = ~s15, name = 'Dublin 15', marker = list(color = "blue")) %>%
add_trace(y = ~s16, name = 'Dublin 16', marker = list(color = "red")) %>%
add_trace(y = ~s17, name = 'Dublin 17', marker = list(color = "blue")) %>%
add_trace(y = ~s18, name = 'Dublin 18', marker = list(color = "red")) %>%
add_trace(y = ~s20, name = 'Dublin 20', marker = list(color = "red")) %>%
add_trace(y = ~s24, name = 'Dublin 24', marker = list(color = "red")) %>%
add_trace(y = ~d2, name = 'Dublin Other', marker = list(color = "green")) %>%
layout(title = "Comparison",
xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""),
margin = list(b = 100),
barmode = 'group')
p8
#---------------------------------------------------------------------------------
# NOTE(review): `chart()` is not defined in this script and no loaded package
# above obviously provides it -- confirm where this function comes from.
chart(dublin2017)
|
fb6b0143c6f8a9e00968fa6204159cb287f33260
|
88d9c0d58c72ba565d403a21de37f9169ac282a0
|
/man/fcds_const.Rd
|
5b3105599eb070abe2125de5b0c26fb8c1b95a18
|
[
"MIT"
] |
permissive
|
GerkeLab/fcds
|
01191bc32e4b73a857ae7ab7ef39e29c6c2713c4
|
7d6cbc89726418629d9c3cd54b10414eb7cab028
|
refs/heads/master
| 2021-07-08T02:48:46.061128
| 2020-07-30T18:45:01
| 2020-07-30T19:04:25
| 167,439,089
| 3
| 1
|
NOASSERTION
| 2020-07-30T19:04:26
| 2019-01-24T21:16:41
|
R
|
UTF-8
|
R
| false
| true
| 1,374
|
rd
|
fcds_const.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.R
\name{fcds_const}
\alias{fcds_const}
\title{List Expected or Valid FCDS Constants}
\usage{
fcds_const(var = "year_group", full = FALSE,
fcds_recoding_file = NULL)
}
\arguments{
\item{var}{An FCDS variable or a package constant. Set to \code{NULL} for a list
of valid variables.}
\item{full}{If \code{FALSE}, returns only the values that are expected in the
cleaned FCDS data. If \code{TRUE}, returns information regarding the original
variable name (\code{name_original}) and value (\code{value}) and the cleaned
variable name (\code{name_clean}) and value label (\code{label}).}
\item{fcds_recoding_file}{The path to the recoding specification yaml file.
Set to \code{NULL} to use the default recoding as used in \code{\link[=fcds_import]{fcds_import()}}.}
}
\value{
A character vector of valid FCDS values, or a tibble with information
about the original and recoded FCDS data values.
}
\description{
Lists the expected values in the processed, cached FCDS data using the
built-in recoding, or alternatively returns a tibble containing information
about the cleaned column and value labels and the original data values.
}
\examples{
fcds_const("race")
fcds_const("race", full = TRUE)
fcds_const("cancer_site_group", full = TRUE) \%>\% head()
fcds_const("moffitt_catchment")
}
|
b4d15af0bf4dd1b87aeb6cc3a3e6da64d8896ba7
|
f8a9279f9726238f606877417b8978cf2766cf49
|
/R/Bn.R
|
f6dedf99a1887513f0934ed73b1cd4ea0f88ae9b
|
[] |
no_license
|
gcybis/Uclust
|
fd09bca691d283dbcc4abd3f43bbdbd33e83bcae
|
10ae3f89a83f001942abc626d72a4c63f72d8e14
|
refs/heads/master
| 2020-04-21T02:16:36.053717
| 2019-02-05T14:03:17
| 2019-02-05T14:03:17
| 169,249,205
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,853
|
r
|
Bn.R
|
#' Computes Bn Statistic.
#'
#' Returns the value for the Bn statistic that measures the degree of separation between two groups.
#' The statistic is computed through the difference of average within group distances to average between
#' group distances. Large values of Bn indicate large group separation. Under overall sample homogeneity
#' we have E(Bn)=0.
#'
#' Either \code{data} OR \code{md} should be provided.
#' If data are entered directly, Bn will be computed considering the squared Euclidean distance, which is compatible with
#' \code{\link{is_homo}}, \code{\link{uclust}} and \code{\link{uhclust}}.
#'
#' For more detail see Cybis, Gabriela B., Marcio Valk, and Sílvia RC Lopes. "Clustering and classification problems in genetics through U-statistics."
#' Journal of Statistical Computation and Simulation 88.10 (2018)
#' and Valk, Marcio, and Gabriela Bettella Cybis. "U-statistical inference for hierarchical clustering." arXiv preprint arXiv:1805.12179 (2018).
#' @param group_id A vector of 0s and 1s indicating to which group the samples belong. Must be in the same order as data or md.
#' @param md Matrix of distances between all data points.
#' @param data Data matrix. Each row represents an observation.
#' @return Value of the Bn statistic.
#'
#' @examples
#' n=5
#' x=matrix(rnorm(n*10),ncol=10)
#' bn(c(1,0,0,0,0),data=x) # option (a) entering the data matrix directly
#' md=as.matrix(dist(x))^2
#' bn(c(0,1,1,1,1),md) # option (b) entering the distance matrix
#'
#' @export
bn <- function(group_id, md = NULL, data = NULL) {
  # Compute the Bn separation statistic between the two groups encoded in
  # `group_id` (0 = group 1, 1 = group 2). Uses the original definition when
  # both groups have at least two members and the extended definition when
  # one group is a singleton.
  #
  # group_id: vector of 0s and 1s, in the same order as the rows of md/data.
  # md: matrix of pairwise distances between all observations (optional).
  # data: data matrix, one observation per row; used to build `md` as the
  #   squared Euclidean distance matrix when `md` is not supplied.
  #
  # Returns the scalar value of the Bn statistic.
  if (is.null(md)) {
    # Computing data matrix if one is not provided
    if (is.null(data)) {
      stop("No data provided")
    }
    md <- as.matrix(dist(data) ^ 2)
  }
  # Bug fix: the original `class(md) != "matrix"` breaks under R >= 4.0,
  # where a matrix has class c("matrix", "array"); the comparison then yields
  # a length-2 condition, which errors inside `if` on current R.
  if (!is.matrix(md)) {
    stop("md is not of class matrix")
  }
  group1 <- (group_id == 0)
  group2 <- group_id == 1
  ngv <- c(sum(group1), sum(group2))  # group sizes (n1, n2)
  ng <- dim(md)[1]
  if (ng != sum(ngv)) {
    stop("Incorrect dimension or group_id")
  }
  if (min(ngv) > 1) {
    # Original definition of Bn (both groups have more than one member).
    s11 <- sum(md[group1, group1]) / 2  # within-group-1 distance sum
    s22 <- sum(md[group2, group2]) / 2  # within-group-2 distance sum
    s12 <- sum(md[group1, group2])      # between-group distance sum
    a1 <- (1 / (ngv[1] * ngv[2])) * s12
    a2 <- (2 / (ngv[1] * (ngv[1] - 1))) * s11
    a3 <- (2 / (ngv[2] * (ngv[2] - 1))) * s22
    sBn <- (ngv[1] * ngv[2] / (ng * (ng - 1))) * (2 * a1 - a2 - a3)
  } else if (ngv[1] == 1) {
    # Extended definition of Bn when group 1 is a singleton.
    s22 <- sum(md[group2, group2]) / 2
    s12 <- sum(md[group1, group2])
    a1 <- (1 / (ngv[1] * ngv[2])) * s12
    a2 <- 0
    a3 <- (2 / (ngv[2] * (ngv[2] - 1))) * s22
    sBn <- (ngv[1] * ngv[2] / (ng * (ng - 1))) * (a1 - a2 - a3)
  } else {
    # Extended definition of Bn when group 2 is a singleton.
    s11 <- sum(md[group1, group1]) / 2
    s12 <- sum(md[group1, group2])
    a1 <- (1 / (ngv[1] * ngv[2])) * s12
    a2 <- (2 / (ngv[1] * (ngv[1] - 1))) * s11
    a3 <- 0
    sBn <- (ngv[1] * ngv[2] / (ng * (ng - 1))) * (a1 - a2 - a3)
  }
  sBn
}
############################################
# Internal function: computes Bn
############################################
#Computes Bn
## ngv is a vector with 2 entries: size of group 1 (n1) and size of group 2 (n2)
## md is the distance matrix, ordered so that the first n1 elements are from group 1t
Bn <- function(ngv, md) {
  # Internal helper: Bn statistic for a distance matrix `md` that is ordered
  # so the first ngv[1] rows/columns belong to group 1 and the remaining
  # ngv[2] belong to group 2.
  #
  # ngv: integer vector c(n1, n2) with the two group sizes.
  # md: (n1 + n2) x (n1 + n2) distance matrix in group order.
  #
  # Returns the scalar Bn value.
  #
  # Change vs. original: removed `maux1`, an ng x ng zero matrix that was
  # allocated but never used (wasted O(n^2) memory per call).
  ng <- sum(ngv)
  idx1 <- 1:ngv[1]
  idx2 <- (ngv[1] + 1):ng
  scale <- ngv[1] * ngv[2] / (ng * (ng - 1))
  if (min(ngv) > 1) {
    # Original definition of Bn (both groups have more than one member).
    s11 <- sum(md[idx1, idx1]) / 2  # within-group-1 distance sum
    s22 <- sum(md[idx2, idx2]) / 2  # within-group-2 distance sum
    s12 <- sum(md[idx1, idx2])      # between-group distance sum
    a1 <- s12 / (ngv[1] * ngv[2])
    a2 <- 2 * s11 / (ngv[1] * (ngv[1] - 1))
    a3 <- 2 * s22 / (ngv[2] * (ngv[2] - 1))
    sBn <- scale * (2 * a1 - a2 - a3)
  } else if (ngv[1] == 1) {
    # Extended definition when group 1 is a singleton (no within-1 term).
    s22 <- sum(md[idx2, idx2]) / 2
    s12 <- sum(md[idx1, idx2])
    a1 <- s12 / (ngv[1] * ngv[2])
    a3 <- 2 * s22 / (ngv[2] * (ngv[2] - 1))
    sBn <- scale * (a1 - a3)
  } else {
    # Extended definition when group 2 is a singleton (no within-2 term).
    s11 <- sum(md[idx1, idx1]) / 2
    s12 <- sum(md[idx1, idx2])
    a1 <- s12 / (ngv[1] * ngv[2])
    a2 <- 2 * s11 / (ngv[1] * (ngv[1] - 1))
    sBn <- scale * (a1 - a2)
  }
  sBn
}
|
e371614b70862a7182125c4fc95652cedca22fa9
|
8c7b47e2d083ab703e17e2f392750aa878193b41
|
/scripts/ldshrink_ldo.R
|
3cbb96572441b66785391e3a832675da332df893
|
[] |
no_license
|
CreRecombinase/PolygenicRSS
|
847adedd95e437b56903f48a65797e6fd9245e55
|
a77be2bea99a6fbddd8b8b6054e4998f69217e9a
|
refs/heads/master
| 2021-06-03T02:27:52.602202
| 2021-03-29T21:16:31
| 2021-03-29T21:16:31
| 98,669,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
ldshrink_ldo.R
|
library(dplyr)
library(ldmap)
library(ldshrink)
library(EigenH5)
# Snakemake-driven script: for each chromosome block of a PLINK dataset,
# compute an eigendecomposition of the LD matrix (shrunk or raw) and write
# Q (eigenvectors), D (eigenvalues) and the SNP identifiers to an HDF5 file.
# The "shrink" parameter selects ldshrink shrinkage (default) vs a plain SVD.
shrink <- snakemake@params[["shrink"]]
if(is.null(shrink)){
doshrink <- TRUE
}else{
doshrink <- shrink=="shrink"
}
# SNP filter list: rsids (as integers) that should be retained.
snplist_f <- snakemake@input[["snp_list"]]
snplist_df <- tibble(rsid=rsid2int(scan(snplist_f,what=character())))
# Read the .bim file, tag each SNP with its row index and chromosome-derived
# LD-region id, and keep only SNPs present in the filter list.
bim_df <- read_plink_bim(snakemake@input[["bimf"]]) %>%
mutate(snp_id = 1:n(),
ldmr = chromosomes(snp_struct),
rsid = rsid2int(rsid)) %>%
semi_join(snplist_df)
fam_df <- read_plink_fam(snakemake@input[["famf"]])
N <- nrow(fam_df)  # number of samples
# Process one LD region (here: one chromosome) at a time.
bim_l <- split(bim_df, bim_df$ldmr)
purrr::walk(bim_l, function(df){
gl <- read_plink_bed(snakemake@input[["bedf"]], subset = df$snp_id, N = N)
Xm <- gt2matrix(gl)
# Column sums -> allele frequencies (diploid, hence 2N) and per-SNP means.
cS <- colSums(Xm, na.rm = TRUE)
cAF <- cS / (N * 2)
cM <- cS/(N-1)
# Monomorphic SNPs carry no LD information and are dropped below.
bad_snps <- cAF==0 | cAF==1
# Mean-impute missing genotypes column-wise.
indx <- which(is.na(Xm), arr.ind = TRUE)
Xm[indx] <- cM[indx[,2]]
sXm <- Xm[,!bad_snps]
sdf <- filter(df,!bad_snps)
if(!doshrink){
# Raw LD: eigenstructure straight from the SVD of the genotype matrix.
svdX <- svd(sXm, nu = 0, nv = ncol(sXm))
d <- (svdX$d^2) / (nrow(fam_df)-1)
q <- svdX$v
}else{
# Shrunk LD via ldshrink, using the genetic map positions in sdf$map.
R <- ldshrink::ldshrink(sXm, sdf$map, isGeno = TRUE, na.rm=FALSE)
lvdR <- eigen(R)
d <- lvdR$values
q <- lvdR$vectors
}
# Write results under "<region>/Q", "<region>/D", "<region>/snp_id",
# "<region>/rsid" in the output HDF5 file.
ldmr_id <- as.character(unique(sdf$ldmr))
write_matrix_h5(q,snakemake@output[["h5f"]], paste0(ldmr_id, "/Q"))
write_vector_h5(d, snakemake@output[["h5f"]], paste0(ldmr_id, "/D"))
write_vector_h5(sdf$snp_id, snakemake@output[["h5f"]], paste0(ldmr_id, "/snp_id"))
write_vector_h5(sdf$rsid, snakemake@output[["h5f"]], paste0(ldmr_id, "/rsid"))
})
|
d7ef8cf11f91f9d0caea9c434dc7742c39fcbf9f
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/4'-methoxy-2-propyl-.R
|
7832a45e632ac37e1abc8abff77215a1a57428cb
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
4'-methoxy-2-propyl-.R
|
# Render the 4'-methoxy-2-propyl- report from R Markdown to HTML.
library("knitr")
library("rgl")
#knit("4'-methoxy-2-propyl-.Rmd")
#markdownToHTML('4'-methoxy-2-propyl-.md', '4'-methoxy-2-propyl-.html', options=c("use_xhml"))
#system("pandoc -s 4'-methoxy-2-propyl-.html -o 4'-methoxy-2-propyl-.pdf")
# Bug fix: the file name contains a single quote, so it must be wrapped in
# DOUBLE quotes -- '4'-methoxy-2-propyl-.Rmd' is a parse error because the
# string literal ends right after the 4.
knit2html("4'-methoxy-2-propyl-.Rmd")
|
a9a1d7b38b73fb4aa412ce0be15a8fce681b9aaa
|
750c72e3cceedbe65146442dcc6de16923af8fcc
|
/man/genertestPreSelect.Rd
|
e9154a1a2ecbd9cfcc86e8e6049442490c7a43ea
|
[] |
no_license
|
llaarraa/genertest
|
075fd472503d9d3e6a23a85e66d9091a90e51932
|
576b4035d50f3cd94e4413ee34dff1cead34a19c
|
refs/heads/master
| 2018-12-28T05:27:22.615172
| 2014-02-17T13:42:50
| 2014-02-17T13:42:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,206
|
rd
|
genertestPreSelect.Rd
|
\name{genertestPreSelect}
\alias{genertestPreSelect}
\title{Function that Generates Tests using a list of prespecified questions}
\description{
This function generates tests with questions drawn from a tab-delimited database. The questions to be included in each test are specified by the user. The tests can include permuted questions and answers, or be generated using Sweave code. The output is a LaTeX or PDF file for each of the tests.}
\usage{
genertestPreSelect(my.db.name, my.outdir, list.QID, num.tests=NULL,
repeat.each.test=1, my.seed=1999,
generate.solutions=FALSE, my.title="Exam", my.date="Today", my.prefix="exam", head.name="Name", head.id="ID number",
head.points="Number of points", head.prefix="MED", my.language="english",
use.Sweave=TRUE, compile.pdf=TRUE, merge.pdf=TRUE, my.final.sentence=NULL, files.to.move=NULL, names.files.to.move=NULL)
}
\arguments{
\item{my.db.name}{Name of the tab delimited file including the questions, either the full path should be given or the file should be placed in the R working directory, should be a string, written between quotes. Alternatively, an R data.frame can be passed as an argument. See details.}
\item{my.outdir}{Name of the directory where the exams will be written, should be a string, written between quotes; full path must be given. If not specified, a directory named Exams+DateTime (ExamsMonthDayYearHourMin using the date and time when the function is executed) will be created in the working directory and the files will be stored in that directory.}
\item{list.QID}{List with the questions ID (as they appear in the questions data base) to be used for each test. Each element of the list contains a vector with the IDs of the questions to include in each test.}
\item{num.tests}{Number of different tests to be generated, optional, as the number can be retrieved from the length of the list.QID argument.}
\item{repeat.each.test}{Number of times that each test needs to be permuted, to generate permuted version of the same test, if set to 1 there will be no permutation and just one test of each kind will be generated}
\item{my.seed}{Seed used to initialize the random number generator, useful to get reproducible results}
\item{generate.solutions}{Indicator (TRUE or FALSE) that indicates if the solutions of the tests must be generated; if set to TRUE it generated a LaTeX (or PDF) file with solutions corresponding to each exam}
\item{my.title}{Name of the exam - to be displayed in the header}
\item{my.date}{Date of the exam - to be displayed in the header}
\item{my.prefix}{String with which the names of the LaTeX files of the tests begins}
\item{head.name}{String indicating the name of the student - to be displayed in the header (the default is "Name" but it can be changed if a language different than English is used)}
\item{head.id}{String indicating the identification number of the student - to be displayed in the header (the default is "ID number" but it can be changed if a language different than English is used)}
\item{head.points}{String indicating how to indicate the number of points - to be displayed in the header (the default is "Number of points" but it can be changed if a language different than English is used)}
\item{head.prefix}{An alpha-numerical ID is generated for each test. The ID is generated pasting \code{ head.prefix}, a random number and a progressive number that indicates the test}
\item{my.language}{String indicating the language in which the test is written - it is used to call a LaTeX Babel package that contains the appropriate language settings (a call to the Babel package will be included in the header of the *.tex files)}
\item{use.Sweave}{Indicator of whether in the database there are some exercises written using Sweave code; default is TRUE, if set to TRUE *.rnw files will be generated and compiled to *.tex (or *.pdf) files. See details.}
\item{compile.pdf}{Indicator of whether the *.tex files should be compiled to *.pdf files (compiled if set to TRUE). See details.}
\item{merge.pdf}{Indicator of whether the *.pdf files should be merged in a single PDF file, valid only if compile.pdf=TRUE}
\item{my.final.sentence}{A string with a sentence that will be written in bold at the end of each exam. If set to NULL (default) no sentence will be displayed.}
\item{files.to.move}{Vector of strings indicating the (full path) name of the files that should be moved in the same directory as the exams, if not specified, all the files in dir.files.to.move are moved.}
\item{names.files.to.move}{Vector of strings indicating the name of the files.to.move files, as it should appear in the final version. This parameter is useful only if the files passed through the files.to.move argument should be renamed (as in the shinyApp). Ignore otherwise.}
}
\details{
The data set with questions must be prepared using a similar structure as the example data from this library. The inclusion of some variables is mandatory (Question, Question.ID) other variables can be included (Answer, Topic, Points, etc). See \code{\link{dbQuestions}} for more details.
\code{my.db.name} can be either the path to the tab delimted data base containing the questions or an R data.frame.
If use.Sweave=TRUE, a call to the Sweave package will be included in the header of the *.rnw and *.tex files. For this reason Sweave package must be available to LaTeX compiler. File Sweave.sty can be included in the directory my.outdir in case of problems related to package unavailability.
If compile.pdf=TRUE, the *.tex files will be compiled into *.pdf files using \code{texi2dvi} function.
%MikTeX must be installed locally and its executables (as pdflatex.exe) must be available from my.outdir directory. See MikTeX documentation.
If the user wishes to use a different program to compile the LaTeX files, set compile.pdf=FALSE and manually compile the *.tex files outside R.}
\value{
Writes LaTeX (or PDF) files containing the texts of the exams and returns a list containing the IDs of the questions selected for each test and the list of the *.tex files that were generated.
\item{Questions}{Each element of the list contains a vector that can be used to identify the questions selected for an exam. The index for a question represents the row in which the question appears (or begins) in the database of questions. If permuted versions of the same test were produced (repeat.each.test>1) then only one record for each set of permuted tests is reported}
\item{files}{Names of the generated files, using full path (*.tex or *.pdf, depending on the selected options)}
\item{names.files}{Names the generated files (*.tex or *.pdf, depending on the selected options)}
\item{dir.files}{Path where the generated files are stored}
\item{merged.file}{Path to the files containing the merged PDFs, if produced.}
\item{errors}{A string with the errors encountered during the execution of the function, if any.}
}
\references{\url{http://sites.google.com/site/lara3107/Home/software/genertest}, \url{https://github.com/llaarraa/genertest}}
\author{Lara Lusa}
\note{}
\examples{
#data(my.data)
my.title<-"Medical Faculty - Undergraduates - Exam of Biostatistics"
my.date<-"31.7.2013"
#my.outdir<-"C:\\Users\\lara\\Documents\\proveGenertest"
#Sweave.sty and pdflatex.exe must be available from this directory
#name and path of the database with the questions
my.file.name=paste(path.package("genertest"),"/extdata/questionsExamples.txt",sep="")
gPS.output<-genertestPreSelect(my.file.name, my.outdir=NULL, list.QID=list(c(1,2,3), c(2,3,4)), num.tests=2, repeat.each.test=3,
my.seed=2, generate.solutions=TRUE,
my.title=my.title, my.date=my.date, my.prefix="Exam", head.name="Name", head.id="ID Number",
head.points="Points", head.prefix="Exam", my.language="english",
use.Sweave=TRUE, compile.pdf=TRUE, my.final.sentence="Good luck!")
gPS.output
#merge the files in a single file
merge.out=Merge.pdf(gPS.output)
}
\seealso{\code{\link{write.all}}, \code{\link{dbQuestions}}}
\keyword{misc}
|
a68e5055978b477c7c885119c14f2719757cff0b
|
8ae4e43fd9f3468232ac2035a12ee9a20eb7f064
|
/Homework1.Rcheck/00_pkg_src/Homework1/R/dmvnorm.R
|
a1f02a5a5f27ae6cd8e5bbf17703f8af55851802
|
[] |
no_license
|
duyu8411/Biostat778_HW1
|
40d60343a624c51fa4120c6a3452f364cf3b4a96
|
ca182445349846a46832b7e91d3d215e90c6af2e
|
refs/heads/master
| 2020-12-14T09:58:21.868668
| 2013-11-13T18:29:45
| 2013-11-13T18:29:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 705
|
r
|
dmvnorm.R
|
##Fast Multivariate Normal Density
#' Evaluate the multivariate normal density N(mu, S) at the rows of x.
#'
#' @param x numeric vector (a single observation) or an n x k matrix whose
#'   rows are observations.
#' @param mu numeric mean vector of length k.
#' @param S k x k positive-definite covariance matrix.
#' @param log if TRUE (default) return the log-density.
#' @return numeric vector of (log-)density values, one per row of x.
dmvnorm <- function(x, mu, S, log = TRUE) {
  # A plain vector is treated as a single observation (1 x k matrix).
  if (!is.matrix(x)) {
    x <- t(as.matrix(x))
  }
  k <- length(mu)
  n <- nrow(x)
  # The Cholesky factor both certifies positive definiteness and gives
  # log|S| cheaply. Fail loudly here instead of letting a NULL propagate
  # into the computations below (the original swallowed the error).
  R <- tryCatch(
    chol(S),
    error = function(e) stop("S is not positive definite", call. = FALSE)
  )
  logdetS <- 2 * sum(log(diag(R)))
  centered <- x - rep(1, n) %*% t(mu)
  # Solve t(R) C = t(centered); the Mahalanobis terms are then colSums(C^2).
  # (colSums(C^2) avoids materializing the n x n matrix crossprod(C).)
  C <- forwardsolve(t(R), t(centered))
  quad <- colSums(C^2)
  # Note: log(2 * pi) still finds base::log even though `log` is a parameter,
  # because R skips non-function bindings when resolving a call.
  fx <- (-k / 2) * log(2 * pi) - logdetS / 2 - quad / 2
  if (log) fx else exp(fx)
}
|
ae3889520f055d438f8455a76dcd68fe48100c9a
|
4aa72199184e3fbbcfa21e7e7d2a058fd56ce24a
|
/R_ARA/userdata_ARA.R
|
4d8d33114aed2c7a2956bc560ab4b273d0fae6cc
|
[] |
no_license
|
0525hhgus/GameR
|
71813f692ce42798a1d301155e78c63566532031
|
ae6f9b25916c7bdf1c28733a5bef05f1d3fec0bb
|
refs/heads/master
| 2023-02-07T01:15:46.671650
| 2020-12-28T06:39:09
| 2020-12-28T06:39:09
| 248,466,592
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 912
|
r
|
userdata_ARA.R
|
# userdata Association Rule Analysis
# userdata(ID, Gamename)
# Mine association rules between games from per-user (ID, Game) records.
# set working directory
# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("D:/Project/test")
# association rule analysis package
library(arules)
# data import-> make transaction data
udata1<-read.csv("merge_new_userdata_notime.csv")
head(udata1)
# One transaction per user: the set of games associated with that ID.
udata.list<-split(udata1$Game,udata1$ID)
udata.trans<-as(udata.list,"transactions")
# warning(s) : In asMethod(object) : removing duplicated items in transactions
udata.trans
# summary of userdata
summary(udata.trans)
#density : 0.016, 373*1104 cell -> 1.6%
# for running dvdtras data
# apriori(transaction, parameter=list(support=list(support=0.0#, confidence=0.##))
# Rules with at least 2 items, support >= 0.2 and confidence >= 0.2.
udata_rule<-apriori(udata.trans,parameter = list(support=0.2,confidence = 0.20,minlen = 2))
udata_rule
# 19 rules
inspect(udata_rule)
summary(udata_rule)
# Bar chart for support>0.2
itemFrequencyPlot(udata.trans,support=0.2,main="item for support>=0.2", col="blue")
|
d31f4e9abc1c9c15459a76f453d2618c7930495e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tidypredict/examples/tidypredict_sql_interval.Rd.R
|
6e2121ff2adf0301404198721d060fa06589b75b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
tidypredict_sql_interval.Rd.R
|
library(tidypredict)
### Name: tidypredict_sql_interval
### Title: Returns a SQL query with formula to calculate predicted interval
### Aliases: tidypredict_sql_interval
### Keywords: internal
### ** Examples
library(dbplyr)
# Fit a linear model, then render the SQL expression for its prediction
# interval against a simulated (dialect-only) database connection.
model <- lm(mpg ~ wt + am + cyl, data = mtcars)
tidypredict_sql_interval(model, simulate_dbi())
|
8c050daaa55749e33b0c1b34f206fc0b794966fe
|
b8cb7d600124f924712ce542c8aa681a8dac409e
|
/tests/testthat/test_map.R
|
4ee4a51d4dcd24ce37aa0f8db22a6428bd338322
|
[] |
no_license
|
itsaquestion/MyUtils
|
c963d37d4412d1e8ec8572cfde230b7a9ffdb5da
|
9f9b644d59986c9099a49527b920e219261c2c30
|
refs/heads/master
| 2021-06-21T02:44:25.781272
| 2019-08-25T03:13:09
| 2019-08-25T03:13:09
| 106,079,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
test_map.R
|
library(testthat)
context("row_col_map")
library(xts)
# Fixtures: a plain data.frame and an xts series holding the same values.
a_df = data.frame(a = 1:10, b = 11:20)
a_xts = as.xts(a_df, order.by = seq(Sys.Date() - 9, Sys.Date(), by = 1))
a_xts
# colMap (from the package under test) should preserve the container type:
# a data.frame maps to a data.frame, an xts object maps to a matrix.
expect_is(colMap(a_df, max), "data.frame")
expect_is(colMap(a_xts, max), "matrix")
|
bc77f136f159451d8ef7be4c71f7cd957f7fe42a
|
129408919e4fcde9818bef047f6e9b2a74d23c8a
|
/tests/testthat/setup-run-model.R
|
8cb1089e1dc6c715a19cc374fcf8684ebb8471f7
|
[
"MIT"
] |
permissive
|
mrc-ide/naomi
|
93decfb73624de911f298aadcc0e0d02b8d7d5e5
|
94d34246144e4dfcb86161258faf213a7db03268
|
refs/heads/master
| 2023-06-14T06:37:36.343882
| 2023-05-05T11:08:33
| 2023-05-05T11:08:33
| 204,965,083
| 7
| 6
|
NOASSERTION
| 2023-09-12T12:54:48
| 2019-08-28T15:32:00
|
R
|
UTF-8
|
R
| false
| false
| 2,871
|
r
|
setup-run-model.R
|
## A single set of valid model options and data, update once instead of copying
## for every test.
## testthat setup file: runs the model once on the bundled demo inputs so that
## individual test files can reuse the fitted and calibrated outputs.
## Demo input files shipped with the package (subnational Malawi example).
a_hintr_data <- list(
pjnz = system_file("extdata/demo-subnational-pjnz/demo_mwi2019_region-pjnz.zip"),
population = system_file("extdata/demo-subnational-pjnz/demo_population_zone.csv"),
shape = system_file("extdata/demo-subnational-pjnz/demo_areas_region-pjnz.geojson"),
survey = system_file("extdata/demo_survey_hiv_indicators.csv"),
art_number = system_file("extdata/demo-subnational-pjnz/demo_art_number_zone.csv"),
anc_testing = system_file("extdata/demo-subnational-pjnz/demo_anc_testing_zone.csv")
)
## A complete, valid option set; deliberately small (20 samples, 250
## iterations) so the setup run stays fast.
a_hintr_options <- list(
area_scope = "MWI",
area_level = "2",
calendar_quarter_t1 = "CY2016Q1",
calendar_quarter_t2 = "CY2018Q4",
calendar_quarter_t3 = "CY2019Q2",
calendar_quarter_t4 = "CY2022Q3",
calendar_quarter_t5 = "CY2023Q3",
survey_prevalence = c("DEMO2016PHIA", "DEMO2015DHS"),
survey_art_coverage = "DEMO2016PHIA",
survey_recently_infected = "DEMO2016PHIA",
include_art_t1 = "true",
include_art_t2 = "true",
anc_clients_year2 = 2018,
anc_clients_year2_num_months = "9",
anc_prevalence_year1 = 2016,
anc_prevalence_year2 = 2018,
anc_art_coverage_year1 = 2016,
anc_art_coverage_year2 = 2018,
spectrum_population_calibration = "national",
artattend = "true",
artattend_t2 = "false",
artattend_log_gamma_offset = -4L,
anchor_home_district = TRUE,
output_aware_plhiv = "true",
rng_seed = 17,
no_of_samples = 20,
max_iter = 250,
use_kish_prev = "true",
deff_prev = 1.0,
use_kish_artcov = "true",
deff_artcov = 1.0,
use_kish_recent = "true",
deff_recent = 1.0,
use_survey_aggregate = "false",
psnu_level = NULL
)
a_hintr_output <- hintr_run_model(a_hintr_data, a_hintr_options)
## Invalid option set (missing a required quarter) for error-path tests.
a_hintr_options_bad <- a_hintr_options
a_hintr_options_bad$calendar_quarter_t2 <- NULL
## Calibration options applied to the shared model output above.
a_hintr_calibration_options <- list(
spectrum_plhiv_calibration_level = "subnational",
spectrum_plhiv_calibration_strat = "sex_age_group",
spectrum_artnum_calibration_level = "subnational",
spectrum_artnum_calibration_strat = "sex_age_coarse",
spectrum_aware_calibration_level = "national",
spectrum_aware_calibration_strat = "age_coarse",
spectrum_infections_calibration_level = "none",
spectrum_infections_calibration_strat = "age_coarse",
calibrate_method = "logistic"
)
a_hintr_output_calibrated <- hintr_calibrate(a_hintr_output,
a_hintr_calibration_options)
## Use fit.RDS if it exists locally, otherwise just use the actual functions
## fit.RDS not on git because it is pretty massive ~ 220MB
## (mockery::mock returns the cached output for every call, skipping TMB.)
if (file.exists("testdata/fit.RDS")) {
model_output <- readRDS("testdata/fit.RDS")
fit <- mockery::mock(model_output, cycle = TRUE)
sample <- mockery::mock(model_output, cycle = TRUE)
} else {
fit <- fit_tmb
sample <- sample_tmb
}
|
2510ed3e3e4c0df9372a6d6175b61f774c421764
|
dca44395dbf60e1743c65bced7b26838bd676781
|
/HGU/BioData/Data/07_1.Union_with_vennDiagram.R
|
ec604d69a4e985a2683b5dea7da3be0caacd3339
|
[] |
no_license
|
ksmpooh/SungminCode
|
1b550c375125ea7869917de337aa093160aa03eb
|
33b266b80389664282a2d4d6eb9c2db593442a5f
|
refs/heads/master
| 2023-08-03T16:22:35.085299
| 2023-07-31T16:12:33
| 2023-07-31T16:12:33
| 106,177,934
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,331
|
r
|
07_1.Union_with_vennDiagram.R
|
##Union count
# unique(c(A,B))
## Build the union of gene sets selected by several ensemble feature-selection
## methods and visualize their overlap with a Venn diagram.
## ensemble model data.
# 1. CV
# 2. Mean
# 3. Var
# 4. annotation 308
# 5. annotation 2267
#library(VennDiagram)
library(limma)
library(gplots)
library(dplyr)
## Old data (previous version of the input files).
CV <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/names_GEO_input_ensemble_CV_3000.csv",header = T, sep = ",")
Mean <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/names_GEO_input_ensemble_Mean_3000.csv",header = T, sep = ",")
Var <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/names_GEO_input_ensemble_VAR_3000.csv",header = T, sep = ",")
Annotated_308<-read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/names_GEO_input_ensemble_foundation_308.csv",header = T,sep = ",")
Annotated_2267<-read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/names_GEO_input_ensemble_foundation_2267.csv",header = T, sep = ',')
## Recent data -- needs verification!
## NOTE(review): these reads overwrite the "old data" objects loaded above.
CV <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/CV_3000.csv")
Mean <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/Mean_3000.csv")
Var <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/VAR_3000.csv")
Annotated_2267<- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/foundation_2267.csv")
Annotated_308 <- read.csv("D:/biodatalab/2018-1/TCGA_with_GEO/union/foundation_308.csv")
#CV_union <- subset(CV,select = -c(index,result,cancer_code,patient))
#Mean_union <- subset(Mean,select = -c(index,result,cancer_code,patient))
#Var_union <- subset(Var,select = -c(index,result,cancer_code,patient))
#Annotated_308union <- subset(Annotated_308,select = -c(index,result,cancer_code,patient))
#Annotated_308colnames<-colnames(Annotated_308union)
#Annotated_2267union <- subset(Annotated_2267,select = -c(index,result,cancer_code,patient))
#Annotated_2267colnames<-colnames(Annotated_2267union)
#lists <- c(500,1000,1500,2000,2500,3000,3500,4000)
#colnames(CV) <- "CV"
#colnames(Mean) <- "Mean"
#colnames(Var) <- "Var"
#colnames(Annotated_308) <- "Annotated_308"
#colnames(Annotated_2267) <- "Annotated_2267"
## Accumulate the union of gene names across all five selections.
result <-union(CV$x,Mean$x)
result <-union(result,Var$x)
result <-union(result,Annotated_2267$x)
result <-union(result,Annotated_308$x)
## One-row data frame whose column names are all genes in the union.
all_gene <- Reduce(rbind,result)
all_gene <- as.data.frame(all_gene)
all_gene <- t(all_gene)
all_gene <-as.data.frame(all_gene)
colnames(all_gene)<-result
all_gene$index <- "all"
## For each method, build a one-row frame whose columns are its genes;
## the "index" column labels the method for the row-bind below.
CV_ <-t(CV)
CV_ <-as.data.frame(CV_)
colnames(CV_)<- CV$x
CV_$index <- "CV"
Mean_ <-t(Mean)
Mean_ <-as.data.frame(Mean_)
colnames(Mean_) <- Mean$x
Mean_$index <-"Mean"
Annotated_2267_ <-t(Annotated_2267)
Annotated_2267_ <-as.data.frame(Annotated_2267_)
colnames(Annotated_2267_) <- Annotated_2267$x
Annotated_2267_$index <-"Annotated_2267"
Annotated_308_ <-t(Annotated_308)
Annotated_308_ <-as.data.frame(Annotated_308_)
colnames(Annotated_308_) <- Annotated_308$x
Annotated_308_$index <-"Annotated_308"
Var_ <-t(Var)
Var_ <-as.data.frame(Var_)
colnames(Var_) <- Var$x
Var_$index <-"Var"
## Stack the per-method rows; missing genes become NA in bind_rows.
df <-data.frame()
df <- bind_rows(all_gene,CV_)
df <- bind_rows(df,Mean_)
df <- bind_rows(df,Var_)
df <- bind_rows(df,Annotated_308_)
df <- bind_rows(df,Annotated_2267_)
rownames(df)<-df$index
df<-subset(df,select = -index)
## Transpose to genes-by-method and drop the "all" reference column.
df_ <- t(df)
df_ <-as.data.frame(df_)
#colnames(df_)
#rownames(df_)
df_ <-subset(df_,select = -all)
## Presence/absence matrix: TRUE where a gene was selected by a method.
id <- (df_!="")
id[is.na(id)]<-FALSE
id.df<-as.data.frame(id)
vennCounts(id.df)
venn(id.df)
|
f4f1b6ff92ccbe2354db5f8d16db2e8972288c25
|
9fda538efa5f6cd2a5cecc2df29600871b537d6a
|
/netsim/testset/prova_sn_actfun.R
|
e75be3fd84afe8d0596a4345299274e869078baf
|
[] |
no_license
|
mfalda/gen-net-sim
|
f4b84e8215e4ffcf117099e2d096ee4e61b50915
|
3018f43d4e8dfbf4e37593af8d7d22171061f3ea
|
refs/heads/master
| 2021-03-12T20:00:20.007538
| 2015-08-25T07:34:52
| 2015-08-25T07:34:52
| 41,350,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
prova_sn_actfun.R
|
## Smoke test: simulate a 5-node network with a linear activation function
## over time points 1..5 and print "ok" if the call completes.
library("netsim")
ris <- simulatenet(N=5, act.fun="linear", times=seq(1,5))
cat("ok")
|
199a9093bfc6b997a15d3be9504bd07f51aff9b7
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Tentrup/mult-matrix/mult_bool_matrix_12_12_11.unsat/mult_bool_matrix_12_12_11.unsat.R
|
520d311d86bdd080b536b86bdf3bb3f4770e0d31
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84
|
r
|
mult_bool_matrix_12_12_11.unsat.R
|
ec0c63fecb1bdc3e6ce6590c60df747b mult_bool_matrix_12_12_11.unsat.qdimacs 23900 70494
|
6c57e127745fa7fe3fefe7feaa2362f667d6c36a
|
966634f142f00afef1259057bde4a2895224a085
|
/man/bisect.Rd
|
4f07ea1642a01a708206e4ad774e37a1d3c80675
|
[] |
no_license
|
pra1981/setsim
|
1c6b14d4535820521a128768bb375c1ae502c486
|
068363c626c051a82c7cbd69b1a9c31b85ff85d3
|
refs/heads/master
| 2020-04-03T06:36:10.289957
| 2014-03-27T14:53:04
| 2014-03-27T14:53:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,501
|
rd
|
bisect.Rd
|
\name{bisect}
\alias{bisect}
\title{Bisection Method}
\description{
This function finds the approximate roots of a numeric function by bisection algorithm. The root-finding method is one-dimensional.
}
\usage{
bisect(fn, a, b, tol = 1e-05, error = 1e-04, decimal = 5, input, VZ, c_value)
}
\arguments{
\item{fn}{a numeric function defined by the user. An object which is not a numeric function will not be allowed.}
\item{a}{numeric value of the lower end point of the interval to be searched.}
\item{b}{numeric value of the upper end point of the interval to be searched.}
\item{tol}{tolerance to stop bisection algorithm. The default value is 0.00001.}
\item{error}{significance of the approximate root. The default value is 0.0001.}
\item{decimal}{the number of decimal places to round off for the approximate root.}
\item{input}{a list of response variable, design(X) matrix, maximum likelihood estimators for the parameters and maximized log likelihood value. This argument must be ignored if confidence region visualization algorithm is not used.}
\item{VZ}{a matrix product by multiplying the matrix square root of a covariance matrix for the maximum likelihood estimator and random vector generated from a standard multivariate normal distribution. This argument must be ignored if confidence region visualization algorithm is not used.}
\item{c_value}{the critical values to visualize confidence regions. This argument must be ignored if confidence region visualization algorithm is not used.}
}
\value{the approximate root by bisection algorithm. The error of approximation, number of decimal places and other requirements can be adjusted by user to produce a different root.}
\details{
The function requires at least one root to process. Use \code{\link{nroots}} to check whether a root exists.
}
\seealso{
\code{\link{nroots}} for finding the lower and upper end point of the interval containing a root.}
\section{Warning}{If this function is not used for confidence region visualization algorithm, the numeric function to be calculated must include \{\code{\ldots}\} in its argument. This is due to the last three arguments of this function.}
\examples{
y <- function(x,...){x^2-4}
bisect(y,a=-5,b=0,error=0.1,decimal=5)
bisect(y,a=-5,b=0,error=0.0001,decimal=5)
f <- function(x,...){sin(x)}
x <- seq(-50,50,length=100)
roots <- nroots(f,x)
n <- roots$n
range <- roots$range
sol <- 0
for (i in 1:n){
sol[i] <- bisect(f,a=range[i,1],b=range[i,2])}
sol
}
\keyword{bisect}
|
f4ea7ec660e155bdf933aa1a7b01aae320e1da03
|
4d6cb9288727a510475fc1e9ebcf247653486580
|
/2020/day05.R
|
a3876c7db8c0c5b3a1ac40127da8a942ecd237c9
|
[] |
no_license
|
rrrlw/advent-of-code
|
b9ac82442d7c6164ca49c4fb3107fa323810680a
|
66c26a723717bfd7d95e2cb4e690735ec0f66005
|
refs/heads/main
| 2021-12-28T02:36:51.593985
| 2021-12-15T19:40:24
| 2021-12-15T19:40:24
| 226,371,569
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
day05.R
|
library(magrittr)
#####INPUT#####
# One boarding-pass specification per line, e.g. "FBFBBFFRLR".
fin <- file("day5.in", "r")
seats <- readLines(fin)
close(fin)
#####UTILITY FUNCTIONS#####
# Decode characters 1-7 of a boarding pass (F/B) as a binary row number:
# F -> 0, B -> 1, most significant bit first. Vectorized over `seat`.
seat_row <- function(seat) {
  bits <- chartr("FB", "01", substr(seat, 1L, 7L))
  strtoi(bits, base = 2L)
}
# Decode characters 8-10 of a boarding pass (L/R) as a binary column number:
# L -> 0, R -> 1. Vectorized over `seat`.
seat_col <- function(seat) {
  bits <- chartr("LR", "01", substr(seat, 8L, 10L))
  strtoi(bits, base = 2L)
}
# A seat's unique ID is row * 8 + column.
seat_id <- function(seat) {
  seat_col(seat) + seat_row(seat) * 8
}
#####PART 1#####
# Highest seat ID appearing on any boarding pass.
taken <- seat_id(seats)
taken %>%
max() %>%
print()
#####PART 2#####
# Our seat is the single ID missing from the contiguous range of taken IDs.
all_seats <- seq(from = min(taken), to = max(taken), by = 1)
all_seats[!(all_seats %in% taken)] %>%
print()
|
9e0465aa1f02bd32fec53887e417b168d9489fc3
|
541fff10228b07c9b54437c7e5f8e674eb7a3211
|
/man/show-commaNMFOffset-method.Rd
|
9068347b17226500089d078f2628f6bf5e148208
|
[] |
no_license
|
cran/NMF
|
8d125625278b93f52bdcc646e382a969d6ea717d
|
14e3f913b2de158149bf1fc869952cf6bb3d0561
|
refs/heads/master
| 2023-06-28T00:46:53.707046
| 2023-03-20T14:30:02
| 2023-03-20T14:30:02
| 17,692,093
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
rd
|
show-commaNMFOffset-method.Rd
|
\docType{methods}
\name{show,NMFOffset-method}
\alias{show,NMFOffset-method}
\title{Show method for objects of class \code{NMFOffset}}
\usage{
\S4method{show}{NMFOffset}(object)
}
\arguments{
\item{object}{Any R object}
}
\description{
Show method for objects of class \code{NMFOffset}
}
\keyword{methods}
|
d050e7dda6cc7da900fc9a8ca9912abd96744b5b
|
7ba42ea09417547219343e5532a1f7954bdf10b2
|
/R/symmetric-delete.R
|
01c993d8b3f60345975983e66dddabbf2283a31f
|
[
"Apache-2.0"
] |
permissive
|
r-spark/sparknlp
|
622822b53e2b5eb43508852e39a911a43efa443f
|
4c2ad871cc7fec46f8574f9361c78b4bed39c924
|
refs/heads/master
| 2023-03-16T05:35:41.244593
| 2022-10-06T13:42:00
| 2022-10-06T13:42:00
| 212,847,046
| 32
| 7
|
NOASSERTION
| 2023-03-13T19:33:03
| 2019-10-04T15:27:28
|
R
|
UTF-8
|
R
| false
| false
| 9,038
|
r
|
symmetric-delete.R
|
#' Spark NLP SymmetricDeleteApproach
#'
#' Spark ML estimator that is a spell checker inspired on Symmetric Delete algorithm. It retrieves tokens and utilizes
#' distance metrics to compute possible derived words.
#' See \url{https://nlp.johnsnowlabs.com/docs/en/annotators#symmetric-spellchecker}
#'
#' @template roxlate-nlp-algo
#' @template roxlate-inputs-output-params
#' @param dictionary_path path to dictionary of properly written words
#' @param dictionary_token_pattern token pattern used in dictionary of properly written words
#' @param dictionary_read_as LINE_BY_LINE or SPARK_DATASET
#' @param dictionary_options options to pass to the Spark reader
#' @param max_edit_distance Maximum edit distance to calculate possible derived words. Defaults to 3.
#' @param dups_limit maximum duplicate of characters in a word to consider.
#' @param deletes_threshold minimum frequency of corrections a word needs to have to be considered from training.
#' @param frequency_threshold minimum frequency of words to be considered from training.
#' @param longest_word_length length of the longest word in the corpus
#' @param max_frequency maximum frequency of a word in the corpus
#' @param min_frequency minimum frequency of a word in the corpus
#'
#' @export
nlp_symmetric_delete <- function(x, input_cols, output_col,
dictionary_path = NULL, dictionary_token_pattern = "\\S+", dictionary_read_as = "LINE_BY_LINE",
dictionary_options = list("format" = "text"), max_edit_distance = NULL, dups_limit = NULL,
deletes_threshold = NULL, frequency_threshold = NULL, longest_word_length = NULL,
max_frequency = NULL, min_frequency = NULL,
uid = random_string("symmetric_delete_")) {
# S3 dispatch on the class of x: spark_connection, ml_pipeline or tbl_spark.
UseMethod("nlp_symmetric_delete")
}
#' @export
# Core method: builds the Spark pipeline-stage jobj on a connection.
nlp_symmetric_delete.spark_connection <- function(x, input_cols, output_col,
dictionary_path = NULL, dictionary_token_pattern = "\\S+", dictionary_read_as = "LINE_BY_LINE",
dictionary_options = list("format" = "text"), max_edit_distance = NULL, dups_limit = NULL,
deletes_threshold = NULL, frequency_threshold = NULL, longest_word_length = NULL,
max_frequency = NULL, min_frequency = NULL,
uid = random_string("symmetric_delete_")){
# Validate and coerce all user-supplied arguments first.
args <- list(
input_cols = input_cols,
output_col = output_col,
dictionary_path = dictionary_path,
dictionary_token_pattern = dictionary_token_pattern,
dictionary_read_as = dictionary_read_as,
dictionary_options = dictionary_options,
max_edit_distance = max_edit_distance,
dups_limit = dups_limit,
deletes_threshold = deletes_threshold,
frequency_threshold = frequency_threshold,
longest_word_length = longest_word_length,
max_frequency = max_frequency,
min_frequency = min_frequency,
uid = uid
) %>%
validator_nlp_symmetric_delete()
# The reader options list is converted to an environment before being
# passed to Spark (presumably serialized as a java map -- see sparklyr).
if (!is.null(args[["dictionary_options"]])) {
args[["dictionary_options"]] = list2env(args[["dictionary_options"]])
}
# jobj_set_param is a no-op when the value is NULL, so only explicitly
# supplied options are forwarded to the JVM-side estimator.
jobj <- sparklyr::spark_pipeline_stage(
x, "com.johnsnowlabs.nlp.annotators.spell.symmetric.SymmetricDeleteApproach",
input_cols = args[["input_cols"]],
output_col = args[["output_col"]],
uid = args[["uid"]]
) %>%
sparklyr::jobj_set_param("setMaxEditDistance", args[["max_edit_distance"]]) %>%
sparklyr::jobj_set_param("setDupsLimit", args[["dups_limit"]]) %>%
sparklyr::jobj_set_param("setDeletesThreshold", args[["deletes_threshold"]]) %>%
sparklyr::jobj_set_param("setFrequencyThreshold", args[["frequency_threshold"]]) %>%
sparklyr::jobj_set_param("setLongestWordLength", args[["longest_word_length"]]) %>%
sparklyr::jobj_set_param("setMaxFrequency", args[["max_frequency"]]) %>%
sparklyr::jobj_set_param("setMinFrequency", args[["min_frequency"]])
# The dictionary setter takes several arguments, so it cannot go through
# jobj_set_param and is invoked directly when a path was provided.
if (!is.null(args[["dictionary_path"]])) {
sparklyr::invoke(jobj, "setDictionary", args[["dictionary_path"]], args[["dictionary_token_pattern"]],
read_as(x, args[["dictionary_read_as"]]), args[["dictionary_options"]])
}
new_nlp_symmetric_delete(jobj)
}
#' @export
# Pipeline method: build the estimator on the pipeline's connection and
# append it as a stage.
nlp_symmetric_delete.ml_pipeline <- function(x, input_cols, output_col,
dictionary_path = NULL, dictionary_token_pattern = "\\S+", dictionary_read_as = "LINE_BY_LINE",
dictionary_options = list("format" = "text"), max_edit_distance = NULL, dups_limit = NULL,
deletes_threshold = NULL, frequency_threshold = NULL, longest_word_length = NULL,
max_frequency = NULL, min_frequency = NULL,
uid = random_string("symmetric_delete_")) {
stage <- nlp_symmetric_delete.spark_connection(
x = sparklyr::spark_connection(x),
input_cols = input_cols,
output_col = output_col,
dictionary_path = dictionary_path,
dictionary_token_pattern = dictionary_token_pattern,
dictionary_read_as = dictionary_read_as,
dictionary_options = dictionary_options,
max_edit_distance = max_edit_distance,
dups_limit = dups_limit,
deletes_threshold = deletes_threshold,
frequency_threshold = frequency_threshold,
longest_word_length = longest_word_length,
max_frequency = max_frequency,
min_frequency = min_frequency,
uid = uid
)
sparklyr::ml_add_stage(x, stage)
}
#' @export
# Table method: construct the estimator, then fit it to the table and
# return the transformed table in one step.
nlp_symmetric_delete.tbl_spark <- function(x, input_cols, output_col,
dictionary_path = NULL, dictionary_token_pattern = "\\S+", dictionary_read_as = "LINE_BY_LINE",
dictionary_options = list("format" = "text"), max_edit_distance = NULL, dups_limit = NULL,
deletes_threshold = NULL, frequency_threshold = NULL, longest_word_length = NULL,
max_frequency = NULL, min_frequency = NULL,
uid = random_string("symmetric_delete_")) {
stage <- nlp_symmetric_delete.spark_connection(
x = sparklyr::spark_connection(x),
input_cols = input_cols,
output_col = output_col,
dictionary_path = dictionary_path,
dictionary_token_pattern = dictionary_token_pattern,
dictionary_read_as = dictionary_read_as,
dictionary_options = dictionary_options,
max_edit_distance = max_edit_distance,
dups_limit = dups_limit,
deletes_threshold = deletes_threshold,
frequency_threshold = frequency_threshold,
longest_word_length = longest_word_length,
max_frequency = max_frequency,
min_frequency = min_frequency,
uid = uid
)
stage %>% sparklyr::ml_fit_and_transform(x)
}
#' Load a pretrained Spark NLP model
#'
#' Create a pretrained Spark NLP \code{SymmetricDeleteModel} model
#'
#' @template roxlate-pretrained-params
#' @template roxlate-inputs-output-params
#' @export
nlp_symmetric_delete_pretrained <- function(sc, input_cols, output_col,
name = NULL, lang = NULL, remote_loc = NULL) {
# Only the column arguments need validation here; the model itself is
# downloaded/loaded by pretrained_model.
args <- list(
input_cols = input_cols,
output_col = output_col
) %>%
validator_nlp_symmetric_delete()
model_class <- "com.johnsnowlabs.nlp.annotators.spell.symmetric.SymmetricDeleteModel"
model <- pretrained_model(sc, model_class, name, lang, remote_loc)
# Point the loaded model at the caller's input/output columns.
spark_jobj(model) %>%
sparklyr::jobj_set_param("setInputCols", args[["input_cols"]]) %>%
sparklyr::jobj_set_param("setOutputCol", args[["output_col"]])
new_nlp_symmetric_delete_model(model)
}
#' @import forge
validator_nlp_symmetric_delete <- function(args) {
  # Coerce and validate user-supplied arguments before they reach Spark.
  args[["input_cols"]] <- cast_string_list(args[["input_cols"]])
  args[["output_col"]] <- cast_string(args[["output_col"]])
  for (nm in c("dictionary_path", "dictionary_token_pattern")) {
    args[[nm]] <- cast_nullable_string(args[[nm]])
  }
  int_args <- c(
    "max_edit_distance", "dups_limit", "deletes_threshold",
    "frequency_threshold", "longest_word_length",
    "max_frequency", "min_frequency"
  )
  for (nm in int_args) {
    args[[nm]] <- cast_nullable_integer(args[[nm]])
  }
  args
}
# Wrap a Spark jobj as an S3 estimator of class nlp_symmetric_delete.
new_nlp_symmetric_delete <- function(jobj) {
sparklyr::new_ml_estimator(jobj, class = "nlp_symmetric_delete")
}
# Wrap a fitted Spark jobj as an S3 transformer (the trained spell model).
new_nlp_symmetric_delete_model <- function(jobj) {
sparklyr::new_ml_transformer(jobj, class = "nlp_symmetric_delete_model")
}
|
d33541c7e5d59f0f58675d4511decb337bc076e8
|
b539cbf73742731f3363ba67e256632d8f4bf929
|
/models/churn/rf_jay.r
|
87a0be5ce4973d6ccefa29d4513609738939a1bb
|
[] |
no_license
|
emmanuelq2/454-kdd2009
|
b600042674d28021ac5c87b1499f5068efd3156a
|
5884645e6bfa77545f2b196760fc42e31a50bf07
|
refs/heads/master
| 2021-01-15T22:38:20.173538
| 2015-08-31T03:58:15
| 2015-08-31T03:58:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,458
|
r
|
rf_jay.r
|
## Random forest model for the "churn" target (KDD Cup 2009 team project).
library(randomForest)
library(dplyr)
library(ROCR)
## Candidate locations of the shared team Dropbox; use whichever exists on
## this machine.
dirs <- c('c:/Users/jay/Dropbox/pred_454_team',
'c:/Users/uduak/Dropbox/pred_454_team',
'C:/Users/Sandra/Dropbox/pred_454_team',
'~/Manjari/Northwestern/R/Workspace/Predict454/KDDCup2009/Dropbox',
'C:/Users/JoeD/Dropbox/pred_454_team'
)
for (d in dirs){
if(dir.exists(d)){
setwd(d)
}
}
# choose a script to load and transform the data
source('data_transformations/impute_0.r')
# the data needs to be in matrix form, so I'm using make_mat()
# from kdd_tools.r
source('kdd_tools.r')
# over sample the positive instances of churn (done below via
# strata/sampsize); drop the other two targets first
train <- select(train, -upsell, -appetency)
set.seed(2651)
## Stratified sampling (2500 per class) balances the rare positive class.
churn_rf_jay <- randomForest(factor(churn) ~ ., data = train,
nodesize = 4, ntree = 1000,
strata = factor(train$churn),
sampsize = c(2500, 2500)
)
## Class-1 probabilities on the test set and the ensemble hold-out set.
churn_rf_jay_predictions <- predict(churn_rf_jay, test,
type = 'prob')[,2]
churn_ens_rf_jay_predictions <- predict(churn_rf_jay, ensemble_test,
type = 'prob')[,2]
## AUC on the test set.
pred <- prediction(churn_rf_jay_predictions, test$churn)
perf <- performance(pred,'auc')
perf@y.values
save(list = c('churn_rf_jay_predictions', 'churn_rf_jay',
'churn_ens_rf_jay_predictions'),
file = 'models/churn/rf_jay.RData')
|
6f40e538c5227837129f124194ba6064580ad80b
|
1b1241229f6386662bef24be67ca94e0ac8f7ca5
|
/R/br_contribReadin2006.R
|
2974ff4145f8930a73074b809ee2f6a3f98c8de1
|
[] |
no_license
|
rafaeldjacome/CongressoAberto
|
c928815bef71008ffadefc7d2ea1d07fd75129a1
|
a4785785cb37e8095893dc411f0a030a57fd30f8
|
refs/heads/master
| 2020-03-18T00:33:50.482718
| 2010-04-14T14:46:51
| 2010-04-14T14:46:51
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 15,689
|
r
|
br_contribReadin2006.R
|
#### To do:
#### Export output table to mySQL
#### Incorporate Leoni's technique for a faster routine
####
#### Jul 16: Fixed issue with empty string names, which caused wrong CPF assignment to these cases
#### and consequent misclassification of "OTHERS", and a few other errors.
####
#### Reads campaign contributions data obtained by Leoni
#### Uses only a subset of "Deputados Federais", but could run for all
#### 1-Cleans donor and candidate names
#### 2-Classifies type of donor based on CPF/CGC in PJ,PF and (based on donationtype2) PP or Other
#### Other is basically "rendimentos de aplicacoes financeiras".
#### Note that this misses very few diretorios that have a CNPJ and are classified as PJ by the
### TSE. This correction is done at the end of the routine
#### At this point, we DO NOT store information regarding which level of the party donated funds
#### 3-Finds invalid CPF/CGC that can be internally corrected
#### invalid CPF/CGCs and "other" types of contributions are coded NAs in the cpfcgc column
#### As many of the invalid CPF/CGCs are from "campaigns" that donate funds, this step
### includes:
### 3.1 - A very lengthy routine that parses the donor-cmapaing data and looks up its cnpj
#### in the full contrib database.
#### 3.2 - A less lengthy routine that parses donor-committee data and looks up its cnpj
#### in the contribcommittee database
#### 4-Corrects donor type classification after matching, and does appropriate identification
#### of party donors, converting some PJs to PPs.
#### 5-Assembles list of unique donors
#### 6-Replaces all variations of names with the most comon name for each CPF
#### Note that when there are 2 variations, the algorithms selects the "first" variation which
#### might be the misspelled one. There is no real consequence but it might be worth
#### correcting this in the future (from the CPF/CGC online database?)
####
#### Inputs:
#### contrib2006.Rdta: just the raw data for all offices, read into R format
#### Outputs:
#### br_donorsunique2006fd.Rdta: unique donors with CPF/CGC for the 2006 federal deputy campaigns
#### br_donorsvariations2006fd.Rdta: list with the same lenght as the above, with all the name
#### variations for each donor
#### br_contrib2006fd.csv:: donation data with corrected names and cpf/cgcs for fed deptuty 2006
#### IMPORTANT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#### to READ THIS FILE: read.csv("br_contrib2006fd.csv",as.is=TRUE)
#### as.is is important because some cpfs and cgcs start with zeros!
####
################################################################################################
library(reshape)
rm(list=ls(all=TRUE))   # NOTE(review): clears the whole workspace; script-only convention
tmyear <- proc.time()   # start timer for the elapsed-time reports printed below
## paths
# Return the platform-appropriate project data directory (Windows vs unix).
rf <- function() {
if (.Platform$OS.type!="unix") {
"C:/reps/CongressoAberto/data/CampaignContributions" #added /data/CampaginContributions....
} else {
"~/reps/CongressoAberto/CampaignContributions"
}
}
run.from <- rf()
setwd(run.from)
load("contrib2006.Rdta")   # loads the data.frame `contrib`
# Keep only federal deputy races; column 14 is dropped (its meaning is not
# visible here -- confirm against the layout of contrib2006.Rdta).
d <- contrib[contrib$office=="Deputado Federal",-14]
rm(contrib)
d$year <- 2006
# Normalize a character vector of names: uppercase everything, strip
# Portuguese accents, and replace/remove punctuation. Behavior-identical
# rewrite of the original chain of gsub() calls (same operations, same order).
clean.text <- function(x) {
  out <- toupper(x)
  # Accented character -> unaccented replacement, applied in order.
  accent_map <- c(
    "Â" = "A", "Á" = "A", "Ã" = "A",
    "É" = "E", "Ê" = "E",
    "Í" = "I",
    "Ó" = "O", "Ô" = "O", "Õ" = "O",
    "Ú" = "U", "Ü" = "U",
    "Ç" = "C"
  )
  for (ch in names(accent_map)) {
    out <- gsub(ch, accent_map[[ch]], out)
  }
  out <- gsub("*", "", out, fixed = TRUE)   # drop asterisks
  out <- gsub("'", " ", out)                # apostrophes -> spaces
  out <- gsub(".", " ", out, fixed = TRUE)  # periods -> spaces
  out <- gsub("-", " ", out, fixed = TRUE)  # hyphens -> spaces
  out <- gsub("/", "", out, fixed = TRUE)   # drop slashes
  out <- gsub(" ", " ", out)                # spacing pass kept as in original
  return(out)
}
# Clean candidate and donor names, then classify each donation's source.
d$name <- clean.text(d$name)
d$donorname <- clean.text(d$donorname) #clean donornames
d$donorname <- ifelse(d$donorname=="",NA,d$donorname) #convert "" to NA
# Donor type: PF (individual, 11-digit CPF), PJ (firm, 14-digit CNPJ),
# PP (party/committee money, from donationtype2), Other (financial income).
# All-zero CPF/CGCs of either length are treated as invalid (NA).
# NOTE(review): "CANDIDATOS/COMITÊ" is singular here but plural ("COMITÊS")
# elsewhere in this script -- confirm which spelling the raw TSE data uses.
d$donortype <- ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA,
ifelse(nchar(d$cpfcgc)==11,"PF",
ifelse(d$donationtype2=="RECURSOS DE PARTIDO POLÍTICO","PP",
ifelse(d$donationtype2=="RECURSOS DE OUTROS CANDIDATOS/COMITÊ","PP",
ifelse(nchar(d$cpfcgc)==14,"PJ",
ifelse(d$donationtype2=="Rendimentos de aplicações financeiras","Other",NA))))))
# Use NA for empty, all-zero, or wrong-length CPF/CGC.
# BUGFIX: the original tested nchar(d$cpfcgc==11), i.e. nchar() of a logical
# vector ("TRUE"/"FALSE"), which is always non-zero/truthy -- so the length
# check was a no-op. Compare the string lengths themselves instead.
d$cpfcgc<- ifelse(d$cpfcgc=="",NA,
ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA,
ifelse(nchar(d$cpfcgc)==11 | nchar(d$cpfcgc)==14,as.character(d$cpfcgc),NA)))
###### TRY TO CORRECT INVALID CPF/CGCS ##############################################################################
#INTERNAL CHECK: Check for cases with "invalid" CPF/CGCs that also appear with valid CPF/CGCs: #####################
#This is done before name replacement to make use of different spellings!!!!
unique.invalid <- na.omit(unique(d[is.na(d$cpfcgc),"donorname"])) #invalid cpfcgc
cases.invalid <- nrow(d[is.na(d$cpfcgc),]) #just for on screen reporting
cat("Found",length(unique.invalid),"names/",sum(is.na(d$cpfcgc)),"cases, with invalid or missing CPF/CGC\n")
unique.valid <- na.omit(unique(d[is.na(d$cpfcgc)==FALSE,"donorname"])) #valid cpfcgc
unique.invalid.matched <- unique.invalid[is.element(unique.invalid,unique.valid)] #which are matched
# For each name that appears both with and without a valid CPF/CGC, fill the
# missing values with that name's most frequent CPF/CGC.
for(i in unique.invalid.matched){
d$cpfcgc[which(is.na(d$cpfcgc)&d$donorname==i)] <- #replace missing CPFCGCs for matched name
names(sort(table(d$cpfcgc[d$donorname==i]),decreasing=TRUE)[1]) #with most comon CPFCGC for that name
}
unique.invalid <- unique(d[is.na(d$cpfcgc),"donorname"]) #invalid cpfcgc after corrections
cat("\tINTERNAL CORRECTION: fixed",length(unique.invalid.matched),"names/",cases.invalid-nrow(d[is.na(d$cpfcgc),]),"cases with invalid CPF/CGC\n")
cases.invalid <- nrow(d[is.na(d$cpfcgc),]) #for reporting on screen
cat("\tTime elapsed:",((proc.time()-tmyear)[3])/60,"mins","\n")
#EXTERNAL CHECK: look the CNPJ of campaigns and committees in other databases #######################################
unique.invalid.data <- unique(d[which(is.na(d$cpfcgc)& #unique candidates or committee contributors missing cnpj
d$donationtype2=="RECURSOS DE OUTROS CANDIDATOS/COMITÊS"),"donorname"] )
# CAMPAIGNS: Here we look up the CNPJ of campaigns that donated to other campaings
#It is necessary to parse information form the donorname field to search for candidates in original contrib file
unique.invalid.cand <- unique.invalid.data[-grep("COMITE",
unique.invalid.data)]#get rid of commitees, keep only campaigns
# Donor names of campaigns look like "<NAME> <candno> <state>"; split them.
unique.invalid.cand <- data.frame(orig=as.character(unique.invalid.cand), #parse candidate campaign info
name=as.character(gsub("^(.*)\\s(\\d{2,5})\\s(\\w{2})$","\\1",unique.invalid.cand,perl=TRUE)),
candno=as.character(gsub("^(.*)\\s(\\d{2,5})\\s(\\w{2})$","\\2",unique.invalid.cand,perl=TRUE)),
state=as.character(gsub("^(.*)\\s(\\d{2,5})\\s(\\w{2})$","\\3",unique.invalid.cand,perl=TRUE)))
cat("\tFound",nrow(unique.invalid.cand),"campaings that donated and have missing CNPJ\n\t")
load("contrib2006.Rdta")
# NOTE(review): 1:nrow(...) iterates c(1, 0) when the frame has zero rows;
# seq_len(nrow(...)) would be safer -- left as-is (comment-only pass).
for(j in 1:nrow(unique.invalid.cand)){ #for each campaing donor with missing CNPJ
the.cnpj <- names(sort(table(
contrib$candcnpj[which(contrib$state==as.character(unique.invalid.cand$state[j])&
contrib$candno==as.character(unique.invalid.cand$candno[j]))]
),decreasing=TRUE))[1] #get most common among all used cnp
d[which(d$donorname==as.character(unique.invalid.cand$orig[j])),"cpfcgc"]<-
ifelse(is.null(the.cnpj),NA,the.cnpj) #replace missing CPFCGC with correct number
if(round(j/100)-j/100==0){cat(j,"...")
flush.console()} #report advances periodically
}
flush.console()
rm(contrib)
unique.invalid <- unique(d[is.na(d$cpfcgc),"donorname"]) #invalid cpfcgc after corrections, for future reference
cat("\tTime elapsed:",((proc.time()-tmyear)[3])/60,"mins","\n")
# COMMITTEESS Now, do something similar for COMMITTEEs, source of data is contrib2006committee.Rdta
unique.invalid.com <- unique.invalid.data[grep("COMITE", #now, do something similar for committees
unique.invalid.data)]
unique.invalid.com <- gsub("\\d","",unique.invalid.com,perl=TRUE) #get rid of numbers left over
# Committee donor names look like "<COMMITTEE> <PARTY> <state>"; split them.
unique.invalid.com <- data.frame(orig=unique.invalid.com, #parse campaign info
committee=gsub("^(.*)\\s(P\\w{2,7})\\s(\\w{2})$","\\1",unique.invalid.com,perl=TRUE),
party=gsub("\\s$","",gsub("^.*\\s(P\\D{1,7})\\s\\w{2}\\s?$","\\1",unique.invalid.com,perl=TRUE)),
state=gsub("^.*\\s(\\w{2})\\s?$","\\1",unique.invalid.com,perl=TRUE))
# If the party field captured extra words, keep only the last token.
unique.invalid.com$party <- ifelse(nchar(as.character(unique.invalid.com$party))>7,
gsub(".*\\s(\\w*)$","\\1",as.character(unique.invalid.com$party),perl=TRUE),
as.character(unique.invalid.com$party))
unique.invalid.com$state <- ifelse(nchar(as.character(unique.invalid.com$state))>2,
"ES", #manual fix for one case
as.character(unique.invalid.com$state))
cat("\tFound",nrow(unique.invalid.com),"committees that donated and have missing CNPJ\n\t")
load("contribcommittee2006.Rdta")   # reuses the name `contrib` for committee data
contrib$committeecnpj <- ifelse(nchar(as.character(contrib$committeecnpj))!=14,NA,as.character(contrib$committeecnpj))
for(j in 1:nrow(unique.invalid.com)){ #for each campaing donor with missing CNPJ
the.cnpj <- names(sort(table(
contrib$committeecnpj[which(contrib$state==as.character(unique.invalid.com$state[j])&
contrib$party==as.character(unique.invalid.com$party[j]))]
),decreasing=TRUE))[1] #find most comonly used cnpj
if(unique.invalid.com$orig[j]==""){next}#skip if committee names is empty
d[which(d$donorname==as.character(unique.invalid.com$orig[j])),"cpfcgc"]<-
ifelse(is.null(the.cnpj),NA,the.cnpj) #replace missing CPFCGC with correct number
if(round(j/20)-j/20==0){cat(j,"...")} #report advances periodically
}
flush.console()
rm(contrib)
cat("\tTime elapsed:",((proc.time()-tmyear)[3])/60,"mins","\n")
unique.invalid.data.2 <- unique(d[which(is.na(d$cpfcgc)& #unique candidates or committee contributors missing cnpj
d$donationtype2=="RECURSOS DE OUTROS CANDIDATOS/COMITÊS"),"donorname"] )
cat("\n\tEXTERNAL CORRECTION for Committees & Campaigns fixed",
length(unique.invalid.data)-length(unique.invalid.data.2),"names\n")
# Redo donortype classification, after previous corrections of CPF/CGC ###########################################
d$donortype <- ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA, #what out for invalids with 11 and 14 chars.
ifelse(nchar(d$cpfcgc)==11,"PF",
ifelse(d$donationtype2=="RECURSOS DE PARTIDO POLÍTICO","PP",
ifelse(d$donationtype2=="RECURSOS DE OUTROS CANDIDATOS/COMITÊ","PP",
ifelse(nchar(d$cpfcgc)==14,"PJ",
ifelse(d$donationtype2=="Rendimentos de aplicações financeiras","Other",
ifelse(d$donationtype2=="Recursos próprios","Self",NA)))))))
# Any donor name mentioning a committee, diretorio, or party is party money.
party.donors <- union(union( #aditional criteria to make sure party donations are classified as such
grep("COMITE|Ê",d$donorname),grep("DIRETORIO",d$donorname))
,grep("PARTIDO",d$donorname))
d$donortype[party.donors]<-"PP"
# Use NA for empty, all-zero, or wrong-length CPF/CGC.
# BUGFIX: the original wrote nchar(d$cpfcgc==11) -- nchar() of a logical
# vector -- which is always truthy, so malformed lengths were never NA'd.
d$cpfcgc<- ifelse(d$cpfcgc=="",NA,
ifelse(is.element(d$cpfcgc,c("00000000000","00000000000000")),NA,
ifelse(nchar(d$cpfcgc)==11 | nchar(d$cpfcgc)==14,as.character(d$cpfcgc),NA)))
# Report donation totals by donor type and the share with missing CPF/CGC.
by(d$donation,d$donortype,sum)
cat("\t Invalid or missing CPF/CGC correspond to",round(100*sum(d$donation[is.na(d$cpfcgc)])/sum(d$donation),2),"% of total donations\n")
cat("\t If Political Parties are excluded, missingness is reduced to",round(sum(d[is.na(d$donortype),"donation"])/sum(d$donation)*100,2),"% of total donations\n")
#Assemble unique donors ################################################################################################
cat("Assembling list of unique donors")
# One row per distinct valid CPF/CGC; `donor` and `variations` are filled in
# by the loop below.
unique.donors <- data.frame(donor=NA,
cpfcgc=as.character(na.omit(unique(d$cpfcgc))), #drop NA's
variations=NA,
e2006=TRUE) #create dataframe to store unique donor info
variations <- list() #create object to store different spellings
for(i in 1:nrow(unique.donors)){
donors <- sort(table(as.character(d$donorname[d$cpfcgc==unique.donors$cpfcgc[i]])),decreasing=TRUE)#find all name variations for a given cpfcgc
unique.donors$donor[i] <- ifelse(is.null(names(donors)),"UNKOWN",names(donors)[1]) #use most comon name variation
#IFELSE is necessary because there might be empty name (NA) with valid CPF
unique.donors$variations[i] <- length(donors) #take note of number of different spellings
variations[[i]] <- names(donors) #store, in a separate object, all different spellings
if(round(i/500)-i/500==0){cat(i,"...")
flush.console()} #report advances periodically
}
write.csv(unique.donors,file="br_donorsunique2006fd.csv",row.names=FALSE)
save(variations,file="br_donorsvariations2006fd.Rdta")
### Standarize: One name for each CPF.###################################################################################
cat("Standarizing names")
flush.console()
d$donor<- d$donorname #create new name field, typically the same as old names
# Only CPF/CGCs with multiple spellings need replacing; all rows with that
# CPF/CGC get the most common spelling.
for(i in which(unique.donors$variations>1)){ #replace newnames of cases with variations with the most comon name
d$donor[which(as.character(d$cpfcgc)==as.character(unique.donors$cpfcgc[i]))] <- as.character(unique.donors$donor[i])
}
write.csv(d,file="br_contrib2006fd.csv",row.names=FALSE)
cat("\tTime elapsed:",((proc.time()-tmyear)[3])/60,"mins","\n")
|
05539034a6f55caa5b92d4a818d23a455c2cbc60
|
5ae9dc9e4052d7e6117c76fbf08041b3871b2e5e
|
/Analysis/sce_zinbwave.R
|
9d7d12d6cec61971cb2cca33829512f0aaa13366
|
[] |
no_license
|
LieberInstitute/HumanPilot
|
110074304e850b5110d70067d8022c0be31cb352
|
eca2070bcd09282a8adbdc3de310084834b7cd9e
|
refs/heads/master
| 2023-04-13T13:14:41.101756
| 2023-02-10T16:25:59
| 2023-02-10T16:25:59
| 225,910,046
| 50
| 25
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,362
|
r
|
sce_zinbwave.R
|
# screen -S sce
# qrsh -l mem_free=60G,h_vmem=60G,h_fsize=100G -pe local 4
# module load conda_R/3.6.x
# Setup for ZINB-WaVE dimensionality reduction / clustering of the Visium
# DLPFC SingleCellExperiment. Depends on outputs of convert_sce.R and
# sce_scran.R; interactive cluster-session script, not runnable in isolation.
library('SingleCellExperiment')
library('zinbwave')
library('clusterExperiment')
library('BiocParallel')
library('scran')
library('RColorBrewer')
library('sessioninfo')
dir.create('pdf_zinbwave', showWarnings = FALSE)
dir.create('rda_zinbwave', showWarnings = FALSE)
## From convert_sce.R
load('geom_spatial.Rdata', verbose = TRUE)
## From sce_scran.R
load('Human_DLPFC_Visium_processedData_sce_scran.Rdata',
verbose = TRUE)
## For parallelization purposes
register(MulticoreParam(4))   # 4 workers, matching the qrsh allocation above
table(rowSums(assay(sce) > 1))
table(rowSums(assay(sce) > 1) > 5)
# FALSE TRUE
# 19352 14186
table(rowSums(assay(sce) > 2) > 5)
# FALSE TRUE
# 24809 8729
table(rowSums(assay(sce) > 3) > 5)
# FALSE TRUE
# 28660 4878
table(rowSums(assay(sce) > 5) > 5)
# FALSE TRUE
# 31786 1752
# filter <- rowSums(assay(sce) > 1) > 5
# filtered <- sce[filter, ]
## makeFilterStats and zinbwave() doesn't work with the Matrix object
filtered <- sce[top.hvgs,]
assays(filtered)$counts <- as.matrix(assays(filtered)$counts)
# assays(filtered)$logcounts <- as.matrix(assays(filtered)$logcounts)
## From
## https://www.bioconductor.org/packages/release/bioc/vignettes/scone/inst/doc/sconeTutorial.html#sample-filtering-with-metric_sample_filter
# > quantile(assay(sce)[assay(sce) > 0])
# 0% 25% 50% 75% 100%
# 1 1 1 2 610
num_reads <- quantile(assay(filtered)[assay(filtered) > 0])[4]
num_reads
# 75%
# 2
num_cells <- 0.25 * ncol(filtered)
num_cells
# [1] 11920.25
is_common <- rowSums(assay(filtered) >= num_reads) >= num_cells
table(is_common)
# FALSE TRUE
# 33039 499
## Drop in favor of the scran filtered list of genes output
# ## Continue with makeFilterStats()
# filtered <- makeFilterStats(filtered, filterStats="var", transFun = log1p)
# filtered <- filterData(filtered, percentile = 2000, filterStats="var")
# filtered
#
# table(rowSums(assay(filtered) > 1) > 5)
# # TRUE
# # 2000
#
# table(rowSums(assay(filtered) >= num_reads ) >= num_cells)
# # FALSE TRUE
# # 1501 499
## Adjust for sample
Sys.time()
clustered <-
zinbwave(
filtered,
K = 50,
X = "~ subject_position",
residuals = TRUE,
normalizedValues = TRUE,
observationalWeights = TRUE,
verbose = TRUE,
BPPARAM = BiocParallel::MulticoreParam(4),
epsilon = 1e12
)
Sys.time()
## Takes about 20 hours to run!
# [1] "2019-11-12 14:11:12 EST"
# [1] "2019-11-13 09:44:34 EST"
save(clustered, file = 'rda_zinbwave/clustered.Rdata')
Sys.time()
## Set some colors
col_samples <-
brewer.pal('Set3', n = length(unique(filtered$sample_name)))
names(col_samples) <- unique(filtered$sample_name)
## From
## https://bioconductor.github.io/BiocWorkshops/analysis-of-single-cell-rna-seq-data-dimensionality-reduction-clustering-and-lineage-inference.html#dimensionality-reduction
W <- reducedDim(clustered, 'zinbwave')
d <- dist(W)
length(d)
# [1] 1136715040
## Did not work: required too much mem
fit <- cmdscale(d, eig = TRUE, k = 2)
# pdf('pdf_zinbwave/mds_by_sample.pdf', useDingbats = FALSE)
# plot(fit$points, col = col_clus[filtered$sample_name], main = "",
# pch = 20, xlab = "Component 1", ylab = "Component 2")
# legend(x = "topleft", legend = names(col_samples), cex = .5,
# fill = col_samples, title = "Sample")
# dev.off()
## Try with scran
Sys.time()
g_k5 <- buildSNNGraph(clustered, k = 5, use.dimred = 'zinbwave')
Sys.time()
## Takes about 2 minutes
Sys.time()
g_walk_k5 <- igraph::cluster_walktrap(g_k5)
Sys.time()
## Takes about 7 min
# [1] "2019-11-15 10:17:15 EST"
# [1] "2019-11-15 10:24:00 EST"
clust_k5 <- sort_clusters(g_walk_k5$membership)
length(unique(clust_k5))
# [1] 85
## Too many!
save(g_k5, g_walk_k5, file = 'rda_zinbwave/g_k5.Rdata')
# sce_image_grid(clustered, clust_k5, 'pdf_zinbwave/grid_SNN_k5_noXY.pdf', colors = cols)
## Try with K = 10
Sys.time()
g_k10 <- buildSNNGraph(clustered, k = 10, use.dimred = 'zinbwave')
Sys.time()
## Takes about 2 minutes
Sys.time()
g_walk_k10 <- igraph::cluster_walktrap(g_k10)
Sys.time()
# [1] "2019-11-15 12:27:46 EST"
# [1] "2019-11-15 12:50:10 EST"
clust_k10 <- sort_clusters(g_walk_k10$membership)
length(unique(clust_k10))
# [1] 45
save(g_k10, g_walk_k10, file = 'rda_zinbwave/g_k10.Rdata')
## And with K = 50
Sys.time()
g_k50 <- buildSNNGraph(clustered, k = 50, use.dimred = 'zinbwave')
Sys.time()
## Takes about 2 minutes
Sys.time()
g_walk_k50 <- igraph::cluster_walktrap(g_k50)
Sys.time()
clust_k50 <- sort_clusters(g_walk_k50$membership)
length(unique(clust_k50))
save(g_k50, g_walk_k50, file = 'rda_zinbwave/g_k50.Rdata')
## Remove since they are not needed right now
rm(d, W)
Sys.time()
## Fails due to memory
clustered <- RSEC(
clustered,
k0s = 4:15,
alphas = c(0.1),
betas = 0.8,
reduceMethod = "zinbwave",
clusterFunction = "hierarchical01",
minSizes = 1,
ncores = 1,
isCount = FALSE,
dendroReduce = "zinbwave",
subsampleArgs = list(
resamp.num = 100,
clusterFunction = "kmeans",
clusterArgs = list(nstart = 10)
),
verbose = TRUE,
consensusProportion = 0.7,
mergeMethod = "none",
random.seed = 20191111,
consensusMinSize = 10
)
Sys.time()
library('ClusterR')
# clusterExperiment-compatible k-means wrapper around ClusterR::KMeans_rcpp.
# x: matrix that is transposed before clustering (presumably features x
#    samples, with KMeans_rcpp wanting observations in rows -- confirm);
# k: number of clusters; checkArgs: accepted for interface compatibility
# (unused); cluster.only: TRUE -> bare cluster vector, FALSE -> list with a
# `clustering` element; ...: forwarded to KMeans_rcpp.
km_rcpp <- function(x, k, checkArgs, cluster.only, ...) {
  fit <- ClusterR::KMeans_rcpp(t(x), clusters = k, ...)
  assignments <- fit$clusters
  if (cluster.only) assignments else list(clustering = assignments)
}
# Register km_rcpp as a clusterExperiment ClusterFunction: takes a data
# matrix ("X"), is a fixed-K algorithm ("K"), and returns a plain vector of
# cluster assignments.
myCF <-
ClusterFunction(
clusterFUN = km_rcpp,
inputType = "X",
algorithmType = "K",
outputType = "vector"
)
clustered <- RSEC(
clustered,
k0s = 4:15,
alphas = c(0.1),
betas = 0.8,
reduceMethod = "zinbwave",
clusterFunction = "hierarchical01",
minSizes = 1,
ncores = 1,
isCount = FALSE,
dendroReduce = "zinbwave",
subsampleArgs = list(
resamp.num = 100,
clusterFunction = myCF,
clusterArgs = list(num_init = 10)
),
verbose = TRUE,
consensusProportion = 0.7,
mergeMethod = "none",
random.seed = 20191111,
consensusMinSize = 10
)
## Reproducibility information
print('Reproducibility information:')
Sys.time()
proc.time()
options(width = 120)
session_info()
|
294d94dc60ac254c039aff80685e222fb582c8a9
|
7a7375245bc738fae50df9e8a950ee28e0e6ec00
|
/R/LGA__Year_Sex_SpeaksEnglishOnly.R
|
787445413fe7cffef1b410336da69a8fe9e51d6f
|
[] |
no_license
|
HughParsonage/Census2016.DataPack.TimeSeries
|
63e6d35c15c20b881d5b337da2f756a86a0153b5
|
171d9911e405b914987a1ebe4ed5bd5e5422481f
|
refs/heads/master
| 2021-09-02T11:42:27.015587
| 2018-01-02T09:01:39
| 2018-01-02T09:02:17
| 112,477,214
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
LGA__Year_Sex_SpeaksEnglishOnly.R
|
#' @title Sex, SpeaksEnglishOnly by LGA, Year
#' @description Number of persons by Sex and SpeaksEnglishOnly for each LGA and Year.
#' @format 6,756 observations and 5 variables.
"LGA__Year_Sex_SpeaksEnglishOnly"
|
366e0267148eb127c7fc129ea96e8f9c0d186147
|
5a5271666cebe26ee08d92fff11fdbd1a207deb9
|
/ui.R
|
990125eaa59b04e54ef48873b680c7ff0ee9086c
|
[] |
no_license
|
elias-nicholson/NFL-2016
|
41745bef6bcb941db993912b2103e79b0ee20117
|
9a26593cd3042134f5e991c96b9086809afd1588
|
refs/heads/master
| 2021-01-01T19:09:46.034735
| 2017-08-07T17:48:57
| 2017-08-07T17:48:57
| 98,529,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,369
|
r
|
ui.R
|
library(shiny)
library(leaflet)
vars <- c(
"Rank" = "Rank",
"Points Per Game" = "PointsPerGame",
"Total Points" = "TotalPoints",
"Plays From Scrimmage" = "PlaysFromScrimmage",
"Yards Per Game" = "YardsPerGame",
"Yards Per Play" = "YardsPerPlay",
"1st Down Per Game" = "1stDownPerGame",
"3rd Down Made" = "3rdDownMade",
"3rd Down Attempted" = "3rdDownAttempted",
"3rd Down Percent" = "3rdDownPercent",
"4th Down Made" = "4thDownMade",
"4th Down Attempted" = "4thDownAttempted",
"4th Down Percent" = "4thDownPercent",
"Total Penalties" = "Penalties",
"Penalty Yards" = "PenaltyYards",
"Total Fumbles" = "Fumbles",
"Fumbles Lost" = "FumblesLost",
"Turn Overs" = "TurnOvers",
"Superbowl Wins" = "SuperbowlWin",
"Superbowl Losses" = "SuperbowlLoss",
"Superbowl Percent Win" = "SuperbowlPercent",
"Superbowl Points" = "SuperbowlPoints",
"Superbowl Opposing Points" = "SuperbowlOpposingPoints")
shinyUI(fluidPage(
navbarPage("NFL 2016 Data in ColoR", id = "nav",
tabPanel("Interactive Data Map",
tags$head(
# Include our custom CSS
includeCSS("solarized.css")
),
div(class="outer",
tags$head(
# Include our custom CSS
includeCSS("styles.css"),
includeScript("activemap2.js")
),
leafletOutput("map", width="100%", height="100%"),
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
width = 330, height = "auto",
h2("Explorer"),
selectInput("size","Size", vars, selected = "Rank"),
selectInput("color","Color", vars, selected = "PointsPerGame"),
plotOutput("scatterNFL2016", height = 200),
plotOutput("superbowlhist", height = 250),
actionButton("reset_button", "Reset View")
)
)
),
tabPanel("PlotR",
fluidRow(
column(4,
selectInput("xaxis","X-Axis", vars, selected = "Rank")
),
column(4,
selectInput("yaxis", "Y-Axis", vars, selected = "Rank")
),
column(2,
radioButtons("cluster","Cluster Choice", choices = list("none","Conference","Division"), selected = "none")),
column(2,
radioButtons("regression", "Regression Type",choices = c("none","linear","loess"), selected = "none"))
),
plotOutput("InteractiveScatter"),
h3(textOutput("correlation"), align = "center")
),
tabPanel("Data",
fluidRow(
column(3,
selectInput("teams", "Teams", c("All Teams"="",structure(NFL2016stats$Team, names = NFL2016stats$Team )), multiple=TRUE)
)
),
fluidRow(
column(3,
numericInput("minRank", "Min Rank", min=1, max=32, value=1)
),
column(3,
numericInput("maxRank", "Max Rank", min=1, max=32, value=32)
)
),
hr(),
DT::dataTableOutput("NFLtable")
),
tabPanel("Information", theme = "solarized.css",
sidebarLayout(position = "right",
sidebarPanel(
h2("Contact Information",style = "color:white"),
hr(),
h3("Email:"),
h3("efn3@hood.edu"),
h3("GitHub:"),
h3("elias-nicholson")
),
mainPanel(
fluidRow(
column(8, align = "center", offset = 2, style = "color:white",
h1("Application Features"))
),
fluidRow(
column(6, align = "center", offset = 3,
h3("Spatial Mapping"))
),
fluidRow(
column(8, align = "center", offset = 2,
p("The spatial map included in this app allows users to view the data regionally and take the location into account.
The primary feature of the page is the map in which the points representing the locations of the teams can be adjusted
in size and color. This feature can be accessed via the draggable panel located on the right side of the screen. The second
feature of this page includes two plots which are reactive to the points which are in view on the screen. The first reactive plot
is a scatterplot representing the team ranks versus their points per game. The second of the reactive plots is a barplot which
increases or decreases dependent upon the number of Superbowl wins the visible teams contain."))
),
fluidRow(
column(6, align = "center", offset = 3,
h3("Interactive Data Table"))
),
fluidRow(
column(8, align = "center", offset = 2,
p("The interactive data table feature allows users a few different ways to sort and filter their data.
The primary method for filtering this data that is used in the app is the selection of the team/s. This can be done
by either typing or selecting the team/s which are desired in the dropdown menu. The primary method of sorting that the
data table utiliizes is ascending or descending sorting by any of the variables contatined in the data table.
This sorting feature can be utilized by clicking the up/down arrows which are located next to each of the variables."))
),
fluidRow(
column(6, align = "center", offset = 3,
h3("PlotR"))
),
fluidRow(
column(8, align = "center", offset = 2,
p("The interactive plot has several different ways that the data can be viewed. The first feature of the plot is the choice in the
variables plotted. The variables can be selected via dropdown menu. The next feature is adjusting the grouping of the points. Two
different forms of grouping available is grouping by conference or grouping by division. The next feature available is the option
to view trends in two different ways. The first way to view trends is through the linear model; the linear model relays information
for each group pertaining to the formula and the R-squared value. The second way to view the trends is through the loess model; the
loess model which bases its fitting on localized subsets to allow for more accurate modeling of curves."))
),
fluidRow(
column(8, align = "center", offset = 2,
h5("Acknowledgements: Data gathered from the National Football League, cascading style sheet generated by bootswatch, and map generated by OpenStreetMap"))
)
)
)
),
conditionalPanel("false", icon("crosshair"))
)
)
)
|
ed1a3ec337d31178967f655ae68b9f2e63044b6e
|
54ed1cde940049aecaf859c897a12ff90f83aff8
|
/man/lemad_prepare_q_matrix.Rd
|
823335e2c70f13eaa63b750a302a1d9fde16be97
|
[] |
no_license
|
leonelhalsina/lemad
|
e23c735fa9499c9912efddd766be6f92f3c89271
|
cca43ebff36b67fd78a11c58257a8f4cc15572bf
|
refs/heads/main
| 2023-09-01T17:51:24.864455
| 2023-08-07T13:25:25
| 2023-08-07T13:25:25
| 508,330,655
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 804
|
rd
|
lemad_prepare_q_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lemad_utils.R
\name{lemad_prepare_q_matrix}
\alias{lemad_prepare_q_matrix}
\title{Transition matrix preparation.}
\usage{
lemad_prepare_q_matrix(
all_area_combination,
matrices_names,
id_q_expansion,
id_q_contraction
)
}
\arguments{
\item{all_area_combination}{Matrix with all regions combinations coming from function prepare_full_lambdas_vicariance().}
\item{matrices_names}{Vector with all regions names coming from function prepare_full_lambdas_vicariance().}
\item{id_q_expansion}{Parameter ID for colonization (range expansion) rate.}
\item{id_q_contraction}{Parameter ID for contraction (local extinction/extirpation) rate.}
}
\value{
the transtion matrix.
}
\description{
Prepares a q matrix for range expansion and contraction.
}
|
8d3561f0589c8cb113cf9e97c3597193043ee43d
|
2b52b13f54b7139851b9ff0d1ca9ac547d54c3bb
|
/1b.R
|
0c03f4ec76f72c5c434596610fb63e7d2b530ef1
|
[] |
no_license
|
friedpine/scRNASeq_ESCC
|
1fd37d98eef83b8ed44c9d2789ae852afb102ffc
|
1c0b0011337671c9212b290ec92b054a5202f6ac
|
refs/heads/main
| 2023-04-30T15:05:58.418996
| 2021-05-13T08:26:03
| 2021-05-13T08:26:03
| 365,109,859
| 6
| 3
| null | 2021-05-13T08:26:03
| 2021-05-07T04:04:57
|
R
|
UTF-8
|
R
| false
| false
| 2,404
|
r
|
1b.R
|
# Figure 1b: CD45- compartment t-SNE plots (by cell type and by tissue) for
# the ESCC scRNA-seq study, plus export of the underlying source data.
# Depends on a precomputed cell-info RDS; not runnable in isolation.
setwd("/home/data/human/ESCC/Linna-ESCC/Fig/source_data/")
library(scales)
library(ggplot2)
####CD45- TSNE####
df1 <- readRDS("/home/data/human/ESCC/Linna-ESCC/Fig/Cellinfo_20190924/Cell.info/CD45-_addTEC5_info.rds")
# One data frame per cell type so each can be drawn as its own layer.
df_EPI=df1[df1$celltype=="Epithelial",]
df_FIBRO=df1[df1$celltype=="Fibroblast",]
df_Endo=df1[df1$celltype=="Endothelial",]
df_FRC=df1[df1$celltype=="FRC",]
df_peri=df1[df1$celltype=='Pericytes',]
# Palette order matches the celltype factor levels used in the legend plot.
col3<-c("#f57665",'#1279A2',"#CAA57D","#f1c550","#0b8457")
# t-SNE colored by cell type, one geom_point layer per type (no legend).
ggplot(df_FRC, aes(tSNE1, tSNE2))+
geom_point(size=0.3,alpha=0.6,color="#0b8457")+
geom_point(data=df_EPI, aes(tSNE1, tSNE2), color="#f57665", size=0.3,alpha=0.6)+
geom_point(data=df_Endo, aes(tSNE1, tSNE2), color="#f1c550",size=0.3,alpha=0.6)+
geom_point(data=df_FIBRO, aes(tSNE1, tSNE2), color="#1279A2", size=0.3,alpha=0.6)+
geom_point(data=df_peri, aes(tSNE1, tSNE2), color="#CAA57D", size=0.3,alpha=0.6)+
guides(color=guide_legend(title=NULL,override.aes = list(size = 5)))+
theme_linedraw()+theme(panel.grid =element_blank())+
theme(legend.position = 'none' )
#ggsave("CD45-_TSNE.png",res1, height = 6, width = 6)
# Same plot via a color mapping, kept for its legend.
ggplot(df1, aes(tSNE1, tSNE2,color=celltype))+
geom_point(size=0.3)+ scale_color_manual(values=col3)+
guides(color=guide_legend(title=NULL,override.aes = list(size = 5)))+
theme_linedraw()+theme(panel.grid =element_blank())+
theme(legend.position="bottom")
#ggsave("CD45-_TSNE_legend.png",res2, height = 6, width = 6)
# t-SNE colored by tissue (tumor vs adjacent normal).
df_t=df1[df1$tissue=='Tumor',]
df_n=df1[df1$tissue=='Adj. normal',]
ggplot(df_t, aes(tSNE1, tSNE2))+
geom_point(size=0.3,alpha=0.8,color="#C6DBEF")+
geom_point(data=df_n, aes(tSNE1, tSNE2), color="#08306B", size=0.3,alpha=0.8)+
guides(color=guide_legend(title=NULL,override.aes = list(size = 5)))+
theme_linedraw()+theme(panel.grid =element_blank())+
theme(legend.position = 'none' )
ggplot(df1, aes(tSNE1, tSNE2, color=tissue))+
geom_point(size=0.3)+ scale_color_manual(values=rev(c("#C6DBEF","#08306B")))+
guides(color=guide_legend(title=NULL,override.aes = list(size = 5)))+
theme_linedraw()+theme(panel.grid =element_blank())+
theme(legend.position="bottom")
#ggsave("CD45-_Tissue.png",res3, height = 6, width = 6)
#ggsave("CD45-_Tissue_legend.png",res4, height = 6, width = 6)
# NOTE(review): edit() opens an interactive editor -- looks like a leftover
# from interactive exploration of the column names.
edit(colnames(df1))
# Export the columns backing Figure 1b.
result=df1[,c("cell", "tissue", "sample", "celltype", "tSNE1", "tSNE2")]
write.table(result,"Fig1b.txt",quote = F,sep="\t",row.names = F)
|
9ec97f2ce679de4b39bf4b5e0bd0010e5b7849b5
|
c4a1f2829277b32dcc5cad6e450548280a7030a0
|
/man/read_sql_query.Rd
|
0f3937e8d684f26e7544f56fb16b4fd4387b856b
|
[
"MIT"
] |
permissive
|
moj-analytical-services/dbtools
|
32f309d7b1e1a991c1b2b9bcc8a5a3e774731641
|
b40672c8e4d91c01991ffd5bdf9bed3a5265ce46
|
refs/heads/main
| 2023-08-04T14:40:34.161304
| 2022-07-13T11:12:57
| 2022-07-13T11:12:57
| 145,823,893
| 6
| 4
|
NOASSERTION
| 2023-05-23T03:21:15
| 2018-08-23T08:24:09
|
R
|
UTF-8
|
R
| false
| true
| 469
|
rd
|
read_sql_query.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{read_sql_query}
\alias{read_sql_query}
\title{Send an SQL query to Athena and receive a dataframe.}
\usage{
read_sql_query(sql)
}
\arguments{
\item{sql}{An SQL query}
}
\value{
Dataframe or tibble if the tibble library is loaded.
}
\description{
Send an SQL query to Athena and receive a dataframe.
}
\examples{
`df <- dbtools::read_sql_query('select * from my_db.my_table')`
}
|
4fbec1eb17f433344e318ead37ebcd3c1d4a17f3
|
e1c388af0d6464a60848d46e3379c1ce24deb7e3
|
/R/run_all.r
|
872adabff498d41e5c244efd1c2051e09a40b32d
|
[] |
no_license
|
mengeln/PHAB-metrics
|
485886dbf0591be17f6050f0ea3e0a2d5cb3e9e6
|
6387e32611cc9eecb53ef4a5e9dbab79f83eb0bd
|
refs/heads/master
| 2021-03-12T23:55:50.680066
| 2013-02-06T21:28:19
| 2013-02-06T21:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,592
|
r
|
run_all.r
|
### PHAB metrics ###
### Metric Calculations ###
# Runs every individual PHAB metric script, then collects their per-metric
# CSV outputs into one long table (StationCode / SampleDate / variable /
# type / value) written to all_metrics_revised.csv.
start <- proc.time()
source("L://Bioassessment Data Management Tools_RM/R Scripts/Bank_morphology.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/channel_morphology.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/densiometer.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/Percent Bank Stability.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/riparian_vegetation.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/human_disturbance.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/substrate_size_and_composition.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/channel_sinuosity_and_slope.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/habitat_complexity.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/algae.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/flow.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/quality.r")
source("L://Bioassessment Data Management Tools_RM/R Scripts/misc.r")

### Data formatting ###
library(reshape)
filelist <- c("Bank_morphology_metrics.csv", "channel_morphology_metrics.csv",
              "densiometer_metrics.csv", "Percent_Bank_Stability_metrics.csv",
              "riparian_vegetation_metrics.csv", "human_disturbance_metrics.csv",
              "substrate_size_and_composition_metrics.csv",
              "channel_sinuosity_and_slope_metrics.csv", "habitat_complexity_metrics.csv",
              "algae_metrics.csv", "water_quality_metrics.csv", "misc_metrics.csv",
              "flow_metrics.csv")

# Housekeeping columns the metric scripts emit but we never want.
drop_cols <- c("Filename", paste0("Filename.", 1:12), paste0("X.", 1:12))

# PERF FIX: read each metric file once (the original re-read every CSV three
# times per iteration) and collect pieces in a pre-allocated list instead of
# growing a data frame with rbind() inside the loop.
pieces <- vector("list", length(filelist))
for (i in seq_along(filelist)) {
  aa <- read.csv(filelist[i])
  aa <- aa[, !(colnames(aa) %in% drop_cols)]
  colnames(aa)[1] <- "ID"
  # Drop the *_count and *_sd summary columns, keeping the metric values.
  output <- aa[, !grepl("_count|_sd", colnames(aa))]
  output <- melt(output, id = "ID")
  # Variable names look like "<metric>.<type>": split them apart.
  output$type <- gsub("([[:alnum:]_]+)\\.([[:alnum:]]+)", "\\2", output$variable)
  output$variable <- gsub("([[:alnum:]_]+)\\.([[:alnum:]]+)", "\\1", output$variable)
  # Normalise "Site 123" / "SGUT 123" IDs so the single remaining blank
  # separates station code from sample date.
  output$ID <- gsub("Site ([[:digit:]])", "Site_\\1", output$ID)
  output$ID <- gsub("SGUT ([[:digit:]])", "SGUT_\\1", output$ID)
  output$StationCode <- gsub("([_[:alnum:]-]+)[[:blank:]]([[:alnum:]-]+)", "\\1", output$ID)
  output$SampleDate <- gsub("([_[:alnum:]-]+)[[:blank:]]([[:alnum:]-]+)", "\\2", output$ID)
  pieces[[i]] <- output[, c("StationCode", "SampleDate", "variable", "type", "value")]
}
# rev() preserves the row order of the original script, which prepended each
# file's rows to the accumulated result.
finalresults <- do.call(rbind, rev(pieces))

write.csv(finalresults, "all_metrics_revised.csv")
#unlink(filelist)
end <- proc.time()
print(end - start)

# SECURITY NOTE(review): a plaintext Gmail password is hard-coded below.
# The credential is exposed to anyone who can read this script; it should be
# rotated and supplied via an environment variable or keyring, and setwd()
# to a user desktop makes the script non-portable. Kept as-is to preserve
# behaviour, but this must be addressed.
setwd("C:\\Documents and Settings\\gisuser\\Desktop")
gmailsender("mengeln@gmail.com", "mengeln@gmail.com", finalresults, user="mengeln", pw="kram92620")
|
dcf118a7267ed13f09f52a0b65ad6d819d54ff81
|
a9f9590616da68ae0672ed090a0913820d1d558f
|
/lab 5/Lab5.R
|
4e72e4d2cf9d23363a97ce0b0aa004c08971a0fb
|
[] |
no_license
|
christian-miljkovic/R-projects
|
ab144771df651dc6b61fc2ef186feca8caeef76a
|
998a1cb952c61306d64756a50ee317c5152188fb
|
refs/heads/master
| 2021-01-13T08:34:12.694878
| 2016-12-16T17:14:50
| 2016-12-16T17:14:50
| 71,661,772
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,772
|
r
|
Lab5.R
|
#group Christian Miljkovic, Zachary Fineberg, Suchit Sadan
# Predict whether a LendingClub loan receives a high grade (A or B) from
# income, home ownership and loan amount, using a regression model and a
# classification tree, then compare against random / all-zero benchmarks
# on a held-out year of data.
#load in the libraries that you need to use
library(rpart)

### part 1 ###
# Load the data; the first line of each CSV is a preamble, hence skip = 1.
train <- read.csv("/Users/christianmiljkovic/Downloads/LoanStats3c.csv", skip = 1)
test <- read.csv("/Users/christianmiljkovic/Downloads/LoanStats3d.csv", skip = 1)

# Remove trailing footer rows at the bottom of each file.
# BUG FIX: the original indexed rows with ncol() of the *other* data frame
# (e.g. train[1:(ncol(test) - 1), ]), silently truncating each data set to
# roughly as many rows as there are columns. NOTE(review): confirm how many
# footer rows the raw LendingClub exports actually carry.
train <- train[1:(nrow(train) - 1), ]
test <- test[1:(nrow(test) - 1), ]

### part 2 ###
# Outcome: 1 when the loan grade is A or B, else 0.
train$highgrade <- ifelse(train$grade == "A" | train$grade == "B", 1, 0)
percentHigh <- nrow(train[train$highgrade == 1, ]) / nrow(train)

# t-test for above and below median income:
median_income <- median(train$annual_inc)
above_med_income <- ifelse(train$annual_inc > median_income, "Above", "Below")
t.test(train$highgrade ~ above_med_income)

# t-test for above or below median loan amount:
median_loan <- median(train$loan_amnt)
above_med_loan <- ifelse(train$loan_amnt > median_loan, "Above", "Below")
t.test(train$highgrade ~ above_med_loan)

# t-test for home ownership status:
home_owner <- ifelse(train$home_ownership == "RENT", "Rents", "Doesn't rent")
t.test(train$highgrade ~ home_owner)

##########
# part 3 #
##########
# Regression of highgrade on the three predictors.
# NOTE(review): glm() without family = binomial fits a gaussian (linear
# probability) model, not logistic regression -- confirm this is intended.
fit1 <- glm(highgrade ~ annual_inc + home_ownership + loan_amnt, data = train)
summary(fit1)

# Predict values and find the optimal threshold:
train$predict_val <- predict(fit1, type = "response")
train$predict <- (train$predict_val > .6)
accuracy <- mean(train$predict == train$highgrade)
accuracy

# Benchmark 1: random coin-flip assignment.
# BUG FIX: the original drew runif(nrow(test)) but attached it to `train`,
# which errors (or silently recycles) whenever the two sets differ in size.
random <- runif(nrow(train), min = 0, max = 1)
train$benchmark1 <- random >= .5
mean(train$benchmark1 == train$highgrade)

# Benchmark 2: predict all 0's.
benchmark2 <- rep(0, nrow(train))
mean(benchmark2 == train$highgrade)

##########
# part 4 #
##########
# Classification tree on the same predictors.
fit2 <- rpart(highgrade ~ annual_inc + home_ownership + loan_amnt, data = train, method = "class")
z <- predict(fit2, type = "class")
paste("The machine learning based classifier has an accuracy of", round(mean(z == train$highgrade),4), "while the regression based approach has an accuracy of", round(accuracy,4))

### part 5 ###
test$highgrade <- ifelse(test$grade == "A" | test$grade == "B", 1, 0)
# Predict using regression (NOTE(review): a .5 threshold is used here vs .6
# on the training data above -- confirm which cutoff was intended):
predict_val <- predict(fit1, test, type = "response")
# NOTE(review): this local `predict` masks the predict() generic; R still
# resolves the function correctly in later calls, but renaming is advisable.
predict <- (predict_val > .5)
test_accuracy1 <- mean(test$highgrade == predict)
# print out the result
test_accuracy1

# Predict using classifier:
predict_val2 <- predict(fit2, test, type = "class")
test_accuracy2 <- mean(test$highgrade == predict_val2)
# print out the 2nd result
test_accuracy2

# Benchmark accuracy of random assignment:
rand <- (runif(nrow(test), min = 0, max = 1) >= .5)
acc <- mean(rand == test$highgrade)
# print out the result
acc

# Benchmark accuracy of all 0's:
zero <- rep(0, nrow(test))
acc2 <- mean(zero == test$highgrade)
# print out the second result
acc2
|
de2c0857bf8329a0dd99cbb9060babe4431911e9
|
9d7f54434513aef0cb5391e473b8203f7767f79d
|
/man/subform.Rd
|
62d893abc2f4f39c68ea767f5673e37bd7fe819e
|
[] |
no_license
|
blosloos/enviPat
|
8c84d7cccb66d4db171d6c3e04c3bbb185314d4a
|
2a44696579178c9f8365c1dc179125be2c4982ad
|
refs/heads/master
| 2022-11-05T09:23:11.980814
| 2022-11-03T10:05:47
| 2022-11-03T10:05:47
| 28,415,727
| 8
| 6
| null | 2022-03-07T20:32:03
| 2014-12-23T19:31:19
|
C
|
UTF-8
|
R
| false
| false
| 834
|
rd
|
subform.Rd
|
\name{subform}
\alias{subform}
\title{Subtract one chemical formula from another}
\description{Subtract one chemical formula from another}
\usage{subform(formula1,formula2)}
\arguments{
\item{formula1}{Chemical formula to subtract from}
\item{formula2}{Chemical formula to subtract}
}
\details{
Useful for adduct calculations, check \code{\link[enviPat]{adducts}}.
Chemical formulas must conform to what is described in \code{\link[enviPat]{check_chemform}}.
To check beforehand whether \code{formula2} is contained in \code{formula1} at all, see \code{\link[enviPat]{check_ded}}.
}
\value{The resulting chemical formula after subtraction.}
\author{Martin Loos}
\seealso{
\code{\link[enviPat]{adducts}},\code{\link[enviPat]{check_ded}}
}
\examples{
formula1<-c("C10[13]C2H10Cl10")
formula2<-c("C2H5[13]C1")
subform(formula1,formula2)
}
|
e426ac40b05399a1bd0dba4b114c3cfe325a7ac8
|
07bd88725a753783fe892df3bee11c511be6b7f3
|
/man/mhingeova.Rd
|
345de5cb1cff00f09ac2c183db609f968b42a9c9
|
[] |
no_license
|
cran/bst
|
aaf796323637dea5bdf4efa35cf3b88c2be4dbc4
|
53dbd049b094eee6de2c02c00969900f01da88d2
|
refs/heads/master
| 2023-01-12T09:01:02.160478
| 2023-01-06T17:50:56
| 2023-01-06T17:50:56
| 17,694,897
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,370
|
rd
|
mhingeova.Rd
|
\name{mhingeova}
\alias{mhingeova}
\alias{print.mhingeova}
\title{ Multi-class HingeBoost}
\description{
Multi-class algorithm with one-vs-all binary HingeBoost which optimizes the hinge loss functions with componentwise
linear, smoothing splines, tree models as base learners.
}
\usage{
mhingeova(xtr, ytr, xte=NULL, yte=NULL, cost = NULL, nu=0.1,
learner=c("tree", "ls", "sm"), maxdepth=1, m1=200, twinboost = FALSE, m2=200)
\method{print}{mhingeova}(x, ...)
}
\arguments{
\item{xtr}{ training data containing the predictor variables.}
\item{ytr}{ vector of training data responses. \code{ytr} must be in \{1,2,...,k\}.}
\item{xte}{ test data containing the predictor variables.}
\item{yte}{ vector of test data responses. \code{yte} must be in \{1,2,...,k\}.}
\item{cost}{ default is NULL for equal cost; otherwise a numeric vector indicating price to pay for false positive, 0 < \code{cost} < 1; price of false negative is 1-\code{cost}.}
\item{nu}{ a small number (between 0 and 1) defining the step size or shrinkage parameter. }
\item{learner}{ a character specifying the component-wise base learner to be used:
\code{ls} linear models,
\code{sm} smoothing splines,
\code{tree} regression trees.
}
\item{maxdepth}{ tree depth used in \code{learner=tree}}
\item{m1}{ number of boosting iteration }
\item{twinboost}{ logical: twin boosting? }
\item{m2}{ number of twin boosting iteration }
\item{x}{ class of \code{\link{mhingeova}}. }
\item{\dots}{ additional arguments. }
}
\details{
For a C-class problem (C > 2), each class is separately compared against all other classes with HingeBoost, and C functions are estimated to represent confidence for each class. The classification rule is to assign the class with the largest estimate.
A linear or nonlinear multi-class HingeBoost classifier is fitted using a boosting algorithm based on one-against-all component-wise
base learners for +1/-1 responses, with a possibly cost-sensitive hinge loss function.
}
\value{
An object of class \code{mhingeova} with \code{\link{print}} method being available.
}
\references{
Zhu Wang (2011),
HingeBoost: ROC-Based Boost for Classification and Variable Selection.
\emph{The International Journal of Biostatistics}, \bold{7}(1), Article 13.
Zhu Wang (2012), Multi-class HingeBoost: Method and Application to the Classification of Cancer Types Using Gene Expression Data. \emph{Methods of Information in Medicine}, \bold{51}(2), 162--7.
}
\seealso{\code{\link{bst}} for HingeBoost binary classification. Furthermore see \code{\link{cv.bst}} for stopping iteration selection by cross-validation, and \code{\link{bst_control}} for control parameters.}
\author{ Zhu Wang }
\examples{
\dontrun{
dat1 <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/
thyroid-disease/ann-train.data")
dat2 <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/
thyroid-disease/ann-test.data")
res <- mhingeova(xtr=dat1[,-22], ytr=dat1[,22], xte=dat2[,-22], yte=dat2[,22],
cost=c(2/3, 0.5, 0.5), nu=0.5, learner="ls", m1=100, K=5, cv1=FALSE,
twinboost=TRUE, m2= 200, cv2=FALSE)
res <- mhingeova(xtr=dat1[,-22], ytr=dat1[,22], xte=dat2[,-22], yte=dat2[,22],
cost=c(2/3, 0.5, 0.5), nu=0.5, learner="ls", m1=100, K=5, cv1=FALSE,
twinboost=TRUE, m2= 200, cv2=TRUE)
}
}
\keyword{classification}
|
2d324ae1833183be0c9bd0dec7e17584de892f2e
|
8ff3fd8e4ce47b5a09880bb2f2279c114c8b4064
|
/R/MinstressV4.R
|
045a2b81625962a674c1fcf1880cfcf147eceb21
|
[] |
no_license
|
cran/analytics
|
cafa61c9e15f59974e4468094158f69ced3d66d7
|
b5a52e2ebfef87c1acec2ffe8dcea7188cd7d9bc
|
refs/heads/master
| 2021-01-11T16:33:02.057567
| 2018-10-14T22:30:06
| 2018-10-14T22:30:06
| 80,107,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,951
|
r
|
MinstressV4.R
|
#' @title Better Starting Configuration For Non-Metric MDS
#' @description \code{Minstress} is a heuristic to find better non-metric MDS solutions,
#' by finding better starting configurations, instead of just using a random one.
#' @details This function performs several iterations, each using a different starting seed,
#' and in turn each one of those iterations performs non-metric MDS many times (typically, thousands or more)
#' in an attempt to find the best seed (which induces a particular initial configuration) of them all.
#' @param x a data frame containing numeric values only
#' @param p the size of the population of seeds (any positive integer)
#' @param s the number of seeds we sample (any positive integer)
#' @param k the number of dimensions wanted (any positive integer)
#' @param iter a positive integer specifying the number of iterations.
#' @param pb a Boolean variable declaring if one wants to display a progress bar (default: FALSE)
#' @param m a string specifying the distance method (default: 'euclidean')
#' @return A list informing about dimensionality, minimum STRESS level found, and best seed found.
#' One can then use the best seed found to perform non-metric MDS with a better initial configuration (generally).
#' @author Albert Dorador
#' @export
#' @import cluster
#' @import tcltk
#' @import MASS
#' @import stats
#' @examples
#'
#' require(MASS)
#'
#' swiss.x <- as.data.frame(swiss[, -1])
#' Minstress(swiss.x, 1e5, 50, 2, iter = 3)
#'
#' # Comparing without using Minstress (for such a low value of s, difference is minimal)
#' swiss.x <- as.matrix(swiss[, -1])
#' swiss.dist <- dist(swiss.x)
#' swiss.mds <- isoMDS(swiss.dist)
#'
Minstress <- function(x, p, s, k, iter = 5, pb = FALSE, m = 'euclidean'){
  # Validate each size argument separately (clearer failure messages than
  # the original stopifnot(is.numeric(c(p, s, k)))).
  stopifnot(is.numeric(p), is.numeric(s), is.numeric(k))
  # Distance matrix: daisy() handles Gower dissimilarity, dist() the rest.
  if (m == "gower"){
    Dist <- as.matrix(daisy(x, metric = "gower"))
  } else {
    Dist <- as.matrix(dist(x, method = m))
  }
  n <- nrow(x)
  best.seeds <- integer(iter)
  Stress.vector <- numeric(iter)
  if (pb) {
    progbar <- tkProgressBar(title = "Progress bar", min = 0, max = iter, width = 300)
    setTkProgressBar(progbar, 0, label = paste(0, "% done"))
  }
  for (it in seq_len(iter)){
    # Candidate seeds for this iteration (sampled without replacement).
    ind <- sample.int(p, s)
    STRESS <- numeric(length(ind))
    # PERF FIX: indexed loop replaces the original's O(s^2)
    # `which(ind == i)` lookup on every candidate seed.
    for (j in seq_along(ind)) {
      set.seed(ind[j])
      # Centred random initial configuration induced by this seed.
      init <- scale(matrix(runif(n*k), ncol = k), scale = FALSE)
      nmmds.out <- isoMDS(Dist, y = init, k = k, maxit = 100, trace = FALSE)
      STRESS[j] <- nmmds.out$stress
    }
    if (pb) setTkProgressBar(progbar, it, label = paste(round(it/iter*100, 0), "% done"))
    best.seeds[it] <- ind[which.min(STRESS)]
    Stress.vector[it] <- min(STRESS)
  }
  if (pb) {
    Sys.sleep(0.5)
    close(progbar)
  }
  solution <- list('Dimensionality'= k, 'Minimum found'= min(Stress.vector),
                   'Best seed'= best.seeds[which.min(Stress.vector)])
  return(solution)
}
|
6c0bfc6c6b2b1afd1ac58ef3c0800b14c9685539
|
74b77f77bb7adbb429ed5cf8f8765a5d914337a7
|
/R/bisection.update.R
|
dfe12256aa889a41fa9abafefb283f9aa7004419
|
[] |
no_license
|
cran/gIPFrm
|
59d349575ec4324951106a8a0f341897a5f233e1
|
ec662590567da2900f2bfc8fbd4059eee7de9915
|
refs/heads/master
| 2020-06-06T12:22:21.899567
| 2017-07-24T08:49:06
| 2017-07-24T08:49:06
| 17,696,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,052
|
r
|
bisection.update.R
|
# Bisection search for the scaling constant gamma such that the fitted
# probabilities from ipf.gamma() sum (up to `tolerance`) to the observed
# total. Returns list(gamma.tilde = <gamma>, model.tilde = <ipf.gamma fit>).
bisection.update <- function(ModelMx, ObsTbl, tolerance)
{
  total_obs <- sum(ObsTbl)
  # Sufficient statistics of the normalised observed table.
  b <- suff.stat(ModelMx, ObsTbl / total_obs)
  # Initial bracket for gamma.
  lo <- 1 / sum(b)
  hi <- min(1 / b)
  mid <- (lo + hi) / 2
  fit_mid <- ipf.gamma(ModelMx, ObsTbl, mid, tolerance, "probabilities")
  p_mid <- fit_mid$fitted.values
  # Halve the bracket until the fitted mass matches the observed total.
  while (abs(sum(p_mid) / total_obs - 1) > tolerance)
  {
    fit_hi <- ipf.gamma(ModelMx, ObsTbl, hi, tolerance, "probabilities")
    p_hi <- fit_hi$fitted.values
    # Keep the half-interval whose endpoints straddle the root.
    if (sign(sum(p_mid) / total_obs - 1) == sign(sum(p_hi) / total_obs - 1))
    {
      hi <- mid
    } else {
      lo <- mid
    }
    mid <- (lo + hi) / 2
    fit_mid <- ipf.gamma(ModelMx, ObsTbl, mid, tolerance, "probabilities")
    p_mid <- fit_mid$fitted.values
  }
  list(gamma.tilde = mid,
       model.tilde = fit_mid)
}
|
86947e8f27c3e99192116e49c655436a7f464d58
|
983585773a5f29526e764ff9b35c9a00bbe993c4
|
/simple regression.R
|
0a931aab9b80e2f4fe84f2b247cde11cb74fb16a
|
[] |
no_license
|
arjunchandrashekara/R-codes
|
3361f23b5867e4130df8a327ba1c0931cb73c2d3
|
f9fcdd275c77c639900823ab6a22c6234473857f
|
refs/heads/master
| 2023-03-26T09:05:51.945927
| 2021-03-29T13:48:19
| 2021-03-29T13:48:19
| 294,035,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,658
|
r
|
simple regression.R
|
# Simple linear regression of calories consumed on weight gained, fitted
# three ways: untransformed, log(x) on the predictor, and log(y) on the
# response. Each model's predictions and intervals are written to disk.
# NOTE(review): attach() is used below; the formula variables resolve from
# the attached data frame, which is fragile -- prefer data = diet.
#MODEL 1 (without applying transformation)
library(ggplot2)
library(readr)
diet <- read.csv('E:/excelr data/assignments/simple regression/calories_consumed.csv')
View(diet)
attach(diet)
# Scatter plot and correlation of the raw variables.
plot(Weight.gained..grams.,Calories.Consumed)
cor(Weight.gained..grams.,Calories.Consumed)
diet_model <- lm(Calories.Consumed~Weight.gained..grams.)
summary(diet_model)
pred1 <- predict(diet_model)
# Observed points (blue) with the fitted line (red).
ggplot(data = diet, aes(x = Weight.gained..grams., y = Calories.Consumed)) +
  geom_point(color='blue') +
  geom_line(color='red',data = diet, aes(x=Weight.gained..grams., y=pred1))
confint(diet_model,level=0.95)
# Prediction intervals for each observation, saved alongside the data.
diet_dataset <- predict(diet_model,interval='predict')
diet_dataset <- as.data.frame(diet_dataset)
diet_final <- cbind(diet,diet_dataset)
setwd("E:/excelr data/assignments/simple regression")
write.csv(diet_final,file='diet_final')

#MODEL 2 (with applying transformation to the independent variable)
library(ggplot2)
library(readr)
diet <- read.csv('E:/excelr data/assignments/simple regression/calories_consumed.csv')
View(diet)
attach(diet)
plot(Weight.gained..grams.,Calories.Consumed)
cor(Weight.gained..grams.,Calories.Consumed)
# log-transform the predictor only.
diet_model <- lm(Calories.Consumed~log(Weight.gained..grams.))
summary(diet_model)
pred1 <- predict(diet_model)
ggplot(data = diet, aes(x =log(Weight.gained..grams.), y = Calories.Consumed)) +
  geom_point(color='blue') +
  geom_line(color='red',data = diet, aes(x=log(Weight.gained..grams.), y=pred1))
confint(diet_model,level=0.95)
diet_dataset <- predict(diet_model,interval='predict')
diet_dataset <- as.data.frame(diet_dataset)
diet_final <- cbind(diet,diet_dataset)
setwd("E:/excelr data/assignments/simple regression/diet_fit")
write.csv(diet_final,file='diet_final_logged')

#MODEL 3 (applying transformation to the dependent variable)
library(ggplot2)
library(readr)
diet <- read.csv('E:/excelr data/assignments/simple regression/calories_consumed.csv')
View(diet)
attach(diet)
plot(Weight.gained..grams.,Calories.Consumed)
cor(Weight.gained..grams.,Calories.Consumed)
# log-transform the response only; predictions are therefore on the log scale.
diet_model <- lm(log(Calories.Consumed)~Weight.gained..grams.)
summary(diet_model)
pred1 <- predict(diet_model)
ggplot(data = diet, aes(x = Weight.gained..grams., y = log(Calories.Consumed))) +
  geom_point(color='blue') +
  geom_line(color='red',data = diet, aes(x=Weight.gained..grams., y=pred1))
confint(diet_model,level=0.95)
diet_dataset <- predict(diet_model,interval='predict')
diet_dataset <- as.data.frame(diet_dataset)
diet_final <- cbind(diet,diet_dataset)
setwd("E:/excelr data/assignments/simple regression/diet_fit")
# NOTE(review): the output filename below contains the typo "looged";
# kept as-is because downstream steps may reference it.
write.csv(diet_final,file='diet_final_looged_y')
|
848d6a8d2710e1d2664ba882d71cb56dcb7ecce4
|
52a2ee6ebfa9183312b16694fc3cdd57a18186e2
|
/importexport.R
|
f5d842e6a9719ba6b5046c8128a5603887af8821
|
[] |
no_license
|
mahamoodsalamalipm1994/analytics1
|
1befd665b2377afe4fe5f740ae1fccf2fa561278
|
f832c2f0b367e231153a75ee3b15020b3370dcad
|
refs/heads/master
| 2020-04-02T16:33:18.211769
| 2018-10-27T15:53:57
| 2018-10-27T15:53:57
| 154,617,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,382
|
r
|
importexport.R
|
# Demonstration script: various ways of importing data into R -- flat files,
# CSV (local and from the web), Google Sheets, and Excel workbooks.

#check for files and folders
dir('./data2')
list.files('./data2')
list.files('./data')
file.exists("./data/mtcars.csv")

#Reading from a flat file into a vector
list.files('./data2')
# scan() with what = "character" reads whitespace-separated tokens.
data= scan("./data2/hhe.txt",what="character")
head(data)
class(data)

#csv read from csv
#First create as csv file from iris data
head(iris)
write.csv(iris,"./data/iris.csv",row.names=F)
# Goto folder data and check for the file
read1= read.csv(file="./data/iris.csv",header = TRUE, sep = ",")
str(read1)
class(read1)

#CSV file from web
# NOTE(review): the web reads below require network access at run time.
read_web1 = read.csv('http://www.stats.ox.ac.uk/pub/datasets/csb/ch11b.dat')
head(read_web1)

#using library
library(data.table)
# fread() is data.table's fast reader; it returns a data.table.
read_web2 = fread("http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv")
head(read_web2)
class(read_web2)

#Import from Google Sheet
library(gsheet)
url_gsheet = "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=107865534"
df_gsheet = as.data.frame(gsheet2tbl(url_gsheet))
head(df_gsheet)

#Import from Excel
# read in the first worksheet from the workbook myexcel.xlsx
# first row contains variable names
# NOTE(review): xlsx requires a working Java installation (rJava).
library(xlsx)
library(rJava)
df_excel1 = read.xlsx("./data2/myexcel.xlsx", 1)
df_excel1
# read in the worksheet named mysheet
df_excel2a = read.xlsx("./data2/myexcel.xlsx", sheetName = "bowlers")
df_excel2a
df_excel2b = read.xlsx("./data2/myexcel.xlsx", sheetIndex = 2)
df_excel2b
|
a23eaf683a1288e0b66bde51289e8713f1023b18
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/autonomics/man/prepare_rnaseq.Rd
|
eb77f754142577e8cba7584edc80f8920d5e5eff
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 719
|
rd
|
prepare_rnaseq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_omics.R
\name{prepare_rnaseq}
\alias{prepare_rnaseq}
\title{Prepare rnaseq counts for analysis}
\usage{
prepare_rnaseq(
object,
filter_exprs_replicated_in_some_subgroup = FALSE,
plot = TRUE
)
}
\arguments{
\item{object}{SummarizedExperiment}
\item{filter_exprs_replicated_in_some_subgroup}{logical}
\item{plot}{logical}
}
\description{
Prepare rnaseq counts for analysis
}
\examples{
if (require(autonomics.data)){
require(magrittr)
object <- 'extdata/stemdiff/rnaseq/gene_counts.txt' \%>\%
system.file(package = 'autonomics.data') \%>\%
read_rnaseq()
object \%>\% prepare_rnaseq()
}
}
|
44a52cae0957430783d7eb6d763ea90acb52e2a8
|
3165136f79bb0154b8af62dc71a112ffc80ae787
|
/funs/sf.count.R
|
433d7f3791de43353bb1b71f8e53860a1dba454f
|
[] |
no_license
|
yuliasidi/wilson_newcombe
|
0dce8043477417799fe098f1a99645fb12f5e9d0
|
0b9b065bdb01b35f48088df787d0e3b6a15bda02
|
refs/heads/master
| 2021-06-23T00:14:48.715457
| 2019-10-21T01:28:03
| 2019-10-21T01:28:03
| 192,383,341
| 0
| 0
| null | 2019-10-21T01:28:05
| 2019-06-17T16:37:50
|
R
|
UTF-8
|
R
| false
| false
| 369
|
r
|
sf.count.R
|
#count number of successes and failures for variable y.m in the dataframe
# Returns a one-row data frame with columns `fail` (count of y.m == 0) and
# `success` (count of y.m != 0), ignoring missing y.m values.
sf.count <- function(dt){
  out <-
    dt%>%
    # IDIOM FIX: the original compared against the reassignable shorthand F
    # (`is.na(y.m) == F`); direct negation is equivalent and safe.
    dplyr::filter(!is.na(y.m))%>%
    dplyr::group_by(y.m)%>%
    dplyr::summarise(nobs = n())%>%
    # Map the 0/1 outcome to readable column names.
    dplyr::mutate(y.name = ifelse(y.m==0, "fail", "success"))%>%
    dplyr::select(-y.m)%>%
    # Pivot to one row: one column per outcome, holding the count.
    tidyr::spread('y.name', 'nobs')
  return(out)
}
|
9ec3503849ddfd2314c438d3f30ca0a148619acf
|
5c418434fdc42397f1273d32d00180b18c823b42
|
/ig1/plot-snpindex.R
|
3bd262a8ce7869c5df28389137a083a19fb3a4ce
|
[] |
no_license
|
carnegie-dpb/evanslab-R
|
82d75d06d7f49d6d57daf76f513900c9659f1909
|
c9d9f252d985092ef5809fd9033cd14339aade1b
|
refs/heads/master
| 2020-08-24T13:53:23.010546
| 2020-03-12T14:01:55
| 2020-03-12T14:01:55
| 216,839,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
plot-snpindex.R
|
##
## plot the "SNP Index" for each seg call on each chromosome, stacked.
##
opar = par()
par(mfrow=c(10,1),mar=c(0.4,4,0.4,0.4))
strain = "B73"
minsize = 20
xmax = max(seg$start[seg$contig==1])
xlim = c(0,xmax)
ylim = c(-1,1)
plotrange = (seg$a+seg$b)>minsize & (seg$c+seg$d)>minsize
plotseg = seg[plotrange,]
## one plot per chromosome
for (chr in 1:10) {
plot(plotseg$start[plotseg$contig==chr], plotseg$snpindex.t[plotseg$contig==chr],
pch=1, cex=0.2, col="darkred", ylab=paste("Chr",chr), xlim=xlim, ylim=ylim, xaxt='n', xaxs='i')
points(plotseg$start[plotseg$contig==chr], -plotseg$snpindex.nt[plotseg$contig==chr],
pch=1, cex=0.2, col="darkblue")
}
par(opar)
|
b44bade795347102615a0ec8bcd3583bb26a36db
|
7f85b0a178495cc4e56b9321f0b4f4d22c7eb5bd
|
/Experimental/nsx.R
|
bbe7c4d090ec0a788e657c1da1982f500f07a903
|
[] |
no_license
|
LasseHjort/cuRe
|
9e07b9bc7aadf33bed424bfe0053693c1ee58cd9
|
b8043decfe107f1e6f94fcea074e08fcad4e41f3
|
refs/heads/master
| 2023-07-06T12:44:30.247560
| 2023-06-29T21:01:42
| 2023-06-29T21:01:42
| 85,562,010
| 8
| 4
| null | 2022-02-28T22:00:21
| 2017-03-20T10:08:30
|
R
|
UTF-8
|
R
| false
| false
| 3,432
|
r
|
nsx.R
|
# Natural cubic B-spline basis with configurable boundary derivative
# constraints -- an extension of splines::ns() in the style used by
# flexible parametric survival models (rstpm2).
#
# Arguments (as visible from this definition):
#   x               values at which to evaluate the basis
#   df              degrees of freedom; drives internal knot count when
#                   `knots` is not supplied
#   knots           internal knot locations (derived from quantiles of x
#                   when chosen via `df`)
#   intercept       keep the first basis column?
#   Boundary.knots  boundary knot pair (default: range of x)
#   derivs          derivative orders constrained at the lower/upper
#                   boundary; c(2,1) when cure = TRUE
#   log             deprecated (see inline comment)
#   centre          if not FALSE, subtract the basis evaluated at `centre`
#   cure            cure-model flavour: last internal knot pinned at the
#                   95th percentile
#   stata.stpm2.compatible  reproduce Stata stpm2 knot placement
#
# Returns a basis matrix of class c("nsx", "basis", "matrix") carrying the
# knots/constraint metadata as attributes.
nsx <- function (x, df = NULL, knots = NULL, intercept = FALSE,
                 Boundary.knots = range(x),
                 derivs = if (cure) c(2,1) else c(2,2),
                 log=FALSE, # deprecated: only used in rstpm2:::stpm2Old
                 centre = FALSE, cure = FALSE, stata.stpm2.compatible=FALSE)
{
    nx <- names(x)
    x <- as.vector(x)
    nax <- is.na(x)
    # Drop NAs for the computation; re-inserted as NA rows near the end.
    if (nas <- any(nax))
        x <- x[!nax]
    if (!missing(Boundary.knots)) {
        Boundary.knots <- sort(Boundary.knots)
        # Points outside the boundary knots get a linear extrapolation of
        # the basis (handled in the `any(outside)` branch below).
        outside <- (ol <- x < Boundary.knots[1L]) | (or <- x >
            Boundary.knots[2L])
    }
    else outside <- FALSE
    if (!missing(df) && missing(knots)) {
        # Number of internal knots implied by df, the intercept and the
        # boundary constraints.
        nIknots <- df - 1 - intercept + 4 - sum(derivs)
        if (nIknots < 0) {
            nIknots <- 0
            warning("'df' was too small; have used ", 1 + intercept)
        }
        knots <- if (nIknots > 0) {
            # Equally spaced quantile probabilities; the cure variant pins
            # the last internal knot at the 95th percentile.
            knots <- if (!cure)
                seq.int(0, 1, length.out = nIknots + 2L)[-c(1L,
                  nIknots + 2L)]
            else c(seq.int(0, 1, length.out = nIknots + 1L)[-c(1L,
                nIknots + 1L)], 0.95)
            # Stata stpm2 rounds the probabilities and uses quantile type 2.
            if (!stata.stpm2.compatible)
                stats::quantile(x[!outside], knots)
            else stats::quantile(x[!outside], round(knots,2), type=2)
        }
    }
    else nIknots <- length(knots)
    # Full knot sequence for a cubic (order-4) B-spline basis.
    Aknots <- sort(c(rep(Boundary.knots, 4L), knots))
    if (any(outside)) {
        basis <- array(0, c(length(x), nIknots + 4L))
        if (any(ol)) {
            # Linear extrapolation below the lower boundary: value and
            # first derivative of the basis at the boundary knot.
            k.pivot <- Boundary.knots[1L]
            xl <- cbind(1, x[ol] - k.pivot)
            tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0,
                1))$design
            basis[ol, ] <- xl %*% tt
        }
        if (any(or)) {
            # Same linear extrapolation above the upper boundary.
            k.pivot <- Boundary.knots[2L]
            xr <- cbind(1, x[or] - k.pivot)
            tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0,
                1))$design
            basis[or, ] <- xr %*% tt
        }
        if (any(inside <- !outside))
            basis[inside, ] <- spline.des(Aknots, x[inside],
                4)$design
    }
    else basis <- spline.des(Aknots, x, 4)$design
    # Constraint matrix: derivatives of orders derivs[1]:2 / derivs[2]:2
    # evaluated at the boundary knots must vanish.
    const <- splineDesign(Aknots, rep(Boundary.knots, 3-derivs), 4, c(derivs[1]:2, derivs[2]:2))
    if (!intercept) {
        const <- const[, -1, drop = FALSE]
        basis <- basis[, -1, drop = FALSE]
    }
    # Project the basis onto the null space of the constraints via QR.
    qr.const <- qr(t(const))
    # NOTE(review): q.const drops a fixed 2 columns of Q while the basis
    # projection below drops nrow(const) columns -- these agree only when
    # the constraints number 2; verify for derivs other than the defaults.
    q.const <- qr.Q(qr.const, complete=TRUE)[, -(1L:2L), drop = FALSE] # NEW
    basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1L:nrow(const)), drop = FALSE])
    n.col <- ncol(basis)
    if (nas) {
        # Re-insert NA rows for the observations removed at the top.
        nmat <- matrix(NA, length(nax), n.col)
        nmat[!nax, ] <- basis
        basis <- nmat
    }
    dimnames(basis) <- list(nx, 1L:n.col)
    if (centre) {
        # Centre the basis by subtracting its value at `centre` (computed
        # recursively with the same knots and constraints).
        centreBasis <- nsx(centre,
                           knots=if (is.null(knots)) numeric(0) else knots,
                           Boundary.knots=Boundary.knots,
                           intercept=intercept, derivs=derivs, centre=FALSE, log=log)
        oldAttributes <- attributes(basis)
        basis <- t(apply(basis,1,function(x) x-centreBasis))
        attributes(basis) <- oldAttributes
    }
    # Attach the construction metadata so predict-type methods can rebuild
    # the identical basis at new x values.
    a <- list(degree = 3, knots = if (is.null(knots)) numeric(0) else knots,
              Boundary.knots = Boundary.knots, intercept = intercept, derivs=derivs,
              centre=centre, log=log, q.const=q.const)
    attributes(basis) <- c(attributes(basis), a)
    class(basis) <- c("nsx", "basis", "matrix")
    basis
}
|
5466d7e4aaa6a9fe4a14f80ad20a2aaf415abd8d
|
7dda987e5bc0dea30143ad52f190b41bc06a1911
|
/LeafletMapping/mappingLeafletTrailPCT.R
|
283a32e8dbe9be6757c3b57d46333b717109ff2c
|
[] |
no_license
|
bgbutler/R_Scripts
|
1b646bc4e087e8d80b52d62fed8eb20841ed2df7
|
b10e69f8781eb2a19befe5f80f59863a0f838023
|
refs/heads/master
| 2023-01-25T04:08:44.844436
| 2023-01-12T16:00:55
| 2023-01-12T16:00:55
| 28,370,052
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,169
|
r
|
mappingLeafletTrailPCT.R
|
#testing alternative mapping with leaflet
library(rgdal)
library(maps)
library(htmltools)
library(devtools)
library(leaflet)
library(sp)
library(htmlwidgets)
library(plotKML)
library(maptools)
library(XML)
library(RCurl)
library(stringi)
library(mapview)
install_github("environmentalinformatics-marburg/mapview", ref = "develop")
remove.packages("leaflet")
install_github('rstudio/leaflet')
pct <- readOGR("pct.gpx", layer = "tracks")
mapStates <- map("state", fill = TRUE,
plot = FALSE,
region = c('california', 'oregon', 'washington:main'))
your.map <- mapview(pct, map.types = "CartoDB.Positron")@map %>%
addMarkers(-116.4697, 32.60758, popup = "Campo") %>%
addMarkers(-120.7816, 49.06465, popup = "Manning Park, Canada") %>%
addPolygons(data=mapStates, fillColor = heat.colors(3, alpha = NULL), stroke = FALSE) %>%
# Add legend
addLegend(position = 'topright', colors = "red", labels = "PCT", opacity = 0.4,
title = 'Legend')
your.map
x = as(pct, "SpatialLinesDataFrame")
mapView(x, burst = TRUE)
leaflet() %>% addTiles() %>% addPolylines(data = x)
|
769e9fe94d1972514d0b400bc7b8cbd35a4d1c94
|
b2016858528a47152f8895761fd22b739cb7ee5a
|
/ApeR/R/Graficas.R
|
e3efce1245ecf86eaeb2def90394847eaf05ff1d
|
[] |
no_license
|
crisns1d/BiologiaComparada
|
165d1371843e88b087e5f8a7f8f5fbd15d485839
|
3a1c51f7b6d6b49d0cc2164c98ae9b3afe976988
|
refs/heads/main
| 2023-03-20T02:24:15.226454
| 2021-03-10T16:15:20
| 2021-03-10T16:15:20
| 341,722,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,537
|
r
|
Graficas.R
|
# Phylogenetic tree plotting examples (ape and friends).
library(ape)
library(ade4)
library(phangorn)
library(adephylo)
library(phylobase)
library(rgl)
# NOTE(review): a machine-specific absolute path makes this script
# non-portable; prefer project-relative paths.
getwd()
setwd("C:/Users/UIS/Downloads/ApeR")
# Example tree with branch lengths, from Newick text.
tre1 <- read.tree(text = "(((a:0.7,b:0.2):1.5,d:2):1,c:7);")

##### TREES WITH BRANCH LENGTHS AND TITLES #####
par(mfrow=c(1,3))
plot(tre1, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot.phylo(tre1, use.edge.length = TRUE, main="Arbol Con Longitud de Ramas")
##### Tree with equal branch lengths and a title
tre2 <- tre1
tre2$edge.length <- c(1, 1, 1, 1, 1, 1)
# BUG FIX: the original called plot.phy(), which does not exist; the
# intended function is ape's plot.phylo().
plot.phylo(tre2, use.edge.length = TRUE, main="Arbol Con Longitud de Ramas Igual")
dev.off()
# Removed the stray interactive help call `?plot`, which has no effect
# when the script is run non-interactively.
### Trees as cladograms (triangular form), drawn right-to-left
par(mfrow=c(1,3))
plot.phylo(tre1, type = "c", direction = "l", use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot.phylo(tre1, type = "c", direction = "l", use.edge.length = TRUE, main="Arbol Con Longitud de Ramas")
plot.phylo(tre2, type = "c", direction = "l", use.edge.length = TRUE, main="Arbol Con Longitud de Ramas Igual")
### Tree with justified tip labels and different branch lengths
tre3 <- read.tree(text = "((((Homooooo:0.21,Pongooo:0.21):0.28,Macaca:0.49):0.13,Ateles:0.62):0.38,Galagoooo:1);")
par(mfrow=c(1,3))
plot.phylo(tre3, adj = 0, direction = "l", use.edge.length = FALSE, main = "Nombres alineados sin long de ramas")
plot.phylo(tre3, adj = 0, direction = "l", use.edge.length = TRUE, main="Nombres Alineados Con Long de Ramas")
tre4 <- tre3
tre4$edge.length <- c(1,1,1,1,1,1,1,1)
plot.phylo(tre4, adj = 0, direction = "l", use.edge.length = TRUE, main="Nombres Alineados Con Longitud de Ramas Igual")
dev.off()
### Unrooted trees with different branch lengths
data(bird.orders)
plot(bird.orders, type = "u", font = 1, no.margin = TRUE, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot(bird.orders, type = "u", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas")
# NOTE(review): this assigns the numeric vector itself to tre5, not a
# modified copy of bird.orders; plot(tre5, ...) further below will fail.
# Likely intended: tre5 <- bird.orders; tre5$edge.length <- rep(1, ...).
tre5 <- bird.orders$edge.length <- c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
plot(bird.orders, type = "u", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas iguales")
### Fan-shaped trees with different branch lengths
data(bird.orders)
par(mfrow=c(1,3))
plot(bird.orders, type = "fan", font = 1, no.margin = TRUE, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot(bird.orders, type = "fan", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas")
# NOTE(review): tre5 is a numeric vector here (see above), so this call
# cannot draw a fan tree.
plot(tre5, type = "fan", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas iguales")
dev.off()
par(mfrow=c(1,3))
# NOTE(review): plot(tre6) is called before tre6 is defined on the next line.
plot(tre6)
tre6 <- read.tree(text = "((t5:0.7751354985,(t6:0.9835859218,(((t1:0.6528835276,t2:0.8458062934):0.8703846436,t8:0.3825760901):0.07328546001,t7:0.5932530346):0.6092662113):0.6149031692):0.6467406675,((t9:0.4991088894,t4:0.4884779244):0.7723605123,t3:0.1005979984):0.5129213275);")
plot(tre6, type = "fan", font = 1, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot(tre6, type = "fan", font = 1, use.edge.length = TRUE, main = "Arbol con longitud de ramas")
tre7 <- tre6
tre7$edge.length <- c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
plot(tre7, type = "fan", font = 1, use.edge.length = TRUE, main = "Arbol con longitud de ramas iguales")
### Radial trees with different branch lengths
plot.phylo(tre6, type = "radial", no.margin = TRUE, font = 1, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot.phylo(tre6, type = "radial", no.margin = TRUE, font = 1, use.edge.length = TRUE, main = "Arbol con longitud de ramas")
plot.phylo(tre7, type = "radial", no.margin = TRUE, font = 1, use.edge.length = TRUE, main = "Arbol con longitud de ramas iguales")
data(bird.orders)
plot.phylo(bird.orders, type = "radial", font = 1, no.margin = TRUE, use.edge.length = FALSE, main = "Arbol sin longitud de ramas")
plot.phylo(bird.orders, type = "radial", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas")
# NOTE(review): same tre5 pitfall as above -- tre5 ends up a numeric vector.
tre5 <- bird.orders$edge.length <- c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
plot.phylo(bird.orders, type = "radial", font = 1, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con longitud de ramas iguales")
dev.off()
### Trees with node labels
?nodelabels
trape <- read.tree(text = "((Homo,Pan),Gorilla);")
plot(trape, main= "Arbol con titulos en los nodos")
nodelabels()
# rectangle-framed node labels
plot(trape, main= "Arbol con titulos en los nodos")
nodelabels("Raiz", 4, frame = "r", bg = "white")
nodelabels("Nodo interno", 5, frame = "r", bg = "white")
# circle-framed node labels
plot(trape, main= "Arbol con titulos en los nodos")
nodelabels("Raiz", 4, frame = "c", bg = "white")
nodelabels("Nodo interno", 5, frame = "c", bg = "white")
### Tree with different values on the nodes
# NOTE(review): scan() with no arguments reads interactively from stdin;
# the script blocks here when run non-interactively.
valores1 <- scan()
valores2 <- scan()
valores3 <- scan()
valores1
plot(tre6, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con valores en los nodos")
# cex = text size, font = font face, adj = label offset (x, y)
nodelabels(valores1, adj = c(-0.2, -0.1), frame = "n",
           cex = 0.8, font = 2, col = "purple")
nodelabels(valores2, adj = c(1.2, -0.5), frame = "n",
           cex = 0.8, font = 3, col = "green")
nodelabels(valores3, adj = c(1.2, 1.5), frame = "n",
           cex = 0.8, col = "red")
### Tree with thermometers on the nodes
plot(tre6, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con termometros en los nodos")
nodelabels(thermo = valores3/100, piecol = c("purple","yellow"), horiz = TRUE)
### Tree with dots on the nodes
dev.off()
valores1
# Colour each node by the band its value falls in.
colores <- c("blue", "purple", "orange")
p <- character(length(valores1))
p[valores1 >= 90] <- colores[1]
p[valores1 < 90 & valores1 >= 70] <- colores[2]
p[valores1 < 70] <- colores[3]
plot(tre6, no.margin = TRUE, use.edge.length = TRUE, main = "Arbol con puntos en los nodos")
nodelabels(node = 10:17, pch = 21, bg = p[-1], cex = 2)
# Legend
points(rep(0.005, 3), 1:3, pch = 21, cex = 2, bg = colores)
text(rep(0.01, 3), 1:3, adj = 0,
     c("<= 90", "70 : 90", "< 70"))
# Tree with boxed tip labels
par(mar=c(3,3,3,3))
trape <- read.tree(text = "((Homo,Pan),Gorilla);")
plot(trape, main= "Arbol terminales en cajitas", cex=1.5)
tiplabels(trape$tip.label[2], 2, adj = 0,
          bg = "yellow", col = "purple", cex=1.5)
# NOTE(review): interactive help call; no effect in a non-interactive run.
?tiplabels()
dev.off()
# Trees with section bars
par(mar=c(3,3,3,3))
data(bird.orders)
# (script continues beyond this chunk)
plot(bird.orders,no.margin = TRUE)
segments(38, 1, 38, 5, lwd = 3)
text(39, 3, "Proaves", srt = 270)
segments(38, 6, 38, 23, lwd = 3)
text(39, 14.5, "Neoaves", srt = 270)
?segments
?text
colors()
#Arboles con clados de distinto color
plot(bird.orders, type = "c", no.margin = FALSE, font = 1)
wh <- which.edge(bird.orders, 20:23)
wh2 <- which.edge(bird.orders, 13:19)
wh3 <- which.edge(bird.orders, 6:13)
colo <- rep("black", Nedge(bird.orders))
colo[wh] <- "turquoise3"
colo[38] <- "turquoise3"
colo[wh2] <- "hotpink2"
colo[wh3] <- "yellow3"
plot(bird.orders, "c", FALSE, font = 1, edge.color = colo,
edge.width = 3, no.margin = TRUE)
#Arboles con clados encerrados en cuadros
plot(bird.orders, font = 1, no.margin = TRUE)
rect(1.2, 0.5, 36, 5.4, col = "orange2")
?par
par(new = TRUE)
plot(bird.orders, font = 1, no.margin = TRUE)
#________________________________________________________________________________________
#Arbol de razgos con nombres de las terminales
?phylo4d
par(mar=c(3,3,3,3))
t1 <- rcoal(20)
?plot.phylo
x1 <- phylo4d(t1, matrix(rnorm(100), 20))
?table.phylo4d()
table.phylo4d(x1, box = FALSE, symbol = ("colors"), col = terrain.colors(4))
#Arbol de razgos sin nombres de las terminales
par(mar=c(3,3,3,3))
t2 <- rcoal(20)
x2 <- phylo4d(t2, matrix(rnorm(100), 20))
table.phylo4d(x2, box = FALSE, show.tip.label = FALSE,)
#Barras de riqueza de especies
Orders.dat <- scan()
data("bird.orders")
names(Orders.dat) <- bird.orders$tip.label
Orders.dat
plot(bird.orders, x.lim = 50, font = 1, cex = 0.8)
segments(rep(40, 23), 1:23, rep(40, 23) + log(Orders.dat), 1:23, lwd = 3)
axis(1, at = c(40, 45, 50), labels = c(0, 5, 10))
mtext("ln(species richness)", at = 45, side = 1, line = 2)
axisPhylo()
#Combinar 2 gráficas en 1
dev.off()
layout(matrix(c(2, rep(1, 8)), 3, 3))
wh <- which.edge(bird.orders, 8:23)
colo <- rep("black", Nedge(bird.orders))
colo[wh] <- "turquoise3"
plot(bird.orders, "p", FALSE, font = 1, no.margin = TRUE, edge.color=colo)
arrows(4.3, 15.5, 6.9, 12, length = 0.1)
par(mar = c(2, 2, 2, 2))
hist(rnorm(1000), main = "", col = terrain.colors(14))
?hist
#Arboles cofilogeneticos
dev.off()
trk <- bird.orders
trc <- chronopl(bird.orders, lambda = 2, age.min = 12)
layout(matrix(1:2, 1, 2))
plot(trk)
plot(trc, show.tip.label = FALSE, direction = "l")
?layout()
layout(matrix(1:2, 1, 2), width = c(1, 1))
par(mar = c(2, 0, 0, 0))
plot(trk, adj = 0.5, cex = 0.6, show.tip.label = T)
nodelabels(node = 26, "?", adj = 2, bg = "white")
axisPhylo()
plot(trc, direction = "l", show.tip.label=F)
axisPhylo()
#Coarbol de parasitos
TR <- read.tree("C:/Users/UIS/Downloads/ApeR/Data/Arboleswuuu.tre")
?matrix
A <- matrix(c("C.hispidus", "C.formosus", "C.eremicus", "C.intermedius", "C.californicus", "C.baileyi", "F.zacatecae-C.h.", "F.reducta-C.f.", "F.zacatecae-C.e.", "F.zacatecae-C.i.", "F.tribulosa-C.c.", "F.reducta-C.b."), 6, 2)
A
cophyloplot(TR[[1]], TR[[2]], A, space = 20, length.line = -3, lty = 2)
dev.off()
#Graficar multiples arboles de coalescencia
TR <- replicate(6, rcoal(10), simplify = FALSE)
kronoviz(TR, horiz = FALSE, type = "c", show.tip.label=FALSE)
#Extraer clados de una topología grande
dev.off()
data(chiroptera)
tr <- drop.tip(chiroptera, 16:916, subtree = TRUE)
plot(tr, font = c(rep(3, 15), rep(2, 3)), cex = 0.8, no.margin = TRUE)
#Hacer zoom en 2 clados
data("bird.families")
zoom(bird.families, list(1:15, 38:48),
col = c("lightgrey", "slategrey"),
no.margin = TRUE, font = 1, subtree = TRUE)
#Hacer un objeto gif
open3d()
plot(ntx, edge.width = 1, tip.color = "black")
play3d(spin3d())
movie3d(spin3d(), 12, fps = 1, convert = FALSE, dir = ".")
#####EJERCICIOS######
###1. Dibujar la figura 4.11 usando una escala de colores en lugar de gris. La figura debe incluir una leyenda
dev.off()
valores1 <- scan()
valores1
colores <- c("red", "pink", "white")
p <- character(length(valores1))
p[valores1 >= 90] <- colores[1]
p[valores1 < 90 & valores1 >= 70] <- colores[2]
p[valores1 < 70] <- colores[3]
par(mar=c(3,3,3,3))
plot(tre6, no.margin = FALSE, use.edge.length = TRUE, main = "Arbol con puntos en los nodos")
nodelabels(node = 10:17, pch = 21, bg = p[-1], cex = 2)
#Leyenda
?points
points(rep(0.0003, 3), 1:3, pch = 21, cex = 2, bg = colores)
text(rep(0.01, 3), 1:3, adj = 0,
c("<= 90", "70 : 90", "< 70"))
###2. graficar la filogenia de ordenes de aves y colorear las proaves de azul. Repita esto pero solo para las ramas terminales de este clado
dev.off()
plot(bird.orders, no.margin = FALSE, font = 1)
wh <- which.edge(bird.orders, 1:5)
colo <- rep("black", Nedge(bird.orders))
colo[wh] <- "turquoise3"
colo2 <- rep("black", Ntip(bird.orders))
colo2[1:5] <- "turquoise3"
?plot
plot(bird.orders, "c", FALSE, font = 1, edge.color = colo,
edge.width = 3, no.margin = TRUE, tip.color = colo2)
plot(bird.orders, no.margin = FALSE, font = 1)
colo <- rep("black", Nedge(bird.orders))
bird.orders$edge
colo[3:4] <- "turquoise3"
colo[7:9] <- "turquoise3"
colo2 <- rep("black", Ntip(bird.orders))
colo2[1:5] <- "turquoise3"
plot(bird.orders, "c", FALSE, font = 1, edge.color = colo,
edge.width = 3, no.margin = TRUE, tip.color = colo2)
colors()
###3. Suponga que tiene un factor que representa un estado de caracter para cada nodo y cada terminal del arbol.
### Encuentre una manera de asociar un color con cada rama dependiendo del estado en ambos extremos de la rama
tr <- rtree(6)
tr$edge
colo <- rep("black", Ntip(tr))
colores <- c("violetred4", "red", "pink")
colo[1:2] <- colores[1]
colo[3:4] <- colores[2]
colo[5:6] <- colores[3]
estados <- c(1, 2, 3, 4, 5)
class(estados)
f <- character(length(estados))
f[estados=5] <- colores[2]
f[estados<5] <- colores[1]
f[estados<3] <- colores[3]
colo2 <- rep("black", Nedge(tr))
colores2 <- c("violetred4", "orange2", "red", "purple", "pink")
colo2[1] <- colores2[5]
colo2[2] <- colores2[4]
colo2[3:5] <- colores2[1]
colo2[6] <- colores2[2]
colo2[7:8] <- colores2[3]
colo2[9:10] <- colores2[5]
plot(tr, no.margin = FALSE, font = 1, tip.color = colo, edge.color = colo2)
nodelabels(node = 7:11, pch = 21, bg = f, cex = 2)
t1 <- read.tree(text = "(c:2,(a:1,b:1):1);")
t2 <- read.tree(text = "(c:4,(a:2,b:2):2);")
tmax <- speciesTree(list(t1, t2))
all.equal(tmax, t1)
tsha <- speciesTree(list(t1, t2), sum)
kronoviz(list(t1, t2, tmax, tsha), type = "c")
##################CONTRASTES INDEPENDIENTES######################
#Arbol con contrastes en los nodos
par(mar=c(3,3,3,3))
TreePrimates <- read.tree(text="((((Homo:0.21,Pongo:0.21):0.28,Macaca:0.49):0.13,Ateles:0.62):0.38,Galago:1.00);")
TreePrimates
body <- c(4.09434, 3.61092, 2.37024, 2.02815, -1.46968)
longevity <- c(4.74493, 3.3322, 3.3673, 2.89037, 2.30259)
names(body) <- names(longevity) <- c("Homo", "Pongo", "Macaca", "Ateles", "Galago")
#Calculo de contrastes
pic.body <- pic(body, tree.primates)
pic.longevity <- pic(longevity, tree.primates)
pic.body
pic.longevity
plot(tree.primates)
edgelabels()
wh <- which.edge(tree.primates, 1:2)
colo <- rep("black", Nedge(tree.primates))
colo[wh] <- "pink"
plot(tree.primates, "p", FALSE, font = 1, no.margin = TRUE, edge.color=colo)
nodelabels(round(pic.body, 3), adj = c(0, -0.5), frame = "n", col = "blue")
nodelabels(round(pic.longevity, 3), adj = c(0, 1), frame = "n", col = "purple")
#Grafica de correlación de los contrastes
plot(pic.body, pic.longevity)
abline(a = 2.56, b = -0.5, lty = 1) # x = y line
?abline
lm(pic.longevity ~ pic.body - 1)
lmorigin(pic.longevity ~ pic.body, nperm = 1e4)
lmorigin(pic.longevity ~ pic.body, nperm = 100)
lmorigin(pic.longevity ~ pic.body, nperm = 1000)
#Cambio la diagonal de la matriz a 0
w <- 1/cophenetic(tree.primates)
diag(w) <- 0 # OR: w[w == Inf] <- 0
Moran.I(body, w)
#Que pasas si no cambio la diagonal
w1 <- 1/cophenetic(tree.primates)
diag(w1)
Moran.I(body, w)
#Calcular gearymoran
gearymoran(w, data.frame(body, longevity))
#Calcular Moran pero de la distancia cofenetica normal
Moran.I(longevity, cophenetic(tree.primates))
#Calculo abouheif.moran
abouheif.moran(cbind(body, longevity), w)
X <- phylo4d(tree.primates, data.frame(body, longevity))
abouheif.moran(X)
data(carnivora)
frm <- SW ~ Order/SuperFamily/Family/Genus
correl.carn <- correlogram.formula(frm, data = carnivora)
correl.carn
plot(correl.carn, col = c("white", "black"), adj = 1, cex=1)
tr <- rtree(3)
treePart(tr)
treePart(tr, "orthobasis")
as.matrix(orthobasis.phylo(tr))
B <- as.matrix(orthobasis.phylo(tree.primates))
X <- B[, 1:2]
X
anova(lm(body ~ X))
anova(lm(longevity ~ X))
par(mfrow=c(1,2))
orthogram(body, tree.primates)
orthogram(longevity, tree.primates)
dev.off()
tr <- rtree(30)
X <- matrix(rnorm(150), 30, 5)
rownames(X) <- tr$tip.label
X2 <- replicate(5, rTraitCont(tr))
dat <- phylo4d(tr, X)
dat2 <- phylo4d(tr, X2)
res <- ppca(dat)
res2 <- ppca(dat2)
plot(res)
plot(res2)
x <- cumsum(c(0, rnorm(99)))
x <- numeric(100)
for (i in 1:99)
x[i + 1] <- x[i] - 0.2 * x[i] + rnorm(1)
X <- replicate(100, cumsum(c(0, rnorm(99))))
#' Simulate one discrete-time Ornstein-Uhlenbeck (OU) sample path.
#'
#' Recurrence: x[t+1] = x[t] - theta * x[t] + N(0, 1), pulled towards 0.
#' The defaults reproduce the original hard-coded simulation exactly
#' (100 time points, mean-reversion strength 0.2).
#'
#' @param n Integer, number of time points in the returned path (default 100).
#' @param theta Numeric, strength of mean reversion towards 0 (default 0.2).
#' @return Numeric vector of length n; the first element is always 0.
sim.ou <- function(n = 100, theta = 0.2) {
  x <- numeric(n)
  # seq_len() is safe when n <= 1, where 1:(n - 1) would yield c(1, 0)
  for (i in seq_len(n - 1L)) {
    x[i + 1] <- x[i] - theta * x[i] + rnorm(1)
  }
  x # returns the simulated path
}
X2 <- replicate(50, sim.ou())
var(X[100 ,])
var(X2[100 ,])
layout(matrix(1:2, 1, 2))
yl <- range(X)
matplot(X, ylim = yl, type = "l", col = 1, main = "Brownian")
matplot(X2, ylim = yl, type = "l", col = 1, main = "OU")
|
9553cc7c5efd36bdc16112cbb51360f8ffdb9fd9
|
a7fcfcd589789859ea7fc129246820502578d97f
|
/scripts/da.R
|
1d5fd5c6ded40a1ad0d49846a66337c91eb9e09b
|
[] |
no_license
|
mikkimikki2020/multivariate_analysis_final_project
|
7c27106a8975f21ad2737e72ac95ced123eaab31
|
02863423f625fa9d66096afd22d1f4c834c6f851
|
refs/heads/main
| 2023-05-06T23:06:47.945321
| 2021-04-28T14:52:51
| 2021-04-28T14:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,472
|
r
|
da.R
|
# Applied Multivariate Data Analysis
# Final Project
# Emrecan Ozdogan and Kyle Naddeo
#--------------------------------------------#
# Linear and Quadratic Discriminant Analysis #
#--------------------------------------------#
# Import
# NOTE: MASS is loaded after dplyr, so MASS::select() masks dplyr::select();
# downstream code works around this by calling dplyr::select() explicitly.
library(dplyr)
library(magrittr)
library(ggplot2)
library(MASS)
# Read in data
#---------------------------------------------------------------------------------------#
# plasma_data.txt carries no header row; column names live in a separate file.
data = read.table("../data/plasma_data.txt", header=F)
names = read.table("../data/plasma_variable_names.txt", header=F, stringsAsFactors=F)
# The first row of the names file supplies the column names for the data.
colnames(data) = names[1,]
# Remove Noncontinuous variables
# Drop the categorical columns so LDA/QDA below see only continuous predictors.
data = data[, -which(names(data) %in% c("SEX", "SMOKSTAT", "VITUSE"))]
# Get Individual datasets Function
#------------------------------------------------------------------------------------------------------#
# Bin the two plasma response variables (BETAPLASMA, RETPLASMA) into
# equal-count categories and return, per response, a dataset ready for
# discriminant analysis plus a lookup table mapping bin index -> bin range.
#
# dataset:             data frame of continuous predictors + BETAPLASMA/RETPLASMA
# num_BETAPLASMA_bins: number of equal-count bins for BETAPLASMA
# num_RETPLASMA_bins:  number of equal-count bins for RETPLASMA
get_plasma_datasets = function(dataset, num_BETAPLASMA_bins, num_RETPLASMA_bins){
  # cut_number() returns factors whose levels are the bin range labels.
  data_w_bins = mutate(dataset, "Binned_BETAPLASMA" = cut_number(BETAPLASMA, num_BETAPLASMA_bins),
                       "Binned_RETPLASMA" = cut_number(RETPLASMA, num_RETPLASMA_bins))
  # Make individual datasets for BETA and RET: drop the raw responses and the
  # other response's bins, leaving one factor column renamed to "Bins".
  # dplyr::select is explicit because MASS (loaded after dplyr) masks select().
  BETA_data = dplyr::select(data_w_bins, -c(Binned_RETPLASMA, BETAPLASMA, RETPLASMA))%>%
    rename(Bins = Binned_BETAPLASMA)
  RET_data = dplyr::select(data_w_bins, -c(Binned_BETAPLASMA, BETAPLASMA, RETPLASMA))%>%
    rename(Bins = Binned_RETPLASMA)
  # Make a reference table for bins.
  # BUG FIX: the original read levels from the *global* `data` object, which
  # never contains the Binned_* columns (they exist only on data_w_bins), so
  # the "Ranges" column of both tables was silently NULL. Read the levels
  # from the local data_w_bins instead.
  BETA_category_data = cbind("Bins" = 1:num_BETAPLASMA_bins,
                             "Ranges" = levels(data_w_bins$Binned_BETAPLASMA))
  RET_category_data = cbind("Bins" = 1:num_RETPLASMA_bins,
                            "Ranges" = levels(data_w_bins$Binned_RETPLASMA))
  return(list("BETA_data" = BETA_data, "Beta_category_data" = BETA_category_data,
              "RET_data" = RET_data, "RET_category_data" = RET_category_data))
}
# LDA/QDA Analysis Function
#---------------------------------------------------------------------------------------------------------#
# Fit LDA and QDA to a binned dataset and report each model's resubstitution
# accuracy (fraction of training rows whose predicted bin matches the
# observed bin, i.e. the diagonal mass of the confusion table).
#
# discrim_data: data frame of predictors plus a "Bins" factor (last column).
# Returns list(lda_accuracy, qda_accuracy).
discrim_analysis = function(discrim_data){
  # Internal helper: fit one discriminant model with the supplied fitting
  # function (MASS::lda or MASS::qda) and return its training accuracy.
  fit_accuracy = function(fit_fun){
    fit = fit_fun(Bins ~ ., data = discrim_data)
    predicted = predict(fit)$class
    observed = discrim_data[[ncol(discrim_data)]]
    confusion = table(predicted, observed)
    sum(diag(confusion)) / sum(confusion)
  }
  # Evaluate LDA first, then QDA, matching the original ordering.
  list(fit_accuracy(lda), fit_accuracy(qda))
}
# Analysis
# --------------------------------------------------------------------#
# Grid-search the number of equal-count bins (same count for both responses).
# Each *_optimal_bin_acc vector stores:
#   c(best LDA bin count, best LDA accuracy, best QDA bin count, best QDA accuracy)
min_num_bins = 4
max_num_bins = 50
BETA_optimal_bin_acc = c(min_num_bins, 0, min_num_bins, 0)
RET_optimal_bin_acc = c(min_num_bins, 0, min_num_bins, 0)
for (i in seq(min_num_bins, max_num_bins)){
# Get Individual data sets
binned_data = get_plasma_datasets(data,
num_BETAPLASMA_bins = i,
num_RETPLASMA_bins = i)
## BETA ##
# Check each bin has sufficient population
# (skip bin counts that leave any class with <= 10 observations)
if (min(table(binned_data$BETA_data$Bins)) > 10){
# Preform Analysis
BETA_results = discrim_analysis(binned_data$BETA_data)
# Save Best
# discrim_analysis returns list(lda_accuracy, qda_accuracy)
if (as.numeric(BETA_results[1]) > BETA_optimal_bin_acc[2]){
BETA_optimal_bin_acc[1:2] = c(i, as.numeric(BETA_results[1]))
}
if (as.numeric(BETA_results[2]) > BETA_optimal_bin_acc[4]){
BETA_optimal_bin_acc[3:4] = c(i, as.numeric(BETA_results[2]))
}
}
## RET ##
# Check each bin has sufficient population
if (min(table(binned_data$RET_data$Bins)) > 10){
# Preform Analysis
RET_results = discrim_analysis(binned_data$RET_data)
# Save Best
if (as.numeric(RET_results[1]) > RET_optimal_bin_acc[2]){
RET_optimal_bin_acc[1:2] = c(i, as.numeric(RET_results[1]))
}
if (as.numeric(RET_results[2]) > RET_optimal_bin_acc[4]){
RET_optimal_bin_acc[3:4] = c(i, as.numeric(RET_results[2]))
}
}
}
# Display results
# Rows = response variable; columns = best bin count / accuracy per method.
results = round(rbind(BETA_optimal_bin_acc,
RET_optimal_bin_acc), digits = 2)
colnames(results) = c("LDA Bins", "LDA Acc", "QDA Bins" , "QDA Acc")
rownames(results) = c("BETA", "RET")
print(results)
|
4b65587ce133ef0f5fa84fdabf266045cb0049fe
|
2b9f876089553638cab483e1744533eb5a0bee90
|
/scripts/tests.R
|
59be9e22cd1839e5312bf386caf7e42583e2d294
|
[] |
no_license
|
hunzikp/dimstar
|
028e2d609f5248511b6bba5872aced1db82be2b2
|
d512e2d130d805772eea234c1b5ddf17747c5d79
|
refs/heads/master
| 2020-03-21T04:25:38.326133
| 2018-10-07T14:39:18
| 2018-10-07T14:39:18
| 138,107,604
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,006
|
r
|
tests.R
|
##################################################
# Test
##################################################
# Manual smoke test of the dimstar DISTAR model: simulate data with known
# parameters, fit the model, and inspect the recovered estimates.
library(dimstar)
reticulate::use_condaenv("py36")
#########################
# Set parameters
# N units, TT time periods, G outcomes; n_pairs = number of outcome pairs.
N <- 256
TT <- 20
G <- 1
n_pairs <- ifelse(G > 1, ncol(combn(G, 2)), 1)
temporal_dep <- TRUE
spatial_dep <- TRUE
outcome_dep <- FALSE
count <- FALSE
# Dependence parameters are zeroed when the corresponding dependence is off.
rho <- ifelse(spatial_dep, 0.45, 0)
gamma <- ifelse(temporal_dep, 0.45, 0)
lambda <- ifelse(outcome_dep, 0.25, 0)
if (count) {
beta <- c(3,1)
} else {
beta <- c(-0.5, 1)
}
sigma2 <- 1
#########################
# Simulate data
# Seed fixed so the whole script (simulation + sampler) is reproducible.
set.seed(6)
out <- simulate_data(N, TT, G, count, rho_vec = rep(rho, G), lambda_vec = rep(lambda, n_pairs), gamma_vec = rep(gamma, G),
beta.ls = rep(list(beta), G), sigma2_vec = rep(sigma2, G))
X.ls <- out$X.ls
y.ls <- out$y.ls
W_t <- out$W_t
ystar_full <- out$ystar
# Randomly censor outcome
# y.ls[[1]][sample(length(y.ls[[1]]), size = 10, replace = FALSE)] <- NA
#########################
# Make model
model <- DISTAR$new(X.ls = X.ls, y.ls = y.ls, W_t = W_t, N = N, G = G, TT = TT,
count = count, spatial_dep = spatial_dep, temporal_dep = temporal_dep, outcome_dep = outcome_dep)
#########################
# Benchmarking
# library(rbenchmark)
#
# set.seed(0)
# model$sample_ystar(M = 50, ystar_init = colMeans(model$ystar_sample))
# plot(ystar_full, colMeans(model$ystar_sample))
#
# theta <- model$pack_theta()
# rbenchmark::benchmark(test = {model$E_llik(theta)})
# test replications elapsed relative user.self sys.self user.child sys.child
# 1 test 100 0.102 1 0.1 0 0 0
#########################
# Train test, iterative
# One manual EM-style iteration: draw latent ystar, then update theta.
model$sample_ystar(M = 50, ystar_init = colMeans(model$ystar_sample))
system.time(model$update_theta())
theta <- model$pack_theta()
system.time(model$E_llik(theta))
# model$beta.ls
# model$sigma2_vec
# model$lambda_vec
# model$gamma_vec
# model$rho_vec
#########################
# Train test
# Full training loop; chng_vec tracks per-iteration parameter change.
chng_vec <- model$train(maxiter = 50, M = 50, abs_tol = 1e-5, burnin = 0, thinning = 1, soft_init = FALSE, verbose = TRUE)
plot(chng_vec)
model$beta.ls
model$sigma2_vec
model$lambda_vec
model$gamma_vec
model$rho_vec
#########################
# VCOV test
# Estimate the variance-covariance matrix and build 90% normal CIs.
model$compute_vcov(M = 1000, thinning = 10, verbose = TRUE)
model$coeftest()
estim <- model$coeftest(print_out = F)
theta_est <- estim$coef
se <- estim$se
se
# 0.05516480 0.03882166 0.02401263 0.02210312
theta_lo <- theta_est - qnorm(0.95)*se
theta_hi <- theta_est + qnorm(0.95)*se
### TODO
# - [DONE] Move code to own project/repo; package; write basic tests.
# - [DONE] Check out whether we can use constrained optim on lambda/gamma/rho instead of tanh/atanh transform
# - [NOT POSSIBLE ATM] Speed up likelihood eval, esp for VC estimation
# - [DONE] Ensure that the model works with G = T = 1
# - [NOT EFFICIENT] Ensure that we use sparse cholesky for det eval when G > 1
# - [DONE] Ensure that we use efficient log-det eval if G = 1
|
7db45d57ae4fd6e4187b0574aadd717be932a763
|
dd75f1e69992dc37957d45f1cb6c22e7b134251f
|
/R/GenLengthAgeKey.r
|
31e438187a67242993833c336bd44ab06e794ca0
|
[] |
no_license
|
allen-chen-noaa-gov/nwblastarca
|
03ba1f7248661e0c80d7a8b6a2bdfad415fce694
|
4bd06d76c66aaeba4cb6e01228834d138f328288
|
refs/heads/master
| 2023-03-29T17:14:50.047636
| 2021-03-29T21:32:02
| 2021-03-29T21:32:02
| 297,245,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,121
|
r
|
GenLengthAgeKey.r
|
GenLengthAgeKey <- function(numbersatage, ALKey) {
  #' GenLengthAgeKey
  #'
  #' Organize the numbersatage and age-length-key into a probability
  #' distribution of ages for each length class.
  #'
  #' @param numbersatage Vector of current numbers at age
  #' @param ALKey DF of age-length-conversion w/ C1=LBins, C2=first age
  #'
  #' @return tmp6 Probability distribution of ages for each length class
  #'   (data frame: first column = length class; remaining columns = one
  #'   probability column per age, in ascending age order)
  #' @export
  #' @importFrom reshape2 melt dcast
  #' @importFrom data.table data.table :=
  #' @examples
  #'
  # Transpose the key so rows become ages and columns become length bins.
  cols <- c("age",unlist(ALKey[1]))
  ALKey <- data.frame(as.numeric(names(ALKey)[-1]),
                      t(ALKey[,-1]))
  names(ALKey) <- cols
  tmp1 <- data.frame(age=ALKey[1],
                     count=numbersatage)
  # Now merge counts with age-length key for ease of multiplying
  tmp2 <- merge(tmp1,ALKey, by.x="age", by.y="age", all.x=FALSE, all.y=FALSE)
  # Now multiply age counts times length distribution to get numbers at length
  # for each age
  for (i in names(tmp2[3:ncol(tmp2)])) {
    tmp2[,i]<-tmp2[,"count"]*tmp2[,i]
  }
  # Now drop the counts column and reshape the data to long
  tmp3 <- data.frame(tmp2[,-which(names(tmp2)=="count")])
  tmp4 <- melt(tmp3,variable.name="length",id.vars="age")
  # Now generate probability distribution of ages at length.
  # Note: convert to data.table for fast and easy manipulation.
  tmp5 <- data.table(tmp4)
  tmp5[, sumCount:=sum(value), by=length]
  # sum counts over length classes
  tmp5$prob <- (tmp5$value/tmp5$sumCount)
  # divide count for each age at len by total len count
  # Address cases where certain sizes are never observed (0/0 yields NaN).
  # The string comparison works via coercion; is.nan() would be clearer.
  tmp5$prob[which(tmp5$prob=="NaN")] <- 0
  tmp5 <- tmp5[,.(age,length,prob)]
  tmp5[,"prob"][tmp5[,"prob"]<1e-15]<-0
  # Zero out very small probabilities
  tmp5 <- as.data.frame(tmp5)
  # Convert back to data frame
  # Now reshape to wide so that rows contain a probability distribution of
  # ages for each length class.
  tmp6 <- dcast(tmp5, length~age, value.var="prob")
  # BUG FIX: the original reordered only the *values* of the age columns
  # (tmp6[,-1] <- tmp6[,-1][, order(...)]) without reordering the column
  # names, so whenever dcast did not already emit ages in ascending numeric
  # order the headers no longer matched their data. Reorder whole columns so
  # names stay attached to their values (a no-op when already sorted).
  age_order <- order(as.numeric(names(tmp6)[-1]))
  tmp6 <- tmp6[, c(1, age_order + 1)]
  return(tmp6)
}
|
0950227f540dde0ff41018f1feb0ebc473889946
|
371c46abac59d344a0ef068bab5540d2df0eeda9
|
/Week 8/ROICalc/.Rproj.user/8F601889/sources/per/t/C79AA8CB-contents
|
a4d8c477c3ae425c11f47dde87a89fa815a0a644
|
[] |
no_license
|
searssl/My-First-Repository
|
83e60e238f5562a0ac0b29fd882949b987a53637
|
67bbaa6ac772a51fff98c6c868731e7e453e9995
|
refs/heads/master
| 2020-08-01T04:42:05.526993
| 2019-12-20T06:43:49
| 2019-12-20T06:43:49
| 210,867,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,046
|
C79AA8CB-contents
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InvestmentCalculator.R
\name{InvestmentCalculator}
\alias{InvestmentCalculator}
\title{Return on Investment Calculator}
\usage{
InvestmentCalculator(IA, Fee, PT, PP, SP, ST)
}
\arguments{
\item{IA}{Initial Investment: The value of money to be invested.}
\item{Fee}{The cost charged by the broker to complete the transaction.}
\item{PT}{Purchase Trades: The number of transactions required to purchase the volume of stocks demanded.}
\item{PP}{Purchase Price: The price at which the stock is purchased.}
\item{SP}{Sale Price: The price at which the stock is sold.}
\item{ST}{Sale Trades: The number of transactions required to sell the volume of stocks.}
}
\value{
The nominal currency return from the purchase and sale of a stock or other simple investment.
}
\description{
Return on Investment Calculator
}
\note{
This calculator assumes the broker allows for the purchase of fractional shares.
}
\author{
Sean Sears
}
|
|
09602e1d3387c9f7b27488c13e5aa4dcc57bfc98
|
1162ed3eb91acc15f6b3d4d1505adc4d7c7bb86e
|
/tests/testthat/test-corpus_sample.R
|
803ea8e2418975fe3b3aef6d74d06d827b83f541
|
[] |
no_license
|
leeper/quanteda
|
01fec90decb0e8ee5a4984c41269f3fbbebe4490
|
8b4ee857251ad14494c107aca889cd1b844c6418
|
refs/heads/master
| 2021-01-20T03:47:37.822050
| 2017-04-27T10:55:57
| 2017-04-27T10:55:57
| 89,587,640
| 1
| 0
| null | 2017-04-27T10:53:45
| 2017-04-27T10:53:45
| null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
test-corpus_sample.R
|
context("corpus_sample tests")
doccorpus <- corpus(c(one = "Sentence one. Sentence two. Third sentence.",
two = "First sentence, doc2. Second sentence, doc2."))
sentcorpus <- corpus_reshape(doccorpus, to = "sentences")
test_that("test corpus_sample to see if without grouping, documents can be oversampled", {
# sampling without document grouping should be able to produce oversampling of a document
set.seed(100)
expect_gt(
sum(stringi::stri_detect_regex(docnames(corpus_sample(sentcorpus, replace = TRUE)), "^one")),
3
)
})
test_that("test corpus_sample to see if with grouping, documents can be oversampled", {
# sampling without document grouping should be able to produce oversampling of a document
# resample 10 times
for (i in 1:10) {
expect_equal(
sum(stringi::stri_detect_regex(docnames(corpus_sample(sentcorpus, replace = TRUE, by = "_document")), "^one")),
3
)
}
})
|
11ae884cf90764e880a0310b8241d4c4825536a0
|
16d234889fef3e74dbf1e271b8b29577218fc915
|
/R/ld_for_finemapping.R
|
2a0f94d13ff190cc60760f672c4598502bb896e6
|
[] |
no_license
|
CreRecombinase/ptb
|
d5fd1cc8e6fe003028d84b010a5404c0de08f7fa
|
d14650f83260c74ca8a9f13910dc4e4612aadda2
|
refs/heads/master
| 2020-04-29T01:11:34.537193
| 2019-04-18T20:42:27
| 2019-04-18T20:42:27
| 175,722,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,406
|
r
|
ld_for_finemapping.R
|
library(tidyverse)
library(spaa)
library(Matrix)
args = commandArgs(trailingOnly=TRUE)
blocks <- as.numeric(args)
#b <- 512
zscores <- read_tsv("ga.zscores.tsv.gz")
snp_info <- readRDS("/project/compbio/LD/CEU/SNP_chunks.RDS")
block2chunk <- readRDS("/project/compbio/LD/CEU/block2chunk.RDS")
#annot <- read_tsv("../../one_plus_annot_torus/base1/full.annot.tsv.gz")
for(b in blocks){
myz <- filter(zscores, ldchunk==b) %>%
rename(rs = variant) %>%
inner_join(., snp_info)
chunks <- unique(myz$chunk)
blocks <- filter(block2chunk, row_chunk %in% chunks & col_chunk %in% chunks)
chr <- myz$chr[1]
files <- paste0("/project/compbio/LD/CEU/CEU/LD_DF/chr", chr, "/AF0.01chr", chr, "_CEU_F_omni_T_0.01_", blocks$block_ind, "_190.RDS")
ld <- map_df(files, function(x){
readRDS(x)})
ld <- filter(ld, rowsnp %in% myz$rs & colsnp %in% myz$rs)
#myz.save <- myz
#ld.save <- ld
#myz <- myz[1:10,]
#ld <- filter(ld, rowsnp %in% myz$rs & colsnp %in% myz$rs)
ld$ix1 <- match(ld$rowsnp, myz$rs)
ld$ix2 <- match(ld$colsnp, myz$rs)
cor_mat <- sparseMatrix(i=ld$ix1,j=ld$ix2,x=ld$r,symmetric=T)
cor_mat <- data.frame(as.matrix(cor_mat))
write_tsv(cor_mat, path=paste0("ld.", b, ".txt"), col_names=FALSE)
myz <- myz %>% rename(variant=rs) %>% select(variant, ldchunk, tstat)
write_tsv(myz, path=paste0("ga.zscores.", b, ".tsv"))
}
|
4db0cccdca0104e0ac42905f3ca8474f2cee5985
|
6beef7a871c10d3baf74d87520ae53dbd52cf450
|
/man/aggregate.Rd
|
48d1bd6c9f6e37c55844cb9c0e3574170b909e81
|
[] |
no_license
|
FelixErnst/RNAmodR
|
d66ed5cb83b300b3d9e24f8310f46bb2f9b734ee
|
114a9f8f781a896205e573c3a87f437978dfe03f
|
refs/heads/master
| 2021-11-22T11:37:10.254735
| 2021-08-25T19:23:14
| 2021-08-25T19:23:14
| 53,844,127
| 2
| 0
| null | 2020-04-29T10:15:57
| 2016-03-14T09:40:42
|
R
|
UTF-8
|
R
| false
| true
| 4,372
|
rd
|
aggregate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/SequenceData-class.R,
% R/SequenceDataSet-class.R, R/SequenceDataList-class.R, R/Modifier-class.R,
% R/ModifierSet-class.R
\name{aggregate}
\alias{aggregate}
\alias{aggregateData}
\alias{getAggregateData}
\alias{hasAggregateData}
\alias{aggregate,SequenceData-method}
\alias{aggregateData,SequenceData-method}
\alias{aggregate,SequenceDataSet-method}
\alias{aggregate,SequenceDataList-method}
\alias{aggregate,Modifier-method}
\alias{aggregateData,Modifier-method}
\alias{getAggregateData,Modifier-method}
\alias{hasAggregateData,Modifier-method}
\alias{aggregate,ModifierSet-method}
\title{Aggregate data per positions}
\usage{
aggregate(x, ...)
aggregateData(x, ...)
getAggregateData(x)
hasAggregateData(x)
\S4method{aggregate}{SequenceData}(x, condition = c())
\S4method{aggregateData}{SequenceData}(x, condition)
\S4method{aggregate}{SequenceDataSet}(x, condition = "Treated")
\S4method{aggregate}{SequenceDataList}(x, condition = "Treated")
\S4method{aggregate}{Modifier}(x, force = FALSE)
\S4method{aggregateData}{Modifier}(x)
\S4method{getAggregateData}{Modifier}(x)
\S4method{hasAggregateData}{Modifier}(x)
\S4method{aggregate}{ModifierSet}(x, force = FALSE)
}
\arguments{
\item{x}{a \code{\link[=SequenceData-class]{SequenceData}},
\code{SequenceDataSet}, \code{SequenceDataList},
\code{\link[=Modifier-class]{Modifier}} or
\code{\link[=ModifierSet-class]{ModifierSet}} object.}
\item{...}{additional arguments}
\item{condition}{character value, which selects, for which condition the data
should be aggregated. One of the following values: \code{Both},
\code{Control}, \code{Treated}}
\item{force}{whether to recreate the aggregated data, if it is already stored
inside the \code{Modifier} object.}
}
\value{
\itemize{
\item{\code{aggregate}: }{for \code{SequenceData} object the aggregated data
is returned as a \code{SplitDataFrameList} with an element per transcript,
whereas for a \code{Modifier} the modified input object is returned,
containing the aggregated data, which can be accessed using
\code{getAggregateData}.}
\item{\code{getAggregateData}: }{only for \code{Modifier}: a
\code{SplitDataFrameList} with an element per transcript is returned. If the
aggregated data is not stored in the object, it is generated on the fly, but
does not persist.}
\item{\code{hasAggregateData}: }{TRUE or FALSE. Does the \code{Modifier}
object already contain aggregated data?}
}
If 'x' is a
\itemize{
\item{\code{\link[=SequenceData-class]{SequenceData}}} {a
\code{SplitDataFrameList} with elements per transcript.}
\item{\code{\link[=SequenceDataSet-class]{SequenceDataSet}} or
\code{\link[=SequenceDataList-class]{SequenceDataList}}} {a \code{SimpleList}
with \code{SplitDataFrameList} as elements.}
\item{\code{\link[=Modifier-class]{Modifier}} or
\code{\link[=ModifierSet-class]{ModifierSet}}} {an updated \code{Modifier}
object. The data can be accessed by using the \code{aggregateData} function.}
}
}
\description{
The \code{aggregate} function is defined for each
\code{\link[=SequenceData-class]{SequenceData}} object and can be used
directly on a \code{\link[=SequenceData-class]{SequenceData}} object or
indirectly via a \code{\link[=Modifier-class]{Modifier}} object.
For the latter, the call is redirected to
\code{\link[=SequenceData-class]{SequenceData}} object, the result summarized
as defined for the individual \code{Modifier} class and stored in the
\code{aggregate} slot of the \code{Modifier} object. The data is then used
for subsequent tasks, such as search for modifications and visualization of
the results.
The summarization is implemented in the \code{aggregateData} for each type of
\code{Modifier} class. The stored data from the \code{aggregate} slot can be
retrieved using the \code{getAggregateData} function.
Whether the aggregated data is already present in the \code{aggregate} slot
can be checked using the \code{hasAggregateData} function.
For the \code{SequenceDataSet}, \code{SequenceDataList} and \code{ModifierSet}
classes, wrappers of the \code{aggregate} function exist as well.
}
\examples{
data(e5sd,package="RNAmodR")
data(msi,package="RNAmodR")
# modify() triggers the search for modifications in the data contained in
# the Modifier or ModifierSet object
sdfl <- aggregate(e5sd)
mi <- aggregate(msi[[1]])
}
|
38f921ddc11dbeceb6abae08718f387e2d692390
|
2725b52781b03ea0e49fe75c8d1184ab91bdf383
|
/man/dvrpc_bg_data.Rd
|
b1a5044ade0e48c33a6c9bc793adb1826ba0b0c9
|
[
"MIT"
] |
permissive
|
davisadamw/evworkplace
|
8f45428ab2231b0ec0038d522968f68b3295bb3d
|
4279e532eb0d88cb040e408c74d8c8385fc06167
|
refs/heads/master
| 2023-04-29T10:38:34.564711
| 2021-05-17T03:09:37
| 2021-05-17T03:09:37
| 351,246,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,414
|
rd
|
dvrpc_bg_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dvrpc_bg_data}
\alias{dvrpc_bg_data}
\title{EV adoption data for the DVRPC region.}
\format{
A data frame with 7128 rows and 13 variables: \describe{
\item{GEOID}{FIPS code for each block group in the region}
\item{vehicles}{Number of privately owned vehicles in the block group}
\item{ev_frac}{evs_start / vehicles} \item{evs_start}{Number of electric
vehicles in the block group at the start of our analysis}
\item{households}{Number of households in the block group}
\item{med_income}{Median household income of the block group, dollars}
\item{apartment}{Number of households living in apartments}
\item{mobile_home}{Number of households living in mobile homes}
\item{single_attached}{Number of households living in attached single
family houses} \item{single_detached}{Number of households living in
detached single family houses} \item{WgtAvgHOVShare_Mi}{Ignore this column}
\item{WgtAvgCmtDst_Mi}{Average commute distance of people living in this
block group, miles} \item{nb_ev_ownership}{Weighted average EV adoption
rate in surrounding block groups} }
}
\source{
Most data from American Community Survey 2015-2019 5-year averages.
}
\usage{
dvrpc_bg_data
}
\description{
A dataset containing block-group-level information for the Delaware Valley
Region relevant to EV adoption.
}
\keyword{datasets}
|
f5c7caedf80a08eb811e4fc5e7d86dbac8d02503
|
e8d454a6501c12436bd88c9b68745233945907dd
|
/tests/testthat/test_check_data.r
|
c61f03c4577a6677b88f5c558ad1464d55b40d85
|
[] |
no_license
|
CharnelMouse/relationcheckr
|
0d75e50acab13726151e7cd46c3653c693a663e6
|
18f0f7d18e222c6e22cc8ebedef6fc9a83ca8962
|
refs/heads/master
| 2023-04-20T14:37:33.562568
| 2021-05-10T09:54:11
| 2021-05-10T09:54:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,962
|
r
|
test_check_data.r
|
# Unit tests for check_primary_keys_unique(): argument validation plus
# detection and reporting of duplicated primary-key combinations.
describe("check_primary_keys_unique()", {
  it("expects at least one column name for primary key", {
    expect_exerr(
      check_primary_keys_unique(data.table(a = integer()), character()),
      "colnames cannot be length zero"
    )
  })
  it("expects colnames to be in dt", {
    expect_exerr(
      check_primary_keys_unique(data.table(a = integer()), c("a", "b")),
      "columns not in dt: b"
    )
  })
  it("returns NULL (pass) if dt has no rows", {
    expect_null(check_primary_keys_unique(data.table(a = integer()), "a"))
  })
  it("returns NULL (pass) if dt has no rows where primary key is duplicated", {
    expect_null(check_primary_keys_unique(data.table(a = 1L), "a"))
    expect_null(check_primary_keys_unique(data.table(a = 1:2, b = 1L), "a"))
  })
  it("returns duplicated keys if any in error message", {
    expect_exerr(
      check_primary_keys_unique(data.table(a = rep(1L, 2L)), "a"),
      "there are duplicated primary keys:\na: 1"
    )
    # Triplicated values are reported once, not once per extra copy.
    expect_exerr(
      check_primary_keys_unique(data.table(a = rep(1L, 3L)), "a"),
      "there are duplicated primary keys:\na: 1"
    )
    # Multiple distinct duplicated values are separated by a blank line.
    expect_exerr(
      check_primary_keys_unique(data.table(a = rep(1:2, 2L)), "a"),
      "there are duplicated primary keys:\na: 1\n\na: 2"
    )
  })
  it("checks for unique sets over multiple-column keys", {
    # Only the (a = 1, b = 2) combination appears twice in this table.
    expect_exerr(
      check_primary_keys_unique(
        data.table(
          a = rep(1:2, 3L),
          b = c(1L, 1L, 2L, 2L, 2L, 3L)
        ),
        c("a", "b")
      ),
      "there are duplicated primary keys:\na: 1\nb: 2"
    )
  })
})
# Unit tests for check_foreign_keys(): argument validation, detection of
# foreign-key values absent from the reference table, and the per-key
# `optional` flag that permits NA values.
describe("check_foreign_keys()", {
  it("expects at least one key name", {
    expect_exerr(
      check_foreign_keys(
        data.table(a = character()),
        data.table(a = character()),
        character()
      ),
      "require at least one key"
    )
  })
  it("expects key to exist in table and reference table", {
    expect_exerr(
      check_foreign_keys(
        data.table(a = character()),
        data.table(b = character()),
        "b"
      ),
      "foreign key columns not found in dt: b"
    )
    expect_exerr(
      check_foreign_keys(
        data.table(a = character()),
        data.table(b = character()),
        "a"
      ),
      "reference key columns not found in ref: a"
    )
  })
  it("returns error with values in dt that don't exist in ref", {
    expect_exerr(
      check_foreign_keys(
        data.table(a = 1:3),
        data.table(a = 1:2),
        "a"
      ),
      "foreign key values not found in reference columns:\na: 3"
    )
    # Multi-column keys: missing values are reported per key column.
    expect_exerr(
      check_foreign_keys(
        data.table(a = 1:4,
                   b = 1:2),
        data.table(a = c(1:2, 1L),
                   b = 1:3),
        c("a", "b")
      ),
      "foreign key values not found in reference columns:\na: 3, 4"
    )
  })
  it("returns NULL if foreign keys all pull values out of ref", {
    expect_null(
      check_foreign_keys(
        data.table(a = integer()),
        data.table(a = integer()),
        "a"
      )
    )
  })
  it("returns error for NA values by default", {
    expect_exerr(
      check_foreign_keys(
        data.table(a = NA_character_),
        data.table(a = "a"),
        "a"
      ),
      "foreign key values not found in reference columns:\na: NA"
    )
  })
  it("expects optional to be length one or same length as keys", {
    expect_exerr(
      check_foreign_keys(
        data.table(a = NA_character_, b = NA_character_),
        data.table(a = "a", b = "b"),
        c("a", "b"),
        optional = c(TRUE, FALSE, TRUE)
      ),
      "optional must be length one or same length as keys"
    )
  })
  it("allows NA in addition to ref values if optional = TRUE", {
    expect_null(
      check_foreign_keys(
        data.table(a = NA_character_),
        data.table(a = "a"),
        "a",
        optional = TRUE
      )
    )
  })
  it("allows an optional flag for each key pair", {
    # Only column b (optional = FALSE) should be reported.
    expect_exerr(
      check_foreign_keys(
        data.table(a = NA_character_, b = NA_character_),
        data.table(a = "a", b = "b"),
        c("a", "b"),
        optional = c(TRUE, FALSE)
      ),
      "foreign key values not found in reference columns:\nb: NA"
    )
  })
})
# Unit tests for check_no_required_values_missing(): NA detection with
# row numbers in the message, and exemption of declared-optional columns.
describe("check_no_required_values_missing", {
  it("returns error if NA values found", {
    expect_exerr(
      check_no_required_values_missing(data.table(a = NA)),
      "there are missing required values in the following rows:\na: 1"
    )
  })
  it("ignores optional columns, where normalization would hinder use", {
    # "a" is passed as an optional column, so its NA is tolerated.
    expect_null(check_no_required_values_missing(data.table(a = NA), "a"))
    # Missing rows are reported per offending column.
    expect_exerr(
      check_no_required_values_missing(data.table(a = 1:2, b = c(1, NA), c = NA)),
      "there are missing required values in the following rows:\nb: 2\nc: 1, 2"
    )
  })
})
# Unit tests for check_column_types(): the expected-types vector must match
# the table's columns exactly (no missing or extra names), and class checks
# can optionally use inheritance, per column or globally.
describe("check_column_types()", {
  it("expects to be given a type for each column, with no extras", {
    expect_exerr(
      check_column_types(
        data.table(a = integer(), b = character()),
        c(a = "integer")
      ),
      "missing column types: b"
    )
    expect_exerr(
      check_column_types(
        data.table(a = integer(), b = character()),
        c(a = "integer", b = "character", c = "numeric")
      ),
      "types given for absent columns: c"
    )
  })
  it("returns error if any primary column types are not as expected", {
    # A column whose class vector is c("test", "character"): without
    # inheritance only the first class ("test") is compared.
    mult_inherit <- numeric()
    class(mult_inherit) <- c("test", "character")
    expect_exerr(
      check_column_types(
        data.table(a = integer(), b = mult_inherit),
        c(a = "integer", b = "character")
      ),
      "unexpected column types:\nb: expected character, observed test"
    )
  })
  it("can take types out of order", {
    expect_null(
      check_column_types(
        data.table(a = integer(), b = character()),
        c(b = "character", a = "integer")
      )
    )
  })
  it("can allow check on inheritance instead of direct type", {
    # With inherit = TRUE, b passes (inherits "character") but c still
    # fails because it does not inherit "numeric".
    mult_inherit <- numeric()
    class(mult_inherit) <- c("test", "character")
    expect_exerr(
      check_column_types(
        data.table(a = integer(), b = mult_inherit, c = mult_inherit),
        c(a = "integer", b = "character", c = "numeric"),
        inherit = TRUE
      ),
      "unexpected column types:\nc: expected numeric, observed test, character"
    )
  })
  it("can allow inheritance separately for individual columns", {
    mult_inherit <- numeric()
    class(mult_inherit) <- c("test", "character")
    expect_exerr(
      check_column_types(
        data.table(a = integer(), b = mult_inherit, c = mult_inherit),
        c(a = "integer", b = "character", c = "numeric"),
        inherit = c(FALSE, FALSE, TRUE)
      ),
      "unexpected column types:\nb: expected character, observed test\nc: expected numeric, observed test, character"
    )
  })
  it("expects inherits to be length one or same length as types", {
    expect_exerr(
      check_column_types(
        data.table(a = integer()),
        c(a = "integer"),
        inherit = c(FALSE, TRUE)
      ),
      "inherit must be length one or same length as types"
    )
  })
  it("uses inherit in same order as types if given out of order", {
    # types lists b first, so inherit[1] = TRUE must apply to b.
    mult_inherit <- numeric()
    class(mult_inherit) <- c("test", "character")
    expect_null(
      check_column_types(
        data.table(a = integer(), b = mult_inherit),
        types = c(b = "character", a = "integer"),
        inherit = c(TRUE, FALSE)
      )
    )
  })
})
# Unit tests for check_table_constraint(): a constraint is an R expression
# evaluated within the table; it must yield a logical vector with one
# element per row, and violating rows are listed in the error message.
describe("check_table_constraint()", {
  it("checks whether calling an evaluation inside the table returns TRUE", {
    expect_null(
      check_table_constraint(
        data.table(a = 1:6, b = 1:3),
        expression(b <= a)
      )
    )
    # NA results count as violations and are reported alongside FALSE rows.
    expect_exerr(
      check_table_constraint(
        data.table(a = 1:3, b = c(1L, 3L, NA_integer_)),
        expression(b <= a)
      ),
      "table has entries that violate constraint b <= a:\na: 2\nb: 3\n\na: 3\nb: NA"
    )
  })
  it("expects expression to return logical vector of same length as table", {
    # Scalar logical result: wrong length.
    expect_exerr(
      check_table_constraint(
        data.table(a = 1:3, b = 1:3),
        expression(b[1] <= a[1])
      ),
      "expression result is not logical with length equal to table entry count"
    )
    # Right length but integer, not logical.
    expect_exerr(
      check_table_constraint(
        data.table(a = 1:3, b = 1:3),
        expression(as.integer(b <= a))
      ),
      "constraint evaluation threw an error, check that you're not using variables defined outside of the table"
    )
  })
  it("fails if given external variables", {
    c <- rep(1:3, 2)
    expect_exerr(
      check_table_constraint(
        data.table(a = 1:6, b = 1:3),
        expression(c <= a)
      ),
      "constraint evaluation threw an error, check that you're not using variables defined outside of the table"
    )
  })
  it("can check expression over groups", {
    expect_null(
      check_table_constraint(
        data.table(a = c(1:3, 3:1), b = 1:3),
        expression(a == c(b, 4L - b)),
        by = "b"
      )
    )
  })
})
# Unit tests for check_range_contiguous(): consecutive [start, end] ranges
# must line up with a fixed spacing between one range's end and the next
# range's start; works on numbers, Date inheritors, and within groups.
describe("check_range_contiguous()", {
  it("checks ranges in two columns are contiguous, minus expected gap", {
    expect_null(
      check_range_contiguous(
        data.table(
          start = c(0, 1, 2, 3),
          end = c(1, 2, 3, 4)
        ),
        "start",
        "end",
        spacing = 0
      )
    )
    # Second range ends at 3 but the third starts at 2: overlap reported
    # as "transition: <end> -> <next start>".
    expect_exerr(
      check_range_contiguous(
        data.table(
          start = c(0, 1, 2, 3),
          end = c(1, 3, 3, 4)
        ),
        "start",
        "end",
        spacing = 0
      ),
      "ranges are not contiguous with spacing 0:\ntransition: 3 -> 2"
    )
    # Non-zero spacing: each start sits exactly 1.5 above the prior end.
    expect_null(
      check_range_contiguous(
        data.table(
          start = c(0, 2.5, 4.5, 6.5),
          end = c(1, 3, 5, 7)
        ),
        "start",
        "end",
        spacing = 1.5
      )
    )
  })
  it("expects columns to be sorted", {
    expect_exerr(
      check_range_contiguous(
        data.table(
          start = c(9, 1, 2, 3),
          end = c(1, 2, 3, 4)
        ),
        "start",
        "end",
        spacing = 0
      ),
      "range columns are not sorted"
    )
  })
  it("can take Date inheritors in addition to numbers", {
    start_date <- as.Date("2021-02-05")
    expect_exerr(
      check_range_contiguous(
        data.table(
          start = as.Date(c(0, 1, 2, 3), origin = start_date),
          end = as.Date(c(1, 3, 3, 4), origin = start_date)
        ),
        "start",
        "end",
        spacing = 0
      ),
      "ranges are not contiguous with spacing 0:\ntransition: 2021-02-08 -> 2021-02-07"
    )
  })
  it("can check over groups", {
    # start (length 4) recycles over the 8 rows; only group "b" has the
    # end = 3 -> start = 2 break, and the group is named in the message.
    expect_exerr(
      check_range_contiguous(
        data.table(
          grp = rep(c("a", "b"), each = 4),
          start = c(0, 1, 2, 3),
          end = c(1, 2, 3, 4, 1, 3, 3, 4)
        ),
        "start",
        "end",
        spacing = 0,
        by = "grp"
      ),
      "ranges are not contiguous with spacing 0:\ngrp: b\ntransition: 3 -> 2"
    )
  })
})
# Unit tests for check_column_relation(): values in the first table's
# column must equal fn() applied to the second table's column, optionally
# evaluated per group via `by`.
describe("check_column_relation()", {
  it("checks first column values are function of second column", {
    expect_null(
      check_column_relation(
        data.table(a = 3L),
        data.table(b = 1:3),
        "a",
        "b",
        max
      )
    )
    # Note: double 3 vs integer max(1:3) = 3L fails, so the comparison
    # is evidently type-strict — TODO confirm against implementation.
    expect_exerr(
      check_column_relation(
        data.table(a = 3),
        data.table(b = 1:3),
        "a",
        "b",
        max
      ),
      "first column not function of second column"
    )
  })
  it("can check relation over groups, using by", {
    expect_null(
      check_column_relation(
        data.table(grp = c("a", "b"), a = 3:4),
        data.table(grp = rep(c("a", "b"), each = 3), b = c(1:3, 2:4)),
        "a",
        "b",
        max,
        by = "grp"
      )
    )
  })
  it("can group over unordered columns", {
    # Same data as above but with the reference groups reversed: matching
    # must go by group value, not row order.
    expect_null(
      check_column_relation(
        data.table(grp = c("a", "b"), a = 3:4),
        data.table(grp = rep(c("b", "a"), each = 3), b = c(2:4, 1:3)),
        "a",
        "b",
        max,
        by = "grp"
      )
    )
  })
})
|
7c30e8f98cb903f7120799429f43e59c2e62753d
|
9be16fab833da0f2355961d57b8b077a3380a352
|
/icesIntro/data/nscod_natmort.R
|
130b652556232fb4fe413614643f61560cda915a
|
[] |
no_license
|
ices-eg/tc_tcsai2017
|
6747783d450a77990f02d32a5f8b65e0f13a0b9f
|
4fe6988b4124e3da9970a056b930043a61dd4d62
|
refs/heads/master
| 2021-06-17T20:17:34.840348
| 2017-06-12T15:49:53
| 2017-06-12T15:49:53
| 93,315,117
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,692
|
r
|
nscod_natmort.R
|
# Natural-mortality-at-age matrix for North Sea cod (per the file/object
# name "natmort" — confirm with the data provider). Rows are years
# 1983-2015, columns are ages 1-6; ages 4-6 use a constant 0.2 throughout
# and the final two rows repeat the 2013 values.
nscod_natmort <- as.matrix(read.table(text="
Year    1     2     3     4     5     6
1983 1.357 0.715 0.212 0.200 0.200 0.200
1984 1.344 0.717 0.212 0.200 0.200 0.200
1985 1.325 0.718 0.213 0.200 0.200 0.200
1986 1.301 0.718 0.213 0.200 0.200 0.200
1987 1.274 0.718 0.214 0.200 0.200 0.200
1988 1.247 0.718 0.215 0.200 0.200 0.200
1989 1.220 0.720 0.215 0.200 0.200 0.200
1990 1.196 0.722 0.216 0.200 0.200 0.200
1991 1.174 0.723 0.216 0.200 0.200 0.200
1992 1.157 0.725 0.217 0.200 0.200 0.200
1993 1.144 0.727 0.217 0.200 0.200 0.200
1994 1.136 0.730 0.217 0.200 0.200 0.200
1995 1.129 0.734 0.218 0.200 0.200 0.200
1996 1.122 0.740 0.219 0.200 0.200 0.200
1997 1.115 0.748 0.220 0.200 0.200 0.200
1998 1.106 0.756 0.222 0.200 0.200 0.200
1999 1.097 0.767 0.224 0.200 0.200 0.200
2000 1.088 0.779 0.226 0.200 0.200 0.200
2001 1.084 0.795 0.229 0.200 0.200 0.200
2002 1.085 0.814 0.232 0.200 0.200 0.200
2003 1.091 0.835 0.235 0.200 0.200 0.200
2004 1.100 0.854 0.237 0.200 0.200 0.200
2005 1.112 0.871 0.238 0.200 0.200 0.200
2006 1.126 0.884 0.239 0.200 0.200 0.200
2007 1.141 0.893 0.238 0.200 0.200 0.200
2008 1.159 0.900 0.237 0.200 0.200 0.200
2009 1.180 0.907 0.236 0.200 0.200 0.200
2010 1.208 0.916 0.235 0.200 0.200 0.200
2011 1.242 0.929 0.234 0.200 0.200 0.200
2012 1.283 0.945 0.233 0.200 0.200 0.200
2013 1.326 0.962 0.233 0.200 0.200 0.200
2014 1.326 0.962 0.233 0.200 0.200 0.200
2015 1.326 0.962 0.233 0.200 0.200 0.200
", header=TRUE, check.names=FALSE, row.names=1))
|
a06883c7ae52ccfed56060f041b8bc53ab8aaefd
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/markovchain/tests/testthat/testInference.R
|
300fbce7e960566bab42097d409957af513c6987
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
r
|
testInference.R
|
# Smoke test of the markovchain package's inference helpers
# (verifyMarkovProperty, assessOrder, assessStationarity, divergenceTest):
# the calls are exercised for errors but their results are not asserted.
#library(markovchain)
# Short two-state ("a"/"b") observation sequence used to fit a chain.
sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a")
mcFit<-markovchainFit(data=sequence,byrow=FALSE)
# verifyMarkovProperty(sequence)
# assessOrder(sequence)
# assessStationarity(sequence, 1)
# divergenceTest(sequence, mcFit$estimate@transitionMatrix)
# Build a markovchain object from the bundled 'blanden' data set and
# simulate a length-100 realization from it.
data(blanden)
myMc<-as(blanden,"markovchain")
# print(myMc)
sequenza<-rmarkovchain(n = 100,myMc)
sequenza
# Run each inference check on the simulated sequence; `res` is overwritten
# by every call, so only the last result survives.
res<-verifyMarkovProperty(sequenza)
res<-assessOrder(sequenza)
res<-assessStationarity(sequenza, 10)
res<-divergenceTest(sequenza, myMc)
# print(res)
# NOTE(review): the expectation below is commented out, so this test_that
# block asserts nothing and always passes.
test_that("States are those that should be", {
  # expect_equal(verifyMarkovProperty(sequenza), TRUE)
})
|
09628358222abc10bb0a284a45a665f19d1ba101
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MPTmultiverse/tests/test-identifiability_checks.R
|
184dc51997720fceb614f74836b413bf32514643
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,932
|
r
|
test-identifiability_checks.R
|
# Regression test for MPTmultiverse: MPTinR "no" fits should warn about
# (and handle) non-identified parameters, and parametric-bootstrap vs
# asymptotic fits should agree on point estimates but differ on SEs,
# group estimates, and between-group differences.
context("MPTinR: Identifiability of individual parameters")
test_that("Non-identified parameters are excluded", {
  # Slow test: skip on CRAN and CI.
  testthat::skip_on_cran()
  testthat::skip_on_travis()
  # Save current options so they can be restored at the end of the test.
  op <- mpt_options()
  mpt_options("default")
  mpt_options("bootstrap_samples" = 200)
  mpt_options(n.optim = 5)
  # Model and data shipped with the package; only a 12-row subset is used
  # to keep the fit fast.
  EQN_FILE <- system.file("extdata", "prospective_memory.eqn", package = "MPTmultiverse")
  DATA_FILE <- system.file("extdata", "smith_et_al_2011.csv", package = "MPTmultiverse")
  data <- read.csv(DATA_FILE, fileEncoding = "UTF-8-BOM")
  data <- data[c(10:15, 110:115),]
  COL_CONDITION <- "WM_EX"
  data[[COL_CONDITION]] <- factor(
    data[[COL_CONDITION]]
    , levels = 1:2
    , labels = c("low_WM", "high_WM")
  )
  # Parametric-bootstrap fit is expected to warn about non-identified
  # parameters (pb-CIs > 0.99).
  set.seed(99)
  expect_warning(only_pb <- fit_mpt(
    method = "pb_no"
    , dataset = DATA_FILE
    , data = data
    , model = EQN_FILE
    , condition = COL_CONDITION
  ), "MPTinR-no: IDs and parameters with pb-CIs > 0.99 (i.e., non-identified)",
  fixed = TRUE)
  mpt_options(n.optim = 20)
  set.seed(11)
  only_asymptotic <- fit_mpt(
    method = "asymptotic_no"
    , dataset = DATA_FILE
    , data = data
    , model = EQN_FILE
    , condition = COL_CONDITION
  )
  # Point estimates agree between methods; standard errors must not.
  expect_equivalent(only_pb$est_indiv[[1]]$est,
                    only_asymptotic$est_indiv[[1]]$est,
                    tolerance = 0.001)
  expect_false(isTRUE(all.equal(only_pb$est_indiv[[1]]$se,
                                only_asymptotic$est_indiv[[1]]$se,
                                tolerance = 0.001)))
  ## check for group estimates
  expect_false(isTRUE(all.equal(
    only_pb$est_group[[1]]$est,
    only_asymptotic$est_group[[1]]$est,
    tolerance = 0.001)))
  ## check for group differences
  expect_false(isTRUE(all.equal(
    only_pb$test_between[[1]]$est_diff,
    only_asymptotic$test_between[[1]]$est_diff,
    tolerance = 0.001)))
  mpt_options(op) # reset options
})
|
3b4e8ffee39cdb6f4fb28792cb65b88770a9dcf4
|
6eb980a9312f50491782a92875a52618dfbcffc6
|
/man/print.available_packages.Rd
|
3ca307d6f55d8afb28e5db6421c45848ecbb8d7a
|
[] |
no_license
|
cran/deepdep
|
d9636bb8dd22b64e86b893adb2c0873ea87068c4
|
74b4aafcb30d8d1bde5e212c6187d052180a7e94
|
refs/heads/master
| 2023-03-05T18:45:35.804669
| 2023-02-20T23:10:05
| 2023-02-20T23:10:05
| 245,601,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 572
|
rd
|
print.available_packages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_available_packages.R
\name{print.available_packages}
\alias{print.available_packages}
\title{Print function for an object of \code{available_packages} class}
\usage{
\method{print}{available_packages}(x, ...)
}
\arguments{
\item{x}{An object of \code{available_packages} class.}
\item{...}{Other arguments passed to or from other methods.}
}
\description{
Print function for an object of \code{available_packages} class
}
\examples{
\donttest{
library(deepdep)
av <- get_available_packages()
head(av)
}
}
|
db0712c4737b635bfb28b7e214fdea76c1c25269
|
2a63bbb53d797c916ae959dde41fe263a7ae8d44
|
/rankall.R
|
d5df290e72f24b0c5f593f0a429357c20370ec53
|
[] |
no_license
|
emiliehwolf/prog3
|
0b0e77a7456fa6e83f172bb2370ebbf8893eef74
|
29ca28a61d87795be67639ef031ad636f89a888b
|
refs/heads/master
| 2020-04-06T07:04:08.765064
| 2017-02-12T09:56:31
| 2017-02-12T09:56:31
| 33,079,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,389
|
r
|
rankall.R
|
##rankall.R
##Author: Emilie H. Wolf
##February 12, 2017
rankall <- function(outcome, num="best") {
    ## Rank hospitals in every state by 30-day mortality for one outcome.
    ##
    ## Args:
    ##   outcome: one of "heart attack", "heart failure", "pneumonia".
    ##   num:     rank to report per state -- "best", "worst", or an integer.
    ## Returns:
    ##   A data frame with one row per state: hospital name and state.
    ##   States with fewer hospitals than the requested rank yield NA rows.

    ## Read outcome data; "Not Available" entries become NA.
    df <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character",
                   na.strings = "Not Available")

    ## Keep only the columns needed and convert mortality rates to numeric:
    ## 2 = Hospital.Name, 7 = State, 11/17/23 = 30-day mortality for
    ## heart attack / heart failure / pneumonia.
    o <- data.frame(hospital = df[, 2],
                    state = df[, 7],
                    ha = as.numeric(df[, 11]),
                    hf = as.numeric(df[, 17]),
                    pn = as.numeric(df[, 23]),
                    stringsAsFactors = FALSE)

    ## Map the outcome argument to its column index; reject anything else.
    x <- switch(outcome,
                "heart attack"  = 3,
                "heart failure" = 4,
                "pneumonia"     = 5,
                stop("invalid outcome"))

    ## Fix: the original relied on o$state being a factor (levels()) and a
    ## hard-coded 54-row result; under R >= 4.0 data.frame() no longer
    ## creates factors by default, so derive the states from the data.
    states <- sort(unique(o$state))
    d <- data.frame(hospital = character(length(states)),
                    state = character(length(states)),
                    stringsAsFactors = FALSE)

    ## For each state, pick the hospital at the requested rank.
    for (i in seq_along(states)) {
        ## Rows for the current state, ordered by outcome then hospital
        ## name; hospitals with NA outcome are dropped (na.last = NA).
        ostate <- o[o$state == states[i], ]
        sorted <- ostate[order(ostate[, x], ostate$hospital, na.last = NA), ]
        if (num == "best") y <- 1
        else if (num == "worst") y <- nrow(sorted)
        else y <- num
        d[i, ] <- sorted[y, 1:2]  # out-of-range rank yields an NA row
    }
    ## Return a data frame with the hospital names and the state names
    d
}
|
544ba753e01be090925588864ec841a4fc00ac30
|
297723ea95582c22fba42adb1e375a20863f3f94
|
/SC_HOBO2020.R
|
bf6e9fd74ba65d598a38c95c93880942cfd72773
|
[] |
no_license
|
kellyloria/megacosm
|
ac9f6ebea9d56f6fc3b87e5af9348f9e3ad4179b
|
ebbbbf2a7b4d179c4c2d749877bb544aa62d7cd9
|
refs/heads/master
| 2020-08-29T19:39:15.884895
| 2020-07-29T21:12:17
| 2020-07-29T21:12:17
| 218,150,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,923
|
r
|
SC_HOBO2020.R
|
## ---------------------------
## QA'QC for 2020 Sandy Corner (SC) "Megacosm" Pilot:
## Zooplankton Community Comp Data Logger Data
##
## Author: Kelly A. Loria
## Date Created: 2020-06-22
## Email: kelly.loria@colorado.edu
##
## ---------------------------
## Load packages:
library(ggplot2)
library(dplyr)
library(lubridate)
library(tidyverse)
library(zoo)
library(lmerTest)
library(lme4)
## ---------------------------
# File path setup.
# Fix: the original paths contained shell-style escaped spaces ("\ "),
# which is an unrecognized escape sequence in an R string literal and a
# parse error; spaces need no escaping inside R strings.
# NOTE(review): inputDir/outputDir are only defined when this directory
# exists (the author's machine); sourcing elsewhere leaves them unset.
if (dir.exists('/Users/kellyloria/Documents/Niwot LTER 2017-2019/Mesocosm/')) {
  inputDir <- '/Users/kellyloria/Documents/Niwot LTER 2017-2019/Mesocosm/'
  outputDir <- '/Users/kellyloria/Desktop/'
}
## ---------------------------
# Read in data and fix timestamp start tanks 1-20
# A Block #1-4:
A1H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank1_H_20261875.csv"), header=T)
A1H$timestamp <- as.POSIXct(A1H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A1H$timestamp)
# Restrict for date range of deployment:
A1H <- subset(A1H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(A1H$timestamp)
qplot(timestamp, TempC, data = A1H, geom="point") +
#scale_x_datetime(date_breaks = "504 hour", labels = date_format("%b %d")) +
theme(axis.text.x = element_text(angle = 25, vjust = 1.0, hjust = 1.0))
A1S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank1_S_20483158.csv"), header=T)
A1S$timestamp <- as.POSIXct(A1S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A1S$timestamp)
# Restrict for date range of deployment:
A1S <- subset(A1S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(A1S$timestamp)
qplot(timestamp, TempC, data = A1S, geom="point") +
#scale_x_datetime(date_breaks = "504 hour", labels = date_format("%b %d")) +
theme(axis.text.x = element_text(angle = 25, vjust = 1.0, hjust = 1.0))
##
A2H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank2_H_2026187.csv"), header=T)
A2H$timestamp <- as.POSIXct(A2H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A2H$timestamp)
# Restrict for date range of deployment:
A2H <- subset(A2H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(A2H$timestamp)
A2S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank2_S_20483159.csv"), header=T)
A2S$timestamp <- as.POSIXct(A2S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A2S$timestamp)
# Restrict for date range of deployment:
A2S <- subset(A2S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(A2S$timestamp)
##
A3H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank3_H_20261877.csv"), header=T)
A3H$timestamp <- as.POSIXct(A3H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A3H$timestamp)
# Restrict for date range of deployment:
A3H <- subset(A3H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(A3H$timestamp)
A3S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank3_S_20483160.csv"), header=T)
A3S$timestamp <- as.POSIXct(A3S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A3S$timestamp) # issue with battery failure in April 2020
##
A4H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank4_H_20261878.csv"), header=T)
A4H$timestamp <- as.POSIXct(A4H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A4H$timestamp)
# Restrict for date range of deployment:
A4H <- subset(A4H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(A4H$timestamp)
A4S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank4_S_20483161.csv"), header=T)
A4S$timestamp <- as.POSIXct(A4S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(A4S$timestamp)
# Restrict for date range of deployment:
A4S <- subset(A4S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(A4S$timestamp)
##B block
# B Block #5-8:
B5H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank5_H_20261879.csv"), header=T)
B5H$timestamp <- as.POSIXct(B5H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B5H$timestamp)
# Restrict for date range of deployment:
B5H <- subset(B5H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(B5H$timestamp)
B5S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank5_S_20483162.csv"), header=T)
B5S$timestamp <- as.POSIXct(B5S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B5S$timestamp)
# Restrict for date range of deployment:
B5S <- subset(B5S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(B5S$timestamp)
##
B6H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank6_H_20261880.csv"), header=T)
B6H$timestamp <- as.POSIXct(B6H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B6H$timestamp)
# Restrict for date range of deployment:
B6H <- subset(B6H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(B6H$timestamp)
B6S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank6_S_20483163.csv"), header=T)
B6S$timestamp <- as.POSIXct(B6S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B6S$timestamp)
# Restrict for date range of deployment:
B6S <- subset(B6S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(B6S$timestamp)
##
B7H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank7_H_20261881.csv"), header=T)
B7H$timestamp <- as.POSIXct(B7H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B7H$timestamp)
# Restrict for date range of deployment:
B7H <- subset(B7H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(B7H$timestamp)
B7S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank7_S_20483164.csv"), header=T)
B7S$timestamp <- as.POSIXct(B7S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B7S$timestamp)
# Restrict for date range of deployment:
B7S <- subset(B7S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(B7S$timestamp)
##
B8H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank8_H_20261882.csv"), header=T)
B8H$timestamp <- as.POSIXct(B8H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B8H$timestamp)
# Restrict for date range of deployment:
B8H <- subset(B8H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(B8H$timestamp)
B8S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank8_S_20483165.csv"), header=T)
B8S$timestamp <- as.POSIXct(B8S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(B8S$timestamp)
# Restrict for date range of deployment:
B8S <- subset(B8S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(B8S$timestamp)
##
# C9H = no data
C9S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank9_S_20483166.csv"), header=T)
C9S$timestamp <- as.POSIXct(C9S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C9S$timestamp)
# Restrict for date range of deployment:
C9S <- subset(C9S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(C9S$timestamp)
##
C10H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank10_H_20261886.csv"), header=T)
C10H$timestamp <- as.POSIXct(C10H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C10H$timestamp)
# Restrict for date range of deployment:
C10H <- subset(C10H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(C10H$timestamp)
C10S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank10_S_20483167.csv"), header=T)
C10S$timestamp <- as.POSIXct(C10S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C10S$timestamp)
# Restrict for date range of deployment:
C10S <- subset(C10S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(C10S$timestamp)
##
C11S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank11_S_20483168.csv"), header=T)
C11S$timestamp <- as.POSIXct(C11S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C11S$timestamp)
# Restrict for date range of deployment:
C11S <- subset(C11S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(C11S$timestamp)
##
C12H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank12_H_20261888.csv"), header=T)
C12H$timestamp <- as.POSIXct(C12H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C12H$timestamp)
# Restrict for date range of deployment:
C12H <- subset(C12H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(C12H$timestamp)
C12S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank12_S_20483169.csv"), header=T)
C12S$timestamp <- as.POSIXct(C12S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(C12S$timestamp)
# Restrict for date range of deployment:
C12S <- subset(C12S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(C12S$timestamp)
##
D13H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank13_H_20261889.csv"), header=T)
D13H$timestamp <- as.POSIXct(D13H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D13H$timestamp)
# Restrict for date range of deployment:
D13H <- subset(D13H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(D13H$timestamp)
D13S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank13_S_20483170.csv"), header=T)
# --- HOBO logger import: Sandy Corner 2020, loggers D13S through E18S ---------
# Pattern repeated per logger: parse the GMT-06:00 timestamp column from the
# raw HOBO CSV, print its range as a sanity check, restrict to the deployment
# window, and print the range again.
# NOTE(review): the raw files appear to carry two-digit years, so
# format="%m/%d/%Y" parses them as years 0019/0020; the literal bounds below
# ('0019-11-01'/'0020-06-04') match that convention -- confirm this is
# intentional rather than switching to "%y".
# NOTE(review): H-suffixed loggers use a later end date (06-11 12:00) than
# S-suffixed ones (06-04 9:30) -- presumably different retrieval dates; verify
# against field notes.
D13S$timestamp <- as.POSIXct(D13S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D13S$timestamp)
# Restrict for date range of deployment:
D13S <- subset(D13S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(D13S$timestamp)
##
D14S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank14_S_20483171.csv"), header=T)
D14S$timestamp <- as.POSIXct(D14S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D14S$timestamp)
# Restrict for date range of deployment:
D14S <- subset(D14S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(D14S$timestamp)
##
D15H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank15_H_20261890.csv"), header=T)
D15H$timestamp <- as.POSIXct(D15H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D15H$timestamp)
# Restrict for date range of deployment:
D15H <- subset(D15H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-11 12:00:00'))
range(D15H$timestamp)
D15S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank15_S_20483172.csv"), header=T)
D15S$timestamp <- as.POSIXct(D15S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D15S$timestamp)
# Restrict for date range of deployment:
D15S <- subset(D15S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(D15S$timestamp)
##
D16S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank16_S_20483173.csv"), header=T)
D16S$timestamp <- as.POSIXct(D16S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(D16S$timestamp)
# Restrict for date range of deployment:
D16S <- subset(D16S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(D16S$timestamp)
##
E17S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank17_S_20483174.csv"), header=T)
E17S$timestamp <- as.POSIXct(E17S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(E17S$timestamp)
# Restrict for date range of deployment:
E17S <- subset(E17S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(E17S$timestamp)
##
E18S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank18_S_20483175.csv"), header=T)
E18S$timestamp <- as.POSIXct(E18S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(E18S$timestamp)
# Restrict for date range of deployment:
E18S <- subset(E18S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(E18S$timestamp)
##
# --- HOBO logger import: E19H (pendant) and E19S ------------------------------
# Same read/parse/restrict pattern as the other loggers above.
E19H <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank19_H_20261895.csv"), header=T)
E19H$timestamp <- as.POSIXct(E19H$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(E19H$timestamp)
# Restrict for date range of deployment:
E19H <- subset(E19H,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-11 12:00:00'))
# BUG FIX: the original printed range(D15H$timestamp) here -- a copy-paste
# left-over from the Tank 15 block -- so the post-subset sanity check for E19H
# was never actually shown.
range(E19H$timestamp)
E19S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank19_S_20483176.csv"), header=T)
E19S$timestamp <- as.POSIXct(E19S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(E19S$timestamp)
# Restrict for date range of deployment:
E19S <- subset(E19S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(E19S$timestamp)
##
# --- HOBO logger import: E20S (last logger) -----------------------------------
E20S <- read.csv(paste0(inputDir, "/2020SandyCorner/20200524_HOBO/Tank20_S_20483177.csv"), header=T)
E20S$timestamp <- as.POSIXct(E20S$Date.Time..GMT.06.00, format="%m/%d/%Y %H:%M")
range(E20S$timestamp)
# Restrict for date range of deployment:
E20S <- subset(E20S,timestamp >= as.POSIXct('0019-11-01 10:00:00') &
                 timestamp <= as.POSIXct('0020-06-04 9:30:00'))
range(E20S$timestamp)
## ---------------------------
# Aggregate logger data
# Stack the per-logger data frames into one per experimental block, tag each
# with its block letter, then stack all blocks into a single data frame.
# NOTE(review): A1H..C12S are assumed to be defined earlier in this script by
# the same import pattern -- they are not visible in this section.
Ablock <- rbind(A1H, A1S, A2H, A2S, A3H, A3S, A4H, A4S)
Ablock$Block <- "A"
Bblock <- rbind(B5H, B5S, B6H, B6S, B7H, B7S, B8H, B8S)
Bblock$Block <- "B"
Cblock <- rbind(C9S, C10H, C10S, C11S, C12H, C12S)
Cblock$Block <- "C"
Dblock <- rbind(D13H, D13S, D14S, D15H, D15S, D16S)
Dblock$Block <- "D"
Eblock <- rbind(E17S, E18S, E19H, E19S, E20S)
Eblock$Block <- "E"
SandCorn <- rbind(Ablock, Bblock, Cblock, Dblock, Eblock)
names(SandCorn)
## ---------------------------
# QAQC data:
# Flag implausible temperatures, then compute rolling mean/sd envelopes
# (window of 15 observations) per logger so that points more than 3 SD from
# the local mean can be flagged as outliers ('o'). Flags: m = missing,
# q = questionable (physically implausible), o = outlier, n = normal.
colnames(SandCorn)[7] = "IntensityLux"
# 1. Flag temperature values:
SandCorn.Q=SandCorn %>%
  mutate(temperature=ifelse(TempC>35, NA, TempC)) %>%   # >35 C treated as sensor error
  mutate(hour=lubridate::hour(timestamp))%>%
  arrange(Tank_ID, HOBO_position, timestamp)%>%
  # BUG FIX: the original grouped by (Tank_ID, HOBO_position, timestamp), which
  # made every group a single row, so every width-15 rollapply() returned NA,
  # loT/hiT/loL/hiL were always NA, and the 'o' flag below could never fire
  # (its !is.na() guards suppressed it). Group by logger only; rows are already
  # arranged by timestamp so the rolling window runs through time as intended.
  group_by(Tank_ID, HOBO_position)%>%
  mutate(mnT=rollapply(temperature, width = 15, FUN = mean, fill=NA),
         sdT=rollapply(temperature, width = 15, FUN = sd, fill=NA)) %>%
  mutate(loT=mnT- (3*sdT), hiT=mnT+ (3*sdT))%>%
  mutate(mnL=rollapply(IntensityLux, width = 15, FUN = mean, fill=NA),
         sdL=rollapply(IntensityLux, width = 15, FUN = sd, fill=NA)) %>%
  mutate(loL=mnL- (3*sdL), hiL=mnL+ (3*sdL))%>%
  ungroup()%>%
  full_join(., SandCorn)%>%
  mutate(
    flag_temperature=
      case_when(
        is.na(temperature) ~ 'm',
        temperature>35 ~ 'q',
        temperature<loT&!is.na(loT) ~ 'o',
        temperature>hiT&!is.na(hiT) ~ 'o',
        temperature<0 ~ 'q', TRUE ~ 'n')) %>%
  mutate(
    flag_Lux=
      case_when(
        is.na(IntensityLux) ~ 'm',
        IntensityLux<loL&!is.na(loL) ~ 'o',
        IntensityLux>hiL&!is.na(hiL) ~ 'o',
        IntensityLux<0 ~ 'q', TRUE ~ 'n'))
# 2. Remove unwanted variables.
# (Moved up: in the original, p3 referenced SandCorn.p before it was created,
# and SandCorn.p was built from SandCorn.Q2 before SandCorn.Q2 was defined, so
# this section could not run top-to-bottom. Definitions now precede use.)
SandCorn.Q2 <- subset(SandCorn.Q, select=c(Block, Tank_ID, Color, HOBO_position, HOBO_Serial, timestamp, temperature,
                                           IntensityLux, flag_temperature, flag_Lux, Notes))
# Ice-out plotting window:
SandCorn.p <- subset(SandCorn.Q2, timestamp >= as.POSIXct('0020-04-27 00:00:00') & timestamp <= as.POSIXct('0020-06-11 00:00:00'))
summary(SandCorn.p$timestamp)
names(SandCorn.Q)
# 3. Check the QAQC visually: temperatures coloured by flag, per tank (p),
# per treatment colour (p2), and the ice-out window per block (p3).
p <- ggplot(SandCorn.Q, aes(x=timestamp, y=(temperature), colour =as.factor(flag_temperature))) +
  geom_point(alpha = 0.7) +
  theme_classic() + facet_wrap(~Tank_ID)
p2 <- ggplot(SandCorn.Q, aes(x=timestamp, y=(temperature), colour =as.factor(flag_temperature))) +
  geom_point(alpha = 0.7) +
  theme_classic() + facet_wrap(~Color)
p3 <- ggplot(SandCorn.p, aes(x=timestamp, y=(temperature), colour =as.factor(Color))) +
  geom_point(alpha = 0.3) + scale_color_manual(values=c("#dec1a0", "#000000")) +
  theme_bw() + facet_grid(Block~.)
#ggsave(paste0(outputDir,("SC2020HOBO.pdf")), p3, scale = 1.75, width = 15, height = 30, units = c("cm"), dpi = 500)
# 4. Double check for duplicated values:
SandCorn.Q2%>%select(Tank_ID, HOBO_position, timestamp)%>%duplicated()%>%sum() # 0
# if there were duplicates use code below:
#View(Soddie.Q2%>%
#       inner_join(
#         Soddie.Q2 %>%
#           group_by(Tank, Position, timestamp) %>%
#           summarize(ct=dplyr::n())%>% filter(ct>1)))
# Remove values:
#Soddie.Q3 = Soddie.Q2 %>%
#  distinct(Tank, Position, timestamp, .keep_all = TRUE)
#Soddie.Q3%>%select(Tank, Position, timestamp)%>%duplicated()%>%sum()
# 5. Export and save data:
# write.csv(SandCorn.Q2, paste0(outputDir, "SC2020HOBO_Q.csv")) # complied data file of all DO sensors along buoy line
## ---------------------------
# Analyze the data:
# Keep only observations whose temperature flag is 'n' (normal), restrict to
# the ice-out window, then test for a treatment (Color) effect on temperature.
summary(SandCorn.Q2)
# remove outliers
SandCorn.Q3 <- subset(SandCorn.Q2, flag_temperature=="n")
summary(SandCorn.Q3)
SandCorn.Q4 <- subset(SandCorn.Q3, timestamp >= as.POSIXct('0020-04-27 00:00:00') & timestamp <= as.POSIXct('0020-06-11 00:00:00'))
summary(SandCorn.Q4$timestamp)
hist(SandCorn.Q4$temperature)
SC.mod <- t.test(temperature~Color, data = SandCorn.Q4)
# NOTE(review): summary() on an htest object just lists its components; print
# SC.mod directly to see the t-test result.
summary(SC.mod)
SC.mod2 <- glm(temperature~Color, data = SandCorn.Q4)
summary(SC.mod2)
# Mixed model with a random intercept per block; lmer() requires lme4, which
# is assumed to be loaded earlier in the script -- TODO confirm.
SC.mod3 <- lmer(temperature ~ Color + (1|Block), data = SandCorn.Q4)
summary(SC.mod3)
hist(residuals(SC.mod3)) # not the greatest residual distribution
|
aa4208569629a1bf9a3794d823b75b5bd1281c65
|
02b5125f6b2f94430176c1de91d4e65aef7e9ff5
|
/binomial/man/bin_probability.Rd
|
4cdaf018f9e35638642db6df3511210e760998da
|
[] |
no_license
|
stat133-sp19/hw-stat133-sp8268
|
1a3c4586e621becaa3547a06e334609cb9536947
|
9a6d68542ff0452bf4c222ac11cc06a3f1614de8
|
refs/heads/master
| 2020-04-28T06:17:40.574375
| 2019-05-02T06:07:14
| 2019-05-02T06:07:14
| 175,051,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 704
|
rd
|
bin_probability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/density_functions.R
\name{bin_probability}
\alias{bin_probability}
\title{Binomial Probability}
\usage{
bin_probability(success, trials, prob)
}
\arguments{
\item{success}{number of successes, must be non negative int less than num trials}
\item{trials}{number of trials, must be non negative int}
\item{prob}{probability of success, must be numeric between 0 and 1}
}
\value{
computed probability
}
\description{
Computes the probability of obtaining a given number of successes in a given
number of trials with probability of success prob
}
\examples{
bin_probability(success = 2, trials = 5, prob = 0.5)
bin_probability(0:2, 5, 0.5)
}
|
1a795de3e7f91294b8581d4aef4f43986dd1701e
|
8d2dad35b45e1d4a477cae2411c4a2aabc54dc5b
|
/DiscriminantAnalysis.R
|
ae1f07899aed0e8ecb59a27476792f12463b0fef
|
[] |
no_license
|
dov1000/Seasonal_Forecast
|
fce3ea3acc4ddcf23ba8b84b6135e3a5ef23bd80
|
5cb86b2dd7b3d08f67d0d9ea9ad4177e512d8458
|
refs/heads/master
| 2023-05-05T11:14:24.941515
| 2021-05-10T18:33:59
| 2021-05-10T18:33:59
| 365,926,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,610
|
r
|
DiscriminantAnalysis.R
|
# Discriminant analysis of monthly rainfall categories (deciles) from climate
# indices, comparing linear (LDA), quadratic (QDA), mixture (MDA) and flexible
# (FDA) discriminant models. Accuracy notes in comments were presumably
# computed from the confusion tables t1..t4.
library("tidyverse")
library('caret')
library('MASS')
library('mda')
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("C:/Users/daoban/Documents/RelacionIndicesLluvia/")
data_reventazon <-read.csv('Ventanas.csv') %>%
  as_tibble()
# First we compare against the deciles of total rainfall; later we check
# whether the comparison works better when split by month.
deciles <- quantile(data_reventazon$lluvia,c(.1,.2,.3,.4,.5,.6,.7,.8,.9))
data_model <- data_reventazon %>%
  dplyr::select(3:16)
# Create new lagged (one-step shifted) versions of the indices:
data_model <- data_model %>%
  mutate(mei_1 = lag(data_model$mei,1),
         tna_1 = lag(data_model$tna,1),
         tni_1 = lag(data_model$tni,1),
         nao_1 = lag(data_model$nao,1),
         ao_1 = lag(data_model$ao,1)) %>%
  drop_na()
# Bin rainfall into its decile category (0 = lowest decile, 9 = highest):
data_model <- data_model %>%
  mutate(ind = case_when(lluvia < deciles[1] ~ 0,
                         between(lluvia,deciles[1],deciles[2])~ 1,
                         between(lluvia,deciles[2],deciles[3]) ~ 2,
                         between(lluvia,deciles[3],deciles[4]) ~ 3,
                         between(lluvia,deciles[4],deciles[5]) ~ 4,
                         between(lluvia,deciles[5],deciles[6]) ~ 5,
                         between(lluvia,deciles[6],deciles[7]) ~ 6,
                         between(lluvia,deciles[7],deciles[8]) ~ 7,
                         between(lluvia,deciles[8],deciles[9]) ~ 8,
                         lluvia > deciles[9] ~9 ))
# ind2 = next month's decile category (the prediction target):
data_model <- data_model %>%
  mutate(ind2 = lead(data_model$ind,1)) %>%
  mutate(ind2 = factor(ind2)) %>%
  drop_na()
# Classifying one month ahead.
# NOTE(review): the formula ind2~. includes lluvia and ind (current month's
# rainfall/category) as predictors -- presumably intentional, but worth
# confirming this is not unwanted leakage.
train <- data_model[1:528,]
test <- data_model[529:538,]
# Linear model (LDA):
model <- lda(ind2~., data = train)
pred <- predict(model, data_model)
# Predicted classes
head(pred$class,10)
# Predicted probabilities of class memebership.
head(pred$posterior, 10)
# Linear discriminants
head(pred$x, 3)
t1<-table(pred$class,data_model$ind2)
#0.3628319
# Quadratic (QDA):
# Heavily overfitted -- note it is fit on the full data_model, not on train.
model2 <- qda(ind2~., data = data_model)
pred2 <- predict(model2, data_model)
t2<-table(pred2$class,data_model$ind2)
#Mixture discriminant analysis - MDA
model3 <- mda(ind2~., data = train)
pred3 <- predict(model3, data_model)
t3<-table(pred3,data_model$ind2)
#0.5530973
#Flexible discriminant analysis - FDA
# For non-linear relationships.
model4 <- fda(ind2~., data = train)
pred4 <- predict(model4, data_model)
t4<-table(pred4,data_model$ind2)
#0.3672566
saveRDS(model3,file = "C:/Users/daoban/Documents/RelacionIndicesLluvia/Modelos/Ventanas/MixDiscAna")
|
b7fdfae199b0f0836b5c24aa4800b703fee25e7d
|
5241969456b343da0cafa603f6b373c3bc0863eb
|
/man/LAhighweewoo.Rd
|
11d776b2f3125fea57d4ea84759c640f8af30597
|
[] |
no_license
|
cran/IDmeasurer
|
880fc937e1eda6c7ca891eeaf3e516d3a5675032
|
c89c6d520a594207d3e099f9edd66287837f4560
|
refs/heads/master
| 2020-05-21T00:44:51.684846
| 2019-05-09T14:10:10
| 2019-05-09T14:10:10
| 185,838,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,125
|
rd
|
LAhighweewoo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_LAhighweewoo.R
\docType{data}
\name{LAhighweewoo}
\alias{LAhighweewoo}
\title{Yellow-breasted boubou, \emph{Laniarius atroflavus} - spectrum properties}
\format{A data frame with 330 rows and 7 variables:
\describe{
\item{id}{factor, identity code of an individual emitting the call}
\item{dur}{duration of the call, in seconds}
\item{df}{frequency of maximum amplitude within the spectrum - peak frequency, in Hertz}
\item{minf, maxf}{minimum and maximum frequency at -25dB relative to the call peak amplitude, in Hertz}
\item{q25, q75}{frequencies at the two quartiles of amplitude
distribution; frequencies below which lie 25 and 75 percent of the energy of
the call, respectively, in Hertz}
}}
\source{
Osiejuk, unpublished data
}
\usage{
LAhighweewoo
}
\description{
\itemize{
\item \strong{Species:} Yellow-breasted boubou, \emph{Laniarius atroflavus}
\item \strong{Number of individuals:} 33
\item \strong{Number of calls per individual:} 10
\item \strong{Number of acoustic variables:} 6
\item \strong{Individual identity:} HS=3.83
\item \strong{Reference:} Osiejuk, unpublished data
}
Male Yellow-breasted boubous were recorded in Bamenda region in Cameroon.
Birds were recorded between 06.00 to 10.00 in the morning in 2016, typically,
from the distance of 10 - 20 meters. The calls were recorded after short
provocation with playback. Repertoire of males at the field site included
three distinct call types and only the most common call typed labeled as
“High wee woo” was used for this study. The original dataset comprised 33
individuals and 10 calls per individual. \cr\cr
Variables were selected to measure basic spectral parameters of each “high
weewoo” call like the peak frequency, distribution of frequency amplitudes
within spectrum, and range of the frequencies (minimum and maximum).
Additionally, the duration of the call was measured. Variables were extracted
in Raven Pro 1.5 by the Cornell Bioacoustic Research Program.
}
\keyword{datasets}
|
4703ae5922f786517dbbb331c8c40668dd002d4a
|
5e4b2875992bd59ff81a39d5898edfb881c963e0
|
/R/hello.R
|
e8ff43f59b80f9bdb3fbb21eaf78215862e1c77f
|
[] |
no_license
|
A-Mani/RapidRoughSets
|
000202f67e909dd15a3150f2007760226fe139e4
|
2f25ce5ba9f31d47cfa35d76b2d758c243f291ac
|
refs/heads/master
| 2020-05-29T11:04:56.183312
| 2015-09-03T20:30:11
| 2015-09-03T20:30:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,738
|
r
|
hello.R
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
#' @export
# Demo function: prints a greeting, then runs a small rough-set rule-induction
# example on the RoughSets wine dataset (shuffle, 60/40 split, quantile
# discretization, AQ rule induction).
# NOTE(review): data(RoughSetData) only works if the RoughSets package is
# attached; consider data(RoughSetData, package = "RoughSets") -- confirm.
hello <- function() {
  print("Hello, world!")
  data(RoughSetData)
  wine.data <- RoughSetData$wine.dt
  set.seed(13)                                  # reproducible shuffle
  wine.data <- wine.data[sample(nrow(wine.data)),]
  idx <- round(0.6 * nrow(wine.data))           # 60% training split
  wine.tra <- RoughSets::SF.asDecisionTable(wine.data[1:idx,],
                                            decision.attr = 14,
                                            indx.nominal = 14)
  cut.values <- RoughSets::D.discretization.RST(wine.tra,
                                                type.method = "unsupervised.quantiles",
                                                nOfIntervals = 3)
  data.tra <- RoughSets::SF.applyDecTable(wine.tra, cut.values)
  print(class(data.tra))
  # NOTE(review): 'rules' is assigned twice; the second call resolves to the
  # local stub RI.AQRules.RST defined below, overwriting the real result.
  rules <- RoughSets::RI.AQRules.RST(data.tra, confidence = 0.9, timesCovered = 3)
  rules <- RI.AQRules.RST(data.tra, confidence = 0.9, timesCovered = 3)
  rules
}
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
#' @export
#' Stub for AQ rule induction.
#'
#' Placeholder that shadows RoughSets::RI.AQRules.RST within this package;
#' it ignores all inputs and always returns 1.
#'
#' @param decision.table decision table (ignored by this stub).
#' @param confidence minimum rule confidence (ignored by this stub).
#' @param timesCovered minimum coverage count (ignored by this stub).
#' @return The constant 1.
RI.AQRules.RST <- function(decision.table, confidence = 1.0, timesCovered = 1) {
  # Idiom: last expression is the return value; explicit return() is only
  # needed for early exits.
  1
}
|
0983539815307873228ee36f5d472b4817b2067e
|
f5b5aaa6bce2ddfb76ee0b519533e72d254fd125
|
/Plot4.R
|
ee3f437fffb33ee9b93d11ad3a2ea20e91e05a83
|
[] |
no_license
|
JulesBuh/ExData_Plotting1
|
6d73beaafe8415a2eaea65016b98b1275cdce009
|
cc6d621e06c785420dff7eed65d08c5440d105df
|
refs/heads/master
| 2021-01-18T17:34:16.917814
| 2017-04-02T16:30:47
| 2017-04-02T16:30:47
| 86,810,546
| 0
| 0
| null | 2017-03-31T11:08:26
| 2017-03-31T11:08:26
| null |
UTF-8
|
R
| false
| false
| 19,423
|
r
|
Plot4.R
|
##Plot 4----
#> Description----
# outputs a line graph as a png file with a transparent background
# subsets with differnt colours and labels
#>0 Input and prerequsites----
#date range 2007-02-01 and 2007-02-02
studyPeriodMin<-as.Date("2007-02-01")
studyPeriodMax<-as.Date("2007-02-02")
#gets the name of the subset filename if the script has been previously loaded
# NOTE(review): <<- at the top level of a script behaves the same as <- here;
# the superassignment is unnecessary.
subsetFilename<<-paste("household_power_consumption",
                       studyPeriodMin,
                       studyPeriodMax,
                       "subset.txt",
                       sep="-")
#this is the name of the output graphic filename
PlotFilename<-"Plot4.png"
# 0.0 reads the data----
# As the file read is a lengthy process due to size, the following script
# checks to see if a download and subset file has already been created or is
# already within R where the script has already ran previously for one of the other
# plot scripts)
# NOTE(review): the exists("dataRead") cache means a stale dataRead from a
# previous session (possibly with a different date range) is silently reused.
# Load the household power consumption data for the study period, preferring
# (in order): an in-memory dataRead, a previously saved subset file, or a
# fresh download + chunked parse of the full dataset.
if(!exists("dataRead")){
  #0.1A if a subset has already been previously saved load this instead----
  if(file.exists(subsetFilename)){
    preDeterminedColclass<-c("character",
                             "character",
                             "numeric",
                             "numeric",
                             "numeric",
                             "numeric",
                             "numeric",
                             "numeric",
                             "numeric")
    dataRead<-read.table(subsetFilename,
                         header=TRUE,
                         sep=";",
                         colClasses=preDeterminedColclass,
                         stringsAsFactors = FALSE,
                         na.strings = "?")
    dataRead$Time<-as.POSIXct(dataRead$Time)
    dataRead$Date<-as.Date(dataRead$Date)
    rm("preDeterminedColclass")
  }
  #0.1B if starting from scratch the data needs to be read----
  if(!file.exists(subsetFilename)){
    #0.2B downloads the data----
    if(!file.exists("data.zip")){
      download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","data.zip")
    }
    #0.3B unzips the data----
    filename<-unzip("data.zip",list=TRUE)[[1]]
    unzip("data.zip",filename)
    #0.4B preparation to read the data----
    if(!file.exists(subsetFilename)){
      # Chunked parser: scans the big file in 2000-line chunks, locates the
      # cluster of rows inside the study period, reads it, and saves the
      # subset for next time. Assumes dates are roughly sequential.
      # NOTE(review): this function uses readline() prompts, so it blocks in
      # non-interactive sessions.
      readFunction<-function(){
        #0.4.0 Preparation for read----
        #0.4.0.0 Sizing up the file----
        message("please wait...")
        #0.4.0.1 gets the number of items per line
        colCount<-max(count.fields(filename, sep = ";"))
        #0.4.0.2 gets the number of lines from the file
        rowCount<-length(readLines(filename))
        #0.4.0.3 selects the first column within colClasses
        colSelect<-c(rep("character", 1), rep("NULL", colCount-1))
        #0.4.1 Preparing the dataframe----
        #0.4.1.05 predetermine the colclasses to load
        #The dataset has 2,075,259 rows and 9 columns.
        #The columns are:
        #1.Date: Date in format dd/mm/yyyy
        #2.Time: time in format hh:mm:ss
        #3.Global_active_power: household global minute-averaged active power (in kilowatt)
        #4.Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
        #5.Voltage: minute-averaged voltage (in volt)
        #6.Global_intensity: household global minute-averaged current intensity (in ampere)
        #7.Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
        #8.Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
        #9.Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
        preDeterminedColclass<-c("character",
                                 "character",
                                 "numeric",
                                 "numeric",
                                 "numeric",
                                 "numeric",
                                 "numeric",
                                 "numeric",
                                 "numeric")
        #0.4.1.1 gets the header of the data only
        header<-read.table(filename,header=FALSE,sep=";",skip=0,nrows = 1,stringsAsFactors = FALSE)
        keep<-NULL
        #0.4.2 Initialises chunking process for read-----
        #0.4.2.0 sets the general chunk size
        chunkSize<-NULL
        chunkSize$main<-2000
        #0.4.2.1 sets the chunking positions
        chunkPositions<-NULL
        chunkPositions$seq<-seq(from=1, to=rowCount-1, by = chunkSize$main)
        chunkPositions$count<-length(chunkPositions$seq)
        chunkPositions$last<-chunkPositions$seq[chunkPositions$count]
        chunkSize$last<-rowCount-chunkPositions$last
        # NOTE(review): trackLoop is initialised but never used below.
        chunkPositions$trackLoop<-1
        #Function to perform the read once the position of the study period has been determined
        # Reads 'chunkFull' rows starting at 'startPos', subsets them to the
        # study period, assigns the result to the global dataRead (<<-) and
        # writes the subset cache file.
        # NOTE(review): names(dataRead)<-header assigns names from a 1-row
        # data.frame; it appears to rely on implicit coercion -- confirm.
        PerformTheRead<-function(startPos=startLine,chunkFull=ch){
          dataRead<- read.table(filename,
                                header=FALSE,
                                sep=";",
                                colClasses=preDeterminedColclass,
                                skip= startPos,
                                nrows = chunkFull,
                                stringsAsFactors = FALSE,
                                na.strings = "?")
          #add the headers
          names(dataRead)<-header
          #convert the character dates to POSIXlt dates
          dataRead$Time<-as.POSIXct(strptime(paste(dataRead$Date,dataRead$Time,sep="_"),"%d/%m/%Y_%H:%M:%S"))
          dataRead$Date<-as.Date(strptime(dataRead$Date,"%d/%m/%Y"))
          #subsets the chunk to only include the date range specified
          dataRead<-subset(dataRead,dataRead$Date>=studyPeriodMin&
                             dataRead$Date<=studyPeriodMax)
          dataRead<<-dataRead
          write.table(dataRead,file = subsetFilename, row.names = FALSE, sep=";", na="?")
        }
        #set chunk import counter
        #prepare tester
        tester<-as.logical(NULL)
        converge<-c(as.Date("1970-01-01"))
        increment<-as.numeric(NULL)
        ch<-1L
        message(paste("Expecting to process up to",chunkPositions$count,"chunks.",
                      "\r\n",
                      "This parsing assumes all dates are close together and stops",
                      "after it finds a cluster within the date range specified",
                      "\r\n",
                      "If this is not appropriate for the dataset for example",
                      "the dates aren't ordered sequentially",
                      ",an alternative parsing method should be adopted"))
        readline(paste("Press 'Enter' to continue or Press 'Esc' to abort"))
        if(rowCount>500000){
          message("This will take a while ")
        }
        message("please wait...")
        # Scan chunk-by-chunk: classify each chunk's date range relative to
        # the study period, record which chunks overlap it, and bail out early
        # once the cluster has clearly been passed.
        # NOTE(review): tester/converge/increment are indexed by the sparse
        # chunk-start positions i (1, 2001, 4001, ...), so increment[i] is
        # computed from converge[i-1], which is never populated -- the
        # prod(increment, na.rm=TRUE) guard therefore reduces to 1; verify
        # this is the intended monotonicity check.
        for(i in unlist(chunkPositions$seq)){
          #resets the chunk sample
          chunk<-NULL
          #reads the line
          chunk$test$date<-read.table(filename,header=FALSE,sep=";",colClasses=colSelect,skip=i,nrows = chunkSize$main,stringsAsFactors = FALSE)
          chunk$test$averageDate<-mean(as.Date(strptime(chunk$test$date[,1],"%d/%m/%Y")))
          chunk$test$minDate<-min(as.Date(strptime(chunk$test$date[,1],"%d/%m/%Y")))
          chunk$test$maxDate<-max(as.Date(strptime(chunk$test$date[,1],"%d/%m/%Y")))
          #tester for each line
          #chunkStudyRelationship
          #print(paste(chunk$test$minDate,studyPeriodMin,chunk$test$maxDate,studyPeriodMax))
          if(chunk$test$maxDate < studyPeriodMin|
             chunk$test$minDate > studyPeriodMax){
            chunk$test$rel<-"outside"
          }else{
            chunk$test$rel<-"inside"
            if((chunk$test$minDate < studyPeriodMin && chunk$test$maxDate < studyPeriodMax && chunk$test$maxDate > studyPeriodMin)|
               (chunk$test$minDate < studyPeriodMax && chunk$test$minDate > studyPeriodMin && chunk$test$maxDate > studyPeriodMax)){
              chunk$test$rel<-"overlap"
              print(chunk$test$rel)
            }
            if((chunk$test$minDate < studyPeriodMin && chunk$test$maxDate < studyPeriodMax && chunk$test$maxDate > studyPeriodMin)|
               (chunk$test$minDate < studyPeriodMin && chunk$test$maxDate > studyPeriodMax))
            {
              chunk$test$rel<-"inside"
              print(chunk$test$rel)
            }
          }
          ifelse(chunk$test$rel=="inside"|chunk$test$rel=="overlap",tester[i]<-TRUE,tester[i]<-FALSE)
          converge[i]<-chunk$test$averageDate
          increment[i]<-converge[i]-converge[i-1]>=0
          if(mean(tester,na.rm=TRUE)>0&&mean(converge,na.rm=TRUE)>studyPeriodMax&&prod(increment,na.rm=TRUE)!=0){
            PerformTheRead();
            #0.4.9.0 Early Return----
            #conditions are that the tester has to have return true atleast once
            #the average of all chunk dates must exceed the upper end of the study period
            #the averge of the chunks need to have continually incremented
            #if these conditions are not met, the full source file is chunked and takes considerably longer to complete
            message(paste("The process has retrieved a cluster for the",
                          "date range specified and hasn't found any",
                          "matched dates since encountering the cluster.",
                          "\r\n",
                          "If you believe there should be more observations",
                          "then you should run read.table() on the full dataset",
                          "\r\n"))
            readline(paste("The records are stored in the variable called dataRead"))
            return(str(dataRead))
          }
          if(tester[i]){
            #reads the line into keep
            print(paste("Found",as.integer((ch+chunkSize$main)/chunkSize$main),"chunks at line",i))
            if(!exists("startLine")){
              startLine<-i
            }
            ch<-ch+chunkSize$main
          }
          print(paste(chunk$test$rel,"date range. - approximate chunk date is",chunk$test$averageDate,
                      "...",as.integer(((i+chunkSize$main)/chunkSize$main)*100/chunkPositions$count), "% - (", i,"lines read)"))
        }
        PerformTheRead();
        message(paste("The process has completed the whole read",
                      "any results returned will be stored within 'dataRead'",
                      "and should be filtered further as it may have captured surrounding",
                      "dates within the chunk.\r\n"))
      }
      #0.4.9 reads the data----
      readFunction()
    }
  }
  #0.9 Returns variable----
  #`dataRead`
}
#>1 Function Body----
#1.0 ensures the data is filtered to the date range----
dataRead<-subset(dataRead,dataRead$Date>=studyPeriodMin&
                   dataRead$Date<=studyPeriodMax)
#1.1 initialise the graphic device----
# default size is already 480 x 480 px
grDevices::png(PlotFilename,bg="transparent",antialias = "cleartype")
#1.2 creates the linegraph----
# 2x2 panel layout: active power, voltage, sub-metering, reactive power.
#sets the two by two grid
graphics::par(mfrow=c(2,2))
graphics::par(col="black")
#1.2.1 Graph Upper Left: Global Active Power over time----
graphics::plot (x=dataRead$Time,
                y=dataRead$Global_active_power,
                type="l",
                col="black",
                ylab="Global Active Power",
                xlab=""
)
#1.2.2 Graph Upper Right: Voltage over time----
graphics::plot (x=dataRead$Time,
                y=dataRead$Voltage,
                type="l",
                col="black",
                ylab="Voltage",
                xlab="datetime"
)
#1.2.3 Lower Left: the three sub-metering channels on one set of axes----
# An empty (type="n") plot is drawn first over the combined ranges so the
# axes accommodate all three series; the lines are added afterwards.
graphics::plot (x=c(dataRead$Time,
                    dataRead$Time,
                    dataRead$Time),
                y=c(dataRead$Sub_metering_1,
                    dataRead$Sub_metering_2,
                    dataRead$Sub_metering_3),
                type="n",
                col="black",
                ylab="Energy sub metering",
                xlab=""
)
#sets the colours to cycle through
lineCol<-c("black","red","blue")
#draws the legend
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       text.col="black",
       col=lineCol,
       bg = "transparent",
       box.col = "transparent",
       lty=1,
       lwd=1,
       seg.len=1)
#draws the lines
graphics::par(col=lineCol[1])
graphics::lines (x=dataRead$Time,
                 y=dataRead$Sub_metering_1,
                 type="l")
graphics::par(col=lineCol[2])
graphics::lines(x=dataRead$Time,
                y=dataRead$Sub_metering_2,
                type="l")
graphics::par(col=lineCol[3])
graphics::lines (x=dataRead$Time,
                 y=dataRead$Sub_metering_3,
                 type="l")
#1.2.4 Graph Lower Right: Global Reactive Power over time----
# (original comment said "Upper Right"; this is the fourth panel)
graphics::par(col="black")
graphics::plot (x=dataRead$Time,
                y=dataRead$Global_reactive_power,
                type="l",
                col="black",
                xlab="datetime"
)
#1.9 closes the graphic device----
grDevices::dev.off()
#>9 Returns----
if(file.exists(PlotFilename)){
  message(PlotFilename," saved to working directory")
}
|
816830d5fe6f04966f3adffdf2131b4ac47da9f7
|
da79c7c583e1786763e8fb4c85609de33dea1996
|
/Homework-3/ENVE_681_HW_3.R
|
c80c987eea5f3f50d8c14c99421ac6defd12384c
|
[] |
no_license
|
nhorscroft/ENVE-681
|
13815d9ef6033281db7ff3dff43c5b7d537ea30c
|
2d25be025c836b660bba712f4a261f11f0224709
|
refs/heads/master
| 2021-08-15T14:50:58.020585
| 2017-11-17T21:32:58
| 2017-11-17T21:32:58
| 107,407,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,682
|
r
|
ENVE_681_HW_3.R
|
# Homework 3 ------------------
# Nicola Horscroft
# 10/23/17
#------------------------------
# Finite-difference solution of groundwater mounding on a rectangular island
# (Poisson equation with recharge R and transmissivity T) on a regular grid.
library(tidyverse)
library(lubridate)
library(plotly)
# Define input variables
#------------------------------------------------------------------------------
# x_len = length in x direction in meters
x_len = 600
# y_len = length in y direction in meters
y_len = 1000
# del = change in distance along both x and y
# (for simplicity the change is the same in both x & y directions)
del = 25
# R = recharge in cm/yr
R = 50
# Convert R to m/yr
# NOTE(review): dividing by 100*365 actually converts cm/yr to m/DAY, which is
# consistent with T in m^2/day below -- the comment is misleading, the units
# work out.
R = R/(100*365)
# T = transmissivity in m^2/day
# NOTE(review): 'T' as a variable name masks the built-in shorthand for TRUE;
# avoid using T/F elsewhere in this script.
T = 200
# x_nodes = number of nodes in x direction not including the border
x_nodes = x_len/del - 1
# y_nodes = number of nodes in y direction not including the border
y_nodes = y_len/del - 1
# total_nodes = total number of nodes in system
total_nodes = x_nodes * y_nodes
# Creating answer matrix
#------------------------------------------------------------------------------
# Right-hand side of the linear system: interior nodes get -R*del^2/T;
# boundary-adjacent nodes additionally carry the fixed head (10 m) of the
# adjacent border node(s); corner-adjacent nodes carry two border nodes (20).
# Nodes are numbered column-major: node k = (row in x) + x_nodes*(row in y).
# creating corner boundaries variables for matrix
corner_1 = 1
corner_2 = x_nodes
corner_3 = total_nodes
corner_4 = total_nodes - x_nodes + 1
# define answer matrix as rhs
# fill in initial matrix with general overall equation
rhs = matrix((-R*(del^2)/T), total_nodes,1)
# fill in conditions for top and bottom boundaries
# NOTE(review): the bottom-row index total_nodes - i runs from total_nodes-1
# down to total_nodes-x_nodes, i.e. it misses node total_nodes and touches one
# node of the row above; the final values are still correct only because the
# side-boundary loop and the corner assignments below overwrite the affected
# cells with the same/intended values. total_nodes - i + 1 would be clearer.
for (i in 1:x_nodes)
{
  rhs[i,1] = (-R*(del^2)/T) - 10
  rhs[total_nodes- i, 1] = (-R*(del^2)/T) - 10
}
# fill in conditions for side boundaries
for (i in 1:(y_nodes-1))
{
  rhs[x_nodes*i, 1] = (-R*(del^2)/T) - 10
  rhs[(x_nodes*i) + 1 , 1] = (-R*(del^2)/T) - 10
}
# fill in corner boundaries
rhs[corner_1,1] = (-R*(del^2)/T) - 20
rhs[corner_2,1] = (-R*(del^2)/T) - 20
rhs[corner_3,1] = (-R*(del^2)/T) - 20
rhs[corner_4,1] = (-R*(del^2)/T) - 20
# Creating coefficient matrix
#------------------------------------------------------------------------------
# Standard 5-point Laplacian stencil: -4 on the diagonal, +1 for the four
# neighbours; wrap-around couplings across column boundaries are zeroed below.
# define coeff as coefficient matrix with the intial diagnols as -4
coeff = diag(total_nodes) * -4
# create 2 offset diagonals as 1
diag(coeff[-1,]) = 1
diag(coeff[,-1]) = 1
# create offset diagnols with one (x_nodes + 1) rows down
diag(coeff[(x_nodes + 1):total_nodes,]) = 1
# create offset diagnols with one (x_nodes + 1) columns over
diag(coeff[,(x_nodes + 1):total_nodes]) = 1
# Modify coefficient matrix for boundary conditions
# (remove spurious coupling between the last node of one grid column and the
# first node of the next)
for (i in 1:(y_nodes - 1))
{
  coeff[(i * x_nodes),(i * x_nodes + 1)] = 0
  coeff[(i * x_nodes + 1),(i * x_nodes)] = 0
}
# Solving for h matrix
#------------------------------------------------------------------------------
# define h as matrix for h values of groundwater
h = solve(coeff,rhs)
# Reassemble h into matrix that matches nodes, then pad all four edges with
# the 10 m boundary head so the plotted surface includes the border.
z = matrix(h, x_nodes,y_nodes)
i = matrix(10 , x_nodes, 1)
j = matrix(10, 1, y_nodes + 2)
z = cbind(i, z)
z = cbind(z, i)
z = rbind(j, z)
z = rbind(z, j)
# graphing matrix
#------------------------------------------------------------------------------
plot_ly(z = matrix(z , x_nodes + 2 , y_nodes + 2)) %>%
  add_surface()
# layout(scene(xaxis = list(title = "length"),
#             yaxis = list(title = "width"),
#            zaxis = list(title = "height of groundwater")),
#     title = "Groundwater Mounding on Island")
#------------------------------------------------------------------------------
# BONUS
#------------------------------------------------------------------------------
# Same problem as above, but with a rectangular inlet (fixed head of 10 m)
# cut into one corner of the island; the rhs and coefficient matrix are
# modified so that nodes inside the inlet are pinned to the boundary head.
# Same variable from previous section
# Creating answer matrix
#------------------------------------------------------------------------------
trans_rhs = matrix(rhs, x_nodes, y_nodes)
x_inlet = 200
y_inlet = 300
x_inlet_nodes = x_inlet/del
y_inlet_nodes = y_inlet/del
# Corner node indices of the inlet rectangle within the (x_nodes x y_nodes)
# grid; the inlet occupies the top-left corner of the domain.
inlet_corner_x1 = x_inlet_nodes - (x_inlet_nodes - 1)
inlet_corner_y1 = y_nodes - y_inlet_nodes
inlet_corner_x2 = x_inlet_nodes + 1
inlet_corner_y2 = y_nodes
# Updating new boundary conditions
# Nodes adjacent to the inlet edge pick up the 10 m fixed head:
for (i in (inlet_corner_x1 + 1):x_inlet_nodes)
{
  trans_rhs[i, inlet_corner_y1] = (-R*(del^2)/T) - 10
}
for (j in (inlet_corner_y1 + 1):y_nodes)
{
  trans_rhs[x_inlet_nodes + 1, j] = (-R*(del^2)/T) - 10
}
# Update corners
trans_rhs[inlet_corner_x1, inlet_corner_y1] = (-R*(del^2)/T) - 20
trans_rhs[inlet_corner_x2, inlet_corner_y2] = (-R*(del^2)/T) - 20
# updating where inlet is
# Nodes strictly inside the inlet are pinned to head = 10 (matching the unit
# diagonal set in coeff_2 below).
for (i in (inlet_corner_x1):x_inlet_nodes)
{
  for (j in (inlet_corner_y1 + 1):y_nodes)
  {
    trans_rhs[i, j] = 10
  }
}
# making it a single column matrix
rhs_2 = matrix(trans_rhs, total_nodes, 1)
# Creating coefficient matrix
#------------------------------------------------------------------------------
coeff_2 <- coeff
# Modify coefficient matrix for new boundary conditions:
# inlet nodes become identity rows (h = rhs value) and their couplings to
# neighbouring nodes are removed.
for (j in 1:y_inlet_nodes)
{
  for (i in 1:x_inlet_nodes)
  {
    coeff_2[(x_nodes*(y_nodes-j) + i),(x_nodes*(y_nodes-j) + i)] = 1
    coeff_2[(x_nodes*(y_nodes-j) + i + 1),(x_nodes*(y_nodes-j) + i)] = 0
    coeff_2[(x_nodes*(y_nodes-j) + i),(x_nodes*(y_nodes-j) + i + 1)] = 0
    coeff_2[(x_nodes*(y_nodes-j) + i),(x_nodes*(y_nodes-j) + i - x_nodes)] = 0
    coeff_2[(x_nodes*(y_nodes-j) + i - x_nodes),(x_nodes*(y_nodes-j) + i)] = 0
  }
}
# Solving for h matrix
#------------------------------------------------------------------------------
# define h as matrix for h values of groundwater
h_2 = solve(coeff_2,rhs_2)
# Reassemble h into matrix that matches nodes and pad edges with the 10 m
# boundary head, as in the base case.
z_2 = matrix(h_2, x_nodes,y_nodes)
i = matrix(10 , x_nodes, 1)
j = matrix(10, 1, y_nodes + 2)
z_2 = cbind(i, z_2)
z_2 = cbind(z_2, i)
z_2 = rbind(j, z_2)
z_2 = rbind(z_2, j)
# graphing matrix
#------------------------------------------------------------------------------
plot_ly(z = matrix(z_2 , x_nodes + 2 , y_nodes + 2)) %>%
  add_surface()
|
cdd0d75629a5ce1e28858be4c6921d7bdaa67918
|
738bb985cee6f38c65b1bc627545a546918d67ae
|
/src/prediction_binary.R
|
87c7a3668b43c2cb4d934dcdf80192b1e71290c6
|
[] |
no_license
|
zrxing/multiseq_analysis
|
ca610a62fa1b6625808baac72a157ea2acb534c6
|
6c0905d98488bc0054dccff118c193d4fdc676b0
|
refs/heads/master
| 2021-01-17T13:17:12.345993
| 2016-06-16T22:30:25
| 2016-06-16T22:30:25
| 34,276,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,235
|
r
|
prediction_binary.R
|
# Predicting TF binding (bound vs unbound) from DNase-seq cut profiles around
# Batf motif sites using multiseq effect estimates.
library(multiseq)
nbase = 2^7
# normalize: scale a vector to sum to 1 (presumably used further down the
# script -- not referenced in this section).
normalize = function(x) x/sum(x)
# dprof: per-site DNase cut profiles; cinfo: site coordinates + ChIP counts.
dprof = read.table("data/Batf_S356_dnase_data.txt.gz")
cinfo = read.table("data/Batf_S356_site_and_chip_data.txt.gz",header = TRUE)
ccount = cinfo[,6]
# NOTE(review): this first 80/20 train/test split is computed on the
# unfiltered data and is entirely overwritten after peak filtering below --
# it is dead work and could be removed.
train.size = round(0.8*dim(dprof)[1])
set.seed(417)
train.ind = sample(1:dim(dprof)[1],train.size,replace = FALSE)
train.ind = (1:dim(dprof)[1])%in%train.ind
dprof.train = dprof[train.ind,]
dprof.test = dprof[!train.ind,]
ccount.train = ccount[train.ind]
ccount.test = ccount[!train.ind]
# MACS peak calls used to label sites as bound:
peak.info = read.table("data/macs_peaks/Batf_q1pc_peaks.bed")
# Flag, for each candidate site, whether it lies strictly inside any MACS
# peak on the same chromosome (BED cols: 1 = chrom, 2 = start, 3 = end;
# cinfo cols 1-3 analogous for the site).
# Rewritten with vapply(): the original grew `peak.ind` element-by-element
# from a scalar 0 inside a 1:nrow() loop (O(n^2) copying, and 1:nrow breaks
# on zero-row input). as.numeric() keeps the original 0/1 numeric result
# (logical assigned into a numeric vector was coerced to 0/1).
peak.ind = vapply(seq_len(dim(cinfo)[1]), function(i) {
  as.numeric(sum((as.character(peak.info[, 1]) == as.character(cinfo[i, 1])) & (peak.info[, 2] < cinfo[i, 2]) & (peak.info[, 3] > cinfo[i, 3])) > 0)
}, numeric(1))
# Label sites: "unbound" = no peak overlap AND below-median ChIP count;
# "bound" = overlaps a peak. Sites in neither class are dropped.
unbound.ind = !(peak.ind) & cinfo[, 6] < median(cinfo[, 6])
bound.ind = as.logical(peak.ind)
# Keep only clearly bound or clearly unbound sites.
filter.ind = bound.ind == 1 | unbound.ind == 1
unbound.ind = unbound.ind[filter.ind]
bound.ind = bound.ind[filter.ind]
dprof = dprof[filter.ind, ]
cinfo = cinfo[filter.ind, ]
ccount = cinfo[,6]
# Recompute the 80/20 split on the filtered data (same seed as above).
train.size = round(0.8*dim(dprof)[1])
set.seed(417)
train.ind = sample(1:dim(dprof)[1],train.size,replace = FALSE)
train.ind = (1:dim(dprof)[1])%in%train.ind
dprof.train = dprof[train.ind,]
dprof.test = dprof[!train.ind,]
ccount.train = ccount[train.ind]
ccount.test = ccount[!train.ind]
unbound.ind.train = unbound.ind[train.ind]
unbound.ind.test = unbound.ind[!train.ind]
bound.ind.train = bound.ind[train.ind]
bound.ind.test = bound.ind[!train.ind]
# 128-column windows for the forward and reverse strands.
# NOTE(review): the offsets 3584/10752 encode the layout of dprof's columns
# (forward/reverse strand blocks) -- confirm against the data format.
base.ind.for = 4033:4160 - 3584
base.ind.rev = 12225:12352 - 10752
# Fit multiseq nr times on random 1000-site training subsamples and average
# the estimated baseline and bound-vs-unbound effect profiles.
nr = 10
eff.for = matrix(0,nr,nbase)
eff.rev = matrix(0,nr,nbase)
base.for = matrix(0,nr,nbase)
base.rev = matrix(0,nr,nbase)
for(i in 1:nr){
ind = sample(1:dim(dprof.train)[1],1000)
est.for = multiseq(as.matrix(dprof.train[ind,base.ind.for]),as.numeric(bound.ind.train[ind]),lm.approx = TRUE)
est.rev = multiseq(as.matrix(dprof.train[ind,base.ind.rev]),as.numeric(bound.ind.train[ind]),lm.approx = TRUE)
base.for[i,] = est.for$baseline.mean
base.rev[i,] = est.rev$baseline.mean
eff.for[i,] = est.for$effect.mean
eff.rev[i,] = est.rev$effect.mean
print(i)
}
base.mean.for = colMeans(base.for)
base.mean.rev = colMeans(base.rev)
eff.mean.for = colMeans(eff.for)
eff.mean.rev = colMeans(eff.rev)
##estimating prior: P(unbound), P(bound) from training-set class frequencies
ccount.prior.prob = c(1 - mean(bound.ind.train), mean(bound.ind.train))
##computing the likelihood
# Row 1 = Poisson rates per base under "unbound" (baseline), row 2 = under
# "bound" (baseline + effect), on the count scale (exp of log-rates).
lambda.for = exp(matrix(c(base.mean.for, base.mean.for + eff.mean.for), nr = 2, byrow = TRUE))
lambda.rev = exp(matrix(c(base.mean.rev, base.mean.rev + eff.mean.rev), nr = 2, byrow = TRUE))
# Per-test-site likelihood of the observed profile under each class; the
# max log-likelihood is subtracted per site for numerical stability before
# exponentiating (constant factors cancel after normalisation).
lik.for = matrix(0, nr = length(ccount.test), nc = 2)
lik.rev = matrix(0, nr = length(ccount.test), nc = 2)
for(i in 1:length(ccount.test)){
loglik.ini.for = rowSums(t(apply(lambda.for,1,dpois,x = as.numeric(dprof.test[i,base.ind.for]),log = TRUE)))
loglik.ini.for = loglik.ini.for - max(loglik.ini.for)
loglik.ini.rev = rowSums(t(apply(lambda.rev,1,dpois,x = as.numeric(dprof.test[i,base.ind.rev]),log = TRUE)))
loglik.ini.rev = loglik.ini.rev - max(loglik.ini.rev)
lik.for[i,] = exp(loglik.ini.for)
lik.rev[i,] = exp(loglik.ini.rev)
}
##computing the posterior
# ccount.post.val = ccount.prior.val
# ccount.post.prob.for = lik.for * (rep(1,length(ccount.test))%o%ccount.prior.prob)
# ccount.post.prob.rev = lik.rev * (rep(1,length(ccount.test))%o%ccount.prior.prob)
# ccount.post.prob.for = t(apply(ccount.post.prob.for,1,normalize))
# ccount.post.prob.rev = t(apply(ccount.post.prob.rev,1,normalize))
#
# ccount.post.mean.for = 0
# ccount.post.mode.for = 0
# ccount.post.logmean.for = 0
# ccount.post.mean.rev = 0
# ccount.post.mode.rev = 0
# ccount.post.logmean.rev = 0
# for(i in 1:length(ccount.test)){
#   ccount.post.mode.for[i] = exp(ccount.post.val[which(ccount.post.prob.for[i,]==max(ccount.post.prob.for[i,]))])
#   ccount.post.mean.for[i] = sum(exp(ccount.post.val)*ccount.post.prob.for[i,])
#   ccount.post.logmean.for[i] = sum(ccount.post.val*ccount.post.prob.for[i,])
#   ccount.post.mode.rev[i] = exp(ccount.post.val[which(ccount.post.prob.rev[i,]==max(ccount.post.prob.rev[i,]))])
#   ccount.post.mean.rev[i] = sum(exp(ccount.post.val)*ccount.post.prob.rev[i,])
#   ccount.post.logmean.rev[i] = sum(ccount.post.val*ccount.post.prob.rev[i,])
#
# }
# Combine both strands (independence assumption) with the prior, then
# normalise each row to a posterior over (unbound, bound).
ccount.post.prob = lik.for * lik.rev * (rep(1,length(ccount.test))%o%ccount.prior.prob)
ccount.post.prob = t(apply(ccount.post.prob,1,normalize))
# ROC of the multiseq posterior against the true bound labels.
roc.res.ms = roc(bound.ind.test ~ ccount.post.prob[,1])
plot(roc.res.ms)
# PWM scores (column 5) for the CENTIPEDE+PWM variant.
pscore = cinfo[,5]
pscore.test = pscore[!train.ind]
# NOTE(review): in the next call `nc = 1` sits INSIDE rep(), not matrix() --
# looks like a misplaced parenthesis (intended matrix(rep(1, n), nc = 1));
# confirm against fitCentipede's expected Y format before changing.
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(dprof.test[, c(413:612, 1437:1636)])), Y=matrix(rep(1,dim(dprof.test)[1], nc = 1)), sweeps = 300)
centFit.pwm <- fitCentipede(Xlist = list(DNase=as.matrix(dprof.test)[, c(413:612, 1437:1636)]), Y=cbind(rep(1,dim(dprof.test)[1]),pscore.test))
#roc.res.cent = rocplot(centFit$PostPr[unbound.ind], centFit$PostPr[bound.ind])
#roc.res.cent.pwm = rocplot(centFit.pwm$PostPr[unbound.ind], centFit.pwm$PostPr[bound.ind])
#lines(roc.res.cent$fpr, roc.res.cent$tpr, col = 2)
#lines(roc.res.cent.pwm$fpr, roc.res.cent.pwm$tpr, col = 3)
# Overlay CENTIPEDE ROC curves (red: flat prior, green: PWM-informed).
roc.res.cent = roc(bound.ind.test ~ as.vector(centFit$PostPr))
roc.res.cent.pwm = roc(bound.ind.test ~ as.vector(centFit.pwm$PostPr))
lines(roc.res.cent, col = 2)
lines(roc.res.cent.pwm, col = 3)
# Baseline classifier: total DNase cut count in the window (blue).
dcut = rowSums(dprof.test[, c(413:612, 1437:1636)])
#roc.res.dcut = rocplot(dcut[unbound.ind], dcut[bound.ind])
#lines(roc.res.dcut$fpr,roc.res.dcut$tpr, col = 4)
roc.res.dcut = roc(bound.ind.test ~ dcut)
lines(roc.res.dcut, col = 4)
# Scatter: CENTIPEDE posterior vs multiseq posterior per site.
plot(as.vector(centFit$PostPr), ccount.post.prob)
##look at differences between centipede and multiseq
# Compare CENTIPEDE's multinomial log-ratio with the multiseq-implied one
# (shape of the profile), and the negative-binomial log-ratio with the
# Poisson total-count log-ratio; y = x (red) indicates agreement.
plot(centFit$MultiNomLogRatio,as.numeric(apply(as.matrix(dprof.test[, base.ind.for]), 1, dmultinom, size = NULL, prob = lambda.for[2, ], log = TRUE) - apply(as.matrix(dprof.test[, base.ind.for]), 1, dmultinom, size = NULL, prob = lambda.for[1, ], log = TRUE)))
abline(0, 1, col = 2)
plot(centFit$NegBinLogRatio, as.numeric(dpois(rowSums(as.matrix(dprof.test[, base.ind.for])), lambda = sum(lambda.for[2, ]), log = TRUE) - dpois(rowSums(as.matrix(dprof.test[, base.ind.for])), lambda = sum(lambda.for[1, ]), log = TRUE)))
abline(0, 1, col = 2)
|
d619de9480171b75d9b8e6be35d570b6f34c8554
|
453af6b3999d75b9fa9ab0521c38e530b3dfacb0
|
/data-raw/gesundheitsaemter_geolocation.R
|
93e8f62ce6ac4441d3d2e17feeacd0fb8f47b06a
|
[
"MIT"
] |
permissive
|
gstephan30/sormasmap
|
5824be33f8f053da477524493414e1e9f3a7ea56
|
62a506c341695e10837fcf52bb4474c8a1ed7b7f
|
refs/heads/master
| 2023-02-15T20:36:10.767043
| 2021-01-15T13:25:05
| 2021-01-15T13:25:05
| 329,615,108
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,751
|
r
|
gesundheitsaemter_geolocation.R
|
# Geocode German public-health offices (Gesundheitsaemter) via the Google
# Maps API (ggmap) and package the result as a dataset.
library(tidyverse)
library(ggmap)
library(janitor)
# import data: provides `gesus_nested` (offices with address columns).
load("data-raw/gesundheitsaemter_plz.rda")
# prep data: build one free-text geocoding query per office
# (street + postal code + place), trimmed of surrounding whitespace.
gesus_nested_prep <- gesus_nested %>%
mutate(geo_string = paste(Street, Postalcode, Place),
geo_string= str_trim(geo_string)) %>%
# drop offices without any address (empty query string)
filter(geo_string != "")
# Geocode one authority address via the Google Maps API (ggmap::geocode)
# and return its "results" entries as a wide tibble tagged with `id`.
# Prints the query string as a progress indicator.
get_geocode <- function(id, string){
  print(string)
  raw <- geocode(string, output = "all", force = FALSE)
  # One row per top-level element of the API response; keep only the
  # "results" entries and spread each result into columns.
  res <- tibble(
    id = id,
    key = names(raw),
    geo = raw
  )
  res <- res %>%
    filter(key == "results") %>%
    unnest(geo) %>%
    unnest_wider(geo)
  return(res)
}
# Geocode every prepared authority address. Map() iterates over the
# id/geo_string columns pairwise and returns a list of result tibbles --
# this replaces the original loop that grew a NULL via `[[<-` and used
# `1:nrow()` (which misbehaves on a zero-row input).
gesus_geo <- Map(
  get_geocode,
  gesus_nested_prep$id,
  gesus_nested_prep$geo_string
)
gesus_geo <- bind_rows(gesus_geo)
# Sanity check (printed, not assigned): some addresses come back with more
# than one geocoding result in the same area -- list their formatted
# addresses and coordinates.
gesus_geo %>%
add_count(id) %>%
filter(n > 1) %>%
select(formatted_address, geometry) %>%
unnest_wider(geometry) %>%
unnest_wider(location)
# delete this double entries and clean
# Keep only the first geocoding result per authority `id`, discarding the
# duplicate matches identified above. Returns an ungrouped data frame with
# the original columns and row order.
delete_double <- function(df){
  df %>%
    group_by(id) %>%
    filter(row_number() == 1) %>%
    ungroup()
}
# De-duplicate: one geocoding hit per office.
gesus_geo_clean <- gesus_geo %>%
delete_double()
# combine the data: attach coordinates to the original office table.
# NOTE(review): `gesus_nestes_geo` (sic) is a typo'd name but is used
# consistently below -- renaming would be a wider refactor.
gesus_nestes_geo <- gesus_nested %>%
left_join(gesus_geo_clean) %>%
unnest_wider(geometry) %>%
unnest_wider(location) %>%
rename(long = lng)
# For validation, extract Google's postal code ("postal_code" component)
# for each geocoded office.
plz_ids <- gesus_geo %>%
select(address_components) %>%
rownames_to_column("id") %>%
unnest(address_components) %>%
unnest_wider(address_components) %>%
unnest_wider(types) %>%
janitor::clean_names() %>%
filter(x1 == "postal_code") %>%
select(id, plz_google = long_name)
# Join Google's postal code next to the RKI one for comparison.
gesus_nested_geo_plz <- gesus_nestes_geo %>%
left_join(plz_ids) %>%
rename(plz_rki = PLZ)
# Standardise all column names (snake_case).
gesundheitsaemter_pre <- gesus_nested_geo_plz %>%
clean_names()
# Nest the RKI contact/COVID columns into a per-office list column.
rki_data <- gesundheitsaemter_pre %>%
select(id, code, phone, fax, email, contains("covid"), contains("aussteige")) %>%
nest(data_rki = -c("id"))
# Nest the raw Google geocoding metadata likewise.
google_data <- gesundheitsaemter_pre %>%
select(id, key, address_components, formatted_address, location_type, viewport, bounds, place_id,
plus_code, types, partial_match) %>%
nest(data_google = -c("id"))
# Final dataset: core address/coordinate columns plus the two nested
# detail tables.
gesundheitsaemter <- gesundheitsaemter_pre %>%
select(id, name, department, street, postalcode, place, plz_rki,
gemeinden, long, lat, plz_google) %>%
left_join(
rki_data
) %>%
left_join(
google_data
)
# Save as package data (data/gesundheitsaemter.rda).
usethis::use_data(gesundheitsaemter, overwrite = TRUE)
#usethis::use_r("gesundheitsaemter")
#usethis::use_test("gesundheitsaemter")
|
6a567c8acdca4d24f4c0fe9ce227789f069bc4bb
|
c320f24a8099951a226944cb5ca681808f6689c5
|
/9_Positive_selection_analysis/positive_sel_plot_perm_lrt.R
|
c527178bb32ca5d55bc4261c0ae732b09f37b127
|
[] |
no_license
|
AsexGenomeEvol/Timema_asex_genomes
|
8abb44a8eee376aaf1e9f71fa3a825a9c2850416
|
d224ec578dce30799e152a9a29b134fd725e0ad5
|
refs/heads/main
| 2023-04-16T02:55:04.951616
| 2022-05-31T16:27:42
| 2022-05-31T16:27:42
| 313,881,475
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,023
|
r
|
positive_sel_plot_perm_lrt.R
|
### Positive-selection analysis: permutation test of the branch-site LRT
### against reproductive mode across Timema species pairs.
library(ggplot2)
library(cowplot)
library(hash)
library(stringr)
library(car)
library(MASS)
library(fitdistrplus)
print (sessionInfo())
# R version 3.5.1 (2018-07-02)
# Platform: x86_64-apple-darwin15.6.0 (64-bit)
# Running under: macOS 10.15.7
# Matrix products: default
# BLAS: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRblas.0.dylib
# LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
# locale:
# [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
# attached base packages:
# [1] stats     graphics  grDevices utils     datasets  methods   base
# other attached packages:
# [1] fitdistrplus_1.0-14 npsurv_0.4-0        lsei_1.2-0          survival_3.1-7      MASS_7.3-51.4       car_3.0-3           carData_3.0-2       stringr_1.4.0       hash_2.2.6.1        cowplot_1.0.0       ggplot2_3.3.2
# loaded via a namespace (and not attached):
# [1] zip_2.0.3         Rcpp_1.0.2        pillar_1.4.2      compiler_3.5.1    cellranger_1.1.0  forcats_0.4.0     tools_3.5.1       lattice_0.20-38   lifecycle_0.2.0   tibble_2.1.3      gtable_0.3.0      pkgconfig_2.0.2
# [13] rlang_0.4.8       Matrix_1.2-17     openxlsx_4.1.0.1  curl_4.0          haven_2.1.1       rio_0.5.16        withr_2.1.2       dplyr_1.0.2       generics_0.0.2    vctrs_0.3.4       hms_0.5.1         grid_3.5.1
# [25] tidyselect_1.1.0  glue_1.4.2        data.table_1.12.2 R6_2.4.0          readxl_1.3.1      foreign_0.8-72    purrr_0.3.2       magrittr_1.5      splines_3.5.1     scales_1.0.0      abind_1.4-5       colorspace_1.4-1
# [37] stringi_1.4.3     munsell_0.5.0     crayon_1.3.4
## data: one row per gene x branch with the codeml LRT statistic.
dat1 <- read.table("pos_sel_data/timema_543_branches_with-ncat-codon-rate_sites_with_h0.tsv", sep = "\t", header = T)
dat1$gene <- as.character(dat1$gene )
head(dat1)
# Normalise clade branch labels to short display names.
dat1$branch_name <- as.character(dat1$branch_name)
dat1$branch_name <-
ifelse(dat1$branch_name == "Northern_Clade", "Northern",
ifelse(dat1$branch_name == "Santa_Barbara_Clade", "Santa Barbara",
ifelse(dat1$branch_name == "Southern_Clade", "Southern",
dat1$branch_name)))
dat1$sp_pair <-
ifelse(dat1$branch_name == "Tbi", "Tbi-Tte",
ifelse(dat1$branch_name == "Tce", "Tce-Tms",
ifelse(dat1$branch_name == "Tcm", "Tcm-Tsi",
ifelse(dat1$branch_name == "Tpa", "Tpa-Tge",
ifelse(dat1$branch_name == "Tps", "Tps-Tdi",
ifelse(dat1$branch_name == "Tte", "Tbi-Tte",
ifelse(dat1$branch_name == "Tms", "Tce-Tms",
ifelse(dat1$branch_name == "Tsi", "Tcm-Tsi",
ifelse(dat1$branch_name == "Tge", "Tpa-Tge",
ifelse(dat1$branch_name == "Tdi", "Tps-Tdi",
NA))))))))))
dat1$rep_mode <-
ifelse(dat1$branch_name == "Tbi", "sex",
ifelse(dat1$branch_name == "Tce", "sex",
ifelse(dat1$branch_name == "Tcm", "sex",
ifelse(dat1$branch_name == "Tpa", "sex",
ifelse(dat1$branch_name == "Tps", "sex",
ifelse(dat1$branch_name == "Tte", "asex",
ifelse(dat1$branch_name == "Tms", "asex",
ifelse(dat1$branch_name == "Tsi", "asex",
ifelse(dat1$branch_name == "Tge", "asex",
ifelse(dat1$branch_name == "Tdi", "asex",
ifelse(dat1$branch_name == "Tps/Tdi", "sex_asex",
ifelse(dat1$branch_name == "Tpa/Tge", "sex_asex",
ifelse(dat1$branch_name == "Tcm/Tsi", "sex_asex",
ifelse(dat1$branch_name == "Tbi/Tte", "sex_asex",
ifelse(dat1$branch_name == "Santa Barbara", "sex_asex",
ifelse(dat1$branch_name == "Northern", "clade",
ifelse(dat1$branch_name == "Southern", "clade",
NA)))))))))))))))))
head(dat1)
###################################################################################################
### sig diff: restrict to terminal sexual/asexual branches only
### (drop clades and sex_asex ancestor branches).
dat1_a <- subset(dat1, dat1$rep_mode != "clade")
dat1_term <- subset(dat1_a, dat1_a$rep_mode != "sex_asex")
dat1_term$rep_mode <- as.factor(dat1_term$rep_mode)
dat1_term$gene <- as.factor(dat1_term$gene)
#####################################
### Observed test statistics: model the LRT as a function of species pair
### and reproductive mode, under two error distributions (gaussian glm and
### quasipoisson) as a robustness check.
m7c_real = glm(dat1_term$lrt ~ dat1_term$sp_pair + dat1_term$rep_mode)
m7d_real = glm(dat1_term$lrt ~ dat1_term$sp_pair + dat1_term$rep_mode, family = quasipoisson(link = "log"))
# Type-III LR statistics from car::Anova; row 1 = sp_pair, row 2 = rep_mode.
m7c_real_sp_pair_LR <- Anova(m7c_real, type = 3)$LR[1]
m7d_real_sp_pair_LR <- Anova(m7d_real, type = 3)$LR[1]
m7c_real_rep_mode_LR <- Anova(m7c_real, type = 3)$LR[2]
m7d_real_rep_mode_LR <- Anova(m7d_real, type = 3)$LR[2]
########## randomise rep mode
# One permutation replicate: shuffle sex/asex WITHIN each consecutive pair
# of rows, refit both glms with the permuted labels, and return their
# type-III LR and P values.
# NOTE(review): this assumes df's rows are ordered so that each consecutive
# pair of rows belongs to one species pair (one sex + one asex branch), and
# that nrow(df) is even -- confirm against how dat1_term is constructed.
rand_rep_mode <- function(df){
pos <- c("sex", "asex")
# Grown with c() in the loop; left as-is because the sequence of sample()
# calls is what set.seed() reproducibility depends on.
rand_rep <- c()
for (i in seq(1,length(df[,1]) / 2)){
# Random order of ("sex","asex") for this pair of rows.
rand_rep_i <- sample(pos, replace = F)
rand_rep <- c(rand_rep, rand_rep_i)
}
df$rand_rep <- rand_rep
# Same two model families as for the observed statistics above.
m7c = glm(df$lrt ~ df$sp_pair + df$rand_rep)
m7c_out = Anova(m7c, type = 3)
m7d = glm(df$lrt ~ df$sp_pair + df$rand_rep, family = quasipoisson(link = "log"))
m7d_out = Anova(m7d, type = 3)
## LR sp pair, LR rep, P sp pair, P rep mode
m7c_out_v <- c(m7c_out$LR[1], m7c_out$LR[2], m7c_out$P[1], m7c_out$P[2])
m7d_out_v <- c(m7d_out$LR[1], m7d_out$LR[2], m7d_out$P[1], m7d_out$P[2])
output <- list("m7c_out_v" = m7c_out_v, "m7d_out_v" = m7d_out_v )
return(output)
}
#### Run the permutation run_N times and collect the statistics.
run_N = 1000 ### number of randomisations. this takes some time to run.
set.seed(42)
# Collect all replicates in a preallocated list and bind once at the end:
# this replaces the original pattern of growing two matrices with rbind()
# inside the loop (O(n^2) copying). The sequential loop is kept so the
# RNG call order -- and hence set.seed(42) reproducibility -- is unchanged.
perm_results <- vector("list", run_N)
for (i in seq_len(run_N)){
  print(i)
  perm_results[[i]] <- rand_rep_mode(dat1_term)
}
rand_rep_df_m7c <- do.call(rbind, lapply(perm_results, `[[`, "m7c_out_v"))
rand_rep_df_m7d <- do.call(rbind, lapply(perm_results, `[[`, "m7d_out_v"))
colnames(rand_rep_df_m7c) <- c("LR_sp","LR_rep_mode", "P_sp","P_rep_mode")
rand_rep_df_m7c <- as.data.frame(rand_rep_df_m7c)
colnames(rand_rep_df_m7d) <- c("LR_sp","LR_rep_mode", "P_sp","P_rep_mode")
rand_rep_df_m7d <- as.data.frame(rand_rep_df_m7d)
# Permutation p-value: the fraction of randomised LR_rep_mode statistics
# exceeding the observed statistic calc_TS. Prints the exceedance count and
# the p-value (matching the original's console output) and returns the
# p-value invisibly.
# Fixes: removed the dead `adj_pval = 10000` sentinel and the redundant
# zero-exceedance branch (0 / N is already 0), and made the return value
# explicit -- the original relied on print() returning its argument
# invisibly.
# NOTE(review): with 0 exceedances this reports p = 0; the standard
# correction is (k + 1) / (N + 1) -- kept as-is for backward compatibility.
get_pval = function(rand_df, calc_TS){
  # NAs in the comparison are not counted, matching the original
  # subset()-based row count.
  N_rand_larger <- sum(rand_df$LR_rep_mode > calc_TS, na.rm = TRUE)
  print(N_rand_larger)
  adj_pval <- N_rand_larger / length(rand_df$LR_rep_mode)
  print(adj_pval)
  invisible(adj_pval)
}
# NOTE(review): get_pval() compares the permuted *rep_mode* LR distribution
# against the supplied statistic, yet the observed *sp_pair* LR is passed
# here, while m7c_real_rep_mode_LR / m7d_real_rep_mode_LR are computed above
# but never used. This looks like a slip (rep_mode was the permuted term) --
# confirm the intended comparison before relying on the reported p-values.
get_pval(rand_rep_df_m7c, m7c_real_sp_pair_LR ) # 0.011
get_pval(rand_rep_df_m7d, m7d_real_sp_pair_LR ) # 0.011
|
19e08ecbc794292625caaea887bccbb201c2a261
|
59d6c373b4a59dd6f4ad6f771a4bc147d71bb4a3
|
/man/sensPlotMatrix.Rd
|
01e5b3987754b47852f059e6f073d87a90bc01e7
|
[] |
no_license
|
dkneis/mcu
|
0107c4b6f5cf6d1d4ec3b66889783868da0a7eb8
|
0b87de1944d553b23386c0e35f11f65f4c1a70f9
|
refs/heads/master
| 2021-01-24T16:10:03.146094
| 2016-03-17T17:32:30
| 2016-03-17T17:32:30
| 38,431,953
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,385
|
rd
|
sensPlotMatrix.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sensPlotMatrix.r
\name{sensPlotMatrix}
\alias{sensPlotMatrix}
\title{Plot sensitivity info as a matrix}
\usage{
sensPlotMatrix(sout, thr = 0.9, xpars = TRUE)
}
\arguments{
\item{sout}{A list of data frames returned from a call to \code{\link{sens}}.}
\item{thr}{A number in range \eqn{0 < thr < 1}. This is used to pragmatically
distinguish between highly sensitive and less sensitive parameters.
\code{thr} specifies the minimum relative decrease in the value of the
objective function to consider a particular varied parameter as
\emph{highly} sensitive. A value of, say, 0.9 means that a parameter is
considered as highly sensitive if \eqn{ftst/fdef <= 0.9}, where \eqn{ftst}
and \eqn{fdef} denote the output of the objective function for a test value
and the default value of a parameter, respectively. Reasonable values are
probably between 0.8 and 0.95.}
\item{xpars}{Logical. Controls the plot's layout. If \code{TRUE}, the
parameter names appear as column headers and the objective function(s) as
row headers(s). If \code{FALSE}, the result matrix is transposed.}
}
\value{
\code{NULL}.
}
\description{
Plots the results of a sensitivity analysis carried out using \code{sens}
as a matrix of symbols. The output can facilitate manual model calibration
(see details below).
}
\note{
Symbols in the created graphics have the following meaning:
\itemize{
\item{} Triangles pointing up: Parameter should be increased to/beyond
the tested upper limit in order to reduce the value of the objective
function. Filled/non-filled: Relative sensitivity is high/low.
\item{} Triangles pointing down: Parameter should be decreased to/beyond
the tested lower limit in order to reduce the value of the objective
function. Filled/non-filled: Relative sensitivity is high/low.
\item{} Diamond: Lowest value of the objective function does not
(exclusively) occur at a boundary of the tested parameter range. Hence,
the optimum parameter values may be inside that range. Filled/non-filled:
Relative sensitivity is high/low.
\item{} Circle: Objective function is not sensitive to the parameter.
\item{} Cross: No information due to non-finite return values of the
objective function.
}
Note that the analysis does not account for possible parameter interactions
such as compensation effects. For the example (see below), the created plot
suggests that the intercept should be decreased (although the true optimum
value is 0). This is due to the fact that all test values for the slope
are actually too high (true optimum at 1).
In cases with long names of parameters/functions, it will be necessary to
adjust the plot margins accordingly (see example).
}
\examples{
# Sensitivity of parameters of a linear model
obs= data.frame(x=c(1,2), y=c(1,2))
model= function(p, x) { p["slope"] * x + p["intercept"] }
objfun= function(p, obs) { c(sse= sum((obs$y - model(p, obs$x))^2),
mae= sum(abs(obs$y - model(p, obs$x)))) }
p= data.frame(
name=c("slope","intercept"),
default= c(1.5, 0.1),
min= c(1.1, -1),
max= c(2, 1)
)
s= sens(fn=objfun, p=p, obs=obs)
omar= par("mar")
par(mar=c(0.5,6,10,0.5))
sensPlotMatrix(sout=s, thr=0.75, xpars=TRUE)
par(mar=omar)
}
\author{
David Kneis \email{david.kneis@tu-dresden.de}
}
|
0f7404466675d37cf515622925914c724f215d46
|
b98aa1f5e3bdc1b3c9d0cf29d2f7ad7711707a41
|
/GUI.R
|
28d7a68171990ed5344905ec4f5b031766c68c6b
|
[] |
no_license
|
Matheesha-Nayanajith/R-Project01
|
7fee4ad74ff7b31987b3130ff397e92f34a68368
|
ab585b0346f112212a57b287f0c433273d5a59df
|
refs/heads/main
| 2023-07-19T03:31:52.146735
| 2021-09-27T08:21:21
| 2021-09-27T08:21:21
| 380,057,264
| 0
| 0
| null | 2021-09-27T08:21:22
| 2021-06-24T21:44:54
|
R
|
UTF-8
|
R
| false
| false
| 15,002
|
r
|
GUI.R
|
# Base-graphics tutorial script: scatterplots, boxplots, barplots,
# histograms, pie charts, then ggplot2 and simple regression examples.
#simple scatterplot----
?plot
X1 = 1:10
Y1 = X1^2
X1
Y1
plot(x=X1 , y=Y1)
# plot type: 'p' points, 'l' lines, 'b' both
plot(x=X1, y=Y1, type='p') # type = 'p' draws points only
plot(x=X1, y=Y1, type='l') # type = 'l' draws lines only
plot(x=X1, y=Y1, type='b') # type = 'b' draws both points and lines
#adding line width (lwd), color (col) and cex (point size)
plot(x=X1, y=Y1, type='b', lwd= 1, col = 'red', cex=1)
plot(x=X1, y=Y1, type='b', lwd= 2, col = 'red', cex=1.5)
plot(x=X1, y=Y1, type='b', lwd= 3, col = 'red', cex=2)
#change the limits of the x and y axes
plot(x=X1, y=Y1, xlim=c(0,10), ylim = c(0,100), type = 'b', cex=1, col='blue')
#using mtcars dataset: plotting mpg and weight
plot(mtcars$wt, mtcars$mpg, col = 'blue', pch = 20, cex=1.5)
plot(mtcars$wt, mtcars$mpg, col = 'blue', pch = 20, cex=1, xlab = 'weight', ylab = 'Mileage', main = 'Weight vs MPG' )
table(mtcars$gear)
table(mtcars$cyl)
# NOTE(review): bare `x` below looks like a stray token -- no object `x`
# is defined at this point, so this line errors when the script is sourced.
x
plot(x = mtcars$wt, y = mtcars$mpg, col = mtcars$cyl, pch = mtcars$gear, cex = 1, xlab = 'Weight', ylab = 'Mileage', main = 'weight vs Mileage')
#add fit lines
abline(lm(mtcars$mpg~mtcars$wt), col="red") #regression line (y~x)
#plotting wt mpg graph and adding legends to it
#X-Weight, Y-Mileage, colour-Cylinder, Shape-Gear, Size-AM : points
lapply(mtcars[,c('cyl','gear','am')], table)#count of each category column
#you should know this to plan how many colors, shapes, sizes to choose
plot(x = mtcars$wt, y = mtcars$mpg, col = c(1,2,3), pch = c(20,21,22), cex=c(1,2), xlab = 'Weight' , ylab = 'Mileage' , main = "Weight vs Mileage")
?InsectSprays
InsectSprays
?boxplot
#showing mean of mpg
boxplot(x=mtcars$mpg, col = 'green') # col = fill colour of the box
abline(h=mean(mtcars$mpg))
abline(h=quantile(mtcars$mpg))
boxplot(x=mtcars$mpg, col = 'green', horizontal = T)# horizontal = orientation of boxplot
# boxplot on a formula:
?InsectSprays
boxplot(count ~ spray, data = InsectSprays, col = "lightgray")
# *add* notches (somewhat funny here <--> warning "notches.. outside hinges"):
boxplot(count ~ spray, data = InsectSprays,
notch = T, col = "blue")
# notch is used to compare groups in the notched boxplot: if two boxes'
# notches do not overlap, their medians likely differ.
##if we put notch=T,we'll get a warning message by saying " some notches went outside hinges ('box'):maybe set notch=FALSE"
#Using formula for mtcars
# Boxplot of mpg by cylinder count with notched boxes.
# Fixed: `motch = TRUE` -> `notch = TRUE`; the misspelled argument fell
# into `...` and was ignored, so no notches were drawn.
boxplot(mpg ~ cyl, data = mtcars,
        xlab = "Number of cylinders",
        ylab = "Miles per Gallon",
        notch = TRUE,
        main = "Mileage Data",
        col = c("green","yellow","purple"),
        names = c("High","Medium","Low"))
#graph autos with adjacent bars using rainbow colours
# Toy vehicle counts per weekday (Mon-Fri) for the barplot examples.
cars <- c(1, 3, 6, 4, 9)
trucks <-c(2, 5, 4, 5, 12)
suvs = c(4,4,6,6,16)
autos_data = data.frame(cars,trucks,suvs)
autos_data
# Simple barplot of the cars column with shading-line densities per bar.
barplot(autos_data$cars, main ="Auto Data", xlab = "Days",
ylab = "Total", names.arg=c("Mon","Tue","wed","Thu","Fri"),
border="blue", density=c(10,20,30,40,50))
# Grouped (side-by-side) barplot of all three vehicle types, rainbow fill.
# Fixed: `byt="n"` -> `bty="n"` (the misspelled argument was silently
# ignored, so the legend box was still drawn) and the "Tatal" y-label typo.
barplot(as.matrix(autos_data), main ="autos", ylab = "Total",
        beside = TRUE, col = rainbow(5))
legend("topleft", c("Mon","Tue","wed","Thu","Fri"), cex=1,
       bty="n", fill = rainbow(5))
#graph autos (transposing the matrix) using heat colours
#put 10% of the space between each bar, and make labels
#smaller with horizontal y-axis labels
autos_data
barplot(t(autos_data),main="Autos", ylab="Total",
col=heat.colors(3),space=0.1, cex.axis=0.8, las=1,
names.arg=c("Mon","Tue","Wed","Thu","Fri"),cex=0.8)
# Two legend placements demonstrated: top-left and top.
legend("topleft", c("Cars","Trucks","Suvs"),cex=1,
bty="n", fill=heat.colors(3))
legend("top", c("Cars","Trucks","Suvs"),cex=1,
bty="n", fill=heat.colors(3))
?hist
#Histogram for normally distributed data
hist(rnorm(1000))
hist(rnorm(1000), probability = T)
#histogram for skewed data
hist(islands)#gives results in frequencies
hist(islands,probability = T)#proportion (or probabilities)
hist(islands, breaks = 5, main = "islands histogrm", xlab = "Area range", border = "red", col = "grey")
head(airquality)
hist(airquality$Temp)
#adding label names and title
hist(airquality$Temp,
main="Temperature Histogram",
xlab="Temperature",
ylab="Temperature Frequency",
las=1)
#adding color to bars (the two colours alternate across bars)
hist(airquality$Temp,
main="Tempareture Histogram",
xlab="Temparature",
ylab="Temperature Frequency",
las=1,
col=c("skyblue","chocolate2"))
#removing all the annotation and axis limits/names to give our own
hist(airquality$Temp,
axes=F,
ann=F,
labels=T,
ylim=c(0,35),
col = c("skyblue","chocolate2"))
#specifying own annotations
hist(airquality$Temp,
main="Tempareture Histogram",
xlab="Temparature",
ylab="Temperature Frequency",
las=1,
col=c("skyblue","chocolate2"),
xlim=c(55,100),
ylim=c(0,40),
density=80)
#giving the number of breaks and adding counts above the bars
hist(airquality$Temp,
breaks=20,
main="Tempareture Histogram",
xlab="Temparature",
ylab="Temperature Frequency",
las=1,
col=c("skyblue","chocolate2"),
labels = T,
ylim=c(0,25)
)
# Density-scale histogram (freq=F) so a kernel-density curve can be
# overlaid with lines() below.
hist (airquality$Temp,
breaks = 20,
freq=F,
main="Tempareture Histogram",
xlab="Temparature",
ylab="Temperature Frequency",
las=1,
col=c("skyblue","chocolate2"),
labels = T,
)
lines(density(airquality$Temp),
lwd=4,col="red")
hist(airquality$Temp,
breaks=20,
main="Tempareture Histogram",
xlab="Temparature",
ylab="Temperature Frequency",
las=1,
col=c("skyblue","chocolate2"),
labels = T,
ylim=c(0,25) )
#pie chart ----
# Pie charts show parts of a whole: segment sizes are proportional to the
# values, summing to 100 percent.
#create a pie chart for cars
cars <- c(1, 3, 6, 4, 9)
pie(cars)
#custom colors and labels
pie(cars, main = "Cars", col = rainbow(length(cars)),
labels = c("Mon","Tue","Wed","Thu","Fri"))
#define some colours ideal for black & white print
colors <- c("white","grey70","grey90","grey50","black")
#calculate the percentage for each day , rounded to one
#decimal place
car_labels <- round(cars/sum(cars) * 100, 1)
car_labels
#concatenate a '%' char after each value
car_labels <- paste(car_labels,"%", sep = " ")
car_labels
# and labels
pie(cars, main = "Cars", col = colors, labels = car_labels,
cex=0.8)
# create a legend at the right
legend('topright', c("Mon","Tue","Wed","Thu","Fri"), cex=0.8,
fill = colors)
#3D Exploded pie chart (plotrix::pie3D)
library(plotrix)
slices <- c(10,12,4,16,8)
lbls <-c("US","UK","AUS","GER","FRNS")
pie3D(slices,labels = lbls,explode = 0.2,
main = "pie chart of Countries")
#GGPlot2 ----
library(ggplot2)
library(dplyr)
mtcars
#SCATTER PLOT ----
#basic scatter plot (wt vs mpg)
plot(mtcars$wt, mtcars$mpg)
#1st layer of ggplot - creating base for plotting (no geom yet)
ggplot(data = mtcars, aes(x=wt, y=mpg))
#adding geometry to graph
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point()
#adding aesthetics : color, size and shape of point
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(color='red', size=3)
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(color='red', size=3, shape = 20)
head(mtcars)
# NOTE(review): table(mtcars) cross-tabulates ALL 11 columns at once --
# probably meant table() on a single column as below.
table(mtcars)
table(mtcars$gear)
table(mtcars$carb)
table(mtcars$am)
#adding different dimensions of ggplot now : adding colour per no. of cyl
#ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(color = cyl, size=3, shape=20) Error
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = cyl), size=3, shape=20)
# factor(cyl) gives a discrete colour scale instead of a gradient.
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = factor(cyl)), size = 3,
shape = 20)
#adding different dimensions to ggplot now : adding size per transmission type
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = factor(cyl), size = factor(am)), shape = 20)
#adding different dimensions to ggplot now : adding shape per no. of gears
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = factor(cyl), size = factor(am), shape = factor(gear)))
#adding labels to the graph
ggplot(data = mtcars, aes(x=wt, y=mpg)) + geom_point(aes(shape=factor(gear), size=factor(am), color=factor(
cyl))) + labs(title = 'Adding dimensions to graph', subtitle = 'Scatter Plot', x='weight', y='Mileage')
#adding text to points, name of the cars
# Fixed typo: aes(labal = ...) -> aes(label = ...); geom_text() errors
# without a `label` aesthetic, so the original line failed at draw time.
ggplot(data=mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = factor(cyl), size=factor(am), shape = factor(gear))) + facet_grid(cyl + vs~carb) + labs(title='Adding dimensions to graph ', subtitle = 'Scatter Plot', x= 'Weight', y ='Mileage') + geom_text(aes(label = rownames(mtcars)), size = 2.5)
# Faceted scatter with car names as point labels (plain geom_text).
ggplot(data=mtcars, aes(x=wt, y=mpg)) + geom_point(aes(color = factor(cyl), size=factor(am), shape = factor(gear))) + facet_grid(cyl + vs~carb) + labs(title='Adding dimensions to graph ', subtitle = 'Scatter Plot', x= 'Weight', y ='Mileage') + geom_text(aes(label = rownames(mtcars)), size = 2.5)
#adding text to points,name of the cars using ggrepel package to make graph tidy
library(ggrepel)
ggplot(data=mtcars, aes(x=wt,y=mpg))+ geom_point(aes(color=factor(cyl),size=factor(am),shape=factor(gear)))+ facet_grid(vs~carb)+labs(title='Adding dimensions to graph', subtitle='Scatter Plot', x='Weight', y='Mileage') + ggrepel::geom_text_repel(aes(label=rownames(mtcars)),size=2.5)
#BAR PLOT ----
# Base barplots of raw values vs counts for comparison with geom_bar().
barplot(mtcars$cyl)
barplot(table(mtcars$cyl))
# geom_bar() counts rows per x value by default; fill/col set bar fill
# and border colours respectively.
ggplot(data=mtcars, aes(x=cyl)) + geom_bar()
ggplot(data=mtcars, aes(x=cyl)) + geom_bar(fill="red")
ggplot(data=mtcars, aes(x=cyl)) + geom_bar(aes(fill=factor(cyl)))
ggplot(data=mtcars, aes(x=cyl)) + geom_bar(aes(col = factor(cyl)))
# To make bar heights represent values already in the data, use
# stat = 'identity' and map a column to the y aesthetic.
#summarising the count of cars per no. of cyl, then stat='identity' plus
#geom_text to print the count on top of each bar
mtcars %>% group_by(cyl) %>% summarise (n=n()) %>% ggplot(.,aes(x=cyl,y=n))+ geom_bar(stat='identity',aes(fill=factor(cyl)))+geom_text(aes(label=n))
mtcars %>% group_by(cyl,gear,am,vs) %>% summarise (n=n()) %>% ggplot(.,aes(x=cyl,y=n))+ geom_bar(stat='identity',aes(fill=factor(cyl)))+ geom_text(aes(label=n))+facet_grid(gear~am)
# scales/space = "free" lets each facet pick its own axis limits and
# panel size, making small counts visible.
mtcars %>% group_by(cyl,gear,am,vs) %>% summarise (n=n()) %>% ggplot(.,aes(x=cyl,y=n))+ geom_bar(stat='identity',aes(fill=factor(cyl)))+ geom_text(aes(label=n))+facet_grid(gear~am, scales='free',space='free')
# HEAT MAP ----
# Tile plot of car counts per (cyl, gear) combination.
a = mtcars %>% group_by(cyl,gear) %>% summarise(n=n())
a
ggplot(a, aes(x=factor(cyl), y=factor(gear), fill=n)) + geom_tile()
ggplot(a, aes(x=factor(cyl), y=factor(gear), fill=n)) + geom_tile() + geom_text(aes(label=n), size=6)
ggplot(a, aes(x=factor(cyl), y=factor(gear), fill=n)) + geom_tile() + geom_text(aes(label=n), size=6) + scale_fill_gradient2()
# Simple linear regression: weight ~ height on the built-in women data.
?women
women
str(women)
dim(women)
#check linearity of data set first by plotting it
plot(women$height,women$weight)
#building a model for women data set
# (note: `dat =` only works via R's partial matching of `data =`)
lmmodel = lm(weight ~ height, dat = women)
lmmodel
#checking summary of the model
summary(lmmodel)
#plotting the regression line on the scatterplot
abline(lm(weight ~ height, data = women), col='red', lwd = 1)
#predicting the value of y (weight) for new values of x (height)
head(women)
range(women$weight)
range(women$height)
newdata = data.frame(height = c(50, 75, 80, 85))
pred_weight = predict(lmmodel, newdata)
pred_weight
#checking assumptions of the model via diagnostic plots
plot(lmmodel)
# Multiple linear regression on mtcars.
names(mtcars)
View(mtcars)
plot(mpg~hp, data = mtcars)
plot(mpg~wt, data = mtcars)
result<-lm(mpg~hp+wt, data = mtcars)
summary(result)
#value of adjusted R2 = 0.82,
#means that 82% of the variance in the measure of mpg can be predicted by hp and wt.
#checking multicollinearity by adding correlated predictors
result<-lm(mpg~hp+wt+disp+cyl+gear , data = mtcars)
summary(result)
#----example 03
#create training and test data (80/20 split; no seed, so not reproducible)
trainingRowIndex <- sample(1: nrow(mtcars), 0.8*nrow(mtcars))
trainingData <- mtcars[trainingRowIndex, ]
testData <- mtcars[-trainingRowIndex, ]
#build the model on training data ----
lmMod <- lm(mpg~ cyl
+disp
+hp
+wt, data = trainingData)
summary(lmMod)
#predicition ----
#predicting values for the test dataset
testData$mpgPred <- predict(lmMod, testData)
View(testData)
#accuracy----
#determining prediction accuracy on the test dataset using MAPE
mape <- mean(abs((testData$mpgPred - testData$mpg))/testData$mpg)
mape
#checking assumptions through diagnostic plots
plot(lmMod)
lmMod
mtcars
# Logistic regression: graduate-school admission data (UCLA IDRE example);
# admit ~ GRE score, GPA and institution rank.
mydata <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
head(mydata)
str(mydata)
summary(mydata)
sapply(mydata,sd)
#rank is categorical but stored as integer; convert to factors
#(admit likewise, as the binary outcome)
mydata$rank = as.factor(mydata$rank)
mydata$admit = as.factor(mydata$admit)
str(mydata)
dim(mydata)
#two-way contingency table of outcome and predictor; we want
#to make sure there are no 0 cells
#which rank of institute is most successful in getting admitted
xtabs(~admit + rank, data = mydata)
#model 1: full-data fit
mylogit = glm(admit ~ gre + gpa + rank, data = mydata, family = "binomial")
summary(mylogit)
#model 2
#Building the logistic regression model on a 70/30 train/test split
#(no seed set, so the split is not reproducible)
n = nrow(mydata)
sample = sample(1:n, size = round(0.7*n), replace = F)
train = mydata[sample,]
test = mydata[-sample,]
?glm
logR1 = glm(admit ~ gre+gpa+rank, train, family = binomial)
logR1
summary(logR1)
?predict
# type='response' returns predicted probabilities; threshold at 0.5.
predicted = predict(logR1, newdata = test, type = 'response')
head(predicted)
predicteV = factor(ifelse(predicted <0.5,0, 1))
head(predicteV)
test = cbind(test, predicteV)
head(test)
str(test)
library(caret)
confusionMatrix(test$admit, test$predicteV) #better
#Model 3: new split, gpa dropped from the formula
n = nrow(mydata)
sample = sample(1:n, size = round(0.7*n), replace = F)
train = mydata[sample,]
test2 = mydata[-sample,]
logR2 = glm(admit ~ gre + rank, train, family = binomial)#gpa removed
logR2
summary(logR2)
pred = predict(logR2, newdata = test2, type = 'response')
# NOTE(review): this shadows base R's predict() with a factor; later calls
# to predict(...) still work only because R skips non-function bindings in
# call position -- a different variable name would be safer.
predict = factor(ifelse(pred <0.5,0, 1))
test2 = cbind(test2, predict)
head(test2)
#checking accuracy using caret package : confusion matric
caret::confusionMatrix(test$admit, test2$predict) #better
#confusion matrix using table command
table(test2$admit, pred > 0.5)
#checking accuracy of model using mean command
mean(test2$predict == test$admit)
#new data prediction
range(mydata$gre)
df2 = data.frame(gre = 700 , rank = factor(3))
df2
p = predict(logR2, newdata = df2)
p
p1 = factor(ifelse(p <0.5,0, 1))
p1
df.p = cbind(df2, p1)
df.p
|
55e8a778926f197c624d25d62a58ec6630ed4e08
|
411028fa05f953b3b129ff0e289fac869d314453
|
/LIME_data_prep.R
|
67614c586516228173298cc0be48a537392a64e0
|
[] |
no_license
|
davidgillmarine/spatial-fisheries-analysis
|
2a4959ee8b9f7eaca1461931d4bf5cfe7c7e2912
|
b2f932b3d4bad5c7387b068c4b9ef2df91c27b86
|
refs/heads/master
| 2023-06-08T23:17:42.360084
| 2023-05-31T15:04:30
| 2023-05-31T15:04:30
| 184,622,949
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,035
|
r
|
LIME_data_prep.R
|
## Prepare Statia logbook catch data for LIME analysis: read raw fish
## lengths, bin them to integer cm, and write a species x year x length
## frequency table.
library(rio)
library(janitor)
library(tidyverse)

# Set working directory paths on the R drive.
# NOTE(review): prefer building paths with file.path(); setwd() is avoided.
workdir <- "R:/Gill/research/spatial-fisheries-analysis/"
input.dir <- paste0(workdir, "tables/raw/")
spatialdir <- paste0(workdir, "spatial/raw/")
plotdir <- paste0(workdir, "output/")
tabledir <- paste0(workdir, "output/")
speciesdir <- "R:/Gill/research/spatial-fisheries-analysis/tables/final/LIME_Outputs/"
# ^ directory where all individual species catch will be housed, for LIME analysis

# BUG FIX: `today.date` was used in the output file name below but never
# defined, so the script failed at write.csv().  Define it as an ISO date
# prefix for the exported file.
today.date <- format(Sys.Date(), "%Y%m%d")

###################################### Import the Data ##############################
# Import the "Fish" sheet of the logbook workbook, clean the column names,
# and drop rows without a Latin species name.
fish.dat <- import(paste0(input.dir, "Statia logbook Raw data last update October 2022.xlsx"),
                   which = "Fish") %>%
  clean_names() %>%
  filter(!is.na(species_latin_name))

# Check names and species present.
names(fish.dat)
unique(fish.dat$species_latin_name)

# Create fake data so every length bin 0:50 cm appears as a column, even
# when no fish of that length was recorded.
fake.dat <- data.frame(species_latin_name = "", length_cm = seq(0, 50, 1)) # create fake data
fake.dat <- fish.dat %>%
  slice(0) %>%              # get column names from fish.dat
  bind_rows(fake.dat)       # add fake data

fish.dat.gp <- fish.dat %>%
  mutate(length_cm = as.integer(round(length_cm))) %>% # round to whole cm
  bind_rows(fake.dat) %>%                              # add fake data
  group_by(species_latin_name, year, length_cm) %>%
  summarise(num = n())                                 # count unique values
head(fish.dat.gp)

# Spread length bins to columns and drop the fake/blank species rows.
fish.dat.gp1 <- fish.dat.gp %>%
  spread(length_cm, num, fill = 0) %>%
  filter(!is.na(species_latin_name) & !(species_latin_name == ""))
fish.dat.gp1 %>% view()
#**NOTE: there is one species who does not have it's species Latin name listed - need to try and find it via common name

write.csv(fish.dat.gp1, paste0(speciesdir, today.date, "_Statia_LIME__ALL_Species.csv"))
###################################### DIG Continuation of this work! ##############################
# SWITCHING OVER TO LIME-Statia.RMD in LIME project ##
|
19f6beab6f34e885443ed5c20cb43daab8dd347c
|
02e4fa1103ff77f47562d06b7a5c7898547daf6e
|
/misc/rename_vars.R
|
2b0145b773403b8e0b6fad377345823a10f7ffe1
|
[] |
no_license
|
uhoang/mec
|
fb3d29badf1d9ee4353c70eaf1b322b4d042cd9d
|
341c0b229bb37ee27daf3eb2f754b0fcd40cdf87
|
refs/heads/master
| 2021-05-02T10:19:51.342436
| 2018-02-21T19:29:01
| 2018-02-21T19:29:01
| 120,794,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,147
|
r
|
rename_vars.R
|
## Lookup table mapping survey variable names to their full question text.
## Fixes vs. the original: removed a stray *unnamed* duplicate of the
## Q_CONDITION text (a copy/paste fragment, 'Q_CONDITION) Which ...') and
## the duplicate trailing Q_AGESCREENER entry, so every element has a
## unique non-empty name.
questions <- c(
  'Q_LANGUAGE' = 'Q_LANGUAGE: In which language do you prefer to take this survey? / Dans quelle langue préférez-vous effectuer cette enquête?',
  'Q_AGESCREENER' = 'Q_AGESCREENER: How old are you?',
  'h_AGE' = 'h_AGE: How old are you?',
  'Q_PROVINCE' = 'Q_PROVINCE: Which province/territory do you currently live in?',
  'Q_COMMUNITY' = 'Q_COMMUNITY: Please indicate which best describes where you live.',
  'Q_CONDITION' = 'Q_CONDITION: Which of the following health conditions, if any, do you currently suffer from?',
  'Q_REMEDY' = 'Q_REMEDY: You said you currently suffer from the following health condition(s). Of these, which do you currently take medication for?',
  'Q_GENDER' = 'Q_GENDER: Are you...?',
  'Q_PARENTS' = 'Q_PARENTS: Do you have children under the age of 18 living at home?',
  'Q_ACTIVITY' = 'Q_ACTIVITY: How often do you engage in the following?',
  'Q_PURCHASE' = 'Q_PURCHASE: When is the last time you purchased the following products?',
  'Q_FREQUENCY' = 'Q_FREQUENCY: You said you have purchased the products listed below in the past. How often do you use each of the following nutritional products?',
  'Q_SPEND' = 'Q_SPEND: In the past month, how much have you spent on vitamins, minerals, and health supplements?',
  'Q_PREVENTION' = 'Q_PREVENTION: Which of the following health conditions, if any, are you concerned about preventing in the future?',
  'Q_STATEMENTS' = 'Q_STATEMENTS: For each statement, please indicate your agreement on a scale of 1-7, where 1 means "disagree completely" and 7 means "agree completely".',
  'Q_GOALS' = 'Q_GOALS: Which of the following statements best represents your personal wellness goal?',
  'Q_MEDIA' = 'Q_MEDIA: Please select how often you usually do the following:'
)
## Answer-code -> label lookup for Q_CONDITION (multi-select health conditions).
q_condition <- c('Q_CONDITIONr1' = 'Difficulty falling asleep',
                 'Q_CONDITIONr2' = 'Fatigue/ lack of energy',
                 'Q_CONDITIONr3' = 'High blood pressure/ Hypertension',
                 'Q_CONDITIONr4' = 'Insomnia/ Difficulty staying asleep',
                 'Q_CONDITIONr5' = 'Joint problems/ stiffness',
                 'Q_CONDITIONr6' = 'Stress/ anxiety',
                 'Q_CONDITIONr7' = 'Oral/ Dental/ Tooth problems',
                 'Q_CONDITIONr8' = 'Weight concerns',
                 'Q_CONDITIONr9' = 'Glaucoma, cataracts, or other vision-related ailments',
                 'Q_CONDITIONr10' = 'Digestive complications',
                 'Q_CONDITIONr11' = 'Heart condition',
                 'Q_CONDITIONr12' = 'Respiratory condition',
                 'Q_CONDITIONr13' = 'High cholesterol',
                 'Q_CONDITIONr14' = 'Chronic pain',
                 'Q_CONDITIONr15' = 'Diabetes',
                 'Q_CONDITIONr16' = 'Some other condition(s)',
                 'Q_CONDITIONr98' = 'None of the above')
## Answer-code -> label lookup for Q_PREVENTION (conditions the respondent
## wants to prevent; note r1-r17 differ in order from q_condition).
q_prevention <- c(
  'Q_PREVENTIONr1' = 'Diabetes',
  'Q_PREVENTIONr2' = 'High blood pressure/ Hypertension',
  'Q_PREVENTIONr3' = 'Joint problems/ stiffness',
  'Q_PREVENTIONr4' = 'General illness - cold, flu, etc.',
  'Q_PREVENTIONr5' = 'Difficulty falling asleep',
  'Q_PREVENTIONr6' = 'Fatigue/ lack of energy',
  'Q_PREVENTIONr7' = 'Insomnia/ Difficulty staying asleep',
  'Q_PREVENTIONr8' = 'Stress/ anxiety',
  'Q_PREVENTIONr9' = 'Oral/ Dental/ Tooth problems',
  'Q_PREVENTIONr10' = 'Weight concerns',
  'Q_PREVENTIONr11' = 'Glaucoma, cataracts, or other vision-related ailments',
  'Q_PREVENTIONr12' = 'Digestive complications',
  'Q_PREVENTIONr13' = 'Heart condition',
  'Q_PREVENTIONr14' = 'Respiratory condition',
  'Q_PREVENTIONr15' = 'High cholesterol',
  'Q_PREVENTIONr16' = 'Chronic pain',
  'Q_PREVENTIONr17' = 'Some other condition(s)',
  'Q_PREVENTIONr98' = 'None of the above'
)
## Answer-code -> label lookup for Q_REMEDY (conditions currently medicated;
## same row labels as q_condition).
q_remedy <- c(
  'Q_REMEDYr1' = 'Difficulty falling asleep',
  'Q_REMEDYr2' = 'Fatigue/ lack of energy',
  'Q_REMEDYr3' = 'High blood pressure/ Hypertension',
  'Q_REMEDYr4' = 'Insomnia/ Difficulty staying asleep',
  'Q_REMEDYr5' = 'Joint problems/ stiffness',
  'Q_REMEDYr6' = 'Stress/ anxiety',
  'Q_REMEDYr7' = 'Oral/ Dental/ Tooth problems',
  'Q_REMEDYr8' = 'Weight concerns',
  'Q_REMEDYr9' = 'Glaucoma, cataracts, or other vision-related ailments',
  'Q_REMEDYr10' = 'Digestive complications',
  'Q_REMEDYr11' = 'Heart condition',
  'Q_REMEDYr12' = 'Respiratory condition',
  'Q_REMEDYr13' = 'High cholesterol',
  'Q_REMEDYr14' = 'Chronic pain',
  'Q_REMEDYr15' = 'Diabetes',
  'Q_REMEDYr16' = 'Some other condition(s)',
  'Q_REMEDYr98' = 'None of the above'
)
## Row-label lookup for the Q_STATEMENTS agreement grid (1-7 Likert items).
q_statements <- c(
  'Q_STATEMENTSr1' = 'Exercise is an essential part of my day',
  'Q_STATEMENTSr2' = 'Friends and family often come to me for advice about how to eat well and/or be healthier',
  'Q_STATEMENTSr3' = 'Health and wellness are important to me, but it can be overwhelming to try to do everything I should do',
  'Q_STATEMENTSr4' = 'I am happy with my current life stage',
  'Q_STATEMENTSr5' = 'I am trying to "fight" aging by staying healthy',
  'Q_STATEMENTSr6' = 'I consider myself to be living a healthy lifestyle',
  'Q_STATEMENTSr7' = "I don't stress over my nutrition or fitness level",
  'Q_STATEMENTSr8' = "I eat what I want and don't pay attention to health benefits",
  'Q_STATEMENTSr9' = 'I feel depressed about my current health status',
  'Q_STATEMENTSr10' = 'I focus on getting the most out of every day',
  'Q_STATEMENTSr11' = 'Life is complicated. I need simple solutions for my nutritional and health needs',
  'Q_STATEMENTSr12' = 'My stress level has a negative impact on my daily life',
  'Q_STATEMENTSr13' = 'Social media impacts what I eat and drink')
## Response-scale labels (numeric code -> frequency) for Q_ACTIVITY.
q_activity <- c(
  '1' = 'Once a day or more',
  '2' = '2-3 times a week',
  '3' = 'Once a week',
  '4' = 'A couple of times a month',
  '5' = 'Once a month',
  '6' = 'Once every 2-3 months',
  '7' = 'Less often',
  '8' = 'Never')
## Response-scale labels (recency of purchase) for Q_PURCHASE.
q_purchase <- c(
  '1' = 'In the past month',
  '2' = 'In the past 2-3 months',
  '3' = 'In the past 6 months',
  '4' = 'In the past year',
  '5' = 'Longer ago than that',
  '6' = 'Never')
## Response-scale labels (usage frequency) for Q_FREQUENCY.
q_frequency <- c(
  '1' = 'Once daily',
  '2' = 'Several days a week',
  '3' = 'Once a week',
  '4' = 'Once every two weeks',
  '5' = 'Once a month',
  '6' = 'Once every 2-3 months',
  '7' = 'Less often than that',
  '8' = 'Never'
)
## Response-scale labels (monthly spend bands) for Q_SPEND.
q_spend <- c(
  '1' = '$0-25',
  '2' = '$26-50',
  '3' = '$51-75',
  '4' = '$76-100',
  '5' = '$101-125',
  '6' = '$126+',
  '7' = "Don't know"
)
## Response-scale labels (media-use frequency) for Q_MEDIA.
q_media <- c(
  '1' = 'Once a day or more',
  '2' = 'Several days a week',
  '3' = 'Once a week',
  '4' = 'Once every two weeks',
  '5' = 'Once a month',
  '6' = 'Once every three months',
  '7' = 'Once every six months',
  '8' = 'Once a year',
  '9' = 'Less often',
  '10' = 'Never'
)
## Answer labels (wellness-goal statements) for Q_GOALS.
q_goals <- c(
  '1' = 'I want to maintain my current lifestyle without fear or discomfort.',
  '2' = 'I want to maintain my active lifestyle without feeling limited by pain.',
  '3' = 'I want to maintain a baseline of nutrition through easy to incorporate solutions.',
  '4' = 'I want to find a way to be productive and confident about my life and my health.',
  '5' = 'I want to fend off any and all potential health conditions, stay perfectly healthy forever.',
  '6' = "I want to be healthy enough that I don't have any bothersome issues.",
  '7' = 'I want to be able to continue being carefree about my health for as long as possible.'
)
## Answer labels for Q_GENDER (100 = write-in/other).
q_gender <- c(
  '1' = 'Male',
  '2' = 'Female',
  '100' = 'Other'
)
## Answer labels for Q_PARENTS (children under 18 at home).
q_parents <- c(
  '1' = 'Yes',
  '2' = 'No'
)
## Answer labels for Q_COMMUNITY (urbanicity of residence).
q_community <- c(
  '1' = 'Urban/city centre',
  '2' = 'Large population centre',
  '3' = 'Medium population centre',
  '4' = 'Small population centre',
  '5' = 'Rural area'
)
## Hidden/derived variable: collapsed four-region grouping of province.
h_province <- c(
  '1' = 'Ontario',
  '2' = 'Quebec',
  '3' = 'Atlantic Canada',
  '4' = 'West Canada'
)
## Answer labels for Q_LANGUAGE (survey language).
q_language <- c(
  '1' = 'English',
  '2' = 'French'
)
## Row-label lookup for the Q_ACTIVITY grid (activity types).
q_activity_title <- c(
  'Q_ACTIVITYr1' = 'High intensity cardio activity',
  'Q_ACTIVITYr2' = 'Weightlifting or strength training',
  'Q_ACTIVITYr3' = 'Low intensity physical activity',
  'Q_ACTIVITYr4' = 'Competitive or recreational team sports',
  'Q_ACTIVITYr5' = 'Competitive or recreational individual sports'
)
## Row-label lookup for the Q_PURCHASE grid (product categories).
q_purchase_title <- c(
  'Q_PURCHASEr1' = 'Multivitamins',
  'Q_PURCHASEr2' = 'Letter vitamin supplements',
  'Q_PURCHASEr3' = 'Mineral supplements',
  'Q_PURCHASEr4' = 'Fish oil and omegas',
  'Q_PURCHASEr5' = 'Meal replacements',
  'Q_PURCHASEr6' = 'Protein supplement',
  'Q_PURCHASEr7' = 'Weight loss supplements',
  'Q_PURCHASEr8' = 'Probiotics'
)
## Row-label lookup for the Q_FREQUENCY grid (same products as Q_PURCHASE).
q_frequency_title <- c(
  'Q_FREQUENCYr1' = 'Multivitamins',
  'Q_FREQUENCYr2' = 'Letter vitamin supplements',
  'Q_FREQUENCYr3' = 'Mineral supplements',
  'Q_FREQUENCYr4' = 'Fish oil and omegas',
  'Q_FREQUENCYr5' = 'Meal replacements',
  'Q_FREQUENCYr6' = 'Protein supplement',
  'Q_FREQUENCYr7' = 'Weight loss supplements',
  'Q_FREQUENCYr8' = 'Probiotics'
)
## Row-label lookup for the Q_MEDIA grid (media/touchpoint behaviors).
q_media_title <- c(
  'Q_MEDIAr1' = 'Watch TV',
  'Q_MEDIAr2' = 'Listen to the radio',
  'Q_MEDIAr3' = 'Read a printed version of a newspaper',
  'Q_MEDIAr4' = 'Read a printed version of a magazine',
  'Q_MEDIAr5' = 'Go to the cinema / movie theatre',
  'Q_MEDIAr6' = 'Visit social media sites',
  'Q_MEDIAr7' = 'Use the internet for watching TV / video content',
  'Q_MEDIAr8' = 'Use the internet for listening to music, radio or podcasts',
  'Q_MEDIAr9' = 'Use the internet for looking at newspaper content',
  'Q_MEDIAr10' = 'Use a smartphone to access the internet',
  'Q_MEDIAr11' = 'Play video games',
  'Q_MEDIAr12' = 'Pass by large posters on the roadside or other large out of home advertising',
  'Q_MEDIAr13' = 'Pass by small posters on the street, at bus stops, in shopping malls, etc.',
  'Q_MEDIAr14' = 'Pass by advertising on or around public transportation',
  'Q_MEDIAr15' = 'Go to the doctor or a walk-in clinic',
  'Q_MEDIAr16' = 'Talk to a pharmacist',
  'Q_MEDIAr17' = 'Use a tablet to access the internet',
  'Q_MEDIAr18' = 'Use drugstore loyalty programs'
)
## Hidden/derived variable: banded age groups.
h_age <- c(
  '1' = '18-24',
  '2' = '25-34',
  '3' = '35-44',
  '4' = '45-54',
  '5' = '55-70'
)
## Answer labels for Q_PROVINCE (full province/territory list plus screen-out).
q_province <- c(
  '1'='Ontario',
  '2'='Quebec',
  '3'='British Columbia',
  '4'='Alberta',
  '5'='Manitoba',
  '6'='Saskatchewan',
  '7'='Nova Scotia',
  '8'='New Brunswick',
  '9'='Newfoundland and Labrador',
  '10'='Prince Edward Island',
  '11'='Northwest Territories',
  '12'='Yukon',
  '13'='Nunavut',
  '14'='I do not live in Canada'
)
|
83e806a81918dede404bbf817bf02cf4b791d514
|
4dda5c9494580ea7e20a1465f9016364a9a7cdda
|
/calculos/script_wsdi_todo.R
|
38ba93bbea8c4d6eded95710fe04ebcccca74607
|
[] |
no_license
|
MOfelia/PaperHeatwaves
|
20af18280c5b24aef4c12f4b046670d00c9caf5c
|
788dfdb8cc52ba51036c0c1b30bd23176eaec7b1
|
refs/heads/master
| 2022-03-31T20:00:08.217428
| 2020-01-18T20:16:41
| 2020-01-18T20:16:41
| 110,706,492
| 0
| 0
| null | 2019-04-03T07:26:59
| 2017-11-14T15:14:44
|
TeX
|
UTF-8
|
R
| false
| false
| 5,790
|
r
|
script_wsdi_todo.R
|
## Compute the WSDI (Warm Spell Duration Index) for a EURO-CORDEX daily
## tasmax NetCDF: read the file, compute WSDI per grid cell with
## climdex.pcic, average over the 30-year period, and write the result
## out as a new NetCDF.  (Comments translated from Spanish.)
##
## Suggestions kept from the original review discussion:
## - Instead of changing the working directory each time (everyone has a
##   different one), put the path directly in the file-read call; if data
##   always live in a "datos" folder, that path never needs changing.
##setwd("~/Data")
##setwd("/home/kike/Escritorio/Dropbox/investigacion/tesis/tesismariamolina/olasdecaloreurocordex/calculos/")
##setwd("/home/kike/taller/olas/") # directory with the full 106x103 file

################################################
## 1. Load libraries
library(ncdf4)
library(climdex.pcic)
library(Matrix) ## needed for the matrix holding wsdiavg at each grid point
library(raster)
library(rasterVis)
library(maps)
library(mapdata)
library(maptools)
data('worldEnv') ## NOTE(review): 'worldEnv' is not a standard maptools data
                 ## set (wrld_simpl is) -- confirm this is the intended name.

#################################################
## 2. Read the .nc file and the variables we need.
## With ncdf4:
ncfich <- nc_open("tasmax_EUR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_day_2071-2100.nc")
xtasmax <- ncvar_get(ncfich, "tasmax")
## The following give the projected grid coordinates (x and y cell values),
## NOT longitude/latitude.  Lon and lat are separate variables in the nc
## and are extracted as such below; using x/y as axes gives wrong labels.
xlon <- ncfich$dim$x$vals
xlat <- ncfich$dim$y$vals
## xv1<-xtasmax[,,1]  # these two lines are only for testing 2-D field plots
## image(xlon,xlat,xv1)

## 2.a Read the file with raster:
## tasmax values as a raster stack (one layer per time step):
mync <- stack("/home/automata/maria/tasmax_EUR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_day_2071-2100.nc", varname='tasmax')
## The lat and lon variables can be read the same way:
mynclat <- raster("/home/automata/maria/tasmax_EUR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_day_2071-2100.nc", varname='lat')
mynclon <- raster("/home/automata/maria/tasmax_EUR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_day_2071-2100.nc", varname='lon')
## These create matrices with cell number and the lat/lon value (col 3):
lat <- rasterToPoints(mynclat)
lon <- rasterToPoints(mynclon)
## BUG FIX: the original referenced undefined objects `plon`/`plat`;
## the matrices created above are `lon` and `lat`.
lonlat <- cbind(lon[, 3], lat[, 3])

## The raster `mync` has no projection assigned.  To plot in R we must know
## and assign one; we estimate it from lon/lat and then project to lonlat.
## Define the lonlat projection:
crslonlat <- CRS("+proj=longlat +datum=WGS84")
## Define the LCC projection (the exact original parameters are unknown,
## so these values could differ slightly):
mycrs <- CRS("+proj=lcc +lat_1=43.f +lat_0=43.f +lon_0=15.f +k=0.684241 +units=m +datum=WGS84 +no_defs")
## Create a spatial object (with a defined projection) from the nc lon/lat:
splonlat <- SpatialPoints(lonlat, proj4string = crslonlat)
## Transform the lonlat points to LCC:
plonlat <- spTransform(splonlat, CRSobj = mycrs)
## Assign that projection (and extent) to the raster:
projection(mync) <- mycrs
extent(mync) <- extent(plonlat)

## 2.b Plotting
## With plot (base R), using maps for coastlines/countries.  To keep it
## simple we plot in latlon, so re-project the raster.
## The raster has one layer per time step; pick a single layer to plot:
mync2 <- mync[[1]]
## Project to latlon:
mync2 <- projectRaster(mync2, crslonlat)
extent(mync2) <- extent(splonlat)
## Plot:
pdf("bc.pdf")
plot(mync2)
## BUG FIX: `maps` is the package name, not a function -- the overlay
## function is map().
map(add = TRUE)
dev.off()
## With levelplot (lonlat projection):
pdf("bclevel.pdf")
levelplot(mync2) ## adding the map outline requires more steps.
dev.off()

########################################################
## Build the time axis in the form the climdex functions require
## (a 365-day calendar with Feb 29 removed).
tt <- ncvar_get(ncfich, "time")
xdias <- seq(as.Date(tt[1] + 30, format = "%d/%m/%Y", origin = "01/12/1949"), by = "days", length = length(tt) + 7)
xdias365 <- xdias[format(xdias, "%m-%d") != "02-29"]
tmax.dates.rcm <- as.character(xdias365)
cal <- "365_day"
tt <- as.PCICt(tmax.dates.rcm, cal)

## For each grid cell, compute WSDI and store the 30-year annual mean.
wsdiavg <- Matrix(0, nrow = length(xlon), ncol = length(xlat))
for (xx in seq_along(xlon)) {
  for (yy in seq_along(xlat)) {
    vv <- xtasmax[xx, yy, 1:length(tt)] # time series at this grid point
    circm <- climdexInput.raw(vv, vv, vv, tt, tt, tt, base.range = c(2071, 2100))
    wsdi <- climdex.wsdi(circm)
    # For now we keep the 30-year mean of the annual values that
    # climdex.wsdi computes by default.
    wsdiavg[xx, yy] <- sum(wsdi) / 30.
  }
}
#image(wsdiavg)  # quick graphical check

## Write the annual-mean WSDI to a NetCDF file.
setwd("/home/kike/Escritorio/Dropbox/investigacion/tesis/tesismariamolina/olasdecaloreurocordex/calculos/")
ncdout <- "wsdiavgp1.nc"
londim <- ncdim_def("lon", "degrees_east", as.double(xlon))
latdim <- ncdim_def("lat", "degrees_north", as.double(xlat))
vname <- "wsdi"
fillValue <- 1e32
wsdidef <- ncvar_def("wsdi", "days", list(londim, latdim), fillValue, vname, prec = "single")
ncsfin <- nc_create(ncdout, wsdidef, force_v4 = T)
ncvar_put(ncsfin, wsdidef, wsdiavg)
ncatt_put(ncsfin, "lon", "axis", "X") #,verbose=FALSE) #,definemode=FALSE)
ncatt_put(ncsfin, "lat", "axis", "Y")
nc_close(ncsfin)
|
7c86365d144b57e36ef3efd504f969f23534cda8
|
1c24900c933b48b6448aa3581ed088dd25a96df1
|
/GenIns/HaiData.R
|
f361fce968b47d96745227bc50a69abde92693c5
|
[] |
no_license
|
trinostics/IBNR
|
e8e8f70bd1312efcb6b8e780a797079f336e8850
|
577d5360845950f4e802c21442b796477bed9edd
|
refs/heads/master
| 2021-06-18T17:43:33.120641
| 2019-08-24T11:40:55
| 2019-08-24T11:40:55
| 135,292,278
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,224
|
r
|
HaiData.R
|
## Compare chain-ladder results on an aggregated loss triangle vs. a
## claim-level (detail) triangle, showing how NAs in early ages make the
## volume-weighted link ratios differ, and how filling those NAs with 0
## (or a constant) reconciles them.
library(excelRio)
library(janitor)
library(tidyverse)
library(reshape2)
library(mondate)
library(ChainLadder)
setwd("~/GitHub/IBNR/GenIns")
# Read claim-level data; parse dates and derive accident year and age
# (in months, measured from the start of the accident year).
det <- readFromCsv("AggregatedClaims.csv", stringsAsFactors = FALSE)
det$EvaluationDate <- as.Date(det$EvaluationDate)
det$AccidentDate <- as.Date(det$AccidentDate)
det$ay <- year(det$AccidentDate)
det$age <- as.numeric(mondate(det$EvaluationDate) -
                        mondate.ymd(det$ay - 1))
# Aggregate triangle: accident year x age, summed paid amounts.
tri.agg <- acast(det, ay ~ age, value.var = "Paid", fun.aggregate = sum,
                 fill = as.numeric(NA))
#copyToExcel(tri.det) # same aggregated triangle as Hai's
tri.agg.ata <- ata(tri.agg)
attr(tri.agg.ata, "vwtd")  # volume-weighted age-to-age factors (aggregate)
#attr(ata(GenIns), "vwtd")
# Detail triangle: one row per claim.
tri.det <- acast(det, ClaimNo ~ age, value.var = "Paid", fun.aggregate = sum,
                 fill = as.numeric(NA))
tri.det.ata <- ata(tri.det)
attr(tri.det.ata, "vwtd")  # volume-weighted factors (detail)
# the vwtd average link ratios are not the same between detail and aggregate
# because of the NA's in the early age columns in the detail.
# To prove, let's first count the number of those NAs
sum(is.na(tri.det[,1])) # 1338
sum(is.na(tri.det[,1]) & is.na(tri.det[,2])) # 21
sum(is.na(tri.det[,1]) & is.na(tri.det[,2]) & is.na(tri.det[,3])) # 1
sum(is.na(tri.det[,1]) & is.na(tri.det[,2]) & is.na(tri.det[,3])
    & is.na(tri.det[,4])) # 0
# Now replace those NA's with zeros and the vwtd avg link ratios will be the same
# as with the agg triangle.
tri.det0 <- tri.det
tri.det0[is.na(tri.det[,1]), 1] <- 0
tri.det0[is.na(tri.det[,1]) & is.na(tri.det[,2]), 2] <- 0
tri.det0[is.na(tri.det[,1]) & is.na(tri.det[,2]) & is.na(tri.det[,3]), 3] <- 0
tri.det0.ata <- ata(tri.det0)
attr(tri.det0.ata, "vwtd") # same as attr(tri.agg.ata, "vwtd")
#
# Mack chain-ladder totals on both triangles:
summary(MackChainLadder(tri.agg))$Totals
summary(MackChainLadder(tri.det0))$Totals
# get errors with 0's in the denominator.
# replace them with 1's
# (actually a constant a = 1000 is substituted below, not 1)
tri.det1 <- tri.det
a <- 1000
tri.det1[is.na(tri.det[,1]), 1] <- a
tri.det1[is.na(tri.det[,1]) & is.na(tri.det[,2]), 2] <- a
tri.det1[is.na(tri.det[,1]) & is.na(tri.det[,2]) & is.na(tri.det[,3]), 3] <- a
tri.det1.ata <- ata(tri.det1)
attr(tri.det1.ata, "vwtd") # same as attr(tri.agg.ata, "vwtd") # very close
#
# Mack chain-ladder with alpha = 2 on both triangles:
summary(MackChainLadder(tri.agg, alpha = 2))$Totals
summary(MackChainLadder(tri.det1, alpha = 2))$Totals
|
eec446e31db39b6a9919bb40d52348f8937b0a25
|
aa5cab15e2296fd18ec2ebe29b927a821bd60e42
|
/R/grading_script.R
|
1ead607babe470ccdca57550357daa19133e7f58
|
[] |
no_license
|
jdtrat/ds4psych_actions
|
0e3a2d46b4023727752a7b066bfd609eb05562c0
|
a2432245044ac870c10af4192b0de9773a43e51e
|
refs/heads/master
| 2023-03-01T05:24:38.391272
| 2021-02-11T19:20:14
| 2021-02-11T19:20:14
| 337,856,007
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,804
|
r
|
grading_script.R
|
## CI helper (GitHub Actions): makes sure the grader ("smasongarrison")
## is a collaborator on the student repo and is pinged on any open issue
## that mentions her.  Repo path and token come from the environment.
my_repo <- Sys.getenv("MY_GITHUB_REPO")
my_pat <- Sys.getenv("GITHUB_PAT")
# simplegit::gh_collab_invite(path = my_repo, invitee = "smasongarrison", .token = my_pat)
#
# path <- simplegit:::check_path(path = my_repo)
#
# gh::gh("PUT /repos/{owner}/{repo}/collaborators/{username}",
#        owner = path[1], repo = path[2], username = "smasongarrison",
#        .token = my_pat)

#' Ensure the grader is invited and reminded on the given repo.
#'
#' @param path  "owner/repo" string identifying the student repository.
#' @param token GitHub personal-access token used for all API calls.
#' @return No meaningful value; called for its GitHub API side effects.
ds4psych_grading <- function(path, token) {
  # If Mason is not a collaborator, send her an invite and tag her in an issue
  if (!simplegit::gh_collab_check(path = path, collaborator = "smasongarrison", .token = token, messages = FALSE)) {
    simplegit::gh_collab_invite(path = path, invitee = "smasongarrison", .token = token)
    #    simplegit::gh_issue_new(path = path, title = "Please grade me!", body = "@smasongarrison, I've finished my lab. Please grade me!", .token = token)
  } else {
    # if Mason is a collaborator, check to see whether there are any issues that mention her.
    # If so, assign her to all of them and remind her! If not, she must have closed them (:
    if (simplegit::gh_issue_mention(path, collaborator = "smasongarrison", .token = token)) {
      # Assign Mason to those -- nothing happens if we reassign
      which_issues <- as.numeric(simplegit::gh_issue_list(path, mentioned = "smasongarrison", .token = token)$number)
      lapply(which_issues, simplegit::gh_issue_assign, path = path, assignees = "smasongarrison", .token = token)
      # Add a comment pinging Mason to remind her!
      lapply(which_issues, simplegit::gh_issue_comment, path = path,
             body = "@smasongarrison, this is a reminder to please grade me. I will reappear every 48 hours until you close this issue.",
             .token = token)
    }
  }
}

# Run immediately (this script is executed by the workflow).
ds4psych_grading(path = my_repo, token = my_pat)
|
b8a68edf053f0191b3de597ff8c281686a2915c7
|
8d72f133d5c9557c70e4f6ac99ef510c7e08ef71
|
/Scripts/FNoDiv.R
|
558bd2590467aa9afce5ec501743df109e16802c
|
[] |
no_license
|
jesusNPL/FurnariidesDiversification
|
841b73c9845ae1b4e204924d784ea786aaeb3701
|
4813927e30be7d0165baeef2cac67fb12c2e84ab
|
refs/heads/master
| 2020-03-18T22:04:00.949144
| 2019-04-26T16:14:57
| 2019-04-26T16:14:57
| 135,322,373
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,217
|
r
|
FNoDiv.R
|
library(nodiv)
library(picante)
library(geiger)
library(raster)
library(sp)
library(letsR)
library(maptools)
library(rgdal)
library(rgeos)
##### Prepare data #####
fcomm <- read.csv("Table_S2.csv", row.names = 1)
dim(fcomm)
head(fcomm[, 1:2])
ftree <- read.nexus("furna_divTime_WO_MCC_newNames.nex")
forest <- readOGR(file.choose(), "Fores_habitats")
open <- readOGR(file.choose(), "Open_habitats")
habitats <- rbind(forest, open)
plot(habitats)
matched2 <- match.phylo.comm(phy = ftree, comm = fcomm[, 6:589])
dim(matched2$comm)
fdata <- nodiv_data(phylo = matched2$phy, commatrix = matched2$comm, coords = fcomm[, 1:2],
type = "points", shape = habitats)
frd <- Node_analysis(fdata, 10000, "rdtable")
summary(frd)
plot(frd)
par(mfrow = c(2, 2))
plotSOS(frd, 261, match.ID = FALSE)
plotSOS(frd, 266, match.ID = FALSE)
plotSOS(frd, 269, match.ID = FALSE)
plotSOS(frd, 433, match.ID = FALSE)
fqsw <- Node_analysis(fdata, 10000, "quasiswap")
summary(fqsw)
plot(fqsw)
par(mfrow = c(2, 3))
plotSOS(fqsw, 261)
plotSOS(fqsw, 266)
plotSOS(fqsw, 267)
plotSOS(fqsw, 269)
plotSOS(fqsw, 327)
plotSOS(fqsw, 373)
save.image("FurnariideNODIV.RData")
fqsw[[2]]
pAbund <- fqsw[[8]]
fnodes <- c(261, 266, 267, 269, 327, 373)
fqswNodes <- data.frame(fqsw[[12]])
fqswGND <- fqsw[[13]]
fqswGND
hist(na.omit(fqswGND))
abline(v = mean(na.omit(fqswGND)), col = "black", lwd = 3)
abline(v = 0.7, col = "red", lwd = 3)
fSOS261 <- fqswNodes$X261
fSOS266 <- fqswNodes$X266
fSOS267 <- fqswNodes$X267
fSOS269 <- fqswNodes$X269
fSOS327 <- fqswNodes$X327
fSOS373 <- fqswNodes$X373
fSOSnodes <- cbind(fcomm[, 1:2], fSOS261, fSOS266, fSOS267, fSOS269, fSOS327, fSOS373,
fcomm$Country, fcomm$Habitat, fcomm$Richness)
ForestSOS <- fSOSnodes[which(fSOSnodes$`fcomm$Habitat` == "Forest"), ]
OpenSOS <- fSOSnodes[which(fSOSnodes$`fcomm$Habitat` == "Open"), ]
hist(fSOSnodes$fSOS261)
hist(ForestSOS$fSOS261, add = T, col = "green")
hist(OpenSOS$fSOS261, add = T, col = "yellow")
require(scales)
pdf(file = "HistNodes.pdf", width = 10, height = 10)
par(mfrow = c(2, 3))
#par(mar = c(2, 2, 2, 4), xpd = T)
par(oma = c(5, 1, 3, 2))
par(mar = c(5, 5, 4, 2))
#par(cex = 1)
par(cex.axis = 2)
par(cex.lab = 2)
par(cex.main = 2)
# Node 261
hist(fSOSnodes$fSOS261, col = scales::alpha("black", 0.2), main = "Node 261", xlab = NA, xlim = c(-5, 5.5))
hist(ForestSOS$fSOS261, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS261, add = T, col = scales::alpha("khaki", 0.7))
# Node 266
hist(fSOSnodes$fSOS266, col = scales::alpha("black", 0.2), main = "Node 266", xlab = NA, xlim = c(-5, 5.5))
hist(ForestSOS$fSOS266, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS266, add = T, col = scales::alpha("khaki", 0.7))
# Node 267
hist(fSOSnodes$fSOS267, col = scales::alpha("black", 0.2), main = "Node 267", xlab = NA, xlim = c(-6, 4))
hist(ForestSOS$fSOS267, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS267, add = T, col = scales::alpha("khaki", 0.7))
# Node 269
hist(fSOSnodes$fSOS269, col = scales::alpha("black", 0.2), main = "Node 269", xlab = "SOS values", xlim = c(-7.5, 4))
hist(ForestSOS$fSOS269, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS269, add = T, col = scales::alpha("khaki", 0.7))
# Node 327
hist(fSOSnodes$fSOS327, col = scales::alpha("black", 0.2), main = "Node 327", xlab = "SOS values", xlim = c(-3.5, 3.5))
hist(ForestSOS$fSOS327, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS327, add = T, col = scales::alpha("khaki", 0.7))
# Node 373
hist(fSOSnodes$fSOS373, col = scales::alpha("black", 0.2), main = "Node 373", xlab = "SOS values", xlim = c(-2.5, 3.5))
hist(ForestSOS$fSOS373, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS373, add = T, col = scales::alpha("khaki", 0.7))
dev.off()
pdf(file = "HistNodesFinal.pdf", width = 10, height = 10)
par(mfrow = c(2, 2))
# Phylogeny
plot(fqsw, lwd = 1, col = mypalette)
# Node 261
hist(fSOSnodes$fSOS261, col = scales::alpha("black", 0.2), main = "Node 261", xlab = "SOS values", xlim = c(-5, 6))
hist(ForestSOS$fSOS261, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS261, add = T, col = scales::alpha("khaki", 0.7))
# Node 266
hist(fSOSnodes$fSOS266, col = scales::alpha("black", 0.2), main = "Node 266", xlab = "SOS values", xlim = c(-5, 6))
hist(ForestSOS$fSOS266, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS266, add = T, col = scales::alpha("khaki", 0.7))
# Node 269
hist(fSOSnodes$fSOS269, col = scales::alpha("black", 0.2), main = "Node 269", xlab = "SOS values", xlim = c(-8, 4))
hist(ForestSOS$fSOS269, add = T, col = scales::alpha("palegreen4", 0.7))
hist(OpenSOS$fSOS269, add = T, col = scales::alpha("khaki", 0.7))
legend("topleft", c("All", "Forest", "Open"), fill = c("gray", "palegreen4", "khaki"), box.lty = 0, bg = NA)
dev.off()
#### Prepare multi map of nodesig #####
library(tmap)
tm_shape(forest) + tm_fill(col = "palegreen4", legend.show = F, alpha = 0.9) +
tm_shape(open) + tm_fill(col = "khaki", legend.show = F, alpha = 0.7) +
tm_shape(sosPoints) + tm_bubbles(col = "Node261", palette = "Reds", style = "quantile",
legend.size.show = FALSE, size = 0.15) +
tm_layout(legend.position = c("right", "bottom"), legend.text.size = 0.7, legend.title.size = 1, frame = FALSE)
pdf(file = "Phylo_Maps_NodesFinal.pdf", width = 10, height = 10)
par(mfrow = c(2, 2))
# Phylogeny
plot(fqsw)
# Node 261
t1 <- tm_shape(forest) + tm_fill(col = "palegreen4", legend.show = F, alpha = 0.9) +
tm_shape(open) + tm_fill(col = "khaki", legend.show = F, alpha = 0.7) +
tm_shape(sosPoints) + tm_bubbles(col = "Node261", palette = "Reds", style = "quantile",
legend.size.show = FALSE, size = 0.15) +
tm_layout(legend.position = c("right", "bottom"), legend.text.size = 0.7, legend.title.size = 1, frame = FALSE)
# Node 266
t2 <- tm_shape(forest) + tm_fill(col = "palegreen4", legend.show = F, alpha = 0.9) +
tm_shape(open) + tm_fill(col = "khaki", legend.show = F, alpha = 0.7) +
tm_shape(sosPoints) + tm_bubbles(col = "Node266", palette = "Reds", style = "quantile",
legend.size.show = FALSE, size = 0.15) +
tm_layout(legend.position = c("right", "bottom"), legend.text.size = 0.7, legend.title.size = 1, frame = FALSE)
# Node 269
t3 <- tm_shape(forest) + tm_fill(col = "palegreen4", legend.show = F, alpha = 0.9) +
tm_shape(open) + tm_fill(col = "khaki", legend.show = F, alpha = 0.7) +
tm_shape(sosPoints) + tm_bubbles(col = "Node269", palette = "Reds", style = "quantile",
legend.size.show = FALSE, size = 0.15) +
tm_layout(legend.position = c("right", "bottom"), legend.text.size = 0.7, legend.title.size = 1, frame = FALSE)
dev.off()
library(RColorBrewer)
colfunc <- colorRampPalette(c("white", "red4"))
mypalette <- brewer.pal(7, "Reds")
pdf(file = "Phylo_NodesFinal.pdf", width = 10, height = 8)
plot(fqsw, lwd = 1, col = mypalette)
#plot(fqsw, lwd = 1, col = colfunc(50))
dev.off()
pdf(file = "Phylo_Maps_NodesFinal.pdf", width = 15, height = 10)
tmap_arrange(t1, t2, t3)
dev.off()
|
0d92f0f51af2a655bc65dbb2ea072bec73d90a88
|
31006ee88ed15cd7d6647ef49433b44898d7141a
|
/extra/package/example/man/hello.Rd
|
7d952d58e0a1836e22b74d5075b2cfa907151ad2
|
[] |
no_license
|
wfjvdham/Rcourse
|
c463c283897e73aa63a86be37ec330b8941f015f
|
e8aeab0b64843669135a82a0e4cbae443fac9d4b
|
refs/heads/master
| 2021-06-02T00:17:51.705609
| 2021-02-27T19:58:59
| 2021-02-27T19:58:59
| 102,542,422
| 5
| 29
| null | 2018-02-23T07:55:47
| 2017-09-06T00:16:31
|
HTML
|
UTF-8
|
R
| false
| true
| 384
|
rd
|
hello.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{hello}
\alias{hello}
\title{function for printing hello}
\usage{
hello(word = "world")
}
\arguments{
\item{word}{that is printed after the hello}
}
\value{
string message
}
\description{
prints the hello string combined with the user input to the console
}
\examples{
hello()
hello("wim")
}
|
50c033584b62b623d3cd180edad9dcf21e8d30b1
|
defd78f89939c108dab7515871412036ff8c575a
|
/plot3.R
|
cfbbf2e3360618be4c0c8f556f51c85eb9384319
|
[] |
no_license
|
Clymsw/ExData_Plotting1
|
dde9f2599b275fbaf94d9f3c93daf7d68b704bb8
|
a128d603b75d0610e73ad1eb41c0abfe44a10ac1
|
refs/heads/master
| 2021-01-15T09:57:46.275244
| 2015-10-10T22:52:39
| 2015-10-10T22:52:39
| 44,031,034
| 0
| 0
| null | 2015-10-10T22:41:30
| 2015-10-10T22:41:30
| null |
UTF-8
|
R
| false
| false
| 611
|
r
|
plot3.R
|
# Plot 3: household power consumption -- overlay the three energy sub-metering
# series as line traces on one set of axes and save the result as plot3.png.
# Relies on loadData.R to define `timeDataSetToUse` and `dataSetToUse`.

# Load data
source("loadData.R")

# Open the PNG graphics device for this plot.
png("plot3.png")

# Base trace: sub-metering 1 in the default color, y-axis labelled, no title.
plot(timeDataSetToUse, dataSetToUse$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "", main = "")

# Overlay the remaining two series on the same axes.
lines(timeDataSetToUse, dataSetToUse$Sub_metering_2, col = "Red")
lines(timeDataSetToUse, dataSetToUse$Sub_metering_3, col = "Blue")

# Legend anchored at an explicit timestamp / y-value inside the plot region.
legend(as.POSIXct("2007-02-02 07:15:00"), 39.5,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1),
       lwd = c(2.5, 2.5, 2.5),
       col = c("black", "red", "blue"))

dev.off()
|
98ed54f1cd68338c07401483ed5d944fe99655b4
|
151277f6eb0b0d67003360f9469e53b7ac21672c
|
/data/SIR/example-plots.R
|
62a1a2297c5b394e2e159cec4315b259db9fa108
|
[] |
no_license
|
NLMichaud/seqInfSIRD
|
930c0f10732fed9e971a443aaa42dbd892d79352
|
e85885363e0ed88614ed271c120794a2386d0d86
|
refs/heads/master
| 2021-01-20T08:25:15.961031
| 2013-09-16T18:21:49
| 2013-09-16T18:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,536
|
r
|
example-plots.R
|
# Creates plots to see how the methods compare to the data on a single run
# For each simulation, draws a 3x3 panel page into example-plots.pdf:
# state trajectories with quantile bands, then reaction probabilities, then rates,
# comparing PL against two Liu-West filters (95% and 99% discount).
# NOTE(review): depends on objects loaded from the .RData files and on settings.R
# defining `probs`, `sys`, `n`, `N` -- none of these are visible in this chunk.
load("sims.RData")
n.sims = length(sims)
source("settings.R")
# ll/ul are the row indices of the lower/upper quantiles; ll is forced to 1
# because the 0.025 lookup does not match the stored quantile grid (author's note).
ll = which(probs==.025); ll=1 # current the former doesn't match
ul = which(probs==.975)
load("PLq.RData")
#load("LW90q.RData"); lw90q = lwq
# Each LW*.RData defines `lwq`; rename immediately before the next load clobbers it.
load("LW95q.RData"); lw95q = lwq
load("LW99q.RData"); lw99q = lwq
#methods=c("Truth","PL","LW90","LW95","LW99")
methods=c("Truth","PL","LW95","LW99")
pdf("example-plots.pdf")
for (i in 1:n.sims)
{
sim = sims[[i]]
plq0 = plq[[i]]
# lw90q0 = lw90q[[i]]
lw95q0 = lw95q[[i]]
lw99q0 = lw99q[[i]]
par(mfrow=c(3,3))
# States
# One panel per state: true trajectory (black) plus the quantile envelopes of
# each filtering method (colors 2/4/5 match the legend order after "Truth").
for (j in 1:sys$s)
{
plot(0:n,sim$X[,j], type="l", ylim=c(0,N), xlim=c(0,n), main=sys$states[j])
lines(0:n, plq0 $X.quantiles[j,ll,], col=2)
lines(0:n, plq0 $X.quantiles[j,ul,], col=2)
# lines(0:n, lw90q0$X.quantiles[j,ll,], col=3)
# lines(0:n, lw90q0$X.quantiles[j,ul,], col=3)
lines(0:n, lw95q0$X.quantiles[j,ll,], col=4)
lines(0:n, lw95q0$X.quantiles[j,ul,], col=4)
lines(0:n, lw99q0$X.quantiles[j,ll,], col=5)
lines(0:n, lw99q0$X.quantiles[j,ul,], col=5)
if (j==2) legend("topright", methods, col=c(1,2,4,5), lty=1)
}
# Probabilities
# One panel per reaction: true probability as a horizontal line, filter envelopes around it.
for (j in 1:sys$r)
{
plot(0,0, type="n", ylim=range(plq0$p.quantiles[j,,]), xlim=c(0,n), main=paste("p:",sys$rxns[j]))
abline(h=sim$probs[j])
lines(0:n, plq0 $p.quantiles[j,ll,], col=2)
lines(0:n, plq0 $p.quantiles[j,ul,], col=2)
# lines(0:n, lw90q0$p.quantiles[j,ll,], col=3)
# lines(0:n, lw90q0$p.quantiles[j,ul,], col=3)
lines(0:n, lw95q0$p.quantiles[j,ll,], col=4)
lines(0:n, lw95q0$p.quantiles[j,ul,], col=4)
lines(0:n, lw99q0$p.quantiles[j,ll,], col=5)
lines(0:n, lw99q0$p.quantiles[j,ul,], col=5)
#legend("topright", methods, col=1:5, lty=1)
}
# Empty panel as a spacer in the 3x3 grid.
plot(0,0,type="n", axes=F, xlab="", ylab="")
# Rates
for (j in 1:sys$r)
{
plot(0,0, type="n", ylim=range(plq0$r.quantiles[j,,]), xlim=c(0,n), main=paste("r:",sys$rxns[j]))
abline(h=sim$rates[j])
lines(0:n, plq0 $r.quantiles[j,ll,], col=2)
lines(0:n, plq0 $r.quantiles[j,ul,], col=2)
# lines(0:n, lw90q0$r.quantiles[j,ll,], col=3)
# lines(0:n, lw90q0$r.quantiles[j,ul,], col=3)
lines(0:n, lw95q0$r.quantiles[j,ll,], col=4)
lines(0:n, lw95q0$r.quantiles[j,ul,], col=4)
lines(0:n, lw99q0$r.quantiles[j,ll,], col=5)
lines(0:n, lw99q0$r.quantiles[j,ul,], col=5)
#legend("topright", methods, col=1:5, lty=1)
}
# Pause between pages when running interactively.
if (interactive()) readline("<enter>")
}
dev.off()
|
9bffb44d5b917bd8a7dbd3ce6da62dd7b6eacd67
|
ae2e4402aade4cdd9a7346f59e16246d0bb150b3
|
/05_Build_Cares_Act_Eviction_Summary.R
|
69ae407efb87e958912365fdd1fce740a2323069
|
[] |
no_license
|
Karnaadam/evictionlab
|
6f474cd18a42798217b81e0b9462fa3ec5f4521b
|
5855ce50f05d92eeb3a3eb4ff29dc6f15db43851
|
refs/heads/master
| 2022-11-26T23:16:57.356253
| 2020-08-03T12:07:51
| 2020-08-03T12:07:51
| 272,962,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,569
|
r
|
05_Build_Cares_Act_Eviction_Summary.R
|
##############################################
#Program to group coverage of general eviction orders by source and state.
# Author: KA
# Pipeline: load cleaned order data -> keep CARES-Act orders -> derive per-order
# start/expiration windows (an order is treated as superseded on the day a newer
# order from the same state/source takes effect) -> summarize coverage spans
# per state and source, saving intermediate .RData files along the way.
# NOTE(review): setwd() and rm(list = ls()) in a sourced script are fragile --
# kept as-is since later paths and header.R assume this working directory.
setwd("~/Desktop/R/EvictionLab")
rm(list = ls())
source("header.R")
##############################################
#Load in cleaned data
load("master_raw_cleaned.RData")
#Limit to only eviction moratorium orders.
master_raw_cleaned <- master_raw_cleaned[master_raw_cleaned$Cares_Act == "Y",]
master_raw_cleaned <- master_raw_cleaned[!is.na(master_raw_cleaned$STATE),]
# Protection score: sum of all pt_* indicator columns for each order.
master_raw_cleaned$bp_score <- master_raw_cleaned %>% select(starts_with("pt_")) %>% rowSums()
#Combine State Names and Source of Action to create a unique identifier for grouping the same kinds of orders in each state.
master_raw_cleaned$lag_ref <- paste(master_raw_cleaned$state_mod, master_raw_cleaned$Source_of_Action, sep = "")
master_raw_cleaned$group_id <- master_raw_cleaned %>% group_indices(lag_ref)
#Sort data
master_clean_temp1 <- sqldf("Select *
from master_raw_cleaned
order by state_mod, group_id")
#Drop variables not needed for this analysis.
#master_clean_temp1 <- master_clean_temp1[ -c(1,15:49) ]
#Convert dataframe to datatable to be able to use datatable specific functions
master_clean_temp2 <- data.table(master_clean_temp1)
# Within each group, newest order first, so the shifted value below is the
# effective date of the NEXT (more recent) order.
master_clean_temp2 <- master_clean_temp2[order(group_id, -date_eff_fmt),]
#We seek to determine what the cumulative period of coverage was for orders from a particular source (without regard to
#the order's strength of protection). To do this, we will first need to come up with precise start and stop dates for each order.
#In instances where an order is replaced by a more recent order, even though it was initially meant to carry through for a longer
#period of time, we will want to record the old order's date as expiring on the day of the new order. This way, we will be able
#to identify gaps in coverage for specific orders too. To do this, we will make use of the fact that a more recent order's start
#date can be treated as an older order's end date (aka the row below it).
#Take the effective date of the row above and set it as a potential expiration date for the current row.
master_clean_temp2[, lag_value:=c(NA, date_eff_fmt[-.N]), by=group_id]
#Reformat variable as date
master_clean_temp2$lag_value_2 <- as.Date(master_clean_temp2$lag_value, origin = "1970-01-01")
#Create ID that can be match rows back correctly.
master_clean_temp2$ID <- seq.int(nrow(master_clean_temp2))
master_clean_temp2$date_mod <- gsub("/", "", master_clean_temp2$Expiration_Date)
#For effective dates with actual date values, test if they run past current date.
subset_w_dates <- master_clean_temp2[!is.na(master_clean_temp2$date_ex_fmt),]
currentdate <- Sys.Date()
#If they do, set the new expiration date as today since we can't know whether the orders will actually carry through beyond today.
subset_w_dates$thru_today <- subset_w_dates$date_ex_fmt > currentdate
subset_w_dates$new_ex_date[subset_w_dates$thru_today == TRUE] <- currentdate
#If they don't, set keep the expiration date the same (aka set new_ex_date = old_ex_date).
subset_w_dates$new_ex_date[subset_w_dates$thru_today == FALSE] <- subset_w_dates$date_ex_fmt[subset_w_dates$thru_today == FALSE]
subset_w_dates$new_ex_date <- as.Date(subset_w_dates$new_ex_date, origin = "1970-01-01")
#For effective dates with values other than just a date, follow methodology rules.
subset_amb_dates <- master_clean_temp2[is.na(master_clean_temp2$date_ex_fmt),]
#Drop rows where the expired_replaced information is missing.
subset_amb_dates2 <- subset_amb_dates[!is.na(subset_amb_dates$Expired_Replaced),]
#When an order is listed as expiring in part or full, assume the order expired on the day a newer order took effect.
subset_amb_dates2$new_ex_date[subset_amb_dates2$Expired_Replaced == "Y"] <- subset_amb_dates2$lag_value_2[subset_amb_dates2$Expired_Replaced == "Y"]
subset_amb_dates2$new_ex_date[subset_amb_dates2$Expired_Replaced == "Partial"] <- subset_amb_dates2$lag_value_2[subset_amb_dates2$Expired_Replaced == "Partial"]
#When an order is listed as not yet expired, set the expired date as today's date.
subset_amb_dates2$new_ex_date[subset_amb_dates2$Expired_Replaced == "N"] <- currentdate
subset_amb_dates2$new_ex_date <- as.Date(subset_amb_dates2$new_ex_date, origin = "1970-01-01")
#Merge back on subset datasets to master to have a complete list of new_ex_date.
#First merge the two subsets together
subs_joined <- rbind(subset_w_dates, subset_amb_dates2, fill = TRUE)
#Then merge the combined subsets to the master data.
master_clean_temp3 <- sqldf("Select A.*, B.new_ex_date
from master_clean_temp2 as A
left join subs_joined as B
on A.ID = B.ID")
master_clean_temp4 <- master_clean_temp3[!is.na(master_clean_temp3$new_ex_date),]
#In instances where there was a newer order found, set the expiration date as the earlier date between the start of the newer
#order or the listed expiration date.
master_clean_temp4$ex_min[!is.na(master_clean_temp4$lag_value_2)] <- pmin(master_clean_temp4$new_ex_date[!is.na(master_clean_temp4$lag_value_2)], master_clean_temp4$lag_value_2[!is.na(master_clean_temp4$lag_value_2)])
#If no newer order is found, set the expiration date to the listed expiration date.
master_clean_temp4$ex_min[is.na(master_clean_temp4$lag_value_2)] <- master_clean_temp4$new_ex_date[is.na(master_clean_temp4$lag_value_2)]
master_clean_temp4$ex_min <- as.Date(master_clean_tem4$ex_min, origin = "1970-01-01")
#Calculate days between the effective date and expiration date for each order.
master_clean_temp4$days_btwn <- master_clean_temp4$ex_min - master_clean_temp4$date_eff_fmt
#Save dataset to analyze other moratorium-realted questions.
state_mora_cleaned <- master_clean_temp4
save(state_mora_cleaned, file = "state_mora_cleaned.RData")
#Continue with general moratorium summary.
master_sub <- sqldf("Select state_mod,
Source_of_Action,
Name_of_Source,
group_id,
date_eff_fmt,
Expired_Replaced,
new_ex_date,
days_btwn as days
from master_clean_temp4
order by group_id, state_mod, date_eff_fmt")
# Per-group summary: order count, first effective date, latest expiration,
# and total covered days for each state/source combination.
state_sum1 <- sqldf("Select state_mod,
Source_of_Action,
Name_of_Source,
Type_of_Action,
group_id,
count(Source_of_Action) as counts,
min(date_eff_fmt) as day_first_action,
max(new_ex_date) as action_thru_date,
sum(days_btwn) as total_days_of_action
from master_clean_temp4
group by group_id
order by group_id, state_mod, Source_of_Action")
state_sum1$day_first_action <- as.Date(state_sum1$day_first_action, origin = "1970-01-01")
state_sum1$action_thru_date <- as.Date(state_sum1$action_thru_date, origin = "1970-01-01")
state_sum1$action_range <- as.numeric(state_sum1$action_thru_date - state_sum1$day_first_action)
# `test` < 0 indicates a coverage gap (total covered days less than the span).
state_sum1$test <- as.numeric(state_sum1$total_days_of_action - state_sum1$action_range)
# Per-state maxima, used to flag which source provided the widest coverage.
state_sum2 <- sqldf("Select state_mod,
max(action_range) as range_max,
max(counts) as counts_max,
max(total_days_of_action) as consec_days_max
from state_sum1
group by state_mod
order by state_mod")
state_sum3 <- sqldf("Select A.*,
B.range_max,
B.counts_max,
B.consec_days_max
from state_sum1 as A
left join state_sum2 as B
on A.state_mod = B.state_mod")
# Flags are 1 where this source attains the state maximum; NA otherwise.
state_sum3$source_filter_range[state_sum3$action_range == state_sum3$range_max] <- 1
state_sum3$source_filter_counts[state_sum3$counts == state_sum3$counts_max] <- 1
state_sum3$source_filter_consec_days[state_sum3$total_days_of_action == state_sum3$consec_days_max] <- 1
state_sum_Cares_Act <- state_sum3
save(state_sum_Cares_Act, file = "state_sum_Cares_Act.RData")
|
5718d2c3bab85a0e99dbf003df9e001618154dfe
|
d080cf5725411e18d3aebdece53019c313a33fe3
|
/plots/seasonality_plot.R
|
fe7840ed1c87794bfb5b052dabdd8a2cca7f7179
|
[
"MIT"
] |
permissive
|
MacroFinanceHub/commodity-realized-volatility-forecasting
|
f625317c15c395c39239977ebb072b6ba4c9fabe
|
f8236269245eb1c56d108f725caa7f60393d72fa
|
refs/heads/main
| 2023-04-26T03:43:39.098595
| 2021-06-07T17:07:28
| 2021-06-07T17:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,123
|
r
|
seasonality_plot.R
|
# Monthly seasonality plot of realized volatility for one commodity ticker.
#
# Computes 0.5 * log(realized variance) from column "rvar.288.<ticker>", averages
# it by calendar month, and plots the monthly means with a 90% confidence ribbon.
#
# Args:
#   df: data frame holding a numeric "rvar.288.<ticker>" column and a `fin.date`
#       column of epoch seconds (converted via as.POSIXct below).
#   ticker: commodity ticker suffix used to pick the rvar column.
#   ticker.name: human-readable name used as the plot title.
# Returns: a ggplot object (also opens a new device via dev.new() as a side effect).
# NOTE(review): `cleaner.vec` is not defined in this file chunk -- presumably it
# sanitizes Inf/NaN values from the log transform; confirm against its definition.
SeasonalPlot <- function(df = df.min288
, ticker = "C"
, ticker.name = "Corn"){
library(reshape2)
library(ggplot2)
n.df <- dim(df)[1]
p.df <- dim(df)[2]
# 0.5 * log(variance) = log realized volatility.
rv <- 0.5 * cleaner.vec(log(as.numeric(df[, paste0("rvar.288.", ticker)])))
my.time <- as.POSIXct(df$fin.date, origin = "1970-01-01")
#aggregate by month
time.month <- as.numeric(format(my.time, format = "%m"))
df.time <- data.frame(cbind(time.month, rv))
agg.month.rv <- aggregate(df.time[, 2], list(df.time$time.month), mean, na.rm = T)
# 90% CI half-widths per month (ci.level = 0.1 passed through to ConfInterval).
ci.agg.month.rv <- aggregate(df.time[, 2], list(df.time$time.month), FUN = ConfInterval, ci.level = 0.1)
plot.df <- data.frame(cbind(month.abb, agg.month.rv$x, agg.month.rv$x + ci.agg.month.rv$x, agg.month.rv$x - ci.agg.month.rv$x))
# cbind with month.abb coerced everything to character; restore numeric columns.
plot.df[, 2:4] <- apply(plot.df[, 2:4], 2, as.double)# * 100
names(plot.df) <- c("my.month.abb", "mean", "upr", "lwr")
print(plot.df)
dev.new()
p1 <- ggplot(data = plot.df, aes(x = factor(my.month.abb, levels = month.abb), y = mean, colour = "monthly rv mean", group = 1))
p1 <- p1 + geom_line(data = plot.df, size = 1)
p1 <- p1 + geom_ribbon(data = plot.df, aes(ymin = upr, ymax = lwr), alpha = 0.2, colour = NA)
p1 <- p1 + theme(axis.title.y = element_blank())
p1 <- p1 + theme(axis.title.x = element_blank())
p1 <- p1 + theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
p1 <- p1 + theme(legend.position = "none")+ theme(legend.position = "none")
p1 <- p1 + ggtitle(ticker.name)
p1 <- p1 + theme(plot.title = element_text(hjust = 0.5))
p1 <- p1 + scale_y_continuous(labels = ScaleFunc)
p1 <- p1 + xlab("Month") + ylab("Average RV")
return(p1)
}
# Standard error of the mean of `x`, ignoring missing values.
#
# Args:
#   x: numeric vector; NAs are dropped before the computation.
# Returns: sd / sqrt(n) over the non-missing observations.
StdErr <- function(x){
obs <- na.omit(x)
n.obs <- length(obs)
sd(obs) / sqrt(n.obs)
}
# Half-width of a two-sided normal confidence interval around the mean of `x`.
#
# Args:
#   x: numeric vector (NAs handled by StdErr()).
#   ci.level: two-sided significance level alpha (e.g. 0.1 for a 90% CI).
#     The default lists the conventional choices; only the FIRST element is
#     used, mirroring the match.arg() convention. (Previously the whole
#     default vector was applied, so a default call silently returned three
#     half-widths instead of one; all call sites in this file pass a scalar.)
# Returns: StdErr(x) * z_{1 - alpha/2}, a single half-width.
ConfInterval <- function(x, ci.level = c(0.1, 0.05, 0.01)){
ci.level <- ci.level[[1]]          # match.arg-style: pick one level
upper.tail.p <- 1 - ci.level / 2   # two-sided: alpha/2 mass in each tail
StdErr(x) * qnorm(upper.tail.p)
}
# Axis-label formatter: render numbers with exactly two decimal places
# (passed to scale_y_continuous(labels = ...) in the plotting functions).
ScaleFunc <- function(x){
formatted <- sprintf(fmt = "%.2f", x)
formatted
}
# Variant of SeasonalPlot() with the month axis rotated to start in July
# (useful for commodities whose season straddles the calendar year, e.g. gas).
# Same computation as SeasonalPlot: monthly means of 0.5*log(realized variance)
# with a 90% confidence ribbon; returns a ggplot object and opens a device.
# NOTE(review): `cleaner.vec` is defined elsewhere in the file -- presumably it
# removes Inf/NaN from the log transform; confirm. Consider refactoring the
# shared body of SeasonalPlot/SeasonalPlotJuly into one helper.
SeasonalPlotJuly <- function(df = df.min288
, ticker = "NG"
, ticker.name = "Natural Gas"){
library(reshape2)
library(ggplot2)
n.df <- dim(df)[1]
p.df <- dim(df)[2]
rv <- 0.5 * cleaner.vec(log(as.numeric(df[, paste0("rvar.288.", ticker)])))
my.time <- as.POSIXct(df$fin.date, origin = "1970-01-01")
#aggregate by month
time.month <- as.numeric(format(my.time, format = "%m"))
df.time <- data.frame(cbind(time.month, rv))
agg.month.rv <- aggregate(df.time[, 2], list(df.time$time.month), mean, na.rm = T)
ci.agg.month.rv <- aggregate(df.time[, 2], list(df.time$time.month), FUN = ConfInterval, ci.level = 0.1)
plot.df <- data.frame(cbind(month.abb, agg.month.rv$x, agg.month.rv$x + ci.agg.month.rv$x, agg.month.rv$x - ci.agg.month.rv$x))
# Rotate rows so the season runs Jul..Dec then Jan..Jun.
plot.df <- rbind(plot.df[7:12, ], plot.df[1:6, ])
# cbind with month.abb coerced to character; restore numeric columns.
plot.df[, 2:4] <- apply(plot.df[, 2:4], 2, as.double)# * 100
names(plot.df) <- c("my.month.abb", "mean", "upr", "lwr")
print(plot.df)
dev.new()
p1 <- ggplot(data = plot.df, aes(x = factor(c(month.abb[7:12], month.abb[1:6]), levels = c(month.abb[7:12], month.abb[1:6])), y = mean, colour = "monthly rv mean", group = 1))
p1 <- p1 + geom_line(data = plot.df, size = 1)
p1 <- p1 + geom_ribbon(data = plot.df, aes(ymin = upr, ymax = lwr), alpha = 0.2, colour = NA)
p1 <- p1 + theme(axis.title.y = element_blank())
p1 <- p1 + theme(axis.title.x = element_blank())
p1 <- p1 + theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
p1 <- p1 + theme(legend.position = "none")+ theme(legend.position = "none")
p1 <- p1 + ggtitle(ticker.name)
p1 <- p1 + theme(plot.title = element_text(hjust = 0.5))
p1 <- p1 + scale_y_continuous(labels = ScaleFunc)
p1 <- p1 + xlab("Month") + ylab("Average RV")
return(p1)
}
|
af2776c537e1c229c70c763e7858416da7d83f70
|
56afafa119c6babbb6e551b48a3356fb34fc7bde
|
/Create_Edges.R
|
3e0c7dea51b2ab0fb5efe98a4bf455e629a9f89c
|
[] |
no_license
|
mflucas/Multimodal-RL-preparation
|
8cd7c146e982ddc6eb62340394bbbf0670839621
|
4eb9ac4ec0ead3eaa21fe42fa3e0aaa546faad6e
|
refs/heads/master
| 2020-04-04T10:50:30.751964
| 2018-11-04T13:14:00
| 2018-11-04T13:14:00
| 155,868,431
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,983
|
r
|
Create_Edges.R
|
# Create_Edges.R -- builds a static public-transit network (links + transfers)
# for the Zurich study area from GTFS-derived edge data, then formats route
# observations for a recursive-logit estimator.
# NOTE(review): setwd()/rm(list = ls()) at the top of a script are fragile but
# the rest of the file relies on this working directory; kept as-is.
#setwd("C:/Users/mlucas/polybox/Masterarbeit/Multimodal_Estimation/Data_preparation")
setwd("/Users/lucasmeyer/polybox/Masterarbeit/Multimodal_Estimation/Data_preparation")
getwd()
# plyr is loaded before dplyr on purpose (the recommended order to limit masking).
library(plyr)
library(dplyr)
library(geosphere)
library(stringr)
library(data.table)
library(igraph)
library(rgdal)
library(raster)
library(sp)
library(maptools)
library(ggplot2)
library(scales)
rm(list = ls())
#rawData <- read.table("Pt_edges_large.txt", sep=";", header=TRUE)
rawData2 <- read.table("Pt_edges_large2.txt", sep=";", header=TRUE)
#Reduce GTFS data to study area
#The delimited area for the transit Network has to be larger than the area for the observations
# rawData <- subset(rawData2, FromX<8.8866 & FromX>8.1876 & FromY<47.5265& FromY>47.1533)
# rawData <- subset(rawData, ToX<8.8866 & ToX>8.1876 & ToY<47.5265 & ToY>47.1533)
# Bounding box (lon/lat) clipping both edge endpoints to the inner Zurich area.
rawData <- subset(rawData2, FromX<8.6541 & FromX>8.3019 & FromY<47.4940& FromY>47.2960)
rawData <- subset(rawData, ToX<8.6541 & ToX>8.3019 & ToY<47.4940 & ToY>47.2960)
#Delete Duplicated edges
#uniqueRoutes <- rawData[!(duplicated(rawData[c("RouteID","DirectionID","FromStop","ToStop")])),]
#TODO:: route transit pairs in zurich to find transit time in links
#Create the simplified network
#_______________________________________________________________________________________________________________________________________________________________#
# Collapse the dynamic schedule: one link per (route, direction, stop pair, type),
# keeping the minimum travel time and mean coordinates/headway.
averageTT <- ddply(rawData, .(RouteID, DirectionID, FromStop, ToStop, Type),summarize,TravelTime=min(TravelTime),FromX=mean(FromX), FromY=mean(FromY), ToX=mean(ToX), ToY=mean(ToY), Edge=mean(Edge), Headway=mean(Headway))
#Transform the dynamic transit schedule to a static one, that is allow for only one link for each pair of Stops, by direction and RouteID.
summary(averageTT$TravelTime)
length(unique(averageTT$RouteID))
#Add traveltimes for null values of travel time.
averageTT <- as.data.table(averageTT)
# Geodesic link lengths (meters) from the lon/lat endpoints.
averageTT[ , Distance := distGeo(matrix(c(FromX, FromY), ncol = 2),
matrix(c(ToX, ToY), ncol = 2))]
averageTT <- as.data.frame(averageTT)
# Impute zero travel times using 1.2x the average network speed over the link distance.
averageSpeed <- mean(averageTT$Distance[averageTT$TravelTime>0]/ averageTT$TravelTime[averageTT$TravelTime>0])
averageTT = within(averageTT, {TravelTime = ifelse(TravelTime==0, 1.2*Distance/averageSpeed, TravelTime)})
#Write out the network file
#_______________________________________________________________________________________________________________________________________________________________#
#Now give these patterns to the rawData, because the observations from R5 only have a TripID, not RouteID, so that
#this can be used to look up which links were exactly taken.
#_______________________________________________________________________________________________________________________________________________________________#
k <- c(1:nrow(averageTT))
averageTT$EdgeID <- k
patterns <- left_join(rawData, averageTT[,c("FromStop","ToStop","RouteID","DirectionID","EdgeID")], by=c("FromStop","ToStop","RouteID","DirectionID"))
#_______________________________________________________________________________________________________________________________________________________________#
#Now create stop ID's, which will be used for the creation of the transfer links
#ATTENTION: The code below takes a long time to run (30min for around 1600 stops). This is why this code is usually saved to file
#_______________________________________________________________________________________________________________________________________________________________#
# Unique stop table: union of From- and To-endpoints, de-duplicated by StopID.
stops1 <- averageTT[,c("FromStop","FromX", "FromY")]
names(stops1) <- c("StopID","X","Y")
stops2 <- averageTT[,c("ToStop","ToX", "ToY")]
names(stops2) <- c("StopID","X","Y")
allEntries <- rbind(stops1, stops2)
stops <- allEntries[!(duplicated(allEntries$StopID)),]
allEntries <- NULL
stops1 <- NULL
stops2 <- NULL
# The commented-out block below is the original (slow, O(n^2)) generator of
# walking transfer links between all stop pairs within 400 m; its output was
# saved once to transferLinks.txt and is re-read below instead of recomputed.
#
# transfers <- averageTT
# transfers$TransferDistance <- 0
#
# #Expand the table several times
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
# transfers <- rbind(transfers,transfers)
#
#
# #Create transferLinks
#
# c=1
# a=1
# for(a in 1:nrow(stops)){
#
# aX=stops[a,"X"]
# aY=stops[a,"Y"]
# stopsA=c(aX,aY)
# test <- unlist(lapply(seq_len(nrow(stops)), function(i) distGeo(c(stops[i,"X"],stops[i,"Y"]),stopsA)))
# test <- as.data.frame(test)
#
# b=1
# for(b in 1:nrow(test)){
# if(test[b,1]<400){
# transfers[c, "RouteID"] <- as.character("Transfer")
# transfers[c, "DirectionID"] <- as.character("Transfer")
# transfers[c, "FromStop"] <- stops[a,"StopID"]
# transfers[c, "ToStop"] <- stops[b,"StopID"]
# transfers[c, "TravelTime"] <- 1.2*test[b,1]/1.4+120
# transfers[c, "FromX"] <- aX
# transfers[c, "FromY"] <- aY
# transfers[c, "ToX"] <- stops[b,"X"]
# transfers[c, "ToY"] <- stops[b,"Y"]
# transfers[c, "TransferDistance"] <- test[b,1]
# c=c+1
# }
# }
# }
#
# transfers <- subset(transfers, is.na(RouteID))
# transfers <- subset(transfers, FromStop!=ToStop)
#
#
#
# #Create full network(transit links + transfer links)
# transfers$Edge <- NULL
# averageTT$Edge <- NULL
#
#
# transfers$isTransfer <- as.numeric(1)
# averageTT$isTransfer <- 0
# transfers$RouteID <- "Transfer"
#
#
#
# write.table(transfers, "transferLinks.txt", row.names = FALSE, sep=";", quote=FALSE)
# Re-load the precomputed transfer links instead of regenerating them.
transfers <- read.table("transferLinks.txt", sep=";", header=TRUE)
# Add self-transfer ("stay at stop") links with a nominal 2-unit travel time.
transfers_same <- transfers
transfers_same <- transfers_same[!(duplicated(transfers_same$FromStop)),]
transfers_same$ToStop <- transfers_same$FromStop
transfers_same$TravelTime <- 2
transfers_same$ToX <- transfers_same$FromX
transfers_same$ToY <- transfers_same$FromY
transfers_same$TransferDistance <- 0
transfers_same$isTransfer <- 1
transfers <- rbind(transfers, transfers_same)
rm(transfers_same)
#Now add the transferLinks to the Pt Links to create the final transit network
averageTT$isTransfer <- 0
transfers$Headway <- 0
# Transfer EdgeIDs continue after the transit EdgeIDs so IDs stay unique.
a <- c(1:nrow(transfers))
transfers$EdgeID <- a
transfers$EdgeID <- transfers$EdgeID + max(averageTT$EdgeID)
# Type 1 marks transfer links (GTFS route_type codes are >= 100 for transit).
transfers$Type <- 1
pt_network <- averageTT[,c("EdgeID","FromStop", "ToStop", "FromX","FromY","ToX","ToY","TravelTime","Distance","Headway", "Type")]
transfer_network <- transfers[,c("EdgeID","FromStop", "ToStop", "FromX","FromY","ToX","ToY","TravelTime","TransferDistance","Headway", "Type")]
colnames(transfer_network)[9] <- "Distance"
edgeList <- averageTT[,c("EdgeID","FromX","FromY","ToX","ToY")]
edgeListTransfers <- transfers[,c("EdgeID","FromX","FromY","ToX","ToY")]
#Quick plot of the network
#First convert latlon to UTM to maintain true distances in plot
# Project lon/lat (WGS84) coordinates into a UTM zone for distance-true plotting.
#
# Args:
#   x, y: numeric vectors of longitudes and latitudes (equal length).
#   zone: UTM zone passed into the proj4 string, e.g. "32T" as used below
#     (NOTE(review): PROJ parses only the leading digits of "+zone=32T";
#     the trailing band letter is ignored -- confirm this is intended).
# Returns: data.frame with columns ID, X, Y (projected easting/northing).
LongLatToUTM<-function(x,y,zone){
  # seq_along() instead of 1:length(x) so empty input yields zero rows, not c(1, 0).
  xy <- data.frame(ID = seq_along(x), X = x, Y = y)
  coordinates(xy) <- c("X", "Y")
  proj4string(xy) <- CRS("+proj=longlat +datum=WGS84") ## for example
  # Fixed: the ellipsoid token previously lacked its "+" prefix (" ellps=WGS84"),
  # so PROJ silently ignored it; "+ellps=WGS84" now states the intent explicitly.
  res <- spTransform(xy, CRS(paste0("+proj=utm +zone=", zone, " +ellps=WGS84")))
  return(as.data.frame(res))
}
# Project edge endpoints to UTM zone 32 for a distance-true base-graphics map.
fromUTM <- LongLatToUTM(edgeList$FromX, edgeList$FromY, "32T")
toUTM <- LongLatToUTM(edgeList$ToX, edgeList$ToY, "32T")
edgeList$FromX <- fromUTM$X
edgeList$FromY <- fromUTM$Y
edgeList$ToX <- toUTM$X
edgeList$ToY <- toUTM$Y
#Plot the map. Add background in illustrator.
plot(edgeList$FromX, edgeList$FromY, cex=0.3)
segments(edgeList$FromX, edgeList$FromY, edgeList$ToX, edgeList$ToY, lwd=0.7)
# NOTE(review): edgeListTransfers is still in lon/lat here (only edgeList was
# projected), so the red transfer segments will not align with the UTM axes.
segments(edgeListTransfers$FromX, edgeListTransfers$FromY, edgeListTransfers$ToX, edgeListTransfers$ToY, col="red")
# NOTE(review): `network` is not created until further below (rbind of
# pt_network/transfer_network), so this section only runs on a re-execution
# after `network` exists. Also `plot` shadows base::plot until rm(plot).
plot <- subset(network, Type>1)
# Map GTFS route_type codes to readable mode labels for the bar chart.
plot = within(plot, {mode = ifelse(Type==1, "Transfer",
ifelse(Type==102, "LongDistTrain",
ifelse(Type==103, "InterRegTrain",
ifelse(Type==106,"RegTrain",
ifelse(Type==400 | Type==100, "CommuterTrain",
ifelse(Type==700, "Bus",
ifelse(Type==900, "Tram",
ifelse(Type==1000,"Boat",
ifelse(Type==1300,"Telecabin",
ifelse(Type==1400,"Funicular", "CommunalTaxi"
))))))))))})
ggplot(plot)+ aes(x=mode, group=mode) +
stat_count(aes(y=..count..), binwidth = 1) + theme_bw() + #coord_cartesian(xlim=c(0,20))+
theme(legend.title=element_blank(),legend.position="bottom") +
xlab("Mode") + ylab("Edges") + theme(axis.text.x = element_text(angle = 45, hjust = 1))
rm(plot)
#Now save all non-transfer links to a file to route on R5 and get the real headways
transitNetwork <- averageTT[,c("EdgeID", "FromX", "FromY", "ToX", "ToY","TravelTime")]
write.table(transitNetwork, "network_input_R5.txt", sep=";",row.names = FALSE, quote=FALSE)
#___________________________________________________________________________________________________________________________
#Route the file above in the headwayWriter class in Java. First for morning peak:
# profileRequest.fromTime=27000;7:30am
# profileRequest.toTime = 30600;
#___________________________________________________________________________________________________________________________
#Check headways
# These files are produced externally by the Java headwayWriter step described above.
headways <- read.table("headways.txt", header= TRUE, sep=";")
headways_midday <- read.table("headways_midday.txt", header= TRUE, sep=";")
test <- subset(headways,TravelTime>0)
# NOTE(review): the `increase` column plotted here is not created in this chunk --
# presumably computed elsewhere (peak vs off-peak headway ratio); confirm.
ggplot(test)+ aes(x=increase) +
geom_histogram(aes(y=..count../sum(..count..)), binwidth = 0.1) + theme_bw() + coord_cartesian(xlim=c(0,2.2))+
theme(legend.title=element_blank(),legend.position="bottom") +
xlab("% difference between off-peak to peak headways") + ylab("Frequency") + scale_y_continuous(labels=percent)
#Now add headways and updated travel times to the network
pt_network$TravelTime <- NULL
pt_network$Headway <- NULL
pt_network <- left_join(pt_network, headways[,c("EdgeID", "TravelTime", "Headway")],"EdgeID")
pt_network = within(pt_network, {TravelTime = ifelse(TravelTime==0, 60,TravelTime)})#Force travel time to 1min if it is 0
#___________________________________________________________________________________________________________________________
#Create transport network
network <- rbind(pt_network,transfer_network)
#Now scale the variables
network$TravelTime <- network$TravelTime/60 #to get travel time in minutes
network$Headway <- network$Headway/60 #headways also in minutes
network$Distance <- network$Distance/1000 #distance in km
#Now add the type of the transit route
# One 0/1 dummy per mode, derived from the GTFS route_type codes.
network$isTransfer <- ifelse(network$Type==1,1,0)
network$isLongDistTrain <- ifelse(network$Type==102,1,0)
network$isInterRegTrain <- ifelse(network$Type==103,1,0)
network$isRegTrain <- ifelse(network$Type==106,1,0)
network$isSBahn <- ifelse(network$Type==400 | network$Type==100,1,0) #Almost all 100 types are SBahn, specially in the smaller area within the city
network$isBus <- ifelse(network$Type==700,1,0)
network$isTram <- ifelse(network$Type==900,1,0)
network$isBoat <- ifelse(network$Type==1000,1,0)
network$isTelecabin <- ifelse(network$Type==1300,1,0)
network$isFunicular <- ifelse(network$Type==1400,1,0)
network$isCommunalTaxi <- ifelse(network$Type==1501,1,0)
#Make sure stop ids are unique
network$FromStop <- network$FromStop*1000
network$ToStop <- network$ToStop*1000
colnames(network)[1] <- "LinkID"
write.table(network, "network_pt_zurich_coords.txt", sep=",",row.names = FALSE, quote=FALSE)
#_______________________________________________________________________________________________________________________________________________________________#
#Read in observations
#Create Edge sequence as observations for each transit leg
#_______________________________________________________________________________________________________________________________________________________________#
observations <- read.table("observations_pt_zurich.txt", sep=";", header=TRUE)
observations <- observations[!(observations$ID==-99),]
#Make stop ID from observations match the ID from the network (Java always starts indexes with 0)
observations$FromStop <- observations$FromStop
observations$ToStop <- observations$ToStop
observations$Links <- as.character(0)
i=1
for(i in 1:nrow(observations)){
tripPattern = subset(patterns, TripID==as.character(observations[i,"TripId"]))
j=(observations[i,"FromStop"])
for(j in (observations[i,"FromStop"]):(observations[i,"ToStop"])-1){
if(j==(observations[i,"FromStop"])){
observations[i,"Links"] <- paste0(tripPattern[j,"EdgeID"],sep="")
} else{
observations[i,"Links"] <- paste(observations[i,"Links"] ,tripPattern[j,"EdgeID"],sep=",")
}
}
}
#Now add the transferLinks and convert observations to a single row
#_______________________________________________________________________________________________________________________________________________________________#
o=2
for(o in 2:nrow(observations)){
if(observations[o,"ID"]==observations[(o-1), "ID"]){
first <- str_split(observations[(o-1),"Links"], ",")
second <- str_split(observations[(o),"Links"], ",")
a <- as.numeric(first[[1]][length(first[[1]])])
b <- as.numeric(second[[1]][1])
from <- averageTT$ToStop[averageTT$EdgeID==a]
to <- averageTT$FromStop[averageTT$EdgeID==b]
c <- as.numeric(transfers$EdgeID[transfers$FromStop==from & transfers$ToStop==to])
observations[o,"Links"] <- paste(c,observations[o,"Links"],collapse=",", sep=",")
}
}
#Convert all observations of a single ID to a single row and write them to file
final_observations <- aggregate(Links ~ ID, data = observations, paste, collapse = ",")
final_observations <- final_observations[-(grep("NA", final_observations$Links)),]
#Now add the id's of destination stop at the beggining and end of the observations
x=1
final_observations$lastStop <- 0
for(x in 1:nrow(final_observations)){
last <- str_split(final_observations[x,"Links"], ",")
final_observations[x,"lastStop"] <- network$ToStop[network$LinkID==as.numeric(last[[1]][length(last[[1]])])]
}
final_observations$lastStop2 <- final_observations$lastStop
final_observations <- final_observations[,c("ID", "lastStop","Links","lastStop2")]
final_observations$Links <- gsub(",,", ",", final_observations$Links)
write.table(final_observations, "final_observations_pt_zurich.txt", sep=",",row.names = FALSE, quote=FALSE,col.names = FALSE)
#_______________________________________________________________________________________________________________________________________________________________#
#_______________________________________________________________________________________________________________________________________________________________#
#Now complete data formatting to match the one required by the RL estimator by adding destination links
#First clean workspace
# NOTE(review): rm(list = ls()) wipes every object in the global environment.
# That is tolerable for a standalone batch script, but dangerous when this file
# is sourced from an interactive session — consider running this stage in a
# fresh R process instead.
rm(list = ls())
network <- read.table("network_pt_zurich_coords.txt", sep=",", header=TRUE)
# Observation file written by the previous stage of this script.
data <- "final_observations_pt_zurich.txt"
# Observation rows carry a variable number of link IDs, so the file is ragged:
# count.fields() finds the widest row, col.names pads out that many columns
# (V1, V2, ...), and fill = TRUE fills shorter rows with NA.
observations <- read.table(data, header = FALSE, sep = ",", col.names = paste0("V",seq_len(max(count.fields(data, sep = ',')))), fill = TRUE)
#ENSURE CONSISTENCY
#This is to make sure that all of the links in the observations are indeed in the link attributes file and deletes observations which are not.
# modifier <- observations
# modifier[cbind(1:nrow(modifier), max.col(!is.na(modifier), 'last'))] <- NA
# #modifier$V300 <- NA
#
# alll <- function(x) ifelse(all(x[3:length(x)][!is.na(x[3:length(x)])] %in% network$LinkId)==FALSE,x[1],0)
# problems <- apply((modifier), 1, alll)
# problems <- as.data.frame(problems)
# problems <- subset(problems, problems>0)
# observations <- observations[!(observations$V1 %in% problems$problems),]
#
#Create Destination Links
# Build one artificial "destination link" per unique final stop observed in the
# data: take the last two non-NA fields of every observation row (the final
# link ID and the final stop), deduplicate by stop, and assign fresh node/link
# IDs beyond the ranges already used by the network.
lastValues <- function(x) tail(x[!is.na(x)], 2)
destinations <- apply(observations, 1, lastValues)
destinations <- as.data.frame(destinations)
destinations <- transpose(destinations)  # data.table::transpose — presumably loaded earlier; TODO confirm
colnames(destinations) <- c("LinkID","ToStop")
destinations$FromStop <- destinations$ToStop
destinations <- destinations[!duplicated(destinations$ToStop), ]
# seq_len() is safe when there are 0 rows (1:nrow() is not), and assigning the
# offset sequence directly removes the original temp-column roundabout and the
# double assignment of LinkID.
destinations$ToStop <- seq_len(nrow(destinations)) + max(network[, c("FromStop", "ToStop")])
destinations$LinkID <- seq_len(nrow(destinations)) + max(network$LinkID)
# Destination links are virtual: all physical attributes and mode flags are 0.
destinations[,c("Distance", "Headway", "TravelTime", "Type", "isTransfer","isLongDistTrain", "isRegTrain", "isSBahn", "isBus", "isTram", "isBoat", "isTelecabin", "isFunicular", "isCommunalTaxi")] <- 0
# Build a lookup table mapping every stop/link code that can appear in the
# observations to its LinkID: destination stops map to their new virtual
# LinkID, and existing network LinkIDs map to themselves.
replace <- destinations[,c("LinkID", "FromStop")]
repNet <- network["LinkID"]
repNet$FromStop <- repNet$LinkID
replace <- rbind(replace, repNet)
# Relabel every cell of the observation matrix in one vectorized pass:
# match() finds each cell's position in the lookup keys, and `observations[] <-`
# writes the mapped LinkIDs back while keeping the data-frame shape.
observations[] <- replace$LinkID[match(unlist(observations), replace$FromStop)]
# Drop the first column (the trip ID, which the mapping above scrambled —
# presumably intentional since it is not needed downstream; TODO confirm).
observations <- observations[,2:ncol(observations)]
# Pad the ragged trailing cells with 0 as required by the estimator format.
observations[is.na(observations)] <- 0
# Align both tables on the column order expected by the RL estimator, stack
# them into the complete attribute table, and write the four estimator inputs.
attr_cols <- c("LinkID", "FromStop", "ToStop", "TravelTime", "Distance",
               "Headway", "Type", "isTransfer", "isLongDistTrain", "isRegTrain",
               "isSBahn", "isBus", "isTram", "isBoat", "isTelecabin",
               "isFunicular", "isCommunalTaxi")
network <- network[, attr_cols]
destinations <- destinations[, attr_cols]
network_final <- rbind(network, destinations)
# Single-cell table holding the number of artificial destination links.
dest <- as.data.frame(nrow(destinations))
write.table(observations, "observations_estimation_pt.txt", sep = ",", quote = FALSE, row.names = FALSE, col.names = FALSE)
write.table(network_final, "LinkAttributes_complete.txt", sep = ",", quote = FALSE, row.names = FALSE)
write.table(network, "LinkAttributes_estimation.txt", sep = ",", quote = FALSE, row.names = FALSE)
write.table(dest, "destinations.txt", sep = ",", quote = FALSE, row.names = FALSE, col.names = FALSE)
#_______________________________________________________________________________________________________________________________________________________________#
|
a9e3c52a9fc8fb628942eabb0aff7d969f3e3338
|
117bcd434e6b982d6f63eee04c6ab6c8d6c7c5f8
|
/ChiSqTests/main.R
|
2d03a24b845d7a2480b7fe9d977ee04fcaf93a6f
|
[] |
no_license
|
Rashid-Ahmed/Statistics-with-R
|
26849d6ce322fed487715ad93244a682e9d63494
|
3bc5fbc72badb27423a348b99ebe767a828b0d28
|
refs/heads/main
| 2023-07-29T20:41:14.506544
| 2021-09-24T17:49:28
| 2021-09-24T17:49:28
| 358,590,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,949
|
r
|
main.R
|
###############
### Exercise 1: Plotting graphs using ggplot.
###############
# There are many ways of making graphs in R, and each has their own advantages
# and disadvantages. One popular package for making plots is ggplot2.
# The graphs produced with ggplot2 look professional and the code is quite easy
# to manipulate.
# In this exercise, we'll plot a few graphs with ggplot2 to show its functionalities.
# You'll find all the information you'll need about plotting with ggplot2 here:
# http://www.cookbook-r.com/Graphs/
# Also, you have been assigned the ggplot2 course in DataCamp. Please work through
# this course first to easily solve the assignment below
## a) First install and load the ggplot2 package. Look at the help for ggplot.
# Install only when the package is missing: an unconditional install.packages()
# call re-downloads and reinstalls ggplot2 on every run of the script.
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  install.packages("ggplot2")
}
library(ggplot2)
## b) We're going to be plotting data from the dataframe 'ratings'
##    (included in languageR).
##    Look at the description of the dataset and the summary.
library(languageR)
str(languageR::ratings)
summary(languageR::ratings)
## For each word, we have three ratings (averaged over subjects), one for the
## weight of the word's referent, one for its size, and one for the words'
## subjective familiarity. Class is a factor specifying whether the word's
## referent is an animal or a plant.
## Furthermore, we have variables specifying various linguistic properties,
## such as word's frequency, its length in letters, the number of synsets
## (synonym sets) in which it is listed in WordNet [Miller, 1990], its
## morphological family size (the number of complex words in which
## the word occurs as a constituent), and its derivational entropy (an
## information theoretic variant of the family size measure).
## Don't worry, you don't have to know what all this means yet in order to
## be able to plot it in this exercise!
## c) Let's look at the relationship between the class of words and the length.
##    In order to plot this, we need a dataframe with the means.
##    Plot a barplot of ratings.2 using ggplot. Map the two classes to two
##    different colours. Remove the legend.
ratings <- languageR::ratings  # use <- for assignment (was `=`)
head(ratings)
summary(ratings)
condition <- c("animal", "plant")
# Per-class means. Named mean_* so they do not mask base::length() and
# stats::frequency(); the data.frame column names are kept unchanged so the
# ggplot calls below (and later exercises) still work.
mean_frequency <- c(mean(subset(ratings, Class == "animal")$Frequency),
                    mean(subset(ratings, Class == "plant")$Frequency))
mean_length <- c(mean(subset(ratings, Class == "animal")$Length),
                 mean(subset(ratings, Class == "plant")$Length))
ratings.2 <- data.frame(condition = condition, frequency = mean_frequency, length = mean_length)
ratings.2
# Bar plot of mean length per class, coloured by class, legend removed.
ggplot(data = ratings.2, aes(x = condition, y = length, fill = condition)) +
  geom_bar(stat = "identity") +
  theme(legend.position = "none")
## d) Let's assume that we have additional data on the ratings of words.
##    This data divides the conditions up into exotic and common animals
##    and plants.
##    Draw a line graph with multiple lines to show the relationship between
##    the frequency of the animals and plants and their occurrence.
##    Map occurrence to different point shapes and increase the size
##    of these point shapes.
condition <- c("animal", "plant")
# Hypothetical ratings for exotic referents. Named exotic_* so the locals do
# not mask base::length() / stats::frequency(); the resulting column names
# (condition, frequency, length) stay identical to ratings.2 so rbind works.
exotic_frequency <- c(7.4328978, 3.5864538)
exotic_length <- c(5.15678625, 7.81536584)
ratings.add <- data.frame(condition = condition, frequency = exotic_frequency, length = exotic_length)
ratings.3 <- rbind(ratings.2, ratings.add)
# First two rows come from ratings.2 (common), last two from ratings.add (exotic).
ratings.3$occurrence <- c("common", "common", "exotic", "exotic")
ratings.3
# Line plot of frequency by condition; occurrence mapped to point shape/size.
ggplot(ratings.3, aes(x = condition, y = frequency, color = condition)) +
  geom_point(aes(shape = occurrence, size = occurrence)) +
  geom_line()
## e) Based on the graph you produced in question d,
## what can you conclude about how frequently
## people talk about plants versus animals,
## with regards to how common they are?
#We can see that people talk more frequently about exotic animals than about common animals,
#but more frequently about common plants than about exotic plants.
#Also, they talk more frequently about animals than about plants in general.
##########
##Exercise 2. Binomial distribution
##########
## Suppose there are 12 multiple choice questions in a quiz.
## Each question has 4 possible answers, and only one of them is correct.
## a) Please calculate the probability of getting exactly 5 answers right
##    if you answer by chance. Calculate this using the dbinom() function.
# P(X = 5) for X ~ Binomial(n = 12, p = 1/4)
prob_exactly_5 <- dbinom(5, size = 12, prob = 0.25)
prob_exactly_5
# Cross-check against the closed form choose(12, 5) * p^5 * (1-p)^7 = 792 * ...
verified <- round((.25^5)*(.75^7)*792, 8) == round(prob_exactly_5, 8)
print(verified)
## b) Next please calculate the probability of answering 4 or less questions
##    correctly by chance.
# pbinom() gives the cumulative probability P(X <= 4) directly — idiomatic
# replacement for sum(dbinom(0:4, 12, 0.25)), which it equals exactly.
prob_at_most_4 <- pbinom(4, size = 12, prob = 0.25)
prob_at_most_4
##########
##Exercise 3. Chi-square test
##########
## a) Consider the dataset dutchSpeakersDistMeta from sheet1.
##    Load the package (languageR) and inspect the summary and the classes of
##    the variables. Which variables are factors?
library(languageR)
dist_meta <- languageR::dutchSpeakersDistMeta
summary(dist_meta)
str(dist_meta)
# Answer: Speaker, Sex, AgeGroup, ConversationType and EduLevel are factors.
## b) We want to find out whether there is a difference between males and females
## with respect to the age groups they are in.
## First use the function 'table()' to get the counts and create
## a contingency table of AgeGroup by Sex.
# Cross-tabulation of counts: rows = Sex, columns = AgeGroup.
table(languageR::dutchSpeakersDistMeta$Sex, languageR::dutchSpeakersDistMeta$AgeGroup)
## Visualize your data with a single bar plot (use ggplot) that represents the counts with
## respect to each age group and each sex.
# Dodged bars place one bar per sex side by side within each age group;
# rows with a missing AgeGroup are dropped from the bars (na.rm = TRUE) and
# the NA category is removed from the x axis (na.translate = FALSE).
ggplot(languageR::dutchSpeakersDistMeta, aes(x = AgeGroup, fill = Sex)) +
geom_bar(position = position_dodge(), na.rm = TRUE) +
scale_x_discrete(na.translate = FALSE)
## c) Inspect the table you created in b). Does it look like there could be a significant
## difference between the sexes?
# The difference between the sexes does not look very large, but whether it is
# "significant" really depends on the significance level we are considering.
## d) We are going to calculate whether there's a difference between males and females
## regarding their age group using the function chisq.test.
## Look at the help of this function.
## Then use the function to calculate whether there's a difference in our table from b).
## Is there a significant difference in age group?
chisq.test(languageR::dutchSpeakersDistMeta$AgeGroup, languageR::dutchSpeakersDistMeta$Sex)
# There isn't a significant difference between the age groups: the p-value is
# even > 0.5, while the usual significance thresholds are 0.05 or 0.1, so we
# can comfortably say that there is no difference between the groups.
## e) What are the degrees of freedom for our data? How are they derived?
# The degrees of freedom reported for our data are 4. For an r x c contingency
# table the chi-square test uses (r - 1) * (c - 1) degrees of freedom; since
# Sex has 2 levels, this reduces to (number of age groups - 1), which is why
# the simple "n - 1" reasoning gives the right number here. For example, with
# 5 age groups and 2 sexes we get (5 - 1) * (2 - 1) = 4.
##########
##Exercise 4. Binomial versus chi-square
##########
## In this exercise, we consider a made-up example of whether doctors can
## predict if a patient has a temperature or not just by holding their hand.
## Several doctors were blindfolded and asked to tell if the experimenter
## has a temperature/fever or not.
## There were a total of 200 trials, of which the doctors
## correctly indicated that a patient had fever 83 times.
## a) What is the null hypothesis, i.e. how often would we expect the doctor to
##    be correct by chance (in raw number and in percentage)?
# Null hypothesis: chance-level performance (mean = 100 / p = 50%), i.e. on
# average 100 of the 200 trials would be answered correctly just by chance.
## b) Using a chisquare test, what do you conclude about whether this idea
##    of checking fever/temperature works?
expected <- 100
observed <- 83
# Pearson chi-square statistic over the two outcome categories
# (correct answers vs. incorrect answers)
outcome_counts <- c(observed, 200 - observed)
sum((outcome_counts - expected)^2 / expected)
# The value we get is 5.78.
# The corresponding probability lies between 1% and 2.5%, which is below the 5%
# significance threshold, so we can reject the hypothesis and safely conclude
# that the doctors cannot check for fever by just holding the patients' hands.
## c) Now calculate significance using the binomial test as we used it in exercise 2.
pbinom(83, size = 200, prob = 0.5)
# ~0.96%
## d) The results from these two tests are slightly different. Which test do you think
##    is better for our data, and why?
# The binomial test is better because we only have two categories (has fever /
# does not have fever). With two categories the chi-square test has only one
# degree of freedom, and that gives us an inaccurate result.
##########
##Exercise 5.
##########
## Describe a situation where you would choose McNemar's test over the ChiSquare test.
## What would be the problem of using the normal ChiSquare test in a case where
## McNemar's test would be more appropriate?
# A situation where the observations are not independent of one another: for example, the number of people taking the Stats with R course before
# and after watching a video on the course structure. In this case the observations depend on watching the video, so we should not apply the
# chi-square test and should instead apply McNemar's test. Furthermore, McNemar's test can only be applied in a 2x2 scenario such as this (yes/no)
# for (before/after watching the video).
# If we want to know whether what we exposed the sample to helped us (so, in the case above, whether the video helped change
# people's perspective on the course and persuaded them to take it), it would be a bad idea to use the chi-square test, as it will not measure
# the change that the video created.
|
365516aa39e719b23235b4f6ba33cd786311c2bc
|
180d78934071c2308630fd1095f99a0afb5bcb66
|
/R-Tutorial/r-t-test-one-sample_kr.R
|
3709892c7355fadf1252964b17d619ae000a71e2
|
[
"MIT"
] |
permissive
|
misdb/R
|
d43d19303bdd74fc61ce9593e80b33f05ac4c11d
|
2532215acdac013ae7d6925174472850fe266b9f
|
refs/heads/master
| 2021-08-16T10:07:19.398509
| 2021-06-25T09:09:40
| 2021-06-25T09:09:40
| 211,976,382
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
r-t-test-one-sample_kr.R
|
# One-sample t-test on simulated cookie weights.
# H0: mu = 10 (the cookies weigh 10 units on average).
set.seed(123)  # fixed seed so the simulated sample is reproducible
n_cookies <- 30
sugar_cookie <- rnorm(n_cookies, mean = 9.99, sd = 0.04)
head(sugar_cookie)
t.test(sugar_cookie, mu = 10)
# paired t-test: compare sales before vs. after the program for the same units
set.seed(123)
# sales before the program
sales_before <- rnorm(7, mean = 50000, sd = 50)
# sales after the program. This has a higher mean
sales_after <- rnorm(7, mean = 50075, sd = 50)
# Run the paired test. BUG FIX: the original called
# t.test(sales_before, sales_after, var.equal = TRUE), which is an independent
# two-sample pooled-variance test, not the paired t-test the comment announces.
# paired = TRUE tests the mean of the within-pair differences (var.equal is
# irrelevant for a paired test and has been dropped).
t.test(sales_before, sales_after, paired = TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.