blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
59304bf3035203bdd2f8ce874faf6c7891d5fd8d
|
bc735be6bb9db9b04e423ff365b9b73dda73dfad
|
/hkhousing.r
|
8fe5ae14eb1454d32e54898a51565a3db1ca958b
|
[] |
no_license
|
fzhang612/r_utility_code
|
ada2d51624e40859b9cc9b7a1608beb4cb3dbd4f
|
18c48499b882e18cb946da7de1cae34f1c3cd1d0
|
refs/heads/master
| 2021-01-15T16:16:01.351377
| 2011-09-13T14:26:02
| 2011-09-13T14:26:02
| 2,378,644
| 2
| 5
| null | 2018-02-02T01:25:46
| 2011-09-13T14:06:11
|
R
|
UTF-8
|
R
| false
| false
| 4,441
|
r
|
hkhousing.r
|
# Exploratory analysis of Hong Kong housing data (hkhousing.csv):
# PCA, k-means clustering, 3-D score plots, and year-by-year animations
# rendered to LaTeX via the animation package.
# NOTE(review): written against the pre-0.9 ggplot2 API -- opts() and
# theme_blank() were removed in later versions; melt() requires the reshape
# package, which is never attached here. Hard-coded d:\temp paths mean this
# script only runs on the author's machine.
setwd('d:\\temp')
hkh <- read.csv('hkhousing.csv')
summary(hkh)
library(arm)
# NOTE(review): corrplot() is provided by the corrplot package, not arm --
# confirm which correlation-plot function was intended.
corrplot(hkh[,3:8])
pairs(hkh[,3:10])
# PCA on columns 3-10 using the correlation matrix (cor = T standardizes).
pca <- princomp(hkh[,3:10], cor = T)
# pca <- prcomp(hkh[,3:8], scale. = T, retx = T)
# summary(pca)
print(pca)
plot(pca)
screeplot(pca)
biplot(pca)
loadings(pca)
# Flip the sign of component 1 so larger scores read as "higher overall".
pca$scores[,1] <- pca$scores[,1] * (-1)
cl <- kmeans(hkh[,3:8], centers = 9)
print(cl)
plot(hkh[,3:8], col = cl$cluster)
# One HCL hue per year: 14 hues spaced 35 degrees apart around the wheel.
color <- hcl(seq(10, by = 35, length = 14) %% 360)
year <- 1999
library(ggplot2)
# Score scatter plots (components 1 vs 2, 1 vs 3, 2 vs 3) for observations
# up to `year`, coloured by column 1 of hkh (presumably the year -- confirm).
ggplot(data = as.data.frame(pca$scores[hkh$year <= year, 1:2]), aes(x = Comp.1, y=Comp.2, colour = factor(hkh[hkh$year <= year, 1]))) + geom_point(size = 5) + scale_colour_manual(values = color)
ggplot(data = as.data.frame(pca$scores[hkh$year <= year, c(1,3)]), aes(x = Comp.1, y=Comp.3, colour = factor(hkh[hkh$year <= year, 1]))) + geom_point(size = 5) + scale_colour_manual(values = color)
ggplot(data = as.data.frame(pca$scores[hkh$year <= year, c(2,3)]), aes(x = Comp.2, y=Comp.3, colour = factor(hkh[hkh$year <= year, 1]))) + geom_point(size = 5) + scale_colour_manual(values = color)
library(scatterplot3d)
# Random hue per year, repeated for 12 months each (14 x 12 = 168 entries);
# the last entry is dropped below, so the data presumably has 167 rows --
# TODO confirm against nrow(hkh).
s3dcolor <- rep(hcl(runif(14, 0, 360)), each = 12)
s3dpch <- rep(1:14, each = 12)
scatterplot3d(pca$scores[,1:3], col.axis = "blue", col.grid = "lightblue", pch = 20, color = s3dcolor[-168], type = 'h', cex.symbols = 3)
library(animation)
setwd('d:\\temp\\anitest')
# Render each animation frame as a PDF page for LaTeX inclusion.
oopt = ani.options(ani.dev = "pdf", ani.type = "pdf", ani.height = 600, ani.width = 600)
# Animation 1: component 1 vs 2, one extra year of points per frame.
saveLatex({
for (year in 1997:2010) {
p <- ggplot(data = as.data.frame(pca$scores[hkh$year <= year, 1:2]), aes(x = Comp.1, y=Comp.2, colour = factor(hkh[hkh$year <= year, 1])))
p <- p + geom_point(size = 5) + scale_colour_manual(name = 'YEAR', values = color)
p <- p + opts(title = 'HK Property Market Movement 1997-2010 (Comp 1 VS 2)', axis.text.x = theme_blank(), axis.text.y = theme_blank())
p <- p + scale_x_continuous(name = 'Comp 1 - Overall', limits = c(-3, 8)) + scale_y_continuous(name = 'Comp 2', limits = c(-4, 3))
print(p)
}
}, ani.basename = "comp12", latex.filename = "comp12ani.tex", interval = 3)
# Animation 2: component 1 vs 3.
saveLatex({
for (year in 1997:2010) {
p <- ggplot(data = as.data.frame(pca$scores[hkh$year <= year, c(1:3)]), aes(x = Comp.1, y=Comp.3, colour = factor(hkh[hkh$year <= year, 1])))
p <- p + geom_point(size = 5) + scale_colour_manual(name = 'YEAR', values = color)
p <- p + opts(title = 'HK Property Market Movement 1997-2010 (Comp 1 VS 3)', axis.text.x = theme_blank(), axis.text.y = theme_blank())
p <- p + scale_x_continuous(name = 'Comp 1 - Overall', limits = c(-3, 8)) + scale_y_continuous(name = 'Comp 3', limits = c(-3, 4))
print(p)
}
}, ani.basename = "comp13", latex.filename = "comp13ani.tex", interval = 3)
# Animation 3: component 2 vs 3.
saveLatex({
for (year in 1997:2010) {
p <- ggplot(data = as.data.frame(pca$scores[hkh$year <= year, c(2:3)]), aes(x = Comp.2, y=Comp.3, colour = factor(hkh[hkh$year <= year, 1])))
p <- p + geom_point(size = 5) + scale_colour_manual(name = 'YEAR', values = color)
p <- p + opts(title = 'HK Property Market Movement 1997-2010 (Comp 2 VS 3)', axis.text.x = theme_blank(), axis.text.y = theme_blank())
p <- p + scale_x_continuous(name = 'Comp 2', limits = c(-4, 3)) + scale_y_continuous(name = 'Comp 3', limits = c(-3, 4))
print(p)
}
}, ani.basename = "comp23", latex.filename = "comp23ani.tex", interval = 3)
ani.options(oopt)
# Polar charts of the first three components' loadings.
# melt() here comes from the reshape package (not attached above).
df <- melt(pca$loadings[,1:3])
# Apply the same sign flip to component 1's loadings as was applied to scores.
df[df$X2 == 'Comp.1',3] <- df[df$X2 == 'Comp.1',3] * (-1)
dev.new()
ggplot(data = df, aes(x = X1, y = value, fill = X2)) + geom_bar(position = 'dodge', width = 0.7) + coord_polar(start = pi/4) + labs(x = '', y = '', fill = 'Components')
ggplot(data = df, aes(x = X1, y = value, colour = X2, group = X2)) + geom_line() + coord_polar()
# Index ranges for halves of the long data -- defined but unused below.
first4vars <- 1:672
second4vars <- 673:1344
# Long format: one row per (year, yearmth, variable) for faceted time series.
hkhlong <- melt(hkh, id.var = c('year', 'yearmth'))
head(hkhlong)
# Faceted time series of selected variables; yearmth (YYYYMM) becomes a Date
# by appending day "01".
ggplot(data = hkhlong[hkhlong$variable %in% levels(hkhlong$variable)[-c(1,3,6,8)],], aes(x = as.Date(as.character(paste(yearmth, '01', '')), '%Y%m%d'))) +
geom_path(aes(y = value), group = 1) +
# geom_rect(aes(xmin = as.Date('1997-01-01'), xmax = as.Date('1997-12-01'), ymin = min(value), ymax = max(value)), colour = 'red', fill = 'red', alpha = 0.2) +
# geom_vline(xintercept = as.Date('1997-12-01')) +
facet_grid (variable ~ ., scales = 'free_y') + scale_x_date('YEAR', major = 'years', minor = 'months') + ylab('') +
opts(title = 'HK Property Market Movement 1997 - 2010')
|
25634c4fd185e1ecfd40a83601a6ccb9f3ae8695
|
e00befe0f92d42dd1f97e9304973f4b22da03af5
|
/BCS_hist1/BCS_hist1.r
|
28869962f5fe5d6dc1a04ff91c3b465d091f83da
|
[] |
no_license
|
QuantLet/BCS
|
a706ffdc3cf8777b5443b2c66ff601c3bc517ee0
|
4a5d9fc2c058e5e02534ccb37898d9e9cf2edd9e
|
refs/heads/master
| 2023-04-03T23:59:31.647499
| 2023-03-27T22:14:39
| 2023-03-27T22:14:39
| 51,316,067
| 4
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
BCS_hist1.r
|
# Set the axis-label size and plot margins in inches: c(bottom, left, top, right).
# (par()'s `mai` argument takes a plain numeric vector; element names are ignored,
# so the original's c(b=, l=, t=, r=) names are dropped.)
par(cex.lab = 1.5, mai = c(1, 1, 0.7, 0.5))
# Histogram of the nhtemp series on the density scale (freq = FALSE), with the
# y axis labelled as the density estimate f-hat(x).
# (FALSE spelled out: T/F are ordinary variables in R and can be reassigned.)
hist(nhtemp, freq = FALSE, main = "", ylab = expression(hat(f)(x)), xlab = expression(x %in% K[i]))
|
b56a5fb19e8a05fe283ce07e3403a3814151e2d2
|
7879e0b7476cfefe1d83b8760932a2978296103f
|
/tests/testthat/test_basic_calls.R
|
01180c37f32a1cb9eb9c3cf4eb5b8cf2928c5792
|
[
"MIT"
] |
permissive
|
go-yohan/sppcredit
|
75914cb30f37c67e12190c9d2f8f51f542510626
|
19597394d0a8c489857476f50c00f9c2c39e02e6
|
refs/heads/master
| 2020-12-30T16:45:32.897205
| 2017-09-21T20:35:49
| 2017-09-21T20:35:49
| 91,020,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,985
|
r
|
test_basic_calls.R
|
# testthat integration tests for the sppcredit price-download helpers
# (getDfDaPriceSpp, getDfSppDaCongestOnPaths, getDfDaCongestDistribution,
# calcRefPriceSpp, getDfRefPriceByPeriod -- defined in the package, not here).
# NOTE(review): the FTP test and the UNC network shares below reach external
# resources, so these tests only pass inside the original environment.
LocalDriveDaPrice <- "//fs2/world/Analytics/Apps/Qlikview/FTR/SPP_DATA/Markets/DA/LMP_By_SETTLEMENT_LOC"
LocalDriveRefPrice <- "//fs2/world/SMD/Staff/SPP_Auction_Revenue_Rights/PY_2017_2018_ARR_Registration/TCR_REFERENCE_PRICES"
# Source/sink settlement-location pairs reused by every test below.
TestListPaths <- list(list(Source = 'NPPD_NPPD', Sink = 'SPRM_SPRM'),
list(Source = 'SPRM_SPRM', Sink = 'NPPD_NPPD'),
list(Source = 'AECI', Sink = 'SPA'),
list(Source = 'SPA', Sink = 'AECI'),
list(Source = 'GRDA.GREC3', Sink = 'WR.NSW'),
list(Source = 'WR.NSW', Sink = 'GRDA.GREC3'),
list(Source = 'GRDA.GREC3', Sink = 'NPPD_NPPD'),
list(Source = 'NPPD_NPPD', Sink = 'GRDA.GREC3')
)
# FTP download: expects 2 days x 24 hourly rows for one location (the end
# date appears to be exclusive -- TODO confirm in getDfDaPriceSpp), then an
# overlapping range that must be served silently from the cache.
testthat::test_that('getting day-ahead price using FTP on a specific day works', {
dateRange <- lubridate::as_date(c('2016-06-01', '2016-06-03'))
dfData <- getDfDaPriceSpp( dateRange = dateRange )
ind <- dfData[['Settlement Location']] == 'NPPD_NPPD'
testthat::expect_equal(nrow(dfData[ind, ]), 2*24)
# this should not download again, and just use the cache
dateRange <- lubridate::as_date(c('2016-06-02', '2016-06-03'))
testthat::expect_silent(dfData <- getDfDaPriceSpp( dateRange = dateRange ))
ind <- dfData[['Settlement Location']] == 'NPPD_NPPD'
testthat::expect_equal(nrow(dfData[ind, ]), 1*24)
})
# Same contract as the FTP test, but reading from the local network share
# via the ftpRoot override.
testthat::test_that('getting day-ahead price using local drive on a specific day works', {
dateRange <- lubridate::as_date(c('2016-07-01', '2016-07-03'))
dfData <- getDfDaPriceSpp( dateRange = dateRange, ftpRoot = LocalDriveDaPrice )
ind <- dfData[['Settlement Location']] == 'NPPD_NPPD'
testthat::expect_equal(nrow(dfData[ind, ]), 2*24)
# this should not download again, and just use the cache
dateRange <- lubridate::as_date(c('2016-07-02', '2016-07-03'))
testthat::expect_silent(dfData <- getDfDaPriceSpp( dateRange = dateRange, ftpRoot = LocalDriveDaPrice ))
ind <- dfData[['Settlement Location']] == 'NPPD_NPPD'
testthat::expect_equal(nrow(dfData[ind, ]), 1*24)
})
# Congestion frames: one row per path-hour for the raw series, one row per
# path for the distribution summary.
testthat::test_that('getting the lmp distribution works', {
dateRange <- lubridate::as_date(c('2016-07-01', '2016-07-03'))
dfCongest <- getDfSppDaCongestOnPaths(lstPaths = TestListPaths, dateRange, ftpRoot = LocalDriveDaPrice)
# expect 48 hour data for each path
testthat::expect_equal(nrow(dfCongest), 48 * length(TestListPaths))
dfCongest <- getDfDaCongestDistribution(lstPaths = TestListPaths, ftpRoot = LocalDriveDaPrice, periodName = 'Jun_17', onOrOff = 'OFF', yearOffset = 1)
testthat::expect_equal(nrow(dfCongest), length(TestListPaths))
})
# Smoke tests only: no assertions, they just verify the calls complete.
testthat::test_that('calculation of reference price works', {
dfCalc <- calcRefPriceSpp(lstPaths = TestListPaths, periodName = 'Jun_17', onOrOff = 'OFF', ftpRoot = LocalDriveDaPrice)
})
testthat::test_that('reference price download works', {
dfRefPrice <- getDfRefPriceByPeriod('Jun_17', onOrOff = 'OFF', ftpRoot = LocalDriveRefPrice, lstPaths = TestListPaths)
})
|
03efa51e297732e7fbbc6891baf4c27c22450c06
|
a53c5982b99271edf657f293647bc62f2475a391
|
/R/R source/lapply.R
|
f47504fd6d640745cca17c552ac687facee7aec7
|
[] |
no_license
|
iwannab1/kodb
|
0206f1031c315b07712314dca9528cc766b13c1e
|
cbeabb44eaf6f486a095edc55a5d7cf1735d608b
|
refs/heads/master
| 2021-01-10T10:27:40.103655
| 2017-06-14T10:56:55
| 2017-06-14T10:56:55
| 45,444,156
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
lapply.R
|
# Worked examples of R's apply-family functions: lapply/sapply/vapply,
# explicit for loops, mapply/Map, zoo::rollapply, apply, and tapply.
x = runif(10)
la = lapply(x, sqrt);la
sa = sapply(x, sqrt);sa
# NOTE(review): this call errors -- vapply() has no default for FUN.VALUE;
# the corrected call appears a few lines below as a teaching contrast.
va = vapply(x, sqrt);va
# lapply returns a list; sapply simplifies to a vector here
is.list(la);is.list(sa)
is.vector(la);is.vector(sa)
# vapply with an explicit result template: one double per element
va = vapply(x, sqrt, double(1));va
is.list(va);is.vector(va)
# for
xs = runif(1e5)
res <- c()
# growing a vector with c() inside a loop copies it each iteration (O(n^2))
system.time(for (x in xs) {
res <- c(res, sqrt(x))
})
res <- c()
# indexed assignment avoids the repeated full copies
system.time(for (i in seq_along(xs)) {
res[i] <- sqrt(xs[i])
})
# data frame
df = data.frame(x = 1:10, y = letters[1:10]);df
sapply(df, class)
vapply(df, class, character(1))
# a POSIXct column has class c("POSIXct", "POSIXt"), so the vapply with a
# character(1) template below errors, while sapply falls back to a list
df2 = data.frame(x = 1:10, y = Sys.time() + 1:10);df2
sapply(df2, class)
vapply(df2, class, character(1))
# mapply
xs = replicate(5, runif(10), simplify = FALSE);xs
ws = replicate(5, rpois(10, 5) + 1, simplify = FALSE);ws
unlist(lapply(xs, mean))
## two argument
# weighted mean over paired lists: index-based lapply vs Map vs mapply
unlist(lapply(seq_along(xs), function(i) {
weighted.mean(xs[[i]], ws[[i]])
}))
unlist(Map(weighted.mean, xs, ws))
unlist(mapply(weighted.mean, xs, ws))
# rollapply
x = seq(1, 3, length = 1e2)
library(zoo)
# rolling median over a window of width 5
rollapply(x, 5, median)
# apply
m <- matrix(data=cbind(rnorm(10, 0), rnorm(10, 2), rnorm(10, 5)), nrow=10, ncol=3)
m
rmean1 = c()
cmean1 = c()
## for loop
# row and column means via explicit loops ...
for (i in 1:10){
rmean1[i] = mean(m[i,])
}
rmean1
for (i in 1:3){
cmean1[i] = mean(m[,i])
}
cmean1
## apply
# ... versus apply over rows (MARGIN = 1) and columns (MARGIN = 2)
rmean2 = apply(m, 1, mean);rmean2
cmean2 = apply(m, 2, mean);cmean2
#sapply, lapply : traverse
# on a matrix these iterate over individual cells, not rows or columns
mean3 = sapply(m, mean);mean3 # vector
mean4 = lapply(m, mean);mean4 # list
mean3[1]
mean4[1]
mean4[[1]]
#user defined function
apply(m, 1, function(x) x*-1)
apply(m, 2, function(x) x*-1)
rmean3 = sapply(1:10, function(x) mean(m[x,]));rmean3
cmean3 = sapply(1:3, function(x) mean(m[,x]));cmean3
# extra arguments after FUN are forwarded to it by name (y = m)
cmean4 = sapply(1:3, function(x, y) mean(y[,x]), y=m); cmean4
# group apply
# simulated pulse rates for two groups; tapply summarises per group
pulse = round(rnorm(22, 70, 10 / 3)) + rep(c(0, 5), c(10, 12)); pulse
group = rep(c("A", "B"), c(10, 12)); group
tapply(pulse, group, length)
tapply(pulse, group, mean)
|
2dd6d8d73dd574d57371aeb357cf05b854f30a1c
|
90855c181b6cda9f32065a41e4b2e2845359ae44
|
/man/print.NCM.Rd
|
3b85fef3b2ac05368572d68601a5e840892fe456
|
[] |
no_license
|
jfq3/NeutralModel
|
fbbb4696bea17b94852216b61b483b73be6c45c2
|
33a3a3267c8bc43ef28c6f0fd76dd5142a52bfeb
|
refs/heads/master
| 2020-09-25T21:16:35.675170
| 2020-01-29T03:16:21
| 2020-01-29T03:16:21
| 226,090,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 467
|
rd
|
print.NCM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.NCM.R
\name{print.NCM}
\alias{print.NCM}
\title{Print method for class NCM}
\usage{
\method{print}{NCM}(x, ...)
}
\arguments{
\item{x}{Output of the function neutral_model.}
\item{...}{Other parameters passed to the generic print function.}
}
\value{
Printed summary.
}
\description{
Prints statistics for a class NCM object.
}
\examples{
data(rslt)
rslt
}
\author{
John Quensen
}
|
5e30527810d3a2b2b4fa255a4862da60d697781c
|
629d8c6ef6c86d475ac3730623dd943e872e2b90
|
/Functions/predictplot.R
|
0fcd40c5c40338cad8604c659bd87c61cdbb68df
|
[
"MIT"
] |
permissive
|
djhocking/Small_Mammal_Synchrony
|
773b895b98ded209ca67156b53aa330739063d33
|
e999a31d01ff924489397d72afe094a08349d195
|
refs/heads/master
| 2021-01-13T00:15:58.384006
| 2016-02-15T15:10:51
| 2016-02-15T15:10:51
| 51,762,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,088
|
r
|
predictplot.R
|
# Plot the response of a data frame against each predictor, one panel per
# predictor, optionally conditioned on a grouping variable and/or using
# partial residuals from a fitted model.
#
#   x             data frame; response/predictors are recovered with the
#                 project helpers response.var()/predictor.vars()
#   given         optional conditioning variable; non-factors are binned
#                 into quartile groups via rcut()
#   given.lab     label for the legend panel drawn for `given`
#   layout        par(mfrow) layout; derived from the panel count if missing
#   partial       a fitted model; when supplied, partial residuals are plotted
#   type          y-scale when the response is a 2-level factor:
#                 probability, logit, or probit
#   identify.pred TRUE, or a vector of predictor names, enabling interactive
#                 identify() on those panels
#   scol          colour of the smooth (lowess/loprob) line; NA suppresses it
#   ...           forwarded to plot()/points()
#
# NOTE(review): depends on project helpers response.var, predictor.vars,
# rcut, sort.levels and loprob that are not visible in this file.
predict.plot.data.frame <- function(x,given,given.lab,layout,partial,
                                    type=c("prob","logit","probit"),
                                    identify.pred=F,scol="red",...) {
  type <- match.arg(type)
  resp <- response.var(x)
  pred <- predictor.vars(x)
  if(!missing(given) && !is.factor(given)) {
    # make given a factor
    b <- as.numeric(quantile(given,seq(0,1,length=5)))
    given <- rcut(given,b)
  }
  if(missing(layout)) {
    # one panel per predictor, plus a legend panel when `given` is supplied;
    # arrange in a single row up to 3 panels, otherwise a near-square grid
    len <- length(pred) + !missing(given)
    layout <- c(1,len)
    if(len > 3) {
      layout[2] <- ceiling(sqrt(len))
      layout[1] <- ceiling(len/layout[2])
    }
  }
  # restore the caller's graphics parameters when we leave
  opar <- par(mfrow=layout, mar=c(4.5,4,0,0.1))
  on.exit(par(opar))
  if(!missing(partial)) {
    pres <- data.frame(residuals(partial,type="partial"))
  }
  for(i in pred) {
    if(!missing(partial)) {
      # replace the response by the partial residual for this predictor
      x[[resp]] <- pres[[make.names(i)]]
      if(is.null(x[[resp]])) stop(paste("partial of",i,"not found"))
    }
    # rows with a non-missing value of this predictor
    k <- !is.na(x[,i])
    if(is.factor(x[[i]]) && !is.ordered(x[[i]])) {
      # order unordered factor levels by the mean response
      x[[i]] <- sort.levels(x[[i]],as.numeric(x[[resp]]))
    }
    if(!missing(given) && is.factor(x[[i]])) {
      # factor predictor with a conditioning variable: draw an empty frame
      # here; jittered, coloured points are added in the `given` loop below
      plot.new()
      xlim <- length(levels(x[[i]]))
      plot.window(xlim=c(0.5,xlim+0.5),ylim=range(x[[resp]]))
      axis(1,1:xlim,labels=levels(x[[i]]))
      axis(2)
      box()
      title(xlab=i, ylab=resp)
      cat(paste("jittering",i,"\n"))
    } else {
      if(is.factor(x[[resp]])) {
        if(type=="prob") {
          if(is.factor(x[[i]])) {
            mosaicplot(table(x[[i]], x[[resp]]), xlab=i, ylab=resp)
          } else {
            plot(x[[i]], x[[resp]], xlab=i, ylab=resp, ...)
          }
        }
      } else {
        plot(x[[i]], x[[resp]], xlab=i, ylab=resp, ...)
      }
    }
    if(missing(given) && !is.na(scol)) {
      # overlay a smooth: loprob() for a binary factor response (optionally
      # transformed to the logit/probit scale), lowess() otherwise
      if(is.factor(x[[resp]])) {
        if(length(levels(x[[resp]]))==2 && !is.factor(x[[i]])) {
          if(type=="prob") {
            lines(loprob(x[k,i], x[k,resp]), col=scol)
          } else {
            xy <- loprob(x[k,i], x[k,resp])
            # loprob's y is on a 1..2 scale; shift to 0..1 before transforming
            p <- xy$y-1
            p <- switch(type,logit=log(p/(1-p)),probit=qnorm(p))
            xy$y <- p+1.5
            plot(xy,col=scol,type="l",xlab=i,ylab=type)
            points(x[[i]],2*as.numeric(x[[resp]])-3)
          }
        }
      } else {
        lines(lowess(x[k,i], x[k,resp]),col=scol)
      }
    }
    if((identify.pred == T) || (i %in% identify.pred)) {
      # interactive point labelling for this panel
      identify(x[k,i],x[k,resp],labels=rownames(x)[k])
    }
    if(!missing(given)) {
      # one point cloud and smooth per level of `given`, cycling 6 colours
      lev <- levels(given)
      for(g in 1:length(lev)) {
        color <- ((g-1) %% 6) + 1
        val <- lev[g]
        k <- (given == val)
        if(is.factor(x[[i]])) {
          jitter <- (runif(length(x[k,i]))-0.5)/5
          points(as.numeric(x[k,i])+jitter, x[k,resp], col=color, ...)
        } else {
          points(x[k,i], x[k,resp], col=color, ...)
        }
        if(is.factor(x[[resp]])) {
          lines(loprob(x[k,i], x[k,resp]),col=color)
        } else {
          lines(lowess(x[k,i], x[k,resp]),col=color)
          #abline(lm(x[k,resp]~x[k,i]),col=color)
        }
      }
    }
  }
  if(!missing(given)) {
    # legend
    plot.new()
    if(!missing(given.lab)) title(xlab=given.lab)
    y <- cumsum(strheight(lev)+0.02)
    for(i in 1:length(lev)) {
      color <- ((i-1) %% 6) + 1
      val <- lev[i]
      text(0.5,0.75-y[i],val,col=color,adj=0.5)
    }
  }
}
# predict.plot method for lm fits: plot ordinary residuals (default) or
# partial residuals against each predictor.
#
#   object  : an lm fit
#   data    : optional data frame; defaults to residual.frame()'s choice /
#             the fit's model frame
#   partial : if TRUE, delegate with partial = object so partial residuals
#             are computed per predictor
#   ...     : forwarded to predict.plot.data.frame()
#
# Changes from the original: `partial = F` spelled out as FALSE (T/F are
# reassignable variables), and an unreachable `if (F) {...}` block removed.
# NOTE(review): residual.frame() is a project helper not visible in this file.
predict.plot.lm <- function(object,data,partial=FALSE,...) {
  if(!partial) {
    # build a frame of residuals vs predictors
    if(missing(data)) {
      res <- residual.frame(object)
    } else {
      res <- residual.frame(object,data)
    }
    cat("plotting residuals\n")
    predict.plot.data.frame(res,...)
  } else {
    if(missing(data)) data <- model.frame(object)
    cat("plotting partial residuals\n")
    predict.plot.data.frame(data,partial=object,...)
  }
}
# predict.plot method for formulas. Supports a conditioning variable on the
# right-hand side with `response ~ predictors | given`: the `given` term is
# stripped from the formula, looked up in `data`, and forwarded to the
# data-frame method as given/given.lab.
#
# Changes from the original: get()'s argument is spelled `envir` (the
# original's `env=` relied on partial argument matching), and an unreachable
# `if (F) {...}` block was removed.
predict.plot.formula <- function(formula,data=parent.frame(),...) {
  # formula has givens?
  rhs <- formula[[3]]
  if(is.call(rhs) && (deparse(rhs[[1]]) == "|")) {
    # remove givens from formula and recurse with the conditioning variable
    given <- deparse(rhs[[3]])
    formula[[3]] <- rhs[[2]]
    if(is.environment(data)) g <- get(given,envir=data)
    else g <- data[[given]]
    if(is.null(g))
      stop(paste("variable \"",given,"\" not found",sep=""))
    return(predict.plot.formula(formula,data,
                                given=g,given.lab=given,...))
  }
  # formula has its own environment; build the model frame, dropping NAs
  x <- model.frame.default(formula,data,na.action=na.omit)
  predict.plot.data.frame(x,...)
}
# S3 generic for predict.plot; dispatches to predict.plot.formula,
# predict.plot.lm, or predict.plot.data.frame based on class(object).
predict.plot <- function(object, ...) UseMethod("predict.plot")
# Stepwise model selection "upward" from `object`: the search scope allows
# every interaction among the fit's predictors (resp ~ p1 * p2 * ...).
# NOTE(review): response.var()/predictor.vars() are project helpers defined
# elsewhere.
step.up <- function(object) {
  outcome <- response.var(object)
  inputs <- predictor.vars(object)
  interaction_formula <- paste(outcome, "~", paste(inputs, collapse = "*"))
  step(object, terms(formula(interaction_formula)))
}
|
c0e9b140697816548408241d6958e5a191d06700
|
7f17e160c10eaccfe433fb72ca39a9dfadb8ab9d
|
/SourceCode.R
|
9eb097398094609b35c55ac684ecee52e3210aa9
|
[] |
no_license
|
lmilam/Information-Retrieval-Final
|
c954410c0f289da7bfdd889d5974cf2462f64ff2
|
5453aec97b75585ccbf08092c31460d0cc40fe50
|
refs/heads/master
| 2022-06-20T04:52:13.153824
| 2020-05-06T21:22:45
| 2020-05-06T21:22:45
| 261,602,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,249
|
r
|
SourceCode.R
|
## Created by: Lora Milam
## Group: 12
## Course: CS 4331 003: Information Retrieval
##
## White-wine quality analysis: backward selection of a linear model,
## a 75/25 train/test split, then k-means and hierarchical clustering.
##
## Import dataset from download directory
## NOTE(review): absolute Windows path -- only runs on the author's machine.
winequality.white <- read.csv("C:/Users/default.LAPTOP-QPG8VPNC/Downloads/winequality-white.csv", sep=";")
##Use Regression Modeling to find relevant predictor variables
model01<-lm(formula=quality~alcohol+sulphates+pH+density+total.sulfur.dioxide+free.sulfur.dioxide+chlorides+residual.sugar+citric.acid+volatile.acidity+fixed.acidity,data = winequality.white)
##Model01 shows that Total.Sulfur.Dioxide,Chlorides, Citric.Acid variables have high P-values
##Remove these variables
winequality.white$total.sulfur.dioxide<-NULL
winequality.white$citric.acid<-NULL
winequality.white$chlorides<-NULL
##New model
model02<-lm(formula=quality~alcohol+sulphates+pH+density+free.sulfur.dioxide+residual.sugar+volatile.acidity+fixed.acidity,data = winequality.white)
summary(model02)
##After Preprocessing, create training and test set
## ~75% of rows go to training via a uniform random draw
set.seed(123)
n<-dim(winequality.white)[1]
divide_ind<-runif(n)<.75
wine_train<-winequality.white[divide_ind,]
wine_test<-winequality.white[!divide_ind,]
##Create models for training and set set
Model02_train<-lm(formula=quality~alcohol+sulphates+pH+density+free.sulfur.dioxide+residual.sugar+volatile.acidity+fixed.acidity,data = wine_train)
summary(Model02_train)
Model02_test<-lm(formula=quality~alcohol+sulphates+pH+density+free.sulfur.dioxide+residual.sugar+volatile.acidity+fixed.acidity,data = wine_test)
summary(Model02_test)
##Plot Regressive Model Vs Actual results
##Dataframe for predictive variables
X_train<-subset(wine_train,select = c("fixed.acidity","volatile.acidity","residual.sugar","free.sulfur.dioxide","density","pH","sulphates","alcohol"))
X_test<-subset(wine_test,select = c("fixed.acidity","volatile.acidity","residual.sugar","free.sulfur.dioxide","density","pH","sulphates","alcohol"))
##Predictive models for training and test sets
Model02_train_predict<-predict(object = Model02_train,newdata = X_train)
Model02_test_predict<-predict(object = Model02_test,newdata = X_test)
#Plot for predictive models
plot(round(Model02_train_predict))
plot(round(Model02_test_predict))
#Plot for Actual results
plot(wine_train$quality)
plot(wine_test$quality)
##Plotting Kmeans
##Install packages
## NOTE(review): installing packages at run time is a heavy side effect;
## consider guarding with requireNamespace() instead.
install.packages('tidyverse')
install.packages('cluster')
install.packages('factoextra')
##Libraries
library(tidyverse)
library(cluster)
library(factoextra)
#Dataframe prep
# NOTE(review): scale() coerces each data frame to a standardized numeric
# matrix -- including the quality column -- and the scaled objects replace
# the original train/test frames from here on.
wine_train<-scale(wine_train)
wine_test<-scale(wine_test)
#Plotting test and training set for Factor of 1
wine_train_kmeans<-kmeans(wine_train,centers = 1)
wine_test_kmeans<-kmeans(wine_test,centers = 1)
# NOTE(review): these plots index the FULL dataset's quality by cluster
# labels fitted on the train/test subsets -- lengths differ; confirm intent.
plot(winequality.white$quality, col = wine_train_kmeans$cluster)
plot(winequality.white$quality, col = wine_test_kmeans$cluster)
#Plotting test and training set for Factor of 2
wine_train_kmeans<-kmeans(wine_train,centers = 2)
wine_test_kmeans<-kmeans(wine_test,centers = 2)
plot(winequality.white$quality, col = wine_train_kmeans$cluster)
plot(winequality.white$quality, col = wine_test_kmeans$cluster)
#Plotting test and training set for Factor of 5
wine_train_kmeans<-kmeans(wine_train,centers = 5)
wine_test_kmeans<-kmeans(wine_test,centers = 5)
plot(winequality.white$quality, col = wine_train_kmeans$cluster)
plot(winequality.white$quality, col = wine_test_kmeans$cluster)
#Plotting test and training set for Factor of 10
wine_train_kmeans<-kmeans(wine_train,centers = 10)
wine_test_kmeans<-kmeans(wine_test,centers = 10)
plot(winequality.white$quality, col = wine_train_kmeans$cluster)
plot(winequality.white$quality, col = wine_test_kmeans$cluster)
##Plotting Hierarchical Clustering
##Plotting test and training set for Factor of 1
d_train<-dist(wine_train)
d_test<-dist(wine_test)
hc1_train<-hclust(d_train,method = "ward.D")
hc1_test<-hclust(d_test,method = "ward.D")
plot(hc1_train)
plot(hc1_test)
##Factor of 2
## redraw the dendrogram and outline k clusters with coloured rectangles
plot(hc1_train)
rect.hclust(hc1_train,k=2,border = 2:11)
plot(hc1_test)
rect.hclust(hc1_test,k=2,border = 2:11)
##Factor of 5
plot(hc1_train)
rect.hclust(hc1_train,k=5,border = 2:11)
plot(hc1_test)
rect.hclust(hc1_test,k=5,border = 2:11)
##Factor of 10
plot(hc1_train)
rect.hclust(hc1_train,k=10,border = 2:11)
plot(hc1_test)
rect.hclust(hc1_test,k=10,border = 2:11)
|
2daea9ddcff242e9724468c0c2db1927c7430eaf
|
a5d7a45cdc3bc85aacd36cf229a4d743bdfbcf50
|
/func_write_nls_latex.R
|
748afc37da498780e84da165b7b4930b9144d95b
|
[
"MIT"
] |
permissive
|
calvinwhealton/R_tables_to_LaTeX
|
3e8b8cffb44591db6b5b84837955ad304a8ad946
|
4933d1a60de2dbfe9248cea4cdb6511063158ffe
|
refs/heads/master
| 2020-04-11T03:38:10.460577
| 2016-02-05T16:24:12
| 2016-02-05T16:24:12
| 40,672,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,037
|
r
|
func_write_nls_latex.R
|
# Function for writing NLS ANOVA tables into LaTeX tables ----
# Output can include more regression results as defined by the user.
#
# Writes a LaTeX tabular summarizing a non-linear least squares fit to the
# file `name_tex`: an upper section with SSE/SSR/SST, residual degrees of
# freedom, RSE and R2* (= 1 - SSE/SST), and a lower section with one row per
# coefficient (estimate, std. error, t statistic, p value). When `caption`
# is supplied the tabular is wrapped in a full table environment.
#
# Fixes vs the original:
#  * output redirection now uses a paired sink()/on.exit(sink()) -- the
#    original called closeAllConnections(), which also closes every other
#    connection the caller may have open;
#  * the comments test was `length(comments != 0)`, which only worked by
#    accident; it is now `length(comments) != 0`.
write_latex_nls <- function(reg_obj         # regression object (an nls fit)
                            , name_tex      # file name for the LaTeX table
                            , name_var=NULL # names of variables (defaults to coefficient names)
                            , caption=NULL  # long caption; also triggers the table environment
                            , capShort=NULL # short caption for the list of tables
                            , comments=NULL # extra comment lines written at the top of the file
                            , label=NULL    # label for the table
                            , dec_sums=2    # number of decimals for SSE, SSR, SST
                            , dec_rse = 3   # number of decimals for RSE
                            , dec_r = 4     # number of decimals for R2 and AdjR2
                            , dec_co = 4    # significant digits for the coefficients
                            ){
  # calculating and rounding variables for output
  y_mean <- mean(predict(reg_obj)+residuals(reg_obj))
  sse <- round(sum(residuals(reg_obj)^2),dec_sums) # sum of squares error
  ssr <- round(sum((predict(reg_obj)-y_mean)^2),dec_sums) # sum of squares regression
  sst <- round(sum((predict(reg_obj)+residuals(reg_obj) - y_mean)^2),dec_sums)
  dof <- as.numeric(unlist(summary(reg_obj)["df"])["df2"]) # residual degrees of freedom
  rse <- round(sqrt(sse/dof), dec_rse) # residual standard error
  r2star <- round(1 - sse/sst,dec_r)
  # redirect cat() output to the file; guarantee the sink is released even
  # if an error occurs part-way through
  file.create(name_tex)
  sink(name_tex)
  on.exit(sink(), add = TRUE)
  # writing date, time, and comments
  time <- Sys.time()
  cat(c("%", format(time) ), sep="\t")
  cat(c("\n"))
  cat(c("% output from R non-linear least squares regression\n"))
  # printing additional comments
  if(length(comments) != 0){
    cat(c("%",comments,"\n"),sep=" ")
  }
  # writing caption and opening information
  if(length(caption) != 0){
    cat("\\begin{table}[h!]\n") # controlling location of table
    # printing caption, option for both short and long captions
    if(length(capShort) == 0){
      cat(c("\\caption{",caption,"}"),sep='')
    }else{
      cat(c("\\caption[",capShort,']{',caption,"}"),sep='')
    }
    # adding label
    if(length(label) != 0){
      cat("\\label{",label,"}",sep='')
    }
    cat("\\begin{center}\n") # centering
    cat("\\begin{tabular}{l l l l l}\n") # justification of columns
    cat("\n")
    cat("\\hline\n")
  }
  # writing initial data
  # writing line 1
  cat(c("SSE", sse, "", "DoF", dof), sep = "\t&\t")
  cat(c("\\\\", "\n"))
  # writing line 2
  cat(c("SSR", ssr, "", "R2\\_star", r2star), sep = "\t&\t")
  cat(c("\\\\", "\n"))
  # writing line 3
  cat(c("SST", sst, "", "", ""), sep = "\t&\t")
  cat(c("\\\\", "\n"))
  # writing line 4
  cat(c("RSE", rse, "", "", ""), sep = "\t&\t")
  cat(c("\\\\", "\n"))
  cat("\\hline") # end of upper portion of table
  cat(c("\n"))
  # writing column names
  cat(c("Variable", "Est. Coeff.", "Std. Error", "t Stat.", "Pr($>|t|$)"), sep = "\t&\t")
  cat(c("\\\\", "\n"))
  cat("\\hline") # beginning of lower section of table
  cat(c("\n"))
  # setting names as variables if names are not defined by user
  if(length(name_var) == 0){
    name_var <- names(coefficients(reg_obj))
  }
  # one row per coefficient; coefficients(summary(...)) is a p x 4 matrix
  # indexed column-major, hence the i + k*length(name_var) offsets
  for(i in 1:length(name_var)){
    cat(c(name_var[i], format(coefficients(summary(reg_obj))[i],scientific=TRUE,digits=dec_co), format(coefficients(summary(reg_obj))[i+length(name_var)],scientific=TRUE,digits=dec_co), format(coefficients(summary(reg_obj))[i+2*length(name_var)],scientific=TRUE,digits=dec_co), format(coefficients(summary(reg_obj))[i+3*length(name_var)],scientific=TRUE,digits=dec_co)), sep="\t&\t")
    cat(c("\\\\", "\n"))
  }
  # adding statements to end table
  if(length(caption) != 0){
    # closing information for table
    cat("\\hline")
    cat("\\end{tabular}\n")
    cat("\\end{center}\n")
    cat("\\end{table}")
  }
  # the on.exit() handler releases the sink here
}
|
1b8aeb6aba2e3c37b8d49dec693e7ee868980a5b
|
9c85a0765978a98a46e617360af75417de5a1f14
|
/cachematrix.R
|
44ed8c85f08a33d8ceb4dc7801fd352be070b3e6
|
[] |
no_license
|
emmanuelt/ProgrammingAssignment2
|
f2039fc2311238a9904d1d361ef9850ebc8056b7
|
34775e4c68d3c0aa78dd754f5691620e9b68c71e
|
refs/heads/master
| 2020-12-25T03:50:34.171140
| 2015-03-20T10:29:58
| 2015-03-20T10:29:58
| 32,550,868
| 0
| 0
| null | 2015-03-19T23:04:28
| 2015-03-19T23:04:28
| null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
cachematrix.R
|
# makeCacheMatrix builds a cache-aware wrapper around a matrix: matrix
# inversion can be expensive, so the inverse is stored next to the matrix
# and recomputed only after the matrix changes.
# The returned list exposes four closures:
#   set(y)        -- replace the matrix and invalidate any cached inverse
#   get()         -- return the current matrix
#   setinverse(i) -- store a computed inverse in the cache
#   getinverse()  -- return the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # any previously cached inverse is now stale
  }
  read_matrix <- function() x
  store_inverse <- function(inverse) cached_inverse <<- inverse
  read_inverse <- function() cached_inverse
  list(set = replace_matrix,
       get = read_matrix,
       setinverse = store_inverse,
       getinverse = read_inverse)
}
# cacheSolve returns the inverse of the special "matrix" object produced by
# makeCacheMatrix. A previously cached inverse is returned immediately
# (with a message); otherwise the inverse is computed with solve(),
# stored in the cache via setinverse(), and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data.")
    return(cached)
  }
  source_matrix <- x$get()
  freshly_computed <- solve(source_matrix)
  x$setinverse(freshly_computed)
  freshly_computed
}
|
0a52a7b25f7a58ae15356994760e60810b155699
|
b4177d4f2ed8400edc436b79b1594c804c7a8a52
|
/code/functions.R
|
7c3e1e9cf8124805f9deb538d824c8e7c18ce07a
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
effectsizer/stabilitycondcoop
|
5d7e9a63ef6fe7df7df6fee5c2d7ded4d8e20f6d
|
979abf87b5d3cfb7aa8be13b81ef233076c3bb75
|
refs/heads/master
| 2023-03-16T17:03:40.283719
| 2020-08-31T18:12:45
| 2020-08-31T18:12:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,140
|
r
|
functions.R
|
# =================== DETERMINING TYPE GROUP========================================== #
cat("Loading functions...")
cat("\n")
# Types Classification
# Type number refers to the 10th base conversion of a strategy
# For instance LLL -> 0 LLM -> 1
# Lookup table mapping each of the 27 strategy types (base-3 codes 0..26)
# to three labelling schemes: `wide`, `narrow`, and `wide_o`.
# NOTE(review): bind_rows() is from dplyr, which must be attached before this
# file is sourced.
# NOTE(review): type 18 is labelled 'invcond' while comparable types
# (9, 12, 21, 22, 24, 25) use 'inv-cond-coop' -- possible typo; confirm
# before relying on the `wide` column.
class_table <- bind_rows(
list(type = 0, wide = 'selfish', narrow = 'selfish', wide_o = 'selfish'),
list(type = 1, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 2, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 3, wide = 'humped',narrow = 'humped', wide_o = 'humped'),
list(type = 4, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 5, wide = 'perf-cond-coop',narrow = 'cond-coop', wide_o = 'perf-cond-coop'),
list(type = 6, wide = 'humped',narrow = 'humped', wide_o = 'humped'),
list(type = 7, wide = 'humped',narrow = 'humped', wide_o = 'humped'),
list(type = 8, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 9, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 10, wide = 'inv-humped',narrow = 'other', wide_o = 'other'),
list(type = 11, wide = 'inv-humped',narrow = 'other', wide_o = 'other'),
list(type = 12, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 13, wide = 'uncond',narrow = 'other', wide_o = 'other'),
list(type = 14, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 15, wide = 'humped',narrow = 'humped', wide_o = 'humped'),
list(type = 16, wide = 'humped',narrow = 'humped', wide_o = 'humped'),
list(type = 17, wide = 'imp-cond-coop',narrow = 'cond-coop', wide_o = 'imp-cond-coop'),
list(type = 18, wide = 'invcond',narrow = 'other', wide_o = 'other'),
list(type = 19, wide = 'inv-humped',narrow = 'other', wide_o = 'other'),
list(type = 20, wide = 'inv-humped',narrow = 'other', wide_o = 'other'),
list(type = 21, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 22, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 23, wide = 'inv-humped',narrow = 'other', wide_o = 'other'),
list(type = 24, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 25, wide = 'inv-cond-coop',narrow = 'other', wide_o = 'other'),
list(type = 26, wide = 'uncond',narrow = 'other', wide_o = 'other')
)
# Function to convert conditional-response type encodings between bases:
#   "10" : a single base-10 number in 0..26
#   "3"  : a base-3 number as a length-3 digit vector (digits 0..2)
#   "L"  : a letter code such as "LMH" (L = 0, M = 1, H = 2)
# Conversions FROM the letter form are not implemented and raise an error.
# NOTE(review): the "3t10"/"10t3" paths call digits2number()/number2digits(),
# project helpers defined elsewhere.
#
# Fix vs the original: the letter-source guard tested
# `conv == "Lt3" | conv == "Lt3"` (duplicated), so conv = "Lt10" silently
# fell through every branch and returned NULL; it now errors as intended.
cat("convert_type")
cat("\n")
convert_type <- function (type, conv = c("10t3","3t10","10tL","Lt10","3tL","Lt3") ) {
  # Control inputs ------------------------------------------------------------------------
  if(!(length(conv)==1)) {
    stop('provide conversion method')
  }
  if(!(conv %in% c("10t3","3t10","10tL","Lt10","3tL","Lt3"))) {
    stop('conversion method wrong')
  }
  if(conv == "3t10" || conv == "3tL" ) {
    if(any(type > 2) || any(type < 0) ){
      stop('wrong type type provided. should 3 items between 0 and 2')
    }
  }
  if(conv == "10t3" || conv == "10tL" ) {
    if(type > 26 || type < 0){
      stop('wrong type number provided. should be between 0 and 26')
    }
  }
  if(conv == "Lt10" || conv == "Lt3" ) {
    stop('conversions from letters are not implemented yet')
    # TO DO: Conversions from L
  }
  #-----------------------------------------------------------------------------
  type_labels <- c("L","M","H")
  if (conv=="3t10") {
    # rev(): the vector stores the most significant digit first --
    # TODO confirm against digits2number()'s definition
    return(digits2number(rev(type),base=3))
  }
  if (conv=="10t3") {
    baseconv<-rev(number2digits(type,base=3))
    # left-pad with zeros so the result always has exactly 3 digits
    if (length(baseconv)==3){
      return(baseconv)
    }
    if (length(baseconv)==2){
      return(c(0,baseconv))
    }
    if (length(baseconv)==1){
      return(c(0,0,baseconv))
    }
    if (length(baseconv)==0){
      return(c(0,0,0))
    }
  }
  if (conv=="3tL") {
    # shift digits 0..2 to indices 1..3 of type_labels
    type <- type+1
    return (paste(type_labels[type[1]],type_labels[type[2]],type_labels[type[3]],sep=""))
  }
  if (conv=="10tL") {
    if ("factor" %in% class(type)) {
      type = as.numeric(as.character(type))
    }
    # compose the two implemented conversions: base 10 -> base 3 -> letters
    type<-convert_type(type,conv="10t3")
    type<-convert_type(type,conv="3tL")
    return(type)
  }
}
# Bulk conversion: apply convert_type() to every element of `types`.
# It would be nicer if convert_type() detected the input form itself, but
# that is not trivial because some representations (3, L) are already vectors.
cat("convert_type_list")
cat("\n")
# Returns an unnamed list with one converted value per input element.
convert_type_list <- function(types, conv = c("10t3","3t10","10tL","Lt10","3tL","Lt3")) {
  n <- length(types)
  converted <- vector("list", n)
  for (idx in 1:n) {
    converted[[idx]] <- convert_type(types[[idx]], conv)
  }
  converted
}
cat("convert_type_vector")
cat("\n")
# Bulk conversion returning an atomic vector (one converted value per input).
convert_type_vector <- function(types, conv = c("10t3","3t10","10tL","Lt10","3tL","Lt3")) {
  n <- length(types)
  converted <- rep(NA, n)
  for (idx in 1:n) {
    converted[[idx]] <- convert_type(types[[idx]], conv)
  }
  converted
}
cat("classify_type")
cat("\n")
# Look up the behavioural class label of a single type.
#
# typeno         : a base-10 type number (0-26), or a base-3 digit vector
#                  when base3 = TRUE (converted via convert_type first).
# classification : which labelling column of class_table to return
#                  ("wide", "wide_o" or "narrow").
# Uses dplyr::filter() on the file-level `class_table` lookup and returns
# the label of the first (and presumably only — TODO confirm) matching row.
classify_type<- function(typeno, base3 = FALSE, classification = "wide"){
  if (base3 == TRUE){
    typeno <- convert_type(typeno,"3t10")
  }
  return(filter(class_table, type == typeno)[1,classification][[1]])
}
cat("classify_type_vector")
cat("\n")
# Classify a whole vector of type numbers and return the labels as a factor.
# The factor levels are taken from get_label_table() so that every plot
# shows the classes in the same fixed order. Base-3 input is not supported
# here (classify_type is always called with base3 = FALSE).
classify_type_vector <- function(types, classification= "wide") {
  n <- length(types)
  labels <- rep(NA, n)
  for (idx in 1:n) {
    labels[[idx]] <- classify_type(types[[idx]], base3 = FALSE, classification)
  }
  level_order <- get_label_table(classification = classification)[, "shortname"]
  factor(labels, levels = level_order)
}
cat("get_label_table")
cat("\n")
cat(" classifications: wide, wide_o, narrow")
cat("\n")
# Return the label table for a classification scheme.
#
# classification : one of "wide", "wide_o" or "narrow".
# Returns a character matrix with columns code / shortname / longname /
# color, one row per class, in the canonical display order used by plots.
get_label_table <- function(classification = "wide") {
  # IMPROVEMENT: an unknown scheme previously failed later with an opaque
  # "object 'label_mat' not found"; match.arg() now errors informatively.
  classification <- match.arg(classification, c("wide", "wide_o", "narrow"))
  # colour palette (unused entries of the original — pal_dblue, pal_orange —
  # were dropped)
  pal_red    <- "#C0392B"
  pal_blue   <- "#2980B9"
  pal_purple <- "#9B59B6"
  pal_green  <- "#27AE60"
  pal_dgreen <- "#085b2b"
  pal_yellow <- "#F1C40F"
  pal_pink   <- "#ef39a3"
  pal_gray   <- "#888888"
  if (classification == "wide") {
    label_mat <- rbind(
      c(1,"selfish","Selfish",pal_red),
      c(2,"imp-cond-coop","Imp.Cond.Coop.",pal_green),
      c(3,"perf-cond-coop","Perf.Cond.Coop.",pal_dgreen),
      c(4,"humped","Hump-Shaped",pal_yellow),
      c(5,"inv-humped","Inv. Hump-Shaped",pal_pink),
      c(6,"uncond","Unconditional",pal_blue),
      c(7,"inv-cond-coop","Inv. Cond.Coop.",pal_purple)
    )
  }
  if (classification == "wide_o") {
    label_mat <- rbind(
      c(1,"selfish","Selfish",pal_red),
      c(2,"imp-cond-coop","Imp.Cond.Coop.",pal_green),
      c(3,"perf-cond-coop","Perf.Cond.Coop.",pal_dgreen),
      c(4,"humped","Hump-Shaped",pal_yellow),
      c(5,"other","Other",pal_gray)
    )
  }
  if (classification == "narrow") {
    label_mat <- rbind(
      c(1,"selfish","Selfish",pal_red),
      c(2,"cond-coop","Cond.Coop.",pal_green),
      c(3,"humped","Hump-Shaped",pal_yellow),
      c(5,"other","Other",pal_gray)  # NOTE(review): code jumps 3 -> 5, as in the original
    )
  }
  colnames(label_mat) <- c("code","shortname","longname","color")
  return(label_mat)
}
cat("labelize_class")
cat("\n")
# Map class short-names to their display colour or long name.
#
# classnames     : vector/list of shortnames (e.g. "selfish", "humped").
# output         : which label-table column to return; "color" (default)
#                  or "longname".
# classification : labelling scheme passed through to get_label_table().
# Returns a character vector of the same length as classnames.
labelize_class <- function(classnames, output = c("color", "longname"), classification = "wide") {
  # BUG FIX: the original indexed the label table with the full default
  # vector c("color","longname") when `output` was not supplied, selecting
  # two columns and erroring on assignment; match.arg() picks one value.
  output <- match.arg(output)
  label_table <- get_label_table(classification = classification)
  n <- length(classnames)
  result <- character(n)
  for (i in 1:n) {
    result[i] <- label_table[label_table[, "shortname"] == classnames[[i]], output]
  }
  return(result)
}
cat("treatment_convert")
cat("\n")
# Translate a treatment number (1 or 2) into its treatment name;
# vectorised, since it is plain index lookup.
treatment_convert <- function(TreatmentNo) {
  c("CondInfo", "NoCondInfo")[TreatmentNo]
}
# Save the last plot as pdf
cat("pdf_last_plot")
cat("\n")
# Save the most recent ggplot to ./figs/<filename>.pdf via ggsave().
#
# filename  : output name without extension; written under ./figs/.
# fig_scale : ggsave scaling multiplier.
# width,
# height    : figure size in inches.
# crop      : if TRUE, shell out to `pdfcrop` to trim the margins
#             (Linux only; requires pdfcrop on the PATH).
pdf_last_plot <- function(filename, fig_scale = 1, width = 7, height = 6, crop = FALSE) {
  filename_with_path <- file.path(".","figs",paste(filename,".pdf", sep = ""))
  cat("Saving", filename_with_path , "\n")
  ggsave(filename_with_path,
         device = "pdf",
         scale = fig_scale,
         width = width,
         height = height,
         units = "in"
         # dpi = 300
  )
  if (crop == TRUE ){
    system(paste("pdfcrop --margins '0 0 0 0'",filename_with_path,filename_with_path)) # linux only, requires pdfcrop
  }
}
cat("png_last_plot")
cat("\n")
# Save the most recent ggplot to ./figs/<filename>.png via ggsave().
#
# filename  : output name without extension; written under ./figs/.
# fig_scale : ggsave scaling multiplier.
# width,
# height    : optional figure size in inches; NA keeps ggsave's default
#             device size (same behaviour as before). Added for interface
#             consistency with pdf_last_plot().
# crop      : if TRUE, shell out to `pdfcrop`. NOTE(review): pdfcrop
#             operates on PDF input and is unlikely to crop a PNG —
#             confirm before relying on it.
# BUG FIX: the status message above used to print "pdf_last_plot".
png_last_plot <- function(filename, fig_scale = 1, width = NA, height = NA, crop = FALSE) {
  filename_with_path <- file.path(".", "figs", paste(filename, ".png", sep = ""))
  cat("Saving", filename_with_path, "\n")
  ggsave(filename_with_path,
         device = "png",
         scale = fig_scale,
         width = width,
         height = height,
         units = "in")
  if (crop == TRUE) {
    system(paste("pdfcrop --margins '0 0 0 0'", filename_with_path, filename_with_path)) # linux only, requires pdfcrop
  }
}
# Build a 0/1 vector marking, for each consecutive pair in `history`,
# whether the choice changed (1) or stayed the same (0). The result has
# length(history) - 1 elements (empty for histories shorter than 2).
generate_switch_vector <- function(history) {
  n <- length(history)
  # BUG FIX: the original looped over 1:(n-1); for a length-1 history that
  # evaluates to c(1, 0) and indexes past the end of `history`.
  if (n < 2) {
    return(numeric(0))
  }
  switch_vector <- numeric(n - 1)
  for (i in seq_len(n - 1)) {
    # plain comparison replaces the dplyr::if_else(a == b, 0, 1) call;
    # it yields the same 0/1 numeric (and NA on NA input)
    switch_vector[[i]] <- as.numeric(history[[i]] != history[[i + 1]])
  }
  return(switch_vector)
}
cat("get_mode")
cat("\n")
# Statistical mode of a vector; returns every value tied for the highest
# frequency, so the result can have length > 1.
# credits: @digEmAll
# https://stackoverflow.com/questions/2547402/is-there-a-built-in-function-for-finding-the-mode
get_mode <- function(x) {
  distinct_vals <- unique(x)
  counts <- tabulate(match(x, distinct_vals))
  distinct_vals[counts == max(counts)]
}
cat("get_first")
cat("\n")
# Return the first element of a vector or list.
get_first <- function(vector) {
  vector[[1]]
}
cat("vectorize_history \n")
cat(" -> used internally. converts history list to a vector")
cat("\n")
# Flatten all arguments (scalars and/or vectors) into one unnamed vector.
vectorize_history <- function(...) {
  unlist(list(...), use.names = FALSE)
}
cat("measure_stability")
cat("\n")
# Measure how stable a subject's history of choices is.
#
# history : vector/list of choices over rounds (at least 2 rounds assumed —
#           TODO confirm; generate_switch_vector indexes pairs).
# method  : one of
#   "psi"           - psi-stability:
#                     1 - (#distinct / #rounds) * (#switches / (#rounds - 1))
#   "andreozzi"     - observed switches minus the switches expected from a
#                     uniform draw over the observed choice set:
#                     (#rounds - 1) * (#distinct - 1) / #distinct
#   "mode"          - 1 if the first choice equals the unique modal choice,
#                     0 otherwise (a tie for the mode counts as unstable)
#   "mode_lasthalf" - same, but the mode is computed over the last half of
#                     the history only
# Returns NA if `method` matches none of the above.
# Depends on generate_switch_vector() and get_mode() defined in this file.
measure_stability <- function(history, method = "psi") {
  # Measures stability for a given history of
  # methods: psi: psi-stability
  #          andreozzi : andreozzi measure of deviation from expected switches
  #          mode: if mode is equal to most common choice
  #          mode_lasthalf: if mode is equal to the choice that is in last half
  #choiceset <- unique(choiceset) # Not needed. see below.should be unique anyways but just to be sure
  length_history <- length(history)
  #length_choiceset <- length(choiceset) # We don't need it anymore as we use length
  unique_history <- unique(history)
  num_unique_history <- length(unique_history)
  # 0/1 indicator per consecutive round pair; its sum is the switch count
  switch_vector <- generate_switch_vector(history)
  sum_switch <- sum(switch_vector)
  stability <- NA
  if (method == "psi") {
    stability <- 1 - (num_unique_history/length_history) * (sum_switch / (length_history-1))
  }
  if (method == "andreozzi") {
    expected_switches <- (length_history-1) * (num_unique_history-1)/(num_unique_history)
    stability <- sum_switch - expected_switches
  }
  if (method == "mode") {
    first_choice <- history[[1]]
    mode_choice <- get_mode(history)
    # a tie for the mode (length != 1) is treated as unstable
    if (length(mode_choice) != 1) {
      stability <- 0
    }
    else {
      stability <- ifelse(first_choice %in% mode_choice,1,0)
    }
  }
  if (method == "mode_lasthalf") {
    first_choice <- history[[1]]
    # second half starts at ceiling(n/2), so odd lengths include the middle round
    half_index <- ceiling(length_history/2)
    mode_choice_lasthalf <- get_mode(history[half_index:length_history])
    if (length(mode_choice_lasthalf) != 1) {
      stability <- 0
    }
    else {
      stability <- ifelse(first_choice %in% mode_choice_lasthalf,1,0)
    }
  }
  return(stability)
}
|
7701cb3a9e4eae73ba4729b8931c501aad6a026c
|
62cb50797a68620eb6ff32f86d1d47205bed2dac
|
/plot1.R
|
c18f212e9f575100d91eb944f1543a9f4f55f020
|
[] |
no_license
|
ellenkoenig/ExData_Plotting1
|
7c66852c92b636fc387cbdf00b5eb54bb42d29de
|
0deed712f22e9efc6b0bd36b6c05b6db90638d41
|
refs/heads/master
| 2021-01-13T06:55:14.743562
| 2015-05-09T23:28:25
| 2015-05-09T23:28:25
| 35,269,714
| 0
| 0
| null | 2015-05-08T09:06:28
| 2015-05-08T09:06:28
| null |
UTF-8
|
R
| false
| false
| 1,049
|
r
|
plot1.R
|
# This file constructs a barplot from data on "Global Active Power"
# It assumes a file called household_power_consumption.txt with the data in the source folder
# Imports
library("plyr")
# 1. Read the data
# ';'-separated file; '?' marks missing values in the numeric columns
data <- read.csv("household_power_consumption.txt", sep = ";", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"), na.strings = c('?'))
# keep only the two days of interest (dates are "d/m/yyyy" strings)
relevant_data <- data[data$Date == "1/2/2007" |data$Date == "2/2/2007",]
remove(data) #remove the large source dataset which is no longer needed from memory
# 2. Prepare the data (Create the frequencies grouped by increments of 0.5)
# round every reading UP to the nearest 0.5 kW, then count readings per bin
rounded_gap <- round_any(relevant_data$Global_active_power, 0.5, ceiling)
frequencies_gap <- table(rounded_gap)
# 3. Plot the results and output to png file
png("plot1.png")
# space = 0 makes the bars touch so the barplot mimics a histogram
barplot(frequencies_gap, main = "Global Active Power", col = "red", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", space = 0, xaxt = 'n')
# custom x axis: bins are 0.5 kW wide, so 4 bins per 2 kW labelled tick
axis(1, at = seq(0,12, by = 4), labels = as.character(seq(0,6, by = 2)))
dev.off()
|
9caa39305dc64d8de34b880638b6e49bb1b0fb08
|
e3a93fff69205b76be167006e085dc36b0bf2f39
|
/R/reverse_ip.R
|
32c327d5b2074dfe6f1430ca6a9137abf64728e4
|
[] |
no_license
|
cneskey/domaintools
|
78c0797c982f59e1472a97bb1a334a16686b4cb4
|
2d8a1199529adf3717022f9637f3f83b7c778958
|
refs/heads/master
| 2020-04-19T14:40:33.438229
| 2015-08-26T06:28:14
| 2015-08-26T06:28:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,131
|
r
|
reverse_ip.R
|
#' Reverse IP
#'
#' The Reverse IP API provides a list of domain names that share the same
#' Internet host (i.e. the same IP address).
#'
#' @param ip IP address to perform the reverse IP query
#' @param limit Limits the size of the domain list that can appear in a
#'        response. The limit is applied per-IP address, not for the entire
#'        request.
#' @note In rare cases, you may request an IP for which no recent Whois
#'       record is available. If that occurs, the system will respond with
#'       an error.
#' @export
#' @references \url{http://www.domaintools.com/resources/api-documentation/reverse-ip/}
#' @return a \code{list} of result details for the \code{ip}
#' @examples
#' reverse_ip("64.246.165.240")
reverse_ip <- function(ip, limit=NULL) {
  url <- paste0(domaintools_url_base, trimws(ip), "/host-domains")
  # BUG FIX: `limit` was documented but never sent to the API. httr drops
  # NULL query elements, so including it is a no-op when unset.
  res <- POST(url,
              query=list(api_username=domaintools_username(),
                         api_key=domaintools_api_key(),
                         limit=limit))
  stop_for_status(res)
  jsonlite::fromJSON(content(res, as="text"))
}
#' Shared IPs
#'
#' The Reverse IP API provides a list of domain names that share the same
#' Internet host (i.e. the same IP address).
#'
#' @param hostname Domain name whose host IPs should be looked up
#' @param limit Limits the size of the domain list that can appear in a
#'        response. The limit is applied per-IP address, not for the entire
#'        request.
#' @note In rare cases, you may request an IP for which no recent Whois
#'       record is available. If that occurs, the system will respond with
#'       an error.
#' @export
#' @references \url{http://www.domaintools.com/resources/api-documentation/reverse-ip/}
#' @return a \code{list} of result details for the \code{hostname}
#' @examples
#' shared_ips("domaintools.com")
shared_ips <- function(hostname, limit=NULL) {
  url <- paste0(domaintools_url_base, trimws(hostname), "/reverse-ip")
  # BUG FIXES relative to the original:
  #  - `api_key=domaintools_api_key` passed the accessor function object
  #    instead of calling it; the key is now retrieved with ().
  #  - a stray trailing comma left an empty argument in the POST() call.
  #  - `limit` was documented but never sent (NULL is dropped by httr).
  res <- POST(url,
              query=list(api_username=domaintools_username(),
                         api_key=domaintools_api_key(),
                         limit=limit))
  stop_for_status(res)
  jsonlite::fromJSON(content(res, as="text"))
}
|
39e0990be31c56443a8621370ff88d8b9d907c12
|
284c7b66d6db034a5ccfd34486eaeba8bc2ccaf6
|
/man/helpr_render_json.Rd
|
0b8c8a7f43b65b847a9d518fdc98e3021c9ea206
|
[] |
no_license
|
hadley/helpr
|
c9967cfabe6d510212a32d83643136b3b85d5507
|
2eeb174b09493f8d5c4c072772285c100f410a29
|
refs/heads/master
| 2021-01-11T04:57:20.128449
| 2012-04-09T14:34:21
| 2012-04-09T14:34:21
| 377,173
| 23
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
rd
|
helpr_render_json.Rd
|
\name{helpr_render_json}
\alias{helpr_render_json}
\title{Render JSON}
\usage{
helpr_render_json(obj)
}
\arguments{
\item{obj}{object to be rendered}
}
\description{
  Render an object to JSON while suppressing the warnings emitted during rendering
}
\author{
Barret Schloerke
}
\keyword{internal}
|
177139d4a385a4f6778dcfc2e0bca196cdc2c3a2
|
2a6acc51c0fa6941abca4ac9b8bbe307dac1a85b
|
/M-Tech Final Year Project/R Code/twittwr_data_extrac.R
|
bd18d3fc0dc22dd2dd047d6449caa6994c97df48
|
[] |
no_license
|
LEOKA037/AcademicProjects
|
507a74f9c6b73a103f8fb33042e22b6e2ba867de
|
2fd5166dbae7d6864d2ab61442d6096162d6d140
|
refs/heads/main
| 2023-06-06T10:10:49.883727
| 2021-06-24T18:35:22
| 2021-06-24T18:35:22
| 372,004,267
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
twittwr_data_extrac.R
|
library(twitteR)
# SECURITY FIX: the original script committed live Twitter API credentials
# to source control. Read them from environment variables instead; set
# TWITTER_API_KEY / TWITTER_API_SECRET / TWITTER_ACCESS_TOKEN /
# TWITTER_ACCESS_TOKEN_SECRET before running this script. The previously
# leaked keys should be revoked in the Twitter developer console.
api_key <- Sys.getenv("TWITTER_API_KEY")
api_secret <- Sys.getenv("TWITTER_API_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET")
# authenticate the twitteR session with the credentials above
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
#searchTwitter('Karnataka Election', n = 1000, since = '2020-01-02')
|
7c0986b1bb6dbd1b92a72ad83c7b032c442c89fa
|
5edb4b41956ab38660f8af2276722ce6fea3bba0
|
/man/bsmds.Rd
|
53f9869f87a810f801407d2aba602ecfe49af8f6
|
[] |
no_license
|
davidaarmstrong/bsmds
|
4c88465c1b191db00e830f15b2feb72d11a982d6
|
4b313de19228f2a56c71b7104b815b78ddf6bfac
|
refs/heads/master
| 2021-08-31T13:26:11.509552
| 2017-12-21T13:17:28
| 2017-12-21T13:17:28
| 115,007,902
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,177
|
rd
|
bsmds.Rd
|
\name{bsmds}
\alias{bsmds}
\alias{bsfun}
\title{Bootstrap Confidence Regions for Multidimensional Scaling Solutions}
\description{This function implements a bootstrapping solution that identifies and optionally removes potentially degenerate solutions.}
\usage{
bsmds(data, dist.fun, dist.data.arg = "x", dist.args = NULL, R, ndim = 2, weightmat = NULL, init = NULL, type="interval", ties = "primary", verbose = FALSE, relax = 1, modulus = 1, itmax = 1000, eps = 1e-06, rm.degen = TRUE, km.thresh = 5, iter.info=FALSE)
}
\arguments{
\item{data}{A numeric data frame or matrix of variables to be scaled. Column names will be propagated through the function as the names of stimuli. This data frame must contain only the variables to be scaled.}
\item{dist.fun}{A character string identifying the name of the distance function to be used. We have included in this package a function to calculate Rabinowitz' Line-of-Sight distance. The \code{dist} function from the MASS library is another option. Users can provide their own function so long as it returns the distance/dissimilarity matrix in its object `dist'}
\item{dist.data.arg}{A character string giving the name of the data argument to the distance function}
\item{dist.args}{Optional arguments to be passed to the distance function - must not be NULL}
\item{R}{Number of bootstrap replications to compute}
\item{ndim}{Number of dimensions for the multidimensional scaling solution}
\item{weightmat}{Optional matrix of dissimilarity weights to be passed to \code{smacofSym}}
\item{init}{Matrix with starting values for the initial configuration to be passed to \code{smacofSym}; in lieu of a matrix, supply \sQuote{torgerson} (the default) or \sQuote{random}.}
\item{type}{How should distances be treated (formerly the \code{METRIC} argument), can be \sQuote{interval}, \sQuote{ratio}, \sQuote{mspline}, \sQuote{ordinal}.}
\item{ties}{Tie specification for non-metric MDS only: \code{"primary"}, \code{"secondary"}, or \code{"tertiary"}, to be passed to \code{smacofSym}}
\item{verbose}{If \code{TRUE}, intermediate stress is printed out}
\item{relax}{If \code{TRUE}, block relaxation is used for majorization, to be passed to \code{smacofSym}}
\item{modulus}{Number of smacof iterations per monotone regression call}
\item{itmax}{Maximum number of iterations}
\item{eps}{Convergence criterion}
\item{rm.degen}{A logical argument indicating whether the algorithm should remove potentially degenerate solutions. See `Details' for more information}
\item{km.thresh}{Integer-valued threshold (between 2 and n-stimuli-1) of the maximum number of clusters for which, if approximately 100\% of the total variance is accounted for by between-cluster variance, the solution is deemed to be degenerate}
\item{iter.info}{Logical indicating whether information about each iteration should be printed}
}
\details{The \code{bsmds} function bootstraps data to create bootstrap replicates of the dissimilarity matrix on which the MDS solution can be computed with \code{smacofSym}. Our experience is that this procedure has a tendency to generate a non-trivial proportion of degenerate solutions (i.e., those where the scaled stimuli locations fall into a small number of clusters). We include an optional k-means clustering step (which can be turned on by \code{rm.degen = TRUE}) which calculates the ratio of between-cluster to total sums of squares. If this ratio is approximately 1 for any number of clusters less than or equal to \code{km.thresh}, the solution is deemed to be degenerate and a different bootstrap sample is drawn and a new dissimilarity matrix is calculated.\\
The function performs a Procrustean similarity transformation, proposed by Schonemann and Carroll (1970), to the bootstrap configuration that brings each into maximum similarity with the target configuration (the configuration from the original MDS solution). This consists of a translation, rotation and dilation of the bootstrap configuration.
}
\value{The returned object is of class \code{bsmds} with elements:
\item{mds}{The original mds solution returned by \code{smacofSym}}
\item{bs.mds}{The bootstrapped solution (the return from a call to \code{boot})}
\item{dmat}{The original dissimilarities matrix used to produce the scaling solution}
\item{X.i}{A list of bootstrap stimuli locations. Each element of the list is an R by n-dim matrix of points for each stimulus. The names of the list elements come from the column names of the data matrix}
\item{cors}{An R by n-dim*2 matrix of correlations. The first n-dim columns give the correlations between the original MDS configuration and the untransformed bootstrap configuration. The last n-dim columns of the matrix are the correlations between the optimally transformed bootstrapped configurations and the original MDS configuration}
\item{v}{A vector of values indicating whether the solution was valid (1) or degenerate (0)}
\item{stress}{The stress (either metric or non-metric, as appropriate) from each bootstrap replication of the \code{smacofSym} procedure}
\item{pct.totss}{The percentage of the total sum of squares accounted for by the between-cluster sum of squares in the k-means clustering solution. This is returned regardless of whether \code{rm.degen=TRUE}, though solutions are only discarded on this bases if that argument is turned on.}}
\references{
de Leeuw, J. & Mair, P. (2009). Multidimensional scaling using majorization:
The R package smacof. Journal of Statistical Software, 31(3), 1-30, \url{http://www.jstatsoft.org/v31/i03/}
Jacoby, W.G. and D. Armstrong. 2011 Bootstrap Confidence Regions for Multidimensional Scaling Solutions. Unpublished Manuscript.
Schonemann, P. and R. M. Carroll. 1970 \dQuote{Fitting One Matrix to Another Under Choice of a Central Dilation and a Rigid Motion} \emph{Psychometrika} \bold{35}, 245--256.
}
\author{Dave Armstrong and William Jacoby}
\examples{
data(thermometers2004)
out <- bsmds(thermometers2004, dist.fun = "dist", dist.data.arg = "x",
dist.args=list(method="euclidean"), R=25, type="interval", iter.info=FALSE)
}
\keyword{Statistics}
\keyword{Multivariate}
|
19c72bf56665084cbbed20aec38f9eae12bc1514
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.analytics/man/quicksight_describe_group.Rd
|
7fb0f3f44ae256f589c8561f642b88c197aeda54
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,175
|
rd
|
quicksight_describe_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_describe_group}
\alias{quicksight_describe_group}
\title{Returns an Amazon QuickSight group's description and Amazon Resource
Name (ARN)}
\usage{
quicksight_describe_group(GroupName, AwsAccountId, Namespace)
}
\arguments{
\item{GroupName}{[required] The name of the group that you want to describe.}
\item{AwsAccountId}{[required] The ID for the AWS account that the group is in. Currently, you use the
ID for the AWS account that contains your Amazon QuickSight account.}
\item{Namespace}{[required] The namespace. Currently, you should set this to \code{default}.}
}
\value{
A list with the following syntax:\preformatted{list(
Group = list(
Arn = "string",
GroupName = "string",
Description = "string",
PrincipalId = "string"
),
RequestId = "string",
Status = 123
)
}
}
\description{
Returns an Amazon QuickSight group's description and Amazon Resource
Name (ARN).
}
\section{Request syntax}{
\preformatted{svc$describe_group(
GroupName = "string",
AwsAccountId = "string",
Namespace = "string"
)
}
}
\keyword{internal}
|
4e76df55da662687d00cea55865af8e0fc122f92
|
feb27e905198606cff9145e3e9a97ca58c3136cf
|
/app/server.R
|
78f641784dec81b1e584e2c985425e58a21b2ab9
|
[] |
no_license
|
TZstatsADS/Spr2017-proj2-grp13
|
9acc8125de77d3866da35f7cd71210f95802dcee
|
7edb4a1da27482bc7d1b4aaa8a815ef9eb484d36
|
refs/heads/master
| 2021-01-18T19:10:08.074138
| 2019-04-18T21:00:20
| 2019-04-18T21:00:20
| 80,873,358
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,824
|
r
|
server.R
|
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
library(ggplot2)
# Load and clean the college data used by the Shiny map explorer.
collegedata <- read.csv("clean4.csv")
# rename the LATITUDE..LONGITUDE column span to the lowercase names leaflet
# expects (assumes the two columns are adjacent — TODO confirm in clean4.csv)
colnames(collegedata)[c(which(colnames(collegedata)=="LATITUDE"):which(colnames(collegedata)=="LONGITUDE"))] <- c("latitude","longitude")
# coerce factor-read columns to numeric (paste() flattens factor levels first)
collegedata$COSTT4_A <- as.numeric(paste(collegedata$COSTT4_A))
collegedata$Rank <- as.numeric(paste(collegedata$Rank))
collegedata$UGDS_MEN <- as.numeric(paste(collegedata$UGDS_MEN))
collegedata$UGDS_WOMEN <- as.numeric(paste(collegedata$UGDS_WOMEN))
collegedata$SAT_AVG <- as.numeric(paste(collegedata$SAT_AVG))
collegedata$ADM_RATE <- as.numeric(paste(collegedata$ADM_RATE))
collegedata$ACTCMMID <- as.numeric(paste(collegedata$ACTCMMID))
# second data set for the comparison plots (original comment here was
# mojibake; roughly "usable!!!")
data1<-read.csv('CleanDataFinal.csv', na.strings = "NULL")
data1[is.na(data1)]=0
# in columns 10:18 and 31, values <= 200 are treated as missing
data1[,c(10:18,31)][data1[,c(10:18,31)]<=200]=NA
# Shiny server: interactive leaflet map of colleges, radar charts, a county
# choropleth, and several linked comparison plots over the data loaded above.
function(input, output, session) {
  # Map tab: filter colleges by the user's thresholds and draw circles whose
  # radius/colour encode the selected variable.
  observe({
    usedcolor <- "red"
    tuition <- as.numeric(input$tuition)
    liRank <- as.numeric(input$liRank)
    SAT <- as.numeric(input$SAT)
    adm <- as.numeric(input$adm)
    ACT <- as.numeric(input$ACT)
    lidata<- filter(collegedata,COSTT4_A<tuition,Rank<liRank,SAT_AVG<SAT,ADM_RATE<adm,ACTCMMID<ACT)
    radius1 <-lidata$Arrest*100
    opacity <- 0.8
    # circle radius/colour/opacity depend on which variable is mapped
    if(input$color=="liRank"){
      usedcolor <- "green"
      radius1 <- lidata$COSTT4_A
      opacity <- 0.4
    }else if(input$color=="population"){
      usedcolor<- ifelse(input$sex=="men","blue","red")
      radius1 <- ifelse(input$sex=="men",lidata$UGDS_MEN*100000,lidata$UGDS_WOMEN*100000)
      opacity <- 0.6
    }else if(input$color=="ttpopulation"){
      usedcolor <- "blueviolet"
      radius1 <- lidata$UGDS*2
      opacity <- 0.5
    }
    output$map <- renderLeaflet({
      leaflet(data=lidata) %>%
        addTiles(
          urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
          attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
        ) %>%setView(lng = -93.85, lat = 37.45, zoom = 4) %>%
        clearShapes() %>%
        addCircles(~longitude, ~latitude, radius=radius1, layerId=~UNITID,
                   stroke=FALSE, fillOpacity=opacity, fillColor=usedcolor)
    })
    # popup with college details, shown when a circle is clicked
    showZipcodePopup <- function(x, lat, lng) {
      lidata <- lidata[lidata$UNITID == x,]
      content <- as.character(tagList(
        tags$h4("University:",lidata$INSTNM),
        tags$strong(HTML(sprintf("information"
        ))), tags$br(),
        sprintf("Rank:%s",lidata$Rank), tags$br(),
        sprintf("State: %s City: %s",lidata$STABBR,lidata$CITY),tags$br(),
        sprintf("Cost :%s (four years)",lidata$CO), tags$br(),
        sprintf("Url: %s ",lidata$INSTURL),tags$br(),
        sprintf("Arrested in 2016: %s",lidata$Arrest)
      ))
      leafletProxy("map") %>% addPopups(lng, lat, content, layerId =x)
    }
    # react to clicks on map circles
    observe({
      leafletProxy("map") %>% clearPopups()
      event <- input$map_shape_click
      if (is.null(event))
        return()
      isolate({
        showZipcodePopup(event$id, event$lat, event$lng)
      })
    })
  })
  #####
  ##
  #### ###########################################
  # Radar chart for a selected school, built from transpose.csv rows
  # (each metric is rescaled to a 0-100 axis).
  output$plot <- renderPlot({
    library(fmsb)
    name=input$school
    transpose<-read.csv("transpose.csv")
    datahh=data.frame(as.numeric(as.character(transpose[7,name]))*100, (300-as.numeric(as.character(transpose[32,name])))/3, as.numeric(as.character(transpose[31,name]))/500,
                      as.numeric(as.character(transpose[26,name]))/36*100, as.numeric(as.character(transpose[27,name]))/36*100,
                      as.numeric(as.character(transpose[28,name]))/36*100, as.numeric(as.character(transpose[29,name]))/16)
    colnames(datahh)=c("admission rate" , "rank" , "Tuition", "ACT English" , "ACT Math", "ACT Writing" ,"SAT" )
    datahh=rbind(rep(100,7) , rep(0,7) , datahh)
    #radarchart(data)
    radarchart( datahh , axistype=1 ,
                #custom polygon
                pcol=rgb(0.2,0.5,0.5,0.9) , pfcol=rgb(0.2,0.5,0.5,0.5) , plwd=4 ,
                #custom the grid
                cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,100,20), cglwd=0.8,
                #custom labels
                vlcex=0.8
    )
  })
  ## Data Explorer ###########################################
  # County-level choropleth (percent_map comes from helpers.R)
  library(maps)
  library(mapproj)
  counties <- read.csv("orgi.csv")
  source("helpers.R")
  output$mapplot <- renderPlot({
    args <- switch(input$hkvar,
                   "Student_Married%" = list(counties$married, "darkgreen", "Student_Married%"),
                   "Student_Depedent%" = list(counties$dependent, "black", "Student_Depedent%"),
                   "Student_Veteran%" = list(counties$veteran, "darkorange", "Student_Veteran%"),
                   "Student_First_Generation%" = list(counties$first.generation, "darkviolet", "Student_First_Generation%"))
    args$min <- input$range[1]
    args$max <- input$range[2]
    do.call(percent_map, args)
  })
  ####################
  # Radar chart of the user's own profile (sliders rescaled to 0-100 axes)
  output$raderplot <- renderPlot({
    library(fmsb)
    # input$satmath,input$satessay,input$satreading,input$actscore,input$moneywillingness,input$gpascore,input$studentrank
    hhdata=data.frame(input$satmath/8,input$satessay/8*100,input$satreading/8,input$actscore/35*100,input$moneywillingness/800,input$gpascore/4*100,input$studentrank)
    colnames(hhdata)=c("SAT math" , "SAT essay" , "SAT reading", "ACT" , "Financials", "GPA" ,"Rank" )
    hhdata=rbind(rep(100,7) , rep(0,7) , hhdata)
    # default radarchart(data)
    radarchart( hhdata , axistype=1 ,
                #custom polygon
                pcol=rgb(0.2,0.5,0.5,0.9) , pfcol=rgb(0.2,0.5,0.5,0.5) , plwd=4 ,
                #custom the grid
                cglcol="grey", cglty=1, axislabcol="grey", cglwd=0.8,
                #custom labels
                vlcex=0.8
    )
  })
  #####
  ####
  ####
  # "best value" = 5 colleges with the largest positive residuals from the
  # user-chosen regression model()
  best.valued=reactive({as.character(data$College[as.numeric(names(sort(model()$residuals,decreasing = T)[1:5]))])})
  # Fill in the spot we created for a plot
  data=data1[order(data1[,'Rank']),]
  data2=data
  data2[is.na(data2)]=-999
  model=reactive({lm(data[,input$y]~data[,input$x])})
  InverseRank=1/data[,'Rank']
  Rank=data[,'Rank']
  ylim1=reactive({max(data[,input$var])})
  ylim2=reactive({min(data[,input$var])})
  x=reactive({input$ho$x})
  y=reactive({input$ho$y})
  # combined SAT (verbal + math) quartile columns used by the box plots
  data$all25=data$SATVR25+data$SATMT25
  data$all50=data$SATVRMID+data$SATMTMID
  data$all75=data$SATVR75+data$SATMT75
  diff=reactive({abs(data$SAT_AVG-ave())})
  df=reactive({data[order(diff(),decreasing=F),][1:10,]})
  ave=reactive({input$vr+input$mt})
  act=reactive({input$act})
  diff2=reactive({abs(data$ACTCMMID-act())})
  df2=reactive({data[order(diff2(),decreasing=F),][1:10,]})
  # squared distance between a clicked point and each college's (x, y)
  score=function(x,y,x1,y1){return((x-x1)^2+(y-y1)^2)}
  name=reactive({as.character(data2$College)[which.min(score(x(),y(),data2[,input$x],data2[,input$y]))]})
  # "go" button: highlight the 5 best-value colleges on the scatter plot
  observeEvent(input$go, {
    if(input$x!=input$y){
      bool=rep(0,length(data$College))
      bool[as.numeric(names(sort(model()$residuals,decreasing = T)[1:5]))]=1
      data$bool=bool
      output$sPlot <- renderPlot({
        # Render a barplot
        ggplot(data=data, aes(x=data[,input$x], y=data[,input$y],size=factor(bool),fill=factor(bool),colour=factor(bool)))+
          geom_point() + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab(input$y)+labs(fill='best value')+xlab(input$x)+ylab(input$y)
      })
      output$text1 <- renderText({
        paste(best.valued(), sep=",", collapse="")
      })
    }
  })
  # changing either axis resets the scatter plot to the plain rank view
  observeEvent(input$x, {
    bool=rep(0,length(data$College.Name))
    bool[as.numeric(names(sort(model()$residuals,decreasing = T)[1:5]))]=1
    data$bool=bool
    output$sPlot <- renderPlot({
      # Render a barplot
      ggplot(data=data, aes(x=data[,input$x], y=data[,input$y],size=InverseRank,colour=Rank))+
        geom_point() + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab(input$y)+scale_colour_gradient(low="blue", high="black",limit=(c(0,200)))+labs(fill='Rank')+xlab(input$x)+ylab(input$y)
    })
    output$text1 <- renderText({
      ''
    })
  })
  observeEvent(input$y, {
    bool=rep(0,length(data$College.Name))
    bool[as.numeric(names(sort(model()$residuals,decreasing = T)[1:5]))]=1
    data$bool=bool
    output$sPlot <- renderPlot({
      # Render a barplot
      ggplot(data=data, aes(x=data[,input$x], y=data[,input$y],size=InverseRank,colour=Rank))+
        geom_point() + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab(input$y)+scale_colour_gradient(low="blue", high="black",limit=(c(0,200)))+labs(fill='Rank')+xlab(input$x)+ylab(input$y)
    })
    output$text1 <- renderText({
      ''
    })
  })
  # SAT sliders: box plot of the 10 colleges whose average SAT is closest
  observeEvent(input$mt, {
    ave=reactive({input$vr+input$mt})
    data$all25=data$SATVR25+data$SATMT25
    data$all50=data$SATVRMID+data$SATMTMID
    data$all75=data$SATVR75+data$SATMT75
    diff=reactive({abs(data$SAT_AVG-ave())})
    df=reactive({data[order(diff()),][1:10,]})
    output$cPlot <- renderPlot({
      # Render a barplot
      ggplot(data=df(), aes(x=factor(College.Name),ymin=400,ymax=1600,lower=all25,middle=all50,upper=all75))+
        geom_boxplot(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab('SAT')+scale_fill_gradient(low="white", high="blue",limit=(c(400, 1600)))+ylim(400,1600)+labs(fill='SAT')+xlab('College Name')+
        geom_hline(yintercept=ave())
    })})
  observeEvent(input$vr, {
    ave=reactive({input$vr+input$mt})
    data$all25=data$SATVR25+data$SATMT25
    data$all50=data$SATVRMID+data$SATMTMID
    data$all75=data$SATVR75+data$SATMT75
    diff=reactive({abs(data$SAT_AVG-ave())})
    df=reactive({data[order(diff()),][1:10,]})
    output$cPlot <- renderPlot({
      # Render a barplot
      ggplot(data=df(), aes(x=factor(College.Name),ymin=400,ymax=1600,lower=all25,middle=all50,upper=all75))+
        geom_boxplot(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab('SAT')+scale_fill_gradient(low="white", high="blue",limit=(c(400, 1600)))+ylim(400,1600)+labs(fill='SAT')+xlab('College Name')+
        geom_hline(yintercept=ave())
    })})
  # ACT slider: analogous box plot for the 10 closest ACT midpoints
  observeEvent(input$act, {
    act=reactive({input$act})
    diff2=reactive({abs(data$ACTCMMID-act())})
    df2=reactive({data[order(diff2(),decreasing=F),][1:10,]})
    output$DPlot <- renderPlot({
      # Render a barplot
      ggplot(data=df2(), aes(x=factor(College.Name),ymin=0,ymax=32,lower=ACTCM25,middle=ACTCMMID,upper=ACTCM75))+
        geom_boxplot(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab('ACT')+scale_fill_gradient(low="white", high="blue",limit=(c(0, 32)))+ylim(0,32)+labs(fill='ACT')+xlab('College Name')+
        geom_hline(yintercept=act())
    })})
  # clicking a box plot column shows that college's details
  observeEvent(input$pc$x, {
    f=as.factor(as.character(df2()$College.Name))
    lvls <- levels(f)
    nn <- lvls[round(input$pc$x)]
    roll=data[data$College.Name==nn,]
    aroll=apply(roll[,2:6],2,as.character)
    output$intro <- renderText({
      paste(aroll, sep=",", collapse="\n")
    })
  })
  observeEvent(input$pc2$x, {
    f=as.factor(as.character(df()$College.Name))
    lvls <- levels(f)
    nn <- lvls[round(input$pc2$x)]
    roll=data[data$College.Name==nn,]
    aroll=apply(roll[,2:6],2,as.character)
    output$intro <- renderText({
      paste(aroll, sep=",", collapse="\n")
    })
  })
  # bar chart of a chosen variable for an 11-college rank window
  output$bPlot <- renderPlot({
    # Render a barplot
    ggplot(data=data[c(input$rank:(input$rank+10)),], aes(x=data[c(input$rank:(input$rank+10)),][,"College Name"], y=data[c(input$rank:(input$rank+10)),][,input$var],fill=data[c(input$rank:(input$rank+10)),][,input$var]))+
      geom_bar(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab(input$var)+scale_fill_gradient(low="white", high="blue",limit=(c(ylim2(), ylim1())))+ylim(0, ylim1())+labs(fill=input$var)+xlab("College Name")
  })
  # default renderings shown before any user interaction
  output$sPlot <- renderPlot({
    # Render a barplot
    ggplot(data=data, aes(x=data[,input$x], y=data[,input$y],size=InverseRank,colour=Rank))+
      geom_point() + theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab(input$y)+scale_colour_gradient(low="blue", high="black",limit=(c(0,200)))+labs(fill='Rank')+xlab(input$x)+ylab(input$y)
  })
  output$text1 <- renderText({
    ''
  })
  output$info <- renderText({
    name()
  })
  output$cPlot <- renderPlot({
    # Render a barplot
    ggplot(data=df(), aes(x=factor(College.Name),ymin=400,ymax=600,lower=all25,middle=all50,upper=all75))+
      geom_boxplot(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab('SAT')+scale_fill_gradient(low="white", high="blue",limit=(c(400, 1600)))+ylim(400,1600)+labs(fill='SAT')+xlab('College Name')+
      geom_hline(yintercept=ave())
  })
  output$DPlot <- renderPlot({
    # Render a barplot
    ggplot(data=df2(), aes(x=factor(College.Name),ymin=0,ymax=32,lower=ACTCM25,middle=ACTCMMID,upper=ACTCM75))+
      geom_boxplot(stat="identity")+ theme(axis.text.x=element_text(angle = -90, hjust = 0))+ylab('ACT')+scale_fill_gradient(low="white", high="blue",limit=(c(0, 32)))+ylim(0,32)+labs(fill='SAT')+xlab('College Name')+
      geom_hline(yintercept=act())
  })
  output$intro <- renderText({
    ''
  })
}
|
0cce6228f5c51ae8ac0fac78fb60d0df0e6d5e8d
|
95772ed7b2639221d1a564999c565cd6152aad87
|
/DataAnalyse/Weather.R
|
352c640da0f8508a9d0468854808938629f79671
|
[] |
no_license
|
tunjing998/bigdata_ca2_ang_tunjing
|
f46b71f8cc3295f69826ec0c69c4b2b22ba2b5d7
|
6c9632fab1185b7e23c6c3e37a43796b1ffb35f0
|
refs/heads/master
| 2020-10-01T22:11:09.350516
| 2019-12-20T22:20:14
| 2019-12-20T22:20:14
| 227,634,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,752
|
r
|
Weather.R
|
library(mongolite)
library(car)
library(StatMeasures)
library(lubridate)
# Pull the 2018 hourly records for Dublin weather plus the home power and
# indoor temperature collections from a local MongoDB instance.
m <- mongo("dublin_weather",url = "mongodb://localhost:27017",db="bigdata_ca2_ang_tunjing")
#https://stackoverflow.com/questions/34454034/r-mongolite-date-query
# Millisecond epoch bounds covering calendar year 2018 for the Mongo $date query.
dstart <- as.integer(as.POSIXct(strptime("2018-01-01","%Y-%m-%d")))*1000
dend <- as.integer(as.POSIXct(strptime("2019-01-01","%Y-%m-%d")))*1000
conditionYear18 <-paste0('{"date":{
"$gte": {"$date" : { "$numberLong" : "', dstart, '" }},
"$lt": {"$date" : { "$numberLong" : "', dend, '" }}
}}')
dublin_weather <-m$find(conditionYear18)
m <- mongo("jlhome_temperature_hour",url = "mongodb://localhost:27017",db="bigdata_ca2_ang_tunjing")
jlhome_temperature_hour <- m$find(conditionYear18)
m <-mongo("jlhome_power_hour",url = "mongodb://localhost:27017",db="bigdata_ca2_ang_tunjing")
jlhome_power_hour <- m$find(conditionYear18)
colnames(jlhome_power_hour) = c("jlpower","date")
colnames(jlhome_temperature_hour) = c("jltemperature","date")
# Join power, indoor temperature and weather on the hourly timestamp, then
# keep only the variables of interest.
alldata <- merge(x=jlhome_power_hour,y=jlhome_temperature_hour,by="date")
alldata <- merge(x=alldata,y= dublin_weather,by="date")
want<- c("date","jlpower","jltemperature","rain","temp","wetb","dewpt","vappr","rhum","msl","wdsp","wddir","ww","sun","vis","clht","clamt")
mydata <-subset(alldata,select = want)
#got 42 missing data
summary(mydata)
# NOTE(review): attach() puts the columns on the search path; the rest of the
# script refers to them by bare name.
attach(mydata)
# Exploratory analysis: distribution plots and outlier counts per variable.
# The numeric comments record observed outliers()$numOutliers results.
boxplot(jlpower)
hist(jlpower)
sd(jlpower)
outliers(jlpower)$numOutliers
# 408 outlier
boxplot(jltemperature)
hist(jltemperature)
outliers(jltemperature)$numOutliers
#93
boxplot(rain)
hist(rain)
outliers(rain)$numOutliers
#1092
boxplot(temp)
hist(temp)
sd(temp)
outliers(temp)$numOutliers
# 6
boxplot(wetb)
sd(wetb)
hist(wetb)
#0
boxplot(dewpt)
hist(dewpt)
sd(dewpt)
outliers(dewpt)$numOutliers
#10
boxplot(vappr)
hist(vappr)
outliers(vappr)$numOutliers
#52
boxplot(rhum)
hist(rhum)
outliers(rhum)$numOutliers
#100
boxplot(msl)
hist(msl)
sd(msl)
outliers(msl)$numOutliers
#19
boxplot(wdsp)
hist(wdsp)
outliers(wdsp)$numOutliers
#164
boxplot(wddir)
hist(wddir)
sd(wddir)
outliers(wddir)$numOutliers
#0
barplot(table(ww))
boxplot(sun)
hist(sun)
outliers(sun)$numOutliers
#1923
boxplot(vis)
hist(vis)
outliers(vis)$numOutliers
#0
boxplot(clht)
hist(clht)
sd(clht)
outliers(clht)$numOutliers
# clht uses 999 as a "no cloud height" sentinel; re-inspect without it, then
# replace the sentinel with 0 for later modeling.
remove999 <- subset(clht,clht!=999)
boxplot(remove999)
hist(remove999)
outliers(remove999)$numOutliers
#975
summary(clht==999)
#2247 data is no clht
clhtReplace = clht
clhtReplace[clhtReplace==999]<- 0
boxplot(clhtReplace)
table(clamt)
barplot(table(clamt))
#reference to wenyu
define_daytime <- function(data, n) {
  # Classify each timestamp in column n of `data` into a part-of-day code:
  #   1 = night/morning (hour <= 9), 2 = daytime (9 < hour < 18),
  #   3 = evening (hour >= 18).
  #
  # data: data frame whose n-th column holds date-times parseable by
  #       lubridate::hour() (here the hourly `date` column).
  # n:    column index of the timestamp column.
  # Returns a numeric vector with one code per row of `data`.
  #
  # lubridate::hour() is vectorized, so the original per-row loop (which also
  # grew its result vector element by element) collapses to one call.
  hrs <- hour(data[[n]])
  ifelse(hrs >= 18, 3, ifelse(hrs > 9, 2, 1))
}
str(mydata)
# Derive month and part-of-day features from the timestamp column.
mydata$month <- month(mydata$date)
mydata[,1]
mydata$daytime<- define_daytimemydata_placeholder <- NULL
mydata$daytime<- define_daytime(mydata,1)
mydata$daytime
# Pairwise scatter plots and correlations of power, indoor temperature and
# the weather variables.
pairs(data.frame(jlpower,jltemperature,rain,temp,wetb,dewpt,vappr,rhum,msl,wdsp,wddir,sun,vis,clht))
#missing correlation
cor(data.frame(jlpower,jltemperature,rain,temp,wetb,dewpt,vappr,rhum,msl,wdsp,wddir,sun,vis,clht))
cor(jltemperature,temp)
cor(jltemperature,wetb)
cor(jltemperature,dewpt)
cor(jltemperature,vappr)
cor(jltemperature,rhum)
cor(jltemperature,msl)
cor(jltemperature,wdsp)
cor(temp,wetb)
cor(temp,dewpt)
cor(temp,vappr,method="spearman")
cor(temp,rhum)
cor(wetb,dewpt)
cor(wetb,vappr)
cor(dewpt,vappr,method="spearman")
cor(rhum,sun)
cor(rhum,vis)
# K-means on the standardized variables; the elbow plot guides the choice of k.
scale_data <- scale(data.frame(jlpower,jltemperature,rain,temp,wetb,dewpt,vappr,rhum,msl,wdsp,wddir,sun,vis,clht))
wss <- (nrow(scale_data)-1)*sum(apply(scale_data,2,var))
for (i in 2:15){
wss[i] <- sum(kmeans(scale_data, centers=i)$withinss)
}
plot(1:15, wss, type="b", xlab="Number of Clusters",ylab="Within Clusters Sum of Squares")
k2data<-kmeans(scale_data,2)
pairs(mydata,col=k2data$cluster)
print(k2data)
k2data$centers
# Eigen decomposition / PCA of the scaled data; k-means clusters are overlaid
# on the leading principal-component pairs.
e<-eigen(cov(scale_data))
e
plot(1:length(e$values),e$values,type="b")
sqrt(e$values)
pca<-prcomp(scale_data) #run PCA
plot(pca,type="l") #plot screeplot
summary(pca)
print(pca)
pca$rotation
plot(pca$x[,1],pca$x[,2],xlab="PC 1", ylab="PC 2",
col=k2data$cluster,pch=k2data$cluster)
plot(pca$x[,2],pca$x[,3],xlab="PC 2", ylab="PC 3",
col=k2data$cluster,pch=k2data$cluster)
plot(pca$x[,1],pca$x[,3],xlab="PC 1", ylab="PC 3",
col=k2data$cluster,pch=k2data$cluster)
attach(mydata)
# NOTE(review): numericData is not defined anywhere in this script -- this
# call will fail; confirm the intended object (perhaps scale_data or mydata).
pairs(numericData)
boxplot(log(jlpower))
pairs(~log(jlpower)+jltemperature+rain+temp+wetb+dewpt+vappr+rhum+msl+wdsp+wddir+vis+clht)
# Simple and multiple linear models for log power.
lmresult <- lm(log(jlpower)~vappr)
plot(lmresult)
summary(lmresult)
# NOTE(review): "rhum++wdsp" contains a double plus (parsed as unary +);
# harmless but likely a typo for "rhum+wdsp".
lmresultMulti <- lm(log(jlpower)~jltemperature+vappr+rhum++wdsp+sun+clht+clamt+daytime)
plot(lmresultMulti)
summary(lmresultMulti)
vif(lmresultMulti)
BootstrapRand <- function(data, mod_formula, rep) {
  # Bootstrap the coefficients of a linear model.
  #
  # data:        data frame holding the model variables.
  # mod_formula: formula passed to lm().
  # rep:         number of bootstrap resamples.
  # Returns a rep x p matrix, one row of lm coefficients per resample
  # (NULL when rep == 0, matching the original behavior).
  #
  # The original grew the result with rbind() inside the loop (O(rep^2)
  # copying) and iterated over 1:rep; collect rows in a preallocated list
  # and bind once instead.
  n <- nrow(data)
  coef_list <- vector("list", rep)
  for (i in seq_len(rep)) {
    boot_rows <- sample(n, size = n, replace = TRUE)
    fit <- lm(mod_formula, data = data[boot_rows, , drop = FALSE])
    coef_list[[i]] <- coef(fit)
  }
  do.call(rbind, coef_list)
}
# Bootstrap the multiple-regression coefficients (100000 resamples) and
# compute an R^2 from the bootstrap-mean coefficients.
lm_bs_multi <- BootstrapRand(mydata,log(jlpower)~jltemperature+vappr+rhum++wdsp+sun+clht+clamt+daytime,100000)
apply(lm_bs_multi,2,mean)
#r_sq=1-SSE/SST
SST<-sum((log(jlpower)-mean(log(jlpower)))^2)
SST
# NOTE(review): fitted_boot, fitted_boot_fix and the first SSE_boot below are
# computed but immediately superseded (SSE_boot is reassigned two lines
# later); only fitted_boot2 and the second SSE_boot feed into r_sq.
fitted_boot<-(173.408867496+12.070879079*jltemperature-22.537416249*vappr+6.101169264*wdsp-0.085239352*wddir+100.572835692*sun+0.000282637*clhtReplace+19.282570106*clamt)
fitted_boot2<-(3.332261+0.03990689*jltemperature-0.08436999*vappr+0.009241768*rhum+0.01535765*wdsp+0.480168*sun-0.0000002996450*clhtReplace+0.0597881*clamt+0.4356672*daytime)
fitted_boot_fix <- exp(fitted_boot2)
SSE_boot<-sum((jlpower-fitted_boot_fix)^2)
SSE_boot <- sum((log(jlpower) - fitted_boot2)^2)
SSE_boot
r_sq<-1-(SSE_boot/SST)
r_sq
|
dfb8e9edca87b94bf1af78e6138c10d20097cca4
|
902037115141ead7b315e7b63e437ec61c01c2c1
|
/man/rowChisq2Class.Rd
|
eef866ac1d19dbe53b935595b52e9040ed1c8811
|
[] |
no_license
|
cran/scrime
|
4bdc7e989ba9e648d004ca47cd2d10bb5e78a717
|
cf0033dbfe2a6fa807593a460ef4bcb0931db96a
|
refs/heads/master
| 2021-06-02T21:50:17.706604
| 2018-12-01T10:00:03
| 2018-12-01T10:00:03
| 17,699,500
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,302
|
rd
|
rowChisq2Class.Rd
|
\name{rowChisq2Class}
\alias{rowChisq2Class}
\alias{rowChisqMultiClass}
\title{Rowwise Pearson's ChiSquare Test Based on Tables}
\description{
Given a set of matrices, each of which represents one group of subjects, and
summarizes rowwise the numbers of these observations
showing the levels of the categorical variables represented by the rows of
the matrices, the value of Pearson's ChiSquare statistic for testing
whether the distribution of the variable differs between the different
groups is computed for each variable.
Using this function instead of \code{rowChisqStats} is recommended
when the total number of observations is very large.
}
\usage{
rowChisq2Class(cases, controls, add.pval = TRUE, sameNull = FALSE)
rowChisqMultiClass(..., listTables = NULL, add.pval = TRUE,
sameNull = FALSE)
}
\arguments{
\item{cases}{a numeric matrix in which each row represents one categorical
variable and each column one of the levels that the variables exhibit. The
entries of this matrix are the numbers of observations from one group (e.g.,
the cases in a case-control study) showing a particular
level at the different variables. Such a matrix can, e.g., be generated
by \code{\link{rowTables}}.The rowwise sums of \code{cases} are allowed to
differ between variables (which might happen when some of the observations
are missing for some of the variables).}
\item{controls}{a numeric matrix of the same dimensions as \code{cases} comprising
the numbers of observations from the second group (e.g., the controls in a
case-control study) that show the respective level at the different variables.
The rows of \code{controls} must represent the same variables in the same
order as \code{cases}, and the columns must represent the same levels in the
same order. This matrix can also be generated by employing \code{\link{rowTables}}.
The rowwise sums of \code{controls} are allowed to
differ between variables (which might happen when some of the observations
are missing for some of the variables).}
\item{...}{numeric matrices (such as \code{cases} and \code{controls}) each of which
comprises the numbers of observations showing the respective levels at the different
variables. The dimensions of all matrices must be the same, and the rows and columns
must represent the same variables and levels, respectively, in the same order in
all matrices.}
\item{listTables}{instead of inputting the matrices directly into \code{rowChisqMultiClass}
a list can generated, where each entry of this list is one of matrices, and this
list can be used in \code{rowChisqMultiClass} by specifying \code{listTables}.}
\item{add.pval}{should p-values be added to the output? If \code{FALSE}, only the
rowwise values of Pearson's ChiSquare statistic will be returned. If \code{TRUE},
additionally the degrees of freedom and the (raw) p-values are computed by assuming
approximation to the ChiSquare-distribution, and added to the output.}
\item{sameNull}{should all variables follow the same null distribution? If \code{TRUE},
then all variables must show the same number of levels such that their null distribution
is approximated by the same ChiSquare-distribution.}
}
\value{
Either a vector containing the rowwise values of Pearson's ChiSquare statistic
(if \code{add.pval = FALSE}) or a list containing these values (\code{stats}),
the degrees of freedom (\code{df}), and the p-values (\code{rawp}) not adjusted
for multiple comparisons (if \code{add.pval = TRUE}).
}
\author{Holger Schwender, \email{holger.schwender@udo.edu}}
\note{
In the case of a 2 x 2 table, no continuity correction is applied. In this
case, the results of \code{rowChisq2Class} and \code{rowChisqMultiClass} are
only equal to the results of \code{chisq.test} if in the latter \code{correct = FALSE}
is used.
The usual contingency table for a variable can be obtained from the matrices
by forming a variable-specific matrix in which each row consists of the
row of one of these matrices.
}
\seealso{\code{\link{rowChisqStats}}, \code{\link{rowTables}}, \code{\link{rowCATTs}},
\code{\link{rowMsquares}}}
\examples{\dontrun{
# Generate a matrix containing data for 10 categorical
# variables with levels 1, 2, 3.
mat <- matrix(sample(3, 500, TRUE), 10)
# Now assume that the first 25 columns correspond to
# cases and the remaining 25 columns to controls. Then
# a vector containing the class labels is given by
cl <- rep(1:2, e=25)
# and the matrices summarizing the numbers of subjects
# showing the respective levels at the different variables
# are computed by
cases <- rowTables(mat[, cl==1])
controls <- rowTables(mat[,cl==2])
# To obtain the ChiSquare values call
rowChisq2Class(cases, controls)
# This leads to the same results as
rowChisqStats(mat, cl)
# or
rowChisqMultiClass(cases, controls)
# or
listTab <- list(cases, controls)
rowChisqMultiClass(listTables = listTab)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{htest}
\keyword{array}
|
cd2a69a2074722f223a2bf31b09d6a1b6520c08f
|
10501df54cf10edc869b457b41cf3ebef02c9240
|
/man/resmerge.ij.Rd
|
7e7f4e497bbb22f9296af3eff8e1baeface3a75b
|
[] |
no_license
|
mattocci27/LeafArea
|
6f08cc2b039b5ef171a62ee250f4604b2139bb23
|
5c5fbab817c59d68fc0203d02cfe7927aa46917e
|
refs/heads/master
| 2023-08-25T15:47:09.664445
| 2023-08-10T05:48:29
| 2023-08-10T05:48:29
| 38,448,428
| 32
| 12
| null | 2023-08-10T05:48:30
| 2015-07-02T18:00:51
|
R
|
UTF-8
|
R
| false
| true
| 1,473
|
rd
|
resmerge.ij.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resmerge.ij.R
\name{resmerge.ij}
\alias{resmerge.ij}
\title{File management}
\usage{
resmerge.ij(path, prefix = "\\\\.|-")
}
\arguments{
\item{path}{Path to the target directory}
\item{prefix}{Regular expression to manage file names}
}
\value{
A data frame of total leaf area for each sample
\item{sample}{Name of sample}
\item{total.leaf.area}{Total leaf area of the sample (cm2)}
}
\description{
File management function. The output file contains sample names in the first column and total leaf area (cm2) of the sample (e.g., one individual plant or one ramet) in the second column.
}
\examples{
#prepare example files
data(leafdata)
tf <- paste(tempdir(),"/",sep="")
for (i in 1:7){
write.table(leafdata[[i]],paste(tf,names(leafdata)[i],sep=""),sep="\t")
}
#list of files
list.files(tf)
#combine multiple tab-delimited text files with a leaf area value
#(one text file for each original JPEG image file) that share the same
#filename 'prefix', defined as the part of the filename preceding the first
#hyphen (-) or period (.).
resmerge.ij(tf)
#combine multiple tab-delimited text files with a leaf area value
#(one text file for each original JPEG image file) that share the same
#filename 'prefix', defined as the part of the filename preceding the first
#'.txt'.
resmerge.ij(tf, prefix = ".txt")
unlink(list.files(tf))
}
\author{
Masatoshi Katabuchi \email{mattocci27@gmail.com}
}
|
9b38ba45b309c3585fc51943a8acff97a7ff3c35
|
4f1d5c9043a07db40001193b42fa40309ff57cc8
|
/Ngram.R
|
79c4b8fc1ece84f0a8e729a1bdaff80e6bbf940b
|
[] |
no_license
|
jaiswalvineet/next-word-predictor
|
dc9136fd6aee03bb1011b2c364bc67dffa40fe54
|
c11c09f09d0abd2872b25fca16f7ca14d522a5d5
|
refs/heads/master
| 2021-09-01T04:30:50.932849
| 2017-12-24T11:57:13
| 2017-12-24T11:57:13
| 114,632,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
Ngram.R
|
library(tidytext)
library(stringr)
library(dplyr)
library(tidyr)
GetNGram <- function(rawFromETL, x) {
  # Tokenize the cleaned corpus (column `cleanData`) into n-grams of length x
  # and return the per-token counts, with `word` as a factor whose levels are
  # ordered by frequency.
  ngrams <- unnest_tokens(rawFromETL, word, cleanData, token = "ngrams", n = x)
  counts <- count(ngrams, word, sort = TRUE)
  counts <- mutate(counts, word = reorder(word, n))
  return(counts)
}
GetDataFromFile <- function() {
  # Build 1- to 5-gram frequency tables from the English corpus and persist
  # each as a CSV file (oneGram.csv .. fiveGram.csv) in the working directory.
  # Reuses a previously loaded `rawFromETL` corpus when one is in scope;
  # otherwise loads it via GetData().
  if (!exists("rawFromETL") || !(length(rawFromETL) > 0))
  {
    rawFromETL <- GetData("en_US")
  }
  # The n-th file holds the n-gram counts; the original repeated the
  # compute/write pair five times by hand.
  gram_files <- c("oneGram.csv", "twoGram.csv", "threeGram.csv",
                  "fourGram.csv", "fiveGram.csv")
  for (n in seq_along(gram_files)) {
    write.csv(GetNGram(rawFromETL, n), file = gram_files[n])
  }
}
LoadDataFromRFile <- function(){
# Load the precomputed five-gram table from disk.
# NOTE(review): load() restores objects into this function's local
# environment, which is discarded on return -- callers likely need
# load('fiveGram.RData', envir = .GlobalEnv) or a returned object; confirm.
load('fiveGram.RData')
}
|
9cd86b79f46becae8a8911224eab0f6e092e5b84
|
d1aeb6d430dcd8b26128e5c8b1571b253ad50b93
|
/jlimR/R/fm.1cv.ver_1h4.R
|
a05574e7bb6e2879b4321f7e3d0d5cc6dfad9d90
|
[] |
no_license
|
xtmgah/jlim
|
f6dafe427cc16f12b21154fa12fec9115b4c3ac1
|
f239f542f6264347716be5d5d9c69526a49ff586
|
refs/heads/master
| 2021-01-19T22:13:37.422542
| 2016-11-18T21:36:49
| 2016-11-18T21:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,087
|
r
|
fm.1cv.ver_1h4.R
|
load.isLD1.flag <- function(file1, snpset.flag) {
# Compute the in-sample LD (Pearson correlation) matrix for cohort 1 from a
# GT0 genotype file, restricted to the SNPs selected by the logical vector
# snpset.flag. Returns a SNP x SNP correlation matrix.
# cohort 1
gt1 <- read.GT0(file1)
gt1 <- gt1[, snpset.flag]
ASSERT(ncol(gt1) == sum(snpset.flag))
# IN-SAMPLE LD: standardize genotypes and report sanity ranges.
gt0 <- stdGT(gt1)
PL("mean gt0", range(apply(gt0, 2, mean)))
PL("sd gt0", range(apply(gt0, 2, sd)))
# Correlation = cross-product of standardized columns scaled by their norms.
# The original normalized with a double for-loop over every (I, J) pair;
# dividing by the outer product of the norms is equivalent and O(n^2) work
# done in vectorized code instead of interpreted loops.
ld0 <- t(gt0) %*% gt0
var0 <- apply(gt0, 2, function(v) sqrt(sum(v * v)))
ld0 <- ld0 / tcrossprod(var0)
ld0
}
load.isLD1.ped <- function(file1, snpset.flag) {
# Compute the in-sample LD (Pearson correlation) matrix from a PLINK-style
# .ped file with character-coded alleles ("0" = missing), restricted to the
# SNPs selected by the logical vector snpset.flag.
ped <- read.table(file1, header=FALSE, stringsAsFactors=FALSE,
colClasses="character")
# Columns 7+ hold the genotypes: two allele columns per SNP.
gt <- ped[, 7:ncol(ped)]
gt <- as.matrix(gt)
# Make sure all sites are biallelic
al1 <- gt[, seq(1, ncol(gt), by=2)]
al2 <- gt[, seq(2, ncol(gt), by=2)]
# select SNPs
al1 <- al1[, snpset.flag]
al2 <- al2[, snpset.flag]
al <- rbind(al1, al2)
# Excluding the "0" missing code, each site must show at most 2 alleles.
alcount <- apply(al, 2, function (X) length(setdiff(union(X, NULL), "0")))
ASSERT(sum(alcount > 2) == 0)
# Recode
# Sample 1, allele 1 is the reference allele (:= 0), the other one is
# alt (:= 1), missing gt is NA.
# NOTE(review): `ref` below is computed but never used.
ref <- al1[1, ]
al.recode <-
matrix(apply(al, 2, function (X) as.numeric(X != X[1])),
byrow=FALSE, nrow=nrow(al), ncol=ncol(al))
al.recode[al == "0"] <- NA
# To genotype: sum the two allele rows per sample (0/1/2 alt-allele count).
N <- nrow(al.recode) / 2
gt.recode <-
matrix(apply(al.recode, 2, function (X) (X[1:N] + X[(N+1):(2*N)])),
byrow=FALSE, nrow=N, ncol=ncol(al))
ASSERT(sum(!is.na(gt.recode) & gt.recode != 0 & gt.recode != 1 &
gt.recode != 2) == 0)
# IN-SAMPLE LD: correlation = cross-product of standardized genotypes
# scaled by the per-SNP norms.
gt0 <- gt.recode
gt0 <- stdGT(gt0)
PL("mean gt0", range(apply(gt0, 2, mean)))
PL("sd gt0", range(apply(gt0, 2, sd)))
ld0 <- (t(gt0) %*% gt0)
var0 <- apply(gt0, 2, function(v) sqrt(sum(v*v)))
for (I in 1:nrow(ld0)) {
for (J in 1:nrow(ld0)) {
ld0[I, J] <- ld0[I, J]/var0[I]/var0[J]
}
}
ld0
}
load.isLD1.ped2 <- function(file1, snpset.flag) {
# Variant of load.isLD1.ped for numerically coded alleles (1/2, 0 = missing).
# Computes the in-sample LD matrix for the SNPs selected by snpset.flag,
# tolerating missing genotypes via pairwise-complete correlations.
ped <- read.table(file1, header=FALSE, stringsAsFactors=FALSE)
gt <- ped[, 7:ncol(ped)]
gt <- as.matrix(gt)
# Two allele columns per SNP.
al1 <- gt[, seq(1, ncol(gt), by=2)]
al2 <- gt[, seq(2, ncol(gt), by=2)]
# select SNPs
al1 <- al1[, snpset.flag]
al2 <- al2[, snpset.flag]
al <- rbind(al1, al2)
# Make sure all sites are biallelic
alcount <- apply(al, 2, function (X) length(setdiff(unique(X), 0)))
ASSERT(sum(alcount > 2) == 0)
# Recode
# missing gt is NA.
al.recode <-
matrix(apply(al, 2, function (X) as.numeric(X)),
byrow=FALSE, nrow=nrow(al), ncol=ncol(al))
al.recode[al == 0] <- NA
ASSERT(sum(!is.na(al.recode) & al.recode != 1 &
al.recode != 2) == 0)
# To genotype: count of "2" alleles carried per sample (0, 1 or 2).
N <- nrow(al.recode) / 2
gt.recode <-
matrix(apply(al.recode, 2,
function (X) (as.numeric(X[1:N] == 2) +
as.numeric(X[(N+1):(2*N)] == 2))),
byrow=FALSE, nrow=N, ncol=ncol(al))
ASSERT(sum(!is.na(gt.recode) & gt.recode != 0 & gt.recode != 1 &
gt.recode != 2) == 0)
# IN-SAMPLE LD
gt0 <- gt.recode
gt0 <- stdGT(gt0)
PL("mean gt0", range(apply(gt0, 2, mean, na.rm=TRUE)))
PL("sd gt0", range(apply(gt0, 2, sd, na.rm=TRUE)))
# Unlike the other loaders, missing data are handled here by cor() with
# pairwise-complete observations.
ld0 <- cor(gt0, use="pairwise.complete.obs")
ld0
}
get.dosage.altaf <- function(dosage.file, pheno.file, bpset) {
  # Per-SNP allele frequency (1 - mean dosage / 2) for the SNPs whose Start
  # position is in bpset, using only the samples listed in the phenotype
  # file. Returns a named numeric vector keyed by SNP (clone) id.
  # NOTE(review): with dosages counting alt alleles this is the reference
  # allele frequency despite the function name -- confirm the dosage coding.

  # Sample IDs come from the first column of the space-separated pheno file.
  pheno <- read.delim(pheno.file, header = FALSE, stringsAsFactors = FALSE,
                      sep = " ")
  colnames(pheno) <- c("fam", "ind", "expr")

  dosage <- read.delim(dosage.file, header = TRUE, stringsAsFactors = FALSE,
                       sep = "\t")

  # Dosage columns start at column 5; keep the requested SNP rows and only
  # the sample columns present in the phenotype file.
  dos <- dosage[, 5:ncol(dosage)]
  rownames(dos) <- dosage$clone  # SNP ids
  dos <- dos[dosage$Start %in% bpset, ]
  dos <- as.matrix(dos[, colnames(dos) %in% pheno$fam])

  1 - apply(dos, 1, mean) / 2
}
load.isLD1.dosage <- function(dosage.file, pheno.file, bpset) {
# In-sample LD matrix from a dosage file, restricted to SNPs whose Start
# position is in bpset and to the samples listed in the phenotype file.
# read in samples IDs (pheno1$fam)
pheno1 <- read.delim(pheno.file, header=FALSE, stringsAsFactors=FALSE,
sep=" ")
dim(pheno1)
colnames(pheno1) <- c("fam", "ind", "expr")
dosage1 <- read.delim(dosage.file, header=TRUE, stringsAsFactors=FALSE,
sep="\t")
dim(dosage1)
# Dosage columns start at column 5.
gt1 <- dosage1[, 5:ncol(dosage1)]
rownames(gt1) <- dosage1$clone # SNP
# reorder SNPs
gt1 <- gt1[dosage1$Start %in% bpset, ]
dim(gt1)
gt1 <- gt1[, colnames(gt1) %in% pheno1$fam]
gt1 <- as.matrix(gt1)
# Transpose to samples x SNPs before standardizing.
gt1 <- t(gt1)
dim(gt1)
# IN-SAMPLE LD
gt0 <- gt1
gt0 <- stdGT(gt0)
PL("mean gt0", range(apply(gt0, 2, mean)))
PL("sd gt0", range(apply(gt0, 2, sd)))
# Correlation = cross-product of standardized columns scaled by their norms.
ld0 <- (t(gt0) %*% gt0)
var0 <- apply(gt0, 2, function(v) sqrt(sum(v*v)))
for (I in 1:nrow(ld0)) {
for (J in 1:nrow(ld0)) {
ld0[I, J] <- ld0[I, J]/var0[I]/var0[J]
}
}
ld0
}
load.isLD1.dosage2 <- function(dosage.file, bpset) {
# Like load.isLD1.dosage, but without a phenotype file: compute the
# in-sample LD matrix from all samples for the SNPs whose Start position
# is in bpset. Returns a SNP x SNP correlation matrix.
dosage1 <- read.delim(dosage.file, header=TRUE, stringsAsFactors=FALSE,
sep="\t")
dim(dosage1)
# Dosage columns start at column 5.
gt1 <- dosage1[, 5:ncol(dosage1)]
rownames(gt1) <- dosage1$clone # SNP
gt1 <- gt1[dosage1$Start %in% bpset, ]
dim(gt1)
gt1 <- as.matrix(gt1)
# Transpose to samples x SNPs before standardizing.
gt1 <- t(gt1)
dim(gt1)
# IN-SAMPLE LD
gt0 <- stdGT(gt1)
PL("mean gt0", range(apply(gt0, 2, mean)))
PL("sd gt0", range(apply(gt0, 2, sd)))
# Correlation = cross-product of standardized columns scaled by their norms.
# The original normalized with a double for-loop over every (I, J) pair;
# dividing by the outer product of the norms is equivalent and avoids the
# interpreted O(n^2) loop.
ld0 <- t(gt0) %*% gt0
var0 <- apply(gt0, 2, function(v) sqrt(sum(v * v)))
ld0 <- ld0 / tcrossprod(var0)
ld0
}
process.refLD.impute <- function(refgt, refgt.all.sel, af.check=NULL) {
# Reference-panel LD matrix from IMPUTE-style haplotype data (0/1 alleles
# from column 5 onward), for the rows selected by refgt.all.sel. When
# af.check is supplied, reference vs in-sample alt allele frequencies are
# plotted as a sanity check.
refgt0 <- refgt[refgt.all.sel, ]
# Sort by position; the position column is named BP or POS depending on input.
if ("BP" %in% colnames(refgt0)) {
refgt0 <- refgt0[order(refgt0$BP), ]
} else if ("POS" %in% colnames(refgt0)) {
refgt0 <- refgt0[order(refgt0$POS), ]
}
ASSERT(sum(refgt.all.sel) == nrow(refgt0))
PL("RefLD cols", paste(paste(colnames(refgt0)[1:4], collapse=" "),
colnames(refgt0)[5], sep=" / "))
refgt.mat <- refgt0[, 5:ncol(refgt0)]
refgt.mat <- as.matrix(refgt.mat)
ASSERT(sum(refgt.mat != 0 & refgt.mat != 1) == 0)
# CONVERT
# ref allele := 0
# alt allele := 1
# TO
# ref allele := 1
# alt allele := 2
refgt.mat <- refgt.mat + 1
refgt.mat <- toGT(refgt.mat)
rownames(refgt.mat) <- refgt0$BP
if (!is.null(af.check)) {
meanF0 <- apply(refgt.mat, 1, mean)/2
plot(meanF0, af.check, xlab="Alt AF (RefGT)",
ylab="Alt AF (In-sample)")
}
# LD = correlation of standardized genotypes (cross-product over norms).
gt0 <- stdGT(t(refgt.mat))
ld0 <- (t(gt0) %*% gt0)
var0 <- apply(gt0, 2, function(v) sqrt(sum(v*v)))
for (I in 1:nrow(ld0)) {
for (J in 1:nrow(ld0)) {
ld0[I, J] <- ld0[I, J]/var0[I]/var0[J]
}
}
ASSERT(sum(refgt.all.sel) == nrow(ld0))
ld0
}
process.refLD.vcf <- function(refgt, refgt.all.sel, af.check=NULL) {
# Reference-panel LD matrix from VCF-style data (allele calls from column 8
# onward, compared against the REF column), for the rows selected by
# refgt.all.sel. When af.check is supplied, plots reference vs in-sample
# MAFs and prints the 10 SNPs with the largest MAF discrepancy.
refgt0 <- refgt[refgt.all.sel, ]
refgt0 <- refgt0[order(refgt0$POS), ]
ASSERT(sum(refgt.all.sel) == nrow(refgt0))
refgt.mat <- refgt0[, 8:ncol(refgt0)]
refgt.mat <- as.matrix(refgt.mat)
# "." would mean a missing call; none are allowed here.
ASSERT(sum(refgt.mat == ".") == 0)
# ref allele := 1
refgt.mat <-
t(sapply(1:nrow(refgt.mat),
function(I) as.numeric(refgt.mat[I, ] == refgt0$REF[I]),
simplify=TRUE))
# alt allele := 2
refgt.mat[refgt.mat != 1] <- 2
refgt.mat <- toGT(refgt.mat)
rownames(refgt.mat) <- refgt0$POS
if (!is.null(af.check)) {
# meanF0 <- apply(refgt.mat, 1, mean)/2
# plot(meanF0, af.check, xlab="Alt AF (RefGT)",
# ylab="Alt AF (In-sample)")
# Fold allele frequency to MAF before comparing with the in-sample values.
meanF0 <- apply(refgt.mat, 1, mean)/2
meanF0 <- pmin(meanF0, 1 - meanF0)
plot(meanF0, af.check, xlab="MAF (RefLD)",
ylab="MAF (In-sample)")
af.check.tab <- cbind(refgt0[, 1:3], meanF0, af.check)
af.check.tab <-
af.check.tab[order(abs(meanF0 - af.check), decreasing=TRUE), ]
print(af.check.tab[1:10, ])
}
# LD = correlation of standardized genotypes (cross-product over norms).
gt0 <- stdGT(t(refgt.mat))
ld0 <- (t(gt0) %*% gt0)
var0 <- apply(gt0, 2, function(v) sqrt(sum(v*v)))
for (I in 1:nrow(ld0)) {
for (J in 1:nrow(ld0)) {
ld0[I, J] <- ld0[I, J]/var0[I]/var0[J]
}
}
ASSERT(sum(refgt.all.sel) == nrow(ld0))
ld0
}
calcMAF.refLD.vcf <- function(refgt, refgt.all.sel) {
# Minor allele frequencies from a VCF-style reference panel for the rows
# selected by refgt.all.sel, sorted by POS.
# NOTE(review): duplicates the genotype-recoding logic of process.refLD.vcf;
# a shared helper would keep the two in sync.
refgt0 <- refgt[refgt.all.sel, ]
refgt0 <- refgt0[order(refgt0$POS), ]
ASSERT(sum(refgt.all.sel) == nrow(refgt0))
refgt.mat <- refgt0[, 8:ncol(refgt0)]
refgt.mat <- as.matrix(refgt.mat)
ASSERT(sum(refgt.mat == ".") == 0)
# ref allele := 1
refgt.mat <-
t(sapply(1:nrow(refgt.mat),
function(I) as.numeric(refgt.mat[I, ] == refgt0$REF[I]),
simplify=TRUE))
# alt allele := 2
refgt.mat[refgt.mat != 1] <- 2
refgt.mat <- toGT(refgt.mat)
rownames(refgt.mat) <- refgt0$POS
# meanF0 <- apply(refgt.mat, 1, mean)/2
# plot(meanF0, af.check, xlab="Alt AF (RefGT)",
# ylab="Alt AF (In-sample)")
# Fold allele frequency to the minor-allele side.
meanF0 <- apply(refgt.mat, 1, mean)/2
meanF0 <- pmin(meanF0, 1 - meanF0)
# plot(meanF0, af.check, xlab="MAF (RefLD)",
# ylab="MAF (In-sample)")
meanF0
}
load.mperm <- function (permfile) {
  # Read a space-separated permutation table (no header) and return the
  # permuted statistics as a numeric matrix, one row per permutation run.
  perm <- read.delim(permfile, sep = " ", header = FALSE,
                     stringsAsFactors = FALSE)

  # Row(s) with run number < 1 hold the observed (non-permuted) values; keep
  # only real permutation rows.
  perm <- perm[perm[, 1] >= 1, ]

  # A trailing separator yields an all-NA last column; drop it when present.
  last <- ncol(perm)
  if (all(is.na(perm[, last]))) {
    perm <- perm[, seq_len(last - 1)]
  }

  # Strip the run-number column.
  as.matrix(perm[, 2:ncol(perm)])
}
calc.stat <- function (assoc1, assoc2, ld0, ld2, R2thr) {
# Gap statistic: over SNPs in LD (r^2 >= R2thr) with trait 1's peak SNP,
# the relP1-weighted mean of trait 2's log-likelihood advantage (Z^2/2) at
# each local SNP over the best trait-2 SNP outside that SNP's LD
# neighborhood. ld0/ld2 are the LD matrices used for traits 1/2.
# FIXED for 1h4
logP1 <- (abs(assoc1$Z) ** 2) / 2
logP2 <- (abs(assoc2$Z) ** 2) / 2
ASSERT(sum(is.na(assoc1$Z)) == 0)
ASSERT(sum(is.na(assoc2$Z)) == 0)
### PEAK SELECTION (local)
maxI1 <- which.max(logP1)
relP1 <- exp(logP1 - max(logP1))
# postP1 is only used in the diagnostic cat() output below.
postP1 <- exp(logP1) / sum(exp(logP1))
local <- which(ld0[maxI1, ]**2 >= R2thr)
gap <- 0
sumRelP1 <- sum(relP1[local])
for (I in local) {
gap <- gap +
relP1[I] * (logP2[I] - max(logP2[ld2[I, ]**2 < R2thr]))
cat(paste("*",
relP1[I],
postP1[I],
ld0[maxI1, I]**2,
logP2[I] - max(logP2[ld2[I, ]**2 < R2thr]),
"Farthest:",
max(ld2[I, (ld2[I, ]**2 < R2thr)]**2),
"\n")
)
}
gap <- gap / sumRelP1
# NOTE(review): the bare "gap" on the next line is a no-op inside a function.
gap
PL("--GAP", gap)
gap
}
# FOR SIMULATED NULL DIST
perm.test <- function (assoc1, assoc2, permmat, ld0, ld2,
thresholdingP, R2thr, lambda.t) {
# Null distribution of the gap statistic (cf. calc.stat): each row of
# permmat provides permuted trait-2 Z scores. SNPs are restricted to those
# passing the p-value threshold in trait 1 or in the permuted trait 2.
# Returns the vector of null gap values (NA where fewer than 2 markers
# passed); may stop early after 100 permutations (see below).
thresholdingZ <- PtoZ(thresholdingP)
# FIXED in version 1h4
logP1 <- (abs(assoc1$Z) ** 2) / 2
# SNP selection part 1
markers.p1 <- abs(assoc1$Z) >= thresholdingZ
# peak selection
maxlogP1 <- max(logP1)
maxI1 <- which.max(logP1)
relP1 <- exp(logP1 - maxlogP1)
# iterate
simNo <- 1
NULLGAP <- c()
ASSERT(ncol(permmat) == nrow(ld0))
ASSERT(ncol(permmat) == nrow(ld2))
for (simNo in 1:nrow(permmat)) {
assoc2n.Z <- permmat[simNo, ]
ASSERT(sum(is.na(assoc2n.Z)) == 0)
# SNP selection part 2
markers.p <-
markers.p1 | (abs(assoc2n.Z) >= thresholdingZ)
ASSERT(markers.p[maxI1]) # always include maxI1
# need multiple markers
if (sum(markers.p) > 1) {
local <- intersect(which(ld0[maxI1, ]**2 >= R2thr),
which(markers.p))
# FIXED in 1h4
logP2n <- (abs(assoc2n.Z) ** 2) / 2
gap <-
sum(relP1[local] *
sapply(local, function (I)
(logP2n[I] -
max(logP2n[(ld2[I, ]**2 < R2thr) & markers.p])),
simplify=TRUE))
NULLGAP[simNo] <- gap / sum(relP1[local])
}
# Adaptive early stop: after 100 complete permutations, quit if the
# statistic is clearly non-significant (partial p-value > 0.1 vs lambda.t).
if (simNo == 100 && sum(is.na(NULLGAP)) == 0) {
pv.partial <-
sum(NULLGAP >= lambda.t) / length(NULLGAP)
if (pv.partial > 0.1) {
break
}
}
}
NULLGAP
}
|
5109d54d4d2863391a8b060878dbf2d24de6f7e1
|
57122870e76b44ca4b85fd91a8ca2172ad28d077
|
/App_directory/AirBNB_Final _project/server.R
|
b042d7d51adba469268f8b0f09834acfa0eb634f
|
[] |
no_license
|
L-HommeSage/Airbnb_Data_Analytics_Shiny
|
21bea3924b487ce288991bbc7373a1865e640879
|
817c807e8f915ba133d666056fd2037a823a5341
|
refs/heads/main
| 2023-01-24T14:56:48.539829
| 2020-11-25T20:17:40
| 2020-11-25T20:17:40
| 314,373,463
| 0
| 0
| null | 2020-11-21T18:10:18
| 2020-11-19T21:24:25
|
R
|
UTF-8
|
R
| false
| false
| 8,313
|
r
|
server.R
|
library(shiny)
library(dplyr)
library(ggplot2)
library(rlist)
library(googleVis)
source("getGzCsv.R")
source("utils_tab_deep_dive.R")
source("utils_tab_data.R")
source("utils_tab_comparing_cities.R")
function(input, output, session) {
# Initialization
berlin <- get_data("https://github.com/L-HommeSage/Airbnb_Data_Analytics_Shiny/raw/main/CSV/berlin.csv.gz")
girona <- get_data("https://github.com/L-HommeSage/Airbnb_Data_Analytics_Shiny/raw/main/CSV/girona.csv.gz")
lyon <- get_data("https://github.com/L-HommeSage/Airbnb_Data_Analytics_Shiny/raw/main/CSV/lyon.csv.gz")
## FIRST TAB
output$main_plot <- renderPlot({
names <- names(berlin)
berlin_filtered <- data.frame()
girona_filtered <- data.frame()
lyon_filtered <- data.frame()
for (k in names){
berlin_filtered[[k]]
girona_filtered[[k]]
lyon_filtered[[k]]
}
if("0" %in% input$cities)
berlin_filtered <- subset(berlin, berlin$data_date == "13/10/2020")
if("1" %in% input$cities)
berlin_filtered <- rbind(berlin_filtered, subset(berlin, berlin$data_date == "23/09/2020"))
if("2" %in% input$cities)
berlin_filtered <- rbind(berlin_filtered, subset(berlin, berlin$data_date == "30/08/2020"))
if("3" %in% input$cities)
girona_filtered <- subset(girona, girona$data_date == "28/10/2020")
if("4" %in% input$cities)
girona_filtered <- rbind(girona_filtered, subset(berlin, berlin$data_date == "29/06/2020"))
if("5" %in% input$cities)
girona_filtered <- rbind(girona_filtered, subset(berlin, berlin$data_date == "28/05/2020"))
if("6" %in% input$cities)
lyon_filtered <- subset(lyon, lyon$data_date == "24/10/2020")
if("7" %in% input$cities)
lyon_filtered <- rbind(lyon_filtered, subset(lyon, lyon$data_date == "19/09/2020"))
if("8" %in% input$cities)
lyon_filtered <- rbind(lyon_filtered, subset(lyon, lyon$data_date == "31/08/2020"))
berlin_clean_to_plot <- get_aggregation(input$aggregation, input$feature, berlin_filtered)
girona_clean_to_plot <- get_aggregation(input$aggregation, input$feature, girona_filtered)
lyon_clean_to_plot <- get_aggregation(input$aggregation, input$feature, lyon_filtered)
df <- get_df(input$cities, berlin_clean_to_plot, girona_clean_to_plot, lyon_clean_to_plot)
get_ggplot(input$plot, input$aggregation, input$xlogscale, input$ylogscale,
df,
get_title(input$aggregation, input$feature),
get_x_label(input$feature, input$plot),
get_y_label(input$feature, input$plot)
)
})
output$error <- renderText({
get_error(input$plot, input$aggregation)
})
output$multi_var_plot <- renderPlot({
names <- names(berlin)
df <- data.frame()
for (k in names) df[[k]]
if("0" %in% input$cities || "1" %in% input$cities || "2" %in% input$cities )
df <- rbind(df, berlin)
if("3" %in% input$cities || "4" %in% input$cities || "5" %in% input$cities)
df <- rbind(df, girona)
if("6" %in% input$cities || "7" %in% input$cities || "8" %in% input$cities)
df <- rbind(df, lyon)
ggplot(df) +
geom_bar(aes(
x=get_x_feature(input$x_feature, df),
y=get_y_feature(input$y_feature, df),
fill=city),
stat="identity",
position = "dodge")
})
## SECOND TAB
observeEvent(input$map_city, {
table = get_table(input$map_city, berlin, girona, lyon)
max_revenue = max(table$revenue_30)
max_bedrooms = max(table$bedrooms)
updateSliderInput(session, "revenue_slider", value=c(0, max_revenue), max=max_revenue)
updateSliderInput(session, "bedrooms_slider", value=c(0, max_bedrooms), max=max_bedrooms)
updateSelectInput(session, "house_type", choices=unique(table$property_type))
updateSelectInput(session, "room_type", choices=unique(table$room_type))
})
output$map <- renderGvis({
gvisMap(
get_map_filtered(
input$revenue_slider,
input$availability_slider,
input$house_type,
input$bedrooms_slider,
input$room_type,
get_table(input$map_city, berlin, girona, lyon)
),
"latlong",
"revenue_30",
options=list(region="DE"))
})
output$insigths_panel <- renderText({
map_dataframe <- get_map_filtered(
input$revenue_slider,
input$availability_slider,
input$house_type,
input$bedrooms_slider,
input$room_type,
get_table(input$map_city, berlin, girona, lyon)
)
paste("Number of matching listings : ", nrow(map_dataframe))
})
output$insight_plot_1 <- renderPlot({
map_dataframe <- get_map_filtered(
input$revenue_slider,
input$availability_slider,
input$house_type,
input$bedrooms_slider,
input$room_type,
get_table(input$map_city, berlin, girona, lyon)
)
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold")
)
ggplot(map_dataframe, aes(x = factor(1), fill=room_type)) +
geom_bar(width = 1) +
coord_polar("y") +
ggtitle("Proportion of each room type") +
blank_theme +
theme(axis.text.x=element_blank(), legend.position="bottom")
})
output$insight_plot_2 <- renderPlot({
map_dataframe <- get_map_filtered(
input$revenue_slider,
input$availability_slider,
input$house_type,
input$bedrooms_slider,
input$room_type,
get_table(input$map_city, berlin, girona, lyon)
)
blank_theme <- theme_minimal()+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title=element_text(size=14, face="bold")
)
ggplot(map_dataframe, aes(x = factor(1), fill=property_type)) +
geom_bar(width = 1) +
coord_polar("y") +
ggtitle("Proportion of each property type") +
blank_theme +
theme(axis.text.x=element_blank(), legend.position="bottom")
})
output$insight_plot_3 <- renderPlot({
map_dataframe <- get_map_filtered(
input$revenue_slider,
input$availability_slider,
input$house_type,
input$bedrooms_slider,
input$room_type,
get_table(input$map_city, berlin, girona, lyon)
)
ggplot(map_dataframe, aes(y=bedrooms)) +
geom_boxplot(alpha=0.3) +
ggtitle("Distribution of bedrooms :") +
theme(legend.position="none") +
ylim(0, max(map_dataframe$bedrooms) + 2) +
labs(y="Bedrooms")
})
output$insight_plot_4 <- renderPlot({
  # Boxplot of availability over the next 30 days for the filtered
  # listings; the y axis is fixed to [0, 32] (a month plus padding).
  listings <- get_map_filtered(
    input$revenue_slider,
    input$availability_slider,
    input$house_type,
    input$bedrooms_slider,
    input$room_type,
    get_table(input$map_city, berlin, girona, lyon)
  )
  ggplot(listings, aes(y = availability_30)) +
    geom_boxplot(alpha = 0.3) +
    ggtitle("Distribution of availability for the next 30 days :") +
    theme(legend.position = "none") +
    ylim(0, 32) +
    labs(y = "Days of availability")
})
output$insight_plot_5 <- renderPlot({
  # Boxplot of projected 30-day revenue for the filtered listings,
  # drawn on a log10 axis (revenue is heavily right-skewed).
  map_dataframe <- get_map_filtered(
    input$revenue_slider,
    input$availability_slider,
    input$house_type,
    input$bedrooms_slider,
    input$room_type,
    get_table(input$map_city, berlin, girona, lyon)
  )
  # Fix: the original added both ylim(0, max + 500) and scale_y_log10().
  # Both define the continuous y scale, so ggplot2 dropped the ylim()
  # with a "Scale for 'y' is already present" message — and a 0 lower
  # limit is invalid on a log axis anyway. Keep only the log scale,
  # which is what was actually rendered before.
  ggplot(map_dataframe, aes(y = revenue_30)) +
    geom_boxplot(alpha = 0.3) +
    ggtitle("Distribution of revenue for the next 30 days :") +
    theme(legend.position = "none") +
    scale_y_log10() +
    labs(y = "Revenue")
})
## THIRD TAB
output$data <- renderGvis({
  # Paginated googleVis table showing the raw listings for the city
  # selected on the data tab; page size follows the user input.
  table_options <- list(
    page = TRUE,
    pageSize = input$pagesize,
    width = 800
  )
  gvisTable(get_table(input$data_city, berlin, girona, lyon),
            options = table_options)
})
}
|
75b36bb843cf4f928a64c73c50295771b48e752f
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3066_1/rinput.R
|
edab04189d1046db6d43e74b4d337444e2c39e8f
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
# Read the Newick tree for gene family 3066_1, remove its root, and
# write the unrooted tree back out (codeml expects unrooted input).
testtree <- read.tree("3066_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3066_1_unrooted.txt")
|
5e54dd8c23c37a13618052042a920c1e77873497
|
78d9d12918281c917925dbbfcb3525f100c27df8
|
/TIDYVERSE.R
|
b796971f86690eb2b8b2e3898b5bcf9d7c26b1e0
|
[] |
no_license
|
JoeNoonan/tidyverse-workshops
|
cec3918aaef28a1a66237741725924981abc6f4b
|
c150a5dde0c2715c3d588ccdfefba10286c304eb
|
refs/heads/master
| 2020-09-07T14:18:14.392723
| 2019-11-28T16:10:04
| 2019-11-28T16:10:04
| 220,807,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,557
|
r
|
TIDYVERSE.R
|
library(tidyverse)
library(janitor)
### Read in data
### Reading Data Locally
gsodi_long <- read_csv("gsodi_long.csv")
### Reading Data from URL
gsodi_wide <- read_csv("https://www.idea.int/gsod-indices/sites/default/files/gsodi_pv_3.csv")
### NOTE: the file format is important. These commands open .csv (comma-separated values)
### If you want to import excel data you need to use other functions.
### Exploring the dataframe
### Find all of the variable names
names(gsodi_long)
### Look at the data like in Excel
View(gsodi_long)
### Summary of the dataframe (more complex)
str(gsodi_long)
### Dimensions of dataframe (rows by columns)
dim(gsodi_long)
### Top of the dataframe
head(gsodi_long)
### Bottom of the data frame
tail(gsodi_long)
### Long data versus wide data
### Compare gsodi_long and gsodi_wide. What are the differences between these two datasets?
### Filtering
### and
### Selecting
### Works the same as in excel but you type it out!
filter(.data = gsodi_long, ID_year == 2018)
### You don't need the .data = argument. I've only included it for clarity.
### Using %in% and c(x,x) lets you pick out specific criteria
filter(gsodi_long, ID_year %in% c(1975, 2018))
filter(gsodi_long, ID_country_name == "United States")
filter(gsodi_long, ID_country_name %in% c("United States", "Sweden"))
# Boolean Operators in R
# Relational Operators
# < Less Than
# > Greater than
# <= Less than or equal to
# >= Greater than or equal to
# == Equal to
# != Not equal to
### What are the differences between these two operations?
lt <- filter(gsodi_long, ID_year < 1980)
lte <- filter(gsodi_long, ID_year <= 1980)
### In base R to explore a variable you use $ to specify what variable of a dataframe.
### For instance to look at all values you would do gsodi_long$value
### This is used sometimes in the tidyverse, but not too much.
### Summary for each filtered data frame
summary(lt$ID_year)
summary(lte$ID_year)
# Logical Operators in R
# ! Logical NOT
# & Logical
# | Logical or
# What does each of these queries return? Read each like a sentence!
# "Return everything where..."
# & operator is not needed normally
filter(gsodi_long, ID_year == 2018, ID_country_name == "Honduras")
# Is the same as...
filter(gsodi_long, ID_year == 2018 & ID_country_name == "Honduras")
# neg_sig_10_years is a flag for a significant decrease in the last 10 years, 1 means there has been
# regime_status_name is the class of regime
filter(gsodi_long, ID_year == 2018, neg_sig_10_years == 1, regime_status_name != "Hybrid" )
filter(gsodi_long, ID_year == 2018 & neg_sig_10_years == 1 | ID_year == 2018 & neg_sig_5_years == 1)
# You can also break up filter queries so that you have one line for each critera
filter(gsodi_long,
ID_variable_name == "Representative Government",
ID_year < 2000 & ID_year >= 1990,
value >= 0.50,
neg_sig_5_years == 1)
### Selecting
# What do you actually need to look at?
select(gsodi_long, value)
# Selecting allows you to select variables.
select(gsodi_long, ID_year, ID_country_name, ID_region_name, ID_variable_name, value)
select(gsodi_long, ID_year, ID_country_name, ID_region_name, ID_variable_name, value, regime_status_name)
# How do you combine these?
# Ineffecient way
# Create a new dataframe of filtered data
gsodi_long_honduras_2018_1 <- filter(gsodi_long, ID_year == 2018, ID_country_name == "Honduras")
# Create a new dataframe selecting the proper variables.
gsodi_long_honduras_2018_2 <- select(gsodi_long_honduras_2018_1,
ID_year, ID_year, ID_country_name, ID_region_name, ID_variable_name, value)
gsodi_long_honduras_2018_2
# The pipe %>% (shft-ctrl-m)
# What the pipe does is it takes the output of the function on the
# left and feeds it to the right function as its first argument.
# Or in english "do this and then this"
# Filter and then select
filter(gsodi_long, ID_year == 2018, ID_country_name == "Honduras") %>% # Do this...
select(ID_year, ID_year, ID_country_name, ID_region_name, ID_variable_name, value) # then this.
# Everything to the left is always placed in the first argument to the right.
gsodi_long %>% names()
# Is the same as
names(gsodi_long)
# The Pipe function is the key to stringing together analysis!!!
####
#### Tidyverse assignments 1
####
### Transformations!
### Making new variables! through mutate()
# mutate() works like this mutate(df, new_variable = (operation for new variable))
gsodi_long %>%
mutate(above_index = above_world_average + above_region_average) %>% # Create the new variable, in this case just adding some flags for if a country is above or below the global average
select(ID_year, ID_year, ID_country_name, ID_region_name, ID_variable_name,
value, above_index, above_world_average, above_region_average) %>% # Select relevant variables
arrange(desc(above_index)) # Arrange by new variable above_index
# mutate() is a very useful tool, however, I have already created most of the new variables that you'd need.
# This is one of the advantages of the long dataset. One can store data about each of the indices variables through mutate.
# For example if the country-year-variable is above or below the regional average
# The group_by() function greatly expands the functionality of mutate()
# This is how you would create for regional averages
regional_value_mutate_df <- gsodi_long %>%
group_by(ID_year, ID_variable_name, ID_region_name) %>% # Perform next operations by year, variable and region
mutate(regional_value_joe = mean(value, na.rm = TRUE))%>% # mean is the function for average
select(ID_year, ID_year, ID_country_name, ID_region_name, ID_variable_name,
value, regional_value_joe, region_value)
filter(regional_value_mutate_df, ID_year == 2018,
ID_region_name == "Europe",
ID_variable_name == "Clean Elections")
# We used group_by(ID_year,ID_variable_name, ID_region_name),
# What variables would you choose to group make a global average?
# Summarize data
# Like mutate but collapses or distills the output of the group
# Compare the summarized operation with the mutated operation
regional_value_summarize_df<- gsodi_long %>%
group_by(ID_year, ID_variable_name, ID_region_name) %>% # Perform next operations by year, variable and region
summarize(regional_value_joe = mean(value, na.rm = TRUE))
regional_value_summarize_df
regional_value_mutate_df
# Both mutate and summarize can make multiple new variable .
gsodi_long %>%
group_by(ID_year, ID_variable_name, ID_region_name) %>% # Perform next operations by year, variable and region
summarize(regional_value_joe = mean(value, na.rm = TRUE),
regional_min = min(value, na.rm = TRUE),
regional_max = max(value, na.rm = TRUE))
# Sometimes you have to use the ungroup() function if you want to regroup with different variables
gsodi_long %>%
group_by(ID_year, ID_variable_name, ID_region_name) %>% # Perform next operations by year, variable and region
summarize(regional_value_joe = mean(value, na.rm = TRUE),
regional_min = min(value, na.rm = TRUE),
regional_max = max(value, na.rm = TRUE)) %>%
ungroup() %>%
mutate(regional_value_0_100 = regional_value_joe * 100)
####
#### Tidyverse Assignment 2
####
### WDI Example
### Janitor
# "Data science is mostly counting things"
# tabyl() function from the janitor package.
# Uses tidyverse standards %>%, group_by etc.
# Used to make frequency tables.
# One way tabyls
# Performance class of Representative Government (high, low, mid-range) in 2018
filter(gsodi_long, ID_variable_name == "Representative Government", ID_year == 2018) %>% # Filter out critera
tabyl(var1= perfom_class_var_name, show_missing_levels = FALSE)
### Adorn percentage formating (makes it look pretty)
filter(gsodi_long, ID_variable_name == "Representative Government", ID_year == 2018) %>% # Filter out critera
tabyl(var1= perfom_class_var_name, show_missing_levels = FALSE) %>%
adorn_pct_formatting()
#Two way tabyls
filter(gsodi_long, ID_variable_name == "Representative Government", ID_year == 2018) %>% # Filter out critera
tabyl(var1 = perfom_class_var_name, var2 = regime_status_name, show_missing_levels = FALSE) %>%
adorn_totals(c("row", "col")) %>% # adds total column to the rows and collumns
adorn_percentages("col") %>% # adds percentages for the columns
adorn_pct_formatting() %>% # adds percentage formating
adorn_ns() # adds ns
|
8d0bcc2e6ad0ce542f7badfca3e65ca8e5f2a7a1
|
0a38873c36cead1262eb3ac75a8ab1b93c4bef5a
|
/R/stdViz.R
|
7271904e075e32db822f7043dcaffcec778674ed
|
[
"MIT"
] |
permissive
|
jberesni/ASHviz
|
bbeb0a64d7f9b8b9ee108024f558bd33dd08667f
|
7deff49b1d94591f2813747cdf399aa7a66e785b
|
refs/heads/master
| 2020-04-01T03:38:53.929801
| 2019-05-16T18:42:54
| 2019-05-16T18:42:54
| 152,830,383
| 6
| 3
|
MIT
| 2018-10-13T03:54:39
| 2018-10-13T03:14:36
|
R
|
UTF-8
|
R
| false
| false
| 2,244
|
r
|
stdViz.R
|
###########################################################
# AAS over time by Instance/Wait Class etc (Top Activity)
###########################################################
#
# build CPU, WAIT summary dataset of all instances over all samples
d <- ashDF %>% group_by(INSTANCE_NUMBER,SAMPLE_TIME,SAMPLE_ID,WAITCLASS) %>% summarize(AS=n())
#
# use SAMPLE_TIME for x-axis, peaks line up
ggplot(data=d,aes(x=SAMPLE_TIME,y=AS,color=WAITCLASS)) + geom_point(alpha=.5)
ggsave("./Plots/stdViz01.png", device="png")
ggplot(data=d,aes(x=SAMPLE_TIME,y=AS,color=INSTANCE_NUMBER)) + geom_point(alpha=.5)
ggsave("./Plots/stdViz02.png", device="png")
# facet by INSTANCE to compare
ggplot(data=d,aes(x=SAMPLE_TIME,y=AS,color=WAITCLASS)) + geom_point() + facet_wrap(~INSTANCE_NUMBER)
ggsave("./Plots/stdViz03.png", device="png")
#
# use SAMPLE_ID for x-axis, note difference
ggplot(data=d,aes(x=SAMPLE_ID,y=AS,color=WAITCLASS)) + geom_point(alpha=.5)
ggsave("./Plots/stdViz04.png", device="png")
# color by INSTANCE
ggplot(data=d,aes(x=SAMPLE_ID,y=AS,color=INSTANCE_NUMBER)) + geom_point(alpha=.5)
ggsave("./Plots/stdViz05.png", device="png")
# facet by INSTANCE and see shifts in peak
ggplot(data=d,aes(x=SAMPLE_ID,y=AS,color=WAITCLASS)) + geom_point(alpha=.5) + facet_wrap(~INSTANCE_NUMBER)
ggsave("./Plots/stdViz06.png", device="png")
#
# investigate relationship of SAMPLE_TIME and SAMPLE_ID
d2 <- d %>% distinct(INSTANCE_NUMBER,SAMPLE_ID,SAMPLE_TIME)
#
ggplot(data=d2, aes(x=SAMPLE_ID, y=SAMPLE_TIME, color=INSTANCE_NUMBER)) + geom_line(size=2)
ggsave("./Plots/stdViz07.png", device="png")
#
#
# create one-minute summaries of AAS by WAITCLASS and INSTANCE
d3 <- ashDF %>%
group_by(INSTANCE_NUMBER,SAMPLE_TIME,WAITCLASS) %>%
summarize(AS=n()) %>%
mutate(MIN=as.character(trunc(SAMPLE_TIME,units=c("mins")))) %>%
group_by(INSTANCE_NUMBER,MIN,WAITCLASS) %>%
summarize(AAS = sum(AS)/60)
#
# plot global AAS by WAITCLASS per minute
ggplot(data=d3,aes(x=MIN,y=AAS,color=WAITCLASS)) +
geom_col(aes(fill=WAITCLASS)) +
scale_x_discrete(labels = NULL) + ylab("Avg Active Sessions")
ggsave("./Plots/stdViz08.png", device="png")
# cleanup
rm(d)
rm(d2)
rm(d3)
|
0eeceafa47e3e47edac4f6c4871f102ddb3799c9
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848534-test.R
|
b7e8a3cba1386d839e2a3132c1ef6afd7c4a788f
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
1615848534-test.R
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(1.03878561526028e-13, 1.23255483898152e-250, -4.20274050050721e+305, 9.32946680697566e+98, 1.22430160524861e-250, -7.14714235507037e-15, 3.29092470798389e-270, 3.55259342257796e+59, 6.38734728873395e+149, 1.2213594310705e+35, 7.27044912882664e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
9993aa6de72939a15c1467d450e45a0c2823435d
|
57cc155e66f317cb235ff8c2df89a881aa8c5c76
|
/functions/symmetrise_scale.R
|
632adbaaa98dc6aaf28019928b8d9c20e05f6258
|
[] |
no_license
|
JustinCally/SexualSelection_Speciation
|
dfbae89b77966049671ba7416915844fad66cb7d
|
42147a432ba3566398c1881fa91ceb6df043568c
|
refs/heads/master
| 2021-06-05T14:14:26.780188
| 2021-03-02T22:08:58
| 2021-03-02T22:08:58
| 137,003,444
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,170
|
r
|
symmetrise_scale.R
|
# Symmetrise the x (or y) axis of every facet_wrap panel about zero.
# Works by injecting invisible geom_blank() data at +/- the largest
# absolute panel limit, so ggplot widens each facet's scale.
# NOTE(review): this reaches into ggplot_build() internals whose slot
# names changed across ggplot2 releases (panel_layout/panel_ranges vs
# layout/panel_params); only two generations are handled — confirm
# against the installed ggplot2 version before relying on it.
symmetrise_scale <- function(p, axis = "x") {
  gb <- ggplot2::ggplot_build(p)
  type <- switch(axis, "x" = "x.range", "y" = "y.range")
  # Facet variable names = layout columns that are not bookkeeping ones
  fname <- setdiff(names(gb$layout$panel_layout), c("PANEL", "ROW", "COL", "SCALE_X", "SCALE_Y"))
  lims <- do.call(cbind, lapply(gb$layout$panel_ranges, "[[", type))
  facets <- gb$layout$panel_layout[, fname, drop = FALSE]
  # dev version ggplot2_2.2.1.9000 breaks everything yet again
  if (utils::packageVersion("ggplot2") >= "2.2.1.9") {
    fname <- setdiff(names(gb$layout$layout), c("PANEL", "ROW", "COL", "SCALE_X", "SCALE_Y"))
    lims <- do.call(cbind, lapply(gb$layout$panel_params, "[[", type))
    facets <- gb$layout$layout[, fname, drop = FALSE]
  }
  # Per panel: (-max|lim|, +max|lim|), interleaved into one vector
  lims2 <- as.vector(t(tcrossprod(apply(abs(lims), 2, max), c(-1, 1))))
  # Two dummy rows per facet carrying the symmetric limits
  dummy <- stats::setNames(data.frame(facets[rep(seq_len(nrow(facets)), each = 2), ], lims2), c(fname, axis))
  switch(axis,
    "x" = p + geom_blank(data = dummy, aes_string(x = "x", y = "Inf"), inherit.aes = FALSE),
    "y" = p + geom_blank(data = dummy, aes_string(x = "Inf", y = "y"), inherit.aes = FALSE)
  )
}
|
0865633bdd2cc9d04ff44abf18d13c88651cb330
|
f713e7474358ca4ed3be9e73a5c389daaee5d193
|
/HW1_Code and Report/HW1 Q1.R
|
a4b0205c8ef314fce8d1ad9fca6d04ef0d01a176
|
[] |
no_license
|
aduispace/EE232E_Graphs-and-Network-Flows
|
73fee16aac4d34fdc80d844bee0e77e43924a18c
|
fce539c4ffaaf4c915898f161d2b46baf55b7147
|
refs/heads/master
| 2021-01-19T20:57:01.460757
| 2017-06-18T10:04:22
| 2017-06-18T10:04:22
| 88,581,294
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
HW1 Q1.R
|
library("igraph")
#1(a)
p <- c(0.01, 0.05, 0.1)
nodes <- 1000
graph_1 <- random.graph.game(n = nodes, p = p[1],directed = FALSE)
graph_2 <- random.graph.game(n = nodes, p = p[2],directed = FALSE)
graph_3 <- random.graph.game(n = nodes, p = p[3],directed = FALSE)
degree_1 <- degree(graph = graph_1)
degree_2 <- degree(graph = graph_2)
degree_3 <- degree(graph = graph_3)
degree_dist_1 <- hist(x = degree_1, breaks = seq(from = min(degree_1), to = max(degree_1), by=1))
degree_dist_2 <- hist(x = degree_2, breaks = seq(from = min(degree_2), to = max(degree_2), by=1))
degree_dist_3 <- hist(x = degree_3, breaks = seq(from = min(degree_3), to = max(degree_3), by=1))
#1(b)
connect1 = connect2 = connect3 = diameter1 = diameter2 = diameter3 = 0;
for (i in 1:50) {
g1 <- erdos.renyi.game(1000, 0.1, type="gnp", directed = FALSE)
g2 <- erdos.renyi.game(1000, 0.05, type="gnp", directed = FALSE)
g3 <- erdos.renyi.game(1000, 0.01, type="gnp", directed = FALSE)
connect1 = c(connect1, is.connected(g1))
connect2 = c(connect1, is.connected(g2))
connect3 = c(connect1, is.connected(g3))
diameter1 = c(diameter1, diameter(g1))
diameter2 = c(diameter2, diameter(g2))
diameter3 = c(diameter3, diameter(g3))
}
Connect1 <- mean(connect1)
Connect2 <- mean(connect2)
Connect3 <- mean(connect3)
Diameter1 <- mean(diameter1)
Diameter2 <- mean(diameter2)
Diameter3 <- mean(diameter3)
#1(c)
pc <- 0
for (i in 1:50) {
for (p in seq(from = 0, to = 0.0100, by = 0.0001)) {
g = random.graph.game(1000, p, directed = FALSE)
if (is.connected(g))
break
}
pc <- c(pc, p)
}
mean(pc)
|
a69db9b9ebaa182fd085ac308612a24acd5df301
|
146da93ef3da74b2022bfd635b42c3622774b38e
|
/cachematrix.R
|
658e9e3a4adce1b23a5423d6c48b423ef4dd0256
|
[] |
no_license
|
dabooc/ProgrammingAssignment2
|
5ce56ca397f3b7b9c657c007aa528d64221deaa0
|
d423af5994ae124d4d8349731495cf62b91e7546
|
refs/heads/master
| 2021-01-18T09:12:31.262206
| 2014-06-30T06:14:26
| 2014-06-30T06:14:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
cachematrix.R
|
## Allow caching of the inverse of a matrix.
## Returns a list object wrapping matrix `x` with accessors:
##   get()                  — the original matrix
##   setCachedInvertion(i)  — store the computed inverse
##   getCachedInvertion()   — retrieve it (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  ## Fix: the original wrote `cachedInvertion = null`; R has no object
  ## `null`, so every call failed with "object 'null' not found".
  ## NULL is the correct empty-cache sentinel.
  cachedInvertion <- NULL
  ## retrieve the original matrix
  get <- function() {
    x
  }
  ## cache the inverted matrix (assigned in the enclosing environment)
  setCachedInvertion <- function(inv) {
    cachedInvertion <<- inv
  }
  ## retrieve the cached value (NULL if not yet computed)
  getCachedInvertion <- function() {
    cachedInvertion
  }
  list(get = get,
       setCachedInvertion = setCachedInvertion,
       getCachedInvertion = getCachedInvertion)
}
## Return the inverse of the special matrix object `x` (as produced by
## makeCacheMatrix), computing it at most once: subsequent calls reuse
## the inverse stored inside the object.
cacheSolve <- function(x, ...) {
  ## Fast path: a previously computed inverse is returned directly
  inv <- x$getCachedInvertion()
  if (!is.null(inv)) {
    return(inv)
  }
  ## Slow path: invert the wrapped matrix and cache it in the object
  inv <- solve(x$get(), ...)
  x$setCachedInvertion(inv)
  inv
}
|
a99b249e491f2aabfa37f45920c8fd0ced1edf8b
|
3cc6265e82e373d377dae488831cfdb1caad1dfe
|
/codedepends/DTL/plot_DTL.R
|
3dfc1f25abe03ba7de28ed80cc4d4886c3308ef2
|
[] |
no_license
|
clarkfitzg/phd_research
|
439ecc0d650da23bfad1e1a212e490c2746a6656
|
dfe46c49f6beba54389b0074e19f3c9b1ea04645
|
refs/heads/master
| 2020-04-15T14:02:03.890862
| 2019-09-20T02:33:07
| 2019-09-20T02:33:07
| 59,333,323
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
plot_DTL.R
|
source("../depend_graph.R")
#igraph_options(plot.layout=layout_as_tree)
s = readScript("DTLfirst.R")
# I see 5 edges coming into 24 on the graph, which corresponds to the 5
# inputs in s[[24]].
info = lapply(s, getInputs)
g = depend_graph(s, add_source = TRUE)
write_graph(g, "graph.dot", format = "dot")
# 167 edges
|
5f30a8a1bee0671df37023286e1bd69bd4c02d70
|
9bfe3914043180472c6ec1b0ab8b5c6b5b934211
|
/R/partition.R
|
40e12574274e0c372a02e378b47f0b17a7c591ec
|
[] |
no_license
|
cran/splitTools
|
e7667cfe0a2c2706266acc73a7b7d9160691f21a
|
701b046f2103a41bc3b7496800dd8aa48d064f43
|
refs/heads/master
| 2023-06-09T04:57:33.937514
| 2023-06-06T13:00:02
| 2023-06-06T13:00:02
| 236,901,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,503
|
r
|
partition.R
|
#' Split Data into Partitions
#'
#' This function provides row indices for data splitting, e.g., to split data
#' into training, validation, and test. Different types of split strategies are
#' supported, see Details.
#' The partition indices are either returned as list with one element per partition
#' (the default) or as vector of partition IDs.
#'
#' By default, the function uses stratified splitting. This will balance the partitions
#' as good as possible regarding the distribution of the input vector `y`.
#' (Numeric input is first binned into `n_bins` quantile groups.)
#' If `type = "grouped"`, groups specified by `y` are kept together when
#' splitting. This is relevant for clustered or panel data.
#' In contrast to basic splitting, `type = "blocked"` does not sample indices
#' at random, but rather keeps them in groups: e.g., the first 80% of observations form
#' a training set and the remaining 20% are used for testing.
#'
#' @param y Either the variable used for "stratification" or "grouped" splits.
#' For other types of splits, any vector of the same length as the data
#' intended to split.
#' @param p A vector with split probabilities per partition, e.g.,
#' `c(train = 0.7, valid = 0.3)`. Names are passed to the output.
#' @param type Split type. One of "stratified" (default), "basic", "grouped", "blocked".
#' @param n_bins Approximate numbers of bins for numeric `y`
#' (only for `type = "stratified"`).
#' @param split_into_list Should the resulting partition vector be split into a list?
#' Default is `TRUE`.
#' @param use_names Should names of `p` be used as partition names?
#' Default is `TRUE`.
#' @param shuffle Should row indices be randomly shuffled within partition?
#' Default is `FALSE`. Shuffling is only possible when `split_into_list = TRUE`.
#' @param seed Integer random seed.
#' @returns
#' A list with row indices per partition (if `split_into_list = TRUE`)
#' or a vector of partition IDs.
#' @export
#' @examples
#' y <- rep(c(letters[1:4]), each = 5)
#' partition(y, p = c(0.7, 0.3), seed = 1)
#' partition(y, p = c(0.7, 0.3), split_into_list = FALSE, seed = 1)
#' p <- c(train = 0.8, valid = 0.1, test = 0.1)
#' partition(y, p, seed = 1)
#' partition(y, p, split_into_list = FALSE, seed = 1)
#' partition(y, p, split_into_list = FALSE, use_names = FALSE, seed = 1)
#' partition(y, p = c(0.7, 0.3), type = "grouped")
#' partition(y, p = c(0.7, 0.3), type = "blocked")
#' @seealso [create_folds()]
partition <- function(y, p,
                      type = c("stratified", "basic", "grouped", "blocked"),
                      n_bins = 10L, split_into_list = TRUE, use_names = TRUE,
                      shuffle = FALSE, seed = NULL) {
  # Input checks
  type <- match.arg(type)
  stopifnot(
    length(p) >= 1L,
    p > 0,
    is.atomic(y),
    (n <- length(y)) >= 2L
  )
  # Initializations: normalize p to probabilities
  p <- p / sum(p)
  if (!is.null(seed)) {
    set.seed(seed)
  }
  # Calculation of partition ids
  if (type == "basic") {
    out <- .smp_fun(n, p)
    out <- .fill_empty_partitions(out, p = p)
  } else if (type == "blocked") {
    # Deterministic: first ceiling(p[1] * n) rows -> partition 1, etc.
    out <- rep.int(seq_along(p), times = ceiling(p * n))[seq_len(n)]
  } else if (type == "stratified") {
    if (is.numeric(y) && length(unique(y)) > n_bins) {
      y <- .bin(y, n_bins)
    }
    if (anyNA(y)) {
      # Keep NA as its own stratum instead of dropping those rows
      y <- factor(y, exclude = NULL)
    }
    # Sample partition ids independently within each stratum of y
    out <- stats::ave(integer(n), y, FUN = function(z) .smp_fun(length(z), p))
    out <- .fill_empty_partitions(out, p = p)
  } else if (type == "grouped") {
    # Assign whole groups (unique values of y) to partitions
    y_unique <- unique(y)
    m <- length(y_unique)
    stopifnot(length(p) <= m)
    y_folds <- .smp_fun(m, p)
    y_folds <- .fill_empty_partitions(y_folds, p = p)
    out <- y_folds[match(y, y_unique)]
  }
  # Output
  if (use_names && !is.null(names(p))) {
    out <- factor(out, levels = seq_along(p), labels = names(p))
  }
  if (!split_into_list) {
    if (shuffle) {
      # Fix: the message previously said "TRUE", contradicting both
      # this branch (split_into_list is FALSE here) and the roxygen
      # docs ("Shuffling is only possible when split_into_list = TRUE").
      message("Shuffling has no effect with split_into_list = FALSE.")
    }
    return(out)
  }
  out <- split(seq_along(y), out)
  if (shuffle) {
    out <- lapply(out, .shuffle)
  }
  out
}
# Little helpers
# Safe shuffling (works even if x has length 1, where sample(x) would
# misbehave by sampling from 1:x)
.shuffle <- function(x, ...) {
  x[sample.int(length(x), ...)]
}
# Efficient binning of numeric y into ~n_bins quantile groups
.bin <- function(y, n_bins) {
  qu <- stats::quantile(y, seq(0, 1, length.out = n_bins + 1L), na.rm = TRUE)
  findInterval(y, unique(qu), rightmost.closed = TRUE)
}
# The secret heart of splitTools: n random partition ids drawn without
# replacement from a pool with expected proportions p
.smp_fun <- function(n, p) {
  sample(rep.int(seq_along(p), times = ceiling(p * n)), n)
}
# Fills empty partitions by stealing random rows from non-empty ones
.fill_empty_partitions <- function(fi, p) {
  counts <- tabulate(fi, nbins = length(p))
  empty <- which(counts == 0L)
  n_empty <- length(empty)
  if (n_empty == 0L) {
    return(fi)
  }
  message("Empty partition detected. Redistributing...")
  # Find positions of potential donors (each partition keeps one row)
  drop_random <- function(z) {
    m <- length(z) - 1L
    if (m >= 1L) sample(z, m)
  }
  positions <- split(seq_along(fi), fi)
  donors <- unlist(lapply(positions, drop_random), use.names = FALSE)
  n_donors <- length(donors)
  # Randomly select donors
  if (n_empty > n_donors) {
    message("Cannot fill all empty partitions.")
    n_empty <- n_donors
  }
  selected <- donors[sample.int(n_donors, n_empty)]
  # Replace donors by empty partition numbers
  fi[selected] <- empty[seq_len(n_empty)]
  fi
}
|
26b2ba2f5cbcc0e27ef8d37f489832e7555c7da4
|
6425ad71279b9744c1fc192df9f989129aec6397
|
/R/p14.R
|
4e9d6a0690303925f2d78fb6f81fbf64939c0238
|
[] |
no_license
|
dxe4/project_euler
|
b84d0df13c060b19cfd17be6ccfe5543f2ecc1b5
|
5e9a6a62f7fa3947d0a9ac16ced666afa1e0e4cc
|
refs/heads/master
| 2023-06-08T13:24:38.296566
| 2023-05-29T15:04:10
| 2023-05-29T15:04:10
| 91,951,924
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
p14.R
|
# 14 | Longest Collatz sequence
#
# https://projecteuler.net/problem=14
#
# The following iterative sequence is defined for the set of positive integers:
#
# n → n/2 (n is even)
# n → 3n + 1 (n is odd)
#
# Using the rule above and starting with 13, we generate the following sequence:
#
# 13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
# It can be seen that this sequence (starting at 13 and finishing at 1)
# contains 10 terms. Although it has not been proved yet (Collatz Problem),
# it is thought that all starting numbers finish at 1.
#
# Which starting number, under one million, produces the longest chain?
#
# NOTE: Once the chain starts the terms are allowed to go above one million.
comp_collatz_iters <- function(vect) {
  # Vectorised Collatz chain length: for each positive integer in
  # `vect`, the number of terms in its Collatz sequence down to 1
  # (counting the starting term, so 13 -> 10, 1 -> 1).
  nr_iters <- rep(1, length(vect))
  # Fix/robustness: `any(vect > 1L)` replaces the original
  # `mean(vect) > 1L`. For vectors of positive integers the two are
  # equivalent (all elements are >= 1), but mean(numeric(0)) is NaN,
  # which made `while` error on empty input; any() handles it cleanly.
  while (any(vect > 1L)) {
    # Increment counters only for elements that have not reached 1
    nr_iters <- nr_iters + ifelse(vect == 1L, 0L, 1L)
    # Apply one Collatz step elementwise; elements at 1 stay at 1
    vect <- ifelse(
      vect == 1L, vect, ifelse(vect %% 2L == 0L, vect / 2L, vect * 3L + 1L))
  }
  return(nr_iters)
}
# Computes the result: which.max returns the (first) index of the
# longest chain, and indices here coincide with the starting numbers
# 1..cap, so this prints the answer directly.
cap <- 1e6 - 1
nrs <- 1:cap
print(which.max(comp_collatz_iters(nrs)))
|
3770ae26294ae60813390dd91ef41bddf334423d
|
b605d5e30be6eac950c2962dc1e0b6496f28790f
|
/R/compliance.R
|
f012cdd7e25e906850e5ebe9fce57ad8865451ce
|
[] |
no_license
|
khalidharun/DBI
|
8a723be4f1ecf749df7803cdc4c5f9663008df25
|
7a0ad76dea21a846cee62f67108ab8e8b7d60a49
|
refs/heads/master
| 2020-12-25T07:15:03.893858
| 2015-10-30T09:28:01
| 2015-10-30T09:28:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,988
|
r
|
compliance.R
|
#' Check a driver for compliance with DBI.
#'
#' @param driver Driver name.
#' @param pkg Package that driver lives in - is usually "Rdriver"
#' @export
#' @examples
#' if (require("RSQLite")) {
#' dbiCheckCompliance("SQLite")
#' dbiCheckCompliance("NoDriver", "RSQLite")
#' }
dbiCheckCompliance <- function(driver, pkg = paste0("R", driver)) {
  cat("Compliance check for ", driver, "\n", sep = "")
  # Look classes up directly in the driver package's namespace
  where <- asNamespace(pkg)
  # Expected S4 class names, e.g. "SQLiteDriver", "SQLiteConnection",
  # "SQLiteResult" — driver name + suffixes from key_methods
  classes <- paste0(driver, names(key_methods))
  names(classes) <- names(key_methods)
  is_class <- vapply(classes, isClass, where = where, FUN.VALUE = logical(1))
  # Bail out early if any of the three classes is missing entirely
  if (!all(is_class)) {
    cat("NOT OK\n",
      " Missing definitions for classes: ",
      paste0(classes[!is_class], collapse = ", "), "\n", sep = "")
    return(invisible())
  }
  # For each class, check which of its key generics have methods
  methods <- Map(function(g, c) has_methods(g, c, where), key_methods, classes)
  names(methods) <- classes
  # One report line (or block) per class
  cat(unlist(Map(compliance_message, methods, names(methods))), sep = "\n")
}
# For each generic name, report whether `class` provides an S4 method
# in environment `where`. Returns a named logical vector.
has_methods <- function(generic, class, where) {
  check_one <- function(g) hasMethod(g, class, where)
  vapply(generic, check_one, FUN.VALUE = logical(1))
}
# Render the compliance report line for one class: "Name: OK" when all
# methods are present, otherwise "Name: NOT OK" followed by the missing
# generic names, wrapped and indented by two spaces.
compliance_message <- function(methods, name) {
  if (!all(methods)) {
    missing_generics <- paste0(names(methods)[!methods], collapse = ", ")
    wrapped <- paste0(
      strwrap(missing_generics, indent = 2, exdent = 2),
      collapse = "\n"
    )
    return(paste0(name, ": NOT OK\n", wrapped))
  }
  paste0(name, ": OK")
}
# Key DBI generics a compliant backend must implement, grouped by the
# class suffix ("Driver", "Connection", "Result") they attach to;
# dbiCheckCompliance() prefixes each suffix with the driver name.
key_methods <- list(
  Driver = c(
    "dbGetInfo",
    "dbConnect",
    "dbUnloadDriver",
    "dbListConnections",
    "dbDataType"
  ),
  Connection = c(
    "dbDisconnect",
    "dbGetInfo",
    "dbSendQuery",
    "dbGetException",
    "dbListResults",
    "dbListFields",
    "dbListTables",
    "dbReadTable",
    "dbWriteTable",
    "dbExistsTable",
    "dbRemoveTable",
    "dbBegin",
    "dbCommit",
    "dbRollback",
    "dbIsValid",
    "dbQuoteString",
    "dbQuoteIdentifier"
  ),
  Result = c(
    "dbIsValid",
    "dbFetch",
    "dbClearResult",
    "dbColumnInfo"
  )
)
|
c1d9103a37b98d78281b2ee31d996d6af83f7360
|
e8e5bc16d8c74bb95bfcbf867cf92d9983410f49
|
/R/pure_soda.R
|
0a88f3d54429af723641eab0a16c4fe95ce2f6df
|
[] |
no_license
|
cran/sodavis
|
1a0207cf615d8f0d8821a4049bb525a6eb568e06
|
ca871473e9284c0c7926a1f9064fe12366d36a63
|
refs/heads/master
| 2021-01-10T13:15:05.917563
| 2018-05-13T20:24:03
| 2018-05-13T20:24:03
| 48,088,856
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,060
|
r
|
pure_soda.R
|
library(nnet)
# Build the predictor matrix column-by-column from encoded term strings:
#   "i"   -> main effect: column i of xx
#   "i*j" -> interaction: elementwise product of columns i and j
# Returns an nrow(xx) x length(terms) matrix (every column named "term",
# matching the historical cbind() behaviour).
create_pmatrix_from_terms = function(xx, terms)
{
  n_obs = nrow(xx);
  pmatrix = matrix(0, n_obs, 0);
  for (term in terms)
  {
    if (grepl("*", term, fixed = T))
    {
      ids = as.numeric(strsplit(term, "*", fixed = T)[[1]]);
      column = xx[, ids[1]] * xx[, ids[2]];
    }
    else
    {
      column = xx[, as.numeric(term)];
    }
    pmatrix = cbind(pmatrix, term = column);
  }
  return(pmatrix);
}
# Deviance-based (E)BIC for a multinomial logistic model on the raw
# columns listed in cur_set; the d == 0 branch scores the
# intercept-only (class-proportions) model.
# NOTE(review): the D and K arguments are immediately overwritten from
# xx and yy below — presumably kept for signature compatibility with
# calc_BIC; confirm no caller relies on passing different values.
calc_lda_BIC = function(xx, yy, cur_set, D, K, debug=F, gam=0)
{
  N = nrow(xx);
  D = ncol(xx);   # number of candidate predictors (overwrites argument)
  K = max(yy);    # number of classes (overwrites argument)
  d = length(cur_set);
  ll = 0;
  if (d == 0)
  {
    # Null model: log-likelihood from empirical class proportions
    p = numeric(K);
    for(i in 1:N)
    {
      p[yy[i]] = p[yy[i]] + 1.0/N;
    }
    for(k in 1:K)
    {
      ll = ll + sum(yy==k)*log(p[k]);
    }
    # EBIC: gam = 0 reduces to the ordinary BIC
    BIC = -2*ll + (K-1)*(log(N) + 2*gam*log(D));
    return(BIC);
  }
  else
  {
    # NOTE(review): nnet::multinom() has no `family` argument; the
    # family = "binomial" below is swallowed by `...` — confirm intent.
    lgt = multinom(yy ~ as.matrix(xx[,cur_set]), family = "binomial", trace=F);
    # (K-1)*(1+d) free parameters: intercept + d slopes per contrast
    BIC = lgt$deviance + (K-1)*(1+d)*(log(N) + 2*gam*log(D));
    return(BIC);
  }
}
#
# Deviance-based (E)BIC for a multinomial logistic model built from the
# given linear/interaction terms; the d == 0 branch scores the
# intercept-only (class-proportions) model.
#
# xx: explanatory variables (one column per candidate predictor)
# yy: response variable (integer class labels 1..K)
# terms: selected terms, encoded "i" (linear) or "i*j" (interaction)
# D, K: NOTE(review) — immediately overwritten from xx / yy below;
#       presumably kept for signature symmetry. Confirm callers.
# debug: if shows debug information (currently unused)
# gam: gamma in EBIC (gam = 0 gives the ordinary BIC)
#
calc_BIC = function(xx, yy, terms, D, K, debug=F, gam=0)
{
  N = length(yy);
  D = ncol(xx);   # overwrites the argument
  K = max(yy);    # overwrites the argument
  d = length(terms);
  ll = 0;
  if (d == 0)
  {
    # Intercept-only model: log-likelihood from class proportions
    p = numeric(K);
    for(i in 1:N)
    {
      p[yy[i]] = p[yy[i]] + 1.0/N;
    }
    for(k in 1:K)
    {
      ll = ll + sum(yy==k)*log(p[k]);
    }
    BIC = (K-1) * (log(N) + 2*gam*log(D));
    BIC = BIC - 2*ll;
    return(BIC);
  }
  else
  {
    # Expand terms into a predictor matrix, then fit multinomial logit.
    # NOTE(review): nnet::multinom() has no `family` argument; the
    # family = "multinomial" below goes into `...` — confirm intent.
    pmatrix = create_pmatrix_from_terms(xx, terms);
    lgt = multinom(yy ~ pmatrix, family = "multinomial", trace=F);
    BIC = lgt$deviance;
    BIC = BIC + (K-1)*(1+ncol(pmatrix))*(log(N) + 2*gam*log(D)); # BIC with quadratic penalty
    return(BIC);
  }
}
# Human-readable label for a position-encoded term string like "1.0.2":
# digit k at position i means x_names[cur_set[i]] enters with power k
# (1 = linear, 2 = squared, 0 = absent). Factors are joined with "*".
# Returns "(Empty)" when cur_set is empty or no factor is present.
get_term_name = function(x_names, cur_set, term)
{
  if (length(cur_set) == 0)
  {
    return("(Empty)");
  }
  codes = strsplit(term, ".", fixed=T)[[1]];
  parts = character(0);
  for(i in seq_along(cur_set))
  {
    if (codes[i] == "1")
    {
      parts = c(parts, x_names[cur_set[i]]);
    }
    else if (codes[i] == "2")
    {
      parts = c(parts, paste0(x_names[cur_set[i]], "^2"));
    }
  }
  if (length(parts) == 0)
  {
    return("(Empty)");
  }
  return(paste0(parts, collapse="*"));
}
# Label for an index-encoded term: "i" -> x_names[i];
# "i*j" -> "x_names[i]*x_names[j]".
get_term_name_2 = function(x_names, term)
{
  if (!grepl("*", term, fixed=T))
  {
    return(x_names[as.numeric(term)]);
  }
  ids = as.numeric(strsplit(term, "*", fixed=T)[[1]]);
  return(paste(x_names[ids[1]], x_names[ids[2]], sep="*"));
}
get_lin_terms = function(n_terms)
{
  # Indicator-string encoding of the n_terms main effects: term i is the
  # dot-separated string "0...010...0" with a "1" in position i.
  # BUG FIX: the loop used `1:n_terms`, which for n_terms == 0 iterates over
  # c(1, 0) and produced garbage terms; seq_len() yields an empty loop.
  terms = c();
  for(i in seq_len(n_terms))
  {
    arr = numeric(n_terms);
    arr[i] = 1;
    terms = c(terms, paste0(arr, collapse = "."));
  }
  return(terms);
}
get_lin_terms_vec = function(c_set)
{
  # Main-effect terms are simply the variable indices rendered as strings.
  return(as.character(c_set));
}
get_quad_terms_vec = function(c_set)
{
  # Linear terms for c_set followed by every product "i*j" with j >= i
  # (squares included). Empty input yields an empty result.
  n = length(c_set);
  if (n <= 0)
    return(c());
  quad = c();
  for(i in seq_len(n))
    for(j in i:n)
      quad = c(quad, paste(c_set[i], c_set[j], sep="*"));
  return(c(get_lin_terms_vec(c_set), quad));
}
get_inter_terms_vec = function(c_set)
{
  # All distinct pairwise interaction terms "i*j" (i < j) among c_set;
  # fewer than two variables yield an empty result.
  n = length(c_set);
  terms = c();
  if (n >= 2)
  {
    for(i in 1:(n-1))
      for(j in (i+1):n)
        terms = c(terms, paste(c_set[i], c_set[j], sep="*"));
  }
  return(terms);
}
get_set_from_terms = function(terms)
{
  # Recover the set of variable indices used by a term list: "i*j"
  # contributes both i and j, a plain "i" contributes i. Indices appear in
  # order of first occurrence (set semantics via union()).
  c_set = c();
  for(term in terms)
  {
    if (grepl("*", term, fixed=T))
    {
      ids = as.numeric(strsplit(term, "*", fixed=T)[[1]]);
      c_set = union(c_set, ids[1]);
      c_set = union(c_set, ids[2]);
    }
    else
    {
      c_set = union(c_set, as.numeric(term));
    }
  }
  return(c_set);
}
trim_terms = function(terms)
{
  # Drop indicator positions that are "0" in every term, re-encoding each
  # term over the surviving positions only. The number of positions is
  # taken from the first term.
  if (length(terms) == 0)
    return(c());
  first_parts = strsplit(terms[1], ".", fixed=T)[[1]]
  if (length(first_parts) == 0)
  {
    return(c());
  }
  keep = rep(F, length(first_parts));
  # Mark every position that is non-zero in at least one term.
  for(term in terms)
  {
    parts = strsplit(term, ".", fixed=T)[[1]]
    for(pos in seq_along(parts))
    {
      if (parts[pos] != "0")
        keep[pos] = T;
    }
  }
  # Rebuild each term keeping only the marked positions.
  out = c();
  for(term in terms)
  {
    parts = strsplit(term, ".", fixed=T)[[1]]
    out = c(out, paste0(parts[which(keep)], collapse = "."));
  }
  return(out);
}
nqnorm = function(data)
{
  # Quantile-normalize to a standard normal: each value becomes
  # qnorm((rank - 0.5) / N). Vectors are transformed directly; matrices and
  # data frames are transformed column by column.
  # BUG FIX: dispatch used `class(data) == "numeric"`, which fails for
  # integer vectors and is non-scalar for matrices (class is
  # c("matrix","array") in R >= 4.0, so if() errors in modern R). We now
  # dispatch on whether the input has dimensions.
  if (is.null(dim(data)))
  {
    N = length(data);
    qs = rank(data)/N - 0.5/N;
    data = qnorm(qs);
    return(data);
  }
  else
  {
    N = dim(data)[1];
    D = dim(data)[2];
    for(d in 1:D)
    {
      qs = rank(data[,d])/N - 0.5/N;
      data[,d] = qnorm(qs);
    }
    return(data);
  }
}
preprocess_y = function(y)
{
  # Shift a categorical response so its labels start at 1 (the model code
  # assumes classes are coded 1..K). Responses already starting at 1 or
  # above are returned unchanged.
  shift = min(y)
  if (shift <= 0)
  {
    cat("Warning: categorical response y should start from 1 instead of 0.\n")
    y = y - shift + 1
  }
  return(y)
}
#
# xx: explanatory variables
# yy: response variable
# norm: if TRUE, xx are quantile normalized to normal
# debug: if shows debug information
# gam: gamma in EBIC
# minF: minimum number of forward steps
#
#
# SODA: stepwise forward-backward selection of main effects and pairwise
# interactions for a multinomial (logistic) classifier, scored by the
# extended BIC (EBIC).
#
# xx:    N x D matrix/data frame of explanatory variables
# yy:    categorical response coded 1..K (0-based labels are shifted up)
# norm:  if TRUE, quantile-normalize xx to normal within each class
# debug: if TRUE, print every candidate operation considered
# gam:   gamma in the EBIC penalty
# minF:  minimum number of forward steps in the interaction stage
#
# Returns a list with the full search trace (BIC, Type, Var, Term) and the
# final selection (final_EBIC / final_Var / final_Term, also aliased as
# best_BIC / best_Var / best_Term).
#
soda = function(xx, yy, norm=F, debug=F, gam=0, minF = 3)
{
  if (min(yy) == 0)
    yy = yy + 1;
  K = max(yy);
  N = dim(xx)[1];
  D = dim(xx)[2];
  minF = min(D, minF);

  if (norm)
  {
    # Normalize within each response class.
    # (BUG FIX: was `xx[yy=k,]`, which passed `k` as an argument named `yy`
    # to `[` instead of selecting the rows of class k.)
    for(k in 1:K)
      xx[yy == k,] = nqnorm(xx[yy == k,]);
  }

  x_names = colnames(xx);
  if (is.null(x_names))
    x_names = paste0("X",1:D);

  set_all = 1:D;
  cur_set = c();

  # Search trace: EBIC, step type, variable set and term set after each step.
  BIC = c();
  Var = list();
  Term = list();
  Type = c();

  BIC[1] = calc_BIC(xx, yy, c(), D, K, debug, gam=gam);
  Type[1] = "Init";
  Var[[1]] = cur_set;
  Term[[1]] = c();
  cur_score = BIC[1];

  cat(paste0("Initialization: empty set, EBIC = ", sprintf("%.3f", BIC[1]), "\n\n"));

  tt = 1;

  ########################
  # Linear Forward Stage #
  ########################
  cat(paste0("Forward Stage - Main effects:\n"));
  while(T)
  {
    ops = list();
    n_ops = 0;

    ######################
    # Forward Operations #
    ######################
    # Try adding each unselected variable as a main effect; keep every
    # candidate that improves the current EBIC.
    not_set = setdiff(set_all, cur_set);
    Nnset = length(not_set);
    if (Nnset > 0)
    {
      for(j in 1:Nnset)
      {
        jj = not_set[j];
        new_set = sort(c(jj, cur_set));
        new_score = calc_lda_BIC(xx, yy, new_set, D, K, debug, gam=gam);
        if (debug)
          cat(paste0("\tTrying to add variable ", jj , ": ", x_names[jj], " into main effect set... D_Score: ", cur_score-new_score, "\n\n"));
        if (new_score < cur_score)
        {
          n_ops = n_ops + 1;
          ops[[n_ops]] = list();
          ops[[n_ops]]$new_set = new_set;
          ops[[n_ops]]$new_score = new_score;
          ops[[n_ops]]$print = paste0("\tMain effects: add variable ", jj , ": ", x_names[jj], " into selection set... df = ", length(new_set)+1, ", EBIC = ", sprintf("%.3f",new_score));
        }
      }
    }

    #######################
    # The Best Operations #
    #######################
    if (n_ops == 0)
    {
      break;
    }

    toprint = "";
    for(i in 1:n_ops)
    {
      if (ops[[i]]$new_score < cur_score)
      {
        cur_score = ops[[i]]$new_score;
        cur_set = ops[[i]]$new_set;
        toprint = ops[[i]]$print;
      }
    }

    tt = tt + 1;
    BIC[tt] = cur_score;
    Type[[tt]] = "Forward (Main)";
    Var[[tt]] = cur_set;
    Term[[tt]] = get_lin_terms_vec(cur_set);

    cat(paste0(toprint,"\n"));
  }

  # Main-effect stage done; restart the set for the interaction stage while
  # keeping the selected main-effect terms.
  linear_set = cur_set;
  cur_terms = get_lin_terms_vec(linear_set);
  cur_set = c();

  ###########################
  # Quadratic Forward Stage #
  ###########################
  cat(paste0("\nForward Stage - Interactions: \n"));
  while(T)
  {
    ops = list();
    n_ops = 0;

    ######################
    # Forward Operations #
    ######################
    # Try adding each remaining variable together with all of its squares
    # and cross-products with the current interaction set.
    not_set = setdiff(set_all, cur_set);
    Nnset = length(not_set);
    if (Nnset > 0)
    {
      for(j in 1:Nnset)
      {
        jj = not_set[j];
        new_set = sort(c(jj, cur_set));
        new_terms = union(cur_terms, get_quad_terms_vec(new_set));
        new_score = calc_BIC(xx, yy, new_terms, D, K, debug, gam=gam);
        if (debug)
          cat(paste0("\tTrying to add variable ", jj , ": ", x_names[jj], " into interaction set... D_Score: ", cur_score-new_score, "\n"));
        # Before minF variables are in, every candidate is kept regardless
        # of score (forced exploration).
        if (new_score < cur_score || length(cur_set) < minF)
        {
          n_ops = n_ops + 1;
          ops[[n_ops]] = list();
          ops[[n_ops]]$new_set = new_set;
          ops[[n_ops]]$new_score = new_score;
          ops[[n_ops]]$new_terms = new_terms;
          ops[[n_ops]]$print = paste0("\tInteractions: add variable ", jj , ": ", x_names[jj], " into selection set... df = ", length(new_terms)+1, ", EBIC = ", sprintf("%.3f",new_score));
        }
      }
    }

    ######################
    # The Best Operation #
    ######################
    if (n_ops == 0)
    {
      break;
    }

    toprint = "";

    # Reset the bar during forced exploration so the best candidate wins
    # even if it worsens the score.
    if (length(cur_set) < minF)
      cur_score = 1e6;

    for(i in 1:n_ops)
    {
      if (ops[[i]]$new_score < cur_score)
      {
        cur_score = ops[[i]]$new_score;
        cur_set = ops[[i]]$new_set;
        toprint = ops[[i]]$print;
        cur_terms = ops[[i]]$new_terms;
      }
    }

    tt = tt + 1;
    BIC[tt] = cur_score;
    Type[[tt]] = "Forward (Int)";
    Var[[tt]] = c(setdiff(linear_set, cur_set), cur_set);
    Term[[tt]] = cur_terms;

    cat(paste0(toprint,"\n"));
  }

  # Variables at the end of the forward stages: main-effect-only variables
  # first, then the interaction-stage variables; terms are the union of
  # both stages, rescored once.
  cur_set = c(setdiff(linear_set, cur_set), cur_set);
  cur_terms = union(cur_terms, get_lin_terms_vec(linear_set))
  cur_score = calc_BIC(xx, yy, cur_terms, D, K, debug, gam=gam);

  cat(paste0("\nBackward stage: \n"));
  ##################
  # Backward Stage #
  ##################
  if (length(cur_set) > 0)
  {
    while(T)
    {
      ops = list();
      n_ops = 0;

      #######################
      # Backward Operations #
      #######################
      # Try removing each term; keep every removal that improves the EBIC.
      Nterms = length(cur_terms);
      if(Nterms > 0)
      {
        for(j in 1:Nterms)
        {
          term = cur_terms[j];
          new_terms = setdiff(cur_terms, term);
          new_score = calc_BIC(xx, yy, new_terms, D, K, debug, gam=gam);
          if (debug)
          {
            term_name = get_term_name_2(x_names, term);
            cat(paste0("\tTrying to remove term ", term_name, " from selection set... Score: ", cur_score - new_score, "\n\n"));
          }
          if (new_score < cur_score)
          {
            n_ops = n_ops + 1;
            term_name = get_term_name_2(x_names, term);
            ops[[n_ops]] = list();
            ops[[n_ops]]$new_terms = new_terms;
            ops[[n_ops]]$new_score = new_score;
            ops[[n_ops]]$print = paste0("\tRemove term ", term_name, " from selection set... df = ", length(new_terms)+1, ", EBIC = ", sprintf("%.3f", new_score));
          }
        }
      }

      #######################
      # The Best Operations #
      #######################
      if (n_ops == 0)
      {
        break;
      }

      toprint = "";
      for(i in 1:n_ops)
      {
        if (ops[[i]]$new_score < cur_score)
        {
          cur_score = ops[[i]]$new_score;
          cur_terms = ops[[i]]$new_terms;
          cur_set = get_set_from_terms(ops[[i]]$new_terms);
          toprint = ops[[i]]$print;
        }
      }

      tt = tt + 1;
      BIC[tt] = cur_score;
      Type[[tt]] = "Backward";
      Var[[tt]] = cur_set;
      Term[[tt]] = cur_terms;

      cat(paste0(toprint,"\n"));
    }
  }

  result = list();
  result$BIC = BIC;
  result$Type = Type;
  result$Var = Var;
  result$Term = Term;

  # The reported selection is the state after the LAST step: the min-EBIC
  # scan below is intentionally disabled (condition commented out), so
  # MIN_IDX ends at tt.
  MIN_IDX = -1;
  MIN_BIC = 100000000;
  if (tt > 0)
  {
    for(i in 1:tt)
    {
      # if (BIC[i] < MIN_BIC)
      {
        MIN_IDX = i;
        MIN_BIC = BIC[i];
      }
    }
    result$final_EBIC = BIC[MIN_IDX];
    result$final_Var = Var[[MIN_IDX]];
    result$final_Term = Term[[MIN_IDX]];
  }
  else
  {
    result$final_EBIC = BIC[1];
    result$final_Var = Var[[1]];
    result$final_Term = Term[[1]];
  }

  cat(paste("\nFinal selected variables: ", paste0(x_names[result$final_Var], collapse=", ")));
  term_names = c();
  for(term in result$final_Term)
  {
    term_name = get_term_name_2(x_names, term);
    term_names = c(term_names, term_name);
  }
  cat(paste("\n\t terms: ", paste0(term_names, collapse=", "), "\n"));

  result$best_Var = result$final_Var
  result$best_Term = result$final_Term
  # BUG FIX: s_soda() reads best_BIC, which was never set; alias it here.
  result$best_BIC = result$final_EBIC
  return(result)
}
logistic_terms_CV = function(xx, yy, terms, KK, Debug=F)
{
  # KK-fold cross-validated classification accuracy of a multinomial logit
  # fitted on the given term list (as produced by soda()).
  #
  # xx:    explanatory variables (N x D)
  # yy:    class labels 1..K
  # terms: term strings ("i" and "i*j"); NULL fits an intercept-only model
  # KK:    number of CV folds
  # Debug: if TRUE, print per-fold progress
  # Returns list(m_sp = fraction of correctly classified test cases).
  #
  # NOTE(review): relies on nnet::multinom and the sibling function
  # create_pmatrix_from_terms(). Folds hold floor(N/KK) cases each, so up
  # to KK-1 trailing cases are never tested, yet m_succ is divided by N --
  # accuracy is slightly underestimated when KK does not divide N; confirm.
  N = length(yy);
  K = max(yy);
  D = dim(xx)[2];
  o = sample(1:N);   # random permutation -> random fold assignment
  n = floor(N/KK);   # fold size
  if (is.null(terms))
  {
    xx = matrix(0, N, 0);   # zero-column design => intercept-only below
  }
  else
  {
    xx = create_pmatrix_from_terms(as.matrix(xx), terms);
  }
  xx = as.matrix(xx[o,]);
  yy = yy[o];
  m_succ = 0;   # running count of correct test predictions
  c_succ = 0;   # NOTE(review): never used afterwards
  for(kk in 1:KK)
  {
    if (Debug)
      cat(paste0("Cross validation k = ", kk, " / ", KK, "\n"));
    # Fold kk is the test set; all other cases train.
    set_tr = setdiff(1:N,((kk-1)*n+1):((kk)*n));
    set_te = ((kk-1)*n+1):((kk)*n);
    xx_tr = as.matrix(xx[set_tr,]);
    yy_tr = yy[set_tr];
    xx_te = as.matrix(xx[set_te,]);
    yy_te = yy[set_te];
    # With no terms, fall back to a constant column (intercept-only fit).
    pmatrix = rep(1,length(yy_tr));
    if (length(xx_tr) > 0)
      pmatrix = xx_tr;
    fit = multinom(yy_tr ~ pmatrix, family = "multinomial", trace=F);
    cef = coef(fit);
    # coef() returns a plain vector when K == 2; normalize to a 1-row matrix.
    if (K==2)
      cef = t(as.matrix(coef(fit)));
    # Linear scores per class on the test fold; class 1 is the reference
    # category with score 0.
    zz = matrix(0, K, length(yy_te))
    for(k in 2:K)
    {
      pmatrix = rep(1,length(yy_te));
      if (length(xx_te) > 0)
        pmatrix = xx_te;
      zz[k,] = cbind(1,pmatrix) %*% cef[k-1,];
    }
    pp = apply(zz,2,which.max);   # predicted class = argmax score
    m_succ = m_succ + sum(pp == yy_te);
    if (Debug)
      cat(paste0("	Successes in k = ", kk, ": ", m_succ, " / ", n*kk, "\n"));
  }
  res = list();
  res$m_sp = m_succ/N;
  if (Debug)
    cat(paste0("Classification accuracy = ", res$m_sp, "\n"));
  return(res);
}
#
# Calculate a trace of cross-validation error rate for SODA forward-backward procedure
#
soda_trace_CV = function(xx, yy, res_SODA)
{
  # For each step recorded by soda(), estimate the 10-fold CV error of a
  # multinomial logit on that step's term set (averaged over N_CV repeats)
  # and tabulate it together with the step type, EBIC and model-size counts.
  if (min(yy) == 0)
    yy = yy + 1;
  N_CV = 20;
  n_steps = length(res_SODA$Var);
  cv_err = matrix(0, n_steps, N_CV);
  n_var = numeric(n_steps);
  n_main = numeric(n_steps);
  n_int = numeric(n_steps);
  ebic = numeric(n_steps);
  step_type = character(n_steps);
  for(i_step in 1:n_steps)
  {
    cat(paste0("Calculating CV error for step ", i_step, " ...\n"));
    vars = res_SODA$Var[[i_step]];
    terms = res_SODA$Term[[i_step]];
    for(icv in 1:N_CV)
    {
      cv_err[i_step, icv] = 1 - logistic_terms_CV(xx, yy, terms, 10)$m_sp;
    }
    is_int = grepl("*", terms, fixed=T);   # which terms are interactions
    n_var[i_step] = length(vars);
    n_main[i_step] = length(terms) - sum(is_int);
    n_int[i_step] = sum(is_int);
    ebic[i_step] = res_SODA$BIC[i_step]
    step_type[i_step] = res_SODA$Type[i_step]
  }
  mean_err = apply(cv_err, 1, mean);
  tab = data.frame(step_type, ebic, n_var, n_main, n_int, mean_err);
  colnames(tab) = c("Step Type", "BIC", "# Variables", "# Main terms", "# Interactions", "CV Error");
  return(tab);
}
s_soda = function(x, y, H=5, gam=0, minF=3, norm=F, debug=F)
{
  # S-SODA: SODA variable selection for a CONTINUOUS response. The response
  # is sliced into H equal-count intervals and soda() is run on the slice
  # labels as a categorical response.
  #
  # x:    N x D predictor matrix / data frame
  # y:    continuous response
  # H:    number of response slices
  # gam:  EBIC gamma forwarded to soda()
  # minF: minimum forward steps forwarded to soda()
  # norm: if TRUE, quantile-normalize x and y first
  # Returns slice labels/intervals, the SODA trace and selection, and a
  # multinomial logit fitted on the selected terms.
  cat(paste0("Variable selection using S-SODA....."));
  if (norm)
  {
    y = nqnorm(y);
    x = nqnorm(x);
  }
  N = length(y);
  oo = order(y);
  xx = x[oo,];   # predictors in sorted-response order
  yy = y[oo];    # sorted response
  ls = round(seq(1, N, length.out=H+1));   # slice boundaries (sorted positions)
  LL = length(ls);
  res = list();
  res$S = numeric(N);          # slice label of each ORIGINAL observation
  res$H = H
  res$int_l = numeric(LL-1);   # slice lower endpoints
  res$int_m = numeric(LL-1);   # slice means
  res$int_u = numeric(LL-1);   # slice upper endpoints
  res$S[oo[1]] = 1;            # smallest-y case belongs to slice 1
  for (i in 1:H)
  {
    ff = ls[i]+1;
    to = ls[i+1];
    res$S[oo[ff:to]] = i;
    res$int_l[i] = yy[ff];
    res$int_m[i] = mean(yy[ff:to]);
    res$int_u[i] = yy[to];
  }
  # Run SODA on the slice labels as a categorical response.
  res_SODA = soda(x, res$S, gam=gam, minF=minF);
  res$BIC = res_SODA$BIC;
  res$Var = res_SODA$Var;
  res$Term = res_SODA$Term;
  # NOTE(review): soda() sets final_EBIC/best_Var/best_Term; best_BIC may be
  # NULL unless soda() also aliases it -- confirm.
  res$best_BIC = res_SODA$best_BIC;
  res$best_Var = res_SODA$best_Var;
  res$best_Term = res_SODA$best_Term;
  # Refit a multinomial logit of slice label on the selected terms.
  # NOTE(review): pmt rows follow the SORTED order (xx = x[oo,]) while res$S
  # is in the original observation order -- the rows look misaligned here;
  # confirm whether res$S[oo] was intended.
  pmt = create_pmatrix_from_terms(as.matrix(xx), res$best_Term);
  if (length(pmt) <= 0)
    pmt = matrix(1, length(res$S), 1);   # intercept-only fallback
  lgt = multinom(res$S ~ pmt, family = "multinomial", trace=F);
  res$logit_m = lgt;
  # NOTE(review): names(x) is NULL when x is a matrix (colnames() would be
  # needed) -- assumes x is a data frame; confirm.
  print(paste("Selected variables: ", paste(names(x)[res$best_Var], collapse=" ")));
  return(res)
}
s_soda_model = function(x, y, H=10)
{
  # Fit the S-SODA prediction model: sort the observations by y, slice them
  # into H roughly equal-count intervals, and within each interval fit
  # intercept-only / linear / quadratic regressions of y on x plus a
  # Gaussian model (mean + covariance) of x used later for posterior
  # interval weighting in s_soda_pred().
  #
  # x: predictor matrix (or data frame coercible to matrix)
  # y: continuous response vector
  # H: number of response intervals
  # Returns a list consumed by s_soda_pred().
  cov_MIN = 0.1;   # floor for covariance diagonals, keeps the density proper
  N = length(y);
  yy = y;          # original-order copy, used for interval membership
  o = order(y);
  y = sort(y);
  result = list();
  result$sort_y = y;
  ls = round(seq(1, N, length.out=H+1));   # interval cut positions (sorted order)
  LL = length(ls);
  result$cuts_o = ls;
  result$cuts_v = y[ls];
  result$H = LL-1;
  result$int_d = numeric(N);
  result$int_h = numeric(N);
  result$int_y = list();
  result$int_x = list();
  result$int_p = numeric(H);    # interval prior probabilities
  result$int_l = numeric(H);    # interval widths in y
  result$int_m = list();        # per-interval mean of x
  result$int_v = list();        # per-interval covariance of x (clamped)
  result$int_ul = numeric(LL-1);
  result$int_ur = numeric(LL-1);
  result$int_um = numeric(LL-1);
  result$int_d[1] = 1;
  # Interval bookkeeping in sorted order: membership, proportions, widths
  # and endpoints/means.
  for (i in 1:(LL-1))
  {
    from = ls[i]+1;
    to = ls[i+1];
    result$int_d[from:to] = i;
    result$int_h[yy>=y[from] & yy<=y[to]] = i;
    result$int_p[i] = (to-from+1)/N;
    result$int_l[i] = y[to]-y[from];
    result$int_ul[i] = y[from];
    result$int_ur[i] = y[to];
    result$int_um[i] = mean(y[from:to]);
  }
  x = as.matrix(x);
  x = as.matrix(x[o,]);   # predictors in sorted-response order
  D = dim(x)[2];
  result$sort_x = x;
  result$int_m0 = list();
  result$int_m1 = list();
  result$int_m2 = list();
  # Per-interval regression and Gaussian models.
  # NOTE(review): this loop uses from = ls[i] (not ls[i]+1 as above), so
  # consecutive slices overlap by one point and the bookkeeping fields are
  # overwritten with slightly different values -- preserved as in the
  # original; confirm which convention is intended.
  for (i in 1:(LL-1))
  {
    from = ls[i];
    to = ls[i+1];
    result$int_d[from:to] = i;
    result$int_h[yy>=y[from] & yy<=y[to]] = i;
    result$int_p[i] = (to-from+1)/N;
    result$int_l[i] = y[to]-y[from];
    result$int_m0[[i]] = coef(lm(y[from:to] ~ 1));
    result$int_m1[[i]] = coef(lm(y[from:to] ~ x[from:to,]));
    result$int_m2[[i]] = coef(lm(y[from:to] ~ poly(as.matrix(x[from:to,]), degree=2, raw=TRUE)));
    sx = as.matrix(x[from:to,]);
    result$int_m[[i]] = colMeans(sx);
    ss = cov(sx);
    diag(ss) = pmax(diag(ss), cov_MIN);   # clamp tiny variances
    # BUG FIX: previously stored cov(sx) again, discarding the clamp above.
    result$int_v[[i]] = ss;
  }
  return(result);
}
surf.colors = function(x, col = terrain.colors(20)) {
  # Facet colours for persp(): each of the (nrow-1) x (ncol-1) facets is
  # coloured by the mean of its four corner heights, binned into the
  # palette with equal-width intervals.
  # BUG FIX: the corner average previously dropped the PENULTIMATE row and
  # column (x[-(nrow(x)-1), ], x[, -(ncol(x)-1)]) instead of the last ones,
  # so facets were averaged over the wrong corners.
  x.avg = (x[-1, -1] + x[-1, -ncol(x)] +
           x[-nrow(x), -1] + x[-nrow(x), -ncol(x)]) / 4
  # Bin the facet means into one interval per palette entry.
  colors = col[cut(x.avg, breaks = length(col), include.lowest = T)]
  return(colors)
}
s_soda_pred = function(x, model, po = 1)
{
  # Predict a continuous response from an s_soda_model() fit. For each row
  # of x, posterior interval weights p(h | x) are computed from the
  # per-interval Gaussian models, then the per-interval regression
  # predictions are averaged under those weights.
  #
  # x:     predictor matrix (rows = cases)
  # model: list returned by s_soda_model()
  # po:    polynomial order of the per-interval predictor
  #        (0 intercept-only, 1 linear, 2 quadratic)
  # Returns a numeric vector of predictions, one per row of x.
  #
  # NOTE(review): dmvnorm is not base R -- assumes the mvtnorm package is
  # attached by the caller; confirm.
  x = as.matrix(x);
  N = dim(x)[1];
  D = dim(x)[2];
  H = model$H;
  y = numeric(N);
  x2 = poly(x, degree=2, raw=TRUE)   # quadratic expansion used when po == 2
  cat("Making predictions using S-SODA model...\n")
  for(n in 1:N)
  {
    # Progress marker roughly every 10% of rows.
    # NOTE(review): round(N/10) is 0 for N < 5, making n %% 0 NaN and the
    # if() fail -- small inputs would error here; confirm.
    if (n %% round(N/10) == 0)
      cat(paste0(round(n/N*10), "0% "));
    # Posterior log-weight of each interval: log prior + Gaussian
    # log-likelihood of x[n,] under that interval's mean/covariance.
    pp = log(model$int_p);
    for(h in 1:H)
    {
      pp[h] = pp[h] + dmvnorm(x[n,], model$int_m[[h]], model$int_v[[h]], log=T);
    }
    pp = pp-max(pp);   # stabilize before exponentiating
    pp = exp(pp);
    pp = pp / sum(pp);
    # Weighted average of the per-interval regression predictions.
    y[n] = 0;
    for(h in 1:H)
    {
      if (po == 0)
        y[n] = y[n] + pp[h]*(model$int_m0[[h]]);
      if (po == 1)
        y[n] = y[n] + pp[h]*sum(model$int_m1[[h]] * cbind(1, t(as.numeric(x[n,]))));
      if (po == 2)
        y[n] = y[n] + pp[h]*sum(model$int_m2[[h]] * cbind(1, t(as.numeric(x2[n,]))));
    }
  }
  cat("\n")
  return(y);
}
s_soda_pred_grid = function(xx1, xx2, model, po=1)
{
  # Evaluate s_soda_pred() over the Cartesian grid xx1 x xx2 and return the
  # predictions as a length(xx1) x length(xx2) matrix (rows vary with xx1).
  grid_pts = expand.grid(xx1, xx2);
  preds = s_soda_pred(grid_pts, model, po=po);
  return(matrix(preds, length(xx1), length(xx2)));
}
compare_surface = function(xx, yy, col_idx, theta=-25, zlab="Y", add_points=F, H=25)
{
  # Side-by-side perspective plots of the response surface fitted by plain
  # linear regression vs S-SODA over two predictor columns of xx.
  #
  # xx:         predictor data frame / matrix (needs column names)
  # yy:         continuous response
  # col_idx:    length-2 vector of column indices to plot over
  # theta:      persp() azimuth angle
  # zlab:       z-axis label
  # add_points: accepted but never used -- NOTE(review): confirm intent
  # H:          number of response intervals for the S-SODA model
  # Side effect: sets par(mfrow = c(1, 2)) without restoring it.
  ii = col_idx[1]
  jj = col_idx[2]
  x1 = xx[, ii]   # NOTE(review): x1/x2 are never used below
  x2 = xx[, jj]
  name_i = colnames(xx)[ii]
  name_j = colnames(xx)[jj]
  x1_r = range(xx[, ii], na.rm=T)
  x2_r = range(xx[, jj], na.rm=T)
  # Fit both models on the two selected columns.
  m_soda = s_soda_model(xx[, col_idx], yy, H=H)
  m_line = lm(yy ~ xx[, col_idx])
  m_coef = m_line$coefficients
  # Evaluate both fits on an MM x MM grid spanning the observed ranges.
  MM = 30;
  g_x1 = seq(x1_r[1], x1_r[2], length.out=MM);
  g_x2 = seq(x2_r[1], x2_r[2], length.out=MM);
  gexx = expand.grid(x1=g_x1, x2=g_x2)
  p_soda = s_soda_pred_grid(g_x1, g_x2, m_soda, po=1)
  p_line = m_coef[1] + m_coef[2] * gexx[, 1] + m_coef[3] * gexx[, 2]
  p_line = matrix(p_line, MM, MM)
  # Shared z-limits so the two panels are directly comparable.
  p_combined = c(p_soda, p_line)
  min_p = min(p_combined)
  max_p = max(p_combined)
  par(mfrow=c(1,2))
  main_1 = "Linear Regression"
  main_2 = "S-SODA"
  persp(g_x1, g_x2, p_line, theta=theta, col=surf.colors(p_line), xlab=name_i, ylab=name_j, zlab=zlab, zlim=c(min_p, max_p))
  title(main_1, line=0)
  persp(g_x1, g_x2, p_soda, theta=theta, col=surf.colors(p_soda), xlab=name_i, ylab=name_j, zlab=zlab, zlim=c(min_p, max_p))
  title(main_2, line=0)
}
|
9d3d0543bab79eed9d04e3c653c06437d275fc59
|
34542f2bdf76012b3bb93d458de4ca6068477258
|
/man/my.bisec.Rd
|
f59008bac1e36b9e15027c4386142a351b798aa6
|
[] |
no_license
|
cran/ebGenotyping
|
8f3c20d8f7af36951a09a5e5af226578c6c134b6
|
994a6072f2ba4c00fc5762262c2a29353de581cc
|
refs/heads/master
| 2021-01-18T21:13:58.068228
| 2016-04-13T09:28:12
| 2016-04-13T09:28:12
| 35,105,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,660
|
rd
|
my.bisec.Rd
|
\name{my.bisec}
\alias{my.bisec}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Bisection method to find the root
}
\description{
This function applies the bisection method to find the root of a function f.
}
\usage{
my.bisec(f, int.l, int.u, eps = 1e-06)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{f}{
the function for which the root is sought.
}
\item{int.l}{
a vector containing the lower bound of the interval to be searched for the root. The length of the vector is the same as that of the input of function f.
}
\item{int.u}{
a vector containing the upper bound of the interval to be searched for the root. The length of the vector is the same as that of the input of function f.
}
\item{eps}{
a single value: a threshold to control the convergence criterion. The default is 1e-06.
}
}
\details{
Both int.l and int.u must be specified: the upper bound must be strictly larger than the lower bound.
The function f must be well defined, without invalid output (NA, NaN, Inf, etc.).
The length of the input of function f, the output of function f, int.l and int.u must be the same.
}
\value{
a vector containing the root of the function. If there is no root in the interval (int.l, int.u), the lower bound of the interval will be returned.
}
\references{
Na You and Gongyi Huang.(2016) An Empirical Bayes Method for Genotyping and SNP detection Using Multi-sample Next-generation Sequencing Data.
}
\author{
Na You <youn@mail.sysu.edu.cn> and Gongyi Huang<53hgy@163.com>
}
\examples{
f <- function(x){
a <- 1:10
return(x-a)
}
my.bisec(f=f, int.l=rep(-1,10), int.u=rep(11,10), eps = 1e-08)
}
|
511b40770ee07c6d43ff5465121264e6a33e7dd6
|
e17f9b6cb4c2fc0b37fc744e8e448de834fc435e
|
/Lasso regressin Analysis.R
|
a2d99b4ea098e82501ea77b962f8eaf0bddfa893
|
[] |
no_license
|
kamleshthakur123/R-for-Data-Science
|
4a1ec2175733453fc55ff4fbdbf8a1706da54b39
|
d434d254bdb2c5015e92bd72ba7dd110ab383a91
|
refs/heads/main
| 2023-07-05T20:19:48.431862
| 2021-08-15T17:54:10
| 2021-08-15T17:54:10
| 396,436,436
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,211
|
r
|
Lasso regressin Analysis.R
|
#### Libraries needed
library(caret)
library(glmnet)
library(mlbench)
library(psych)

data("BostonHousing")
str(BostonHousing)
pairs.panels(BostonHousing)
### Scatter plot of every pair of numeric variables (drop columns 4 and 14)
pairs.panels(BostonHousing[c(-4,-14)],cex=2)

### Data partition: ~70% training / ~30% testing
set.seed(222)
ind <- sample(2,nrow(BostonHousing),replace = T,
              prob = c(.7,.3))
Training <- BostonHousing[ind==1,]
Testing <- BostonHousing[ind==2,]

### Custom control parameters
### verboseIter = T prints progress while the model trains
custom <- trainControl(method = "repeatedcv",
                       number = 10,
                       repeats = 5,
                       verboseIter = T)
custom

### Linear model
### (note: the name `lm` shadows stats::lm for the rest of this script)
set.seed(1234)
lm <- train(medv~.,Training,
            method= "lm",
            trControl = custom)
lm
### Results
lm$results
summary(lm)
par(mfrow=c(2,2))
plot(lm$finalModel)

#### Ridge regression
### tries to shrink the coefficients
### but keeps all the variables in the model
### the glmnet package allows us to fit lasso, ridge and elastic net models
set.seed(1234)
ridge <- train(medv~.,
               Training,
               method= "glmnet",
               tuneGrid= expand.grid(alpha=0,
                                     lambda=seq(.0001,1,length=5)),
               trControl=custom)
### lambda is the strength of the penalty on the coefficients:
### as we increase lambda we increase the penalty,
### which makes the coefficients shrink
### plot
plot(ridge)
### it is clear from the plot that the error increases
### for higher values of lambda
ridge
plot(ridge$finalModel,xvar = "lambda",label = T)
### when log-lambda reaches 8 or 9 all the coefficients are near zero;
### ridge does not zero out coefficients of variables that
### are not contributing to the model
### plot of variable importance
plot(varImp(ridge,scale = F))

#### Lasso regression
set.seed(1234)
lasso <- train(medv~.,Training,
               method= "glmnet",
               tuneGrid= expand.grid(alpha=1,
                                     lambda=seq(.0001,1,length=5)),
               trControl= custom)
### Plot
par(mfrow=c(1,1))
plot(lasso)
plot(lasso$finalModel,xvar="lambda",
     label = T)
plot(varImp(lasso,scale = F))

#### Elastic net regression
set.seed(1234)
en <- train(medv~.,Training,
            method= 'glmnet',
            tuneGrid=expand.grid(alpha=seq(0,1,length=10),
                                 lambda=seq(.0001,1,length=5)),
            trControl=custom)
### plot results
plot(en)
plot(en$finalModel,xvar = "lambda",label = T)
plot(varImp(en,scale = F))

#### Compare models
model_list <- list(linearmodel= lm, Ridge=ridge,
                   Lasso=lasso,elasticnet=en)
#### resamples() collects the resampling results for comparison
res <- resamples(model_list)
summary(res)
xyplot(res,metric="RMSE") ### compares ridge and linear model

### Best model
en$bestTune
best <- en$finalModel
coef(best,en$bestTune$lambda)

### Save the final model, then reload it for later use.
### BUG FIX: saveRDS() returns NULL invisibly, so the original
### `fm <- saveRDS(en, "final_model.rds")` left fm == NULL and the
### subsequent predict(fm, Training) call failed.
saveRDS(en, "final_model.rds")
fm <- readRDS("final_model.rds")
### Prediction
predict(fm, Training)
### also calculate RMSE
|
75a5a172686f0fadb630166ff8f161e61b711b21
|
723da7e475a2814b820cc9507a3d041233117221
|
/R/getROI.R
|
f5552517ad9093ce192b098d14190931dfba9bbf
|
[] |
no_license
|
tziol001/Project-Geoscripting
|
d0ca9995dc4d1eb5933f6c901b3d8165f0ae1449
|
2ebaca8e68b6a7cde6a182bdde644486f6e87121
|
refs/heads/master
| 2020-04-12T19:47:56.262904
| 2015-01-27T05:13:34
| 2015-01-27T05:13:34
| 29,669,321
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
getROI.R
|
# Extract the per-pixel values of `raster` that fall inside a region of
# interest, returned as a data frame (one row per cell, one column per layer).
#
# raster: a Raster* object (raster package)
# ROI:    a Spatial* polygon (sp package) delimiting the region of interest
getROI <-
  function(raster, ROI)
  {
    # Reproject the ROI into the raster's CRS so mask() lines up.
    ROI_proj2raster <- spTransform(ROI, CRS(proj4string(raster)))
    # Mask out everything outside the region of interest.
    masked_ROI <- mask(raster, ROI_proj2raster)
    # Pull the cell values and wrap them in a data frame.
    # BUG FIX: the getValues() result was previously discarded and the
    # raster object itself was coerced with as.data.frame() instead.
    valuetable <- getValues(masked_ROI)
    valuetable <- as.data.frame(valuetable)
    return (valuetable)
  }
|
aff886412a1b59e1ce0a95766a5909b3131fddca
|
b7534c7b2ec799d74c81d5d5a42d4733ea03a8f8
|
/data_src/final_analysis.R
|
6c77895503933e1d163da03c3a7f282cc9045288
|
[] |
no_license
|
robbizorg/github_msd_project
|
66a6fbb02e2bab9dcd3be0088569912b95cd7310
|
1a77d0ab1c5afa26723ead09c57be7380319ea56
|
refs/heads/master
| 2021-01-20T03:30:17.444342
| 2017-05-06T03:21:09
| 2017-05-06T03:21:09
| 89,548,153
| 3
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,551
|
r
|
final_analysis.R
|
#### BIO_ANALYSIS ####
# Desc: File just for the Analysis of the Profiles with Bios
library(tidyverse)
library(readr)
library(glmnet)
library(ROCR)
library(lda)
library(randomForest)
library(tm)
library(topicmodels)
library(tidytext)

set.seed(42)

final_data <- read_csv("~/Documents/MSD/github_project/data/final_data.csv")
# Zero-fill missing counts; keep only profiles that have a bio.
# (note: `!is.na(bio)>0` parses as `!(is.na(bio) > 0)`, i.e. !is.na(bio))
final_data <- final_data %>% mutate(followers = ifelse(is.na(followers),0,followers),
                                    following = ifelse(is.na(following),0,following),
                                    public_repos = ifelse(is.na(public_repos),0,public_repos),
                                    public_gists = ifelse(is.na(public_gists),0,public_gists),
                                    has_bio=ifelse(!is.na(bio)>0, 1, 0)) %>% filter(has_bio==1) ## Key Difference

### List of Features
# Custom Domain
# Custom email domain
# Topic Modeling

## Let's Try Building a Model
# Building the dataframe
# Defining the Threshold as the Median
# To avoid Skew, just Doing binary for now for above_pop
predictive <- final_data %>% mutate(has_location=ifelse(!is.na(location)>0, 1, 0),
                                    has_company=ifelse(!is.na(company)>0, 1, 0),
                                    has_blog=ifelse(!is.na(blog)>0, 1, 0),
                                    has_name=ifelse(!is.na(name)>0, 1, 0),
                                    hireable=ifelse(is.na(hireable),0,1),
                                    above_pop=ifelse(followers>median(followers, na.rm=T), 1, 0),
                                    fol_scaled=(followers-mean(followers))/sd(followers),
                                    bio_length=ifelse(is.na(nchar(bio)),0,nchar(bio)), # no effect on the AUC of Model
                                    bio_scaled=(bio_length-mean(bio_length))/sd(bio_length),
                                    following_scaled=(following-mean(following))/sd(following),
                                    repos_scaled=(public_repos-mean(public_repos))/sd(public_repos),
                                    age=(as.numeric(Sys.time()-created_at)),  # account age; difftime units unspecified -- TODO confirm
                                    age_scaled=(age-mean(age))/sd(age),
                                    gists_scaled=(public_gists-mean(public_gists))/sd(public_gists)) %>%
  select(userId, has_bio, has_location, has_company, has_blog, has_name, following_scaled, repos_scaled, gists_scaled,
         above_pop, fol_scaled, bio_length, bio_scaled, hireable, age_scaled, closed_merge_frac)

# Statistical Significance Aside:
significance <- final_data %>% mutate(has_location=ifelse(!is.na(location)>0, 1, 0),
                                      has_company=ifelse(!is.na(company)>0, 1, 0),
                                      has_blog=ifelse(!is.na(blog)>0, 1, 0),
                                      has_name=ifelse(!is.na(name)>0, 1, 0),
                                      hireable=ifelse(is.na(hireable),0,1),
                                      above_pop=ifelse(followers>median(followers, na.rm=T), 1, 0),
                                      fol_scaled=(followers-mean(followers))/sd(followers),
                                      bio_length=ifelse(is.na(nchar(bio)),0,nchar(bio)), # no effect on the AUC of Model
                                      bio_scaled=(bio_length-mean(bio_length))/sd(bio_length),
                                      following_scaled=(following-mean(following))/sd(following),
                                      repos_scaled=(public_repos-mean(public_repos))/sd(public_repos),
                                      gists_scaled=(public_gists-mean(public_gists))/sd(public_gists))

## Linear Regression Time
###### WE GONNA GIVE A TED TALK
## BUG FIX: `age_scaled` was separated from the formula by a comma
## (`... + gists_scaled, age_scaled, significance`), which made lm() treat
## age_scaled as its `data` argument (and `significance` as `subset`);
## it belongs inside the formula.
fit <- lm(closed_merge_frac ~ has_location + has_company +
            has_blog + has_name + hireable + above_pop + following_scaled + fol_scaled +
            bio_scaled + repos_scaled + gists_scaled + age_scaled, significance)
summary(fit)
## Significant Values: hireable, above_pop, fol_scaled, gists_scaled

## Let's try Doing Logistic Regression Here
## First: Plotting Over Thresholds for All Features
models <- matrix(NA, nrow=length(seq(0.01,.99,0.01)), ncol=3)
count <- 1
for (i in seq(.01,.99, 0.01)) {
  # Binarize the response at threshold i and fit a cross-validated
  # logistic regression (glmnet) on all features.
  pred_fixed <- predictive %>% mutate(good_coder=ifelse(closed_merge_frac>i, 1, 0)) %>%
    select(-closed_merge_frac)
  data <- pred_fixed %>% select(-good_coder, -userId)
  # 90% Train-Test Split
  ndx <- sample(nrow(data), floor(nrow(data) * 0.9))
  train <- as.matrix(data[ndx,])
  test <- as.matrix(data[-ndx,])
  ## Let's get the Good Coder Values
  trainy <- pred_fixed[ndx,]$good_coder
  test_y <- pred_fixed[-ndx,]$good_coder
  cvfit <- cv.glmnet(train, trainy, family = "binomial", type.measure = "auc")
  ## Getting the Train Accuracy
  tmp <- data.frame(pred=(predict(cvfit,newx=train,s="lambda.min", type="class")), real=trainy)
  acc <- tmp %>% mutate(right=ifelse(X1==real,1,0)) %>% summarize(acc=sum(right)/nrow(tmp))
  # Build the DataFrame
  models[count,] <-c(i, max(cvfit$cvm), acc[1,1])
  count <- count + 1
}
models <- data.frame(models)
colnames(models) <- c("threshold","auc","acc")
## Plot the Thresholds:
# Over AUC
models %>% ggplot(aes(x=threshold,y=auc)) + geom_line() +
  labs(title="AUC over Thresholds (Just Bio)") +
  geom_vline(aes(xintercept = median(final_data$closed_merge_frac)), color="red")
# Over Accuracy
models %>% ggplot(aes(x=threshold,y=acc)) + geom_line() +
  labs(title="Accuracy over Thresholds (Just Bio)") +
  geom_vline(aes(xintercept = median(final_data$closed_merge_frac)), color="red")

## Second: Plotting Over Thresholds for Significant Features only
models_sub <- matrix(NA, nrow=length(seq(0.01,.99,0.01)), ncol=3)
count <- 1
for (i in seq(.01,.99, 0.01)) {
  pred_fixed <- predictive %>% mutate(good_coder=ifelse(closed_merge_frac>i, 1, 0)) %>%
    select(hireable, above_pop, fol_scaled, gists_scaled, age_scaled, good_coder)
  data <- pred_fixed %>% select(-good_coder)
  # 90% Train-Test Split
  ndx <- sample(nrow(data), floor(nrow(data) * 0.9))
  train <- as.matrix(data[ndx,])
  test <- as.matrix(data[-ndx,])
  ## Let's get the Good Coder Values
  trainy <- pred_fixed[ndx,]$good_coder
  test_y <- pred_fixed[-ndx,]$good_coder
  cvfit <- cv.glmnet(train, trainy, family = "binomial", type.measure = "auc")
  ## Getting the Train Accuracy
  tmp <- data.frame(pred=(predict(cvfit,newx=train,s="lambda.min", type="class")), real=trainy)
  acc <- tmp %>% mutate(right=ifelse(X1==real,1,0)) %>% summarize(acc=sum(right)/nrow(tmp))
  # Build the DataFrame
  models_sub[count,] <-c(i, max(cvfit$cvm), acc[1,1])
  count <- count + 1
}
models_sub <- data.frame(models_sub)
colnames(models_sub) <- c("threshold","auc","acc")
## Plot the Thresholds:
# Over AUC
models_sub %>% ggplot(aes(x=threshold,y=auc)) + geom_line() +
  labs(title="AUC over Thresholds (Just Bio, Significant Features)") +
  geom_vline(aes(xintercept = median(final_data$closed_merge_frac)), color="red")
# Over Accuracy
models_sub %>% ggplot(aes(x=threshold,y=acc)) + geom_line() +
  labs(title="Accuracy over Thresholds (Just Bio, Significant Features)") +
  geom_vline(aes(xintercept = median(final_data$closed_merge_frac)), color="red")

## Haven't had any luck with logistic, let's try Random Forests
pred_fixed <- predictive %>% mutate(good_coder=ifelse(closed_merge_frac>median(closed_merge_frac), 1, 0)) %>%
  select(hireable, above_pop, fol_scaled, gists_scaled, good_coder)
data <- pred_fixed %>% select(-good_coder)
# 90% Train-Test Split
ndx <- sample(nrow(data), floor(nrow(data) * 0.9))
train <- as.matrix(data[ndx,])
test <- as.matrix(data[-ndx,])
## Let's get the Good Coder Values
trainy <- pred_fixed[ndx,]$good_coder
test_y <- pred_fixed[-ndx,]$good_coder
forrest <- randomForest(train, as.factor(trainy))
forrest ## As you can see, it doesn't do much better

### Let's do some Topic Modeling on the user bios
# create a Corpus from the user bios
corpus <- VCorpus(VectorSource(final_data$bio))
# remove punctuation and numbers, lowercase, drop stopwords
corpus <- corpus %>% tm_map(removeNumbers) %>% tm_map(removePunctuation)
corpus <- tm_map(corpus , content_transformer(tolower))
corpus <- tm_map(corpus, removeWords, stopwords('english'))
# create a DocumentTermMatrix from the Bios Corpus
dtm <- DocumentTermMatrix(corpus)
# Removing rows with no terms from dtm, from stackoverflow:
# http://stackoverflow.com/questions/13944252/remove-empty-documents-from-documenttermmatrix-in-r-topicmodels
rowTotals <- apply(dtm , 1, sum) # Find the sum of words in each Document
dtm.new <- dtm[rowTotals> 0, ]   # remove all docs without words
# Fit a 10-topic LDA model and plot the top 10 terms per topic.
bio_lda <- LDA(dtm.new, k = 10, control = list(seed = 1234))
bio_topics <- tidy(bio_lda, matrix = "beta")
bio_top_terms <- bio_topics %>%
  group_by(topic) %>%
  top_n(10, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
bio_top_terms %>%
  mutate(term = reorder(term, beta)) %>%
  ggplot(aes(term, beta, fill = factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free") +
  coord_flip()
|
55401144a540e79523d42cd1af680b8f4eda6490
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TSA/examples/ma1.1.s.Rd.R
|
a821c1f387a69b400eea1417aa9c027ed9bf07ba
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
ma1.1.s.Rd.R
|
# Auto-generated example script for the `ma1.1.s` dataset shipped with the
# TSA package (extracted from the package's Rd documentation).
library(TSA)
### Name: ma1.1.s
### Title: A simulated MA(1) series / time series
### Aliases: ma1.1.s
### Keywords: datasets
### ** Examples
# Lazy-load the dataset into the workspace.
data(ma1.1.s)
## maybe str(ma1.1.s) ; plot(ma1.1.s) ...
0694654d1240a4d22c271f0aa2b8c65fcaf1c6f0
|
e40018718c0df266e207e79351c4ddc6bca187d6
|
/seminar11/Seminar 11 In class Exercises.R
|
e9ae97f471a5770ea337758a973977650d26d329
|
[
"MIT"
] |
permissive
|
mutazag/mda
|
11fbb51d5cd8cf375a25eb77b717f3861ce4278e
|
c12f314a288ba9b1c314c2e22f8446bae075f3cb
|
refs/heads/master
| 2023-01-03T14:14:02.755096
| 2020-10-30T02:57:04
| 2020-10-30T02:57:04
| 284,615,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,377
|
r
|
Seminar 11 In class Exercises.R
|
# Seminar 11: LDA and QDA classification on the credit-card and wine datasets.
# NOTE(review): install.packages() calls in a script re-install on every run;
# usually these belong outside the script.
install.packages("MASS")
install.packages("klaR")
library(MASS)
library(klaR)
# Local CSV path — adjust for your machine.
creditcard_data<-read.csv("C:/Documents/creditcard.csv")
# Random 80/20 train/test split by row index.
test_rows<-sample(1:nrow(creditcard_data), round(nrow(creditcard_data)*0.2) ,replace=FALSE)
creditcard_data_training<-creditcard_data[-test_rows,]
creditcard_data_test<-creditcard_data[test_rows,]
# Linear discriminant analysis: predict A15 from the other attributes.
creditcard_lda<-lda(A15~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+A11+A12+A13+A14,data=creditcard_data_training)
creditcard_lda
creditcard_data_test$lda_predict<-predict(creditcard_lda,creditcard_data_test[,1:14])$class
# Confusion matrix of actual vs predicted classes.
table(creditcard_data_test$A15,creditcard_data_test$lda_predict)
partimat(as.factor(A15)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+A11+A12+A13+A14,data=creditcard_data_test,method="lda")
# Quadratic discriminant analysis on the same split.
creditcard_qda<-qda(A15~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+A11+A12+A13+A14,data=creditcard_data_training)
creditcard_qda
creditcard_data_test$qda_predict<-predict(creditcard_qda,creditcard_data_test[,1:14])$class
table(creditcard_data_test$A15,creditcard_data_test$qda_predict)
partimat(as.factor(A15)~A1+A2+A3+A4+A5+A6+A7+A8+A9+A10+A11+A12+A13+A14,data=creditcard_data_test,method="qda")
# Wine dataset from the UCI repository (first column = cultivar label).
wine_data <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",sep=",")
colnames(wine_data)<-c("Cult","Alc","MalAcid","Ash","AshAlk","Mag","TotPhen","Flav","NonFlav","Proant","Color","Hue","OD280OD315","Proline")
test_rows<-sample(1:nrow(wine_data), round(nrow(wine_data)*0.2) ,replace=FALSE)
wine_data_training<-wine_data[-test_rows,]
wine_data_test<-wine_data[test_rows,]
# LDA on the wine data.
wine_lda<-lda(Cult~Alc+MalAcid+Ash+AshAlk+Mag+TotPhen+Flav+NonFlav+Proant+Color+Hue+OD280OD315+Proline,data=wine_data_training)
wine_lda
wine_data_test$lda_predict<-predict(wine_lda,wine_data_test[,2:14])$class
table(wine_data_test$Cult,wine_data_test$lda_predict)
partimat(as.factor(Cult) ~ Alc+MalAcid+Ash+AshAlk+Mag+TotPhen+Flav+NonFlav+Proant+Color+Hue+OD280OD315+Proline,data=wine_data_test,method="lda")
# QDA on the wine data.
wine_qda<-qda(Cult~Alc+MalAcid+Ash+AshAlk+Mag+TotPhen+Flav+NonFlav+Proant+Color+Hue+OD280OD315+Proline,data=wine_data_training)
wine_qda
wine_data_test$qda_predict<-predict(wine_qda,wine_data_test[,2:14])$class
table(wine_data_test$Cult,wine_data_test$qda_predict)
partimat(as.factor(Cult) ~ Alc+MalAcid+Ash+AshAlk+Mag+TotPhen+Flav+NonFlav+Proant+Color+Hue+OD280OD315+Proline,data=wine_data_test,method="qda")
|
a3c8be863947f5d7a466b61e968c127c0da95399
|
243e5a608115856910a19296fffb63f44c09f0f2
|
/Data Manipulation with Dplyr and Tidyr.R
|
eb63fb9f311def8a7970b4dacc36650b3bde8187
|
[] |
no_license
|
koseogluonur/R-for-Data-Science-and-Machine-Learning
|
53759ea87409fcb2080109d9f18a933095c2d0c0
|
19fbe910ad89640b21fc5808fc646bf6183bdfa1
|
refs/heads/main
| 2023-04-09T21:48:11.406448
| 2021-04-25T12:33:25
| 2021-04-25T12:33:25
| 356,064,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,438
|
r
|
Data Manipulation with Dplyr and Tidyr.R
|
# dplyr/magrittr tutorial script, part 1: piping, select, filter, group_by.
# NOTE(review): datatable(..., scrollX=T) uses the reassignable `T`; prefer TRUE.
install.packages("dplyr")
library(dplyr)
mtcars
library(DT)
datatable(mtcars, options = list(scrollX=T))
library(magrittr)
library(dplyr)
# Nested-call style (reads inside-out) ----
arrange(
summarize(
group_by(
filter(mtcars, carb > 1),
cyl
),
Avg_mpg = mean(mpg)
),
desc(Avg_mpg)
)
# Intermediate-variable style ----
a <- filter(mtcars, carb > 1)
b <- group_by(a, cyl)
c <- summarise(b, Avg_mpg = mean(mpg))
c
# Pipe style (same computation, reads top-down) ----
mtcars %>%
filter(carb > 1) %>%
group_by(cyl) %>%
summarise(Avg_mpg = mean(mpg)) %>%
arrange(desc(Avg_mpg))
# Column selection with select() ----
head(mtcars)
select(mtcars,mpg,cyl)
mtcars %>% select(mpg:qsec)
mtcars %>% select(-mpg:-qsec)
mtcars2 <- mtcars %>% select(-mpg:-qsec)
mtcars2
mtcars2 <- mtcars
# %<>% pipes and assigns back to the left-hand side in one step.
mtcars2 %<>% select(-mpg:-qsec)
# Tidy-select helpers and regex matching ----
mtcars %>% select(starts_with("c"))
mtcars %>% select(ends_with("t"))
mtcars %>% select(contains("a"))
mtcars %>% select(matches("^m"))
mtcars %>% select(matches("^(c|m)"))
mtcars %>% select(matches("^d..t$"))
mtcars %>% select(starts_with(c("c","m")))
# Renaming: select() keeps only the renamed column, rename() keeps all.
mtcars %>% select(MilesPerGallon = mpg)
mtcars %>% rename(MilesPerGallon = mpg)
# Comparison operators usable inside filter():
#< Less than
#> Greater than
#== Equal to
#<= Less than or equal to
#>= Greater than or equal to
#!= Not equal to
#%in% Group membership
#is.na is NA
#!is.na is not NA
#&,|,! Boolean operators
mtcars %>% filter(cyl == 4)
mtcars %>% filter(cyl == 4 & hp >90)
mtcars %>% filter((cyl == 4 | cyl == 6) & hp >110)
mtcars %>% filter(cyl %in% c(4,6) & hp >110)
# remove duplicate rows
mtcars[2] %>% distinct()
mtcars[2] %>% unique()
# random sample, 50% sample size without replacement
mtcars %>% sample_frac(size = 0.5, replace = FALSE)
# random sample of 10 rows with replacement
mtcars %>% sample_n(size = 10, replace = TRUE)
# select rows 3-5
mtcars %>% slice(3:5)
# select top n entries - in this case ranks variable mpg and selects
# the rows with the top 5 values
mtcars %>% top_n(n = 5, wt = mpg)
# similarly you can use slice_max and slice_min
mtcars %>% slice_max(mpg,n=5)
mtcars %>% slice_min(mpg,n=5)
# Grouped summaries ----
mtcars %>% group_by(cyl)
mtcars %>% summarise(mean_Hp = mean(hp))
mtcars %>%
group_by(cyl) %>%
summarise(mean_Hp = mean(hp))
# Grouping persists on the saved object until ungroup() is called.
mtcars2 <- mtcars %>% group_by(cyl)
mtcars2 %>%
summarise(mean = mean(hp))
mtcars2 %<>% ungroup
mtcars2 %>%
summarise(mean = mean(hp))
mtcars %>%
group_by(cyl) %>%
summarise(count=n(),
mean_Hp = mean(hp),
sd_Hp=sd(hp)
)
mtcars %>%
group_by(cyl) %>%
summarise(count=n(),
mean_Hp = mean(hp),
sd_Hp=sd(hp)
) %>% ungroup()
# Mean absolute deviation of x about its NA-ignoring mean.
#
# Args:
#   x: numeric vector.
# Returns:
#   mean(|x - mean(x, na.rm = TRUE)|). The outer mean deliberately keeps the
#   original contract: an NA in x still yields NA.
absolute <- function(x) {
  # BUG FIX: the original used `T`, which is an ordinary (reassignable)
  # binding; the reserved word TRUE cannot be shadowed.
  mean(abs(x - mean(x, na.rm = TRUE)))
}
# dplyr tutorial, part 2: custom summaries, arrange, mutate, across().
# NOTE(review): na.rm = T below uses the reassignable `T`; prefer TRUE.
mtcars %>%
select(cyl, mpg, hp) %>%
filter(cyl %in% c(4,6)) %>%
group_by(cyl) %>%
summarise(
count = n(),
mean_mpg = mean(mpg, na.rm = T),
sd_mpg = sd(mpg, na.rm = T),
abs_mpg = absolute(mpg),
mean_hp = mean(hp, na.rm = T),
sd_hp = sd(hp, na.rm = T),
abs_hp = absolute(hp)
)
# Sorting with arrange() ----
mtcars %>% arrange(cyl)
mtcars %>% arrange(desc(mpg))
mtcars %>% arrange(cyl, desc(mpg))
# New columns with mutate(): gallons per 100 miles.
mtcars %>%
mutate(g100m = 1/mpg*100) %>%
round(3) %>%
arrange(g100m)
# Scoped verbs (superseded in modern dplyr by across()) ----
mtcars %>%
filter(cyl %in% c(4,6)) %>%
group_by(cyl) %>%
summarise_all(mean)
mtcars %>%
filter(cyl %in% c(4,6)) %>%
group_by(cyl) %>%
summarise_if(is.numeric, mean)
mtcars %>%
filter(cyl %in% c(4,6)) %>%
group_by(cyl) %>%
summarise_at(vars(starts_with("m")), mean)
mtcars %>%
filter(cyl %in% c(4,6)) %>%
mutate_if(~mean(.x) > 100,log)
mtcars %>%
filter(cyl %in% c(4,6)) %>%
mutate_all(.,log)
# across(): the modern replacement for the _if/_at/_all variants ----
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), mean, na.rm=TRUE))
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), mean, na.rm=TRUE, .names="mean_{col}"))
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), ~mean(.x, na.rm=T)))
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), ~mean(.x, na.rm=T)))
# Multiple functions per column via a (named) list; {fn}/{col} in .names.
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), list(mean=mean, sd=sd),
.names = "{fn}.{col}"))
mtcars %>%
group_by(cyl) %>%
summarise(across(contains("a"), list(mean, sd),
.names = "{fn}.{col}"))
mtcars %>%
group_by(cyl) %>%
mutate(across(contains("a"), log))
mtcars %>%
group_by(cyl) %>%
mutate(across(contains("a"), ~log(.x+1)))
|
ba58ed9c2b356ccf6946e0959fd9e6f4cb1bfcce
|
f73df032972334544e67fcd0c5782a2d85775ccf
|
/ProblemSets/PS5/PS5_McGuire.R
|
5c3b4f5540f5b6734951b53b3feaee69d0a00fdb
|
[
"MIT"
] |
permissive
|
kmmcguire/DScourseS20
|
ad8d04d8a10014a01a5d123b60ee6e45d7ac23c5
|
0bc2ff3bbd2d3f8766e1bb7f09631c13b4bd9124
|
refs/heads/master
| 2020-12-13T11:59:08.629335
| 2020-04-14T15:03:11
| 2020-04-14T15:03:11
| 234,405,546
| 0
| 0
|
MIT
| 2020-01-20T22:43:47
| 2020-01-16T20:27:50
| null |
UTF-8
|
R
| false
| false
| 2,573
|
r
|
PS5_McGuire.R
|
# PS5: scrape 2020 Democratic primary polling from Wikipedia, then collect
# tweets mentioning each candidate for a one-day window via the Twitter API.
library(rvest)
polling_avg <- read_html("https://en.wikipedia.org/wiki/Nationwide_opinion_polling_for_the_2020_Democratic_Party_presidential_primaries")
# CSS selector targets a specific table; fragile if the page layout changes.
table <- polling_avg %>% html_nodes("#mw-content-text > div > table:nth-child(18)") %>%
html_table(fill=TRUE)
table <- table[[1]]
#Initialize libraries and API Keys
library(rtweet)
# *****API KEY/TOKEN INFORMATION REMOVED*****
#Set date range from target start date to date after target end date
dateStart = "2020-02-18"
dateEnd = "2020-02-19"
# For each candidate: build a query excluding retweets/replies, pull up to
# 100k tweets (waiting out rate limits), and save the result to disk.
sanders_query = paste0("@BernieSanders -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
sanders_tweets <- search_tweets(sanders_query, n = 100000, retryonratelimit = TRUE)
save(sanders_tweets,file=paste0("sanders_",dateStart,".Rda"))
biden_query = paste0("@JoeBiden -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
biden_tweets <- search_tweets(biden_query, n = 100000, retryonratelimit = TRUE)
save(biden_tweets,file=paste0("biden_",dateStart,".Rda"))
warren_query = paste0("@ewarren -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
warren_tweets <- search_tweets(warren_query, n = 100000, retryonratelimit = TRUE)
save(warren_tweets,file=paste0("warren_",dateStart,".Rda"))
buttigieg_query = paste0("@PeteButtigieg -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
buttigieg_tweets <- search_tweets(buttigieg_query, n = 100000, retryonratelimit = TRUE)
save(buttigieg_tweets,file=paste0("buttigieg_",dateStart,".Rda"))
klobuchar_query = paste0("@amyklobuchar -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
klobuchar_tweets <- search_tweets(klobuchar_query, n = 100000, retryonratelimit = TRUE)
save(klobuchar_tweets,file=paste0("klobuchar_",dateStart,".Rda"))
bloomberg_query = paste0("@MikeBloomberg -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
bloomberg_tweets <- search_tweets(bloomberg_query, n = 100000, retryonratelimit = TRUE)
save(bloomberg_tweets,file=paste0("bloomberg_",dateStart,".Rda"))
steyer_query = paste0("@TomSteyer -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
steyer_tweets <- search_tweets(steyer_query, n = 100000, retryonratelimit = TRUE)
save(steyer_tweets,file=paste0("steyer_",dateStart,".Rda"))
#Dropped out 2/11/20
#yang_query = paste0("@AndrewYang -filter:retweets -filter:replies since:",dateStart," until:",dateEnd)
#yang_tweets <- search_tweets(yang_query, n = 100000, retryonratelimit = TRUE)
#save(yang_tweets,file=paste0("yang_",dateStart,".Rda"))
|
9a4ee0950d7a12a10ee92188a6d34926f8c66d88
|
9250a2e62021ff09a2010278d313303af11a3250
|
/R/as.timeDate.R
|
5fbb3c0c8b4621d12609e176ac119ef06269713e
|
[] |
no_license
|
cran/fCalendar
|
9d85636646e543aa2ed9f20d208bc3d33926df43
|
26c9aeef7d2d189a193242ef6bd7992b9d85512b
|
refs/heads/master
| 2016-09-05T22:37:38.860585
| 2009-05-25T00:00:00
| 2009-05-25T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,157
|
r
|
as.timeDate.R
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# Copyrights (C)
# for this R-port:
# 1999 - Diethelm Wuertz, GPL
# 2007 - Rmetrics Foundation, GPL
# Diethelm Wuertz <wuertz@phys.ethz.ch>
# www.rmetrics.org
# for the code accessed (or partly included) from other R-ports:
# see R's copyright and license files
# for the code accessed (or partly included) from contributed R-ports
# and other sources
# see Rmetrics's copyright file
################################################################################
# MEHOD: COERCION AND OBJECT TRANSFORMATIONS:
# as.timeDate Implements Use Method
# as.timeDate.default Default Method
# as.timeDate.POSIXt Returns a 'POSIX' object as 'timeDate' object
# as.timeDate.Date Returns a 'POSIX' object as 'timeDate' object
################################################################################
# S3 generic: coerce an object to class 'timeDate'.
# Arguments:
#   x - the object to convert
#   zone - time zone of the input (method defaults vary; NULL here)
#   FinCenter - financial center for the result (method defaults vary)
# Dispatches on class(x) to the as.timeDate.* methods below.
as.timeDate <-
function(x, zone = NULL, FinCenter = NULL)
{
    UseMethod("as.timeDate")
}
# ------------------------------------------------------------------------------
as.timeDate.default <-
function(x, zone = myFinCenter, FinCenter = myFinCenter)
{
    # Description:
    #   Fallback coercion: any object is first rendered as character
    #   and then parsed by the timeDate() constructor.
    # Arguments:
    #   x - object to coerce
    #   zone - time zone the input timestamps are recorded in
    #   FinCenter - financial center whose local time the result uses
    # Value:
    #   a 'timeDate' object
    # FUNCTION:
    timeDate(charvec = as.character(x), zone = zone,
        FinCenter = FinCenter)
}
# ------------------------------------------------------------------------------
as.timeDate.timeDate <-
function(x, zone = x@FinCenter, FinCenter = myFinCenter)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Re-centers a 'timeDate' object on a new financial center.
    #   The zone argument is ignored; the object's own FinCenter is
    #   authoritative (a warning is raised when they differ).

    # Arguments:
    #   x - a 'timeDate' object
    #   zone - ignored unless equal to x@FinCenter (warning otherwise)
    #   FinCenter - financial center of the returned object

    # Value:
    #   Returns 'x' as a 'timeDate' object.

    # FUNCTION:
    # BUG FIX: class(x) == "timeDate" is fragile — it fails for subclasses
    # and errors/misbehaves when class(x) has length > 1. inherits() handles
    # both cases correctly.
    stopifnot(inherits(x, "timeDate"))
    if (zone != x@FinCenter)
        warning("argument zone is ignored and FinCenter\n of timeDate is used as zone")

    # as timeDate:
    ans <- timeDate(charvec = as.character(x),
        zone = zone, FinCenter = FinCenter)

    # Return Value:
    ans
}
# ------------------------------------------------------------------------------
as.timeDate.POSIXt <-
function(x, zone = myFinCenter, FinCenter = myFinCenter)
{
    # Description:
    #   Converts a 'POSIXt' date-time object into a 'timeDate' object.
    # Arguments:
    #   x - a 'POSIXt' object
    #   zone - time zone the input timestamps are recorded in
    #   FinCenter - financial center whose local time the result uses
    # Value:
    #   a 'timeDate' object
    # FUNCTION:
    timeDate(charvec = x, zone = zone, FinCenter = FinCenter)
}
# ------------------------------------------------------------------------------
as.timeDate.Date <-
function(x, zone = myFinCenter, FinCenter = myFinCenter)
{
    # Description:
    #   Converts a 'Date' object into a 'timeDate' object.
    # Arguments:
    #   x - a 'Date' object
    #   zone - time zone the input dates are recorded in
    #   FinCenter - financial center whose local time the result uses
    # Value:
    #   a 'timeDate' object
    # FUNCTION:
    timeDate(charvec = x, zone = zone, FinCenter = FinCenter)
}
################################################################################
|
6ffda2266d479b293545b08eef0c09a67176b131
|
f9f7f2fb38348204e3869259b867d4fa0480cbe9
|
/NB.R
|
c8f538baee7c922ab508fcb4e2f83a42cc0eff72
|
[] |
no_license
|
ajmals/adult-dataset-decisiontree-naivebayes
|
c7ca055a945d01ab138911b72a1cf7bc3b019b65
|
91d37cd60aba40396b975be2f8137d33541ff9ef
|
refs/heads/master
| 2021-10-23T13:02:19.318578
| 2019-03-14T16:40:33
| 2019-03-14T16:40:33
| 175,661,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
NB.R
|
# Naive Bayes classification of the Adult income dataset.
# NOTE(review): assumes `trainAdf` and `validationAdf` are created elsewhere
# (this excerpt does not define them) — confirm before running standalone.
library(DataExplorer)
#library(naniar)
library(caret)
#library(ggplot2)
#library(dplyr)
library(e1071)
#training Naive Bayes classifier {e1071}
NBclassifier_e1071 = naiveBayes(income ~ ., data = trainAdf)
summary(NBclassifier_e1071)
#naiveBayes(formula, data, laplace = 0, ..., subset, na.action = na.pass)
# Predicting using Naive Bayes model{caret} and accuracy results from confusion matrix{caret} (train dataset)
y_pred_train1 = predict(NBclassifier_e1071, newdata = trainAdf[,!(names(trainAdf) %in% "income")])
confusionMatrix(data = y_pred_train1, trainAdf$income)
# Predicting using Naive Bayes model{caret} and accuracy results from confusion matrix{caret} (validation dataset)
y_pred_validation1 = predict(NBclassifier_e1071, newdata = validationAdf[,!(names(validationAdf) %in% "income")])
confusionMatrix(data = y_pred_validation1, validationAdf$income)
#training Naive Bayes model using {caret} package with 10 fold cross validation
#NBclassifierCaretCV = train(x= trainAdf[,-10],y=trainAdf$income, 'nb', trControl = trainControl(method ='cv', number = 10))
NBclassifierCaretCV = train(income ~ ., data = trainAdf, 'nb', trControl = trainControl(method ='cv', number = 10))
CVtrainDataset = predict (NBclassifierCaretCV, newdata = trainAdf[,!(names(trainAdf) %in% "income")])
# Confusion matrix and a summary / using caret package
confusionMatrix(data = CVtrainDataset, trainAdf$income)
|
a25d36aafe3b9e38686476b92c5a8a78e4806a5a
|
e9c16352f28947839a66c3a866c1b54ecec5c870
|
/Modules/generate_data.R
|
f90e15633b1edfd953c2baebb5b0ef7f54f65f11
|
[] |
no_license
|
AsgerMorville/GPLVM
|
35dac980118ca3147495adee94298b2e96e9874d
|
06f59df8798f228c6354d25545d3d5fee6c3135c
|
refs/heads/master
| 2020-07-01T18:13:11.233987
| 2019-11-05T11:03:57
| 2019-11-05T11:03:57
| 201,251,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
generate_data.R
|
# Placeholder for the GPLVM data generator: n observations, t time points,
# q latent dimensions. Not yet implemented; returns NULL like the original stub.
generate_data <- function(n, t, q) {
  NULL
}
|
f33ce9640e331a15ac475591a619b806e5567f91
|
3a4c8fc4e09edb9be762949b266192fb4abbf22e
|
/plot_mpp_holes.R
|
0c2bcceeeb90ab755512a6743d08f3fb9d68ad64
|
[] |
no_license
|
kreitmew/mpp
|
4a195f0fa92f9ba9172336d80859a7edbcc8cd01
|
a5e8313c559dbe91dc741d78a954a46b48121b8d
|
refs/heads/master
| 2021-05-08T15:04:41.111065
| 2018-03-02T18:44:15
| 2018-03-02T18:44:15
| 120,103,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,433
|
r
|
plot_mpp_holes.R
|
# Draw a 3-panel scale plot of perforated-plate fronts: one panel per plate,
# each with four circles whose radius/porosity come from shiny inputs
# (input$phiNui, input$rNui). The three panel sections below are identical
# except for the input index and text y-offsets.
# NOTE(review): depends on shiny `input`, helpers placeCellPanel()/
# placeCircles(), plotrix::draw.circle(), and globals g_dist_holes_name /
# g_dist_holes_expl defined elsewhere; the repeated blocks could be a loop
# over 1:3 using input[[paste0("phi", i, "ui")]] — confirm before refactoring.
plot.new()
plot.window( xlim=c(0,9), ylim=c(0,9), asp = 1 )
par(lty = 2)
# Panel frames for the three plates.
do.call(rect, placeCellPanel(9, 9)[[1]])
do.call(rect, placeCellPanel(9, 9)[[2]])
do.call(rect, placeCellPanel(9, 9)[[3]])
title("Plattenfronten - Porositaet massstabsgetreu")
# Plate 1: circles plus label text (hole spacing = r * sqrt(pi / phi)).
do.call(draw.circle, do.call(placeCircles , c(list(input$phi1ui, input$r1ui), placeCellPanel(9, 9)[[1]]))[c(2,3,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi1ui, input$r1ui), placeCellPanel(9, 9)[[1]]))[c(4,5,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi1ui, input$r1ui), placeCellPanel(9, 9)[[1]]))[c(6,7,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi1ui, input$r1ui), placeCellPanel(9, 9)[[1]]))[c(8,9,1)])
text(7,2.1, paste(g_dist_holes_name," 1"), pos = 2, offset = 2.8)
text(7,1.6, g_dist_holes_expl, pos = 2, offset = -5.5)
text(7,1.1, formatC( round(input$r1ui * (pi / input$phi1ui)^0.5, digits = 8), format='f', digits=8 ) , pos = 2, offset = 1.2)
# Plate 2.
do.call(draw.circle, do.call(placeCircles , c(list(input$phi2ui, input$r2ui), placeCellPanel(9, 9)[[2]]))[c(2,3,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi2ui, input$r2ui), placeCellPanel(9, 9)[[2]]))[c(4,5,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi2ui, input$r2ui), placeCellPanel(9, 9)[[2]]))[c(6,7,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi2ui, input$r2ui), placeCellPanel(9, 9)[[2]]))[c(8,9,1)])
text(7,5.1, paste(g_dist_holes_name," 2"), pos = 2, offset = 2.8)
text(7,4.6, g_dist_holes_expl, pos = 2, offset = -5.5)
text(7,4.1, formatC( round(input$r2ui * (pi / input$phi2ui)^0.5, digits = 8), format='f', digits=8 ) , pos = 2, offset = 1.2)
# Plate 3.
do.call(draw.circle, do.call(placeCircles , c(list(input$phi3ui, input$r3ui), placeCellPanel(9, 9)[[3]]))[c(2,3,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi3ui, input$r3ui), placeCellPanel(9, 9)[[3]]))[c(4,5,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi3ui, input$r3ui), placeCellPanel(9, 9)[[3]]))[c(6,7,1)])
do.call(draw.circle, do.call(placeCircles , c(list(input$phi3ui, input$r3ui), placeCellPanel(9, 9)[[3]]))[c(8,9,1)])
text(7,8.1, paste(g_dist_holes_name," 3"), pos = 2, offset = 2.8)
text(7,7.6, g_dist_holes_expl, pos = 2, offset = -5.5)
text(7,7.1, formatC( round(input$r3ui * (pi / input$phi3ui)^0.5, digits = 8), format='f', digits=8 ) , pos = 2, offset = 1.2)
# Record the finished plot into a global for later replay (<<- side effect).
g_chart_holes <<- recordPlot()
|
57369dae9cc6aae85064e03745343f212c8db6c3
|
0ff91c44fcc9c644803cc513aee916fceda85fa5
|
/get_10-K_filings.R
|
b86d56fd4b52cd147ca5aab50be7f9c0051609f9
|
[] |
no_license
|
iangow/filings
|
6cfcb8845986d43b7aaa92b9241c110c766b46a5
|
c9466887d0a3f621ffc53ba207f71f09ce80f412
|
refs/heads/master
| 2023-05-28T00:43:02.455003
| 2023-05-16T18:49:35
| 2023-05-16T18:49:50
| 50,131,907
| 18
| 11
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,665
|
r
|
get_10-K_filings.R
|
# Connect to Postgres and select the DEF 14A (proxy) filings from one day
# that have not yet been processed (anti-joined against filings.extracted).
library(dplyr)
pg <- src_postgres()
# The name of the local directory where filings are stored. ``
raw_directory <- "/Volumes/2TB/data/"
# Lazy references to the database tables (queries run on collect/compute).
filings <- tbl(pg, sql("SELECT * FROM filings.filings"))
extracted <- tbl(pg, sql("SELECT * FROM filings.extracted"))
# Pull together a list of all proxy filings on EDGAR
file.list <-
filings %>%
filter(form_type == 'DEF 14A') %>%
filter(date_filed=='2015-09-16') %>%
anti_join(extracted) %>%
compute()
# Function to download header (SGML) files associated with a filing.
# Most of the work is in parsing the name of the text filing and transforming
# that into the URL of the SGML file.
#
# Args:
#   path: EDGAR path of the filing's .txt document (or NA).
# Returns:
#   The path of the local SGML copy relative to `raw_directory`, or NA when
#   no SGML file could be located/downloaded.
# Side effects: creates directories and downloads files under `raw_directory`.
get_sgml_file <- function(path) {
    directory <- raw_directory
    if (is.na(path)) return(NA)
    # The remote SGML file to be downloaded. Note that SGML files used to be
    # found in the directory for the firm, but now go in a sub-directory.
    # The code below looks in both places.
    sgml_basename <- basename(gsub(".txt$", ".hdr.sgml", path, perl = TRUE))
    sgml_path <- file.path(dirname(path),
                           gsub("(-|\\.hdr\\.sgml$)", "",
                                sgml_basename, perl = TRUE))
    sgml_path_old <- file.path(dirname(path), sgml_basename)
    ftp <- file.path("https://www.sec.gov/Archives", sgml_path, sgml_basename)
    ftp_old <- file.path("https://www.sec.gov/Archives", sgml_path_old,
                         sgml_basename)
    # The local filename for the SGML file
    local_filename <- file.path(directory, sgml_path, sgml_basename)
    local_filename_old <- file.path(directory, sgml_path_old, sgml_basename)
    # Skip if we already have the file in the "new" location
    if (file.exists(local_filename)) {
        return(file.path(sgml_path, sgml_basename))
    }
    # Probe the "new" remote location; the connection is only used to test
    # for existence.
    con <- try(url(ftp, open = "rb"))
    if (inherits(con, "try-error")) {
        # If there's no file on the SEC site in the "new" location,
        # try the "old" location
        dir.create(dirname(local_filename_old), showWarnings = FALSE, recursive = TRUE)
        if (!file.exists(local_filename_old)) {
            old <- try(download.file(url = ftp_old, destfile = local_filename_old))
            # A try-error here compares unequal to 0, so it falls through to NA
            # just like a non-zero download status.
            if (!inherits(old, "try-error") && old == 0) {
                return(file.path(sgml_path_old, sgml_basename))
            }
            return(NA)
        }
        return(file.path(sgml_path_old, sgml_basename))
    }
    # BUG FIX: the original leaked `con` on the success path — close() was
    # only reached when the download failed. Close the probe immediately;
    # download.file() opens its own connection.
    close(con)
    # Download the file from the "new" location
    dir.create(dirname(local_filename), showWarnings = FALSE, recursive = TRUE)
    new <- try(download.file(url = ftp, destfile = local_filename))
    if (!inherits(new, "try-error") && new == 0) {
        return(file.path(sgml_path, sgml_basename))
    }
    NA
}
# Now, pull SGMLs for each filing
file.list$sgml_file <- NA
# NOTE(review): 1:length(...) is unsafe when the vector is empty; seq_len()
# would be the idiomatic guard.
to.get <- 1:length(file.list$sgml_file) #
# NOTE(review): this pipe's result is never assigned, so it is a no-op; the
# actual download work is done by the mclapply() call below.
file.list %>%
mutate(sgml_file = get_sgml_file(file_name))
library(parallel)
# Get the file
# Parallel download across 6 cores; preschedule disabled because download
# times vary widely per file.
system.time({
file.list$sgml_file[to.get] <-
unlist(mclapply(file.list$file_name[to.get], get_sgml_file,
mc.preschedule=FALSE, mc.cores=6))
})
# Extract the value of a single header field (e.g. "<PERIOD>") from a local
# SEC SGML header file.
#
# Args:
#   sgml_file: path of the SGML file relative to the global `raw_directory`.
#   field:     literal tag that prefixes the wanted line.
# Returns:
#   The value of the first matching line (character), or NA when the field
#   is absent from the file.
parseSGMLfile <- function(sgml_file, field="<PERIOD>") {
    con <- file(file.path(raw_directory, sgml_file), "r", blocking = FALSE)
    # BUG FIX: register cleanup up front so the connection is never leaked
    # (the original only closed it on the normal return paths, leaking it if
    # readLines() errored).
    on.exit(close(con), add = TRUE)
    text <- readLines(con)
    value <- text[grep(paste("^", field, sep=""), text, perl=TRUE)]
    if (length(value) == 0) {
        return(NA)
    }
    value <- gsub(paste("^", field, sep=""), "", value, perl=TRUE)
    value[[1]]
}
# Parse the filing period and company name out of each downloaded SGML header.
file.list$period <- NA
file.list$period <-
unlist(lapply(file.list$sgml_file, parseSGMLfile, field="<PERIOD>"))
# <PERIOD> is stored as YYYYMMDD.
file.list$period <- as.Date(file.list$period, format="%Y%m%d")
file.list$conformed_name <- NA
# Company names parsed in parallel across 12 cores.
file.list$conformed_name <-
unlist(mclapply(file.list$sgml_file, parseSGMLfile, field="<CONFORMED-NAME>", mc.cores=12))
# Fiscal year of a date: dates in June-December map to their calendar year,
# dates in January-May map to the previous year.
#
# Args:
#   date: a Date vector, or anything as.Date() accepts.
# Returns: an integer vector of fiscal years (NA dates yield NA).
fyear <- function(date) {
  d <- as.Date(date)
  mth <- as.integer(format(d, "%m"))
  yr <- as.integer(format(d, "%Y"))
  ifelse(mth <= 5, yr - 1L, yr)
}
# Persist the filing list to Postgres, then match filings to Compustat firms
# by CIK where the filing period falls within two months of the fiscal
# year-end (datadate).
file.list$fyear <- fyear(file.list$period)
rs <- dbWriteTable(pg, c("filings", "filing_10k"), file.list, overwrite=TRUE, row.names=FALSE)
rm(file.list)
matched <- dbGetQuery(pg, "
    SET work_mem='10GB';

    WITH compustat AS (
        SELECT gvkey, cik, conm, datadate
        FROM comp.funda
        INNER JOIN (SELECT DISTINCT gvkey, datadate FROM comp.secm) AS secm
        USING (gvkey, datadate)
        WHERE indfmt='INDL' AND datafmt='STD' AND popsrc='D' AND consol='C'
            AND cik IS NOT NULL AND sale IS NOT NULL
            AND datadate > '1999-12-31'
            AND fic='USA')
    SELECT *
    FROM compustat AS a
    LEFT JOIN filings.filing_10k AS b
    ON a.cik::integer=b.cik AND b.period
        BETWEEN a.datadate AND a.datadate + interval '2 months'")
# Tabulate match coverage by fiscal year (TRUE = no matched filing).
table(is.na(matched$date_filed), fyear(matched$datadate))
|
cecc26e666a53ef6d7488fc28714ac31dc85e8d8
|
8c1333fb9fbaac299285dfdad34236ffdac6f839
|
/equity-valuation/ch3/placeholder-02b.R
|
804fd6ff49080bc4b1039e0e5f0deaf5da396277
|
[
"MIT"
] |
permissive
|
cassiopagnoncelli/datacamp-courses
|
86b4c2a6d19918fc7c6bbf12c51966ad6aa40b07
|
d05b74a1e42b119efbbf74da3dfcf71569c8ec85
|
refs/heads/master
| 2021-07-15T03:24:50.629181
| 2020-06-07T04:44:58
| 2020-06-07T04:44:58
| 138,947,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
placeholder-02b.R
|
# DataCamp exercise scaffold: `___` is an intentional fill-in blank and is
# not valid R until replaced with the unlevering formula.
# Calculate the Mylan Unlevered Beta
myl_unl_beta <- ___
myl_unl_beta
|
1f9cd94c315a51260a22e97227f9fe08a95433ab
|
1a0a23794b5b1cedc885db7ff200b5c9fbc29463
|
/scripts/2.1_prepare_data.R
|
43c9ea94bc5432f09084bd94981898189d8327fd
|
[] |
no_license
|
mreddoh/brownlow
|
da6a2aacd19d39ac290660fcc01303e7c1fbec8b
|
62ee22c17768851e880da2e772f0a2fd41ea6b9b
|
refs/heads/main
| 2023-08-07T05:18:51.603489
| 2021-09-15T14:27:17
| 2021-09-15T14:27:17
| 405,284,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,077
|
r
|
2.1_prepare_data.R
|
# Load packages ----
library(tidyverse)
library(here)
# Load data ----
# NOTE(review): these .Rdata files must define player_data_2021,
# player_data_full and player_data_partial — not visible in this excerpt.
load(file = here("data","player_data_2021.Rdata"))
load(file = here("data","player_data_full.Rdata"))
load(file = here("data","player_data_partial.Rdata"))
# Wrangle data into model-able dataset with normalised and cleaned variables ----
# Note. look at normalised values, for example, disposals as percentage of teams disposals...
## * Create team and match total variables ----
# Sum the stat columns (27:78) per team-match and per match.
# NOTE(review): `names(.)[3:ncol(.) - 1]` parses as `(3:ncol(.)) - 1` because
# `:` binds tighter than `-`, so the "team."-prefixed names are taken from
# columns 2..(ncol-1) — shifted by one relative to the columns they label.
# Probably `3:ncol(.)` was intended; confirm against the column names.
team_totals <- player_data_full %>%
group_by(match_id, player_team) %>%
summarise_at(.vars = names(.)[27:78], sum) %>%
setNames(c(names(.)[1:2],paste0('team.', names(.)[3:ncol(.) - 1])))
# NOTE(review): same `2:ncol(.) - 1` precedence concern applies here.
match_totals <- player_data_full %>%
group_by(match_id) %>%
summarise_at(.vars = names(.)[27:78], sum) %>%
setNames(c(names(.)[1],paste0('match.', names(.)[2:ncol(.) - 1])))
## * Join on variables ----
player_data_full %>%
left_join(.,team_totals,by=c("match_id","player_team")) %>%
left_join(.,match_totals,by=c("match_id")) ->
player_data_full
# Elementwise division is positional, so these fractions line up by column
# order even where the prefixed names above are shifted.
team_portions <- player_data_full[27:78] / player_data_full[,substr(names(player_data_full),1,5)=="team."]
match_portions <- player_data_full[27:78] / player_data_full[,substr(names(player_data_full),1,6)=="match."]
## * Assign new variable names ----
team_portions %>% setNames(object = ., nm = paste0('team_pct.', names(.)[1:ncol(.)])) -> team_portions
match_portions %>% setNames(object = ., nm = paste0('match_pct.', names(.)[1:ncol(.)])) -> match_portions
## * Combine datasets ----
player_data_full.cleaned <- cbind(player_data_full,team_portions,match_portions)
# Add in new variables based result, i.e. was player in winning team? ----
# Signed margin: positive when the player's team won, negative otherwise.
player_data_full.cleaned$team_result <- ifelse(player_data_full.cleaned$match_winner==player_data_full.cleaned$player_team,
player_data_full.cleaned$match_margin,
-1*player_data_full.cleaned$match_margin)
# Save data ----
save(player_data_full.cleaned, file = here("data","player_data_full.cleaned.Rdata"))
|
522019836050c63bc34d0c60edd514ebc01bf118
|
25ba484e7a24eb8b6966c489b05ac828481f5807
|
/script.R
|
656122f3e0ea1e5200148d35161e9a21695d0ddb
|
[] |
no_license
|
yemeth/RepData_PeerAssessment1
|
e279e21b311bf57374c6b3896ae80c019ce218ce
|
c73d8911a5e76ffde48029b84cb5e0da41e8f289
|
refs/heads/master
| 2021-01-18T07:32:49.415471
| 2015-02-14T21:33:28
| 2015-02-14T21:33:28
| 30,795,455
| 0
| 0
| null | 2015-02-14T12:03:43
| 2015-02-14T12:03:43
| null |
UTF-8
|
R
| false
| false
| 4,197
|
r
|
script.R
|
## Reproducible-research assignment: analysis of a personal activity monitor
## (step counts in 5-minute intervals over two months).
## Read Data
## Download and unzip the activity dataset only if not already present.
url<- "https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip"
fileName <- "activity.csv"
zipFile <- "activity.zip"
if (!file.exists(fileName)) {
if (!file.exists(zipFile)) { download.file(url,destfile=zipFile) }
unzip(zipFile)
}
## Columns: steps (integer, may be NA), date (character -> Date), interval (integer).
activityData <- read.csv(fileName, colClasses = c("integer","character","integer"))
activityData$date <- as.Date(activityData$date, "%Y-%m-%d")
## Mean total number of steps taken per day
#dailySteps <- aggregate(activityData$steps, by=list(activityData$date), FUN=sum, na.rm=TRUE)
dailySteps <- aggregate(steps ~ date, data=activityData, FUN=sum, na.action=na.omit)
### Histogram
meanTotalSteps <- mean(dailySteps$steps)
hist(dailySteps$steps, xlab="Number of steps", ylab="Frequency", main="Histogram total steps by day",
     freq=TRUE, col="lightgray", ylim=c(0,30))
## Vertical reference lines: mean (blue) and median (green) of daily totals.
abline(v=meanTotalSteps, col="blue", lwd=3)
abline(v=median(dailySteps$steps), col="green", lwd=2)
text(x=meanTotalSteps-1300, y=30, labels=c("Mean"), col="blue")
text(x=median(dailySteps$steps)+1400, y=30, labels=c("Median"), col="green")
### Mean
formattedMeanTotal <- format(meanTotalSteps)
### Median
median(dailySteps$steps)
## Average interval-daily activity pattern
## Mean steps per 5-minute interval, averaged across all days.
intervalSteps <- aggregate(steps ~ interval, data=activityData, FUN=mean, na.action=na.omit)
### Plot the time series
intervalMax <- intervalSteps[which.max(intervalSteps$steps),"interval"]
maxSteps <- intervalSteps[which.max(intervalSteps$steps), "steps"]
plot(intervalSteps, type="l", xlab="Interval", ylab="Average steps", main="Average activity by interval across days")
## Mark the interval with the highest average activity.
abline(v=intervalSteps[which.max(intervalSteps$steps),"interval"], col="red", lty=2)
points(x=intervalMax, y=maxSteps, col="red", type="o")
text(x=intervalMax-190, y=maxSteps, col="red", labels=c("Max value"))
intervalMax <- intervalSteps[which.max(intervalSteps$steps),"interval"]
maxSteps <- intervalSteps[which.max(intervalSteps$steps), "steps"]
formatIntervalMax <- format(intervalMax)
formatMaxValue <- format(maxSteps)
## Imputing missing values
### Total rows with missing information
## NOTE(review): this counts NA cells across the whole data frame, not rows;
## the two are equal only if NAs occur solely in `steps` -- confirm.
rowsNA <- sum(is.na(activityData))
### Days with missing information, and how many rows are missing each day
with(subset(activityData,is.na(steps)), {table(date)})
### Replace missing data with mean for the interval
intervalSteps <- aggregate(steps ~ interval, data=activityData, FUN=mean, na.action=na.omit)
naIndex <- which(is.na(activityData$steps))
correctedData <- activityData
missingIntervals <- correctedData[naIndex,]
## merge() pairs each missing row with its interval mean (steps.y).
missingValues <- merge(missingIntervals, intervalSteps, by="interval")
## NOTE(review): merge() sorts its result by `interval`, so missingValues is
## not necessarily in naIndex order; this assignment assumes row order is
## preserved -- verify against the data.
correctedData[naIndex, "steps"] <- missingValues[,"steps.y"]
### Histogram with correctedData
dailyCorrSteps <- aggregate(steps ~ date, data=correctedData, FUN=sum, na.action=na.omit)
hist(dailyCorrSteps$steps, xlab="Number of steps", ylab="Frequency", main="Histogram total steps by day", freq=TRUE, col="lightgray", ylim=c(0,35))
abline(v=mean(dailyCorrSteps$steps), col="blue", lwd=3)
abline(v=median(dailyCorrSteps$steps), col="green", lwd=2)
text(x=mean(dailyCorrSteps$steps)-1300, y=35, labels=c("Mean"), col="blue")
text(x=median(dailyCorrSteps$steps)+1400, y=33, labels=c("Median"), col="green")
### Mean
mean(dailyCorrSteps$steps)
### Median
median(dailyCorrSteps$steps)
## Activity patterns between weekdays and weekends (with corrected data)
## Side effect: changes the session locale so weekdays() yields English names.
Sys.setlocale("LC_TIME", "en_GB.UTF-8") # Use english day names
correctedData <- transform(correctedData, weekday=factor(weekdays(date)))
## Collapse the seven day names into a two-level weekday/weekend factor.
levels(correctedData$weekday)[levels(correctedData$weekday)!="Saturday" & levels(correctedData$weekday)!="Sunday"]<-"weekday"
levels(correctedData$weekday)[levels(correctedData$weekday)=="Saturday" | levels(correctedData$weekday)=="Sunday"]<-"weekend"
### Panel plot
library(lattice)
intervalDayData <- aggregate(steps ~ interval + weekday, data = correctedData, mean)
xyplot(steps ~ interval | weekday, data=intervalDayData, type = "l", layout = c(1, 2),
       xlab = "Interval", ylab = "Number of steps")
## Compare statistics
tapply(intervalDayData$steps, intervalDayData$weekday, mean)
tapply(intervalDayData$steps, intervalDayData$weekday, median)
tapply(intervalDayData$steps, intervalDayData$weekday, max)
|
359c6ccefe2d556f7b5dd932977a1477d831a93d
|
a249beeec2598922dc69817a68d5bc7e6b1586ab
|
/vignettes/dobtools-vignette.R
|
f0bfdaa7d3e7d29711379347cbec365184ffab52
|
[] |
no_license
|
aedobbyn/dobtools
|
9c9b56241c65d37d318923bd546a03ce5963b43f
|
f63664430648e48f6ded8dade3afe55699c025bf
|
refs/heads/master
| 2021-01-19T21:24:33.469420
| 2019-05-03T21:13:28
| 2019-05-03T21:13:28
| 101,250,864
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
dobtools-vignette.R
|
## ----setup, include = FALSE----------------------------------------------
# knitr chunk defaults for the vignette: collapse source and output into a
# single block and prefix output lines with "#>".
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----eval=FALSE----------------------------------------------------------
# Installation instructions; this chunk is never evaluated (eval=FALSE).
# # install.packages("devtools")
# devtools::install_github("aedobbyn/dobtools")
|
9cea3de0474744d0dba97c6ce89edb5ab820b48c
|
caad1dd61c8683f3e71f4a41d155cd39093994bf
|
/NMR_WORKFLOW.R
|
f5d5eb4e15784818401de1884e431a99c98d6821
|
[] |
no_license
|
talbenhorin/NMR_Sophie
|
c755a64b74ed7ab91db2a67683effe48e5015fb4
|
110b13262c98a2486e21456582d15466a195c87c
|
refs/heads/master
| 2023-01-01T19:01:07.069409
| 2020-10-27T14:04:32
| 2020-10-27T14:04:32
| 307,721,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
NMR_WORKFLOW.R
|
# Rnmr1D workflow setup: locate the example dataset shipped inside the
# installed Rnmr1D package ("extra" directory).
library(Rnmr1D)
data_dir <- system.file("extra", package = "Rnmr1D")
# Raw Bruker spectra directory plus the processing-macro and
# sample-description text files used by the Rnmr1D pipeline.
RAWDIR <- file.path(data_dir, "CD_BBI_16P02")
CMDFILE <- file.path(data_dir, "NP_macro_cmd.txt")
SAMPLEFILE <- file.path(data_dir, "Samples.txt")
|
d2ad67d47c5ca51a5da47777200856a19adae4a6
|
6c897e166c1e717c5fd2495e9d9cf14e0d674eca
|
/PerStoreLM.R
|
47b2febc4bc8d3305f6808020be5324105b18c62
|
[] |
no_license
|
tsuresh83/KaggleRossmanSalesPrediction
|
388a4a704bee87b5c6a3a5c06462b78448bf75ea
|
456bd99dbde455df44ae2c22e47d4afb6b875b85
|
refs/heads/master
| 2021-05-03T13:14:08.524947
| 2017-08-17T21:20:56
| 2017-08-17T21:20:56
| 72,159,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,940
|
r
|
PerStoreLM.R
|
# PerStoreLM: per-store linear / lasso models for the Rossmann Kaggle
# sales-prediction competition.
#
# Pipeline: load train/test/store CSVs -> merge store metadata -> engineer
# date, weekend and Promo2 features -> fit best-subset and lasso models for
# store 1 as a prototype for per-store modelling.
library(xgboost)
library(glmnet)
# FIX: regsubsets() below comes from the leaps package, which the original
# script never attached, so the best-subset step failed at runtime.
library(leaps)
set.seed(13)
startTime <- Sys.time()
scriptName <- "PerStoreLM"
os <- Sys.info()[["sysname"]]
nodename <- Sys.info()[["nodename"]]

# Resolve a path under the Rossmann project root for the current machine.
# The root differs between the "bigtumor" server, macOS, and the Linux box;
# this replaces four copies of the same nested-ifelse logic.
locate <- function(relpath) {
  if (nodename == "bigtumor") {
    file.path("/home/tumor/MLExperimental/rossman", relpath)
  } else if (os == "Darwin") {
    file.path("/Users/sthiagar/Kaggle/rossman", relpath)
  } else {
    file.path("/media/3TB/kag/rossman", relpath)
  }
}

train <- read.csv(locate("data/train.csv"))
print("Training data set loaded...")
store <- read.csv(locate("data/store.csv"))
test <- read.csv(locate("data/test.csv"))
print("Test data set loaded")
outputFolder <- locate("result/")
# Loads the per-store sales distribution object saved by an earlier script.
load(locate("result/salesDistributionPerStore.rdata"))

# Attach store metadata to each observation.
train <- merge(train, store)
test <- merge(test, store)

# Keep only open stores with non-zero sales; closed days carry no signal.
train <- train[which(train$Open == '1'), ]
train <- train[which(train$Sales != '0'), ]

# Decompose the Date column into month/year/day, then drop it.
train$month <- as.integer(format(as.Date(train$Date), "%m"))
train$year <- as.integer(format(as.Date(train$Date), "%Y"))
train$day <- as.integer(format(as.Date(train$Date), "%d"))
# NOTE(review): only column 3 (Date) is removed here; the original comment
# also mentioned StateHoliday but no second column is dropped -- confirm.
train <- train[, -c(3)]
test$month <- as.integer(format(as.Date(test$Date), "%m"))
test$year <- as.integer(format(as.Date(test$Date), "%Y"))
test$day <- as.integer(format(as.Date(test$Date), "%d"))
# Date sits in column 4 of the merged test set (extra Id column).
test <- test[, -c(4)]

# Weekend indicator (DayOfWeek 6 = Saturday, 7 = Sunday).
train$Weekend <- 0
train[train$DayOfWeek %in% c(6, 7), ]$Weekend <- 1

# Recode PromoInterval month-name lists to numeric month lists ("" -> 0).
train$PromoInterval <- as.character(train$PromoInterval)
train[train$PromoInterval == "", ]$PromoInterval <- 0
train[train$PromoInterval == "Feb,May,Aug,Nov", ]$PromoInterval <- "2,5,8,11"
train[train$PromoInterval == "Jan,Apr,Jul,Oct", ]$PromoInterval <- "1,4,7,10"
train[train$PromoInterval == "Mar,Jun,Sept,Dec", ]$PromoInterval <- "3,6,9,12"

# Promo2On: TRUE when the row's month is in the store's Promo2 interval and
# the year is at or after the Promo2 start year.
promo2 <- rep(FALSE, nrow(train))
for (i in 1:12) {
  promo2[grepl(paste0("\\b", i, "\\b"), train$PromoInterval) &
           train$month == i & train$year >= train$Promo2SinceYear] <- TRUE
}
train <- train[, -grep("PromoInterval", colnames(train))]
train <- cbind(train, Promo2On = promo2)

# Same weekend / Promo2 engineering for the test set.
test$Weekend <- 0
test[test$DayOfWeek %in% c(6, 7), ]$Weekend <- 1
test$PromoInterval <- as.character(test$PromoInterval)
test[test$PromoInterval == "", ]$PromoInterval <- 0
test[test$PromoInterval == "Feb,May,Aug,Nov", ]$PromoInterval <- "2,5,8,11"
test[test$PromoInterval == "Jan,Apr,Jul,Oct", ]$PromoInterval <- "1,4,7,10"
test[test$PromoInterval == "Mar,Jun,Sept,Dec", ]$PromoInterval <- "3,6,9,12"
promo2Test <- rep(FALSE, nrow(test))
for (i in 1:12) {
  promo2Test[grepl(paste0("\\b", i, "\\b"), test$PromoInterval) &
               test$month == i & test$year >= test$Promo2SinceYear] <- TRUE
}
test <- test[, -grep("PromoInterval", colnames(test))]
test <- cbind(test, Promo2On = promo2Test)

# Blanket NA imputation with zero (keeps xgboost/glmnet happy downstream).
train[is.na(train)] <- 0
test[is.na(test)] <- 0
feature.names <- names(train)[c(1, 2, 6:ncol(train))]
feature.names

# Prototype: best-subset selection and lasso for store 1 (store 3 kept for
# later comparison). Customers is dropped -- it is unknown at predict time.
store1And3 <- train[train$Store %in% c(1, 3), ]
store1And3 <- store1And3[, -grep("Customers", colnames(store1And3))]
fits <- regsubsets(Sales ~ . - Store, store1And3[store1And3$Store == 1, ],
                   method = "forward")
summ <- summary(fits)
# Lasso over a wide log-spaced lambda grid, with CV to pick the best lambda.
x <- model.matrix(Sales ~ ., data = store1And3[store1And3$Store == 1, ])
y <- store1And3[store1And3$Store == 1, ]$Sales
grid <- 10^seq(10, -2, length = 100)
lassomodel <- glmnet(x, y, alpha = 1, lambda = grid)
cv.out <- cv.glmnet(x, y, alpha = 1)
bestlambda <- cv.out$lambda.min
|
dbad557689f027d9e7170d62454ffe7f5ee3a0cf
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4536_10/rinput.R
|
64470c2a1b4251de289fc22e611ad6bceb070f13
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot a phylogenetic tree for downstream codeml/PAML input.
library(ape)
# Read the Newick tree for gene family 4536 produced upstream.
testtree <- read.tree("4536_10.txt")
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format.
write.tree(unrooted_tr, file="4536_10_unrooted.txt")
|
5a1d5b335e03d605846cd384144760ac00e5bcdb
|
da3112d28186f4000ab3aa285b9be9879da69595
|
/man/surv_table.Rd
|
0edca5dfb9ba49dafc70ec6bd3dfe5c091e11dac
|
[] |
no_license
|
Huaichao2018/rawr
|
08c0456d9822ae9654a11891bc47bf3c7a49b466
|
95857be33dd7128ab5ad48875a66c249191a2bd7
|
refs/heads/master
| 2023-01-19T02:07:17.790103
| 2020-11-26T04:50:44
| 2020-11-26T04:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,574
|
rd
|
surv_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/surv.R
\name{surv_table}
\alias{surv_table}
\title{Summary table}
\usage{
surv_table(
s,
digits = ifelse(percent, 0L, 3L),
times = pretty(s$time),
maxtime = FALSE,
percent = FALSE,
...
)
}
\arguments{
\item{s}{a \code{\link[survival]{survfit}} object}
\item{digits}{number of digits to use in printing numbers}
\item{times}{vector of times}
\item{maxtime}{logical; if \code{TRUE}, adds the maximum time for which an
event occurs; if \code{FALSE}, number of events may not sum to total}
\item{percent}{logical; if \code{TRUE}, percentages are shown instead of
probabilities}
\item{...}{additional arguments passed to \code{\link{summary.survfit}}}
}
\value{
A matrix (or list of matrices) with formatted summaries for each strata; see
\code{\link{summary.survfit}}
}
\description{
Prints a formatted summary table for \code{\link{survfit}} objects.
}
\examples{
library('survival')
fit0 <- survfit(Surv(time, status == 2) ~ 1, data = cancer)
surv_table(fit0, times = 0:2 * 100, maxtime = FALSE)
## also works for list of tables
fit1 <- survfit(Surv(time, status == 2) ~ sex, data = cancer, conf.int = 0.9)
surv_table(fit1)
rawr::combine_table(surv_table(fit1))
s <- `colnames<-`(
surv_table(fit0, times = 0:8 * 100, digits = 2)[, -4],
c('Time', 'No. at risk', 'No. of events', 'Surv (95\% CI)')
)
ht <- htmlTable::htmlTable(s, caption = 'Table: Overall survival.')
structure(ht, class = 'htmlTable')
}
\seealso{
\code{\link{survfit}}; \code{\link{print.summary.survfit}}
}
|
05e9f720cb58203d7534a4d813016e2252da1f13
|
acf901961c3cebee84734b0dc19699f57786f05d
|
/man/nat_palette.Rd
|
0c514c9fdd2536b99b14b939031e4fd52969af33
|
[] |
no_license
|
thomased/natpalette
|
e7040f0dfb93d22340f24f442892dbbbd3824d01
|
a401faaabaa699662ac5c6e60384928a758d6043
|
refs/heads/master
| 2021-01-21T10:04:41.507440
| 2017-10-17T20:25:24
| 2017-10-17T20:25:24
| 83,362,706
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,239
|
rd
|
nat_palette.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cols.R
\name{nat_palette}
\alias{nat_palette}
\title{A 'natural' colour palette generator}
\usage{
nat_palette(name, n)
}
\arguments{
\item{name}{The desired palette. One of:
\itemize{
\item \code{Acripeza}: The mountain katydid \emph{Acripeza reticulata}.
\item \code{Aix}: The mandarin duck \emph{Aix galericulata}.
\item \code{Chrysiridia}: The Madagascan sunset moth \emph{Chrysiridia rhipheus}.
\item \code{Coracias}: The lilac-breasted roller \emph{Coracias caudatus}.
\item \code{Furcifur}: The panther chameleon \emph{Furcifer pardalis}.
  \item \code{Delias}: The Philippine jezebel \emph{Delias henningia}.
\item \code{Maratus}: The peacock spider \emph{Maratus volans}.
\item \code{Synchiropus}: The mandarinfish \emph{Synchiropus splendidus}.
\item \code{Trichoglossus}: The rainbow lorikeet \emph{Trichoglossus haematodus}.
\item \code{Tulipa}: Tulips \emph{Tulipa gesneriana}.
}}
\item{n}{Number of colours. If omitted, uses all colours.}
}
\value{
A vector of colours.
}
\description{
Some fairly impractical colour palettes inspired by nature.
}
\examples{
nat_palette('Maratus')
nat_palette('Tulipa')
}
\keyword{colours}
|
10a28b5728545a3ff41a7c42e4feebba24f2a56e
|
af1d99a02986da336132c449b43e85473a783391
|
/07_junk/script_old.R
|
5f4d84b10308c00de3370db8b224b95052eaaf02
|
[] |
no_license
|
DeAngelisA/RRRRsandbox
|
46a502dcbb6a339ed1204552ac86f2d8afd2cf23
|
829f34606ae885384ba416a0489990016d3a09fd
|
refs/heads/master
| 2023-04-18T03:40:10.164062
| 2021-04-23T13:54:57
| 2021-04-23T13:54:57
| 266,096,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 825
|
r
|
script_old.R
|
# Messy Data Project
# Recodes V-Dem Digital Society Project variables and plots a histogram of
# government dissemination of false information (v2smgovdom).
library(tidyverse)

# FIX: the original path used single backslashes ("C:\Users\..."), which are
# invalid escape sequences in R strings ("\U" fails to parse); use forward
# slashes (or doubled backslashes) on Windows.
setwd("C:/Users/andre/Dropbox/Lavoro/Teaching/1-templates-lessons/messy-tidy-projects/Messy Data Project")
load("DSP_original.Rdata")

DSP_rec <- DSP_Dataset_v1 %>%
  select(country_name, country_text_id, country_id, year, v2smgovdom, v2smregcon, v2smhargr_0, v2smpolsoc) %>%
  mutate(
    country_name = as.factor(country_name),
    # NOTE(review): `gov_fakes` and `polariz` are not among the selected
    # columns; presumably they are objects loaded from DSP_original.Rdata or
    # earlier working names of v2smgovdom / v2smpolsoc -- confirm.
    v2smgovdom = gov_fakes * (-1),
    v2smpolsoc = polariz * (-1)
  )

# Histogram of v2smgovdom with 0.2-wide bins.
fig1 <- ggplot(DSP_rec) +
  geom_histogram(mapping = aes(x = v2smgovdom), binwidth = 0.2) +
  labs(title = "Histogram of government spreading of fakes domestically",
       x = "",
       y = "Count") +
  theme_bw()
fig1

# Save as a 600-dpi LZW-compressed TIFF (5 x 5 inches).
ggsave(plot = fig1,
       filename = "Fig1.tiff",
       device = "tiff", dpi = 600, compression = "lzw",
       height = 5, width = 5, units = "in"
)
|
0297f63a2ae95a371af1b2bce59a580d1c382969
|
d81a869717f6ac0c70799463025118ce11ca8951
|
/knapsack/man/knapsack-package.Rd
|
7f48558094ea0d152b7c0a78560b855648d69e25
|
[] |
no_license
|
zoepatton/Lab6
|
5e14e6abd4186c56478b096cea077a40a7172daa
|
1e52ff2064002c5ab3acf798c18300482072072e
|
refs/heads/main
| 2022-12-31T08:17:47.421984
| 2020-10-26T15:52:35
| 2020-10-26T15:52:35
| 301,396,766
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
rd
|
knapsack-package.Rd
|
\name{knapsack-package}
\alias{knapsack-package}
\alias{knapsack}
\docType{package}
\title{
\packageTitle{knapsack}
}
\description{
\packageDescription{knapsack}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{knapsack}
\packageIndices{knapsack}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{knapsack}
Maintainer: \packageMaintainer{knapsack}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation ~~
~~ directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
65d863e52b8a98f00ea29e44850a1e7badac6db1
|
f5feacda6bcf986bf61cdfa57f5387ed7e651918
|
/man/amean_byelt.Rd
|
c855a7f0d1f2e34e9f026453f0119c48ab32206a
|
[] |
no_license
|
cran/functClust
|
3386c3179bdf9e255bfec00ed8f39b6c3c696da1
|
f7415612fbc0fd749a1da01e822b6217e2b8bb0e
|
refs/heads/master
| 2023-01-20T01:30:18.270906
| 2020-12-02T09:30:02
| 2020-12-02T09:30:02
| 318,755,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,655
|
rd
|
amean_byelt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibrating.R
\name{amean_byelt}
\alias{amean_byelt}
\title{Arithmetic mean
by components occurring within an assembly motif}
\usage{
amean_byelt(fobs, mOccur)
}
\arguments{
\item{fobs}{a numeric vector. The vector \code{fobs} contains the
quantitative performances of assemblages.}
\item{mOccur}{a matrix of occurrence (occurrence of components).
Its first dimension equals to \code{length(fobs)}. Its second dimension
equals to the number of components.}
}
\value{
Return a vector of \code{length(fobs)}.
Its values are computed as the average
of mean performances of assemblages that contain the same components
as the assemblage to calibrate \code{opt.model = "byelt"} .
}
\description{
Take a vector \code{fobs}
of performances of assemblages
that share a same assembly motif,
and return a vector of performances
predicted as the arithmetic mean
of performances of assemblages
that contain the same components as the assemblage to predict. \cr
}
\details{
Modelled performances are computed
using arithmetic mean (\code{opt.mean = "amean"}) of performances.
Assemblages share a same assembly motif.
Modelled performances are the average
of mean performances of assemblages that contain the same components
as the assemblage to calibrate (\code{opt.model = "byelt"}).
This procedure corresponds to a linear model with each assembly motif
based on the component occurrence in each assemblage.
}
\seealso{
\code{\link{amean_byelt}} using arithmetic mean.
\code{\link{gmean_byelt}} using geometric mean.
}
\keyword{internal}
|
e33bccd1caee0d724d4b1043687286bca6f6bb71
|
63621c988789745da5d51c70c52330a28be21b92
|
/GWAS/plots.r
|
1ed58b875154082f7c1fdee0f34c5e2983e20c90
|
[] |
no_license
|
meyer-lab-cshl/Genetic-association-studies
|
90514bc218c0d75b0697d812afa078c4c3d342bd
|
73edc9d464184075908424565b393b5956d8dc0e
|
refs/heads/master
| 2022-08-02T09:56:44.711666
| 2020-05-22T18:46:53
| 2020-05-22T18:46:53
| 116,718,777
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,461
|
r
|
plots.r
|
#' Manhattan plot for genome-wide association studies
#'
#' The results of a genome-wide association study are visualised with the
#' genomic location of the tested genetic variant on the x-axis versus the
#' -log10(p) of its association on the y-axis. Optionally, a line for
#' genome-wide significance can be drawn.
#'
#' @param d [NrSNPs x 3 (+2)] dataframe with columns: i) mandatory columns for
#' [chr] for chromosome, [bp] for chromosome position, [p] for assocation pvalue
#' and ii) optional columns for [snp] variant identifier and [compare] for
#' analysis identifiers (if results from different analyses depicted in same
#' plot). [chr], [bp], [p] have to be numeric, [snp] is character, [compare] a
#' factor.
#' @param chr [character] column name of chromosome column.
#' @param bp [character] column name of chromosome position column.
#' @param p [character] column name of association p-value column.
#' @param snp [character] column name of (optional) variant identifier column.
#' @param compare [character] column name of (optional) compare column.
#' @param compareAnalysis [logical] should different analysis types be compared
#' and depicted in different colors? If so, provide additional factor column
#' [compare] with information about analysis group.
#' @param title [character] optional plot title; NULL for no title.
#' @param min.y [double] minimum y-value to plot.
#' @param max.y [double] maximum y-value to plot.
#' @param is.negLog [logical] indicates that [p] column is already converted to
#' -log10(p-value)
#' @param highlight [character] vector of variant IDs to be highlighted on
#' plot. Variant IDs have to be present in the [snp] column.
#' @param colorHighlight [character] color for highlighted variants.
#' @param color [character] vector of colors for manhattan plot.
#' @param a [double] alpha transparency for points when comparing analyses.
#' @param genomewideline [double] y-value to draw genomewide significance line
#' at; a value of 0/FALSE suppresses the line.
#' @param colorGenomewide [character] colors of genome-wide significance line.
#' @param linetypeGenomewide [integer] linetype of genome-wide significance line.
#' @param size.x.title [integer] size of x-axis title
#' @param size.y.title [integer] size of y-axis title
#' @param size.x.text [integer] size of x-axis text
#' @param size.y.text [integer] size of y-axis text
#' @param size.points [double] size of plotting points
#' @param raster [logical] set to use ggrastr::geom_point_rast for plotting
#' points i.e. rasterising points of plot. Recommended for large numbers of
#' values to plot; allows for saving final plot as .pdf.
#' @return ggplot2 object of manhattan plot.
manhattan <- function(d, chr = "CHR", bp = "BP", p = "P", snp="SNP",
                      compare="TYPE", compareAnalysis=FALSE,
                      title=NULL, max.y="max", min.y="min", is.negLog=FALSE,
                      highlight=NULL, colorHighlight="green",
                      color=c("#67a9cf", "#016c59"), a=0.5,
                      genomewideline=-log10(5e-8), colorGenomewide="gray90",
                      linetypeGenomewide=1,
                      size.x.title=12, size.y.title=12,
                      size.x.text=12, size.y.text=12, size.points=1,
                      raster=TRUE) {
  ## Input validation: required columns must exist and be numeric.
  if (!(chr %in% names(d))) stop(paste("Column", chr, "not found!"))
  if (!(bp %in% names(d))) stop(paste("Column", bp, "not found!"))
  if (!(p %in% names(d))) stop(paste("Column", p, "not found!"))
  if (!is.numeric(d[[bp]])) stop(paste(bp, "column should be numeric."))
  if (!is.numeric(d[[p]])) stop(paste(p, "column should be numeric."))
  if (!is.numeric(d[[chr]])) {
    stop(paste(chr, "column should be numeric. Does your [chr] column",
               "chromsomes in chr1 etc format? Are there 'X', 'Y',",
               " 'MT', etc? If so, change them to numeric encoding."))
  }
  ## Standardise the user-specified column names so downstream code can rely
  ## on fixed names.
  names(d)[names(d) == chr] <- "CHR"
  names(d)[names(d) == bp] <- "BP"
  names(d)[names(d) == p] <- "P"
  if (!is.null(d[[snp]])) {
    names(d)[names(d) == snp] <- "SNP"
  }
  if (!is.null(d[[compare]])) {
    names(d)[names(d) == compare] <- "TYPE"
  }
  d <- na.omit(d)
  if (!is.negLog) {
    ## FIX: the original tested `P < 0 | P >= 1`, which rejected p == 1 and
    ## accepted p == 0 (-log10(0) == Inf), contradicting the stated (0,1].
    if (any(d$P <= 0 | d$P > 1)) stop ("P-values have to be in range (0,1]")
    d <- d[order(d$CHR, d$BP),]
    message("Pvalues are converted to negative log10 pvalues")
    d$logp <- -log10(d$P)
  } else {
    d <- d[order(d$CHR, d$BP),]
    message("log10(p values) are used to depict results")
    d$logp <- d$P
  }
  ## Cumulative genome position: offset each chromosome by the lengths of the
  ## previous ones; an axis tick is placed mid-chromosome.
  ## NOTE(review): the `CHR==i-1` offset assumes chromosomes are numbered
  ## contiguously starting at 1 -- confirm for non-standard inputs.
  d$pos <- NA
  ticks <- NULL
  lastbase <- 0
  numchroms <- length(unique(d$CHR))
  if (numchroms == 1) {
    d$pos <- d$BP
  } else {
    for (i in unique(d$CHR)) {
      if (i == 1) {
        d[d$CHR==i, ]$pos <- d[d$CHR==i, ]$BP
      } else {
        lastbase <- lastbase + max(subset(d, CHR==i-1)$BP)
        d[d$CHR==i, ]$pos <- d[d$CHR==i, ]$BP + lastbase
      }
      ticks <- c(ticks,
                 d[d$CHR==i, ]$pos[floor(length(d[d$CHR==i, ]$pos)/2)+1])
    }
    ticklim <- c(min(d$pos), max(d$pos))
  }
  ## y-axis limits; always extend to at least 8 so the genome-wide
  ## significance line (-log10(5e-8) ~ 7.3) stays visible.
  if (max.y == "max") {
    maxy <- ceiling(max(d$logp))
  } else {
    maxy <- max.y
  }
  if (min.y == "min") {
    miny <- floor(min(d$logp))
  } else {
    miny <- min.y
  }
  if (maxy < 8) {
    maxy <- 8
  }
  mycols <- rep(color, max(d$CHR))
  ## Base plot: a single chromosome uses raw BP on the x-axis; multiple
  ## chromosomes use cumulative positions with one tick per chromosome.
  if (numchroms == 1) {
    p <- ggplot2::ggplot(data=d, ggplot2::aes(x=pos, y=logp))
    if (! raster) {
      p <- p + ggplot2::geom_point(size=size.points)
    } else {
      p <- p + ggrastr::geom_point_rast(size=size.points)
    }
    p <- p + ggplot2::ylab(expression(-log[10](italic(p)))) +
      ggplot2::xlab(paste("Chromosome", unique(d$CHR), "position"))
  } else {
    p <- ggplot2::ggplot(data=d, ggplot2::aes(x=pos, y=logp))
    p <- p + ggplot2::ylab(expression(-log[10](italic(p))))
    p <- p + ggplot2::scale_x_continuous(name="Chromosome", breaks=ticks,
                                         limits=ticklim, expand=c(0.01,0.01),
                                         labels=(unique(d$CHR)))
    p <- p + ggplot2::scale_y_continuous(limits = c(miny, maxy),
                                         expand=c(0.01,0.01))
  }
  if (compareAnalysis) {
    ## FIX: alpha is a constant layer parameter, not a data mapping; the
    ## original put `alpha=a` inside aes(), which creates a spurious alpha
    ## scale instead of setting the transparency to `a`.
    if (!raster) {
      p <- p + ggplot2::geom_point(ggplot2::aes(color=TYPE),
                                   alpha=a, size=size.points)
    } else {
      p <- p + ggrastr::geom_point_rast(ggplot2::aes(color=TYPE),
                                        alpha=a, size=size.points)
    }
    p <- p + ggplot2::scale_colour_manual(values=color)
  } else {
    ## Alternate the palette across chromosomes.
    if (!raster) {
      p <- p + ggplot2::geom_point(ggplot2::aes(color=as.factor(CHR)),
                                   size=size.points)
    } else {
      p <- p + ggrastr::geom_point_rast(ggplot2::aes(color=as.factor(CHR)),
                                        size=size.points)
    }
    p <- p + ggplot2::scale_colour_manual(values=mycols, guide=FALSE)
    p <- p + ggplot2::theme(legend.position="none")
  }
  ## Overlay highlighted variants on top of the base points.
  if (!is.null(highlight)) {
    if (any(!(highlight %in% as.vector(d$SNP)))) {
      warning("SNPs selected for highlighting do not exist in d")
    }
    d.annotate <- d[d$SNP %in% highlight, ]
    p <- p + ggplot2::geom_point(data=d.annotate, colour=I(colorHighlight),
                                 size=size.points)
  }
  ## FIX: the original condition was inverted (`is.null(title)`) and used
  ## theme(title=...), which expects a theme element; the title was therefore
  ## never shown. Use ggtitle() when a title is supplied.
  if (!is.null(title)) {
    p <- p + ggplot2::ggtitle(title)
  }
  p <- p + ggplot2::theme_classic()
  p <- p + ggplot2::theme(
    axis.text.x=ggplot2::element_text(size=size.x.text, colour="grey50"),
    axis.text.y=ggplot2::element_text(size=size.y.text, colour="grey50"),
    axis.title.x=ggplot2::element_text(size=size.x.title, colour="grey50"),
    axis.title.y=ggplot2::element_text(size=size.y.title, colour="grey50"),
    axis.ticks=ggplot2::element_blank()
  )
  ## Draw the genome-wide significance line unless disabled with 0/FALSE.
  if (genomewideline) {
    p <- p + ggplot2::geom_segment(x=min(d$pos), xend=max(d$pos),
                                   y=genomewideline, yend=genomewideline,
                                   colour=colorGenomewide,
                                   linetype=linetypeGenomewide)
  }
  p
}
#' Quantile-quantile plot for genome-wide association studies.
#'
#' The calibration of a genome-wide association study is visualised by
#' depicting the -log10 of the observed p-values versus the -log10 of the
#' expected p-values.
#'
#' @param pvalues vector with [NrSNPs] observed p-values [double].
#' @param ci threshold for confidence interval [double].
#' @param is.negLog [logical] indicates that pvalues are already converted to
#' -log10(p-value).
#' @param highlight vector of length [NrSNPs], with 0/1 indicating if a SNP
#' should be highlighted or not.
#' @param name [character] Title of highlight color scale.
#' @param size.title [int] size of plot title.
#' @param size.text [int] size of plot labels
#' @param raster [logical] set to use ggrastr::geom_point_rast for plotting
#' points i.e. rasterizing points of plot. Recommended for large numbers of
#' values to plot; allows for saving final plot as .pdf.
#' @return ggplot2 object of quantile-quantile plot
qqplot <- function(pvalues, ci=0.95, is.negLog=FALSE,
                   highlight=NULL, name="", size.title=12,
                   size.text=12, raster=TRUE) {
    N <- length(pvalues)
    ## Observed quantiles, sorted most-significant last on the -log10 scale.
    if (is.negLog) {
        observed <- sort(pvalues, decreasing=TRUE)
    } else {
        observed <- -log10(sort(pvalues))
    }
    ## Expected quantiles under the uniform null, with pointwise beta CIs
    ## (order statistics of U(0,1) are Beta(k, N-k+1) distributed).
    ## FIX: the original built this data.frame with `<-` instead of `=`,
    ## which mangled the column names; aes() then only worked because it
    ## silently fell back to variables the `<-` left in the function
    ## environment. seq_len(N) also guards the empty-input case where 1:N
    ## would produce c(1, 0).
    df <- data.frame(
        observed = observed,
        expected = -log10(seq_len(N) / N),
        clower = -log10(qbeta(ci, seq_len(N), N - seq_len(N) + 1)),
        cupper = -log10(qbeta(1 - ci, seq_len(N), N - seq_len(N) + 1))
    )
    if (!is.null(highlight)) {
        df$highlight <- highlight
    }
    xlabel <- expression(Expected~~-log[10](italic(p)))
    ylabel <- expression(Observed~~-log[10](italic(p)))
    ## Confidence band, identity line, then points (optionally colored by
    ## highlight status and/or rasterised).
    p <- ggplot2::ggplot(df)
    p <- p + ggplot2::geom_ribbon(ggplot2::aes(x=expected, ymin=clower,
                                               ymax=cupper), fill="gray90") +
        ggplot2::geom_segment(ggplot2::aes(x=0, y=0, xend=max(df$expected),
                                           yend=max(df$expected)), color="gray10") +
        ggplot2::xlim(0, max(df$expected)) +
        ggplot2::labs(x=xlabel, y=ylabel) +
        ggplot2::theme_bw() +
        ggplot2::theme(axis.title=ggplot2::element_text(size=size.title),
                       axis.text=ggplot2::element_text(size=size.text)
        )
    if (!is.null(highlight)) {
        if (!raster) {
            p <- p + ggplot2::geom_point(ggplot2::aes(x=expected, y=observed,
                                                      color=highlight))
        } else {
            p <- p + ggrastr::geom_point_rast(ggplot2::aes(x=expected,
                                                           y=observed,
                                                           color=highlight))
        }
        p <- p + ggplot2::scale_color_manual(values=c("#32806E","gray10"),
                                             name=name)
    } else {
        if (!raster) {
            p <- p + ggplot2::geom_point(ggplot2::aes(x=expected, y=observed),
                                         col="gray10")
        } else {
            p <- p + ggrastr::geom_point_rast(ggplot2::aes(x=expected,
                                                           y=observed),
                                              col="gray10")
        }
    }
    p
}
|
89ac82827af0153079853c8a2d0164805221e1b0
|
76c3a6c9158747b5d64f0f7aec2a52d036b37ea4
|
/Code/4. Clean WCQ data.R
|
a6bec9ffa8e828e0dface03fa83b0fee4758c0fb
|
[] |
no_license
|
JulianEGerez/WCQ
|
a3106f009e6b80a9128487734b2ffede4f937d3b
|
c2ebab63327cd1993d38a1e7cd4494b74368f858
|
refs/heads/master
| 2020-03-10T23:49:18.699002
| 2019-03-01T22:42:35
| 2019-03-01T22:42:35
| 129,648,858
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,894
|
r
|
4. Clean WCQ data.R
|
# This file cleans WCQ data using regular expressions
# Created by Julian Gerez
# Input: wcq_data, whose second column (`combinedtext`) holds one raw match
# record per row; output adds parsed date, score, team and flag columns.
# Convert from factors to character
wcq_data[,2] <- as.character(wcq_data[,2])
# Read in match dates
# There are three types: DD.MM.YY, DD- M-YY, and DD-MM-YY
# NOTE(review): the unescaped `.` in the first pattern matches ANY character,
# so it can also match DD-MM-YY text; the later NA-stripping paste() appears
# to rely on at most one pattern matching per row -- verify.
wcq_data$date1 <- str_match(wcq_data$combinedtext, "[0-9]{2}.[0-9]{1,2}.[0-9]{2}")
wcq_data$date2 <- str_match(wcq_data$combinedtext, "[0-9]{1,2}- [0-9]{1}-[0-9]{2}")
wcq_data$date3 <- str_match(wcq_data$combinedtext, "[0-9]{1,2}-[0-9]{2}-[0-9]{2}")
# Type-1 dates carry two-digit years; prefix "19" before parsing as %Y.
wcq_data$date1 <- gsub("(\\d{2})$", "19\\1", wcq_data$date1)
wcq_data$date1 <- as.Date(wcq_data$date1, format = "%d.%m.%Y")
wcq_data$date2 <- as.Date(wcq_data$date2, format = "%d- %m-%y")
wcq_data$date3 <- as.Date(wcq_data$date3, format = "%d-%m-%y")
# Combine into one column, then remove concatenated NAs
wcq_data$date <- str_replace(str_replace(str_replace(
  paste(wcq_data$date1, wcq_data$date2, wcq_data$date3), "NA", ""), "NA", ""), "NA", "")
# Remove extraneous columns
wcq_data <- wcq_data[ -c(3:5)]
# Create special circumstances variable (abandonded/anulled/awarded)
# sc = 1 when the raw text flags the match as abandoned/annulled/awarded etc.
wcq_data$sc <- str_match(wcq_data$combinedtext, " [A,a]bd |abandoned|\\[annulled]|nullified|awd|awarded|n\\/p")
wcq_data$sc[is.na(wcq_data$sc)] <- 0
wcq_data$sc[wcq_data$sc!=0] <- 1
# Create et variable
# et = 1 when the match went to extra time ([aet]/[asdet] markers).
wcq_data$et <- str_match(wcq_data$combinedtext, "\\[aet]|aet|\\[asdet]")
wcq_data$et[is.na(wcq_data$et)] <- 0
wcq_data$et[wcq_data$et!=0] <- 1
# Save original combined text, then remove all special circumstances and et and reorder
wcq_data$oldcombinedtext <- wcq_data$combinedtext
wcq_data$combinedtext <- gsub(" [A,a]bd |abandoned|\\[annulled]|nullified|awd|awarded|n\\/p", "", wcq_data$combinedtext)
wcq_data$combinedtext <- gsub("\\[aet]|aet|\\[asdet]", "", wcq_data$combinedtext)
wcq_data <- wcq_data[, c(1,6,2,3,4,5)]
# Read in match scores
# First we need to delete "type 2" and "type 3" dates)
wcq_data$combinedtext <- str_replace(wcq_data$combinedtext, "[0-9]{1,2}. [0-9]{1}.[0-9]{2}", "")
wcq_data$combinedtext <- str_replace(wcq_data$combinedtext, "[0-9]{1,2}-[0-9]{2}-[0-9]{2}", "")
# Now let's extract the scores
wcq_data$scores <- str_match(wcq_data$combinedtext, " [0-9]{1,2}-[0-9]{1,2} ")
# Split scores
# scoreA/scoreB are the two sides' goals, kept as character strings here.
wcq_data$scoreA <- str_split_fixed(wcq_data$scores, "-", 2)[,1]
wcq_data$scoreB <- str_split_fixed(wcq_data$scores, "-", 2)[,2]
# Read in match teams
# Extract raw text
# teamA precedes the score, teamB follows it (up to two words).
wcq_data$teamA <- str_match(wcq_data$combinedtext, "[A-z]*\\.*? +[0-9]{1,2}-[0-9]{1,2}")
wcq_data$teamB <- str_match(wcq_data$combinedtext, "[0-9]{1,2}-[0-9]{1,2} [A-z]* *[A-z]*")
# Clean the scores out
wcq_data$teamA <- str_replace(wcq_data$teamA, " [0-9]{1,2}-[0-9]{1,2}", "")
wcq_data$teamB <- str_replace(wcq_data$teamB, "[0-9]{1,2}-[0-9]{1,2} ", "")
# Further cleaning
# Remove non-match observations
# Rows whose combined date collapsed to a single space hold no match; blank
# them to NA, then keep only complete rows.
wcq_data$date <- gsub("^ $", NA, wcq_data$date)
wcq_data <- wcq_data[complete.cases(wcq_data$date),]
# Reset row names
rownames(wcq_data) <- seq(length=nrow(wcq_data))
# Trim whitespace
wcq_data$teamA <- trimws(wcq_data$teamA)
wcq_data$teamB <- trimws(wcq_data$teamB)
{
# Remove punctuation and other symbols
wcq_data$teamA <- gsub("[[:punct:]]", "", wcq_data$teamA)
wcq_data$teamB <- gsub("[[:punct:]]", "", wcq_data$teamB)
# No teams or scores for sc matches
wcq_data$teamA <- ifelse(wcq_data$sc==1, NA, wcq_data$teamA)
wcq_data$teamB <- ifelse(wcq_data$sc==1, NA, wcq_data$teamB)
wcq_data$scores <- ifelse(wcq_data$sc==1, NA, wcq_data$scores)
wcq_data$scoreA <- ifelse(wcq_data$sc==1, NA, wcq_data$scoreA)
wcq_data$scoreB <- ifelse(wcq_data$sc==1, NA, wcq_data$scoreB)
wcq_data$et <- ifelse(wcq_data$sc==1, NA, wcq_data$et)
# Error with "and"
wcq_data <- wcq_data[-5419,]
rownames(wcq_data) <- seq(length=nrow(wcq_data))
# Fix issue with odd number of spaces
wcq_data$teamB <- gsub(" {2,}[A-z]+$", "", wcq_data$teamB)
# Bosnia-Herzegovina
wcq_data$teamB <- recode(wcq_data$teamB, "Bosnia" = "Bosnia-Herzegovina")
wcq_data$teamA <- recode(wcq_data$teamA, "H" = "Bosnia-Herzegovina")
wcq_data$teamA <- recode(wcq_data$teamA, "Herz" = "Bosnia-Herzegovina")
wcq_data$teamA <- recode(wcq_data$teamA, "Herzegovina" = "Bosnia-Herzegovina")
wcq_data$teamB <- recode(wcq_data$teamB, "Bosnia" = "Bosnia-Herzegovina")
# Costa Rica
wcq_data$teamB <- recode(wcq_data$teamB, "Costa" = "Costa Rica")
# Czech Republic, Czechoslovakia, etc.
wcq_data$teamA <- recode(wcq_data$teamA, "Republic" = "Czech Republic/CSFR")
wcq_data$teamB <- recode(wcq_data$teamB, "Czech Republic" = "Czech Republic/CSFR")
wcq_data$teamA <- recode(wcq_data$teamA, "Czechoslovakia" = "Czech Republic/CSFR")
wcq_data$teamB <- recode(wcq_data$teamB, "Czechoslovakia" = "Czech Republic/CSFR")
# DRC/Zaire
wcq_data$teamA <- recode(wcq_data$teamA, "Zaire" = "DRC/Zaire")
wcq_data$teamB <- recode(wcq_data$teamB, "Zaire" = "DRC/Zaire")
# East Germany
wcq_data$teamB <- recode(wcq_data$teamB, "East Germany" = "German Dem. Rep. (East Germany)")
# Indonesia
wcq_data$teamB <- recode(wcq_data$teamB, "Dutch Indies" = "Indonesia/Nether. Indies")
wcq_data$teamB <- recode(wcq_data$teamB, "Dutch Indies" = "Indonesia/Nether. Indies")
# Russia
wcq_data$teamA <- recode(wcq_data$teamA, "Russia" = "Russia/USSR")
wcq_data$teamB <- recode(wcq_data$teamB, "Russia" = "Russia/USSR")
wcq_data$teamA <- recode(wcq_data$teamA, "Union" = "Russia/USSR")
wcq_data$teamB <- recode(wcq_data$teamB, "Soviet Union" = "Russia/USSR")
# West Germany
wcq_data$teamB <- recode(wcq_data$teamB, "West Germany" = "Germany")
# United Arab Emirates
wcq_data$teamA <- recode(wcq_data$teamA, "UAE" = "United Arab Emirates")
wcq_data$teamB <- recode(wcq_data$teamB, "UAE" = "United Arab Emirates")
# Yugoslavia
wcq_data$teamA <- recode(wcq_data$teamA, "Montenegro" = "Serbia/Yugoslavia")
wcq_data$teamA <- recode(wcq_data$teamA, "Serbia" = "Serbia/Yugoslavia")
wcq_data$teamB <- recode(wcq_data$teamB, "Serbia" = "Serbia/Yugoslavia")
wcq_data$teamA <- recode(wcq_data$teamA, "Yugoslavia" = "Serbia/Yugoslavia")
wcq_data$teamB <- recode(wcq_data$teamB, "Yugoslavia" = "Serbia/Yugoslavia")
# Fix countries that are missing other word
wcq_data$teamA <- recode(wcq_data$teamA, "Africa" = "South Africa")
wcq_data$teamA <- recode(wcq_data$teamA, "Arabia" = "Saudi Arabia")
wcq_data$teamA <- recode(wcq_data$teamA, "Coast" = "Ivory Coast")
wcq_data$teamB <- recode(wcq_data$teamB, "El" = "El Salvador")
wcq_data$teamA <- recode(wcq_data$teamA, "Rica" = "Costa Rica")
wcq_data$teamA <- recode(wcq_data$teamA, "Tobago" = "Trinidad and Tobago")
wcq_data$teamB <- recode(wcq_data$teamB, "Trinidad" = "Trinidad and Tobago")
wcq_data$teamB <- recode(wcq_data$teamB, "Trinidad And" = "Trinidad and Tobago")
wcq_data$teamA <- recode(wcq_data$teamA, "Zealand" = "New Zealand")
wcq_data$teamA <- recode(wcq_data$teamA, "Emirates" = "United Arab Emirates")
wcq_data$teamA <- recode(wcq_data$teamA, "United Arab" = "United Arab Emirates")
# Manually fix troublesome countries
# East Germany
wcq_data[c(153, 157, 245:246, 366, 370, 469:470, 654, 656, 660, 866, 872, 873, 1216:1217, 1455,
1465, 1467, 1472, 1738, 1741, 1744, 1751), 10] <- "German Dem. Rep. (East Germany)"
# Northern Ireland
wcq_data[c(50, 89, 179, 182, 237, 241, 354, 357, 361, 474, 476, 675, 677, 678, 885:886, 888, 1196,
1204, 1209, 1211, 1434, 1437, 1439, 1441, 1787:1788, 1794, 1802, 2090, 2096, 2100, 2103,
2125, 2129, 2719, 2721, 2728, 2730, 2739), 10] <- "Northern Ireland"
# North Korea
wcq_data[c(441, 815:816, 821, 1375, 1381, 1384, 1690, 1692, 2000, 2002, 2003, 2011, 2427:2428, 2430,
2445, 2498, 2501, 2504, 4586, 4589, 4591, 4646, 4648, 4652, 5396, 5441, 5445, 5447, 5497,
5503, 5505, 5509, 6212, 6214, 6218), 10] <- "North Korea"
# South Korea
wcq_data$teamA <- recode(wcq_data$teamA, "Korea" = "South Korea")
# West Germany
wcq_data$teamB <- recode(wcq_data$teamB, "West Germany" = "Germany")
# Trim whitespace
wcq_data$teamA <- trimws(wcq_data$teamA)
wcq_data$teamB <- trimws(wcq_data$teamB)
}
# Rename id column, remove text and score column, reorder the rest of the columns
colnames(wcq_data)[1] <- "id"
wcq_data <- wcq_data[ ,-c(2:3, 7)]
wcq_data <- wcq_data[, c(1,2,7,5,6,8,4,3)]
# Keep just countries that went to the World Cup Finals
wcq_data <- subset(wcq_data, wcq_data$teamA %in% unique(c(wcf_data$teamA, wcf_data$teamB)) |
wcq_data$teamB %in% unique(c(wcf_data$teamA, wcf_data$teamB)) |
wcq_data$sc == 1)
# Save as .csv
if (saveIntermediate == TRUE) { write.csv(wcq_data, file = paste0(directory, "wcq_matchdata.csv"),
row.names = FALSE)
}
|
cf397cf1f83ebec8883ffc956e97cd0f89680dba
|
a57ff02873a753f8cb3c21c3cbbbe4c642aa633e
|
/R/aula_6.R
|
98b1b322118e0ecaf146e70d49dc2eaff4ea3cd1
|
[] |
no_license
|
luhne/segundaa
|
0ee30be0cdd0213606f3c53d73c3b1c0d4d2c658
|
d450276d21a130672a6de143854fdc1b6b28235c
|
refs/heads/master
| 2020-06-20T08:07:12.658353
| 2019-09-05T14:28:06
| 2019-09-05T14:28:06
| 197,053,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,522
|
r
|
aula_6.R
|
# Lesson 6: Exploratory Data Analysis (teaching script).
# Part I illustrates Anscombe's quartet: four x/y pairs with nearly identical
# summary statistics but very different relationships. Part II covers
# descriptive statistics and base graphics using the iris dataset.
install.packages('modleR')#soon!
install.packages('rJava')#soon!
install.packages('raster')#soon!
install.packages('rlang')#soon!
install.packages('dplyr')#soon!
# Fixed typo: the package is 'rgdal' (loaded below), not 'rgdall'
install.packages('rgdal')
library(rlang)
library(modleR)
library(rJava)
library(raster)
library(rgdal)
# === Lesson 6: Exploratory Data Analysis ===
data("anscombe")
# ------ functions to inspect the data ------
dim(anscombe) # data dimensions: number of rows and number of columns
head(anscombe) # first six rows of the data
class(anscombe) # class of the object
str(anscombe) # structure of the object
# ------ selecting data columns / column means ------
mean(anscombe$x1)
mean(anscombe$x2)
mean(anscombe$x3)
mean(anscombe$x4)
# ------ the apply function ------
## the same computation, now in a single line of code
# mean of all the x vectors
apply(anscombe[,1:4], 2, mean) # applies a function over each column of the object
# mean of all the y vectors
apply(anscombe[,5:8], 2, mean)
# ------ Statistical description of the data ------
## variance of the data
apply(anscombe, 2, var) ## applies var over every column of the object
# ---- Understanding the correlation and regression coefficients of the x/y sets
## correlation
cor(anscombe$x1, anscombe$y1)
cor(anscombe$x2, anscombe$y2)
cor(anscombe$x3, anscombe$y3)
cor(anscombe$x4, anscombe$y4)
# ------ regression coefficients ------
## first create objects holding the regressions of the four sets
m1 <- lm(anscombe$y1 ~ anscombe$x1)
m2 <- lm(anscombe$y2 ~ anscombe$x2)
m3 <- lm(anscombe$y3 ~ anscombe$x3)
m4 <- lm(anscombe$y4 ~ anscombe$x4)
## now build a list with all the models to simplify the work
mlist <- list(m1, m2, m3, m4)
## now we can compute the regression coefficients less repetitively
lapply(mlist, coef)
anscombe
##!!! the par function configures the graphics window; see ?par
par(mfrow=c(2,2), # opens a graphics window with 2 rows and 2 columns
    las=1, # draws axis labels horizontally
    bty="l") # L-shaped plot box
plot(anscombe$y1 ~ anscombe$x1) # plot of the variables
abline(mlist[[1]]) # add the line predicted by the regression model
plot(anscombe$y2 ~ anscombe$x2)
abline(mlist[[2]])
# Fixed: the third panel must pair y3 with x3 -- the original plotted y2 ~ x3,
# which does not match the fitted model mlist[[3]] (y3 ~ x3)
plot(anscombe$y3 ~ anscombe$x3)
abline(mlist[[3]])
# Fixed: the fourth panel must pair y4 with x4 -- the original plotted y3 ~ x4
plot(anscombe$y4 ~ anscombe$x4)
abline(mlist[[4]])
par(mfrow=c(1,1)) # restore the graphics window to the default 1 row, 1 column
#======================================//====================================#
# === Part II === // Lesson 6: Exploratory Data Analysis ================#
# load the dataset
head(iris)
summary(iris)
# frequency table
table(iris$Species)
# mean sepal length per species
tapply(X = iris$Sepal.Length, INDEX = list(iris$Species), FUN = mean)
# the same task, performed by another function: different arguments, different output
aggregate(x = iris$Sepal.Length, by = list(iris$Species), FUN = mean)
# still the same task, with the same function but in a different notation
# (removed an accidental duplicate of the Sepal.Length line here)
aggregate(Sepal.Length ~ Species, data=iris, mean)
aggregate(Sepal.Width ~ Species, data=iris, mean)
aggregate(Petal.Length ~ Species, data=iris, mean)
# === Standard deviation ===
tapply(X = iris$Sepal.Length, INDEX = list(iris$Species), FUN = sd)
tapply(X = iris$Sepal.Width, INDEX = list(iris$Species), FUN = sd)
tapply(X = iris$Petal.Length, INDEX = list(iris$Species), FUN = sd)
tapply(X = iris$Petal.Width, INDEX = list(iris$Species), FUN = sd)
# === Matrix + FOR loop ===
# create a matrix with 3 columns (one per species) and 4 rows (one per metric)
medias <- matrix(NA, ncol=3, nrow=4)
# set the column and row names of the matrix
colnames(medias) <- unique(iris$Species)
rownames(medias) <- names(iris)[-5]
for (i in 1:4){
  medias[i,] <- tapply(iris[,i], iris$Species, mean)
}
# === mean
vars <- iris[,-5]
apply(vars, 2, mean)
# === median
apply(vars, 2, median)
# === mode
freq_sl <- sort(table(iris$Sepal.Length), decreasing = TRUE)
freq_sl[1]
# === variance
apply(vars, 2, var)
# === standard deviation
sd01 <- apply(vars, 2, sd)
# another way:
sd02 <- apply(vars, 2, function(x) sqrt(var(x)))
sd01
sd02
sd01==sd02
# === coefficient of variation
cv <- function(x){
  sd(x)/mean(x)*100
}
apply(vars, 2, cv)
# === quantiles / percentiles
# five-number summary
apply(vars, 2, quantile)
# 5%, 50% and 95%
# Fixed: the first probability was 0.5 (duplicated median), contradicting the
# comment above -- it should be 0.05 for the 5th percentile
apply(vars, 2, quantile, probs=c(0.05, 0.5, 0.95))
# === range
# the range function returns the minimum and maximum values
apply(vars, 2, range)
# applying diff to the result of range gives the desired value
# it is good practice never to overwrite an object that already exists in R,
# so never name an object with an already existing name
my_range <- function(x){
  diff(range(x))
}
apply(vars, 2, my_range)
# === Interquartile range
apply(vars, 2, IQR)
# === Correlation
cor(vars)
# ========= Graphics =======
barplot(table(iris$Species))
# - histogram
par(mfrow=c(2,2))
hist(iris$Sepal.Length)
hist(iris$Sepal.Width)
hist(iris$Petal.Length)
# Fixed: the fourth panel should show Petal.Width (Petal.Length was
# accidentally repeated)
hist(iris$Petal.Width)
par(mfrow=c(1,1))
par(mfrow=c(1,2))
hist(iris$Sepal.Width)
hist(iris$Sepal.Width, breaks = 4)
# - density curve
par(mfrow=c(1,2))
hist(iris$Sepal.Width)
hist(iris$Sepal.Width, freq = FALSE)
par(mfrow=c(1,1))
par(mfrow=c(1,2))
# plot of the density curve
plot(density(iris$Sepal.Width))
# plot of the density curve over the density histogram
hist(iris$Sepal.Width, freq = FALSE)
lines(density(iris$Sepal.Width), col="blue") # lines() draws on top of the current plot
par(mfrow=c(1,1))
# - Box plot
boxplot(iris$Sepal.Length)
boxplot(iris$Sepal.Width)
boxplot(iris$Petal.Length)
boxplot(iris$Petal.Width)
# Now let's look at the values by species.
boxplot(Sepal.Length ~ Species, data=iris)
boxplot(Sepal.Width ~ Species, data=iris)
boxplot(Petal.Length ~ Species, data=iris)
boxplot(Petal.Width ~ Species, data=iris)
# check for outliers
boxplot(iris$Sepal.Width)
my_boxplot <- boxplot(iris$Sepal.Width, plot=FALSE)
my_boxplot
# the object is a list, and the outlier values are stored in its $out element
outliers <- my_boxplot$out
# what are the positions of the outliers
which(iris$Sepal.Width %in% outliers)
# use the positions to index the object
iris[which(iris$Sepal.Width %in% outliers), c("Sepal.Width", "Species")]
# check outliers for a specific species
boxplot(Sepal.Width ~ Species, data=iris)
my_boxplot2 <- boxplot(Sepal.Width ~ Species, data=iris, plot=FALSE)
my_boxplot2
# the object is a list, and the outlier values are stored in its $out element
outliers2 <- my_boxplot2$out
# in this case we only want the outliers of the setosa species
# use the positions to index the object
iris[iris$Sepal.Width %in% outliers2 &
       iris$Species=="setosa",
     c("Sepal.Width", "Species")]
# normal distribution (?)
par(mfrow=c(1,3))
qqnorm(iris$Sepal.Length[iris$Species=="setosa"],
       main="setosa")
qqline(iris$Sepal.Length[iris$Species=="setosa"])
qqnorm(iris$Sepal.Length[iris$Species=="versicolor"],
       main="versicolor")
qqline(iris$Sepal.Length[iris$Species=="versicolor"])
qqnorm(iris$Sepal.Length[iris$Species=="virginica"],
       main="virginica")
qqline(iris$Sepal.Length[iris$Species=="virginica"])
par(mfrow=c(1,1))
# relationship between variables
pairs(vars)
# loading the GGally package
## if you do not have the package, use:
# install.packages("GGally")
## if you already have it, just load it
library(GGally)
ggpairs(vars)
|
ca98ab2baa881337f962cb207fe54546d3910d0d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rqPen/examples/groupMultLambda.Rd.R
|
f702cc6965e9bf8461f6785ce8bd7b6538a0e753
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
groupMultLambda.Rd.R
|
# Auto-extracted example code from the rqPen package's groupMultLambda Rd file.
# The example itself is wrapped in \dontrun{} in the Rd source, so every
# executable line below is commented out (##D prefix); only the package load runs.
library(rqPen)
### Name: groupMultLambda
### Title: Quantile Regression with Group Penalty for multiple lambdas
### Aliases: groupMultLambda
### ** Examples
## Not run:
##D x <- matrix(rnorm(400),nrow=100)
##D y <- 1 + x[,1] - 3*x[,3] + rnorm(100)
##D cv_model <- groupMultLambda(x,y,groups=c(rep(1,2),rep(2,2)),lambda=seq(.1,.5,.1))
## End(Not run)
|
0d045a3578644be10fd44f68771271e4929d4267
|
85f6d81be7c6f4f3168939888bc4f7f6e59ee758
|
/factors.r
|
be55194a71e654946580d79154b90c23f2ae92ba
|
[] |
no_license
|
davidgrenier/artOfR
|
65ac328c247fa6f1f5176ee1d318dbb3a4203ee8
|
200e5a56c1e74d7c710ffee701f8ee249e7afcb8
|
refs/heads/master
| 2021-01-01T19:58:55.377827
| 2018-04-13T11:30:40
| 2018-04-13T11:30:40
| 98,737,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,034
|
r
|
factors.r
|
# Teaching script: factors, tapply/split/by, and a small word-concordance demo.
# NOTE(review): relies on "data/abalone.data" and "data/testconcord.txt" being
# present relative to the working directory -- confirm before running.
x <- c(5,12,13,12)
xf <- factor(x)
# xf
# str(xf)
# unclass(xf)
# ?unclass
# attr(xf,"levels")
# length(xf)
# xff <- factor(x,levels=c(5,12,13,88))
xff <- factor(x,c(5,12,13,88))
# xff
# xff[2] <- 88
# xff
# Assigning a value that is not one of the factor's levels yields NA (with a
# warning) -- this line demonstrates that pitfall
xff[2] <- 28
ages <- c(25,26,55,37,21,42)
affils <- c("R","D","D","R","U","D")
# mean age per party affiliation
tapply(ages,affils,mean)
d <- data.frame(gender=c("M","M","F","M","F","F")
                ,age=c(47,59,21,32,33,24)
                ,income=c(55000,88000,32450,67500,123000,45650))
d$over25 <- d$age > 25
# split(d$income,d[c(1,4)])
# tapply(d$income,d[c(1,4)],mean)
# split(d[-3],d[c(1,4)])
#invalid tapply(d[-3],d[c(1,4)],function (x,y) print(x))
aba <- read.csv("data/abalone.data")
# g <- aba$Gender; split(seq(g),g)
# fit one linear model (column 2 ~ column 3) per gender group
byGender <- by(aba,aba$Gender,function (m) lm(m[,2] ~ m[,3]))
fittedF <- byGender$F$fitted.values
# pdf("test.pdf")
# plot(aba[aba$Gender == "F",]$Length,fittedF,pch=c('x','.'))
# dev.off()
# system("xdg-open test.pdf")
# word concordance: read whitespace-separated tokens, then map each distinct
# word to the positions where it occurs, ordered by word frequency
txt <- scan("data/testconcord.txt","")
words <- split(seq(txt),txt)
words[order(sapply(words,length))]
|
a5e761105cca16df09f7bcd4bb19083b9a0d82c9
|
7590a2ceba0efdc130c5d7631617e4d829016d5c
|
/R/pubmedQuery.R
|
d626289fb0e9a2ef8e9aab8d3f7fc2ba7dc1f72d
|
[] |
no_license
|
andymckenzie/bayesbio
|
e52b8bfb46d32d04373a3161f6a9722b47af8e32
|
1389283ba9ac8e1778dd7930af35e719a3baf540
|
refs/heads/master
| 2021-01-17T12:44:10.735034
| 2019-06-11T16:24:17
| 2019-06-11T16:24:17
| 59,575,223
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,866
|
r
|
pubmedQuery.R
|
#' @title Perform PubMed queries on 2x2 combinations of term vectors.
#' @description Perform PubMed queries on the intersections of two character vectors. This function is a wrapper to RISmed::EUtilsSummary with type = 'esearch', db = 'pubmed'.
#' @param rowTerms Character vector of terms that should make up the rows of the resulting mention count data frame.
#' @param colTerms Character vector of terms for the columns.
#' @param sleepTime How much time (in seconds) to sleep between successive PubMed queries. If you set this too low, PubMed may shut down your connection to prevent overloading their servers.
#' @param ... Additional arguments to RISmed::EUtilsSummary
#' @param use Package to use to search PubMed. Options = "rentrez", "RISmed".
#' @return A data frame with one row per (unique) row term and one column per
#'   (unique) column term holding the PubMed hit count for "colTerm AND rowTerm",
#'   plus a final "Total_Mentions" column with the hit count for the row term
#'   alone; rows are sorted by Total_Mentions (ascending).
#' @export
pubmedQuery <- function(rowTerms, colTerms, sleepTime = 0.01, use = "rentrez", ...){
  # requireNamespace() checks availability without attaching the package, so
  # every backend call below must be namespace-qualified (pkg::fun).
  if(use == "RISmed"){
    if (!requireNamespace("RISmed", quietly=TRUE)) stop("Please install package 'RISmed' to use this function.")
  }
  if(use == "rentrez"){
    if (!requireNamespace("rentrez", quietly=TRUE)) stop("Please install package 'rentrez' to use this function.")
  }
  # Collapse duplicates BEFORE sizing the result matrix. (Previously the matrix
  # was allocated first, so deduplication left extra all-zero rows/columns and
  # the rownames()/colnames() assignments below failed on a length mismatch.)
  if(any(duplicated(rowTerms))){
    message("Duplicated entries in rowTerms; collapsing to unique terms.")
    rowTerms = unique(rowTerms)
  }
  if(any(duplicated(colTerms))){
    message("Duplicated entries in colTerms; collapsing to unique terms.")
    colTerms = unique(colTerms)
  }
  # One row per row term, one column per column term, plus a final column for
  # each row term's total mention count.
  disease_gene_mentions = data.frame(matrix(0, nrow = length(rowTerms),
                                            ncol = length(colTerms) + 1))
  # Query each (colTerm AND rowTerm) intersection.
  for(i in seq_along(colTerms)){
    for(j in seq_along(rowTerms)){
      query = paste(colTerms[i], "AND", rowTerms[j], sep = " ")
      # (removed a leftover str(query) debug print here)
      if(use == "RISmed"){
        res = RISmed::EUtilsSummary(query,
                                    type = 'esearch', db = 'pubmed', ...)
        disease_gene_mentions[j, i] = RISmed::QueryCount(res)
        Sys.sleep(sleepTime)
      }
      if(use == "rentrez"){
        # Fixed: entrez_search must be namespace-qualified, since rentrez is
        # only checked via requireNamespace() and may not be attached.
        res = rentrez::entrez_search(db="pubmed", term = query)
        disease_gene_mentions[j, i] = as.numeric(res$count)
        Sys.sleep(sleepTime)
      }
    }
  }
  # Total mentions for each row term on its own.
  # NOTE(review): the original did not forward ... here; that behavior is kept.
  total_res = numeric(length(rowTerms))
  for(j in seq_along(rowTerms)){
    if(use == "RISmed"){
      res = RISmed::EUtilsSummary(rowTerms[j], type = 'esearch', db = 'pubmed')
      total_res[j] = RISmed::QueryCount(res)
    }
    if(use == "rentrez"){
      res = rentrez::entrez_search(db="pubmed", term = rowTerms[j])
      total_res[j] = as.numeric(res$count)
    }
    Sys.sleep(sleepTime)
  }
  rownames(disease_gene_mentions) = rowTerms
  disease_gene_mentions[, length(colTerms) + 1] = total_res
  colnames(disease_gene_mentions) = c(colTerms, "Total_Mentions")
  disease_gene_mentions = disease_gene_mentions[order(disease_gene_mentions[ , length(colTerms) + 1]), ]
  return(disease_gene_mentions)
}
|
1510e4f5c61abe6e2f4f65b65fb67dd46518f0a1
|
0c96f7bf162980f76c31c9b8d498de9652bcb903
|
/1_code/06_logit_v3_mkt_check.R
|
f0cbe5cc087d51fffc564080bf3fbf85a528ec73
|
[] |
no_license
|
Vidogreg/payer_model
|
7c582c3d4a8bd7ae3e147d9a24e426fe1c82ae8e
|
ce85158666ff0febc258bc1080d9ee3f39d678d6
|
refs/heads/master
| 2020-04-11T07:33:36.838323
| 2019-03-14T11:22:06
| 2019-03-14T11:22:06
| 161,610,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,973
|
r
|
06_logit_v3_mkt_check.R
|
# ------------
# Introduction
# ------------
## This file trains logistic model with payment&session data for several register months.
## Rolling-window scheme: each test register month is predicted by a logistic
## model trained on the 3 preceding register months, with K-fold CV used to
## pick a classification cut-off.
NAME <- '06_logit_v3_mkt_check'
# ------------
# Preamble
# ------------
## Imports
# NOTE(review): 00_utils.R is expected to provide packageTest(), the %+%
# string-concatenation operator, makeCrossValCutOff(), and the PROJECT_DIR /
# PROJECT globals used below -- confirm.
source('1_code/00_utils.R')
# packageTest('DBI')
# packageTest('odbc')
packageTest('data.table')
# Load the source dataset only once per session (cached in `dfLoad`)
if(!exists('dfLoad'))
  dfLoad <- data.table(readRDS(file.path(
    '0_data', 'ga_972_payer_dataset_v3.rds'
  )))
## Settings & working directory
setwd(file.path(PROJECT_DIR, PROJECT))
# NOTE(review): randomSeed is defined but set.seed() is never called in this
# file -- presumably consumed inside makeCrossValCutOff(); confirm.
randomSeed <- 1024
resultTableName <- 'ga_972_payer_prediction_v3'
# con <- DBI::dbConnect(
#   odbc::odbc(),
#   Driver = 'MapR Hive ODBC Connector',
#   Host = 'dwh-prod-mapr-master-02',
#   Schema = 'vgregor',
#   UID = 'mapr',
#   PWD = 'mapr',
#   Port = 10000
# )
## Set up pipeline folder if missing
pipeline <- file.path('2_pipeline', NAME)
if (!dir.exists(pipeline)) {
  dir.create(pipeline)
  dir.create(file.path(pipeline, 'out'))
}
# ---------
# Main code
# ---------
## Define and transform dataset
# Keep only marketing-acquired Google Play players and the modelling columns
dfAll <- dfLoad[
  source == "marketing" & register_platform == "google play",
  .(
    player_id,
    register_month,
    tier,
    dx_pay_count,
    dx_payer,
    dx_active_days,
    d0_session_count,
    d1x_session_count,
    d1x_daily_session_count_relative_to_d0,
    dy_payer
  )]
dfAll[, tier := as.numeric(tier)]
dfAll[, dy_payer := factor(dy_payer)]
## Define relevant register months
# The first 3 months are train-only; every month from the 4th onward gets its
# own test model
allRegMonths <- sort(unique(dfAll$register_month))
testRegMonths <- allRegMonths[4:length(allRegMonths)]
## Loop through register months and train relevant models
K <- 5
dfTestList <- list()
for(i in 1:length(testRegMonths)) {
  ## define relevant months
  # test month i corresponds to allRegMonths[i + 3]; train window is the 3
  # months immediately before it
  testRegMonth <- testRegMonths[i]
  trainRegMonths <- allRegMonths[i:(i+2)]
  ## define train-validation & test datasets
  dfTrainVal <- dfAll[
    register_month >= min(trainRegMonths) &
      register_month <= max(trainRegMonths)]
  # drop identifier columns so glm's dy_payer ~ . formula uses features only
  dfTrainVal[, c('player_id', 'register_month') := NULL]
  dfTest <- dfAll[register_month == testRegMonth]
  ## run CV to get optimal cut-off
  cvResult <- makeCrossValCutOff(K = K, data = dfTrainVal)
  optimalCutOff <- round(mean(cvResult$optimal_cutoff), 4)
  ## Train model on full train-validation dataset
  modTrainVal <- glm(
    formula = dy_payer ~ .,
    data = dfTrainVal,
    family = 'binomial'
  )
  ## Calculate predictions on test dataset and write results
  dfTest[, mod_fit := predict.glm(modTrainVal, newdata = dfTest, type = 'response')]
  dfTest[, mod_cut_off := optimalCutOff]
  dfTestList[[i]] <- dfTest
}
## Bind the results together and save it to hive
dfTestFinal <- rbindlist(dfTestList)
fwrite(
  dfTestFinal,
  file = file.path(
    '2_pipeline', NAME, 'out', resultTableName %+% '.csv'
  )
)
# print('Writing results to hive')
# DBI::dbSendQuery(con, 'drop table if exists ' %+% resultTableName)
# DBI::dbWriteTable(
#   con, resultTableName,
#   dfTestFinal[, .(player_id, dy_payer, mod_fit, mod_cut_off)]
# )
# print('Saving done')
|
b70b53a7778e3a7ce840f2b0a519d4a51e54dc93
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SCRT/examples/graph1.Rd.R
|
c3a160e6d0a357225d28e510545fcfe7bca0ad20
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
graph1.Rd.R
|
# Auto-extracted example code from the SCRT package's graph1 Rd file:
# loads the bundled ABAB single-case design dataset and plots it.
library(SCRT)
### Name: graph1
### Title: Graphical representation of single-case data
### Aliases: graph1
### Keywords: Single-case design Graph
### ** Examples
data(ABAB)
graph1(design = "ABAB", data = ABAB)
|
b401e0ae050a51cc20ec93b407b778c98691c643
|
31f4fb31a14842e9f92208e9984628cbc9118416
|
/server-database/README.rd
|
b8584b1e7803bd153331cd85dc71903e21922b12
|
[] |
no_license
|
siyand6777/6
|
5afbfab308be98461cecff61817f878d879d6eca
|
10afd0f1bf67e6cd5c4429a0188b739a98e944f1
|
refs/heads/master
| 2023-03-14T16:05:50.958351
| 2021-03-09T06:16:23
| 2021-03-09T06:16:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
rd
|
README.rd
|
Backend of MyStory:
Methods:
* server.py: server code that implement python server middleware that
connects UI with tensorflow server and database
* server_call_tensorflow.py: part of code that calls tensorflow server
which generates captions given images
- image_processing.py: pre-process the images. ex. resize to square
* run_server.sh: shell file to launch the Tensorflow servers and mongoDB.
  Creates two Docker containers for the Tensorflow servers and starts mongoDB.
* mongo_db.py, confirm.py, delete.py, load.py, login.py, register.py, delete_user.py:
code that manipulate the mongoDB:
- mongo_db.py: basic initializations (create client, databases, etc.)
- confirm.py: confirm a story to be added to the database
- delete.py: remove a story from the database
- load.py: load all stories belong to a user
- login.py: check user login
- register.py: check user registration
- delete_user.py: remove a user from the database
* client.py: A Python client class for unit testing. Note all methods
are mapped to Java Version in the real Mobile app
Singletons and Others:
* tokenizer.npy: a word tokenizer generated from Tensorflow training process
* help.py: a dummy file for special database manipulations (ex. force reset)
* autoencoder_server, inception_server: Tensorflow server models saved
during training
Testing and Logging:
* server.log: log the activity of server access
* test_server.py: PyTest file that sanity checks all methods (with a
Python client.py file)
|
a13850fcec0962b0686a8b22ac7af2ac2f350ce8
|
fc67b3b2035770e894893708a7d9382303a6607f
|
/hist_exp_obs.R
|
5a71bf7edc82a1543bd987f6de34cb4312ea55a4
|
[
"MIT"
] |
permissive
|
ilBegonia666/Order-Book-Modelling
|
72668f0ffa73b52614fce2a3aa405e8362249d1c
|
e93ecf6178591837ff0d6847e754da2fa5a185a9
|
refs/heads/master
| 2023-03-17T12:32:19.337393
| 2019-08-11T16:52:27
| 2019-08-11T16:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,129
|
r
|
hist_exp_obs.R
|
# Plots histogram of exponential next to histogram of observed limit order interarrival times
# NOTE(review): this chunk assumes `limit_interarrival_times` (a nested list),
# loop indices `i` and `j`, and the ggplot2 / gridExtra packages are already in
# scope -- presumably it is run inside a surrounding loop; confirm.
# Simulate from exponential distribution with parameter the inverse of mean limit order interarrival time
# (number of simulations) = (number of limit orders)
simulated_exponential <- rexp(length(limit_interarrival_times[[j]][[i]]),1/mean(limit_interarrival_times[[j]][[i]]))
# Plot the simulated exponential and the actual interarrival times
sim_data <- data.frame(simulated_exponential) # correct form for ggplot
lodata <- data.frame(limit_interarrival_times[[j]][[i]]) # correct form for ggplot
# Following 2 plots only used to find suitable y axis values
# (both use the same 8-bin grid from 0 to a "pretty" multiple of 3x the mean)
plota <- ggplot(sim_data, aes(x=sim_data[,1])) + #sets data to be used
  geom_histogram(breaks = seq(0, pretty(3*mean(limit_interarrival_times[[j]][[i]]))[2], length.out = 9))
plotb <- ggplot(lodata, aes(x=lodata[,1])) + #sets data to be used
  geom_histogram(breaks = seq(0, pretty(3*mean(limit_interarrival_times[[j]][[i]]))[2],length.out = 9))
# Take max y value to be larger of max bin height over both the plots
# (ggplot_build exposes the computed bin counts of an unrendered plot)
ylimit <- max(ggplot_build(plota)$data[[1]][["count"]], ggplot_build(plotb)$data[[1]][["count"]])
# Create the two plots
plot1 <- ggplot(sim_data, aes(x=sim_data[,1])) + #sets data to be used
  geom_histogram(breaks = seq(0, pretty(3*mean(limit_interarrival_times[[j]][[i]]))[2], length.out = 9), col = "black", fill = "blue", alpha = 0.5) + #sets x-axis and colours
  ylim(c(0, ylimit)) + #sets y-axis as max height of bins over both plots
  labs(x = "Simulated exponential interarrival times (s)", y = "Frequency") #sets x and y axis labels
plot2 <- ggplot(lodata, aes(x=lodata[,1])) + #sets data to be used
  geom_histogram(breaks = seq(0, pretty(3*mean(limit_interarrival_times[[j]][[i]]))[2], length.out = 9), col = "black", fill = "blue", alpha = 0.5) + #sets x-axis and colours
  ylim(c(0, ylimit)) + #sets y-axis as max height of bins over both plots
  labs(x = "Observed limit order interarrival times (s)", y = "Frequency") #sets x and y axis labels
# Plot next to each other
grid.arrange(plot1, plot2, ncol = 2)
|
8469d8e1a3183bdaa5a4378cb61a95f7c33ed410
|
0841838ba8723e94b37a1514409a5a9767cbf181
|
/MESA_project/code/haplotype_inference/MESA_haplotypeoverlap.R
|
b4bc9f306ab306032a0dc5f6eb69e8e15b0fe9c4
|
[] |
no_license
|
kelseysumner/taylorlab
|
cfa2358b5c552e7853b111de12940983d081de6a
|
8801f5d32b7f81f2a66b3efd763cc18c5d35f42b
|
refs/heads/master
| 2021-08-07T03:55:06.004801
| 2021-06-20T21:29:08
| 2021-06-20T21:29:08
| 150,612,627
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,012
|
r
|
MESA_haplotypeoverlap.R
|
# ----------------------------------------- #
# MESA Batch 1 Create First Step #
# of Haplotype Tracking Pipeline Sample #
# Summary #
# 21AUG2018 #
# K. Sumner #
# ----------------------------------------- #
# load in packages
library(readr) # for reading in csvs using read_csv (more efficient and better at keeping data format)
library(dplyr) # for left_join function
#### ------------------ AMA -------------------- ####
# read in the track reads through pipeline document to reorganize for matching it up with samples
AMA_track = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/MESA_AMA_trackReadsThroughPipeline.csv")
# add a column with the sample numbers pulled out of the first column
# e.g. "S12_..." -> 12: take the part before the first "_" and drop the leading "S"
split_sample_name = rep(NA,nrow(AMA_track))
for (i in 1:nrow(AMA_track)){
  firstsplit = strsplit(AMA_track$Sample[i],"_")[[1]][1]
  split_sample_name[i] = substring(firstsplit, 2)
}
AMA_track$Sample_order = as.integer(split_sample_name)
# reorder the data set
# NOTE(review): order() here sorts the *character* sample numbers; the
# resulting permutation indices are joined against the integer Sample_order
# column -- confirm this produces the intended S1..Sn ordering
neworder_sample_names = order(split_sample_name)
ordered_data = left_join(data.frame(Sample_order=neworder_sample_names),AMA_track,by="Sample_order")
# export the new ordered data set
write_csv(ordered_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/MESA_AMA_trackReadsThroughPipeline_Ordered.csv")
# paste in Betsy's sequence ID and MESA ID inventory in the new ordered data set
## ----- ##
# read back the data set that Betsy's sequence ID and MESA ID inventory have been merged with
AMA_ID = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/23AUG2018 AMA MESA Update/Prelim Materials/MESA_AMA_trackReadsThroughPipeline_Ordered.csv")
# split the MESA ID column
# mesa_id: zero-pad the numeric prefix of "MESA ID" to 4 digits and prepend
# "MPS"; person: the part after the first "_"
mesa_id = rep(NA,nrow(AMA_ID))
person = rep(NA,nrow(AMA_ID))
for (k in 1:nrow(AMA_ID)){
  part_mesa_id = strsplit(AMA_ID$`MESA ID`[k],"_")[[1]][1]
  if (nchar(part_mesa_id) == 4){
    mesa_id[k] = paste0("MPS",part_mesa_id)
  }
  if (nchar(part_mesa_id) == 3){
    mesa_id[k] = paste0("MPS","0",part_mesa_id)
  }
  if (nchar(part_mesa_id) == 2){
    mesa_id[k] = paste0("MPS","00",part_mesa_id)
  }
  if (nchar(part_mesa_id) == 1){
    mesa_id[k] = paste0("MPS","000",part_mesa_id)
  }
  person[k] = strsplit(AMA_ID$`MESA ID`[k],"_")[[1]][2]
}
AMA_ID$mesa_id_meta_data <- mesa_id
AMA_ID$person_meta_data <- person
# change the name of some of the columns
colnames(AMA_ID)[colnames(AMA_ID) == 'MESA ID'] <- 'lab_mesa_id'
colnames(AMA_ID)[colnames(AMA_ID) == 'MiSeq ID'] <- 'lab_miseq_id'
colnames(AMA_ID)[colnames(AMA_ID) == 'Sample'] <- 'lab_miseq_sample'
colnames(AMA_ID)[colnames(AMA_ID) == 'reads.in'] <- 'raw_reads'
colnames(AMA_ID)[colnames(AMA_ID) == 'reads.out'] <- 'filtered_reads'
colnames(AMA_ID)[colnames(AMA_ID) == 'merged'] <- 'merged_reads'
colnames(AMA_ID)[colnames(AMA_ID) == 'tabled'] <- 'total_tabled_haplotype_reads'
colnames(AMA_ID)[colnames(AMA_ID) == 'nonchim'] <- 'no_chimeras_haplotype_reads'
# make the control columns be indicated
# NOTE(review): rows 512-514 are hard-coded as controls here (and as samples
# S512-S514 below) -- confirm these positions still match the inventory
AMA_ID$mesa_id_meta_data[512] = "Control"
AMA_ID$person_meta_data[512] = "Control"
AMA_ID$mesa_id_meta_data[513] = "Control"
AMA_ID$person_meta_data[513] = "Control"
AMA_ID$mesa_id_meta_data[514] = "Control"
AMA_ID$person_meta_data[514] = "Control"
# reorder the columns in the data set
AMA_ID = AMA_ID %>% select(lab_miseq_sample,lab_miseq_id,lab_mesa_id,mesa_id_meta_data,person_meta_data,raw_reads,filtered_reads,merged_reads,total_tabled_haplotype_reads,no_chimeras_haplotype_reads)
# export the data set
write_csv(AMA_ID,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/23AUG2018 AMA MESA Update/Prelim Materials/MESA_AMA_trackReadsThroughPipeline_Inventory.csv")
## ----- ##
# load in the data set (the haplotypes after chimeras have been removed and haplotypes censored - MESA_AMA_haplotypes_final.rds)
AMA_data = readRDS("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/MESA_AMA_haplotypes_final.rds")
# rename the columns to be a unique haplotype column number
# (haplotype sequences become H1, H2, ... in column order)
newcolnames = c(1:ncol(AMA_data))
pastedcolnames = rep(NA,length(newcolnames))
for (i in 1:length(newcolnames)){
  pastedcolnames[i] = paste0("H",newcolnames[i])
}
colnames(AMA_data) <- pastedcolnames
# remove the rows with the controls
control_list <- c("S512", "S513", "S514")
"%ni%" <- Negate("%in%")
AMA_data = subset(AMA_data, rownames(AMA_data) %ni% control_list)
# export the data set as something easier for others to analyze
saveRDS(AMA_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/AMA_haplotypes/AMA/MESA_AMA_haplotypes_final_clean_column_names.rds")
#### ------------------ CSP -------------------- ####
# read in the track reads through pipeline document to reorganize for matching it up with samples
CSP_track = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/MESA_CSP_trackReadsThroughPipeline.csv")
# add a column with the sample numbers pulled out of the first column
# (sample names look like "S<number>_..."; substring(, 2) drops the "S")
split_sample_name = rep(NA,nrow(CSP_track))
for (i in 1:nrow(CSP_track)){
firstsplit = strsplit(CSP_track$Sample[i],"_")[[1]][1]
split_sample_name[i] = substring(firstsplit, 2)
}
CSP_track$Sample_order = as.integer(split_sample_name)
# reorder the data set
# NOTE(review): order() on a *character* vector sorts lexicographically
# ("10" < "2"); the permutation indices are joined against the integer
# Sample_order column -- confirm this matches the intended ordering.
neworder_sample_names = order(split_sample_name)
ordered_data = left_join(data.frame(Sample_order=neworder_sample_names),CSP_track,by="Sample_order")
# export the new ordered data set
write_csv(ordered_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/MESA_CSP_trackReadsThroughPipeline_Ordered.csv")
# Betsy's sequence ID and MESA ID inventory are then merged with the ordered data set
## ----- ##
# read back the data set that Betsy's sequence ID and MESA ID inventory have been merged with
CSP_ID = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/23AUG2018 CSP MESA Update/MESA_CSP_trackReadsThroughPipeline_Ordered.csv")
# split the MESA ID column
# "MESA ID" has the form "<number>_<person>": build a zero-padded
# "MPSxxxx" id plus the person code. Numeric parts of 0 or >4 characters
# fall through every branch and stay NA.
mesa_id = rep(NA,nrow(CSP_ID))
person = rep(NA,nrow(CSP_ID))
for (k in 1:nrow(CSP_ID)){
part_mesa_id = strsplit(CSP_ID$`MESA ID`[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
mesa_id[k] = paste0("MPS",part_mesa_id)
}
if (nchar(part_mesa_id) == 3){
mesa_id[k] = paste0("MPS","0",part_mesa_id)
}
if (nchar(part_mesa_id) == 2){
mesa_id[k] = paste0("MPS","00",part_mesa_id)
}
if (nchar(part_mesa_id) == 1){
mesa_id[k] = paste0("MPS","000",part_mesa_id)
}
person[k] = strsplit(CSP_ID$`MESA ID`[k],"_")[[1]][2]
}
CSP_ID$mesa_id_meta_data <- mesa_id
CSP_ID$person_meta_data <- person
# change the name of some of the columns (pipeline step names -> readable names)
colnames(CSP_ID)[colnames(CSP_ID) == 'MESA ID'] <- 'lab_mesa_id'
colnames(CSP_ID)[colnames(CSP_ID) == 'MiSeq ID'] <- 'lab_miseq_id'
colnames(CSP_ID)[colnames(CSP_ID) == 'Sample'] <- 'lab_miseq_sample'
colnames(CSP_ID)[colnames(CSP_ID) == 'reads.in'] <- 'raw_reads'
colnames(CSP_ID)[colnames(CSP_ID) == 'reads.out'] <- 'filtered_reads'
colnames(CSP_ID)[colnames(CSP_ID) == 'merged'] <- 'merged_reads'
colnames(CSP_ID)[colnames(CSP_ID) == 'tabled'] <- 'total_tabled_haplotype_reads'
colnames(CSP_ID)[colnames(CSP_ID) == 'nonchim'] <- 'no_chimeras_haplotype_reads'
# make the control columns be indicated
# NOTE(review): assumes the three sequencing controls occupy rows 512-514
# of this particular inventory -- verify if the input file changes.
CSP_ID$mesa_id_meta_data[512] = "Control"
CSP_ID$person_meta_data[512] = "Control"
CSP_ID$mesa_id_meta_data[513] = "Control"
CSP_ID$person_meta_data[513] = "Control"
CSP_ID$mesa_id_meta_data[514] = "Control"
CSP_ID$person_meta_data[514] = "Control"
# reorder the columns in the data set
CSP_ID = CSP_ID %>% select(lab_miseq_sample,lab_miseq_id,lab_mesa_id,mesa_id_meta_data,person_meta_data,raw_reads,filtered_reads,merged_reads,total_tabled_haplotype_reads,no_chimeras_haplotype_reads)
# export the data set
write_csv(CSP_ID,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/23AUG2018 CSP MESA Update/MESA_CSP_trackReadsThroughPipeline_Inventory.csv")
## ----- ##
# load in the data set (the haplotypes after chimeras have been removed and haplotypes censored - MESA_CSP_haplotypes_final.rds)
CSP_data = readRDS("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/MESA_CSP_haplotypes_final.rds")
# rename the columns to be a unique haplotype column number ("H1", "H2", ...)
newcolnames = c(1:ncol(CSP_data))
pastedcolnames = rep(NA,length(newcolnames))
for (i in 1:length(newcolnames)){
pastedcolnames[i] = paste0("H",newcolnames[i])
}
colnames(CSP_data) <- pastedcolnames
# remove the rows with the controls (row names "S512"-"S514")
control_list <- c("S512", "S513", "S514")
"%ni%" <- Negate("%in%")
CSP_data = subset(CSP_data, rownames(CSP_data) %ni% control_list)
# export the data set as something easier for others to analyze
saveRDS(CSP_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/CSP_haplotypes/MESA_CSP_haplotypes_final_clean_column_names.rds")
#### ------------------ HIST B -------------------- ####
# read in the track reads through pipeline document to reorganize for matching it up with samples
HistB_track = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/MESA_HistB_trackReadsThroughPipeline.csv")
# add a column with the sample numbers pulled out of the first column
# (sample names look like "S<number>_..."; substring(, 2) drops the "S")
split_sample_name = rep(NA,nrow(HistB_track))
for (i in 1:nrow(HistB_track)){
firstsplit = strsplit(HistB_track$Sample[i],"_")[[1]][1]
split_sample_name[i] = substring(firstsplit, 2)
}
HistB_track$Sample_order = as.integer(split_sample_name)
# reorder the data set
# NOTE(review): order() on a *character* vector sorts lexicographically
# ("10" < "2"); the permutation indices are joined against the integer
# Sample_order column -- confirm this matches the intended ordering.
neworder_sample_names = order(split_sample_name)
ordered_data = left_join(data.frame(Sample_order=neworder_sample_names),HistB_track,by="Sample_order")
# export the new ordered data set
write_csv(ordered_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/MESA_HistB_trackReadsThroughPipeline_Ordered.csv")
# Betsy's sequence ID and MESA ID inventory are then merged with the ordered data set
## ----- ##
# read back the data set that Betsy's sequence ID and MESA ID inventory have been merged with
HistB_ID = read_csv("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/23AUG2018 HistB MESA Update/MESA_HistB_trackReadsThroughPipeline_Ordered.csv")
# split the MESA ID column
# "MESA ID" has the form "<number>_<person>": build a zero-padded
# "MPSxxxx" id plus the person code. Numeric parts of 0 or >4 characters
# fall through every branch and stay NA.
mesa_id = rep(NA,nrow(HistB_ID))
person = rep(NA,nrow(HistB_ID))
for (k in 1:nrow(HistB_ID)){
part_mesa_id = strsplit(HistB_ID$`MESA ID`[k],"_")[[1]][1]
if (nchar(part_mesa_id) == 4){
mesa_id[k] = paste0("MPS",part_mesa_id)
}
if (nchar(part_mesa_id) == 3){
mesa_id[k] = paste0("MPS","0",part_mesa_id)
}
if (nchar(part_mesa_id) == 2){
mesa_id[k] = paste0("MPS","00",part_mesa_id)
}
if (nchar(part_mesa_id) == 1){
mesa_id[k] = paste0("MPS","000",part_mesa_id)
}
person[k] = strsplit(HistB_ID$`MESA ID`[k],"_")[[1]][2]
}
HistB_ID$mesa_id_meta_data <- mesa_id
HistB_ID$person_meta_data <- person
# change the name of some of the columns (pipeline step names -> readable names)
colnames(HistB_ID)[colnames(HistB_ID) == 'MESA ID'] <- 'lab_mesa_id'
colnames(HistB_ID)[colnames(HistB_ID) == 'MiSeq ID'] <- 'lab_miseq_id'
colnames(HistB_ID)[colnames(HistB_ID) == 'Sample'] <- 'lab_miseq_sample'
colnames(HistB_ID)[colnames(HistB_ID) == 'reads.in'] <- 'raw_reads'
colnames(HistB_ID)[colnames(HistB_ID) == 'reads.out'] <- 'filtered_reads'
colnames(HistB_ID)[colnames(HistB_ID) == 'merged'] <- 'merged_reads'
colnames(HistB_ID)[colnames(HistB_ID) == 'tabled'] <- 'total_tabled_haplotype_reads'
colnames(HistB_ID)[colnames(HistB_ID) == 'nonchim'] <- 'no_chimeras_haplotype_reads'
# make the control columns be indicated
# NOTE(review): assumes the three sequencing controls occupy rows 512-514
# of this particular inventory -- verify if the input file changes.
HistB_ID$mesa_id_meta_data[512] = "Control"
HistB_ID$person_meta_data[512] = "Control"
HistB_ID$mesa_id_meta_data[513] = "Control"
HistB_ID$person_meta_data[513] = "Control"
HistB_ID$mesa_id_meta_data[514] = "Control"
HistB_ID$person_meta_data[514] = "Control"
# reorder the columns in the data set
HistB_ID = HistB_ID %>% select(lab_miseq_sample,lab_miseq_id,lab_mesa_id,mesa_id_meta_data,person_meta_data,raw_reads,filtered_reads,merged_reads,total_tabled_haplotype_reads,no_chimeras_haplotype_reads)
# export the data set
write_csv(HistB_ID,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/23AUG2018 HistB MESA Update/MESA_HistB_trackReadsThroughPipeline_Inventory.csv")
## ------ ##
# load in the data set (the haplotypes after chimeras have been removed and haplotypes censored - MESA_HistB_haplotypes_final.rds)
HistB_data = readRDS("/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/MESA_HistB_haplotypes_final.rds")
# rename the columns to be a unique haplotype column number ("H1", "H2", ...)
newcolnames = c(1:ncol(HistB_data))
pastedcolnames = rep(NA,length(newcolnames))
for (i in 1:length(newcolnames)){
pastedcolnames[i] = paste0("H",newcolnames[i])
}
colnames(HistB_data) <- pastedcolnames
# remove the rows with the controls (row names "S512"-"S514")
control_list <- c("S512", "S513", "S514")
"%ni%" <- Negate("%in%")
HistB_data = subset(HistB_data, rownames(HistB_data) %ni% control_list)
# export the data set as something easier for others to analyze
saveRDS(HistB_data,"/Users/kelseysumner/Desktop/Meshnick Lab/Steve Taylor's Lab/Webuye MESA Sequence Data/Mapped Cut Reads/HistB_haplotypes/MESA_HistB_haplotypes_final_clean_column_names.rds")
|
b88cd289393b0c484e05f0a410f46989080037ee
|
451d76b27e3da30c7e5a6295e0cc289735972eb2
|
/02_05082020/starter_project.R
|
686dfcda0f2045184733d3ba76b7281b985bc681
|
[] |
no_license
|
seluccaajay/R_programming
|
f9f16f5822d14b3d07b121c94fbe41920393b251
|
2410c797bb9585e177e23b0b0ad595b758f1de27
|
refs/heads/master
| 2022-11-28T05:26:06.157863
| 2020-08-10T17:52:42
| 2020-08-10T17:52:42
| 284,708,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
starter_project.R
|
## Starter Project: loading our own dataset from an Excel workbook
library(readxl)  # read_excel() handles .xls/.xlsx files

# Import the Tamil Nadu COVID-19 workbook (path is machine-specific)
covid_data <- read_excel("C:\\Users\\Ajay Sen Riti\\Desktop\\Files and Shortcuts\\DataSet\\tamil_nadu_covid19.xls")

# First look at the data: leading/trailing rows, structure, column names
print(head(covid_data))
print(tail(covid_data))
str(covid_data)
print(names(covid_data))

# Inspect individual columns of interest
print(covid_data$day_tested)
print(head(covid_data$day_confirmed))
|
a897241fb21a672b0885bae841375cefeffa172d
|
2d945f0e9167a3c9ba0ebe9071c4db38f2a6141c
|
/tests/testthat/test-function-to_subdir_matrix.R
|
79ae8bf0026e7ca8a17e766e2e2649d7d18899a9
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.file
|
af12a91038d3f2aa81774d1856714adda0c56b8d
|
87a026edc457445dd3a0e5f03d6072bd845e86c3
|
refs/heads/master
| 2022-06-17T14:00:33.014904
| 2021-12-16T12:26:10
| 2021-12-16T12:26:10
| 160,693,673
| 0
| 0
|
MIT
| 2021-12-16T12:26:11
| 2018-12-06T15:24:43
|
R
|
UTF-8
|
R
| false
| false
| 322
|
r
|
test-function-to_subdir_matrix.R
|
# testthat is attached by the test runner, so no library() call is needed.
test_that("to_subdir_matrix() works", {
  to_subdir_matrix <- kwb.file:::to_subdir_matrix

  # Calling without arguments must fail
  expect_error(to_subdir_matrix())

  # Three nested paths expand to a 3 x 3 subdirectory matrix
  nested_paths <- c("a", "a/b", "a/b/c")
  expect_identical(dim(to_subdir_matrix(nested_paths)), c(3L, 3L))

  # Both internal methods must agree, and the list result has one
  # element per input path
  expect_identical(
    to_subdir_matrix(nested_paths, method = 1),
    to_subdir_matrix(nested_paths, method = 2)
  )
  expect_length(to_subdir_matrix(nested_paths, result_type = "list"), 3L)
})
|
aa152e7998b3a639b84d7ba499e5c5ed80f5b0a0
|
fefbb6395288d873f8d733d2a8b4575aaf411c15
|
/cachematrix.R
|
2fe00e04db7ee336eaff0ee3a95588e7faa02ab8
|
[] |
no_license
|
soshim/ProgrammingAssignment2
|
05f9c10807a02446b83de771229c2a162ab9ae07
|
c45cd8beecb82f699d7546aea674ce82ca2740bb
|
refs/heads/master
| 2020-12-25T10:50:02.715518
| 2015-03-17T17:21:47
| 2015-03-17T17:21:47
| 32,341,572
| 0
| 0
| null | 2015-03-16T17:30:55
| 2015-03-16T17:30:54
| null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
cachematrix.R
|
###########################################################################
# In this file, two R functions, makeCacheMatrix and cacheSolve are defined.
# The former creates matrix data structure and the latter calculates
# its inverse matrix.
# As the inverse calculation is time-consuming, once the inverse is calculated,
# its value is cached until the contents of the matrix is changed.
###########################################################################
#--------------------------------------------------------------------------
# makeCacheMatrix creates a special "matrix": a list of four closures that
# share the matrix `x` and its cached inverse `i`:
#   1. set(y)        store a new matrix and invalidate the cached inverse
#   2. get()         return the stored matrix
#   3. setinverse(v) cache the inverse
#   4. getinverse()  return the cached inverse (NULL if not yet computed)
#--------------------------------------------------------------------------
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL  # old inverse no longer matches the new matrix
  }
  get <- function() x
  setinverse <- function(inv) i <<- inv
  # BUG FIX: the original declared an unused `inv` argument here
  # (copy-paste from setinverse); the getter takes no arguments.
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#--------------------------------------------------------------------------
# cacheSolve returns the inverse of a special "matrix" built with
# makeCacheMatrix. A cached inverse is returned directly (after printing
# a message); otherwise the inverse is computed with solve(), stored in
# the cache, and returned.
#--------------------------------------------------------------------------
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
7b88fe43563db82dd6443895eba6fd02fc77f0f7
|
451583f87000130c407048e48e9d8367fa16bb5b
|
/sourceFiles/simulationCodes.R
|
e70bc061d51c99107352fd3fa7b087bff00b2dd7
|
[] |
no_license
|
yangxhcaf/meanVarianceCausality
|
d802dfc83fe06e9300e4264552790728f30f9b3c
|
c603d9d061d0d4598ff1250ab5114e2fa074676e
|
refs/heads/master
| 2022-04-21T16:22:18.022999
| 2020-03-06T00:40:31
| 2020-03-06T00:40:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,033
|
r
|
simulationCodes.R
|
####################
## This is the main call function that performs the simulation for the paper
##
## simEmpiricalSize() performs the simulations for the ARMA(1,1)+GARCH(1,1) simulations
## in the paper. If we specify alternative != 0, it is technically computing power
##
##
## The second function is the call function for the VAR(p)+(marginal) GARCH(1,1) simulations.
##
##
## In each function we specify the sample size n
## the maximum lag M
## The number of replications, run.times
## The number of bootstrap samples, boot.runs
## alternative, which specifies if an alternative hypothesis is true or not (0)
## one.way, whether a one-sided test should be performed for causality.
##
## Lastly, we have the function sampleSizeDetermination() which calculares the emprical size
## as a function of the sample size n
##
simEmpiricalSize <- function(cluster=NULL, n=250, M=c(5), run.times=1000, boot.runs=1000, alternative=0, one.way="no") {
  # Run `run.times` ARMA(1,1)+GARCH(1,1) replications of sample size `n`
  # and tabulate the empirical rejection rates of the causality tests.
  #
  # Args:
  #   cluster:     optional parallel cluster, forwarded to do.one.simulation.
  #   n:           sample size of each simulated series.
  #   M:           vector of maximum lags; each lag contributes a group of
  #                4 columns (one per significance level) to the result.
  #   run.times:   number of Monte Carlo replications.
  #   boot.runs:   number of bootstrap samples per replication.
  #   alternative: 0 computes empirical size (null true); non-zero -> power.
  #   one.way:     "no" for a two-sided causality test.
  # Returns: 20 x (4 * length(M)) matrix of rejection rates for the 20 test
  #   statistics at the 10% / 5% / 1% / 0.1% levels.
  cat("************************************\n", file=error.file)
  cat(" Error Log for parameters\n\n", file=error.file, append=TRUE)
  cat(" n: ", n, "\n", file=error.file, append=TRUE)
  cat(" M: ", M, "\n", file=error.file, append=TRUE)
  cat(" run.times: ", run.times, "\n", file=error.file, append=TRUE)
  cat(" boot.runs: ", boot.runs, "\n", file=error.file, append=TRUE)
  cat("alternative: ", alternative, "\n", file=error.file, append=TRUE)
  cat("************************************\n\n", file=error.file, append=TRUE)
  N <- rep(n, run.times)
  out <- sapply(N, do.one.simulation, cluster=cluster, M=M, alternative=alternative, boot.run=boot.runs, one.way=one.way)
  compare <- function(x, sig=0.05) { mean(x < sig) }
  # `out` has 20 rows of p-values per element of M. Loop over significance
  # levels and M-blocks instead of the original copy-pasted assignments,
  # which hard-coded tmp[1:20]/tmp[21:40] and silently assumed
  # length(M) <= 2.
  sig_levels <- c(0.10, 0.05, 0.01, 0.001)
  sig_names <- c("10% Sig", " 5% Sig", " 1% Sig", "0.1% Sig")
  n_stats <- 20  # number of test statistics per lag value
  tab <- matrix(0, nrow=n_stats, ncol=length(sig_levels) * length(M))
  for (j in seq_along(sig_levels)) {
    rejection <- apply(out, 1, compare, sig=sig_levels[j])
    for (m in seq_along(M)) {
      rows <- ((m - 1) * n_stats + 1):(m * n_stats)
      tab[, (m - 1) * length(sig_levels) + j] <- rejection[rows]
    }
  }
  colnames(tab) <- rep(sig_names, times=length(M))
  rownames(tab) <- c("Boot BP Mean", "Boot BP Var",
                     "Boot LB Mean", "Boot LB Var",
                     "Boot WLB Mean", "Boot WLB Var",
                     "Boot Dan Mean", "Boot Dan Var",
                     "Boot Mat Mean", "Boot Mat Var",
                     "Theo BP Mean", "Theo BP Var",
                     "Theo LB Mean", "Theo LB Var",
                     "Theo WLB Mean", "Theo WLB Var",
                     "Theo Dan Mean", "Theo Dan Var",
                     "Theo Mat Mean", "Theo Mat Var")
  tab
}
simEmpiricalVARSize <- function(cluster=NULL, n=250, M=c(5), run.times=1000, boot.runs=1000, alternative=0, one.way="no") {
  # Run `run.times` VAR(p)+(marginal) GARCH(1,1) replications of sample
  # size `n` and tabulate empirical rejection rates of the causality tests.
  #
  # Args: identical to simEmpiricalSize(); the only difference is that each
  #   replication is produced by do.one.var.simulation().
  # Returns: 20 x (4 * length(M)) matrix of rejection rates for the 20 test
  #   statistics at the 10% / 5% / 1% / 0.1% levels.
  cat("************************************\n", file=error.file)
  cat(" Error Log for parameters\n\n", file=error.file, append=TRUE)
  cat(" n: ", n, "\n", file=error.file, append=TRUE)
  cat(" M: ", M, "\n", file=error.file, append=TRUE)
  cat(" run.times: ", run.times, "\n", file=error.file, append=TRUE)
  cat(" boot.runs: ", boot.runs, "\n", file=error.file, append=TRUE)
  cat("alternative: ", alternative, "\n", file=error.file, append=TRUE)
  cat("************************************\n\n", file=error.file, append=TRUE)
  N <- rep(n, run.times)
  out <- sapply(N, do.one.var.simulation, cluster=cluster, M=M, alternative=alternative, boot.run=boot.runs, one.way=one.way)
  compare <- function(x, sig=0.05) { mean(x < sig) }
  # Generalized table assembly (the original hard-coded tmp[1:20]/tmp[21:40]
  # and silently assumed length(M) <= 2).
  sig_levels <- c(0.10, 0.05, 0.01, 0.001)
  sig_names <- c("10% Sig", " 5% Sig", " 1% Sig", "0.1% Sig")
  n_stats <- 20  # number of test statistics per lag value
  tab <- matrix(0, nrow=n_stats, ncol=length(sig_levels) * length(M))
  for (j in seq_along(sig_levels)) {
    rejection <- apply(out, 1, compare, sig=sig_levels[j])
    for (m in seq_along(M)) {
      rows <- ((m - 1) * n_stats + 1):(m * n_stats)
      tab[, (m - 1) * length(sig_levels) + j] <- rejection[rows]
    }
  }
  colnames(tab) <- rep(sig_names, times=length(M))
  rownames(tab) <- c("Boot BP Mean", "Boot BP Var",
                     "Boot LB Mean", "Boot LB Var",
                     "Boot WLB Mean", "Boot WLB Var",
                     "Boot Dan Mean", "Boot Dan Var",
                     "Boot Mat Mean", "Boot Mat Var",
                     "Theo BP Mean", "Theo BP Var",
                     "Theo LB Mean", "Theo LB Var",
                     "Theo WLB Mean", "Theo WLB Var",
                     "Theo Dan Mean", "Theo Dan Var",
                     "Theo Mat Mean", "Theo Mat Var")
  tab
}
simEmpiricalTrivialSize <- function(cluster=NULL, n=250, M=c(5), run.times=1000, boot.runs=1000, one.way="no") {
  # Run `run.times` replications of the "trivial" (null) data-generating
  # process of sample size `n` and tabulate empirical rejection rates.
  #
  # Args: as simEmpiricalSize() but with no `alternative` parameter; each
  #   replication is produced by do.one.trivial.simulation().
  # Returns: 20 x (4 * length(M)) matrix of rejection rates for the 20 test
  #   statistics at the 10% / 5% / 1% / 0.1% levels.
  cat("************************************\n", file=error.file)
  cat(" Error Log for parameters\n\n", file=error.file, append=TRUE)
  cat(" n: ", n, "\n", file=error.file, append=TRUE)
  cat(" M: ", M, "\n", file=error.file, append=TRUE)
  cat(" run.times: ", run.times, "\n", file=error.file, append=TRUE)
  cat(" boot.runs: ", boot.runs, "\n", file=error.file, append=TRUE)
  cat("************************************\n\n", file=error.file, append=TRUE)
  N <- rep(n, run.times)
  out <- sapply(N, do.one.trivial.simulation, cluster=cluster, M=M, boot.run=boot.runs, one.way=one.way)
  compare <- function(x, sig=0.05) { mean(x < sig) }
  # Generalized table assembly (the original hard-coded tmp[1:20]/tmp[21:40]
  # and silently assumed length(M) <= 2).
  sig_levels <- c(0.10, 0.05, 0.01, 0.001)
  sig_names <- c("10% Sig", " 5% Sig", " 1% Sig", "0.1% Sig")
  n_stats <- 20  # number of test statistics per lag value
  tab <- matrix(0, nrow=n_stats, ncol=length(sig_levels) * length(M))
  for (j in seq_along(sig_levels)) {
    rejection <- apply(out, 1, compare, sig=sig_levels[j])
    for (m in seq_along(M)) {
      rows <- ((m - 1) * n_stats + 1):(m * n_stats)
      tab[, (m - 1) * length(sig_levels) + j] <- rejection[rows]
    }
  }
  colnames(tab) <- rep(sig_names, times=length(M))
  rownames(tab) <- c("Boot BP Mean", "Boot BP Var",
                     "Boot LB Mean", "Boot LB Var",
                     "Boot WLB Mean", "Boot WLB Var",
                     "Boot Dan Mean", "Boot Dan Var",
                     "Boot Mat Mean", "Boot Mat Var",
                     "Theo BP Mean", "Theo BP Var",
                     "Theo LB Mean", "Theo LB Var",
                     "Theo WLB Mean", "Theo WLB Var",
                     "Theo Dan Mean", "Theo Dan Var",
                     "Theo Mat Mean", "Theo Mat Var")
  tab
}
# Compute empirical size of the ten "theoretical" tests as a function of the
# sample size n (no bootstrap). Returns a list of four 10 x length(n)
# matrices, one per significance level (10%, 5%, 1%, 0.1%).
sampleSizeDetermination <- function(cluster=NULL, n=seq(100,500,100), M=NULL, run.times=1000, one.way="no") {
# proportion of p-values below the significance threshold
compare <- function(x, sig=0.05) { mean(x<sig) }
tab1 <- matrix(nrow=10, ncol=length(n), 0)
tab2 <- matrix(nrow=10, ncol=length(n), 0)
tab3 <- matrix(nrow=10, ncol=length(n), 0)
tab4 <- matrix(nrow=10, ncol=length(n), 0)
n.names <- rep(0, length(n) )
for(i in 1:length(n) ) {
# NOTE(review): when M is NULL it is set from n[1] on the first pass and
# is then no longer NULL, so every later sample size reuses
# round(log(n[1])). If M was meant to track each n (M = round(log(n[i]))),
# this needs a per-iteration variable -- confirm intent before changing.
if(is.null(M) )
M <- round(log(n[i]))
N <- rep(n[i], run.times)
# run serially, or spread the replications over the cluster if provided
if(is.null(cluster)) {
out <- sapply(N, do.one.trivial.noboot.simulation, M=M, one.way=one.way)
} else {
out <- parSapply(cl=cluster, X=N, FUN=do.one.trivial.noboot.simulation,
M=M, one.way=one.way )
}
# rejection rates at the four significance levels for this sample size
tab1[,i] <- apply(out, 1, compare, sig=0.10)
tab2[,i] <- apply(out, 1, compare, sig=0.05)
tab3[,i] <- apply(out, 1, compare, sig=0.01)
tab4[,i] <- apply(out, 1, compare, sig=0.001)
n.names[i] <- paste("n=",n[i], sep="")
}
# identical dimnames on all four tables
rownames(tab4) <- c("Theo BP Mean", "Theo BP Var",
"Theo LB Mean", "Theo LB Var",
"Theo WLB Mean", "Theo WLB Var",
"Theo Dan Mean", "Theo Dan Var",
"Theo Mat Mean", "Theo Mat Var")
rownames(tab1) <- rownames(tab2) <- rownames(tab3) <- rownames(tab4)
colnames(tab4) <- n.names
colnames(tab1) <- colnames(tab2) <- colnames(tab3) <- colnames(tab4)
list(tab1, tab2, tab3, tab4)
}
|
dbc6117c0d7a1abc87a9d5d57bee51c0cedaa143
|
69b49ce61413bc8190227621b0aa8dfaf951a048
|
/src/Concerto/TestBundle/Resources/R/concerto5/man/concerto-package.Rd
|
0628d7d48ee37cf2d004e2027b4adcd6e3e39fb4
|
[
"Apache-2.0"
] |
permissive
|
campsych/concerto-platform
|
de926ae820f2a3cf6985598f3824dee8f4615232
|
988b67e8d52acbf25fdc9078e7592cc07d2dd9a3
|
refs/heads/master
| 2023-08-31T08:09:05.570628
| 2023-08-23T16:43:03
| 2023-08-23T16:43:03
| 55,242,761
| 164
| 109
|
Apache-2.0
| 2023-07-26T15:10:48
| 2016-04-01T15:34:25
|
PHP
|
UTF-8
|
R
| false
| false
| 518
|
rd
|
concerto-package.Rd
|
\name{concerto5-package}
\alias{concerto5-package}
\docType{package}
\title{
Package for Concerto platform test logic R code.
}
\description{
Contains all required functions by Concerto platform.
}
\details{
\tabular{ll}{
Package: \tab concerto5\cr
Type: \tab Package\cr
Version: \tab 0.32.0\cr
Date: \tab 2021-01-28\cr
License: \tab Apache-2.0\cr
}
}
\author{
Przemyslaw Lis
Maintainer: Przemyslaw Lis <pl362@cam.ac.uk>
}
\references{
}
\keyword{package}
\seealso{
}
\examples{
}
|
0dc422e069dacedc84005cd67c77c5cdd5912f14
|
6911c5385656f2a310ba5f44c8867f45ad02119d
|
/Mid-5.R
|
51c1047568690b4d53cf50b88f666ed5a9efecbe
|
[] |
no_license
|
SuperLouV/CS513
|
ee4dee226a074e635b301dd545c29f3d138015e5
|
31d048fc254961ddf8f9c99d59ecafb9ceb494b8
|
refs/heads/master
| 2022-05-20T23:52:05.757226
| 2020-04-30T22:39:03
| 2020-04-30T22:39:03
| 260,332,614
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,237
|
r
|
Mid-5.R
|
# Course : Data Mining
# First Name : Yilin
# Last Name : Lou
# Id : 10445676
# Project : Midterm O5
## remove all objects (kept from the original; clearing the workspace in a
## script is generally discouraged)
rm(list=ls())
# naiveBayes() comes from e1071; load it explicitly instead of relying on
# the package already being attached
library(e1071)
# load the file; "?" marks missing values and MaritalStatus is categorical
data<-read.csv("COVID19_v3.csv",header = TRUE,na.strings = "?",colClasses = c("MaritalStatus"="factor"))
# Remove the missing values
data<-na.omit(data)
# Discretize "MonthAtHospital" into "less than 6 months" and "6 or more months".
# Work from a numeric copy: assigning a label coerces the whole column to
# character, after which numeric comparisons on the column itself would
# silently become lexicographic string comparisons.
months_num <- data$MonthAtHospital
data$MonthAtHospital[months_num >= 6] <- "6 or more months"
data$MonthAtHospital[months_num < 6] <- "less than 6 months"
# Also discretize the age into "less than 35", "35 to 50" and "51 or over".
# BUG FIX: the original compared the already-relabelled character column and
# used `> 51`, so age 51 (and any 3-digit age) was never labelled; using a
# numeric copy and `> 50` makes the categories exhaustive for whole-year ages.
age_num <- data$Age
data$Age[age_num < 35] <- "less than 35"
data$Age[age_num >= 35 & age_num <= 50] <- "35 to 50"
data$Age[age_num > 50] <- "51 or over"
## Use 30% test 70% training
idx<-sort(sample(nrow(data),as.integer(.70*nrow(data))))
train<-data[idx,]
test<-data[-idx,]
# fit a naive Bayes classifier for Infected and predict on the hold-out set
nb=naiveBayes(Infected~., data=train)
category_nb<-predict(nb,test)
# confusion table: predictions vs. observed labels
table(NBayes=category_nb,Infected=test$Infected)
NB_right<-sum(category_nb==test$Infected)
# compute the accuracy rate
NB_right_rate<-NB_right/length(category_nb)
print(paste0('Accuracy Percentage: ', NB_right_rate*100))
|
8d1a44bad97a8b0a5eefba782b29177d3274c0b8
|
77c74ad76727bf22c4d2d678706d8e6cc3164d2f
|
/rankhospital.R
|
2629eebd6c89bb579335127aa89489072f986add
|
[] |
no_license
|
aryan-shrivastava09/datasciencecoursera
|
f940a7f8047a823eeacbcef3ce41fac4cda2344b
|
3c660abcc62d2f0b6bb960ed5804b40665d68af2
|
refs/heads/master
| 2022-10-24T01:04:57.736892
| 2020-06-17T14:17:10
| 2020-06-17T14:17:10
| 261,231,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 637
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, rank = "best") {
  # Return the name of the hospital with the given rank for 30-day mortality
  # on `outcome` within `state`, read from "outcome-of-care-measures.csv"
  # in the working directory.
  #
  # Args:
  #   state:   two-letter state abbreviation (must appear in the data).
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  #   rank:    integer rank (1 = lowest mortality), or "best"/"worst".
  # Returns: hospital name (character); NA if `rank` exceeds the number of
  #   hospitals with available data. Ties are broken alphabetically.
  #
  # Fixes vs. the original: validates state/outcome, excludes hospitals
  # whose rate is "Not Available" from the ranking, supports "best"/"worst",
  # and uses base order() instead of dplyr::arrange() (which was called
  # without library(dplyr)).
  outcome_cols <- c("heart attack" = "Heart.Attack",
                    "heart failure" = "Heart.Failure",
                    "pneumonia" = "Pneumonia")
  if (!outcome %in% names(outcome_cols)) stop("invalid outcome")
  data <- read.csv("outcome-of-care-measures.csv")
  if (!state %in% data$State) stop("invalid state")
  datastate <- subset(data, subset = State == state)
  outcomecolumn <- paste("Hospital.30.Day.Death..Mortality..Rates.from.",
                         outcome_cols[[outcome]], sep = "")
  # Coerce rates to numeric; "Not Available" becomes NA (warning suppressed)
  # and those hospitals are dropped before ranking.
  rates <- suppressWarnings(as.numeric(datastate[[outcomecolumn]]))
  hospitals <- as.character(datastate$Hospital.Name)
  hospitals <- hospitals[!is.na(rates)]
  rates <- rates[!is.na(rates)]
  ranked <- hospitals[order(rates, hospitals)]  # rate first, name breaks ties
  if (identical(rank, "best")) rank <- 1L
  if (identical(rank, "worst")) rank <- length(ranked)
  ranked[rank]
}
|
290ab280b738b879b16af10a87807343d3312586
|
25ae4c2d7fbbeb4e14372d380cf01065139bf90a
|
/analyses_figures_of_space_time_dataset__part1.R
|
7280dfb445e7bb06d4dc05a83377c37727f2e43d
|
[] |
no_license
|
bparment1/MEOT_analyses
|
46dfc46ccf6c4a9c3a88e40915963ff259fdec2f
|
c19cc04478f6e35f0b7a8b1237d76fdc496f530a
|
refs/heads/master
| 2020-12-25T21:01:22.175138
| 2016-12-31T15:45:00
| 2017-08-25T17:45:42
| 15,113,303
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,028
|
r
|
analyses_figures_of_space_time_dataset__part1.R
|
######################################## Generation of space-time dataset #######################################
########################################### For Testing MSSA-MEOT and S-T mode PCA #####################################
#This script performs analyses and produces figures for synthetic space time datasets in R to test EOT,MEOT,PCA and MSSA.
#Space-time series were generated by combining a set of spatial patterns and temporal patterns.
#This script uses 11 functions stored in a separate file: generation_of_space_time_dataset_functions_12312013.R.
#AUTHORS: Benoit Parmentier
#DATE CREATED: 12/31/2013
#DATE MODIFIED: 01/21/2014
#PROJECT: Clark Labs, MEOT, time series
#TO DO:
###################################################################################################
###Loading R library and packages
library(gtools) # loading various useful tools such as mixedsort
library(sp) #Spatial object models and functionality
library(raster) #Raster functionality
library(rasterVis) #Raster visualization
library(rgdal) #GDAL binding
library(vegan)
library(parallel) # parallelization based on snow
library(mgcv) # GAM package by Simon Wood
library(spdep) # Spatial pacakge with methods and spatial stat. by Bivand et al.
library(gstat) # Kriging and co-kriging by Pebesma et al.
library(fields) # NCAR Spatial Interpolation methods such as kriging, splines
library(foreign) # Library for format exchange (e.g. dbf,spss,sas etc.)
library(gdata) # various tools with xls reading
library(xts) # basic package for time series analysis
library(zoo) # basic package for time series analysis
library(forecast) # package containing ARIMA procedures
library(plotrix)
library(matrixStats)
library(colorRamps) #Color palettes for raster images
library(gridExtra)
library(foreign) # Library for format exchange (e.g. dbf,spss,sas etc.)
library(gdata) # various tools with xls reading
library(xts) # basic package for time series analysis
library(zoo) # basic package for time series analysis
library(forecast) # package containing ARIMA procedures
##### Functions used in this script #####
functions_generation_space_time_datasets <- "generation_of_space_time_dataset_functions_12312013v6.R"
script_path<-"/Users/benoitparmentier/Dropbox/Data/MEOT_paper/" #path to script
source(file.path(script_path,functions_generation_space_time_datasets)) #source all functions used in this script.
## create function to assemble the meot indices in a table...
plot_temporal_components_IDRISI_ETM_proj <-function(folder_components,pattern_str,out_dir,out_suffix){
  # Collect the temporal component profiles ("*.avl" files) produced by the
  # IDRISI ETM run and assemble them into one data frame per pattern.
  #
  # Args:
  #   folder_components: folder containing the ".avl" temporal profiles.
  #   pattern_str:       character vector of patterns (e.g. analysis names)
  #                      used to group the profile files.
  #   out_dir, out_suffix: currently unused -- NOTE(review): kept for
  #                      interface compatibility; plotting was never added.
  # Returns: named list (one element per pattern) of data frames whose
  #   columns "comp_1", "comp_2", ... hold the second column (the profile
  #   values) of each matching ".avl" file.
  list_temporal_component_files <- mixedsort(list.files(folder_components, pattern="*.avl$", full.names=TRUE))
  list_temp_profiles <- lapply(seq_along(pattern_str), function(k){
    grep(pattern=pattern_str[k], list_temporal_component_files, value=TRUE)
  })
  combine_time_profiles_comp <- function(list_time_profiles){
    # read.table(f)[, 2] keeps only the profile values of each file
    list_vals <- lapply(list_time_profiles, function(f){ read.table(f)[, 2] })
    df <- as.data.frame(do.call(cbind, list_vals))
    # seq_len() (unlike 1:ncol) behaves correctly when no file matched
    names(df) <- paste("comp", seq_len(ncol(df)), sep="_")
    return(df)
  }
  l_df <- lapply(list_temp_profiles, FUN=combine_time_profiles_comp)
  names(l_df) <- pattern_str
  return(l_df)
}
plot_components_IDRISI_ETM_proj <-function(components_folders,comp_files,out_dir,out_suffix,r_mask=NULL,
res_pix=480,col_mfrow=1,row_mfrow=1){
  # Plot the spatial loading images ("<Comp k>_R.rst") of each IDRISI ETM
  # component as a rasterVis levelplot and save one PNG per component.
  #
  # Args:
  #   components_folders: folder containing the component raster files.
  #   comp_files:         component raster file names; the "Comp<k>" token
  #                       is parsed from the "_"-separated name parts.
  #   out_dir, out_suffix: output folder and suffix for the PNG file names.
  #   r_mask:             optional raster mask applied to each stack.
  #   res_pix, col_mfrow, row_mfrow: PNG pixel size per panel and layout.
  # Returns: list of the levelplot (trellis) objects, one per component.
  tmp_str <- strsplit(comp_files,"_")
  comp_str <- unique(unlist(lapply(tmp_str,function(k){grep(pattern="Comp*",k,value=TRUE)})))
  comp_file_str <- paste(comp_str,"_R.rst",sep="") # spatial-image file endings
  no_comp <- length(comp_str) # number of components in the analysis
  list_MEOT_Lag_analyses_comp <- lapply(seq_along(comp_file_str),function(k){grep(pattern=comp_file_str[k],comp_files,value=TRUE)})
  list_plots_obj <- vector("list",length=no_comp)
  for(i in seq_along(comp_str)){
    comp_str_processed <- comp_str[i]
    # BUG FIX: the original indexed the global `folder_components` instead
    # of the `components_folders` argument.
    list_r_comp_s <- file.path(components_folders,list_MEOT_Lag_analyses_comp[[i]])
    r_comp_s <- stack(mixedsort(list_r_comp_s))
    if (!is.null(r_mask)){
      r_comp_s <- mask(r_comp_s,r_mask)
    }
    layerNames(r_comp_s) <- paste(comp_str_processed,1:nlayers(r_comp_s),sep="_")
    temp.colors <- matlab.like(100)
    p <- levelplot(r_comp_s, col.regions=temp.colors,main=paste(comp_str_processed,"_",out_suffix,sep=""))
    png_file_name <- paste("Figure_",comp_str_processed,"_",paste(out_suffix,sep=""),".png", sep="")
    png(filename=file.path(out_dir,png_file_name),
        width=col_mfrow*res_pix,height=row_mfrow*res_pix)
    par(mfrow=c(row_mfrow,col_mfrow))
    print(p) # trellis objects must be printed explicitly inside a device
    dev.off()
    list_plots_obj[[i]] <- p
  }
  return(list_plots_obj)
}
# Plot ETM/IDRISI component rasters two-up, one PNG per component.
#
# Arguments:
#   components_folders : folder(s) holding the component .rst files.
#                        NOTE(review): like its sibling function, the body reads
#                        the GLOBAL `folder_components`, not this parameter;
#                        kept as-is so existing (global-reliant) callers behave
#                        identically -- TODO fix together with the call sites.
#   comp_files : character vector of component file names (contain "Comp<k>").
#   out_dir    : directory receiving the "Figure_Comp*.png" files.
#   out_suffix : suffix appended to plot titles and file names.
# Side effects only (PNG files written); no useful return value.
plot_components_by2_IDRISI_ETM_proj <-function(components_folders,comp_files,out_dir,out_suffix){
#Plot output components from ETM in IDRISI
# Derive unique component names ("Comp1", "Comp2", ...) from "_"-split names.
tmp_str <- strsplit(comp_files,"_")
comp_str <-unique(unlist(lapply(tmp_str,function(k){grep(pattern="Comp*",k,value=TRUE)})))
no_comp <- length(comp_str) #number of components in the analysis
# Group the input files by component name.
list_MEOT_Lag_analyses_comp <- lapply(seq_along(comp_str),function(k){grep(pattern=comp_str[k],comp_files,value=TRUE)})
for(i in seq_along(comp_str)){
comp_str_processed <-comp_str[i]
list_r_comp_s <- file.path(folder_components,list_MEOT_Lag_analyses_comp[[i]])
# mixedsort() keeps the natural numeric ordering of the layers.
r_comp_s <- stack(mixedsort(list_r_comp_s))
layerNames(r_comp_s)<-paste(comp_str_processed,1:nlayers(r_comp_s),sep="_")
# Fixed 18-step matlab-like palette (a dead colorRampPalette assignment that
# was immediately overwritten has been removed).
temp.colors <- matlab.like(18)
#levelplot(r_stack,layers=1:48, col.regions=temp.colors)
p <- levelplot(r_comp_s, col.regions=temp.colors,main=paste(comp_str_processed,"_",out_suffix,sep=""))
# PNG geometry: one row, one column, 480 px per cell.
res_pix<-480
col_mfrow<-1
row_mfrow<-1
png_file_name<- paste("Figure_",comp_str_processed,"_",paste(out_suffix,sep=""),".png", sep="")
png(filename=file.path(out_dir,png_file_name),
width=col_mfrow*res_pix,height=row_mfrow*res_pix)
par(mfrow=c(row_mfrow,col_mfrow))
print(p)  # lattice objects must be printed explicitly inside a device
dev.off()
}
}
# Cross-correlate two sets of named time series over a symmetric lag window.
#
# For every (teleconnection index, mode) pair the function computes the CCF,
# records the extremum (largest absolute cross-correlation, sign preserved)
# and the lag at which it occurs, optionally writes one CCF plot per pair,
# writes three CSV-style tables plus an .RData object to the working
# directory, and returns the three tables in a named list.
crosscor_lag_analysis_fun<-function(telind,mode_list,d_z,lag_window,fig,out_prefix){
#This function crosss correlates between two sets of time series given some lag window.
#Arguments:
#1)telind: time series 1 as character vector
#2)modelist: time series 2 as character vector
#3)d_z: zoo object
#4)lag_window: maximum lag (months); CCF is evaluated on [-lag_window, +lag_window]
#5)fig: if the string "TRUE", one CCF plot per pair is written as PNG
#       NOTE(review): compared with =="TRUE" (string), so logical TRUE would
#       NOT trigger plotting -- confirm callers pass a string.
# Result accumulators, one row per index in telind, one column per mode.
lag_table_ext<-matrix(data=NA,nrow=length(telind),ncol=length(mode_list))
lag_table_lag<-matrix(data=NA,nrow=length(telind),ncol=length(mode_list))
lag_table_text<-matrix(data=NA,nrow=length(telind),ncol=length(mode_list)) #Formatted table used in the paper
#lag_cross_cor_PCA<-vector("list",length(mode_list))
lag_m<-seq(-1*lag_window,lag_window,1)
#retain ccf!!!
#list_ccf_lag_table
#lag_cross_cor_PCA_m<-array(data=NA,nrow=length(lag_m),ncol=length(mode_list))
for (i in 1:length(telind)){
telindex<-telind[i]
# Column position of this teleconnection index in the zoo object.
pos1<-match(telindex,names(d_z))
#retain ccf!!!
for (j in 1:length(mode_list)){
mode_n<-mode_list[j]
pos2<-match(mode_n,names(d_z))
# `lag=` partially matches ccf()'s `lag.max` argument.
ccf_obj<-ccf(d_z[,pos1],d_z[,pos2], lag=lag_window) #Note that ccf does not take
lag_m<-seq(-1*lag_window,lag_window,1)
# Overwrite ccf's (possibly fractional) lag axis with integer month lags.
ccf_obj$lag[,1,1]<-lag_m #replacing lag values because continuous
if (fig=="TRUE"){
plot_name<-paste(telindex, "and", mode_n,"lag analysis",sep="_")
png(paste(plot_name,"_",out_prefix,".png", sep=""))
plot(ccf_obj, main= paste(telindex, "and", mode_n,"lag analysis",sep=" "), ylab="Cross-correlation",
xlab="Lag (month)", ylim=c(-1,1))
dev.off()
}
######### NOW FIND THE m
# Locate the largest |cross-correlation|. match() on the positive value
# returns NA when the extremum was negative, which is then recovered below.
absext <-max(abs(ccf_obj$acf)) # maximum of the extremum
pos<-match(absext,ccf_obj$acf) #find the position and lag, if NA it means it was negative
if (is.na(pos)) {
pos<-match(absext*-1,ccf_obj$acf)
absext<-absext*-1 #recover the sign
}
# NOTE(review): match() picks the FIRST occurrence if the extremum value
# is tied at several lags -- acceptable for this analysis, but be aware.
absext_lag<-ccf_obj$lag[pos,1,1] #This is the lag corresponding to the maximum absolute value
lag_table_ext[i,j]<-absext
lag_table_lag[i,j]<-absext_lag
#number<-format(absext,digits=3)
# Paper-ready cell text: "extremum (lag)".
ext<-round(absext,digits=3)
element<-paste(ext," (",absext_lag,")",sep="")
lag_table_text[i,j]<-element
##Keep ccf lag somewhere
}
}
# Convert accumulators to labelled data frames and persist each as CSV text
# in the current working directory.
lag_table_ext<-as.data.frame(lag_table_ext)
names(lag_table_ext)<-mode_list
rownames(lag_table_ext)<-telind
file_name<-paste("lag_table_extremum_window_", lag_window,"_",out_prefix,".txt",sep="")
write.table(lag_table_ext,file=file_name,sep=",")
lag_table_lag<-as.data.frame(lag_table_lag)
names(lag_table_lag)<-mode_list
rownames(lag_table_lag)<-telind
file_name<-paste("lag_table_lag_extremum_window_", lag_window,"_",out_prefix,".txt",sep="")
write.table(lag_table_lag,file=file_name,sep=",")
lag_table_text<-as.data.frame(lag_table_text)
names(lag_table_text)<-mode_list
rownames(lag_table_text)<-telind
file_name<-paste("lag_table_lag_ext_text", lag_window,"_",out_prefix,".txt",sep="")
write.table(lag_table_text,file=file_name,sep=",")
#create return object
crosscor_obj<-list(lag_table_ext,lag_table_lag,lag_table_text)
names(crosscor_obj)<-c("extremum","lag_ext","text")
file_name<-paste("crosscor_obj_lag_analysis_", lag_window,"_",out_prefix,".RData",sep="")
save(crosscor_obj,file=file_name)
return(crosscor_obj)
}
############# Parameters and arguments ####################
# Script-level configuration for the MEOT/MSSA supplementary analyses.
# NOTE(review): paths are hard-coded to one machine; parameterize before reuse.
out_suffix <-"MEOT_s11_L24_01162014"
#in_dir<- "/Users/benoitparmentier/Documents/DATA/Benoit/Clark_University/Paper_writings/MSSA_BNP/work_dir6_01172014"
#r_mask_file <- "/Users/benoitparmentier/Documents/DATA/Benoit/Clark_University/Paper_writings/MSSA_BNP/SST_1982_2007_Data/mask_rgf_1_1.rst"
in_dir <- "/Users/benoitparmentier/Documents/DATA/Benoit/Clark_University/Paper_writings/MSSA_BNP/work_dir5_01102013"
out_dir<- "/Users/benoitparmentier/Documents/DATA/Benoit/Clark_University/Paper_writings/MSSA_BNP/"
# All figures and tables go under analyses_and_figures_<out_suffix>.
out_dir_name <- paste("analyses_and_figures","_",out_suffix,sep="")
out_dir <- file.path(out_dir,out_dir_name)
# Create the output directory on first run only.
if (!file.exists(out_dir)){
dir.create(out_dir)
}
# NOTE(review): setwd() is a global side effect; all relative paths used by
# png()/write.table() below depend on it.
setwd(out_dir)
############## PART I : MEOT WITH SINUSOIDAL PATTERN ###############
# Load the synthetic 9x9 sinusoidal test dataset (90 time steps) and produce
# Figure 1a-1c: the first nine images, the block layout, and the nine block
# time-series profiles.
sine_inDir <- "/Users/benoitparmentier/Documents/Data/Benoit/Clark_University/Paper_writings/MSSA_BNP/work_dir5_01102013/sine9x9_90"
r_test_sine <- stack(list.files(pattern="sine9x9_90.*.rst$",path=sine_inDir,full.names=T))
#r_meot_list <- list.files(pattern="Cover9.*.rst$",path=sine_inDir,full.names=T)
#nt <- 45
nt <- 90  # number of time steps in this dataset
# Aggregate 3x3 so each cell center gives one representative coordinate per block.
r_agg <- aggregate(r_test_sine,fact=3,fun=mean) #
coords_xy <- coordinates(r_agg)
#Prepare time series profiles
pix_dat <- t(extract(r_test_sine,coords_xy))
pix_dat <- as.data.frame(pix_dat)
pix_dat <- pix_dat[,c(1,2,3,9,8,7,4,5,6)] #reorder pixels according to S shape
########### Plotting figures ###########
### Figure 1: Original signal...###
#1. images
layout_m <- c(1,1)
png(paste("Figure1a_","original_sine","_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*1)
#height=3*480*layout_m[1],width=2*480*layout_m[2])
#height=480*6,width=480*4)
#par(mfrow=layout_m)
names_panel_plot <- paste("T",1:9,sep="_")
r_pattern <- subset(r_test_sine,1:9)
layerNames(r_pattern) <- names_panel_plot
# print() is required: a bare lattice call inside png() draws nothing when
# this script is run via source()/Rscript (R FAQ 7.22), leaving an empty file.
print(levelplot(r_pattern,col.regions=matlab.like(18)))
dev.off()
#2. temporal profiles
#r_dat <- as(r_test_sine,"SpatialPointsDataFrame")
layout_m <- c(1,1)
png(paste("Figure1b_","original_image_time_series_1","_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*1)
plot(subset(r_test_sine,1),col=matlab.like(18),
legend.width=2, legend.shrink=0.75)
# Overlay block numbers in S-shaped order at the block centers.
text(coords_xy[,1],coords_xy[,2],labels=c(1,2,3,9,8,7,4,5,6))
#text(coords_xy[,1],coords_xy[,2],labels=1:9)
dev.off()
png(paste("Figure1c_","original_sine_time_series","_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*1)
#names_pix_blocks <-paste("B",c(1,2,3,9,8,7,4,5,6),sep="")
names_pix_blocks <-paste("B",1:9,sep="")
plot(ts(data=pix_dat,names=names_pix_blocks),main="Sinusoidal patterns for the 9 blocks",
ylab="Amplitude",xlab="Time steps",pch=1,type="b")
dev.off()
########### Plotting figures 2 to figures 8: MEOT 9 sequences ###########
#Get all files relevant to components for supplementary material analyes
list_components_folders <- list.dirs(path=in_dir) #default uses recursive and full.names
list_components_folders <- mixedsort(grep("*.components$",list_components_folders,value=T))
list_components_folders <- (grep("test_sine9x9_90_",list_components_folders,value=T)) #get the sine9by9
#Modify later to auto detect?
# One pattern per lag-window variant of the sine test analysis.
pattern_str<- c("test_sine9x9_90_L2","test_sine9x9_90_L4","test_sine9x9_90_L5","test_sine9x9_90_L6",
"test_sine9x9_90_L7","test_sine9x9_90_L8","test_sine9x9_90_L9","test_sine9x9_90_L18")
# Global consumed by the plot_components_* functions (they ignore their
# components_folders parameter and read this instead).
folder_components <-list_components_folders
#Select groups of files containing MEOT analyses
# NOTE(review): only the FIRST folder is listed here; assumes all component
# files for every lag variant live in list_components_folders[1] -- confirm.
list_component_files <- mixedsort(list.files(pattern="*.Comp.*.R.rst$",list_components_folders[1]))
list_MEOT_Lag_analyses <- lapply(1:length(pattern_str),function(k){grep(pattern=pattern_str[k],list_component_files,value=TRUE)})
## Plot
## Quick exploration of results
list_plots_obj <- vector("list",length=length(list_MEOT_Lag_analyses))
#r44 <- raster("/Users/benoitparmentier/Documents/DATA/Benoit/Clark_University/Paper_writings/MSSA_BNP/SST_1982_2007_Data/mask_rgf_1_1.rst")
#Quick check...
out_suffix_s <- paste(pattern_str[1],out_suffix,sep="_") #Lag 2,Lag4 ... to Lag 18
#undebug(plot_components_IDRISI_ETM_proj)
#list_plots_meot_obj[[1]] <- plot_components_IDRISI_ETM_proj(folder_components,comp_files=list_MEOT_Lag_analyses[[1]] ,out_dir,out_suffix_s)
# NOTE(review): this "quick check" call is redundant -- the loop below
# recomputes element 1 with the same arguments.
list_plots_meot_obj[[1]] <- plot_components_IDRISI_ETM_proj(folder_components,comp_files=list_MEOT_Lag_analyses[[1]] ,
out_dir,out_suffix_s,r_mask=NULL,
res_pix=480,col_mfrow=1,
row_mfrow=1)
#put this in a loop..
# Generate the per-lag component plots for every lag window.
for (i in 1:length(list_plots_meot_obj)){
out_suffix_s <- paste(pattern_str[i],out_suffix,sep="_") #Lag 2,Lag4 ... to Lag 18
list_plots_meot_obj[[i]] <- plot_components_IDRISI_ETM_proj(folder_components,comp_files=list_MEOT_Lag_analyses[[i]] ,
out_dir,out_suffix_s,r_mask=NULL,
res_pix=480,col_mfrow=1,
row_mfrow=1)
}
title_plots <- c("Lag window 2 MEOT","Lag window 4 MEOT", "Lag window 5 MEOT", "Lag window 6 MEOT",
"Lag window 7 MEOT","Lag window 8 MEOT","Lag window 9 MEOT","Lag window 18 MEOT")
# Figures 2..9: MEOT1 and MEOT2 side by side for each lag window.
for (i in 1:length(list_plots_meot_obj)){
layout_m <- c(1,1)
no_fig <-i+1
png(paste("Figure",no_fig,"_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*2)
#height=3*480*layout_m[1],width=2*480*layout_m[2])
#height=480*6,width=480*4)
#par(mfrow=layout_m)
p_comp1 <- list_plots_meot_obj[[i]][[1]]
p_comp2 <- list_plots_meot_obj[[i]][[2]]
p_comp1$main <- paste(title_plots[[i]],1,sep="")
p_comp2$main <- paste(title_plots[[i]],2,sep="")
# grid.arrange draws directly on the open device.
grid.arrange(p_comp1,p_comp2,ncol=2)
dev.off()
}
######### Plot temporal profiles...
#### PLOT R
# Collect one data frame of component time profiles per lag-window pattern.
l_df <- plot_temporal_components_IDRISI_ETM_proj(folder_components,pattern_str,out_dir,out_suffix)
#pattern_str<- c("sine9by9_L2","sine9by9_L4","sine9by9_L5","sine9by9_L8","sine9by9_L9","sine9by9_L18")
#pattern_str<- c("sine9x9_90_L2","sine9x9_90_L4","sine9x9_90_L5","sine9x9_90_L6",
#                "sine9x9_90_L8","sine9x9_90_L9","sine9x9_90_L18")
folder_components <- list_components_folders
df<- l_df[[1]]
names(df)
### Plot temporal profiles...
# One PNG per lag window: MEOT1 and MEOT2 temporal profiles overlaid.
for (i in 1:length(l_df)){
df <-l_df[[i]]
layout_m <- c(1,1)
no_fig <-i+1
png(paste("Figure",no_fig,"_temporal_MEOT_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*2)
#height=3*480*layout_m[1],width=2*480*layout_m[2])
#height=480*6,width=480*4)
#par(mfrow=layout_m)
plot(df[,1],type="b",lty="solid",col="blue",main=paste(title_plots[[i]],1,sep=""),lwd=1.2,cex.main=1.2,cex.axis=1.2,font=2,
xlab="Time steps",ylab="Amplitude",cex.lab=1.2, font.lab=2)
lines(df[,2],type="b",lty=4 ,col="darkgreen",lwd=1.2)
legend("topright",legend=c("MEOT1","MEOT2"), cex=0.8, col=c("blue","darkgreen"),
lty=c(1,4),lwd=3,bty="n") #lwd=line width
dev.off()
}
############## PART II : RED NOISE ANALYSIS ###############
### ORIGINAL RED NOISE DATASET
# Load the synthetic red-noise (AR 0.92 + trend) test dataset and produce the
# red-noise counterparts of Figures 1b/1c.
rednoise_inDir <- "/Users/benoitparmentier/Documents/Data/Benoit/Clark_University/Paper_writings/MSSA_BNP/work_dir4_11252013/r2_rednoise_AR92_trend_x_gen_11252013"
r_test_rednoise <- stack(list.files(pattern="r2_rednoise_AR92_trend_x.*.rst$",path=rednoise_inDir,full.names=T))
nt <- 45  # number of time steps in this dataset
#Test some palettes
# print() so the preview also renders when the script is source()d (R FAQ 7.22).
print(levelplot(r_test_rednoise,layers=1:9,col.regions=matlab.like(18)))
#r_agg <- aggregate(r_test_sine,fact=3,fun=mean) #
#coords_xy <- coordinates(r_agg)
#rednoise_pix_dat <- as(r_test_rednoise,"SpatialPointsDataFrame")
# One row per time step after transposition: nt x n_pixels matrix.
rednoise_pix_dat <- as.matrix(r_test_rednoise)#,"SpatialPointsDataFrame")
rednoise_pix_dat <- t(rednoise_pix_dat)
########### Plotting figures ###########
### Figure 1: Original signal...###
#1. images
layout_m <- c(1,1)
png(paste("Figure1b_","rednoise_image_time_series_1","_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*1)
names_panel_plot <- paste("T",1:9,sep="_")
r_pattern <- subset(r_test_rednoise,1:9)
layerNames(r_pattern) <- names_panel_plot
# print() required inside png() for lattice output (see above).
print(levelplot(r_pattern,col.regions=matlab.like(18)))
dev.off()
#2. temporal profiles
#r_dat <- as(r_test_sine,"SpatialPointsDataFrame")
png(paste("Figure1c_","original_sine_time_series","_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*1)
#names_pix_blocks <-paste("Pixel",c(1,2,3,9,8,7,4,5,6),sep="")
names_pix_blocks <-paste("Pixel_",1:9,sep="")
# BUGFIX: was names(rednoise_pix_dat) <- names_pix_blocks, which errors on a
# matrix ('names' attribute must match the full element count); column names
# are what is intended here.
colnames(rednoise_pix_dat) <- names_pix_blocks
plot(ts(data=rednoise_pix_dat,names=names_pix_blocks),main="Red noise patterns for the 9 pixels",
ylab="Amplitude",xlab="Time steps",pch=1,type="b")
dev.off()
### NOW USE MEOT ANALYSES RESULTS FOR MEOT
# Plot the MEOT components obtained on the red-noise dataset (lag window 9).
in_dir<- "/Users/benoitparmentier/Documents/Data/Benoit/Clark_University/Paper_writings/MSSA_BNP/work_dir4_11252013"
#Get all files relevant to components
list_components_folders <- list.dirs(path=in_dir) #default uses recursive and full.names
list_components_folders <- mixedsort(grep("*.components$",list_components_folders,value=T))
folder_components <- (grep("rednoise_AR92_trend_x_gen_11252013",list_components_folders,value=T)) #get the rednoise run
pattern_str<- c("r2_rednoise_AR92_trend_x")
list_component_files <- mixedsort(list.files(pattern="*.Comp.*.R.rst$",folder_components))
list_MEOT_Lag_analyses_rednoise <- lapply(1:length(pattern_str),function(k){grep(pattern=pattern_str[k],list_component_files,value=TRUE)})
# Keep only the first 45 files (nt time steps).
list_MEOT_Lag_analyses_rednoise[[1]] <- list_MEOT_Lag_analyses_rednoise[[1]][1:45]
## Plot
## Quick exploration of results
list_plots_meot_rednoise_obj <- vector("list",length=length(list_MEOT_Lag_analyses_rednoise))
out_suffix_s <- paste(pattern_str[1],out_suffix,sep="_") #Lag 9
# BUGFIX: first argument was the undefined name `components_folders`; this only
# "worked" because the plotting function never evaluates that argument (lazy
# evaluation) and reads the global `folder_components` instead. Pass the real
# variable so the call is robust if the function is ever fixed.
list_plots_meot_rednoise_obj[[1]] <- plot_components_IDRISI_ETM_proj(folder_components,
comp_files=list_MEOT_Lag_analyses_rednoise[[1]] ,out_dir,out_suffix_s)
# Auto-printed preview of component 5 when run interactively.
list_plots_meot_rednoise_obj[[1]][[5]]
title_plots <- c("Lag window 9 MEOT")
# One PNG per analysis: MEOT1 and MEOT2 side by side.
for (i in 1:length(list_plots_meot_rednoise_obj)){
plot_obj <- list_plots_meot_rednoise_obj[[i]]
layout_m <- c(1,1)
no_fig <-i+1
png(paste("Figure",no_fig,"_rednoise_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*2)
#height=3*480*layout_m[1],width=2*480*layout_m[2])
#height=480*6,width=480*4)
#par(mfrow=layout_m)
p_comp1 <- plot_obj[[1]]
p_comp2 <- plot_obj[[2]]
p_comp1$main <- paste(title_plots[[i]],1,sep="")
p_comp2$main <- paste(title_plots[[i]],2,sep="")
grid.arrange(p_comp1,p_comp2,ncol=2)
dev.off()
}
### Now temporal profiles...
# Plot the MEOT1/MEOT2 temporal profiles of the red-noise analysis.
pattern_str<- c("r2_rednoise_AR92_trend_x")
l_rednoise_df <- plot_temporal_components_IDRISI_ETM_proj(folder_components,pattern_str,out_dir,out_suffix)
#pattern_str<- c("sine9by9_L2","sine9by9_L4","sine9by9_L5","sine9by9_L8","sine9by9_L9","sine9by9_L18")
#folder_components <-list_components_folders
title_plots <- c("Lag window 9 MEOT")
# One PNG per analysis (only one here): first two components overlaid.
for (i in 1:length(l_rednoise_df)){
df <-l_rednoise_df[[i]]
layout_m <- c(1,1)
no_fig <-i+1
png(paste("Figure",no_fig,"_rednoise_temporal_MEOT_paper_revisions_supplement_",out_suffix,".png", sep=""),
height=480*layout_m[1],width=480*layout_m[2]*2)
plot(df[,1],type="b",lty="solid",col="blue",main=paste(title_plots[[i]],1,sep=""),lwd=1.2,cex.main=1.2,cex.axis=1.2,font=2,
xlab="Time steps",ylab="Amplitude",cex.lab=1.2, font.lab=2)
lines(df[,2],type="b",lty=4 ,col="darkgreen",lwd=1.2)
legend("topright",legend=c("MEOT1","MEOT2"), cex=0.8, col=c("blue","darkgreen"),
lty=c(1,4),lwd=3,bty="n") #lwd=line width
dev.off()
}
#####################################################
################ END OF SCRIPT ##############
|
aea88096f5b7ec8a055acfa4d6e87dfb3bda98cd
|
297ab7b92af3ffc6d497358525c52a9edd5f0a5b
|
/cachematrix.R
|
031929510fe44ba5d194dc95ebabd2c552eec606
|
[] |
no_license
|
alohr/ProgrammingAssignment2
|
ee4118bb3c43e3d011e58ff04872e339af2cc2fe
|
7d7bad6aa9ab8b816d637deffa53c7c97d479dbd
|
refs/heads/master
| 2021-01-23T22:52:50.022300
| 2014-06-07T15:59:19
| 2014-06-07T15:59:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,583
|
r
|
cachematrix.R
|
## The functions makeCacheMatrix() and cacheSolve() implement a caching scheme for the
## inverse of a matrix.
##
## Example usage:
##
## > m <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow=2, ncol=2))
##
## > m$get()
## [,1] [,2]
## [1,] 1 3
## [2,] 2 4
##
## Get the inverse for the first time:
##
## > cacheSolve(m)
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
##
## Any subsequent calls to cacheSolve() with the same matrix as its argument
## will return the previously calculated and cached inverse matrix:
##
## > cacheSolve(m)
## getting cached data
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
makeCacheMatrix <- function(myMatrix = matrix()) {
    ## Closure-based cache for a matrix and its inverse.
    ## Returns a list of accessors that all share this environment:
    ##   set(m)        -- store a new matrix and drop any cached inverse
    ##   get()         -- return the stored matrix
    ##   setInverse(i) -- cache a computed inverse
    ##   getInverse()  -- return the cached inverse, or NULL if none yet
    cachedInverse <- NULL

    set <- function(newMatrix) {
        # Replacing the matrix invalidates the cached inverse.
        myMatrix <<- newMatrix
        cachedInverse <<- NULL
    }
    get <- function() {
        myMatrix
    }
    setInverse <- function(inverseValue) {
        cachedInverse <<- inverseValue
    }
    getInverse <- function() {
        cachedInverse
    }

    list(
        set = set,
        get = get,
        setInverse = setInverse,
        getInverse = getInverse
    )
}
cacheSolve <- function(x, ...) {
    ## Return the inverse of the special "matrix" created by makeCacheMatrix().
    ## A previously computed inverse is reused from the cache; otherwise the
    ## inverse is computed with solve(), stored via x$setInverse(), and
    ## returned. Extra arguments are forwarded to solve().
    cached <- x$getInverse()
    if (is.null(cached)) {
        # Cache miss: compute, memoise, and return the inverse.
        freshInverse <- solve(x$get(), ...)
        x$setInverse(freshInverse)
        freshInverse
    } else {
        message("getting cached data")
        cached
    }
}
|
0f612faa23bfcf67339106c462be734058e6f53c
|
654b7d883c73e2dd3d3fffdd816315677e95cc50
|
/man/SpatialExperiment.Rd
|
ee264b74bfdeb0e33d6198fdcf73e3130565c70f
|
[
"MIT"
] |
permissive
|
genesofeve/astRal
|
c3147fe7ab9f967fc87061c08992a103aaf8bd08
|
f22f749017e954e80ddda1c5baacfc7bdb070158
|
refs/heads/master
| 2023-01-01T10:21:26.700219
| 2020-10-16T16:31:57
| 2020-10-16T16:31:57
| 430,408,999
| 1
| 0
|
NOASSERTION
| 2021-11-21T15:46:02
| 2021-11-21T15:46:01
| null |
UTF-8
|
R
| false
| true
| 482
|
rd
|
SpatialExperiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{SpatialExperiment}
\alias{SpatialExperiment}
\alias{SpatialExperiment-class}
\title{The SpatialExperiment class}
\description{
The SpatialExperiment class
}
\section{Fields}{
\describe{
\item{\code{images}}{SimpleList of images associated with each slide or assay}
\item{\code{clusters}}{SimpleList of cluster information for different
dimensionality reductions.}
}}
|
36b40aa50293eafc68072331f258972670990920
|
71baaddb70a95c682b579a00ef006c1526bbb872
|
/plot2.r
|
7d8845548909f122eaa676cb1776ba38cb76e877
|
[] |
no_license
|
mgan2014/ExData_Plotting1
|
fedaf1fe077cd392bcaaf13bd1b0e2aa317deb80
|
63432631f01e3f4a9d656c90b6be37e77dc1d6b9
|
refs/heads/master
| 2021-01-17T17:12:05.353052
| 2014-09-06T00:46:12
| 2014-09-06T00:46:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
plot2.r
|
plot2 <- function() {
    ## Read the UCI household power consumption data, subset it to
    ## 2007-02-01/02, and write a line plot of Global Active Power over time
    ## to "plot2.png" (480x480) in the working directory.
    ## Side effects only; returns NULL invisibly.

    # Import the data from the semicolon-delimited text file.
    x <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
    # Convert the first column (Date) from character to Date.
    x[, 1] <- as.Date(x[, 1], "%d/%m/%Y")
    # Extract only the two days of interest.
    y <- x[(x$Date == "2007-02-01" | x$Date == "2007-02-02"), ]
    dt <- strptime(paste(y$Date, y$Time), format = "%Y-%m-%d %H:%M:%S")
    # Values read as character/factor (the file uses "?" for missing);
    # as.character() first avoids factor-level-to-integer coercion.
    z <- as.numeric(as.character(y$Global_active_power))
    # Set up the PNG graphics device.
    png(filename = "plot2.png", width = 480, height = 480)
    # Guarantee the device is closed even if plotting fails (was a device
    # leak on error in the original).
    on.exit(dev.off(), add = TRUE)
    plot(dt, z, ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
    invisible(NULL)
}
|
b5bf044ee51f80d6780c5a8bcc890f511b330780
|
3050849fdeb7b54d22f5b72ec004fefeb0af86a6
|
/man/plotKmeansLabelstSNE.Rd
|
25d141af5942b6e338de86da5f570e51efac225e
|
[
"MIT"
] |
permissive
|
dami82/DIscBIO
|
4e8fd3591b2c1d92c9deb83596713f076a12ac1c
|
8de0522099697a9364ee01befdb13f5b36b16970
|
refs/heads/master
| 2021-04-22T02:19:49.309902
| 2020-04-08T11:37:15
| 2020-04-08T11:37:15
| 259,098,496
| 0
| 1
|
MIT
| 2020-04-26T18:01:52
| 2020-04-26T18:01:51
| null |
UTF-8
|
R
| false
| true
| 797
|
rd
|
plotKmeansLabelstSNE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DIscBIO-generic-plotKmeansLabelstSNE.R
\name{plotKmeansLabelstSNE}
\alias{plotKmeansLabelstSNE}
\alias{plotKmeansLabelstSNE,DISCBIO-method}
\title{tSNE map for K-means clustering with labels}
\usage{
plotKmeansLabelstSNE(object)
\S4method{plotKmeansLabelstSNE}{DISCBIO}(object)
}
\arguments{
\item{object}{\code{DISCBIO} class object.}
}
\value{
Plot containing the ID of the cells in each cluster
}
\description{
Visualizing the K-means clusters using tSNE maps
}
\examples{
sc <- DISCBIO(valuesG1msReduced) # changes signature of data
sc <- Clustexp(sc, cln=3) # data must be clustered before plotting
sc <- comptSNE(sc, rseed=15555, quiet=TRUE)
plotKmeansLabelstSNE(sc) # Plots the ID of the cells in each cluster
}
|
0ac1f6b5f3fd9090502a338fd28b0b69cca4c963
|
34541609e2877bde7ca96a139d73d9e69f63709c
|
/R/guide_dendrogram.R
|
778605b537fed9ef288d6128e476081f1f12ea95
|
[
"MIT"
] |
permissive
|
dimbage/ggh4x
|
3e6a12a9543c511b922639ca1e4751c785b8739a
|
ae1ac5b085f9e487ce7438e9e321bf368596aed6
|
refs/heads/master
| 2023-06-24T22:57:24.376292
| 2021-07-24T17:07:34
| 2021-07-24T18:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,575
|
r
|
guide_dendrogram.R
|
# Constructor -------------------------------------------------------------
#' Dendrogram guide
#'
#' Visual representation of a discrete variable with hierarchical relationships
#' between members, like those detailed in
#' \code{\link[=scale_x_dendrogram]{scale_(x|y)_dendrogram)}}.
#'
#' @inheritParams guide_axis_truncated
#' @param label A \code{logical(1)}. If \code{TRUE}, labels are drawn at the
#' dendrogram leaves. If \code{FALSE}, labels are not drawn.
#' @param dendro Relevant plotting data for a dendrogram such as those returned
#' by \code{\link[ggdendro]{dendro_data}}.
#'
#' @details The dendrogram guide inherits graphical elements from the
#' \code{axis.ticks} theme element. However, the size of the dendrogram is set
#' to 10 times the \code{axis.ticks.length} theme element.
#'
#' @export
#'
#' @return A \emph{dendroguide} class object.
#'
#' @examples
#' clust <- hclust(dist(USArrests), "ave")
#'
#' # Melting USArrests
#' df <- data.frame(
#' State = rownames(USArrests)[row(USArrests)],
#' variable = colnames(USArrests)[col(USArrests)],
#' value = unname(do.call(c, USArrests))
#' )
#'
#' # The guide function can be used to customise the axis
#' g <- ggplot(df, aes(variable, State, fill = value)) +
#' geom_raster() +
#' scale_y_dendrogram(hclust = clust,
#' guide = guide_dendro(n.dodge = 2))
#'
#' # The looks of the dendrogram are controlled through ticks
#' g + theme(axis.ticks = element_line(colour = "red"))
#'
#' # The size of the dendrogram is controlled through tick size * 10
#' g + theme(axis.ticks.length = unit(5, "pt"))
guide_dendro <- function(title = waiver(),
                         check.overlap = FALSE,
                         n.dodge = 1,
                         order = 0,
                         position = waiver(),
                         label = TRUE,
                         trunc_lower = NULL,
                         trunc_upper = NULL,
                         colour = NULL,
                         color = NULL,
                         dendro = waiver()) {
  # Accept either spelling; an explicit `color` wins over `colour`.
  colour <- color %||% colour
  # Validate the truncation bounds pair before building the guide object.
  check_trunc_arg(trunc_lower, trunc_upper)

  # Assemble the guide specification; element order matches the other
  # axis guides in the package.
  spec <- list(
    title = title,
    check.overlap = check.overlap,
    n.dodge = n.dodge,
    order = order,
    position = position,
    available_aes = c("x", "y"),
    label = label,
    trunc_lower = trunc_lower,
    trunc_upper = trunc_upper,
    colour = colour,
    dendro = dendro,
    name = "axis"
  )
  structure(spec, class = c("guide", "dendroguide", "axis_ggh4x", "axis"))
}
# Trainer -----------------------------------------------------------------
#' @method guide_train dendroguide
#' @export
#' @noRd
# Train the dendrogram guide: delegate to the parent axis trainer, then copy
# the trained axis labels onto the dendrogram leaves (or strip leaf labels
# entirely when labelling is turned off).
guide_train.dendroguide <- function(guide, scale, aesthetic = NULL) {
  guide <- NextMethod()
  # Both operands are scalar here, so use the short-circuit && (the original
  # used the vectorized &, which is unidiomatic in a scalar `if` condition).
  if (!is.null(guide$key$.label) && guide$label) {
    # Overwrite the first NROW(labels) leaf labels with the trained key labels.
    i <- seq_len(NROW(guide$dendro$labels))
    guide$dendro$labels$label <- as.character(guide$dendro$labels$label)
    guide$dendro$labels$label[i] <- as.character(guide$key$.label[i])
  } else {
    # label = FALSE: draw no leaf labels at all.
    guide$dendro$labels$label <- NULL
  }
  guide
}
# Transformer -------------------------------------------------------------
#' @method guide_transform dendroguide
#' @export
#' @noRd
# Transform the guide's key and dendrogram segments into panel coordinates.
# The axis key is transformed like a regular axis; the dendrogram segment
# heights are then rescaled to [0, 1] so they can be drawn in the axis strip.
guide_transform.dendroguide <- function(guide, coord, panel_params) {
# Nothing to place: no position or an empty key.
if (is.null(guide$position) || nrow(guide$key) == 0) {
return(guide)
}
# Aesthetic columns in the key (non-dot-prefixed names).
aesthetics <- names(guide$key)[!grepl("^\\.", names(guide$key))]
if (all(c("x", "y") %in% aesthetics)) {
guide$key <- coord$transform(guide$key, panel_params)
} else {
# Only one of x/y present: pin the missing aesthetic to the panel edge
# matching the guide position before transforming.
other_aesthetic <- setdiff(c("x", "y"), aesthetics)
override_value <- if (guide$position %in% c("bottom", "left")) -Inf else Inf
guide$key[[other_aesthetic]] <- override_value
guide$key <- coord$transform(guide$key, panel_params)
.int$warn_for_guide_position(guide)
}
denseg <- guide$dendro$segments
xvars <- c("x", "xend")
yvars <- c("y", "yend")
if (isTRUE(aesthetics == "y")) {
# Guide sits on a y axis: swap the segment x/y columns so positions along
# the axis transform with the panel, then rescale the perpendicular
# (height) direction to [0, 1].
colnames(denseg) <- chartr("xy", "yx", colnames(denseg))
denseg[, yvars] <- coord$transform(denseg[, yvars],
panel_params)
upper <- max(do.call(c, denseg[, xvars]), na.rm = TRUE)
denseg[, xvars] <- lapply(denseg[, xvars], function(y) {
scales::rescale(y, from = c(0, upper))
})
} else {
# Guide sits on an x axis: transform x positions, rescale y heights.
denseg[, xvars] <- coord$transform(denseg[, xvars],
panel_params)
upper <- max(do.call(c, denseg[, yvars]), na.rm = TRUE)
denseg[, yvars] <- lapply(denseg[, yvars], function(y) {
scales::rescale(y, from = c(0, upper))
})
}
guide$dendro$segments <- denseg
# Truncation bounds transform like any other axis positions.
guide$trunc <- transform_truncated(guide$trunc, coord, panel_params)
guide
}
# Grob generator ----------------------------------------------------------
#' @method guide_gengrob dendroguide
#' @export
#' @noRd
# Generate the grob for the dendrogram guide: attach the dendrogram leaf
# labels to the trained key and hand everything to draw_dendroguide().
# (A dead local `aesthetic` computed from the key names was removed; it was
# never used.)
guide_gengrob.dendroguide <- function(guide, theme) {
  key <- guide$key
  # Leaf labels (possibly NULL when label = FALSE) replace the key labels.
  key$.label <- guide$dendro$labels$label
  draw_dendroguide(
    key = key,
    axis_position = guide$position,
    theme = theme,
    check.overlap = guide$check.overlap,
    n.dodge = guide$n.dodge,
    dendro = guide$dendro$segments,
    trunc = guide$trunc,
    colour = guide$colour
  )
}
# Drawing -----------------------------------------------------------------
# Build the actual grid grobs for a dendrogram axis: axis line, dendrogram
# segments (drawn with the tick style at 10x tick length) and leaf labels.
# `key` carries positions/labels, `dendro` the transformed segment table.
draw_dendroguide <- function(
key, axis_position, theme,
check.overlap = FALSE, n.dodge = 1, dendro = NULL,
trunc, colour = NULL
) {
# Normalize the position to a single letter: top/bottom/right/left.
axis_position <- match.arg(substr(axis_position, 1, 1),
c("t", "b", "r", "l"))
# Theme-derived graphical elements (line, ticks, labels) for this side.
elements <- build_axis_elements(axis_position, angle = NULL, theme, colour)
params <- setup_axis_params(axis_position)
# Labels go on the far side of the dendrogram, hence the flip.
params$labels_first <- !params$labels_first
line_grob <- build_trunc_axis_line(elements$line, params, trunc)
# No breaks: emit just the axis line.
if ({n_breaks <- nrow(key)} == 0) {
out <- grid::gTree(
children = grid::gList(line_grob),
width = grid::grobWidth(line_grob),
height = grid::grobHeight(line_grob),
cl = "absoluteGrob"
)
return(out)
}
label_grobs <- build_axis_labels(
elements, key = key,
dodge = n.dodge, check.overlap = check.overlap, params = params
)
# Dendrogram segments; coordinates are mirrored for left/bottom positions so
# the tree grows away from the panel. Styled like axis ticks.
dendro_grob <- grid::segmentsGrob(
x0 = if (axis_position == "l") 1 - dendro$x else dendro$x,
y0 = if (axis_position == "b") 1 - dendro$y else dendro$y,
x1 = if (axis_position == "l") 1 - dendro$xend else dendro$xend,
y1 = if (axis_position == "b") 1 - dendro$yend else dendro$yend,
gp = element_grob(elements$ticks)$gp
)
# The dendrogram occupies 10x the normal tick length (see docs).
elements$tick_length <- elements$tick_length * 10
assemble_axis_grobs(
ticks = dendro_grob, labels = label_grobs,
lines = line_grob, elements = elements,
params = params
)
}
|
d185850cbe991085b69cfe8ffc8dca788ba82d74
|
e75a40843a8738b84bd529a549c45776d09e70d9
|
/samples/client/petstore/R-httr2/man/Tag.Rd
|
2aea0440ef9cb78765050b15ecae898fc47b91b2
|
[
"Apache-2.0"
] |
permissive
|
OpenAPITools/openapi-generator
|
3478dbf8e8319977269e2e84e0bf9960233146e3
|
8c2de11ac2f268836ac9bf0906b8bb6b4013c92d
|
refs/heads/master
| 2023-09-02T11:26:28.189499
| 2023-09-02T02:21:04
| 2023-09-02T02:21:04
| 133,134,007
| 17,729
| 6,577
|
Apache-2.0
| 2023-09-14T19:45:32
| 2018-05-12T09:57:56
|
Java
|
UTF-8
|
R
| false
| true
| 4,767
|
rd
|
Tag.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tag.R
\docType{class}
\name{Tag}
\alias{Tag}
\title{Tag}
\format{
An \code{R6Class} generator object
}
\description{
Tag Class
}
\details{
OpenAPI Petstore
This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{id}}{integer [optional]}
\item{\code{name}}{character [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-Tag-new}{\code{Tag$new()}}
\item \href{#method-Tag-toJSON}{\code{Tag$toJSON()}}
\item \href{#method-Tag-fromJSON}{\code{Tag$fromJSON()}}
\item \href{#method-Tag-toJSONString}{\code{Tag$toJSONString()}}
\item \href{#method-Tag-fromJSONString}{\code{Tag$fromJSONString()}}
\item \href{#method-Tag-validateJSON}{\code{Tag$validateJSON()}}
\item \href{#method-Tag-toString}{\code{Tag$toString()}}
\item \href{#method-Tag-clone}{\code{Tag$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-new"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-new}{}}}
\subsection{Method \code{new()}}{
Initialize a new Tag class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$new(id = NULL, name = NULL, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{id}}{id}
\item{\code{name}}{name}
\item{\code{...}}{Other optional arguments.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$toJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Tag in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
Deserialize JSON string into an instance of Tag
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$fromJSON(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of Tag
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$toJSONString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Tag in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
Deserialize JSON string into an instance of Tag
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$fromJSONString(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of Tag
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-validateJSON"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-validateJSON}{}}}
\subsection{Method \code{validateJSON()}}{
Validate JSON input with respect to Tag and throw an exception if invalid
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$validateJSON(input)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input}}{the JSON input}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-toString"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-toString}{}}}
\subsection{Method \code{toString()}}{
To string (JSON format)
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$toString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
String representation of Tag
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Tag-clone"></a>}}
\if{latex}{\out{\hypertarget{method-Tag-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Tag$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
4137e0ee7e0f45c871b54d9b6c726a5f399d3201
|
a859ae6c8dc7e916266f38066ae8e35860cb9c6b
|
/R script/learn.R
|
75ffc30f242cdab5764b1246b2080753947ebcbd
|
[] |
no_license
|
dongwoLee/LectureData
|
a6d530529b7825229556cb9ab54e979c590b73c4
|
3c3e1f5d9dd6d998eb472d3787606890b0d94429
|
refs/heads/master
| 2021-01-18T22:09:59.462793
| 2017-10-30T07:37:51
| 2017-10-30T07:37:51
| 100,555,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,253
|
r
|
learn.R
|
#!/usr/bin/env Rscript
# Mosquito-count random-forest analysis.
# Usage: ./learn.R <data.csv>   (CSV must contain a `Mosq` response column)
# Writes two .Rdata files next to the input and prints variable importances.
args = commandArgs(trailingOnly=TRUE)
mosq.data <- read.csv(args[1])
inputData <- mosq.data
library(randomForest)
print("Loading 'randomForest'")
library(party)
print("Loading 'party'")
print("Importance Value Selection")
# Conditional-inference forest; the fitted object is saved for later reuse.
rffile1 <- paste(args[1],".rflog.Rdata",sep="")
print(rffile1)
#cf1 <- randomForest(Mosq ~ ., inputData, keep.forest=FALSE, ntree=500,log="y")
# NOTE(review): mtry=0 -- presumably intended to let cforest use its default;
# verify against the party documentation.
cf1 <- cforest(Mosq ~ ., data=inputData, control=cforest_unbiased(mtry=0,ntree=500))
save(cf1,file=rffile1)
# Top-level value: auto-printed when run with Rscript.
varimp(cf1)
#varimp(cf1, conditional=TRUE)
#varimpAUC(cf1)
print("Importance Value Selection 2")
# Classic randomForest permutation importance on the same data.
rffile2 <- paste(args[1],".rfimportance.Rdata",sep="")
print(rffile2)
set.seed(654)
mosq.rf <- randomForest(Mosq ~ .,data=inputData,ntree=500,keep.forest=FALSE,importance=TRUE)
save(mosq.rf,file=rffile2)
mosq.imp <- importance(mosq.rf)
mosq.imp
mosq.imp <- importance(mosq.rf, type=1)
mosq.imp
# NOTE(review): q() terminates the script here -- everything below
# (the 70/30 train/test prediction section) is UNREACHABLE when run
# non-interactively. Remove q() if the prediction step is wanted.
q()
print("Prediction")
set.seed(91)
print("Data Sampling")
# 70/30 train/test split by row index.
train <- sample(1:nrow(inputData), 0.7*nrow(inputData))
trainData <- inputData[train,]
testData <- inputData[-train,]
print("Learning...")
cForestMod <- cforest(Mosq ~ ., data = trainData)
actuals <- testData$Mosq
predicted <- predict(cForestMod, newdata = testData)
# Confusion table of observed vs. predicted values.
table(true=actuals,pred=predicted)
|
f522e18d443bdf63a931fbc1e199cc9c1dfc75ff
|
caa38c536aa036bcd8d080af1ea6f42751d7d8b6
|
/hils regresja/regresja_hills.R
|
42764a17557b4132ef6520c4738e259d32d0e69b
|
[] |
no_license
|
coprobo/R-models
|
76fc1388ecede70c8aa2b3814fecf213ad84f6fe
|
bcf71a3ef10e0e180b3e032bc1a4a2df04af3810
|
refs/heads/master
| 2021-05-11T03:00:36.681034
| 2018-01-17T23:18:54
| 2018-01-17T23:18:54
| 117,903,175
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 740
|
r
|
regresja_hills.R
|
###############################################
# Linear Regression Model #
###############################################
# (Comments translated from Polish.)
# Load the data (here using the built-in "hills" dataset from MASS)
# NOTE(review): install.packages() inside a script re-installs on every run
# and needs network access; consider installing once interactively instead.
install.packages("MASS")
library("MASS")
hills = hills
# NOTE(review): attach() is discouraged -- it puts the columns on the search
# path where they can mask other objects; `data = hills` below already
# suffices for the model, and plot() could use hills$time / hills$dist.
attach(hills)
# Construct a simple linear regression model
model.regresji = lm(dist ~ time, data = hills)
# More detailed printout of the obtained results
model.opis = summary(model.regresji)
# Intercept (B0)
coef(model.regresji)[1]
# Slope coefficient (B1)
coef(model.regresji)[2]
# Fitted (predicted) values: Yi
fitted(model.regresji)
# Typical regression plot || one explanatory variable
# (the plot title "Wykres regresji liniowej" is Polish for "Linear regression plot")
plot(time, dist, main="Wykres regresji liniowej")
abline(model.regresji)
grid()
|
32f01cf3f23aacc0c8736ab6fa8739f2b6d79933
|
884677dd48325c8314489ce0cfe0770b0d97e5c0
|
/man/cost_error.Rd
|
940e6461582ee818948770a7c65f02250d41d2e9
|
[] |
no_license
|
dtrfgv/dtrfgv
|
1ff5657c39933ae14f5057dbdd570c6235648461
|
a736537579ecddaa124898c3683eb209aad7537e
|
refs/heads/master
| 2020-03-28T12:36:31.603482
| 2019-04-26T20:37:56
| 2019-04-26T20:37:56
| 148,315,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 757
|
rd
|
cost_error.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions_Cost_Complexity_Pruning.R
\name{cost_error}
\alias{cost_error}
\title{cost_error}
\usage{
cost_error(node, tree, alpha)
}
\arguments{
\item{node}{the number of the node in the tree \code{tree}}
\item{tree}{a tree. A data frame returned by the function \code{CARTGV} and which summarizes the resulted CARTGV tree.}
\item{alpha}{a positive real. It is the value of the complexity parameter in the cost-complexity criterion.}
}
\value{
a numeric indicating the misclassification error of the subtree stemming from node, penalized by alpha.
}
\description{
function that calculates the misclassification error of the subtree stemming from \code{node}, penalized by alpha
}
|
486af586e6869908edf4db6693eddcf117eac531
|
6f1510f8efd00fa734c1e01e15b8bce20d059c86
|
/R/Functions.R
|
c7061d2150f8e9d56167401b08ba30f4182dc59c
|
[] |
no_license
|
T-Engel/CValternatives
|
fcf8a10412e41acba3d7ef7427ee140d2f819998
|
64656c8d5fef8800da60e8432c47b884394b5b00
|
refs/heads/master
| 2020-08-06T04:25:53.204355
| 2019-10-04T14:48:20
| 2019-10-04T14:48:20
| 212,830,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
Functions.R
|
#' Proportional Variability index (PV)
#'
#' Compute PV for the values in \code{Z}: the average, over all unordered
#' pairs of observations, of \code{1 - min/max}, i.e.
#' \code{2 * sum(1 - min/max) / (n * (n - 1))}.
#'
#' @param Z a numeric vector
#'
#' @return a numeric value
#' @export
#'
#' @examples
#' \donttest{
#' Z = c(2,3,4,5)
#' PV(Z)
#' }
PV <- function (Z){
    n_obs <- length(Z)
    # For each unordered pair, compute 1 - min/max directly via combn's FUN.
    pair_scores <- combn(Z, 2, FUN = function(pair) 1 - min(pair) / max(pair))
    # Normalize by the number of pairs, n * (n - 1) / 2.
    2 * sum(pair_scores) / (n_obs * (n_obs - 1))
}
#' Consecutive disparity index (D)
#'
#' Compute D of the series \code{P}: the mean over consecutive pairs of
#' \code{abs(log(P[i+1] / P[i]))}.
#'
#' @param P a numeric vector of length >= 2; values should be positive
#'   (non-positive values yield \code{NaN}/\code{Inf} from \code{log}).
#'
#' @return a numeric value
#' @export
#'
#' @examples
#' \donttest{
#' P = c(2,3,4,5)
#' D(P)
#' }
D <- function(P){
    n <- length(P)
    # Vectorized replacement for the original element-by-element loop, which
    # grew its result vector one cell at a time from an NA seed:
    # P[-1] / P[-n] is the series of consecutive ratios P[i+1] / P[i].
    sum(abs(log(P[-1] / P[-n]))) / (n - 1)
}
|
557bd97df125c32d3d69d50ba0ace8c614111dd8
|
88ef0324c59dd7f073c5dca09e535da9c591de81
|
/plot3.R
|
5fa8a8dd091234a9d8885f04b5e73289f5f7ae22
|
[] |
no_license
|
alklar/ExData_Plotting1
|
00b153a2240badfce8e2c69071f2813fa85d4a81
|
e6487ea1d7ae747ae24fb507717f4bb231dc2247
|
refs/heads/master
| 2021-01-18T21:48:13.501654
| 2016-08-23T20:12:20
| 2016-08-23T20:12:20
| 66,393,353
| 0
| 0
| null | 2016-08-23T18:37:36
| 2016-08-23T18:37:35
| null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
plot3.R
|
# Exploratory Data Analysis, plot 3: energy sub-metering over two days of the
# UCI "household power consumption" dataset, written to plot3.png (480x480).
# this is how to download and unzip the zip file
# uncomment the following lines if needed
#zipURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#zipFile <- "exdata.zip"
#download.file(zipURL, destfile= "exdata.zip")
#unzip(zipFile)
#create Data Frame from unzipped text file ("?" marks missing values)
exdataDF <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
na.strings = "?")
# convert Date column
exdataDF$Date <- as.Date(exdataDF$Date, "%d/%m/%Y")
# convert Time column, Date format now is YYYY/mm/dd
# NOTE(review): strptime() returns POSIXlt; storing POSIXlt in a data frame
# column works for the plots below but POSIXct is the recommended type.
exdataDF$Time <- strptime(paste (exdataDF$Date, exdataDF$Time, sep = " ", collapse = NULL),
"%Y-%m-%d %H:%M:%S")
# for plotting we only need data from the dates 2007-02-01 and 2007-02-02
lowDate <- as.Date("2007-02-01", "%Y-%m-%d")
highDate <- as.Date("2007-02-02", "%Y-%m-%d")
exdataDF2 <- exdataDF[(exdataDF$Date >= lowDate & exdataDF$Date <= highDate),]
# now we are ready to create the plot
png(filename = "plot3.png", width = 480, height = 480)
# start with Sub_metering_1 (black line)
with(exdataDF2, plot(Time, Sub_metering_1, type = "l", col = "black",
xlab = "", ylab = "Energy sub metering"))
# add a red line for Sub_metering_2
with(exdataDF2, lines(Time, Sub_metering_2, col = "red"))
# add a blue line for Sub_metering_3
with(exdataDF2, lines(Time, Sub_metering_3, col = "blue"))
# finally add the legend
legend("topright", lty=c(1,1), col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() # close file
|
e9679bfd0c2cf3737f074fe62543fbe3d2f85835
|
5d17dde663d995fd348ad610bec94d01a6b3ca8a
|
/helpers.R
|
8254be688b03947b4f15bfc7a4b074edcb6002bc
|
[] |
no_license
|
jealie/SFM_Workflow_Comparison
|
b760bdaadc2eb09c731c12040f25c13d1493ab1d
|
ebbb4d09f289265047186445662433c84ade2a18
|
refs/heads/master
| 2021-03-16T08:57:25.980092
| 2018-07-31T16:00:19
| 2018-07-31T16:00:19
| 66,972,493
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,285
|
r
|
helpers.R
|
# Patched copy of alphashape3d::ashape3d().
# Computes the 3D alpha-shape complex of a point cloud: the Delaunay
# tetrahedra, triangles, edges and vertices together with the critical alpha
# intervals (rho / mu / Mu) that classify each simplex as interior, regular
# or singular for every requested alpha value.
#
# The only change relative to the upstream function is the block marked
# "MODIFICATION HERE" below, which patches NaN tetrahedron circumradii
# (rho4) instead of letting them propagate.  Everything else, including the
# .C/.Call entries into the compiled alphashape3d routines, is deliberately
# kept byte-for-byte identical to upstream, so this function still requires
# the alphashape3d (and geometry, for delaunayn) packages to be loaded.
#
# Args:
#   x     - an n x 3 numeric matrix of points, or an existing "ashape3d"
#           object to which additional alpha values are added.
#   alpha - numeric vector of non-negative alpha values.
#   pert  - if TRUE, jitter the points when the general-position assumption
#           fails instead of stopping with an error.
#   eps   - relative standard deviation of the jitter used when pert = TRUE.
# Returns (invisibly) an "ashape3d" object.
mod_ashape3d = function (x, alpha, pert = FALSE, eps = 1e-09)
{
    flag <- 1
    flag2 <- 0
    alphaux <- alpha
    if (any(alphaux < 0)) {
        stop("Parameter alpha must be greater or equal to zero",
            call. = TRUE)
    }
    # Reuse an existing ashape3d object: keep its simplex tables and only
    # compute the classification columns for the new alpha values.
    if (inherits(x, "ashape3d")) {
        inh <- 1
        tc.def <- x$tetra
        tri.def <- x$triang
        ed.def <- x$edge
        vt.def <- x$vertex
        alphaold <- x$alpha
        if (any(match(alphaux, alphaold, nomatch = 0))) {
            warning("Some values of alpha have already been computed",
                call. = FALSE)
            alphaux <- alphaux[is.na(match(alphaux, alphaold))]
        }
        if (!is.null(x$xpert)) {
            flag <- 1
            xorig <- x$x
            x <- x$xpert
        }
        else {
            x <- x$x
        }
    }
    else {
        # NOTE(review): "poits" typo is in the upstream warning text; it is a
        # runtime string, left unchanged here.
        if (any(duplicated(x))) {
            warning("Duplicate poits were removed", call. = FALSE,
                immediate. = TRUE)
            x <- unique(x)
        }
        inh <- 0
        x = x * 1
        xorig <- x
        # The whole complex is (re)built here; the loop repeats with jittered
        # points whenever a degeneracy is detected and pert = TRUE.
        while (flag == 1) {
            flag <- 0
            n <- dim(x)[1]
            x4 <- x[, 1]^2 + x[, 2]^2 + x[, 3]^2
            tc <- matrix(delaunayn(x), ncol = 4)
            ntc <- dim(tc)[1]
            ORDM <- matrix(as.integer(0), ntc, 4)
            storage.mode(tc) <- "integer"
            storage.mode(ORDM) <- "integer"
            tc <- .C("sortm", ORDM, as.integer(ntc), as.integer(4),
                tc, PACKAGE = "alphashape3d")[[1]]
            tc.aux <- matrix(1:(4 * ntc), ncol = 4)
            # Enumerate the four faces of every tetrahedron; duplicated faces
            # are interior triangles, unique ones lie on the convex hull.
            tri <- rbind(tc[, -4], tc[, -3], tc[, -2], tc[, -1])
            t1 <- tri[, 1]
            t2 <- tri[, 2]
            t3 <- tri[, 3]
            ix3 = .Call("sortbycolumn", t1, t2, t3)
            d1 <- abs(diff(t1[ix3]))
            d2 <- abs(diff(t2[ix3]))
            d3 <- abs(diff(t3[ix3]))
            dup <- (d1 + d2 + d3) == 0
            dup <- c(FALSE, dup)
            i1 <- ix3[dup]
            i2 <- ix3[c(dup[-1], FALSE)]
            ih <- (1:(4 * ntc))[-c(i1, i2)]
            ntris <- length(i1)
            ntrich <- length(ih)
            ntri <- ntris + ntrich
            ind.tri <- numeric(4 * ntc)
            ind.tri[i1] <- 1:ntris
            ind.tri[i2] <- 1:ntris
            ind.tri[ih] <- ((ntris + 1):ntri)
            in.tc <- rep(1:ntc, 4)
            t1u <- t1[c(i1, ih)]
            t2u <- t2[c(i1, ih)]
            t3u <- t3[c(i1, ih)]
            on.ch3 <- numeric(ntri)
            on.ch3[(ntris + 1):ntri] <- 1
            tri.aux <- matrix(1:(3 * ntri), ncol = 3)
            storage.mode(x) <- "double"
            storage.mode(t1u) <- "integer"
            storage.mode(t2u) <- "integer"
            storage.mode(t3u) <- "integer"
            m123 <- numeric(ntri)
            storage.mode(m123) <- "double"
            fm123 <- .C("fm123", x, as.integer(n), t1u, t2u,
                t3u, as.integer(ntri), m123, PACKAGE = "alphashape3d")
            m123 <- fm123[[7]]
            m1230 <- +m123[ind.tri[tc.aux[, 1]]] - m123[ind.tri[tc.aux[,
                2]]] + m123[ind.tri[tc.aux[, 3]]] - m123[ind.tri[tc.aux[,
                4]]]
            # A zero determinant means degenerate (co-planar) points.
            if (any(m1230 == 0) & !pert) {
                stop("The general position assumption is not satisfied\nPlease enter data in general position or set pert=TRUE to allow perturbation",
                  call. = FALSE)
            }
            if (any(m1230 == 0) & pert) {
                flag <- 1
            }
            # Build the unique edge and vertex tables and their rho values.
            if (flag == 0) {
                e1 <- c(t1u, t1u, t2u)
                e2 <- c(t2u, t3u, t3u)
                a1 <- 10^nchar(max(c(e1, e2)))
                a2 <- e2 + e1 * a1
                ix2 <- order(a2)
                d1 <- abs(diff(e1[ix2]))
                d2 <- abs(diff(e2[ix2]))
                dup <- (d1 + d2) == 0
                in.tri <- rep(1:ntri, 3)
                dup <- c(which(!dup), length(dup) + 1)
                dup.aux <- dup
                dup <- c(dup[1], diff(dup))
                ned <- length(dup)
                auxi <- rep(1:ned, dup)
                ind.ed <- numeric()
                ind.ed[ix2] <- auxi
                in.trio <- in.tri[ix2]
                e1u <- e1[ix2][dup.aux]
                e2u <- e2[ix2][dup.aux]
                on.ch2 <- numeric(ned)
                on.ch2[ind.ed[tri.aux[as.logical(on.ch3), ]]] <- 1
                vt <- c(e1u, e2u)
                ind.vt <- vt
                nvt <- n
                in.ed <- rep(1:ned, 2)
                ix1 <- order(vt)
                dup1 <- diff(vt[ix1]) == 0
                dup1 <- c(which(!dup1), length(dup1) + 1)
                dup1.aux <- dup1
                dup1 <- c(dup1[1], diff(dup1))
                in.edo <- in.ed[ix1]
                on.ch1 <- numeric(nvt)
                on.ch1[c(t1u[as.logical(on.ch3)], t2u[as.logical(on.ch3)],
                  t3u[as.logical(on.ch3)])] <- 1
                storage.mode(e1u) <- "integer"
                storage.mode(e2u) <- "integer"
                mk0 <- matrix(0, nrow = ned, ncol = 3)
                storage.mode(mk0) <- "double"
                num2 <- numeric(ned)
                storage.mode(num2) <- "double"
                fmk0 <- .C("fmk0", x, as.integer(n), e1u, e2u,
                  as.integer(ned), mk0, num2, PACKAGE = "alphashape3d")
                mk0 <- fmk0[[6]]
                num.rho2 <- fmk0[[7]]
                rho2 <- sqrt(0.25 * num.rho2)
                storage.mode(tri.aux) <- "integer"
                storage.mode(ind.ed) <- "integer"
                storage.mode(mk0) <- "double"
                m23 <- numeric(ntri)
                storage.mode(m23) <- "double"
                m13 <- numeric(ntri)
                storage.mode(m13) <- "double"
                m12 <- numeric(ntri)
                storage.mode(m12) <- "double"
                storage.mode(num2) <- "double"
                num3 <- numeric(ntri)
                storage.mode(num3) <- "double"
                fmij0 <- .C("fmij0", x, as.integer(n), t1u, t2u,
                  t3u, as.integer(ntri), tri.aux, ind.ed, as.integer(ned),
                  mk0, m23, m13, m12, num.rho2, num3, PACKAGE = "alphashape3d")
                m230 <- fmij0[[11]]
                m130 <- fmij0[[12]]
                m120 <- fmij0[[13]]
                num.rho3 <- fmij0[[15]]
                den.rho3 <- m230^2 + m130^2 + m120^2
                if (any(den.rho3 == 0) & !pert) {
                  stop("The general position assumption is not satisfied\nPlease enter data in general position or set pert=TRUE to allow perturbation",
                    call. = FALSE)
                }
                if (any(den.rho3 == 0) & pert) {
                  flag <- 1
                }
            }
            # Triangle and tetrahedron circumradii plus attachment flags.
            if (flag == 0) {
                rho3 <- sqrt(0.25 * num.rho3/(m230^2 + m130^2 +
                  m120^2))
                storage.mode(x4) <- "double"
                storage.mode(tc.aux) <- "integer"
                storage.mode(ind.tri) <- "integer"
                storage.mode(m230) <- "double"
                storage.mode(m130) <- "double"
                storage.mode(m120) <- "double"
                m2340 <- numeric(ntc)
                m1340 <- numeric(ntc)
                m1240 <- numeric(ntc)
                storage.mode(m2340) <- "double"
                storage.mode(m1340) <- "double"
                storage.mode(m1240) <- "double"
                fmijk0 <- .C("fmijk0", x4, as.integer(n), tc,
                  as.integer(ntc), tc.aux, ind.tri, as.integer(ntri),
                  m230, m130, m120, m2340, m1340, m1240, PACKAGE = "alphashape3d")
                m2340 <- fmijk0[[11]]
                m1340 <- fmijk0[[12]]
                m1240 <- fmijk0[[13]]
                m1234 <- -(-x4[tc[, 4]] * m123[ind.tri[tc.aux[,
                  1]]] + x4[tc[, 3]] * m123[ind.tri[tc.aux[,
                  2]]] - x4[tc[, 2]] * m123[ind.tri[tc.aux[,
                  3]]] + x4[tc[, 1]] * m123[ind.tri[tc.aux[,
                  4]]])
                rho.sq <- (0.25 * (m2340^2 + m1340^2 + m1240^2 +
                  4 * m1230 * m1234)/m1230^2)
                # Recompute squared circumradii that came out negative due to
                # numerical error, directly from determinants per tetrahedron.
                if (any(rho.sq < 0)) {
                  ind <- which(rho.sq < 0)
                  v1 <- x[tc[, 1], ]
                  v2 <- x[tc[, 2], ]
                  v3 <- x[tc[, 3], ]
                  v4 <- x[tc[, 4], ]
                  v1 <- v1[ind, , drop = FALSE]
                  v2 <- v2[ind, , drop = FALSE]
                  v3 <- v3[ind, , drop = FALSE]
                  v4 <- v4[ind, , drop = FALSE]
                  nv1 <- rowSums(v1^2)
                  nv2 <- rowSums(v2^2)
                  nv3 <- rowSums(v3^2)
                  nv4 <- rowSums(v4^2)
                  Dx <- numeric()
                  Dy <- numeric()
                  Dz <- numeric()
                  ct <- numeric()
                  a <- numeric()
                  for (i in 1:length(ind)) {
                    tetra <- rbind(v1[i, ], v2[i, ], v3[i, ],
                      v4[i, ])
                    nor <- c(nv1[i], nv2[i], nv3[i], nv4[i])
                    Dx[i] <- det(cbind(nor, tetra[, 2:3], rep(1,
                      4)))
                    Dy[i] <- -det(cbind(nor, tetra[, c(1, 3)],
                      rep(1, 4)))
                    Dz[i] <- det(cbind(nor, tetra[, 1:2], rep(1,
                      4)))
                    ct[i] <- det(cbind(nor, tetra[, 1:3]))
                    a[i] <- det(cbind(tetra[, 1:3], rep(1, 4)))
                  }
                  rho.sq[ind] <- 0.25 * (Dx^2 + Dy^2 + Dz^2 -
                    4 * a * ct)/a^2
                }
                rho4 <- sqrt(rho.sq)
                #########################
                ### MODIFICATION HERE ###
                #########################
                # Patch NaN circumradii (from degenerate tetrahedra) with the
                # previous tetrahedron's value instead of propagating NaN.
                # NOTE(review): when i == 1, which(is.nan(rho4))[1] is 1, so
                # rho4[1] is assigned to itself and stays NaN -- confirm
                # whether the first NaN was meant to take a non-NaN neighbour.
                if (any(is.nan(rho4))) {
                  for (i in which(is.nan(rho4))) {
                    if (i == 1) {
                      rho4[i] = rho4[which(is.nan(rho4))[1]]
                    } else {
                      rho4[i] = rho4[i-1]
                    }
                  }
                }
                # Critical intervals (mu/Mu) for triangles, edges, vertices
                # via the compiled interval routines int3/int2/int1.
                rf1 <- rho4[in.tc[i1]]
                rf2 <- rho4[in.tc[i2]]
                ntri2 <- length(rf1)
                mu3 <- numeric(ntri2)
                storage.mode(mu3) <- "double"
                mu3up <- numeric(ntri2)
                storage.mode(mu3up) <- "double"
                mus3 <- .C("int3", as.integer(ntri2), as.double(rf1),
                  as.double(rf2), mu3, mu3up, PACKAGE = "alphashape3d")
                mu3 <- c(mus3[[4]], rho4[in.tc[ih]])
                Mu3 <- c(mus3[[5]], rho4[in.tc[ih]])
                sign <- rep(c(1, -1, 1, -1), each = ntc)
                pu1 <- sign[i1] * m2340[in.tc[i1]] * m230[1:ntris] +
                  sign[i1] * m1340[in.tc[i1]] * m130[1:ntris] +
                  sign[i1] * m1240[in.tc[i1]] * m120[1:ntris] -
                  2 * sign[i1] * m1230[in.tc[i1]] * m123[1:ntris]
                pu2 <- sign[i2] * m2340[in.tc[i2]] * m230[1:ntris] +
                  sign[i2] * m1340[in.tc[i2]] * m130[1:ntris] +
                  sign[i2] * m1240[in.tc[i2]] * m120[1:ntris] -
                  2 * sign[i2] * m1230[in.tc[i2]] * m123[1:ntris]
                at3 <- (pu1 > 0 | pu2 > 0)
                pu3 <- sign[ih] * m2340[in.tc[ih]] * m230[(ntris +
                  1):ntri] + sign[ih] * m1340[in.tc[ih]] * m130[(ntris +
                  1):ntri] + sign[ih] * m1240[in.tc[ih]] * m120[(ntris +
                  1):ntri] - 2 * sign[ih] * m1230[in.tc[ih]] *
                  m123[(ntris + 1):ntri]
                at3 <- c(at3, pu3 > 0)
                auxmu3 <- (1 - at3) * rho3 + at3 * mu3
                storage.mode(in.trio) <- "integer"
                storage.mode(dup) <- "integer"
                storage.mode(auxmu3) <- "double"
                storage.mode(Mu3) <- "double"
                mu2 <- numeric(ned)
                storage.mode(mu2) <- "double"
                mu2up <- numeric(ned)
                storage.mode(mu2up) <- "double"
                aux1 <- 1:(3 * ntri)
                aux1 <- ceiling(aux1[ix2]/ntri)
                storage.mode(aux1) <- "integer"
                storage.mode(num.rho2) <- "double"
                storage.mode(mk0) <- "double"
                at <- numeric(ned)
                storage.mode(at) <- "integer"
                mus2 <- .C("int2", dup, as.integer(ned), as.integer(ntri),
                  in.trio, auxmu3, Mu3, mu2, mu2up, tri.aux,
                  aux1, ind.ed, num.rho2, mk0, at, PACKAGE = "alphashape3d")
                mu2 <- mus2[[7]]
                Mu2 <- mus2[[8]]
                at2 <- mus2[[14]]
                auxmu2 <- (1 - at2) * rho2 + at2 * mu2
                storage.mode(in.edo) <- "integer"
                storage.mode(dup1) <- "integer"
                storage.mode(auxmu2) <- "double"
                storage.mode(Mu2) <- "double"
                mu1 <- numeric(nvt)
                storage.mode(mu1) <- "double"
                mu1up <- numeric(nvt)
                storage.mode(mu1up) <- "double"
                mus1 <- .C("int1", dup1, as.integer(nvt), as.integer(ned),
                  in.edo, auxmu2, Mu2, mu1, mu1up, PACKAGE = "alphashape3d")
                mu1 <- mus1[[7]]
                Mu1 <- mus1[[8]]
                # Assemble the simplex tables returned in the object.
                tc.def <- cbind(tc, rho4)
                tri.def <- cbind(t1u, t2u, t3u, on.ch3, at3,
                  rho3, mu3, Mu3)
                ed.def <- cbind(e1u, e2u, on.ch2, at2, rho2,
                  mu2, Mu2)
                vt.def <- cbind(1:nvt, on.ch1, mu1, Mu1)
                colnames(tc.def) <- c("v1", "v2", "v3", "v4",
                  "rhoT")
                colnames(tri.def) <- c("tr1", "tr2", "tr3", "on.ch",
                  "attached", "rhoT", "muT", "MuT")
                colnames(ed.def) <- c("ed1", "ed2", "on.ch",
                  "attached", "rhoT", "muT", "MuT")
                colnames(vt.def) <- c("v1", "on.ch", "muT", "MuT")
            }
            # Degeneracy detected and pert = TRUE: jitter and recompute.
            if (flag == 1) {
                flag2 <- 1
                warning("The general position assumption is not satisfied\nPerturbation of the data set was required",
                  call. = FALSE, immediate. = TRUE)
                x <- x + rnorm(length(x), sd = sd(as.numeric(x)) *
                  eps)
            }
        }
    }
    # For every requested alpha, append an "fc:<alpha>" classification column
    # to each simplex table (0 = not in shape, 1 = interior, 2 = regular,
    # 3 = singular, following the rho/mu/Mu interval comparisons).
    if (length(alphaux) > 0) {
        for (i in 1:length(alphaux)) {
            alpha <- alphaux[i]
            fclass <- numeric(length = length(tc.def[, "rhoT"]))
            fclass[alpha > tc.def[, "rhoT"]] <- 1
            tc.def <- cbind(tc.def, fclass)
            colnames(tc.def)[length(colnames(tc.def))] <- paste("fc:",
                alpha, sep = "")
            fclass <- numeric(length = dim(tri.def)[1])
            fclass[tri.def[, "on.ch"] == 0 & tri.def[, "attached"] ==
                0 & alpha > tri.def[, "rhoT"] & alpha < tri.def[,
                "muT"]] <- 3
            fclass[tri.def[, "on.ch"] == 0 & alpha > tri.def[,
                "muT"] & alpha < tri.def[, "MuT"]] <- 2
            fclass[tri.def[, "on.ch"] == 0 & alpha > tri.def[,
                "MuT"]] <- 1
            fclass[tri.def[, "on.ch"] == 1 & tri.def[, "attached"] ==
                0 & alpha > tri.def[, "rhoT"] & alpha < tri.def[,
                "muT"]] <- 3
            fclass[tri.def[, "on.ch"] == 1 & alpha > tri.def[,
                "muT"]] <- 2
            tri.def <- cbind(tri.def, fclass)
            colnames(tri.def)[length(colnames(tri.def))] <- paste("fc:",
                alpha, sep = "")
            fclass <- numeric(length = dim(ed.def)[1])
            fclass[ed.def[, "on.ch"] == 0 & ed.def[, "attached"] ==
                0 & alpha > ed.def[, "rhoT"] & alpha < ed.def[,
                "muT"]] <- 3
            fclass[ed.def[, "on.ch"] == 0 & alpha > ed.def[,
                "muT"] & alpha < ed.def[, "MuT"]] <- 2
            fclass[ed.def[, "on.ch"] == 0 & alpha > ed.def[,
                "MuT"]] <- 1
            fclass[ed.def[, "on.ch"] == 1 & ed.def[, "attached"] ==
                0 & alpha > ed.def[, "rhoT"] & alpha < ed.def[,
                "muT"]] <- 3
            fclass[ed.def[, "on.ch"] == 1 & alpha > ed.def[,
                "muT"]] <- 2
            ed.def <- cbind(ed.def, fclass)
            colnames(ed.def)[length(colnames(ed.def))] <- paste("fc:",
                alpha, sep = "")
            fclass <- numeric(length = dim(vt.def)[1])
            fclass[alpha < vt.def[, "muT"]] <- 3
            fclass[vt.def[, "on.ch"] == 0 & alpha > vt.def[,
                "muT"] & alpha < vt.def[, "MuT"]] <- 2
            fclass[vt.def[, "on.ch"] == 0 & alpha > vt.def[,
                "MuT"]] <- 1
            fclass[vt.def[, "on.ch"] == 1 & alpha > vt.def[,
                "muT"]] <- 2
            vt.def <- cbind(vt.def, fclass)
            colnames(vt.def)[length(colnames(vt.def))] <- paste("fc:",
                alpha, sep = "")
        }
    }
    if (inh) {
        alphaux <- unique(c(alphaold, alphaux))
    }
    # Keep both the original and the perturbed coordinates when jitter was used.
    if (flag2 == 1) {
        ashape3d.obj <- list(tetra = tc.def, triang = tri.def,
            edge = ed.def, vertex = vt.def, x = xorig, alpha = alphaux,
            xpert = x)
    }
    else {
        ashape3d.obj <- list(tetra = tc.def, triang = tri.def,
            edge = ed.def, vertex = vt.def, x = x, alpha = alphaux)
    }
    class(ashape3d.obj) <- "ashape3d"
    invisible(ashape3d.obj)
}
# Read an ASCII PLY mesh file, preserving per-vertex RGB colour when present.
# Variant of geomorph's read.ply(): returns a "mesh3d"/"shape3d" object with
# homogeneous vertex coordinates (vb), 1-based triangle indices (it) and an
# optional material$color vector; optionally adds normals and displays the
# mesh in the current rgl scene.
#
# Args:
#   file         - path to an ASCII-format PLY file.
#   ShowSpecimen - if TRUE, render the mesh via rgl (clear3d/dot3d/shade3d).
#   addNormals   - if TRUE, call addNormals() on the mesh before returning.
# Returns the mesh3d object.
#
# NOTE(review): require('geomorph') is used for loading; library() (or a
# requireNamespace() check with an explicit error) would fail loudly if the
# package is missing instead of continuing and erroring later.
color.read.ply = function (file, ShowSpecimen = TRUE, addNormals = TRUE)
{
    require('geomorph')
    plyfile <- scan(file = file, what = "char", sep = "\n", strip.white = TRUE,
        quiet = TRUE)
    is.ply <- grep("ply", plyfile)
    if ((length(is.ply) == 0))
        stop("File is not a PLY file")
    format <- unlist(strsplit(grep(c("format "), plyfile, value = TRUE),
        " "))
    if (format[2] != "ascii")
        stop("PLY file is not ASCII format: ", "format = ", format[2:length(format)])
    poly <- NULL
    material <- NULL
    # Parse header: vertex count, face count, end-of-header position and the
    # per-vertex property list (used to locate the x/y/z columns).
    xline <- unlist(strsplit(grep(c("vertex "), plyfile, value = TRUE),
        " "))
    npoints <- as.numeric(xline[grep(c("vertex"), xline) + 1])
    yline <- unlist(strsplit(grep(c("element face"), plyfile,
        value = TRUE), " "))
    npoly <- as.numeric(yline[grep(c("face"), yline) + 1])
    headerend <- grep(c("end_header"), plyfile)
    ncolpts <- (length(grep(c("property"), plyfile)) - 1)
    cols <- grep(c("property"), plyfile, value = TRUE)
    x <- grep(c(" x"), cols)
    y <- grep(c(" y"), cols)
    z <- grep(c(" z"), cols)
    # Vertex block: one row per property, one column per vertex.
    points <- as.matrix(as.numeric(unlist(strsplit(plyfile[(headerend +
        1):(headerend + npoints)], " "))))
    dim(points) <- c(ncolpts, npoints)
    xpts <- points[x, ]
    ypts <- points[y, ]
    zpts <- points[z, ]
    # Homogeneous coordinates as expected by mesh3d's vb slot.
    vertices <- rbind(xpts, ypts, zpts, 1)
    if (yline[3] == 0)
        print("Object has zero faces")
    if (yline[3] != 0) {
        # Face block: drop the leading per-face vertex count and convert the
        # 0-based PLY indices to R's 1-based indexing.
        poly <- as.matrix(as.numeric(unlist(strsplit(plyfile[(headerend +
            npoints + 1):(headerend + npoints + npoly)], " "))))
        dim(poly) <- c((poly[1] + 1), npoly)
        poly <- poly[-1, ]
        poly = poly + 1
    }
    # Per-vertex colour, assumed to occupy vertex properties 4-6 when a
    # "property uchar red" header line is present.
    colinfo <- grep("property uchar red", plyfile)
    if (length(colinfo) != 0) {
        color <- rgb(points[4, ], points[5, ], points[6, ], maxColorValue = 255)
        material$color <- color
    }
    mesh <- list(vb = vertices, it = poly, primitivetype = "triangle", material=material)
    class(mesh) <- c("mesh3d", "shape3d")
    if (addNormals == TRUE) {
        mesh <- addNormals(mesh)
    }
    if (ShowSpecimen == TRUE) {
        clear3d()
        if (length(poly) == 0) {
            dot3d(mesh)
        }
        if (length(material) != 0) {
            shade3d(mesh)
        }
        # NOTE(review): this gray shade3d() runs unconditionally, even right
        # after the coloured shade3d() above -- it looks like it was meant to
        # be an else branch; confirm the intended display behaviour.
        shade3d(mesh, color = "gray")
    }
    return(mesh)
}
|
a6951fb8d5eb4c9a29bbb9f1e9b519a5d090ab19
|
ff4335cd97bd4bfda812240029024f0c0f856c3a
|
/R/mod_formcomplete.R
|
f2e385c5aa646b84c6d98c70464f3ba3fc134619
|
[
"MIT"
] |
permissive
|
SwissClinicalTrialOrganisation/secuTrialRshiny
|
82cfe99c58c3fa81d4b8d6276dc6293c3a637185
|
ef622ac456fa894593013dc514ea4ebd1dfe5168
|
refs/heads/master
| 2021-02-18T09:36:20.716973
| 2020-04-01T08:46:13
| 2020-04-01T08:46:13
| 245,182,231
| 1
| 2
|
MIT
| 2020-04-27T12:15:51
| 2020-03-05T14:19:36
|
R
|
UTF-8
|
R
| false
| false
| 3,420
|
r
|
mod_formcomplete.R
|
#' Shiny module UI function for form completeness monitoring
#'
#' This function represents a shiny dashboard UI module that allows users to
#' view a form completeness table.
#'
#' @param id string containing a namespace identifier
#' @param label string to be used as sidebar tab label
#' @return shiny.tag list object containing the tab item content
#' @seealso \code{\link{mod_formcomplete_srv}}
#' @import shiny
#' @import shinydashboard
#' @importFrom shinyWidgets materialSwitch
#' @export
#'
mod_formcomplete_UI <- function(id, label) {
# Namespacing function so input/output ids stay unique per module instance.
ns <- NS(id)
tabItem(tabName = label,
fluidRow(
h2("Form completeness"),
br()
),
# Three toggles, all on by default; ids must match the input$percent,
# input$counts and input$rmat reads in mod_formcomplete_srv.
fluidRow(
#checkboxInput(inputId = ns("percent"), label = "Percent", value = TRUE),
materialSwitch(inputId = ns("percent"), label = "Percent switch",
value = TRUE, status = "danger", right = TRUE),
#checkboxInput(inputId = ns("counts"), label = "Counts", value = TRUE),
materialSwitch(inputId = ns("counts"), label = "Counts",
value = TRUE, status = "danger", right = TRUE),
#checkboxInput(inputId = "rmat", label = "Remove audit trail (at) forms"),
materialSwitch(inputId = ns("rmat"), label = "Remove audit trail (at) forms",
value = TRUE, status = "danger", right = TRUE)
),
# Percent table on the left, count table on the right.
# NOTE(review): shinydashboard box() widths are normally on a 1-12 grid;
# confirm 300/250 render as intended.
fluidRow(
box(
tableOutput(ns("form_completeness_perc")),
width = 300
),
box(
tableOutput(ns("form_completeness_count")),
width = 250
)
),
# Footer describing the uploaded export file.
fluidRow(
br(), br(),
com_footer_UI(ns("file_info"))
)
)
}
#' Shiny module server function for form completeness monitoring
#'
#' This function represents a shiny dashboard server module that allows users to
#' view a form completeness table.
#'
#' @param input session's input object
#' @param output session's output object
#' @param session session object environment
#' @param sT_export secuTrialdata object generated e.g. with secuTrialR::read_secuTrial()
#' @param vals_upload reactivevalues list containing the output of \code{\link{mod_upload_srv}}
#' @seealso \code{\link{mod_formcomplete_UI}}
#' @import shiny
#' @importFrom secuTrialR form_status_summary
#' @export
#'
mod_formcomplete_srv <- function(input, output, session, sT_export, vals_upload) {
  # Shared helper for both tables below: summarise form status, optionally
  # drop audit-trail forms (names starting with "at") when input$rmat is on,
  # and keep either the percent columns (plus form_name) or the count columns.
  # This replaces two copy-pasted render bodies whose if/else arms differed
  # only in the row filter, not the column subset.
  completeness_table <- function(percent) {
    table <- form_status_summary(sT_export())
    all_names <- names(table)
    if (percent) {
      cols <- c("form_name", all_names[grepl(all_names, pattern = ".percent")])
    } else {
      cols <- all_names[! grepl(all_names, pattern = ".percent")]
    }
    if (input$rmat) {
      table <- table[which(! grepl(table$form_name, pattern = "^at")), ]
    }
    table[, cols]
  }
  # Count table, rendered only while the "Counts" switch is on
  # (renderTable shows nothing when the expression returns NULL).
  output$form_completeness_count <- renderTable({
    if (input$counts) {
      completeness_table(percent = FALSE)
    }
  })
  # Percent table, rendered only while the "Percent" switch is on.
  output$form_completeness_perc <- renderTable({
    if (input$percent) {
      completeness_table(percent = TRUE)
    }
  })
  # Footer text describing the uploaded export file.
  output$file_info <- renderText({
    vals_upload$file_info
  })
}
|
0d15df7abdb5168dbf04c5667288b2a83b5243a9
|
492f49a78bea9ab16fc99d159653722113afa125
|
/man/headers_flextable_at_bkm.Rd
|
8f24873735b3afe218222f08f89bf89ab0ba9c5e
|
[] |
no_license
|
davidgohel/flextable
|
48c34514420e435ca70f65354e94aa69786777bc
|
fc62aaf29c01bbac26fe34ef85240afe4eb201ab
|
refs/heads/master
| 2023-08-23T06:49:13.945566
| 2023-08-20T22:53:39
| 2023-08-20T22:53:39
| 62,127,938
| 502
| 83
| null | 2023-08-20T19:03:11
| 2016-06-28T09:25:11
|
R
|
UTF-8
|
R
| false
| true
| 755
|
rd
|
headers_flextable_at_bkm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/body_add_flextable.R
\name{headers_flextable_at_bkm}
\alias{headers_flextable_at_bkm}
\title{Add flextable at a bookmark location in document's header}
\usage{
headers_flextable_at_bkm(x, bookmark, value)
}
\arguments{
\item{x}{an rdocx object}
\item{bookmark}{bookmark id}
\item{value}{a flextable object}
}
\description{
Replace a paragraph containing a bookmark in a document's header with a flextable.
A bookmark will be considered as valid if enclosing words
within a paragraph; i.e., a bookmark along two or more paragraphs is invalid,
a bookmark set on a whole paragraph is also invalid, but bookmarking few words
inside a paragraph is valid.
}
\keyword{internal}
|
4b2983fb2035a33f66460da68c32f3abb3047ef9
|
1390e0a0714de7dfc5314dc10afdbac1549e61c3
|
/time_series_forecasting_correlate_analysis.R
|
b6626a7196d41e9bad723fadcb3f07c2c2d693c5
|
[
"MIT"
] |
permissive
|
hanhanwu/Hanhan_Data_Science_Practice
|
1a97efa544aefca1335a1fd7b44f73636d2ddb8e
|
5de73e6df2dcfc623cf06601e6e3ada5a4aaac34
|
refs/heads/master
| 2023-06-22T06:46:58.022975
| 2023-06-14T23:26:24
| 2023-06-14T23:26:24
| 56,539,777
| 25
| 18
|
MIT
| 2022-04-03T22:07:09
| 2016-04-18T20:28:02
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,781
|
r
|
time_series_forecasting_correlate_analysis.R
|
# WITH TIME SERIES OBJECT & CORRELATE ANALYSIS
# Azure ML Studio R module: reads the California dairy dataset from input
# port 1, detrends/standardizes selected series, and outputs their pairwise
# cross-correlations at lags -1/0/+1 through the output port.
cadairydata <- maml.mapInputPort(1)
# Create a new column as a POSIXct object: the first day of each
# Year/Month.Number combination, interpreted in the PST8PDT timezone.
Sys.setenv(TZ = "PST8PDT")
cadairydata$Time <- as.POSIXct(strptime(paste(as.character(cadairydata$Year), "-", as.character(cadairydata$Month.Number), "-01 00:00:00", sep = ""), "%Y-%m-%d %H:%M:%S"))
# correlate analysis
ts.detrend <- function(ts, Time, min.length = 3){
  ## Detrend and standardize a time series.
  ##
  ## Args:
  ##   ts         - numeric vector of observations.
  ##   Time       - POSIXct vector of the same length as ts.
  ##   min.length - minimum usable series length (default 3).
  ## Returns the detrended, scaled series; on invalid input the original
  ## series (or a zero vector) is returned with a warning.
  ##
  ## Fixes relative to the original version:
  ##   * the POSIXct check below used the global `cadairydata$Time` instead
  ##     of the `Time` argument, so the function errored whenever that global
  ##     data frame was absent;
  ##   * `zerovec` was built with rep(length(ts), 0.0) -- arguments swapped,
  ##     which produced an empty vector instead of a vector of zeros.
  messages <- c('ERROR: ts.detrend requires arguments ts and Time to have the same length',
                'ERROR: ts.detrend requires argument ts to be of type numeric',
                paste('WARNING: ts.detrend has encountered a time series with length less than', as.character(min.length)),
                'ERROR: ts.detrend has encountered a Time argument not of class POSIXct',
                'ERROR: Detrend regression has failed in ts.detrend',
                'ERROR: Exception occurred in ts.detrend while standardizing time series in function ts.detrend'
                )
  # Vector of zeros returned as a safe default in some failure cases.
  zerovec <- rep(0.0, length(ts))
  # The input arguments are not of the same length, return ts and quit.
  if(length(Time) != length(ts)) {warning(messages[1]); return(ts)}
  # If the ts is not numeric, just return a zero vector and quit.
  if(!is.numeric(ts)) {warning(messages[2]); return(zerovec)}
  # If the ts is too short, just return it and quit.
  if((ts.length <- length(ts)) < min.length) {warning(messages[3]); return(ts)}
  ## Check that the Time argument is of class POSIXct (bug fix: previously
  ## inspected the global cadairydata$Time instead of the argument).
  if(class(Time)[[1]] != "POSIXct") {warning(messages[4]); return(ts)}
  ## Detrend the time series using a linear model.
  ts.frame <- data.frame(ts = ts, Time = Time)
  tryCatch({ts <- ts - fitted(lm(ts ~ Time, data = ts.frame))},
           error = function(e){warning(messages[5]); zerovec})
  ## NOTE(review): this scales by sqrt(SS) / (n - 1); a sample standard
  ## deviation would be sqrt(SS / (n - 1)). Kept as-is to preserve the
  ## original numerical behaviour.
  tryCatch( {stdev <- sqrt(sum((ts - mean(ts))^2))/(ts.length - 1)
             ts <- ts/stdev},
            error = function(e){warning(messages[6]); zerovec})
  ts
}
# Apply the detrend.ts function to the variables of interest.
# NOTE(review): ts.detrend's signature is (ts, Time, min.length); this call
# passes the whole cadairydata data frame as `Time` and cadairydata$Time as
# `min.length`. The length check inside ts.detrend then fails and each series
# is returned unchanged with a warning -- presumably
# lapply(..., Time = cadairydata$Time) was intended. Confirm before relying
# on the "detrended" values downstream.
df.detrend <- data.frame(lapply(cadairydata[, 4:7], ts.detrend, cadairydata, cadairydata$Time))
# generate the pairwise scatterplot matrix
pairs(~ Cotagecheese.Prod + Icecream.Prod + Milk.Prod + N.CA.Fat.Price, data = df.detrend, main = "Pairwise Scatterplots of detrended standardized time series")
# A function to compute pairwise correlations from a
## list of time series value vectors; pair.ind holds the two list indices.
pair.cor <- function(pair.ind, ts.list, lag.max = 1, plot = FALSE){
ccf(ts.list[[pair.ind[1]]], ts.list[[pair.ind[2]]], lag.max = lag.max, plot = plot)
}
## A list of the pairwaise indices.
corpairs <- list(c(1,2), c(1,3), c(1,4), c(2,3), c(2,4), c(3,4))
## Compute the list of ccf objects.
cadairycorrelations <- lapply(corpairs, pair.cor, df.detrend)
## None of these correlation values is large enough to be significant. We can therefore conclude that we can model each variable independently.
cadairycorrelations
# Flatten each ccf's acf values (lags -1, 0, +1) into one row per pair.
df.correlations <- data.frame(do.call(rbind, lapply(cadairycorrelations, '[[', 1)))
c.names <- c("correlation pair", "-1 lag", "0 lag", "+1 lag")
# NOTE(review): "Mik Prod" below is a typo for "Milk Prod" in a display
# label; left unchanged here because it is a runtime string.
r.names <- c("Corr Cot Cheese - Ice Cream",
"Corr Cot Cheese - Milk Prod",
"Corr Cot Cheese - Fat Price",
"Corr Ice Cream - Mik Prod",
"Corr Ice Cream - Fat Price",
"Corr Milk Prod - Fat Price")
## Build a dataframe with the row names column and the
## correlation data frame and assign the column names
outframe <- cbind(r.names, df.correlations)
colnames(outframe) <- c.names
outframe
# Return the correlation table through the module's output port.
maml.mapOutputPort('outframe')
|
726e7d4fd34a0ed2c7a8f8e74d18d9cf5e7b9e25
|
0f62a3d2021951b862c74e4cf01c292fce60a9bf
|
/SVM.R
|
fb2ebaf9340edf165564a8ae84bb4d7c106f80f8
|
[] |
no_license
|
saniya-k/bank-marketing
|
9e7c3437c48121788b9fcc0273f79c5628425c46
|
0a3224ee603d458e317cff26c5be76fb5aa19fa7
|
refs/heads/master
| 2023-02-28T12:53:08.416989
| 2021-02-06T22:25:04
| 2021-02-06T22:25:04
| 271,338,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,751
|
r
|
SVM.R
|
# Bank-marketing term-deposit classification with SVMs:
# load preprocessed data, one-hot encode categorical variables, 80/20 split,
# SMOTE rebalancing, compare SVM kernels, tune the polynomial kernel with
# 10-fold CV, then evaluate on the hold-out set with precision/recall and ROC.
library(ggplot2)
library(caret)
library(dplyr)
library(DMwR)
library(e1071)
library(factoextra)
library(pROC)
library(kableExtra)
##### Read the pre-processed file #####
new_df= read.csv("data/Imputed_data.csv",header=TRUE)
# Subscribed vs Not Subscribed
barplot(table(new_df$y))
dim(new_df)
str(new_df)
# Remove first column
new_df = new_df[,-1]
# irrelevant & redundant vars can be handled by SVM , ignoring feature selection
new_df$y<-as.factor(new_df$y)
table(new_df$y)
#check missing rows
sum(!complete.cases(new_df))
##### Create dummy vars #####
# fullRank=TRUE drops one level per factor to avoid the dummy-variable trap
df1 <- dummyVars(~job+marital+education+housing+loan+contact+month+day_of_week+pdays+poutcome+age_group, data=new_df, sep="_",fullRank=TRUE)
df2 <- predict(df1,new_df)
# add back to original dataframe
new_df2 = data.frame(new_df[,-c(2:9,12,14,21)],df2)
#check structure, only y var is factor type
str(new_df2)
# testing sets using a 80/20 split rule
set.seed(110)
samp <- createDataPartition(new_df2$y, p=.80, list=FALSE)
train = new_df2[samp, ]
test = new_df2[-samp, ]
# distribution of data before balancing
barplot((table(train$y)))
##### Rebalancing data via SMOTE #####
# SMOTE: oversample the minority class and undersample the majority class
set.seed(110)
new_train<-SMOTE(y~.,train,perc.over = 100, perc.under=200)
barplot((table(new_train$y)))
##### Visualise data with PCA to figure out hyperplane, #####
bank.pca<- prcomp(new_df2[,-10],center=TRUE,scale=TRUE)
fviz_pca_ind(bank.pca, geom.ind = "point", pointshape = 21,
             pointsize = 2,
             fill.ind = new_df2$y, col.ind = "black",
             repel = TRUE)
# No clear separation, however, on the left 1's (those who respond positively to the marketing) are more.
##### SVM Kernel selection #####
# linear kernel
set.seed(110)
svm_linear_mod <- svm(y~.,
                      data=new_train,
                      method="C-classification",
                      kernel="linear",
                      scale=TRUE)
# training performance
svm_linear<-predict(svm_linear_mod,new_train[,-10],type="class")
svm_linear_acc <- confusionMatrix(svm_linear,new_train$y,mode="prec_recall",positive = "1")
# radial kernel
set.seed(110)
svm_radial_mod <- svm(y~.,
                      data=new_train,
                      method="C-classification",
                      kernel="radial",
                      scale=TRUE)
# training performance
svm_radial<-predict(svm_radial_mod,new_train[,-10],type="class")
svm_radial_acc <- confusionMatrix(svm_radial,new_train$y,mode="prec_recall",positive = "1")
# polynomial kernel
set.seed(110)
svm_poly_mod <- svm(y~.,
                    data=new_train,
                    method="C-classification",
                    kernel="polynomial",
                    scale=TRUE)
# training performance
svm_polynomial<-predict(svm_poly_mod,new_train[,-10],type="class")
svm_polynomial_acc <- confusionMatrix(svm_polynomial,new_train$y,mode="prec_recall",positive = "1")
# sigmoid kernel
set.seed(110)
svm_sigmoid_mod <- svm(y~.,
                       data=new_train,
                       method="C-classification",
                       kernel="sigmoid",
                       scale=TRUE)
# training performance
svm_sigmoid<-predict(svm_sigmoid_mod,new_train[,-10],type="class")
svm_sigmoid_acc <- confusionMatrix(svm_sigmoid,new_train$y,mode="prec_recall",positive = "1")
# Check accuracy for different kernels
dtable<-rbind(Linear_SMOTE=svm_linear_acc$overall[1],
              Radial_SMOTE=svm_radial_acc$overall[1],
              Polynomial_SMOTE=svm_polynomial_acc$overall[1],
              Sigmoid_SMOTE=svm_sigmoid_acc$overall[1])
kable(head(dtable), digits = 2, format = "html",caption = "Accuracy on Training Data", row.names = TRUE) %>%
  kable_styling(bootstrap_options = c("striped", "hover"),
                full_width = T,
                font_size = 12,
                position = "left")
# As Polynomial Kernel gives best results, we do Hyper parameter tuning on it for rest of the parameters
##### Hyperparamter Tuning #####
set.seed(110)
tPoly=tune(svm, y~., data=new_train,
           tunecontrol=tune.control(sampling = "cross"), #default to 10 k cross validation
           kernel="polynomial", scale = TRUE,
           ranges = list(degree = 3:5, cost = 2^(-1:3)))
summary(tPoly)
tPoly$best.parameters
# Evaluate best model on training set
inpred <- predict(tPoly$best.model, new_train[, -10],type="class")
confusionMatrix(inpred, new_train$y, mode="prec_recall",positive="1")
##### Apply Best Model to Test set #####
outpred <- predict(tPoly$best.model, test[,-10],type="class")
d1<-confusionMatrix(outpred, test$y, mode="prec_recall",positive="1")
# As above model is overfitting, testing original model (Polynomial kernel with degree 3)
outpred_poly <- predict(svm_poly_mod, test[,-10],type="class")
d2<-confusionMatrix(outpred_poly, test$y, mode="everything",positive="1")
summary(svm_poly_mod)
# Side-by-side per-class metrics for the tuned vs. the default polynomial model
tbd<-cbind(d1$byClass,d2$byClass)
kable(head(tbd), digits = 2, format = "html",caption = "Accuracy on Test Data", row.names = TRUE) %>%
  kable_styling(bootstrap_options = c("striped", "hover"),
                full_width = T,
                font_size = 12,
                position = "left")
##### ROC Comparisons #####
pROC_obj <- roc(test$y,factor(outpred,
                              ordered = TRUE),
                smoothed = TRUE,
                # arguments for ci
                ci=TRUE, ci.alpha=0.9, stratified=FALSE,
                # arguments for plot
                plot=TRUE, auc.polygon=TRUE, max.auc.polygon=TRUE, grid=TRUE,
                print.auc=TRUE, show.thres=TRUE)
pROC_obj2 <- roc(test$y,factor(outpred_poly,
                               ordered = TRUE),
                 smoothed = TRUE,
                 # arguments for ci
                 ci=TRUE, ci.alpha=0.9, stratified=FALSE,
                 # arguments for plot
                 plot=TRUE, auc.polygon=TRUE, max.auc.polygon=TRUE, grid=TRUE,
                 print.auc=TRUE, show.thres=TRUE)
# Plot ROC of both fitted curves
plot(pROC_obj, type = "n") # but don't actually plot the curve
# Add the line
lines(pROC_obj, type="b", pch=21, col="blue", bg="grey")
# Add the line of an other ROC curve
lines(pROC_obj2, type="o", pch=19, col="red")
# Finally, evaluate the default polynomial model on the full data set
outpred_complete <- predict(svm_poly_mod, new_df2[,-10],type="class")
confusionMatrix(outpred_complete, new_df2$y, mode="everything",positive="1")
pROC_obj_complete <- roc(new_df2$y,factor(outpred_complete,
                                          ordered = TRUE),
                         smoothed = TRUE,
                         # arguments for ci
                         ci=TRUE, ci.alpha=0.9, stratified=FALSE,
                         # arguments for plot
                         plot=TRUE, auc.polygon=TRUE, max.auc.polygon=TRUE, grid=TRUE,
                         print.auc=TRUE, show.thres=TRUE)
|
a2ab25905c2b1b81d994173a94945b2b9a5eef28
|
2fa33aeef712fa0a1b8043b40261d218a37cafa2
|
/man/gibbs2.Rd
|
080eb3ede91ff8ed01d53ee645004a8420883aad
|
[] |
no_license
|
cran/bayess
|
778e3cd961acecec0ccbf0de66048543af82c98c
|
30208f8c4b61bc73e5885875b8134f05a963719c
|
refs/heads/master
| 2022-09-03T00:53:47.483683
| 2022-08-11T09:30:08
| 2022-08-11T09:30:08
| 17,694,647
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,053
|
rd
|
gibbs2.Rd
|
\name{gibbscap1}
\alias{gibbscap1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Gibbs sampler for the two-stage open population capture-recapture
model
}
\description{
This function implements a regular Gibbs sampler associated with Chapter 5 for a two-stage
capture recapture model with open populations, accounting for the possibility
that some individuals vanish between two successive capture experiments.
}
\usage{
gibbscap1(nsimu, n1, c2, c3, N0 = n1/runif(1), r10, r20)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{nsimu}{
number of simulated values in the sample%% ~~Describe \code{nsimu} here~~
}
\item{n1}{
first capture population size%% ~~Describe \code{n1} here~~
}
\item{c2}{
number of individuals recaptured during the second experiment%% ~~Describe \code{c2} here~~
}
\item{c3}{
number of individuals recaptured during the third experiment%% ~~Describe \code{c3} here~~
}
\item{N0}{
starting value for the population size%% ~~Describe \code{N0} here~~
}
\item{r10}{
starting value for the number of individuals who vanished between the first and second experiments%% ~~Describe \code{r10} here~~
}
\item{r20}{
starting value for the number of individuals who vanished between the second and third experiments%% ~~Describe \code{r20} here~~
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{N }{Gibbs sample of the simulated population size}
\item{p }{Gibbs sample of the probability of capture}
\item{q }{Gibbs sample of the probability of leaving the population}
\item{r1 }{Gibbs sample of the number of individuals who vanished between the first and second experiments}
\item{r2 }{Gibbs sample of the number of individuals who vanished between the second and third experiments}
%% ...
}
\examples{
res=gibbscap1(100,32,21,15,200,10,5)
plot(res$p,type="l",col="steelblue3",xlab="iterations",ylab="p")
}
\keyword{Gibbs}
\keyword{capture-recapture}
\keyword{open population}
|
90afe885c2683379670c1eb3ea2da3107b502a08
|
c6ece4fd16b7f8d5811e161052857a0f3e26f914
|
/global.R
|
adc65316b809e87e21f69ff75e485062cb9794a9
|
[
"MIT"
] |
permissive
|
ToshihiroIguchi/ezdis
|
4caeaa2bc12f15b4ab11575a66fd337d5a409230
|
a08ffe2a74da8ea4ec8be7f33334073c86362ac9
|
refs/heads/master
| 2023-05-06T04:55:30.346543
| 2021-06-01T14:00:14
| 2021-06-01T14:00:14
| 270,997,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,363
|
r
|
global.R
|
# Load required libraries
library(ggplot2)
library(tibble)
library(readr)
library(readxl)
library(R.utils)
library(fitdistrplus)
library(ismev)
library(FAdist) #actuar,evd, EnvStarsより先に読み込ませる
library(extraDistr)
library(evd) #evdはactuarより先に読み込ませて、evd::dgumbelなどをマスクする
library(actuar)
library(EnvStats)
library(mixtools)
library(RcppFaddeeva)
library(goftest) #CVMのomega2からp-valueを計算
library(rmutil)
library(PearsonDS)
library(gsl)
library(ExtDist)
library(lmomco)
library(hydroApps)
library(normalp)
library(triangle)
# Source every file found in the given directory at once (used below to load
# all of the distribution definitions under "dist/").
#
# Args:
#   dir  path to a directory whose files should all be source()d
# Returns: invisibly NULL; files are evaluated for their side effects.
source.dir <- function(dir){
  # list.files(full.names = TRUE) builds the full paths for us, which is
  # more robust than pasting dir and filename together with "/".
  files.vec <- list.files(dir, full.names = TRUE)
  for(filename in files.vec){
    source(filename)
  }
}
# Load every statistical-distribution definition file from the "dist" directory
source.dir("dist")
# Coerce any rectangular object (data frame, tibble, matrix) to a plain
# vector, reading the values in column-major order.
as.vec <- function(x){
  as.vector(as.matrix(x))
}
# Map the "missing" sentinels NULL, NA and the literal string "NA" to NULL;
# any other value yields TRUE.
#
# Robustness fix: inputs that are not length-1 (length-0 vectors, or vectors
# with several elements) previously made `if(is.na(x))` raise a condition
# error; they are now treated as present and return TRUE.
is.null.na.null <- function(x){
  if(is.null(x)){return(NULL)}
  # Only a scalar can be the NA / "NA" sentinel
  if(length(x) != 1){return(TRUE)}
  if(is.na(x)){return(NULL)}
  if(x == "NA"){return(NULL)}
  return(TRUE)
}
# Draw a histogram of a numeric vector with a density curve overlaid,
# the density scaled to the histogram counts.
# Returns a ggplot object, or NULL for NULL / non-vector input.
gg.hist <- function(vec, bw = NULL){
  #https://qiita.com/hoxo_b/items/13d034ab0ed60b4dca88
  # Input check: nothing to plot
  if(is.null(vec)){return(NULL)}
  # Input check: only plain vectors are accepted
  if(!is.vector(vec)){return(NULL)}
  # Drop missing values
  vec <- na.omit(vec)
  # Kernel density estimate (its x-range also sets the plot limits)
  dens <- density(vec)
  # Default the bin width to 1/20 of the data range when not supplied
  if(is.null(bw)){
    bw <- diff(range(vec))/20
  }
  # Build the plot; bquote() bakes the numeric bin width into the aes()
  # so the density curve is rescaled from probability to expected counts.
  ret <- ggplot(data.frame(x = vec), aes(x = x)) +
    geom_histogram(binwidth = bw, fill = "white", color = "black") +
    geom_density(eval(bquote(aes(y=..count..*.(bw)))), fill='black', alpha=0.3) +
    xlim(range(dens$x))
  # Return value
  return(ret)
}
# TRUE when every element of a numeric vector is a whole number.
# This is a value check, not a check of the integer storage type.
#
# Robustness fix: NA or Inf elements previously propagated into the
# squared-error sum and made `if(e2 == 0)` raise a condition error;
# isTRUE(all(...)) now yields FALSE for vectors containing NA instead.
is_integer <- function(vec){
  # Non-numeric input is never an integer vector
  if(!is.numeric(vec)){return(FALSE)}
  # Element-wise whole-number test; isTRUE() turns NA into FALSE
  isTRUE(all(vec == round(vec)))
}
# Replace NULL with NA so the value can safely occupy a data-frame cell;
# any non-NULL value passes through unchanged.
null.na <- function(x){
  if (is.null(x)) NA else x
}
# Two-sided Kolmogorov-Smirnov p-value from the sample size n and the
# KS statistic D, using the asymptotic Kolmogorov series
# (cf. pkstwo in R's stats/ks.test.R). Returns NA for non-numeric input.
kspval <- function(n, D, k.max = 100){
  if (!is.numeric(n) || !is.numeric(D)) {
    return(NA)
  }
  # CDF of the Kolmogorov distribution, series truncated after k.max terms:
  # K(x) = 1 + 2 * sum_{i>=1} (-1)^i * exp(-2 i^2 x^2)
  kolm.cdf <- function(x, k.max = 10000) {
    idx <- seq_len(k.max)
    1 + 2 * sum(((-1)^idx) * exp(-2 * (idx^2) * (x^2)))
  }
  # p-value is the upper tail at sqrt(n) * D
  1 - kolm.cdf(sqrt(n) * D, k.max = k.max)
}
# p-value for the Cramer-von Mises statistic omega2 at sample size n,
# via goftest::pCvM (see goftest's cvmtest.R). Returns NA for non-numeric
# input or when pCvM fails.
cvmpval <- function(n, omega2){
  #https://github.com/cran/goftest/blob/master/R/cvmtest.R
  # Input check
  if(!is.numeric(n) || !is.numeric(omega2)){return(NA)}
  # Upper-tail probability; failures are caught below
  ret <- try(pCvM(q = omega2, n = n, lower.tail = FALSE), silent = FALSE)
  # inherits() is the robust way to detect a try() failure
  # (class(ret) == "try-error" breaks when the result has several classes)
  if(inherits(ret, "try-error")){return(NA)}
  # Return value
  return(ret)
}
# p-value for the Anderson-Darling statistic An at sample size n,
# via goftest::pAD. Returns NA for non-numeric input or when pAD fails.
adpval <- function(n, An){
  # Input check
  if(!is.numeric(n) || !is.numeric(An)){return(NA)}
  # Upper-tail probability; failures are caught below
  ret <- try(pAD(q = An, n = n, lower.tail = FALSE), silent = FALSE)
  # inherits() is the robust way to detect a try() failure
  # (class(ret) == "try-error" breaks when the result has several classes)
  if(inherits(ret, "try-error")){return(NA)}
  # Return value
  return(ret)
}
# Fit one of many candidate probability distributions to `data` with
# fitdistrplus::fitdist(), supplying distribution-specific starting values,
# parameter bounds and feasibility pre-checks. Falls back from `method`
# (default "mle") to "mge" on failure; each attempt is capped at `timeout`
# seconds via R.utils::withTimeout.
#
# Args:
#   data    numeric vector of observations (NULL returns NULL)
#   distr   distribution name suffix understood by the d/p/q/r helpers
#           loaded from the "dist" directory and the attached packages
#   method  fitdistrplus estimation method ("mle" by default)
#   timeout per-attempt time limit in seconds
# Returns: a "fitdist" object with $CalculationTime added, or an object of
#   class "fitdist.error" carrying only the elapsed time when fitting fails.
fit.dist <- function(data, distr = "norm", method = "mle", timeout = 10){
  # Nothing to fit
  if(is.null(data)){return(NULL)}
  # Log-likelihood at or above which the fit is treated as bogus
  loglik.th <- 1e30
  # Start time, used to report elapsed calculation time
  t0 <- Sys.time()
  # Optimizer defaults (overridden per distribution below)
  fitdist.start <- NULL
  fix.arg <- NULL
  fitdist.lower <- -Inf
  fitdist.upper <- Inf
  optim.method <- "Nelder-Mead"
  data.length.th <- 10000
  # Error result: an empty "fitdist.error" object carrying the elapsed time
  error.ret <- function(st = Sys.time()){
    ret <- list()
    class(ret) <- "fitdist.error"
    ret$CalculationTime <- st - t0
    return(ret)
  }
  # Subsample large data sets for initial-value estimation
  if(length(data) > data.length.th){
    set.seed(108)
    sample.data <- sample(data, size = data.length.th)
  }else{
    sample.data <- data
  }
  # ---- Distribution-specific starting values and bounds ----
  # Log-normal
  if(distr == "lnorm"){
    # The log is undefined for values <= 0, so give up early
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    #fitdist.start <- list(meanlog = mean(log(data)), sdlog = sd(log(data)))
    #fitdist.lower <- c(-Inf, 0)
  }
  # Gumbel
  if(distr == "Gumbel"){
    gum.res <- gum.fit(data)
    fitdist.start <- list(alpha = gum.res$mle[1], scale = gum.res$mle[2])
    fitdist.lower <- c(-Inf, 0)
  }
  # Inverse Weibull
  if(distr == "invweibull"){
    fitdist.start <- list(shape = 1, scale = 1)
    fitdist.lower <- c(0, 0)
  }
  # Weibull
  if(distr == "weibull"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    fitdist.lower <- c(0, 0)
  }
  # Three-parameter Weibull
  if(distr == "weibull3"){
    fitdist.start <- list(shape = 1, scale = 1, thres = min(data) - 1)
    fitdist.lower <- c(0, 0, -Inf)
  }
  # rweibull (the original comment said "inverse Weibull"; the location
  # parameter starts above the data maximum — presumably a reflected
  # Weibull with an upper end point; confirm against the dist/ definition)
  if(distr == "rweibull"){
    fitdist.start <- list(loc = max(data) + 1, scale = 1, shape = 1)
    fitdist.lower <- c(-Inf, 0, 0)
  }
  # Multi-modal (two-component) Weibull
  if(distr == "multiweibull"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    # Split the sorted data into lower and upper halves
    data.1 <- sort(data)[c(1:round(length(data)/2, 0))]
    data.2 <- sort(data)[c(round(length(data)/2, 0):length(data))]
    # Fit a Weibull to each half to estimate component parameters
    wp1 <- fitdistrplus::fitdist(data.1, "weibull")$estimate
    wp2 <- fitdistrplus::fitdist(data.2, "weibull")$estimate
    # Use the estimates as starting values for the two Weibull components
    # NOTE(review): data.1/data.2 are indexed here rather than wp1/wp2 —
    # possibly a slip for wp1[1], wp1[2], wp2[1], wp2[2]; confirm.
    fitdist.start <- list(shape1 = data.1[1], scale1 = data.1[2],
                          shape2 = data.2[1], scale2 = data.2[2])
    # Lower and upper bounds
    fitdist.lower <- c(0, 0, 0, 0)
    fitdist.upper <- c(Inf, Inf, Inf, Inf)
  }
  # Rayleigh
  if(distr == "rayleigh"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(sigma = 1)
    fitdist.lower <- c(0)
  }
  # Generalized extreme value
  if(distr == "gev"){
    # gev.fit from the ismev package supplies the starting values
    gev.res <- try(gev.fit(data, show = FALSE), silent = FALSE)
    # Return an empty result if the preliminary fit failed
    if(class(gev.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(loc = gev.res$mle[1], scale = gev.res$mle[2], shape = gev.res$mle[3])
    fitdist.lower <- c(-Inf, 0, -Inf)
  }
  # Generalized Pareto
  if(distr == "GPD"){
    gen.pareto.res <- try(gpd.fit(data, show = FALSE, threshold = 2), silent = TRUE)
    # Return an empty result if the preliminary fit failed
    # NOTE(review): gen.pareto.res is only used as this feasibility check;
    # the starting values below come from the optim() call instead.
    if(class(gen.pareto.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    # Log-likelihood (NaN mapped to -Inf so optim can reject the point)
    dgpd.loglikelihood <- function(x, loc, scale, shape){
      ret <- sum(log(dGPD(x, loc, scale, shape)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dgpd.opt <- function(x){
      dgpd.loglikelihood(x = data, x[1], x[2], x[3])
    }
    # Maximize the log-likelihood (fnscale = -1 turns optim into a maximizer)
    gpd.opt <- optim(par = c(min(0, data) - 0.1 , 1, 2),
                     fn = dgpd.opt, control = list(fnscale = -1))
    # Define the starting values
    fitdist.start <- list(loc = gpd.opt$par[1],
                          scale = gpd.opt$par[2],
                          shape = gpd.opt$par[3])
    # Lower and upper bounds (location cannot exceed the data minimum)
    fitdist.lower <- c(-Inf, 0, -Inf)
    fitdist.upper <- c(min(data), Inf, Inf)
  }
  # Exponential
  if(distr == "exp"){
    # Return an empty result if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    fitdist.lower <- c(0)
  }
  # Exponential power distribution
  if(distr == "normp2"){
    fitdist.start <- list(mu = 0, sigmap = 1, shape = 2)
    fitdist.lower <- c(-Inf, 1e-10, 1)
  }
  # Wald (inverse Gaussian)
  if(distr == "Wald"){
    # Return an empty result if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(mu = 1, lambda = 1)
    fitdist.lower <- c(1e-10, 1e-10)
  }
  # Single-parameter Pareto
  if(distr == "pareto1"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    # Log-likelihood (invalid parameters and NaN mapped to -Inf)
    dpareto1.ll <- function(x, shape, min){
      if(min(shape, min) <= 0){return(-Inf)}
      ret <- sum(log(dpareto1(x = x, shape = shape, min = min)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpareto1.opt <- function(x){
      ret <- dpareto1.ll(x = data, shape = x[1], min = x[2])
      return(ret)
    }
    # Maximize the log-likelihood
    gpd.opt <- optim(par = c(1, max(min(data), 1e-10)),
                     fn = dpareto1.opt, control = list(fnscale = -1))
    fitdist.start <- list(shape = gpd.opt$par[1], min = gpd.opt$par[2])
    fitdist.lower <- c(0, -Inf)
    fitdist.upper <- c(Inf, Inf)
  }
  # Pareto (actuar parameterization)
  if(distr == "pareto_ac"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    # Log-likelihood (invalid parameters and NaN mapped to -Inf)
    dpareto.ll <- function(x, shape, scale){
      if(min(shape, scale) <= 0){return(-Inf)}
      ret <- sum(log(dpareto_ac(x = x, shape = shape, scale = scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpareto.opt <- function(x){
      ret <- dpareto.ll(x = data, shape = x[1], scale = x[2])
      return(ret)
    }
    # Maximize the log-likelihood
    pareto.opt <- optim(par = c(1, 1),
                        fn = dpareto.opt, control = list(fnscale = -1))
    fitdist.start <- list(shape = pareto.opt$par[1], scale = pareto.opt$par[2])
    fitdist.lower <- c(1e-10, 1e-10)
    fitdist.upper <- c(Inf, Inf)
  }
  # Type-2 Pareto
  if(distr == "pareto2"){
    # Log-likelihood (NaN mapped to -Inf)
    dpareto2.ll <- function(x, min, shape, scale){
      ret <- sum(log(dpareto2(x = x, min = min, shape = shape, scale = scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpareto2.opt <- function(x){
      ret <- dpareto2.ll(x = data, min = x[1], shape = x[2], scale = x[3])
      return(ret)
    }
    # Maximize the log-likelihood
    gpd.opt <- optim(par = c(min(data) - 1, 2, 2),
                     fn = dpareto2.opt, control = list(fnscale = -1))
    fitdist.start <- list(min = gpd.opt$par[1],
                          shape = gpd.opt$par[2], scale = gpd.opt$par[3])
    fitdist.lower <- c(-Inf, 0, 0)
  }
  # Type-3 Pareto
  if(distr == "pareto3"){
    # Log-likelihood (NaN mapped to -Inf)
    dpareto3.ll <- function(x, min, shape, scale){
      ret <- sum(log(dpareto3(x = x, min = min, shape = shape, scale = scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpareto3.opt <- function(x){
      ret <- dpareto3.ll(x = data, min = x[1], shape = x[2], scale = x[3])
      return(ret)
    }
    # Maximize the log-likelihood
    pa3.opt <- optim(par = c(min(data) - 1, 2, 2),
                     fn = dpareto3.opt, control = list(fnscale = -1))
    fitdist.start <- list(min = pa3.opt$par[1],
                          shape = pa3.opt$par[2], scale = pa3.opt$par[3])
    fitdist.lower <- c(-Inf, 0, 0)
  }
  # Type-4 Pareto
  if(distr == "pareto4"){
    # Log-likelihood (NaN mapped to -Inf)
    dpareto4.ll <- function(x, min, shape1, shape2, scale){
      ret <- sum(log(dpareto4(x = x, min = min, shape1 = shape1, shape2 = shape2, scale = scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpareto4.opt <- function(x){
      ret <- dpareto4.ll(x = data, min = x[1], shape1 = x[2], shape2 = x[3], scale = x[4])
      return(ret)
    }
    # Maximize the log-likelihood
    pa4.opt <- optim(par = c(min(data), 1, 1, 1),
                     fn = dpareto4.opt, control = list(fnscale = -1))
    fitdist.start <- list(min = pa4.opt$par[1],
                          shape1 = pa4.opt$par[2], shape2 = pa4.opt$par[3],
                          scale = pa4.opt$par[4])
    fitdist.lower <- c(-Inf, 0, 0, 0)
  }
  # Lomax
  if(distr == "Lomax"){
    # Error if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    # Log-likelihood (abs() keeps the parameters in range; NaN -> -Inf)
    dlomax.ll <- function(x, alpha, lambda){
      ret <- sum(log(dLomax(x = x, alpha = abs(alpha), lambda = abs(lambda))))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dplomax.opt <- function(x){
      ret <- dlomax.ll(x = data, alpha = x[1], lambda = x[2])
      return(ret)
    }
    # Maximize the log-likelihood
    lomax.opt <- optim(par = c(1, 1),
                       fn = dplomax.opt, control = list(fnscale = -1))
    fitdist.start <- list(alpha = abs(lomax.opt$par[1]),
                          lambda = abs(lomax.opt$par[2]))
    print(fitdist.start)
    fitdist.lower <- c(0, 0)
  }
  # Pearson type I
  if(distr == "pearson1"){
    # Log-likelihood (invalid parameters and NaN mapped to -Inf)
    # NOTE(review): this calls dPearson6, not a dpearson1 — confirm whether
    # that is intentional or a copy-paste from the Pearson VI branch.
    dPearson1.ll <- function(x, a, b, location, scale){
      if(min(a, b) <= 0){return(-Inf)}
      ret <- sum(log(dPearson6(x = x, a, b, location, scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dPearson1.opt <- function(x){
      ret <- dPearson1.ll(x = data, a = x[1], b = x[2], location = x[3], scale = x[4])
      return(ret)
    }
    # Maximize the log-likelihood
    Pearson1.opt <- optim(par = c(1, 1, min(data) - 3, 1),
                          fn = dPearson1.opt, control = list(fnscale = -1))
    fitdist.start <- list(a = Pearson1.opt$par[1], b = Pearson1.opt$par[2],
                          location = Pearson1.opt$par[3], scale = Pearson1.opt$par[4])
    #fitdist.start <- list(a = 5, b = 1, location = min(data) + 3, scale = 1)
    fitdist.lower <- c(0, 0, -Inf, -Inf)
  }
  # Pearson type II
  if(distr == "pearson2"){
    fitdist.start <- list(a = 5, location = 1, scale = 1)
    fitdist.lower <- c(0, -Inf, -Inf)
  }
  # Pearson type III
  if(distr == "pearson3"){
    #shape, location, scale
    #s<>0, a>0 and (x-lambda)/s>=0., a = shape, lambda = location, s = scale
    # Log-likelihood (support and parameter constraints mapped to -Inf)
    dpearson3.ll <- function(x, shape, location, scale){
      if(scale == 0){return(-Inf)}
      if(shape <= 0){return(-Inf)}
      if((min(data) - location)/scale < 0){return(-Inf)}
      ret <- sum(log(dpearson3(x = x, shape = shape, location = location, scale = scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dpearson3.opt <- function(x){
      ret <- dpearson3.ll(x = data, shape = x[1], location = x[2], scale = x[3])
      return(ret)
    }
    # Maximize the log-likelihood
    pearson3.opt <- optim(par = c(3, min(data) - 3, 3),
                          fn = dpearson3.opt, control = list(fnscale = -1))
    fitdist.start <- list(
      shape = pearson3.opt$par[1],
      location = pearson3.opt$par[2],
      scale = pearson3.opt$par[3])
    #fitdist.start <- list(shape = 1, location = mean(data), scale = 1)
    fitdist.lower <- c(1e-10, -Inf, -Inf)
  }
  # Pearson type IV
  if(distr == "pearson4"){
    fitdist.start <- list(m = 5, nu = 1, location = mean(data), scale = 1)
    fitdist.lower <- c(1+1e-10, -Inf, -Inf, -Inf) #m > 1
  }
  # Pearson type V
  if(distr == "pearson5"){
    fitdist.start <- list(shape = 1, location = mean(data), scale = 1)
    fitdist.lower <- c(1e-10, -Inf, -Inf)
  }
  # Pearson type VI
  if(distr == "Pearson6"){
    # Note: spelled with a capital "Pearson" to avoid clashing with
    # actuar's pearson6.
    # Log-likelihood (invalid parameters and NaN mapped to -Inf)
    dPearson6.ll <- function(x, a, b, location, scale){
      if(min(a, b) <= 0){return(-Inf)}
      ret <- sum(log(dPearson6(x = x, a, b, location, scale)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dPearson6.opt <- function(x){
      ret <- dPearson6.ll(x = data, a = x[1], b = x[2], location = x[3], scale = x[4])
      return(ret)
    }
    # Maximize the log-likelihood
    Pearson6.opt <- optim(par = c(1, 1, min(data) - 3, 1),
                          fn = dPearson6.opt, control = list(fnscale = -1))
    fitdist.start <- list(a = Pearson6.opt$par[1], b = Pearson6.opt$par[2],
                          location = Pearson6.opt$par[3], scale = Pearson6.opt$par[4])
    fitdist.lower <- c(1e-10, 0.1, -Inf, -Inf)
  }
  # Pearson type VII
  if(distr == "pearson7"){
    fitdist.start <- list(df = 1, location = mean(data), scale = 1)
    fitdist.lower <- c(0.1, -Inf, -Inf)
  }
  # Burr
  if(distr == "Burr"){
    # Error if the minimum is <= 0
    if(min(data) <= 0){
      return(error.ret(Sys.time()))
    }
    # Starting values
    fitdist.start <- list(k = 1, c = 1)
    # Lower bounds (slightly above zero)
    fitdist.lower <- c(0.01, 0.01)
  }
  # Extended Burr type XII
  if(distr == "BurrXII"){
    # Error if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    # Compute starting values from the sample L-moments
    start.burr <- function(data){
      # L-moments of the data
      data.lmom <- lmom.ub(data)
      # Approximate BurrXII parameters from the L-moments
      start.burr <- parBurrXII.approx(
        L1 = data.lmom$L1, tau = data.lmom$LCV, tau3 = data.lmom$TAU3)
      # Return value
      return(start.burr)
    }
    # Starting-value computation
    start.b <- try(start.burr(data), silent = FALSE)
    # Fall back to fixed defaults if the L-moment step failed
    if(class(start.b)[1] == "try-error" || start.b %>% na.omit() %>% length() < 3){
      fitdist.start <- list(lambda = 1, k = -1, c = 1)
    }else{
      # Strip the names off the starting-value vector
      names(start.b) <- NULL
      # Use the estimates as starting values
      fitdist.start <- list(
        lambda = start.b[1],
        k = start.b[2],
        c = start.b[3]
      )
    }
    # Lower and upper bounds
    fitdist.lower <- c(0, -Inf, 0)
    fitdist.upper <- c(Inf, 0, Inf)
  }
  # Johnson SU
  if(distr == "johnsonSU"){
    # Parameter estimation (ExtDist::eJohnsonSU on the subsample)
    su.param <- try.null(eJohnsonSU(sample.data))
    if(is.null(su.param)){
      fitdist.start <- list(gamma = 1,
                            delta = 1,
                            xi = 1,
                            lambda = 1)
    }else{
      fitdist.start <- list(gamma = su.param$gamma,
                            delta = su.param$delta,
                            xi = su.param$xi,
                            lambda = su.param$lambda
      )
    }
    fitdist.lower <- c(-Inf, 1e-10, -Inf, 1e-10)
  }
  # Johnson SB
  if(distr == "johnsonSB"){
    #http://www.ntrand.com/jp/johnson-sb-distribution/
    # Set xi and lambda from the estimated parameters
    # Parameter estimation (ExtDist::eJohnsonSB)
    sb.param <- try.null(eJohnsonSB(data))
    if(is.null(sb.param)){
      # Estimation failed: fall back to 1 for every parameter
      fitdist.start <- list(gamma = 1,
                            delta = 1,
                            xi = 1,
                            lambda = 1)
      fitdist.lower <- c(-Inf, 1e-10, -Inf, 1e-10)
    }else{
      # Use the estimates directly as starting values
      fitdist.start <- list(gamma = sb.param$gamma,
                            delta = sb.param$delta,
                            xi = sb.param$xi,
                            lambda = sb.param$lambda
      )
      # Constrain the bounds to a +/-0.01 neighborhood of the estimates
      start.vec <- c(sb.param$gamma, sb.param$delta, sb.param$xi, sb.param$lambda)
      fitdist.lower <- start.vec - 0.01
      fitdist.upper <- start.vec + 0.01
    }
  }
  # Birnbaum-Saunders (fatigue life)
  if(distr == "fatigue"){
    fitdist.start <- list(alpha = 0.5, beta = 1, mu = 0)
    fitdist.lower <- c(0, 0, -Inf)
  }
  # Laplace
  if(distr == "Laplace"){
    fitdist.start <- list(mu = 0, sigma = 1)
    fitdist.lower <- c(-Inf, 0)
  }
  # Gompertz
  if(distr == "gompertz"){
    fitdist.start <- list(a = 1, b = 1)
    fitdist.lower <- c(1e-10, 1e-10)
  }
  # Muth
  if(distr == "muth"){
    # Return an empty result if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(alpha = 0.5)
    fitdist.lower <- c(1e-10)
    fitdist.upper <- c(1)
  }
  # Log-logistic
  if(distr == "llogis"){
    fitdist.lower <- c(0, 0)
  }
  # Hyperbolic secant
  if(distr == "hs"){
    fitdist.start <- list(mu = 0, sigma = 1)
    fitdist.lower <- c(-Inf, 1e-10)
  }
  # Erlang
  if(distr == "erlang"){
    # Return an empty result if the minimum is < 0
    if(min(data) < 0){
      return(error.ret(Sys.time()))
    }
    # Log-likelihood (invalid parameters and NaN mapped to -Inf)
    derlang.ll <- function(x, k, mu){
      if(min(k, mu) <= 0){return(-Inf)}
      ret <- sum(log(derlang(x = x, k = k, mu = mu)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    derlang.opt <- function(x){
      ret <- derlang.ll(x = data, k = x[1], mu = x[2])
      return(ret)
    }
    # Maximize the log-likelihood
    erlang.opt <- optim(par = c(1, 1),
                        fn = derlang.opt, control = list(fnscale = -1))
    # Round the optimized shape k to the nearest integer and pin the
    # bounds tightly around it (Erlang requires an integer shape)
    k.int <- round(erlang.opt$par[1])
    fitdist.start <- list(k = k.int, mu = erlang.opt$par[2])
    fitdist.lower <- c(k.int-1e-10, 1e-10)
    fitdist.upper <- c(k.int+1e-10, Inf)
  }
  # Voigt
  if(distr == "voigt"){
    fitdist.start <- list(
      x0 = mean(data),
      sigma = sd(data)/2,
      gamma = sd(data)/2
    )
    fitdist.lower <- c(-Inf, 0, 0)
  }
  # Levy
  if(distr == "Levy"){
    fitdist.start <- list(
      m = min(data) - 1,
      s = 1
    )
    fitdist.lower <- c(-Inf, 0)
    fitdist.upper <- c(min(data), Inf)
  }
  # Beta
  if(distr == "beta"){
    # Return an empty result unless all data lie in [0, 1]
    if(min(data) < 0 || max(data) > 1){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(shape1 = 1, shape2 = 1)
    fitdist.lower <- c(0, 0)
  }
  # Chi-squared
  if(distr == "chi2"){
    fitdist.start <- list(df = 1)
    fitdist.lower <- c(1e-10)
  }
  # Noncentral chi-squared
  if(distr == "ncchi2"){
    fitdist.start <- list(df = 1, ncp = 1)
    fitdist.lower <- c(1e-10, 0)
  }
  # Student's t
  if(distr == "t2"){
    fitdist.lower <- c(1e-10)
    fitdist.start <- list(df = 1)
  }
  # Noncentral t
  if(distr == "nct"){
    # Log-likelihood (invalid df and NaN mapped to -Inf)
    dnct.ll <- function(x, df, ncp){
      if(df <= 0){return(-Inf)}
      ret <- sum(log(dnct(x = x, df = df, ncp = ncp)))
      if(is.nan(ret)){ret <- -Inf}
      return(ret)
    }
    # Objective: log-likelihood as a function of the parameter vector
    dnct.opt <- function(x){
      ret <- dnct.ll(x = data, df = x[1], ncp = x[2])
      return(ret)
    }
    # Maximize the log-likelihood
    nct.opt <- optim(par = c(2, mean(data)),
                     fn = dnct.opt, control = list(fnscale = -1))
    fitdist.start <- list(df = nct.opt$par[1], ncp = nct.opt$par[2])
    fitdist.lower <- c(1e-10, -Inf)
  }
  # F
  if(distr == "F"){
    fitdist.start <- list(df1 = 1, df2 = 1)
    fitdist.lower <- c(1e-10, 1e-10)
  }
  # Noncentral F
  if(distr == "ncF"){
    fitdist.start <- list(df1 = 1, df2 = 1, ncp = 0)
    fitdist.lower <- c(1e-10, 1e-10, -Inf)
  }
  # Two-component Gaussian mixture
  if(distr == "normmix2"){
    # EM algorithm (mixtools) supplies the starting values
    normalmixEM.res <- try(normalmixEM(data, k = 2), silent = TRUE)
    # Abort on failure
    if(class(normalmixEM.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(
      mean1 = normalmixEM.res$mu[1],
      sd1 = normalmixEM.res$sigma[1],
      rate1 = normalmixEM.res$lambda[1],
      mean2 = normalmixEM.res$mu[2],
      sd2 = normalmixEM.res$sigma[2],
      rate2 = normalmixEM.res$lambda[2])
    fitdist.lower <- c(-Inf, 0, 0, -Inf, 0, 0)
    fitdist.upper <- c(Inf, Inf, 1, Inf, Inf, 1)
  }
  # Three-component Gaussian mixture
  if(distr == "normmix3"){
    # EM algorithm (mixtools) supplies the starting values
    normalmixEM.res <- try(normalmixEM(data, k = 3), silent = TRUE)
    # Abort on failure
    if(class(normalmixEM.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(
      mean1 = normalmixEM.res$mu[1],
      sd1 = normalmixEM.res$sigma[1],
      rate1 = normalmixEM.res$lambda[1],
      mean2 = normalmixEM.res$mu[2],
      sd2 = normalmixEM.res$sigma[2],
      rate2 = normalmixEM.res$lambda[2],
      mean3 = normalmixEM.res$mu[3],
      sd3 = normalmixEM.res$sigma[3],
      rate3 = normalmixEM.res$lambda[3])
    fitdist.lower <- c(-Inf, 0, 0, -Inf, 0, 0, -Inf, 0, 0)
    fitdist.upper <- c(Inf, Inf, 1, Inf, Inf, 1, Inf, Inf, 1)
  }
  # Four-component Gaussian mixture
  if(distr == "normmix4"){
    # EM algorithm (mixtools) supplies the starting values
    normalmixEM.res <- try(normalmixEM(data, k = 4), silent = TRUE)
    # Abort on failure
    if(class(normalmixEM.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(
      mean1 = normalmixEM.res$mu[1],
      sd1 = normalmixEM.res$sigma[1],
      rate1 = normalmixEM.res$lambda[1],
      mean2 = normalmixEM.res$mu[2],
      sd2 = normalmixEM.res$sigma[2],
      rate2 = normalmixEM.res$lambda[2],
      mean3 = normalmixEM.res$mu[3],
      sd3 = normalmixEM.res$sigma[3],
      rate3 = normalmixEM.res$lambda[3],
      mean4 = normalmixEM.res$mu[4],
      sd4 = normalmixEM.res$sigma[4],
      rate4 = normalmixEM.res$lambda[4]
    )
    fitdist.lower <- c(-Inf, 0, 0, -Inf, 0, 0, -Inf, 0, 0, -Inf, 0, 0)
    fitdist.upper <- c(Inf, Inf, 1, Inf, Inf, 1, Inf, Inf, 1, Inf, Inf, 1)
  }
  # Two-component log-normal mixture
  if(distr == "lnormmix2"){
    # EM algorithm on the log scale supplies the starting values
    normalmixEM.res <- try(normalmixEM(log(data), k = 2), silent = TRUE)
    # Abort on failure
    if(class(normalmixEM.res)[1] == "try-error"){
      return(error.ret(Sys.time()))
    }
    fitdist.start <- list(
      meanlog1 = normalmixEM.res$mu[1],
      sdlog1 = normalmixEM.res$sigma[1],
      rate1 = normalmixEM.res$lambda[1],
      meanlog2 = normalmixEM.res$mu[2],
      sdlog2 = normalmixEM.res$sigma[2],
      rate2 = normalmixEM.res$lambda[2])
    fitdist.lower <- c(-Inf, 0, 0, -Inf, 0, 0)
    fitdist.upper <- c(Inf, Inf, 1, Inf, Inf, 1)
  }
  # Truncated normal
  if(distr == "tnorm"){
    fitdist.start <- list(mean = mean(data), sd = sd(data), a= min(data) - sd(data), b = max(data) + sd(data))
    fitdist.lower <- c(-Inf, 0, -Inf, -Inf)
  }
  # Zero-modified normal (original comment repeated "truncated normal")
  if(distr == "zmnorm"){
    fitdist.start <- list(mean = mean(data), sd = sd(data), p.zero = 0.5)
    fitdist.lower <- c(-Inf, 0, 0)
    fitdist.upper <- c(Inf, Inf,1)
  }
  # Poisson
  if(distr == "pois"){
    # Error unless the data are non-negative integers
    if(!is_integer(data) || min(data) < 0){
      return(error.ret(Sys.time()))
    }
  }
  # Geometric
  if(distr == "geom"){
    # Error unless the data are non-negative integers
    if(!is_integer(data) || min(data) < 0){
      return(error.ret(Sys.time()))
    }
  }
  # Negative binomial
  if(distr == "nbinom"){
    # Error unless the data are non-negative integers
    if(!is_integer(data) || min(data) < 0){
      return(error.ret(Sys.time()))
    }
  }
  # Triangular
  if(distr == "triangle"){
    fitdist.start <- list(a = min(data)-sd(data), b = max(data) + sd(data), c = (min(data) + max(data))/2)
  }
  # Report which distribution is being fitted
  print(distr)
  # Helper: run fitdist under a timeout, flagging absurd fits as errors
  fitdist.fn <- function(method = method){
    # Fit (warnings suppressed; a timeout is converted to an error)
    ret <- suppressWarnings(
      try(
        withTimeout({
          fitdist(data = data, distr = distr, method = method,
                  start = fitdist.start, fix.arg = fix.arg,
                  lower = fitdist.lower, upper = fitdist.upper,
                  optim.method = optim.method)
        }, timeout = timeout, onTimeout = "error")
        , silent = FALSE)
    )
    # A log-likelihood at or above the threshold means the fit went wrong
    if(class(ret)[1] == "fitdist"){
      if(is.numeric(ret$loglik)){
        if(ret$loglik >= loglik.th){
          ret <- list()
          class(ret) <- "try-error"
        }
      }
    }
    # Return value
    return(ret)
  }
  # First attempt with the requested method
  ret <- fitdist.fn(method = method)
  # On failure, retry with maximum goodness-of-fit estimation
  if(class(ret)[1] == "try-error"){
    ret <- fitdist.fn(method = "mge")
  }
  # Still failing: return the empty error object
  if(class(ret)[1] == "try-error"){
    return(error.ret(Sys.time()))
  }
  # Record the elapsed time
  ret$CalculationTime <- Sys.time() - t0
  # Return value
  return(ret)
}
# Summarize a named list of fit.dist() results as one table: information
# criteria, goodness-of-fit statistics and their p-values, sorted by AIC.
#
# Fixes: iterate with seq_along() (1:length(data) misbehaves on an empty
# list) and detect try() failures with inherits() instead of class()[1].
summary.fit.dist <- function(data){
  # Accumulator for the output data frame
  ret <- NULL
  # Replace values at or below a threshold with NA
  th_na <- function(vec, th = -Inf){
    # Start from a copy of the input
    ret <- vec
    # "<=" (not "<") so that with th = -Inf only -Inf itself is caught
    ret[vec <= th] <- NA
    # Return value
    return(ret)
  }
  # AIC value at or below which the fit is treated as bogus
  aic.th <- -Inf
  # Empty tibble placeholder
  df0 <- tibble()
  # Convert each fit result into a one-row data frame
  for(i in seq_along(data)){
    print(names(data)[i][1])
    # Goodness-of-fit statistics for this fit
    gofstat.res <- try(gofstat(data[[i]]), silent = FALSE)
    # Build the full row unless gofstat failed
    if(!inherits(gofstat.res, "try-error")){
      # Assemble the row
      df0 <- tibble(
        distr = names(data)[i][1],
        name = data[[i]]$name[1],
        AIC = null.na(data[[i]]$aic)[1],
        BIC = null.na(data[[i]]$bic)[1],
        "log likelihood" = null.na(data[[i]]$loglik)[1],
        # Continuous distributions
        "Kolmogorov-Smirnov statistic(D)" = null.na(gofstat.res$ks)[1],
        "Kolmogorov-Smirnov test p-value" = kspval(data[[i]]$n, gofstat.res$ks)[1],
        "Cramer-von Mises statistic(omega2)" = null.na(gofstat.res$cvm)[1],
        "Cramer-von Mises test p-value" = cvmpval(data[[i]]$n, gofstat.res$cvm)[1],
        "Anderson-Darling statistic(An)" = null.na(gofstat.res$ad)[1],
        "Anderson-Darling test p-value" = adpval(data[[i]]$n, gofstat.res$ad)[1],
        # Discrete distributions
        "Chi-squared" = null.na(gofstat.res$chisq)[1],
        "Chi-squared p-value" = null.na(gofstat.res$chisqpvalue)[1],
        # Calculation time
        "Calculation time" = data[[i]]$CalculationTime[1],
        # Whether a solution exists
        "Solution" = TRUE
      )
    }else{
      # gofstat failed: record a minimal row marking the fit as unsolved
      df0 <- tibble(
        distr = names(data)[i][1],
        name = data[[i]]$name[1],
        Solution = FALSE
      )
    }
    # Append the row to the accumulated table
    if(!is.null(ret)){
      ret <- dplyr::bind_rows(ret, df0)
    }else{
      ret <- df0
    }
  }
  # Sorting key: AIC with bogus values masked as NA
  ret <- ret %>% dplyr::mutate(AIC2 = th_na(AIC, th = aic.th))
  # Sort in increasing order of the masked AIC
  ret <- ret %>% dplyr::arrange(AIC2)
  # Drop the sorting key
  ret[, c("AIC2")] <- NULL
  # Drop duplicate rows
  ret <- dplyr::distinct(ret)
  # Return value
  return(ret)
}
# TRUE when any element of `chr` ends with the literal suffix `chk`
# (used below to test file extensions such as ".csv").
#
# endsWith() compares the suffix as a fixed string, so suffixes containing
# regex metacharacters are handled correctly; the previous regex only
# escaped the FIRST character of the suffix before anchoring with "$".
chk.end <- function(chk, chr){
  any(endsWith(chr, chk))
}
# Pad a data frame with NA rows so that it has exactly `length` rows
# (used when column-binding sheets of different lengths).
#
# BUG FIX: the original guards compared the COLUMN count (df.ncol) against
# the requested row count, so wide frames errored spuriously and frames
# whose ncol happened to equal `length` were returned unpadded.
df.long <- function(df, length){
  # Current dimensions
  df.nrow <- nrow(df)
  df.ncol <- ncol(df)
  # Cannot shrink: the frame already has more rows than requested
  if(df.nrow > length){stop("length is too small.")}
  # Already the right number of rows: nothing to do
  if(df.nrow == length){return(df)}
  # Build the block of NA filler rows with matching column names
  add.df.nrow <- length - df.nrow
  add.df <- matrix(rep(NA, add.df.nrow * df.ncol), ncol = df.ncol) %>%
    as.data.frame() %>% as_tibble()
  colnames(add.df) <- colnames(df)
  # Append the filler rows
  ret <- df %>% rbind(add.df)
  # Return value
  return(ret)
}
#' Read numeric data from a CSV or Excel file into a tibble.
#'
#' CSV: if every column is numeric the whole sheet is flattened into a
#' single "Data" column; otherwise the file is re-read with headers and
#' only numeric columns are kept.
#' Excel (.xlsx/.xls/.xlsm): each sheet is processed the same way (an
#' all-numeric sheet becomes one column named after the sheet) and the
#' per-sheet results are combined column-wise, NA-padded to equal length
#' via df.long().
#' Returns NULL for unsupported file types.
#' (Relies on helpers chk.end(), as.vec(), df.long() defined in this file.)
read.data <- function(file){
  # ---- CSV case ----
  if(chk.end(".csv", file)){
    # If the csv is numbers-only, vectorise the whole sheet.
    # First read from row 1 with no header row assumed.
    ret.chk.raw <- suppressMessages(read_csv(file, col_names = FALSE))
    # Keep numeric columns only
    ret.chk <- ret.chk.raw %>% select_if(is.numeric)
    # No character column at all: the sheet is pure numbers
    if(ncol(ret.chk.raw) == ncol(ret.chk)){
      # Flatten every cell into one "Data" column
      ret <- tibble(Data = ret.chk %>% as.vec())
      # Return the single-column tibble
      return(ret)
    }
    # Character columns exist: re-read treating row 1 as headers,
    # then keep only the numeric columns.
    ret <- suppressMessages(read_csv(file)) %>%
      select_if(is.numeric) # numeric columns only
    # Return
    return(ret)
  }
  # ---- Excel case ----
  if(chk.end(".xlsx", file) || chk.end(".xls", file) || chk.end(".xlsm", file)){
    # Accumulator for the column-wise combined result
    ret <- tibble()
    # All sheet names in the workbook
    excel.sheets <- excel_sheets(file)
    # Process each sheet in turn
    for(i in 1:length(excel.sheets)){
      # Check whether the sheet is entirely numeric
      df.i.chk.raw <- try(read_excel(file, sheet = i, col_names = FALSE),
                          silent = FALSE) # no header row assumed
      df.i.chk <- try(df.i.chk.raw %>% select_if(is.numeric), silent = FALSE)
      df.i.chk.error <- try(ncol(df.i.chk.raw) == ncol(df.i.chk), silent =FALSE)
      # Did the numeric-only extraction keep every column?
      if(df.i.chk.error == TRUE){
        # Entirely numeric: flatten the sheet into one column named after it
        df.i <- data.frame(Data = df.i.chk %>% as.vec())
        colnames(df.i) <- excel.sheets[i]
      }else{
        # Mixed sheet: re-read with headers, keep numeric columns only
        df.i <- try(read_excel(file, sheet = i) %>% select_if(is.numeric), silent = FALSE)
      }
      # Only proceed when reading the sheet succeeded
      if(class(df.i)[1] != "try-error"){
        # Sheet yielded at least one cell of data
        if(nrow(df.i) > 0 && ncol(df.i) > 0){
          # Anything accumulated from earlier sheets?
          if(nrow(ret) > 0){
            # Yes: NA-pad both sides to the longer row count...
            max.nrow <- max(nrow(df.i), nrow(ret))
            # ...then combine column-wise
            ret <- tibble(df.long(ret, max.nrow), df.long(df.i, max.nrow))
          }else{
            # No: start the accumulator with this sheet
            ret <- df.i
          }
        }
      }
    }
    # Return the combined data
    return(ret)
  }
  # Unsupported extension.
  # NOTE(review): the original comment said "returns NA" but NULL is
  # what is actually returned — callers appear to test is.null().
  return(NULL)
}
#' Plot data and a fitted distribution on probability paper.
#'
#' result : a "fitdist" object (fitdistrplus).
#' rank   : plotting-position rule, "median" (median rank) or "mean" (mean rank).
#' method : paper/axis type, one of "norm", "lnorm", "weibull", "gumbel",
#'          "exp", "cdf".
#' Draws the plot as a side effect; returns NULL (without plotting) when the
#' inputs are unusable (non-fitdist, failed estimation, non-positive data on
#' a log axis, ...).
plot_paper <- function(result, rank = "median", method = "norm"){
  # References:
  #http://aoki2.si.gunma-u.ac.jp/R/npp2.html
  #http://aoki2.si.gunma-u.ac.jp/R/weibull.html
  # Input validation
  if(is.null(result)){return(result)}
  if(class(result)[1] != "fitdist"){return(NULL)}
  if(is.null(rank) || is.null(method)){return(NULL)}
  # Parameter estimation failed -> nothing to draw
  if(result$estimate %>% na.omit() %>% length() == 0){
    return(NULL)
  }
  # Observed data sorted ascending
  data <- result$data %>% sort
  # Median-rank plotting positions: (i - 0.3) / (n + 0.4)
  if(rank == "median"){
    mol_plus <- -0.3 # numerator offset
    den_plus <- 0.4 # denominator offset
  }
  # Mean-rank plotting positions: i / (n + 1)
  if(rank == "mean"){
    mol_plus <- 0 # numerator offset
    den_plus <- 1 # denominator offset
  }
  # Empirical cumulative probability (unreliability) for each point.
  # NOTE(review): if rank is neither "median" nor "mean", mol_plus/den_plus
  # are never assigned and the fi computation errors — confirm callers
  # restrict rank to these two values.
  rank_i <- c(1:length(data))
  rank_n <- length(data)
  fi <- (rank_i + mol_plus)/(rank_n + den_plus)
  # Build the vector of probabilities used for the y-axis tick marks.
  make.f.vec <- function(f.min, length = NULL){
    # Base-10 exponent of the smallest probability on the scale
    f.min.log <- floor(log10(f.min))
    # Small-probability ticks (powers of ten up to 0.1)
    if(is.null(length)){
      f.vec.small <- 10^c(f.min.log : -1)
    }else{
      f.vec.small <- 10^seq(f.min.log, -1, length = length)
    }
    # Mirror the small ticks near 1, plus a few standard probabilities
    f.vec.large <- 1 - f.vec.small
    f.vec.std <- c(0.2, 0.5, 0.632)
    f.vec <- sort(c(f.vec.small, f.vec.std, f.vec.large))
    # Combined tick probabilities, ascending
    return(f.vec)
  }
  # y-axis tick probabilities
  probs <- make.f.vec(min(fi))
  # Weibull-paper scale transform.
  # NOTE(review): uses log10(log10(1/(1-p))); classical Weibull paper uses
  # ln(ln(1/(1-p))). Both are monotone, so only axis labels differ in scale.
  weib <- function(p) log10(log10(1/(1-p)))
  # Exponential-paper scale transform
  #http://www.fml.t.u-tokyo.ac.jp/~sakai/kougi/ProbSystem/ProbPaper/probpaper.htm
  expp <- function(p) log(1/(1-p))
  # Log-normal or Weibull paper: x-axis is logarithmic
  if(method == "lnorm" || method == "weibull"){
    # Non-positive data cannot be placed on a log axis
    if(min(data) <= 0){
      return(NULL)
    }
    # log-scale x-axis
    plot.log <- "x"
    # Quantile grid for the fitted curve (log-spaced, one decade beyond data)
    q.vec <- exp(seq(log(min(data)) - 1, log(max(data)) + 1, length = 1000))
  }else{
    # linear x-axis
    plot.log <- ""
    # Quantile grid for the fitted curve (linear, +/- 3 SD beyond the data)
    q.vec <- seq(min(data) - 3*sd(data), max(data) + 3*sd(data), length = 1000)
  }
  # Evaluate the fitted CDF "p<distname>" on a quantile vector.
  pfit <- function(result, q){
    # Guard: only fitdist objects carry distname/estimate as used below
    if(class(result)[1] != "fitdist"){return(NULL)}
    # Name of the CDF function, e.g. "pnorm"
    p.func <- paste0("p", result$distname)
    # Estimated parameters (named numeric vector)
    est.vec <- result$estimate
    # Assemble "name=value, name=value" argument text
    est.param <- NULL
    for(i in 1:length(est.vec)){
      # Append parameter i
      est.param <- paste0(est.param, names(est.vec)[i], "=", est.vec[i])
      # Comma between parameters (not after the last one)
      if(i < length(est.vec)){est.param <- paste0(est.param, ", ")}
    }
    # Build and evaluate the call text, e.g. "ret <- pnorm(q, mean=0, sd=1)"
    eval(parse(text = paste0(
      "ret <- ", p.func, "(q, ", est.param, ")"
    )))
    return(ret)
  }
  # Fitted cumulative probabilities on the quantile grid
  p.vec <- pfit(result, q.vec)
  # Normal or log-normal paper: y-scale is the standard normal quantile
  if(method == "norm" || method == "lnorm"){
    plot.y <- qnorm(c(min(probs), max(probs)))
    point.y <- qnorm(fi)
    axis.y <- qnorm(probs)
    p.vec.y <- qnorm(p.vec)
  }
  # Weibull or Gumbel paper: double-log scale
  if(method == "weibull" || method == "gumbel"){
    plot.y <- weib(c(min(probs), max(probs)))
    point.y <- weib(fi)
    axis.y <- weib(probs)
    p.vec.y <- weib(p.vec)
  }
  # Exponential paper
  if(method == "exp"){
    plot.y <- expp(c(min(probs), max(probs)))
    point.y <- expp(fi)
    axis.y <- expp(probs)
    p.vec.y <- expp(p.vec)
  }
  # Plain cumulative distribution plot (no scale transform)
  if(method == "cdf"){
    plot.y <- (c(0, 1))
    point.y <- (fi)
    axis.y <- seq(0, 1, by = 0.1)
    p.vec.y <- (p.vec)
  }
  # Empty plot frame spanning the data range
  plot(c(data[1], data[rank_n]), plot.y,
       log = plot.log,
       type = "n", yaxt = "n",
       xlab = "Data", ylab = "Probability")
  # Observed points at their plotting positions
  points(data, point.y)
  # y-axis labelled in percent at the transformed tick positions
  axis(2, axis.y, probs*100)
  # Fitted distribution curve
  lines(q.vec, p.vec.y, col = "Red")
}
#' Log-odds (logit) transform: log(x / (1 - x)), vectorised over x.
logit <- function(x) {
  odds <- x / (1 - x)
  log(odds)
}
#' One-line textual summary of a numeric vector.
#'
#' Computes n, mean, SD, variance, skewness, kurtosis, excess kurtosis,
#' median, max and min (NAs dropped first) and formats them as
#' "name = value , " pieces concatenated into a single string.
#' Returns NULL for anything that is not a plain numeric vector.
vec.summary <- function(vec){
  # Guard clauses: only plain numeric vectors are summarised.
  if(is.null(vec)){return(NULL)}
  if(!is.vector(vec)){return(NULL)}
  if(!is.numeric(vec)){return(NULL)}
  # Drop missing values before computing statistics.
  vec <- na.omit(vec)
  # All summary statistics, in display order.
  stats <- list(
    n = length(vec),
    Mean = mean(vec),
    SD = sd(vec),
    VAR = var(vec),
    Skewness = EnvStats::skewness(vec),
    Kurtosis = EnvStats::kurtosis(vec, excess = FALSE),
    ExcessKurtosis = EnvStats::kurtosis(vec, excess = TRUE), # excess kurtosis
    Median = median(vec),
    Max = max(vec),
    Min = min(vec)
  )
  # Format each statistic as "name = value , " (4 significant digits),
  # then join the pieces with no separator.
  pieces <- vapply(
    seq_along(stats),
    function(k) paste(names(stats)[k], "=",
                      signif(stats[[k]], digits = 4),
                      ", "),
    character(1)
  )
  paste(pieces, collapse = "")
}
#' Format fitted parameters and goodness-of-fit statistics as display text.
#'
#' result : a "fitdist" object (fitdistrplus).
#' Returns a character vector of newline-terminated lines (parameters block,
#' AIC/BIC/log-likelihood, KS / CvM / AD / chi-squared tests), or NULL /
#' "Parameter estimation failed." on bad input.
#' (Relies on helpers null.na(), kspval(), cvmpval(), adpval() from this file.)
fitdist_summary <- function(result){
  # Input validation
  if(is.null(result)){return(NULL)}
  if(class(result)[1] != "fitdist"){return(NULL)}
  # Parameter estimation failed
  if(result$estimate %>% na.omit() %>% length() == 0){
    return("Parameter estimation failed.")
  }
  # Goodness-of-fit statistics (errors tolerated; fields guarded by null.na)
  gofstat.res <- try(gofstat(result), silent = TRUE)
  # Parameter block: one "name = value" per line
  ret1 <- "Parameters :"
  for(i in 1:length(result$estimate)){
    ret1 <- paste0(ret1, "\n", names(result$estimate)[i], " = ", result$estimate[i] %>% signif(4))
  }
  ret1 <- paste0(ret1, "\n")
  # Assemble the parameter block and each statistics line into a vector
  ret <- NULL
  ret[1] <- ret1[1]
  ret[2] <- paste0("n = ", result$n, ", ",
                   "AIC = ", null.na(result$aic)[1] %>% round(2), ", ",
                   "BIC = ", null.na(result$bic)[1] %>% round(2), ", ",
                   "log likelihood = ", null.na(result$loglik)[1] %>% round(2))
  ret[3] <- paste0("Kolmogorov-Smirnov, ",
                   "D = ", null.na(gofstat.res$ks)[1] %>% signif(4), ", ",
                   "p-value = ", kspval(result$n, gofstat.res$ks)[1] %>% signif(4))
  ret[4] <- paste0("Cramer-von Mises, ",
                   "omega2 = ", null.na(gofstat.res$cvm)[1] %>% signif(4), ", ",
                   "p-value = ", cvmpval(result$n, gofstat.res$cvm)[1] %>% signif(4))
  ret[5] <- paste0("Anderson-Darling, ",
                   "An = ", null.na(gofstat.res$ad)[1] %>% signif(4), ", ",
                   "p-value = ", adpval(result$n, gofstat.res$ad)[1] %>% signif(4))
  ret[6] <- paste0("Chi-squared = ",null.na(gofstat.res$chisq)[1] %>% signif(4), ", ",
                   "p-value = ", null.na(gofstat.res$chisqpvalue)[1] %>% signif(4))
  # NOTE(review): paste0() has no `sep` formal, so sep = "\n" is swallowed
  # by `...` and a "\n" is appended to EACH element; the result stays a
  # length-6 character vector, NOT a single newline-joined string as the
  # original comment ("join with newlines") implied. The downstream consumer
  # appears to rely on this vector-with-trailing-newlines form, so the
  # behavior is documented rather than changed — confirm before "fixing".
  ret <- paste0(ret, sep = "\n")
  #cat(ret)
  return(ret)
}
#' Cumulative probability of a fitted distribution at quantiles `q`.
#'
#' result : a "fitdist" object (fitdistrplus); its distname selects the
#'          CDF function "p<distname>" and its estimate supplies parameters.
#' q      : numeric quantile vector.
#' Returns the CDF values, or NULL on bad input / failed estimation.
pdist <- function(result, q){
  # Input validation
  if(is.null(result)){return(result)}
  if(!inherits(result, "fitdist")){return(NULL)}
  if(is.null(q)){return(NULL)}
  # Parameter estimation failed -> nothing to evaluate
  if(length(na.omit(result$estimate)) == 0){
    return(NULL)
  }
  # Name of the CDF function, e.g. "pnorm".
  p.func <- paste0("p", result$distname)
  # Call it via do.call instead of the original eval(parse(...)) text
  # round-trip: parameters are passed by name at full numeric precision
  # and no code string is ever constructed.
  return(do.call(p.func, c(list(q), as.list(result$estimate))))
}
#' Quantile of a fitted distribution at cumulative probabilities `p`.
#'
#' result : a "fitdist" object (fitdistrplus); its distname selects the
#'          quantile function "q<distname>" and its estimate supplies
#'          parameters.
#' p      : numeric probability vector in [0, 1].
#' Returns the quantiles, or NULL on bad input / failed estimation.
#' (The original comment said "cumulative probability" — this is the
#' inverse, the quantile function.)
qdist <- function(result, p){
  # Input validation
  if(is.null(result)){return(result)}
  if(!inherits(result, "fitdist")){return(NULL)}
  if(is.null(p)){return(NULL)}
  # Parameter estimation failed -> nothing to evaluate
  if(length(na.omit(result$estimate)) == 0){
    return(NULL)
  }
  # Name of the quantile function, e.g. "qnorm".
  q.func <- paste0("q", result$distname)
  # Call it via do.call instead of the original eval(parse(...)) text
  # round-trip: parameters are passed by name at full numeric precision
  # and no code string is ever constructed.
  return(do.call(q.func, c(list(p), as.list(result$estimate))))
}
#' Prefix each value of `num` with the string `chr` (vectorised).
#' Returns NULL when either argument is NULL.
chr.num <- function(num, chr){
  # Nothing to format without both pieces.
  if(is.null(chr) || is.null(num)){return(NULL)}
  paste0(chr, num)
}
#' Evaluate `res`, returning NULL instead of raising on error.
#'
#' `res` is a promise: it is evaluated lazily inside try(), so errors thrown
#' by the expression itself are caught here.
try.null <- function(res){
  ret <- try(res, silent = TRUE)
  # inherits() is the idiomatic class test (vs class(ret)[1] == "try-error").
  if(inherits(ret, "try-error")){return(NULL)}
  return(ret)
}
#' Pass data frames through unchanged; anything else becomes NULL.
is.data.frame.null <- function(obj){
  # Guard clause instead of if/else: reject non-data-frames early.
  if(!is.data.frame(obj)){
    return(NULL)
  }
  obj
}
|
ab4bce4cb9f4fa36dc767d4bb5456f5ec93d7194
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.application.integration/man/sqs_untag_queue.Rd
|
d8f1295f7c90bd94a63119761aed19807ddd56e9
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 789
|
rd
|
sqs_untag_queue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqs_operations.R
\name{sqs_untag_queue}
\alias{sqs_untag_queue}
\title{Remove cost allocation tags from the specified Amazon SQS queue}
\usage{
sqs_untag_queue(QueueUrl, TagKeys)
}
\arguments{
\item{QueueUrl}{[required] The URL of the queue.}
\item{TagKeys}{[required] The list of tags to be removed from the specified queue.}
}
\description{
Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see \href{https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html}{Tagging Your Amazon SQS Queues} in the \emph{Amazon SQS Developer Guide}.
See \url{https://www.paws-r-sdk.com/docs/sqs_untag_queue/} for full documentation.
}
\keyword{internal}
|
ffa793f22f8dfe8584df6e3a9476a056f141799c
|
f890ebe54e12c534de49e2160afca2e78065d770
|
/R/mortalityTable.period.R
|
b33b150b15fc64097de63bc407f93110a0770477
|
[] |
no_license
|
Algorios/r-mortality-tables
|
c4dca0898b80e17e33443a2bd8469c8104fcb8a0
|
95cd06a0298b65829ab8ca2eb939a1a2b6d425e4
|
refs/heads/master
| 2021-06-04T16:12:04.194171
| 2016-09-07T17:12:31
| 2016-09-07T17:12:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
mortalityTable.period.R
|
#' @include mortalityTable.R
NULL
#' Class mortalityTable.period - Period life tables
#'
#' A period life table, giving death probabilities for each age, up to
#' maximum age \code{omega}. The \code{baseYear} slot can be used to hold
#' information about the period.
#'
#' @slot ages The ages corresponding to the entries of the deathProbs
#' @slot deathProbs The one-year death probabilities for the ages
#'
#' @export mortalityTable.period
#' @exportClass mortalityTable.period
# S4 class for a period life table: one-year death probabilities per age,
# up to maximum age omega. Extends the abstract "mortalityTable" class
# (defined in mortalityTable.R).
mortalityTable.period = setClass(
  "mortalityTable.period",
  slots = list(
    ages = "numeric",        # ages corresponding to the deathProbs entries
    deathProbs = "numeric"   # one-year death probabilities for those ages
  ),
  prototype = list(
    # The original wrapped the literal in eval(0:120) — a no-op, since the
    # expression is evaluated either way; the bare literal is equivalent.
    ages = 0:120,
    deathProbs = rep(1, 120)
    # NOTE(review): ages has 121 entries (0..120) but deathProbs only 120 —
    # the prototype lengths look inconsistent; confirm the intended omega
    # before changing either value.
  ),
  contains = "mortalityTable"
)
|
72973768dc983f4719384ce822c67b03feba143a
|
0b8ff928c22d87ee5d7a8a19f5ce8ff51bd7155c
|
/ThesisWorkThirdPaper.R
|
e5eb826f150fc043f238d487efa8607f163e5146
|
[] |
no_license
|
Gtmille2/seamlessTrial
|
d9104369cfda1987c374d8b3535d925df7211105
|
7dcd49f4c09486ee3d383eb1df8bc673d77a658e
|
refs/heads/master
| 2020-07-31T17:24:08.170740
| 2019-11-17T18:35:21
| 2019-11-17T18:35:21
| 210,692,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,396
|
r
|
ThesisWorkThirdPaper.R
|
##### This script is to test different parameter settings for the third paper to determine how they affect the outcome of the type I error #####
library(seamlessTrials)
#Running multiple simulations:
save.boundary = save.boundary.values()
n.trt = 3
nsim = 1000
n1 = 20
N1 = 100
N=200
sigma0 = 1
sigma = 1
p1 = .5
p2 = .5
rho = 0.5
tau1 = 1
tau2 = 1
mean.s = NULL
mean.t = NULL
design = "SPBD"
set.seed(10101)
allsims = NULL
###Under null hypothesis that all treatment effects are equal
ptm = proc.time()
reject = simulatetrials(N1 = 250, N = 500, n.trt = n.trt, mean.s = rep(0,n.trt+1), mean.t = rep(0,n.trt+1), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 100, design = "Pocock",tau1= 1,tau2 = 1)
proc.time() - ptm
ptm = proc.time()
reject = simulatetrials(nsim=100)
proc.time() - ptm
allsims = cbind(allsims, reject)
reject = simulatetrials(N1 = 300, N = 500, n.trt = n.trt, mean.s = rep(0,n.trt+1), mean.t = rep(0,n.trt+1), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock", tau1= 1,tau2 = 1)
allsims = cbind(allsims, reject)
proc.time() - ptm
###Under alternative hypothesis that at least 1 treatment is not equal to zero
ptm = proc.time()
reject = simulatetrials(N1 = 200, N = 500, n.trt = n.trt, mean.s = rep(0,(n.trt+1)), mean.t = rep(0,n.trt+1), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 100, design = "SPBD", tau1= 1,tau2 = 1)
proc.time() - ptm
reject = simulatetrials(N1 = 300, N = 500, n.trt = n.trt, mean.s = rep(0,n.trt+1), mean.t = rep(0,n.trt+1), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "SPBD", tau1= 1,tau2 = 1)
###Using boundary.values to calculate the future boundary value:
n.trt = 3
#Pocock's design for Type II error
# Type-I-error scenarios: mean.t is all zero; only the surrogate effect of the
# last arm is shifted (0, +.333, -.333).
ptm = proc.time()
pocockreject1 = simulateest(N1 = 100, N = 200, n.trt = n.trt, mean.s = c(rep(0,3),0), mean.t =c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 100, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
pocockreject2 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),.333), mean.t = c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
pocockreject3 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),-.333), mean.t = c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
#### Pocock's design for Power
# Power scenarios: the last arm has a true-endpoint effect of 0.333.
ptm = proc.time()
pocockpower1 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),0), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
pocockpower2 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),.333), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
# BUG FIX: the original read "mean.t = c(rep(0,3),0.333)(0,n.trt+1)", which tries
# to call a numeric vector as a function and errors at runtime; the trailing
# "(0,n.trt+1)" was a copy-paste remnant and has been removed.
pocockpower3 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),-.333), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "Pocock",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
### SPBD for type II error
# Same scenarios as the Pocock runs above, but with the stratified permuted
# block design ("spbd").
ptm = proc.time()
spbdreject1 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),0), mean.t =c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 100, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
spbdreject2 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),.333), mean.t = c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
spbdreject3 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),-.333), mean.t = c(rep(0,3),0), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
### SPBD for power
ptm = proc.time()
spbdpower1 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),0), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
spbdpower2 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),.333), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
# BUG FIX: removed the stray "(0,n.trt+1)" that followed mean.t in the original
# (calling a numeric vector as a function is a runtime error).
spbdpower3 = simulateest(N1 = 250, N = 500, n.trt = n.trt, mean.s = c(rep(0,3),-.333), mean.t = c(rep(0,3),0.333), p1 = .5,p2 = .5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000, design = "spbd",tau1= 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
####No CAR
# Simple (non covariate-adaptive) randomization runs for the same scenarios.
ptm = proc.time()
nocarreject1 = simulateNoCar(n1 = 20, N1 = 100,N = 200,n.trt = 3,mean.s = c(rep(0,3),0),mean.t = rep(0,n.trt+1), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = 0,nsim = 1000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
nocarreject2 = simulateNoCar(N1 = 250,N = 500,n.trt = 3,mean.s = c(rep(0,3),0.333),mean.t = rep(0,n.trt+1), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
nocarreject3 = simulateNoCar(N1 = 250,N = 500,n.trt = 3,mean.s = c(rep(0,3),-0.333),mean.t = rep(0,n.trt+1), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
nocarpower1 = simulateNoCar(n1 = 20, N1 = 100,N = 200,n.trt = 3,mean.s = c(rep(0,3),0),mean.t = c(rep(0,3),0.333), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = 0,nsim = 10000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
nocarpower2 = simulateNoCar(N1 = 250,N = 500,n.trt = 3,mean.s = c(rep(0,3),0.333),mean.t = c(rep(0,3),0.333), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
ptm = proc.time()
nocarpower3 = simulateNoCar(N1 = 250,N = 500,n.trt = 3,mean.s = c(rep(0,3),-0.333),mean.t = c(rep(0,3),0.333), p1 = .5, p2 =.5,sigma0 = 1,sigma = 1,rho = .5,nsim = 10000,tau1 = 1,tau2 = 1,save.boundary = save.boundary)
proc.time() - ptm
# NOTE(review): the next line silently overwrites the nocarpower1 result computed
# just above with a simNoCarOrig run using different settings — confirm intended.
nocarpower1 = simNoCarOrig(n1=20, N1= 100, N=200, n.trt =3, mean.s=rep(0,n.trt+1), mean.t = c(rep(0,3), 0.3333), p1 = .5, p2=.5, sigma0 = 1, sigma = 1, rho = 0.5, nsim = 1000, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
# BUG FIX: the original passed p2 = 5; p2 is a probability everywhere else in
# this script (.5 or .4), so 5 is a dropped-decimal typo — corrected to .5.
pocockpower1 = simCarOrig(n1= 20, N1= 100, N =200, n.trt = 3, mean.s = rep(0,n.trt+1), mean.t = c(rep(0,3), 0.3333), p1 = .5, p2 = .5, sigma0 = 1, sigma = 1, rho = .5, nsim = 100, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "Pocock")
# Run the type-I-error simulation battery for one parameter setting.
#
# For each of three surrogate-endpoint shifts on the last arm
# (0, +mean.shift, -mean.shift) the true-endpoint means are all zero, so every
# rejection is a type I error.  Each scenario is run under the stratified
# permuted block CAR design (simCarOrig, design = "spbd") and under simple
# randomization (simNoCarOrig); the two designs' results are stacked row-wise.
#
# Args:
#   n1, N1, N     burn-in, interim and final sample sizes (passed through)
#   n.trt         number of experimental treatment arms
#   p1, p2        covariate probabilities (passed through)
#   sigma0, sigma, rho, tau1, tau2   variance/correlation settings (passed through)
#   nsim          simulated trials per scenario
#   save.boundary pre-computed boundary values (see save.boundary.values())
#   block.size    block size for the SPBD design
#   mean.shift    magnitude of the surrogate shift (default 0.3333, the value
#                 previously hard-coded)
#
# Fix vs original: n.trt, sigma0, sigma, tau1 and tau2 were accepted but ignored
# (every call hard-coded n.trt = 3 and unit variances); they are now passed
# through, which is behavior-identical at the default values.
allsimerror = function(n1 = 20, N1 = 100, N = 200, n.trt = 3, p1, p2, sigma0 = 1, sigma = 1, rho, nsim = 10000, tau1 = 1, tau2 = 1, save.boundary, block.size = 12, mean.shift = 0.3333) {
  # Pocock-design runs existed in the original but were commented out; they
  # remain intentionally excluded from the combined result.
  spbderror1 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), 0), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  spbderror2 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), mean.shift), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  spbderror3 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt),-mean.shift), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  nocarerror1 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), 0), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  nocarerror2 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), mean.shift), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  nocarerror3 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt),-mean.shift), mean.t = c(rep(0,n.trt), 0), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  # Columns: one per scenario; rows: SPBD results on top of no-CAR results.
  spbderror = cbind(spbderror1,spbderror2,spbderror3)
  nocarerror = cbind(nocarerror1,nocarerror2,nocarerror3)
  allsim = rbind(spbderror, nocarerror)
  return(allsim)
}
# Run the power simulation battery for one parameter setting.
#
# Identical structure to allsimerror(), except the last arm carries a true
# treatment effect of effect.size on the true endpoint, so rejections measure
# power rather than type I error.  The surrogate shift still varies over
# (0, +mean.shift, -mean.shift).  SPBD (simCarOrig) and simple-randomization
# (simNoCarOrig) results are stacked row-wise.
#
# Args: as allsimerror(), plus
#   mean.shift   surrogate-endpoint shift magnitude (default 0.3333)
#   effect.size  true-endpoint effect of the last arm (default 0.3333)
#
# Fix vs original: n.trt, sigma0, sigma, tau1 and tau2 were accepted but ignored
# (calls hard-coded n.trt = 3 and unit variances); they are now passed through,
# which is behavior-identical at the default values.
allsimpower = function(n1 = 20, N1 = 100, N = 200, n.trt = 3, p1, p2, sigma0 = 1, sigma = 1, rho, nsim = 10000, tau1 = 1, tau2 = 1, save.boundary, block.size = 12, mean.shift = 0.3333, effect.size = 0.3333) {
  # Pocock-design runs existed in the original but were commented out; they
  # remain intentionally excluded from the combined result.
  spbdpower1 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), 0), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  spbdpower2 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), mean.shift), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  spbdpower3 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt),-mean.shift), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary, design = "spbd", block.size = block.size)
  nocarpower1 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), 0), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  nocarpower2 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt), mean.shift), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  nocarpower3 =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = n.trt, mean.s = c(rep(0,n.trt),-mean.shift), mean.t = c(rep(0,n.trt), effect.size), p1 = p1, p2 = p2, sigma0 = sigma0, sigma = sigma, rho = rho, nsim = nsim, tau1 = tau1, tau2 = tau2, save.boundary = save.boundary)
  # Columns: one per scenario; rows: SPBD results on top of no-CAR results.
  spbdpower = cbind(spbdpower1,spbdpower2,spbdpower3)
  nocarpower = cbind(nocarpower1,nocarpower2,nocarpower3)
  allsim = rbind( spbdpower, nocarpower)
  return(allsim)
}
#### Testing 3 different rho values ####
# Scenario: n1 = 20, N1 = 100, N = 200, p1 = p2 = .5; rho in {0, .5, .7}.
n1 = 20
N1 =100
N = 200
p1 = .5
p2 = .5
nsim = 5000
ptm = proc.time()
error_rho_0 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_5 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_7 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
proc.time() -ptm
# Stack the three rho settings and save; file name encodes n1_N1_N_nsim.
allsimerror1 = rbind(error_rho_0,error_rho_5,error_rho_7)
write.csv(allsimerror1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimerror20_100_200_5000.csv")
proc.time() -ptm
power_rho_0 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_5 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_7 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
allsimpower1 = rbind(power_rho_0,power_rho_5,power_rho_7)
write.csv(allsimpower1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimpower20_100_200_5000.csv")
# Same sweep as above but with unequal covariate probabilities (p1 = .5, p2 = .4).
n1 = 20
N1 =100
N = 200
p1 = .5
p2 = .4
nsim = 5000
ptm = proc.time()
error_rho_0 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_5 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_7 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
proc.time() -ptm
allsimerror1 = rbind(error_rho_0,error_rho_5,error_rho_7)
# NOTE(review): the "_rho54" file-name suffix appears to encode p1 = .5 / p2 = .4
# (not a rho value) — consider renaming to "_p5_p4" to avoid confusion.
write.csv(allsimerror1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimerror20_100_200_5000_rho54.csv")
proc.time() -ptm
power_rho_0 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_5 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_7 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
allsimpower1 = rbind(power_rho_0,power_rho_5,power_rho_7)
write.csv(allsimpower1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimpower20_100_200_5000_rho54.csv")
# Same sweep with larger sample sizes (n1 = 50, N1 = 150, N = 300) and p1 = p2 = .5.
p1 = .5
p2 = .5
n1 = 50
N1 =150
N = 300
nsim = 5000
ptm = proc.time()
error_rho_0 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_5 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_7 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
proc.time() -ptm
allsimerror1 = rbind(error_rho_0,error_rho_5,error_rho_7)
write.csv(allsimerror1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimerror50_150_300_5000.csv")
proc.time() -ptm
power_rho_0 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_5 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_7 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
allsimpower1 = rbind(power_rho_0,power_rho_5,power_rho_7)
write.csv(allsimpower1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimpower50_150_300_5000.csv")
#### Trying to change the block size to determine its effect on the type I error
# Fixed scenario; only block.size varies across the run triples below.  Each
# triple covers surrogate shifts 0 / +0.3333 / -0.3333 with mean.t all zero.
n.trt = 3
n1 = 20
N1 =100
N = 200
nsim = 5000
rho = 0.5
p1 = .5
p2 = .5
spbderror1b12 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 12)
spbderror2b12 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 12)
spbderror3b12 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 12)
spbderror1b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbderror2b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbderror3b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbderror1b20 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror2b20 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror3b20 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror1b40 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 40)
spbderror2b40 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 40)
spbderror3b40 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 40)
# NOTE(review): the three "b8" runs below pass block.size = 4000, 4000 and 400 —
# inconsistent with each other and with the "b8" suffix (which suggests 8).
# Confirm the intended block size before relying on these results.
spbderror1b8 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 4000)
spbderror2b8 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 4000)
spbderror3b8 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 400)
# Sweep block sizes and record the three per-scenario type-I-error estimates
# (surrogate shift 0 / +0.3333 / -0.3333) at each size.
blocksizes = seq(4, 4000,200)
nsim = 10000  # NOTE(review): overrides the nsim = 5000 set earlier — confirm intended
# Preallocate one row per block size; the original grew `allerrors` with rbind
# inside the loop, which copies the whole matrix on every iteration.
allerrors = matrix(NA_real_, nrow = length(blocksizes), ncol = 3)
for (k in seq_along(blocksizes)) {
  i = blocksizes[k]
  spbderror1 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = i)
  spbderror2 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = i)
  spbderror3 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = i)
  allerrors[k, ] = c(spbderror1$power, spbderror2$power, spbderror3$power)
}
# Mean rejection rate per block size (averaged over the three scenarios).
errorMeans = rowMeans(allerrors)
# Renamed from `all` to avoid masking base::all().
blockdf = data.frame(blocksizes, errorMeans)
# NOTE(review): ggplot2 is never attached in this script; library(ggplot2) (or a
# seamlessTrials dependency that attaches it) is required for the next two lines.
b = ggplot(blockdf, aes(x = blocksizes, y = errorMeans))
b + geom_point()
# Power runs at block.size = 16 (true-endpoint effect 0.3333 on the last arm).
spbdpower1b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.3333), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbdpower2b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.3333), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbdpower3b16 = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.3333), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 16)
spbdb12 = rbind(spbderror1b12,spbderror2b12,spbderror3b12)
spbdb16 = rbind(spbderror1b16,spbderror2b16,spbderror3b16)
spbdb16power = rbind(spbdpower1b16,spbdpower2b16,spbdpower3b16)
spbdb20 = rbind(spbderror1b20,spbderror2b20,spbderror3b20)
spbdb40 = rbind(spbderror1b40,spbderror2b40,spbderror3b40)
spbderror = cbind(spbdb12$power,spbdb16$power,spbdb20$power, spbdb40$power)
# BUG FIX: the original wrote spbdm4 / spbdm6 / spbdm6power, which are never
# defined anywhere in this script; the target file names show the intended
# objects are spbdb12, spbdb16 and spbdb16power.
write.csv(spbdb12,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/spbdb12.csv")
write.csv(spbdb16,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/spbdb16.csv")
write.csv(spbdb16power,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/spbdb16power.csv")
colMeans(spbderror)
# Same rho sweep with the largest sample sizes (n1 = 100, N1 = 200, N = 400).
# p1/p2 are whatever was last assigned above (p1 = .5, p2 = .5).
n1 = 100
N1 =200
N = 400
nsim = 5000
ptm = proc.time()
error_rho_0 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_5 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
error_rho_7 = allsimerror(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
proc.time() -ptm
allsimerror1 = rbind(error_rho_0,error_rho_5,error_rho_7)
write.csv(allsimerror1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimerror100_200_400_5000.csv")
proc.time() -ptm
power_rho_0 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_5 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.5, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
power_rho_7 = allsimpower(n1 = n1, N1 = N1, N = N, n.trt = 3, p1 = p1, p2 = p2,sigma0 = 1, sigma= 1, rho = 0.7, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
allsimpower1 = rbind(power_rho_0,power_rho_5,power_rho_7)
write.csv(allsimpower1,"C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/allsimpower100_200_400_5000.csv")
##### Testing Bootstrap
# Bootstrap ("BS") variants of the CAR simulations, plus matching no-CAR runs.
n1 = 20
N1 =100
N = 200
p1 = .5
p2 = .5
nsim = 100  # immediately overridden by nsim = 10000 below; kept for history
n.trt = 3
rho = 0.5
nsim = 10000
pocockerror1bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "Pocock")
pocockerror2bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "Pocock")
pocockerror3bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "Pocock")
spbderror1bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror2bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror3bs = simCarOrigBS(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
# NOTE(review): the no-CAR trio below uses surrogate shifts +0.3333 / -0.3333 /
# -0.3333 — the zero-shift scenario is missing and -0.3333 is duplicated, unlike
# every other trio in this script (0 / +0.3333 / -0.3333). Confirm intended.
nocarerror1bs =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
nocarerror2bs =simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
nocarerror3bs = simNoCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary)
# BUG FIX: the original cbind lines referenced pocockerror1/spbderror1/... (the
# non-"bs" names), which are either undefined here or stale leftovers from the
# block-size loop above; the bootstrap results are the "*bs" objects.
pocockerror = cbind(pocockerror1bs,pocockerror2bs,pocockerror3bs)
spbderror = cbind(spbderror1bs,spbderror2bs,spbderror3bs)
nocarerror = cbind(nocarerror1bs,nocarerror2bs,nocarerror3bs)
# Note: pocockerror is computed but not included in the saved table (as original).
bs = rbind(spbderror, nocarerror)
write.csv(bs, "C:/Users/garre/OneDrive/Documents/UTHealth Files/Thesis/sbpdbs20_100_200.csv")
# NOTE(review): the three runs below overwrite the bootstrap spbderror*bs objects
# with non-bootstrap simCarOrig results after the CSV has been written — confirm
# this overwrite is intended.
spbderror1bs = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3), 0.0000), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror2bs = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),-0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
spbderror3bs = simCarOrig(n1=n1, N1 = N1, N = N, n.trt = 3, mean.s = c(rep(0,3),0.3333), mean.t = c(rep(0,3), 0.0), p1 = p1, p2 = p2, sigma0 = 1, sigma = 1, rho = rho, nsim = nsim, tau1 = 1, tau2 = 1, save.boundary = save.boundary, design = "spbd", block.size = 20)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.