blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
090d010ac8cb50459341ec672f3583ef7612dc67
|
612ad2e7bd9b6f3352651d9fc7124afcb31a8dd8
|
/R/remix_PI.R
|
7bf2c5285081c0bf79a78c7f3b67b145720c167e
|
[] |
no_license
|
cbrown5/remixsiar
|
4dd684e3041a57b6e78baa0aeeea5921171dfea4
|
f3c5c060bd43567816ae1c973c6aa7a047cad129
|
refs/heads/master
| 2020-05-29T08:47:46.546346
| 2020-04-25T13:17:38
| 2020-04-25T13:17:38
| 70,018,787
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,776
|
r
|
remix_PI.R
|
#' Estimate predictive intervals for consumer isotope ratios
#'
#' Plotting the consumer's isotope values against those predicted by the model
#' can be useful to detect model bias, for instance when consumers fall outside
#' the source polygon. This function also returns residuals for the consumers
#' as their deviations from the mean predicted value.
#'
#' @usage remix_PI(simdat, simmr_in, simmr_out, groups = NULL,
#' plotresid = TRUE, probs = c(0.025, 0.975))
#'
#' @param simdat A list of simulated predictions with elements \code{xmean}
#' and \code{xvar}: one matrix per group, rows are posterior draws and
#' columns are tracers.
#' @param simmr_in A \code{simmr_input} object.
#' @param simmr_out A \code{simmr_output} object.
#' @param groups A \code{numeric} with the groups to plot if they are used,
#' otherwise \code{NULL}. (Currently not referenced inside the function —
#' TODO confirm intended use.)
#' @param plotresid A \code{logical} which determines whether credibility
#' residual plots are created. Alternatively a numeric vector of group
#' indices, in which case only those groups are processed and plots are
#' always drawn.
#' @param probs A \code{numeric} vector of two numbers giving the lower and
#' upper predictive intervals.
#' @return A \code{remix_PI} object with
#' \item{predint}{predictive intervals for each group and tracer}
#' \item{resid}{residuals for each group and tracer.}
#'
#' @details
#' New samples should fall within the predictive intervals
#' with 95% (or other user-specified interval) probability.
#' Model bias is indicated by consumer data that fall outside the predictive
#' intervals. Bias may occur, among other things due to
#' missed sources or incorrect fractionation estimates.
#' @author Christopher J. Brown
#' @rdname remix_PI
#' @export
remix_PI <- function(simdat, simmr_in, simmr_out, groups = NULL, plotresid = TRUE, probs = c(0.025, 0.975)){
  # inherits() is the robust class test; class() may return a vector, which
  # `!=` would compare elementwise
  if (!inherits(simmr_out, "simmr_output")) stop("A simmr output object is required input")
  nxvals <- 100  # resolution of the grid used to invert the predictive CDF
  ngrps <- length(simmr_out$output)
  mixes <- data.frame(simmr_in$mixtures)
  nobs <- nrow(mixes)
  # pnorm vectorised over mean/sd: one call evaluates the CDF for every
  # posterior draw at every grid point
  vpnorm <- Vectorize(pnorm, vectorize.args = c('mean', 'sd'))
  ntotal <- nrow(simdat$xmean[[1]])  # number of posterior draws
  # plotresid doubles as a group selector: logical -> all groups;
  # numeric vector -> only those groups, with plotting forced on
  if (is.logical(plotresid)) {
    igrps <- 1:ngrps
  } else {
    igrps <- plotresid
    plotresid <- TRUE
  }
  # Template for per-group predictive-interval tables (2 rows: lower/upper)
  quants <- matrix(NA, nrow = 2, ncol = simmr_in$n_tracers)
  quants <- data.frame(quants)
  # NOTE(review): the code below uses simmr_in$mixtures for tracer names;
  # confirm simmr_in$mix exists, else these names are NULL
  names(quants) <- colnames(simmr_in$mix)
  rownames(quants) <- paste0('Quant_', probs)
  quants <- lapply(igrps, function(x) quants)
  names(quants) <- paste0('Group_', igrps)
  yresid <- lapply(1:length(igrps), function(x) matrix(NA, nrow = nobs, ncol = simmr_in$n_tracers))
  names(yresid) <- paste0('Group_', igrps)
  # NOTE(review): quants/yresid have length(igrps) elements but are indexed
  # by the group number itself; this only lines up when igrps starts at 1 —
  # confirm behaviour for a numeric plotresid like c(2, 3)
  for (igrp in igrps) {
    for (itrc in 1:simmr_in$n_tracers) {
      # Grid spanning the predictive distribution (mean range +/- 2 sd)
      minx <- min(simdat$xmean[[igrp]][, itrc]) - max((2 * sqrt(simdat$xvar[[igrp]][, itrc])))
      maxx <- max(simdat$xmean[[igrp]][, itrc]) + max((2 * sqrt(simdat$xvar[[igrp]][, itrc])))
      xvals <- seq(minx, maxx, length.out = nxvals)
      pout <- vpnorm(xvals, mean = simdat$xmean[[igrp]][, itrc], sd = sqrt(simdat$xvar[[igrp]][, itrc]))
      # Posterior-averaged predictive CDF at each grid point
      xquant <- apply(pout, 1, function(x) sum(x) / ntotal)
      # Grid points whose CDF value is closest to the requested probabilities
      ilwr <- which.min(abs(xquant - probs[1]))
      iupr <- which.min(abs(xquant - probs[2]))
      quants[[igrp]][1, itrc] <- xvals[ilwr]
      quants[[igrp]][2, itrc] <- xvals[iupr]
      # Residuals are deviations from the median predicted mean
      tmid <- median(simdat$xmean[[igrp]][, itrc])
      yresid[[igrp]][, itrc] <- mixes[, itrc] - tmid
      if (plotresid) {
        x <- 1:nobs
        iord <- order(yresid[[igrp]][, itrc])
        ymin <- min(c(quants[[igrp]][, itrc] - tmid, yresid[[igrp]][, itrc])) * 1.1
        ymax <- max(c(quants[[igrp]][, itrc] - tmid, yresid[[igrp]][, itrc])) * 1.1
        maint <- paste('Group', igrp, ',', colnames(simmr_in$mixtures)[itrc])
        plot(x, yresid[[igrp]][iord, itrc], ylim = c(ymin, ymax), ylab = 'Residuals', xaxt = 'n', xlab = 'observations', pch = 16, main = maint)
        abline(h = 0)
        arrows(x, yresid[[igrp]][iord, itrc], x, rep(0, nobs), len = 0)
        # BUG FIX: quants[[igrp]] has only two rows, so row-indexing it with
        # iord (length nobs) referenced nonexistent rows; the dashed lines
        # should mark the two PI bounds (centred on the median prediction)
        abline(h = quants[[igrp]][, itrc] - tmid, lty = 2)
      }
    }
  }
  rout <- list(predint = quants, resid = yresid)
  class(rout) <- 'remix_PI'
  return(rout)
}
|
45b9c8e1241310cf7966a5ce768db1d40c506b0e
|
f98a371ab8be01dabaa44f73164ed28fee24e8f3
|
/R/dhs_tags.R
|
bf953ea1d13dc94812b302397dede6f6e1abf869
|
[] |
no_license
|
muschellij2/dhs
|
9850fde5624f244389bcf9da758dfaa8b9e18652
|
1ce747280f7f7304649b72dbf65b497e324a9833
|
refs/heads/master
| 2020-05-21T05:30:18.409993
| 2017-10-09T23:13:36
| 2017-10-09T23:13:36
| 84,578,817
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
r
|
dhs_tags.R
|
#' @title Return DHS Tags
#' @description Retrieves the set of tags published on the DHS website via
#' its REST API.
#'
#' @return A \code{data.frame} listing the available tags
#' @export
#'
#' @examples
#' head(dhs_tags())
dhs_tags <- function() {
  # Endpoint URL: the package's base REST URL with the "tags" resource appended
  tags_url <- paste0(dhs::rest_dhs_url, "tags")
  # Fetch the raw API response, then bind its data entries into one data.frame
  response <- get_dhs_data(url = tags_url)
  rbind_dhs_data(response$data)
}
|
ddfe97b657f35b5bcc73d86a9ca6aba4e185ce5c
|
ca7a4f8bb6896fdfc6cb3ded207308e88cb15707
|
/phastCons/scripts/2_conservation/conservation_sarcopterygii.R
|
a7ceba7518f77233edf5d24588e03e9ea1f556d3
|
[] |
no_license
|
paulati/acc_regions
|
9a0d3b11bfe5b7653b6d9fe148ad976a1fd72ad8
|
07fca8d32da44238de8e9946da23d41a28eaea13
|
refs/heads/master
| 2020-03-27T01:30:57.172243
| 2018-08-22T17:15:39
| 2018-08-22T17:15:39
| 145,718,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,162
|
r
|
conservation_sarcopterygii.R
|
source("~/2018/phastCons/scripts/config/paths_config_modneuPhastCons100way.R")
source("~/2018/phastCons/scripts/2_conservation/conservation_base.R")
# Load the sarcopterygii alignment (MAF) for a single chromosome, delegating
# to the shared loader defined in conservation_base.R. The remote and local
# base folder paths come from the sourced paths-config script.
load.alignment <- function(chr.id) {
  maf_suffix <- "_sarcopterygii.maf"
  load.alignment.base(chr.id, maf_suffix,
                      remote.sarcopterygii.align.base.folder.path,
                      sarcopterygii.align.base.path)
}
#paula rehacer, esta todo hardcode
# save.bed.file <- function(data, chr.id)
# {
#
# q <- nrow(data)
# bed.data <- data.frame(chr=character(q),
# start=integer(q),
# end=integer(q))
#
# char.name <- paste("chr", chr.id, sep="")
#
# bed.data$chr <- rep(char.name, q)
# bed.data$start <- data$start
# bed.data$end <- data$end
# out.file.name <- paste("chr", chr.id, "_intersect_req.bed", sep="")
# setwd(phastConsElements.out.base.path)
# write.table(bed.data, out.file.name, sep="\t", col.names = FALSE, row.names = FALSE, quote=FALSE)
#
#
# }
# Identify alignment regions informative for the required species and for at
# least one turtle species, and return their sorted intersection.
required.species.feats <- function(align)
{
  # Both of these species must be present in a region
  req_species <- c("allMis1", "anoCar2")
  # At least one turtle species must be present
  # (note from original author: one of these is not in the alignment)
  req_turtles <- c("cheMyd1", "chrPic2", "pelSin1", "apaSpi1")
  # informative.regions.msa will not alter `align` even when it is stored as
  # a pointer to a C object
  req_regions <- informative.regions.msa(align, min.numspec = 2, spec = req_species,
                                         refseq = "hg38", gaps.inf = FALSE)
  turtle_regions <- informative.regions.msa(align, min.numspec = 1, spec = req_turtles,
                                            refseq = "hg38", gaps.inf = FALSE)
  # Intersection of the two region sets (or = FALSE). coverage.feat may
  # reorder features stored as pointers to C objects, hence the explicit sort.
  overlap <- coverage.feat(req_regions, turtle_regions, or = FALSE, get.feats = TRUE)
  sort(overlap, decreasing = FALSE)
}
# Entry point; main() is presumably defined in the sourced
# conservation_base.R script — TODO confirm
main()
#
#
#
# setwd(phastConsElements.out.base.path)
# out.file.name <- paste("chr", chr.id, "_phastCons_mostConserved_sinFiltro.csv", sep="")
# write.table(cons.elements, out.file.name, sep="\t", col.names = TRUE, row.names = FALSE, quote=FALSE)
#
#
# q <- nrow(cons.elements)
# bed.data <- data.frame(chr=character(q),
# start=integer(q),
# end=integer(q))
#
# char.name <- paste("chr", chr.id, sep="")
#
# bed.data$chr <- rep(char.name, q)
# bed.data$start <- cons.elements$start
# bed.data$end <- cons.elements$end
# out.file.name <- paste("chr", chr.id, "_phastCons_mostConserved_sinFiltro.bed", sep="")
# write.table(bed.data, out.file.name, sep="\t", col.names = FALSE, row.names = FALSE, quote=FALSE)
#
|
e4de1cf3d552a9b0d8d677224a0d8a807ba690f1
|
cb102427b8a13e8667a6e97e6b04976f16215615
|
/man/weightedMscale.Rd
|
bcaf8f23f55eae7991a94dea5cf3ab930576bd12
|
[] |
no_license
|
cran/RMBC
|
a672b792273a4e9d7110b25f78cb1c2125867bde
|
53101d69ac03b1cc6a6a730575a55943d0d97da7
|
refs/heads/master
| 2023-06-26T10:11:18.724235
| 2021-07-22T05:40:05
| 2021-07-22T05:40:05
| 388,515,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 896
|
rd
|
weightedMscale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedMscale.R
\name{weightedMscale}
\alias{weightedMscale}
\title{weightedMscale
the M scale of a univariate sample (see reference below)}
\usage{
weightedMscale(u, b = 0.5, weights, c, initialsc = 0)
}
\arguments{
\item{u}{a univariate sample of size n.}
\item{b}{the desired breakdown point.}
\item{weights}{the weights of each observation.}
\item{c}{a tuning constant; if consistency with the standard normal distribution is desired, use
\code{\link{normal_consistency_constants}}}
\item{initialsc}{the initial scale value, defaults to 0}
}
\value{
the weighted-Mscale value
}
\description{
weightedMscale
the M scale of a univariate sample (see reference below)
}
\references{
Maronna, R. A., Martin, R. D., Yohai, V. J., & Salibián-Barrera, M. (2018).
Robust statistics: theory and methods (with R). Wiley.
}
|
db43958c3cf70850c6eb9520608064992f89d21f
|
49aaf90b347f72c37235ecf39a129a1365d54d1c
|
/DataAnalysis/riskbugetResult.R
|
41fdee8a2981e2184e6b23acd23be5c8e2b92307
|
[] |
no_license
|
algo21-116010293/Assignment1
|
9f43fd65ec0e06699966080078904f6750e05d42
|
3c5a6d758cf61a5db9ac86421d62907d46253a32
|
refs/heads/main
| 2023-03-23T15:11:46.302657
| 2021-03-20T04:18:38
| 2021-03-20T04:18:38
| 349,147,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
riskbugetResult.R
|
# Risk-budget allocation results: visualisation, regression, PCA and ridge
# regression of portfolio performance measures on asset weights.
library('scatterplot3d')
library('rgl')
library('MASS')
library('lattice')

# Load the risk-budget allocation results (CSV filename is Chinese for
# "risk budget allocation results") and drop the index column
data <- read.csv('C:/Users/admin/Desktop/风险预算配置结果.csv')
data <- data[, 2:11]
# Columns 1-5: asset weights; columns 6-10: performance measures
SH <- data[, 1]; CSI <- data[, 2]; SGE <- data[, 3]; GI <- data[, 4]; HI <- data[, 5]
av <- data[, 6]; ar <- data[, 7]; sr <- data[, 8]; mddr <- data[, 9]
srMDDR <- data[, 10]
# Grid of weights explored for the first two assets
ratio <- c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)

# Mean performance for each feasible (SH weight, CSI weight) pair;
# the CSI weight range shrinks as the SH weight grows so they sum to <= 1
avmeanD <- data.frame()
avmeanList <- c()
for (i in ratio) {
  jrange <- c()
  for (m in 0:(10 - i * 10)) {
    jrange <- c(jrange, m / 10)
  }
  for (j in jrange) {
    # NOTE(review): column 7 is `ar`, not `av` (column 6) — confirm which
    # measure this "av mean" grid is meant to average
    avmean <- mean(data[(data[, 1] == i) & (data[, 2] == j), ][, 7])
    avmeanList <- c(avmeanList, avmean)
    element <- c(i, j, avmean)
    avmeanD <- rbind(avmeanD, element)
  }
}
colnames(avmeanD) <- c('X000001.SH', 'H11001.CSI', 'av mean')
# BUG FIX: `avmeanFrame` was never defined; the aggregated grid built above
# is `avmeanD`
levelplot(avmeanD$`av mean` ~ avmeanD[, 1] + avmeanD[, 2])
plot3d(SH, CSI, av, col = 'blue', size = 4)

# Linear regressions of each performance measure on the asset weights
# (R^2 notes carried over from the original author's comments)
fit1 <- lm(av ~ SH + CSI + SGE + GI)     # weak relation with SH, R^2 = 0.65
fit2 <- lm(ar ~ SH + CSI + SGE + GI)     # not very related to GI, R^2 = 0.43
fit3 <- lm(sr ~ SH + CSI + SGE + GI)     # R^2 = 0.79
fit4 <- lm(mddr ~ SH + CSI + SGE + GI)   # little relation with SH, weak with GI, R^2 = 0.56
fit5 <- lm(srMDDR ~ SH + CSI + SGE + GI) # weak relation with SH, R^2 = 0.83
summary(fit1)
summary(fit2)
summary(fit3)
summary(fit4)
summary(fit5)

# Principal component analysis of columns 2-6
# NOTE(review): this range mixes weights (CSI..HI) with the `av` measure —
# confirm the intended column selection
pc <- princomp(data[2:6])
summary(pc)
comp1 <- pc$scores[, 1]
comp2 <- pc$scores[, 2]
comp3 <- pc$scores[, 3]
comp4 <- pc$scores[, 4]
fit11 <- lm(av ~ comp1 + comp2 + comp3)

# Ridge regression: scan lambda, then refit at the GCV-minimising value
ridge_fit <- lm.ridge(srMDDR ~ SH + CSI + SGE + GI, lambda = seq(0, 1, length = 100))
summary(ridge_fit)
plot(x = ridge_fit$lambda, y = ridge_fit$GCV, type = 'l')
matplot(x = ridge_fit$lambda, y = t(ridge_fit$coef), type = 'l')
lam <- ridge_fit$lambda[which.min(ridge_fit$GCV)]
ridgeFit <- lm.ridge(srMDDR ~ SH + CSI + SGE + GI, lambda = lam)

# Variance inflation factors for fit1 (multicollinearity check)
library(car)
vif(fit1)
|
059a098e2538e12f2eb34cdf394b7f52c24877db
|
ab3a5543fe3418aa0455e8e5a49f02a7cf649bda
|
/R/mask_recovery.R
|
109e0e920aff137c0b7dd971da59ece17536df60
|
[] |
no_license
|
jonasbhend/VOLCprediction
|
19982a69cb3bacef9c5959f1dfb7ed02543ae03a
|
be5171de58db1254eb48c9767227c1cc5f008eca
|
refs/heads/master
| 2021-01-02T22:30:18.360030
| 2015-03-11T07:34:21
| 2015-03-11T07:34:21
| 18,637,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,060
|
r
|
mask_recovery.R
|
#' @name mask_recovery
#' @aliases mask_erupaod
#'
#' @title
#' Mask n years after eruption
#'
#' @description
#' Masks forecast scores for a number of years following each eruption, so
#' that years still recovering from a volcanic eruption are excluded from
#' the averaging interval.
#'
#' @param x input (output from all_scores)
#' @param after number of years after eruption to be masked
#'
#' @keywords utilities
#' @export
mask_recovery <- function(x, after = 6){
  # Nothing to do when no eruption indices are recorded
  if (is.null(x$erup.i)) {
    return(x)
  }
  time.vals <- attr(x$opt, 'time')
  # Seasons per year: typical count of time steps sharing a calendar year
  nseas <- median(table(floor(time.vals)))
  erup.idx <- which(x$erup.i)
  # Time-step indices covering `after` years from each eruption onward
  masked <- unique(as.vector(outer(erup.idx, seq(0, nseas * after - 1), '+')))
  masked <- masked[masked < length(time.vals)]
  x$optimistic[, , masked] <- NA
  x$pessimistic[, , masked] <- NA
  return(x)
}
#' @rdname mask_recovery
#' @param aod threshold over which to mask
#' @export
mask_climaod <- function(x, aod = 0.01){
  # Mask time steps whose climatological AOD exceeds the threshold
  if (!is.null(x$climaod)) {
    high.aod <- x$climaod[1, ] > aod
    x$optimistic[, , high.aod] <- NA
    x$pessimistic[, , high.aod] <- NA
  }
  return(x)
}
|
b8fa33d5ddd33d5549528b0f8fc77386547b2caa
|
7a2af3a535e95c8330bf1988ed02e06af907a147
|
/R/get_channel_sections.R
|
334a9f44801eeba1befe16c7f1e4255d772e437d
|
[] |
no_license
|
JohnCoene/youTubeDataR
|
f29d6b9927ae2228173255d472681ef9b20b0819
|
0d3794423d648f6031ea1dc8eea4c5b0bc7185b1
|
refs/heads/master
| 2020-04-11T08:06:56.707550
| 2018-05-01T19:57:48
| 2018-05-01T19:57:48
| 51,235,971
| 14
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,855
|
r
|
get_channel_sections.R
|
#' getChannelSections
#'
#' @description Returns a list of channelSection resources that match the API
#' request criteria.
#'
#' @param token
#' Your token as returned by \code{\link{youOAuth}}.
#' @param channel.id
#' Indicates that the API response should only contain resources created by
#' the channel. The default value is \code{NULL}.
#' @param part
#' The part parameter specifies a comma-separated list of one or more activity
#' resource properties that the API response will include. The default value
#' is \code{snippet}, can take any of \code{contentDetails}, \code{id} or
#' \code{snippet}. See \code{\link{findParts}}.
#' @param mine
#' Set this parameter's value to true to retrieve a feed of the authenticated
#' user's activities. The default value is \code{FALSE}.
#' @param id
#' Specifies a comma-separated list of IDs that uniquely identify the
#' channelSection resources that are being retrieved. In a
#' \code{getChannelSections} resource, the id property specifies the section's
#' ID.
#' @param hl
#' The hl parameter instructs the API to retrieve localized resource metadata
#' for a specific application language that the YouTube website supports. The
#' parameter value must be a language code included in the list returned by
#' \code{\link{getLanguages}}
#' @param on.behalf.of.content.owner
#' Indicates that the request's
#' authorization credentials identify a YouTube CMS user who is acting on
#' behalf of the content owner specified in the parameter value. This parameter
#' is intended for YouTube content partners that own and manage many different
#' YouTube channels. It allows content owners to authenticate once and get
#' access to all their video and channel data, without having to provide
#' authentication credentials for each individual channel. The actual CMS
#' account that the user authenticates with must be linked to the specified
#' YouTube content owner. This parameter can only be used in a properly
#' authorized request. Note: This parameter is intended exclusively for
#' YouTube content partners. See scope under \code{\link{youOAuth}}.
#' The default value is \code{NULL}.
#' @param verbose
#' If \code{TRUE} prints informational messages in the console.
#' The default value is \code{FALSE}.
#'
#' @details Must specify one (and only one) of \code{mine} (\code{TRUE}),
#' \code{id} or \code{channel.id}.
#'
#' @examples
#' \dontrun{
#' #Authenticate
#' token <- youOAuth(client.id = "something.apps.googleusercontent.com",
#'                   client.secret = "XxxXX1XxXxXxxx1xxx1xxXXX")
#'
#' # search channels on cats
#' search <- searchTube(token, query = "cats", type = "channel")
#'
#' # get channel sections
#' sections <- getChannelSections(token, channel.id = search$channelId [1])
#' }
#'
#' @export
#'
#' @author John Coene \email{jcoenep@@hotmail.com}
getChannelSections <- function(token, channel.id, part = "snippet",
                               mine = FALSE, id, hl = NULL,
                               on.behalf.of.content.owner = NULL,
                               verbose = FALSE) {
  # Missing optional identifiers are normalised to NULL so they can be
  # counted (length 0) in the exclusivity check below
  if(missing(channel.id)) channel.id <- NULL
  if(missing(id)) id <- NULL
  # check required arguments
  # check token
  checkToken(token)
  # Exactly one of channel.id, mine or id must identify the request
  if(is.null(channel.id) && mine == FALSE && is.null(id)) {
    stop("must provide channel.id or mine or id")
  } else {
    c <- mine + length(id) + length(channel.id)
    if(c > 1) {
      stop("can only specify one of id, mine or channel.id")
    } else {
      # mine: translated to its URL query-string form; NULL is silently
      # dropped by paste0 when building the uri below
      if (mine == TRUE) {
        mine <- paste0("&mine=true")
      } else {
        mine <- NULL
      }
    }
  }
  arguments <- namedList(channel.id, hl, on.behalf.of.content.owner, id)
  # buildParameters: turn each non-NULL argument into a "&name=value"
  # query fragment; NULL arguments contribute an empty string
  x <- list()
  for (i in 1:length(arguments)) {
    y <- buildParam(param = names(arguments[i]), values = arguments[[i]])
    x[[i]] <- ifelse(!is.null(y), y, "")
  }
  # collapse
  suffix <- paste(x, collapse = "")
  # Validate the requested part against those allowed for this endpoint
  testPart("getChannelSections", part)
  # build uri
  uri <- paste0("https://www.googleapis.com/youtube/v3/channelSections?part=",
                part, suffix, mine)
  # GET
  # NOTE(review): `config = (token = token)` passes the token object directly
  # as httr's config argument; presumably httr::config(token = token) was
  # intended — confirm against the other API wrappers in this package
  response <- httr::GET(uri, config = (token = token))
  # parse
  json <- jsonlite::fromJSON(rawToChar(response$content),
                             simplifyDataFrame = FALSE)
  # check if error
  if(length(json$error)) {
    stop(paste0("API returned the following error (code ",
                json$error$code,"): ",
                json$error$message))
  # else parse: stack the returned items into one data.frame
  } else {
    dat <- do.call(plyr::"rbind.fill", lapply(json$items, as.data.frame))
  }
  if(verbose == TRUE && nrow(dat)){
    cat(paste0("API returned ", nrow(dat),
               " results."))
  }
  # Rename API fields to the package's column-name conventions
  dat <- renameReturn(dat)
  return(dat)
}
|
8a5dca7e04cb004f158221e4e505b88e0aee734d
|
a9a9af4f010a883720f70391d2af66f437cb15c3
|
/test_script.R
|
4f761fd2c5d3775e327a48a2754ae1cccefb4782
|
[] |
no_license
|
kalden/spartanDB
|
ad4162c78ef54170c21c08a8a7a822fafc457636
|
bc698715cdce55f593e806ac0c537c3f2d59ac7a
|
refs/heads/master
| 2020-03-26T23:32:14.724243
| 2019-02-20T11:05:17
| 2019-02-20T11:05:17
| 145,549,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,362
|
r
|
test_script.R
|
# R & MySQL tutorial
# https://programminghistorian.org/en/lessons/getting-started-with-mysql-using-r#open-mysql-workbench
## Main Script here: end-to-end exercise of the spartanDB workflow against a
## local MySQL database (sampling, results import, analysis, emulation, ABC).
library(RMySQL)
library(spartan)
library(EasyABC)
# R needs a full path to find the settings file
#rmysql.settingsfile<-"~/Documents/sql_settings/spartanDB.cnf"
rmysql.settingsfile<-"/home/kja505/Dropbox/Sarcoid/spartanDB.cnf"
#rmysql.db<-"spartan_ppsim"
rmysql.db<-"spartan_sarcoid"
dblink<-dbConnect(MySQL(),default.file=rmysql.settingsfile,group=rmysql.db)
# Simulation parameters, output measures, baseline values and the sampling
# ranges/increments used by the experiments below
parameters<-c("stableBindProbability","chemokineExpressionThreshold","initialChemokineExpressionValue","maxChemokineExpressionValue","maxProbabilityOfAdhesion","adhesionFactorExpressionSlope")
measures<-c("Velocity","Displacement")
baseline<- c(50,0.3, 0.2, 0.04, 0.60, 1.0)
minvals <- c(10, 0.10, 0.10, 0.015, 0.1, 0.25)
maxvals <- c(100, 0.9, 0.50, 0.08, 0.95, 5.0)
incvals <- c(10, 0.1, 0.05, 0.005, 0.05, 0.25)
measure_scale<-c("Velocity","Displacement")
# Delete the current database structure if it is already there
delete_database_structure(dblink)
# Set up the database:
create_database_structure(dblink, parameters, measures)
#### 1: LHC Sampling
## Route 1: Generate a sample and store in the database
#parameters<-c("chemoThreshold","chemoUpperLinearAdjust","chemoLowerLinearAdjust","maxVCAMeffectProbabilityCutoff","vcamSlope")
generate_lhc_set_in_db(dblink, parameters, 500, minvals, maxvals, "normal", experiment_description="generated_lhc_set")
download_sample_as_csvfile("/home/kja505/Desktop/", dblink, experiment_id=1)
## Note the above has an optional date argument if you don't want to use today's date
## Route 2: Already have an existing sample and want to add it to the database
add_existing_lhc_sample_to_database(dblink, read.csv("~Documents/spartanDB/test_data/LHC_Params.csv",header=T), experiment_description="original ppsim lhc dataset")
#### 2: Robustness Sampling
#parameters<-c("chemoThreshold","chemoUpperLinearAdjust","chemoLowerLinearAdjust","maxVCAMeffectProbabilityCutoff","vcamSlope")
generate_robustness_set_in_db(dblink,parameters, baseline, minvals, maxvals, incvals, experiment_id=NULL, experiment_description="PPSim Robustness")
download_sample_as_csvfile("/home/kja505/Desktop/", dblink, experiment_type="Robustness",experiment_id=5)
#### 3: eFAST Sampling
num_samples<-65
num_curves<-3
generate_efast_set_in_db(dblink, parameters, num_samples, minvals, maxvals, num_curves, experiment_id=NULL, experiment_description="PPSim eFAST2")
download_sample_as_csvfile("/home/kja505/Desktop/", dblink, experiment_type="eFAST",experiment_id=3)
## Or can add an existing generated set to the database, as is shown below
############ RECREATING THE ORIGINAL PPSIM ANALYSES:
#### 4: Adding LHC Results to Database
## Route 1: From Spartan 2, all results can be provided in a single CSV file - this method processes that file and puts all results in the DB
## In this case, we add the parameters from the tutorial set, don't generate them, such that the parameters can tie up with the results
data(pregenerated_lhc)
add_existing_lhc_sample_to_database(dblink, pregenerated_lhc, experiment_description="original ppsim lhc dataset")
# Now add the results for that experiment
experiment_id<-1 # Could have also added by description and date - these removed as default to NULL if ID specified
add_lhc_and_robustness_sim_results(dblink, parameters, measures, experiment_id, results_csv="~/Documents/spartanDB/test_data/LHC_AllResults.csv")
# Or could have used the object
data(ppsim_lhc_results)
add_lhc_and_robustness_sim_results(dblink, parameters, measures, experiment_id, results_obj=ppsim_lhc_results)
# Now analyse the replicates to create a summary result
summarise_replicate_lhc_runs(dblink, measures, experiment_id)
# Now we have the data in a format that spartan can process - so we'll do the analysis
generate_lhc_analysis(dblink, parameters, measures, experiment_id=1)
# Graph an experiment - the graphs are never stored in the database, but we provide methods to graph for an experiment_id
output_directory<-"~/Desktop/"
graph_lhc_analysis(dblink, parameters, measures, measure_scale, output_directory, experiment_id=1, output_type=c("PDF"))
# Or we could generate an R object (as robospartan will) and add that to the database
lhc_set<-spartan::lhc_generate_lhc_sample(FILEPATH=NULL, parameters, 500, minvals, maxvals, "normal", write_csv = FALSE)
add_existing_lhc_sample_to_database(dblink, lhc_set, experiment_description="original ppsim lhc dataset")
#### 5: Adding eFAST Results to Database
## CSV file:
# In this case, we add the parameters from the tutorial set, don't generate them, such that the parameters can tie up with the results
# Pregenerated eFAST sample is now part of the package; eFAST samples are
# distributed as one CSV per curve/parameter set, so extract to a folder
dir.create(file.path(getwd(), "efast"), showWarnings = FALSE)
unzip(system.file("extdata","pregenerated_efast_sample.zip",package="spartanDB"),exdir=file.path(getwd(), "efast"))
num_curves<-3
add_existing_efast_sample_to_database(dblink, parameters, num_curves, parameter_set_path=file.path(getwd(), "efast"), experiment_description="Original PPSim eFAST")
# Now add the results for this experiment - file available online, we're going to extract into the same folder as created for the samples
sample_results<-"~/Documents/spartanDB/test_data/eFAST_Sample_Outputs.zip"
unzip(sample_results,exdir=file.path(getwd(), "efast"))
experiment_id<-2 # Could have also added by description and date - these removed as default to NULL if ID specified
add_efast_sim_results_from_csv_files(dblink, file.path(getwd(), "efast"), parameters, measures, num_curves, experiment_id)
# Now we can create summary stats from the replicates:
summarise_replicate_efast_runs(dblink, parameters, measures, experiment_id=2)
# Now do the eFAST Analysis
generate_efast_analysis(dblink, parameters, measures, experiment_id=2, graph_results=TRUE, output_directory=output_directory)
# Or we could generate an R object (as robospartan will) and add that to the DB
# Current spartan does not generate the dummy - needs specifying
efast_set<-spartan::efast_generate_sample(FILEPATH=NULL, 3, 65, c(parameters,"Dummy"), c(minvals,1), c(maxvals,2), write_csv = FALSE, return_sample = TRUE)
add_existing_efast_sample_to_database(dblink, parameters, num_curves=3, parameters_r_object=efast_set, experiment_description="Original PPSim eFAST")
# Delete the extracted files
unlink(file.path(getwd(), "efast"), recursive=TRUE)
#### 6: Adding Robustness Results to Database
# In this case, we add the parameters from the tutorial set, don't generate them, such that the parameters can tie up with the results
data(ppsim_robustness_set)
# Read these into the database:
add_existing_robustness_sample_to_database(dblink, parameters, ppsim_robustness_set, experiment_description="Original PPSim Robustness")
# Now add the results for this experiment:
experiment_id<-3
data(ppsim_robustness_results)
# NOTE(review): this call passes "Robustness" before experiment_id, unlike the
# LHC calls above which omit it — confirm against the function's signature
add_lhc_and_robustness_sim_results(dblink, parameters, measures, "Robustness", experiment_id, results_obj=ppsim_robustness_results)
# Now create summary stats from these replicates
# Replicate responses not analysed for OAT
generate_robustness_analysis(dblink, parameters, measures, baseline, experiment_id=3)
graph_robustness_analysis(dblink, "/home/kja505/Desktop/",parameters, measures, experiment_id=3)
####### MACHINE LEARNING SECTION
#### Now mine the database for experiments to use to create emulations and ensembles, using spartan
emulators<-create_emulators_from_database_experiments(dblink, parameters, measures, emulator_list=c("RF"),normalise_set=TRUE,experiment_id=1)
# Or can regenerate the emulators from previous emulator data in the database
# NOTE(review): `emulator_list` is not defined as a variable at this point in
# the script — presumably meant to be e.g. c("RF") as above; confirm
emulators<-regenerate_emulators_from_db_data(dblink, parameters, measures, emulator_list, normalise_set=TRUE, experiment_id=5)
# Now to generate some ensembles from the emulators
validation_set<-retrieve_validation_set_from_db_for_emulator(dblink, parameters, measures, experiment_id=4)
# Try to make some predictions
use_emulators_to_make_and_store_predictions(dblink, emulators, parameters, measures, validation_set, normalise=FALSE, normalise_result=TRUE, experiment_description="Predict Validation Set")
# Generate emulators and an ensemble
ensemble<-generate_emulators_and_ensemble_using_db(dblink, parameters, measures, emulator_list=c("RF","SVM"), normalise_set=TRUE, experiment_id=2)
# Use ensemble to generate predictions
validation_set<-retrieve_validation_set_from_db_for_emulator(dblink, parameters, measures, experiment_id=5)
use_ensemble_to_make_and_store_predictions(dblink, ensemble, parameters, measures, validation_set, normalise=FALSE, normalise_result=TRUE, experiment_description="Predict Validation Set2")
##### Demonstration of using the Ensemble to perform SA and add straight to the DB
#1: LHC
emulated_lhc_values<-spartan::lhc_generate_lhc_sample(NULL,parameters,500,minvals,maxvals, "normal",write_csv=FALSE)
analyse_and_add_emulated_lhc_to_db(dblink, emulated_lhc_values, ensemble, parameters, measures, experiment_description="Emulated LHC Analysis", output_directory="/home/kja505/Desktop", normalise_sample=TRUE)
#2:eFAST
emulated_efast_values<-efast_generate_sample(NULL, 3,65,c(parameters,"Dummy"), c(minvals,0), c(maxvals,1), write_csv=FALSE, return_sample=TRUE)
analyse_and_add_emulated_efast_to_db(dblink, emulated_efast_values, ensemble, parameters, measures, experiment_description="Emulated eFAST Analysis2",
                                     graph_results=TRUE, output_directory="/home/kja505/Desktop", normalise_sample=TRUE, normalise_result=TRUE)
all_curve_results<-emulate_efast_sampled_parameters(NULL, ensemble, c(parameters,"Dummy"), measures, 3, normalise = TRUE, csv_file_input=FALSE,
                                                    spartan_sample_obj=emulated_efast_values,write_csv_file_out=FALSE, normalise_result=TRUE)
analyse_and_add_emulated_efast_to_db(dblink, emulated_efast_values, all_curve_results, c(parameters,"Dummy"), measures, experiment_id=NULL, experiment_description="Test emulated eFAST3",
                                     graph_results=TRUE, output_directory="/home/kja505/Desktop")
#### Can we store ABC data in the database too?
# Approximate Bayesian Computation driven by the ensemble, via EasyABC
normalise_values = TRUE
normalise_result = TRUE
# Uniform priors matching the parameter ranges declared at the top
prior=list(c("unif",0,100),c("unif",0.1,0.9),c("unif",0.1,0.5),
           c("unif",0.015,0.08),c("unif",0.1,1.0),c("unif",0.25,5.0))
sum_stat_obs=c(4.4677342593,28.5051144444)
abc_set<-create_abc_settings_object(parameters, measures, ensemble, normalise_values,
                                    normalise_result, file_out = FALSE)
numRunsUnderThreshold=100
tolerance=c(20,15,10.00,7,5.00)
abc_resultSet<-ABC_sequential(method="Beaumont",
                              model=ensemble_abc_wrapper, prior=prior,
                              nb_simul=numRunsUnderThreshold,
                              summary_stat_target=sum_stat_obs,
                              tolerance_tab=tolerance, verbose=FALSE)
store_abc_experiment_in_db(dblink, abc_set, abc_resultSet, parameters, measures, experiment_id=NULL, experiment_description="ABC Test",
                           graph_results=TRUE, output_directory="/home/kja505/Desktop")
# Retrieve stored results for plotting
retrieve_abc_experiment_for_plotting(dblink, experiment_description="ABC Test", experiment_date = Sys.Date())
dbDisconnect(dblink)
|
7e28a077cad970821d5821929f5a2115fd62518c
|
f16d1db416fc25ccb644c73ba5097616cadc6cc4
|
/Aggregate_Data_R.R
|
95abd063712ea91b215eec367a91b69ac1304e73
|
[] |
no_license
|
anjalimutha/R-Programs
|
ce095f0230f154194ac279000b9d11b257429de9
|
9590417775d8f41325d82062c8419462dd6971ac
|
refs/heads/master
| 2021-05-14T09:00:54.711004
| 2018-01-04T23:22:37
| 2018-01-04T23:22:37
| 116,316,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,105
|
r
|
Aggregate_Data_R.R
|
## Aggregation examples: summarising student ability/attendance with data.table.
## Requires data.table to be loaded, and `stulevel` to be an available dataset
## (loaded via data() -- package source not visible here; confirm).
# Open the example dataset
data(stulevel)
# Average ability for all students
stulevel_agg_1<- mean(stulevel$ability, na.rm=TRUE)
# Show results
stulevel_agg_1
# Convert stulevel to data table
stulevel <- data.table(stulevel)
# Declare which variable you want to group on (optional).
# List the name of the data table first, then the name of the field(s).
# NOTE: setkey() sorts the table in place by `grade`.
setkey(stulevel, grade)
# Average ability by grade
stulevel_agg_2 <- as.data.frame(stulevel[, mean(ability, na.rm = TRUE),by = grade])
# Show results
stulevel_agg_2
# Average ability and average attendance by grade
stulevel_agg_3 <- as.data.frame(stulevel[, j=list(mean(ability, na.rm = TRUE),mean(attday, na.rm = TRUE)),by = grade])
# Show results
stulevel_agg_3
# Average ability and average attendance by year and grade
stulevel_agg_4 <- as.data.frame(stulevel[, j=list(mean(ability, na.rm = TRUE),mean(attday, na.rm = TRUE)),by = list(year,grade)])
# Show results
stulevel_agg_4
# Same grouping as above, but with explicitly named result columns
stulevel_agg_5 <- as.data.frame(stulevel[, j=list(mean_ability = mean(ability, na.rm = TRUE), mean_attendance = mean(attday, na.rm = TRUE)), by = list(year,grade)])
# Show results
stulevel_agg_5
|
6382700a507709276237f700d9888e43ff106b8d
|
3b8a4b5b89cf8bf465f9766d276bc7d356706dd7
|
/man/print.ISEdescription.Rd
|
5bf663906b7adaa2074b3b6c2952f336bf2b9413
|
[] |
no_license
|
cran/ISEtools
|
4bf9df44c9b2be5f3a87d696f9ba39406f7fe3e1
|
c7e0b163a0a47aecc32d69372328afd0ba5b72ba
|
refs/heads/master
| 2022-11-07T06:42:15.447467
| 2022-10-19T08:17:57
| 2022-10-19T08:17:57
| 164,702,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
rd
|
print.ISEdescription.Rd
|
\name{print.ISEdescription}
\alias{print.ISEdescription}
\title{Prints tables of ISE parameters.}
\usage{
\method{print}{ISEdescription}(x, ...)
}
\arguments{
\item{x}{ISE analysis results (e.g. object of class analyseISE)}
\item{...}{Other objects passed through.}
}
\value{No return value, prints results from describeISE.}
\description{
Prints tables of ISE parameters for one or multiple ISEs.
}
\seealso{
\code{\link{describeISE}}
}
\author{
Peter Dillingham, \email{peter.dillingham@otago.ac.nz}
}
|
69afbb4a25317e64230c83c33ad3f0934c4d5e13
|
9a5c565e5e8ab108b59d7026abcab1807704942c
|
/man/ctvs.Rd
|
7691e397f2788a4a876fc5ff174016f473a40df5
|
[
"MIT"
] |
permissive
|
Dahaniel/glodatamix
|
150ba78a7a0d569dd397052b6e3ae7eb1c8d12c7
|
40782789813d3229bfcbe84fcd5993a21876d94e
|
refs/heads/master
| 2021-01-15T15:44:24.672042
| 2016-08-17T11:37:01
| 2016-08-17T11:37:01
| 35,495,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,606
|
rd
|
ctvs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ctvs.R
\name{ctvs}
\alias{ctvs}
\title{ctvs}
\usage{
ctvs(data.object, ctv.type, rec.frames = NA, bg.firstframe = NA,
bg.lastframe = NA, sig.firstframe = NA, sig.lastframe = NA,
wa = c(0.2, 0.7), plot = F, report = F, ylim = NA, mfrow = c(4, 3),
suffix = "", meanTraces = F, fp = c(29, 37, 41, 49))
}
\arguments{
\item{data.object}{input data (gloDatamix format)}
\item{ctv.type}{type of CTV to calculate}
\item{rec.frames}{number of frames recorded, will be autodetected if empty}
\item{bg.firstframe}{for CTVs with background subtraction this is the position of the first bg frame}
\item{bg.lastframe}{2nd frame for background (end of window)}
\item{sig.firstframe}{signal onset}
\item{sig.lastframe}{signal offset}
\item{wa}{calc "width @" xx*extremum, needs 2 arguments, e.g. width when 0.2 of peakmax is reached to when peak falls to .9 of peakmax}
\item{plot}{perform a barplot in the end?}
\item{report}{write CTV report file (gives single traces with signal, bg ... marked and values in legend)}
\item{ylim}{ylim for report plots, given as a 2-value vector, e.g. c(-1,5) plots from -1 to 5}
\item{mfrow}{numer of columns and rows to plot per page}
\item{suffix}{optionally, add a describing suffix to filenames; the name of data.object is used when empty}
\item{meanTraces}{does data come from meanTraces.R?}
\item{fp}{numerical vector of length 4 giving positions for ctv.w for putative peak on and offset}
}
\description{
collection of 'CurveToValue' functions
}
\author{
Daniel Münch <daniel@muench.bio>
}
|
5ca195bf15ff75198dd1a656a0d6d676d203f8cc
|
c3826e89c7c78acdcc4596820d03fa96c8710b38
|
/R/data.R
|
5a320daa6e67ee5fa61d1cf896d10a2bf21fa298
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
chen496/SomaDataIO
|
7b393fad010774e17e086555a026c2a38de06415
|
b8f00329aaa283f8243d1064a7bda19b873fdd67
|
refs/heads/master
| 2023-06-24T21:22:02.222540
| 2021-07-27T20:45:52
| 2021-07-27T20:45:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,966
|
r
|
data.R
|
#' Example Data and Objects
#'
#' The `example_data` object is intended to provide existing and prospective
#' SomaLogic customers with example data to enable analysis preparation prior
#' to receipt of SomaScan data, and also for those generally curious about the
#' SomaScan data deliverable. It is **not** intended to be used as a control
#' group for studies or provide any metrics for SomaScan data in general.
#'
#' @name SomaDataObjects
#' @aliases example_data ex_analytes ex_anno_tbl ex_target_names
#' @docType data
#'
#' @section Data Description:
#' The `example_data` object contains a SomaScan V4 study from healthy
#' normal individuals. The RFU measurements themselves and other identifiers
#' have been altered to protect personally identifiable information (PII),
#' but also retain underlying biological signal as much as possible.
#' There are 192 total EDTA-plasma samples across two 96-well plate runs
#' which are broken down by the following types:
#' * 170 clinical samples (client study samples)
#' * 10 calibrators (replicate controls for combining data across runs)
#' * 6 QC samples (replicate controls used to assess run quality)
#' * 6 Buffer samples (no protein controls)
#'
#' @section Data Processing:
#' The standard V4 data normalization procedure for EDTA-plasma samples was
#' applied to this dataset. For more details on the data standardization process
#' see the Data Standardization and File Specification Technical Note. General
#' details are outlined above.
#'
#' @format
#' \describe{
#' \item{example_data}{a `soma_adat` parsed via [read_adat()] containing
#' 192 samples (see below for breakdown of sample type). There are 5318
#' columns containing 5284 analyte features and 34 clinical meta data fields.
#' These data have been pre-processed via the following steps:
#' \itemize{
#' \item hybridization normalized (all samples)
#' \item calibrators and buffers median normalized
#' \item plate scaled
#' \item calibrated
#' \item Adaptive Normalization by Maximum Likelihood (ANML) of
#' QC and clinical samples
#' }
#' **Note1:** The `Age` and `Sex` (`M`/`F`) fields contain simulated values
#' designed to contain biological signal.
#'
#' **Note2:** The `SampleType` column contains sample source/type information
#' and usually the `SampleType == Sample` represents the "client" samples.
#' }
#'
#' \item{ex_analytes}{character string of the analyte features contained
#' in the `soma_adat` object, derived from a call to [getAnalytes()].}
#'
#' \item{ex_anno_tbl}{a lookup table corresponding to a
#' transposed data frame of the "Col.Meta" attribute of an ADAT, with an
#' index key field `AptName` included in column 1, derived from a call to
#' [getAnalyteInfo()].}
#'
#' \item{ex_target_names}{A lookup table mapping `SeqId` feature names ->
#' target names contained in `example_data`. This object (or one like it) is
#' convenient at the console via auto-complete for labeling and/or creating
#' plot titles on the fly.}
#' }
#'
#' @source SomaLogic Inc.
#' @keywords datasets
#' @examples
#' # S3 print method
#' example_data
#'
#' # print header info
#' print(example_data, show_header = TRUE)
#'
#' class(example_data)
#'
#' # Features/Analytes
#' head(ex_analytes, 20)
#'
#' # Feature info table (annotations)
#' ex_anno_tbl
#'
#' # Search via `filter()`
#' ex_anno_tbl %>% dplyr::filter(grepl("^MMP", Target))
#'
#' # Lookup table -> targets
#' # MMP-9
#' ex_target_names$seq.2579.17
#'
#' # gender hormone FSH
#' tapply(example_data$seq.3032.11, example_data$Sex, median)
#'
#' # gender hormone LH
#' tapply(example_data$seq.2953.31, example_data$Sex, median)
#'
#' # Target lookup
#' ex_target_names$seq.2953.31 # tab-completion at console
#'
#' # Sample Type/Source
#' table(example_data$SampleType)
#'
#' # Sex/Gender Variable
#' table(example_data$Sex)
#'
#' # Age Variable
#' summary(example_data$Age)
NULL
|
222d7ac0ab5e0090a0c8f7041fa24015ca8d1dd4
|
1cac56f720d629345939433e197fd8a9476bed6c
|
/R/OVOT-SNOW.R
|
02ae6ae6ff9b6624550e44fe845ebe0a2a82a8fa
|
[] |
no_license
|
sqlitus/R-ONOW-Analysis
|
ece0926b5c5ff2f515cc9c6987164e75f7737126
|
c9077f182b9d0a61d303fb174f388a57fb9b59f7
|
refs/heads/master
| 2020-03-22T07:47:48.166340
| 2019-10-31T01:21:14
| 2019-10-31T01:21:14
| 139,723,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,452
|
r
|
OVOT-SNOW.R
|
# Open Volume Over Time - HISTORICAL BACKLOG REPORT #
# Purpose: dataset of all open tickets for each day
# Note: Using Incident Team History & State History to find open queue volumes
# Log: touch ups, filtering out redundant re-assignments from team hist. Doesn't change OVOT in this case but speeds up script.
# conditionally install packages if not already installed, then load
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, lubridate)
start_time <- Sys.time()
print(paste("Starting:", start_time))
# import all appropriately named files
# UNC path to a Windows network share holding the Excel exports.
path <- "\\\\cewp1650\\Chris Jabr Reports\\ONOW Exports\\INC History"
state_history_files <- list.files(path, "(?i)state_hist", full.names = TRUE)
team_history_files <- list.files(path, "(?i)team_hist", full.names = TRUE)
# merge state history
# NOTE(review): 1:length(x) yields c(1, 0) when no files match; seq_along()
# would be safer. Growing with bind_rows() in a loop is also quadratic.
state_history <- data_frame()
for (i in 1:length(state_history_files)){
data <- readxl::read_excel(state_history_files[i])
# data$import_sheet <- str_extract(state_history_files[i], "(?<=/).*") # positive lookbehind
state_history <- bind_rows(state_history, data)
}
state_history <- state_history %>% select(Number, Field, Value, Start, End) %>% distinct()
# Drop zero-duration ("flash") state rows; anti_join() without `by` matches on
# all shared columns, i.e. exactly the rows captured in filter_out.
filter_out <- state_history %>% filter(Start == End)
state_history <- state_history %>% anti_join(filter_out)
# merge assignment history
team_history <- data_frame()
for (i in 1:length(team_history_files)){
data <- readxl::read_excel(team_history_files[i])
# data$import_sheet <- str_extract(team_history_files[i], "(?<=/).*") # positive lookbehind
team_history <- bind_rows(team_history, data)
}
team_history <- team_history %>% group_by(Number) %>% arrange(Start)
### Filter out incorrect data ----
# filter out 'flash' assignments (zero-duration, Start == End)
team_history <- team_history %>% select(Number, Field, Value, Start, End) %>% distinct()
filter_out <- team_history %>% filter(Start == End)
team_history <- team_history %>% anti_join(filter_out)
# then filter out redundant re-assignments (same team assigned twice in a row)
writeLines(str_glue('Filtering out consecutive redundant assignments. Elapsed time: {round(difftime(Sys.time(),start_time, units="secs"),1)} seconds'))
assignment_group_reassignments <- team_history %>%
group_by(Number) %>%
arrange(Start) %>%
mutate(prev_team = lag(Value),
prev_team_time = lag(Start)) %>%
filter(Value == prev_team)
team_history <- team_history %>% anti_join(assignment_group_reassignments)
# set TZ of imported times to CST, since created calendar defaults to that timezone.
# state_history[c('Start','End')] <- force_tz(state_history[c('Start','End')], tzone = 'US/Central')
# team_history[c('Start','End')] <- force_tz(team_history[c('Start','End')], tzone = 'US/Central')
# calendar table; one snapshot timestamp per day anchored at 08:00.
# NOTE(review): timezone handling here is fragile -- the datetimes are built in
# the session timezone and then force_tz'd to UTC below; confirm this matches
# the timezone of the Excel Start/End values.
calendar_start <- (Sys.Date() - (7*14)) + ( 1 - as.integer(format(Sys.Date(), format = "%u"))) # last 14 weeks
calendar <- data_frame(
date = seq.Date(from = calendar_start, to = today(), by = "days"),
datetime = seq.POSIXt(from = as.POSIXct(paste(calendar_start, "08"), format = "%Y-%m-%d %H"),
to = as.POSIXct(today()+1),
by = "DSTday")
)
calendar$datetime <- force_tz(calendar$datetime, tzone = 'UTC')
# get all distinct incidents from both datasets
distinct_incidents <- bind_rows(state_history %>% select(Number), team_history %>% select(Number)) %>% distinct()
# construct daily list of open tickets per day. use state history first since it's all tickets.
# For each daily snapshot: keep state rows and team rows whose [Start, End)
# interval contains the snapshot time (open-ended End = still active).
# The .x/.y suffixes below come from the two joins (state, then team).
ovot <- data_frame()
for (i in 1:nrow(calendar)){
insert_day <- distinct_incidents %>% mutate(datetime = calendar$datetime[i]) %>%
left_join(state_history, by = "Number") %>%
filter(Start <= calendar$datetime[i] & (calendar$datetime[i] < End | is.na(End))) %>%
left_join(team_history, by = "Number") %>%
filter(Start.y <= calendar$datetime[i] & (calendar$datetime[i] < End.y | is.na(End.y))) %>%
distinct()
ovot <- bind_rows(ovot, insert_day)
}
ovot <- ovot %>% distinct()
# prune & output file: Value.x is the state, Value.y the assigned team.
out <- ovot %>% select(Number, datetime, Status=Value.x, Team=Value.y)
# ## ADD: data for getting historical aging matrix
# ## IMPORT & MERGE TICKET DATA, LEFT JOIN FOR CREATED / PRIORITY INFO
# ## somehow adding duplicates?
# inc_list_files <- list.files(path, "(?i)all inc list", full.names = TRUE)
# all_incidents <- data_frame()
# for (i in 1:length(inc_list_files)){
#   data <- read.csv(inc_list_files[i])
#   all_incidents <- bind_rows(all_incidents, data)
# }
# all_incidents <- all_incidents %>% distinct()
# all_incidents[c('Created','Resolved')] <- force_tz(all_incidents[c('Created','Resolved')], tzone = 'US/Central')
# # inc_list <- readxl::read_excel("\\\\cewp1650\\Chris Jabr Reports\\ONOW Exports\\incident.xlsx")
# # inc_list[c('Created','Resolved')] <- force_tz(inc_list[c('Created','Resolved')], tzone = 'US/Central')
#
# out <- out %>% left_join(select(all_incidents, Number, Priority, Created), by = "Number") %>% distinct()
# output the snapshot table to the network share
writeLines(paste("Exporting file now at", Sys.time(),"\n Elapsed time:", round(difftime(Sys.time(),start_time, units='secs'),2)))
# write.csv(out, na = "", row.names = FALSE, paste0(path, "\\ovot.csv"))
writexl::write_xlsx(x = out, path = paste0(path, "\\ovot.xlsx")) # switch to this
writeLines(paste0("DONE. \nStart time: ", start_time,
"\nEnd time: ", Sys.time(),
"\nElapsed time: ", round(difftime(Sys.time(),start_time, units='secs'),2), " seconds."))
|
2640c112d2cd279289738779dcc77b5f399a8eef
|
b55244f5f1efe5838dc42ca7c2c2468f6b143ddc
|
/man/save_googlecloud.Rd
|
32f564ea60c3b03faf7421d01c429cba0ecc9c3a
|
[
"MIT"
] |
permissive
|
djnavarro/jaysire
|
17b4a557d0ffc4540840e137e4c2f42974a8b29e
|
fd76ccbce491bc8575ac4e751d97db3d5227e327
|
refs/heads/master
| 2021-06-25T18:36:35.041591
| 2021-04-07T11:15:13
| 2021-04-07T11:15:13
| 217,963,163
| 43
| 10
|
NOASSERTION
| 2020-10-17T08:58:48
| 2019-10-28T04:00:57
|
R
|
UTF-8
|
R
| false
| true
| 1,236
|
rd
|
save_googlecloud.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_locally.R
\name{save_googlecloud}
\alias{save_googlecloud}
\title{Return a javascript function to save data to Google datastore}
\usage{
save_googlecloud()
}
\value{
A javascript function to write data to the Google datastore
}
\description{
Return a javascript function to save data to Google datastore
}
\details{
The purpose of \code{save_googlecloud()} is to return a
javascript function that, when called from within the jsPsych experiment,
will write the data to the Google datastore.
The intention is that when an experiment is
to be deployed on Google App Engine (i.e., using the \code{\link{run_googlecloud}()}
function to deploy the experiment), the
\code{save_googlecloud()} function provides the mechanism for saving the data.
If the goal is simply to save the data set at the end of the experiment, the
easiest way to do this is when building the experiment using
\code{\link{build_experiment}()}. Specifically, the method for doing this is
to include the argument \code{on_finish = save_googlecloud()} as part of the
call to \code{\link{build_experiment}()}.
}
\seealso{
\code{\link{run_googlecloud}}, \code{\link{build_experiment}}
}
|
b68adb93b2c08bbcd88ba3d5d68b3060314c231f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tseriesEntropy/examples/Srho.test.ts.p.Rd.R
|
7bb0614a1ff0101779f7422ece1fc0e964ae7441
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
Srho.test.ts.p.Rd.R
|
library(tseriesEntropy)
### Name: Srho.test.ts.p
### Title: Entropy Tests Of Serial And Cross Dependence For Time Series -
### Parallel Version
### Aliases: Srho.test.ts.p
### Keywords: ts
### ** Examples
## Not run:
##D ## ************************************************************
##D ## WARNING: computationally intensive, increase B with caution
##D ## ************************************************************
##D set.seed(13)
##D n <- 120
##D w <- rnorm(n)
##D x <- arima.sim(n, model = list(ar=0.8));
##D y <- arima.sim(n, model = list(ar=0.8));
##D z <- lag(x,-1) + rnorm(n,sd=2) # dependence at lag 1
##D # UNIVARIATE VERSION
##D res1 <- Srho.test.ts.p(w, lag.max = 5, B = 40, ci.type="perm") # independence
##D res2 <- Srho.test.ts.p(x, lag.max = 5, B = 40, ci.type="perm") # dependence
##D
##D # BIVARIATE VERSION
##D res3 <- Srho.test.ts.p(x, y, lag.max = 5, B = 40, ci.type="mbb") # independence
##D res4 <- Srho.test.ts.p(x, z, lag.max = 5, B = 40, ci.type="mbb") # dependence
## End(Not run)
|
e1ee4972026bfb0562561750843632fedaa06f75
|
734e403665bc8433227db1a5c49c39b818c4e6c8
|
/man/initContext.Rd
|
526122e331d7dba0ddb08b3fbe8e8499c631666c
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.context
|
527d71a5db860267a4ece637b7c0ead3c24d521e
|
55f830caf99af08d82ed87cd3534b2d905eb6fb0
|
refs/heads/master
| 2023-06-13T07:11:30.179822
| 2021-07-08T19:46:05
| 2021-07-08T19:46:05
| 161,501,032
| 0
| 0
| null | 2021-07-08T19:46:06
| 2018-12-12T14:37:33
|
R
|
UTF-8
|
R
| false
| true
| 485
|
rd
|
initContext.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/context_main.R
\name{initContext}
\alias{initContext}
\title{Initialise the variable storing the full function call context}
\usage{
initContext(number = getDefault("initContext", "number"))
}
\arguments{
\item{number}{Number of the context. You may use different contexts that are
numbered. The default context number is 1.}
}
\description{
Initialise the variable storing the full function call context
}
|
9ca13f3ee60a09daa95b0ca0850fcef6f25ab975
|
7265ecd0f649137d9c19e4739afd7cff72d9c872
|
/plot3.R
|
648265714d10ef5fa9a6174de0449ad558549e18
|
[] |
no_license
|
juanfgonzo8/ExData_Plotting1
|
43345138a2c15e2a4d11108390f3ed80a15abcc7
|
9d5929ab5924b326a49b5d127398f65b0de8d633
|
refs/heads/master
| 2023-01-04T22:15:06.134001
| 2020-11-05T15:10:03
| 2020-11-05T15:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,400
|
r
|
plot3.R
|
## plot3.R -- energy sub-metering chart for 2007-02-01 and 2007-02-02.
## Expects the unzipped "household_power_consumption.txt" in the working
## directory; writes the chart to "plot3.png" (480 x 480 px).

# Read the full data set; "?" encodes missing values in this file.
power <- read.table(
  file = "household_power_consumption.txt", sep = ";", header = TRUE,
  na.strings = "?",
  colClasses = c("character", "character", rep("numeric", 7))
)

# Keep only the two days of interest (dates are still raw d/m/Y strings here).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine date and time strings into a single timestamp for the x-axis.
timestamp <- strptime(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")

# Render straight to the PNG device.
png(filename = "plot3.png", width = 480, height = 480)
# Empty frame first, then one line per sub-meter.
plot(timestamp, power$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy Sub Metering")
lines(timestamp, power$Sub_metering_1)
lines(timestamp, power$Sub_metering_2, col = "red")
lines(timestamp, power$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       lty = rep("solid", 3),
       legend = c("Sub Metering 1", "Sub Metering 2", "Sub Metering 3"))
dev.off()
|
b459ba118fb2e6ca6406cee297853915d3be655f
|
cb158c0ef3db7c1e0540e0a24913fd8d917b5086
|
/analysis.R
|
5889e9d84eb98e65db2a3ce1ad314d8828009eac
|
[] |
no_license
|
HappyFeet75x/capstone_project
|
4459b7b037c98e6b945f4f5aee8641ebfce8e3f4
|
2720e9fc4cf71f498a71a27903f4cb90301910e7
|
refs/heads/master
| 2021-01-18T06:22:54.628656
| 2016-02-14T19:35:26
| 2016-02-14T19:35:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,189
|
r
|
analysis.R
|
# Load Packages
library(dplyr)
library(xml2)
library(DBI)
library(RSQLite)
## Ensure working directory is correct
## If fails, set working directory to root dir of project, e.g. setwd('~/source/NASS')
# dir.exists() is vectorised; stopifnot() aborts unless every expected project
# subdirectory exists under the current working directory.
expected_dirs <- c("01_abstract", "02_data_review", "data", "doc")
stopifnot(dir.exists(expected_dirs))
## Useful Functions
# Build the NHTSA NASS/CDS XML-export URL for a single case id.
case_url <- function(case_id) {
  sprintf(
    "http://www-nass.nhtsa.dot.gov/nass/cds/CaseForm.aspx?GetXML&caseid=%s&year=&transform=0&docInfo=0",
    case_id
  )
}
# Local cache path where a scraped case's XML/text is stored.
case_path <- function(case_id) {
  file.path("data/cases", paste0(case_id, ".txt"))
}
# Read the filtered id list (tab-separated, no header) and return its 9th
# column -- the NASS case ids -- as a character vector.
get_case_ids <- function() {
  id_table <- read.table("data/nass_case_ids_filtered.txt", sep = "\t",
                         stringsAsFactors = FALSE, header = FALSE)
  as.character(id_table[[9]])
}
## Control variables. Use for skipping steps.
# Toggle the expensive stages independently; re-parsing can be skipped by
# reading the cached SQLite copy instead.
do_webscrape <- FALSE
do_parse <- TRUE
## Source web-scraping code and scrape any remaining cases to data/cases
# `download_all_cases()` is expected to be defined by R/scrape.R -- confirm.
source("R/scrape.R")
if (do_webscrape)
download_all_cases()
## Parse XML to data frame
# `parse_xml()`, `write_db()` and `read_db()` are expected to come from the
# two files sourced below -- confirm against R/parse.R and R/database.R.
source("R/parse.R")
source("R/database.R")
if (do_parse) {
df <- parse_xml() # Parse the XML files
write_db(df) # Cache df to sqlite
} else {
df <- read_db() # Load cached data
}
|
d96809ae7afaea7d638299e003c1f5e6b26239f9
|
9fbb245678e32c6ae2b67272dd4d25ff54a71107
|
/plot4.r
|
2f199b39a5432995335bce88d11efae0f90adbe5
|
[] |
no_license
|
nikot112/ExData_Plotting1
|
f79bd3de98fca76942d2e0996a46b464562b6100
|
82ceb99dd718c60a829f94796121fa03866c2ee7
|
refs/heads/master
| 2020-12-31T02:42:35.177372
| 2015-04-11T02:59:27
| 2015-04-11T02:59:27
| 33,697,428
| 0
| 0
| null | 2015-04-09T23:15:49
| 2015-04-09T23:15:49
| null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
plot4.r
|
## plot4.R -- 2x2 panel of power measurements for 2007-02-01 and 2007-02-02.
## Remember to go to your working directory with the "household_power_consumption.txt" file.
## Writes the result to "plot4.png".

## Read in data ("?" encodes missing values).
## FIX: the argument was misspelled `stringsAsFActors`, which made read.table()
## fail with an "unused argument" error.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?", stringsAsFactors=FALSE)
## Convert Date column from character class to date class
data$Date = as.Date(data$Date, "%d/%m/%Y")
## Subset data frame to only 2007-02-01 and 2007-02-02
data2 = subset(data, Date == "2007-02-01" | Date == "2007-02-02")
## Add datetime column combining date & time.
## FIX: the original first ran strptime() on Time alone, which stamps today's
## date onto every value and breaks the subsequent paste()/as.POSIXct();
## pasting the Date with the raw Time strings parses correctly.
data2$datetime = as.POSIXct(paste(data2$Date, data2$Time))
## Draw the four panels.
## FIX: all plot calls referenced the non-existent column `datetime2`.
par(mfrow = c(2,2))
plot(data2$datetime, data2$Global_active_power, xlab="", ylab="Global Active Power (kilowatts)", type="l")
plot(data2$datetime, data2$Voltage, xlab="datetime", ylab="Voltage", type="l")
plot(data2$datetime, data2$Sub_metering_1, xlab="", ylab="Energy sub metering", type="l")
lines(data2$datetime, data2$Sub_metering_2, col="red")
lines(data2$datetime, data2$Sub_metering_3, col="blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), pch="", lwd=2, cex=.5)
plot(data2$datetime, data2$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="l")
## Copy the on-screen device to a 480x480 PNG, then close it.
dev.copy(png, file="plot4.png")
dev.off()
|
0b76827e5c2fa9a93085530c23c06000abab0d8e
|
037a6e231949b9a2896ffa09792c28a8579e7bf2
|
/notes/EQG_2016/Day2/Exercise3.1/Exercise3.1_data.R
|
0330e623496b1bccddec9ea91e5b274f9acf4839
|
[] |
no_license
|
nicolise/UW_EQG_2017
|
18c478dced5f554cefc0647bab0a760e19e6c256
|
e6b3da988fa84bbb6479c2f055cdf2e306b6f86a
|
refs/heads/master
| 2021-01-22T02:13:04.983131
| 2017-06-12T18:54:04
| 2017-06-12T18:54:04
| 92,337,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,380
|
r
|
Exercise3.1_data.R
|
LITTER NAME SPEED BODY TAIL
761 1 0.162074554 154 77
761 3 0.090189905 149 70
761 4 0.10823 153 69
761 5 0.276094735 154 73
761 6 0.135317997 150 72
761 8 0.203252033 149 68
761 10 0.1926009 153 74
761 11 0.173301644 149 72
761 12 0.168957969 155 73
761 15 0.147275405 160 66
764 1 0.136612022 155 78
764 3 0.104660618 159 74
764 5 0.030443932 155 73
764 6 0.192666606 159 76
765 1 0.037735849 152 76
765 2 0.051652893 153 75
765 5 0.078370426 153 72
765 6 0.07527457 159 77
765 7 0.115306197 152 74
765 8 0.064143682 152 77
765 12 0.106715797 145 77
765 17 0.126767552 149 76
765 19 0.15503876 149 74
765 28 0.135995509 150 79
765 32 0.127507485 149 76
765 37 0.14931243 152 74
766 1 0.334448161 157 75
766 3 0.087796313 158 78
766 4 0.310104595 155 74
766 6 0.248756219 160 74
766 7 0.053937433 161 71
766 10 0.18115942 158 72
766 11 0.133782197 160 69
766 12 0.149700599 162 73
767 2 0.22010149 152 79
767 3 0.221603793 153 76
767 4 0.212406278 150 70
767 8 0.237420907 151 76
767 10 0.247858017 150 71
767 17 0.208463352 151 69
767 19 0.26440214 148 72
767 24 0.199898765 152 79
767 27 0.144512946 154 75
767 28 0.256614401 152 74
767 29 0.171125244 152 78
775 2 0.350444346 154 77
775 3 0.292499596 152 69
775 4 0.208659024 153 75
775 9 0.290560571 152 72
775 10 0.267542042 159 74
775 11 0.238095238 155 76
775 12 0.277487394 151 76
776 2 0.279436444 159 82
776 7 0.23015873 154 78
776 9 0.066050198 154 77
779 2 0.225035945 154 75
780 1 0.212335557 155 80
780 6 0.162577705 152 70
780 11 0.124185753 153 66
781 2 0.241725014 156 78
781 5 0.264914549 152 77
781 7 0.218818381 159 74
783 2 0.19047619 154 73
786 5 0.197787777 149 80
786 6 0.216450216 142 73
786 7 0.187866187 152 72
786 8 0.170209063 152 73
787 4 0.154798762 154 75
787 5 0.254452926 156 73
794 1 0.174216028 151 76
761 2 0.147791841 154.1814 72.2571
761 7 0.142857143 157.1814 79.2571
761 9 0.192678227 148.1814 73.2571
761 13 0.153443891 150.1814 72.2571
764 2 0.108546308 156.1814 69.2571
764 4 0.131984348 153.1814 70.2571
764 7 0.057937428 154.1814 72.2571
765 9 0.07057163 149.1814 70.2571
765 10 0.153099415 152.1814 77.2571
765 11 0.184338617 153.1814 77.2571
765 13 0.134408602 154.1814 78.2571
765 14 0.134150727 155.1814 78.2571
765 15 0.092336103 150.1814 72.2571
765 16 0.136578762 155.1814 75.2571
765 18 0.09718173 155.1814 75.2571
765 23 0.067069081 151.1814 73.2571
765 27 0.141643059 159.1814 74.2571
765 30 0.114416476 152.1814 79.2571
765 31 0.061425061 151.1814 73.2571
765 33 0.042973786 146.1814 76.2571
765 35 0.139275766 148.1814 77.2571
765 38 0.170357751 154.1814 74.2571
766 9 0.222744771 155.1814 74.2571
766 13 0.286532951 152.1814 73.2571
766 16 0.263840413 161.1814 77.2571
767 1 0.172249311 154.1814 74.2571
767 5 0.206261988 154.1814 74.2571
767 6 0.195440957 155.1814 74.2571
767 7 0.238691968 152.1814 73.2571
767 11 0.031030298 154.1814 78.2571
767 12 0.23125949 155.1814 74.2571
767 13 0.164744646 153.1814 77.2571
767 14 0.269667168 152.1814 76.2571
767 15 0.236406619 154.1814 75.2571
767 16 0.146881432 158.1814 73.2571
767 18 0.213255934 151.1814 74.2571
767 20 0.135232537 151.1814 75.2571
767 21 0.254452926 152.1814 71.2571
767 22 0.268870956 152.1814 73.2571
767 23 0.171074681 150.1814 71.2571
767 25 0.15600624 151.1814 75.2571
767 26 0.210012812 152.1814 75.2571
767 30 0.152722074 155.1814 75.2571
775 1 0.222717149 154.1814 68.2571
775 5 0.272797311 150.1814 72.2571
775 6 0.301204819 151.1814 71.2571
775 7 0.163087623 155.1814 70.2571
775 13 0.261676871 150.1814 74.2571
776 1 0.226925392 159.1814 79.2571
776 3 0.228035758 156.1814 74.2571
776 5 0.294985251 161.1814 76.2571
776 6 0.222222222 155.1814 71.2571
776 8 0.237413188 159.1814 72.2571
776 11 0.232018561 158.1814 74.2571
779 1 0.273972603 154.1814 80.2571
779 3 0.139808612 154.1814 75.2571
780 3 0.101936799 153.1814 73.2571
780 4 0.182866372 154.1814 75.2571
780 5 0.172711572 153.1814 73.2571
780 8 0.134952767 151.1814 73.2571
780 9 0.096976291 152.1814 76.2571
780 10 0.190949468 153.1814 74.2571
781 3 0.130548303 150.1814 77.2571
781 4 0.254692557 156.1814 80.2571
781 6 0.226708521 153.1814 76.2571
781 8 0.208333333 148.1814 76.2571
786 1 0.233359642 153.1814 69.2571
786 2 0.228683689 152.1814 73.2571
786 3 0.188871054 154.1814 72.2571
786 4 0.229885057 153.1814 71.2571
787 7 0.274733569 150.1814 70.2571
795 1 0.240963855 155.1814 71.2571
795 3 0.212866924 151.1814 71.2571
|
344af45736bdf8cd72d95e305f1b35c4087d4954
|
e54b786c875ff2e6c1eae31ee774c6b6147794c2
|
/R/S4/find_annotations.R
|
7c9fdbfbf55cc83d4750253f34bffc45816a308c
|
[] |
no_license
|
hjanime/STAU1_hiCLIP
|
9f1d5516b6b5f7413709315b2f0985d729719480
|
861051cae1b2508a9c3871bf24f377479ae460a7
|
refs/heads/master
| 2020-12-11T05:26:01.387765
| 2015-01-07T22:00:06
| 2015-01-07T22:00:06
| 36,923,073
| 1
| 1
| null | 2015-06-05T09:05:11
| 2015-06-05T09:05:11
| null |
UTF-8
|
R
| false
| false
| 8,780
|
r
|
find_annotations.R
|
##############################################################
#' annotateHybrid
#'
#' annotateHybrid annotates hybrid reads.
#' @param object A \code{HybridGRL} object to be annotated.
#' @param a.df Annotation data.frame containing the category and annotation of each gene.
#' @param t.gtf.gr Transcriptomic coordinates of protein-coding genes as a \code{GRanges} object.
#' @param mRNA.with.intron.grL Genomic coordinates of genes, with intron annotation, as a \code{GRanges} object.
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' annotateHybrid(hgrl, a.df, t.gtf.gr, mRNA.with.intron.grL)
# Generic: annotate both arms (L and R) of the hybrid reads in a HybridGRL.
setGeneric(
name = "annotateHybrid",
def = function(object, a.df, t.gtf.gr, mRNA.with.intron.grL){standardGeneric("annotateHybrid")}
)
setMethod(
f = "annotateHybrid",
signature = "HybridGRL",
definition = function(object, a.df, t.gtf.gr, mRNA.with.intron.grL){
## Define Method specific functions
# Copy the `annot` metadata of the overlapping feature in `t.gtf` onto each
# range of `gr.input`. Only the first base of each read is used for the
# overlap test, and only the first hit per read is kept, so each read
# receives at most one annotation.
annotateProteinCoding <- function(gr.input, t.gtf){
gr <- gr.input
# Restrict the annotation to the reference sequences present in `gr` so the
# seqlevels of the two objects can be aligned before findOverlaps().
sub.t.gtf <- t.gtf[seqnames(t.gtf) %in% unique(as.character(seqnames(gr)))]
seqlevels(sub.t.gtf) <- unique(as.character(seqnames(sub.t.gtf)))
seqlevels(gr) <- as.character(seqlevels(sub.t.gtf))
gr.temp <- gr
## The next line was added to define the hybrid reads by the position of the first base.
end(gr.temp) <- start(gr.temp)
ol <- findOverlaps(gr.temp, sub.t.gtf, type = "any")
ol <- as.matrix(ol)
# Keep only the first overlap per read.
ol <- ol[!duplicated(ol[, 1]), ]
elementMetadata(gr)$annot[ol[, 1]] <- as.character(elementMetadata(sub.t.gtf)$annot[ol[, 2]])
return(gr)
}
# Classify each read by its reference sequence name (rRNA*, trna*, ENSG*,
# chr*) and fill the `category` and `annot` metadata columns accordingly.
addCategoryAnnot <- function(gr, a.df, t.gtf.gr, mRNA.with.intron.grL){
annotation.df <- a.df
t.gtf <- t.gtf.gr
mRNA.with.intron <- mRNA.with.intron.grL
# The caller must have pre-created the `category`/`annot` metadata columns.
if(!all(c("category", "annot") %in% names(elementMetadata(gr)))){
stop("column have to be pre-prepared")
}
# The first three columns of the annotation table must all be character
# (used below via $gene_id, $category and $annot -- column order assumed
# to match; confirm against the annotation file).
if(!all(
c(
is.character(annotation.df[, 1]),
is.character(annotation.df[, 2]),
is.character(annotation.df[, 3])
)
)
)
{
stop("annotation data frame only accept character")
}
# 1st: annotate rRNA and tRNA (reads mapped directly to rRNA/tRNA references)
elementMetadata(gr)$category[grep("^rRNA", seqnames(gr))] <- "rRNA"
elementMetadata(gr)$annot[grep("^rRNA", seqnames(gr))] <- as.character(seqnames(gr))[grep("^rRNA", seqnames(gr))]
elementMetadata(gr)$category[grep("^trna", seqnames(gr))] <- "tRNA"
elementMetadata(gr)$annot[grep("^trna", seqnames(gr))] <- "tRNA"
# 2nd: annotate protein_coding and ncRNAs
# Named lookup vectors keyed by Ensembl gene id.
ensg.category <- annotation.df$category
names(ensg.category) <- annotation.df$gene_id
ensg.annot <- annotation.df$annot
names(ensg.annot) <- annotation.df$gene_id
elementMetadata(gr)$category[grep("^ENSG", seqnames(gr))] <- ensg.category[as.character(seqnames(gr[grep("^ENSG", seqnames(gr))]))]
elementMetadata(gr)$annot[grep("^ENSG", seqnames(gr))] <- ensg.annot[as.character(seqnames(gr[grep("^ENSG", seqnames(gr))]))]
# elementMetadata(gr)$annot[elementMetadata(gr)$category == "protein_coding"]
# Protein-coding reads are refined via the transcript GTF annotation
# (presumably to utr5/CDS/utr3 -- confirm against t.gtf.gr$annot levels).
gr[elementMetadata(gr)$category == "protein_coding"] <- annotateProteinCoding(gr[elementMetadata(gr)$category == "protein_coding"], t.gtf)
# if mapped to minus strand of gene => intergenic
elementMetadata(gr)$category[(as.character(strand(gr)) == "-") & (elementMetadata(gr)$annot %in% c("protein_coding", "lncRNA", "other_ncRNAs", "miRNA"))] <- "intergenic"
elementMetadata(gr)$annot[(as.character(strand(gr)) == "-") & (elementMetadata(gr)$annot %in% c("protein_coding", "lncRNA", "other_ncRNAs", "miRNA"))] <- "intergenic"
# 3rd: genomic regions as intron or intergenic
# Genome-mapped (chr*) reads default to intergenic, then inherit the gene
# model annotation from `mRNA.with.intron` where they overlap one.
elementMetadata(gr)$annot[grep("^chr", seqnames(gr))] <- "intergenic"
if(length(gr[grep("^chr", seqnames(gr))]) != 0){
gr[grep("^chr", seqnames(gr))] <- annotateProteinCoding(gr[grep("^chr", seqnames(gr))], mRNA.with.intron)
}
elementMetadata(gr)$category[grep("^chr", seqnames(gr))] <- elementMetadata(gr)$annot[grep("^chr", seqnames(gr))]
return(gr)
}
## Define Method specific functions: Up to here
# Annotate both arms of the hybrid reads with the same rules.
object$L <- addCategoryAnnot(object$L, a.df, t.gtf.gr, mRNA.with.intron.grL)
object$R <- addCategoryAnnot(object$R, a.df, t.gtf.gr, mRNA.with.intron.grL)
validObject(object)
return(object)
}
)
##############################################################
#' plotHybridRNASource
#'
#' plotHybridRNASource plot RNA source of hybrid reads
#' @param object A \code{HybridGRL} object to be examined.
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' plotHybridRNASource(hgrl)
# Generic: print counts and percentages of the RNA source of each hybrid arm
# and draw a pie chart of the annotation categories.
setGeneric(
name = "plotHybridRNASource",
def = function(object){standardGeneric("plotHybridRNASource")}
)
setMethod(
f = "plotHybridRNASource",
signature = "HybridGRL",
definition = function(object){
# Pool the annotations of both arms (L and R); each arm counts once.
annotation.each.reads.df <- data.frame(
category = c(elementMetadata(object$L)$category, elementMetadata(object$R)$category),
annot = c(elementMetadata(object$L)$annot, elementMetadata(object$R)$annot)
)
# Collapse the individual rRNA reference names into a single "rRNA" level,
# then fix the display order of the annotation categories.
annot.collapase.rRNA <- annotation.each.reads.df
annot.collapase.rRNA$annot <- as.character(annot.collapase.rRNA$annot)
annot.collapase.rRNA$annot[grep("^rRNA", annot.collapase.rRNA$annot)] <- "rRNA"
annot.collapase.rRNA$annot <- factor(as.character(annot.collapase.rRNA$annot), levels = c("utr5", "CDS", "utr3", "protein_coding", "lncRNA", "miRNA", "other_ncRNAs", "rRNA", "tRNA", "intron", "intergenic"))
# Absolute counts per RNA source.
cat("RNA source of hybrid reads [total]\n")
print(table(as.character(annot.collapase.rRNA$annot)))
cat("\n")
# Percentages, rounded to one decimal place.
cat("RNA source of hybrid reads [%]\n")
print(round(
prop.table(table(as.character(annot.collapase.rRNA$annot))) * 100
, digits = 1)
)
cat("\n")
# Pie chart: a single stacked bar transformed to polar coordinates, with all
# axis decoration removed.
print(ggplot(annot.collapase.rRNA) +
geom_bar(aes(x = factor(1), fill = annot), width = 1) +
coord_polar(theta = "y") +
theme(axis.ticks = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
panel.background = element_blank(),
legend.title=element_blank()
)
)
}
)
##############################################################
#' plotHybridPairRNASource
#'
#' Similar to plotHybridRNASource, but the summary is computed from the PAIR of
#' RNA sources (left-arm annotation joined to right-arm annotation). Only valid
#' for intra-molecular duplexes, i.e. both arms mapped to the same sequence.
#' @param object A HybridGRL object to be examined.
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' plotHybridPairRNASource(hgrl)
setGeneric(
    name = "plotHybridPairRNASource",
    def = function(object){standardGeneric("plotHybridPairRNASource")}
)

setMethod(
    f = "plotHybridPairRNASource",
    signature = "HybridGRL",
    definition = function(object){
        ## Guard: both arms of every read must map to the same sequence.
        if(!all(seqnames(object$L) == seqnames(object$R))){
            stop("This function is only applicable to those mapped to intra_molecular duplexes")
        }
        ## Join the left/right annotations into a single "left_right" label per read.
        pair.df <- data.frame(
            category = paste(elementMetadata(object$L)$category, elementMetadata(object$R)$category, sep = "_"),
            annot = paste(elementMetadata(object$L)$annot, elementMetadata(object$R)$annot, sep = "_")
        )
        ## Keep the top pair labels and lump the rest via hotfactor() (project helper);
        ## build a lookup from original label -> collapsed label.
        freq.df <- as.data.frame(table(pair.df$annot))
        freq.df$Var2 <- hotfactor(freq.df, n = 3)
        name.dict <- setNames(as.character(freq.df$Var2), as.character(freq.df$Var1))
        ## Report absolute counts and percentages per pair label.
        cat("RNA source of hybrid reads [total]\n")
        print(table(as.character(pair.df$annot)))
        cat("\n")
        cat("RNA source of hybrid reads [%]\n")
        print(round(prop.table(table(as.character(pair.df$annot))) * 100, digits = 1))
        cat("\n")
        ## Pie chart of the collapsed pair labels.
        pair.df$annot_collapsed <- name.dict[as.character(pair.df$annot)]
        print(ggplot(pair.df) +
                  geom_bar(aes(x = factor(1), fill = annot_collapsed), width = 1) +
                  coord_polar(theta = "y") +
                  theme(axis.ticks = element_blank(),
                        axis.title.y = element_blank(),
                        axis.text.y = element_blank(),
                        axis.text.x = element_blank(),
                        axis.title.x = element_blank(),
                        panel.background = element_blank(),
                        legend.title = element_blank()
                  )
        )
    }
)
|
7217ec6d00f59fbfca320265fe992d4c8d1a9f20
|
869759586dae387b361e31e40e6d21eb6defaa49
|
/HW 5 - Perceptron/HW5.R
|
ed79257de243784d64665ca8131a4b436f1344eb
|
[] |
no_license
|
rishi-ag/ML_Assignments
|
49cd460b39600cd0690fd606007f0157dfc6f518
|
487dfbac61b5c08c19881924726497db8a2a3775
|
refs/heads/master
| 2021-05-29T15:18:04.470385
| 2015-05-09T18:25:06
| 2015-05-09T18:25:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,507
|
r
|
HW5.R
|
# HW 5 - Perceptron: study how convergence of the perceptron algorithm relates
# to the data radius R and the separation margin gamma, over a family of
# progressively less separable Gaussian class means.
if (!require("devtools")) install.packages("devtools")   # FIX: was missing ")" after require("devtools") -- syntax error
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("reshape2")) install.packages("reshape2")
if (!require("ggthemes")) install.packages("ggthemes")
# helper.R provides get.data() and perceptron() used below.
source("c:/Users/Rishabh/Documents/BGSE Material/Sem2/14D005 Machine Learning/Seminar 5/HW/HW/helper.R")
#data parameters: second coordinate of class-1 mean slides from 33 down to 20,
#making the two classes less and less separable.
mu <- seq(33, 20, -0.5)
comparison <- c()
plots <- c()
# FIX: W and b were passed to perceptron() on the first loop iteration before
# ever being assigned, which errors. Initialise them here.
# NOTE(review): assumes helper.R's perceptron() accepts NULL starting weights -- confirm.
W <- NULL
b <- NULL
for (i in seq_along(mu)) {   # seq_along() instead of 1:length(mu)
  rhoXY <- c(0.8, 0.5)
  sd1 <- c(2, 2)
  mu1 <- c(15, mu[i])
  sd2 <- c(1, 1.5)
  mu2 <- c(15, 15)
  #test and train data (get.data() comes from helper.R)
  data.train <- get.data(300, 300, mu1, mu2, sd1, sd2, rhoXY, 2500)
  data.test <- get.data(200, 200, mu1, mu2, sd1, sd2, rhoXY, 2100)
  #Calculate R: radius of the smallest ball centred on the data mean that
  #contains all training points.
  mean.data <- colMeans(data.train[, 1:2])
  R <- max(apply(data.train[, 1:2], 1, function(x){sqrt(sum((x - mean.data)^2))}))
  #Perceptron Algorithm
  maxIter <- 100
  info <- perceptron(maxIter, data.train, data.test, W, b)
  errors <- info[, 1:2]
  errors$iter <- 1:nrow(errors)
  #calculate gamma: margin of the final hyperplane over the training data
  W <- as.matrix(info[nrow(info), 3:4])
  b <- info[nrow(info), 5]
  data <- t(as.matrix(data.train[, 1:2]))
  gamma <- min(abs((W %*% data + b) / norm(W, type = "2")))
  #test vs train error curves (plotting left commented out, as in the original)
  melt.error <- melt(errors, id.vars = "iter", variable.name = "Type", value.name = "Error")
  #plots <- rbind(plots, ggplot(melt.error, aes(x = iter, y = Error, color = Type)) +
  #  geom_line() + scale_colour_tableau())
  ################################################################################
  comparison <- rbind(comparison, c(R, gamma, nrow(info)))
}
colnames(comparison) <- c("R", "gamma", "convergence")
comparison <- as.data.frame(comparison)
comparison <- round(comparison, 3)
#Scatter of iterations-to-convergence against data radius, coloured by margin.
p1 <- ggplot(comparison, aes(x = R, y = convergence, color = gamma)) +
  geom_point(size = 5, alpha = 0.9) +
  scale_color_gradient(low = "#fd8d3c", high = "#253494")
p1 <- p1 +
  annotate("text", label = "Non Seperable", x = 10, y = 90, size = 3, colour = "black") +
  annotate("text", label = "No Convergence", x = 10, y = 80, size = 3, colour = "black")
jpeg(filename = 'CovergenceTrend.jpg', units = "in", width = 9, height = 9, res = 400)
print(p1)   # FIX: a bare "p1" does not auto-print inside source(), leaving the jpeg blank
dev.off()
|
7239d055c176d4f85b46d46c460ccfe48d81029a
|
57e77ea9dfe2ce75d4c7e69490901d21059bd410
|
/20220330_repetitive_R_code.R
|
0b6298f0f633a2878c72fceeb711a4c3e2b091db
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
oulib-resdata/repetitive_r
|
74bfb32b5787be06f6448eaf6be3b2013e741d42
|
5163f4bd30620d0802cf4396c2876aac4f22817a
|
refs/heads/main
| 2023-05-29T23:35:59.193119
| 2023-05-17T17:59:02
| 2023-05-17T17:59:02
| 188,274,852
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,504
|
r
|
20220330_repetitive_R_code.R
|
# Teaching script: vectorised operations, apply-family functions, and loops in R.

# Vectorised arithmetic: operations act element-wise on whole vectors.
x <- 1:4
x * 2
y <- 6:9
x + y
mtcars$mpg
#Challenge 1 - convert mtcars$mpg to kpl
#conversion factor is 0.43
mtcars$kpl <- mtcars$mpg * 0.43
# Comparisons are vectorised too; result is a logical vector.
greater_than_two <- x > 2
sqrt(x)
# lapply() applies a function to every column of a data frame.
lapply(X = mtcars, FUN = mean, na.rm = TRUE)
# Scale columns 2:3 by their column means (anonymous function).
means_mtcars <- mtcars
means_mtcars[2:3] <- lapply(X = mtcars[2:3],
                            FUN = function(y) y / mean(y))
rm(mtcars)  # drop the locally modified copy; the original dataset remains available

# Loops
# basic structure
# for (iterator in set_of_values){
#   do a thing
# }
for (i in 1:5){
  print(paste("We are on i loop part", i))
  for (j in c('a', 'b', 'c')){
    print(paste("We are on loop part", i, j))
  }
}
# Growing a vector inside a loop (shown for teaching; preallocation is faster).
output_vector <- c()
for (i in 1:5){
  print(paste("We are on i loop part", i))
  for (j in c('a', 'b', 'c')){
    print(paste("We are on loop part", i, j))
    temp_output <- paste(i, j)
    output_vector <- c(output_vector, temp_output)
  }
}
# What will the value of temp_output be at the last loop run?
# How many items will output_vector contain at the end?
# Preallocated alternative: fill a 5x3 matrix instead of growing a vector.
output_matrix <- matrix(nrow = 5, ncol = 3)
j_vector <- c('a', 'b', 'c')
for (i in 1:5){
  print(paste("We are on i loop part", i))
  for (j in 1:3){
    temp_j_value <- j_vector[j] #1, i, j
    print(paste("We are on loop part", i, temp_j_value))
    temp_output <- paste(i, temp_j_value)
    output_matrix[i,j] <- temp_output
  }
}
sample(1:2, 1)
#1 = challenge
#2 = functions
#Step1
#making an output - optional here, but can do
#names of needed columns
mpg <- mtcars$mpg
cyl <- mtcars$cyl
names(mtcars)
#Step 2: which combinations of rows and columns
#are we getting mean for?
#columns are mpg and cyl
unique(cyl)
unique(mtcars$cyl)
#means of rows where cyl = 4,6,8
#Step 3: generate a mean for just one of these groups.
mean(mtcars[mtcars$cyl == 4, "mpg"])
#Step 4: what values need to repeat/iterate in the loop?
# (template line: iCyl is not defined yet, so this errors if run as-is)
mean(mtcars[mtcars$cyl == iCyl, "mpg"])
#Step 5: how to get the vector of values?
unique(mtcars$cyl)
#Step 6: put together the loop
cyl_vector <- unique(mtcars$cyl)
# FIX: iterate over the actual length of cyl_vector rather than a hard-coded 1:3.
for (iCyl in seq_along(cyl_vector)) {
  iCyl_tmp <- cyl_vector[iCyl]
  tmp <- mean(mtcars[mtcars$cyl == iCyl_tmp, "mpg"])
  print(paste(iCyl_tmp, "cylinders has", tmp, "mpg"))
}
# Convert a temperature from Fahrenheit to Kelvin.
#
# temp:    numeric, temperature(s) in degrees Fahrenheit (vectorised).
# degrees: unit label appended to the result (default "K").
# Returns a character vector such as "273.15 K".
fahr_to_kelvin <- function(temp, degrees = "K"){
  # FIX: the Kelvin offset is 273.15, not 273.5.
  kelvin <- ((temp - 32) * (5/9)) + 273.15
  kelvin_written <- paste(kelvin, degrees) # coerces the number to character
  return(kelvin_written)
}
# Named arguments can be supplied in any order.
object <- fahr_to_kelvin(degrees = "K", temp = 50)
# Run the companion conversion script in the current session.
source("20220330_conversion.R")
|
d58b32b43710299de2c3e8cfa5a39ec2f783ad2a
|
db86076191c1d17eee12f39ef6712bc4b8e85925
|
/Plot3.R
|
6e9b8ca1e2a89a36c33870cf4a9e46b8b80f0c41
|
[] |
no_license
|
remibacha/ExData_Plotting1
|
19ea637a4803f19ff3f2bb5ebcaa23715c94d57c
|
401e7c94a581c0af6cf864f9acf4d3594bcc1c0b
|
refs/heads/master
| 2021-07-01T19:41:55.874315
| 2017-09-22T07:10:18
| 2017-09-22T07:10:18
| 104,225,020
| 1
| 0
| null | 2017-09-20T14:18:30
| 2017-09-20T14:18:30
| null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
Plot3.R
|
# Coursera Exploratory Data Analysis -- Plot 3: sub-metering over 2007-02-01/02.
setwd("~/Documents/Scripts_R/Coursera")

# Download and unpack the household power-consumption data.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = "~/Documents/Scripts_R/Coursera/household_power_consumption.zip", method = "curl")
unzip("household_power_consumption.zip")

# Read the data; "?" marks missing values in the source file.
dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
dataset$Time <- as.character(dataset$Time)
dataset[, c(3, 4, 5, 6, 7, 8, 9)] <- sapply(dataset[, c(3, 4, 5, 6, 7, 8, 9)], as.numeric)

# Restrict to the two target days and drop incomplete rows.
filtered_dataset <- subset(dataset, Date >= "2007-02-01" & Date <= "2007-02-02")
filtered_dataset <- filtered_dataset[complete.cases(filtered_dataset), ]
filtered_dataset$Date_Time <- as.POSIXct(paste(filtered_dataset$Date, filtered_dataset$Time), format = "%Y-%m-%d %H:%M:%S")

#Plot 3 -- three sub-metering series on one line plot.
# (The original wrapped these calls in with(dataset, ...); every reference is
# fully qualified, so plain calls are equivalent.)
plot(filtered_dataset$Sub_metering_1, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
lines(filtered_dataset$Sub_metering_2, col = 'Red')
lines(filtered_dataset$Sub_metering_3, col = 'Blue')
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the screen device to a PNG file.
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
|
e91e4be95589ab208e889fe322ce641b1b75a171
|
5733768465a94706d5d902b48297a48eb5b7693c
|
/R-main/proportion_sig_test.R
|
07563724fae270ab11f6036ab1b2a4bb5b0d2bb2
|
[] |
no_license
|
eriqande/rubias
|
18a05dee424fbd8da56654ae5d4702bd9d23bb3c
|
8273ffdedd92b3bbcef6a6fd0e5a4391e93ee0b8
|
refs/heads/master
| 2023-04-06T23:18:45.363792
| 2022-02-09T22:17:37
| 2022-02-09T22:17:37
| 63,456,523
| 2
| 2
| null | 2019-06-09T18:04:26
| 2016-07-15T23:34:15
|
HTML
|
UTF-8
|
R
| false
| false
| 5,461
|
r
|
proportion_sig_test.R
|
# Analysis of reporting-unit (RU) bias in mixture-proportion estimates (rubias).
library(ggplot2)
library(dplyr)
# Loads the saved workspace; presumably provides coal_rho_data and
# Hass_rho_data used throughout this script -- confirm against the .RData.
load("~/R/rubias/cjfas_data.RData")
# Exploring the relationship between RU bias and the difference of the true rho and Nc/P
### STATISTICS IN THE PAPER APPEAR AT THE BOTTOM OF THE DOCUMENT ####

# First, in coalescent data; calculate residuals both with (prop_diff) and without (diff)
# expressing as a proportion of the true rho
coal_rho_data$prop_diff <- (coal_rho_data$Estimate - coal_rho_data$true_rho) / coal_rho_data$true_rho
coal_rho_data$diff <- (coal_rho_data$Estimate - coal_rho_data$true_rho)
coal_rho_data$Np_C <- rep(c(2/17, 3/17, 12/17), 100)
coal_rho_data$Np_diff <- coal_rho_data$Np_C - coal_rho_data$true_rho

# Model based on proportional residuals
coal_mod_prop <- lm(prop_diff ~ Np_diff * method, data = coal_rho_data)
plot(coal_mod_prop)
# Appears non-normal and heteroscedastic, so simple regression probably inappropriate.
summary(coal_mod_prop)
# Significant relationship between the residual as a proportion of the true value
# and the difference between the true rho and the Np/C expectation (P = 5.17e-10).
# Also a significant effect of method (P = .0234, PB reduces slope of relationship).
# Interaction of the two is significant (P = 5.34e-05), i.e. PB changes the slope
# of the relationship in the two by shrinking residuals towards 0.

# Try again with plain residuals
coal_mod <- lm(diff ~ Np_diff * method, data = coal_rho_data)
plot(coal_mod)
# Appears to meet assumptions of linear models
summary(coal_mod)
# Highly significant relationship between the plain residual and the
# difference between the true rho and the Np/C expectation (P = <2e-16)
# But no effect of method (expected, since PB has a net zero effect when averaged
# across positively and negatively biased populations). Highly significant interaction
# (P = <2e-16) confirms this.

#Graphs of the above relationship, split by reporting unit and method
cp <- ggplot2::ggplot(coal_rho_data, aes(x = Np_diff, y = prop_diff, colour = repunit)) +
  geom_point() +
  facet_grid(repunit ~ method) +
  labs(x='Np/C - True Rho', y = 'Proportional Residual') +
  scale_color_brewer(palette = "Set1")
# Clearly not linear, so the proportional bias statistics must be disregarded.
# Now with just plain residuals
c <- ggplot2::ggplot(coal_rho_data, aes(x = Np_diff, y = diff, colour = repunit)) +
  geom_point() +
  facet_grid(repunit ~ method) +
  geom_abline(intercept = 0, slope = 0, linetype = "dashed") +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm") +
  labs(x='Np/C - True Rho', y = 'Residual') +
  scale_color_brewer(palette = "Set1")
# Similar graph, but not split by reporting unit
cbp <- ggplot2::ggplot(coal_rho_data, aes(x = Np_diff, y = diff)) +
  geom_point(aes(colour = repunit)) +
  facet_grid(~ method) +
  geom_abline(intercept = 0, slope = 0, linetype = "dashed") +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm", colour = "darkgrey", level = 0) +
  labs(x='Np/C - True Rho', y = 'Residual') +
  scale_color_brewer(palette = "Set1")

# Now the same, but with Hasselman alewife data
Hass_rho_data$prop_diff <- (Hass_rho_data$Estimate - Hass_rho_data$true_rho) / Hass_rho_data$true_rho
Hass_rho_data$diff <- (Hass_rho_data$Estimate - Hass_rho_data$true_rho)
Hass_rho_data$Np_C <- rep(c(6/21, 3/21, 12/21), 100)
Hass_rho_data$Np_diff <- Hass_rho_data$Np_C - Hass_rho_data$true_rho
hass_mod_prop <- lm(prop_diff ~ Np_diff * method, data = Hass_rho_data)
plot(hass_mod_prop)
# very non-normal, assumptions violated
summary(hass_mod_prop)
# No significant relationships, should ignore anyways
hass_mod <- lm(diff ~ Np_diff * method, data = Hass_rho_data)
plot(hass_mod) # Looks better, although not as normal as coalescent
summary(hass_mod)
# Significant relationship between the plain residual
# and the difference between the true rho and the Np/C expectation (P = 0.0417)
# No significant interaction (consistent with bias correction less successful than
# in coalescent simulation)
hp <- ggplot2::ggplot(Hass_rho_data, aes(x = Np_diff, y = prop_diff, colour = repunit)) +
  geom_point() +
  facet_grid(repunit ~ method) +
  labs(x='Np/C - True Rho', y = 'Proportional Residual') +
  scale_color_brewer(palette = "Set1")
# Yep, again, nonlinear, and so regression is inappropriate
h <- ggplot2::ggplot(Hass_rho_data, aes(x = Np_diff, y = diff, colour = repunit)) +
  geom_point() +
  facet_grid(repunit ~ method) +
  geom_abline(intercept = 0, slope = 0, linetype = "dashed") +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm") +
  labs(x='Np/C - True Rho', y = 'Residual') +
  scale_color_brewer(palette = "Set1")
# Similar graph, but not split by reporting unit
hbp <- ggplot2::ggplot(Hass_rho_data, aes(x = Np_diff, y = diff)) +
  geom_point(aes(colour = repunit)) +
  facet_grid(~ method) +
  geom_abline(intercept = 0, slope = 0, linetype = "dashed") +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm", colour = "darkgrey", level = 0) +
  labs(x='Np/C - True Rho', y = 'Residual') +
  scale_color_brewer(palette = "Set1")

# Separate regressions for coalescent data of each method
##### ACTUAL STATISTICS CITED IN PAPER #####
coal_pb <- coal_rho_data %>% filter(method == 'PB')
coal_pb_mod <- lm(diff ~ Np_diff, data = coal_pb)
summary(coal_pb_mod)
coal_mcmc <- coal_rho_data %>% filter(method == 'MCMC')
coal_mcmc_mod <- lm(diff ~ Np_diff, data = coal_mcmc)
summary(coal_mcmc_mod)
|
087de8497843d0e41b74bca3b842b317b795c7df
|
20df9ae1aa7cd7c653a49ec48b54dd78a732dc39
|
/macDemo.R
|
6602403d689519666b3cb277385e7818d40c0245
|
[] |
no_license
|
ParkerOHeeron/MachineLearningDemo
|
2b7c8318fc7673d4b8cc161f93ce537c580976d3
|
a5f9b62ad838ce1ba70fef01c7171d09d70545b8
|
refs/heads/main
| 2023-03-18T17:04:15.257242
| 2021-03-18T20:22:22
| 2021-03-18T20:22:22
| 342,719,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,876
|
r
|
macDemo.R
|
# California-housing demo: data cleaning, GLM with k-fold CV, and a random forest.
# Assumes "housing.csv" is in the working directory -- TODO confirm path.
install.packages("tidyverse")
library(tidyverse)
install.packages("reshape2")
library(reshape2)
housing = read.csv('housing.csv')
head(housing)
summary(housing)
par(mfrow=c(2,5))
colnames(housing)
# Histogram of every variable, one facet each.
ggplot(data = melt(housing), mapping = aes(x = value)) + geom_histogram(bins = 30) + facet_wrap(~variable, scale = 'free_x')
# Impute missing bedroom counts with the column median.
housing$total_bedrooms[is.na(housing$total_bedrooms)] = median(housing$total_bedrooms , na.rm = TRUE)
housing$mean_bedrooms = housing$total_bedrooms/housing$households
drops = c('total_bedrooms', 'total_rooms')
housing = housing[ , !(names(housing) %in% drops)]
head(housing)
# One-hot encode ocean_proximity: one 0/1 column per category. ----
categories = unique(housing$ocean_proximity)
cat_housing = data.frame(ocean_proximity = housing$ocean_proximity)
for(cat in categories){
  cat_housing[,cat] = rep(0, times= nrow(cat_housing))
}
head(cat_housing)
for(i in 1:length(cat_housing$ocean_proximity)){
  cat = as.character(cat_housing$ocean_proximity[i])
  cat_housing[,cat][i] = 1
}
head(cat_housing)
cat_columns = names(cat_housing)
keep_columns = cat_columns[cat_columns != 'ocean_proximity']
cat_housing = select(cat_housing, one_of(keep_columns))
tail(cat_housing)
colnames(housing)
# Standardise the numeric predictors (response left on its original scale). ----
drops = c('ocean_proximity','median_house_value')
housing_num = housing[ , !(names(housing) %in% drops)]
head(housing_num)
scaled_housing_num = scale(housing_num)
head(scaled_housing_num)
cleaned_housing = cbind(cat_housing, scaled_housing_num, median_house_value=housing$median_house_value)
head(cleaned_housing)
# 80/20 train/test split. ----
set.seed(1555) #Changed by Parker O'Heeron
sample = sample.int(n = nrow(cleaned_housing), size = floor(.8*nrow(cleaned_housing)), replace = F)
train = cleaned_housing[sample, ]
test = cleaned_housing[-sample, ]
head(train)
nrow(train) + nrow(test) == nrow(cleaned_housing)
# GLM baseline with 5-fold cross-validation. ----
library('boot')
?cv.glm
glm_house = glm(median_house_value~median_income+mean_bedrooms+population, data=cleaned_housing)
k_fold_cv_error = cv.glm(cleaned_housing , glm_house, K=5)
k_fold_cv_error$delta
glm_cv_rmse = sqrt(k_fold_cv_error$delta)[1]
glm_cv_rmse
names(glm_house)
glm_house$coefficients
# Random forest on all predictors. ----
install.packages("randomForest")
library('randomForest')
?randomForest
names(train)
set.seed(1555) #Changed by Parker O'Heeron
train_y = train[, 'median_house_value']
train_x = train[, names(train) !='median_house_value']
head(train_y)
head(train_x)
rf_model = randomForest(train_x, y = train_y , ntree = 500, importance = TRUE)
names(rf_model)
rf_model$importance
# Out-of-bag error (internal validation) vs held-out test error. ----
oob_prediction = predict(rf_model)
train_mse = mean(as.numeric((oob_prediction - train_y)^2))
oob_rmse = sqrt(train_mse)
oob_rmse
test_y = test[,'median_house_value']
test_x = test[, names(test) !='median_house_value']
y_pred = predict(rf_model , test_x)
test_mse = mean(((y_pred - test_y)^2))
test_rmse = sqrt(test_mse)
test_rmse
|
7cea6142985d9ba46d9200f375c97fb32a986217
|
3d4fd9491344654eb6055930c6f407948c892fd4
|
/man/update_ade.Rd
|
b98cbc5f58d4be288e599747b5e79a82d3a9fb40
|
[] |
no_license
|
vcerqueira/tsensembler
|
bb8542e81177e1c1da84ae53070840b1ade3d018
|
b0f1c786440ed3d09160931d970df3eeab09eb5e
|
refs/heads/master
| 2021-07-18T21:26:22.651558
| 2020-10-24T11:46:06
| 2020-10-24T11:46:06
| 82,293,256
| 34
| 14
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,936
|
rd
|
update_ade.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arbitration-methods.r
\docType{methods}
\name{update_ade}
\alias{update_ade}
\alias{update_ade,ADE-method}
\title{Updating an ADE model}
\usage{
update_ade(object, newdata, num_cores = 1)
\S4method{update_ade}{ADE}(object, newdata, num_cores = 1)
}
\arguments{
\item{object}{a \code{\link{ADE-class}} object.}
\item{newdata}{data used to update the ADE model. This should be
the data used to initially train the models (training set), together
with new observations (for example, validation set). Each model
is retrained using \code{newdata}.}
\item{num_cores}{A numeric value to specify the number of cores used to
train base and meta models. num_cores = 1
leads to sequential training of models. num_cores > 1
splits the training of the base models across num_cores cores.}
}
\description{
\strong{update_ade} is a generic function that combines
\code{\link{update_base_models}}, \code{\link{update_ade_meta}},
and \code{\link{update_weights}}.
}
\examples{
specs <- model_specs(
learner = c("bm_svr", "bm_glm", "bm_mars"),
learner_pars = NULL
)
data("water_consumption")
dataset <- embed_timeseries(water_consumption, 5)
# toy size for checks
train <- dataset[1:300, ]
validation <- dataset[301:400, ]
test <- dataset[401:500, ]
model <- ADE(target ~., train, specs)
preds_val <- predict(model, validation)
model <- update_ade(model, rbind.data.frame(train, validation))
preds_test <- predict(model, test)
}
\seealso{
\code{\link{ADE-class}} for building an ADE model;
\code{\link{update_weights}} for updating the weights of the ensemble (without
retraining the models); \code{\link{update_base_models}} for updating the
base models of an ensemble; and \code{\link{update_ade_meta}} for
updating the meta-models of an ADE model.
Other updating models: \code{\link{update_ade_meta}},
\code{\link{update_weights}}
}
\concept{updating models}
|
ae9c727cf1e76236cf29204b74e2297a7c6eda56
|
4e0329a15458fb7f19598134f853e38d8bafe327
|
/process XBT files.R
|
6a781dd42589583c3040e270b585d6cc7dc96071
|
[] |
no_license
|
Planktos/OSTRICH_rvws
|
18da40988886d59789a79adbac8eec9689ed49f4
|
39185d22cd86787f99d7ca0bc181ee9af73c55b9
|
refs/heads/master
| 2016-08-12T14:54:22.153195
| 2015-11-05T20:13:13
| 2015-11-05T20:13:13
| 45,570,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,855
|
r
|
process XBT files.R
|
#PROCESS XBT DEPTH FILES FROM R/V WALTON SMITH & ADD LATITUDE AND LONGITUDE
#Step 1: Add date, time, latitude, and longitude values to individual XBT casts so that data can be imported into and related in Access and Arc geodatabase
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/Data")
library(data.table)
library(stringr)
options("digits.secs" = 8)
d <- list.files(full.names=T, recursive=FALSE, pattern = ".EDF")
for(i in 1:length(d)) {
#Read file in and create data frame with data values and the empty fields which will be filled-in
# (the 34-line EDF header is skipped; the data columns follow)
x <- read.table(d[i], skip = 34, blank.lines.skip = TRUE)
df <- as.data.frame(x)
colnames(df) <- c("depth_m", "temperature_C", "sound velocity_m/s")
df$date <- NA
df$time <- NA
df$latitude <- NA
df$longitude <- NA
#Extract launch date from header and paste into final data frame
# NOTE(review): readLines() has no 'skip' argument; 'skip = 2' partially matches
# 'skipNul' here. The unwanted header lines are actually removed by the grepl()
# filter below -- confirm this is the intended behaviour.
x2 <- readLines(d[i], skip = 2, n = 3)
y <- grepl("//", x2)
(x3 <- x2[!y])
x3list <- strsplit(x3, split = ": ")
m <- matrix(unlist(x3list), nrow=length(x3list), byrow = TRUE)
colnames(m) <- c("txt", "launch_date")
df2 <- as.data.frame(m)
df2$txt <- NULL
df$date <- paste0(df2$launch_date)
#Extract launch time from header and paste into final data frame
x4 <- readLines(d[i], skip = 2, n = 4)
y2 <- grepl("/", x4)
(x5 <- x4[!y2])
x5list <- strsplit(x5, split = ": ")
m2 <- matrix(unlist(x5list), nrow=length(x5list), byrow = TRUE)
colnames(m2) <- c("time", "launch_time")
df3 <- as.data.frame(m2)
df3$time <- NULL
df$time <- paste0(df3$launch_time)
##Extract latitude from header and paste into final data frame
x6 <- read.table(d[i], skip = 5, nrow=1)
colnames(x6) <- c("lattxt", "colon", "latdeg", "latmindir")
df4 <- as.data.frame(x6)
df4$latdeg <- as.numeric(df4$latdeg)
df4$latmin <- NA
df4$latmin <- str_sub(df4$latmindir, 1, -2) #removes the 'N'
df4$latmin <- as.numeric(df4$latmin)
df4$latdecdeg <- NA
# degrees + minutes/60 -> decimal degrees
df4$latdecdeg <- (df4$latdeg + (df4$latmin/60)) #NOTE: Does not take into account which hemisphere you are in. Default is Northern Hemisphere
df4$lattxt <- NULL
df4$colon <- NULL
df4$latdeg <- NULL
df4$latmindir <- NULL
df$latitude<- paste0(df4$latdecdeg)
#Extract longitude from header and paste into final data frame
x7 <- read.table(d[i], skip = 6, nrow=1)
colnames(x7) <- c("lontxt", "colon", "londeg", "lonmindir")
df5 <- as.data.frame(x7)
df5$londeg <- as.numeric(df5$londeg)
df5$lonmin <- NA
df5$lonmin <- str_sub(df5$lonmindir, 1, -2) #removes the 'W'
df5$lonmin <- as.numeric(df5$lonmin)
df5$londecdeg <- NA
# negated: west of the Prime Meridian
df5$londecdeg <- ((df5$londeg + (df5$lonmin/60))*-1) #NOTE: Default is West of Prime Meridian
df5$lontxt <- NULL
df5$colon <- NULL
df5$londeg <- NULL
df5$lonmindir <- NULL
df$longitude<- paste0(df5$londecdeg)
df$latitude <- as.numeric(df$latitude)
df$longitude <- as.numeric(df$longitude)
# NOTE(review): round() returns a value; these two results are discarded -- confirm intent.
round(df$latitude, digits = 7)
round(df$longitude, digits = 7)
#create new field in the df "Date_Time" by joining "Date", "Time" , sep=' ')
df$bdate <- NA
df$bdate <- as.Date(df$date, "%m/%d/%Y")
df$Date_Time <- NA
df$Date_Time <- paste(df$bdate, df$time, sep=' ')
df$Date_Time <- as.POSIXct(df$Date_Time, format = "%Y-%m-%d %H:%M:%S" )
df$bdate <- NULL
#Re-order the column names
df <- df[c("Date_Time", "date", "time", "latitude","longitude", "depth_m", "temperature_C", "sound velocity_m/s")]
#write new text file
suppressWarnings(dir.create("XBT processed")) #remove YMDHMS to create directory for high-resolution data
write.table(df, paste0("XBT processed/", substr(paste0(basename(d[i])),1,8),"_proc", ".txt"), row.names=FALSE, sep="\t")
}
#PROCESS XBT DEPTH FILES MISSING LATITUDE AND LONGITUDE FROM R/V WALTON SMITH
#Step 1: Add date and time to individual XBT casts so that data can be imported into and related in Access and Arc geodatabase
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/Data/proc_alt")
library(data.table)
library(stringr)
library(plyr)   # FIX: join() is used in Step 1b below but plyr was only loaded later in the file
options("digits.secs" = 8)
d <- list.files(full.names = TRUE, recursive = FALSE, pattern = ".EDF")
for (i in seq_along(d)) {
  #Read file in and create data frame with data values and the empty fields which will be filled-in
  x <- read.table(d[i], skip = 34, blank.lines.skip = TRUE)
  df <- as.data.frame(x)
  colnames(df) <- c("depth_m", "temperature_C", "sound velocity_m/s")
  df$date <- NA
  df$time <- NA
  df$latitude <- NA
  df$longitude <- NA
  #Extract launch date from header and paste into final data frame
  # NOTE(review): readLines() has no 'skip' argument ('skip = 2' partially
  # matches 'skipNul'); the header lines are removed by the grepl() filter below.
  x2 <- readLines(d[i], skip = 2, n = 3)
  y <- grepl("//", x2)
  (x3 <- x2[!y])   # FIX: was x2[!n]; 'n' was never defined (cf. the first Step 1 loop)
  x3list <- strsplit(x3, split = ": ")
  m <- matrix(unlist(x3list), nrow = length(x3list), byrow = TRUE)
  colnames(m) <- c("txt", "launch_date")
  df2 <- as.data.frame(m)
  df2$txt <- NULL
  df$date <- paste0(df2$launch_date)
  #Extract launch time from header and paste into final data frame
  x4 <- readLines(d[i], skip = 2, n = 4)
  y2 <- grepl("/", x4)
  (x5 <- x4[!y2])
  x5list <- strsplit(x5, split = ": ")
  m2 <- matrix(unlist(x5list), nrow = length(x5list), byrow = TRUE)
  colnames(m2) <- c("time", "launch_time")
  df3 <- as.data.frame(m2)
  df3$time <- NULL
  df$time <- paste0(df3$launch_time)
  #create new field "Date_Time" by joining "date" and "time"
  df$bdate <- as.Date(df$date, "%m/%d/%Y")
  df$Date_Time <- paste(df$bdate, df$time, sep = ' ')
  df$Date_Time <- as.POSIXct(df$Date_Time, format = "%Y-%m-%d %H:%M:%S")
  df$bdate <- NULL
  #Re-order the column names
  df <- df[c("Date_Time", "date", "time", "depth_m", "temperature_C", "sound velocity_m/s")]
  suppressWarnings(dir.create("processed")) #remove YMDHMS to create directory for high-resolution data
  write.table(df, paste0("processed/", substr(paste0(basename(d[i])), 1, 8), "_proc", ".txt"), row.names = FALSE, sep = "\t")
}
#Step 1b: Merge XBT files with ship GPS
# FIX: this section originally ended with a stray unmatched "}" (syntax error) -- removed.
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/GIS projects/txt files for gdb")
options(digits = 8)
xy <- fread("OSTRICH 2014_WS_GPS_YMDHMS.txt", sep = "\t", header = TRUE, data.table = FALSE)
# Left join on the timestamp, keeping the first GPS fix per timestamp.
# NOTE(review): 'df' here is the data frame from the LAST file processed by the
# loop above -- confirm that joining only the final cast is the intent.
df_xy <- join(df, xy, by = "Date_Time", type = "left", match = "first")
#Other examples of merging or joining two data frames
#depth_xy <- merge(df.depth, GPSByIntegerSec, by = "Date_Time", all = TRUE)
#depth_xy <- rbind.fill(mtcars[c("mpg", "wt")], mtcars[c("wt", "cyl")])
df.na <- na.omit(df_xy)
#Re-name latitude and longitude columns to match other XBT processed files
df.na$latitude <- df.na$Lat.DecDeg
df.na$longitude <- df.na$Lon.DecDeg
#re-order the columns
XBT <- df.na[c("Date_Time", "date", "time", "latitude", "longitude", "depth_m", "temperature_C", "sound velocity_m/s")]
#sort by date
XBT <- XBT[order(XBT$Date_Time, decreasing = TRUE), ]   # FIX: the sorted result was computed but never assigned
#write new text file
suppressWarnings(dir.create("XBT processed"))
write.table(XBT, paste0("XBT processed/", substr(paste0(basename(d[i])), 1, 8), "_proc", ".txt"), row.names = FALSE, sep = "\t")   # FIX: was writing 'df' instead of the merged 'XBT'
#STEP 2: merge XBT processed files into a single file
setwd("C:/Users/kelly.robinson/Dropbox/Cowen_Sponaugle/OSTRICH/PhysData/Data/XBT processed")
library(plyr)
options("digits" = 8)
##Get a List of Files in working directory
xbt_files <- list.files()
##Merge the processed files into a single data frame.
# FIX: the original wrapped this in a for() loop guarded by exists("xbt.dataset");
# the do.call() already reads every file on the first pass, so the loop's later
# iterations only re-printed file names. A single call is equivalent.
xbt.dataset <- do.call("rbind", lapply(xbt_files, FUN = function(files){fread(files, header = TRUE, sep = "\t")}))
# FIX: signif() returns a new vector; the original computed it and discarded the
# result. Assign the rounded coordinates back onto the dataset.
xbt.dataset$latitude <- signif(xbt.dataset$latitude, digits = 8)
xbt.dataset$longitude <- signif(xbt.dataset$longitude, digits = 8)
write.table(xbt.dataset, paste0("OSTRICH2014_XBT casts.txt"), row.names = FALSE, sep = "\t")
|
c976365601b325c838af05905b64595fe04b4dfc
|
bd8a7c215d851e6b3c44165baec15e3f13efb665
|
/man/es_add_static.Rd
|
45e69f29687fb47b179fad9bbb554b50d2c7306c
|
[] |
no_license
|
mYstar/easyshiny
|
dfe36d11f97d390cb3e7e5548f64d6939b9de36a
|
9987d571a65ecdb6004cfa112ad80f027694b0fd
|
refs/heads/master
| 2020-04-12T15:02:55.283045
| 2019-06-19T08:19:46
| 2019-06-19T08:19:46
| 162,569,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 613
|
rd
|
es_add_static.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/es_add.R
\name{es_add_static}
\alias{es_add_static}
\title{Add Static Component}
\usage{
es_add_static(static_call, tab = "Output", box = "Result")
}
\arguments{
\item{static_call}{a call to a \code{\link[shiny]{tags}} element or a corresponding wrapper}
\item{tab}{the tab to place the static element in}
\item{box}{the box to place the static element in}
}
\description{
Adds a static shiny component to the easyshiny app.
The object can be composed of different nested shiny calls and will be placed in the given tab and box.
}
|
9381eeb40783f4c60065f333f2f187c751ad5525
|
c13fd87cbb4066729f529a09f06db1649b7d1d6f
|
/man/eco.unlock-ecogen-method.Rd
|
80259aa566af26c49c619a5a2a22a9020cad5062
|
[] |
no_license
|
cran/EcoGenetics
|
05c6bd845a714051c5317f3e70ececf8b2dbc40b
|
46b904508a5958f3cb11513b83cc6b69c3c0a3ab
|
refs/heads/master
| 2021-01-17T14:00:47.875370
| 2020-05-24T14:20:17
| 2020-05-24T14:20:17
| 30,083,571
| 2
| 4
| null | 2018-01-12T04:31:38
| 2015-01-30T17:44:07
|
R
|
UTF-8
|
R
| false
| true
| 630
|
rd
|
eco.unlock-ecogen-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecogen.3OF6.basic.methods.R
\name{eco.unlock,ecogen-method}
\alias{eco.unlock,ecogen-method}
\alias{eco.unlock,ecogen}
\title{Unlock rows in an ecogen object}
\usage{
\S4method{eco.unlock}{ecogen}(object)
}
\arguments{
\item{object}{object of class ecogen}
}
\description{
This method unlocks the rows in an ecogen object. This means that
different data frames in the object can have different rows, with different row names.
}
\examples{
\dontrun{
data(eco.test)
eco2 <- eco.unlock(eco)
is.locked(eco2)
eco3 <- eco.lock(eco2)
is.locked(eco3)
}
}
|
b61330e16c7eee3fb852c226443ce9e286aac029
|
79be0e4475dedd9e4ad859a4f995e6779a03bd7b
|
/lab#3_Golakoti.R
|
1b6a6155094ac62ebaf33b89545b410d6e079e91
|
[] |
no_license
|
Kgolakot/DatascienceR
|
583441624636dbc32b7b28c7d7a22a5f61fccbd1
|
43b94348f5757878efcbcc152848ed2cdd706a89
|
refs/heads/master
| 2022-12-17T12:38:56.845691
| 2020-09-13T09:42:07
| 2020-09-13T09:42:07
| 295,119,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,098
|
r
|
lab#3_Golakoti.R
|
# ---- Setup and exploratory analysis of the SAheart data ----
# NOTE(review): rm(list=ls()) wipes the whole workspace; tolerable in a
# standalone lab script, but avoid it in shared code.
rm(list=ls())
# ElemStatLearn provides the SAheart data set.
library(ElemStatLearn)
# Fix the RNG seed so the random train/test split is reproducible.
set.seed(22)
# Load the South African heart disease data into a working copy.
data(SAheart)
df<-SAheart
# Quick overview of the variables.
summary(df)
# Convert column 5 to numeric (presumably famhist, a factor -- TODO confirm).
df[,5]<-sapply(df[,5], as.numeric)
# Boxplot of each predictor against the binary response chd, with a
# horizontal line at the predictor's overall mean.
par(mfrow = c(3,3))
for(i in 1:9){
  boxplot(df[,i]~df$chd, data = df ,xlab = colnames(df[i]))
  abline(h = mean(df[,i]))
}
# Random train/test split: 325 rows for training, the remainder for testing.
index = sample(x = 1:nrow(df), size = 325)
df_train <- df[index,]
df_test <- df[-index,]
# Correlation plot over all 10 columns.
library(corrplot)
cor.df<-cor(df[,1:10])
corrplot(cor.df, method = 'circle')
# Scatter-plot matrix coloured by response (red = chd 0, blue = chd 1).
mycol<-c("red","blue")
pairs(df, col = mycol[df$chd+1])
# Logistic regression of chd on all predictors (training data only).
glm.fit<-glm(chd~., data = df_train, family = binomial)
summary(glm.fit)
# Predicted probabilities for the held-out test data.
predict_test<-predict(glm.fit, newdata = df_test, type = 'response')
# Confusion matrices at several classification thresholds.
table(df_test$chd, predict_test>0.5)
table(df_test$chd, predict_test>0.55)
table(df_test$chd, predict_test>0.6)
table(df_test$chd, predict_test>0.65)
table(df_test$chd, predict_test>0.7)
table(df_test$chd, predict_test>0.75)
# Test-set accuracy of the logistic model at threshold 0.5.
log.pred<-ifelse(predict_test>0.5, 1,0 )
log.acc<-mean(df_test$chd == log.pred)
# ---- CART decision tree for chd ----
library(tree)
# Fit a CART model on all predictors of the training split.
cart.fit <- tree(chd~., data = df_train)
summary(cart.fit)
# Plot the unpruned tree.
plot(cart.fit)
text(cart.fit, pretty = 0)
# K-fold cross-validation to choose the tree size.
cv.cart <- cv.tree(cart.fit)
cv.cart
# Deviance versus tree size (best size found: 4).
plot(cv.cart$size,cv.cart$dev,type='b')
plot(cv.cart)
# Prune to the best size determined above.
best.cart <- prune.tree(cart.fit, best =4)
summary(best.cart)
# Plot the pruned tree.
plot(best.cart)
text(best.cart,pretty=0)
# Training and test predictions for both the full and the pruned tree.
train.pred_tree<-predict(cart.fit, df_train)
test.pred_tree<-predict(cart.fit, df_test)
train.pred_tprune<-predict(best.cart, df_train)
test.pred_tprune<-predict(best.cart, df_test)
# Confusion matrices at a 0.5 threshold.
table(df_train$chd, train.pred_tree>0.5)
table(df_train$chd, train.pred_tprune>0.5)
table(df_test$chd, test.pred_tree>0.5)
table(df_test$chd, test.pred_tprune>0.5)
# Test-set accuracy of the pruned tree.
cart.pred<-ifelse(test.pred_tprune>0.5, 1 ,0 )
# BUG FIX: accuracy was previously computed against log.pred (the logistic
# model's predictions); it must compare against the CART predictions.
cart.acc<-mean(df_test$chd == cart.pred)
library(randomForest) # Random Forest
library(gbm) # Boosting
# ---- Bagging: random forest with m = all predictors per split ----
bag.heart <- randomForest(chd ~ ., data=df_train, mtry = ncol(df_train) - 1, importance = TRUE, ntree=400)
plot(bag.heart, type='l', main='MSE by ntree for Bagging')
# Best tree count judged from the plot above: 90.
bag.heart <- randomForest(chd ~ ., data=df_train, mtry = ncol(df_train) - 1, importance = TRUE, ntree=90)
plot(bag.heart, type='l', main='MSE by ntree for Bagging')
# Confusion matrix and test-set accuracy for the bagged model.
bag.predict<-predict(bag.heart, newdata=df_test)
table(df_test$chd, bag.predict>0.5)
bag.pred<-ifelse(bag.predict>0.5, 1 ,0 )
bag.acc<-mean(df_test$chd == bag.pred)
# Variable importance of the bagging model.
varImpPlot(bag.heart)
importance(bag.heart)
# Out-of-bag and test MSE for every number m of candidate predictors.
# (Preallocated instead of growing with c() inside the loop.)
n_pred <- ncol(df_train) - 1
oob.mse <- numeric(n_pred)
test.mse <- numeric(n_pred)
for(i in seq_len(n_pred)){
  rf.heart <- randomForest(chd~., data=df_train, mtry=i, importance=TRUE, ntree=90)
  oob.mse[i] <- rf.heart$mse[90]
  pred <- predict(rf.heart, newdata = df_test)
  test.mse[i] <- with(df_test, mean( (chd - pred)^2))
}
# BUG FIX: this plotted `rf.mse`, a variable that is never defined;
# plot the out-of-bag error computed above instead.
plot(oob.mse, main='Training Error by m', xlab='Number of Predictors', ylab='MSE')
matplot(1:9 , cbind(oob.mse,test.mse), pch=19 , col=c("red","blue"),type="b",ylab="Mean Squared Error",xlab="Number of Predictors Considered at each Split")
legend("topright",legend=c("Out of Bag Error","Test Error"),pch=19, col=c("red","blue"))
# Final random forest: m = 2 random predictors per split.
fin.heart <- randomForest(chd ~ ., data=df_train, mtry = 2, importance = TRUE, ntree=90)
# BUG FIX: previously re-plotted bag.heart under a "Random Forest" title;
# plot the newly fitted fin.heart.
plot(fin.heart, type='l', main='MSE by ntree for Random Forest')
# Variable importance for the final random forest.
varImpPlot(fin.heart)
importance(fin.heart)
# Test predictions for the final forest (and bagging, for comparison).
rf_predict<-predict(fin.heart, newdata=df_test)
bag_predict<-predict(bag.heart, newdata=df_test)
# Confusion matrix and accuracy for the random forest.
table(df_test$chd, rf_predict>0.5)
rf.pred<-ifelse(rf_predict>0.5, 1 ,0 )
rf.acc<-mean(df_test$chd == rf.pred)
# ---- Boosting with ada ----
library(ada)
boost.heart<-ada(chd~., loss = "ada", iter=100, data=df_train)
boost.heart
summary(boost.heart)
plot(boost.heart, type='l', main='MSE pre iteration for Boosting')
# Test predictions, confusion matrix and accuracy for the boosted model.
true.boost<-predict(boost.heart, newdata=df_test)
table(df_test$chd, true.boost)
boost.acc<-mean(df_test$chd == true.boost)
|
2f5e9e3bf41063c1d8302916a3a264a8b092d85a
|
a3a480e10fdd7aef344d2ae7caefdae801ae24cb
|
/Steimann_TCM_Original.R
|
cebfdb89f58fa8d3d37c18c5b53da174f4c763ef
|
[] |
no_license
|
yigitk/R-scripts-for-ICSME-Publications
|
df0c8d5e90cc4c24ca1a3f165cfccd446f263fc9
|
2ea64da58e48dcbc1d7a54f76af5e01da021cf76
|
refs/heads/master
| 2023-02-02T02:08:54.313360
| 2020-12-16T17:58:58
| 2020-12-16T17:58:58
| 322,061,717
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,800
|
r
|
Steimann_TCM_Original.R
|
# Ranking cost (wasted-effort / EXAM-style score) of one suspiciousness
# metric for one faulty program version.
#
# Args:
#   arr: matrix with one row per statement; must contain a column 'Y'
#        (non-zero = faulty statement) and the score column `col`.
#   col: name of the score column to evaluate.
#
# Returns a list with
#   cost:  expected relative cost -- the tie-averaged rank of the
#          best-scored faulty statement divided by nrow(arr),
#   stmt:  worst-case absolute rank of that statement,
#   stmt2: tie-averaged absolute rank.
ranking_cost_exam <-
  function (arr, col) {
    # cost to hit the first bug
    # Drop statements whose score is NA/NaN (metric undefined there).
    tmp_array <-
      arr[which(!((is.na(arr[, col])) | (is.nan(as.numeric(
        arr[, col]
      ))))),]
    if (is.null(dim(tmp_array))) {
      # Exactly one row survived the filtering, so `[` dropped the matrix
      # down to a vector.  Fall back to cost 1 and flag it on the console.
      cost <- 1
      hit_bug <- 1
      cost2 <- 1
      cat("wierd!!", "\n")
    } else{
      # Rows marked faulty, and the best score any faulty row achieves.
      bugidx <- which(as.numeric(tmp_array[, 'Y']) != 0)
      max_score <- max(as.numeric(tmp_array[bugidx, col]))
      sort_array <-
        sort(as.numeric(tmp_array[, col]), decreasing = TRUE)
      # Worst-case rank: LAST position of max_score in the descending sort
      # (pessimistic tie-breaking).
      cost <- max(which(sort_array == as.numeric(max_score)))
      #cost<-length(which(sort_array==as.numeric(max_score)))
      hit_bug <-
        which(as.numeric(tmp_array[bugidx, col]) == max_score) # for multiple bugs
      num_of_stmt_higher_than_maxscore <-
        length(which(sort_array > as.numeric(max_score)))
      # Expected rank under random tie ordering: everything strictly above
      # max_score must be inspected, plus the expected position inside the
      # tie group (midpoint for one bug, 1/(k+1) spacing for k tied bugs).
      if (length(bugidx) == 1) {
        #single bug
        cost2 <-
          num_of_stmt_higher_than_maxscore + (cost - num_of_stmt_higher_than_maxscore +
                                                1) / 2
      } else{
        cost2 <-
          num_of_stmt_higher_than_maxscore + (cost - num_of_stmt_higher_than_maxscore +
                                                1) / (length(hit_bug) + 1)
      }
    }
    stmt2 <- cost2
    stmt <- cost
    # NOTE(review): the two rescalings of `cost` below are never returned
    # (the list's "cost" entry uses cost2) -- dead computation kept as-is.
    cost <- cost / length(hit_bug)
    cost <- cost / dim(arr)[1]
    cost2 <- cost2 / dim(arr)[1]
    #lst<-list("cost"=stmt2,"stmt"=stmt,"stmt2"=stmt2)
    lst <- list("cost" = cost2,
                "stmt" = stmt,
                "stmt2" = stmt2)
    return(lst)
  }
Xtab <- function (arr) {
  # Signed chi-square suspiciousness score (Pearson chi-square on the 2x2
  # coverage table, signed by the likelihood ratio), one value per row.
  #
  # Args:
  #   arr: numeric matrix with columns 'ef' (executed & failed), 'nf'
  #        (not executed & failed), 'ep' (executed & passed) and 'np'
  #        (not executed & passed).
  #
  # Returns:
  #   Numeric vector of length nrow(arr); NA where the likelihood ratio is
  #   undefined (0/0, i.e. no failing information at all).
  S <- arr[, 'ef']
  # seq_len is safe for a 0-row matrix (1:dim(arr)[1] would loop over 1:0).
  for (i in seq_len(dim(arr)[1])) {
    A <- arr[i, 'ef']
    B <- arr[i, 'nf']
    C <- arr[i, 'ep']
    D <- arr[i, 'np']
    N <- A + B + C + D
    # Expected cell counts under independence of coverage and outcome.
    Ecf <- (A + C) * (A + B) / N
    Ecs <- (A + C) * (C + D) / N
    Euf <- (B + D) * (A + B) / N
    Eus <- (B + D) * (C + D) / N
    chi <- (A - Ecf) ^ 2 / Ecf + (C - Ecs) ^ 2 / Ecs +
      (B - Euf) ^ 2 / Euf + (D - Eus) ^ 2 / Eus
    # Likelihood ratio decides the sign of the score.
    L <- (A / (A + B)) / (C / (C + D))
    if (is.nan(L)) {
      S[i] <- NA
    } else if (L < 1) {
      S[i] <- (-1) * chi / N
    } else if (L > 1) {
      S[i] <- chi / N
    } else {
      S[i] <- 0
    }
  }
  return(S)
}
BaseMetrics <- function(arr, name) {
  # Compute one spectrum-based fault-localisation metric per statement (row).
  #
  # Args:
  #   arr:  data frame or matrix with numeric columns 'ef' (executed &
  #         failed), 'nf' (not executed & failed), 'ep' (executed & passed)
  #         and 'np' (not executed & passed).
  #   name: identifier of the metric to compute (see the branches below).
  #
  # Returns:
  #   Numeric vector with one score per row; entries may be NA/NaN where the
  #   formula is undefined for that row (e.g. division by zero).
  #
  # NOTE(review): the original also contained a dead `name == 'Cost'` branch
  # (cr * A - (1 - cr) * C) whose result was always overwritten by the
  # Jaccard-style 'Cost' branch kept below; that dead branch and the large
  # regions of commented-out experimental formulas have been removed.
  A <- arr[, 'ef']
  B <- arr[, 'nf']
  C <- arr[, 'ep']
  D <- arr[, 'np']
  N <- A + B + C + D
  if (name == 'CVA') {
    # Coefficient of variation of P(fail) = (A+B)/N, in percent.
    p <- (A + B) / N
    S <- sqrt((1.0 - p) * p / N) / p * 100.0
  }
  if (name == 'CVB') {
    # Coefficient of variation of A/(A+B), in percent; NA when undefined
    # or non-positive (same condition as the original `cv > 0 & !is.na(cv)`).
    n_f <- A + B
    p <- A / n_f
    cv <- sqrt((1.0 - p) * p / n_f) / p
    S <- ifelse(!is.na(cv) & cv > 0, cv * 100.0, NA)
  }
  if (name == 'CVC') {
    # Coefficient of variation of A/(A+C), in percent; NA when undefined
    # or non-positive.
    n_e <- A + C
    p <- A / n_e
    cv <- sqrt((1.0 - p) * p / n_e) / p
    S <- ifelse(!is.na(cv) & cv > 0, cv * 100.0, NA)
  }
  if (name == 'FailTestNum') {
    S <- A + B
  }
  if (name == 'TotalTestNum') {
    S <- N
  }
  if (name == 'LKlosgen') {
    # Lucia's symmetric Klosgen variant; per-row because max() is scalar.
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      S[i] <- sqrt(a / n) * max(a / (a + c) - (a + b) / n,
                                a / (a + b) - (a + c) / n)
    }
  }
  if (name == 'TarantulaC') {
    # Tarantula weighted by the larger of the two conditional coverages.
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      S[i] <- (a / (a + b)) / (c / (c + d) + a / (a + b)) *
        max(a / (a + b), c / (d + c))
    }
  }
  if (name == 'Tarantula') {
    S <- (A / (A + B)) / (C / (C + D) + A / (A + B))
  }
  if (name == 'Ochiai') {
    S <- A / ((A + B) * (A + C)) ^ 0.5
  }
  if (name == 'Ku2') {
    # Average of recall and precision.
    S <- (A / (A + B) + A / (A + C)) / 2
  }
  if (name == 'RAinKu2') {
    W <- ((A + B) + (A + C)) / (2 * (A + B))
    S <- W * (A / (A + C) - (A + B) / N)
  }
  if (name == 'RDinKu2') {
    W <- ((A + B) + (A + C)) / (2 * (A + B))
    S <- W * (A / (A + C) - B / (B + D))
  }
  if (name == 'RAinOchiai') {
    S <- sqrt((A + C) / (A + B)) * (A / (A + C) - (A + B) / N)
  }
  if (name == 'RDinOchiai') {
    S <- sqrt((A + C) / (A + B)) * (A / (A + C) - B / (B + D))
  }
  if (name == 'Klosgen') {
    S <- sqrt((A + C) / N) * (A / (A + C) - (A + B) / N)
  }
  if (name == 'RAinF1') {
    S <- (2 * (A + C) / N / ((A + C) / N + (A + B) / N)) *
      (A / (A + C) - (A + B) / N)
  }
  if (name == 'RDinF1') {
    S <- (2 * (A + C) / N / ((A + C) / N + (A + B) / N)) *
      (A / (A + C) - B / (B + D))
  }
  if (name == 'PePfRD') {
    S <- ((A + C) / N) / ((A + B) / N) * (A / (A + C) - B / (B + D))
  }
  if (name == 'F1') {
    # Harmonic mean of precision A/(A+C) and recall A/(A+B).
    S <- 2 / (1 / (A / (A + C)) + 1 / (A / (A + B)))
  }
  if (name == 'RA') {
    # Risk "advantage": precision minus base failure rate.
    S <- A / (A + C) - (A + B) / N
  }
  if (name == 'RD') {
    # risk difference
    S <- A / (A + C) - B / (B + D)
  }
  if (name == 'RR') {
    S <- A / (A + B) - (A + C) / N
  }
  if (name == 'RRinF1') {
    RR <- A / (A + B) - (A + C) / N
    S <- 2 / (1 / (A / (A + C)) + 1 / RR)
  }
  if (name == 'AmpleTarantula') {
    S <- (A / (A + B)) / (C / (C + D) + A / (A + B)) *
      abs(A / (A + B) - C / (C + D))
  }
  if (name == 'RRTarantula') {
    RR <- A / (A + B) - (A + C) / N
    S <- RR * (A / (A + B)) / (C / (C + D) + A / (A + B))
  }
  if (name == 'AFTarantula') {
    # Tarantula weighted by the attributable fraction AF.
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      AF <- max(0, ((a + b) / n - b / (b + d)) / ((a + b) / n))
      S[i] <- AF * (a / (a + b)) / (c / (c + d) + a / (a + b))
    }
  }
  if (name == 'AmpleGP13') {
    S <- abs(A / (A + B) - C / (C + D)) * A * (1 + 1 / (A + 2 * C))
  }
  if (name == 'GP13') {
    S <- A * (1 + 1 / (A + 2 * C))
  }
  if (name == 'RRGP13') {
    S <- (A / (A + B) - (A + C) / N) * A * (1 + 1 / (A + 2 * C))
  }
  if (name == "DS2RA") {
    S <- A * A / (B + C) * (A / (A + C) - (A + B) / N)
  }
  if (name == 'AFinO') {
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      AF <- max(0, ((a + b) / n - b / (b + d)) / ((a + b) / n))
      S[i] <- sqrt(a / (a + c) * AF)
    }
  }
  if (name == 'AF') {
    # Attributable fraction, clamped at zero.
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      S[i] <- max(0, ((a + b) / n - b / (b + d)) / ((a + b) / n))
    }
  }
  if (name == 'AFinF1') {
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      AF <- max(0, ((a + b) / n - b / (b + d)) / ((a + b) / n))
      S[i] <- 2 / (1 / (a / (a + c)) + 1 / AF)
    }
  }
  if (name == 'AFinK') {
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      AF <- max(0, ((a + b) / n - b / (b + d)) / ((a + b) / n))
      S[i] <- ((a + b) / n) / sqrt((a + c) / n) * (AF - (a + c) / n)
    }
  }
  if (name == 'Fmeasure') {
    # F-beta with beta = 0.5, algebraically simplified.
    S <- (5 / 4 * A) / (3 / 2 * A + 1 / 2 * B + C)
  }
  if (name == 'Mmeasure') {
    m <- 22.466
    S <- (A + m * (A + B) / N) / (A + C + m)
  }
  if (name == 'Rcost') {
    # when cr = 0.5 this behaves like Ample
    cr <- 0.342
    S <- cr * A / (A + B) - (1 - cr) * C / (C + D)
  }
  if (name == 'RcostRA') {
    cr <- 0.342
    S <- (cr * A / (A + B) - (1 - cr) * C / (C + D)) *
      (A / (A + C) - (A + B) / N)
  }
  if (name == "RAinRcost") {
    cr <- 0.342
    S <- (A + C) / (A + B) * ((A / (A + C) - (A + B) / N) + cr - 1)
  }
  if (name == 'CoTarantula') {
    S <- sqrt((A + C) / N) * (A / (A + B)) / (C / (C + D) + A / (A + B))
  }
  if (name == 'CovTarantula') {
    S <- ((A + C) / N) * (A / (A + B)) / (C / (C + D) + A / (A + B))
  }
  if (name == 'recall') {
    S <- A / (A + B)
  }
  if (name == 'MmeasureRA') {
    m <- 22.466
    S <- (A + m * (A + B) / N) / (A + C + m) * (A / (A + C) - (A + B) / N)
  }
  if (name == "RAinMmeasure") {
    m <- 22.466
    S <- ((A + C) * (A / (A + C) - (A + B) / N) + 0.5 * m) / (A + C + m)
  }
  if (name == 'GP13RA') {
    S <- A * (1 + 1 / (A + 2 * C)) * (A / (A + C) - (A + B) / N)
  }
  if (name == 'RAinF05') {
    s_F <- (A + C) / (A + B)
    S <- 5 / 4 * s_F / (0.25 + s_F) * (A / (A + C) - (A + B) / N)
  }
  if (name == "RAinRcost2") {
    cr <- 0.342
    S <- cr * (A + C) / (A + B) * (A / (A + C) - (A + B) / N) -
      (1 - cr) * C / (C + D)
  }
  if (name == "RRprec") {
    S <- (A / (A + B) - (A + C) / N) * A / (A + C)
  }
  if (name == "recallPrec") {
    S <- A / (A + B) * A / (A + C)
  }
  if (name == "recallRD") {
    S <- A / (A + B) * (A / (A + C) - B / (B + D))
  }
  if (name == "RsuppRA") {
    S <- sqrt(A / N) * (A / (A + C) - (A + B) / N)
  }
  if (name == "suppRA") {
    S <- A / N * (A / (A + C) - (A + B) / N)
  }
  if (name == 'Cost') {
    # P(s|F) * Jaccard (this is the branch that was effective in the
    # original; an earlier duplicate 'Cost' branch was dead code).
    S <- (A / (A + B)) * (A / (A + B + C))
  }
  if (name == 'Ample') {
    S <- abs(A / (A + B) - C / (C + D))
  }
  if (name == 'AmpleinO') {
    S <- abs(A / (A + B) - C / (C + D)) * (A / (A + C))
  }
  if (name == 'AmpleinK') {
    S <- abs(A / (A + B) - C / (C + D)) * (A / (A + C) - (A + B) / N)
  }
  if (name == 'AmpleLK') {
    S <- A
    for (i in seq_len(dim(arr)[1])) {
      a <- arr[i, 'ef']
      b <- arr[i, 'nf']
      c <- arr[i, 'ep']
      d <- arr[i, 'np']
      n <- a + b + c + d
      S[i] <- abs(a / (a + b) - c / (c + d)) *
        max(a / (a + c) - (a + b) / n, a / (a + b) - (a + c) / n)
    }
  }
  # Previously an unknown metric name produced "object 'S' not found";
  # fail with an informative message instead.
  if (!exists("S", inherits = FALSE)) {
    stop("BaseMetrics: unknown metric name '", name, "'", call. = FALSE)
  }
  return(S)
}
#####################################################
### Configuration for the ranking-cost experiment.
### Before running, make sure that:
###  1. `numbug` selects the version sub-directory to analyse.
###  2. `dir` points at the root directory of the input data sets.
###  3. The hard-coded output directories used further below exist.
#####################################################
#numbug <- 'out1' # single bug
numbug <- 'outM' # contains ALL (2,4,8,16,32) multiple-bug versions
# Programs under analysis; each has one CSV per faulty version.
programs <-
  c(
    'daikon',
    'eventbus',
    'jaxen',
    'Jester1.37b (+tests)',
    'Jexel',
    'JParsec',
    'org.apache.commons.codec 1.3 (+test, resolved abstr. tests)',
    'org.apache.commons.lang3',
    'org.eclipse.draw2d 3.4.2',
    'org.htmlparser 1.6'
  )
# Root of the original coverage-information files.
dir <-
  'C:/Users/yigit/Desktop/TCM/src/'
# Columns of the final per-program summary table.
fin_cols <- c(
  'program',
  'Tarantula',
  'TarantulaC',
  'Klosgen',
  'LKlosgen',
  'RA',
  'RD',
  'recallPrec',
  'RRprec',
  'RRTarantula',
  'recallRD',
  'F1',
  'RAinF1',
  'RDinF1',
  'recall',
  'RR',
  'PePfRD',
  'Ku2',
  'RAinKu2',
  'RDinKu2',
  'Ochiai',
  'RAinOchiai',
  'RDinOchiai',
  'CVA',
  'CVB',
  'CVC',
  'FailTestNum',
  'TotalTestNum'
)
# One row per program, filled in by the main loop below.
fin_array <-
  matrix(nrow = length(programs), ncol = length(fin_cols))
colnames(fin_array) <- fin_cols
# NOTE: a second matrix (fin_array2) was allocated here but never written
# to or read anywhere in the script; it has been removed as dead code.
# Metrics filled into the per-statement table via BaseMetrics().
# ('Xtab' and 'AFinK' columns are kept in the CSV layouts below for
# backward compatibility but are not computed, exactly as before.)
ver_metrics <- c(
  'LKlosgen', 'TarantulaC', 'Klosgen', 'recall', 'RR', 'PePfRD',
  'Tarantula', 'RA', 'RD', 'F1', 'RAinF1', 'RDinF1', 'Ochiai',
  'RAinOchiai', 'RDinOchiai', 'Ku2', 'RAinKu2', 'RDinKu2',
  'recallPrec', 'RRprec', 'RRTarantula', 'recallRD',
  'CVA', 'CVB', 'CVC', 'FailTestNum', 'TotalTestNum'
)
# Metrics whose per-version ranking cost is computed and then averaged per
# program.  BUG FIX: 'RDinKu2' was computed per version but never
# aggregated into fin_array (its column stayed NA); it is now included.
ranked_metrics <- c(
  'Tarantula', 'TarantulaC', 'Ochiai', 'F1', 'Klosgen', 'RRprec',
  'LKlosgen', 'PePfRD', 'recall', 'RDinOchiai', 'RRTarantula', 'RAinF1',
  'RA', 'recallPrec', 'RD', 'recallRD', 'RDinF1', 'RR', 'RAinOchiai',
  'Ku2', 'RAinKu2', 'RDinKu2'
)
for (i in seq_along(programs)) {
  sub_dir <- paste(dir, programs[i], '/', numbug, '/', sep = '')
  vers <- list.files(path = sub_dir)
  num_vers <- length(vers)
  vers_id <- seq_len(num_vers) # use all data sets in the folder
  # Per-version summary table (column order kept identical to the
  # historical CSV output; a parallel res_array2 for "absolute" costs was
  # dead code and has been removed).
  res_cols <- c(
    'id', 'Xtab', 'Tarantula', 'TarantulaC', 'Ochiai', 'F1', 'Klosgen',
    'LKlosgen', 'PePfRD', 'recall', 'RDinOchiai', 'RRprec', 'RRTarantula',
    'recallPrec', 'RAinF1', 'RA', 'RD', 'recallRD', 'RDinF1', 'Ku2',
    'RAinKu2', 'RDinKu2', 'RR', 'AFinK', 'RAinOchiai', 'filename',
    'CVA', 'CVB', 'CVC', 'FailTestNum', 'TotalTestNum'
  )
  res_array <- matrix(nrow = num_vers, ncol = length(res_cols))
  colnames(res_array) <- res_cols
  for (j in seq_len(num_vers)) {
    tar_path <- paste(sub_dir, vers[vers_id[j]], sep = '')
    tarfile <- read.csv(tar_path)
    # Per-statement metric table for this version (column order preserved).
    ver_cols <- c(
      'method', 'Xtab', 'Tarantula', 'TarantulaC', 'Ochiai', 'F1',
      'Klosgen', 'LKlosgen', 'PePfRD', 'recall', 'RDinOchiai', 'RRprec',
      'RRTarantula', 'recallPrec', 'RAinF1', 'RA', 'RD', 'recallRD',
      'RDinF1', 'RR', 'AFinK', 'RAinOchiai', 'Ku2', 'RAinKu2', 'RDinKu2',
      'Y', 'CVA', 'CVB', 'CVC', 'FailTestNum', 'TotalTestNum'
    )
    ver_array <- matrix(nrow = dim(tarfile)[1], ncol = length(ver_cols))
    colnames(ver_array) <- ver_cols
    ver_array[, 'method'] <- tarfile[, 'method']
    ver_array[, 'Y'] <- tarfile[, 'faulty']
    for (metric in ver_metrics) {
      ver_array[, metric] <- BaseMetrics(tarfile, metric)
    }
    # Debugging aid: dump the raw per-statement metric table per version.
    output_cost_file1 <- paste(
      'C:/Users/yigit/Desktop/TCM/output_metrics/', '__array', programs[i],
      '_', basename(tar_path), '_', numbug, '.csv', sep = ''
    )
    write.csv(ver_array, output_cost_file1, row.names = FALSE)
    res_array[j, 'id'] <- j
    # Relative ranking cost of every metric for this version.
    for (metric in ranked_metrics) {
      res_array[j, metric] <- ranking_cost_exam(ver_array, metric)$cost
    }
    # Version-level statistics: CVA and the test counts are constant over
    # statements (take the first row); CVB / CVC are averaged.
    res_array[j, 'CVA'] <- as.numeric(ver_array[1, 'CVA'])
    res_array[j, 'CVB'] <- mean(as.numeric(ver_array[, 'CVB']), na.rm = TRUE)
    res_array[j, 'CVC'] <- mean(as.numeric(ver_array[, 'CVC']), na.rm = TRUE)
    res_array[j, 'FailTestNum'] <- as.numeric(ver_array[1, 'FailTestNum'])
    res_array[j, 'TotalTestNum'] <- as.numeric(ver_array[1, 'TotalTestNum'])
    # Dump the coverage table for inspection when the Ochiai cost degenerates.
    if (is.infinite(res_array[j, 'Ochiai'])) {
      output_cost_filekk <- paste(
        'C:/Users/yigit/Desktop/TCM/output_considerRD/', '__coverage_array',
        programs[i], j, '.csv', sep = ''
      )
      write.csv(ver_array, output_cost_filekk, row.names = FALSE)
      cat(tar_path, '\n')
    }
    # Progress report every 100 versions.
    if (j %% 100 == 0) {
      cat(i, '/', programs[i], '/', j, '/ # of versions:', num_vers, '\n')
    }
  }
  # Record the source file of every version (this coerces res_array to
  # character, matching the original behaviour).
  for (j in seq_len(num_vers)) {
    res_array[j, 'filename'] <-
      paste(sub_dir, vers[vers_id[j]], sep = '')
  }
  # Per-program details, kept for debugging.
  output_cost_file1 <- paste(
    'C:/Users/yigit/Desktop/TCM/output_considerRD/details/_', i, '_',
    programs[i], '_', numbug, '_numofstmt_costALLstmt2.csv', sep = ''
  )
  write.csv(res_array, output_cost_file1, row.names = FALSE)
  fin_array[i, 'program'] <- programs[i]
  # Program-level summary: mean relative ranking cost over all versions.
  # NOTE(review): the CVA/CVB/CVC/FailTestNum/TotalTestNum columns of
  # fin_array are left unfilled, as in the original.
  for (metric in ranked_metrics) {
    fin_array[i, metric] <- mean(as.numeric(res_array[, metric]))
  }
}
## for ICST ver.
##output_cost_file <- paste('C:/Users/yigit/Desktop/TCM/output_6metrics/','final_with_numofstmt_',numbug,'2_ALLstmt3.csv', sep = '')
##write.csv(fin_array, output_cost_file, row.names = FALSE)
# for ICST CRC version
# Write the averaged metric table (fin_array) for the current bug count
# to disk as one CSV per `numbug` value.
# NOTE(review): hard-coded absolute Windows path — only runs on the
# author's machine; consider parameterizing the output directory.
output_cost_file <-
  paste(
    'C:/Users/yigit/Desktop/TCM/output_considerRD/',
    'final_with_numofstmt_',
    numbug,
    '_after_submission.csv',
    sep = ''
  )
write.csv(fin_array, output_cost_file, row.names = FALSE)
|
6eff9cfc5e660ce02991cf1e7e13c0284be1bfba
|
80f18bfa822b1e58d7b023d80b4f0e2bef442c19
|
/run_analysis.R
|
10547cab8a20a2a3e551d850f1c33bb3ca8e7956
|
[] |
no_license
|
smalapet/datascience_course_3
|
412819310f996ea245bd4b6abb753a61a2447c14
|
661cdb945b875825af44249a472e92fab0aa37a7
|
refs/heads/master
| 2021-01-22T04:10:29.968968
| 2017-09-03T13:03:58
| 2017-09-03T13:03:58
| 102,264,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
run_analysis.R
|
run_analysis <- function() {
  # Build a tidy summary of the UCI HAR data set: the mean of every
  # mean/std measurement, per subject (PersonID) and activity.
  # Reads the fixed file layout from the working directory; requires
  # dplyr to be attached (for `%>%`, group_by, summarise, across).
  # Returns a grouped summary data frame.

  ## 1. Feature names and activity labels
  features <- read.csv("features.txt", sep = "", header = FALSE)[2]
  activities <- read.csv("activity_labels.txt", sep = "", header = FALSE)

  ## 2. Measurement sets (test + train, row-bound)
  test_set <- read.csv("test/X_test.txt", sep = "", header = FALSE)
  train_set <- read.csv("train/X_train.txt", sep = "", header = FALSE)
  merged_set <- rbind(test_set, train_set)

  ## 3. Activity code per observation
  test_moves <- read.csv("test/y_test.txt", sep = "", header = FALSE)
  train_moves <- read.csv("train/y_train.txt", sep = "", header = FALSE)
  merged_moves <- rbind(test_moves, train_moves)

  ## 4. Subject id per observation
  test_person <- read.csv("test/subject_test.txt", sep = "", header = FALSE)
  train_person <- read.csv("train/subject_train.txt", sep = "", header = FALSE)
  merged_person <- rbind(test_person, train_person)

  ## 5. Keep only the mean/std measurement columns
  names(merged_set) <- features[, 1]
  merged_set <- merged_set[grepl("std|mean", names(merged_set), ignore.case = TRUE)]

  ## 6. Map activity codes to descriptive names.
  ## BUG FIX: the original used merge(), which re-sorts rows by the key,
  ## so the labels no longer lined up with the cbind-ed measurements.
  ## activity_labels.txt rows are keyed 1..6, so direct indexing keeps
  ## the original row order intact.
  merged_moves <- data.frame(Activities = activities[merged_moves$V1, 2])
  merged_set <- cbind(merged_person, merged_moves, merged_set)
  names(merged_set)[1:2] <- c("PersonID", "Activities")

  ## 7. Average every measurement per subject/activity.
  ## summarise_each()/funs() are defunct in modern dplyr; across() is
  ## the supported replacement and computes the same column-wise means.
  group_by(merged_set, PersonID, Activities) %>%
    summarise(across(everything(), mean))
}
|
4c764f1ae954dfcd4fbec101c1c9de1a735e7534
|
7456d3eae8574560e823cd4180579e7e16a5c9f0
|
/R/parToCov.R
|
10dd3b8d300ee5c53dc8f5c31106d13d8c2d1eed
|
[] |
no_license
|
fbertran/ebadimex
|
7f362e82315f4172eef5798c57765611b3a311bc
|
e0db30de62b1b4d8fbd59cd567ffb4c4af498ba9
|
refs/heads/master
| 2021-09-22T03:20:25.246862
| 2018-09-05T18:34:46
| 2018-09-05T18:34:46
| 257,308,813
| 1
| 0
| null | 2020-04-20T14:36:00
| 2020-04-20T14:35:59
| null |
UTF-8
|
R
| false
| false
| 690
|
r
|
parToCov.R
|
##################################################
# Unconstrained parameterization
##################################################
#' @export
parToCov <- function(th) {
  # Map an unconstrained 3-vector to a 2x2 covariance matrix:
  # th[1], th[2] are log standard deviations; th[3] parameterizes the
  # correlation angle in (0, pi) via a scaled logistic transform.
  sd1 <- exp(th[1])
  sd2 <- exp(th[2])
  angle <- pi * exp(th[3]) / (1 + exp(th[3]))
  cov12 <- sd1 * sd2 * cos(angle)
  matrix(c(sd1^2, cov12, cov12, sd2^2), nrow = 2, ncol = 2)
}
#' @export
covToPar <- function(sigma) {
  # Inverse of parToCov(): 2x2 covariance matrix -> unconstrained
  # parameter vector (log sds and a logit-type angle parameter).
  sd1 <- sqrt(sigma[1, 1])
  sd2 <- sqrt(sigma[2, 2])
  angle <- acos(sigma[1, 2] / sd1 / sd2)
  c(log(sd1), log(sd2), log(angle / (pi - angle)))
}
|
cb7307b77412c01ac6f5dcc76ba180bc485edd9c
|
00ff834e26ad86758e1a29d23cff2509bdb842fe
|
/man/firstup.Rd
|
f9a27599723917cddaa953c308206f6338cd9b88
|
[] |
no_license
|
dataning/ropenfda
|
9c227e5d2ef49abed7019795b0c9e81c920dd1f2
|
87c1dfb5da7f0b0ca47f7a4d8603d5947e4df81e
|
refs/heads/master
| 2021-05-04T15:51:27.229673
| 2017-04-02T02:57:20
| 2017-04-02T02:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 331
|
rd
|
firstup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help-functions.R
\name{firstup}
\alias{firstup}
\title{This is an internal function to capitalize the first letter.}
\usage{
firstup(x)
}
\arguments{
\item{x}{Input string.}
}
\description{
This is an internal function to capitalize the first letter.
}
|
62155d63b146889dec42616fda58164723fd8586
|
85a2068fee8e58fbdb8cd184077ba68490e557cc
|
/man/get_LeadService.Rd
|
1a7b22611a533f8dbf74ad6600489830802502ca
|
[] |
no_license
|
jdjohn215/milwaukeer
|
06f33bdf0c7daf98b044e9e284f99547b99a8604
|
f3034af2c09fc4763e3e2d35f86576e8f404b29e
|
refs/heads/master
| 2020-04-08T13:49:59.029627
| 2019-08-28T21:11:23
| 2019-08-28T21:11:23
| 159,408,851
| 3
| 2
| null | 2020-03-22T05:32:36
| 2018-11-27T22:31:50
|
R
|
UTF-8
|
R
| false
| true
| 1,032
|
rd
|
get_LeadService.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_LeadService.R
\name{get_LeadService}
\alias{get_LeadService}
\title{Get the locations of lead service lines
\code{get_LeadService} returns a data.frame containing addresses in which the city-owned
section of the water service line is made of lead. The data.frame can be geocoded
(if specified) and filtered for the selected geography (if specified).}
\usage{
get_LeadService(shape, spatial = FALSE, include_missing = FALSE)
}
\arguments{
\item{shape}{An object of class sf. If included, the output will be filtered using
st_intersection}
\item{spatial}{Logical. If TRUE the output is class sf. Defaults to FALSE.}
\item{include_missing}{Logical. If TRUE values not geocoded will be added to the output.
Defaults to FALSE.}
}
\value{
A dataframe.
}
\description{
Refer to the data dictionary for further information:
\url{https://data.milwaukee.gov/dataset/lead-service-line-data}
}
\examples{
get_LeadService()
get_LeadService(spatial = TRUE)
}
|
2d92b247646190612cc70cb0f4bbfe2e786d9009
|
19361af6ab987d9a87334a3f6c83e07b434d2698
|
/R/dist_wpd.R
|
94be49cb7bf333c44644d928a9c1acfa9ef1ffbe
|
[] |
no_license
|
Sayani07/gracsr
|
1f3ef4395874316994e3b265758f847407f8444e
|
0f365cd358f808cd077403d7d7a534cc584e59ce
|
refs/heads/master
| 2023-09-03T22:46:45.477390
| 2021-10-27T00:45:27
| 2021-10-27T00:45:27
| 395,581,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,057
|
r
|
dist_wpd.R
|
#' Title Compute distances based on wpd
#' Computes distances between subjects based on wpd across different granularities
#' @param .data a tsibble
#' @param harmony_tbl a harmony table
#' @param response measured variable
#' @param nperm number of permutations for normalization
#' @return returns an object of class "dist"
#'
#' @examples
#' library(gravitas)
#' library(tidyverse)
#' library(parallel)
#' library(tsibble)
#' library(rlang)
#' sm <- smart_meter10 %>%
#' filter(customer_id %in% c("10006704", "10017936", "10006414", "10018250"))
#' gran1 <- "hour_day"
#' gran2 <- NULL
#' harmonies <- sm %>%
#' harmony(
#' ugran = "year",
#' filter_in = "wknd_wday",
#' filter_out = c("hhour", "fortnight", "quarter", "semester")
#' )
#' harmonies1 <- harmonies %>% mutate(facet_variable = NA)
#'
#' h <- harmonies1 %>%
#' select(-facet_levels) %>%
#' distinct() %>%
#' mutate(facet_levels = NA) %>%
#' filter(x_variable %in% c("month_year", "hour_day", "wknd_wday"))
#'
#' v <- dist_wpd(sm, harmony_tbl = h)
#' v
#' @export
dist_wpd <- function(.data,
                     harmony_tbl = NULL,
                     # filter_comb = NULL,
                     response = NULL,
                     nperm = 100) {
  # Distance between tsibble keys (e.g. customers) based on their wpd
  # values across the harmony-table granularities: wpd is computed per
  # key in parallel, spread to a wide key-by-granularity matrix, then
  # Euclidean dist() is taken over the rows.
  # Only the FIRST key variable and (by default) the first measured
  # variable are used.
  key <- tsibble::key(.data)
  key <- key[1] %>% as.character()
  index <- tsibble::index(.data) %>% as.character()
  # Default response: first measured variable of the tsibble.
  if (is.null(response)) {
    response <- tsibble::measured_vars(.data)
    response <- response[1]
  }
  if (is.null(harmony_tbl)) {
    stop("harmony table must be provided")
  }
  # NOTE(review): comb is added and immediately dropped, so harmonies
  # equals harmony_tbl; kept for the commented-out filter below.
  harmonies <- harmony_tbl %>%
    mutate(comb = paste(facet_variable,
      x_variable,
      sep = "-"
    )) %>%
    select(-comb)
  # %>% filter(comb %in% c("hour_day-wknd_wday",
  #                        "day_month-hour_day",
  #                        "wknd_wday-hour_day",
  #                        "hour_day-day_week",
  #                        "day_week-hour_day"))
  # Lookup table: serial id (the .id produced below) -> actual key value.
  uni_cust <- unique(.data %>% pull(!!sym(key)))
  customer_ref <- tibble(
    customer_serial_id = as.character(seq(length(uni_cust))),
    customer_id = uni_cust
  )
  # One tibble per key; wpd is computed for each in parallel.
  elec_split <- .data %>% group_split(!!sym(key))
  elec_select_harmony <- parallel::mclapply(seq_len(length(elec_split)), function(x) {
    data_id <- elec_split %>%
      magrittr::extract2(x) %>%
      as_tsibble(index = index)
    k <- hakear::wpd(data_id,
      harmony_tbl = harmonies,
      response = {{ response }},
      nperm = nperm
    ) %>% arrange(-wpd)
  }, mc.cores = parallel::detectCores() - 1, mc.preschedule = FALSE, mc.set.seed = FALSE) %>%
    dplyr::bind_rows(.id = "customer_serial_id") %>%
    # dplyr::mutate(!!key := m) %>%
    # dplyr::select(-m) %>%
    dplyr::left_join(customer_ref) %>%
    dplyr::select(-customer_serial_id)
  # write_rds(elec_select_harmony, "data/elec_select_harmony.rds")
  # Wide matrix: one row per key, one column per facet-x granularity
  # combination, cells holding the wpd values.
  mydist <- elec_select_harmony %>%
    mutate(comb = paste(facet_variable, x_variable, sep = "-")) %>%
    select(comb, customer_id, wpd) %>%
    pivot_wider(names_from = comb, values_from = wpd) %>%
    dplyr::rename(key1 := !!key)
  # Keys become rownames so dist() labels the distance object by key.
  mydist <- column_to_rownames(mydist,
    var = "key1"
  ) %>%
    dist()
  mydist
}
|
f2e5fea5c8bdea8c1fce19aa1fe55a31c21c57ca
|
c1c86256adc1e7f9f9397654acd92254b2d93a43
|
/dendogram.R
|
763aaa5855a13151a2414db768b1c325ed6f3b37
|
[] |
no_license
|
Marzan1/Code-for-heatmap
|
a61987eaf903639701930ee2c2f118d2911f975c
|
bbbfe1a299c35a90ba2c48ec2c82f83b8088e2f8
|
refs/heads/main
| 2023-07-10T20:05:42.706172
| 2021-08-24T23:03:33
| 2021-08-24T23:03:33
| 399,626,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
dendogram.R
|
# Draw a horizontal complete-linkage dendrogram of the rows of
# heatmap1.csv (first column = row labels, columns 2..30 = data).
heat <- read.csv(file = "heatmap1.csv", sep = ",", header = TRUE)
row.names(heat) <- heat$X
heat <- heat[, 2:30]
dendro <- as.dendrogram(hclust(dist(heat), method = "complete"))
par(mar = c(3, 4, 1, 15))  # wide right margin for the leaf labels
plot(dendro, horiz = TRUE)
|
219a5de570edc0cddb8349f6532ae1190a5cf1c8
|
bb1e610719504eadec848afaeea6dcb0069bedc4
|
/EPL_League results scrap.r
|
45c7c91c4d8618e27f9e2a671366514825a39e63
|
[] |
no_license
|
batemansogq/R_cde_bkup
|
c8a5a4ef3a5bf8024f631c79bf0063c485d08946
|
8b4a9f51ee28c067e869df538bdf3634d09b142b
|
refs/heads/master
| 2020-12-07T23:02:32.150473
| 2020-03-17T10:54:40
| 2020-03-17T10:54:40
| 67,341,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,855
|
r
|
EPL_League results scrap.r
|
#install.packages("rvest")
library(rvest)
library(stringr)
# whole table
# .box2+ .box td , th
# schedule
#.wfb-ad+ .box td
#page ref
ref <- read_html("http://www.worldfootball.net/schedule/eng-premier-league-2011-2012-spieltag/1/")
# "http://www.worldfootball.net/schedule/eng-premier-league-2012-2013-spieltag/38/"
leagueRd <- ref %>% html_nodes(".box2+ .box td:nth-child(3)") %>% html_text()
leagueRd
# trim out the team name
# find the start
# substring(leagueRd[2],22)
# using the end text, extract the full team name
# substring(n, 0, (str_locate(n,"\r\n")[1,1])-1)
# single statement
# substring((substring(leagueRd[3],22)), 0, (str_locate((substring(leagueRd[3],22)),"\r\n")[1,1])-1)
#lappy
lapply(leagueRd, function (x) substring((substring(x,22)), 0, (str_locate((substring(x,22)),"\r\n")[1,1])-1))
#position
lg <- ".box2+ .box .standard_tabelle td:nth-child(1)"
#team
tm <- ".box2+ .box td:nth-child(3)"
#matches
mt <- ".box2+ .box td:nth-child(4)"
#wins
wn <- ".box2+ .box td:nth-child(5)"
#draws
dw <- ".box2+ .box td:nth-child(6)"
#losses
ls <- ".box2+ .box td:nth-child(7)"
#for against
fa <- ".box2+ .box td:nth-child(8)"
#dif
df <- ".box2+ .box td:nth-child(9)"
#league points
lp <- "td:nth-child(10)"
#league pos
pos <-c(1:20)
############################################################################################
# Scrape the Premier League table after every matchday (1..38) of one
# season from worldfootball.net and stack them into a single data frame
# with one row per team per round.
# x: season string as used in the site URL, e.g. "2011-2012".
league_table <- function(x) {
  # Table positions 1..20 supplied manually; the scraped position
  # column is not filled on every row.
  pos <- c(1:20)
  # Empty accumulator with one column per scraped field.
  leaguetable <- data.frame(round = as.factor(numeric()),
                            position = as.factor(numeric()),
                            team = character(),
                            matches = as.factor(numeric()),
                            win = numeric(),
                            draw = numeric(),
                            loss = numeric(),
                            for_against = character(),
                            goal_diff = numeric(),
                            points = numeric())
  # One iteration per matchday (38 rounds in a Premier League season).
  for (i in 1:38) {
    # URL of the league table after round i of season x.
    pg <- paste("http://www.worldfootball.net/schedule/eng-premier-league-", x, "-spieltag/", i, "/", sep = "")
    ref <- read_html(pg)
    # Team cell contains link markup: strip a fixed 21-character prefix,
    # then keep everything before the first "\r\n".
    tm <- ref %>% html_nodes(".box2+ .box td:nth-child(3)") %>% html_text()
    tm <- lapply(tm, function(x) substring((substring(x, 22)), 0, (str_locate((substring(x, 22)), "\r\n")[1, 1]) - 1))
    # matches played
    mt <- ref %>% html_nodes(".box2+ .box td:nth-child(4)") %>% html_text()
    # wins
    wn <- ref %>% html_nodes(".box2+ .box td:nth-child(5)") %>% html_text()
    # draws
    dw <- ref %>% html_nodes(".box2+ .box td:nth-child(6)") %>% html_text()
    # losses
    ls <- ref %>% html_nodes(".box2+ .box td:nth-child(7)") %>% html_text()
    # goals for:against
    fa <- ref %>% html_nodes(".box2+ .box td:nth-child(8)") %>% html_text()
    # goal difference
    df <- ref %>% html_nodes(".box2+ .box td:nth-child(9)") %>% html_text()
    # league points
    lp <- ref %>% html_nodes("td:nth-child(10)") %>% html_text()
    # Bind this round's columns and append to the accumulator.
    # NOTE(review): rbind-in-a-loop is quadratic, but acceptable for
    # a fixed 38 iterations.
    rd_res <- as.data.frame(cbind(i, pos, tm, mt, wn, dw, ls, fa, df, lp))
    leaguetable <- rbind(leaguetable, rd_res)
  }
  # Full season-by-round league table.
  leaguetable
}
# Seasons to download.
season <- c("2010-2011", "2011-2012", "2012-2013", "2013-2014", "2014-2015", "2015-2016")

# Scrape each season in s with league_table() and write one CSV per
# season. Side-effect function (file output); returns nothing useful.
# s: character vector of season strings (default: a single season).
league_hist <- function(s = "2010-2011") {
  # seq_along() instead of 1:length(s): safe if s is ever empty.
  for (i in seq_along(s)) {
    res_df <- league_table(s[i])
    # Coerce every column to character so the CSV output is uniform.
    res_df <- data.frame(lapply(res_df, as.character), stringsAsFactors = FALSE)
    file_path <- paste("E://R/Football/Raw_data/", s[i], ".csv", sep = "")
    write.csv(res_df, file = file_path)
  }
}

# BUG FIX: the original line was `lapply(season, league_hist` with no
# closing parenthesis — a syntax error that prevented the file from
# parsing at all. invisible() suppresses the printed list of NULLs.
invisible(lapply(season, league_hist))
|
438789f89d592ff232fdbc5914f330d02cbda2ff
|
9598c94fe076830bfece6e366b87cff83c4c66b6
|
/shocks/Identification of govt spending shock ES 1980q1-2018q4.R
|
d0f4137766cfad86782cc2360e6f22d4472b4433
|
[] |
no_license
|
mdg9709/spilloversNL
|
1b8d836ad4f5d1145f6cc15345745358d5f15b24
|
04388fe2bfcf764ab4a256b456db3de00ba824f9
|
refs/heads/master
| 2022-11-18T22:37:24.704113
| 2020-07-17T17:07:07
| 2020-07-17T17:07:07
| 280,177,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,554
|
r
|
Identification of govt spending shock ES 1980q1-2018q4.R
|
###########################################################
# Identify government spending shock through VAR(4) model #
###########################################################
# Spain, 1980q1-2018q4
# Import Spanish fiscal data and other data
library(readxl)
fES <- read_excel("~/Studie/MSc ECO/Period 5-6 MSc thesis/Data/Main datasets/ABP_Fiscal_Database_July2019.xlsx",
sheet = "ES")
oES <- read_excel("~/Studie/MSc ECO/Period 5-6 MSc thesis/Data/Main datasets/ABP_other_variables.xlsx",
sheet = "ES", n_max = 157)
# Rename first column of both datasets to 'quarter'
library(dplyr)
fES1 <- fES %>%
rename(quarter = ...1,)
oES1 <- oES %>%
rename(quarter = ...1,)
# Add the 10-year interest rate to the fiscal dataset
fES2 <- fES1 %>%
mutate(intES = oES1$R)
# Create variables for real government spending & net tax revenues, and take logs
fES3 <- fES2 %>%
mutate(trES = TOR - (THN + SIN), gES = GCN + GIN) %>%
mutate(rtrES = trES/(P/100), rgES = gES/(P/100), ryES = Y/(P/100)) %>%
mutate(lrtrES = log(rtrES), lrgES = log(rgES), lryES = log(ryES), lPES = log(P))
# Create time series
library(zoo)
names(fES3)[1] <- "quarterES"
quarterES <- fES3$quarterES
fES4 <- fES3 %>%
mutate(quartersES = as.yearqtr(quarterES)) ## declare that the "quarter" column contains dates.
fES5 <- subset(fES4, select = -c(quarterES))
fES6 <- fES5[, c(24, 1:23)]
fESts <- ts(fES6, start=c(1980, 1), end=c(2018, 4), frequency=4) ## create time series
View(fESts)
# Create the log variables
lrtrES <- fESts[, "lrtrES"]
lrgES <- fESts[, "lrgES"]
lryES <- fESts[, "lryES"]
lPES <- fESts[, "lPES"]
intES <- fESts[, "intES"]
# KPSS tests for level and trend stationarity
library(tseries)
kpss.test(lrtrES)
kpss.test(lrtrES, null = "T")
kpss.test(lrgES)
kpss.test(lrgES, null = "T")
kpss.test(lryES)
kpss.test(lryES, null = "T")
kpss.test(lPES)
kpss.test(lPES, null = "T")
kpss.test(intES)
kpss.test(intES, null = "T")
# Create dataframe and time series for VAR model
d.lES <- data.frame(cbind(lrtrES, lrgES, lryES, lPES, intES))
d.lESts <- ts(d.lES, start=c(1980, 1), end=c(2018, 4), frequency=4)
q <- time(d.lESts)
ex <- I(q)^2
# Optimal lag length
library(vars)
VARselect(d.lESts, lag.max = 6, type = "both", exogen = ex)
# VAR model, which contains four lags, a constant and a trend (see Alloza et al.)
varES1 <- VAR(d.lESts, p = 4, type = "both", exogen = ex)
summary(varES1)
# Eigenvalues: stability of VAR process (stable if values < 1)
roots(varES1, modulus = TRUE)
# Test for serially correlated errors
serial.test(varES1, lags.pt = 16)
# Normality, multivariate skewness and kurtosis test
normality.test(varES1)
# Identify the structural government spending (lrg) shock
resES1 <- residuals(varES1)
resES2 <- ts(resES1, start=c(1980, 1), end=c(2018, 4), frequency=4)
# Obtain the residuals of lrg and lP - see Alloza et al. (2019)
res.lrgES <- subset(resES2, TRUE, lrgES, drop = FALSE)
res.lPES <- subset(resES2, TRUE, lPES, drop = FALSE)
# Compute structural lrg shock - see Alloza et al. (2019), page 4-5, Eq. 3 and footnote 11
shock.lrgES <- res.lrgES - (-0.5)*res.lPES
shock.lrgES
# Add structural shock vector to time series
fESts1 <- cbind(fESts, shock.lrgES)
colnames(fESts1) <- c("quarter", "TOR", "DTX", "SCT", "TIN", "TOE", "THN", "GCN", "COE", "SIN",
"GIN", "INP", "Y", "P", "intES", "trES", "gES", "rtrES", "rgES", "ryES",
"lrtrES", "lrgES", "lryES", "lPES", "shockES")
|
749bc1e5e506a8fee630ff4cac232e3f9be6747c
|
fa57e15d1a5a91acd42f01f4d0222cb291426e03
|
/MyFunctions.R
|
32e60eb86cdbfa164f2379d7637df6e364ecea6d
|
[] |
no_license
|
emdean99/Bio381Scripting
|
517649d3058a1fb4c6e07bae05affb1b1ff3b986
|
a444340a6d3c4146ff193009812c7e63473aec84
|
refs/heads/master
| 2023-04-07T05:52:41.914867
| 2021-04-27T20:54:17
| 2021-04-27T20:54:17
| 339,512,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,938
|
r
|
MyFunctions.R
|
# All function must be declared at the start
###########################
# FUNCTION: get_data
# read in .csv file
# Input: .csv file
# Output: data frame
#----------------------------
library(ggplot2)
# Read a comma-separated data file, or fabricate a small demo data
# frame (ID 101..110 plus two uniform-random columns) when no file
# name is supplied.
get_data <- function(file_name = NULL) {
  if (is.null(file_name)) {
    demo <- data.frame(
      ID = 101:110,
      varA = runif(10),
      varB = runif(10)
    )
    return(demo)
  }
  read.table(
    file = file_name,
    header = TRUE,
    sep = ",",
    comment.char = "#"
  )
}
###########################
# FUNCTION: calculate_stuff
# fit an ordinary least squares regression of y_var on x_var
# Input: x and y numeric vectors of the same length
# Output: summary() of the fitted lm model
#----------------------------
calculate_stuff <- function(x_var = runif(10),
                            y_var = runif(10)) {
  # Fit against an explicit data frame instead of the calling
  # environment (the original built data_frame but never used it).
  data_frame <- data.frame(x_var, y_var)
  reg_model <- lm(y_var ~ x_var, data = data_frame)
  summary(reg_model)
}
###########################
# FUNCTION: summarize_output
# pull elements from model summary list
# Input: list from summary call of lm
# Output: vector of regression residuals
#----------------------------
# Extract the residual vector from an lm summary object; when no
# summary is supplied, a random demo regression is fitted first.
summarize_output <- function(z = NULL) {
  if (is.null(z)) {
    z <- summary(lm(runif(10) ~ runif(10)))
  }
  z$residuals
}
###########################
# FUNCTION: graph_results
# one line description
# Input: X
# Output: X
#----------------------------
# Scatter plot of y_var against x_var with a fitted-lm smoother.
# Side-effect function: prints the ggplot and emits a console message;
# the last expression (message()) returns NULL invisibly.
# Requires ggplot2 to be ATTACHED, not just installed: geom_point and
# stat_smooth are called unqualified.
graph_results <- function(x_var=runif(10),
                          y_var=runif(10)) {
  data_frame <- data.frame(x_var,y_var)
  # Earlier qplot() version kept for reference:
  # p1 <- ggplot2::qplot(data=data_frame,
  #                      x=x_var,
  #                      y=y_var,
  #                      geom=c("smooth","point"))
  p1 <- ggplot2::ggplot(data_frame) +
    aes(x=x_var,y=y_var) +
    geom_point() +
    stat_smooth(method="lm")
  print(p1)
  message("Regression Graph Created")
}
|
8a11a7435d29e62af7a13e673b4a315a9cfd089a
|
e508870d7b82ca065aff9b7bf33bc34d5a6c0c1c
|
/pkg/man/fT.LandT.Rd
|
dcc9828977c258b9d0a727fe16afaee08dcc2fb9
|
[] |
no_license
|
Dong-po/SoilR-exp
|
596be0e6c5d291f00c6e08c348952ee23803e15e
|
c10d34e035deac8af4912c55382012dfc247eedc
|
refs/heads/master
| 2021-09-03T11:12:49.199268
| 2018-01-08T15:52:17
| 2018-01-08T15:52:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
rd
|
fT.LandT.Rd
|
\name{fT.LandT}
\alias{fT.LandT}
\usage{fT.LandT(Temp)}
\arguments{
\item{Temp}{A scalar or vector containing values of soil temperature for which the effects on decomposition rates are calculated}
}
\description{Calculates the effects of temperature on decomposition rates according to a function proposed by Lloyd and Taylor (1994).}
\references{Lloyd, J., and J. A. Taylor (1994), On the Temperature Dependence of Soil Respiration,
Functional Ecology, 8(3), 315-323.}
\title{Effects of temperature on decomposition rates according to a function proposed by Lloyd and Taylor (1994)}
\value{A scalar or a vector containing the effects of temperature on decomposition rates (unitless).}
|
01573d2c393e2fefb8f83cdef6b83fd3e76e7558
|
720e2039cc492ee7299b096ecd3d556900c1b565
|
/1b.sRoot_Extraction.RES.PSF.R
|
fa168d5e6831a5e5775e4608a44b393ce23c29d1
|
[] |
no_license
|
ggpmrutten/linkingRES-PSF
|
061165ad981b7018a802f183a7a5e66df612a154
|
df6943a2101c1c0f69c47948ed8d19e7e7c9aa73
|
refs/heads/master
| 2023-04-10T15:09:48.909719
| 2023-02-10T10:48:13
| 2023-02-10T10:48:13
| 596,455,574
| 0
| 0
| null | 2023-02-10T09:53:47
| 2023-02-02T08:13:11
|
R
|
UTF-8
|
R
| false
| false
| 9,522
|
r
|
1b.sRoot_Extraction.RES.PSF.R
|
## Collects root traits from the database based on species list
## also adds phylogenetic tree and a figure
##
## Project: Review Plant & Soil
##
## by Gemma Rutten (gemma.rutten@unibe.ch)
## Last Edited February 23
##
##
### Used data from Petermann et al 2008 and Bennett et al
## clean working space
cat("\014")
rm(list=ls())
## load packages
library(vegan)
library(ggplot2)
library(grid)
library(gridExtra)
library(ape)
library(geiger)
library(tidyverse)
library(phytools)
library(RColorBrewer)
## functions
# Scale a numeric vector by its root-mean-square about zero, using an
# n - 1 denominator (i.e. divide x by sqrt(sum(x^2) / (n - 1))).
mansca <- function(x) {
  rms <- sqrt(sum(x^2) / (length(x) - 1))
  x / rms
}
## Read sRoot modified root traits for which we have phylogeny
load("Data/sRoot.traits.Rdata") #cleaned according to Bergmann et al from sRoot traits
summary(spectrait)# 568 species
str(spectrait)
## Read pruned tree for those species
tree.prp <- read.tree("Data/sRoot_species.tre")
length(tree.prp$tip.label) # 568 species
# See which species don't match original species list and phylogeny tips
setdiff(tree.prp$tip.label, spectrait$full_species) # should be character(0)
setdiff(spectrait$full_species, tree.prp$tip.label) # should be character(0)
## Read species lists and PSF data
species.RES.pet <-read.csv2("Data/linking.RES.PSF.SpeciesList.pet.csv")# Petermann 24
species.RES.pet$full_species <- gsub(" ", "_", species.RES.pet$species.full)
species.RES.ben<-read.csv2("Data/linking.RES.PSF.SpeciesList.ben.csv")# Bennett 44
species.RES.ben$full_species <- gsub(" ", "_", species.RES.ben$species.full)
#spectrait[grep("Melilotus", spectrait$full_species), c("full_species")]
# bind species lists together
species.RES<-rbind(species.RES.pet, species.RES.ben)#68
### now start links to traits, selecting complete observations and links to phylogeny
# merge with trait data
traits.RES<-merge(species.RES, spectrait, by="full_species", all.x=T)
summary(traits.RES)# 68 species, 33 NA's
# remove species without trait data drop NAs
traits.RES<-traits.RES %>%
drop_na("Root_diameter_corrected")%>%
drop_na("rootN_corrected")%>%
drop_na("RTD_corrected")%>%
drop_na("SRL_corrected")
str(traits.RES)## for 35 species these traits are in the database
## subset phylo tree
# matrix with species names as rows
species.names <- as.matrix(cbind(
full_species=traits.RES$full_species ,
species.full=traits.RES$species.full))
dimnames(species.names)[[1]] <- species.names[,1]
str(species.names)##35
## check if names in tree and data are same
comb<-geiger::name.check(tree.prp,species.names)
## remove tips from tree
tree.RES<-drop.tip(tree.prp, comb$tree_not_data)
comb$data_not_tree
geiger::name.check(tree.RES,species.names)
length(tree.RES$tip.label)#35
str(species.names)#35
## save pruned tree
write.tree(tree.RES,file="Data/treeRES.tre")
## phylogenetically corrected PCA
## make sure names are the same
setdiff(tree.RES$tip.label, traits.RES$full_species) # should be character(0)
setdiff(traits.RES$full_species, tree.RES$tip.label) # should be character(0)
rownames(traits.RES) <- traits.RES$full_species
# PCA with phy tree
phylpca <- phytools::phyl.pca(tree.RES, traits.RES[,4:7], mode="corr", method="lambda")
summary(phylpca)
print(phylpca)
# save PCA axes loadings
traits.RES$PCA1<-phylpca$S[,1];traits.RES$PCA1rev<--phylpca$S[,1];traits.RES$PCA2<-phylpca$S[,2]##
## clean levels of growthform
traits.RES$growthForm<- gsub("shrub/tree", "tree", traits.RES$growthForm)
traits.RES[traits.RES$nitrogenFixationNodDB=="YES",]$growthForm<- gsub("herb", "legume", traits.RES[traits.RES$nitrogenFixationNodDB=="YES",]$growthForm)
traits.RES$micro<-as.factor(paste(traits.RES$growthForm,traits.RES$mycorrhizalAssociationTypeFungalRoot, sep="_"))
table(as.factor(traits.RES$growthForm))
table(as.factor(traits.RES$micro))# less traits for AMF trees than EMF and 4,4,5 for g,h,l
## save data file
save (traits.RES, file = "Data/traits.RES.benpet.Rdata")## all together
## correlations
names(traits.RES)
summary(lm(PCA1 ~ Root_diameter_corrected, data=traits.RES))#R2 adj 0.8116, p-value: 1.013e-13
summary(lm(PCA2 ~ Root_diameter_corrected, data=traits.RES))#R2 adj 0.2917 , p-value: 0.0004814
summary(lm(PCA1 ~ SRL_corrected, data=traits.RES))#R2 adj 0.9369, p-value: < 2.2e-16
summary(lm(PCA2 ~ SRL_corrected, data=traits.RES))#R2 adj 0.1, p-value: 0.03599
summary(lm(PCA1 ~ rootN_corrected, data=traits.RES))#R2 adj -0.03021, p-value: NS
summary(lm(PCA2 ~ rootN_corrected, data=traits.RES))#R2 adj 0.4588, p-value: 4.724e-06
summary(lm(PCA1 ~ RTD_corrected, data=traits.RES))#R2 adj -0.01814 , p-value: NS
summary(lm(PCA2 ~ RTD_corrected, data=traits.RES))#R2 adj 0.463 , p-value: 4.144e-06
###### Figure: All Species in RES #####
yGem<-c("#FE9929")
oGem<-c("#CC4C02")
rGem<-c("#662506")# col2rgb(rGem)
gemma<-c(16,17)
gems<-c("#35978F","#807DBA")#colors for symbols
summary(phylpca)
## first calculate vectors
# fit $ loadings is fit1 $ rotation is phy.pca$L
# fit $ scores is fit1 $ x is phy.pca$S
datapc <- data.frame(varnames=rownames(phylpca$L), phylpca$L)#rotation
mult <- min(
(max(phylpca$S[,2]) - min(phylpca$S[,2])/(max(phylpca$L[,2])-min(phylpca$L[,2]))),
(max(phylpca$S[,1]) - min(phylpca$S[,1])/(max(phylpca$L[,1])-min(phylpca$L[,1])))
)
datapc1 <- transform(datapc,
v1 = .7 * mult * (get("PC1")),
v2 = .7 * mult * (get("PC2")))
datapc1$lab<-c("D","SRL","RTD","N")
# save figure
tiff('Plots/Together.in.traitspaceCorrected.tiff',
width = 20, height = 20, units = "cm", res = 400 , pointsize= 15,bg="transparent")
par(mar = c(4, 4, 1, 1),xpd = T)
#par(mar=c(3,3,3,3))
#plot(-phylpca$S[order(row.names(phylpca$S)),1], phylpca$S[order(row.names(phylpca$S)),2], pch=19, col="gray",
# xlab=paste0("PC1"," (", round(0.4913097 ,2),")"), ylab=paste0("PC2"," (", round(0.2680714 ,2),")"),
# main=NA, xlim=c(-50,50),ylim=c(-50,50) ,cex=0.4)
#points(phylpca$S[which(species.means.4$mycorrhizalAssociationTypeFungalRoot=="ErM"),1],
# -phylpca$S[which(species.means.4$mycorrhizalAssociationTypeFungalRoot=="ErM"),2],
# pch=20, col="grey80",cex=1)
plot(traits.RES$PCA1rev,traits.RES$PCA2, pch=gemma[droplevels(as.factor(traits.RES$mycorrhizalAssociationTypeFungalRoot))],
col=gems[droplevels(as.factor(traits.RES$woodiness))],
xlim=c(-50,50),ylim=c(-50,50),
xlab=paste0("PC1"," (", round(0.4913097 ,2),")"),
ylab=paste0("PC2"," (", round(0.2680714 ,2),")"))
#abline(h=0, col="darkgray",xpd = F); abline(v=0,col="darkgray",xpd = F)
polygon(c(53.65,-53.65,-53.65,53.65),c(0,0,53.65,53.65), density=30, angle=45, col= rgb(102,37,6, max=255, alpha = 51))#pathogen
polygon(c(-53.65,-53.65,0,0),c(53.65,-53.65,-53.65,53.65),density=30, angle=-45, col= rgb(254, 153, 41, max = 255, alpha = 51))#mutualists
text(traits.RES$PCA1rev,traits.RES$PCA2, font=3, col="black",
labels = traits.RES$species.full,pos=1, offset = 0.15, cex=.5)
arrows(0,0,-datapc1$v1[1],datapc1$v2[1],col="black",length = 0.05, angle = 35)
arrows(0,0,-datapc1$v1[2],datapc1$v2[2],col="black",length = 0.05, angle = 35)
arrows(0,0,-datapc1$v1[3],datapc1$v2[3],col="black",length = 0.05, angle = 35)
arrows(0,0,-datapc1$v1[4],datapc1$v2[4],col="black",length = 0.05, angle = 35)
text(-datapc1$v1,datapc1$v2,pos=c(2,4,1,3),offset =.5, font=1, labels = datapc1$lab, cex=.8, col="black")
text(-40,33,'outsourcer-fast', col=oGem, cex=1)
text(40,33,'DIY-fast', col=rGem,cex=1)
text(-40,-33,'outsourcer-slow',col=yGem, cex=1)
text(40,-33, 'DIY-slow', col=c("darkgray"),cex=1)
legend("topright",inset = c( -0, 0),
paste0(levels(droplevels(as.factor(traits.RES$woodiness)))," (",table(traits.RES$woodiness), ")"),
cex=1, pch=16,col=gems, bty="n" )
legend("bottomright",inset = c( -0, 0),
paste0(levels(droplevels(as.factor(traits.RES$mycorrhizalAssociationTypeFungalRoot))),
" (",table(droplevels(as.factor(traits.RES$mycorrhizalAssociationTypeFungalRoot))), ")"),
cex=1, pch=gemma, bty="n")
dev.off()
# save figure
tiff('Plots/concept.traitspace.tiff',
width = 20, height = 20, units = "cm", res = 400 , pointsize= 15,bg="transparent")
par(mar = c(3, 3, 1, 1),xpd = T)
plot(traits.RES$PCA1rev,traits.RES$PCA2, col="white",
xlim=c(-50,50),ylim=c(-50,50),
xlab="", ylab="")
abline(h=0, col="darkgray",xpd = F); abline(v=0,col="darkgray",xpd = F)
polygon(c(53.65,-53.65,-53.65,53.65),c(0,0,53.65,53.65), density=30, angle=45, col= rgb(102,37,6, max=255, alpha = 51))#pathogen
polygon(c(-53.65,-53.65,0,0),c(53.65,-53.65,-53.65,53.65),density=30, angle=-45, col= rgb(254, 153, 41, max = 255, alpha = 51))#mutualists
#text(-traits.RES$PCA1,traits.RES$PCA2, font=3,
# labels = traits.RES$species.full,pos=1, offset = 0.1, cex=.5)
arrows(0,0,-datapc1$v1[1],datapc1$v2[1],col="black",length = 0.05, angle = 35, lwd=2)
arrows(0,0,-datapc1$v1[2],datapc1$v2[2],col="black",length = 0.05, angle = 35, lwd=2)
arrows(0,0,-datapc1$v1[3],datapc1$v2[3],col="black",length = 0.05, angle = 35, lwd=2)
arrows(0,0,-datapc1$v1[4],datapc1$v2[4],col="black",length = 0.05, angle = 35, lwd=2)
text(-datapc1$v1,datapc1$v2,pos=c(2,4,1,3),offset =.5, font=1, labels = datapc1$lab, cex=.8, col="black")
text(-40,33,'outsourcer-fast', col=oGem, cex=1)
text(40,33,'DIY-fast', col=rGem,cex=1)
text(-40,-33,'outsourcer-slow',col=yGem, cex=1)
text(40,-33, 'DIY-slow', col=c("darkgray"),cex=1)
dev.off()
# fast fig to check independent of reversed order of the PCA1
p.pca<-phyl.pca(tree.RES,traits.RES[,4:7],method="lambda", mode="corr")
summary(p.pca)
biplot(p.pca)
|
14213139de53cd844ac90656f2782d8af8fcf918
|
999390ceca09f02cb2b10e9c1489591c2da69ee3
|
/RScript/IM_Replace/script.R
|
9fa4ce992ad8c9e8240ee055ec575dc15fab6d30
|
[] |
no_license
|
Benjit87/SAP_Predictive_Analysis
|
c20244c6592e64a289517f49752750716c0c1831
|
7766172024062f419ae7c954ba8b64300449e25b
|
refs/heads/master
| 2021-03-12T23:50:24.193483
| 2014-10-03T11:02:44
| 2014-10-03T11:02:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
script.R
|
# Impute missing values in a single column of a data.frame with a fixed
# replacement value.
#
# data    - a data.frame
# col     - column name or index of the column to impute
# replace - value substituted for each NA in that column
#
# Returns a list with `out` (the data.frame after imputation) and
# `model` (the imputed column as a vector).
IM_Replace <- function(data, col, replace) {
  column <- data[, col]
  column[is.na(column)] <- replace
  data[, col] <- column
  list(out = data, model = data[, col])
}
|
f478c2dbd45bfc91f31bcdc31c6058334a157c60
|
d380feade427b7b16b0d4e6b5d953c858e0d2c4c
|
/R/find potential BOY re-tests.r
|
9cf1bdd20dd333fa0f653cec1f86d2aa835c3d8c
|
[] |
no_license
|
amcox/star
|
3d5588ec7170cf690cba2c2e44bf632fc0ddcf5e
|
f1eaf596f4175924829cd16bad6eed3502a6ca1c
|
refs/heads/master
| 2020-06-04T07:37:33.777105
| 2015-06-28T16:36:17
| 2015-06-28T16:36:17
| 24,352,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 895
|
r
|
find potential BOY re-tests.r
|
library(plyr)
library(dplyr)

# Source every helper script in functions/ without permanently changing the
# working directory. Fix: on.exit() now restores the original directory even
# if a source() call errors (the previous version would leave the session
# stranded inside functions/ on failure).
update_functions <- function() {
  old.wd <- getwd()
  on.exit(setwd(old.wd), add = TRUE)
  setwd("functions")
  invisible(lapply(list.files(), source))
}
update_functions()

# Current beginning-of-year (BOY) STAR results joined with PowerSchool data.
# load_star_and_ps_data() is defined in functions/ — confirm its output
# contains the columns selected below.
df <- load_star_and_ps_data()
df <- df[, c('StudentId', 'GE', 'subject', 'last.name', 'first.name', 'grade', 'school')]
names(df)[names(df) == 'GE'] <- 'current.boy.ge'

# Prior-year STAR summary: last actual/modeled scores and modeled
# end-of-year score per student and subject.
d.old <- load_old_star_summary()
d.old <- d.old[, c('subject', 'StudentId', 'last.date', 'last.actual', 'last.modeled', 'eoy.modeled')]

# Difference between the current BOY score and each prior-year reference,
# plus absolute versions for flagging potential re-tests.
d <- merge(df, d.old)
d <- d %>% mutate(last.actual.dif = current.boy.ge - last.actual,
                  last.modeled.dif = current.boy.ge - last.modeled,
                  eoy.modeled.dif = current.boy.ge - eoy.modeled
                  )
d <- d %>% mutate(last.actual.dif.abs = abs(last.actual.dif),
                  last.modeled.dif.abs = abs(last.modeled.dif),
                  eoy.modeled.dif.abs = abs(eoy.modeled.dif)
                  )
save_df_as_csv(d, 'STAR Difference Between 13-14 EOY and 14-15 BOY')
|
129a59fcd2a1f70a4265f4aba55e55daeda74de7
|
d4a3833b07bf9458f1faf2d02e670e66e4756e0b
|
/TimeLineAnalysis/code/ProcessTLD3.R
|
ed25a403e6a1297d3fb5e004e44372bc054b72a3
|
[] |
no_license
|
Uliege/R
|
08d8b5d31787f4031af364709d990ef304288be6
|
d2e1da210ce1ab5612ec57e0db863e738d45d196
|
refs/heads/master
| 2021-09-10T23:26:14.254575
| 2021-08-30T09:48:09
| 2021-08-30T09:48:09
| 241,374,139
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 9,161
|
r
|
ProcessTLD3.R
|
# Script to generate standard deviational ellipse (SDE) and standard distance
# deviation (SDD) outputs (aspace package) from raw GPS .RData files, and to
# plot each file's GPS point track on a map of Quito at year / week / day
# granularity. Progress is appended to a log file.
library(jsonlite)
library(aspace)
library(dplyr)
library(ggmap)
# Project working directory; the loop below calls setwd(), so the script
# assumes it is launched from the project root
urlDefault = getwd()
# Directory containing the raw .RData files
urlRawData = paste(urlDefault,"/rawdata/",sep = "")
# Directory where the generated figures and aspace text outputs are stored
urlFigures = paste(urlDefault,"/figures1/",sep = "")
# Load the Quito background map (provides roadMapQuito used by ggmap below)
fileMaps = "SmartGPSMaps.RData"
load(file=paste(urlRawData,fileMaps,sep = ""))
# Base name of the raw files ("tld<k>.RData")
rawName = "tld"
# Date selection for the analysis - data available up to 2019-11-18
aYear=2019
aWeek=46 #November 11-17
aWeedDay=4 #Wednesday (NOTE(review): name is presumably a typo for aWeekDay)
# Minimum number of GPS records required to run the spatial analysis
minPoints = 10
# File receiving the execution log
fileLog = "log1.txt"
# Open the log file in append mode
con <- file(fileLog, open="a")
# Record the start time of the run
writeLines(text = as.character(Sys.time()), con = con)
# Main loop over raw file indices (tld141..tld143); adjust the range to
# process other files. Log messages below are intentionally kept in Spanish.
for(k in 141:143) {
  #k=7
  msg = k
  print(msg)
  writeLines(text = as.character(msg), con = con)
  # Read the raw file for this index (loads dataSource and dataQuito)
  rawNumber = k
  fileRawData = paste(rawName,rawNumber,".RData",sep="")
  load(file=paste(urlRawData,fileRawData,sep = ""))
  # Subset to the analysis year, week, and week-day
  dataQuitoYear = filter(dataQuito, dataQuito$Y == aYear)
  dataQuitoWeek = filter(dataQuito, dataQuito$Y == aYear & dataQuito$W == aWeek)
  dataQuitoWeekDay = filter(dataQuito, dataQuito$Y == aYear & dataQuito$W == aWeek & dataQuito$Wd == aWeedDay)
  if(nrow(dataQuitoWeek) < minPoints){
    ## Not enough records for the selected week - skip this file
    msg = paste(" El archivo ",k," tiene poca o no tiene información en la semana seleccionada",sep = "")
    print(msg)
    writeLines(text = msg, con = con)
    next()
  }else {
    # Write figures and aspace outputs into the figures directory
    setwd(urlFigures)
    # --- Year-level data ---
    nameChart = paste("MapYear-",rawName,rawNumber,".jpeg",sep = "")
    ggmap(roadMapQuito) +
      geom_point(data = dataQuitoYear,
                 aes(x = dataQuitoYear$longitude[], y = dataQuitoYear$latitude[]),
                 alpha = .5,
                 color="darkred",
                 size = .001) +
      ggtitle(paste("GPS Point Track Year - ",rawName,rawNumber,".json",sep = "")) +
      xlab("Longitude") +
      ylab("Latitude")
    ggsave(filename = paste(urlFigures,nameChart,sep = ""))
    graphics.off()
    nameChart = paste("Year-",rawName,rawNumber,sep = "")
    # NOTE(review): points=...[,3:2] assumes columns 3 and 2 are
    # (longitude, latitude) - confirm against the raw data layout
    calc_sde(id=nameChart, filename = paste("sdeloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
             calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoYear[,3:2], verbose=FALSE)
    write.table(sdeatt, sep = ",", file = paste("sdeatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
    plot_sde(plotnew=TRUE, plotSDEaxes=TRUE,
             plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
             plotpoints=TRUE, points.col='green', points.pch=1,
             plotcentre=TRUE, centre.col='red', centre.pch=19,
             titletxt=paste("SDE Year ",rawName,rawNumber,".json",sep = ""),
             xaxis="Longitude", yaxis="Latitude", sde.col='red', sde.lwd=2,
             jpeg=TRUE)
    graphics.off()
    calc_sdd(id=nameChart, filename = paste("sddloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
             calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoYear[,3:2], verbose=FALSE)
    write.table(sddatt, sep = ",", file = paste("sddatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
    plot_sdd(plotnew=TRUE, plothv=TRUE,
             plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
             plotpoints=TRUE, points.col='green', points.pch=1,
             plotcentre=TRUE, centre.col='red', centre.pch=19,
             titletxt=paste("SDD Year ",rawName,rawNumber,".json",sep = ""),
             xaxis="Longitude", yaxis="Latitude",
             jpeg = TRUE)
    graphics.off()
    # --- Week-level data ---
    nameChart = paste("MapWeek-",rawName,rawNumber,".jpeg",sep = "")
    ggmap(roadMapQuito) +
      geom_point(data = dataQuitoWeek,
                 aes(x = dataQuitoWeek$longitude[], y = dataQuitoWeek$latitude[]),
                 alpha = .5,
                 color="darkred",
                 size = .0001) +
      ggtitle(paste("GPS Point Track Week - ",rawName,rawNumber,".json",sep = "")) +
      xlab("Longitude") +
      ylab("Latitude")
    ggsave(filename = paste(urlFigures,nameChart,sep = ""))
    graphics.off()
    nameChart = paste("Week-",rawName,rawNumber,sep = "")
    calc_sde(id=nameChart, filename = paste("sdeloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
             calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoWeek[,3:2], verbose=FALSE)
    write.table(sdeatt, sep = ",", file = paste("sdeatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
    plot_sde(plotnew=TRUE, plotSDEaxes=TRUE,
             plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
             plotpoints=TRUE, points.col='green', points.pch=1,
             plotcentre=TRUE, centre.col='red', centre.pch=19,
             titletxt=paste("SDE Week ",rawName,rawNumber,".json",sep = ""),
             xaxis="Longitude", yaxis="Latitude", sde.col='red', sde.lwd=2,
             jpeg=TRUE)
    graphics.off()
    calc_sdd(id=nameChart, filename = paste("sddloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
             calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoWeek[,3:2], verbose=FALSE)
    write.table(sddatt, sep = ",", file = paste("sddatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
    plot_sdd(plotnew=TRUE, plothv=TRUE,
             plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
             plotpoints=TRUE, points.col='green', points.pch=1,
             plotcentre=TRUE, centre.col='red', centre.pch=19,
             titletxt=paste("SDD Week ",rawName,rawNumber,".json",sep = ""),
             xaxis="Longitude", yaxis="Latitude",
             jpeg = TRUE)
    graphics.off()
    if(nrow(dataQuitoWeekDay) < minPoints){
      ## Not enough records for the selected week-day - skip day-level output
      msg = paste(" El archivo ",k," tiene poca o no tiene información en el día indicado",sep = "")
      print(msg)
      writeLines(text = msg, con = con)
    }else {
      # --- Day-level data ---
      nameChart = paste("MapDay-",rawName,rawNumber,".jpeg",sep = "")
      ggmap(roadMapQuito) +
        geom_point(data = dataQuitoWeekDay,
                   aes(x = dataQuitoWeekDay$longitude[], y = dataQuitoWeekDay$latitude[]),
                   alpha = .5,
                   color="darkred",
                   size = .0001) +
        ggtitle(paste("GPS Point Track WeekDay - ",rawName,rawNumber,".json",sep = "")) +
        xlab("Longitude") +
        ylab("Latitude")
      ggsave(filename = paste(urlFigures,nameChart,sep = ""))
      graphics.off()
      nameChart = paste("Day-",rawName,rawNumber,sep = "")
      calc_sde(id=nameChart, filename = paste("sdeloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
               calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoWeekDay[,3:2], verbose=FALSE)
      write.table(sdeatt, sep = ",", file = paste("sdeatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
      plot_sde(plotnew=TRUE, plotSDEaxes=TRUE,
               plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
               plotpoints=TRUE, points.col='green', points.pch=1,
               plotcentre=TRUE, centre.col='red', centre.pch=19,
               titletxt=paste("SDE WeekDay ",rawName,rawNumber,".json",sep = ""),
               xaxis="Longitude", yaxis="Latitude", sde.col='red', sde.lwd=2,
               jpeg=TRUE)
      graphics.off()
      calc_sdd(id=nameChart, filename = paste("sddloc",nameChart,"_Output.txt",sep=""), centre.xy=NULL,
               calccentre=TRUE, weighted=FALSE, weights=NULL, points=dataQuitoWeekDay[,3:2], verbose=FALSE)
      write.table(sddatt, sep = ",", file = paste("sddatt",nameChart,"_Output.txt",sep=""), col.names = TRUE)
      plot_sdd(plotnew=TRUE, plothv=TRUE,
               plotweightedpts=TRUE, weightedpts.col='blue', weightedpts.pch=19,
               plotpoints=TRUE, points.col='green', points.pch=1,
               plotcentre=TRUE, centre.col='red', centre.pch=19,
               titletxt=paste("SDD WeekDay ",rawName,rawNumber,".json",sep = ""),
               xaxis="Longitude", yaxis="Latitude",
               jpeg = TRUE)
      graphics.off()
    }
    # Return to the default working directory before the next iteration
    setwd(urlDefault)
    msg = paste(" Archivo ",k," OK!!",sep = "")
    print(msg)
    writeLines(text = msg, con = con)
    rm(dataSource, dataQuito, dataQuitoYear, dataQuitoWeek, dataQuitoWeekDay)
  }
}
# Record the end time of the run
writeLines(text = as.character(Sys.time()), con = con)
# Close the log connection
close(con)
# Clear the console (Windows-only shell command)
shell("cls")
# Remove all workspace variables
rm(list=ls())
print("Fin procesamiento... ")
|
df46184a27e2e1e2ef0737cc4077a336d4034dce
|
98c29220391a8fc864ba394536c6cde766dc8ecd
|
/dynamic_eqtl_calling/run_gaussian_dynamic_qtl.R
|
40bdb7e42a31d6f90d3b12408b589a0c55e0d13e
|
[] |
no_license
|
BennyStrobes/ipsc_cardiomyocyte_differentiation
|
175d2a86b07e6027a343b79376a07eba7941607a
|
6f6ac227df5f7ea2cc9e89563447d429aae2eeb5
|
refs/heads/master
| 2021-07-11T07:09:15.169745
| 2020-07-02T15:37:54
| 2020-07-02T15:37:54
| 156,638,838
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,603
|
r
|
run_gaussian_dynamic_qtl.R
|
args = commandArgs(trailingOnly=TRUE)
#library(lme4)
#library(lmtest)
# Fit a dynamic-eQTL model and test the genotype x time interaction term.
#
# model_version - "glm" (plain lm; Wald p-value of the interaction, which is
#                 coefficient row 4), "glmm" (lme4::lmer with a random
#                 intercept per cell line; likelihood-ratio p-value), or
#                 "glm_quadratic" (lm with quadratic time; lmtest::lrtest
#                 p-value).
# Returns a list with `coef` (comma-separated coefficient estimates of the
# full model) and `pvalue` for the interaction.
run_linear_model <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines) {
  if (model_version == "glm") {
    est <- summary(lm(expr ~ genotype + time_steps + genotype_interaction:time_steps_interaction))$coefficients
    interaction_pvalue <- est[4, 4]  # interaction is the 4th coefficient row
    coef_out <- paste(est[, 1], collapse = ',')
  } else if (model_version == "glmm") {
    full_fit <- lmer(expr ~ genotype + time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
    null_fit <- lmer(expr ~ genotype + time_steps + (1|cell_lines), REML=FALSE)
    est <- data.frame(coef(summary(full_fit)))
    coef_out <- paste(est[, 1], collapse = ',')
    interaction_pvalue <- anova(null_fit, full_fit)[[8]][2]  # LRT p-value
  } else if (model_version == "glm_quadratic") {
    squared_time_steps <- time_steps * time_steps
    squared_time_steps_interaction <- time_steps_interaction * time_steps_interaction
    full_fit <- lm(expr ~ genotype + time_steps + squared_time_steps + genotype_interaction:time_steps_interaction + genotype_interaction:squared_time_steps_interaction)
    null_fit <- lm(expr ~ genotype + time_steps + squared_time_steps)
    coef_out <- paste(summary(full_fit)$coefficients[, 1], collapse = ',')
    interaction_pvalue <- lrtest(null_fit, full_fit)[[5]][2]
  }
  list(coef = coef_out, pvalue = interaction_pvalue)
}
# Dynamic-eQTL interaction test with one covariate (also interacted with
# time): does genotype x time explain expression beyond genotype, time,
# the covariate, and covariate x time?
#
# model_version - "glm" (lm; Wald p-value at coefficient row 6),
#                 "glmm" (lmer, random intercept per cell line; LRT p-value),
#                 "glmm_time" (lmer, random intercept per time point; LRT).
# Returns list(coef, pvalue). For "glm"/"glmm" `coef` is the full
# comma-separated coefficient vector; for "glmm_time" it is only the
# genotype:time fixed-effect estimate.
run_linear_model_one_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1) {
  if (model_version == "glm") {
    est <- summary(lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + genotype_interaction:time_steps_interaction))$coefficients
    interaction_pvalue <- est[6, 4]
    coef_out <- paste(est[, 1], collapse = ',')
  } else if (model_version == "glmm") {
    full_fit <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
    null_fit <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + (1|cell_lines), REML=FALSE)
    est <- data.frame(coef(summary(full_fit)))
    coef_out <- paste(est[, 1], collapse = ',')
    interaction_pvalue <- anova(null_fit, full_fit)[[8]][2]
  } else if (model_version == "glmm_time") {
    full_fit <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
    null_fit <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + (1|factor(time_steps)), REML=FALSE)
    est <- data.frame(coef(summary(full_fit)))
    coef_out <- est[6, 1]  # genotype:time fixed-effect estimate only
    interaction_pvalue <- anova(null_fit, full_fit)[[8]][2]
  }
  list(coef = coef_out, pvalue = interaction_pvalue)
}
# Dynamic-eQTL interaction test with two covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 8 for "glm", likelihood-ratio test
# (full vs. null without the interaction) for the mixed models.
# NOTE(review): "glmm"/"glmm_time" call lme4::lmer, but library(lme4) is
# commented out at the top of this file - confirm the caller loads it.
run_linear_model_two_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[8,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[8,1] # genotype:time fixed-effect estimate only
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with three covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 10 for "glm", likelihood-ratio test
# (full vs. null without the interaction) for the mixed models.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_three_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[10,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[10,1] # genotype:time fixed-effect estimate only
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with four covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 12 for "glm", likelihood-ratio test
# (full vs. null without the interaction) for the mixed models.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_four_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[12,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[12,1] # genotype:time fixed-effect estimate only
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with five covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term. Branches:
#   "glm"           - lm; Wald p-value at coefficient row 14.
#   "glmm"          - lme4::lmer, random intercept per cell line; LRT p.
#   "glmm_time"     - lmer, random intercept per time point; LRT p; coef is
#                     the interaction estimate only (fixed-effect row 14).
#   "glm_quadratic" - lm with quadratic time; lmtest::lrtest p-value.
#   "anova"         - aov on factor-coded genotype/time; coef is the
#                     interaction F statistic (not an estimate).
# NOTE(review): lme4 / lmtest library() calls are commented out at the top
# of the file - confirm the caller loads them before using those branches.
run_linear_model_five_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[14,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[14,1] # genotype:time fixed-effect estimate only
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glm_quadratic") {
		squared_time_steps <- time_steps*time_steps
		squared_time_steps_interaction <- time_steps_interaction*time_steps_interaction
		fit_full <- lm(expr ~ genotype + time_steps + squared_time_steps + cov1 + cov1:time_steps + cov1:squared_time_steps + cov2 + cov2:time_steps + cov2:squared_time_steps + cov3 + cov3:time_steps + cov3:squared_time_steps + cov4 + cov4:time_steps + cov4:squared_time_steps + cov5 + cov5:time_steps + cov5:squared_time_steps + genotype_interaction:time_steps_interaction + genotype_interaction:squared_time_steps_interaction)
		fit_null <- lm(expr ~ genotype + time_steps + squared_time_steps + cov1 + cov1:time_steps + cov1:squared_time_steps + cov2 + cov2:time_steps + cov2:squared_time_steps + cov3 + cov3:time_steps + cov3:squared_time_steps + cov4 + cov4:time_steps + cov4:squared_time_steps + cov5 + cov5:time_steps + cov5:squared_time_steps)
		# lrt <- anova(fit_null, fit_full)
		# pvalue2 <- lrt[[6]][2]
		coef <- paste(summary(fit_full)$coefficients[,1],collapse=',')
		obj <- lrtest(fit_null, fit_full)
		pvalue <- obj[[5]][2]
	} else if (model_version == "anova") {
		genotype <- factor(genotype)
		time_steps <- factor(time_steps)
		genotype <- factor(genotype) # NOTE(review): redundant second conversion
		genotype_interaction <- factor(genotype_interaction)
		time_steps_factor <- factor(time_steps)
		time_steps_interaction <- factor(time_steps_interaction)
		fit <- aov(expr ~ genotype + time_steps_factor + genotype_interaction:time_steps_interaction)
		coef <- summary(fit)[[1]][["F value"]][3] # interaction F statistic
		pvalue <- summary(fit)[[1]][["Pr(>F)"]][3]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with six covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 16 for "glm", likelihood-ratio test
# (full vs. null without the interaction) for the mixed models.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_six_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5, cov6) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[16,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[16,1] # genotype:time fixed-effect estimate only
		#tvalue <- coefs[4,3]
		#pvalue <- 2*pt(-abs(tvalue),df=num_samp-1)
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with seven covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 18 for "glm"; likelihood-ratio test
# (full vs. null without the interaction) for the mixed models, where
# "glmm_time" reports only the interaction estimate (fixed-effect row 18).
# Fix: the "glmm_time" branch previously called lm() with the lmer random
# effect term (1|factor(time_steps)) - lm() silently evaluates `|` as
# logical OR and fits a wrong model - and passed REML= to lm(); it now uses
# lmer() like the other *_cov functions.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_seven_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5, cov6, cov7) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[18,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[18,1] # genotype:time fixed-effect estimate only
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with eight covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 20 for "glm"; likelihood-ratio test
# for the mixed models, where "glmm_time" reports only the interaction
# estimate (fixed-effect row 20).
# Fixes (copy-paste errors from the seven-covariate version):
#   - "glmm"/"glmm_time" formulas previously omitted cov8 entirely;
#   - "glmm_time" called lm() with an lmer random-effect term and REML=;
#   - "glmm_time" extracted coefficient row 18 instead of 20.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_eight_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5, cov6, cov7,cov8) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[20,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[20,1] # genotype:time fixed-effect estimate only
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with nine covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 22 for "glm"; likelihood-ratio test
# for the mixed models, where "glmm_time" reports only the interaction
# estimate (fixed-effect row 22).
# Fixes (copy-paste errors from the seven-covariate version):
#   - "glmm"/"glmm_time" formulas previously omitted cov8 and cov9;
#   - "glmm_time" called lm() with an lmer random-effect term and REML=;
#   - "glmm_time" extracted coefficient row 18 instead of 22.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_nine_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5, cov6, cov7,cov8,cov9) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[22,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[22,1] # genotype:time fixed-effect estimate only
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
# Dynamic-eQTL interaction test with ten covariates (each also interacted
# with time). Returns list(coef, pvalue); pvalue tests the genotype x time
# term: Wald p-value at coefficient row 24 for "glm"; likelihood-ratio test
# for the mixed models, where "glmm_time" reports only the interaction
# estimate (fixed-effect row 24).
# Fixes (copy-paste errors from the seven-covariate version):
#   - "glmm"/"glmm_time" formulas previously omitted cov8, cov9, and cov10;
#   - "glmm_time" called lm() with an lmer random-effect term and REML=;
#   - "glmm_time" extracted coefficient row 18 instead of 24.
# NOTE(review): "glmm"/"glmm_time" require lme4::lmer to be loaded.
run_linear_model_ten_cov <- function(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, cov1, cov2, cov3, cov4, cov5, cov6, cov7,cov8,cov9,cov10) {
	# Run the models
	if (model_version == "glm") { # Regular linear model
		fit <- lm(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + cov10 + cov10:time_steps + genotype_interaction:time_steps_interaction)
		pvalue <- summary(fit)$coefficients[24,4]
		coef <- paste(summary(fit)$coefficients[,1],collapse=',')
	} else if (model_version == "glmm") { # Linear mixed model (random intercept per cell line)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + cov10 + cov10:time_steps + genotype_interaction:time_steps_interaction + (1|cell_lines), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + cov10 + cov10:time_steps + (1|cell_lines), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- paste(coefs[,1],collapse=',')
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	} else if (model_version == "glmm_time") { # Linear mixed model (random intercept per time point)
		fit_full <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + cov10 + cov10:time_steps + genotype_interaction:time_steps_interaction + (1|factor(time_steps)), REML=FALSE)
		fit_null <- lmer(expr ~ genotype + time_steps + cov1 + cov1:time_steps + cov2 + cov2:time_steps + cov3 + cov3:time_steps + cov4 + cov4:time_steps + cov5 + cov5:time_steps + cov6 + cov6:time_steps + cov7 + cov7:time_steps + cov8 + cov8:time_steps + cov9 + cov9:time_steps + cov10 + cov10:time_steps + (1|factor(time_steps)), REML=FALSE)
		coefs <- data.frame(coef(summary(fit_full)))
		coef <- coefs[24,1] # genotype:time fixed-effect estimate only
		lrt <- anova(fit_null,fit_full)
		pvalue <- lrt[[8]][2]
	}
	return(list(coef=coef, pvalue=pvalue))
}
null_response <- function() {
  # Fallback result used when model fitting fails: zero effect, p-value 1.
  list(coef = 0, pvalue = 1)
}
# Streaming driver: read one dynamic-eQTL test per input line, fit the
# configured interaction model for the slice of lines assigned to this job,
# and write one tab-separated result line (variant id, gene id, coef,
# p-value) per test into output_file.
# NOTE(review): `args` is assumed to come from commandArgs() earlier in this
# file -- confirm.
#####################
# Command line args
#####################
input_data_file = args[1]
output_file = args[2]
model_version = args[3]
permute = args[4]
covariate_method = args[5]
job_number = as.numeric(args[6])
num_jobs = as.numeric(args[7])
num_lines_string = args[8]
# Extract total number of lines in file
total_lines = as.numeric(strsplit(num_lines_string," ")[[1]][1])
# Determine number of lines each parrallelized job will complete
lines_per_job = ceiling(total_lines/num_jobs)
# Half-open slice [start_num, end_num) of input lines handled by this job
# (job_number is assumed zero-based, given start_num = job_number * size).
start_num = job_number*lines_per_job
end_num = (job_number + 1)*lines_per_job
# Stream input file
stop = FALSE
count = 0
f = file(input_data_file, "r")
next_line = readLines(f, n = 1)
# Everything cat()'d inside the loop is redirected into output_file.
sink(output_file)
while(!stop) {
# Only consider lines between start_num and end_num (for parallelization purposes)
if (count >= start_num & count < end_num) {
# Parse the line
# Tab-separated fields; fields 3-16 are semicolon-separated per-sample
# vectors (time, genotype, expression, cell line, then 10 PCs).
data = strsplit(next_line,'\t')[[1]]
rs_id = data[1]
ensamble_id = data[2]
time_steps = as.numeric(strsplit(data[3],';')[[1]])
genotype = as.numeric(strsplit(data[4],';')[[1]])
expr = as.numeric(strsplit(data[5],';')[[1]])
cell_lines = as.factor(strsplit(data[6],';')[[1]])
num_samp = length(genotype)
# Separate copies of time/genotype used only for the interaction term so
# that permutation (below) can shuffle the interaction without touching
# the main effects.
time_steps_interaction = as.numeric(strsplit(data[3],';')[[1]])
genotype_interaction = as.numeric(strsplit(data[4],';')[[1]])
pc1 = as.numeric(strsplit(data[7],';')[[1]])
pc2 = as.numeric(strsplit(data[8],';')[[1]])
pc3 = as.numeric(strsplit(data[9],';')[[1]])
pc4 = as.numeric(strsplit(data[10],';')[[1]])
pc5 = as.numeric(strsplit(data[11],';')[[1]])
pc6 = as.numeric(strsplit(data[12],';')[[1]])
pc7 = as.numeric(strsplit(data[13],';')[[1]])
pc8 = as.numeric(strsplit(data[14],';')[[1]])
pc9 = as.numeric(strsplit(data[15],';')[[1]])
pc10 = as.numeric(strsplit(data[16],';')[[1]])
# Permute the data
if (permute == "True") {
time_steps_interaction <- sample(time_steps_interaction)
}
# Run the model in try-catch statement
# covariate_method selects how many expression PCs are included; each
# run_linear_model_* variant is defined earlier in this file.
tryCatch(
{
if (covariate_method == "none") {
lm_results <- run_linear_model(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines)
} else if (covariate_method == "pc1") {
lm_results <- run_linear_model_one_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1)
} else if (covariate_method == "pc1_2") {
lm_results <- run_linear_model_two_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2)
} else if (covariate_method == "pc1_3") {
lm_results <- run_linear_model_three_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3)
} else if (covariate_method == "pc1_4") {
lm_results <- run_linear_model_four_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4)
} else if (covariate_method == "pc1_5") {
lm_results <- run_linear_model_five_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5)
} else if (covariate_method == "pc1_6") {
lm_results <- run_linear_model_six_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5, pc6)
} else if (covariate_method == "pc1_7") {
lm_results <- run_linear_model_seven_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5, pc6, pc7)
} else if (covariate_method == "pc1_8") {
lm_results <- run_linear_model_eight_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5, pc6, pc7, pc8)
} else if (covariate_method == "pc1_9") {
lm_results <- run_linear_model_nine_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5, pc6, pc7, pc8, pc9)
} else if (covariate_method == "pc1_10") {
lm_results <- run_linear_model_ten_cov(model_version, expr, genotype, genotype_interaction, time_steps, time_steps_interaction, cell_lines, pc1, pc2, pc3, pc4, pc5, pc6, pc7, pc8, pc9, pc10)
}
# print result to output file!!
new_line <- paste0(rs_id, "\t", ensamble_id ,"\t",lm_results$coef,"\t", lm_results$pvalue,"\n")
cat(new_line)
},
# On any model failure emit a null result (coef 0, p-value 1) so the
# output keeps one line per input test.
error = function(e){
new_line <- paste0(rs_id, "\t", ensamble_id,"\t",0.0,"\t", 1.0,"\n")
cat(new_line)
}
)
}
count = count + 1
next_line = readLines(f, n = 1)
if(length(next_line) == 0) {
stop = TRUE
close(f)
}
}
# close output file handle
sink()
|
3d10622693a56ca4875b83de5f237fd6ed1393db
|
313c7a941221562a88c60910d85e4f3e51401921
|
/Final_Outputs/WAFR_HealthWorkforcePlots.R
|
7caf7ebbb286a80cb1eec3e7b8551a81cda9b0a6
|
[
"MIT"
] |
permissive
|
flaneuse/West-Africa-2016
|
2f29b118f028c02985575c51fbdffc2b5d9b9475
|
dd48b691e1ab55c35a756f81893529fa94e73740
|
refs/heads/master
| 2020-04-06T07:03:26.547440
| 2016-11-23T17:49:54
| 2016-11-23T17:49:54
| 54,667,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,164
|
r
|
WAFR_HealthWorkforcePlots.R
|
# Dot plot of WHO health-workforce indicators for West African countries,
# faceted by country and ordered by value, saved as a wide PDF.
hlthwf <- read.csv("~/WAFR_HealthPAD/4.1_HealthWorkForce_WHO.csv")
setwd("~/WAFR_HealthPAD")
install.packages('tidyr')
# BUG FIX: the package name was garbled as 'library(RColorBrewer'; the
# package to install is RColorBrewer itself.
install.packages('RColorBrewer')
install.packages("ggplot2")
install.packages('dplyr')
library(ggplot2)
library(dplyr)
library(tidyr)
library(RColorBrewer)
# Earlier stacked-bar version, kept for reference:
#ggplot(hlthwf, aes(x = country, y = value, fill = factor(indicator)))+
#  geom_text(aes(label=value))+
#  geom_bar(stat="identity", position='fill')+
#  scale_fill_brewer(type = "div", palette = "Spectral")+
#  facet_wrap(~country)+
#  theme_bw()
# Build a country+indicator key and order its factor levels by value so the
# dots are drawn from smallest to largest within each facet.
hlthwf2 <- hlthwf %>% mutate(comb = paste0(country, indicator))
hlthorder <- hlthwf2 %>%
  arrange(value)
hlthwf2$comb <- factor(hlthwf2$comb,
                       levels = hlthorder$comb)
ggplot(hlthwf2, aes(x = value, y = comb, colour = value)) +
  geom_point(size = 6) +
  facet_wrap(~country, scales = "free", nrow = 3) +
  scale_x_continuous(breaks = seq(0, 0.8, by = 0.05)) +
  scale_colour_gradientn(colours = brewer.pal(6, 'YlGnBu')) +
  theme_bw()
# ggsave() writes the most recently drawn plot.
ggsave("WAFR_HealthWorkforce.pdf", width = 20, height = 5, units = c("in"),
       dpi = 300, limitsize = TRUE, useDingbats = FALSE, compress = FALSE)
|
fc4c19bbfa924eb4c356e6e852e4a83828e7b966
|
d8a61260cc584c8556890c1ad23009138dd43138
|
/scripts/explore_response_variables.R
|
4dba3e9cf513a548eaad6eb05d7fdc7e64bf13fa
|
[] |
no_license
|
taswegian/LakeAI
|
780fcd7cf87d4a95948564606ffa15e80e284072
|
281e1906e4927b53ade384cdd6d5480cdb5a1621
|
refs/heads/master
| 2020-03-10T07:51:35.128162
| 2019-03-06T20:13:13
| 2019-03-06T20:13:13
| 129,272,352
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,507
|
r
|
explore_response_variables.R
|
# Exploratory analysis of the LakeAI response variables: distributions,
# PCA, and three clustering approaches (PAM, hierarchical, model-based).
library(readr)
library(reshape2)
library(ggplot2)
library(cluster)
library(mclust)
### Data
response_variables <- read_csv("~/projects/LakeAI/data/response_variables.csv")
rvars <- response_variables
## Distributions
# Columns of interest; faceted histograms of each.
keep.cols <- c(2:8,12:14)
ggplot(melt(rvars[,keep.cols]), aes(value)) + geom_histogram() + facet_wrap( ~ variable, scales = "free")
# helper
# Row indices whose selected columns contain neither NA nor Inf.
not.naorinf <- function(df,cols){return(which(!(is.na(rowSums(df[,cols]))|is.infinite(rowSums(df[,cols])))))}
## PCA
rvars.pca <- prcomp(~., data=rvars[not.naorinf(rvars,keep.cols),keep.cols], center = TRUE, scale = TRUE, na.action = na.omit)
# NOTE(review): autoplot() on a prcomp object needs the ggfortify package,
# which is not loaded here -- confirm this runs as intended.
autoplot(rvars.pca,loadings = TRUE, loadings.colour = 'blue',
loadings.label = TRUE, loadings.label.size = 5)
rvars.pca$rotation
## Clustering
# dist mat
# Gower dissimilarity on the complete rows of the selected columns.
rvars.dist <- daisy(rvars[not.naorinf(rvars,keep.cols),keep.cols], metric = "gower")
# PAM
# Average silhouette width for k = 2..5 to pick the cluster count.
sil_width <- c()
invisible(sapply(2:5,function(i){pam_fit <- pam(rvars.dist,diss = TRUE,k = i); sil_width[i] <<- pam_fit$silinfo$avg.width; }))
plot(sil_width,xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(sil_width)
fit <- pam(rvars.dist,diss = TRUE,k = 2)
# HCLUST
# Note: `fit` is reused for each clustering method below.
fit <- hclust(rvars.dist, method="ward.D")
plot(fit)
groups <- cutree(fit, k=2)
rect.hclust(fit, k=2, border="red")
# MCLUST
fit <- Mclust(rvars[not.naorinf(rvars,keep.cols),keep.cols])
plot(fit)
summary(fit)
# plot clusts
# NOTE(review): Mclust results store labels in $classification;
# fit$cluster looks like it would be NULL here -- confirm.
clusplot(rvars[not.naorinf(rvars,keep.cols),keep.cols], fit$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
|
12ab33b64126a0da99e78495432a305429fedb9a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/event/examples/bp.Rd.R
|
23f7734fa41e2c1a10fd85ec48555aa9c0dc3b30
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
bp.Rd.R
|
library(event)
### Name: bp
### Title: Create a Vector of Cumulative Numbers of Previous Events for a
###   Point Process (Birth Processes)
### Aliases: bp
### Keywords: manip
### ** Examples
# Event counts per observation (y) and the individual each observation
# belongs to (i): two observations each for individuals 1 and 2.
y <- c(5,3,2,4)
i <- c(1,1,2,2)
# Per the title above, bp() builds the cumulative number of previous
# events for each observation -- see ?event::bp for details.
birth <- bp(y, i)
# Auto-print the result.
birth
|
7d3edc80234494b0e56bac4b5f081fb66d3166da
|
24c02fa976057d7bf44a960db688689b0ceb8112
|
/plot3.R
|
264072d97adfddbda12d9c48f754565746986c8c
|
[] |
no_license
|
Orsb/ExData_Plotting1
|
64db1629286bcfd639259cc062501e9d57a727d2
|
617ea3d2fc455ed01c03ff66364e9e4c827ac2f6
|
refs/heads/master
| 2020-12-26T20:11:32.395559
| 2014-11-05T11:23:33
| 2014-11-05T11:23:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 944
|
r
|
plot3.R
|
# Plot 3: energy sub-metering channels 1-3 over 1-2 Feb 2007 from the UCI
# "Individual household electric power consumption" data set; saves plot3.png.

# Read just the header row to recover the column names.
labels <- read.table("data/household_power_consumption.txt", header = TRUE, sep = ";", nrows = 1)
names <- c(colnames(labels))

# Stream the file and keep only rows dated 1/2/2007 or 2/2/2007.
# BUG FIX: the original pattern "^[1,2]/2/2007" used a character class that
# also matches a literal comma; "[12]" matches exactly the digits 1 and 2.
f <- file("data/household_power_consumption.txt", "r")
data <- read.table(text = grep("^[12]/2/2007", readLines(f), value = TRUE), header = FALSE, sep = ";", col.names = names)
close(f)

# Combine date and time into a single POSIXct timestamp column.
data$Date_Time <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
# Force English weekday labels on the time axis.
Sys.setlocale("LC_TIME", "en_US.UTF-8")

x11(width = 10, height = 8, bg = "white")
plot(data$Date_Time, data$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(data$Date_Time, data$Sub_metering_2, type = "l", col = "red")
lines(data$Date_Time, data$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       text.width = 55000, y.intersp = 1.5)
# NOTE(review): dev.copy(png, ...) uses png's default 480x480 size, not the
# 10x8in x11 window above -- confirm this matches the intended output.
dev.copy(png, file = "plot3.png")
dev.off()
|
9641fd71aaf1a5d4a423294555a8bcfa756818c3
|
4bea6ae8b55c6acf0b9a27942329baa444a53097
|
/Rcodes/3-Building single-cross Hybrids.R
|
031de40d9ae9ff57a1a28cf3a13dd28681854d8d
|
[] |
no_license
|
xiahui625649/PlosOne_MVGBLUP_paper
|
331e1cdff68b2a6f1c62c5b1fac6080f68210117
|
f9c2579c91c168d2c1b88319a1eaa37568f8a518
|
refs/heads/master
| 2021-05-30T02:10:42.817512
| 2015-11-08T10:24:52
| 2015-11-08T10:24:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,945
|
r
|
3-Building single-cross Hybrids.R
|
############################################################################################################
######################################Building single-cross Hybrids#########################################
#Function to build the incidence matrix of the allele substitution effect
# Incidence matrix of allele substitution effects: centre each marker column
# of the dosage matrix K (coded 0/1/2) by twice its allele frequency, i.e.
# W[i, j] = K[i, j] - 2 * p_j with p_j = colMeans(K)[j] / 2.
Wa.vit <- function(K) {
  allele_freq <- colMeans(K) / 2
  # sweep() subtracts 2 * p_j from every entry of column j, which is what
  # the original achieved by building an explicit frequency matrix.
  sweep(K, 2, 2 * allele_freq, `-`)
}
#Function to build the incidence matrix of the dominance deviation effect
# Incidence matrix of dominance deviation effects. With allele frequency
# p_j computed from the raw (unrounded) dosages of column j, the rounded
# genotype calls are recoded as:
#   2 -> -2 * (1 - p_j)^2,  1 -> 2 * p_j * (1 - p_j),  0 -> -2 * p_j^2
# Entries that round to anything other than 0/1/2 keep their rounded value.
Wd.vit <- function(K) {
  p <- colMeans(K) / 2   # allele frequencies from the unrounded dosages
  q <- 1 - p
  G <- round(K, 0)       # hard genotype calls
  # Per-column replacement values expanded to the full matrix shape.
  hom_alt <- t(matrix(-2 * q^2, ncol(K), nrow(K)))
  het     <- t(matrix(2 * p * q, ncol(K), nrow(K)))
  hom_ref <- t(matrix(-2 * p^2, ncol(K), nrow(K)))
  Dv <- G
  Dv[G == 2] <- hom_alt[G == 2]
  Dv[G == 1] <- het[G == 1]
  Dv[G == 0] <- hom_ref[G == 0]
  Dv
}
##Simulating the single-cross hybrids genotypes:
##All the phenotypic and genotypic data processed of the inbred lines coded above used to obtain the results of the manuscript can be load here:
setwd("/home/jhonathan/Documentos/MVGBLUP/Data/")
load("data_inbreds.RData")
gh.id=read.table("heterotic_group.txt",h=T) #Inbred lines selected in the PCA analysis by visual graphical analysis of the inbred lines close to B73 and Mo17
id=as.matrix(data$`Inbred names`)
#Imputating molecular makers
require(rrBLUP)
Z.imp=as.numeric(Z)
Z.imp=matrix(Z.imp,nrow(Z),ncol(Z))
Z.imp=A.mat(Z.imp,min.MAF=F, max.missing=NULL,return.imputed=T)
Z.imp=matrix(round(Z.imp$imputed,2),nrow(Z),ncol(Z)) + 1 # Changing for the genotypic codification 2, 1 and 0
any(is.na(Z.imp)); Z=Z.imp; rm(Z.imp)
rownames(Z)<- data$`Inbred names` #Labeling the rows of the marker matrix
Z.m=Z[as.factor(gh.id$Grupo1),] # Separing he inbreds for the first hetoric group
Z.f=Z[as.factor(gh.id$Grupo2),]; rm(Z) # Separing he inbreds for the second hetoric group
Z.h=matrix(0,(nrow(Z.m)*nrow(Z.f)),ncol(Z.f)) # Matrix of zeros that will be used to receive the built hybrids markers
#The construction of the hybrids in a 20x20 partial diallel crosses design
Z.f=Z.f[rep(1:20,20),]
Z.m=Z.m[rep(1:20,rep(20,20)),]
#Artificial crosses (from code lines 30 to 73) between the two heterotic groups. It is based on the expectation operation described in the manuscript
# Each pair of lines below selects one combination of parental dosage
# classes (exact 0/1/2 or an imputed fractional dosage in between) and
# writes the expected hybrid dosage for those entries into Z.h.
index0<- (Z.m==2 & Z.f==2)
Z.h[index0]<-2
index0<- (Z.m==2 & Z.f==1)
Z.h[index0]<-0.5*2+0.5*1
index0<- (Z.m==1 & Z.f==2)
Z.h[index0]<-0.5*2+0.5*1
index0<- (Z.m==0 & Z.f==2)
Z.h[index0]<-1
index0<- (Z.m==2 & Z.f==0)
Z.h[index0]<-1
index0<- (Z.m==0 & Z.f==0)
Z.h[index0]<-0
index0<- (Z.m>0 & Z.m<1 & Z.f>0 & Z.f<1)
# BUG FIX: the line below contained pasted prose ("0imputing gbs from whole
# genome sequecing data.5") inside the second 0.5 literal, which made the
# whole file unparseable; restored the numeric constant 0.5.
Z.h[index0]<-(Z.m[index0]/1)*(0.5)*(Z.f[index0]/1)*0.5*2+((Z.m[index0]/1)*(0.5)*((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1))))*1+((Z.f[index0]/1)*(0.5)*((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1))))*1+(((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1)))*((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1))))*0
index0<- (Z.m>0 & Z.m<1 & Z.f>1 & Z.f<2)
Z.h[index0]<-(((Z.m[index0]/1)*(0.5))*((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*2)+(((Z.m[index0]/1)*(0.5))*(1-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5))*1)+(((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1)))*1)+((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1)))*(1-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5))*0
index0<- (Z.m>1 & Z.m<2 & Z.f>1 & Z.f<2)
Z.h[index0]<-(((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*2)+(((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*(1-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5))*1)+(((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*(1-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5))*1)+((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1)))*((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1)))*0
index0<- (Z.m>1 & Z.m<2 & Z.f>0 & Z.f<1)
Z.h[index0]<-(((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*((Z.f[index0]/1)*(0.5))*2)+(((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1)))*1)+((1-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5))*((Z.f[index0]/1)*(0.5))*1)+((1-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5))*((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1)))*0)
index0<- (Z.m>0 & Z.m<1 & Z.f==2)
Z.h[index0] <- ((Z.m[index0]/1)*(0.5))*2+((Z.m[index0]/1)*(0.5)+(1-(Z.m[index0]/1)))*1
index0<- (Z.m>1 & Z.m<2 & Z.f==2)
Z.h[index0] <- ((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*2+(1-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5))*1
index0<- (Z.m>0 & Z.m<1 & Z.f==1)
Z.h[index0]<-((Z.m[index0]/1)*(0.5))*0.5*2+((Z.m[index0]/1)*(0.5))*0.5*1+0.5*((Z.m[index0]/1)*(0.5)*1+(1-(Z.m[index0]/1)))*1
index0<- (Z.m>1 & Z.m<2 & Z.f==1)
Z.h[index0]<-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*0.5*2+((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*0.5*1+0.5*(1-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5))*1
index0<- (Z.m>0 & Z.m<1 & Z.f==0)
Z.h[index0]<-((Z.m[index0]/1)*(0.5))*1
index0<- (Z.m>1 & Z.m<2 & Z.f==0)
Z.h[index0]<-((Z.m[index0]/2)+(1-((Z.m[index0]/2)))*0.5)*1
index0<- (Z.m==2 & Z.f>0 & Z.f<1)
Z.h[index0]<- ((Z.f[index0]/1)*(0.5))*2+((Z.f[index0]/1)*(0.5)+(1-(Z.f[index0]/1)))*1
index0<- (Z.m==2 & Z.f>1 & Z.f<2)
Z.h[index0] <- ((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*2+(1-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5))*1
index0<- (Z.m==1 & Z.f>0 & Z.f<1)
Z.h[index0]<-((Z.f[index0]/1)*(0.5))*0.5*2+((Z.f[index0]/1)*(0.5))*0.5*1+0.5*((Z.f[index0]/1)*(0.5)*1+(1-(Z.f[index0]/1)))*1
index0<- (Z.m==1 & Z.f>1 & Z.f<2)
Z.h[index0]<-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*0.5*2+((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*0.5*1+0.5*(1-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5))*1
index0<- (Z.m==0 & Z.f>0 & Z.f<1)
Z.h[index0]<-((Z.f[index0]/1)*(0.5))*1
index0<- (Z.m==0 & Z.f>1 & Z.f<2)
Z.h[index0]<-((Z.f[index0]/2)+(1-((Z.f[index0]/2)))*0.5)*1
rm(gh.id,id,index0,Z.m,Z.f)
##Simulating the single-cross hybrids phenotypes in different heritabilities scenarios:
# The function below is used to deregress the marker effects due to the
# Bayes-B shrinkage (marker-specific variance) procedure.
correction_markers_g <- function (fmBB, hdj = 0.7, n_top = 100) {
  # Deregress BayesB marker effects.
  #
  # fmBB  : BayesB fit with ETA$Ad (additive) and ETA$Dom (dominance)
  #         components, each holding effects `b` and variances `varB`,
  #         plus the residual variance `varE`.
  # hdj   : reference heritability used in the deregression (0.7, the value
  #         hard-coded in the original analysis).
  # n_top : number of largest effects per component to deregress (100, as
  #         in the original analysis).
  #
  # Returns list(c = list(M), d = list(M)): one-column matrices with one
  # row per marker; rows outside the selected top sets are zero.
  # (Cleanup: dropped the original's unused var.am.t/var.dm.t assignments
  # and two discarded sum() calls.)
  top.a <- order(fmBB$ETA$Ad$b, decreasing = TRUE)[1:n_top]
  top.d <- order(fmBB$ETA$Dom$b, decreasing = TRUE)[1:n_top]
  ad <- fmBB$ETA$Ad$b[top.a]
  var.a <- fmBB$ETA$Ad$varB[top.a]
  dom <- fmBB$ETA$Dom$b[top.d]
  var.d <- fmBB$ETA$Dom$varB[top.d]
  # Leave-one-out correction factor for each selected additive effect.
  h.ca <- matrix(0, n_top, 1)
  for (i in seq_len(n_top)) {
    var.a.dif.i <- var.a[-c(i)]
    sum.var.a.dif.i <- sum(var.a.dif.i) + sum(var.d)
    var.a.t <- sum(var.a) + sum(var.d)
    sum.var.t <- sum(var.a) + sum(var.d) + fmBB$varE
    h.dif.i <- sum.var.a.dif.i / sum.var.t
    h.t <- var.a.t / sum.var.t
    h.ca[i, 1] <- hdj * (1 - (h.dif.i / h.t))
  }
  ad.new <- ad / h.ca
  # Scatter the corrected effects back into a genome-length vector.
  # (Generalized from the original hard-coded 27000 to the actual count.)
  c <- matrix(0, length(fmBB$ETA$Ad$b), 1)
  c[top.a, ] <- ad.new
  # Leave-one-out correction factor for each selected dominance effect.
  h.cd <- matrix(0, n_top, 1)
  for (i in seq_len(n_top)) {
    var.d.dif.i <- var.d[-c(i)]
    sum.var.d.dif.i <- sum(var.d.dif.i) + sum(var.a)
    sum.var.t <- sum(var.a) + sum(var.d) + fmBB$varE
    var.d.t <- sum(var.d) + sum(var.a)
    h.dif.i <- sum.var.d.dif.i / sum.var.t
    h.t <- var.d.t / sum.var.t
    h.cd[i, 1] <- hdj * (1 - (h.dif.i / h.t))
  }
  dom.new <- dom / h.cd
  d <- matrix(0, length(fmBB$ETA$Dom$b), 1)
  d[top.d, ] <- dom.new
  # Rescale both components by the summed per-marker variance ratios.
  ha <- sum((var.a + var.d) / (var.a + var.d + fmBB$varE))
  d <- d * ha
  c <- c * ha
  return(list(c = list(c), d = list(d)))
}
#Building the phenotypic data using as reference the 0.3 heritability scenario
# For each of the five traits (PH, EH, EL, ERN, KW): load its BayesB fit,
# deregress the marker effects, build parametric additive (a) and dominance
# (d) values for the hybrids from the 100 largest effects, and simulate
# phenotypes y = mu + a + d + e with Ve chosen so Vg / (Vg + Ve) = 0.3.
# Results are stored in globals y.h.<trait>.0.3 and comp.<trait>.0.3
# (the latter stacks Ve, Va, Vd, Vg, a, d).
for (i in 1:5) {
if (i==1) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_PH.RData") #Loading the results from the BayesB analysis
y=fmBB$y; mu_PH=as.numeric(fmBB$mu)
}
if (i==2) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EH.RData")
y=fmBB$y; mu_EH=as.numeric(fmBB$mu)
}
if (i==3) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EL.RData")
y=fmBB$y; mu_EL=as.numeric(fmBB$mu)
}
if (i==4) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_ERN.RData")
y=fmBB$y; mu_ERN=as.numeric(fmBB$mu)
}
if (i==5) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_KW.RData")
y=fmBB$y; mu_KW=as.numeric(fmBB$mu)
}
mar=correction_markers_g(fmBB) # Deregress markers effects
# `c` is reused as a data vector here; base::c is still found in call
# position later because R skips non-function bindings for calls.
c=unlist(mar$c);d=unlist(mar$d)
Z.h.a=Z.h[,(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])] #Separating the fraction of the genome with the 100 largest marker allele substitution effects
Z.h.d=Z.h[,(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])] #Separating the fraction of the genome with the 100 largest marker dominance effects
Wa=Wa.vit(Z.h.a)
Wd=Wd.vit(Z.h.d)
c=c[(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])] #Separating the 100 largest additive effects
d=d[(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])] #Separating the 100 largest dominant effects
a <- c
a <- Wa%*%a #Obtaining the parametric alelle substitution effects of the constructed single-cross hybrids
a.p=a
Va=var(a) #Parametric additive genetic variance
d <- Wd%*%d #Obtaining the parametric dominance deviations of the constructed single-cross hybrids
d.p=d
Vd=var(d) #Parametric dominance genetic variance
Vg=Va+Vd #Parametric total genetic variance
g=a+d
h2 <- 0.3 #heritability
Ve=(1-h2)/h2*Vg #Adjusting residual variance based on the heritability
y <- fmBB$mu + a + d + rnorm(nrow(Z.h),mean=0,sd=sqrt((1-h2)/h2*Vg)) #Simulating phenotypic effects
comp=as.matrix(c(Ve,Va,Vd,Vg,a.p,d.p)) #saving parametric componentes
if (i==1) {
y.h.PH.0.3=y; comp.PH.0.3=comp
}
if (i==2) {
y.h.EH.0.3=y; comp.EH.0.3=comp
}
if (i==3) {
y.h.EL.0.3=y; comp.EL.0.3=comp
}
if (i==4) {
y.h.ERN.0.3=y; comp.ERN.0.3=comp
}
if (i==5) {
y.h.KW.0.3=y; comp.KW.0.3=comp
}
}
#Building the phenotypic data using as reference the 0.5 heritability scenario
#The next three iterative processes below is the same as described above, but for the 0.5; 0,7 and historical heritability scenarios
# Same pipeline as the 0.3 loop; outputs y.h.<trait>.0.5 / comp.<trait>.0.5.
for (i in 1:5) {
if (i==1) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_PH.RData")
y=fmBB$y; mu_PH=as.numeric(fmBB$mu)
}
if (i==2) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EH.RData")
y=fmBB$y; mu_EH=as.numeric(fmBB$mu)
}
if (i==3) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EL.RData")
y=fmBB$y; mu_EL=as.numeric(fmBB$mu)
}
if (i==4) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_ERN.RData")
y=fmBB$y; mu_ERN=as.numeric(fmBB$mu)
}
if (i==5) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_KW.RData")
y=fmBB$y; mu_KW=as.numeric(fmBB$mu)
}
mar=correction_markers_g(fmBB)
c=unlist(mar$c);d=unlist(mar$d)
Z.h.a=Z.h[,(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
Z.h.d=Z.h[,(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
Wa=Wa.vit(Z.h.a)
Wd=Wd.vit(Z.h.d)
c=c[(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
d=d[(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
a <- c
a <- Wa%*%a
a.p=a
Va=var(a)
d <- Wd%*%d
d.p=d
Vd=var(d)
Vg=Va+Vd
g=a+d
h2 <- 0.5 #heritability
Ve=(1-h2)/h2*Vg
y <- fmBB$mu + a + d + rnorm(nrow(Z.h),mean=0,sd=sqrt((1-h2)/h2*Vg))
comp=as.matrix(c(Ve,Va,Vd,Vg,a.p,d.p))
if (i==1) {
y.h.PH.0.5=y; comp.PH.0.5=comp
}
if (i==2) {
y.h.EH.0.5=y; comp.EH.0.5=comp
}
if (i==3) {
y.h.EL.0.5=y; comp.EL.0.5=comp
}
if (i==4) {
y.h.ERN.0.5=y; comp.ERN.0.5=comp
}
if (i==5) {
y.h.KW.0.5=y; comp.KW.0.5=comp
}
}
#Building the phenotypic data using as reference the 0.7 heritability scenario
# Same pipeline as the 0.3 loop; outputs y.h.<trait>.0.7 / comp.<trait>.0.7.
for (i in 1:5) {
if (i==1) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_PH.RData")
y=fmBB$y; mu_PH=as.numeric(fmBB$mu)
}
if (i==2) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EH.RData")
y=fmBB$y; mu_EH=as.numeric(fmBB$mu)
}
if (i==3) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EL.RData")
y=fmBB$y; mu_EL=as.numeric(fmBB$mu)
}
if (i==4) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_ERN.RData")
y=fmBB$y; mu_ERN=as.numeric(fmBB$mu)
}
if (i==5) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_KW.RData")
y=fmBB$y; mu_KW=as.numeric(fmBB$mu)
}
mar=correction_markers_g(fmBB)
c=unlist(mar$c);d=unlist(mar$d)
Z.h.a=Z.h[,(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
Z.h.d=Z.h[,(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
Wa=Wa.vit(Z.h.a)
Wd=Wd.vit(Z.h.d)
c=c[(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
d=d[(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
a <- c
a <- Wa%*%a
a.p=a
Va=var(a)
d <- Wd%*%d
d.p=d
Vd=var(d)
Vg=Va+Vd
g=a+d
h2 <- 0.7 #heritability
Ve=(1-h2)/h2*Vg
y <- fmBB$mu + a + d + rnorm(nrow(Z.h),mean=0,sd=sqrt((1-h2)/h2*Vg))
comp=as.matrix(c(Ve,Va,Vd,Vg,a.p,d.p))
if (i==1) {
y.h.PH.0.7=y; comp.PH.0.7=comp
}
if (i==2) {
y.h.EH.0.7=y; comp.EH.0.7=comp
}
if (i==3) {
y.h.EL.0.7=y; comp.EL.0.7=comp
}
if (i==4) {
y.h.ERN.0.7=y; comp.ERN.0.7=comp
}
if (i==5) {
y.h.KW.0.7=y; comp.KW.0.7=comp
}
}
#Building the phenotypic data using as reference the historical heritability scenario
# Same pipeline, but each trait gets its own (historical) heritability value
# assigned in the per-trait branch below; outputs y.h.<trait>.hist /
# comp.<trait>.hist. The final rm()/save() persist all simulated hybrid
# data into data_hybrids.RData.
for (i in 1:5) {
if (i==1) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_PH.RData")
y=fmBB$y
h2 <- 0.569; mu_PH=as.numeric(fmBB$mu)
}
if (i==2) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EH.RData")
y=fmBB$y
h2 <- 0.662; mu_EH=as.numeric(fmBB$mu)
}
if (i==3) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_EL.RData")
y=fmBB$y
h2 <- 0.381; mu_EL=as.numeric(fmBB$mu)
}
if (i==4) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_ERN.RData")
y=fmBB$y
h2 <- 0.57; mu_ERN=as.numeric(fmBB$mu)
}
if (i==5) {
load("/home/jhonathan/Documentos/MVGBLUP/Data/fmBB_KW.RData")
y=fmBB$y
h2 <- 0.418; mu_KW=as.numeric(fmBB$mu)
}
mar=correction_markers_g(fmBB)
c=unlist(mar$c);d=unlist(mar$d)
Z.h.a=Z.h[,(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
Z.h.d=Z.h[,(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
Wa=Wa.vit(Z.h.a)
Wd=Wd.vit(Z.h.d)
c=c[(order(fmBB$ETA$Ad$b,decreasing=T)[1:100])]
d=d[(order(fmBB$ETA$Dom$b,decreasing=T)[1:100])]
a <- c
a <- Wa%*%a
a.p=a
Va=var(a)
d <- Wd%*%d
d.p=d
Vd=var(d)
Vg=Va+Vd
g=a+d
Ve=(1-h2)/h2*Vg
y <- fmBB$mu + a + d + rnorm(nrow(Z.h),mean=0,sd=sqrt((1-h2)/h2*Vg))
comp=as.matrix(c(Ve,Va,Vd,Vg,a.p,d.p))
if (i==1) {
y.h.PH.hist=y; comp.PH.hist=comp
}
if (i==2) {
y.h.EH.hist=y; comp.EH.hist=comp
}
if (i==3) {
y.h.EL.hist=y; comp.EL.hist=comp
}
if (i==4) {
y.h.ERN.hist=y; comp.ERN.hist=comp
}
if (i==5) {
y.h.KW.hist=y; comp.KW.hist=comp
}
}
# Drop intermediates before saving so only the simulated outputs persist.
rm(Va,Vd,Ve,Vg,Wa,Wd,Z.h.a,Z.h.d,a,a.p,comp,d,d.p,g,y,c,fmBB,h2,i,mar,data,correction_markers_g,Wa.vit,Wd.vit)
save(list = ls(all = TRUE),file = "data_hybrids.RData") #Saving single-cross hybrids data
############################################################################################################
|
b9df529df6b36ba2e9209ebb83a685e8868d36fd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SemNetCleaner/examples/chn.let.Rd.R
|
e591b797a291ee84ca37efa81ff413c381a5a032
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 130
|
r
|
chn.let.Rd.R
|
library(SemNetCleaner)
### Name: chn.let
### Title: Change Letter
### Aliases: chn.let
### ** Examples
# Example from the package docs: applies chn.let ("Change Letter") to the
# string "bombae" -- see ?SemNetCleaner::chn.let for the exact behavior.
chn.let("bombae")
|
6827b5be6c5ce682ceadc16c8562851b1e23c61b
|
bf32fa0d8b4fe0d0a5fa0d7fbbdf471e1c4a5d34
|
/run_analysis.R
|
8194d6007fce1b1d5abc5ce517b1be2d542f81ff
|
[] |
no_license
|
thoferon/coursera-data-week4
|
6b74d4962bee113ef78d418904197eb780b7f819
|
a57a6db19e59e88207a90051a3f9db5088d7ffc6
|
refs/heads/master
| 2021-01-01T15:42:27.505805
| 2017-07-19T09:02:30
| 2017-07-19T09:02:30
| 97,678,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,940
|
r
|
run_analysis.R
|
library(reshape2)

# Build a combined data set from the UCI HAR accelerometer data: download
# and extract the archive if needed, load both splits, and stack them.

# Download and extract the raw data if not already done
if(!file.exists("data.zip"))
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                destfile = "data.zip", method = "curl")
if(!file.exists("UCI HAR Dataset")) unzip("data.zip")

# Load one split ("test" or "train"): keep only mean()/std() feature
# columns and attach the subject id and descriptive activity label.
# Renamed from `load` so the script no longer masks base::load().
load_har <- function(type) {
  rawColNames <- read.table("UCI HAR Dataset/features.txt")[[2]]
  xs <- read.table(paste0("UCI HAR Dataset/", type, "/X_", type, ".txt"),
                   col.names = rawColNames, check.names = FALSE)
  # Keep only measurements on the mean and standard deviation.
  colSelector <- grep("(mean|std)\\(\\)", rawColNames)
  xs <- xs[,colSelector]
  subjects <- read.table(paste0("UCI HAR Dataset/", type, "/subject_", type,
                                ".txt"),
                         col.names = c("subject"))
  activities <- read.table("UCI HAR Dataset/activity_labels.txt")[[2]]
  ys <- read.table(paste0("UCI HAR Dataset/", type, "/y_", type, ".txt"),
                   col.names = c("activity"))
  # Replace numeric activity codes with their descriptive labels.
  ys <- sapply(ys, function(i) activities[i])
  cbind(xs, subjects, ys)
}

# Load all the data
traindata <- load_har("train")
testdata <- load_har("test")
data <- rbind(traindata, testdata)
# Generate the averages
# Mean of every feature for each (subject, activity) pair, written as a
# tidy table (subject, activity, variable, average) to tidy.txt.
subjects <- unique(data$subject)
activities <- unique(data$activity)
colNames <- names(data)[1:(length(names(data))-2)]
# Collect one single-row data frame per combination in a preallocated list
# and bind once at the end: the original grew `averages` with rbind()
# inside the loop, which is O(n^2).
rows <- vector("list", length(subjects) * length(activities) * length(colNames))
k <- 0
for(subject in subjects) {
  for(activity in activities) {
    selector <- data$subject == subject & data$activity == activity
    subdata <- data[selector,]
    for(variable in colNames) {
      values <- subdata[[variable]]
      k <- k + 1
      rows[[k]] <- data.frame(subject = subject, activity = activity,
                              variable = variable, average = mean(values))
    }
  }
}
averages <- do.call(rbind, rows)
# Spelled out `row.names` (the original relied on partial matching of
# `row.name`).
write.table(averages, "tidy.txt", row.names = FALSE)
|
383b618834034c9818b5b01fe2c38a9bf8fe29ee
|
166c057e761772043e1e5de46d7c8bcc6b1ad8ba
|
/man/combineHurdleMI.Rd
|
5f5c49691667fbbb0f7b2bbda28e970640e1673f
|
[] |
no_license
|
stevenliaotw/sltools
|
b1604865b7048eec6cd567233cfb8c0515e23cb2
|
dbc2ed0e83fe7ab1fc5215896f66e7f91831823e
|
refs/heads/master
| 2021-01-10T21:20:44.542516
| 2018-08-09T17:43:43
| 2018-08-09T17:43:43
| 20,469,073
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
rd
|
combineHurdleMI.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/combineHurdleMI.R
\name{combineHurdleMI}
\alias{combineHurdleMI}
\title{Combine hurdle model results fitted to multiply imputed datasets}
\usage{
combineHurdleMI(fitted.obj = NULL)
}
\arguments{
\item{fitted.obj}{a list of hurdle fitted model outputs from the pscl package. For example, llply(data.mi$imputations, function(x) return(hurdle(y ~ x, dist = "negbin", zero.dist = "binomial", link = "logit", data = x)))}
}
\description{
This sltools function allows you to combine multiple imputation hurdle model results from the pscl package, extract results for the texreg package, and also output summary tables.
}
|
8d9e2120430d346ef9e2fdbde6fca889b3edb00e
|
88019e229593c66c7b85ae76f7ed73fb8b5ed31f
|
/make_pcoa.R
|
59571fcfe263a6a6dbcbea619caaa29267e3df2f
|
[] |
no_license
|
islandhopper81/pitcher_plant_utils
|
dddf2b354066bf1f28c4e84e79cfcf223d6bcefe
|
405771ae5da5e0c6aca027a21398a05778cae536
|
refs/heads/master
| 2020-12-02T17:47:22.590669
| 2017-07-06T14:38:15
| 2017-07-06T14:38:15
| 96,427,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,693
|
r
|
make_pcoa.R
|
#!/usr/bin/env Rscript
# (Shebang fixed: "evn" -> "env".)
# Draw a labelled PCoA scatter plot (PC1 vs PC2) of pitcher-plant samples,
# coloured by plant species, and save it to the file given by --out.
library(AMOR)
require("getopt", quietly = TRUE)
library(ggplot2)

### Default variables
verbose <- FALSE

### Parameter variables
# The params matrix
# Each row is a parameter with 4 columns:
#  1) long value,
#  2) short value,
#  3) argument type where 0 = no argument, 1 = required, 2 = optional
#  4) data type (logical, integer, double, complex, character)
# When you call this script the args are named like --verbose or -v
params <- matrix(c(
  "pcoa", "p", 1, "character",
  "meta", "m", 1, "character",
  "x_var", "x", 1, "double",
  "y_var", "y", 1, "double",
  "out", "o", 1, "character",
  "verbose", "v", 0, "logical"
), byrow = TRUE, ncol = 4)
opt <- getopt(params)

# define parameter specified variables
if (!is.null(opt$verbose)) {
  verbose <- opt$verbose
}

# read in the pcoa tbl and keep only PC 1 and 2
pcoa <- read.table(opt$pcoa, header = TRUE, row.names = 1, sep = "\t")
pcoa <- pcoa[, c(1, 2)]

# read in the metadata
meta <- read.table(opt$meta, header = TRUE, row.names = 1, sep = "\t")

# combine pcoa and meta by sample id (row names)
data <- merge(pcoa, meta, by.x = "row.names", by.y = "row.names")
#summary(data)

# make the figure; axis labels carry the percent variance explained passed
# in via --x_var / --y_var
ggplot(data, aes(x = X1, y = X2, color = Plant_species, label = Row.names)) +
  geom_point() +
  geom_text(hjust = 0, nudge_x = 0.005, show.legend = FALSE) +
  expand_limits(x = .4) +
  xlab(paste0("PCoA 1 (", opt$x_var, "%)")) +
  ylab(paste0("PCoA 2 (", opt$y_var, "%)")) +
  ggtitle("Pitcher Plant PCoA") +
  scale_color_discrete("Plant Species") +
  theme(legend.text = element_text(size = 16),
        legend.title = element_text(size = 18),
        axis.text = element_text(size = 16),
        axis.title = element_text(size = 18),
        plot.title = element_text(size = 20))
ggsave(opt$out)
|
8732f380cb9cfb235c4f5bd401d10eed1b082fef
|
8373e6ef368e531f335849ec766322fb5b37f0a3
|
/R/lungCancerRadon.R
|
0b1bf7c2de3a94bb02b8c9de4f95356e64017ad6
|
[] |
no_license
|
cran/msce
|
4ba9665ca10ced50eb25eb469448a34ee0a4ac26
|
5aee1de170fc9c85f6b7b5739438f9650689b08f
|
refs/heads/master
| 2023-01-07T21:33:15.003639
| 2020-10-29T14:10:02
| 2020-10-29T14:10:02
| 310,516,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 708
|
r
|
lungCancerRadon.R
|
#' Exemplary person year table
#'
#' Data set of fatal lung cancer in rats exposed to Radon at Pacific Northwest
#' National Laboratory. For each rat, the age at start of Radon exposure, the
#' age at end of exposure, and the age at end of follow-up/death are provided
#' in weeks. The Radon dose rate is given in WL (working level).
#'
#' @docType data
#'
#' @usage data(lungCancerRadon)
#'
#' @format Data frame with 5 variables.
#'
#' @keywords datasets
#'
#' @source Pacific Northwest National Laboratory, see
#' Heidenreich, W, Jacob P, Paretzke H., et al. (1999). Radiation research, 151 2, 209-17 DOI:10.2307/3579772.
#'
#' @examples
#' data(lungCancerRadon)
#' lungCancerRadon[1000,]
"lungCancerRadon"
|
dcc70f2697fa4b569aba3e0486482b7e53f42988
|
de7b5af9c415426ad5496ccf0b87f91b663cee29
|
/R/diag_function.R
|
6cafbdcb7ba63a5f85f3c3cdf626c21ef2cdb453
|
[
"MIT"
] |
permissive
|
rgriffier/statsBordeaux
|
bb465057eab0b41e6d1515f02cfd13adb8179ac8
|
ded4292fb43e2a959058267a1f707d25505c0b7d
|
refs/heads/master
| 2021-09-06T15:02:07.420504
| 2021-08-03T13:56:13
| 2021-08-03T13:56:13
| 178,895,755
| 2
| 0
| null | 2020-09-28T14:14:45
| 2019-04-01T15:45:33
|
R
|
UTF-8
|
R
| false
| false
| 12,004
|
r
|
diag_function.R
|
#' @title Get diagnostic performance evaluation of some qualitative procedure
#' @description Computes the 2x2 contingency table of an index test against a
#'   gold-standard procedure, plus the usual performance indicators
#'   (sensitivity, specificity, predictive values, likelihood ratios) with
#'   their 95% confidence intervals.
#' @param data a data.frame, containing the diagnostic result
#' @param goldStandard a character vector of length one, containing the colname
#' of the gold-standard procedure
#' @param indexTest a character vector of length one, containing the colname
#' of the index procedure
#' @param M_pos a character vector, containing the levels of positive result
#' with gold-standard procedure
#' @param T_pos a character vector, containing the levels of positive result
#' with index procedure
#' @param round an integer, number of maximal decimal. Default to 3
#' @return a list containing contingency table and performance evaluation as data.frame
#' @export
#' @import dplyr
#' @importFrom binom binom.exact
diag.perf <- function(data, goldStandard, indexTest, M_pos, T_pos, round = 3){
  ## ---- input validation ----
  ## Scalar conditions use ||, and the M_pos/T_pos checks are wrapped in all()
  ## because both are documented as (possibly multi-element) character vectors:
  ## a bare `if (!M_pos %in% ...)` errors on R >= 4.2 when length(M_pos) > 1.
  if(!is.data.frame(data)){
    stop("data must be a data.frame.")
  }
  if(!is.vector(goldStandard) || !is.character(goldStandard) || length(goldStandard) != 1){
    stop("goldStandard must be a character vector of length one.")
  }
  if(!goldStandard %in% colnames(data)){
    stop("goldStandard must be the name of a column in data.")
  }
  if(!is.vector(indexTest) || !is.character(indexTest) || length(indexTest) != 1){
    stop("indexTest must be a character vector of length one.")
  }
  if(!indexTest %in% colnames(data)){
    stop("indexTest must be the name of a column in data.")
  }
  if(!is.vector(M_pos) || !is.character(M_pos)){
    stop("M_pos must be a character vector.")
  }
  if(!all(M_pos %in% data[, goldStandard])){
    stop("M_pos must be one or more value of the gold-standard column")
  }
  if(!is.vector(T_pos) || !is.character(T_pos)){
    stop("T_pos must be a character vector.")
  }
  if(!all(T_pos %in% data[, indexTest])){
    stop("T_pos must be one or more value of the index-test column")
  }

  ## Exact (Clopper-Pearson) 95% CI for x successes out of n, clamped to [0, 1].
  prop_ci <- function(x, n) {
    ci <- binom::binom.exact(x = x, n = n)[c('lower', 'upper')]
    sapply(ci, function(v) min(max(v, 0), 1))
  }

  ## ---- recode gold standard (m) and index test (t) as 1/0 indicators ----
  dataPerf <- data %>%
    dplyr::select(dplyr::all_of(c(goldStandard, indexTest))) %>%
    dplyr::mutate(
      `__m__` = case_when(
        .data[[goldStandard]] %in% M_pos ~ 1,
        TRUE ~ 0
      ),
      `__t__` = case_when(
        .data[[indexTest]] %in% T_pos ~ 1,
        TRUE ~ 0
      )
    ) %>%
    dplyr::select(`__m__`, `__t__`)

  ## ---- 2x2 cell counts: true/false positives and negatives ----
  VP <- dataPerf %>% dplyr::filter(`__m__` == 1 & `__t__` == 1) %>% nrow()
  FN <- dataPerf %>% dplyr::filter(`__m__` == 1 & `__t__` == 0) %>% nrow()
  VN <- dataPerf %>% dplyr::filter(`__m__` == 0 & `__t__` == 0) %>% nrow()
  FP <- dataPerf %>% dplyr::filter(`__m__` == 0 & `__t__` == 1) %>% nrow()

  ## ---- contingency table (rows: test result, columns: disease status) ----
  contingencyTable <- data.frame(
    `M_pos` = c(VP, FN, VP + FN),
    `M_neg` = c(FP, VN, VN + FP),
    Total = c(VP + FP, FN + VN, VP + FN + VN + FP)
  )
  rownames(contingencyTable) <- c('T+', 'T-', 'Total')

  ## ---- point estimates and 95% CIs ----
  Se <- VP/(VP + FN)                      # sensitivity
  Se_confint <- prop_ci(VP, VP + FN)
  Sp <- VN/(VN + FP)                      # specificity
  Sp_confint <- prop_ci(VN, VN + FP)
  VPP <- VP/(VP + FP)                     # positive predictive value
  VPP_confint <- prop_ci(VP, VP + FP)
  VPN <- VN/(VN + FN)                     # negative predictive value
  VPN_confint <- prop_ci(VN, VN + FN)
  ## Likelihood ratios with log-normal CIs (Simel et al. formulation).
  RVP <- Se / (1-Sp)
  RVP.low <- exp(log(RVP) - qnorm(0.975, mean = 0, sd = 1) * sqrt((1 - Se)/((VP + FN) * Se) + (Sp)/((VN + FP) * (1 - Sp))))
  RVP.up <- exp(log(RVP) + qnorm(0.975, mean = 0, sd = 1) * sqrt((1 - Se)/((VP + FN) * Se) + (Sp)/((VN + FP) * (1 - Sp))))
  RVN <- (1-Se)/ Sp
  RVN.low <- exp(log(RVN) - qnorm(0.975, mean = 0, sd = 1) * sqrt((Se)/((VP + FN) * (1 - Se)) + (1 - Sp)/((VN + FP) * (Sp))))
  RVN.up <- exp(log(RVN) + qnorm(0.975, mean = 0, sd = 1) * sqrt((Se)/((VP + FN) * (1 - Se)) + (1 - Sp)/((VN + FP) * (Sp))))

  ## ---- format results as a display table ----
  perfResult <- data.frame(
    Parameter = c('Sensitivity', 'Specificity', 'Positive Predictive Value', 'Negative Predictive Value', 'Likelihood Ratio Positive', 'Likelihood Ratio Negative'),
    Value = c(
      format(round(Se, round), nsmall = round),
      format(round(Sp, round), nsmall = round),
      format(round(VPP, round), nsmall = round),
      format(round(VPN, round), nsmall = round),
      format(round(RVP, round), nsmall = round),
      format(round(RVN, round), nsmall = round)
    ),
    `CI95%` = c(
      paste0('[', paste0(format(round(Se_confint, round), nsmall = round), collapse = ' ; '), ']'),
      paste0('[', paste0(format(round(Sp_confint, round), nsmall = round), collapse = ' ; '), ']'),
      paste0('[', paste0(format(round(VPP_confint, round), nsmall = round), collapse = ' ; '), ']'),
      paste0('[', paste0(format(round(VPN_confint, round), nsmall = round), collapse = ' ; '), ']'),
      paste0('[', paste0(format(round(c(RVP.low, RVP.up), round), nsmall = round), collapse = ' ; '), ']'),
      paste0('[', paste0(format(round(c(RVN.low, RVN.up), round), nsmall = round), collapse = ' ; '), ']')
    )
  )
  ## Blank out rows whose estimate is undefined (0/0 -> "NaN" after format()).
  ## NB: data.frame() has mangled the `CI95%` column name to "CI95.".
  perfResult <- as.data.frame(t(apply(perfResult, 1, function(x){
    if(x['Value'] == 'NaN'){
      x['CI95.'] <- NA
      x['Value'] <- NA
    }
    return(x)
  })))

  return(list(contingencyTable, perfResult))
}
#' @title Display contingency table in some Rmarkdown document
#' @description Renders the contingency table produced by diag.perf() as an
#'   HTML table for inclusion in an Rmarkdown document.
#' @param diag.perf a list, result of diag.perf() function
#' @return a kableExtra table
#' @export
#' @importFrom kableExtra kable kable_styling column_spec
#' @import dplyr
#' @examples
diag.perf.getContingencyTable <- function(diag.perf){
  if (!is.list(diag.perf) || length(diag.perf) != 2) {
    stop("diag.perf must be generated by diag.perf() function")
  }
  # First element of the diag.perf() result holds the 2x2 table.
  contingency <- diag.perf[[1]]
  # Build the HTML table step by step (equivalent to the piped form).
  html_tbl <- kableExtra::kable(contingency, booktabs = TRUE, escape = FALSE,
                                format = "html",
                                col.names = c('M+', 'M-', 'Total'))
  html_tbl <- kableExtra::kable_styling(html_tbl,
                                        bootstrap_options = c('hover', 'condensed', 'responsive'),
                                        full_width = FALSE)
  html_tbl <- kableExtra::column_spec(html_tbl, column = 1, bold = TRUE)
  return(html_tbl)
}
#' @title Display performance table in some Rmarkdown document
#' @description Renders the performance-indicator table produced by
#'   diag.perf() as an HTML table for inclusion in an Rmarkdown document.
#' @param diag.perf a list, result of diag.perf() function
#' @return a kableExtra table
#' @export
#' @importFrom kableExtra kable kable_styling
#' @import dplyr
#' @examples
diag.perf.getPerformanceTable <- function(diag.perf){
  if (!is.list(diag.perf) || length(diag.perf) != 2) {
    stop("diag.perf must be generated by diag.perf() function")
  }
  # Second element of the diag.perf() result holds the indicator table.
  perf <- diag.perf[[2]]
  styled <- kableExtra::kable_styling(
    kableExtra::kable(perf, booktabs = TRUE, escape = FALSE, format = "html",
                      col.names = c('Parameters', 'Value', 'CI95%')),
    bootstrap_options = c('hover', 'condensed', 'responsive'),
    full_width = FALSE
  )
  return(styled)
}
#' @title Perform some kappa test between two qualitative tests
#' @description Computes Cohen's kappa agreement statistic between two
#'   qualitative tests, along with the agreement matrix and summary
#'   agreement/disagreement counts.
#' @param data a data.frame, containing the tests' result as factor
#' @param test_1 a character vector of length one, containing the colname
#' of the test_1 procedure
#' @param test_2 a character vector of length one, containing the colname
#' of the test_2 procedure
#' @param round an integer, number of maximal decimal. Default to 3
#' @return a list containing agreement matrix table and agreement parameters as data.frame
#' @export
#' @importFrom psych cohen.kappa
#' @import dplyr
diag.kappa <- function(data, test_1, test_2, round = 3){
  ## ---- input validation (scalar conditions use ||) ----
  if(!is.data.frame(data)){
    stop("data must be a data.frame.")
  }
  if(!is.vector(test_1) || !is.character(test_1) || length(test_1) != 1){
    stop("test_1 must be a character vector of length one.")
  }
  if(!test_1 %in% colnames(data)){
    stop("test_1 must be the name of a column in data.")
  }
  if(!is.vector(test_2) || !is.character(test_2) || length(test_2) != 1){
    stop("test_2 must be a character vector of length one.")
  }
  ## BUG FIX: previously re-checked test_1 here, so a missing test_2 column
  ## was never caught before reaching cohen.kappa().
  if(!test_2 %in% colnames(data)){
    stop("test_2 must be the name of a column in data.")
  }

  dataKappa <- data %>%
    dplyr::select(dplyr::all_of(c(test_1, test_2)))
  ## cohen.kappa() warns on some level mismatches; warnings are suppressed to
  ## keep report output clean (the estimate itself is returned regardless).
  kappa <- suppressWarnings(psych::cohen.kappa(dataKappa, alpha = 0.05))

  ## Agreement matrix: cohen.kappa() returns proportions, scale back to counts.
  agreementMatrix <- kappa$agree*kappa$n.obs
  ## getVarLabel() is a package-level helper (defined elsewhere) that returns a
  ## human-readable label for a variable.
  names(dimnames(agreementMatrix)) <- c(getVarLabel(data[test_1]),
                                        getVarLabel(data[test_2]))

  ## Agreement parameters: diagonal cells are the concordant observations.
  agreement <- sum(diag(agreementMatrix))
  agreement_tx <- sum(diag(kappa$agree))
  disagreement <- kappa$n.obs - sum(diag(agreementMatrix))
  disagreement_tx <- disagreement/kappa$n.obs
  agreementParam <- data.frame(
    Parameter = c('Agreement', 'Disagreement', "Cohen's kappa"),
    Value = c(
      paste0('N = ', agreement, ' (', round(agreement_tx, round), ')'),
      paste0('N = ', disagreement, ' (', round(disagreement_tx, round), ')'),
      paste0(format(round(kappa$kappa, round), nsmall = round), ' ', paste0('CI95%[', paste0(format(round(kappa$confid['unweighted kappa', c('lower', 'upper')], round), nsmall = round), collapse = ' ; '), ']'))
    )
  )
  return(list(agreementMatrix, agreementParam))
}
#' @title Display agreement matrix table in some Rmarkdown document
#' @description Display the agreement matrix table produced by diag.kappa() in
#'   some Rmarkdown document, with the two test labels shown as row/column
#'   group headers.
#' @param diag.kappa a list, result of diag.kappa() function
#' @return a kableExtra table
#' @export
#' @importFrom kableExtra kable kable_styling column_spec add_header_above add_indent
#' @import dplyr
#' @examples
diag.kappa.getAgreeentMatrix <- function(diag.kappa){
  if(!is.list(diag.kappa) || length(diag.kappa) != 2){
    stop("diag.kappa must be generated by diag.kappa() function")
  }
  # First element of the diag.kappa() result is the agreement matrix; its
  # dimnames carry the labels of the two compared tests.
  table <- diag.kappa[[1]]
  name1 <- names(dimnames(table))[1]
  name2 <- names(dimnames(table))[2]
  # Prepend an all-NA spacer row so the first test's label can be displayed
  # as a row-group header above the level rows.
  table <- rbind(
    rep(NA, ncol(table)),
    table
  )
  row.names(table) <- c(name1, row.names(table)[2:length(row.names(table))])
  # Header spec for add_header_above(): one unnamed cell over the row-name
  # column, then the second test's label spanning all data columns.
  header <- c('', ncol(table))
  names(header) <- c('',name2)
  kable <- table %>%
    kableExtra::kable(booktabs = TRUE, escape = FALSE, format = "html") %>%
    kableExtra::kable_styling(bootstrap_options = c('hover', 'condensed', 'responsive'), full_width = FALSE) %>%
    kableExtra::column_spec(column = 1, bold = TRUE) %>%
    kableExtra::add_header_above(header) %>%
    # Indent every level row (all rows except the spacer/label row).
    kableExtra::add_indent(positions = c(2:nrow(table)))
  return(kable)
}
#' @title Display agreement parameters table in some Rmarkdown document
#' @description Renders the agreement parameters table produced by
#'   diag.kappa() as an HTML table for inclusion in an Rmarkdown document.
#' @param diag.kappa a list, result of diag.kappa() function
#' @return a kableExtra table
#' @export
#' @importFrom kableExtra kable kable_styling column_spec
#' @import dplyr
#' @examples
diag.kappa.getAgreeentPram <- function(diag.kappa){
  if (!is.list(diag.kappa) || length(diag.kappa) != 2) {
    stop("diag.kappa must be generated by diag.kappa() function")
  }
  # Second element of the diag.kappa() result holds the parameter table.
  params <- diag.kappa[[2]]
  # Build the HTML table step by step (equivalent to the piped form).
  html_tbl <- kableExtra::kable(params, booktabs = TRUE, escape = FALSE,
                                format = "html")
  html_tbl <- kableExtra::kable_styling(html_tbl,
                                        bootstrap_options = c('hover', 'condensed', 'responsive'),
                                        full_width = FALSE)
  html_tbl <- kableExtra::column_spec(html_tbl, column = 1, bold = TRUE)
  return(html_tbl)
}
|
f106b5c9a493cb853b11f79b278106c08fde6fec
|
766d0f2acc322c618a6101150ffac74526e54094
|
/old_scripts/nba_project/by_player/read_in.R
|
7e906b268e92191d434c0f876d6d21ec123bcbea
|
[] |
no_license
|
benmbrew/nba_data
|
7737010e397a15d61bba89fdabb634919f3fa822
|
9db314734541ccc13075ca1c78eb876e0111b670
|
refs/heads/master
| 2022-07-14T13:21:38.623659
| 2020-05-09T16:09:01
| 2020-05-09T16:09:01
| 110,999,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,071
|
r
|
read_in.R
|
##########
# LOAD LIBRARIES and read in data
##########
# Reads three seasons of per-player NBA box-score feeds, harmonises column
# names, derives per-minute and shooting-rate features, then subsets to
# Toronto Raptors games in the current season and summarises per player.
# NOTE(review): relies on helper functions (get_game_score,
# full_inf_with_na) sourced from functions.R — see that file for their
# definitions.
library(broom)
library(tidyverse)
library(ggthemes)
library(lubridate)
library(doParallel)
library(caret)
library(glmnet)
registerDoParallel()

# source functions script
source('functions.R')

# read in data (one CSV per season)
dat_2015 <- read_csv('../../Data/player_stat_2015_16.csv')
dat_2016 <- read_csv('../../Data/player_stat_2016_17.csv')
dat_current <- read_csv('../../Data/season_player_feed.csv')

# combine data into one table and drop the per-season copies
dat_full <- bind_rows(dat_2015,
                      dat_2016,
                      dat_current)
rm(dat_2015, dat_2016, dat_current)

# clean column names: lower-case, underscores, strip the literal
# "_(r/h)" suffix (fixed = TRUE so the parentheses/slash are not regex)
colnames(dat_full) <- tolower(colnames(dat_full))
colnames(dat_full) <- gsub(' ', '_', colnames(dat_full))
colnames(dat_full) <- gsub('_(r/h)', '', colnames(dat_full), fixed = TRUE)

# convert date (source format is month/day/4-digit year)
dat_full$date <- as.Date(dat_full$date, format = '%m/%d/%Y')
# get year
dat_full$year <- as.factor(format(dat_full$date, format = '%Y'))
# create a month variable
dat_full$month <- month(as.POSIXlt(dat_full$date))

# field-goal percentage: makes over attempts
dat_full$fg_per <- round(dat_full$fg/dat_full$fga, 2)
# per-minute statistics (attempts, points, turnovers over minutes played)
dat_full$fga_per <- round(dat_full$fga/dat_full$min, 2)
dat_full$pts_per <- round(dat_full$pts/dat_full$min, 2)
dat_full$to_per <- round(dat_full$to/dat_full$min, 2)

# game score GmsC - a simple version of the PER (helper from functions.R)
dat_full$game_score <- get_game_score(dat_full)
# fill NA and Inf with zero (helper from functions.R)
dat_full <- full_inf_with_na(dat_full)

# subset to only raptors in own_team
dat <- dat_full %>% dplyr::filter(own_team == 'Toronto')
# subset to this season (games after the 2017 off-season cutoff)
dat <- dat %>% dplyr::filter(date > '2017/06/11')

# per-player season averages of the derived rate statistics
temp_game_score <- dat %>% group_by(player_full_name) %>%
  summarise(mean_fg_per = mean(fg_per, na.rm = T),
            mean_fga_per = mean(fga_per, na.rm = T),
            mean_pts_per = mean(pts_per, na.rm = T),
            mean_to_per = mean(to_per, na.rm = T),
            mean_game_score = mean(game_score, na.rm = T))
|
a23f6ce93debe83d1fae39342cd6062bddf8e548
|
648ceb127101da98e0371f90e83c2613b20ee5d1
|
/man/almost_equal.Rd
|
3b47876510f7bf40a6b9bc673e96595b8cf3dc94
|
[] |
no_license
|
paulponcet/bazar
|
b561b9914300d4eb72d998028b4c2db061f9b07e
|
cacccfed36ed5650dbef2e78f584e0c07c321581
|
refs/heads/master
| 2021-01-11T21:59:55.950238
| 2019-07-13T23:51:42
| 2019-07-13T23:51:42
| 78,890,817
| 0
| 0
| null | 2019-04-05T09:14:21
| 2017-01-13T22:12:23
|
R
|
UTF-8
|
R
| false
| true
| 921
|
rd
|
almost_equal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/almost_equal.R
\name{almost_equal}
\alias{almost_equal}
\alias{almost.equal}
\title{Test (almost) equality of numeric values}
\usage{
almost_equal(x, y, tolerance = sqrt(.Machine$double.eps))
almost.equal(x, y, tolerance = sqrt(.Machine$double.eps))
}
\arguments{
\item{x}{numeric vector.}
\item{y}{numeric vector of the same length as \code{x}.}
\item{tolerance}{numeric. Differences smaller than tolerance are considered as equal.
The default value is close to \code{1.5e-8}.}
}
\value{
A logical vector of the same length as \code{x} and \code{y}.
}
\description{
The function \code{almost_equal} tests if two numeric vectors
have equal values up to a tolerance.
}
\examples{
almost_equal(x = 1:3,
y = 1:3 + c(10^(-6), 10^(-7), 10^(-8)))
}
\author{
Tommy on StackOverflow, see \url{http://stackoverflow.com/a/7667703}.
}
|
59994180831b1ea8bb45b263cb5ef595d12ebf98
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ExpDes/examples/bartlett.Rd.R
|
57fad5e3d452c4ad32a573c186a037dcc3de7686
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
bartlett.Rd.R
|
# Example script for the ExpDes bartlett() help page: runs Bartlett's test
# for homogeneity of variances within a completely randomised design (crd)
# analysis on the package's ex1 data.
library(ExpDes)
### Name: bartlett
### Title: Test for Homogeneity of Variances: Bartlett
### Aliases: bartlett
### ** Examples

data(ex1)
# NOTE(review): attach() puts ex1's columns on the search path; the bare
# names trat and ig below resolve through it.
attach(ex1)
# hvar='bartlett' selects Bartlett's homogeneity test; quali = FALSE —
# presumably treats the treatment factor as quantitative (confirm against
# the ExpDes crd() documentation).
crd(trat, ig, quali = FALSE, hvar='bartlett')
|
e664c2d34fd8facd33dcfaecd3d60837bf7d08eb
|
28e2a38eff19990eb249c50e10a9d23239d1fad9
|
/ds.R
|
2f55d10cba3454aa299859a89127ed5656af912c
|
[] |
no_license
|
susarthak/analytics
|
d27eec9a1fec1164391c7718c5df1a64686ae50b
|
26bcf713c562e1535d9b45b4c6b31e8351f22e32
|
refs/heads/master
| 2020-03-30T06:38:19.166144
| 2018-09-30T06:47:24
| 2018-09-30T06:47:24
| 150,876,664
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63
|
r
|
ds.R
|
#data structure in R
#Vectors
#Matrices
#Arrays
#Factors
#
|
194433edc8e6ef8123ee4d970110194dcc822f99
|
09b8d42258b903f53c5c598f6a2ec35405a9c593
|
/man/get_current.Rd
|
6ba0f39b332ea6f14dad44914cad8ae181be2363
|
[
"MIT"
] |
permissive
|
BigelowLab/fullobis
|
2801cb42714541da5250b4450c9229185bd6a3d9
|
c2f16fc14293879c574e7fb6f4ba13e7635da1ba
|
refs/heads/main
| 2023-09-03T16:35:34.910292
| 2021-10-26T18:21:35
| 2021-10-26T18:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 596
|
rd
|
get_current.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{get_current}
\alias{get_current}
\title{Retrieve the most recent parquet filename in the root path}
\usage{
get_current(root = get_root(), pattern = "^obis_.*\\\\.parquet$")
}
\arguments{
\item{root}{character, the root path to the dataset}
\item{pattern}{the filename pattern to search, by default we use the equivalent of the glob \code{obis_*.parquet}}
}
\value{
the fully qualified filename and path or "" if none found
}
\description{
Retrieve the most recent parquet filename in the root path
}
|
bd37d920471d65018d879dc41f33e5d15b0b025a
|
db1378ea65c6566da37ddfc35a087f9f3187f78f
|
/distToLargeShed_calledFromToolbox.r
|
513203da297e459515fbb670536428c341a15dc8
|
[] |
no_license
|
asruesch/hydrographyAttributionToolbox
|
b1c0ae1d452214959f00406ccd0d38742f45b747
|
22faf25b3d2b2fe15a5271c8cd929fe1be3f2405
|
refs/heads/master
| 2020-06-05T02:57:46.547356
| 2014-11-04T15:51:10
| 2014-11-04T15:51:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,717
|
r
|
distToLargeShed_calledFromToolbox.r
|
#script to run trace to watershed of designated size
#currently runs trace whether or not there is a dam between feature and great lake, with idea that dam
#presence/ absence could be an additional output
#also, currently does not aggregate duplicate features such as lakes
#what to do with features with no watershed but in edges? some may have sheds >=x km, others may not
#probably can just not attribute, but use in trace...
#trial values
# ###########################################################################################
# edgesTable="C:\\Users\\menuzd\\Documents\\GIS_24kAttribution\\temp\\allDataEdges2.csv"
# traceid="TRACEID"
# reachField="REACHID"
# lengthField="Shape_Leng"
# damField="DAMSIDE"
# watershedArea="shedArea"
# shedThreshold=as.numeric(10)
# relationshipTable="C:\\Users\\menuzd\\Documents\\GIS_24kAttribution\\temp\\allDataRels.csv"
# fromid="FROM_TRACEID"
# toid="TO_TRACEID"
# outputTable="C:\\Users\\menuzd\\Documents\\GIS_24kAttribution\\temp\\allDataDownstreamOutput.csv"
############################################################################################
args = commandArgs(trailingOnly = TRUE)
args = gsub("\\\\", "/", args)
edgesTable=args[1]
traceid=args[2]
reachField=args[3]
lengthField=args[4]
damField=args[5]
watershedArea=args[6]
shedThreshold=as.numeric(args[7])
relationshipTable=args[8]
fromid=args[9]
toid=args[10]
outputTable=args[11]
############################################################################################
edges2=read.csv(edgesTable)
rels=read.csv(relationshipTable)
edges2$isBigShed=ifelse(edges2[[watershedArea]]>=shedThreshold, 1, 0)
edges2$isBigShed=ifelse(is.na(edges2$isBigShed), 0, edges2$isBigShed)
#combine relationship table with edge data on from side
fromMerge=merge(rels, edges2, by.x=fromid, by.y=traceid)
#select all columns that we are interested in
fromMerge2=data.frame(fromMerge[[fromid]], fromMerge[[toid]], fromMerge[[reachField]],
fromMerge[[lengthField]], fromMerge[[damField]], fromMerge$isBigShed)
colnames(fromMerge2)=c("fromfeat", "tofeat", "FROMID", "FROMLENGTH", "FROMDAM", "FROMSHED")
cols=dim(fromMerge2)[2]#this just helps in case we change the number of fields that we use
#the select columns below should not need to be changed as long as both "to" and "from" info is populated
#however, will have to adjust colFiller below if additional fields are added
#combine relationship table with edge data on "to" side
toMerge=merge(fromMerge2, edges2, by.x="tofeat", by.y=traceid)
mergedData=data.frame(toMerge[,1:cols], toMerge[[reachField]],
toMerge[[lengthField]], toMerge[[damField]],toMerge$isBigShed)
colnames(mergedData)=c(colnames(mergedData)[1:cols], "TOID", "TOLENGTH", "TODAM", "TOSHED")
#processing of lsn- add in info about "to" features into from column for features not found in "to" column", with NA populating the missing "to" columns
missingFroms=which(is.element(mergedData$tofeat, mergedData$fromfeat)==FALSE)
newData= mergedData[missingFroms,c(1,(cols+1):(cols*2-2))]
colFiller=rep(NA, length(missingFroms))
newDataFilled=data.frame(colFiller,newData, colFiller, colFiller, colFiller, colFiller)
colnames(newDataFilled)=colnames(mergedData)
newDataFilledFinal=aggregate(newDataFilled, by=list(newDataFilled$fromfeat), FUN=mean)[,2:(cols+cols-1)]
#final data frame
lsndata=rbind(mergedData, newDataFilledFinal)
###############################################################################################################
###################### function for traces ###########################
# Return the row index (or indices) in the lsn data table whose `fromfeat`
# column matches the given feature id.
# NOTE(review): reads `lsndata` from the enclosing environment rather than
# taking it as an argument — the table must exist before this is called.
getRowNum=function(fromfeat){
  return(which(lsndata$fromfeat==fromfeat))
}
###############################################################################################################
tracedata=subset(lsndata, lsndata$FROMSHED!=1) #otherwise feature is already a big shed
fromfeat=array(0, dim(tracedata)[1])
reachID=array(0, dim(tracedata)[1])
distance=array(NA, dim(tracedata)[1])
dam=array(0, dim(tracedata)[1])
x=0
i = 0
pb = txtProgressBar(min=0, max=dim(tracedata)[1], style=3)
stTime <- Sys.time()
for (f in tracedata$fromfeat){
i = i + 1
setTxtProgressBar(pb, i)
x=x+1
flagDown=FALSE
l=0
fromfeat[x]=f
rowNum=getRowNum(f)
reachID[x]=lsndata$FROMID[rowNum]
#we only have features that ARE NOT big sheds, so we don't need if/else statement here
l=l+lsndata$FROMLENGTH[rowNum]/2
new=lsndata$tofeat[rowNum]
newRow=getRowNum(new)
if (lsndata$FROMDAM[rowNum]==2){
dam[x]=1
}
while (flagDown==FALSE) {
if (is.na(new)){
flagDown=TRUE
} else if (lsndata$FROMSHED[newRow]==1){ #if feature we traced to is a big shed
if (lsndata$FROMDAM[newRow]==1){ #if dam is on upstream side, then add dam in attributes
dam[x]=1
}
distance[x]=l
flagDown=TRUE
} else if (lsndata$FROMDAM[newRow]>0){
dam[x]=1
}
l=l+lsndata$FROMLENGTH[newRow]
new=lsndata$tofeat[newRow]
newRow=getRowNum(new)
}
}
output=data.frame(fromfeat, reachID,distance, dam)
bigShedData=subset(lsndata, lsndata$FROMSHED==1) #subset out data that is big shed
bigShedData2=data.frame(bigShedData$fromfeat, bigShedData$FROMID,
rep(0, nrow(bigShedData)), rep(0, nrow(bigShedData)))
colnames(bigShedData2)=c("fromfeat", "reachID", "distance", "dam")
mergedOutput=rbind(output, bigShedData2)
close(pb)
endTime <- Sys.time()
procDur <- difftime(endTime, stTime, units="mins")
print(paste("Trace took", signif(as.numeric(procDur), digits=2), "minutes to run"))
write.csv(mergedOutput, outputTable, row.names=FALSE)
###############################################################################################################
output=mergedOutput[which(!mergedOutput$fromfeat %in% bigShedData2$fromfeat),]
y=which(output$distance<10)
|
5a8cd7b61d691d6cd936d17a145529628f79dbe0
|
f5502bea857396ec669339f268f37c77f0ceb805
|
/R/fetch_excel.R
|
82d600fe54a70764bd06daf7a1e62c742b435adc
|
[
"MIT"
] |
permissive
|
roboton/covid-covariates
|
0646191e20596db2a4f5b9f7fa9fde7ab281c159
|
b7e962017e5e71a78d1eb8ed9eaa379d9db401b3
|
refs/heads/master
| 2022-11-22T06:43:04.250917
| 2020-07-27T11:35:56
| 2020-07-27T11:35:56
| 262,303,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 307
|
r
|
fetch_excel.R
|
##' Fetch and read a remote Excel workbook.
##'
##' Downloads the file at \code{url} to a temporary location with
##' \code{httr::GET()} and parses it with \code{readxl::read_xlsx()}.
##' The temporary file is removed when the function exits (previously it
##' was leaked on every call).
##'
##' @title Fetch and read a remote Excel file
##' @param url character(1), URL of the .xlsx file to download.
##' @param skip integer(1), number of leading rows to skip when reading,
##'   passed through to \code{read_xlsx()}.
##' @return The parsed sheet as returned by \code{read_xlsx()}.
##' @author roboton
##' @export
fetch_excel <- function(url, skip) {
  tf <- tempfile(fileext = ".xlsx")
  # Clean up the downloaded file even if read_xlsx() errors.
  on.exit(unlink(tf), add = TRUE)
  GET(url, write_disk(tf))
  read_xlsx(tf, skip = skip)
}
|
b98998030e51621ab7680d6f132cf3e3e46763b8
|
aa6750ab3a907068b03d426930bfd6bca80ade7c
|
/iris.r
|
8c1fefd7c05f37418955312ec88345a284eb6773
|
[] |
no_license
|
scott1541/QA_R
|
9ab2aa2d283fa0114a9e4e8204e7cb8727a519a0
|
c03f15c382b832a4c83efe6770510681fa55142a
|
refs/heads/master
| 2021-06-24T05:00:27.520890
| 2017-08-18T15:20:57
| 2017-08-18T15:20:57
| 100,287,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
iris.r
|
# Box plots of sepal area (Sepal.Width * Sepal.Length) by species for the
# built-in iris data set.
library(ggplot2)
# NOTE(review): subdata is created but never used below — the qplot() call
# that consumed it is commented out.
subdata <- subset(iris, select=c(Species, Sepal.Width))
#qplot(subdata[1,1], subdata[2,2])
# NOTE(review): the ylab= argument passed to ggplot() here is not a ggplot
# parameter and appears to have no effect; the axis label actually comes
# from the ylab() call in the chain.
ggplot(iris, aes(x = Species, y = (Sepal.Width * Sepal.Length)), ylab="Sepal Area") + ylab("Sepal Area") + geom_boxplot()
|
fd9b419f3b1cf8c0b8e2a2eeed71396443d7c2db
|
3b951e50d7736094949de478200d9664491c91ff
|
/man/duos_pp.Rd
|
1f6ac113592d2636584eda7f4df23d6493813834
|
[] |
no_license
|
reykp/biRd
|
35a8035c73927e1abef246dd08e678b52a6ab6ae
|
42eae63a4f0bcd01628421564a3e08222fe3fd9c
|
refs/heads/master
| 2020-03-23T00:28:15.662132
| 2018-11-11T22:36:42
| 2018-11-11T22:36:42
| 140,865,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,753
|
rd
|
duos_pp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/duos_pp.R
\name{duos_pp}
\alias{duos_pp}
\title{Plot the Prior vs. the Posterior}
\usage{
duos_pp(duos_output, parameters = "c", burnin = NA)
}
\arguments{
\item{duos_output}{The list returned by \code{duos} containing the density estimate results.}
\item{parameters}{The group of parameters to plot (see details).}
\item{burnin}{The desired burnin to discard from the results. By default, it is half the number of iterations.}
}
\value{
A plot of overlaid histograms.
}
\description{
Plots the histograms of simulations from the prior versus the posterior from \code{duos}.
}
\details{
The results are designed to plot of a 3X2 grid. If there are more than 6 parameters, separate plots are created and printed and can be viewed by clicking the arrow through the results in the 'Plots' window.
\strong{Options for} \code{parameters}
There are two sets of parameters that can plotted in the histograms: the cut-points and the bin proportion parameters.
\itemize{
\item \code{"c"}: Plots the histograms of the cut-points. (DEFAULT).
\item \code{"p"}: Plots the histograms of the bin proportion parameters.
}
}
\examples{
## --------------------------------------------------------------------------------
## Beta Distribution
## --------------------------------------------------------------------------------
# First run 'duos' on data sampled from a beta(2, 5) distribution with 150 data points.
y <- rbeta(150, 2, 5)
duos_beta <- duos(y)
# Plot histograms of the priors vs. the posteriors of the cut-point parameters
duos_pp(duos_beta)
# Plot histograms of the priors vs. the posteriors of the proportion parameters
duos_pp(duos_beta, parameters = "p")
}
|
f69d28e5872eff8f169cab10a0f67a338964284d
|
30f0b1342eae63ade69e5943610962f50d8b8752
|
/man/enrichmentBarPlot.Rd
|
be4ccb4d4e35843c2b9cbcda91c452215f054fdf
|
[
"MIT"
] |
permissive
|
remap-cisreg/ReMapEnrich
|
c9d3729b768bfd1f66b79403e306a59cc8e71c02
|
cb46422f23894d674a78941cb1958f3b9b7bc899
|
refs/heads/master
| 2022-02-11T11:25:06.093352
| 2021-11-23T15:43:11
| 2021-11-23T15:43:11
| 173,947,338
| 12
| 4
| null | 2022-02-02T13:41:17
| 2019-03-05T13:01:24
|
HTML
|
UTF-8
|
R
| false
| true
| 1,652
|
rd
|
enrichmentBarPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrichment_bar_plot.R
\name{enrichmentBarPlot}
\alias{enrichmentBarPlot}
\title{Enrichment bar plot}
\usage{
enrichmentBarPlot(enrich,
top = 20,
main = paste("Significance, top", top, "categories"),
aRisk = 0.05,
sigDisplayQuantile = 0.95,
col = c("#BFCBD4", "#33ACFF"),
sigType = "q",
xlab = sigTypeTitle,
beside = TRUE,
space = 0.1,
cex.names = 0.8,
border = NA,
las = 1,
...)
}
\arguments{
\item{enrich}{The enrichment data frame from which the plot will be created.}
\item{top=20}{The number of category for the plot.}
\item{main=paste("Significance, }{top", top, "categories") Allows to choose
the title of the plot.}
\item{aRisk=0.05}{The alpha risk.}
\item{sigDisplayQuantile=0.95}{Quantile used to define the maximal value for
the Y-axis, based on a quantile.}
\item{col=c("#6699ff", "#ff5050")}{Color palette for the plot.
A personal palette such as c("#FEE0D2","#FC9272") or an RColorBrewer one such
as brewer.pal(5,"Reds").}
\item{sigType="q"}{Allows to choose between Q-significance, P-significance
or E-significance.}
\item{xlab=sigTypeTitle}{Allows changing the title of the x-axis.}
\item{beside=TRUE}{Juxtaposing bar or not.}
\item{space=0.2}{Allows to change size bar.}
\item{cex.names=1}{Allows to change size of x-axis (flipped).}
\item{border=NA}{Allows to change the border of each bar.}
\item{las=1}{Allows to change the angle of label y-axis.}
}
\description{
Creates a barplot from the enrichment.
}
\examples{
data("enrichment_example", package = "ReMapEnrich")
enrichmentBarPlot(enrichment_example)
}
\author{
Martin Mestdagh
}
|
22e7a61b7dcc53c8b433d23449c3c742ac176223
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rchallenge/R/new.r
|
c7ed3abea8c1e72394deabd94ff4088b5e0ca862
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,302
|
r
|
new.r
|
#' Install a new challenge.
#' @param path string. install path of the challenge (should be somewhere in your Dropbox).
#' @param out_rmdfile string. name of the output R Markdown file.
#' @param recursive logical. should elements of the path other than the last be created? see \code{\link{dir.create}}.
#' @param overwrite logical. should existing destination files be overwritten? see \code{\link{file.copy}}.
#' @param quiet logical. deactivate text output.
#' @param showWarnings logical. should the warnings on failure be shown? see \code{\link{dir.create}}.
#' @param data_dir string. subdirectory of the data.
#' @param submissions_dir string. subdirectory of the submissions. see \code{\link{store_new_submissions}}.
#' @param hist_dir string. subdirectory of the history. see \code{\link{store_new_submissions}}.
#' @param install_data logical. activate installation of the data files of the template challenge.
#' @param baseline string. name of the team considered as the baseline.
#' @param add_baseline logical. activate installation of baseline submission files of the template challenge.
#' @param clear_history logical. activate deletion of the existing history folder.
#' @param template string. name of the template R Markdown script to be installed.
#' Two choices are available: \code{"en"} (english) and \code{"fr"} (french).
#' @param title string. title displayed on the webpage.
#' @param author string. author displayed on the webpage.
#' @param date string. date displayed on the webpage.
#' @param email string. email of the challenge administrator.
#' @param date_start string. start date of the challenge.
#' @param deadline string. deadline of the challenge.
#' @param data_list list with members \code{train}, \code{test}, \code{y_test} and
#' \code{ind_quiz} such as returned by the \code{\link{data_split}} function.
#' @return The path of the created challenge is returned.
#' @export
#' @examples
#' path <- tempdir()
#' wd <- setwd(path)
#' # english version
#' new_challenge()
#' # french version
#' new_challenge(template = "fr")
#' setwd(wd)
#' unlink(path)
# Create a new challenge skeleton under `path`: data/, submissions/ and
# history/ folders, optional example data and baseline submissions, and a
# customized R Markdown dashboard generated from a bundled template.
new_challenge <- function(path = ".", out_rmdfile = "challenge.rmd",
recursive = FALSE, overwrite = recursive,
quiet = FALSE, showWarnings = FALSE,
template = c("en", "fr"),
data_dir = "data",
submissions_dir = "submissions",
hist_dir = "history",
install_data = TRUE,
baseline = "baseline",
add_baseline = install_data,
clear_history = overwrite,
title = "Challenge",
author = "",
date = "",
email = "EDIT_EMAIL@DOMAIN.com",
date_start = format(Sys.Date(), "%d %b %Y"),
deadline = paste(Sys.Date()+90, "23:59:59"),
data_list = data_split(get_data("german"))) {
# Create the challenge root and validate the basic arguments early.
dir.create(path, recursive = recursive, showWarnings = showWarnings)
if (!file.exists(path))
stop("could not create directory ", path)
stopifnot(is.character(out_rmdfile), length(out_rmdfile)==1, nzchar(out_rmdfile))
stopifnot(is.character(template), nzchar(template))
template = match.arg(template, c("en", "fr"))
# currently "challenge_en.rmd" and "challenge_fr.rmd" are available
# NOTE(review): the literal "data" is used here instead of the `data_dir`
# argument; the two diverge if a caller customizes data_dir -- confirm intent.
dir.create(file.path(path, "data"), recursive = recursive, showWarnings = showWarnings)
if (install_data) {
# Save the example train/test split as .rda files in a temporary
# directory, then copy each into the challenge's data folder.
data_train <- data_list$train
data_test <- data_list$test
y_test <- data_list$y_test
ind_quiz <- data_list$ind_quiz
tmpdir = tempdir()
save('data_train', file = file.path(tmpdir, 'data_train.rda'))
file.copy(file.path(tmpdir, 'data_train.rda'), file.path(path, data_dir),
overwrite=overwrite, recursive=recursive)
save('data_test', file = file.path(tmpdir, 'data_test.rda'))
file.copy(file.path(tmpdir, 'data_test.rda'), file.path(path, data_dir),
overwrite=overwrite, recursive=recursive)
save('y_test', file = file.path(tmpdir, 'y_test.rda'))
file.copy(file.path(tmpdir, 'y_test.rda'), file.path(path, data_dir),
overwrite=overwrite, recursive=recursive)
save('ind_quiz', file = file.path(tmpdir, 'ind_quiz.rda'))
file.copy(file.path(tmpdir, 'ind_quiz.rda'), file.path(path, data_dir),
overwrite=overwrite, recursive=recursive)
# NOTE(review): unlink() on a directory without recursive=TRUE is a no-op,
# and tempdir() is the session-wide temp dir -- presumably intended as
# cleanup of the .rda files; confirm.
unlink(tmpdir)
}
dir.create(file.path(path, submissions_dir), recursive = recursive, showWarnings = showWarnings)
if (install_data && add_baseline) {
# Install two trivial baseline submissions (predict all Good / all Bad).
team_dir = new_team(baseline, path=path, submissions_dir = submissions_dir,
quiet = TRUE, showWarnings = showWarnings)
# Predict all Good
y_pred <- rep("Good", nrow(data_test))
tmpfile = tempfile()
write(y_pred, file = tmpfile)
file.copy(tmpfile, file.path(team_dir, 'all_good.csv'), overwrite=overwrite)
# Predict all Bad
y_pred <- rep("Bad", nrow(data_test))
write(y_pred, file = tmpfile)
file.copy(tmpfile, file.path(team_dir, 'all_bad.csv'), overwrite=overwrite)
unlink(tmpfile)
}
if (clear_history)
unlink(file.path(path, hist_dir), recursive = TRUE)
dir.create(file.path(path, hist_dir), recursive = recursive, showWarnings = showWarnings)
# Placeholder values substituted into the template (@TITLE@, @AUTHOR@, ...).
expr = list(title = title, author = author, date = date, email = email,
date_start = date_start, deadline = deadline, baseline = baseline,
data_dir = data_dir, submissions_dir = submissions_dir,
hist_dir = hist_dir)
# template files are in "template/challenge_<template>.rmd"
tpl = system.file('template', paste0("challenge_", template, ".rmd"), package = 'rchallenge')
if (!nzchar(tpl))
stop("could not find template ", template)
# Replace every @NAME@ placeholder in the template, then install the result
# as the challenge's R Markdown file.
text = readLines(tpl)
for (n in names(expr))
text = gsub(paste0("@", toupper(n), "@"), expr[[n]], text)
tmpfile = tempfile()
writeLines(text, tmpfile)
file.copy(tmpfile, file.path(path, out_rmdfile), overwrite=overwrite)
unlink(tmpfile)
if (!quiet) {
# Print a numbered to-do list guiding the administrator through setup.
cat('New challenge installed in: "', normalizePath(path), '"\n', sep='')
cat('Next steps to complete the installation:\n')
step <- 0
if (install_data) {
step <- step + 1
cat(step, '. Replace the data files in the "data" subdirectory.\n', sep='')
}
if (add_baseline) {
step <- step + 1
cat(step, '. Replace the baseline predictions in "', file.path(submissions_dir, baseline),'".\n', sep='')
}
step <- step + 1
cat(step, '. Customize the template R Markdown file "', out_rmdfile, '" as needed.\n', sep='')
step <- step + 1
cat(step, '. Create and share subdirectories in "', submissions_dir, '" for each team:\n', sep='')
cat(' rchallenge::new_team("team_foo", "team_bar", path="', path, '", submissions_dir="', submissions_dir, '")\n', sep='')
step <- step + 1
cat(step, '. Publish the html page in your "Dropbox/Public" folder:\n', sep='')
cat(' rchallenge::publish("', file.path(path, out_rmdfile), '")\n', sep='')
step <- step + 1
template_html <- paste0(sub("([^.]+)\\.[[:alnum:]]+$", "\\1", basename(out_rmdfile)), ".html")
cat(step, '. Give the Dropbox public link to "Dropbox/Public/', template_html, '" to the participants.\n', sep='')
step <- step + 1
cat(step, '. Automate the updates of the webpage.\n', sep='')
if (.Platform$OS.type == "unix") {
cat(' On Unix systems, you can setup the following line to your crontab using "crontab -e":\n', sep='')
cat(' 0 * * * * Rscript -e \'rchallenge::publish("', normalizePath(file.path(path, out_rmdfile)), '")\'\n', sep='')
}
if (.Platform$OS.type == "windows") {
cat(' On Windows systems, you can use the Task Scheduler to create a new task with a "Start a program" action with the settings:')
cat(' - Program/script: Rscript.exe\n')
cat(' - options: -e rchallenge::publish(\'', normalizePath(file.path(path, out_rmdfile)), '\')\n', sep='')
}
}
invisible(normalizePath(path))
}
#' Create new team submission folders inside an existing challenge.
#' @param ... strings. names of the team subdirectories to create.
#' @param path string. root path of the challenge. see \code{\link{new_challenge}}.
#' @param submissions_dir string. subdirectory holding the submissions. see \code{\link{new_challenge}}.
#' @param quiet logical. deactivate text output.
#' @param showWarnings logical. should the warnings on failure be shown? see \code{\link{dir.create}}.
#' @return The paths of the created teams are returned invisibly.
#' @export
#' @examples
#' path <- tempdir()
#' wd <- setwd(path)
#' new_challenge()
#' new_team("team_foo", "team_bar")
#' setwd(wd)
#' unlink(path)
new_team <- function(..., path = ".", submissions_dir = "submissions",
                     quiet = FALSE, showWarnings = FALSE) {
  team_names <- c(...)
  stopifnot(is.character(team_names))
  subs_root <- file.path(path, submissions_dir)
  # The submissions folder must already exist (created by new_challenge).
  if (!file.exists(subs_root))
    stop("could not find submissions directory:", normalizePath(subs_root))
  # One subdirectory per requested team name, in the order given.
  for (team in team_names) {
    if (!quiet) cat("Creating team subdirectory:", file.path(submissions_dir, team), "\n")
    dir.create(file.path(subs_root, team), recursive = FALSE, showWarnings = showWarnings)
  }
  if (!quiet) cat("Next step: share the Dropbox folders with the corresponding teams.\n")
  invisible(normalizePath(file.path(subs_root, team_names)))
}
#' Publish your challenge R Markdown script to a html page.
#' @param input string. name of the R Markdown input file
#' @param output_file output file. If \code{NULL} then a default based on the name
#' of the input file is chosen.
#' @param output_dir string. output directory. default=\code{"~/Dropbox/Public"}
#' so that the rendered page can easily be shared on the web with Dropbox.
#' @param quiet logical. deactivate text output.
#' @param ... further arguments to pass to \code{\link[rmarkdown]{render}}.
#' @return The compiled document is written into the output file, and the path
#' of the output file is returned invisibly.
#' @export
#' @seealso \code{\link[rmarkdown]{render}}
#' @importFrom rmarkdown render
#' @examples
#' path <- tempdir()
#' wd <- setwd(path)
#' new_challenge()
#' outdir = tempdir()
#' publish(output_dir = outdir, output_options = list(self_contained = FALSE))
#' unlink(outdir)
#' setwd(wd)
#' unlink(path)
publish <- function(input="challenge.rmd", output_file = NULL,
                    output_dir = file.path("~/Dropbox/Public"),
                    quiet = FALSE, ...) {
  wd <- getwd()
  # BUG FIX: restore the working directory even if rendering fails; the
  # original only restored it after a successful render, leaving the session
  # stranded in dirname(input) on error.
  on.exit(setwd(wd), add = TRUE)
  # Render from the input's directory so relative paths in the Rmd resolve.
  setwd(dirname(input))
  out <- rmarkdown::render(input = basename(input), output_file = output_file,
                           output_dir = output_dir, quiet = quiet, ...)
  if (!quiet)
    cat('Next step: give the Dropbox public link to "', file.path(output_dir, basename(out)), '" to the participants.\n', sep='')
  invisible(out)
}
|
9e753c843fd05f109836bf01ebf72e5fe9f3a109
|
26dea210be60fafab93c89e4bb11d5ff9edeba72
|
/02Advance/107Car_Package_recode__lo.R
|
36893ab11b0f64f040bb970a5cb98542d2a3b6c9
|
[] |
no_license
|
MomusChao/R
|
a71df4f7430d644c18f853ad4f06b0838d5545c9
|
014c8e5ec43dc5d02b9faa41b49032ed5c340439
|
refs/heads/master
| 2021-06-19T02:21:11.297723
| 2020-12-09T22:28:18
| 2020-12-09T22:28:18
| 83,297,248
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
107Car_Package_recode__lo.R
|
# Demo of car::recode(): collapse a numeric vector into two bins.
library(car)
x = c(1,2,3,4,5,6)
#lower 3: 0
# "lo:3=0" maps everything from the minimum up to 3 onto 0; "4:6=1" maps 4-6 onto 1.
recode(x,"lo:3=0;4:6=1")
# Same result using "else" as the catch-all for values above 3.
recode(x,"lo:3=0;else=1")
|
c5a673aef52ed26320e7bb4ad2ede9963a08e5e8
|
adb25ca2c79dc1e26c54dcef224bb7e3c19a8499
|
/Project4/Isabel.R
|
a475aed18c28a4c1c491fdb9135012cb0b1f4266
|
[] |
no_license
|
wangweiyi722/data-analytics
|
528f1418747d6210e3fa38b962774f4094699b0c
|
e7f478b96560ae70d789c551f0de5c5042fcd3f2
|
refs/heads/master
| 2020-03-15T23:05:12.622653
| 2018-05-07T00:26:21
| 2018-05-07T00:26:21
| 132,386,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,350
|
r
|
Isabel.R
|
# Load modelling and data-manipulation packages. library() errors immediately
# when a package is missing, unlike require() which only returns FALSE.
library(MASS)
library(ISLR)
library(tidyverse)
library(ggplot2)
library(dplyr)
project <- "https://data.world/wangweiyi722/f-17-eda-project-4"
# BUG FIX / NOTE(review): a leftover git merge conflict (<<<<<<< HEAD ... >>>>>>>)
# was resolved here. Both branches downloaded the same data.world dataset under
# different query tokens; the HEAD token is kept -- confirm it is still valid.
df_orig <- read.csv("https://query.data.world/s/hvA1ht-J0O9mzt550iY43aXKb5fGDl", header=TRUE, stringsAsFactors=FALSE)
names(df_orig)
################################# Insight 1 ########################################
# Map Title I eligibility: point maps of schools in Texas, California and New
# York, plus a per-state eligibility-percentage choropleth of the lower 48.
library(maps)
library(mapdata)
# Keep only schools whose eligibility flag is a definite Yes/No.
df_map = subset(df_orig, (df_orig$Title.1.Eligible == "Yes")|(df_orig$Title.1.Eligible == "No"))
# BUG FIX: resolved a leftover git merge conflict; the HEAD branch added this
# factor column (unused below), the other branch added nothing.
df_map <- dplyr::mutate(df_map, factored_stats = factor(df_map$Title.1.Eligible, levels = c("Yes","No")))
### Texas ###
df_map_texas = subset(df_map, df_map$State == "Texas")
states <- map_data("state")
tx_df <- subset(states, region == "texas")
tx_base <- ggplot(data = tx_df, mapping = aes(x = long, y = lat)) +
  coord_fixed(1.3) +
  geom_polygon(color = "black", fill = "black")
df_map_texas_eligible <- dplyr::filter(df_map_texas, df_map_texas$Title.1.Eligible=="Yes")
df_map_texas_not_eligible <- subset(df_map_texas, df_map_texas$Title.1.Eligible=="No")
# Theme that strips axes/gridlines from map plots (reused by later insights).
ditch_the_axes <- theme(
  axis.text = element_blank(),
  axis.line = element_blank(),
  axis.ticks = element_blank(),
  panel.border = element_blank(),
  panel.grid = element_blank(),
  axis.title = element_blank()
)
# Texas schools coloured by eligibility; diamonds mark Austin, Dallas,
# Houston and San Antonio.
tx_base +
  geom_point(data = df_map_texas_eligible, mapping = aes(x=Longitude,y=Latitude,colour='Title I Eligible')) +
  geom_point(data = df_map_texas_not_eligible,mapping = aes(x=Longitude,y=Latitude,colour='Not Title I Eligible')) +
  geom_point(mapping = aes(x=-97.743061,y=30.267153,colour = 'Major Cities'),size = 5,shape = 18)+
  geom_point(mapping = aes(x=-96.796988,y=32.776664,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-95.369803,y=29.760427,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-98.493628,y=29.424122,colour = 'Major Cities'),size = 5,shape=18) +
  ditch_the_axes +
  scale_color_brewer(palette="PRGn")
## US ##
usa <- map_data("usa")
us_base <- ggplot(data = states) +
  geom_polygon(aes(x = long, y = lat, group = group), color = "white") +
  coord_fixed(1.3)
# Drop territories and non-contiguous states before the lower-48 choropleth.
df_map_main_us <- filter(df_map, (df_map$State != "Bureau of Indian Education")&(df_map$State != "Northern Marianas")&(df_map$State != "Puerto Rico")&(df_map$State != "Alaska")&(df_map$State != "Hawaii"))
df_map_main_us$State <- tolower(df_map_main_us$State)
# One row per remaining state; sized from the data instead of the original
# hard-coded rep(0, 47) / 1:47 loop.
state_eligibilty_perc <- data.frame(state = unique(df_map_main_us$State), perc = 0)
for (i in seq_len(nrow(state_eligibilty_perc))){
  state <- state_eligibilty_perc[i,]$state
  num_el <- nrow(subset(df_map_main_us, (df_map_main_us$State==state)&(df_map_main_us$Title.1.Eligible=="Yes")))
  total <- nrow(subset(df_map_main_us, (df_map_main_us$State==state)))
  percent <- num_el/total
  state_eligibilty_perc[i,]$perc <- percent
}
state_eligibilty_perc <- mutate(state_eligibilty_perc, region = state)
el_perc <- inner_join(state_eligibilty_perc, states, by = "region")
ggplot(data = el_perc) +
  geom_polygon(aes(x = long, y = lat, group = group, fill=perc), color = "white") +
  ggtitle("Percentage of Title I Eligibility") +
  theme_bw() +
  ditch_the_axes +
  scale_fill_gradientn(colours = rev(terrain.colors(7)),
                       breaks = c(.14, .28, .42, .56, .70, .84, 1))
# States with the lowest / highest eligibility share.
el_perc[which.min(el_perc$perc),]$state
el_perc[which.max(el_perc$perc),]$state
### California ###
df_map_ca = subset(df_map, df_map$State == "California")
states <- map_data("state")
ca_df <- subset(states, region == "california")
ca_base <- ggplot(data = ca_df, mapping = aes(x = long, y = lat)) +
  coord_fixed(1.3) +
  geom_polygon(color = "black", fill = "black")
df_map_ca_eligible <- dplyr::filter(df_map_ca, df_map_ca$Title.1.Eligible=="Yes")
df_map_ca_not_eligible <- subset(df_map_ca, df_map_ca$Title.1.Eligible=="No")
# Diamonds mark San Francisco, San Diego, Los Angeles, San Jose, Sacramento.
ca_base +
  geom_point(data = df_map_ca_eligible, mapping = aes(x=Longitude,y=Latitude,colour='Title I Eligible')) +
  geom_point(data = df_map_ca_not_eligible,mapping = aes(x=Longitude,y=Latitude,colour='Not Title I Eligible')) +
  geom_point(mapping = aes(x=-122.419416,y=37.774929,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-117.161084,y=32.715738,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-118.243685,y=34.052234,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-121.886329,y=37.338208,colour = 'Major Cities'),size = 5,shape=18) +
  geom_point(mapping = aes(x=-121.494400,y=38.581572,colour = 'Major Cities'),size = 5,shape=18) +
  ditch_the_axes +
  scale_color_brewer(palette="PRGn")
### New York ###
df_map_ny = subset(df_map, df_map$State == "New York")
states <- map_data("state")
ny_df <- subset(states, region == "new york")
ny_base <- ggplot(data = ny_df, mapping = aes(x = long, y = lat)) +
  coord_fixed(1.3) +
  geom_polygon(color = "black", fill = "black")
df_map_ny_eligible <- dplyr::filter(df_map_ny, df_map_ny$Title.1.Eligible=="Yes")
df_map_ny_not_eligible <- subset(df_map_ny, df_map_ny$Title.1.Eligible=="No")
# Diamond marks New York City.
ny_base +
  geom_point(data = df_map_ny_eligible, mapping = aes(x=Longitude,y=Latitude,colour='Title I Eligible')) +
  geom_point(data = df_map_ny_not_eligible,mapping = aes(x=Longitude,y=Latitude,colour='Not Title I Eligible')) +
  geom_point(mapping = aes(x=-74.005973,y=40.712775,colour = 'Major Cities'),size = 5,shape=18) +
  ditch_the_axes +
  scale_color_brewer(palette="PRGn")
################################# Insight 2 ########################################
# Predict Title I eligibility of primary schools with a decision tree and
# random forests, tuning mtry and the number of trees.
df_elem <- filter(df_orig, df_orig$level == 'Primary School') %>% filter(Title.1.Eligible == "Yes"|Title.1.Eligible == "No")
nrow(subset(df_elem, df_elem$Title.1.Eligible == 'Yes'))/nrow(df_elem)
df_elem <- select(df_elem, "State", "Location.City",
                  "Longitude", "Latitude", "Title.1.Eligible", "Charter", "Total.Lunch",
                  "Full.Time.Teachers", "PreK.Offered",
                  "member", "am", "asian", "hisp", "black", "white", "pacific", "tr", "toteth")
df_elem_test_vars <- select(df_elem, "Title.1.Eligible", "Total.Lunch",
                            "Full.Time.Teachers", "am", "asian", "hisp", "black", "white", "pacific", "tr", "toteth")
nrow(subset(df_elem, df_elem$Title.1.Eligible=="Yes"))/nrow(df_elem) # Base line = 81 %
# Testing which variables to use
# Predicts every school as eligible
require(tree)
dim(df_elem_test_vars)
df_elem_test_vars$Title.1.Eligible <- as.factor(df_elem_test_vars$Title.1.Eligible)
elem_tree <- tree(Title.1.Eligible~., data = df_elem_test_vars)
plot(elem_tree)
text(elem_tree,pretty=0)
elem_tree
require(randomForest)
set.seed(11)
# 70% training split, computed from the data instead of the hard-coded 36474
# (which was only valid for one particular snapshot of the dataset).
train_vars=sample(1:nrow(df_elem_test_vars), round(0.7 * nrow(df_elem_test_vars)))
rf.elem = randomForest(Title.1.Eligible ~ . ,data=df_elem_test_vars, subset=train_vars)
rf.elem
varImpPlot(rf.elem,
           sort = T,
           main="Variable Importance",
           pch = 19,
           col = 'blue')
# Let's take the 4 most important
df_elem1 <- select(df_elem, "Title.1.Eligible", "Total.Lunch",
                   "Full.Time.Teachers", "toteth", "white")
df_elem1$Title.1.Eligible <- as.factor(df_elem1$Title.1.Eligible)
dim(df_elem1)
train=sample(1:nrow(df_elem1), round(0.7 * nrow(df_elem1))) # 70% of data
# Tune mtry (number of predictors sampled per split) by held-out error.
test.err=double(4)
for(mtry in 1:4){
  fit=randomForest(Title.1.Eligible~.,data=df_elem1,subset=train,mtry=mtry)
  #oob.err[mtry]=fit$mse[400]
  pred=predict(fit,df_elem1[-train,])
  test.err[mtry]=mean(pred!=df_elem1[-train,]$Title.1.Eligible)
  cat(mtry," ")
}
matplot(1:mtry,test.err,pch=19,col="blue",type="b",ylab="Misclassification Error",xlab="mtry")
min_mtry<- which.min(test.err)
fit=randomForest(Title.1.Eligible~.,data=df_elem1,subset=train,mtry=min_mtry)
fit
pred=predict(fit,df_elem1[-train,])
mean(pred!=df_elem1[-train,]$Title.1.Eligible)
mean(pred==df_elem1[-train,]$Title.1.Eligible)
table(pred, df_elem1[-train,]$Title.1.Eligible)
#df_elem1$Title.1.Eligible <- as.factor(df_elem1$Title.1.Eligible)
# Tune the number of trees (20, 40, ..., 200) at the chosen mtry.
tree.error = double(10)
numTrees <- 1:10*20
for(i in 1:10){
  fit=randomForest(Title.1.Eligible~.,data=df_elem1,subset=train,ntree=numTrees[i],mtry = min_mtry)
  pred=predict(fit,df_elem1[-train,])
  tree.error[i]=mean(pred!=df_elem1[-train,]$Title.1.Eligible)
  cat(numTrees[i]," ")
}
# BUG FIX: the x variable was `iter`, which is never defined anywhere in this
# script; plot the error curve against the tree counts actually evaluated.
matplot(numTrees,tree.error,pch=19,col="blue",type="b",ylab="Misclassification Error",xlab = "Number of Trees")
min_tree <- numTrees[which.min(tree.error)]
fit=randomForest(Title.1.Eligible~.,data=df_elem1,subset=train,ntree=numTrees[which.min(tree.error)],mtry=min_mtry)
fit
pred=predict(fit,df_elem1[-train,])
mean(pred!=df_elem1[-train,]$Title.1.Eligible)
mean(pred==df_elem1[-train,]$Title.1.Eligible)
table(pred, df_elem1[-train,]$Title.1.Eligible)
####################################### Insight 3 #########################################
# Fit SVMs that predict Title I eligibility from school coordinates alone:
# linear vs radial kernel for Austin, then a radial kernel for all of Texas.
library(maps)
library(mapdata)
library(e1071)
set.seed(11)
# Austin schools only; Longitude_Scaled compresses longitude (divide by 3) so
# the two axes have comparable numeric ranges for the SVM.
df_svm = filter(df_orig, (Title.1.Eligible == "Yes")|(Title.1.Eligible == "No")) %>%
filter(Location.City == 'AUSTIN' & Longitude < -95 & Latitude < 35) %>%
select("Longitude", "Latitude", "Title.1.Eligible") %>%
mutate(Longitude_Scaled = Longitude/3)
df_svm$Latitude <- as.numeric(df_svm$Latitude)
df_svm$Longitude <- as.numeric(df_svm$Longitude)
df_svm$Title.1.Eligible <- as.factor(df_svm$Title.1.Eligible)
ggplot(data = df_svm, mapping = aes(x=Longitude_Scaled,y=Latitude,colour=Title.1.Eligible)) + geom_point()
# Linear SVM
# 10-fold cross-validation over cost = 1..10.
tuned = tune.svm(Title.1.Eligible~Longitude_Scaled+Latitude, data = df_svm,
kernel = "linear",
cost = 1:10,
tunecontrol=tune.control(cross=10))
best_cost <- tuned$best.model$cost
svmfit=svm(Title.1.Eligible~Longitude_Scaled+Latitude,data=df_svm,type="C",kernel="linear",cost=best_cost)
print(svmfit)
# Prediction grid over the Austin bounding box.
# NOTE(review): `grange` is computed but unused; the grid limits are
# hard-coded to Austin's scaled-longitude/latitude range.
make.grid=function(x,n=100){
grange=apply(x,2,range)
x1=seq(-32.67,-32.52,length=n)
x2=seq(30.12,30.55,length=n)
expand.grid(X1=x1,X2=x2)
}
x= cbind(df_svm$Longitude_Scaled,df_svm$Latitude)
y =df_svm$Title.1.Eligible
col_func <- function(x){ ifelse(x=="Yes","blue","red") }
col <-col_func(y)
xgrid=make.grid(x)
colnames(xgrid)[1] = "Longitude_Scaled"
colnames(xgrid)[2] = "Latitude"
ygrid=predict(svmfit,xgrid)
# Decision regions (small background dots) with true labels overlaid.
plot(xgrid,col=c("red","blue")[as.numeric(ygrid)],pch=20,cex=.2)
points(x,col=col,pch=19)
tuned$best.performance
# Radial Basis
# Same workflow with a radial kernel, also tuning gamma = 1..10.
tuned = tune.svm(Title.1.Eligible~Longitude_Scaled+Latitude, data = df_svm,
cost = 1:10,
gamma = 1:10,
kernel = "radial",
tunecontrol=tune.control(cross=10))
best_gamma <- tuned$best.model$gamma
best_cost <- tuned$best.model$cost
svmfit=svm(Title.1.Eligible~Longitude_Scaled+Latitude,data=df_svm,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
print(svmfit)
make.grid=function(x,n=100){
grange=apply(x,2,range)
x1=seq(-32.67,-32.52,length=n)
x2=seq(30.12,30.55,length=n)
expand.grid(X1=x1,X2=x2)
}
x= cbind(df_svm$Longitude_Scaled,df_svm$Latitude)
y =df_svm$Title.1.Eligible
col_func <- function(x){ ifelse(x=="Yes","blue","red") }
col <-col_func(y)
xgrid=make.grid(x)
colnames(xgrid)[1] = "Longitude_Scaled"
colnames(xgrid)[2] = "Latitude"
ygrid=predict(svmfit,xgrid)
plot(xgrid,col=c("red","blue")[as.numeric(ygrid)],pch=20,cex=.2)
points(x,col=col,pch=19)
tuned$best.performance
# Texas
# Statewide version; df_svm is rebuilt for the whole state.
df_svm = filter(df_orig, (Title.1.Eligible == "Yes")|(Title.1.Eligible == "No")) %>%
filter(State == 'Texas') %>%
select("Longitude", "Latitude", "Title.1.Eligible","Location.City") %>%
mutate(Longitude_Scaled = Longitude/3)
df_svm$Latitude <- as.numeric(df_svm$Latitude)
df_svm$Longitude <- as.numeric(df_svm$Longitude)
df_svm$Title.1.Eligible <- as.factor(df_svm$Title.1.Eligible)
ggplot(data = df_svm, mapping = aes(x=Longitude_Scaled,y=Latitude,colour=Title.1.Eligible)) + geom_point()
# Tuning is super slow so just trust me that it returned cost=5 and gamma=10
#tuned = tune.svm(Title.1.Eligible~Longitude_Scaled+Latitude, data = df_svm,
# cost = 2^2:10,
# gamma = 2^2:10,
# kernel = "radial",
# tunecontrol=tune.control(cross=10))
#best_gamma <- tuned$best.model$gamma
#best_cost <- tuned$best.model$cost
#svmfit=svm(Title.1.Eligible~Longitude_Scaled+Latitude,data=df_svm,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
svmfit=svm(Title.1.Eligible~Longitude_Scaled+Latitude,data=df_svm,type="C",kernel="radial",cost=5,gamma=10)
print(svmfit)
# Redefine the grid over the Texas bounding box (n = 150 for finer resolution).
make.grid=function(x,n=150){
grange=apply(x,2,range)
x1=seq(-35.54,-31.22,length=n)
x2=seq(25.87,36.49,length=n)
expand.grid(X1=x1,X2=x2)
}
x= cbind(df_svm$Longitude_Scaled,df_svm$Latitude)
y =df_svm$Title.1.Eligible
col_func <- function(x){ ifelse(x=="Yes","blue","red") }
col <-col_func(y)
xgrid=make.grid(x)
colnames(xgrid)[1] = "Longitude_Scaled"
colnames(xgrid)[2] = "Latitude"
ygrid=predict(svmfit,xgrid)
plot(xgrid,col=c("red","blue")[as.numeric(ygrid)],pch=20,cex=.2)
points(x,col=col,pch=19)
# NOTE(review): `tuned` was not refit for Texas (the tuning above is commented
# out), so this reports the *Austin* radial model's CV error -- confirm intent.
tuned$best.performance
################################### Insight 4 ######################################
# Compare k-means clustering (k = 2) against the true eligibility labels for
# Austin schools, using % white students and % free/reduced lunch as features.
set.seed(11)
df4 <- filter(df_orig, Title.1.Eligible == "Yes"|Title.1.Eligible == "No") %>%
  filter(Location.City=="AUSTIN") %>%
  mutate(stratio = Full.Time.Teachers / member,
         perc_white = white / member,
         perc_lunch = Total.Lunch / member)
ggplot(data = df4, mapping = aes(x=perc_white,y=perc_lunch,colour=Title.1.Eligible)) +
  geom_point() + xlab("Percentage of White Students") + ylab("Percentage of Students Qualified for Free/Reduced Lunch") +
  ggtitle("Schools in Austin")
col_func <- function(x){ ifelse(x=="Yes","blue","red") }
x <- cbind(df4$perc_white,df4$perc_lunch)
col <- col_func(df4$Title.1.Eligible)
km.out=kmeans(x,2)
km.out
km.out$cluster
cluster_col_func <- function(x){ifelse(x=="1",'orange','green')}
cluster_col <- cluster_col_func(km.out$cluster)
# BUG FIX: map cluster ids to Yes/No labels BEFORE plotting -- the original
# called text() with `clust_pred` while it was still undefined (it was only
# assigned two statements later).
clust_pred_function <- function(x){ifelse(x=="2","No","Yes")}
clust_pred <- clust_pred_function(km.out$cluster)
# Cluster assignment as open circles, true labels as filled points, with the
# misclassification rate annotated in the corner.
plot(x,col=cluster_col,cex=2,pch=1,lwd=2,
     xlab = "Percentage of White Students",
     ylab = "Percentage of Students Qualified for Free/Reduced Lunch",
     main = "Clustering")
points(x,col=col,pch=19)
text(.95,.95,labels = paste("Error =",toString(mean(clust_pred != df4$Title.1.Eligible))))
table(clust_pred, df4$Title.1.Eligible)
mean(clust_pred != df4$Title.1.Eligible)
# SVM
# Radial-basis SVM on the same two features, for comparison with k-means.
y<-df4$Title.1.Eligible <- as.factor(df4$Title.1.Eligible)
x<- cbind(df4$perc_white,df4$perc_lunch)
# NOTE(review): the tuning call uses the formula `y~x` on external vectors
# while the final fit uses the data-frame columns -- presumably equivalent
# here, but verify the tuned cost/gamma transfer as intended.
tuned = tune.svm(y~x, data = df4,
cost = 1:10,
gamma = 1:10,
kernel = "radial",
tunecontrol=tune.control(cross=10))
best_gamma <- tuned$best.model$gamma
best_cost <- tuned$best.model$cost
svmfit=svm(Title.1.Eligible~perc_white+perc_lunch,data=df4,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
print(svmfit)
# Prediction grid over the unit square (both features are fractions).
# NOTE(review): `grange` is computed but unused.
make.grid=function(x,n=100){
grange=apply(x,2,range)
x1=seq(0,1,length=n)
x2=seq(0,1,length=n)
expand.grid(X1=x1,X2=x2)
}
col_func <- function(x){ ifelse(x=="Yes","blue","red") }
col <-col_func(y)
xgrid=make.grid(x)
colnames(xgrid)[1] = "perc_white"
colnames(xgrid)[2] = "perc_lunch"
ygrid=predict(svmfit,xgrid)
# Decision regions (small background dots) with the true labels overlaid.
plot(xgrid,col=c("red","blue")[as.numeric(ygrid)],
pch=20,cex=.2,
xlab = "Percentage of White Students",
ylab = "Percentage of Students Qualified for Free/Reduced Lunch",
main = "Radial Basis SVM")
points(x,col=col,pch=19)
tuned$best.performance
####################################### Insight 5 ############################################
# Radial-basis SVMs on raw coordinates, with predictions plotted over road
# maps (via ggmap) for Austin, San Antonio and Los Angeles.
library(maps)
library(mapdata)
library(ggmap)
set.seed(11)
# Austin
df_svm = filter(df_orig, (Title.1.Eligible == "Yes")|(Title.1.Eligible == "No")) %>%
filter(Location.City == 'AUSTIN') %>%
select("Longitude", "Latitude", "Title.1.Eligible")
lat <- as.numeric(df_svm$Latitude)
lon <- as.numeric(df_svm$Longitude)
T1 <- as.factor(df_svm$Title.1.Eligible)
df_austin <- data.frame(lat,lon,T1)
austin <- get_map(location= c(lon = -97.743061, lat = 30.267153), maptype = "roadmap",zoom = 11)
# NOTE(review): `ditch_the_axes` is defined back in the Insight 1 section.
ggmap(austin) +
geom_point(data = df_austin, mapping = aes(x=lon,y=lat,colour=T1)) +
ditch_the_axes + ggtitle("Schools of Austin")
# 10-fold CV over cost/gamma grids.
tuned = tune.svm(T1~., data = df_austin,
cost = 1:10,
gamma = 1:10,
kernel = "radial",
tunecontrol=tune.control(cross=10))
best_gamma <- tuned$best.model$gamma
best_cost <- tuned$best.model$cost
svmfit=svm(T1~.,data=df_austin,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
print(svmfit)
# Training-set predictions (no held-out split), plotted on the road map:
# colour = predicted class, shape = true class.
test <- df_austin
test$pred <- predict(svmfit, df_austin)
ggmap(austin) +
geom_point(data = test, mapping = aes(x=lon,y=lat,colour = pred, shape = T1),size = 3) +
ditch_the_axes +
labs(title = "Radial Basis SVM Predictions",
caption = paste("Misclassification error = ", toString(mean(test$pred!=test$T1))))
mean(test$pred!=test$T1)
table(test$pred,test$T1)
# SA
# Same workflow for San Antonio.
df_svm = filter(df_orig, (Title.1.Eligible == "Yes")|(Title.1.Eligible == "No")) %>%
filter(Location.City == 'SAN ANTONIO') %>%
select("Longitude", "Latitude", "Title.1.Eligible")
lat <- as.numeric(df_svm$Latitude)
lon <- as.numeric(df_svm$Longitude)
T1 <- as.factor(df_svm$Title.1.Eligible)
df_sa <- data.frame(lat,lon,T1)
sa <- get_map(location= c(lon = -98.493628, lat = 29.424122), maptype = "roadmap",zoom = 11)
ggmap(sa) +
geom_point(data = df_sa, mapping = aes(x=lon,y=lat,colour=T1)) +
ditch_the_axes + ggtitle("Schools of San Antonio")
tuned = tune.svm(T1~., data = df_sa,
cost = 1:10,
gamma = 1:10,
kernel = "radial",
tunecontrol=tune.control(cross=10))
best_gamma <- tuned$best.model$gamma
best_cost <- tuned$best.model$cost
svmfit=svm(T1~.,data=df_sa,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
print(svmfit)
test <- df_sa
test$pred <- predict(svmfit, df_sa)
ggmap(sa) +
geom_point(data = test, mapping = aes(x=lon,y=lat,colour = pred, shape = T1),size = 3) +
ditch_the_axes +
labs(title = "Radial Basis SVM Predictions",
caption = paste("Misclassification error = ", toString(mean(test$pred!=test$T1))))
mean(test$pred!=test$T1)
table(test$pred,test$T1)
# LA
# Same workflow for Los Angeles.
df_svm = filter(df_orig, (Title.1.Eligible == "Yes")|(Title.1.Eligible == "No")) %>%
filter(Location.City == "LOS ANGELES") %>%
select("Longitude", "Latitude", "Title.1.Eligible")
lat <- as.numeric(df_svm$Latitude)
lon <- as.numeric(df_svm$Longitude)
T1 <- as.factor(df_svm$Title.1.Eligible)
df_la <- data.frame(lat,lon,T1)
la <- get_map(location= c(lon = -118.293685, lat = 34.052234), maptype = "roadmap",zoom = 11)
ggmap(la) +
geom_point(data = df_la, mapping = aes(x=lon,y=lat,colour=T1)) +
ditch_the_axes + ggtitle("Schools of Los Angeles")
tuned = tune.svm(T1~., data = df_la,
cost = 1:10,
gamma = 1:10,
kernel = "radial",
tunecontrol=tune.control(cross=10))
best_gamma <- tuned$best.model$gamma
best_cost <- tuned$best.model$cost
svmfit=svm(T1~.,data=df_la,type="C",kernel="radial",cost=best_cost,gamma=best_gamma)
print(svmfit)
test <- df_la
test$pred <- predict(svmfit, df_la)
ggmap(la) +
geom_point(data = test, mapping = aes(x=lon,y=lat,colour = pred, shape = T1),size=2) +
ditch_the_axes +
labs(title = "Radial Basis SVM Predictions",
caption = paste("Misclassification error = ", toString(mean(test$pred!=test$T1))))
mean(test$pred!=test$T1)
table(test$pred,test$T1)
########################################### Insight 6 ###############################################
# Hierarchical clustering (complete linkage, cut at 2) on the same Austin
# features, compared against the true Title I eligibility labels.
df6 <- filter(df_orig, Title.1.Eligible == "Yes"|Title.1.Eligible == "No") %>%
filter(Location.City=="AUSTIN") %>%
mutate(stratio = Full.Time.Teachers / member,
perc_white = white / member,
perc_lunch = Total.Lunch / member)
x <- cbind(df6$perc_white,df6$perc_lunch)
y <- df6$Title.1.Eligible
ggplot(data = df6, mapping = aes(x=perc_white,y=perc_lunch,colour=Title.1.Eligible)) +
geom_point() + xlab("Percentage of White Students") + ylab("Percentage of Students Qualified for Free/Reduced Lunch") +
ggtitle("Schools in Austin")
# Dendrogram, then cut the tree into two clusters.
hc.complete=hclust(dist(x),method="complete")
plot(hc.complete)
hc.cut=cutree(hc.complete,2)
hc.cut
# Cluster colouring (open circles) vs true-label colouring (filled points).
cluster_col_func <- function(x){ifelse(x=="1",'orange','green')}
cluster_col <- cluster_col_func(hc.cut)
col_func <- function(x){ifelse(x=="Yes","blue","red")}
col <- col_func(y)
# Treat cluster 2 as "No" and cluster 1 as "Yes" to score against the labels.
clust_pred_function <- function(x){ifelse(x==2,"No","Yes")}
clust_pred <- clust_pred_function(hc.cut)
plot(x,col=cluster_col,cex=2,pch=1,lwd=2,
xlab = "Percentage of White Students",
ylab = "Percentage of Students Qualified for Free/Reduced Lunch",
main = "Clustering")
points(x=x[,1],y=x[,2],col=col,pch=19)
text(.95,.95,labels = paste("Error =",toString(mean(clust_pred != df6$Title.1.Eligible))))
table(clust_pred, df6$Title.1.Eligible)
|
0b9054f422e2bf377c9e3317eac06c1bc759d91a
|
28f2d711ec0590e3f5f703ef4a6e652412028e97
|
/signals_int.R
|
fddea1ddf45605c55273ae78031c847ce0010e1c
|
[] |
no_license
|
user05011988/shinyinterface
|
3ef9d2d0a1135c78de38eda0b716616176eab03c
|
74007fb624520d886b7e373a9d591c0317b49be2
|
refs/heads/master
| 2021-01-13T10:30:18.975254
| 2016-10-24T13:25:51
| 2016-10-24T13:25:51
| 69,452,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,402
|
r
|
signals_int.R
|
signals_int = function(autorun_data, finaloutput,spectrum_index,signals_introduce,ROI_profile) {
#Preparation of necessary variables and folders to store figures and information of the fitting
ROI_buckets=which(round(autorun_data$ppm,6)==round(ROI_profile[1,1],6)):which(round(autorun_data$ppm,6)==round(ROI_profile[1,2],6))
Xdata= as.numeric(autorun_data$ppm[ROI_buckets])
Ydata = as.numeric(autorun_data$dataset[spectrum_index, ROI_buckets])
other_fit_parameters = fitting_variables()
other_fit_parameters$freq = autorun_data$freq
other_fit_parameters$ROI_buckets = ROI_buckets
other_fit_parameters$buck_step = autorun_data$buck_step
# is_roi_testing = "N"
# clean_fit='N'
# signals_names=autorun_data$signals_names[1:2]
# signals_codes=autorun_data$signals_codes[1:2]
#
signals_to_quantify = which(ROI_profile[, 7] == 1)
signals_codes = replicate(length(signals_to_quantify), NA)
signals_names = replicate(length(signals_to_quantify), NA)
j = 1
for (i in signals_to_quantify) {
k = which(autorun_data$signals_names == ROI_profile[i,
4])
signals_codes[j] = autorun_data$signals_codes[k]
signals_names[j] = as.character(autorun_data$signals_names[k])
j = j + 1
}
print(signals_names)
# other_fit_parameters$clean_fit = clean_fit
experiment_name = autorun_data$Experiments[[spectrum_index]]
plot_path = file.path(autorun_data$export_path,
experiment_name,
signals_names)
scaledYdata = as.vector(Ydata / (max(Ydata)))
fitting_type=ROI_profile[1,3]
#Fitting of the signals
multiplicities=signals_introduce[,6]
roof_effect=signals_introduce[,7]
signals_parameters=as.vector(t(signals_introduce[,1:5]))
# print(signals_parameters)
# print(Xdata)
other_fit_parameters$freq=autorun_data$freq
fitted_signals = fitting_optimization(signals_parameters,
Xdata,multiplicities,roof_effect,Ydata,other_fit_parameters$freq)
# print(fitted_signals)
# signals_parameters=as.matrix(signals_parameters)
dim(signals_parameters) = c(5, dim(signals_introduce)[1])
rownames(signals_parameters) = c(
'intensity',
'shift',
'width',
'gaussian',
'J_coupling'
)
# signals_to_quantify=c(1,2)
other_fit_parameters$signals_to_quantify=signals_to_quantify
# print(signals_parameters)
#Generation of output data about the fitting and of the necessary variables for the generation ofa figure
output_data = output_generator(
signals_to_quantify,
fitted_signals,
scaledYdata,
Xdata,
signals_parameters,multiplicities
)
# print(output_data)
output_data$intensity=signals_parameters[1, signals_to_quantify] * max(Ydata)
output_data$width=signals_parameters[3, signals_to_quantify]
#Generation of the dataframe with the final output variables
results_to_save = data.frame(
shift = output_data$shift,
Area = output_data$Area * max(Ydata),
signal_area_ratio = output_data$signal_area_ratio,
fitting_error = output_data$fitting_error,
intensity = output_data$intensity,
width = output_data$width
)
#Adaptation of the quantification to de-scaled Ydata
# results_to_save$Area = results_to_save$Area * max(Ydata)
#Generation of the figure when the conditions specified in the Parameters file are accomplished
plot_data = rbind(
output_data$signals_sum,
output_data$baseline_sum,
output_data$fitted_sum,
output_data$signals
)
# print(plot_data)
rownames(plot_data) = c("signals_sum",
"baseline_sum",
"fitted_sum",
as.character(ROI_profile[,4]))
# r=1
# plotdata = data.frame(Xdata=autorun_data$ppm[ROI_buckets], t(dataset[input$x1_rows_selected,ROI_buckets,drop=F]))
plotdata2 = data.frame(Xdata,
Ydata,
plot_data[3, ] * max(Ydata),
plot_data[2, ] * max(Ydata))
plotdata3 <- melt(plotdata2, id = "Xdata")
plotdata3$variable = c(
rep('Original Spectrum', length(Ydata)),
rep('Generated Spectrum', length(Ydata)),
rep('Generated Background', length(Ydata))
)
plotdata4 = data.frame(Xdata, (t(plot_data[-c(1, 2, 3), , drop = F]) *
max(Ydata)))
plotdata5 = melt(plotdata4, id = "Xdata")
p=ggplot() +
geom_line(data = plotdata3,
aes(
x = Xdata,
y = value,
colour = variable,
group = variable
)) +
geom_line(data = plotdata5,
aes(
x = Xdata,
y = value,
colour = 'Surrounding signals',
group = variable
)) +
scale_x_reverse() + labs(x='ppm',y='Intensity')
for (r in 1:length(other_fit_parameters$signals_to_quantify)) {
plotdata = data.frame(Xdata, signals = plot_data[3 + other_fit_parameters$signals_to_quantify[r], ] * max(Ydata))
p=p +
geom_area(
data = plotdata,
aes(
x = Xdata,
y = signals,
position = 'fill',
fill = 'Quantified Signal'
)
)
}
finaloutput = save_output(
spectrum_index,
signals_codes,
results_to_save,
autorun_data$buck_step,
finaloutput)
print(plot_path)
signals_parameters=t(rbind(signals_parameters[, signals_to_quantify],multiplicities[signals_to_quantify],roof_effect[signals_to_quantify]))
blah=list()
blah$signals_parameters=signals_parameters
blah$other_fit_parameters=other_fit_parameters
blah$plot_path=plot_path
blah$p=p
blah$Xdata=Xdata
blah$Ydata=Ydata
blah$finaloutput=finaloutput
blah$results_to_save=results_to_save
blah$fitting_type=fitting_type
blah$ROI_profile=ROI_profile
blah$finaloutput=finaloutput
blah$signals_codes
return(blah)
}
|
154e8b0f178bef387926e86feaa9e6b69eb82688
|
3fd1b4568372ca3f8badd4a311d22deb200a8aa2
|
/experimentation/chi-square-vs-z.R
|
16488803ceafb207d34008318e32c364e4acf27e
|
[] |
no_license
|
javidjamae/data-science
|
8824005e47e86acaa60b9fb2d63966bf48f1e15f
|
5f789a4cd7b24b8e92050eb6caa682f327fb37f5
|
refs/heads/master
| 2023-09-03T01:55:29.177011
| 2023-08-31T04:24:23
| 2023-08-31T04:24:23
| 285,351,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
chi-square-vs-z.R
|
# Compare two sample proportions with a chi-square test (equivalent to a
# two-proportion z-test when correct = FALSE), then run Fisher's exact test
# on a small 2x2 table for contrast.

# Confidence level shared by the tests below.
confidence.level <- 0.95

# Observed counts for groups A and B.
a.successes <- 188
a.trials <- 2069
b.successes <- 591
b.trials <- 1000

# Two-sample test of equal proportions, no continuity correction.
res.prop.test <- prop.test(
  x = c(a.successes, b.successes),
  n = c(a.trials, b.trials),
  p = NULL,
  alternative = "two.sided",
  conf.level = confidence.level,
  correct = FALSE
)
print(res.prop.test)

# Fisher's exact test on a separate small 2x2 table
# ("t" is accepted as an abbreviation of "two.sided").
res.fisher.test <- fisher.test(
  rbind(c(3, 9), c(13, 4)),
  alternative = "t"
)
print(res.fisher.test)
|
c77138a346b0e80cef3ab2f99c9d1ea4654e7ca6
|
e37ea30ceec20595bda039465f4e5db64acd9e3d
|
/getPCA.R
|
60a03336b37b1d754a5e5312dd677c45f26ae51f
|
[] |
no_license
|
BTFProdigy/DumpScripts
|
1546c136c8cd37ee82a27fbc2c8f92d8fb3b8476
|
9f8e6f3acec70aced17fa4a657cfaae9dd843146
|
refs/heads/master
| 2021-12-13T18:29:27.273300
| 2015-08-05T07:21:28
| 2015-08-05T07:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,054
|
r
|
getPCA.R
|
###################################
# PCA on the randomly picked loci #
###################################
getwd()
# Prepare a data table for PCA.
#
# dataset    : input table (first columns are annotation: chr, start, end,
#              state; remaining columns are sample measurements)
# inputs     : character vector of input-sample column names to drop
#              (only used when selected = FALSE)
# Colmat_dim : column positions of the numeric measurement block *after*
#              the column-filtering step
# selected   : if TRUE, keep only the columns named in `selection` instead
#              of dropping `inputs`
# selection  : character vector of column names to keep when selected = TRUE
#
# Returns a list of two data frames:
#   clean_dataset - annotated rows that survive filtering
#   mat_dataset   - the numeric measurement columns of those rows
# Rows with any NA, and rows whose measurement columns are all zero, are
# removed from both.
get_dataSets <- function(dataset, inputs = FALSE, Colmat_dim, selected = FALSE, selection) {
  # Column filtering: whitelist via `selection`, or blacklist via `inputs`.
  trimmed <- if (selected) {
    dataset[, which(colnames(dataset) %in% selection)]
  } else {
    dataset[, -which(colnames(dataset) %in% inputs)]
  }
  # Drop every row containing a missing value.
  annotated <- trimmed[complete.cases(trimmed), ]
  # Numeric measurement block; keep only rows with some non-zero signal.
  measurements <- annotated[, Colmat_dim]
  keep_row <- rowSums(measurements == 0) != ncol(measurements)
  list(
    clean_dataset = annotated[keep_row, ],
    mat_dataset = measurements[keep_row, ]
  )
}
# Load datasets ---------------------------------------------------------
# NOTE(review): two of the four paths carry an "na/" prefix and two do not —
# verify this is intentional and not a copy-paste slip.
dataset_91raw <- read.delim("na/randomlypicked_bigtable.raw")
dataset_91bin <- read.delim("randomlypicked_bigtable.bin")
selected_91raw <- read.delim("na/randomlypickedSelected_bigtable.raw")
selected_91bin <- read.delim("randomlypickedSelected_bigtable.bin")
# Quick interactive inspection of the inputs.
head(dataset_91raw)
head(dataset_91bin)
head(selected_91raw)
head(selected_91bin)
dim(dataset_91raw)
dim(dataset_91bin)
dim(selected_91raw)
dim(selected_91bin)
# Full profiles: drop the two input samples, measurement columns are 5:95.
raw91_Profiles <- get_dataSets(dataset_91raw, inputs=c("X008","X195"),
Colmat_dim=5:95)
bin91_Profiles <- get_dataSets(dataset_91bin, inputs=c("X008","X195"),
Colmat_dim=5:95)
# Selected profiles: keep only the annotation columns plus ten chosen samples.
selected_raw91_Profiles <- get_dataSets(selected_91raw, Colmat_dim=5:14,
selected=TRUE,
selection=c("chr","start", "end","state",
"X009", "X036", "X002", "X029",
"X033", "X037", "X028", "X007",
"X034", "X032"))
# Fixed: a stray trailing positional argument `5:14` was passed in this call;
# it silently bound to `inputs` (which is ignored when selected=TRUE) and was
# clearly unintended.
selected_bin91_Profiles <- get_dataSets(selected_91bin, Colmat_dim=5:14,
selected=TRUE,
selection=c("chr","start", "end","state",
"X009", "X036", "X002", "X029",
"X033", "X037", "X028", "X007",
"X034", "X032"))
head(raw91_Profiles$clean_dataset)
head(bin91_Profiles$clean_dataset)
head(selected_raw91_Profiles$clean_dataset)
head(selected_bin91_Profiles$clean_dataset)
dim(selected_raw91_Profiles$clean_dataset)
|
52521bbe55af1c78446c77503ff740e9f596e366
|
c5a51f5047e5bbad48ef42101b561c7b4fb02bb7
|
/man/ConquerRing.Rd
|
ef16b0dc977736a26b5116cbb938c46bcc1aa726
|
[] |
no_license
|
roderickslieker/CONQUER.d3
|
ee7eb14c69c1ef87aafe352a21dcb19201acaf38
|
bd38bb4a27dc00b92ad2dff6cd0317bd02fedcf3
|
refs/heads/master
| 2021-04-23T21:02:10.004179
| 2021-01-06T08:12:37
| 2021-01-06T08:12:37
| 250,003,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 543
|
rd
|
ConquerRing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConquerRing.R
\name{ConquerRing}
\alias{ConquerRing}
\title{ConquerRing}
\usage{
ConquerRing(
conquerSummary,
tissue,
KEGG_DATA,
hoverID,
width = NULL,
height = NULL,
elementId = NULL
)
}
\arguments{
\item{conquerSummary}{conquerSummary}
\item{tissue}{Tissue of interest}
\item{KEGG_DATA}{KEGG_DATA generated by CONQUER}
\item{hoverID}{hoverID}
\item{width}{Width}
\item{height}{Height}
\item{elementId}{elementId}
}
\description{
ConquerRing
}
|
39b58399f138601d7a99b1f434c5bce69f877869
|
5fbd7b46fe555db6271ddd423ff74039e989f4a2
|
/Course9/myApp/plotlyDemo.R
|
cffa243f03c416b3965d10b229d409dd7fdb1eff
|
[] |
no_license
|
TD0401/datasciencecoursera
|
b3efa4cbf15a20fb287c8dfbffee013a0dfabcea
|
0c369e98f9db1a830cae9df1ca3a7ca1f69ed7bc
|
refs/heads/master
| 2023-02-12T02:54:43.759691
| 2021-01-03T18:44:06
| 2021-01-03T18:44:06
| 263,995,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,968
|
r
|
plotlyDemo.R
|
# Demo script: a tour of interactive plot types with the plotly package.
# Uses datasets shipped with base R (mtcars, airmiles, EuStockMarkets,
# precip, iris, state.abb / state.x77).
library(plotly)
# Scatter plot with a discrete colour scale (cylinder count as factor).
plot_ly(mtcars, x= mtcars$wt, y = mtcars$mpg, mode = "markers", color = as.factor(mtcars$cyl))
# Scatter plot with a continuous colour palette (engine displacement).
plot_ly(mtcars, x= mtcars$wt, y = mtcars$mpg, mode = "markers", color = mtcars$disp)
# Vary point size by horsepower.
plot_ly(mtcars, x= mtcars$wt, y = mtcars$mpg, mode = "markers", color = as.factor(mtcars$cyl) , size = mtcars$hp)
# 3-D scatter plot of simulated temperature / pressure / time data.
set.seed(11111)
temp <- rnorm(100, mean = 30 , sd= 5)
pressure <- rnorm(100)
dtime <- 1:100
plot_ly(x = temp, y = pressure, z = dtime , type = "scatter3d",mode = "markers", color = temp)
# Line graph of a univariate time series.
data("airmiles")
plot_ly(x = time(airmiles), y= airmiles)
# Multi-line graph: one line per European stock index (long format via gather).
library(plotly)
library(dplyr)
library(tidyr)
data("EuStockMarkets")
stocks<- as.data.frame(EuStockMarkets) %>% gather(index,price) %>% mutate(time = rep(time(EuStockMarkets),4))
plot_ly( stocks , x= stocks$time, y = stocks$price, color = stocks$index)
# Histogram of annual precipitation.
plot_ly(x=precip, type ="histogram")
# Box plot, one box per iris species.
plot_ly(iris, y= iris$Petal.Length , color = iris$Species, type ="box")
# Heatmap of random "terrain".
terrain1 <- matrix(rnorm(100*100), nrow = 100, ncol = 100)
plot_ly(z = terrain1 , type="heatmap")
# 3-D surface of random "terrain".
terrain1 <- matrix(rnorm(100*100), nrow = 100, ncol = 100)
plot_ly(z = terrain1 , type="surface")
# Choropleth map of 1975 US state populations, with hover text.
state_pop <- data.frame(State= state.abb, Pop= as.vector(state.x77[,1]))
state_pop$hover <- with (state_pop,paste (State,'<br>',"Population:",Pop))
borders <- list(color = toRGB("red"))
# NOTE(review): "snowlakes" below is likely a typo for "showlakes"; plotly
# silently ignores unknown keys, so lakes may not render as intended — verify.
map_options <- list(scope='usa',projection=list(type='albers usa'), snowlakes=TRUE,lakecolor=toRGB("white"))
plot_ly(state_pop, z= state_pop$Pop, text = state_pop$hover, locations = state_pop$State, type = "choropleth" ,locationmode="USA-states",
color=state_pop$Pop, colors = 'Blues', marker = list(line =borders)) %>%
layout(title ="US Population in 1975" , geo = map_options )
# ggplotly(<ggplot object>) converts a static ggplot into an interactive plot.
|
fc01e5231321476715755a5626ef1c3e002455ce
|
7ee7666d0b6f280d8081359d11f5230bb91a502d
|
/HSData/RCode/NPRtop100.R
|
37f44c5a437e6d95f50fd733a25f1b647bc0838c
|
[] |
no_license
|
zhianwang/HMAResearch
|
d9bace5abed00ccbdc269c937da4c867369fc23f
|
5fbdc95eebb648242c4df18dacec13e72e3386fd
|
refs/heads/master
| 2020-04-02T01:00:06.082167
| 2018-11-15T18:48:25
| 2018-11-15T18:48:25
| 153,832,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,887
|
r
|
NPRtop100.R
|
library(dplyr)
library(purrr)
library(magrittr)
# GM11() (grey forecasting model) is defined in this sourced file.
source("RCode/GM11.R")
# NPR Top 100 is assembled from three parts: systems, single hospitals, and UCLA.
allhs <- read.csv("Result/allhs1116_1017.csv")
hospital_data <- read.csv("Result/hospital_data_1017.csv")
# Remove location info and ACO columns (by position).
hospital_data <- hospital_data[-c(5:12,16:18,40:43)]
# Top 100 HS ----------------------------------------------------------------------------------------------------
nprtop100mapping <- openxlsx::read.xlsx("Data/2017NPRtop100HS_1023_mapping.xlsx")
# Split the mapping by AHA_System_ID (systems) vs AHA.ID (single hospitals).
nprtop100_sys <- nprtop100mapping[,c(1,2,4:6)] %>% filter(!is.na(AHA_SyStem_ID)) %>%
left_join(allhs, by = c("AHA_SyStem_ID" = "System_ID")) %>%
select(-c(3:5))
# Hospital-level rows: join AHA hospital data, zero-fill NAs, and derive a
# combined physician headcount across the employment-type columns.
nprtop100_hos <- nprtop100mapping %>% filter(!is.na(AHA.ID)) %>%
left_join(hospital_data[c(1,2,4,8:19,22:28)], by = c("AHA.ID" = "AHA_ID")) %>%
select(-c(3:8,10)) %>%
mutate_if(is.numeric, tidyr::replace_na, replace = 0) %>%
mutate(All_Physicians = (Physicians_and_dentists_FT +
Total_Physicians_NE +
Total_Physicians_TE +
Total_Physicians_TG +
Total_Physicians_TC +
Total_Physicians_TP),
Hospitals = 1)
# Add UCLA (NPR rank 89) from the HMA member table.
ucla <- read.csv("Result/hmamember1116_1017.csv") %>%
filter( HMA_System_Name == "UCLA Health") %>%
mutate(NPR_Rank = 89, a_Name = "University of California - Los Angeles") %>%
select(-1)
# Interactive sanity check: prints TRUE per column when the names line up.
names(nprtop100_hos) == names(nprtop100_sys)
nprtop100 <- bind_rows(nprtop100_sys,nprtop100_hos,ucla) %>% arrange(NPR_Rank, Year)
write.csv(nprtop100,"Result/nprtop1001116_1023.csv",row.names = FALSE)
# Top 100 + VA ---------------------------------------------------------------------------------------------------
# VA is appended as pseudo-rank 101; columns reordered to match nprtop100.
VA <- allhs %>% filter(System_ID == "5999295") %>%
mutate(NPR_Rank = 101, a_Name = "Department of Veterans Affairs") %>% select(24,25,1:23)
VA <- VA[-3]
# Interactive sanity check before binding.
names(nprtop100) == names(VA)
nprtop100VA <- bind_rows(nprtop100,VA)
write.csv(nprtop100VA,"Result/nprtop100VA1116_1023.csv",row.names = FALSE)
# HMA in Top 100 -------------------------------------------------------------------------------------------------
hmatop100 <- nprtop100 %>% left_join(nprtop100mapping[,c(2,4)],by = 'a_Name') %>%
filter(!is.na(HMA_System_Name))
write.csv(hmatop100,"Result/nprhmatop1001116_1023.csv",row.names = FALSE)
###################################################################################################################
# Forecasting Indicator
###################################################################################################################
# Reload the tables, keeping only the indicator columns used for forecasting.
allhs <- read.csv("Result/allhs1116_1017.csv") %>% select(-c(5,6,9,11:14,17:21))
hma <- read.csv("Result/hmamember1116_1017.csv") %>% select(-c(5,6,9,11:14,17:21))
nprtop100 <- read.csv("Result/nprtop1001116_1023.csv") %>% select(-c(1,6,7,10,12:15,18:22))
nprhmatop100 <- read.csv("Result/nprhmatop1001116_1023.csv") %>% select(-c(1,6,7,10,12:15,18:22))
nprtop100VA <- read.csv("Result/nprtop100VA1116_1023.csv") %>% select(-c(1,6,7,10,12:15,18:22))
# Part 0: Functions -----------------------------------------------------------------------------------------------
# Pivot an indicator summary so that years become columns.
#
# df : data frame with a `Year` column; every other column is a numeric
#      indicator, one row per year.
#
# Returns a data frame with one row per indicator, one column per year
# (named after the year), plus an `indicator` column holding the original
# column names.
transposedf <- function(df) {
  yrs <- df$Year
  # Drop `Year` by name rather than by position, so the function also works
  # when Year is not the first column (the original assumed column 1).
  vals <- df[, setdiff(names(df), "Year"), drop = FALSE]
  out <- as.data.frame(t(vals))
  colnames(out) <- yrs
  out$indicator <- row.names(out)
  out
}
# Fit a GM(1,1) grey model to each indicator's yearly series and return the
# next two forecast periods, labelled Y2017 / Y2018.
#
# df      : wide table with a Year column and one numeric column per
#           indicator (as produced by the summarise blocks below)
# descale : "Y" divides values by 1000 before fitting and multiplies the
#           predictions back afterwards (keeps large totals in a numerically
#           friendlier range); any other value fits the raw series
#
# Depends on GM11() sourced from RCode/GM11.R, plus reshape2/tidyr/purrr.
# NOTE(review): assumes GM11(x, 2)$predict is a matrix-like object with
# columns `period` and `preval` — confirm against RCode/GM11.R.
indicatorGM11 <- function(df,descale = "N"){
# Wide (years as columns) -> long (indicator, Year, value).
df_t <- transposedf(df)
df_long <- reshape2::melt(df_t,
id.vars="indicator",
variable.name="Year",
value.name="value")
if(descale == "Y"){
# Scale down by 1000 before fitting.
df_nest <- df_long %>%
mutate(devalue = value/1000) %>%
group_by(indicator) %>%
tidyr::nest(.key = "data.tbl") %>%
mutate(value = map(data.tbl,"devalue"))
} else {
df_nest <- df_long %>%
group_by(indicator) %>%
tidyr::nest(.key = "data.tbl") %>%
mutate(value = map(data.tbl,"value"))
}
# One GM(1,1) model per indicator, forecasting 2 periods ahead.
df_fit <- df_nest %>%
mutate(model = map(.x=value, .f=GM11,2),
predict = map(model,"predict")
)
if (descale == "Y"){
# Undo the /1000 scaling on the predictions before reshaping.
df_predicted <- df_fit %>%
tidyr::unnest(predict,.drop = TRUE) %>%
mutate(predicted = preval*1000) %>%
reshape2::dcast(indicator ~ period, value.var="predicted") %>%
rename(Y2017 = "1", Y2018 = "2")
} else{
df_predicted <- df_fit %>%
tidyr::unnest(predict,.drop = TRUE) %>%
reshape2::dcast(indicator ~ period, value.var="preval") %>%
rename(Y2017 = "1", Y2018 = "2")
}
return(df_predicted)
}
# Part 1: Forecast all basic indicators (Average) except # of Hospitals ------------------------------------------------------------------------------
# Per-year means of every numeric indicator, for each scope.
allhs_summary <- allhs %>%
group_by(Year) %>%
summarise_if(is.numeric,mean)
hma_summary <- hma %>%
group_by(Year) %>%
summarise_if(is.numeric,mean)
nprtop100_summary <- nprtop100 %>%
group_by(Year) %>%
summarise_if(is.numeric,mean)
nprhmatop100_summary <- nprhmatop100 %>%
group_by(Year) %>%
summarise_if(is.numeric,mean)
nprtop100va_summary <- nprtop100VA %>%
group_by(Year) %>%
summarise_if(is.numeric,mean)
# GM(1,1) forecasts of the averages; tag each result with its scope.
allhs_predicted <- indicatorGM11(allhs_summary)
allhs_predicted$Scope <- "All HS"
hma_predicted <- indicatorGM11(hma_summary)
hma_predicted$Scope <- "HMA"
nprtop100_predicted <- indicatorGM11(nprtop100_summary)
nprtop100_predicted$Scope <- "NPR Top 100"
nprhmatop100_predicted <- indicatorGM11(nprhmatop100_summary)
nprhmatop100_predicted$Scope <- "HMA in NPR Top100"
nprtop100va_predicted <- indicatorGM11(nprtop100va_summary)
nprtop100va_predicted$Scope <- "NPR Top 100 + VA"
mydf <- bind_rows(allhs_predicted,hma_predicted,nprtop100_predicted,nprtop100va_predicted,nprhmatop100_predicted)
write.csv(mydf, "Result/indicatior_1718_npr_1023.csv",row.names = FALSE)
# Part 2: Forecast the sum ----------------------------------------------------------------------------------------
# Per-year totals of every numeric indicator, for each scope.
allhs_sum <- allhs %>%
group_by(Year) %>%
summarise_if(is.numeric,sum)
hma_sum <- hma %>%
group_by(Year) %>%
summarise_if(is.numeric,sum)
nprtop100_sum <- nprtop100 %>%
group_by(Year) %>%
summarise_if(is.numeric,sum)
nprhmatop100_sum <- nprhmatop100 %>%
group_by(Year) %>%
summarise_if(is.numeric,sum)
nprtop100va_sum <- nprtop100VA %>%
group_by(Year) %>%
summarise_if(is.numeric,sum)
# descale="Y": totals are scaled by 1/1000 before fitting (see indicatorGM11).
allhs_predicted <- indicatorGM11(allhs_sum,descale="Y")
allhs_predicted$Scope <- "All HS"
hma_predicted <- indicatorGM11(hma_sum,"Y")
hma_predicted$Scope <- "HMA"
nprtop100_predicted <- indicatorGM11(nprtop100_sum,"Y")
nprtop100_predicted$Scope <- "NPR Top 100"
nprhmatop100_predicted <- indicatorGM11(nprhmatop100_sum,"Y")
nprhmatop100_predicted$Scope <- "HMA in NPR Top 100"
nprtop100va_predicted <- indicatorGM11(nprtop100va_sum,"Y")
nprtop100va_predicted$Scope <- "NPR Top 100 + VA"
mysum <- bind_rows(allhs_predicted,hma_predicted,nprtop100_predicted,nprtop100va_predicted,nprhmatop100_predicted)
write.csv(mysum, "Result/indicatiorsum_1718_NPR_1023.csv",row.names = FALSE)
###################################################################################################################
# Revenue Part: build 2017/2018 average and total revenue tables per scope.
###################################################################################################################
rev1117 <- read.csv("Result/revenue_1017.csv",header = TRUE)
rev1819 <- read.csv("Result/revenue_1819forecast_1017.csv")
# Fixed: read.xlsx was called without loading openxlsx; use the namespaced
# call (consistent with openxlsx::read.xlsx earlier in this file).
varev <- openxlsx::read.xlsx("Data/Financial/VA_FS.xlsx", sheet = 1)
varev <- varev[-7]
# Part 0: filter out data already have 2018 data & predict VA 2018 -------------------------------------------------
actual18 <- rev1117 %>% filter(Year == "2018")
index <- which(rev1819$a_Name %in% actual18$a_Name)
# Keep only systems whose 2018 figures must come from the forecast.
rev18 <- rev1819[-index,c(1,2,4)]
names(rev18)[2:3] <- c("Total_Revenue", "NPR")
# Fixed: namespaced openxlsx call here as well.
sysmappinglist <- openxlsx::read.xlsx("Data/HMA_Mapping_List.xlsx", sheet = "System")
rev18 <- left_join(rev18,sysmappinglist[,c(1,2,5)], by = "a_Name")
# Add the systems that already reported actual 2018 numbers.
rev18 <- bind_rows(rev18,actual18)
rev18$Year <- "2018"
# VA: forecast 2018 total revenue with a GM(1,1) model on log revenue;
# NPR is assumed to be 96% of total revenue.
# NOTE(review): GM11() comes from the sourced RCode/GM11.R — confirm its
# $predict layout ([1,2] = first forecast value).
va18revmodel <- GM11(log(varev$Total_Revenue),1)
va18totalrevenue <- exp(va18revmodel$predict[1,2])
va18npr <- 0.96*va18totalrevenue
va18 <- data.frame(a_Name = "Veterans Health Administration",
Total_Revenue = va18totalrevenue,
NPR = va18npr,
HMA_System_Name = "VHA",
HMA_Member = "N")
# Part 1: 2017 Average ---------------------------------------------------------------------------------------
rev17 <- rev1117 %>% filter(Year == 2017)
va2017 <- varev %>% filter(Year == 2017)
nprtop100 <- rev17 %>%
filter(!a_Name %in% c("MEDNAX Services, Inc. (FL)", # remove MEDNAX (physician service association)
"Presence Health (IL)")) %>% # Presence Health (IL) merged into Ascension
mutate(npr_rank = min_rank(desc(NPR))) %>%
top_n(100,NPR) %>%
arrange(npr_rank)
nprtop100va <- bind_rows(nprtop100,va2017)
rev17_avg <- rev17 %>% summarise_if(is.numeric,mean)
rev17_avg$Scope <- "All HS"
hma <- rev17 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,mean)
hma$Scope <- "HMA"
nprtop100_avg <- nprtop100 %>% summarise_if(is.numeric,mean) %>% select(-4)
nprtop100_avg$Scope <- "NPR Top 100"
nprtop100va_avg <- nprtop100va %>% summarise_if(is.numeric,mean) %>% select(-4)
nprtop100va_avg$Scope <- "NPR Top 100 VA"
nprhmatop100 <- nprtop100 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,mean) %>% select(-4)
nprhmatop100$Scope <- "HMA in NPR Top 100"
mydf2017avg <- bind_rows(rev17_avg,hma,nprtop100_avg,nprtop100va_avg,nprhmatop100)
# Fixed: mydf2017avg had no Year column while mydf2018avg does, which made
# rbind(mydf2017avg, mydf2018avg) below fail on mismatched columns.
mydf2017avg$Year <- "2017"
#write.csv(mydf2017avg,"Result/revenue17avg_npr.csv",row.names = FALSE)
# Part 2: 2017 Sum --------------------------------------------------------------------------------------------
rev17_sum <- rev17 %>% summarise_if(is.numeric,sum)
rev17_sum$Scope <- "All HS"
hma_sum <- rev17 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,sum)
hma_sum$Scope <- "HMA"
nprtop100_sum <- nprtop100 %>% summarise_if(is.numeric,sum) %>% select(-4)
nprtop100_sum$Scope <- "NPR Top 100"
nprtop100va_sum <- nprtop100va %>% summarise_if(is.numeric,sum) %>% select(-4)
nprtop100va_sum$Scope <- "NPR Top 100 VA"
nprhmatop100_sum <- nprtop100 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,sum) %>% select(-4)
nprhmatop100_sum$Scope <- "HMA in NPR Top 100"
mydf2017sum <- bind_rows(rev17_sum,hma_sum,nprtop100_sum,nprtop100va_sum,nprhmatop100_sum)
mydf2017sum$Year <- "2017"
#write.csv(mydf2017sum,"Result/revenue17sum_npr.csv",row.names = FALSE)
# Part 3: 2018 Average -----------------------------------------------------------------------------------------
# Carry the 2017 Top-100 membership over to the 2018 revenue table.
nprtop100_18 <- left_join(nprtop100[,c(1,5:7)], rev18[,c(1:3)], by = "a_Name")
nprtop100va18 <- bind_rows(nprtop100_18,va18)
rev18_avg <- rev18 %>% summarise_if(is.numeric,mean)
rev18_avg$Scope <- "All HS"
hma18 <- rev18 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,mean)
hma18$Scope <- "HMA"
nprtop100_avg18 <- nprtop100_18 %>% summarise_if(is.numeric,mean) %>% select(-npr_rank)
nprtop100_avg18$Scope <- "NPR Top 100"
nprtop100va_avg18 <- nprtop100va18 %>% summarise_if(is.numeric,mean) %>% select(-npr_rank)
nprtop100va_avg18$Scope <- "NPR Top 100 VA"
nprhmatop100_18 <- nprtop100_18 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,mean) %>% select(-npr_rank)
nprhmatop100_18$Scope <- "HMA in NPR Top 100"
mydf2018avg <- bind_rows(rev18_avg,hma18,nprtop100_avg18,nprtop100va_avg18,nprhmatop100_18)
mydf2018avg$Year <- "2018"
#write.csv(mydf2018avg[-1],"Result/revenue18avg.csv",row.names = FALSE)
# Part 4: 2018 Sum --------------------------------------------------------------------------------------------
rev18_sum <- rev18 %>% summarise_if(is.numeric,sum)
rev18_sum$Scope <- "All HS"
hma18_sum <- rev18 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,sum)
hma18_sum$Scope <- "HMA"
nprtop100_sum18 <- nprtop100_18 %>% summarise_if(is.numeric,sum) %>% select(-npr_rank)
nprtop100_sum18$Scope <- "NPR Top 100"
nprtop100va_sum18 <- nprtop100va18 %>% summarise_if(is.numeric,sum) %>% select(-npr_rank)
nprtop100va_sum18$Scope <- "NPR Top 100 VA"
nprhmatop100_18_sum <- nprtop100_18 %>% filter(HMA_Member == "Y") %>% summarise_if(is.numeric,sum) %>% select(-npr_rank)
nprhmatop100_18_sum$Scope <- "HMA in NPR Top 100"
mydf2018sum <- bind_rows(rev18_sum,hma18_sum,nprtop100_sum18,nprtop100va_sum18,nprhmatop100_18_sum)
mydf2018sum$Year <- "2018"
#write.csv(mydf2018sum[-1],"Result/revenue18sum.csv",row.names = FALSE)
# Combine both years and persist.
avg_revenue_tbl <- rbind(mydf2017avg,mydf2018avg)
sum_revenue_tbl <- rbind(mydf2017sum,mydf2018sum)
write.csv(avg_revenue_tbl,"Result/1718_average_revenue_npr.csv",row.names = FALSE)
write.csv(sum_revenue_tbl,"Result/1718_sum_revenue_npr.csv",row.names = FALSE)
|
62c55866a67c5e71fbfde0d3be0c90b4e7d2b9ba
|
7786980abbb9f9f92d0ba45a6b526066bc4f77b8
|
/man/inspect_mem.Rd
|
9cfa3f629e56f8a74d9ff205587257d6bcdd8699
|
[] |
no_license
|
alastairrushworth/inspectdf
|
d2fc64d31da1e903b43eea7c9aec893bb27c6759
|
5c516e3ee28c63a56622948ab612bc8f3d48ba47
|
refs/heads/master
| 2022-08-29T15:37:21.670913
| 2022-08-09T06:14:32
| 2022-08-09T06:14:32
| 157,981,172
| 251
| 23
| null | 2022-08-09T06:27:38
| 2018-11-17T12:12:30
|
R
|
UTF-8
|
R
| false
| true
| 2,284
|
rd
|
inspect_mem.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspect_mem.R
\name{inspect_mem}
\alias{inspect_mem}
\title{Summary and comparison of memory usage of dataframe columns}
\usage{
inspect_mem(df1, df2 = NULL)
}
\arguments{
\item{df1}{A data frame.}
\item{df2}{An optional second data frame with which to comparing memory usage.
Defaults to \code{NULL}.}
}
\value{
A tibble summarising and comparing the columnwise memory usage
for one or a pair of data frames.
}
\description{
For a single dataframe, summarise the memory usage in each column.
If two dataframes are supplied, compare memory usage for columns appearing
in both dataframes. For grouped dataframes, summarise the memory usage separately
for each group.
}
\details{
For a \strong{single dataframe}, the tibble returned contains the columns: \cr
\itemize{
\item \code{col_name}, a character vector containing column names of \code{df1}.
\item \code{bytes}, integer vector containing the number of bytes in each column of \code{df1}.
\item \code{size}, a character vector containing display-friendly memory usage of each column.
\item \code{pcnt}, the percentage of the dataframe's total memory footprint
used by each column.
}
For a \strong{pair of dataframes}, the tibble returned contains the columns: \cr
\itemize{
\item \code{col_name}, a character vector containing column names of \code{df1}
and \code{df2}.
\item \code{size_1}, \code{size_2}, a character vector containing memory usage of each column in
each of \code{df1} and \code{df2}.
\item \code{pcnt_1}, \code{pcnt_2}, the percentage of total memory usage of each column within
each of \code{df1} and \code{df2}.
}
For a \strong{grouped dataframe}, the tibble returned is as for a single dataframe, but where
the first \code{k} columns are the grouping columns. There will be as many rows in the result
as there are unique combinations of the grouping variables.
}
\examples{
# Load dplyr for starwars data & pipe
library(dplyr)
# Single dataframe summary
inspect_mem(starwars)
# Paired dataframe comparison
inspect_mem(starwars, starwars[1:20, ])
# Grouped dataframe summary
starwars \%>\% group_by(gender) \%>\% inspect_mem()
}
\seealso{
\code{\link{show_plot}}
}
\author{
Alastair Rushworth
}
|
af8ec103848952bdcdde54e7e1e825d2f216cc7c
|
6cee21d59ec656812104724c7eba613b4b042b1b
|
/man/jb_figure.Rd
|
e3fc04206a0b43c17ea2a60d8467136a123e68d4
|
[] |
no_license
|
barthelmes/jenshelper
|
f218272440fa4d0322f9c2284d3fda0840067681
|
102c2e21a95bd5ae460efc9b7c732c4a2baa18c6
|
refs/heads/master
| 2022-10-04T22:26:30.618047
| 2020-06-09T22:47:02
| 2020-06-09T22:47:02
| 265,327,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 613
|
rd
|
jb_figure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jenshelper.R
\name{jb_figure}
\alias{jb_figure}
\title{Plot survey bar plots}
\usage{
jb_figure(df = des_amy, out = fida, upper = NA, pal = "tol3")
}
\arguments{
\item{df}{a survey design obsject (survey or srvyr)}
\item{out}{a single variable of interest}
\item{upper}{custom max value of y axis}
\item{fillncolor}{any palette from \code{\link[jenshelper]{retinal_palettes}}}
}
\value{
a ggplot dynamite plot with errorbars
}
\description{
Plot survey bar plots
}
\examples{
jenshelper::retinal_palettes # for choice of palettes
}
|
13b2a39f566c1333020657c06c78a883f11a8e51
|
990c6fb245b95d8c501cc46e899a2ff789941b38
|
/server.R
|
446658c3097893ec02f665f21b28b093dd9618b7
|
[] |
no_license
|
lynn9691/CourseProjectDataProducts
|
03806bcacddbb41b8720bfae368d4bccc0cf8f63
|
d9b44d1bdf6377657b5e9404736bf37c038eed9c
|
refs/heads/master
| 2021-01-19T12:24:05.702525
| 2017-08-19T09:09:50
| 2017-08-19T09:09:50
| 100,782,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
server.R
|
# Shiny server for the mtcars stopping-distance app: fits a linear and a
# quadratic model of stopping distance on speed, and predicts the distance
# for the speed selected with the `sliderSpeed` input.
library(shiny)
shinyServer(function(input, output){
# Quadratic term for model 2 (modifies a server-local copy of `cars`).
cars$speed2 <- cars$speed ^2
model1 <- lm(dist ~ speed, data = cars)
model2 <- lm(dist ~ speed + speed2, data = cars)
# Reactive prediction from the linear model at the slider speed.
model1pred <- reactive({
speedInput <- input$sliderSpeed
predict(model1, newdata = data.frame(speed = speedInput))
})
# Reactive prediction from the quadratic model at the slider speed.
model2pred <- reactive({
speedInput <- input$sliderSpeed
predict(model2, newdata = data.frame(speed = speedInput,
speed2 = speedInput^2))
})
# Scatter plot of the data, optional fitted curves, and the two predictions.
output$plot1 <- renderPlot({
speedInput <- input$sliderSpeed
plot(cars$speed, cars$dist, xlab = "Speed",
ylab = "Stopping Distance", bty = "n", pch = 16,
xlim = c(0, 30), ylim = c(0, 150))
if(input$showModel1){
abline(model1, col = "red", lwd = 2)
}
if (input$showModel2){
# Draw the quadratic fit as a curve over the visible speed range.
model2lines <- predict(model2, newdata = data.frame(
speed = 0:30, speed2 = (0:30)^2
))
lines(0:30, model2lines, col = "blue", lwd = 2)
}
legend(20, 120, c("Model 1 Prediction", "Model 2 Prediction"), pch = 16,
col = c("red", "blue"), bty = "n", cex = 1.2)
points(speedInput, model1pred(), col = "red", pch = 16, cex = 2)
points(speedInput, model2pred(), col = "blue", pch = 16, cex = 2)
})
# Numeric predictions shown alongside the plot.
output$pred1 <- renderText({
model1pred()
})
output$pred2 <- renderText({
model2pred()
})
})
|
88824ee59e6eab3ec47b65fa081034388ab144fa
|
7b99e0516455a5e61f010dd7015da2461117263e
|
/playground/network-vectorized-speed-benchmark.R
|
4d360d7b9fc05bc6bb40b42aefb6a93c547f3b89
|
[
"MIT"
] |
permissive
|
muriteams/ergmito
|
75ec8830de7bcf47250c2038f418123eb9fc9c9e
|
f3a2ede1ed3a97eaed71987ec5b555a853cbd11d
|
refs/heads/master
| 2023-06-25T08:57:37.368032
| 2023-06-13T19:46:18
| 2023-06-13T19:46:18
| 157,758,250
| 9
| 1
|
NOASSERTION
| 2020-07-06T05:17:20
| 2018-11-15T18:56:47
|
R
|
UTF-8
|
R
| false
| false
| 1,332
|
r
|
network-vectorized-speed-benchmark.R
|
# Benchmark playground: compare network-object construction speed between
# network::network() and ergmito's matrix_to_network(), for a single
# adjacency matrix and for a batch of small ones.
library(ergmito)
library(network)
set.seed(12)
A <- rbernoulli(50)
net0 <- network(A)
net1 <- ergmito:::matrix_to_network(list(A))
library(bench)
# Relative timings for building one 50-node network, 100 iterations.
ans <- bench::mark(
ergm = network::network(A),
ergmito = ergmito::matrix_to_network(list(A)),
check = FALSE,
relative = TRUE,
iterations = 100
)
plot(ans)
ans
neta <- network::network(A)
# (vertex-attribute benchmark kept for reference, currently disabled)
# ans <- bench::mark(
#   ergm = w0 <- network::set.vertex.attribute(neta, "a", 1:50),
#   ergmito = w1 <- ergmito::add_vertex_attr(neta, list(1:50), "b"),
#   check = FALSE,
#   relative = TRUE
# )
#
# plot(ans)
# ans
# Helper for the batched benchmark: build one network object per adjacency
# matrix in `x`.
networks_list <- function(x) {
res <- vector("list", length(x))
for (i in seq_along(res))
res[[i]] <- network(x[[i]])
res
}
# 2000 small (5-node) matrices, converted in bulk.
adjmats <- rbernoulli(rep(5, 2000))
nets <- matrix_to_network(adjmats)
(ans <- bench::mark(
ergmito = matrix_to_network(adjmats),
ergm = networks_list(adjmats),
check=FALSE
))
plot(ans)
# (vertex-attribute benchmark on the batch, currently disabled)
# add_vattr_network <- function(x, attrvalue, attrname) {
#
# res <- vector("list", length(x))
# for (i in seq_along(res))
# res[[i]] <- set.vertex.attribute(x[[i]], attrname = attrname, value=attrvalue)
# res
#
# }
#
# (ans <- bench::mark(
# ergmito = ergmito::add_vertex_attr(nets, list(1:5), "a"),
# ergm = add_vattr_network(nets, list(1:5), "a"),
# check=FALSE, iterations = 100
# ))
# plot(ans)
|
83acbba612229052fe71a526b71089b2ca337fe1
|
a53f9549684c68be1756f4ccab3ada48c2f4a4ff
|
/2.0.1/p_V_AT.R
|
e570a747b7ba55ce15c1181fd9babef660f70770
|
[] |
no_license
|
ponasmontecarlo/Simulations
|
1fa986c0ec8c1d495a6dc59864e366f5c55e4fdd
|
c454bc662fd8c7a0d19133f44ce9ea54c17c6ffd
|
refs/heads/master
| 2020-05-26T17:59:22.287515
| 2014-05-12T13:41:53
| 2014-05-12T13:42:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,003
|
r
|
p_V_AT.R
|
library(pracma)
library(mnormt)
##############################
# Generate a random d x d orthonormal matrix: fill a matrix with independent
# N(0,1) draws, then orthonormalize it with Gram-Schmidt (pracma::gramSchmidt).
orthoT <- function(d){
# d x d matrix of standard-normal entries, filled one cell at a time so the
# RNG stream matches the original implementation.
matrica <- matrix(data=NA,nrow = d, ncol=d)
for (i in 1:d){
for (j in 1:d){
matrica[i,j] <- rnorm(1,0,1)
}
}
# gramSchmidt() returns a list with components Q (orthonormal) and R.
matrica <- gramSchmidt(matrica, tol = .Machine$double.eps^0.5)
# NOTE(review): the preallocation below is immediately overwritten, and
# unlist() applied to the Q matrix may drop its dim attribute, which would
# make the result d^2 x 1 instead of d x d. `matricaT <- matrica$Q` would be
# clearer — verify the returned shape against the callers.
matricaT <- matrix(data=NA,nrow = d, ncol=d)
matricaT <- as.matrix(unlist(matrica$Q))
return(matricaT)
}
###############################
# (Removed stray top-level leftovers `k=3` and `sum=0`: `k` is redefined from
# `d` before any use below, and `sum` needlessly shadowed base::sum.)
# Draw `j` chi-distributed radii with `k` degrees of freedom.
#
# k : degrees of freedom (length of each standard-normal draw)
# j : number of radii to generate
#
# Each radius is the Euclidean norm of a k-vector of N(0,1) draws, i.e. a
# sample from the chi distribution with k degrees of freedom. Returns a
# numeric vector of length j. Consumes the same RNG stream as the original
# loop for j >= 1, and is correct for j == 0 (the old `1:j` loop misbehaved
# there); it also no longer shadows base `sum` and `t` or grows the result
# vector inside the loop.
radius <- function(k, j) {
  vapply(
    seq_len(j),
    function(i) sqrt(sum(rnorm(k)^2)),
    numeric(1)
  )
}
# Draw `k` random unit vectors in d dimensions (uniform on the unit sphere,
# obtained by normalizing standard-normal draws).
#
# k : number of unit vectors (rows)
# d : dimension of each vector (columns)
#
# Returns a k x d matrix whose rows have Euclidean norm 1. Consumes the same
# RNG stream as the original loop for k >= 1; `seq_len(k)` additionally makes
# k == 0 return an empty matrix instead of misbehaving like the old `1:k`,
# and the rewrite no longer shadows base `sum` or `t`.
unitV <- function(k, d) {
  out <- matrix(NA_real_, nrow = k, ncol = d)
  for (i in seq_len(k)) {
    draw <- rnorm(n = d, 0, 1)
    out[i, ] <- draw / sqrt(sum(draw^2))
  }
  out
}
#-----------------------
# Monte Carlo estimate of P(X < ttt) for a d-dimensional normal N(mu, sigma),
# using antithetic pairs (z, -z) built from random radii, a random orthogonal
# matrix per replication, and fixed random unit vectors.
# (A stray leading block that referenced `sigma`, `M`, `X` and `z` before
# they were defined — and therefore errored on source() — has been removed.)
d <- 3
k <- d       # chi degrees of freedom = dimension
M <- 1000    # Monte Carlo replications
n <- 100     # points per replication (n/2 antithetic pairs)
sigma <- diag(1, d)
mu <- rep(0, d)
ttt <- c(0.5, 0.5, 0.5)  # evaluation point
L <- t(chol(sigma))      # lower-triangular factor: x = mu + L %*% z
unit <- unitV(n / 2, d)
# Preallocate instead of growing with c() inside the loop.
x_sum_plus <- numeric(M)
x_sum_minus <- numeric(M)
for (i in 1:M) {
  ortho <- orthoT(d)
  r <- radius(k, n / 2)
  z_plus <- matrix(data = NA, nrow = n / 2, ncol = d)
  z_minus <- matrix(data = NA, nrow = n / 2, ncol = d)
  x_plus <- matrix(data = NA, nrow = n / 2, ncol = d)
  x_minus <- matrix(data = NA, nrow = n / 2, ncol = d)
  for (j in 1:(n / 2)) {
    # z = radius * (orthogonal rotation of a unit direction); pair with -z.
    z_plus[j, ] <- r[j] * as.matrix(ortho) %*% cbind(as.vector(unit[j, ]))
    z_minus[j, ] <- -z_plus[j, ]
    x_plus[j, ] <- rbind(mu + as.matrix(L) %*% cbind(as.vector(z_plus[j, ])))
    x_minus[j, ] <- rbind(mu + as.matrix(L) %*% cbind(as.vector(z_minus[j, ])))
  }
  # Count simulated points that fall below ttt in every coordinate.
  x_sum_plus[i] <- sum(apply(x_plus < unlist(ttt), 1, all))
  x_sum_minus[i] <- sum(apply(x_minus < unlist(ttt), 1, all))
}
# Renamed from `sum` so the result no longer masks base::sum.
p_estimate <- (sum(x_sum_plus) + sum(x_sum_minus)) / (2 * M * n / 2)
p_estimate
|
0ff2d9732650e2496b349c4ebcd98b045296b827
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pbatR/examples/ped.Rd.R
|
73463918752018a8f7ae3839fd7e2c91d1629bc3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
ped.Rd.R
|
library(pbatR)
### Name: ped
### Title: Pedigree Object
### Aliases: as.ped as.pedlist is.ped is.pedlist read.ped fread.ped
### write.ped is.pped as.pped read.pped sort.ped plotPed ped.markerNames
### Keywords: interface
### ** Examples
# A highly artificial example with not enough subjects to be run;
# however, it demonstrates how to put data in it.
# Columns follow the standard pedigree layout: pedigree id, subject id,
# father id, mother id, sex, affection status, then marker alleles in
# <marker>.a / <marker>.b pairs (0 = founder / unknown parent).
x <- data.frame( pid = c(1,1,1,1,1),
                 id = c(1,2,3,4,5),
                 idfath = c(4,4,4,0,0),
                 idmoth = c(5,5,5,0,0),
                 sex = c(1,2,1,1,2),
                 AffectionStatus = c(1,0,0,1,0),
                 m1.a = c(1,1,1,1,1),
                 m1.b = c(1,2,1,1,2),
                 m2.a = c(4,4,4,4,4),
                 m2.b = c(3,3,3,4,3) )
x
myPed <- as.ped( x ) # Mark it with the class 'ped'
myPedlist <- as.pedlist( x ) # Instead mark it with 'pedlist'
myPed
myPedlist
# an alternate example of creating: rename the six mandatory columns
# and tell as.ped() which column is which explicitly.
names( x )[1:6] <- c( "mypedid", "subid", "fathid",
                      "mothid", "gender", "affection" );
x
myPed <- as.ped( x, pid="mypedid", id="subid", idfath="fathid",
                 idmoth="mothid", sex="gender", affection="affection" )
myPed # Note it's the same as before!
myPed <- as.ped( myPedlist ) # Easy conversion back
myPedlist <- as.pedlist( myPed ) # and forth between formats.
|
b19098c5f92defc997db146e75698cae989aa70f
|
2497f1e60663301ca24886d0cd8d8fe645641fca
|
/plot4.R
|
e5558d85e2094e5064058bc58c30b76e504bae2e
|
[] |
no_license
|
DB-2/ExData_Plotting1
|
a5dc575152c6e04077284e6bfc11f4f57063744d
|
3848df0766431cf418689de2207b092097bc49bf
|
refs/heads/master
| 2021-03-05T18:18:01.971146
| 2020-03-09T21:00:45
| 2020-03-09T21:00:45
| 246,140,512
| 0
| 0
| null | 2020-03-09T20:57:01
| 2020-03-09T20:57:00
| null |
UTF-8
|
R
| false
| false
| 1,978
|
r
|
plot4.R
|
## This is the script to produce plot4.png file
## Author : DBarry
## The plot is a line plot in base R based on the energy power consumption file below
## with displaying 4 graphs in 4 quadrants using par(mfrow=c(2,2))
## Read the zip file (downloaded fresh to a temp file on every run)
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp,mode="wb")
## Extract the semi colon delimited file
unzfile <- unz(temp,"household_power_consumption.txt")
## Read to the 1/2/2007 and 2/2/2007 dates
## NOTE(review): skip=66637/nrows=2880 hard-codes the row offset of
## those two dates in this exact file -- TODO confirm against the data.
## Also, 'stringsAsFactors= F' uses the reassignable F; TRUE/FALSE is
## the safer spelling.
epc<-read.table(file=unzfile, sep = ";",header = FALSE, na.strings ="?",stringsAsFactors= F,
                skip=66637,nrows=2880,
                col.names=c("Date", "Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity",
                            "Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## Create a datetime variable from date and time columns
epc$dateTime <- strptime(paste(epc$Date,epc$Time), format="%d/%m/%Y %H:%M:%S")
png(file="plot4.png", width=480, height=480)
par(mfrow = c(2, 2))   # 2 x 2 grid, filled row by row
## Global Active Power (top-left)
plot(x=epc$dateTime,y=epc$Global_active_power,type="n",xlab="", ylab="Global Active Power")
lines(x=epc$dateTime,y=epc$Global_active_power)
## Voltage (top-right)
plot(x=epc$dateTime,y=epc$Voltage,type="n",xlab="datetime", ylab="Voltage")
lines(x=epc$dateTime,y=epc$Voltage)
## Sub Metering (bottom-left): three series overlaid with a legend
plot(x=epc$dateTime,y=epc$Sub_metering_1,type="n",xlab="", ylab="Energy sub metering")
legend("topright", lty=1 , cex = 0.5, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),col=c("black", "red", "blue") )
lines(x=epc$dateTime,y=epc$Sub_metering_1,col="black")
lines(x=epc$dateTime,y=epc$Sub_metering_2,col="red")
lines(x=epc$dateTime,y=epc$Sub_metering_3,col="blue")
## Global Reactive Power (bottom-right)
plot(x=epc$dateTime,y=epc$Global_reactive_power,type="n",xlab="datetime", ylab="Global_reactive_power")
lines(x=epc$dateTime,y=epc$Global_reactive_power)
dev.off()
|
180879776c5a552b77249bce15e7d4d68ccb800b
|
4ce0a8e66ad3694a60840ab3cd3c34ea82de94ea
|
/rdev/R/update.R
|
532bcb927b8020967f050e2eaaf04e009a7b96c2
|
[] |
no_license
|
curtisKJ/mrgsolve
|
5549fe81796e5dcd7172462624d016abe58bedc5
|
05818cc861e467db08a04b65af59d783bdd16a53
|
refs/heads/master
| 2021-01-17T11:33:21.588950
| 2016-05-17T06:12:23
| 2016-05-17T06:12:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,525
|
r
|
update.R
|
## This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
## To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-nd/4.0/ or send a letter to
## Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
## Coerce NULL to a zero-length character vector so character-typed
## slots accept NULL input transparently.
setAs("NULL", "character", function(from) character(0))
## Names of the simple mrgmod slots that update() may assign directly
## via slot()<- (solver tolerances, time grid, output request, ...).
sval <- unique(c("atol","rtol",
                 "events","verbose","debug","preclean","mindt",
                 "digits", "ixpr", "mxhnil","start", "end", "add", "delta",
                 "maxsteps", "hmin", "hmax","tscale", "request"))
##' @title Update the model object
##'
##' @description After the model object is created, update various attributes.
##'
##' @param object a model object
##' @param ... passed to other functions
##' @param merge logical indicating to merge (rather than replace) new and
##' existing attributes.
##' @param strict logical; used only when merge is \code{TRUE} and parameter list or initial conditions
##' list is being updated; if \code{TRUE}, no new items will be added; if \code{FALSE}, the parameter list may
##' expand.
##' @param super.strict logical; strict common area updating
##' @param data a list of items to update; not used for now
##' @return The updated model object is returned.
##' @details
##' See also \code{\link{mrgsolve_Ops}} for alternative ways to update events, parameters, and initial conditions in a model object.
##' @export
##' @name update
##' @aliases update,mrgmod-method
##' @examples
##' mod <- mrgsolve:::house()
##'
##' mod <- update(mod, end=120, delta=4, param=list(CL=19.1))
## update() method for 'mrgmod' model objects: '...' carries name=value
## pairs for simple slots (see 'sval') plus optionally 'init', 'param',
## 'omega', 'sigma'; 'data' is an extra list folded into '...'.
## Returns the validated, updated model object.
## NOTE(review): 'super.strict' is accepted but never used here.
setMethod("update", "mrgmod", function(object,..., merge=TRUE,strict=TRUE,super.strict=FALSE,data=list()) {
  x <- object
  args <- list(...)
  ## fold the 'data' list into the ... arguments (non-strict merge)
  if(!is.mt(data)) args <- merge(args,data,strict=FALSE)
  if(is.mt(args)) return(x)
  args <- args[!is.na(args)]
  a <- names(args)
  ## simple slots named in 'sval' are assigned directly via slot()<-;
  ## charmatch() allows unambiguous partial names
  valid.in <- which(charmatch(a,sval, nomatch=0)>0)
  if(length(valid.in)>0) {
    valid.full <- charmatch(a[valid.in],sval, nomatch=0)
    for(i in 1:length(valid.in)) {
      slot(x, sval[valid.full[i]]) <- args[[valid.in[i]]]
    }
  }
  ## If we're not merging, just replace and return:
  if(!merge) {
    if(exists("init",args)) {
      stop("Error... initial conditions list (init) is only updateable when merge=TRUE.")
    }
    if(exists("param",args)) {
      x@param <- as.param(args$param)
    }
    validObject(x)
    return(x)
  }
  ## Otherwise, merge if arguments are there:
  ## Initial conditions list:
  if(exists("init",args)) {
    i <- x@init@data
    i <- merge(i, args$init, strict=strict)
    slot(x, "init") <- as.init(i)
  }
  ## Parameter update (warns rather than errors on $FIXED parameters):
  if(exists("param",args)) {
    if(length(x@fixed)>0) {
      if(any(is.element(names(args$param),names(x@fixed)))) warning("Attempted update of a $FIXED parameter.", call.=FALSE,immediate.=TRUE)
    }
    x@param <- as.param(merge(x@param@data,args$param,strict=strict,context="param"))
  }
  ## Random-effect matrices go through update_matlist() so dimensions
  ## get checked:
  if(exists("omega", args)) {
    x@omega <- update_matlist(x@omega,omat(args$omega),strict=strict, context="omat")
  }
  if(exists("sigma", args)) {
    x@sigma <- update_matlist(x@sigma,smat(args$sigma), strict=strict, context="smat")
  }
  validObject(x)
  return(x)
})
# TRUE when 'x' and 'y' have the same number of rows -- the "signature"
# check used before one matrix list's data replaces another's. Names
# are stripped so only the row counts themselves are compared.
same_sig <- function(x,y) {
  rows_x <- unname(nrow(x))
  rows_y <- unname(nrow(y))
  identical(rows_x, rows_y)
}
## Merge matrix-list 'y' into matrix-list 'x'. If every element of 'y'
## is unnamed ("...") and the row signature matches, y's data replaces
## x's wholesale (keeping x's labels); otherwise the two data lists are
## merged by name. Under strict=TRUE the overall dimension must not
## change. Returns the validated, updated matlist.
update_matlist <- function(x,y,strict=TRUE,context="update_matlist",...) {
  n0 <- dim_matlist(x)
  if(length(x)==0) stop(paste0(context, ": there is no matrix to update"))
  ## anon: all input matrices came in unnamed; ss: same row signature
  ## (scalar conditions; '&' works here though '&&' is conventional)
  anon <- all(names(y)=="...")
  ss <- same_sig(x,y)
  if(anon & !ss) stop(paste("Improper signature:", context), call.=FALSE)
  if(ss & anon) {
    ## If we match the sig and all input is unnamed: positional
    ## replacement, preserving x's element labels
    labels <- names(x@data)
    x@data <- y@data
    names(x@data) <- labels
  } else {
    ##if(anon & all(names(x)=="...")) stop(paste("Improper signature:",context), call.=FALSE)
    x@data <- merge(x@data, y@data,strict=strict,context=context,...)
  }
  n <- dim_matlist(x)
  if(!strict) {
    x@n <- n          # non-strict: accept (and record) a new dimension
  } else {
    if(!identical(n0,n)) stop(paste("Improper dimension:",context), call.=FALSE)
  }
  validObject(x)
  return(x)
}
##' @export
##' @rdname update
##' @param y another object involved in update
## Thin wrappers: coerce 'y' to the matching matlist type and delegate
## to update_matlist().
setMethod("update", "omegalist", function(object,y,...) {
  update_matlist(object, omat(y),context="omat",...)
})
##' @export
##' @rdname update
setMethod("update", "sigmalist", function(object,y,...) {
  update_matlist(object, smat(y),context="smat",...)
})
##' @export
##' @rdname update
setMethod("update", "parameter_list", function(object,y,...) {
  as.param(merge(object@data, as.param(y)@data,context="param",...))
})
##' @export
##' @rdname update
## NOTE(review): the "ev" method has an empty body and therefore
## returns NULL invisibly -- confirm this is intentional.
setMethod("update", "ev", function(object,y,...) {
})
##' Update \code{model} or \code{project} in an \code{mrgmod} object.
##'
##' @param x mrgmod object
##' @param model model name
##' @param project project directory
##' @param ... passed along
##' @export
##' @return updated model object
setGeneric("relocate", function(x,...) standardGeneric("relocate"))
##' @export
##' @rdname relocate
setMethod("relocate", "mrgmod", function(x,model=NULL, project=NULL) {
  ## Only touch what the caller supplied; 'project' is normalized via
  ## normalizePath() before being stored.
  if(!missing(model)) x@model <- model
  if(!missing(project)) x@project <- normalizePath(project,winslash=.Platform$file.sep)
  validObject(x)
  return(x)
})
|
0d48cfb8d978c281454f955746959d74530fe553
|
9f57e0ad44b78d809c262fa0ffb659232fdb8d5e
|
/general-prob-stat/solve_markov.R
|
ef014070ca3429301f436f680df98f6a3c22c258
|
[] |
no_license
|
abhi8893/Intensive-R
|
c3439c177776f63705546c6666960fbc020c47e8
|
e340ad775bf25d5a17435f8ea18300013195e2c7
|
refs/heads/master
| 2020-09-22T00:57:36.118504
| 2020-08-31T09:23:57
| 2020-08-31T09:23:57
| 224,994,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
solve_markov.R
|
# Stationary distribution of a Markov chain with transition matrix P:
# solves the overdetermined system stacking (P - I) x = 0 with
# sum(x) = 1 via normal-equations least squares.
# NOTE(review): this finds a RIGHT eigenvector (P x = x), i.e. it
# assumes a column-stochastic P; transpose first for the usual
# row-stochastic convention -- confirm with callers.
stnry.distr <- function(P){
  n <- nrow(P)
  A <- rbind(P - diag(1, n), rep(1, n))
  rhs <- c(rep(0, n), 1)
  drop(solve(t(A) %*% A, t(A) %*% rhs))
}
stnry.distr(P)
|
fe85871d9451cc5149c2177dae23ead23463d628
|
d4a41995051a487669e2cee9a7781d78899ab83d
|
/man/excludeWords.Rd
|
3e7dac88999c2165cf05e8d1373caf2986939472
|
[] |
no_license
|
lupok2001/patentr
|
df4b1c3e9c2205b14e2473b856475ddae43fca2a
|
fdb851193455a94f4c496c3984f5a3e1127dd655
|
refs/heads/master
| 2020-03-11T03:33:46.056792
| 2017-07-17T00:14:57
| 2017-07-17T00:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 434
|
rd
|
excludeWords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{excludeWords}
\alias{excludeWords}
\title{A standard list of words to exclude in a patent word cloud.}
\format{A character vector.
\describe{
\item{excludeWords}{A character vector of words to exclude}
}}
\usage{
excludeWords
}
\description{
A standard list of words to exclude from a patent data word cloud.
}
\keyword{data}
|
f844520290e3d086cdd533b834830141b3e417e8
|
32a9905718d86aee7dbbcf1dcaceb9d81248a1e1
|
/Week1/Data-Science-Capstone-Healthcare.R
|
c7a59e99960e9f47fce608ae15bed1bd3572bd5f
|
[] |
no_license
|
ApLife1827/Data-Science-Capstone-Healthcare
|
3453e9feffaee3d2de49a5e95fd902c7c7de47c8
|
d4471e416fddae7307752bdcabaee7e27cca20af
|
refs/heads/main
| 2023-05-07T19:42:14.118465
| 2021-05-29T05:05:51
| 2021-05-29T05:05:51
| 365,283,247
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,110
|
r
|
Data-Science-Capstone-Healthcare.R
|
library(dplyr)
#Path of dataset
# NOTE(review): setwd() to an absolute machine-specific path makes the
# script non-portable; prefer project-relative paths.
setwd("D:/aVDHOOT/SimpliLearn/Data Science Caption/Project 2/Healthcare - Diabetes")
getwd()
#Loading Dataset
data<-read.csv("health care diabetes.csv")
#Descriptive Analysis
View(data)
str(data)
summary(data)
#Handling Missing Values: zeros in these clinical columns act as
#missing-value placeholders and are replaced with the column mean.
#NOTE(review): the mean is computed *including* the zero placeholders,
#which biases the imputed value downward; the usual fix is to recode
#0 to NA first and use mean(..., na.rm = TRUE).
table(is.na(data))
hist(data$Glucose,main="Frequency of Glucose",breaks = 8,col="darkorange")
table(data$Glucose)
data$Glucose[data$Glucose==0]<-mean(data$Glucose)
hist(data$BloodPressure,main="Frequency of BloodPressure",breaks = 8,col="darkorange")
table(data$Glucose)   # NOTE(review): likely copy-paste -- probably meant data$BloodPressure
data$BloodPressure[data$BloodPressure==0]<-mean(data$BloodPressure)
hist(data$SkinThickness,main="Frequency of SkinThickness",breaks = 8,col="darkorange")
table(data$SkinThickness)
data$SkinThickness[data$SkinThickness==0]<-mean(data$SkinThickness)
hist(data$Insulin,main="Frequency of Insulin",breaks = 8,col="darkorange")
table(data$Insulin)
data$Insulin[data$Insulin==0]<-mean(data$Insulin)
hist(data$BMI,main="Frequency of BMI",breaks = 8,col="darkorange")
table(data$BMI)
data$BMI[data$BMI==0]<-mean(data$BMI)
|
1729f7de33950ad6e1ca322b2e5174ed5b06709a
|
9cbe1f47f7cb14c85d69ab3d57539b82914a8a0d
|
/Weekly/week7_income.R
|
25436762054c4e4d4e89592ce2b3b71e171f2f6a
|
[] |
no_license
|
trippv/TidyTuesday
|
e53d57cc44279e3d9b52f2a31c20ad766c6581c7
|
135174504ecce7be58922f63a5383930b044e723
|
refs/heads/main
| 2023-03-10T19:04:04.637635
| 2021-02-27T21:06:44
| 2021-02-27T21:06:44
| 327,209,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,961
|
r
|
week7_income.R
|
library(tidyverse)
library(here)
library(skimr) # view data summary
library(ggrepel)
# Read data ---------------------------------------------------------------
# Read data ---------------------------------------------------------------
income_distribution <- readr::read_csv(
  'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_distribution.csv')
#categories
skim(income_distribution)
### version 2
# Keep the top income bracket (>$200,000) for 2019, one row per race;
# 'loc' is a random angular position used only for plot layout.
set.seed(2)
income_df <- income_distribution %>%
  filter(race != "All Races") %>%
  #filter(!str_detect(race, "Combination")) %>%
  filter(str_detect(income_bracket, "over")) %>%
  filter(year == 2019) %>%
  mutate(income_th = income_mean / 1000) %>%          # mean income, thousands USD
  mutate(income_th_moe = income_mean_moe / 1000) %>%  # margin of error, thousands
  mutate(number_mll = number / 1000000) %>%           # households, millions
  mutate(loc = rnorm(7, 5, 2))
  # NOTE(review): rnorm(7, ...) hard-codes 7 rows -- breaks if the
  # filters yield a different count; rnorm(n(), 5, 2) would be robust.
#add dummy axis labels drawn as text inside the polar plot
ssystem <- data.frame(x = rep(4,5), y = seq(0, 160, 40), label = c(" ", "40 K", "80 K", "120 K", "160 K"))
colors <- RColorBrewer::brewer.pal(n = 7, "Dark2")
# Polar "universe" plot: one point per race (size = households), with
# error bars for the income margin of error.
ggplot(income_df,
       aes(x = loc, y = income_th, size = number_mll, color = race))+
  geom_point() +
  geom_segment(aes(x= loc,
                   xend = loc,
                   y = income_th - income_th_moe,
                   yend = income_th + income_th_moe), size = 1) +
  scale_y_continuous(breaks = c(0,40, 80, 120, 160),limits = c(0, 165))+
  geom_label_repel(data = income_df, aes(x = loc, y = income_th, label = race, color = race),
                   box.padding = 1,
                   inherit.aes = FALSE)+
  geom_text(data = ssystem, aes(x = x, y = y+5 , label = label),
            angle= 275,
            size = 4,
            color = "grey95",
            inherit.aes = FALSE)+
  coord_polar()+
  scale_color_manual(values = colors)+
  labs(size = "Number of \nhouseholds \nin millons", y = "", x = "",
       title = "A UNIVERSE APART!",
       subtitle = "The mean income (in thousands USD) and number of \nhousehold for year 2019 at the highest income \nbracket (>$200,000) demonstrates the wealth \ninequality by race",
       caption = "Data: Urban Institute & U.S. Census | Viz: @MiguelTripp")+
  guides(label = FALSE, color = FALSE)+
  theme_minimal()+
  theme(
    text = element_text(size = 14, colour = "grey95"),
    plot.title = element_text(size = 26, hjust = 0.5),
    plot.subtitle = element_text(size = 18, hjust = 0, margin = margin(10,0,0,0, unit = "mm")),
    plot.caption = element_text(hjust = 0.5),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    legend.position = "right",
    legend.background = element_blank(),
    plot.background = element_rect(fill = "#30475e"),
    panel.background = element_rect(fill = "#30475e", color = NA))
ggsave(here("plots", "Week7_IncomeInequal.png"), width = 180, height = 240, units = "mm", dpi = 150)
|
48e2a1f716c2abba4addc52fafd27bc130bcc76b
|
862562e3b247cd95321195d91b46bf5d73b9596c
|
/man/otc_estimator.Rd
|
828022b26a129667948a3e4ac6995006371a85da
|
[] |
no_license
|
bsaul/dr
|
dd6b733dfaf935b83423aaa0431f2ba23ad1c7a6
|
327f3537fcd70b4b62bb3e3b2a9cfa628fcbf3f7
|
refs/heads/main
| 2023-04-07T09:35:48.876785
| 2018-11-27T15:35:15
| 2018-11-27T15:35:15
| 358,607,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 330
|
rd
|
otc_estimator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimators.R
\name{otc_estimator}
\alias{otc_estimator}
\title{Makes OTC (outcome based) estimator for group-level data}
\usage{
otc_estimator(data, models, randomization, ...)
}
\description{
Makes OTC (outcome based) estimator for group-level data
}
|
d6f694823e032ce5d80d500d9b209fa370eafa66
|
bc5f18c64d9e46db53126976834f617054edb35e
|
/man/t3way.Rd
|
8fc061526f983c730e9846ef666fbc4296d5e2b3
|
[] |
no_license
|
cran/WRS2
|
d7a6e48fca12e5c892b1c1ecc4ae1a151b485f2c
|
da65525b80d08bcdaa5d155e5db6e4670b615788
|
refs/heads/master
| 2022-06-30T08:31:33.772817
| 2022-06-10T15:33:29
| 2022-06-10T15:33:29
| 25,037,146
| 1
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,663
|
rd
|
t3way.Rd
|
\name{t3way}
\alias{t3way}
\alias{print.t3way}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{A three-way ANOVA for trimmed means.
}
\description{
This function computes a three-way ANOVA for trimmed means with all interactions effects.
}
\usage{
t3way(formula, data, tr = 0.2, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{formula}{
an object of class formula.
}
\item{data}{
an optional data frame for the input data.
}
\item{tr}{
trim level for the mean.
}
\item{...}{
currently ignored.
}
}
\value{
Returns an object of class \code{t3way} containing:
\item{Qa}{first main effect}
\item{A.p.value}{p-value first main effect}
\item{Qb}{second main effect}
\item{B.p.value}{p-value second main effect}
\item{Qc}{third main effect}
\item{C.p.value}{p-value third main effect}
\item{Qab}{first two-way interaction effect}
\item{AB.p.value}{p-value first two-way interaction effect}
\item{Qac}{second two-way interaction effect}
\item{AC.p.value}{p-value second two-way interaction effect}
\item{Qbc}{third two-way interaction effect}
\item{BC.p.value}{p-value third two-way interaction effect}
\item{Qabc}{three-way interaction effect}
\item{ABC.p.value}{p-value three-way interaction effect}
\item{call}{function call}
\item{varnames}{variable names}
}
\references{
Wilcox, R. (2012). Introduction to Robust Estimation and Hypothesis Testing (3rd ed.). Elsevier.
}
\seealso{
\code{\link{t1way}}, \code{\link{t2way}}
}
\examples{
t3way(aggressive ~ degree*gender*type, data = movie)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
|
c94df356fe933b21d384c589eb0e6a68623053f4
|
2d894fdf18957d978d387d65913c0910b3cb7f4d
|
/data-analysis-and-statistical-inference/lab-1-introduction-to-data-2/Subset - one last time.R
|
85c4adfb3feb5057bd3dc58203439a708ba6021b
|
[] |
no_license
|
oliverwreath/R_Practise
|
1c774499cc4aaccbe4c812a1905740dbf5c5ec15
|
e5534c38cd7cf7f7ea6cb17785b46f921f987089
|
refs/heads/master
| 2020-04-05T22:59:07.054485
| 2015-02-05T06:38:06
| 2015-02-05T06:38:06
| 30,278,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
Subset - one last time.R
|
# The cdc data frame is already loaded into the workspace.
# Keep respondents younger than 23 who have smoked at least 100
# cigarettes (smoke100 == 1). subset() evaluates column names within
# 'cdc', so the cdc$ prefixes are unnecessary.
under23_and_smoke <- subset(cdc, age < 23 & smoke100 == 1)
# Show the first six rows of the subset:
head(under23_and_smoke)
|
75877affd399ca7ad2305b9b6199dc5192af32ff
|
d478e6abcd6b3fe714b4cf57a3ed79ee4a7c6466
|
/scripts/scripts_analysis/build95_primetime/tf_activity_analysis_with_background.R
|
24d9dc6a22a823911b58eac47cd77cd592f5282e
|
[] |
no_license
|
jakeyeung/scChIC-analysis
|
45e4d2e6a1e1bd7719142694d24543d29cd7cfc3
|
f754aec5a4115fdede5c099f341d3f10e1a72bff
|
refs/heads/master
| 2020-06-02T23:09:33.060721
| 2019-06-11T09:20:17
| 2019-06-11T09:20:17
| 191,337,903
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,254
|
r
|
tf_activity_analysis_with_background.R
|
# Jake Yeung
# Date of Creation: 2019-04-04
# File: ~/projects/scchic/scripts/scripts_analysis/build95_primetime/tf_activity_analysis_with_background.R
# Background of TF activity
#
library(data.table)
library(dplyr)
library(ggplot2)
library(scales)
library(JFuncs)
library(umap)
library(ChIPseeker)
library(GenomicRanges)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
source("scripts/Rfunctions/MaraDownstream.R")
source("scripts/Rfunctions/BackgroundPermutationScripts.R")
source("scripts/Rfunctions/AuxLDA.R")
source("scripts/Rfunctions/Aux.R")
source("scripts/Rfunctions/PlotFunctions.R")
# NOTE(review): absolute user-specific path; non-portable outside the
# author's machine.
inf <- "/Users/yeung/data/scchic/robjs/TFactivity_genelevels_objects_build95.allmarks_reorient.withColnameList.2019-04-04.RData"
# Load data --------------------------------------------------------------
load(inf, v=T)   # NOTE(review): 'v=T' = verbose=TRUE; T is reassignable
head(mara.outs$H3K4me1$zscore)
# Load zscore mats: one permutation-summary file per histone mark -------
jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3")
names(jmarks) <- jmarks
inf.zscores <- lapply(jmarks, function(jmark) paste0("/Users/yeung/data/scchic/from_cluster/zscore_permute_summary/", jmark, ".zscore_permute_summary.txt.gz"))
dats <- lapply(inf.zscores, function(inf.zscore) read.table(gzfile(inf.zscore), header = FALSE, stringsAsFactors = FALSE))
zscore.long <- lapply(dats, function(dat){
  colnames(dat) <- c("motif", "zscore", "seed", "mark")
  return(dat)
})
zscore.long <- do.call(rbind, zscore.long)
print(head(zscore.long))
print(unique(zscore.long$motif))
zscore.long <- subset(zscore.long, mark != "exiting")
subset(zscore.long, grepl("Zscores", motif))   # NOTE(review): result unused (printed interactively?)
zscore.long$zscore <- as.numeric(zscore.long$zscore)
# Attach the observed (non-permuted) z-score for each motif/mark
zscore.long.real <- lapply(jmarks, function(jmark){
  return(mara.outs[[jmark]]$zscore %>% mutate(mark = jmark))
}) %>%
  bind_rows() %>%
  rename(zscore.real = zscore)
zscore.long <- left_join(zscore.long, zscore.long.real)
# Plot CEBPB zscore versus background model ------------------------------
# out <- GetPvalZscore(zscore.long %>% filter(mark == jmark & motif == jmotif), subset(mara.outs$H3K4me1$zscore, motif == jmotif)$zscore)
# Empirical p-value per mark/motif against the permutation background
pvals.long <- zscore.long %>%
  group_by(mark, motif) %>%
  do(GetPvalZscore(., zscore.real = NULL, jprob = 0.9, show.plot = FALSE, return.pval.only = TRUE))
# plot p-value distribution by mark
ggplot(pvals.long, aes(x = log10pval)) + geom_histogram() + facet_wrap(~mark, ncol = 1) +
  theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# show top hits (10 most significant motifs per mark)
pvals.top <- pvals.long %>%
  group_by(mark) %>%
  dplyr::top_n(n = 10, wt = -log10pval) %>%
  arrange(log10pval)
print(dim(pvals.top))
print(split(pvals.top, f = pvals.top$mark))
# Plot top hits ----------------------------------------------------------
jmark <- "H3K4me1"
top.motifs <- subset(pvals.top, mark == jmark)$motif[1:10]
jcolvec <- c("gray85", "gray50", "darkblue")
for (jmotif in top.motifs){
  m1 <- PlotMotifInUmap(jmotif, dat.merged.lst[[jmark]] %>% mutate(umap2 = -1 * umap2), mara.outs[[jmark]]$zscores, jmark, jsize = 0.75, colvec = jcolvec)
  print(m1)
}
print(head(annots.lst[[jmark]]))
dsub <- dat.umap.long.new.lst[[1]]
# add activities here
dsub <- left_join(dsub, mara.outs$H3K4me1$act.long)
# ggplot(, aes(x = umap1, y = -1 * umap2)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# NOTE(review): 'tm.result', 'jpeak', 'jchip' and 'custom.settings' are
# not defined at this point in the script -- this call errors if run
# top-to-bottom; likely an interactive leftover.
PlotImputedPeaks(tm.result, jpeak, jchip, show.plot = TRUE, return.plot.only = TRUE, usettings=custom.settings)
# plot a gene
# NOTE(review): 'ref.mark' is used here one line BEFORE it is assigned.
print(head(data.frame(subset(pvals.long, mark == ref.mark) %>% arrange(log10pval)), n = 30))
ref.mark <- "H3K4me1"
# Successive gene/motif choices; only the last assignment (Cebpb) is in
# effect below. "Mdb2" is presumably a typo for "Mbd2" -- confirm.
jgene <- "Pou2f2"
jmotif <- "Pou2f2"
jgene <- "Gfi1"
jmotif <- "Gfi1"
jgene <- "Mdb2"
jmotif <- "Mdb2"
jgene <- "Hmbox1"
jmotif <- "Hmbox1"
jgene <- "Nkx2.9"
jmotif <- "Nkx2.9"
jgene <- "Pax6"
jmotif <- "Pax6"
jgene <- "Bcl3"
jmotif <- "Bcl3"
jgene <- "Ebf1"
jmotif <- "Ebf1"
jgene <- "Cebpb"
jmotif <- "Cebpb"
# NOTE(review): 'jpeak' is used here but only computed two lines below.
m.peak <- PlotImputedPeaks3(counts.mat.sub.long, jpeak, jmark, gname = jgene, jcolvec = jcolvec, .log = TRUE, jpseudo = 0, jscale = 10^7)
print(m.peak)
out.sub <- GetPeaksFromGene(jgene, annots.lst[[ref.mark]])
(jpeak <- SelectBestPeak(out.sub$peaks, regions.annot, tm.result.lst[[ref.mark]]))
jscale.fac <- 1
jpseudo <- 10^-6
jsize <- 1
# Side-by-side panel: imputed signal at the gene's best peak plus the
# motif activity on the UMAP.
m.peak <- PlotImputedPeaks2(tm.result.lst[[jmark]], jpeak, jmarks[[jmark]],
                            # use.count.mat = count.mat.lst[[jmark]],
                            use.count.mat = NULL,
                            usettings=custom.settings.new.lst[[jmark]],
                            gname = jgene,
                            jsize = jsize, jcolvec = jcolvec, .log = TRUE, scale.fac = jscale.fac, pseudocount = jpseudo, y.axis.factor = -1)
m.motif <- PlotMotifInUmap(jmotif, dat.merged.lst[[jmark]] %>% mutate(umap2 = -1 * umap2), mara.outs[[jmark]]$zscores, jmark, jsize = 0.75, colvec = jcolvec)
multiplot(m.peak, m.motif, cols = 2)
# Save objects ------------------------------------------------------------
save(pvals.long, zscore.long, file = "~/data/scchic/robjs/TFactivity_zscore_analysis.RData")
#
# out <- GetPvalZscore(zscore.long %>% filter(mark == jmark & motif == jmotif), zscore.real = NULL)
#
# jprob <- 0.9
#
# jmark <- "H3K4me1"
# jmotif <- "Nfatc1"
# jmotif <- "Cebpb"
# jmotif <- "Zbtb16"
# jmotif <- "Tal1"
#
# jsub <- zscore.long %>% filter(mark == jmark & motif == jmotif) %>%
# arrange(zscore)
#
# zscore.real <- subset(mara.outs$H3K4me1$zscore, motif == jmotif)$zscore
#
# jsub$zscore <- as.numeric(jsub$zscore)
#
# ggplot(jsub, aes(x = log10(zscore))) + geom_density() + theme_bw() +
# theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
# geom_vline(xintercept = zscore.real) +
# ggtitle(jmotif)
#
# # reverse cumulative?
# jsub$zscore.cumsum <- cumsum(jsub$zscore)
# jsub$indx <- seq(nrow(jsub))
# jsub$frac.less.than <- jsub$indx / nrow(jsub)
# jsub$frac.more.than <- 1 - jsub$frac.less.than
# jsub$log10.frac.more.than <- log10(jsub$frac.more.than)
#
# # ggplot(jsub, aes(x = zscore)) + geom_density()
# ggplot(jsub, aes(x = zscore, y = log10(frac.more.than))) +
# geom_point() + theme_bw() +geom_vline(xintercept = zscore.real)
# ggplot(jsub %>% filter(zscore > quantile(zscore, probs = jprob)), aes(x = zscore, y = log10(frac.more.than))) +
# geom_point() + theme_bw() +geom_vline(xintercept = zscore.real)
#
# # fit linear model
# jsubsub <- jsub %>% filter(zscore > quantile(zscore, probs = jprob) & frac.more.than > 0)
# jfit <- lm(formula = log10.frac.more.than ~ zscore, data = jsubsub)
#
# log10pval <- predict(jfit, newdata = data.frame(zscore = zscore.real))
#
# xpred <- seq(min(jsubsub$zscore), max(zscore.real, jsubsub$zscore), length.out = 100)
# ypred <- predict(jfit, newdata = data.frame(zscore = xpred))
# pred.dat <- data.frame(log10.frac.more.than = ypred, zscore = xpred)
#
# ggplot(jsub %>% filter(zscore > quantile(zscore, probs = jprob)), aes(x = zscore, y = log10.frac.more.than)) +
# geom_point() + theme_bw() +
# geom_vline(xintercept = zscore.real, linetype = "dashed") +
# expand_limits(y = ceiling(log10pval)) +
# geom_line(mapping = aes(x = zscore, y = log10.frac.more.than), data = pred.dat)
|
de6d2284a7913c6b16b13416b77228cad67e53f2
|
bc3a58c0f3abd24f4f64f641152c09b79efefe38
|
/man/subsetExpressionSet2DS.Rd
|
4c27fe65538624ae1640f31cee3efb6222ce0bb0
|
[
"MIT"
] |
permissive
|
isglobal-brge/dsOmics
|
96aa2594cbe009f2899d99fdc5be43a96f50d6bf
|
78fee19320cdf360db7ec1aed2fb07ee4c533951
|
refs/heads/master
| 2023-04-07T09:23:17.202083
| 2023-03-15T09:31:40
| 2023-03-15T09:31:40
| 158,839,360
| 1
| 12
|
MIT
| 2021-02-02T10:21:06
| 2018-11-23T13:55:17
|
R
|
UTF-8
|
R
| false
| true
| 316
|
rd
|
subsetExpressionSet2DS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetExpressionSet2DS.R
\name{subsetExpressionSet2DS}
\alias{subsetExpressionSet2DS}
\title{Subset ExpressionSet}
\usage{
subsetExpressionSet2DS(eSet, n_ids)
}
\value{
Subseted \code{ExpressionSet}
}
\description{
Subset ExpressionSet
}
|
f7d57d82ab3c28809fc6296e31cdb6eb29c0b63f
|
ecda5a82a32b9e5bcd4be7bc779b887bb504718d
|
/Figure_1.R
|
326134e655bf6c8cd499c74166e6b7cabeefe8ea
|
[] |
no_license
|
bongsongkim/BLUP
|
1a47b759f4ebbe074edfe0026fbfb45c7dab2b10
|
232856290e66d99f3affd511dc804cb93d02c5f5
|
refs/heads/master
| 2021-07-09T11:03:54.332648
| 2020-08-04T03:42:39
| 2020-08-04T03:42:39
| 173,872,130
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
Figure_1.R
|
# Figure 1(A) Correlation between Naive-BLUP and K-BLUP
# Both CSVs are fetched from GitHub; column 2 holds the BLUP estimate
# for each entry, plotted one method against the other.
dat1 <- read.csv("https://raw.githubusercontent.com/bongsongkim/BLUP/master/raw_data/Naive-BLUP_estimates.csv",header=T)
dat2 <- read.csv("https://raw.githubusercontent.com/bongsongkim/BLUP/master/raw_data/K-BLUP_estimates.csv",header=T)
# NOTE(review): assumes both files list entries in the same row order --
# TODO confirm; a merge on the id column would be safer.
plot(dat1[,2],dat2[,2],xlab="Naive BLUP",ylab="Conventional BLUP")
|
23ecd552463295455b782d31c3e12871fb41b451
|
7dcd8ca463f3d0d727ed631a35ef112d38d193f2
|
/R/2. Operations on File/file.R
|
3ab132bcae87a16c1218fa4bfb7a29845999edec
|
[
"MIT"
] |
permissive
|
shoaibrayeen/Data-Science-With-Python-And-R
|
03b38da9e8b0ebead34c51efa44f7e5052f773c4
|
2f4f398a2ea414395c4ff04b38c777f96f78bab2
|
refs/heads/master
| 2021-07-10T23:38:10.627283
| 2020-10-06T05:02:32
| 2020-10-06T05:02:32
| 199,718,898
| 0
| 1
|
MIT
| 2020-10-06T05:02:33
| 2019-07-30T19:59:58
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 993
|
r
|
file.R
|
# Download the train/test CSVs and mirror them locally under ./CSV Files
trainData <- read.csv(url("https://cdn.skillenza.com/files/fd584fdd-89b4-47eb-a0f5-a8ac67842920/UNI.csv"))
testData <- read.csv(url("https://cdn.skillenza.com/files/b14b0903-97fd-4a41-8b66-aaa301f5fd8e/unitest.csv"))
write.csv(testData, file = "./CSV Files/test.csv",row.names=FALSE )
write.csv(trainData, file = "./CSV Files/train.csv",row.names=FALSE )
#Getting and Setting Paths
#NOTE(review): setwd() mutates global state for the rest of the session;
#everything below (including NAfile.csv) is resolved inside ./CSV Files.
getwd()
setwd("./CSV Files")
getwd()
train <- read.csv("train.csv")
train
nrow(train)
ncol(train)
min(train$v.id)
max(train$v.id)
#Measures of central tendency
mean(train$v.id , na.rm = FALSE)
temp <- median(train$v.id , na.rm = FALSE)
# Mode: most frequent value of 'years' via tabulate/match
uniq <- unique(train$years)
uniq[which.max(tabulate(match(train$years , uniq)))]
#Range filter: rows with v.id in (median, median + 20) and years == 3
rangeData <- subset(train, v.id > temp & v.id < temp + 20 & years == 3 )
rangeData
#NAfile.csv is expected to exist inside ./CSV Files -- TODO confirm
NAval <- read.csv("NAfile.csv")
NAval
meanVal <-mean(NAval$id, na.rm = TRUE)
medianVal <-median(NAval$id, na.rm = TRUE)
# Ignoring missing values: drop incomplete rows before the variance
val <- na.omit(NAval)
var(val$id)
|
c52adde3342bf481cf2b11471c99ee0755dadb6d
|
a808c4c6fee16ffc5a03bfc09745b4930e2f52f8
|
/cachematrix.R
|
85b4cac58148dba643aaf0d2fd1660f356d57bd5
|
[] |
no_license
|
CathySunRprogramming/RProgrammingAssignment2
|
7c9b8b9303dbc3d8d98df20d3495d0e4c7af3ebe
|
2046fa3e965f20c1bcd6be31fc0ff6579233fa69
|
refs/heads/master
| 2021-01-15T19:28:19.212424
| 2015-01-24T22:40:34
| 2015-01-24T22:40:34
| 29,793,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,120
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## These two functions cache the inverse of a matrix
## Write a short comment describing this function
## 'makeCacheMatrix' function creates a special 'matrix' object
## that can Cache the inverse of matrix x
makeCacheMatrix <- function(x = matrix()) {
matrix <- NULL
set <- function(y){
x <<- y
matrix <<- NULL
}
get <- function() x
setinverse <- function() matrix <<- solve(x)
getinverse <- function() matrix
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## 'cacheSolve' function computes the inverse of the special 'matrix'
## returned by the function makeCacheMatrix.
## If the inverse has already been calculated, then the
## cachesolve should retrieve the inverese from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
matrix <- x$getinverse()
if(!is.null(matrix)){
message("getting cached data")
return(matrix)
}
data <- x$get()
matrix <- x$setinverse()
matrix
}
|
cd0c10d04dd92d1c437450b1cfb590d9285b81ee
|
be6be601a94983030e0dd71a28bb0d1e46192dd9
|
/r/07_run_sql_union_all.R
|
a5f67ce9815ec0deaaa28489cf58224dc75eccd7
|
[] |
no_license
|
andrw-jns/ipu_forecast
|
838c7eaedd1d6e9bd09b1026e71765b3fd8ef97e
|
4721c1c29167c1c0e2f57e8cd3435618d52bb66d
|
refs/heads/master
| 2020-03-14T16:12:18.461535
| 2018-06-19T15:33:45
| 2018-06-19T15:33:45
| 129,732,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,998
|
r
|
07_run_sql_union_all.R
|
##############################
"PROXIMITY TO DEATH:"
"UNION GROUPED DATASETS"
##############################
library(tidyverse)
library(dbplyr)
library(DBI)
library(odbc)
con_sw <- dbConnect(odbc(),
Driver = "SQL Server",
Server = "MLCSU-BI-SQL-SU",
Database = "StrategicWorking", # If searching for tables must specify correct DB
Trusted_Connection = "True")
str_flatten(map_chr(years,
function(x){
str_c("SELECT * FROM defaults.aj_180616_proxdeath_grouped", x, " UNION ALL ")
}
))
# Remove the last UNION ALL, add an INTO, and insert string into:
# last time:
# dbExecute(con_sw,
# "SELECT * INTO defaults.aj_180614_proxdeath_ALL FROM defaults.aj_180614_proxdeath_grouped0405 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped0506 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped0607 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped0708 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped0809 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped0910 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped1011 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped1112 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped1213 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped1314 UNION ALL SELECT * FROM defaults.aj_180614_proxdeath_grouped1415 ")
# 41140624 rows.
dbExecute(con_sw,
"SELECT *
INTO defaults.aj_180616_proxdeath_ALL
FROM (
SELECT * FROM defaults.aj_180616_proxdeath_grouped0405 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped0506 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped0607 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped0708 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped0809 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped0910 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped1011 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped1112 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped1213 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped1314 UNION ALL SELECT * FROM defaults.aj_180616_proxdeath_grouped1415
) CTE1
WHERE (year_adjust > 2004.0 AND year_adjust < 2015.0) AND (age_adjust > -1.0) AND (NOT(cohort IS NULL))")
# [1] 37654140
# about right - because removing large numbers in 2004 and 2015
# Interrogate -------------------------------------------------------------
test3 <- tbl(con_sw, in_schema("defaults", "aj_180616_proxdeath_ALL"))
# For reference:
test3 %>% head(10) %>% collect %>% View("REFERENCE")
test3 %>% count(is.na(cohort)) %>% collect %>% View
# 0. FILTER YEARS
# 1. MUST REMOVE ALL WITH AGE LESS THAN ZERO.
# 2. Remove 1 NAS in COHORT
# 3. What to do about ttd == 999999
# 4. Lots of instances of high -ve and +ve bed_days! Figure out what to do with these later on.
" DONE!"
|
ee4aaaf9971c2ea3b0f553fdf18597cd5bf11882
|
bdbf8ef9a130b38a00fc305973daa56ec4763290
|
/man/predict_check_input.Rd
|
aecfb84fa1a7714fe5cccebb41d1286d9846ad21
|
[
"MIT"
] |
permissive
|
qianshangzen/BGLR-R-package
|
9e5d73756d80f2280d918f6ca68114c1e28661ab
|
151129e30bc140fc09c0159cf46d94fed102fa9e
|
refs/heads/master
| 2022-11-27T16:19:17.950153
| 2020-08-11T22:35:31
| 2020-08-11T22:35:31
| 286,858,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,023
|
rd
|
predict_check_input.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_check_input.R
\name{predict_check_input}
\alias{predict_check_input}
\title{Checking if all inputs are valid for function predict_blrf.
If not valid input exists, terminate program and output error message.}
\usage{
predict_check_input(blrf, confidence, probability, lower, upper)
}
\arguments{
\item{blrf}{blrf object.}
\item{confidence}{logical. If TURE, then output confidence interval.}
\item{probability}{logical. If TRUE, then output will be predict probability for factor
type of blrf. If FALSE, then the output will be predict label for "factor"
type of blrf or predict value for "numeric" type of blrf.}
\item{lower}{numeric. If confidence is TRUE, then define lower bound of ci.}
\item{upper}{numeric. If confidence is TRUE, then define upper bound of ci.}
}
\value{
logic.
}
\description{
Checking if all inputs are valid for function predict_blrf.
If not valid input exists, terminate program and output error message.
}
|
c63dbf4037e9a25a705630e4aee032f93d2f0757
|
9a1fba7d7f050275a958df3b18212a19d454524c
|
/Aula 1/vetor.R
|
480140ae7007e8f7ac92a73fa3d0146ee07bc809
|
[] |
no_license
|
Marcelo391/Curso-R-Coti
|
a20dbfd3f8fe62c89023128d7314360784b1f1ce
|
f183a7a3ab86b992f2f2eb577b200620a0100b91
|
refs/heads/master
| 2021-05-22T18:20:09.409843
| 2020-05-02T15:55:35
| 2020-05-02T15:55:35
| 253,037,019
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,012
|
r
|
vetor.R
|
#VETORES
#Atribuição
a = 10
c <- 10
a
c
#Pesquisa/Help sobre uma função
?c
#Criação de vetor
vetor <- c(1, 2, 3, 4, 5, 6)
#executar
vetor
#Vetor de caracteres
nomes <- c("Marcelo", "Sarah", "João")
nomes
#posição do vetor <- Primeira posição no R é sempre 1
nomes[1]
vetor[0] #<- Não existe
# Tamanho Vetor
length(vetor)
length(nomes)
#verificar se é vetor
is.vector(nomes)
#Vetor numérico a partir de um intervalo
numeros <- 1:10
numeros
#Função seq
numeros2 <- seq(0, 1, by = 0.1)
numeros2
#Vetores de tipos diferentes
c("a", numeros)
#Operações matemáticas com vetores
operacao <- seq(10, 40, by = 10)
operacao -3
operacao + 3
operacao * 3
operacao /3
round(operacao /3)
#Vetores de elementos repetidos
repetidos <- rep(1,5)
repetidos2 <- rep(c(1,2), c(3,4))
#Função paste() -> usada para vetores de caracteres
n <- c("Lucas", "Bia", "Ana")
paste(n, 1:3)
paste(n, "Oliveira")
paste("T", 1:3, sep="")
rep(paste("T", 1:3, sep=""), c(4,4,3))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.