blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
53ee41f928f076b9b2b887b7e54f6c91b1780bf6
|
5bfe5326df355d9014046646eda81ec5c9ea0b0e
|
/man/ez.combat.Rd
|
15f134929323f390daa2357c427ebbf37b1addee
|
[
"Artistic-2.0"
] |
permissive
|
TKoscik/ez.combat
|
c015f293d093d2115ee989906b914fb28f54bfa5
|
41d00cbc3d73f7ffc6d0f8bc131ea1b85a160a76
|
refs/heads/master
| 2023-01-13T22:52:49.239829
| 2023-01-05T19:23:14
| 2023-01-05T19:23:14
| 139,920,229
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,579
|
rd
|
ez.combat.Rd
|
\name{ez.combat}
\alias{ez.combat}
\title{ComBat Harmonization for Dataframe Objects}
\description{An easy-to-use function for applying ComBat Harmonization for batch effects on dataframe objects.}
\usage{
ez.combat(df,
batch.var,
adjust.var = "all",
exclude.var = NULL,
model = NULL,
output = c("overwrite", "append"),
error = c("abort", "continue"),
use.eb = TRUE,
verbose = FALSE)
}
\arguments{
\item{df}{Dataframe containing variables to be harmonized as well as the batch variable to harmonize for.}
\item{batch.var}{A character string or integer indicating the dataframe column containing the batch variable.}
\item{adjust.var}{A character vector indicating the names of the variables in the dataframe to be harmonized. Default value is "all", indicating that all variables should be harmonized (excluding the batch variable and any independent variables given in a model).}
\item{exclude.var}{An optional character vector indicating which variables to exclude. Best used in conjunction with adjust.var = "all" if the number of variables to be excluded is fewer than the number to be included}
\item{model}{a character vector specifying the model to be considered during harmonization. Models do not need to specify the dependent variable, and should follow R's standard modelling syntax, e.g., "~ group * age"}
\item{output}{Select the behaviour of the output dataframe in the output list. "overwrite" will replace the unadjusted variables with adjusted ones, "append" will add the adjusted variables to the end of the output dataframe with ".cb" appended to the variable name.}
\item{error}{Changes behaviour for handling certain input errors. "continue": harmonization will continue if variables have to be excluded; "abort": harmonization will be discontinued.}
\item{use.eb}{Logical, Perform ComBat Harmonization with empirical bayes or without}
\item{verbose}{Logical, print progress messages to console}
}
\value{A list containing:
df: a dataframe with adjusted values
gamma.hat and delta.hat: Estimated location and shift (L/S) parameters before empirical Bayes.
gamma.star and delta.star: Empirical Bayes estimated L/S parameters.
gamma.bar, t2, a.prior and b.prior: estimated prior distribution parameters.
}
\author{
Timothy R. Koscik <timothy-koscik@uiowa.edu>
}
\examples{
# Harmonize 'iris' data by removing species effects:
cb <- ez.combat(df = iris, batch.var = "Species")
## plots of raw iris data
plot(iris$Sepal.Length, iris$Petal.Length, col = iris$Species, pch = 16,
xlab = "Sepal Length", ylab = "Petal Length",
main = "Flower Characteristics by Species")
legend(x = 4.5, y = 7, legend = levels(iris$Species), col = c(1:3), pch = 16)
boxplot(iris$Sepal.Length ~ iris$Species, notch = T, las = 1,
xlab = "Species", ylab = "Sepal Length", main = "Sepal Length by Species",
cex.lab = 1.5, cex.axis = 1.5,cex.main = 2)
## plots after dataset is harmonized across species, i.e., species effects are removed
plot(cb$df$Sepal.Length, cb$df$Petal.Length, col = cb$df$Species,
pch = 16,
xlab = "Sepal Length", ylab = "Petal Length",
main = "Flower Characteristics by Species - Harmonized")
legend(x = 4.5, y = 7, legend = levels(iris$Species), col = c(1:3), pch = 16)
boxplot(cb$df$Sepal.Length ~ cb$df$Species, notch = T, las = 1,
xlab = "Species", ylab = "Sepal Length",
main = "Sepal Length by Species - Harmonized",
cex.lab = 1.5, cex.axis = 1.5,cex.main = 2)
}
|
c04147be6a210969453dd8e1e1aa0af576c64b94
|
6afccd04e65601f1af177ec273b223e85fadf5f3
|
/etrib.R
|
c05e3224c6930d62cbb79f777c4802138992d0ca
|
[] |
no_license
|
patdab90/etrib
|
fccaa5433f70a3e70157cfceb9a8ad489c0b37ed
|
13901547b633cd9851457b2d80273fe2be01a854
|
refs/heads/master
| 2016-09-02T00:39:44.921882
| 2014-08-05T10:59:02
| 2014-08-05T10:59:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,005
|
r
|
etrib.R
|
M <- 1E+10
MINEPS <- 1E-10
source('etriutils.R')
source('etribbase.R')
source('etribcardinalities.R')
source('etribpairwisecomp.R')
# Build the complete ELECTRE-TRI-B linear-programming model.
#
# performances:  matrix of alternative evaluations (rows = alternatives,
#                cols = criteria).
# profiles:      matrix of class profiles; one more row than there are classes.
# assignments:   optional 3-column matrix of assignment examples, or NULL.
# monotonicity:  per-criterion direction vector (one entry per criterion).
# th:            threshold matrix, one row per criterion.
# cardinalities: optional class-cardinality bounds, or NULL.
# pCk, pCl:      optional 3-column matrices of ">=" / "<=" class constraints.
#
# Returns a list with the problem dimensions (n, p, m), the binary variable
# names, and the assembled constraint system (constr$lhs / $dir / $rhs).
etrib.init <- function(performances, profiles, assignments, monotonicity, th, cardinalities, pCk, pCl) {
  # Dimension consistency checks on every input.
  stopifnot(ncol(performances) == ncol(profiles))
  stopifnot(is.null(assignments) || ncol(assignments) == 3)
  stopifnot(is.null(pCk) || ncol(pCk) == 3)
  stopifnot(is.null(pCl) || ncol(pCl) == 3)
  stopifnot(nrow(assignments) < nrow(performances))
  stopifnot(nrow(th) == ncol(performances))
  stopifnot(nrow(th) == length(monotonicity))
  message("--- constructing base model")

  n.alt  <- nrow(performances)   # number of alternatives
  n.cls  <- nrow(profiles) - 1   # number of classes (profiles bound them)
  n.crit <- ncol(performances)   # number of criteria
  alts    <- seq_len(n.alt)      # A: alternative indices
  classes <- seq_len(n.cls)      # H: class indices
  crits   <- seq_len(n.crit)     # J: criterion indices

  model <- list(n = n.alt, p = n.cls, m = n.crit)
  var.info <- createTRIBVariables(alts, classes, crits, pCk, pCl)
  model$binary <- var.info$binaryVarNames
  model$constr$lhs <- intiMatrix(var.info$varnames)
  model$constr$dir <- initDIRMatrix()
  model$constr$rhs <- intiRHSMatrix()

  model <- buildBaseModel(model, performances, profiles, monotonicity, th, alts, classes, crits)
  model <- buildAEModel(model, crits, assignments)
  model <- buildCCModel(model, alts, classes, crits, cardinalities)
  model <- buildPConstraint(model, crits, classes, pCk, pCl)
  model
}
# Compute, for each class h, the extreme (minimum or maximum) number of
# alternatives that can be assigned to it over all models compatible with
# the constraint system.
#
# etrib: model list produced by etrib.init().
# max:   logical; TRUE maximises the cardinality, FALSE minimises it.
#
# Returns a p x 1 matrix (rows C1..Cp, column "MAX" or "MIN").
etrib.extremeCardinalities <- function(etrib, max){
  colname <- if (max) "MAX" else "MIN"
  extreme <- matrix(0, nrow = etrib$p, ncol = 1,
                    dimnames = list(paste0("C", seq_len(etrib$p)), colname))
  all.vars <- colnames(etrib$constr$lhs)
  # Variable types do not depend on h: compute once outside the loop.
  var.types <- etriutils.getConstraintTypes(varnames = all.vars,
                                            binaryVarNames = etrib$binary)
  for (h in seq_len(etrib$p)) {  # seq_len() is safe when p == 0 (1:0 would still loop)
    # Objective: sum of the binary assignment indicators v(a, h) for class h.
    obj.vars <- paste0("v(a", seq_len(etrib$n), ",h", h, ")")
    obj <- etriutils.buildObjectiveFunction(varnames = all.vars,
                                            objectivesVarNames = obj.vars)
    ret <- etriutils.solve(objectioveFunction = obj, varTypes = var.types,
                           lhs = etrib$constr$lhs, dir = etrib$constr$dir,
                           rhs = etrib$constr$rhs, max = max)
    extreme[h, ] <- ret$objval
  }
  extreme
}
# Check whether the constraint system admits a solution with e > 0, i.e.
# whether the supplied preference information is consistent.
# Returns TRUE when the LP solves with status code 0 and a positive objective.
etrib.isFeasible <- function(etrib){
  all.vars <- colnames(etrib$constr$lhs)
  obj <- etriutils.buildObjectiveFunction(varnames = all.vars,
                                          objectivesVarNames = "e")
  types <- etriutils.getConstraintTypes(varnames = all.vars,
                                        binaryVarNames = etrib$binary)
  sol <- etriutils.solve(objectioveFunction = obj, varTypes = types,
                         lhs = etrib$constr$lhs, dir = etrib$constr$dir,
                         rhs = etrib$constr$rhs, max = TRUE)
  sol$status$code == 0 && sol$objval > 0
}
# Compute the pairwise preference relation between all ordered pairs of
# alternatives: relation[a, b] is TRUE when the LP augmented with the
# buildPLModel(a, b) constraints is infeasible or has a negative objective.
# Returns an n x n logical matrix with rows/cols labelled a1..an.
etrib.preferenceRelation <- function(etrib){
  n <- etrib$n
  rel <- matrix(FALSE, nrow = n, ncol = n,
                dimnames = list(paste0("a", seq_len(n)), paste0("a", seq_len(n))))
  all.vars <- colnames(etrib$constr$lhs)
  # Objective and variable types are independent of the pair (a, b):
  # build them once instead of inside the n^2 loop.
  obj <- etriutils.buildObjectiveFunction(varnames = all.vars,
                                          objectivesVarNames = "e")
  types <- etriutils.getConstraintTypes(varnames = all.vars,
                                        binaryVarNames = etrib$binary)
  for (a in seq_len(n)) {
    for (b in seq_len(n)) {
      pl <- buildPLModel(all.vars, a, b, etrib$p, seq_len(etrib$m))
      ret <- etriutils.solve(objectioveFunction = obj, varTypes = types,
                             lhs = rbind(etrib$constr$lhs, pl$lhs),
                             dir = rbind(etrib$constr$dir, pl$dir),
                             rhs = rbind(etrib$constr$rhs, pl$rhs),
                             max = TRUE)
      rel[a, b] <- ret$status$code != 0 || ret$objval < 0
    }
  }
  rel
}
# Build the extra LP constraint rows used when comparing two alternatives.
# For every internal profile h = 1 .. p-1 two rows are added:
#   PL1.h:  sum_j cj(a_b, b_h) - L      >= 0
#   PL2.h:  sum_j cj(a_a, b_h) - L + e  <= 0
#
# varnames: full variable-name vector of the master LP (must contain "L",
#           "e" and the relevant cj(...) columns).
# a, b:     indices of the two alternatives being compared.
# p:        number of classes; J: vector of criterion indices.
# Returns list(lhs, rhs, dir) ready to rbind onto the master constraints.
buildPLModel <- function(varnames, a, b, p, J){
  out <- list(lhs = matrix(nrow = 0, ncol = length(varnames),
                           dimnames = list(NULL, varnames)),
              rhs = matrix(nrow = 0, ncol = 1),
              dir = matrix(nrow = 0, ncol = 1))
  # seq_len(): produces no iterations when p == 1; the original 1:(p-1)
  # would yield c(1, 0) and index nonexistent "b0" columns.
  for (h in seq_len(p - 1)) {
    rnames <- c(paste0("PL1.", h), paste0("PL2.", h))
    lhs <- matrix(0, nrow = 2, ncol = length(varnames),
                  dimnames = list(rnames, varnames))
    lhs[1, paste0("c", J, "(a", b, ",b", h, ")")] <- 1
    lhs[, "L"] <- -1  # both rows subtract the majority threshold L
    lhs[2, paste0("c", J, "(a", a, ",b", h, ")")] <- 1
    lhs[2, "e"] <- 1
    out$lhs <- rbind(out$lhs, lhs)
    out$dir <- rbind(out$dir, matrix(c(">=", "<="), nrow = 2, ncol = 1,
                                     dimnames = list(rnames, NULL)))
    out$rhs <- rbind(out$rhs, matrix(0, nrow = 2, ncol = 1,
                                     dimnames = list(rnames, NULL)))
  }
  out
}
# For every alternative/class pair, test whether the alternative can be
# assigned to that class in at least one model compatible with the
# constraints (solver succeeds with a positive objective).
# Returns an n x p logical matrix (rows a1..an, columns C1..Cp).
# NOTE(review): the exported name keeps the original spelling
# ("Assigment") because external callers may depend on it.
etrib.possibleAssigment <- function(etrib){
  possible <- matrix(FALSE, ncol = etrib$p, nrow = etrib$n,
                     dimnames = list(paste0("a", seq_len(etrib$n)),
                                     paste0("C", seq_len(etrib$p))))
  all.vars <- colnames(etrib$constr$lhs)
  # Objective and variable types are loop-invariant: build them once.
  obj <- etriutils.buildObjectiveFunction(varnames = all.vars,
                                          objectivesVarNames = "e")
  types <- etriutils.getConstraintTypes(varnames = all.vars,
                                        binaryVarNames = etrib$binary)
  for (h in seq_len(etrib$p)) {
    for (a in seq_len(etrib$n)) {
      pa <- buildPAModel(all.vars, a = a, h = h, p = etrib$p, J = seq_len(etrib$m))
      ret <- etriutils.solve(objectioveFunction = obj, varTypes = types,
                             lhs = rbind(etrib$constr$lhs, pa$lhs),
                             dir = rbind(etrib$constr$dir, pa$dir),
                             rhs = rbind(etrib$constr$rhs, pa$rhs),
                             max = TRUE)
      possible[a, h] <- ret$status$code == 0 && ret$objval > 0
    }
  }
  possible
}
# Render each LP constraint row as a human-readable string for debugging,
# e.g. " + x -2 y <= 3". Zero coefficients are skipped; a coefficient of
# exactly 1 prints just "+ name"; other positives print "+ coef name";
# negatives rely on the coefficient's own sign.
#
# lhs: numeric coefficient matrix with column names.
# dir: one-column matrix of relational operators, aligned with lhs rows.
# rhs: one-column matrix of right-hand-side values, aligned with lhs rows.
# Returns an nrow(lhs) x 1 character matrix named by the lhs row names.
constraintsToString <- function(lhs, dir, rhs){
  res <- matrix("", nrow = nrow(lhs), ncol = 1, dimnames = list(rownames(lhs)))
  for (j in seq_len(nrow(lhs))) {    # seq_len(): safe when lhs has no rows
    for (i in seq_len(ncol(lhs))) {
      coef <- lhs[j, i]
      if (coef != 0) {
        if (coef > 0) {
          if (coef == 1) {
            res[j, ] <- paste(res[j, ], "+", colnames(lhs)[i])
          } else {
            res[j, ] <- paste(res[j, ], "+", coef, colnames(lhs)[i])
          }
        } else {
          res[j, ] <- paste(res[j, ], coef, colnames(lhs)[i])
        }
      }
    }
    res[j, ] <- paste(res[j, ], dir[j, ], rhs[j, ])
  }
  res
}
# Build the extra constraints for the possible-assignment test: can
# alternative a be placed in class h? Adds (when applicable):
#   PA.1: sum_j cj(a, b_{h-1}) - L      >= 0   (only when h > 1)
#   PA.2: sum_j cj(a, b_h)     - L + e  <= 0   (only when h < p)
#
# varnames: full variable-name vector of the master LP.
# a: alternative index; h: target class; p: number of classes;
# J: vector of criterion indices.
# Returns list(lhs, rhs, dir) to be appended to the master model.
buildPAModel <- function(varnames, a, h, p, J){
  model <- list(lhs = matrix(nrow = 0, ncol = length(varnames)),
                rhs = matrix(nrow = 0, ncol = 1),
                dir = matrix(nrow = 0, ncol = 1))
  colnames(model$lhs) <- varnames

  # Helper: one constraint row touching the given cj(...) columns plus L
  # (and optionally e), with operator `op` and a zero right-hand side.
  make_row <- function(label, cols, with.e, op) {
    row <- matrix(0, nrow = 1, ncol = length(varnames),
                  dimnames = list(label, varnames))
    row[1, cols] <- 1
    row[1, "L"] <- -1
    if (with.e) {
      row[1, "e"] <- 1
    }
    list(lhs = row,
         dir = matrix(op, nrow = 1, ncol = 1, dimnames = list(label, NULL)),
         rhs = matrix(0, nrow = 1, ncol = 1, dimnames = list(label, NULL)))
  }

  if (h > 1) {
    # The lowest class needs no lower-bound row.
    r <- make_row("PA.1", paste0("c", J, "(a", a, ",b", h - 1, ")"), FALSE, ">=")
    model$lhs <- rbind(model$lhs, r$lhs)
    model$dir <- rbind(model$dir, r$dir)
    model$rhs <- rbind(model$rhs, r$rhs)
  }
  if (h < p) {
    # The highest class needs no upper-bound row.
    r <- make_row("PA.2", paste0("c", J, "(a", a, ",b", h, ")"), TRUE, "<=")
    model$lhs <- rbind(model$lhs, r$lhs)
    model$dir <- rbind(model$dir, r$dir)
    model$rhs <- rbind(model$rhs, r$rhs)
  }
  model
}
# Assemble the base ELECTRE-TRI-B constraint groups (B1, B2, B4, B5, B6)
# into the model, in order. Returns the updated model list.
buildBaseModel <- function(etrib, performances, profiles, monotonicity, th, A, H, J) {
  model <- createB1Constraint(etrib, J)
  model <- createB2Constraint(model, J, H)
  model <- createB4Constraint(model)
  model <- createB5Constraint(model, J)
  createB6Constraint(model, performances, profiles, monotonicity, th)
}
# Add assignment-example (AE) constraints. Each row of `assignments` is
# (alternative, lowest class, highest class); for each example we require
#   AE.i       : sum_j cj(a, b_{low-1}) - L      >= 0
#   AE.(nAs+i) : sum_j cj(a, b_high)    - L + e  <= 0
# i.e. the alternative outranks the profile below its lowest allowed class
# and fails to outrank the profile of its highest allowed class.
#
# etrib: model list; J: criterion indices; assignments: 3-column matrix or
# NULL (in which case the model is returned unchanged).
buildAEModel <- function(etrib, J, assignments){
  if (is.null(assignments)) {
    return(etrib)
  }
  n.ex <- nrow(assignments)
  total <- 2L * n.ex
  rnames <- paste0("AE.", seq_len(total))  # seq_len(): a 0-row matrix adds nothing instead of crashing
  lhs <- matrix(0, nrow = total, ncol = ncol(etrib$constr$lhs),
                dimnames = list(rnames, colnames(etrib$constr$lhs)))
  for (i in seq_len(n.ex)) {
    ex <- assignments[i, ]
    # Lower bound row for example i.
    lhs[i, paste0("c", J, "(a", ex[1], ",b", ex[2] - 1, ")")] <- 1
    lhs[i, "L"] <- -1
    # Matching upper bound row, stored after all lower bound rows.
    up <- n.ex + i
    lhs[up, paste0("c", J, "(a", ex[1], ",b", ex[3], ")")] <- 1
    lhs[up, "L"] <- -1
    lhs[up, "e"] <- 1
  }
  dir <- as.matrix(rep(c(">=", "<="), each = n.ex))
  rownames(dir) <- rnames
  rhs <- as.matrix(rep(0, total))
  rownames(rhs) <- rnames
  etrib$constr$lhs <- rbind(etrib$constr$lhs, lhs)
  etrib$constr$dir <- rbind(etrib$constr$dir, dir)
  etrib$constr$rhs <- rbind(etrib$constr$rhs, rhs)
  etrib
}
# Add the class-cardinality constraint groups: CC1-CC3 are always added;
# CC4/CC5 only when explicit cardinality bounds are supplied.
# Returns the updated model list.
buildCCModel <- function(etrib, A, H, J, cardinalities){
  model <- createCC1Constraints(etrib, A, H)
  model <- createCC2Constraints(model, J)
  model <- createCC3Constraints(model, J)
  if (!is.null(cardinalities)) {
    model <- createCC4Constraints(model, A, cardinalities)
    model <- createCC5Constraints(model, A, cardinalities)
  }
  model
}
# Add the optional ">=" (pCk) and "<=" (pCl) class-relation constraint
# groups. Either argument may be NULL, in which case its group is skipped.
# Returns the updated model list.
buildPConstraint <- function(etrib, J, H, pCk, pCl){
  if (!is.null(pCk)) {
    for (builder in list(createPC1Constrints, createPC2Constrints, createPC3Constrints)) {
      etrib <- builder(etrib, J, H, pCk)
    }
  }
  if (!is.null(pCl)) {
    for (builder in list(createPU1Constrints, createPU2Constrints, createPU3Constrints)) {
      etrib <- builder(etrib, J, H, pCl)
    }
  }
  etrib
}
# Create the full, ordered list of LP variable names for the model, plus
# the subset of names that must be declared binary.
#
# A: alternative indices; H: class indices; J: criterion indices.
# pCk / pCl: optional 3-column matrices (alternative, profile, class) for
#   ">=" / "<=" class constraints, or NULL.
# Returns list(varnames, binaryVarNames).
createTRIBVariables <- function(A, H, J, pCk, pCl){
  p <- length(H)
  # Criterion weights first, then the majority threshold L and slack e.
  varnames <- c(paste0("w", J), "L", "e")
  binaryVarNames <- c()
  # Binary assignment indicators v(a, h): one per alternative/class pair.
  for (a in A) {
    for (h in H) {
      binary <- paste0("v(a", a, ",h", h, ")")
      varnames <- c(varnames, binary)
      binaryVarNames <- c(binaryVarNames, binary)
    }
  }
  # Per-criterion concordance variables: alternative vs profile ...
  for (a in A) {
    for (b in 0:p) {
      varnames <- c(varnames, paste0('c', J, '(a', a, ',b', b, ')'))
    }
  }
  # ... and profile vs alternative.
  for (b in 0:p) {
    for (a in A) {
      varnames <- c(varnames, paste0('c', J, '(b', b, ',a', a, ')'))
    }
  }
  # Binary indicators for the optional ">=" class constraints.
  if (!is.null(pCk)) {
    for (row in seq_len(nrow(pCk))) {  # seq_len(): safe for a 0-row matrix
      pck <- pCk[row, ]
      # NOTE(review): when p == pck[3], 1:(p - pck[3]) is 1:0 and still
      # generates two names ("h1" and "h0") -- confirm that case cannot occur.
      binary <- paste0("v(a", pck[1], ",b", pck[2], ",>=", pck[3], ",h", 1:(p - pck[3]), ")")
      varnames <- c(varnames, binary)
      binaryVarNames <- c(binaryVarNames, binary)
    }
  }
  # Binary indicators for the optional "<=" class constraints.
  if (!is.null(pCl)) {
    for (row in seq_len(nrow(pCl))) {
      pcl <- pCl[row, ]
      binary <- paste0("v(a", pcl[1], ",b", pcl[2], ",<=", pcl[3], ",h", 1:(p - pcl[3]), ")")
      varnames <- c(varnames, binary)
      binaryVarNames <- c(binaryVarNames, binary)
    }
  }
  list(varnames = varnames, binaryVarNames = binaryVarNames)
}
# Create the initial LHS constraint matrix: a single row "E" with a 1 in
# the "e" column (paired with the ">= MINEPS" direction/RHS rows so that
# the slack variable e must be positive).
# `names` is the full vector of LP variable names.
intiMatrix <- function(names){
  lhs <- matrix(0, nrow = 1, ncol = length(names),
                dimnames = list("E", names))
  lhs["E", "e"] <- 1
  lhs
}
# Direction operator for the initial "E" row: e >= (the MINEPS RHS).
# Returns a 1x1 matrix containing ">=" with row name "E".
initDIRMatrix <- function(){
  direction <- matrix(">=", nrow = 1, ncol = 1)
  rownames(direction) <- "E"
  direction
}
# Right-hand side for the initial "E" row: the file-global epsilon MINEPS,
# which forces the slack variable e to be strictly positive.
# Returns a 1x1 matrix with row name "E".
intiRHSMatrix <- function(){
  rhs <- matrix(MINEPS, nrow = 1, ncol = 1)
  rownames(rhs) <- "E"
  rhs
}
|
e01407643079f9252b8645c8b3498c0aa8ebfcfe
|
2b88050b540cc67759ad9722be2ae69b93466b4f
|
/man/check_stop_criteria.Rd
|
9b5dbace471623fd6623c1524e5fed2ba26e756f
|
[] |
no_license
|
fcampelo/MOEADr
|
632ff9dd2e6755ea5e9d5f389064688aee0878b4
|
0ac9b1962ef0c76eb1eedfa63756eeeec147135a
|
refs/heads/master
| 2023-06-30T11:01:23.181052
| 2023-01-06T12:39:42
| 2023-01-06T12:39:42
| 61,828,989
| 15
| 9
| null | 2023-06-10T13:58:44
| 2016-06-23T18:50:55
|
R
|
UTF-8
|
R
| false
| true
| 1,131
|
rd
|
check_stop_criteria.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_stop_criteria.R
\name{check_stop_criteria}
\alias{check_stop_criteria}
\title{Stop criteria for MOEA/D}
\usage{
check_stop_criteria(stopcrit, call.env)
}
\arguments{
\item{stopcrit}{list containing the parameters defining the stop
handling method. See Section \verb{Stop Criteria} of the \code{\link[=moead]{moead()}}
documentation for details.}
\item{call.env}{List vector containing the stop criteria to be used.
See \code{\link[=moead]{moead()}} for details.}
}
\value{
Flag \code{keep.running}, indicating whether the algorithm should continue
(\code{TRUE}) or terminate (\code{FALSE}).
}
\description{
Verifies stop criteria for the MOEADr package.
}
\details{
This routine is intended to be used internally by \code{\link[=moead]{moead()}},
and should not be called directly by the user.
}
\section{References}{
F. Campelo, L.S. Batista, C. Aranha (2020): The {MOEADr} Package: A
Component-Based Framework for Multiobjective Evolutionary Algorithms Based on
Decomposition. Journal of Statistical Software \doi{10.18637/jss.v092.i06}\cr
}
|
86da7c8f27c12f458f88799be74160023167ab6e
|
83f40f224a0f935338010fde3398a2f3fef746e9
|
/TDS3301 - Data Mining/Assignment/Part 2/shiny/ui.R
|
7a786971abd881b868533ead2e016351da85364d
|
[
"Apache-2.0"
] |
permissive
|
jackwong95/MMURandomStuff
|
4cf78b5784cdf65b0302e568178121aa983b2003
|
ce0bedbba97344da8cd1d12411d47c3a31af09b1
|
refs/heads/master
| 2023-02-13T13:19:38.083830
| 2023-01-29T15:57:08
| 2023-01-29T15:57:08
| 49,407,629
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,354
|
r
|
ui.R
|
library(shiny)
# Define UI for application that draws a histogram
# NOTE(review): despite the comment above, this UI only displays static
# pre-formatted text (association-rule output); no plot output is defined.
shinyUI(fluidPage(
# Page-level CSS: import Google fonts and style the h1 header.
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h1 {
font-family: 'Lobster', cursive, bold;
font-weight: 500;
line-height: 3;
color: #00FFFF;
}
"))
),
headerPanel("My Convincing Results"),
# Static listing of mined rules: lhs => rhs with support, confidence, lift.
mainPanel(
list(tags$head(tags$style("body {background-color: #ff3399; font-family: 'sans-serif; '}"))),
p("lhs rhs support confidence lift "),
p("{vanilla frappuccino,walnut cookie} => {chocolate tart} 2% 100% 19.607843"),
p("{coffee eclair,single espresso} => {blackberry tart} 2% 96% 13.127854"),
p("{apple croissant,apple danish} => {apple tart} 4% 95% 12.055455"),
p("{apple danish,cherry soda} => {apple tart} 3% 94% 11.891063"),
p("{apple croissant,cherry soda} => {apple tart} 3% 94% 11.891063")
)
))
|
7e925c2918ebb6ff6c6eb857cb57b1017920225b
|
3a5b24af385e8bd09526d4742c81bc3a2e01be4e
|
/man/mergeCellChat.Rd
|
058ef33cbe98347d8fe0612db606c7737a5fbd28
|
[] |
no_license
|
teryanarmen/CellChat
|
019aa5099f53518aef45b3c1bf8a7cdc8370b2a2
|
56ac7b92718517ab5f9cddb80ca859d6ae29bf30
|
refs/heads/master
| 2023-03-29T19:42:54.226398
| 2021-04-08T19:05:21
| 2021-04-08T19:05:21
| 356,020,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
mergeCellChat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CellChat_class.R
\name{mergeCellChat}
\alias{mergeCellChat}
\title{Merge CellChat objects}
\usage{
mergeCellChat(
object.list,
add.names = NULL,
merge.data = FALSE,
cell.prefix = FALSE
)
}
\arguments{
\item{object.list}{A list of multiple CellChat objects}
\item{add.names}{A vector containing the name of each dataset}
\item{merge.data}{whether merging the data for all genes}
\item{cell.prefix}{whether prefix cell names}
}
\value{
A merged CellChat object combining the objects in \code{object.list}.
}
\description{
Merge CellChat objects
}
|
2226091a5316b1a0f1e80d0bf425a506fb1c998c
|
ff478f2f7793123759f25fa62976f275d593fb10
|
/code/fncs/11_confusion_scheme.R
|
b4e4cbd89a34d31e38dbe87f74c55086515c8eb5
|
[] |
no_license
|
EUROMAMMALS/RP016
|
a74896138b024143ffcd00c5305ee4462b7a61a1
|
862e7672e175eb18c77054ae0bc6e701ac93fb2d
|
refs/heads/main
| 2023-04-13T19:11:48.430917
| 2023-02-09T16:24:11
| 2023-02-09T16:24:11
| 586,227,085
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
11_confusion_scheme.R
|
confusion_scheme <- function(xleft=4.160,ybottom=0.630,xright=4.660,ytop=0.680,
col=c('darkgreen','darkblue','green','lightblue'), xpd=NA){
# Plots a 2x2 confusion matrix scheme in your plot window. x and y values are based on
# axis values. You can specify the colors from upper left, upper right, lower left, lower right
par(xpd=xpd) # xpd NA or FALSE, if NA you can also plot outside the plot window
ymid <- (ytop+ybottom)/2 # define the middle along y axis
xmid <- (xright+xleft)/2 # define the middle along x axis
rect(xleft,ybottom,xright,ytop) # original rectangle (redundant)
rect(xleft,ymid,xmid,ytop, col= col[1]) # upperleft
rect(xmid,ymid,xright,ytop, col= col[2]) # upperright
rect(xmid,ybottom,xright,ymid, col= col[3]) # lowerleft
rect(xleft,ybottom,xmid,ymid, col= col[4]) # lowerright
}
|
eb9ea019b64c5b847566babc18ab1c385aef045d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aprean3/examples/dst192.Rd.R
|
5d883a8bf678a2a3cf55153544e58816533d869e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 142
|
r
|
dst192.Rd.R
|
library(aprean3)
### Name: dst192
### Title: Dataset from Table 19.2
### Aliases: dst192
### Keywords: datasets
### ** Examples
# Auto-extracted example: print the dst192 dataset shipped with aprean3.
dst192
|
96922b857ec389bcd74d6e0983ea9f27afbce003
|
35bace0a2490d9e15f021f752c854748c9ff378c
|
/man/od_fatal_alcohol_tox.Rd
|
d015c983403708c6c946197aa0a472a08349d878
|
[] |
no_license
|
doh-FXX0303/overdoser
|
077c0c1eebdf8a3f3a32a0ef2439aee61e546243
|
d0314c87545b5fd66026f3bd3d956afc7ce6de38
|
refs/heads/master
| 2023-03-15T20:48:32.451747
| 2018-10-29T22:47:03
| 2018-10-29T22:47:03
| 569,497,723
| 1
| 0
| null | 2022-11-23T00:46:34
| 2022-11-23T00:46:33
| null |
UTF-8
|
R
| false
| true
| 492
|
rd
|
od_fatal_alcohol_tox.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od_fatal_alchohol_tox.R
\name{od_fatal_alcohol_tox}
\alias{od_fatal_alcohol_tox}
\title{Find any Alcohol toxicity}
\usage{
od_fatal_alcohol_tox(data, underly_col, mult_col)
}
\arguments{
\item{data}{input data}
\item{underly_col}{underlying cause column index}
\item{mult_col}{multicause index}
}
\value{
alcohol_tox
}
\description{
Find any Alcohol toxicity
}
\examples{
# examples to be added
}
|
0c093673210ce842b46b17be07fd9963f6a3a837
|
0ef0c56da75c5fd15e1a95d7454b3bcfb2afa218
|
/PA1_template.R
|
9c1316d6ea272bd55d19800b423694c37d982cca
|
[] |
no_license
|
bvsrini/RepData_PeerAssessment1
|
1fe66fc4a7126b37f3ceddf6dda2874fec0fab4d
|
59b9ec530ebcbaaba9c4f178ee2c468b7b8a2468
|
refs/heads/master
| 2021-01-16T20:51:30.777567
| 2015-09-20T20:18:06
| 2015-09-20T20:18:06
| 42,819,303
| 0
| 0
| null | 2015-09-20T16:04:34
| 2015-09-20T16:04:33
| null |
UTF-8
|
R
| false
| false
| 4,231
|
r
|
PA1_template.R
|
## Loading and preprocessing the data
library(curl)
library(dplyr)
library(date)
library(lubridate)
library(ggplot2)
####Load the data
#Load the data and unzip
curl_download(url= 'https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip',destfile='repdata_activity.zip')
####Process/transform the data (if necessary) into a format suitable for your analysis
# Read activity.csv directly out of the zip; columns are steps (integer),
# date (Date) and interval (integer), per colClasses below.
activity_data <- read.csv(unz("repdata_activity.zip", "activity.csv"), header=TRUE, as.is = TRUE, colClasses = c("integer","Date","integer"))
## What is mean total number of steps taken per day?
####For this part of the assignment, you can ignore the missing values in the dataset.
#omit NA rows and load them into another data frame, summarise so we can plot the first histogram
act_data <- na.omit(activity_data)
# Per-day summary: m = mean steps, s = total steps.
mn <- summarise(group_by(act_data,date),m=mean(steps),s= sum(steps))
# we are making the historgram and not a bar graph as defined by the question
# based on the total no of steps calculated in the previous step
hist(mn$s,main="Total number of steps taken each day",
xlab="Total steps taken in a day")
# Calculate mean and median
median(mn$s)
mean(mn$s)
## What is the average daily activity pattern?
#In order to make a time seies plot we need to group by the 5 minutes intervals of the day
#when the activity reading was taken across all days
mni <- summarise(group_by(act_data,interval),m=mean(steps))
#plot the time series and add an axis to see the maxium better for the next question
plot(m ~ interval, mni, xaxt = "n", type = "l",main="Average daily activity pattern",
xlab="5 Min interval", ylab="Average daily steps")
axis(1, mni$interval, cex.axis = .5)
top_label<-paste("Max Interval = ", format((mni[mni$m == max(mni$m),]$interval), digits=2), sep="")
abline(v=(mni[mni$m == max(mni$m),]$interval), col="red")
text(mni[mni$m == max(mni$m),]$interval, 200, top_label, pos=2)
#from the graph it is evident the that 835 minute interval contains maximum no of steps across all daya
#also confirmed by the code below
mni[mni$m == max(mni$m),]
## Imputing missing values
#load the NA cases into a data frame to know the no of total no of missing values
#and also to help processing for further
na_list <- activity_data[!complete.cases(activity_data),]
nrow(na_list)
#I am using the mean for the interval to impute the missing values. The preprocessing steps
#are as follows
# Join NA rows to the per-interval means; the mean column (renamed to
# "steps" below) becomes the imputed value.
na_data <- subset (merge(mni, na_list, by.x = "interval",by.y = "interval"), select = -steps)
colnames(na_data)[2] <- "steps"
#Rbind both complete cases and data that are filled in to get back the complete
#data set filled in. Check if there are NA's in the final set
act_dat1 <- rbind(activity_data[complete.cases(activity_data),],na_data)
# NOTE(review): this counts complete cases of the ORIGINAL activity_data,
# not of the imputed act_dat1 -- the check described by the comment above
# probably wants sum(complete.cases(act_dat1)); confirm.
sum(complete.cases(activity_data))
#summarise the new filled in data set
mn_filled <- summarise(group_by(act_dat1,date),m=mean(steps),s= sum(steps))
#create the histogram based on the total number of steps from the imputed data set
hist(mn_filled$s,main="Total number of steps taken each day(Imputed)",
xlab="Total steps taken in a day")
#Calculate the mean and Median. Since i used the mean to fill up the data the mean has
#not shifted , however the median has shifted
median(mn_filled$s)
mean(mn_filled$s)
## Are there differences in activity patterns between weekdays and weekends?
#I am creating a factor variable wkd_wke and adding it to the data set, using lubridate funcion.I felt this will give lesser steps
#than using weekdays(), as it is just a suggetion
# lubridate wday(): 1 = Sunday, 7 = Saturday with the default week start.
act_dat1 <- act_dat1 %>% mutate(wkd_wke = factor(ifelse ( (wday(date) == 1 | wday(date) == 7), "Weekend","Weekday")))
#Summarise the based on new factor variable and interval
mniw <- summarise(group_by(act_dat1,wkd_wke,interval),m=mean(steps))
# I am using ggplot2 to plot the week day and week end panels. The instructions does not mandate
#using lattice plot
k<- ggplot(data=mniw,aes(x=interval,y=m ))
k<-k+facet_wrap(~wkd_wke,ncol=1)
k<-k+geom_line()
k<- k+ xlab("5 Min Interval")
k<- k+ ylab("Average Steps")
k<- k+ ggtitle("Activity patterns between weekdays & weekends") + theme(plot.title = element_text(lineheight=3, face="bold", color="black", size=14))
print(k)
|
bfcb8f44c9e650bc65d26896ba7f2e64e2da8b82
|
6638c6fe683991e37d16e1da65c7307b2efb856b
|
/lib/KNN.R
|
4b96341e06943db7a3b1694f43883ee9a8e23514
|
[] |
no_license
|
TZstatsADS/Spring2020-Project4-group5
|
4d960c5aa7ad157ebb8dc44e1739455be4ff4cff
|
156664645e9fecbf9345da1d389d1f981cc43012
|
refs/heads/master
| 2022-04-25T11:01:01.033150
| 2020-04-22T15:12:05
| 2020-04-22T15:12:05
| 252,766,431
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
KNN.R
|
# Post-process a matrix-factorisation result with an item-based 1-NN step:
# for every movie a user has NOT rated, the predicted rating is replaced by
# the user's predicted rating for the most similar movie they DID rate.
#
# data:   long ratings data frame with columns userId and movieId.
# result: factorisation output with item factors q and user factors p
#         (columns named by movie/user ids).
# Returns the users x movies predicted-rating matrix R after smoothing.
# Relies on cosine() being available (loaded elsewhere in the project,
# presumably from the lsa package -- TODO confirm).
KNN.post <- function(data, result){
  Q <- result$q                 # movie (item) factor matrix
  P <- result$p                 # user factor matrix
  R <- t(P) %*% Q               # predicted ratings: users x movies
  movie_ids <- colnames(Q)
  n_users <- length(colnames(P))
  # Item-item similarity matrix, keyed by movie id on both axes.
  sim <- as.matrix(cosine(Q))
  rownames(sim) <- movie_ids
  colnames(sim) <- movie_ids
  for (u in seq_len(n_users)) {
    # Movies the user has rated (training signal) vs. everything else.
    rated <- as.character(unique(data[data$userId == u, ]$movieId))
    unrated <- movie_ids[!movie_ids %in% rated]
    user_ratings <- R[u, ]
    for (i in unrated) {
      # Most similar rated movie; copy its predicted rating over.
      nearest <- names(which.max(sim[i, rated]))
      user_ratings[i] <- user_ratings[nearest]
    }
    R[u, ] <- user_ratings
  }
  # The original `return(rating_knn = R)` named the value misleadingly;
  # a plain return of R is equivalent and unambiguous.
  R
}
|
d87a827292c579da0a372df3cc86df31f13ce26f
|
79b935ef556d5b9748b69690275d929503a90cf6
|
/man/Kdot.Rd
|
47273c7ff0e5616273069f5439cdf49be93ff39c
|
[] |
no_license
|
spatstat/spatstat.core
|
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
|
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
|
refs/heads/master
| 2022-06-26T21:58:46.194519
| 2022-05-24T05:37:16
| 2022-05-24T05:37:16
| 77,811,657
| 6
| 10
| null | 2022-03-09T02:53:21
| 2017-01-02T04:54:22
|
R
|
UTF-8
|
R
| false
| false
| 7,644
|
rd
|
Kdot.Rd
|
\name{Kdot}
\alias{Kdot}
\title{
Multitype K Function (i-to-any)
}
\description{
For a multitype point pattern,
estimate the multitype \eqn{K} function
which counts the expected number of other points of the process
within a given distance of a point of type \eqn{i}.
}
\usage{
Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from)
}
\arguments{
\item{X}{The observed point pattern,
from which an estimate of the multitype \eqn{K} function
\eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed.
It must be a multitype point pattern (a marked point pattern
whose marks are a factor). See under Details.
}
\item{i}{The type (mark value)
of the points in \code{X} from which distances are measured.
A character string (or something that will be converted to a
character string).
Defaults to the first level of \code{marks(X)}.
}
\item{r}{numeric vector. The values of the argument \eqn{r}
at which the distribution function
\eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated.
There is a sensible default.
First-time users are strongly advised not to specify this argument.
See below for important conditions on \eqn{r}.
}
\item{breaks}{
This argument is for internal use only.
}
\item{correction}{
A character vector containing any selection of the
options \code{"border"}, \code{"bord.modif"},
\code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
\code{"translation"},
\code{"none"} or \code{"best"}.
It specifies the edge correction(s) to be applied.
Alternatively \code{correction="all"} selects all options.
}
\item{\dots}{Ignored.}
\item{ratio}{
Logical.
If \code{TRUE}, the numerator and denominator of
each edge-corrected estimate will also be saved,
for use in analysing replicated point patterns.
}
\item{from}{An alternative way to specify \code{i}.}
}
\value{
An object of class \code{"fv"} (see \code{\link{fv.object}}).
Essentially a data frame containing numeric columns
\item{r}{the values of the argument \eqn{r}
at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated
}
\item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)}
for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
}
together with a column or columns named
\code{"border"}, \code{"bord.modif"},
\code{"iso"} and/or \code{"trans"},
according to the selected edge corrections. These columns contain
estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)}
obtained by the edge corrections named.
If \code{ratio=TRUE} then the return value also has two
attributes called \code{"numerator"} and \code{"denominator"}
which are \code{"fv"} objects
containing the numerators and denominators of each
estimate of \eqn{K(r)}.
}
\details{
This function \code{Kdot} and its companions
\code{\link{Kcross}} and \code{\link{Kmulti}}
are generalisations of the function \code{\link{Kest}}
to multitype point patterns.
A multitype point pattern is a spatial pattern of
points classified into a finite number of possible
``colours'' or ``types''. In the \pkg{spatstat} package,
a multitype pattern is represented as a single
point pattern object in which the points carry marks,
and the mark value attached to each point
determines the type of that point.
The argument \code{X} must be a point pattern (object of class
\code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
It must be a marked point pattern, and the mark vector
\code{X$marks} must be a factor.
The argument \code{i} will be interpreted as a
level of the factor \code{X$marks}.
If \code{i} is missing, it defaults to the first
level of the marks factor, \code{i = levels(X$marks)[1]}.
The ``type \eqn{i} to any type'' multitype \eqn{K} function
of a stationary multitype point process \eqn{X} is defined so that
\eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)}
equals the expected number of
additional random points within a distance \eqn{r} of a
typical point of type \eqn{i} in the process \eqn{X}.
Here \eqn{\lambda}{lambda}
is the intensity of the process,
i.e. the expected number of points of \eqn{X} per unit area.
The function \eqn{K_{i\bullet}}{Ki.} is determined by the
second order moment properties of \eqn{X}.
An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)}
is a useful summary statistic in exploratory data analysis
of a multitype point pattern.
If the subprocess of type \eqn{i} points were independent
of the subprocess of points of all types not equal to \eqn{i},
then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}.
Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve
and the theoretical curve \eqn{\pi r^2}{pi * r^2}
may suggest dependence between types.
This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)}
from the point pattern \code{X}. It assumes that \code{X} can be treated
as a realisation of a stationary (spatially homogeneous)
random spatial point process in the plane, observed through
a bounded window.
The window (which is specified in \code{X} as \code{Window(X)})
may have arbitrary shape.
Biases due to edge effects are
treated in the same manner as in \code{\link{Kest}},
using the chosen edge correction(s).
The argument \code{r} is the vector of values for the
distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated.
The values of \eqn{r} must be increasing nonnegative numbers
and the maximum \eqn{r} value must not exceed the radius of the
largest disc contained in the window.
The pair correlation function can also be applied to the
result of \code{Kdot}; see \code{\link{pcf}}.
}
\references{
Cressie, N.A.C. \emph{Statistics for spatial data}.
John Wiley and Sons, 1991.
Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
Academic Press, 1983.
Harkness, R.D and Isham, V. (1983)
A bivariate spatial point pattern of ants' nests.
\emph{Applied Statistics} \bold{32}, 293--303
Lotwick, H. W. and Silverman, B. W. (1982).
Methods for analysing spatial processes of several types of points.
\emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
Ripley, B.D. \emph{Statistical inference for spatial processes}.
Cambridge University Press, 1988.
Stoyan, D, Kendall, W.S. and Mecke, J.
\emph{Stochastic geometry and its applications}.
2nd edition. Springer Verlag, 1995.
}
\section{Warnings}{
The argument \code{i} is interpreted as
a level of the factor \code{X$marks}. It is converted to a character
string if it is not already a character string.
The value \code{i=1} does \bold{not}
refer to the first level of the factor.
The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately
unbiased, but need not be a valid distribution function; it may
not be a nondecreasing function of \eqn{r}.
}
\seealso{
  \code{\link{Kcross}},
\code{\link{Kest}},
\code{\link{Kmulti}},
\code{\link{pcf}}
}
\examples{
# Lansing woods data: 6 types of trees
woods <- lansing
\testonly{woods <- woods[seq(1, npoints(woods), by=80)]}
Kh. <- Kdot(woods, "hickory")
# diagnostic plot for independence between hickories and other trees
plot(Kh.)
# synthetic example with two marks "a" and "b"
# pp <- runifpoispp(50)
# pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE))
# K <- Kdot(pp, "a")
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{nonparametric}
|
43ca86648b14d49053022d2bd95d86401a275d59
|
5562044b1aa4b0c147e871a0eca25668644f8e2a
|
/R/upd.prob.R
|
73bd5d3425a324bceee547c4ffa5ffb70258299b
|
[] |
no_license
|
cran/RankAggreg
|
94e9018c8130792627aa9421d57b9e234957cace
|
70e56b13ba4e718e0cf461aa90551c755ef88419
|
refs/heads/master
| 2021-06-04T10:53:01.236658
| 2020-05-09T19:10:03
| 2020-05-09T19:10:03
| 17,693,066
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
upd.prob.R
|
#' Update a probability matrix from Monte Carlo samples.
#'
#' For each column of `samples`, computes the relative frequency of every
#' element of `comp.list`, then returns the convex combination
#' (1 - weight) * v + weight * p, where p is the matrix of observed
#' frequencies (0 for elements never observed in a column).
#'
#' samples   : matrix of sampled items, one chain per column.
#' v         : current probability matrix (length(comp.list) x ncol(samples)).
#' weight    : mixing weight in [0, 1] given to the new frequencies.
#' comp.list : the universe of items whose frequencies are tracked (row order).
`upd.prob` <-
function(samples, v, weight, comp.list)
{
    s <- nrow(samples)
    keys <- as.character(comp.list)
    # One frequency vector per column; tabulate once instead of twice
    # (the original called table(x) two times per column, and initialized
    # p with a zero matrix that was immediately overwritten).
    p <- apply(samples, 2, function(x) {
        tab <- table(x)
        as.numeric(tab[match(keys, names(tab))]) / s
    })
    # Items absent from a column produce NA via match(); they have frequency 0.
    p[is.na(p)] <- 0
    (1 - weight) * v + weight * p
}
|
89b855522525450c2367335560cc29d56a182395
|
a34c06ae520679dd370c0bd75aa9a5b83d0e622f
|
/svmAir.R
|
4be401b1aed9a7532f0339005dac1780e330eedb
|
[] |
no_license
|
WaughB/LIS4761_Data_Mining
|
f1f0fb6246ff9d81a55accc6149a0db902d23227
|
e1d6ad351eaf960c1e1c86b5bb908e47392a57aa
|
refs/heads/master
| 2020-06-11T14:37:15.378050
| 2019-07-29T16:31:23
| 2019-07-29T16:31:23
| 194,000,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,625
|
r
|
svmAir.R
|
# Brett W.
# LIS4761 - Data Mining
# Lesson 8: Support Vector Machines
# SVM Homework -- Using SVM on an Air Quality Dataset
# Necessary libraries.
# NOTE(review): prefer library() over require() for mandatory dependencies;
# require() merely returns FALSE when a package is missing.
require(dplyr)
require(kernlab)
require(ggplot2)
require(caret)
require(e1071)
require(gridExtra)
# Figure out averages of Ozone and Solar.R (used for mean imputation below).
ozone_mean <- airquality %>%
  summarize(ozone_mean = mean(airquality$Ozone, na.rm = TRUE))
solar_mean <- airquality %>%
  summarize(solar_mean = mean(airquality$Solar.R, na.rm = TRUE))
# Create dataframe.
df <- airquality
# Replace NA's in Ozone and Solar.R with the (truncated integer) average values.
ozone_mean <- as.integer(ozone_mean)
df$Ozone[is.na(df$Ozone)] <- ozone_mean
solar_mean <- as.integer(solar_mean)
df$Solar.R[is.na(df$Solar.R)] <- solar_mean
# Seperate data into training and testing set.
# First 80% of rows train, last 20% test; rows are not shuffled.
sz <- round(.8 * dim(df)[1])
training_set <- df[1:sz,]
testing_set <- df[-(1:sz),]
# Create model using KSVM (kernlab), regressing Ozone on all other columns.
ksvmMod <- ksvm(training_set$Ozone ~ ., training_set)
ksvmMod
ksvmPred <- predict(ksvmMod, testing_set)
ksvmResults <- table(ksvmPred, testing_set$Ozone)
# Calculate RMSE for KSVM.
ksvmRMSE <- RMSE(testing_set$Ozone, ksvmPred)
# Graph for KSVM. Note: color/size encode a single scalar RMSE, so all
# points share one color/size; the plot mainly shows Temp vs Wind.
ksvmGraph <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=ksvmRMSE, size = ksvmRMSE)) +
  ggtitle("KSVM Performance")
# Create model using SVM (e1071) with 10-fold cross-validation.
svmMod <- svm(Ozone ~ ., data = df, kernal="radial", cost=25, na.action =na.omit, scale = TRUE, cross=10)
print(svmMod)
# Results of training set.
pred_train_svm <- predict(svmMod, training_set)
# Results of test set.
pred_test_svm <- predict(svmMod, testing_set)
# Calculate the RMSE for the SVM.
svmRMSE <- RMSE(testing_set$Ozone, pred_test_svm)
# Graph for SVM.
svmGraph <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=svmRMSE, size = svmRMSE)) +
  ggtitle("SVM Performance")
# Create model using LM (generalized linear model with repeated 10-fold CV).
ControlParameters <-trainControl(method="repeatedcv",
                                 number=10,
                                 repeats=10)
lmMod <-train(Ozone ~ .,
              data=training_set,
              method="glm",
              trControl= ControlParameters
)
lmMod
# Calculate the RMSE for the LM.
lmRMSE <- lmMod$results$RMSE
# Graph for LM.
lmGraph <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=lmRMSE, size = lmRMSE)) +
  ggtitle("LM Performance")
# Combine three previous graphs for comparison.
grid.arrange(ksvmGraph, svmGraph, lmGraph, nrow = 3)
# Create goodOzone variable. Zero if lower than average,
# one if higher than usual.
df$goodOzone <- NA
df$goodOzone[df$Ozone >= ozone_mean] <- 1
df$goodOzone[df$Ozone < ozone_mean] <- 0
## Create KSVM based on goodOzone.
# Seperate data into training and testing set (re-split after adding goodOzone).
sz <- round(.8 * dim(df)[1])
training_set <- df[1:sz,]
testing_set <- df[-(1:sz),]
ksvmMod2 <- ksvm(training_set$goodOzone ~ ., training_set)
ksvmMod2
ksvmPred2 <- predict(ksvmMod2, testing_set)
# NOTE(review): this tabulates predictions against Ozone, but the model
# predicts goodOzone — presumably testing_set$goodOzone was intended.
ksvmResults2 <- table(ksvmPred2, testing_set$Ozone)
# Calculate RMSE for KSVM.
ksvmRMSE2 <- RMSE(testing_set$goodOzone, ksvmPred2)
# Graph for KSVM.
ksvmGraph2 <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=ksvmRMSE2, size = ksvmRMSE2)) +
  ggtitle("Second KSVM Performance")
## Create SVM based on gooOzone. *** TODO ***
svmMod2 <- svm(goodOzone ~ ., data = df, kernal="radial", cost=25, na.action =na.omit, scale = TRUE, cross=10)
print(svmMod2)
# Results of training set.
pred_train_svm2 <- predict(svmMod2, training_set)
# Results of test set.
pred_test_svm2 <- predict(svmMod2, testing_set)
# Calculate the RMSE for the SVM.
# NOTE(review): compares against Ozone although the model predicts goodOzone —
# presumably RMSE(testing_set$goodOzone, pred_test_svm2) was intended.
svmRMSE2 <- RMSE(testing_set$Ozone, pred_test_svm2)
# Graph for SVM.
# NOTE(review): plots svmRMSE from the *first* SVM model; svmRMSE2 was
# presumably intended here.
svmGraph2 <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=svmRMSE, size = svmRMSE)) +
  ggtitle("SVM Performance")
## Create Naive-Bayes based on goodOzone.
nb <- naiveBayes(goodOzone ~ ., data = training_set)
test_nb <- predict(nb, testing_set)
# nbResults <- table(test_nb, testing_set$goodOzone)
nbRMSE <- RMSE(testing_set$goodOzone, test_nb)
# Calculate the RMSE for the Naive-Bayes.
nbGraph <- ggplot(df) +
  geom_point(aes(x=Temp, y=Wind, color=nbRMSE, size = nbRMSE)) +
  ggtitle("Naive-Bayes Performance")
# Combine three previous graphs for comparison.
grid.arrange(ksvmGraph2, svmGraph2, nbGraph, nrow = 3)
# Best model?
# The KSVM had the lowest RMSE of the models. It is
# worth noting that I did cross-validiate the other models,
# but I did not cross-validiate the KSVM.
# The linear model did not perform well relative to the
# KSVM and SVM. Because I did not eliminate more variables,
# the linear model had more difficulties with the greater
# number of features.
|
40eb02472b58a4d5d81bb7ccf7a5af59c76c1d82
|
6ba493ca9129518a3a9d52826beb6d3404b140da
|
/R/CAAMoonPhases_MeanPhase.R
|
da75e3ed600c17ccf13cefac0ba28c03d3b1aa44
|
[] |
no_license
|
helixcn/skycalc
|
a298e7e87a46a19ba2ef6826d611bd9db18e8ee2
|
2d338b461e44f872ceee13525ba19e17926b2a82
|
refs/heads/master
| 2021-06-16T08:54:46.457982
| 2021-03-25T02:15:38
| 2021-03-25T02:15:38
| 35,885,876
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
CAAMoonPhases_MeanPhase.R
|
# Thin R wrapper around the registered native routine "CAAMoonPhases_MeanPhase".
# k: phase index forwarded unchanged to the compiled code; the actual
# astronomical computation (AA+ moon-phases algorithm, per the package name)
# lives in the C/C++ implementation — semantics cannot be verified from here.
CAAMoonPhases_MeanPhase <-
function(k){
	.Call("CAAMoonPhases_MeanPhase", k)
}
|
966697e204a671415710cfdd46319ecdfe6ca2e2
|
2851571531c96d50b4257aeef037b5742011284b
|
/man/g.sib.det.Rd
|
0b3f1e56f60821ef7aed818a216914fb7d9ed441
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
wadpac/GGIR
|
b1771d5be02bdad7905514ad09909d0edc501e35
|
b596ca209cd97e32149e60621dcf7e8675a20628
|
refs/heads/master
| 2023-08-21T06:08:15.190626
| 2023-08-09T18:00:32
| 2023-08-09T18:00:32
| 88,166,964
| 87
| 73
|
Apache-2.0
| 2023-09-14T10:54:48
| 2017-04-13T13:20:23
|
R
|
UTF-8
|
R
| false
| false
| 1,900
|
rd
|
g.sib.det.Rd
|
\name{g.sib.det}
\alias{g.sib.det}
\title{
Sustained inactivity bouts detection
}
\description{
Detects sustained inactivity bouts. This function is not intended
for direct use by the package user
}
\usage{
g.sib.det(M, IMP, I, twd = c(-12, 12),
acc.metric = "ENMO", desiredtz = "",
myfun=c(), sensor.location = "wrist", params_sleep = c(), zc.scale = 1, ...)
}
\arguments{
\item{M}{
Object produced by \link{g.getmeta}
}
\item{IMP}{
Object produced by \link{g.impute}
}
\item{I}{
Object produced by \link{g.inspectfile}
}
\item{twd}{
Vector of length 2, indicating the time window to consider
as hours relative to midnight.
}
\item{acc.metric}{
Which one of the metrics do you want to consider to analyze L5.
The metric of interest need to be calculated in
M (see \link{g.part1})
}
\item{desiredtz}{
See \link{g.part3}
}
\item{myfun}{
External function object to be applied to raw data.
See details \link{applyExtFunction}.
}
\item{sensor.location}{
Character to indicate sensor location, default is wrist.
If it is hip HDCZA algorithm also requires longitudinal axis of sensor to be
between -45 and +45 degrees.
}
\item{params_sleep}{
See \link{g.part3}
}
\item{zc.scale}{
Used for zero-crossing counts only. Scaling factor to be applied after
counts are calculated (GGIR part 3). See \link{GGIR}.
}
\item{...}{
Any argument used in the previous version of g.sib.det, which will now
be used to overrule the arguments specified with the parameter objects.
}
}
\value{
\itemize{
\item output = Dataframe for every epoch a classification
\item detection.failed = Boolean whether detection failed
\item L5list = L5 for every day (defined from noon to noon)
}
}
\keyword{internal}
\author{
Vincent T van Hees <v.vanhees@accelting.com>
}
|
3a0edebcf820ada96e377b817218c17ce3ffeeec
|
646ff7456fb2c84b5bb8af6ef4448f71b2a730f7
|
/man/appointments.Rd
|
d9a15c4b2de0fce728f2446ad170646355ea8b76
|
[
"MIT"
] |
permissive
|
Rkabacoff/qacData
|
edb6efdc04bb1814f579f89a24cf28373fe39bf6
|
3fa95cf72003972c98f035735ae3367aa69b0d0c
|
refs/heads/main
| 2022-03-26T01:35:36.463038
| 2022-02-23T19:06:40
| 2022-02-23T19:06:40
| 157,761,849
| 0
| 10
|
MIT
| 2018-11-30T01:03:33
| 2018-11-15T19:29:44
|
R
|
UTF-8
|
R
| false
| true
| 1,862
|
rd
|
appointments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appointments.R
\docType{data}
\name{appointments}
\alias{appointments}
\title{Medical Appointment No Shows}
\format{
A data frame with 110527 rows and 14 variables:
\describe{
\item{\code{PatientId}}{double. Identification of a patient.}
  \item{\code{AppointmentID}}{double. Identification of each appointment.}
\item{\code{Gender}}{factor. \code{Male, Female}.}
  \item{\code{ScheduledDay}}{datetime. The day someone called or registered
the appointment; this necessarily precedes the appointment itself.}
  \item{\code{AppointmentDay}}{double. The day of the actual appointment,
when the patient has to visit the doctor.}
\item{\code{Age}}{double. Age of the patient.}
\item{\code{Neighbourhood}}{character. Where the appointment takes place.}
\item{\code{Scholarship}}{integer. \code{0=FALSE, 1=TRUE}. Scholarship
is a social welfare program providing financial aid to poor Brazilian families.}
\item{\code{Hypertension}}{integer. \code{0=FALSE, 1=TRUE}.}
\item{\code{Diabetes}}{integer. \code{0=FALSE, 1=TRUE}.}
\item{\code{Alcoholism}}{integer. \code{0=FALSE, 1=TRUE}.}
\item{\code{Handcap}}{integer. \code{0=FALSE, 1=TRUE}.}
\item{\code{SMS_received}}{integer. \code{0=FALSE, 1=TRUE}.
1 or more messages sent to the patient.}
\item{\code{No_show}}{factor. \code{Yes, No.}}
}
}
\source{
Joni Hoppen, Kaggle Medical Appointment No Shows
\href{https://www.kaggle.com/joniarroba/noshowappointments}{https://www.kaggle.com/joniarroba/noshowappointments}.
}
\usage{
appointments
}
\description{
Predicting no-show medical appointments
}
\details{
This Kaggle competition was designed to challenge participants to
predict office no-shows. It is also a good dataset to practice
date and time manipulation.
}
\examples{
summary(appointments)
}
\keyword{datasets}
|
35cf3e91f685ff5c262af7f59387e26f1649ecb0
|
1b2f7c0ed5a4e06b580510cfa5a925c6c5d1c82f
|
/R Implementation/debseal_HW2.R
|
d264dcfa110b99d02bf51d532cab425765d4f236
|
[] |
no_license
|
debseal/DataMiningRep
|
ec176866494f3b2b92cdf29d9630b97ca24b4487
|
048a32a210e35e478a40c2fd1830910d48d2a995
|
refs/heads/master
| 2020-12-24T14:27:31.447609
| 2014-11-07T09:01:00
| 2014-11-07T09:01:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84
|
r
|
debseal_HW2.R
|
### R code from vignette source 'C:/Users/debpriya.seal/Documents/debseal_HW2.Rnw'
|
797c3c075f3e4b941126cfd78c65b26e6d9b5962
|
c597f0e84f86372fa72d3cefe140480a824cf605
|
/lib/RGB_GBM/RGB_feature.R
|
21e45eb050d58ea214793ad5a0d42b8f1d37a519
|
[] |
no_license
|
TZstatsADS/Spring2018-Project3-Group8
|
51c0745e3371372d4082985269af5bc2fb380060
|
814bc21b7e921ef440156f44c0c028f4728a21b4
|
refs/heads/master
| 2021-03-27T09:51:24.698459
| 2018-03-28T23:19:09
| 2018-03-28T23:19:09
| 123,044,634
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
RGB_feature.R
|
#This function is used to extract RGB color histogram features
# For each image "0001.jpg" ... "NNNN.jpg" under `path`, builds an
# 8x8x8-bin RGB color histogram and returns one row of 512 normalized bin
# frequencies per image (rows bound together with plyr::ldply).
# path   : directory containing the sequentially numbered .jpg images.
# export : when TRUE, also writes the feature matrix to output/rgb_feature.csv.
RGB_feature <- function(path, export=T){
  library(EBImage)
  library(plyr)
  #define bins of color histogram for each channel
  nR <- 8
  nG <- 8
  nB <- 8
  Rbin <- seq(0, 1, length.out=nR)
  Gbin <- seq(0, 1, length.out=nG)
  Bbin <- seq(0, 1, length.out=nB)
  n_images<- length(list.files(path))
  # Histogram for the single image whose zero-padded index is `pos`.
  image_hist_count<-function(pos){
    mat <- imageData(readImage(paste0(path,sprintf("%04.f",pos), ".jpg")))
    # NOTE(review): this replicates one plane into all three channels, which
    # is only correct for grayscale input; a true RGB image already carries 3
    # channels — confirm the expected image format.
    mat_as_rgb <-array(c(mat,mat,mat),dim = c(nrow(mat),ncol(mat),3))
    count_rgb <- as.data.frame(table(factor(findInterval(mat_as_rgb[,,1], Rbin), levels=1:nR),
                                   factor(findInterval(mat_as_rgb[,,2], Gbin), levels=1:nG),
                                   factor(findInterval(mat_as_rgb[,,3], Bbin), levels=1:nB)))
    #find frequency in color histogram for specific combination
    rgb_feature <- as.numeric(count_rgb$Freq)/(nrow(mat)*ncol(mat))
    return(rgb_feature)
  }
  rgb_feature<-ldply(1:n_images,image_hist_count)
  ### output constructed features
  if(export){
    write.csv(rgb_feature, file = "output/rgb_feature.csv")
  }
  return(data.frame(rgb_feature))
}
# Script driver. NOTE(review): hard-coded user path ties this to one machine,
# and T should be spelled TRUE.
setwd('C:/Users/rolco/Desktop/project3')
RGB_features<-RGB_feature('data/images/',T)
|
c54c430d7d067b1960d72d0a53df2d9e4035855b
|
3058305c903e8406843cec0a69b27e67d8ebe5c0
|
/Functionality.R
|
cc189a23077fa9a0563432c1a7b69a3db4b4fc95
|
[] |
no_license
|
lc19940813/CapstoneProject
|
39a564af2664bb3f9a79814de6c207158c39533a
|
7fa848c59dc9628c3b19b7c1ccfd89cbfa077bda
|
refs/heads/master
| 2021-01-17T16:02:39.611226
| 2016-06-10T02:03:03
| 2016-06-10T02:03:03
| 60,817,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,709
|
r
|
Functionality.R
|
# This R file contains all the definition of functions of the main R file
# Chao Liu
# 06/2016
library(tm)
library(dplyr)
library(SnowballC)
library(wordcloud)
library(ngram)
training <- read.csv("train.csv",stringsAsFactors = FALSE)
wordtable <- read.csv("bigram.csv",stringsAsFactors = FALSE)
wordtable3 <- read.csv("trigram.csv",stringsAsFactors = FALSE)
wordtable4 <- read.csv("quadgram.csv",stringsAsFactors = FALSE)
worddict <- list(wordtable,wordtable3,wordtable4)
# This function takes a small part of the original data out as our training dataset
# Reads the first `*_lines` lines of each SwiftKey corpus file (twitter,
# blogs, news) and writes each excerpt to a test directory via write.table.
# NOTE(review): all six paths are hard-coded to one machine — consider
# parameterizing the input/output directories.
writeTestDocument <- function(twitter_lines, blogs_lines, news_lines){
  twitter <- character()
  con <- file("F://Data Science//Capstone//Coursera-SwiftKey//final//en_US//en_US.twitter.txt","r")
  twitter <- append(twitter, readLines(con, twitter_lines))
  write.table(twitter, file = "F://Data Science//Capstone//Coursera-SwiftKey//test//test_twitter.txt")
  close(con)
  blogs <- character()
  con <- file("F://Data Science//Capstone//Coursera-SwiftKey//final//en_US//en_US.blogs.txt","r")
  blogs <- append(blogs, readLines(con, blogs_lines))
  write.table(blogs, file = "F://Data Science//Capstone//Coursera-SwiftKey//test//test_blogs.txt")
  close(con)
  news <- character()
  con <- file("F://Data Science//Capstone//Coursera-SwiftKey//final//en_US//en_US.news.txt","r")
  news <- append(news, readLines(con, news_lines))
  write.table(news, file = "F://Data Science//Capstone//Coursera-SwiftKey//test//test_news.txt")
  close(con)
  # Tidy up the function environment (redundant — locals vanish on return).
  rm(con);
  rm(twitter);rm(blogs);rm(news);
}
# This function will clean the input words for further counting and calculation
# Pipeline: strip digits, lowercase, drop English stopwords and a short
# profanity list, strip punctuation, stem, then re-join the tokens into one
# space-separated string (ngram::concatenate).
clean_words <- function(words){
  words <- removeNumbers(words)
  words <- tolower(words)
  words <- removeWords(words, stopwords("english"))
  words <- removeWords(words, c("shit","piss","fuck","cunt","cocksucker","motherfucker","tits"))
  words <- removePunctuation(words)
  # NOTE(review): SnowballC::wordStem stems a character vector element-wise;
  # applied to a whole sentence held in one element it does not stem each
  # word — confirm whether per-word stemming (strsplit first) was intended.
  words <- wordStem(words)
  words <- concatenate(strsplit(words," ")[[1]])
  words
}
# The following functions can get the nearest number of the words that are able to constitute a dictionary
# to cover the corpus with the given coverage ratio
# Index of the element of `freq` whose value is closest to `target`
# (first such index on ties, per which.min).
nearest_to_target <- function(freq, target){
  distances <- abs(freq - target)
  which.min(distances)
}
# Number of most-frequent words needed so that their cumulative frequency
# covers (approximately) `ratio` of the total frequency mass.
# freq  : word frequencies (any order; sorted descending internally).
# ratio : target coverage in (0, 1].
# Returns the dictionary size whose coverage is nearest to `ratio`.
cover_ratio <- function(freq,ratio){
  freq <- sort(freq, decreasing = TRUE)
  # cumsum replaces the original O(n^2) loop of sum(freq[1:i]) calls;
  # the nearest-index search is inlined (was nearest_to_target()).
  coverage <- cumsum(freq) / sum(freq)
  which.min(abs(coverage - ratio))
}
# Cosine of the angle between vectors x and y (note: despite the name, this
# is a *similarity* — 1 for parallel vectors, 0 for orthogonal ones).
dissimilarity <- function(x, y){
  dot_product <- sum(x * y)
  norm_x <- sqrt(sum(x^2))
  norm_y <- sqrt(sum(y^2))
  dot_product / (norm_x * norm_y)
}
# This function will remove those uncommon words by steps since the grepl function will be out of the memory
# if we directly combine all the words together
# Drops every row of `wordtable` whose `ngrams` string matches any word in
# `uncommon_words`, alternating the word list in chunks of `step` so the
# "|"-joined regex pattern stays small.
# BUG FIX: the original while/tail-chunk logic broke when length(uncommon_words)
# was an exact multiple of `step` — the final slice (1+k*step):l ran backwards,
# producing NA entries (and the literal string "NA") in the pattern.
# NOTE(review): words are interpolated into the regex unescaped; words
# containing regex metacharacters will misbehave.
remove_ngram_uncommon_words_by_step <- function(wordtable,uncommon_words,step = 1000){
  l <- length(uncommon_words)
  if (l == 0L) return(wordtable)
  for (start in seq(1L, l, by = step)) {
    end <- min(start + step - 1L, l)
    # paste(collapse = "|") is the stdlib equivalent of ngram::concatenate.
    pattern <- paste(uncommon_words[start:end], collapse = "|")
    wordtable <- wordtable[!grepl(pattern = pattern, x = wordtable$ngrams), ]
  }
  wordtable
}
# This function provides simple ngram model to prediect the next word
# The return value will be the 5 most probable words according to our training dataset
# (in practice up to 20 candidate continuations are returned; see `m` below).
# Relies on file-level globals: clean_words() and the n-gram tables in `worddict`.
pred_ngram <- function(words){
  words <- clean_words(words)
  # No usable context left after cleaning: " " is the "no prediction" sentinel.
  if(wordcount(words) == 0) return(" ")
  # n = order of the n-gram table to query (2..4 -> bigram/trigram/quadgram).
  n <- wordcount(words) + 1
  if(n > 4) stop("Model has too many parameters")
  res <- character()
  # Rows of the n-gram table whose prefix equals the cleaned context.
  index <- grep(paste0("^",words," .+"), worddict[[n-1]]$ngrams)
  if(length(index) == 0){
    return(" ")
  }
  # Cap the number of candidates at 20.
  m <- min(length(index),20)
  temp <- worddict[[n-1]][index,]
  #removeWords(temp$ngrams[1:n],paste(word, ""))
  # The predicted word is the last token of each matching n-gram.
  for(i in 1:m){
    res[i] <- strsplit(temp$ngrams[i]," ")[[1]][length(strsplit(temp$ngrams[i]," ")[[1]])]
  }
  res
}
# This function will implement the simple backoff model that predicts the next word according to words
# collected from Corpus without any data smoothing or adjustment
# Strategy: while the current context yields no n-gram match (" " sentinel
# from pred_ngram), drop the left-most word and retry; give up and return
# " " when no context words remain.
simple_backoff_model <- function(input){
  while(pred_ngram(input)[1] == " ") {
    # Back off: shorten the context by its first word.
    input <- concatenate(strsplit(input," ")[[1]][-1])
    if(wordcount(input) == 0 ) {
      return(" ")
    }
  }
  pred_ngram(input)
}
# This function parses the input sentense and uses the simple backoff model to predict
# Only the last three words of a longer input are used as context, because
# the largest table available is the quadgram table.
pred_model <- function(input){
  l <- wordcount(input)
  if(l >= 4){
    words <- concatenate(strsplit(input," ")[[1]][(l-2):l])
  }
  else {
    words <- input
  }
  res <- simple_backoff_model(words)
  # Fall back to the 20 most frequent unigrams when backoff finds nothing.
  if(res[1] == " ") res <- training$name[1:20]
  res
}
# This function will count the frequency of given words pair
# Looks the (1- to 4-word) phrase up in the file-level frequency tables:
# `training` for unigrams, `worddict[[n-1]]` for higher-order n-grams.
# Returns 0 for unknown phrases; errors for phrases longer than 4 words.
count_word_freq <- function(words){
  n <- wordcount(words)
  if(n == 0) return(0)
  if(n > 4) stop("Model has too many parameters")
  if(n == 1){
    # Exact unigram match against the training vocabulary.
    index <- grep(paste0("^",words,"$"),training$name)
    if(length(index) == 0) return(0)
    else{
      return(training$freq[index])
    }
  }
  else{
    # Prefix match in the n-gram table of the same order.
    # NOTE(review): the trailing space means an n-word phrase only matches
    # entries *longer* than n words; since worddict[[n-1]] holds exactly
    # n-word entries, this may never match — verify against the n-gram CSVs
    # whether an exact-match pattern ("$" anchor) was intended.
    temp <- paste0("^",words," ")
    index <- grep(temp,worddict[[n - 1]]$ngrams)
    if(length(index) == 0) return(0)
    else{
      return(worddict[[n - 1]]$freq[index])
    }
  }
}
# This function will implement the Stupid Backoff model created by Google
# Score of `word` following context `input`: the relative n-gram frequency
# when the full (context + word) phrase is observed, otherwise 0.4 times the
# score with the context shortened by one word, recursing down to the
# unigram relative frequency. Returns 0 for out-of-vocabulary words.
prob_ngrams <- function(input, word){
  word_f <- count_word_freq(word)
  if(word_f == 0) return(0)
  # Empty context: plain unigram probability.
  if(wordcount(input) == 0) return(word_f / sum(training$freq))
  input <- clean_words(input)
  words <- concatenate(input,word)
  l <- count_word_freq(words)
  if(l > 0){
    return(l/count_word_freq(input))
  }
  else{
    # Back off: drop the left-most context word and apply the 0.4 penalty.
    input <- concatenate(strsplit(input," ")[[1]][-1])
    return(0.4*prob_ngrams(input,word))
  }
}
}
# This function will compute scores after smoothing by Stupid backoff model
prob <- function(input){
input <- clean_words(input)
x <- training$name
sapply(x, function(word){prob_ngrams(input,word)})
}
# This function is the ultimate prediction model according to Stupid backoff model
# Returns the 5 vocabulary words with the highest Stupid-Backoff scores for
# the given context; only the last three words of longer inputs are used,
# since the largest table available is the quadgram table.
pred_model_stupid <- function(input){
  l <- wordcount(input)
  if(l >= 4){
    words <- concatenate(strsplit(input," ")[[1]][(l-2):l])
  }
  else {
    words <- input
  }
  x <- prob(words)
  # BUG FIX: the original did `which(prob == y[i])`, comparing the *function*
  # `prob` to a numeric score (a runtime error), and would also return
  # multiple indices on tied scores. order() yields the top-5 indices directly.
  top <- order(x, decreasing = TRUE)[1:5]
  training$name[top]
}
|
da4b00b758f6a8bf3ff830bccd3c6ffb9b29f1de
|
60fc8a667347948b196fae19f4a50e8cd663577b
|
/TimeSeries/R_cisco1.R
|
746f651992dd5fd4e39f53ddf7c0297ce996481e
|
[] |
no_license
|
kumarivin/GitHub
|
85f9bb12d4c1be6cf193b77dc361d92281f8f7ae
|
5b118b161a612eacedc5338d984a53d5b833a1ba
|
refs/heads/master
| 2021-01-17T07:40:09.810792
| 2016-10-04T03:07:18
| 2016-10-04T03:07:18
| 37,492,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,861
|
r
|
R_cisco1.R
|
# LOAD LIBRARIES
# you may need to install packages, if this is the first time you # use them. Select Packages > Install Packages in R/RStudio)
# NOTE(review): hard-coded setwd() ties this script to one machine.
setwd("C:/Users/TG/Documents/CSC 425/GNP_60_12_q")
library(tseries)
library(zoo)
#create new R dataframe
# NOTE(review): despite the "cisco" naming throughout, the file read here is
# a monthly unemployment-rate series — confirm which dataset is intended.
cisco = read.table('unemp_rate_m2005_2015.csv', header=T, sep=',')
# create time series for cisco prices
ciscots = zoo(cisco$rate, as.Date(as.character(cisco$date), format = "%m/%d/%Y"))
#To retrieve only dates use
time(ciscots)
# Retrieve start date
start(ciscots)
# Retrieve End date
end(ciscots)
# sort data in chronological order
# set variable Date as time/date variable
# NOTE(review): this block uses cisco$Date (capital D) with format "%m/%d/%y",
# while the load above used cisco$date with "%m/%d/%Y" — one of the two is
# likely wrong; verify the CSV's actual column name and date format.
cisco$Date=as.Date(as.character(cisco$Date), format = "%m/%d/%y")
cisco=cisco[order(cisco$Date),]
#Creating new variables
# create lagged series using function lag(tsobject, k=-1);
pricelag = lag(ciscots, k=-1);
head(pricelag)
# diff = p_t - p_(t-1);
pricedif = diff(ciscots)
#compute simple returns ret = (p_t-p_(t-1))/p_(t-1)
ret=(ciscots-pricelag)/pricelag
#Example of data analysis for cisco dataset
#DEFINE LOG RETURNS
#rts is a time series object since it is created from a TS object
rts = diff(log(ciscots))
#to retrieve numerical values from time series use coredata()
# rt is a numerical vector (no date information)
rt=coredata(rts)
#print first 6 values
head(rt)
# LOAD LIBRARIES
# Load fBasics packages into current session
# To install the package the first time,
# select Tools from top Menu and select Install Packages
library(fBasics)
# COMPUTE SUMMARY STATISTICS
basicStats(rt)
# CREATE HISTOGRAM
# OPTIONAL creates 2 by 2 display for 4 plots
# par(mfcol=c(2,2))
hist(rt, xlab="Cisco log returns", prob=TRUE, main="Histogram")
# add approximating normal density curve
xfit<-seq(min(rt),max(rt),length=40)
yfit<-dnorm(xfit,mean=mean(rt),sd=sd(rt))
lines(xfit, yfit, col="blue", lwd=2)
# CREATE NORMAL PROBABILITY PLOT
qqnorm(rt)
qqline(rt, col = 2)
# CREATE TIME PLOTS
# simple plot where x-axis is not labeled with time
plot(rt)
# use time series object rts to draw time plot indexed with time
plot(rts)
# creates subsets of data for a certain period of time
rts_10 = window(rts, start = as.Date("2010-01-01"), end = as.Date("2010-12-31"))
# plot the new subset
plot(rts_10, type='l', ylab="log returns", main="Plot of 2010 data")
# NORMALITY TESTS
# Perform Jarque-Bera normality test.
normalTest(rts,method=c("jb"))
# COMPUTE ACF AND PLOT CORRELOGRAM
#prints acf values to console
acf(rt, plot=F)
#plot acf values on graph (correlogram)
acf(rt, plot=T)
# COMPUTE LJUNG-BOX TEST FOR WHITE NOISE (NO AUTOCORRELATION)
# to Lag 6
Box.test(rts,lag=6,type='Ljung')
# to Lag 12
Box.test(rts,lag=12,type='Ljung')
|
70de598038a8fce07c32a367fb22bbf01675b90b
|
5b153389e67e59a30aebf6a0d84b69fd89f805d4
|
/quantutils/man/is.missing.Rd
|
8b9dd93e837717cdcb3daf4cd872a305ffef01d2
|
[] |
no_license
|
dengyishuo/dengyishuo.github.com
|
480d9b5911851e56eca89c347b7dc5d83ea7e07d
|
86e88bbe0dc11b3acc2470206613bf6d579f5442
|
refs/heads/master
| 2021-03-12T23:17:10.808381
| 2019-08-01T08:13:15
| 2019-08-01T08:13:15
| 8,969,857
| 41
| 35
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
rd
|
is.missing.Rd
|
\name{is.missing}
\alias{is.missing}
\title{Is an object missing}
\usage{
is.missing(x)
}
\arguments{
\item{x}{any object}
}
\value{
Logical
}
\description{
Is an object missing
}
\details{
Is an object missing
}
\author{
Weilin Lin
}
|
45bb9280a6906dc5265ba356a846aed3929659bc
|
5d232ae6dc8bedba3bc3869b280c45c30a9064a5
|
/simulations/independenceTestChiSqMultinomial.R
|
f5e5178fa9be2ce5a750358874aacd45f0943312
|
[] |
no_license
|
jfiksel/compregpaper
|
727502d50fdf64359b97be823ccbaa00d40431cf
|
d79a54d035a3be7c0f90f6fd84bbf07596f940a6
|
refs/heads/master
| 2023-03-26T07:30:32.012691
| 2021-03-24T14:24:43
| 2021-03-24T14:24:43
| 258,839,237
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,698
|
r
|
independenceTestChiSqMultinomial.R
|
### Set seed for simulations; pre-draw one seed per replicate so each
### replicate is independently reproducible.
### NOTE(review): this script uses dplyr verbs (%>%, filter, group_by,
### summarize) but never loads dplyr — add library(dplyr) before running.
### NOTE(review): replace = F should be spelled replace = FALSE.
set.seed(123)
seeds <- sample(-1e6:1e6, 10000, replace = F)
C <- 3
#########################
# Null scenario: x and y drawn independently with equal cell probabilities,
# so the rejection rate of the chi-squared test estimates the type-I error.
p_list <- lapply(1:10000, function(index) {
    pvals <- lapply(c(100, 250, 500, 1000), function(n) {
        set.seed(seeds[index])
        ### Sample x: n one-hot draws from a uniform multinomial over C cells
        x <- t(rmultinom(n, 1, rep(1, C)))
        ### Simulate y from multinomial, independently of x
        y <- t(rmultinom(n, 1, rep(1, C)))
        # Convert one-hot rows to category labels 1..C.
        xcat <- max.col(x)
        ycat <- max.col(y)
        ### Get associated pvals from the chi-squared test of independence
        p <- chisq.test(table(xcat, ycat))$p.value
        return(data.frame(p = p,
                          n = n,
                          seed.index = index))
    })
    return(do.call(rbind, pvals))
})
p_df <- do.call(rbind, p_list)
# Empirical rejection rate at alpha = .05 for increasing replicate counts.
for(i in c(1000, 2000, 5000, 10000)) {
    print(p_df %>% filter(seed.index <= i) %>% group_by(n) %>% summarize(reject = mean(p <= .05)) )
}
# Second scenario: still independent, but with unequal cell probabilities.
p_list <- lapply(1:10000, function(index) {
    pvals <- lapply(c(100, 250, 500, 1000), function(n) {
        set.seed(seeds[index])
        ### Sample x from a multinomial with unequal cell probabilities
        x <- t(rmultinom(n, 1, c(.45, .2, .35)))
        ### Simulate y from multinomial, independently of x
        y <- t(rmultinom(n, 1, c(.25, .6, .15)))
        xcat <- max.col(x)
        ycat <- max.col(y)
        ### Get associated pvals
        p <- chisq.test(table(xcat, ycat))$p.value
        return(data.frame(p = p,
                          n = n,
                          seed.index = index))
    })
    return(do.call(rbind, pvals))
})
p_df <- do.call(rbind, p_list)
for(i in c(1000, 2000, 5000, 10000)) {
    print(p_df %>% filter(seed.index <= i) %>% group_by(n) %>% summarize(reject = mean(p <= .05)) )
}
|
efe0356dbb05479edbdf441c2dc227eb331fe485
|
8c1834e19513a9d6f86808548fd473a9c0bfe0cf
|
/Infnet-Analytics/MBA Big Data - Analytics com R (Aulas 07 e 08)/Arquivos Etapa 04b/Etapa_04d_(Geolocalização 0).R
|
7df8db30a26d54434263bb05693b963d6c17229d
|
[] |
no_license
|
xBarbosa/Data-Analytics
|
d8f63137a0ea3e4e4677939e1e466673746f5a8a
|
d6bdf4cff3804312a00c1022c3ccb2ee5583ad52
|
refs/heads/master
| 2020-05-01T18:55:36.169279
| 2019-04-06T12:12:29
| 2019-04-06T12:12:29
| 177,635,035
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,372
|
r
|
Etapa_04d_(Geolocalização 0).R
|
# Course: Big Data Analytics with R
# -------------------------
# EXAMPLE OF A VORONOI DIAGRAM
# DATASET: U.S.A AIRPORT LOCATIONS
# SOURCE: http://flowingdata.com/2016/04/12/voronoi-diagram-and-delaunay-triangulation-in-r/
# D3.js SIMILAR: http://bl.ocks.org/mbostock/4360892
# -------------------------
# Installing packages
#install.packages("deldir", dependencies=TRUE)
#install.packages(c("sp", "maps", "maptools", "mapproj"), dependencies=TRUE)
# Loading packages
library(mapproj) # Map projections.
library(deldir) # Voronoi tessellation.
# Loading the data
airports <- read.csv("airport-locations.tsv", sep="\t", stringsAsFactors=FALSE)
# Use a helper function to keep only the contiguous states.
source("latlong2state.R")
airports$state <- latlong2state(airports[,c("longitude", "latitude")])
airports_contig <- na.omit(airports)
# Project lon/lat with an Albers projection (standard parallels 39 and 45).
airports_projected <- mapproject(airports_contig$longitude, airports_contig$latitude,
                                 "albers", param=c(39,45))
# Visualization as a dot map.
par(mar=c(0,0,0,0))
plot(airports_projected, asp=1, type="n", bty="n", xlab="", ylab="", axes=FALSE)
points(airports_projected, pch=20, cex=0.1, col="red")
# Visualization as a Voronoi diagram overlaid on the dot map.
vtess <- deldir(airports_projected$x, airports_projected$y)
plot(vtess, wlines="tess", wpoints="none", number=FALSE, add=TRUE, lty=1)
|
d232281c6a1e9e96f28456394449b217c7ba8ab3
|
41d472fd9969b74cef4577ce946125fe8948d5ad
|
/LmFit326maxwell.R
|
77afb51708a49635dd102a2535a91ef7ac80caa7
|
[] |
no_license
|
processis/learnML
|
787905a762e6f403135a00f2a67db31fb5be44d1
|
903bd304ead3301ed2138ce0a5e01e0a954ebd08
|
refs/heads/master
| 2023-02-08T19:45:14.992164
| 2023-01-31T12:49:20
| 2023-01-31T12:49:20
| 236,637,831
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
LmFit326maxwell.R
|
## Read Minitab-processed Maxwell67 data (2020-03-26).
SwMainCost <- read.csv("Maxwell326es.CSV")

## Simple linear regression (ISLR section 3.6.2):
## log corrected effort regressed on log total function points.
lm.fit <- lm(ln.acorreff. ~ ln.totfp., data = SwMainCost)
lm.fit
summary(lm.fit)
names(lm.fit)
coef(lm.fit)
confint(lm.fit)

## Confidence and prediction intervals at selected ln(totfp) values.
predict(lm.fit, data.frame(ln.totfp. = c(3, 5, 7)), interval = "confidence")
predict(lm.fit, data.frame(ln.totfp. = c(3, 5, 7)), interval = "prediction")

## Scatter plot with the least-squares regression line.
attach(SwMainCost)
plot(ln.totfp., ln.acorreff.)
abline(lm.fit)
abline(lm.fit, lwd = 3, col = "red")     # 3x bold, in red
plot(ln.totfp., ln.acorreff., pch = 20)  # different plotting symbol
plot(ln.totfp., ln.acorreff., pch = "+") # different plotting symbol
plot(1:20, 1:20, pch = 1:20)             # symbol gallery, for fun

## 3.6.2: multiple linear regression, adding project class (pjcl).
lm.fit <- lm(ln.acorreff. ~ ln.totfp. + pjcl, data = SwMainCost)
lm.fit
summary(lm.fit)
coef(lm.fit)
confint(lm.fit)
# library(car)
# vif(lm.fit)

## 3.6.5: non-linear (quadratic) transform of the predictor.
lm.fit2 <- lm(ln.acorreff. ~ ln.totfp. + I(ln.totfp.^2))
summary(lm.fit2)

## Use ANOVA to test whether the quadratic fit (lm.fit2) improves on the
## linear fit.
attach(SwMainCost)
# BUG FIX: the original assigned a bare formula here
# (`lm.fit=(ln.acorreff. ~ ln.totfp.)`), so summary() described a formula
# object and anova(lm.fit, lm.fit2) failed. Fit the model instead.
lm.fit <- lm(ln.acorreff. ~ ln.totfp., data = SwMainCost)
summary(lm.fit)
anova(lm.fit, lm.fit2)
# Original note claimed "P so big 1.8, so no difference" — p-values cannot
# exceed 1; re-check the ANOVA output, but a large p-value would indeed mean
# the quadratic term adds nothing.
da3afc7bb3e0478a446e9856b276e6c51708e7bd
|
f581a1cec7e634730dc759dafe2c59caa6fc064d
|
/R/lpa.R
|
6158279654a0018b2c3c0eab13b5c631f6afd44f
|
[] |
no_license
|
ebmtnprof/rties
|
c02919b4ce78a5ac1342d2a52e8baaec427f2390
|
fae56523593318ded0d7d38c8533f39515711dfe
|
refs/heads/master
| 2022-09-17T08:06:59.139907
| 2022-08-23T00:41:03
| 2022-08-23T00:41:03
| 127,973,424
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,375
|
r
|
lpa.R
|
######## This file includes the function needed to estimate the latent profiles based on IC or CLO model parameters
#################### inspectProfiles
#' Provides information to help decide how many profiles to use for subsequent rties analyses.
#'
#' The function prints out the number of dyads in each profile for a specified number of profiles. It also prints out: 1) a figure showing the best clustering solution as indicated by BIC (e.g., the observed data separated into clusters, produced by mclust), 2) a line plot showing the content of the best solution (e.g., the mean parameter estimates for each profile) and 3) prototypical model-predicted trajectories for each profile. For the inertia-coordination model, it produces sets of prototypical examples by using the inertia-coordination parameters to predict temporal trajectories, with random noise added at each temporal step. This process is required because the inertia-coordination model only represents local dynamics and predictions bear no resemblance to observed variables without the addition of noise. An optional argument, "seed" sets the seed for the random number generator, so you can get the same plots each time. If the "seed" argument is used, then only one plot per profile is produced. For the coupled-oscillator, this step is not necessary and one prototypical trajectory is plotted for each profile.
#'
#' @param whichModel The name of the model that is being investigated (e.g., "inertCoord" or "clo")
#' @param prepData A dataframe that was produced with the "dataPrep" function.
#' @param paramEst A dataframe created by either indivInertCoord or indivClo containing the parameter estimates for each dyad.
#' @param n_profiles The number of latent profiles.
#' @param dist0name An optional name for the level-0 of the distinguishing variable (e.g., "Women"). Default is dist0.
#' @param dist1name An optional name for the level-1 of the distinguishing variable (e.g., "Men"). Default is dist1
#' @param plot_obs_name An optional name for the observed state variable to appear on plots (e.g., "Emotional Experience").
#' @param minMax An optional vector with desired minimum and maximum quantiles to be used for setting the y-axis range on the plots, e.g., minMax <- c(.1, .9) would set the y-axis limits to the 10th and 90th percentiles of the observed state variables. If not provided, the default is to use the minimum and maximum observed values of the state variables.
#' @param time_length An optional value specifying how many time points to plot across. Default is the 75th percentile for the time variable.
#' @param numPlots Only relevant for the inertCoord model. An optional value controlling how many random examples of each profile are produced. Default is 3.
#' @param seed Only relevant for the inertCoord model. An optional integer argument that sets the seed of R's random number generator to create reproducible trajectories. If used, the "numPlots" can be set to one - otherwise each plot is replicated 3 times.
#' @examples
#' data <- rties_ExampleDataShort
#' newData <- dataPrep(basedata=data, dyadId="couple", personId="person",
#' obs_name="dial", dist_name="female", time_name="time")
#' taus <-c(2,3)
#' embeds <- c(3,4)
#' delta <- 1
#' derivs <- estDerivs(prepData=newData, taus=taus, embeds=embeds, delta=delta, idConvention=500)
#' clo <- indivClo(derivData=derivs$data, whichModel="coupled")
#' profiles <- inspectProfiles(whichModel="clo", prepData=newData, paramEst=clo$params, n_profiles=2)
#' head(profiles)
#'
#' @return A dataframe called "profileData" that contains the profile classification for each dyad.
#' @import ggplot2
#' @import mclust
#' @export
inspectProfiles <- function(whichModel, prepData, paramEst, n_profiles, dist0name=NULL, dist1name=NULL, plot_obs_name = NULL, minMax=NULL, time_length=NULL, numPlots=NULL, seed = NULL)
{
  # Resolve display defaults for plot labels.
  if(is.null(dist0name)){dist0name <- "dist0"}
  if(is.null(dist1name)){dist1name <- "dist1"}
  if(is.null(plot_obs_name)){plot_obs_name <- "observed"}
  # Y-axis limits: observed range of the detrended state variable by default,
  # or user-specified quantiles via minMax.
  if(is.null(minMax)){
    min <- min(prepData$obs_deTrend, na.rm=T)
    max <- max(prepData$obs_deTrend, na.rm=T)
  } else {
    min <- stats::quantile(prepData$obs_deTrend, minMax[1], na.rm=T)
    max <- stats::quantile(prepData$obs_deTrend, minMax[2], na.rm=T)
  }
  if(is.null(time_length)){time_length <- as.numeric(stats::quantile(prepData$time, prob=.75))}
  if(is.null(numPlots)) {numPlots <- 3}
  # (The original "if(!is.null(seed)) seed = seed" self-assignment was a no-op
  # and has been removed; seed is simply forwarded to the plotting helper.)
  profileData <- list()
  # Mclust cannot handle rows with missing parameter estimates.
  paramEst <- paramEst[stats::complete.cases(paramEst), ]
  # Fit the latent profile (Gaussian mixture) model on the parameter columns
  # belonging to the requested rties model.
  # FIX: an unknown whichModel previously only print()ed a message and then
  # crashed later on the undefined "lpa"; fail fast with stop() instead.
  if(whichModel == "clo"){
    vars1 <- c("obs_0","d1_0","p_obs_0","p_d1_0","obs_1","d1_1","p_obs_1","p_d1_1")
    params <- paramEst[vars1]
    lpa <- Mclust(params, G=n_profiles)
  } else if (whichModel == "inertCoord"){
    vars2 <- c("inert1", "coord1", "coord0", "inert0")
    params <- paramEst[vars2]
    lpa <- Mclust(params, G=n_profiles)
  } else {
    stop("Model must be inertCoord or clo")
  }
  # profileData: one row per dyad, with the profile as a factor and as a
  # 0-based numeric code.
  profileData$profile <- factor(lpa$classification)
  profileData$profileN <- as.numeric(lpa$classification) - 1
  profileData$dyad <- paramEst$dyad
  profileData <- as.data.frame(profileData)
  # Print the number of dyads assigned to each profile.
  print(table(lpa$classification))
  # Plot quality of the clustering solution.
  dr <- mclust::MclustDR(lpa, lambda=1)
  graphics::plot(dr, what ="contour")
  # Plot content of the solution: mean parameter estimates for each profile,
  # reshaped to long format so profiles can be drawn as separate lines.
  means <- as.data.frame(lpa$parameters$mean)
  means$varNames <- rownames(means)
  means$var <- c(1:dim(means)[1])
  meansL <- stats::reshape(means, idvar="varNames", varying=list(1:n_profiles), timevar="profile", sep="", direction="long")
  profile <- NULL  # quiets R CMD check: "profile" is used via NSE in aes() below
  print(ggplot(data=meansL, aes_string(x="varNames", y="V1", group="profile")) +
          geom_line(aes(colour=as.factor(profile))))
  # Prototypical model-predicted trajectories for each profile (whichModel was
  # already validated above, so a plain else suffices here).
  if(whichModel=="clo") {
    cloPlotTraj(prepData=prepData, paramEst=paramEst, n_profiles=n_profiles, dist0name=dist0name, dist1name=dist1name, plot_obs_name=plot_obs_name, minMax=minMax, time_length=time_length)
  } else {
    inertCoordPlotTraj(prepData=prepData, paramEst=paramEst, n_profiles=n_profiles, dist0name=dist0name, dist1name=dist1name, plot_obs_name=plot_obs_name, minMax=minMax, time_length=time_length, numPlots=numPlots, seed=seed)
  }
  return(profileData)
}
|
a41c888123e6adeca3943b9f1a904bd5e2395bc9
|
3e63a021d9f7ee9cd23da2246eacb68572fe3822
|
/R/cluster.by.distribution.R
|
56ffbe8b33e46fcb981024bab113a27b0465d27f
|
[] |
no_license
|
vegart/R-pkg-clusterd
|
fc9cdeaff02158bff68469887cbc9aaed9d71058
|
88c1f82554c8de3541887ad28934e478a81de034
|
refs/heads/master
| 2020-07-18T17:09:20.588981
| 2019-09-10T08:16:57
| 2019-09-10T08:16:57
| 206,279,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,983
|
r
|
cluster.by.distribution.R
|
#' @title cluster.by.distribution
#' @description The objective of clustering is to find how similar the distribution of data is, among different groups.
#' @param x data frame with a grouping variable, and at least one numeric variable to be used for comparing distribution
#' @param group_column column name of x which defines the group
#' @param target_columns column name of x which will be used to compare distribution
#' @param by bin size to be used in binned.dist.matrix, Default: 0.1
#' @param defined.k if not given, number of medoids is calculated in get.clara.object, Default: numeric(0)
#' @param clara.metric metric parameter given to cluster::clara Default: 'euclidean'
#' @param clusgap.B metric parameter given to cluster::clusGap, Default: 400
#' @return list
#' @details list(data=data.frame(),clara=function(){},silinfo=function(){},clustering=function(){})
#' @examples
#' \dontrun{
#' if(interactive()){
#' cluster.by.distribution(random.tibble,'label',c('rand'))
#' }
#' }
#' @seealso
#' \code{\link[dplyr]{group_by_all}},\code{\link[dplyr]{group_keys}},\code{\link[dplyr]{mutate}}
#' @rdname cluster.by.distribution
#' @export
#' @importFrom dplyr group_by_at group_split mutate
cluster.by.distribution <-
  function(
    x,
    group_column,
    target_columns,
    by=0.1,
    defined.k = numeric(0),
    clara.metric='euclidean',
    clusgap.B = 400
  ){
    # 1. Summarise each group's distribution of the target columns as binned
    #    (bin width = "by") values -- one row per group.
    target.matrix <- binned.dist.matrix(x,group_column,target_columns,by=by)
    # 2. Cluster the binned distributions with CLARA. If defined.k is empty,
    #    the number of medoids is chosen inside get.clara.object (per the
    #    roxygen above, via clusGap with clusgap.B bootstrap replicates);
    #    otherwise the supplied k is used.
    clara.object <- get.clara.object(
      is.k.predefined = (defined.k %>% length > 0)
      ,target.matrix
      ,defined.k = defined.k
      ,clara.metric=clara.metric
      ,clusgap.B=clusgap.B
    )
    # 3. Attach each group's cluster label back onto its original rows:
    #    split x by group, look the group up by name in the clustering vector
    #    (assumes clara.object$clustering() is named by group value -- TODO
    #    confirm against get.clara.object), then re-assemble the rows.
    labeled.x <- x %>% dplyr::group_by_at(group_column) %>% dplyr::group_split() %>%
      lapply(function(z){
        z %>% dplyr::mutate(
          clustered = clara.object$clustering() %>% .[which(names(.) == unique(z[[group_column]]))]
        )
      }) %>% Reduce(rbind,.)
    # 4. Return the labelled data alongside the clara.object accessors,
    #    flattened one level: list(data=..., clara=..., silinfo=..., ...).
    list(
      list(data=labeled.x)
      ,clara.object
    ) %>% unlist(recursive=F)
  }
|
b462235d739cfd2b7f4122f9e73539793f10b2fb
|
ea481968a765f97210e370b0a08f13d60a11f969
|
/R/lm_meta.R
|
0da2651017d7c8a29c73478a03fb84f7b0aedc64
|
[] |
no_license
|
ssmufer/MMUPHin
|
e77ded165ab5180136d34563cc5f791e75565b60
|
ca24f992850a877a4558b83515987dc6da403008
|
refs/heads/master
| 2023-05-07T21:37:04.250896
| 2021-05-26T17:13:10
| 2021-05-26T17:13:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,723
|
r
|
lm_meta.R
|
#' Covariate adjusted meta-analytical differential abundance testing
#'
#' \code{lm_meta} runs differential abundance models on microbial profiles
#' within individual studies/batches, and aggregates per-batch effect sizes with
#' a meta-analysis fixed/random effects model. It takes as input a
#' feature-by-sample microbial abundance table and the accompanying meta data
#' data frame which should includes the batch indicator variable, the main
#' exposure variable for differential abundance testing, and optional covariates
#' and random covariates. The function first runs
#' \code{\link[Maaslin2]{Maaslin2}} models on the exposure with optional
#' covariates/random covariates in each batch. The per-batch effect sizes are
#' then aggregated with \code{\link[metafor]{rma.uni}} and reported as output.
#' Additional parameters, including those for both
#' \code{\link[Maaslin2]{Maaslin2}} and \code{\link[metafor]{rma.uni}} can be
#' provided through \code{control} (see details).
#'
#' \code{control} should be provided as a named list of the following components
#' (can be a subset).
#' \describe{
#' \item{normalization}{
#' character. \code{normalization} parameter for Maaslin2. See
#' \code{\link[Maaslin2]{Maaslin2}} for details and allowed values. Default to
#' \code{"TSS"} (total sum scaling).
#' }
#' \item{transform}{
#' character. \code{transform} parameter for Maaslin2. See
#' \code{\link[Maaslin2]{Maaslin2}} for details and allowed values. Default to
#' \code{"LOG"} (log transformation).
#' }
#' \item{analysis_method}{
#' character. \code{analysis_method} parameter for Maaslin2. See
#' \code{\link[Maaslin2]{Maaslin2}} for details and allowed values. Default to
#' \code{"LM"} (linear modeling).
#' }
#' \item{rma_method}{
#' character. \code{method} parameter for rma.uni. See
#' \code{\link[metafor]{rma.uni}} for details and allowed values. Default to
#' \code{"REML"} (estricted maximum-likelihood estimator).
#' }
#' \item{output}{
#' character. Output directory for intermediate Maaslin2 output and the optional
#' forest plots. Default to \code{"MMUPHin_lm_meta"}.
#' }
#' \item{forest_plot}{
#' character. Suffix in the name for the generated forest plots visualizing
#' significant meta-analyitical differential abundance effects. Default to
#' \code{"forest.pdf"}. Can be set to \code{NULL} in which case no output will
#' be generated.
#' }
#' \item{rma_conv}{
#' numeric. Convergence threshold for rma.uni (corresponds to
#' \code{control$threshold}. See \code{\link[metafor]{rma.uni}} for details.
#' Default to 1e-4.
#' }
#' \item{rma_maxit}{
#' integer. Maximum number of iterations allowed for rma.uni (corresponds to
#' \code{control$maxiter}. See \code{\link[metafor]{rma.uni}} for details.
#' Default to 1000.
#' }
#' \item{verbose}{
#' logical. Indicates whether or not verbose information will be printed.
#' }
#' }
#'
#' @param feature_abd feature-by-sample matrix of abundances (proportions or
#' counts).
#' @param batch name of the batch variable. This variable in data should be a
#' factor variable and will be converted to so with a warning if otherwise.
#' @param exposure name of the exposure variable for differential abundance
#' testing.
#' @param covariates names of covariates to adjust for in Maaslin2
#' differential abundance testing models.
#' @param covariates_random names of random effects grouping covariates to
#' adjust for in Maaslin2 differential abundance testing models.
#' @param data data frame of metadata, columns must include exposure, batch,
#' and covariates and covariates_random (if specified).
#' @param control a named list of additional control parameters. See details.
#'
#' @return a list, with the following components:
#' \describe{
#' \item{meta_fits}{
#' data frame of per-feature meta-analyitical differential abundance results,
#' including columns for effect sizes, p-values and q-values, heterogeneity
#' statistics such as \eqn{\tau^2} and \eqn{I^2}, as well as weights for
#' individual batches. Many of these statistics are explained in detail in
#' \code{\link[metafor]{rma.uni}}.
#' }
#' \item{maaslin_fits}{
#' list of data frames, each one corresponding to the fitted results of
#' Maaslin2 in a individual batch. See \code{\link[Maaslin2]{Maaslin2}} on
#' details of these output.
#' }
#' \item{control}{list of additional control parameters used in the function
#' call.
#' }
#' }
#' @export
#' @author Siyuan Ma, \email{siyuanma@@g.harvard.edu}
#' @examples
#' data("CRC_abd", "CRC_meta")
#' fit_meta <- lm_meta(feature_abd = CRC_abd,
#' exposure = "study_condition",
#' batch = "studyID",
#' covariates = c("gender", "age"),
#' data = CRC_meta)$meta_fits
lm_meta <- function(feature_abd,
                    batch,
                    exposure,
                    covariates = NULL,
                    covariates_random = NULL,
                    data,
                    control = list()) {
  # FIX: "control" now defaults to an empty list so calls that omit it (as in
  # the @examples above) work; match_control fills in all documented defaults.
  # Check and construct controls
  control <- match_control(default = control_lm_meta,
                           control = control)
  verbose <- control$verbose
  # Check data formats
  # Check feature abundance table (proportions or counts)
  feature_abd <- as.matrix(feature_abd)
  type_feature_abd <- check_feature_abd(feature_abd = feature_abd)
  # Check metadata data frame agrees with the abundance table's samples
  data <- as.data.frame(data)
  samples <- check_samples(feature_abd = feature_abd,
                           data = data)
  # Check variables are included in metadata data frame
  if(length(batch) > 1)
    stop("Only one batch variable is supported!")
  df_batch <- check_metadata(data = data,
                             variables = batch)
  df_meta <- check_metadata(data = data,
                            variables = c(exposure,
                                          covariates,
                                          covariates_random),
                            no_missing = FALSE)
  # Check batch variable (meta-analysis needs at least two batches)
  var_batch <- check_batch(df_batch[[batch]], min_n_batch = 2)
  n_batch <- nlevels(var_batch)
  lvl_batch <- levels(var_batch)
  if(verbose)
    message("Found ", n_batch, " batches")
  # Determine if exposure can be fitted on each batch
  # First if exposure is character change to factor
  if(is.character(df_meta[[exposure]]))
    df_meta[[exposure]] <- as.factor(df_meta[[exposure]])
  ind_exposure <- check_exposure(df_meta[[exposure]], var_batch)
  if(any(!ind_exposure))
    warning("Exposure variable is missing or has only one non-missing value",
            " in the following batches; Maaslin2 won't be fitted on them\n",
            paste(lvl_batch[!ind_exposure], collapse = ", "))
  # Determine if/which covariates can be fitted on each batch
  ind_covariates <- check_covariates(df_meta[covariates], var_batch)
  for(covariate in covariates) {
    if(any(ind_exposure & !ind_covariates[, covariate]))
      warning("Covariate ", covariate,
              " is missing or has only one non-missing value",
              " in the following batches; will be excluded from model for",
              " these batches:\n",
              paste(lvl_batch[ind_exposure & !ind_covariates[, covariate]],
                    collapse = ", "))
  }
  # Determine if/which random covariates can be fitted on each batch
  ind_covariates_random <- check_covariates_random(df_meta[covariates_random],
                                                   var_batch)
  for(covariate in covariates_random) {
    if(!any(ind_exposure & ind_covariates_random[, covariate]))
      warning("Random covariate ", covariate,
              " has no clustered observations!")
    else if(verbose)
      message("Random covariate ", covariate,
              "will be fitted for the following batches:\n",
              paste(lvl_batch[ind_exposure &
                                ind_covariates_random[, covariate]],
                    collapse = ", "))
  }
  # Create temporary output for Maaslin output files
  dir.create(control$output, recursive = TRUE, showWarnings = FALSE)
  # Fit individual models: one Maaslin2 run per batch with a usable exposure,
  # each restricted to the covariates that vary within that batch.
  maaslin_fits <- list()
  for(i in seq_len(n_batch)) {
    i_batch <- lvl_batch[i]
    if(!ind_exposure[i_batch]) next
    if(verbose) message("Fitting Maaslin2 on batch ", i_batch, "...")
    i_feature_abd <- feature_abd[, var_batch == i_batch, drop = FALSE]
    i_data <- df_meta[var_batch == i_batch, , drop = FALSE]
    i_covariates <- covariates[ind_covariates[i_batch, , drop = TRUE]]
    i_covariates_random <- covariates_random[
      ind_covariates_random[i_batch, , drop = TRUE]]
    i_output <- paste0(control$output, "/", i_batch)
    dir.create(i_output, showWarnings = FALSE)
    i_maaslin <- Maaslin2_wrapper(
      feature_abd = i_feature_abd,
      data = i_data,
      exposure = exposure,
      covariates = i_covariates,
      covariates_random = i_covariates_random,
      output = i_output,
      normalization = control$normalization,
      transform = control$transform,
      analysis_method = control$analysis_method
    )
    maaslin_fits[[i_batch]] <- i_maaslin
    maaslin_fits[[i_batch]]$batch <- i_batch
  }
  # Fit fixed/random effects models aggregating the per-batch effect sizes
  if(verbose) message("Fitting meta-analysis model.")
  meta_fits <- rma_wrapper(maaslin_fits,
                           method = control$rma_method,
                           output = control$output,
                           forest_plot = control$forest_plot,
                           rma_conv = control$rma_conv,
                           rma_maxit = control$rma_maxit,
                           verbose = verbose)
  return(list(meta_fits = meta_fits,
              maaslin_fits = maaslin_fits,
              control = control))
}
|
1f594259806d25cf69df0d7c5f82c4729b6d4bf4
|
1fb0d37c7ba1afd1777097922def88c50a4b3117
|
/R/params_list.R
|
140aa57d88eecb52106d0375ef1e8f0247dc3c6a
|
[
"MIT"
] |
permissive
|
ZhangJieYeahBuddy/knowboxr
|
0dd51c0203b0d5dc1c07d1a6938b8f7b31577801
|
11bba8361ea545f96c48df404068131acbc8980e
|
refs/heads/master
| 2020-09-03T07:46:16.428585
| 2019-03-29T03:16:50
| 2019-03-29T03:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 900
|
r
|
params_list.R
|
# -------------------------------------------------------------------------
# Consul Parameters (Keys) Lookup Table for Functions in Package
# -------------------------------------------------------------------------
#' @keywords internal
func_list <- local({
  # Keys shared by every relational-database connection helper.
  db_keys <- c("username", "password", "host", "port", "database")
  list(
    # conn to MySQL database
    est_mysql_conn = db_keys,
    # conn to Postgres database
    est_pgres_conn = db_keys,
    # conn to Mongo database (same keys plus the target collection)
    est_mongo_conn = c(db_keys, "collection"),
    # reverse SSH tunnel
    reverse_proxy = c(
      "username", "port",
      "remotehost", "remoteport",
      "farawayhost", "farawayport"
    ),
    # download sheet from source
    download_sheet = c("username", "password")
  )
})
|
3f5be8859b6ffcc51a19e3c171e4180a4e90cc19
|
9f353a8a69942f3f6fea818b9361a95693af22b2
|
/R/analysis1.R
|
fd17e7fd08f21d1bbc977d97cad37f9c4c40ae88
|
[] |
no_license
|
josephsdavid/teachR
|
f11d1fb19b5c15e8071085c5954e54cea7c7a8cf
|
f84b8e180e7e9d5444bdcee84faafc8d9e72b9f4
|
refs/heads/master
| 2020-06-03T14:43:17.148632
| 2020-03-29T20:06:53
| 2020-03-29T20:06:53
| 191,610,354
| 12
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,034
|
r
|
analysis1.R
|
## Exploratory time-series analysis of Chinese-city PM2.5 data.
library(tswgewrapped)
library(ggthemes)
library(ggplot2)
library(cowplot)
source("../R/preprocessing.R", echo = TRUE)
source("../R/helpers.R", echo = TRUE)
# data import
# imports the data as a hash table
fine_china <- preprocess("../data/")
names(fine_china)
# [1] "ChengduPM_" "ShenyangPM_" "ShanghaiPM_" "BeijingPM_" "GuangzhouPM_"
# <environment: 0x7bf3c10>
# shanghai
# Extract the US-embassy PM series for Shanghai and resample it at several
# granularities (day/week/month/sea fields -- defined in helpers.R).
shang_US <- fine_china$ShanghaiPM_$PM_US
usShang <- resample(shang_US)
plotts.sample.wge(usShang$day)
plotts.sample.wge(usShang$week)
plotts.sample.wge(usShang$month)
plotts.sample.wge(usShang$sea)
# Classical decompositions at each granularity.
# NOTE(review): several decompose() calls below are exact duplicates (e.g.
# the two monthly "multiplicative" lines, the two seasonal "additive" lines);
# one of each pair was probably meant to be the other type -- verify intent.
decompose(usShang$day, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usShang$day, "additive") %>>% autoplot+ theme_economist()
decompose(usShang$week, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usShang$week, "additive") %>>% autoplot+ theme_economist()
decompose(usShang$month, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usShang$month, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usShang$sea, "additive") %>>% autoplot+ theme_economist()
decompose(usShang$sea, "additive") %>>% autoplot+ theme_economist()
# Lag and season plots (cartesian and polar).
usShang$week %>>% lagplot+ theme_economist()
usShang$day %>>% seasonplot + theme_economist()
usShang$day %>>% seasonplot(polar = TRUE) + theme_economist()
usShang$week %>>% seasonplot+ theme_economist()
usShang$week %>>% seasonplot(polar = T)+ theme_economist()
usShang$month %>>% seasonplot+ theme_economist()
usShang$month %>>% seasonplot(polar = T)+ theme_economist()
# NOTE(review): "usShang$seas" is likely a typo for "usShang$sea" -- "$" does
# not partial-match a name longer than the element's, so $seas would return
# NULL; confirm what resample() actually returns.
usShang$seas %>>% seasonplot(polar = T) + theme_economist()
usShang$seas %>>% seasonplot + theme_economist()
# next lets look at ses and holt models
# Simple exponential smoothing at each granularity, plotted side by side.
library(fpp2)
sesd <- ses(usShang$day)
sesw <- ses(usShang$week)
sesm <- ses(usShang$month)
par(mfrow = c(1,3))
lapply(list(sesd,sesw,sesm), plot)
# In-sample accuracy of each SES fit.
accuracy(fitted(sesd))
accuracy(fitted(sesw))
accuracy(fitted(sesm))
## Problem: implement the above for holt
# below is extra, see analysis2 for more complex fun
# We see with the weekly plot we have a lot of seasonality
# lets just for fun do some predictions with the daily data
# Work with the daily Shanghai series from here on.
shang <- usShang$day
plotts.sample.wge(shang)
# we have clear seasonality, and maybe a wandering behavior. I believe we have a biannual seasonality, based off of the monthly graph
# Remove the annual (s = 365) seasonal component, then take a first difference.
shang %>>% ( difference(seasonal,., (365)) ) -> shang2
difference(arima, shang2, 1) -> shang3
# Model-order selection (AIC/BIC) on the seasonally + first-differenced series.
aics <- shang3 %>>% aicbic(p=0:10)
pander(aics)
#
#
# *
#
# ------------------------
# p q aic
# -------- --- --- -------
# **20** 3 1 13.51
#
# **6** 0 5 13.52
#
# **4** 0 3 13.52
#
# **10** 1 3 13.52
#
# **3** 0 2 13.55
# ------------------------
#
# *
#
# ------------------------
# p q bic
# -------- --- --- -------
# **20** 3 1 13.54
#
# **4** 0 3 13.55
#
# **6** 0 5 13.55
#
# **10** 1 3 13.56
#
# **3** 0 2 13.57
# ------------------------
#
#
# <!-- end of list -->
#
#
# NULL
# Model-order selection on the seasonally differenced (but NOT first
# differenced) series. FIX: the original printed "pander(aics)" -- the table
# for the other series computed above -- instead of the new "aicss".
aicss <- shang %>>% ( difference(seasonal,., 365) ) %>>% aicbic(p=0:10)
pander(aicss)
#
#
# *
#
# -----------------------
# p q aic
# -------- --- --- ------
# **20** 3 1 13.5
#
# **11** 1 4 13.5
#
# **26** 4 1 13.5
#
# **16** 2 3 13.5
#
# **24** 3 5 13.5
# -----------------------
#
# *
#
# ------------------------
# p q bic
# -------- --- --- -------
# **13** 2 0 13.53
#
# **3** 0 2 13.53
#
# **8** 1 1 13.53
#
# **7** 1 0 13.53
#
# **20** 3 1 13.53
# ------------------------
#
#
# <!-- end of list -->
#
#
# NULL
par(mfrow = c(1,1))
# AR(2) fit on the seasonally differenced series, with residual diagnostics.
est_shang <- estimate(shang2, p=2, q = 0)
acf(est_shang$res)
ljung_box(est_shang$res, p =2, q =0)
# 24-step-ahead forecast and ASE for the purely seasonal model.
shang_seasonal <- fore_and_assess(type = aruma,
                                  x = shang,
                                  s = 365,
                                  phi = est_shang$phi,
                                  n.ahead = 24,
                                  limits = F
                                  )
# ARMA(3,1) fit on the seasonally + first-differenced series.
est_shang2 <- estimate(shang3, p = 3, q = 1)
acf(est_shang2$res)
ljung_box(est_shang2$res, 3, 1)
#                  [,1]             [,2]
# test       "Ljung-Box test" "Ljung-Box test"
# K          24               48
# chi.square 14.14806         35.92178
# df         20               44
# pval       0.8229101        0.8017901
shang_aruma <- fore_and_assess(type = aruma,
                               x = shang,
                               s = 365,
                               d = 1,
                               phi = est_shang2$phi,
                               theta = est_shang2$theta,
                               n.ahead = 24,
                               limits = F
                               )
shang_seasonal$ASE
# [1] 1154198
shang_aruma$ASE
# [1] 1154911
# Hold-out comparison against the next 24 daily observations.
test <- window(shang_US, start = 7)[1:24]
ase(test, shang_aruma)
# [1] 1977888
ase(test, shang_seasonal)
# [1] 3278672
# Long-horizon (500-step) forecasts from both models for visual inspection.
forecast(aruma, shang, s = 365, d = 1, phi = est_shang2$phi,theta = est_shang2$theta, n.ahead=500)
forecast(aruma, shang, s = 365, phi = est_shang$phi, n.ahead=500)
# ok looking damn good with the shang aruma
# Beijing
# Same exploratory pipeline for the Beijing US-embassy PM series.
bj_US <- fine_china$BeijingPM_$PM_US
usBJ <- resample(bj_US)
plotts.sample.wge(usBJ$day)
plotts.sample.wge(usBJ$week)
plotts.sample.wge(usBJ$month)
plotts.sample.wge(usBJ$sea)
# NOTE(review): as in the Shanghai section, some decompose() lines duplicate
# each other -- one of each pair was likely meant to be the other type.
decompose(usBJ$day, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usBJ$day, "additive") %>>% autoplot+ theme_economist()
decompose(usBJ$week, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usBJ$week, "additive") %>>% autoplot+ theme_economist()
decompose(usBJ$month, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usBJ$month, "multiplicative") %>>% autoplot+ theme_economist()
decompose(usBJ$sea, "additive") %>>% autoplot+ theme_economist()
decompose(usBJ$sea, "additive") %>>% autoplot+ theme_economist()
usBJ$week %>>% lagplot+ theme_economist()
usBJ$day %>>% seasonplot + theme_economist()
usBJ$day %>>% seasonplot(polar = TRUE) + theme_economist()
usBJ$week %>>% seasonplot+ theme_economist()
usBJ$week %>>% seasonplot(polar = T)+ theme_economist()
usBJ$month %>>% seasonplot+ theme_economist()
usBJ$month %>>% seasonplot(polar = T)+ theme_economist()
# NOTE(review): "$seas" is likely a typo for "$sea" (see Shanghai section).
usBJ$seas %>>% seasonplot(polar = T) + theme_economist()
usBJ$seas %>>% seasonplot + theme_economist()
# Seasonally difference (s = 365) the daily Beijing series and pick orders.
bj <- usBJ$day
bj %>>% (difference(seasonal,.,365)) -> bjtr
aicbj <- bj %>>% (difference(seasonal,.,365)) %>>%
  aicbic(p = 0:10)
pander(aicbj)
#
#
# *
#
# ------------------------
# p q aic
# -------- --- --- -------
# **41** 6 4 15.31
#
# **53** 8 4 15.31
#
# **59** 9 4 15.31
#
# **47** 7 4 15.31
#
# **60** 9 5 15.31
# ------------------------
#
# *
#
# ------------------------
# p q bic
# -------- --- --- -------
# **13** 2 0 15.35
#
# **3** 0 2 15.35
#
# **8** 1 1 15.35
#
# **14** 2 1 15.35
#
# **19** 3 0 15.35
# ------------------------
#
#
# <!-- end of list -->
#
#
# NULL
# ARMA(6,4) fit on the seasonally differenced Beijing series + diagnostics.
est_bj <- estimate(bjtr, 6,4)
acf(est_bj$res)
ljung_box(est_bj$res,6,4)
#                  [,1]             [,2]
# test       "Ljung-Box test" "Ljung-Box test"
# K          24               48
# chi.square 28.84052         45.85887
# df         14               38
# pval       0.01098179       0.1784661
# 24-step-ahead forecast with the seasonal ARUMA model.
bj_seas <- fore_and_assess(type = aruma,
                           x = bj,
                           s = 365,
                           phi = est_bj$phi,
                           theta = est_bj$theta,
                           n.ahead = 24,
                           limits = F
                           )
# Hold-out ASE against the next 24 daily observations.
test <- window(bj_US, start = 7)[1:24]
ase(test, bj_seas)
|
f42ae3debbafde90b47d3b288fe0e20aedc877ab
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/7109_0/rinput.R
|
f671f085f517c1bba1ffc3e91fb649509c975e95
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
## Unroot a phylogenetic tree: read the Newick file, remove the root, and
## write the unrooted tree out under a new name.
library(ape)
phylo_tree <- read.tree("7109_0.txt")
write.tree(unroot(phylo_tree), file = "7109_0_unrooted.txt")
|
db378ad5e762955d8b116a50876a3fb67805e2cc
|
f15a0236a88c02e44933ebc7aa133fba72c1db55
|
/analysis/contact_analysis.R
|
bf270f9fb5ec41ec18f7b8e4c3f004631c26c3e4
|
[
"BSD-2-Clause"
] |
permissive
|
YTomTJ/edge4d
|
ffbe34cad8b5c785b784b4d223ee2f149f57e92f
|
e21a308d619c03db83ef68cf63b1637685bd2139
|
refs/heads/master
| 2023-03-16T08:37:22.507256
| 2017-05-08T20:51:24
| 2017-05-08T20:51:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,076
|
r
|
contact_analysis.R
|
## Cell-cell contact analysis: for each tracked basal cell (and its 1st/2nd
## order neighbors), determine whether the contact area with each neighbor
## grows or shrinks between t = 0 and t = 800 s.
library(plyr)
contact <- read.delim("live_data_output/072112_02_t11_50_contact_analysis.txt", stringsAsFactors=F)
contact$time <- contact$time - 850 ## time0 is 850
# Keep only rows with a valid neighbor trajectory.
contact <- contact[which(contact$neighbor.trajectory.id >= 0),]
# Fraction of the cell's total contact surface shared with this neighbor.
contact$pcent <- contact$neighbor.contact.sa / contact$total.contact.sa
# Trajectory ids for posterior-furrow (PF) / anterior-furrow (AF) basal cells
# and their first- and second-order neighbors (dataset 1).
PF.basal.cells.data1 <- c(0, 32, 38, 41, 47, 70, 79, 112, 117, 120, 144, 124, 152, 156, 183, 195, 202, 203)
PF.1.neighbors.data1 <- c(25, 34, 57, 74, 75, 87, 89, 98, 108, 109, 114, 116, 124, 146, 148, 177, 181, 198, 206, 221, 238)
PF.2.neighbors.data1 <- c(2, 10, 20, 30, 60, 64, 67, 86, 94, 100, 125, 136, 155, 162, 175, 182, 194, 215, 222, 315)
AF.basal.cells.data1 <- c(137,110,35,40,36,3,140,141,142,178,26)
AF.1.neighbors.data1 <- c(6, 9, 23, 27, 42, 48, 65, 71, 73, 95, 105, 113, 118, 123, 130, 173, 190, 193, 196, 213)
AF.2.neighbors.data1 <- c(4, 18, 58, 68, 72, 82, 84, 85, 88, 106, 131, 133, 145, 150, 153, 157, 159, 207, 214, 216, 226, 236, 247, 271, 279, 317)
all <- NULL
# For every focal trajectory, summarise each neighbor contact:
#   start.sa/end.sa = median contact fraction near t=0 and t=800,
#   (dx, dy)        = neighbor-centroid offset from the cell at t=0,
#   incr            = TRUE if the contact fraction increased over time.
# NOTE(review): "all" grows via rbind() inside the loop -- fine at this data
# size, but a list + do.call(rbind, ...) would scale better.
for(traj.id in c(PF.basal.cells.data1, AF.basal.cells.data1, PF.1.neighbors.data1, AF.1.neighbors.data1, PF.2.neighbors.data1, AF.2.neighbors.data1)) {
  current <- contact[which(contact$trajectory.id == traj.id),]
  # Summarise one (trajectory, neighbor) pair; returns all-NA when the pair
  # is not observed at both t=0 and t=800.
  proc.traj <- function(traj) {
    if( sum(traj$time == 0) == 0 || sum(traj$time == 800) == 0) {
      c(start.sa=NA,end.sa=NA, dx=NA, dy=NA, cx=NA, cy=NA, nx=NA, ny=NA)
    }
    else {
      # Windows of +/-100 s around the start and end time points.
      start.idx <- which(-100 <= traj$time & traj$time <= 0)
      end.idx <- which(700 <= traj$time & traj$time <= 800)
      cx <- traj$cent.x[which(traj$time==0)]
      cy <- traj$cent.y[which(traj$time==0)]
      nx <- traj$neighbor.cent.x[which(traj$time==0)]
      ny <- traj$neighbor.cent.y[which(traj$time==0)]
      dx <- nx - cx
      dy <- ny - cy
      ##start.sa <- median(traj$neighbor.surface.area[start.idx], na.rm=T)
      ##end.sa <- median(traj$neighbor.surface.area[end.idx], na.rm=T)
      start.sa <- median(traj$pcent[start.idx], na.rm=T)
      end.sa <- median(traj$pcent[end.idx], na.rm=T)
      c(start.sa = start.sa, end.sa = end.sa, dx=dx, dy=dy, cx=cx, cy=cy, nx=nx, ny=ny)
    }
  }
  sa.frame <- ddply(current, c("trajectory.id", "neighbor.trajectory.id"), proc.traj)
  # Drop pairs without both endpoints, then flag growing contacts.
  sa.frame <- sa.frame[which(!(is.na(sa.frame$start.sa) | is.na(sa.frame$end.sa))),]
  sa.frame$incr <- sa.frame$start.sa < sa.frame$end.sa
  all <- rbind(all, sa.frame)
}
# Figure 1: distribution of neighbor offsets relative to the focal cell,
# colored by whether the contact grew (green) or shrank (magenta).
pdf("contact_area_dist.pdf")
## margins are c(bottom, left, top, right); default is c(5, 4, 4, 2) + 0.1
op <- par(mar=(c(4, 4.8, 1, 1.25) + 0.1), lwd=2, ps=22)
plot(type="n", c(-8,8), c(-8,8), xlab="anterior-posterior span, microns", ylab = "dorsal-ventral span, microns")
points(all$dx[all$incr], all$dy[all$incr], xlim=c(-8,8), ylim=c(-8,8), col="green3")
points(all$dx[!all$incr], all$dy[!all$incr], xlim=c(-8,8), ylim=c(-8,8), col="magenta")
points(0,0, col="black", cex=4, pch=20)
par(op)
dev.off()
all.incr <- all[which(all$incr),]
all.decr <- all[which(!all$incr),]
# Figure 2: spatial map of contacts at t=0; increasing contacts in green,
# decreasing in magenta, all other contacts in light gray. Focal cells 141,
# 23, and 40 are highlighted with colored squares.
pdf("contacts.pdf", 8, 4.25)
## margins are c(bottom, left, top, right); default is c(5, 4, 4, 2) + 0.1
op <- par(mar=(c(4, 4.8, 1, 1.25) + 0.1), lwd=2, ps=16)
plot(c(0, 140),c(0,54), type="n", xlab="anterior-posterior span, microns", ylab="dorsal-ventral span, microns")
contact.0 <- contact[which(contact$time == 0),]
segments(contact.0$cent.x, contact.0$cent.y, contact.0$neighbor.cent.x, contact.0$neighbor.cent.y, col="lightgray")
segments(all.incr$cx, all.incr$cy, all.incr$nx, all.incr$ny, col="green", lwd=4)
segments(all.decr$cx, all.decr$cy, all.decr$nx, all.decr$ny, col="magenta", lwd=4)
points(c(contact.0$cent.x, contact.0$neighbor.cent.x),c(contact.0$cent.y, contact.0$neighbor.cent.y), pch=20, col="lightgray" )
points(c(all$cx, all$nx),c(all$cy, all$ny), pch=20, col="black" )
focal.x <- unique(contact.0[contact.0$trajectory.id == 141,]$cent.x)
focal.y <- unique(contact.0[contact.0$trajectory.id == 141,]$cent.y)
points(focal.x, focal.y, pch=22, col="yellow4" , cex=2, lwd=3, bg="yellow")
focal.x <- unique(contact.0[contact.0$trajectory.id == 23,]$cent.x)
focal.y <- unique(contact.0[contact.0$trajectory.id == 23,]$cent.y)
points(focal.x, focal.y, pch=22, col="darkred" , cex=2, lwd=3, bg="red")
focal.x <- unique(contact.0[contact.0$trajectory.id == 40,]$cent.x)
focal.y <- unique(contact.0[contact.0$trajectory.id == 40,]$cent.y)
points(focal.x, focal.y, pch=22, col="blue" , cex=2, lwd=3, bg="cyan")
## focal.x <- unique(contact.0[contact.0$trajectory.id == 26,]$cent.x)
## focal.y <- unique(contact.0[contact.0$trajectory.id == 26,]$cent.y)
## points(focal.x, focal.y, pch=20, col="orange" , cex=2)
par(op)
dev.off()
# 141
# decreasing 26, 40, 178
# increasing 23, 142
# Figure 3: example time course of an increasing contact (cells 141-23),
# raw points plus a loess smooth over a running median.
pdf("incr_example.pdf", 6,6)
op <- par(mar=(c(4.8, 4.8, 1, 1.25) + 0.1), lwd=2, ps=22)
plot(c(0,800), c(5,250), type="n", lwd=2, xlab="time (sec)", ylab=expression(cell~cell~contact~surface~area~microns^2))
cur.traj <- contact[which(contact$trajectory.id == 141 & contact$neighbor.trajectory.id == 23),]
cur.traj <- cur.traj[0 <= cur.traj$time & cur.traj$time <= 800,]
cur.traj <- cur.traj[sort(cur.traj$time, index.return=T)$ix,]
points(loess.smooth(cur.traj$time, runmed(cur.traj$neighbor.contact.sa, 3), family="gaussian"), type="l", lwd=3, col="red")
points(cur.traj$time, cur.traj$neighbor.contact.sa, type="p", pch=21, bg="red", col="darkred", cex=1.2)
par(op)
dev.off()
# Figure 4: example time course of a decreasing contact (cells 141-40).
pdf("decr_example.pdf", 6,6)
op <- par(mar=(c(4.8, 4.8, 1, 1.25) + 0.1), lwd=2, ps=22)
plot(c(0,800), c(5,250), type="n", lwd=2, xlab="time (sec)", ylab=expression(cell~cell~contact~surface~area~microns^2))
cur.traj <- contact[which(contact$trajectory.id == 141 & contact$neighbor.trajectory.id == 40),]
cur.traj <- cur.traj[0 <= cur.traj$time & cur.traj$time <= 800,]
cur.traj <- cur.traj[sort(cur.traj$time, index.return=T)$ix,]
points(loess.smooth(cur.traj$time, runmed(cur.traj$neighbor.contact.sa, 3), family="gaussian"), type="l", lwd=3, col="cyan")
points(cur.traj$time, cur.traj$neighbor.contact.sa, type="p", pch=21, bg="cyan", col="blue", cex=1.2)
par(op)
dev.off()
## incr.contacts <- all[all$incr, c(1,2)]
## decr.contacts <- all[!all$incr, c(1,2)]
## pdf("incr.pdf")
## for(i in 1:dim(incr.contacts)[1]) {
## A <- incr.contacts$trajectory.id[i]
## B <- incr.contacts$neighbor.trajectory.id[i]
## cur.traj <- contact[which(contact$trajectory.id == A & contact$neighbor.trajectory.id == B),]
## cur.traj <- cur.traj[0 <= cur.traj$time & cur.traj$time <= 800,]
## cur.traj <- cur.traj[sort(cur.traj$time, index.return=T)$ix,]
## plot(cur.traj$time, cur.traj$neighbor.contact.sa, type="b")
## title(c(A,B))
## }
## dev.off()
## pdf("decr.pdf")
## for(i in 1:dim(decr.contacts)[1]) {
## A <- decr.contacts$trajectory.id[i]
## B <- decr.contacts$neighbor.trajectory.id[i]
## cur.traj <- contact[which(contact$trajectory.id == A & contact$neighbor.trajectory.id == B),]
## cur.traj <- cur.traj[0 <= cur.traj$time & cur.traj$time <= 800,]
## cur.traj <- cur.traj[sort(cur.traj$time, index.return=T)$ix,]
## plot(cur.traj$time, cur.traj$neighbor.contact.sa, type="b")
## title(c(A,B))
## }
## dev.off()
|
af069ac1f5d7fe7931f06192c9720fc5d19aea06
|
4b48647555feaac4cbb9bb4864db20e6e40a8980
|
/man/yside.Rd
|
1158107c4682a4849ac6d1b0f28717bb0a3cf30c
|
[
"MIT"
] |
permissive
|
seifudd/ggside
|
8d9fdca5b042f9528c5dc4ef5ce0d7f64537f730
|
442c83db4cca57bc9cc962be563fbd7df0463d86
|
refs/heads/master
| 2023-07-12T20:29:20.936007
| 2021-08-16T19:30:55
| 2021-08-16T19:30:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,736
|
rd
|
yside.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggside.R
\name{yside}
\alias{yside}
\title{The yside geometries}
\value{
geom_yside* return a YLayer object to be added to a ggplot
}
\description{
\code{yside} refers to the api of ggside. Any \code{geom_} with
\code{yside} will plot its respective geometry along the y-axis per
facet panel. The yside panel will plot to the right of the main
panel by default. This yside panel will always share the same scale
as its main panel, but is expected to have a separate x-axis scaling.
}
\section{New Aesthetics}{
All \code{yside} Geometries have \code{yfill}, \code{ycolour}/\code{ycolor} available for
aesthetic mappings. These mappings behave exactly like the default
counterparts except that they are considered separate scales. All
\code{yside} geometries will use \code{yfill} over \code{fill}, but will default
to \code{fill} if \code{yfill} is not provided. The same goes for \code{ycolour} in
respects to \code{colour}. This comes in handy if you wish to map both \code{fill}
to one geometry as continuous, you can still map \code{yfill} for a separate
\code{yside} geometry without conflicts. See more information in
\code{vignette("ggside")}.
Exported Geometries:
The following are the \code{yside} variants of the \link{ggplot2} Geometries
\itemize{
\item \link{geom_ysidebar}
\item \link{geom_ysideboxplot}
\item \link{geom_ysidecol}
\item \link{geom_ysidedensity}
\item \link{geom_ysidefreqpoly}
\item \link{geom_ysidehistogram}
\item \link{geom_ysideline}
\item \link{geom_ysidepath}
\item \link{geom_ysidepoint}
\item \link{geom_ysidetext}
\item \link{geom_ysidetile}
\item \link{geom_ysideviolin}
}
}
\seealso{
\link{xside}
}
|
df18babc88df58cf7a8cb96373b6f6e0d7cb51ba
|
314bba245605c2e3abe598b0a99e932c6f346185
|
/Run.r
|
b7d8141b23cdc7c19205d3c1f23c3e1a763ff478
|
[] |
no_license
|
hajime0105/IRT-ICC
|
d5a71495a78259dbb67df77d5067e6d24c0d1496
|
fefb78ad371ce2ac8a969395dd87ccddf782d053
|
refs/heads/master
| 2021-01-10T11:32:31.391486
| 2015-11-17T04:51:14
| 2015-11-17T04:51:14
| 45,888,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
Run.r
|
library(ltm)
# NOTE(review): `param` is not defined in this script -- it must be set
# before running (1 selects the one-parameter-logistic data generator,
# anything else the two-parameter one). Confirm with the caller.
if (param == 1) {
source("makeData1PL.r")
} else {
source("makeData2PL.r")
}
# Reformat the simulated responses into the CSV read below.
source("changeData.r")
data <- read.csv("dataLogic.csv", header = FALSE)
# Fit a Rasch (1PL) model and plot the item characteristic curves.
mod <- rasch(data, IRT.param = TRUE)
print(mod)
plot(mod, lwd = 3, cex.axis = 2, cex.lab = 1.5, xlab = "θ", ylab = "P(θ)", main = "")
print(summary(mod))
|
7b992e2c05770447c986f4dc23836321b9940129
|
727b8af88b6d32bb1f4087537779ea269cf9cd07
|
/fast_sampler.R
|
886125272d82140fe8ff73b3576e7283b05fb7d4
|
[] |
no_license
|
Rene-Gutierrez/boom_project
|
3b13848465484f52fcafd2ecc740ee14029c26e3
|
cd6af9024ed2b504918f6baa82d5a5205f975396
|
refs/heads/main
| 2023-06-18T04:28:11.370121
| 2021-07-17T17:16:18
| 2021-07-17T17:16:18
| 360,395,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
fast_sampler.R
|
### Fast Sampler
# Draws one sample from the Gaussian posterior of a linear model with a
# diagonal prior, using the n x n "capacitance" system instead of a direct
# p x p factorization (fast when p >> n).
#
# Phi : n x p design matrix.
# D   : length-p vector of prior variances (diagonal prior covariance).
# a   : length-n working response vector.
#
# Returns a p x 1 matrix containing the sampled coefficient vector.
fast_sampler <- function(Phi, D, a){
  # Problem dimensions
  n_obs  <- dim(Phi)[1]
  n_coef <- dim(Phi)[2]
  # Step 1: independent draws from the prior and the unit-variance noise
  prior_draw <- rnorm(n = n_coef, mean = 0, sd = sqrt(D))
  noise_draw <- rnorm(n = n_obs, mean = 0, sd = 1)
  # Step 2: simulate the marginal of the working response
  v_sim <- Phi %*% prior_draw + noise_draw
  # Step 3: solve the n x n system (Phi D Phi' + I) w = a - v
  correction <- solve(Phi %*% (D * t(Phi)) + diag(n_obs), a - v_sim)
  # Step 4: shift the prior draw toward the data
  sample_out <- prior_draw + D * t(Phi) %*% correction
  return(sample_out)
}
|
ab1207d0a45f44d78cc8d6a2f114889740abd1ca
|
cbb79420b0e2ba0fa560aa6f4c793e5b43ad0519
|
/R/figure_03_S3.R
|
d647512adfe6325c90a9c8a28b1f6ec721afdb61
|
[
"Apache-2.0"
] |
permissive
|
csbl-usp/evolution_of_knowledge
|
b477f328a1fb8e9b07abb07e26b17de68d5a818e
|
81ab3511f5ea3c4e26e374ce8a67d0e67a38dc59
|
refs/heads/main
| 2023-04-17T07:08:37.080859
| 2021-06-16T14:33:16
| 2021-06-16T14:33:16
| 377,523,481
| 0
| 0
|
NOASSERTION
| 2021-06-16T14:25:17
| 2021-06-16T14:25:16
| null |
UTF-8
|
R
| false
| false
| 14,251
|
r
|
figure_03_S3.R
|
#With this script, you can reproduce the analysis and plot the images that
#compose panel A-C in figure 03 of the paper and figure S3
#load necessary packages
#file management
library(data.table)
#data manipulation
library(dplyr)
library(tidyverse)
library(reshape2)
library(stringr)
#network/gene analysis
library(igraph)
library(clusterProfiler)
library(limma)
#plotting
library(ggplot2)
library(ggrepel)
library(ggraph)
options(stringsAsFactors = F)
#load and create necessary objects
#pathways gmt
#gene-set table with at least columns: db, pathway ID, gene (TERM2GENE format)
pathways_gmt <- fread("data/all_pathways_and_genes.csv")
reactome_gmt <- pathways_gmt %>%
filter(db=="REACTOME")
#gene disease data
# NOTE(review): data("all_edges") is expected to load `edges_list` into the
# workspace (a list of per-source edge tables) -- confirm against the package
# or .rda providing it.
data("all_edges")
all_edges <- do.call(rbind,edges_list)
dis_dis_nodes <- as.data.frame(fread("data/dis_dis_nodes.csv"))
# disease -> disease-class lookup (columns 1 and 3 of the node table)
dis_class_df <- dis_dis_nodes %>%
dplyr::select(1,3) %>%
dplyr::rename(disease=1,class=2)
diseases <- unique(all_edges$Target)
#conversion table for genes in all_edges
load("data/gene_convertion_table.RData")
# Map gene aliases (Source column) to converted symbols, dropping the alias.
all_edges_converted <- all_edges %>%
dplyr::left_join(gene_convertion_table,by=c("Source"="alias")) %>%
dplyr::select(-1) %>%
dplyr::select(converted,Target,everything())
#top 9 diseases
top_9_df <- readRDS("intermediate/top9_diseases_df.RDS")
top_9 <- top_9_df$disease
#run enrichment for genes related to top9 diseases in 2018
# Keep only edges with at least one 2018 document for a top-9 disease.
all_edges_top_9 <- all_edges_converted %>%
filter(Doc.2018 > 0) %>%
filter(Target %in% top_9)
# Over-representation analysis (clusterProfiler::enricher) of each top-9
# disease's 2018 gene set against Reactome pathways; results are stacked
# into one data frame with a `dis` column identifying the disease.
for(i in 1:length(top_9)){
genes <- all_edges_top_9 %>%
filter(Target==top_9[i]) %>%
pull(converted) %>% unique()
enr <- enricher(genes,pAdjustMethod = "BH",TERM2GENE = reactome_gmt)
enr_res <- enr@result
enr_res$dis <- top_9[i]
if(i==1){
enr_res_all <- enr_res
}else{
enr_res_all <- rbind(enr_res_all,enr_res)
}
}
#filter significant terms
enr_res_all_sig <- enr_res_all %>%
filter(p.adjust < 0.01)
# Cache the significant 2018 results (and immediately re-read them, so the
# script can also be resumed from this point).
saveRDS(enr_res_all_sig,file = "intermediate/ORA_results_top9_diseases_2018.RDS")
enr_res_all_sig <- readRDS(file = "intermediate/ORA_results_top9_diseases_2018.RDS")
#run term-term network analysis to find term domains
# Build a term -> gene long table from the significant enrichment results
# (geneID is a "/"-separated gene list per term).
enrichment_term_gene <- enr_res_all_sig %>%
dplyr::select(ID,geneID) %>%
separate_rows(geneID,sep = "/") %>%
dplyr::rename(term=1,gene=2)
#create a list containing all term genes (also in edges_all)
term_gene_list <- split(enrichment_term_gene$gene,
enrichment_term_gene$term)
#run enrichment using lapply and create enrichment dataframe for term-term
# Each term's gene set is tested for overlap with every other term's set,
# yielding term-term association p-values.
term_term_enrichment <- lapply(term_gene_list,enricher,
pAdjustMethod = "BH",TERM2GENE=enrichment_term_gene)
for(i in 1:length(term_term_enrichment)){
obj <- term_term_enrichment[[i]]
if(class(obj)=="NULL"){
next()
}
res <- obj@result
res$term <- names(term_term_enrichment)[i]
if(i==1){
term_term_all <- res
}else{
term_term_all <- rbind(term_term_all,res)
}
}
# Edge list: keep significant, non-self term pairs, weighted by -log(p.adj).
term_term_network <- term_term_all %>%
dplyr::select(ID,term,p.adjust) %>%
dplyr::mutate(weight=-log(p.adjust)) %>%
dplyr::select(-3) %>%
dplyr::rename(from=1,to=2) %>%
filter(from != to,
weight > -log(0.01))
rownames(term_term_network) <- NULL
#create igraph object and run louvain algorithm to detect clusters
g <- graph_from_data_frame(term_term_network,directed = F,
vertices = unique(c(term_term_network$from,
term_term_network$to)))
louv <- cluster_louvain(graph = g,weights = term_term_network$weight)
mods <- data.frame(node=louv$names,mod=louv$membership)
# Weighted degree (strength) per node.
degree <- strength(graph = g,vids = V(g),mode = "all",weights = E(g)$weight)
# Strip a single trailing space from node names, if present.
nodes <- mods %>%
mutate(degree = degree) %>%
mutate(node=ifelse(str_sub(node,
start = nchar(node),
end = nchar(node))==" ",
no=node,
yes=str_sub(node,
start = 1,
end = nchar(node)-1)
))
#load manual annotation of clusters (made outside R by authors. In data/)
# NOTE(review): the code below assumes nodes_annot carries `mod`, `annot`,
# and `degree` columns keyed by `node` -- confirm against the CSV.
nodes_annot <- fread("data/term_term_nodes_annot.csv")
nodes_2 <- nodes %>%
dplyr::select(node) %>%
left_join(nodes_annot,by="node")
# Label only the two highest-degree terms of each module.
label_terms <- nodes_2 %>%
group_by(mod) %>%
top_n(degree,n = 2) %>%
ungroup()
nodes_2 <- nodes_2 %>%
mutate(label=ifelse(node %in% label_terms$node,
node,""))
table_S5 <- enr_res_all_sig %>%
dplyr::left_join(nodes_annot,by = c("ID"="node")) %>%
dplyr::filter(!is.na(annot)) %>%
dplyr::select(-degree)
#Save table S5 in .csv format (enriched terms for top 9 diseases in 2018)
write.csv(table_S5,file = "tables/table_S5.csv",row.names = F)
#update igraph object with degree, annot and labels
V(g)$degree <- nodes_2$degree
V(g)$mod <- nodes_2$annot
V(g)$label <- nodes_2$label
#remove smaller component (contains only 3 terms)
components <- igraph::clusters(g, mode="weak")
biggest_cluster_id <- which.max(components$csize)
vert_ids <- V(g)[components$membership == biggest_cluster_id]
g <- igraph::induced_subgraph(g, vert_ids)
####Figure 3A network####
#plot term-term network with mod annotations
# Loads `pal`, the module color palette used by scale_fill_manual below.
load("data/term_term_network_palette.RData")
# Full network: nodes sized by weighted degree, filled by module annotation.
p <- ggraph(graph = g,layout = "auto")+
geom_edge_link(aes(color=log(weight)))+
geom_node_point(aes(size=degree,fill=mod),color="black",pch=21)+
#geom_node_text(aes(label = label),repel=TRUE)+
scale_fill_manual(values = pal)+
scale_edge_color_continuous(low = "white",high = "grey40")+
theme_void()
pdf(file = "figures/figure_03/panel_A/term_term_network_mods.pdf",
width = 10.3,height = 6.6)
print(p)
dev.off()
# Same network, faceted into one panel per module.
pdf(file = "figures/figure_03/panel_A/term_term_network_mod_facets.pdf",
width = 12.38,height = 8.06)
p <- ggraph(graph = g,layout = "auto")+
geom_edge_link(aes(color=log(weight)))+
geom_node_point(aes(size=degree,fill=mod),color="black",pch=21)+
scale_fill_manual(values = pal)+
scale_edge_color_continuous(low = "white",high = "grey40")+
theme_void()+
theme(legend.position = "none")+
facet_nodes(facets = ~mod)+
theme(strip.text.x = element_text(size = 7))
print(p)
dev.off()
#run enrichment for genes in each top 9 disease per year and plot results in network
# For every year 1990-2018, select the per-year document-count column of
# all_edges_converted, keep gene-disease edges with at least one document,
# and run Reactome ORA per disease; stack everything with year/dis columns.
years <- 1990:2018
for(i in 1:length(years)){
yr <- years[i]
col <- which(grepl(yr,colnames(all_edges_converted)))
df <- all_edges_converted %>%
dplyr::filter(Target %in% top_9) %>%
dplyr::select(1,2,col) %>%
dplyr::rename(gene=1,dis=2,docs=3) %>%
dplyr::filter(docs > 0)
gene_list <- split(df$gene,df$dis)
enr <- lapply(gene_list,enricher,pAdjustMethod = "BH",TERM2GENE = reactome_gmt)
for(j in 1:length(enr)){
enr_res <- enr[[j]]
if(class(enr_res)=="NULL"){next}
enr_res <- enr_res@result
enr_res$year <- yr
enr_res$dis <- names(enr)[j]
if(i==1 & j==1){
enr_res_all_dis <- enr_res
}else{
enr_res_all_dis <- rbind(enr_res_all_dis,enr_res)
}
}
}
saveRDS(enr_res_all_dis,file = "intermediate/ORA_results_top9_diseases_all_years.RDS")
# Keep significant terms, trim trailing spaces from term IDs, and attach the
# disease class and module annotation.
# NOTE(review): `terms_in_network` is only defined later, in the
# "Figure 3B - networks" section (terms_in_network <- V(g)$name). Running
# this script strictly top-to-bottom will error here -- that assignment
# should be moved above this pipeline. Confirm intended run order.
enr_res_all_dis_selected <- enr_res_all_dis %>%
filter(p.adjust < 0.01) %>%
mutate(ID=ifelse(str_sub(ID,
start = nchar(ID),
end = nchar(ID))==" ",
no=ID,
yes=str_sub(ID,
start = 1,
end = nchar(ID)-1)
)) %>%
left_join(dis_class_df,by=c("dis"="disease")) %>%
filter(ID %in% terms_in_network) %>%
left_join(nodes_2 %>% dplyr::select(node,annot),by=c("ID"="node"))
enrichment_df_plot <- enr_res_all_dis_selected %>%
dplyr::select(ID,p.adjust,year,dis,class,annot) %>%
dplyr::mutate(score=-log(p.adjust))
#plot enrichment results for each module in boxplots
enrichment_boxplot_plot <- enrichment_df_plot %>%
filter(year==2018)
# Class colors: green = infectious, orange = inflammatory, blue = psychiatric.
cols <- c("#68ad36","#ff743e", "#00b7da")
classes <- unique(enrichment_df_plot$class)
####Figure 3A boxplots####
# 2018 enrichment-score distribution per disease class, one facet per module.
p <- enrichment_boxplot_plot %>%
#filter(dis %in% top_9[c(1,2,6,12,11,10,25,26,20,19)]) %>%
# group_by(dis,class,annot) %>%
# summarise(score=mean(score)) %>%
# arrange(class,dis,desc(score)) %>%
# mutate(annot=factor(annot,levels = unique(annot))) %>%
mutate(class=factor(class,levels = c("INFECTIOUS","INFLAMMATORY","PSYCHIATRIC"))) %>%
ggplot(aes(x=class,y=log(score),fill=class))+
geom_jitter(aes(color=class),shape=1)+
geom_boxplot(outlier.color = NA,alpha=0.3)+
#scale_fill_gradient(low = "white",high = "red3")+
scale_fill_manual(values = cols)+
scale_color_manual(values = cols)+
facet_wrap(facets = ~annot,nrow = 1)+
theme_classic()+
theme(axis.text.x = element_blank(),
strip.text.x = element_text(size = 4))
pdf(file = "figures/figure_03/panel_A/annot_enrichment_boxplots_2018.pdf",
width = 13.92,height = 3.22)
print(p)
dev.off()
#####Figure 3B - networks####
#plot term-term network with nodes colored according to enrichment values
#in each disease in 2018
terms_in_network <- V(g)$name
g2 <- g
# Wide term x disease matrix of 2018 enrichment scores (reshape2::dcast);
# missing term/disease combinations become 0, rows are reordered to match
# the graph's vertex order.
dis_enrichment <- enr_res_all_sig %>%
mutate(score=-log(p.adjust)) %>%
dcast(formula = ID~dis,value.var = "score") %>%
filter(ID %in% terms_in_network)
dis_enrichment[is.na(dis_enrichment)] <- 0
dis_enrichment <- dis_enrichment[match(V(g2)$name,dis_enrichment$ID),]
# Trim a single trailing space from term IDs (same cleanup as elsewhere).
dis_enrichment <- dis_enrichment %>%
mutate(ID=ifelse(str_sub(ID,
start = nchar(ID),
end = nchar(ID))==" ",
no=ID,
yes=str_sub(ID,
start = 1,
end = nchar(ID)-1)
))
nodes_3 <- nodes_2 %>%
dplyr::select(-label) %>%
left_join(dis_enrichment,by=c("node"="ID"))
# For each disease, color nodes by log(enrichment + 1) and write two PDFs:
# the full network and a per-module faceted version.
for(i in 1:length(top_9)){
dis <- top_9[i]
dis_fil <- gsub(dis,pattern = " ",replacement = "_")
col <- which(colnames(dis_enrichment)==dis)
V(g2)$enrichment <- log(dis_enrichment[,col]+1)
fil1 <- paste0("figures/figure_03/panel_B/term_term_network_full_enrichment_",dis_fil,".pdf")
pdf(file = fil1,
width = 7.3,height = 6.6)
p <- ggraph(graph = g2,layout = "auto")+
geom_edge_link(aes(color=log(weight)))+
geom_node_point(aes(size=degree,fill=enrichment),color="black",pch=21)+
scale_fill_gradient(low = "white",high = "red3")+
scale_edge_color_continuous(low = "white",high = "grey40")+
theme_void()+
ggtitle(dis)
print(p)
dev.off()
fil2 <- paste0("figures/figure_03/panel_B/term_term_network_mods_enrichment_",dis_fil,".pdf")
pdf(file = fil2,
width = 12.38,height = 8.06)
p <- ggraph(graph = g2,layout = "auto")+
geom_edge_link(aes(color=log(weight)))+
geom_node_point(aes(size=degree,fill=enrichment),color="black",pch=21)+
scale_fill_gradient(low = "white",high = "red3")+
scale_edge_color_continuous(low = "white",high = "grey40")+
theme_void()+
theme(legend.position = "none")+
facet_nodes(facets = ~mod)+
theme(strip.text.x = element_text(size = 7))+
ggtitle(dis)
print(p)
dev.off()
}
####Figure 3B boxplots####
# Per-module enrichment-score boxplots for six highlighted diseases in 2018.
# BUG FIX: the original piped from `enrichment_heatmap_plot`, which is never
# defined anywhere in this script and would error. The Figure S3 section
# builds the complementary plot (all remaining diseases, the inverse of the
# `top_9[c(18,26,4,17,10,2)]` filter) from `enrichment_boxplot_plot`
# (the year-2018 slice of enrichment_df_plot), so that object is used here.
p <- enrichment_boxplot_plot %>%
  #filter(dis %in% top_9[c(25,26,20,19,1,2,6,12,11,10)]) %>%
  filter(dis %in% top_9[c(18,26,4,17,10,2)]) %>%
  # group_by(dis,class,annot) %>%
  # summarise(score=mean(score)) %>%
  arrange(class,dis,desc(score)) %>%
  #mutate(dis=factor(dis,levels = top_9[c(25,26,20,19,1,2,6,12,11,10)])) %>%
  mutate(dis=factor(dis,levels = top_9[c(18,26,4,17,10,2)])) %>%
  mutate(annot=factor(annot,levels = unique(annot))) %>%
  ggplot(aes(x=annot,y=log(score),fill=class))+
  geom_jitter(aes(color=class),shape=1)+
  geom_boxplot(outlier.color = NA,alpha=0.3)+
  #scale_fill_gradient(low = "white",high = "red3")+
  scale_fill_manual(values = cols)+
  scale_color_manual(values = cols)+
  coord_flip()+
  facet_wrap(facets = ~dis,nrow = 1)+
  theme_minimal()+
  theme(axis.text.y = element_text(size = 7),
        strip.text.x = element_text(size = 5),
        legend.position = "none")
pdf(file = "figures/figure_03/panel_B/annot_enrichment_boxplots_per_dis.pdf",
    width = 9,height = 3)
print(p)
dev.off()
####Figure S3 - boxplots####
# Same per-module boxplots for all top-9 diseases NOT shown in Figure 3B
# (the complement of the top_9[c(18,26,4,17,10,2)] selection).
p <- enrichment_boxplot_plot %>%
#filter(dis %in% top_9[c(25,26,20,19,1,2,6,12,11,10)]) %>%
filter(!dis %in% top_9[c(18,26,4,17,10,2)]) %>%
# group_by(dis,class,annot) %>%
# summarise(score=mean(score)) %>%
arrange(class,dis,desc(score)) %>%
#mutate(dis=factor(dis,levels = top_9[c(25,26,20,19,1,2,6,12,11,10)])) %>%
mutate(dis=factor(dis,levels = unique(dis))) %>%
mutate(annot=factor(annot,levels = unique(annot))) %>%
ggplot(aes(x=annot,y=log(score),fill=class))+
geom_jitter(aes(color=class),shape=1)+
geom_boxplot(outlier.color = NA,alpha=0.3)+
#scale_fill_gradient(low = "white",high = "red3")+
scale_fill_manual(values = cols)+
scale_color_manual(values = cols)+
coord_flip()+
facet_wrap(facets = ~dis,nrow = 3)+
theme_minimal()+
scale_x_discrete(name="cluster")+
scale_y_continuous(name="log(enrichment score)")+
theme(axis.text.y = element_text(size = 7),
strip.text.x = element_text(size = 5),
legend.position = "none")
pdf(file = "figures/figure_S3/annot_enrichment_boxplots_all_dis.pdf",
width = 10,height = 6.6)
print(p)
dev.off()
####Figure 3C####
#plot yearly enrichment by mod and dis category
# Ridgeline plot of mean yearly enrichment score per disease, faceted by
# module annotation.
# NOTE(review): geom_ridgeline() and theme_ridges() come from the ggridges
# package, which is never loaded in this script -- add library(ggridges) to
# the package block at the top, or this section errors.
annots <- unique(enrichment_df_plot$annot)
p <- enrichment_df_plot %>%
group_by(dis,class,year,annot) %>%
summarise(score=mean(score)) %>%
ungroup() %>%
mutate(class=factor(class,levels = c("INFECTIOUS","INFLAMMATORY","PSYCHIATRIC"))) %>%
arrange(class) %>%
mutate(dis=factor(dis,unique(dis))) %>%
mutate(annot=factor(annot,unique(annot))) %>%
ggplot(aes(x=year,y=dis,fill=class,color=class))+
geom_ridgeline(aes(height=score),scale=0.05,alpha=0.274,size=.2)+
scale_fill_manual(values=c("#68ad36", "#ff743e", "#00b7da"))+
scale_color_manual(values=c("#68ad36", "#ff743e", "#00b7da"))+
scale_x_continuous(breaks=c(1990,2005,2018))+
theme_ridges(font_size = 5)+
theme_minimal()+
theme(legend.position = 'none',
strip.text.x = element_text(size = 6,angle = 30),
axis.text.x = element_blank(),
axis.text.y = element_text(size = 6))+
facet_wrap(facets = ~annot,nrow=1)
pdf(file = "figures/figure_03/panel_C/annot_enrichment_evolution_top9.pdf",
width = 9.4,height = 5.4)
print(p)
dev.off()
|
4c33ca7a83788642706213c5ccb2154df1f88bc2
|
d8173649c1613b14bde4cf72317d5d56f8ba7b88
|
/man/BUSexample_data.Rd
|
79366192fdc451bdd32de3783c1a080589d30def
|
[] |
no_license
|
XiangyuLuo/BUScorrect
|
477be4a08de6be8484bc4ec79ccdf77a488a0be3
|
1a3fc2b0d65c4a9d1421347a4f8ab5486e12ecda
|
refs/heads/master
| 2020-03-21T15:48:34.198860
| 2019-06-14T09:40:05
| 2019-06-14T09:40:05
| 138,570,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,597
|
rd
|
BUSexample_data.Rd
|
\name{BUSexample_data}
\alias{BUSexample_data}
\title{
A simulated data set
}
\description{A simulated data set for demonstrating how to use the BUScorrect package}
\examples{
\dontrun{
#This data set is simulated according to the following R code
rm(list = ls(all = TRUE))
set.seed(123456)
B <- 3
#total number of batches
K <- 3
#total number of subtypes
G <- 2000
#total number of genes
pi <- matrix(NA, B, K)
# pi[b,k] stands for the proportion of kth subtype in bth batch
pi[1, ] <- c(0.2, 0.3, 0.5)
pi[2, ] <- c(0.4, 0.2, 0.4)
pi[3, ] <- c(0.3, 0.4, 0.3)
#total number of samples in each batch.
n_vec <- rep(NA, B)
#n_vec[b] represents the total number of samples in batch b.
n_vec <- c(70, 80, 70)
#Data list
example_Data <- list()
#baseline expression level
alpha <- rep(2, G)
#subtype effect
mu <- matrix(NA, G, K)
#subtype effect, mu[g,k] stands for the kth-subtype effect of gene g
mu[ ,1] <- 0
#the first subtype is taken as the baseline subtype
#the subtype effect of subtype 1 is set to zero
mu[ ,2] <- c(rep(2,G/20), rep(0,G/20),rep(0, G-G/20-G/20))
mu[ ,3] <- c(rep(0,G/20), rep(2,G/20),rep(0, G-G/20-G/20))
#batch effect
gamma <- matrix(NA, B, G)
#'location' batch effect of gene g in batch b
gamma[1, ] <- 0
#the first batch is taken as the reference batch without batch effects
#the batch effect of batch 1 is set to zero
gamma[2, ] <- c(rep(3,G/5),rep(2,G/5),rep(1,G/5),
rep(2,G/5),rep(3,G/5))
gamma[3, ] <- c(rep(1,G/5),rep(2,G/5),rep(3,G/5),
rep(2,G/5),rep(1,G/5))
sigma_square <- matrix(NA, B,G)
#sigma_square[b,g] denotes the error variance of gene g in batch b.
sigma_square[1,] <- rep(0.1, G)
sigma_square[2,] <- rep(0.2, G)
sigma_square[3,] <- rep(0.15, G)
Z <- list()
#subtype indicator. Z[b,j] represents the subtype of sample j in batch b
Z[[1]] <- as.integer(c(rep(1,floor(pi[1,1]*n_vec[1])),rep(2,floor(pi[1,2]*n_vec[1])),
rep(3,floor(pi[1,3]*n_vec[1]))))
Z[[2]] <- as.integer(c(rep(1,floor(pi[2,1]*n_vec[2])),rep(2,floor(pi[2,2]*n_vec[2])),
rep(3,floor(pi[2,3]*n_vec[2]))))
Z[[3]] <- as.integer(c(rep(1,floor(pi[3,1]*n_vec[3])),rep(2,floor(pi[3,2]*n_vec[3])),
rep(3,floor(pi[3,3]*n_vec[3]))))
for(b in 1:B){ #generate data
num <- n_vec[b]
example_Data[[b]] <- sapply(1:num, function(j){
tmp <- alpha + mu[ ,Z[[b]][j]] + gamma[b, ] +
rnorm(G, sd = sqrt(sigma_square[b, ]))
tmp
})
}
BUSexample_data <- example_Data
}
}
|
588790de7c14f1e47d0c0a47e3813bad3ee7e4c3
|
c6e2b67fd3c237175098c1be621541dba2170ace
|
/tests/testthat/test-keyed-df.R
|
0ffd78276592112ca0e9304a67c93435d96b7b61
|
[] |
no_license
|
echasnovski/keyholder
|
666ff734edcb953dcb3161e5aab6294a46c5a4a1
|
f950226fe4692e15e9960a7d55a179b9e8ad9e4a
|
refs/heads/master
| 2023-03-16T14:20:15.738601
| 2023-03-12T09:59:02
| 2023-03-12T09:59:02
| 96,685,600
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,050
|
r
|
test-keyed-df.R
|
context("keyed-df")
# Input data --------------------------------------------------------------
# A keyed copy of mtcars (keyed by `vs` and `am`) and its extracted keys,
# shared by the tests below.
df <- mtcars
df_keyed <- df %>% key_by(vs, am)
keys_df <- keys(df_keyed)
# is_keyed_df -------------------------------------------------------------
test_that("is_keyed_df works", {
expect_true(is_keyed_df(df_keyed))
expect_false(is_keyed_df(df))
# Carrying the "keyed_df" class alone is not enough.
class(df) <- c("keyed_df", "data.frame")
expect_false(is_keyed_df(df))
# A "keys" attribute that is a bare vector, a matrix, or a data frame with
# a mismatched row count must all be rejected.
attr(df, "keys") <- seq_len(nrow(df) - 1)
expect_false(is_keyed_df(df))
attr(df, "keys") <- matrix(seq_len(nrow(df) - 1), ncol = 1)
expect_false(is_keyed_df(df))
attr(df, "keys") <- data.frame(x = seq_len(nrow(df) - 1))
expect_false(is_keyed_df(df))
# A non-data-frame object is rejected even with a valid keys attribute.
df_bad_keyed <- add_class(df_keyed[[1]], "keyed_df")
attr(df_bad_keyed, "keys") <- keys_df
expect_false(is_keyed_df(df_bad_keyed))
df_mat <- as.matrix(df)
class(df_mat) <- c("keyed_df", "matrix")
attr(df_mat, "keys") <- matrix(1:32, ncol = 1)
expect_false(is_keyed_df(df_mat))
})
# is.keyed_df -------------------------------------------------------------
test_that("is.keyed_df works", {
# `is.keyed_df` is an exact alias of `is_keyed_df`.
expect_identical(is.keyed_df, is_keyed_df)
})
# print -------------------------------------------------------------------
test_that("print.keyed_df works", {
# The printed header mentions the keying and the key column names.
expect_output(print(df_keyed), "keyed object.*vs.*am")
expect_output(
df_keyed %>% remove_keys(everything(), .unkey = FALSE) %>% print(),
"keyed object.*no.*key"
)
})
# [ -----------------------------------------------------------------------
test_that("`[.keyed_df` works", {
i_idx <- 1:10
j_idx <- 1:3
# Row subsetting must subset the keys in step with the data.
output_1 <- df_keyed[i_idx, j_idx]
output_ref_1 <- df[i_idx, j_idx] %>%
assign_keys(keys_df[i_idx, ])
expect_identical(output_1, output_ref_1)
# Column-only subsetting leaves the keys untouched.
output_2 <- df_keyed[, j_idx]
output_ref_2 <- df[, j_idx] %>%
assign_keys(keys_df)
expect_identical(output_2, output_ref_2)
# Zero-column selection still keeps row-subsetted keys.
output_3 <- df_keyed[i_idx, logical(0)]
output_ref_3 <- df[i_idx, logical(0)] %>%
assign_keys(keys_df[i_idx, ])
expect_identical(output_3, output_ref_3)
})
|
5843768b6d2afeb9be7f9627e71dbb9b79f5bdc0
|
21642d86c73f6307380dead06681040f73b89971
|
/scripts/historical_lakeMAGs_reassembly_metadata_organization.R
|
7ebd7c897eb03f16c58dc83a0d1585bd25fde151
|
[] |
no_license
|
elizabethmcd/Lake-MAGs
|
a89f85ff078344b061239886ba393e5fc2d3d0e6
|
ec745695148c55d497e043a91c38be745faec540
|
refs/heads/master
| 2022-02-03T02:23:17.929130
| 2022-01-24T04:03:26
| 2022-01-24T04:03:26
| 200,298,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,189
|
r
|
historical_lakeMAGs_reassembly_metadata_organization.R
|
# Organizing re-assembly of Lake MAGs
library(tidyverse)

# Merge one lake/layer's CheckM stats, GTDB-tk classifications, and final
# bin list into a single per-bin metadata table. This replaces four
# copy-pasted read/rename/join/select blocks with one helper.
#
# checkm_path : tab-separated CheckM stats file (no header row).
# gtdb_path   : tab-separated GTDB-tk classification file (header row with
#               a `user_genome` column, matching the CheckM genome IDs).
# bins_path   : whitespace-separated file listing the final bin names.
#
# Returns a data frame with one row per final bin: GTDB columns followed by
# CheckM quality columns, with the `lineage` and trailing unused (`un`)
# columns dropped — identical output to the original inline code.
build_bin_metadata <- function(checkm_path, gtdb_path, bins_path) {
  checkm <- read.delim(checkm_path, sep = "\t", header = FALSE)
  colnames(checkm) <- c("user_genome", "lineage", "completeness",
                        "contamination", "size_mbp", "contigs",
                        "percent_gc", "un")
  gtdb <- read.delim(gtdb_path, sep = "\t")
  bins <- read.table(bins_path)
  colnames(bins) <- c("user_genome")
  # Joins fall back to the shared "user_genome" column, as in the original.
  bins %>%
    left_join(gtdb) %>%
    left_join(checkm) %>%
    select(-un, -lineage)
}

# Trout Bog epilimnion
tb_epi_table <- build_bin_metadata(
  "results/troutBog_epi_historical/troutBog-epi-checkm-stats.tsv",
  "results/troutBog_epi_historical/troutBog-epi-gtdbtk.tsv",
  "results/troutBog_epi_historical/troutBog-epi-finalBins.txt")
# Trout Bog hypolimnion
tb_hypo_table <- build_bin_metadata(
  "results/troutBog_hypo_historical/troutBog-hypo-checkM.tsv",
  "results/troutBog_hypo_historical/troutBog-hypo-gtdb.tsv",
  "results/troutBog_hypo_historical/troutBog-hypo-finalBins.txt")
# Crystal Bog
crystal_table <- build_bin_metadata(
  "results/crystalBog_historical/new_spades_assemb/crystalBog-new-checkm-stats.tsv",
  "results/crystalBog_historical/new_spades_assemb/crystalBog-new-gtdb.tsv",
  "results/crystalBog_historical/new_spades_assemb/crystalBog-new-finalBins.txt")
# Mary
mary_table <- build_bin_metadata(
  "results/mary_historical/new_spades_assemb/mary-checkm-stats.tsv",
  "results/mary_historical/new_spades_assemb/mary-gtdb-classifications.tsv",
  "results/mary_historical/new_spades_assemb/mary-new-finalBins.txt")

# Write the merged per-bin metadata tables (same paths as before).
write.csv(tb_epi_table, "results/troutBog_epi_historical/troutBog_epi_historical_metadata_final.csv", quote = FALSE, row.names = FALSE)
write.csv(tb_hypo_table, "results/troutBog_hypo_historical/troutBog_hypo_historical_metadata_final.csv", quote = FALSE, row.names = FALSE)
write.csv(crystal_table, "results/crystalBog_historical/new_spades_assemb/crystalBog_historical_metadata_final.csv", quote = FALSE, row.names = FALSE)
write.csv(mary_table, "results/mary_historical/new_spades_assemb/mary_historical_metadata_final.csv", quote = FALSE, row.names = FALSE)
|
0d9eb9bce84ce83200bdde5a6e6886260eba44cc
|
0602fe83b9dbbb78abf83bb64eca577684a89b81
|
/ProjektR/extractRandC1Patches.r
|
cb7b7e0a6ee45b1ed270bac4a80cab929b9bfa69
|
[] |
no_license
|
Barteboj/WKIRO_ZWIERZETA
|
2ed62c1ff98a999cd98e98db4e12ba8838e65666
|
a6545b52b31b551e81bfc10b98ca0541d9bca7b1
|
refs/heads/master
| 2021-01-20T15:45:01.705984
| 2017-06-16T18:03:21
| 2017-06-16T18:03:21
| 90,793,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,345
|
r
|
extractRandC1Patches.r
|
# Extracts random prototype patches as part of training the C2
# classification system.
# Note: patches are extracted only from BAND 2 of the C1 output; extracting
# from all bands might help.
#
# cItrainingOnly    : list of training images
# numPatchSizes     : number of distinct patch sizes
# numPatchesPerSize : number of prototypes extracted for each size
# patchSizes        : vector of patch side lengths
#
# Returns cPatches: a list (one entry per patch size) of matrices whose
# columns are the flattened prototypes (patchSizes[j]^2 * 4 rows each).
extractRandC1Patches <- function(cItrainingOnly, numPatchSizes, numPatchesPerSize, patchSizes) {
  library('matlab')
  source('C1.r')
  nImages <- length(cItrainingOnly)
  #----Settings for Training the random patches--------#
  rot <- c(90, -45, 0, 45)
  c1ScaleSS <- c(1, 3)
  RF_siz <- c(11, 13)
  c1SpaceSS <- c(10)
  minFS <- 11
  maxFS <- 13
  div <- seq(4, 3.2, -0.05)
  Div <- div[3:4]
  #--- END Settings for Training the random patches--------#
  print('Initializing gabor filters -- partial set...')
  initGaborResult <- init_gabor(rot, RF_siz, Div)
  fSiz <- initGaborResult[[1]]
  filters <- initGaborResult[[2]]
  c1OL <- initGaborResult[[3]]
  numSimpleFilters <- initGaborResult[[4]]
  print('done')
  cPatches <- list()
  bsize <- c(0, 0)
  pind <- zeros(numPatchSizes, 1)
  # Preallocate one prototype matrix per patch size.
  for (j in seq_len(numPatchSizes)) {
    cPatches[[j]] <- zeros(patchSizes[j]^2 * 4, numPatchesPerSize)
  }
  for (i in seq_len(numPatchesPerSize)) {
    # Pick a random training image.
    ii <- floor(runif(1) * nImages) + 1
    print(paste(as.character(i), ' from ', as.character(numPatchesPerSize), ' done'))
    stim <- cItrainingOnly[[ii]]
    img_siz <- size(stim)
    c1Result <- C1(stim, filters, fSiz, c1SpaceSS, c1ScaleSS, c1OL)
    c1source <- c1Result[[1]]
    s1source <- c1Result[[2]]
    # Stack the band-2 orientation maps into a 3D array (new C1 interface).
    dimensions <- dim(c1source[[1]][[1]])
    len <- length(c1source[[1]])
    arrayDimensions <- c(dimensions, len)
    b <- array(dim = arrayDimensions)
    # BUG FIX: the original reused `i` as the inner loop index, shadowing the
    # outer patch counter within each iteration; a separate index is used.
    for (band in seq_len(len)) {
      b[, , band] <- c1source[[1]][[band]]
    }
    bsize[1] <- dim(b)[1]
    bsize[2] <- dim(b)[2]
    for (j in seq_len(numPatchSizes)) {
      # Random top-left corner for a patchSizes[j]-square crop; a stray
      # no-op `seq` statement from the original has been removed here.
      xy <- floor(runif(2) * (bsize - patchSizes[j])) + 1
      tmp <- b[seq(from = xy[1], length.out = patchSizes[j]),
               seq(from = xy[2], length.out = patchSizes[j]), ]
      pind[j] <- pind[j] + 1
      cPatches[[j]][, pind[j]] <- reshape(tmp, size(tmp))
    }
  }
  return(cPatches)
}
|
29dbbaf97eb66f4498e77e25991022357fcb7bbf
|
a3adc89eb1fcceb2d209dae732cde0c8a50183a5
|
/run_analysis.R
|
0d02bbe4a1e2ee2ba7472d8e5ec6cc3c310f475e
|
[] |
no_license
|
adshank/GCDproject
|
c7fb72924732fa4340907c0012a1509cbd9ae25a
|
2b786f93cac7e76412f51b7dcfe0b688159f5920
|
refs/heads/master
| 2021-01-12T12:07:37.330535
| 2016-10-29T23:15:02
| 2016-10-29T23:15:02
| 72,310,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,140
|
r
|
run_analysis.R
|
# run_analysis.R script, created by Adam Shankland, 10/29/2016
# Completes steps 1-4 in Getting and Cleaning Data course project, Coursera
# See README.md for more information about this script

# Set working directory (wd_path must be changed to run script on a different machine)
wd_path <- "C:/Users/adshan/Documents/Coursera/Getting & Cleaning Data/UCI HAR Dataset"
setwd(wd_path)

# -- STEP 1: Merge the training and test sets to create one data set -- #

# activity_labels links the class labels with their activity name
activity_labels <- read.table("activity_labels.txt")
names(activity_labels) <- c("ID", "Activity")

# features is a list of all features
features <- read.table("features.txt")
names(features) <- c("Feature_ID", "Feature_Name")

# ---------- Prepare training dataset ---------- #

# x_train is the training set, one column per feature
x_train <- read.table("train/X_train.txt")
names(x_train) <- features$Feature_Name

# y_train is the set of training labels (activity IDs), one per row of x_train
y_train <- read.table("train/y_train.txt")
names(y_train) <- "ID"

# Each row in subject_train identifies the subject (1 to 30) who performed
# the activity for each window sample
subject_train <- read.table("train/subject_train.txt")
names(subject_train) <- "Subject_ID"

# BUG FIX: the original built training_labels with merge(), which sorts the
# result by ID and therefore scrambles the row order relative to
# subject_train and x_train in the cbind() below, mislabeling every row.
# Looking the activity names up with match() preserves the original order.
training_labels <- data.frame(
  ID = y_train$ID,
  Activity = activity_labels$Activity[match(y_train$ID, activity_labels$ID)]
)
training_data <- cbind(training_labels, subject_train, x_train)

# ---------- Prepare test dataset ---------- #

# x_test is the test set
x_test <- read.table("test/X_test.txt")
names(x_test) <- features$Feature_Name

# y_test is the set of test labels
y_test <- read.table("test/y_test.txt")
names(y_test) <- "ID"

subject_test <- read.table("test/subject_test.txt")
names(subject_test) <- "Subject_ID"

# Same order-preserving label lookup as for the training set (see above)
test_labels <- data.frame(
  ID = y_test$ID,
  Activity = activity_labels$Activity[match(y_test$ID, activity_labels$ID)]
)
test_data <- cbind(test_labels, subject_test, x_test)

# Create complete dataset, consisting of training data and test data
complete_data <- rbind(training_data, test_data)

# Check for missing values in complete dataset (explicit print so the count
# is shown even when this script is source()d)
print(sum(is.na(complete_data)))

# -- STEP 2: Extract only the measurements on the mean and standard deviation for each measurement -- #
vector_of_locations <- grep(".*mean.*|.*std.*", ignore.case = TRUE, names(complete_data))
df_with_stats <- complete_data[vector_of_locations]
df_with_stats <- cbind(complete_data[, 1], complete_data[, 2], complete_data[, 3], df_with_stats)

# -- STEP 3: Uses descriptive activity names -- #
# Step 3 was started during step 1 + finished in step 4

# -- STEP 4: Appropriately label data set -- #
# Rename first three columns, which lost their header in the previous cbind
colnames(df_with_stats)[1] <- "ID"
colnames(df_with_stats)[2] <- "Activity"
colnames(df_with_stats)[3] <- "Subject_ID"

# Tidy up some of the column headers with the gsub command
colnames(df_with_stats) <- gsub("mean", "Mean", colnames(df_with_stats))
colnames(df_with_stats) <- gsub("std", "Std.Dev", colnames(df_with_stats))
colnames(df_with_stats) <- gsub("[()]", "", colnames(df_with_stats))
colnames(df_with_stats) <- gsub("[-]", "", colnames(df_with_stats))

# -- STEP 5: Create a second, independent tidy data set with the avg. of each
# variable for each activity and each subject -- #
final_dataset <- df_with_stats
# Average every measurement column (everything after ID/Activity/Subject_ID).
# ncol() replaces the original hard-coded 4:89 so the script keeps working
# if the number of matched mean/std columns changes.
final_dataset <- aggregate(final_dataset[, 4:ncol(final_dataset)],
                           list(final_dataset$Activity, final_dataset$Subject_ID), mean)
names(final_dataset)[1] <- "Activity"
names(final_dataset)[2] <- "Subject_ID"

# Write out tidy dataset
write.table(final_dataset, file = "./tidy_dataset.txt", row.names = FALSE)
|
d153505120044aef410b17355da9ef0a1cdba389
|
9a23d27ef39a865c3ebc521624cb7ab7c2e107d3
|
/R/rstudio_addins.R
|
88b8c18a8448e9f26313a611b4a7bb83b929c30b
|
[] |
no_license
|
conradbm/knitrdata
|
8c86cb83ff049b66fb07db8ab36e0d94e94a8f17
|
dccfe4922d4f4c386544cca6e15923717cd20c9e
|
refs/heads/master
| 2022-11-13T09:36:16.258977
| 2020-07-18T20:46:56
| 2020-07-18T20:46:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,003
|
r
|
rstudio_addins.R
|
# Functions meant to be used as Rstudio addins ---------------------------------------
# Some minor helper functions
# TRUE when x is NULL or the empty string "".
isemp <- function(x) {
  is.null(x) || x == ""
}
# Return the first token of each string in `x` after splitting on `split`.
#
# Uses vapply() instead of sapply() so the result is always a character
# vector: sapply() silently returns list() for zero-length input, which is
# type-unstable for downstream callers.
firstword <- function(x, split = " ") {
  vapply(strsplit(x, split = split), function(y) y[1], character(1))
}
# Function for creating a data chunk --------------------
#' Invoke Shiny gadget to create a data chunk
#'
#' As different elements of the data chunk are specified, other options will be modified
#' as is likely to be useful. For example, if a binary file is uploaded, then the \code{format}
#' will be set to \code{binary}, the \code{encoding} will be set to \code{base64} and the
#' \code{Encode data?} option will be checked. If these options are not appropriate, then they can
#' be altered afterwards.
#'
#' When the \code{Create chunk} button is clicked, the function will return the chunk contents
#' including header and tail.
#'
#' @param title Text to place in title bar of gadget
#' @param infobar HTML content to place in information bar at top of gadget
#'
#' @return Invisibly returns the text of the data chunk as a character vector, one line per element.
#'
#' @examples
#' \dontrun{
#' create_data_chunk_dialog()
#' }
#'
#' @export
#'
#' @family Chunk tools
#' @author David M. Kaplan \email{dmkaplan2000@@gmail.com}
#' @encoding UTF-8
create_data_chunk_dialog = function (
    title="Data chunk creator",
    infobar="<big><b>Fill out, then click above to create chunk</b></big>") {
  # shiny/miniUI are optional dependencies; fail early with a clear message.
  pkgs = c("shiny","miniUI")
  if (!all(sapply(pkgs,requireNamespace,quietly=TRUE)))
    stop("This function requires that the following packages be installed: ",
         paste(pkgs,collapse=", "))
  # ---- Gadget UI: title bar plus a form of data-chunk options ----
  ui <- miniUI::miniPage(
    miniUI::gadgetTitleBar(title,left = miniUI::miniTitleBarCancelButton(),
                           right = miniUI::miniTitleBarButton("done", "Create chunk", primary = TRUE)),
    miniUI::miniContentPanel(
      shiny::uiOutput('infobar'),
      shiny::hr(),
      shiny::fileInput("filename","Data file: "),
      shiny::textInput("chunk_label","Chunk label: ",placeholder="mydatachunk",width="100%"),
      shiny::fillRow(
        height="40pt",
        shiny::radioButtons("format","Format: ",choices=c("text","binary"),selected="binary",inline=TRUE),
        shiny::radioButtons("encoding","Encoding: ",choices=c("asis","base64","gpg"),select="base64",inline=TRUE)
      ),
      # Placeholder filled in only when GPG encoding is selected (see server).
      shiny::uiOutput("gpg_receivers"),
      shiny::fillRow(
        height="40pt",
        shiny::checkboxInput("md5sum","Do MD5 sum check? ",value=TRUE),
        shiny::checkboxInput("encode","Encode the data? ",value=TRUE)
      ),
      shiny::textInput("output.var","Output variable name (no quotes): ",placeholder="df",width="100%"),
      shiny::textInput("output.file","Output file name (no quotes): ",placeholder="filepath/filename.ext",
                       width="100%"),
      shiny::textInput("loader.function","Loader function: ",placeholder="read.csv",width="100%"),
      shiny::textInput("eval","Evaluate chunk? ",value="TRUE",width="100%"),
      shiny::checkboxInput("echo","Echo chunk contents? ",value=FALSE),
      shiny::textInput("chunk_options_string","Additional chunk options (as they would appear in chunk header): ",
                       placeholder="loader.ops=list(header=FALSE), max.echo=5",width="100%")
    )
  )
  server <- function(input, output, session) {
    # makechunk flips to TRUE only after all "done" validations pass.
    rv = shiny::reactiveValues(makechunk=FALSE)
    output$infobar = shiny::renderUI(shiny::HTML(infobar))
    # Add receivers list if GPG chosen
    shiny::observeEvent(input$encoding,{
      switch(
        input$encoding,
        "gpg" = {
          # If gpg not installed warned and return to encoding=base64
          if (!requireNamespace("gpg")) {
            shiny::showModal(shiny::modalDialog("gpg package required for gpg encoding."))
            shiny::updateRadioButtons(session,"encoding",selected="base64")
            return()
          }
          keys = gpg::gpg_list_keys()
          output$gpg_receivers = shiny::renderUI(
            shiny::selectInput("keys","GPG receivers: ",paste(keys$id,keys$name),multiple=TRUE)
          )
        },
        {
          # Default branch: non-gpg encodings need no receiver selector.
          output$gpg_receivers = shiny::renderUI("")
        }
      )
    })
    # When data file set, determine defaults
    shiny::observeEvent(input$filename,{
      fn = input$filename$datapath
      isbin = is.file.binary(fn)
      fnext = tolower(tools::file_ext(fn))
      # Binary files default to base64 encoding; text files to asis.
      shiny::updateRadioButtons(session,"format",selected=ifelse(isbin,"binary","text"))
      shiny::updateRadioButtons(session,"encoding",selected=ifelse(isbin,"base64","asis"))
      #shiny::updateCheckboxInput(session,"encode",value=isbin)
      shiny::updateCheckboxInput(session,"md5sum",value=isbin) # To avoid issues with files not ending in newline
      shiny::updateTextInput(session,"loader.function",
                             placeholder=ifelse(isbin,"readRDS","read.csv"))
      # Try to guess loader.function
      lfs = c(csv="read.csv",rds="readRDS")
      if (fnext %in% names(lfs))
        shiny::updateTextInput(session,"loader.function",
                               value=as.character(lfs[fnext]))
    })
    shiny::observeEvent(input$output.file,{
      if (!isemp(input$output.file)) {
        # Default eval condition: only run the chunk if the output file is absent.
        shiny::updateTextInput(
          session,"eval",
          value=paste0("!file.exists(\"",as.character(input$output.file),"\")")
        )
      }
    })
    # When the "create chunk" button is clicked, return a value
    shiny::observeEvent(input$done, {
      # Validate the form; any failure pops a modal and aborts chunk creation.
      makechunk = TRUE
      fn = input$filename$datapath
      if (is.null(fn)) {
        makechunk = FALSE
        shiny::showModal(shiny::modalDialog("Data file must be specified."))
      }
      if (isemp(input$output.var) && isemp(input$output.file)) {
        makechunk = FALSE
        shiny::showModal(shiny::modalDialog("At least one of output variable or output file must be given."))
      }
      if (isemp(input$output.var) && !isemp(input$loader.function)) {
        makechunk = FALSE
        shiny::showModal(shiny::modalDialog("Loader function only has value if output variable name specified."))
      }
      if (input$encoding=="gpg" && isemp(input$keys)) {
        makechunk = FALSE
        shiny::showModal(shiny::modalDialog("One or more GPG receivers must be specified for GPG encoding."))
      }
      rv$makechunk = makechunk
    })
    # Once validation succeeds, build the chunk text and stop the gadget,
    # returning the chunk from runGadget().
    shiny::observeEvent(rv$makechunk, {
      if (rv$makechunk) {
        fn = input$filename$datapath
        md5sum = NULL
        if (input$md5sum)
          md5sum = tools::md5sum(fn)
        names(md5sum) = NULL
        # Read and encode data
        if (input$encode && input$encoding != 'asis') {
          ops = list()
          if (input$encoding == "gpg")
            ops = list(receiver=firstword(input$keys))
          chunk = data_encode(fn,encoding = input$encoding,options=ops)
        } else {
          chunk = readLines(fn)
        }
        chunk = do.call(c,strsplit(chunk,"\n")) # Break into lines
        chunk_label = NULL
        if (!isemp(input$chunk_label))
          chunk_label = input$chunk_label
        # Assemble the argument list for create_chunk(), only including
        # the options the user actually filled in.
        args = list(text=chunk,
                    chunk_label=chunk_label,
                    format=input$format,encoding=input$encoding)
        if (!isemp(input$output.var))
          args$output.var=input$output.var
        if (!isemp(input$output.file))
          args$output.file=input$output.file
        args$echo=input$echo
        if (!isemp(md5sum))
          args$md5sum = md5sum
        # Things going in extra args
        ev = NULL
        if (!isemp(input$eval))
          ev=paste0("eval=",input$eval)
        lf = NULL
        if (!isemp(input$loader.function))
          lf=paste0("loader.function=",input$loader.function)
        co = NULL
        if (!isemp(input$chunk_options_string))
          co = input$chunk_options_string
        co=paste(c(ev,lf,co),collapse=",")
        if (!isemp(co))
          args$chunk_options_string = co
        chunk = do.call(create_chunk,args)
        shiny::stopApp(chunk)
      }
    })
    # When the cancel button is clicked, return null
    shiny::observeEvent(input$cancel, {
      shiny::stopApp(NULL)
    })
  }
  chunk = shiny::runGadget(
    app=ui, server=server,
    viewer = shiny::dialogViewer(title))
  return(invisible(chunk))
}
# Insert data chunk ------------------------------
#' Invoke Rstudio addin to insert a data chunk in active source document
#'
#' As different elements of the data chunk are specified, other options will be modified
#' as is likely to be useful. For example, if a binary file is uploaded, then the \code{format}
#' will be set to \code{binary}, the \code{encoding} will be set to \code{base64} and the
#' \code{Encode data?} option will be checked. If these options are not appropriate, then they can
#' be altered afterwards.
#'
#' When the \code{Create chunk} button is clicked, the contents of the data chunk will be inserted
#' at the current cursor location of the active source document in the Rstudio editor.
#'
#' @param title Text to place in title bar of gadget.
#' @param chunk Text content of the data chunk. If not given (as is typically the case), the
#' \code{\link{create_data_chunk_dialog}} will be used to generate chunk contents.
#'
#' @return Returns \code{TRUE} if a chunk was inserted, \code{FALSE} otherwise.
#'
#' @examples
#' \dontrun{
#' insert_data_chunk_dialog()
#' }
#'
#' @export
#'
#' @family Chunk tools
#' @author David M. Kaplan \email{dmkaplan2000@@gmail.com}
#' @encoding UTF-8
insert_data_chunk_dialog = function (title="Data chunk inserter",
                                     chunk = NULL) {
  if (!requireNamespace("rstudioapi",quietly=TRUE))
    stop("The rstudioapi package must be installed to use this function.")
  # Active document stuff: capture the document and the line the cursor is
  # on; the chunk will be inserted at column 1 of that line.
  context = rstudioapi::getSourceEditorContext()
  ln = context$selection[[1]]$range$start["row"]
  dp = rstudioapi::document_position(ln,1) # position object
  # Infobar contents shown at the top of the chunk-creation dialog.
  infobar = paste0(
    "<big><b>",
    "Active document: ",ifelse(isemp(context$path),"<i>UNKNOWN</i>",context$path),
    "<br/>",
    "Line number: ",ln,
    "</b></big>")
  # Run dialog if chunk not given as argument
  if (is.null(chunk))
    chunk = create_data_chunk_dialog(title=title,infobar=infobar)
  # NULL chunk means the dialog was cancelled; insert nothing.
  if (is.null(chunk))
    return(invisible(FALSE))
  # Insert text (one string with newline separators, plus trailing newline).
  rstudioapi::insertText(dp,paste0(paste(chunk,collapse="\n"),"\n"),context$id)
  # Set position - sometimes causes errors for some unknown reason
  rstudioapi::setCursorPosition(dp,context$id)
  return(invisible(TRUE))
}
# Empty data chunk template ----------------------------
#' Insert an empty data chunk template in active source document
#'
#' This function is essentially the equivalent for data chunks
#' of the "Insert a new code chunk" menu item
#' available in Rstudio when a Rmarkdown document is open. It places at the current cursor
#' location an empty \code{data} chunk that can then be modified and filled in by hand.
#'
#' @return Returns \code{TRUE} if a chunk was inserted, \code{FALSE} otherwise.
#'
#' @examples
#' \dontrun{
#' insert_data_chunk_template()
#' }
#'
#' @export
#'
#' @family Chunk tools
#' @author David M. Kaplan \email{dmkaplan2000@@gmail.com}
#' @encoding UTF-8
insert_data_chunk_template = function() {
  # Build a skeleton data chunk whose body is a short instruction text.
  # NOTE(review): 'output.var=' and 'output.file=' are passed as *missing*
  # (empty) arguments — legal R syntax meaning create_chunk() sees them as
  # missing and falls back to its defaults, while loader.function is
  # explicitly NULL. Confirm this matches create_chunk()'s intended
  # handling of missing arguments.
  chunk = create_chunk(
    paste(
      sep="\n",
      "# Instructions:",
      "# 1) Fill in at least one of these chunk options: output.var & output.file",
      "# 2) Add or modify other chunk options",
      "# 3) Delete these instructions and replace with data"
    ),
    format="text",encoding="asis",output.var=,output.file=,loader.function=NULL)
  # Reuse the insertion helper with the pre-built chunk (no creation dialog).
  return(insert_data_chunk_dialog(chunk=chunk))
}
# Remove chunks ------------------------------
#' Invoke Rstudio addin to remove chunks from the active source document
#'
#' The dialog will present a data table list of chunks in the source document. Select the rows
#' that correspond to the chunks that you wish to remove and hit the \code{Remove chunks} button
#' to remove them.
#'
#' When the dialog is started, if the cursor is positioned inside a chunk in the source document,
#' then the row corresponding to this chunk will be selected by default.
#'
#' @param title Text to place in title bar of gadget.
#'
#' @return Returns \code{TRUE} if one or more chunks were removed, \code{FALSE} otherwise.
#'
#' @examples
#' \dontrun{
#' remove_chunks_dialog()
#' }
#'
#' @export
#'
#' @family Chunk tools
#' @author David M. Kaplan \email{dmkaplan2000@@gmail.com}
#' @encoding UTF-8
remove_chunks_dialog = function (title="Eliminate (data) chunks") {
  # All four UI/editor packages are optional dependencies; check up front.
  if (!requireNamespace("shiny",quietly=TRUE) || !requireNamespace("miniUI",quietly=TRUE) ||
      !requireNamespace("DT",quietly=TRUE) || !requireNamespace("rstudioapi",quietly=TRUE))
    stop("This function requires that the shiny, miniUI, DT and rstudioapi packages be installed. Please install them before running.")
  # ---- Gadget UI: document name plus a selectable table of chunks ----
  ui <- miniUI::miniPage(
    miniUI::gadgetTitleBar(title,left = miniUI::miniTitleBarCancelButton(),
                           right = miniUI::miniTitleBarButton("done", "Remove chunks", primary = TRUE)),
    miniUI::miniContentPanel(
      shiny::h3(shiny::textOutput('actdoc')),
      shiny::hr(),
      DT::dataTableOutput('chunklist')
    )
  )
  server <- function(input, output, session) {
    context = rstudioapi::getSourceEditorContext()
    output$actdoc = shiny::renderText(paste0("Active document: ",context$path))
    # All chunks (with start/end line numbers) in the active document.
    chunks = list_rmd_chunks(context$contents)
    rv = shiny::reactiveValues(chunks = chunks)
    # See if highlighted area touches any chunks
    se = c(context$selection[[1]]$range$start["row"],
           context$selection[[1]]$range$end["row"])
    row = which(chunks$start <= se[2] & se[1] <= chunks$end)
    if (length(row) > 0) {
      # Pre-select the chunk(s) the current cursor/selection overlaps.
      proxy = DT::dataTableProxy("chunklist",session)
      DT::selectRows(proxy,selected=row)
    }
    # render data table when list of chunks changed
    shiny::observeEvent(rv$chunks,{
      output$chunklist = DT::renderDataTable(rv$chunks,server=FALSE)
    })
    shiny::observeEvent(input$done, {
      rows = input$chunklist_rows_selected
      # Nothing selected: close the gadget and report FALSE (no removal).
      if(is.null(rows) || length(rows)==0)
        shiny::stopApp(FALSE)
      contents = context$contents
      chunks = rv$chunks[rows,]
      # Get full list of line numbers to remove
      se = as.data.frame(t(chunks[,c("start","end"),drop=FALSE]))
      lns = do.call(c,lapply(se,function(x) x[1]:x[2]))
      # Remove rows
      contents = contents[-1*lns]
      # Put into document
      rstudioapi::setDocumentContents(paste(contents,collapse="\n"),context$id)
      # Move the cursor to just above where the first removed chunk began.
      rstudioapi::setCursorPosition(
        rstudioapi::document_position(min(lns)-1,1),
        context$id)
      shiny::stopApp(TRUE)
    })
    # When the cancel button is clicked, return null
    shiny::observeEvent(input$cancel, {
      shiny::stopApp(FALSE)
    })
  }
  res = shiny::runGadget(
    app=ui, server=server,
    viewer = shiny::dialogViewer(title))
  return(invisible(res))
}
|
0db9a7e5e4bf76e594a805481db48a2ff4ec0571
|
8d8470305ca859cce51f3fe462c2b3eec7452a06
|
/RCode/textaug.R
|
a9ae7d5a1d690c5111268f4f28e6bdddb62ade7f
|
[] |
no_license
|
kmcalist682336/SimultaneousScaling
|
256555c4a6218dbf4bb8f8527f06b105291d6a00
|
30931c387f0579ceedddeba3aad00312680881a3
|
refs/heads/master
| 2020-03-21T08:15:31.568215
| 2018-06-22T21:11:35
| 2018-06-22T21:11:35
| 138,330,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 999
|
r
|
textaug.R
|
# Draw new latent augmentation values for an ordinal (text) data matrix.
#
# aa:    loading matrix (items x latent dims)
# bb:    score matrix (rows x latent dims); pred.mat = bb %*% t(aa)
# mstar: matrix of observed ordinal categories, row j matching pred.mat[j,]
# p:     number of entries per row of mstar (length of each pm/ms vector)
# d:     latent dimension (unused in this function; kept for interface
#        compatibility with callers)
#
# For each row j, latent values are drawn from truncated normals whose lower
# truncation points enforce the ordering of the observed categories, then
# standardized to mean 0 / sd 1. Returns a matrix whose row j holds the
# augmented values for row j of mstar.
# NOTE(review): the loop runs over 1:p rows, which assumes mstar is square
# (p rows of length p) — confirm against callers.
augment.text.data <- function(aa,bb,mstar,p,d){
  pred.mat <- bb%*%t(aa)
  # Sample standardized latent values for row j, respecting category order.
  aug.mat <- function(j){
    pm <- pred.mat[j,]
    ms <- mstar[j,]
    pmms <- data.table(cbind(seq(1,p),pm,ms))
    setnames(pmms,c("id","pm","ms"))
    setorder(pmms,"ms")
    new.augs <- c()
    um <- sort(unique(pmms$ms))
    # Each category's draws are truncated below by the max draw of the
    # previous (lower) category, enforcing monotonicity across categories.
    min.vals <- -Inf
    for(i in seq_along(um)){
      c.pmms <- pmms[ms == um[i]]
      nas <- rtruncnorm(dim(c.pmms)[1], a = min.vals, b = Inf, mean = c.pmms$pm, sd = 1)
      new.augs <- c(new.augs,nas)
      min.vals <- max(new.augs)
    }
    # Restore original entry order before standardizing.
    pmms <- pmms[,new.augs := new.augs]
    setorder(pmms,"id")
    new.augs <- pmms$new.augs
    new.augs <- (new.augs - mean(new.augs))/sd(new.augs)
    return(new.augs)
  }
  # BUG FIX: the original called sapply(1:p) with no FUN argument, which
  # errors at run time ('argument "FUN" is missing'). Apply aug.mat over
  # each row index (as the commented-out foreach version below does); each
  # call returns a length-p vector, so sapply yields a p x p matrix with
  # one column per j, transposed so rows correspond to j. The redundant
  # Vectorize() wrapper was dropped since sapply handles the iteration.
  new.mm <- sapply(seq_len(p), aug.mat)
  new.mm <- t(new.mm)
  # Parallel alternative kept from the original for reference:
  #cl <- makeCluster(4)
  #registerDoParallel(cl)
  #new.mm <- foreach(j = 1:p, .combine = "rbind",.packages = c("data.table","truncnorm")) %dopar% aug.mat(j)
  #stopCluster(cl)
  return(new.mm)
}
|
4aa4b6779abe51c27bf9ef487de875595b0d5f9e
|
203486d84e6759bdd402c17b4c40a8a36edce293
|
/WorkInProgress/Rolling_Attrition.R
|
c014da0d56aafa12c56996c3fcb93052b33ad4c3
|
[] |
no_license
|
ndeprey/MPX
|
596384e0f019c64df31f3fa7c6b2707e1b849802
|
10c633500f0f7721aa5baa4cd2cb2e077b71f803
|
refs/heads/master
| 2021-01-19T15:35:04.201252
| 2017-01-12T19:57:33
| 2017-01-12T19:57:33
| 21,352,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
r
|
Rolling_Attrition.R
|
# Per-user first/last listen dates -> weekly cohort attrition cross-tab.
users <- read.csv("Last_Listen_Date_10_03.csv")
# Parse the date columns (POSIXct first, then collapse to Date).
users$LastDayActive <- as.POSIXct(users$LastDayActive)
users$FirstDayActive <- as.POSIXct(users$FirstDayActive)
users$FirstDayActive <- as.Date(users$FirstDayActive)
users$LastDayActive <- as.Date(users$LastDayActive)
# Inclusive span (in days) between a user's first and last active day.
users$ActiveDateRange <- as.numeric(users$LastDayActive - users$FirstDayActive + 1)
# NOTE(review): assumes the CSV supplies an 'activeDays' column (count of
# days with activity) — confirm the column name/case against the input file.
users$pct_days_active <- users$activeDays / users$ActiveDateRange
# Bucket first/last active dates into calendar weeks (weeks start Sunday).
users$start_week <- cut(users$FirstDayActive, "weeks", start.on.monday = FALSE)
users$end_week <- cut(users$LastDayActive, "weeks", start.on.monday = FALSE)
library(reshape2)
# Cross-tabulate cohorts: rows = start week, columns = end week.
# NOTE(review): dcast() without value.var/fun.aggregate defaults to counting
# rows (length) with a warning — confirm counts are the intended values.
rolling_attr <- dcast(users, start_week ~ end_week)
|
1b432a7001e86ea07c6147aea119dff54849d0a9
|
fc7279546bff47fbc30953a9976c368ddc9daffe
|
/tase_index_selenium.r
|
0210905e7cc09bac0f7adcf25187df862c290585
|
[] |
no_license
|
githubfun/taseR
|
28a043df04202076e3128bbce2fd583b0f0d1522
|
d3ee1cd43434d18c5d0806a8b2d3271b9007e6fa
|
refs/heads/master
| 2017-12-01T11:27:15.859936
| 2015-07-07T03:33:03
| 2015-07-07T03:33:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,551
|
r
|
tase_index_selenium.r
|
# Scrape Tel Aviv Stock Exchange (TASE) index history with RSelenium by
# driving the TASE history-data web pages and parsing the result tables.
library(lubridate)
library(rvest)
library(XML)
library(plyr)
library(dplyr)
library(RSelenium)
library(stringr)
# NOTE(review): machine-specific path + setwd() in a script; adjust before
# running elsewhere.
setwd("C:\\Users\\yoni\\Documents\\GitHub\\tase")
# Start a local Selenium server and open a browser session.
RSelenium::startServer()
remDr <- remoteDriver()
remDr$open(silent = F)
# Indices to fetch and the date window (two days ago through today).
df.in=data.frame(Name=c("TA25","TA100"),indexID=c(142,137))
df.in$from.date=rep(format(Sys.Date()-days(2),"%d/%m/%Y"),2)
df.in$to.date=rep(format(Sys.Date(),"%d/%m/%Y"),2)
# ---- Daily index history: one page visit per index ----
tase.index.daily=ddply(df.in,.(Name),.fun = function(df){
  #set url
  url=paste0("http://www.tase.co.il/Eng/MarketData/Indices/MarketCap/Pages/IndexHistoryData.aspx?Action=1&addTab=&IndexId=",df$indexID)
  #navigate to webpage
  remDr$navigate(url)
  #enter ui variables (daily frequency + custom period radio buttons)
  webElem <- remDr$findElement(using = 'xpath', value = '//*[@id="ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_rbFrequency1"]')
  webElem$setElementAttribute(attributeName = 'checked',value = 'true')
  webElem <- remDr$findElement(using = 'xpath', value = '//*[@id="ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_rbPeriod8"]')
  webElem$setElementAttribute(attributeName = 'checked',value = 'true')
  # Fill the from/to date inputs via jQuery (the widgets resist sendKeys).
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_dailyFromCalendar_TaseCalendar_dateInput_TextBox').val('",df$from.date,"');"), args = list())
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_dailyToCalendar_TaseCalendar_dateInput_TextBox').val('",df$to.date,"');"), args = list())
  #click button
  remDr$executeScript("$('#trhistory0').find(':button').click();", args = list())
  #wait for data to load
  Sys.sleep(5)
  #import html table to parse
  webElem <- remDr$findElement(using='xpath',value = '//*[@id="ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_gridHistoryData_DataGrid1"]')
  out=htmlParse(remDr$getPageSource(),asText = T)
  dataNode=getNodeSet(out,"//table[contains(@id,'gridHistoryData_DataGrid1')]")
  #parse table into data.frame; coerce date columns and strip ,/% from numbers
  tase.out=readHTMLTable(dataNode[[1]],header = T)%>%
    mutate_each(funs(as.Date(.,"%d/%m/%Y")),contains("date"))%>%
    mutate_each(funs(as.numeric(gsub("[,|%]","",.))),
                -contains("date"),-ends_with("type"))
  return(tase.out)},
  .progress = "text")
# ---- Intraday index history ----
# NOTE(review): this block references df.in$indexID / df.in$from.date[1] /
# url[1] instead of the per-group df, so every ddply group fetches the FIRST
# index's page — looks unintended; confirm before relying on the output.
tase.index.intraday=ddply(df.in,.(Name),.fun = function(df){
  #set url
  url=paste0("http://www.tase.co.il/Eng/MarketData/Indices/MarketCap/Pages/IndexHistoryData.aspx?Action=1&addTab=&IndexId=",df.in$indexID)
  #navigate to webpage
  remDr$navigate(url[1])
  #enter ui variables
  webElem <- remDr$findElement(using = 'xpath', value = '//*[@id="ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_rbIndPeriod4"]')
  webElem$setElementAttribute(attributeName = 'checked',value = 'true')
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_InDayFromCalendar_TaseCalendar_dateInput_TextBox').val('",df.in$from.date[1],"');"), args = list())
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_InDayToCalendar_TaseCalendar_dateInput_TextBox').val('",df.in$to.date[1],"');"), args = list())
  #click button
  remDr$executeScript("$('#trhistory1').find(':button').click();", args = list())
  #wait for data to load
  Sys.sleep(2)
  #import html table to parse; intraday data is delivered as CSV links
  webElem <- remDr$findElement(using='xpath',value = '//*[@id="ctl00_SPWebPartManager1_g_b2f63986_2b4a_438d_b1b1_fb08c9e1c862_ctl00_HistoryData1_gridHistoryData_DataGrid1"]')
  webElems<-webElem$findChildElements("css selector","a")
  links=unlist(sapply(webElems,function(x){x$getElementAttribute('href')}))
  # Download each linked CSV (skipping its first line) and stack them.
  tase.out=mdply(links,.fun = function(x) read.csv(x,header = T,skip = 1))
  tase.out= tase.out%>%select(-X1)%>%
    rename(datetime=Time)%>%
    mutate(date=as.POSIXct(strptime(datetime,"%d/%m/%Y")),
           datetime=as.POSIXct(strptime(datetime,"%d/%m/%Y %H:%M:%S")))
  return(tase.out)},
  .progress = "text")
# ---- OTC history ----
# NOTE(review): this block uses df$companyID and df$shareID, which are NOT
# columns of df.in — as written it will fail; appears to be unfinished code
# copied from a per-company scraper. Confirm the intended input data frame.
tase.index.otc=ddply(df.in,.(Name),.fun = function(df){
  url=paste0("http://www.tase.co.il/Eng/general/company/Pages/companyHistoryData.aspx?companyID=",
             df$companyID,
             "&subDataType=0",
             "&shareID=",df$shareID)
  #navigate to webpage
  remDr$navigate(url)
  #set ui
  webElem <- remDr$findElement(using = 'xpath', value = '//*[@id="ctl00_SPWebPartManager1_g_301c6a3d_c058_41d6_8169_6d26c5d97050_ctl00_HistoryData1_rbPeriodOTC8"]')
  webElem$setElementAttribute(attributeName = 'checked',value = 'true')
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_301c6a3d_c058_41d6_8169_6d26c5d97050_ctl00_HistoryData1_calendarOTCFrom_TaseCalendar_dateInput_TextBox').val('",df$from.date,"');"), args = list())
  remDr$executeScript(paste0("$('#ctl00_SPWebPartManager1_g_301c6a3d_c058_41d6_8169_6d26c5d97050_ctl00_HistoryData1_calendarOTCTo_TaseCalendar_dateInput_TextBox').val('",df$to.date,"');"), args = list())
  #click button
  remDr$executeScript("$('#trhistory3').find(':button').click();", args = list())
  #wait for page to load
  Sys.sleep(5)
  #capture table
  out=htmlParse(remDr$getPageSource(),asText = T)
  #organise into data.frame
  dataNode =getNodeSet(out,("//table[contains(@id,'gridHistoryData_DataGrid1')]"))
  tase.out=readHTMLTable(dataNode[[1]],header = T)%>%mutate_each(funs(as.numeric(gsub("[,|%]","",.))),-contains("Date"))
  return(tase.out)},
  .progress = "text")
# Close all browser sessions when done.
remDr$closeall()
|
da970e9e4dbba8e13d8438e48b8c0a2b7cf72386
|
00cc47dde2afd5a66293af0b8b07f380f993d2af
|
/rprojectSPL/lib/invalid.R
|
407687544df92771b5fcf74ca17e52fb62731f6e
|
[
"MIT"
] |
permissive
|
UTexas80/gitSPL
|
277ea78a08559115b15ff3e9046e1b0e18938280
|
212fe631d8162dabf4593768f7ff6aacc0457026
|
refs/heads/master
| 2021-01-25T09:04:31.709072
| 2019-04-30T14:35:47
| 2019-04-30T14:35:47
| 93,775,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24
|
r
|
invalid.R
|
# Placeholder: an empty character vector of invalid entries.
invalid <- character(0)
|
a8f3c91deedcc2dee28d08197e0a0199d4b8256c
|
0ebf0950d351f32a25dadb64b4a256a8a9022039
|
/man/parseRegion.Rd
|
871efd71a6d0ac17cd39a4ccb4416141ef5415dc
|
[] |
no_license
|
HenrikBengtsson/aroma.cn.eval
|
de02b8ef0ae30da40e32f9473d810e44b59213ec
|
0462706483101b74ac47057db4e36e2f7275763c
|
refs/heads/master
| 2020-04-26T16:09:27.712170
| 2019-01-06T20:41:30
| 2019-01-06T20:41:30
| 20,847,824
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,433
|
rd
|
parseRegion.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% parseRegion.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{parseRegion}
\alias{parseRegion}
\title{Parses a ROC change-point region string}
\description{
Parses a ROC change-point region string in the format
'<sample>:Chr<chr>@<start>-<stop>,cp=<pos>+/-<width>,s=<state0>/<state1>',
where <sample> is a sample name, <chr> is an index, <start>, <stop>
and <pos> (<width>) are genomic locations (lengths) (in units of Mb),
and <state0> and <state1> are integers specifying the genomic state of
the two segments flanking the change point at <pos>.
}
\usage{
parseRegion(region, xScale=1e+06, ...)
}
\arguments{
\item{region}{A \code{\link[base]{character}} string.}
\item{xScale}{A positive \code{\link[base]{numeric}} specifying the unit length.}
\item{...}{Not used.}
}
\value{
Returns a named \code{\link[base]{list}}.
}
\examples{
reg <- parseRegion("TCGA-23-1027:Chr2@108-140,cp=124+/-0.5,s=0/1")
str(reg)
stateFcn <- makeTruth(reg)
print(reg)
data <- data.frame(chromosome=2, x=seq(from=122e6, to=126e6, by=0.2e6))
data$state <- stateFcn(data)
print(data)
}
\seealso{
\code{\link{makeTruth}}().
}
\author{Henrik Bengtsson}
\keyword{internal}
\keyword{utilities}
|
28578615b56339579a031d47bace0de92799a991
|
f8f3d53abf579dfbf6d49cfb59295b1c3ddc3fb2
|
/man/code2office.Rd
|
2a9e7bf911d6df6c75daaa93ad55f3708be6be4e
|
[] |
no_license
|
cardiomoon/rrtable
|
9010574549a6fc41015f89638a708c691c7975cf
|
8346fca2bb0dc86df949fb31738e1af90eeb5a70
|
refs/heads/master
| 2023-03-15T20:43:07.685721
| 2023-03-12T11:36:34
| 2023-03-12T11:36:34
| 127,721,282
| 3
| 2
| null | 2021-11-17T01:08:31
| 2018-04-02T07:32:08
|
R
|
UTF-8
|
R
| false
| true
| 1,332
|
rd
|
code2office.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/code2pptx.R
\name{code2office}
\alias{code2office}
\title{Save plot/ggplot code to Microsoft Powerpoint format}
\usage{
code2office(
...,
ggobj = NULL,
target = "Report",
append = FALSE,
title = "",
type = "pptx",
preprocessing = "",
plottype = "auto",
echo = FALSE,
parallel = FALSE,
left = 1,
top = 1,
width = NULL,
height = NULL,
aspectr = NULL
)
}
\arguments{
\item{...}{Further argument to be passed to function dml()}
\item{ggobj}{a ggplot object}
\item{target}{name of output file}
\item{append}{logical value}
\item{title}{Optional character vector of plot title}
\item{type}{"pptx" or "docx"}
\item{preprocessing}{A string of R code or ""}
\item{plottype}{character One of c("auto","plot","ggplot","emf")}
\item{echo}{logical. If true, show code.}
\item{parallel}{logical. If true, add two plots side by side}
\item{left}{left margin}
\item{top}{top margin}
\item{width}{desired width of the plot}
\item{height}{desired height of the plot}
\item{aspectr}{desired aspect ratio of the plot}
}
\description{
Save plot/ggplot code to Microsoft Powerpoint format
}
\examples{
\dontrun{
code2office(plot(iris))
require(ggplot2)
gg=ggplot(data=mtcars,aes(x=wt,y=mpg))+geom_point()
code2office(ggobj=gg)
}
}
|
84957d953119fe1863aeccbee9dffa21a844d934
|
40234ef2ad5efa4c566ff501f3972ab03b181bd9
|
/data/charm/Step4_apply_thredds_bias_correction_model.R
|
e59780d8454d22f3f07795796e00580c23059636
|
[] |
no_license
|
cfree14/domoic_acid
|
63fefd3c577d0cd277747254aa50f425401c438f
|
dfe6f4d9b94ad7a71c092c92bf63100a46cb3d0c
|
refs/heads/master
| 2023-07-15T10:28:49.815164
| 2021-08-25T22:31:47
| 2021-08-25T22:31:47
| 279,933,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,358
|
r
|
Step4_apply_thredds_bias_correction_model.R
|
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the calling session's objects;
# prefer running this script in a fresh R session instead.
rm(list = ls())
# Setup
################################################################################
# Packages
library(mgcv)
library(ncdf4)
library(raster)
library(tidyverse)
library(lubridate)
library(betareg)
# Directories
plotdir <- "data/charm/figures"
datadir <- "data/charm/processed"
gisdir <- "data/cdfw/gis_data/raw/StateWaterJurisdiction/"
# Read THREDDS data (3/5/2014-5/7/2019): particulate domoic acid (pDA) raster stack
pda_thredds <- raster::brick(file.path(datadir, "CHARM_THREDDS_DAP_20140305_to_20190507.grd"))
# Read ERDDAP data (6/19/2018-present)
pda_erddap <- raster::brick(file.path(datadir, "CHARM_ERDDAP_DAP_20180619_to_present.grd"))
# Read THREDDS-to-ERDDAP bias-correction model; loads objects from the
# .Rdata file into the workspace, including `gamfit` used by the
# prediction code below.
load(file.path(datadir, "thredds2erddap_data_and_model.Rdata"))
# Apply model to THREDDS data
################################################################################
# One-off job, disabled with `if(F)`: run the bias-correction model
# (`gamfit`) on every layer of the THREDDS brick and write the corrected
# brick to disk. Flip to if(TRUE) to re-run.
if(F){
  # Establish work flow with one layer
  ras <- pda_thredds[[1]]
  names(ras) <- "pda_thredds"  # predictor name the fitted model expects
  pred <- raster::predict(object=ras, model=gamfit, type="response")
  plot(pred)
  # Rename a layer to the model's predictor name, predict on the response
  # scale, then restore the original layer name.
  make_pred <- function(ras){
    orig_name <- names(ras)
    names(ras) <- "pda_thredds"
    pred <- raster::predict(object=ras, model=gamfit, type="response")
    names(pred) <- orig_name
    return(pred)
  }
  # Test function
  make_pred(ras=pda_thredds[[1]])
  # Loop through layers (result list is preallocated)
  n <- nlayers(pda_thredds)
  pred_list <- vector("list", n)
  for(i in 1:n){
    print(i)  # progress indicator; this loop is slow
    pred_list[[i]] <- make_pred(ras=pda_thredds[[i]])
  }
  # Convert to brick
  pred_stack <- raster::stack(pred_list)
  pred_brick <- raster::brick(pred_stack)
  # Export
  writeRaster(pred_brick, file=file.path(datadir, "CHARM_THREDDS_DAP_20140305_to_20190507_bias_corrected.grd"), overwrite=T)
}
# Confirm that it worked
################################################################################
# Sanity check, disabled with `if(F)`: re-read the bias-corrected brick,
# compute the statewide mean pDA risk per layer, and plot it against the
# other server series.
if(F){
  # Read data
  pda_thredds_fixed <- raster::brick(file.path(datadir, "CHARM_THREDDS_DAP_20140305_to_20190507_bias_corrected.grd"))
  # Read statewide means
  charm_means <- read.csv(file=file.path(datadir, "CHARM_pn_pda_pca_means_by_server.csv"), as.is=T) %>%
    mutate(date=ymd(date),
           variable=recode(variable, "Particulate domoic acid"="pDA"))
  # Mean cell value of each layer in a raster brick (NA cells ignored).
  # vapply(seq_len(...)) instead of sapply(1:nlayers(...)) so the result
  # is always a numeric vector and an empty brick yields numeric(0)
  # rather than iterating over c(1, 0).
  calc_avg_by_layer <- function(rbrick){
    vals <- vapply(seq_len(nlayers(rbrick)),
                   function(x) mean(getValues(rbrick[[x]]), na.rm=TRUE),
                   numeric(1))
    return(vals)
  }
  # Build a (server, variable, date, risk) tibble from a brick; layer
  # names look like "X2014.03.05", so strip the "X" before parsing dates.
  build_df <- function(rbrick, var, server){
    vals <- calc_avg_by_layer(rbrick)
    dates <- names(rbrick) %>% gsub("X", "", .) %>% ymd()
    df <- tibble(server=server, variable=var, date=dates, risk=vals)
    return(df)
  }
  # Calculate mean of corrected
  pda_thredds_fixed_avg <- build_df(pda_thredds_fixed, var="pDA", server="THREDDS-fixed")
  write.csv(pda_thredds_fixed_avg, file=file.path(datadir, "CHARM_pda_means_thredds_fixed.csv"))
  # Merge and plot: corrected series vs existing non-THREDDS series
  charm_means1 <- bind_rows(charm_means, pda_thredds_fixed_avg) %>%
    filter(variable=="pDA" & server!="THREDDS")
  # Plot data
  g <- ggplot(charm_means1, aes(x=date, y=risk, color=server)) +
    # Add lines
    geom_line(lwd=0.3) +
    # Axes
    ylim(0,1) +
    scale_x_date(date_breaks = "1 year", date_labels = "%Y") +
    # Labels
    labs(x="", y="Risk") +
    scale_color_discrete(name="Server") +
    theme_bw()
  g
}
|
6d637b27bc6c49e0f3f62abec2890c0848e322ff
|
4a2c6f223ff6063640475840209927bf85a9f33b
|
/lostruct/man/getMaxRad.Rd
|
9a011a2be525319809724f7cca5ff79cb18dc81b
|
[] |
no_license
|
petrelharp/local_pca
|
d69cc4122c381bf981af65a8beb8914fabede4d5
|
abf0c31da5cd74a1de62083580d482f5bd08d7de
|
refs/heads/master
| 2023-06-25T18:12:39.355780
| 2023-06-14T04:39:12
| 2023-06-14T04:39:12
| 47,361,457
| 61
| 13
| null | 2021-02-25T17:20:18
| 2015-12-03T21:23:41
|
HTML
|
UTF-8
|
R
| false
| true
| 296
|
rd
|
getMaxRad.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enclosing_circle.R
\name{getMaxRad}
\alias{getMaxRad}
\title{Vertex that produces the circle with the maximum radius}
\usage{
getMaxRad(xy, S)
}
\description{
Vertex that produces the circle with the maximum radius
}
|
b5c071458dbe48e909cbf90aca2c3d7bf5335171
|
543d87fd0201639b00e72950e80ecd98389d6de2
|
/R/functions.R
|
eb563f5f0ea4a94dc76a419aa11297ac9c818eb7
|
[
"MIT"
] |
permissive
|
mahdiprs/Group-Sampling
|
d9d3b72d061876ce1f4bea51ade01ec6331caa08
|
20fc0a22eb2f37926d7753518087364700b3031e
|
refs/heads/master
| 2023-05-10T18:20:07.478444
| 2023-05-10T00:51:26
| 2023-05-10T00:51:26
| 416,548,185
| 0
| 0
| null | 2021-10-13T01:28:34
| 2021-10-13T01:18:30
|
R
|
UTF-8
|
R
| false
| false
| 2,359
|
r
|
functions.R
|
##########################################################################
# Functions to estimate statistics introduced in
# Sections 3.1 - 3.3 of journal paper
##########################################################################
library(dplyr)
library(actuar)
library(extraDistr)
library(sqldf)
library(kSamples)
library(splus2R)
# Unnormalised posterior integrand for the prevalence p:
#   p^(alpha - 1) * (1 - (1 - p)^Nbar)^ty * (1 - p)^(beta - 1 + Nbar*(b - ty))
# i.e. a Beta(alpha, beta) prior kernel times the likelihood contribution
# of ty positive and (b - ty) negative groups of size Nbar.
fp <- function(p, alpha, beta, Nbar, ty, b) {
  prior_part <- p^(alpha - 1)
  positive_part <- (1 - (1 - p)^Nbar)^ty
  negative_part <- (1 - p)^(beta - 1 + Nbar * (b - ty))
  prior_part * positive_part * negative_part
}
# Posterior density kernel for p. Currently returns the unnormalised
# kernel fp() unchanged: the normalising integral
#   integrate(function(x) fp(x, alpha, beta, Nbar, ty, b), 0, 1)
# was disabled in the original source, so no division is performed.
pf <- function(p, alpha, beta, Nbar, ty, b) {
  fp(p, alpha, beta, Nbar, ty, b)
}
# Term for the distribution of Tx given ty in the non-clustered (NC)
# model. For l >= ty this evaluates
#   sum_{r=0}^{ty} (-1)^r * exp( lbeta(l + alpha, N + beta - l)
#                                + lchoose(ty, r)
#                                + lchoose(N - (b - ty + r) * Nbar, l) )
# on the log scale for numerical stability, and returns 0 for l < ty.
#
# Args:
#   N, Nbar, b, ty, l: integer-valued design quantities (assumed: total
#     size N, group size Nbar, number of groups b, positive groups ty,
#     count l -- confirm against the paper).
#   alpha, beta: Beta prior parameters.
# Returns: a single numeric value.
#
# Cleanup vs original: removed the dead `val = 0` / `tmpVal` copy chain
# and the commented-out debug print; vapply() replaces sapply() so the
# intermediate is guaranteed numeric.
Tx_given_ty_NC <- function(N, Nbar, b, ty, l, alpha, beta) {
  if (l < ty) {
    return(0)
  }
  terms <- vapply(0:ty, function(r) {
    (-1)^r * exp(lbeta(l + alpha, N + beta - l) +
                   lchoose(ty, r) +
                   lchoose(N - (b - ty + r) * Nbar, l))
  }, numeric(1))
  sum(terms)
}
# Joint term for (Tx, tx) given ty in the non-clustered (NC) model:
#   sum_{r=0}^{ty} (-1)^r * exp( lbeta(l + alpha, N + Nbar*r - Nbar*ty + beta - l)
#                                + lchoose(ty, r)
#                                + lchoose(N - b * Nbar, l) )
# computed on the log scale. Unlike Tx_given_ty_NC there is no l >= ty
# guard in the original, so none is added here.
#
# Args mirror Tx_given_ty_NC. Returns a single numeric value.
# Cleanup vs original: removed the dead `val = 0` initialisation and the
# tmpVal copy; vapply() replaces sapply() for a type-stable result.
Tx_tx_given_ty_NC <- function(N, Nbar, b, ty, l, alpha, beta) {
  terms <- vapply(0:ty, function(r) {
    (-1)^r * exp(lbeta(l + alpha, N + Nbar * r - Nbar * ty + beta - l) +
                   lchoose(ty, r) +
                   lchoose(N - b * Nbar, l))
  }, numeric(1))
  sum(terms)
}
# clustered functions
# phi(p, theta, Nbar): computed on the log scale via lgamma() for
# numerical stability; algebraically equal to
#   1 - gamma(theta*(1/p - 1) + Nbar) * gamma(theta/p) /
#       (gamma(theta/p + Nbar) * gamma(theta/p - theta))
phi <- function(p, theta, Nbar) {
  log_numerator <- lgamma(theta * (1 / p - 1) + Nbar) + lgamma(theta / p)
  log_denominator <- lgamma(theta / p + Nbar) + lgamma(theta / p - theta)
  1 - exp(log_numerator - log_denominator)
}
# Unnormalised posterior kernel for the clustered model:
#   p^(alpha-1) * (1-p)^(beta-1) * phi^ty * (1 - phi)^(b - ty)
# NOTE(review): phi() is declared as phi(p, theta, Nbar) but is called
# here positionally as phi(theta, p, Nbar), i.e. with p and theta in
# swapped positions relative to the signature. phi() is not symmetric in
# its first two arguments, so either this call or phi()'s signature is
# likely wrong -- confirm against the paper before relying on this.
fpc = function(p, alpha, beta,theta, Nbar, ty, b){
  fpc =p^(alpha - 1)*(1 - p)^(beta - 1)*phi(theta, p, Nbar)^ty*(1 - phi(theta, p, Nbar))^(b - ty)
  return(fpc)
}
# Posterior density kernel for the clustered model. Currently identical
# to fpc(): the normalising integrate() call is left commented out, so
# the returned value is unnormalised (mirrors pf()/fp() above).
pfc <- function(p, alpha, beta, theta, Nbar, ty, b) {
  #marginal = integrate(function(x) {fpc(x,alpha, beta, theta,Nbar, ty, b)}, 0,1)
  pfc = fpc(p, alpha, beta, theta, Nbar, ty, b)#/marginal$value
  return(pfc)
}
# Zero-truncated beta-binomial probability mass function:
# P(X = k | X > 0) for X ~ BetaBinomial(size = Nbar, alpha, beta),
# obtained by renormalising the untruncated pmf by 1 - P(X = 0).
# Returns 0 (rather than erroring) for k <= 0.
# NOTE(review): dbbinom() comes from the 'extraDistr' package loaded at
# the top of this file; `k` is treated as a scalar here.
ztbb <- function(k,Nbar,alpha,beta) {
  if (k>0){
    ztbb = dbbinom(k, Nbar, alpha = alpha, beta = beta, log = FALSE)/
      (1-dbbinom(0, Nbar, alpha = alpha, beta = beta, log = FALSE))
  }else{
    ztbb = 0
  }
  return(ztbb)
}
# Simple sampling helper: draw `n` values from `support` with
# replacement, weighted by `probs`.
sample_fun <- function(n, support, probs) {
  sample(x = support, size = n, replace = TRUE, prob = probs)
}
|
86c8426fed7a16404797ba5af8f6633ce2eb4cf7
|
cd94ae361315380160c53aba76e55bad57c1ccdb
|
/man/date_to_fy.Rd
|
63238c6d9f03a9a8f3a36aa7cfe4b3b36c27e78f
|
[] |
no_license
|
rcgentzler/ojodb
|
c931836ff88b8ece481143c0752c16149d9851c1
|
7ba3700458023c8d8e39f6f6194692c277072df1
|
refs/heads/master
| 2023-01-20T20:48:12.975911
| 2020-11-20T19:48:53
| 2020-11-20T19:48:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 490
|
rd
|
date_to_fy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date_to_fy.R
\name{date_to_fy}
\alias{date_to_fy}
\title{Determine the Oklahoma state fiscal year that a date falls in}
\usage{
date_to_fy(date)
}
\value{
Fiscal year of a date as an integer
}
\description{
Returns the Oklahoma state fiscal year (July 1 - June 30) in which a given date falls.
}
\examples{
\dontrun{
date_to_fy(ymd("2018-06-30"))
# Returns 2018
date_to_fy(ymd("2018-07-01"))
# Returns 2019
}
}
|
152e5404037fe63cf953b0bf29e13025556a35d6
|
61f27287fda5c604c2bc2dfd5a2aa6db87cb293b
|
/R/plot.TipologiaRodizio.R
|
8cd7903ca0972c937b1c96674a690e57d455c483
|
[] |
no_license
|
brunomssmelo/RcextTools
|
5effdea589d88fbe83e3a15f99490baf7d45af14
|
c9081f45b383ad57bdaea023830ef6c1e20a1aef
|
refs/heads/master
| 2020-12-03T15:33:40.675627
| 2017-06-16T04:29:39
| 2017-06-16T04:29:39
| 66,433,016
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
plot.TipologiaRodizio.R
|
#' Metodo S3 que plota na tela uma representacao visual do grafo do tipo `igraph` contido no objeto da classe `TipologiaRodizio`
#' @author Bruno M. S. S. Melo
#' @description Os diferentes agrupamentos representam empresas suspeitas de praticarem alguma acao colusiva num determinado mercado. As arestas apontam na direcao de um perdedor para um vencedor de licitacao. Empresas sao sempre perdedoras sao representadas por quadrados cinzas.
#' @param x objeto da classe `TipologiaRodizio`.
#' @param ... eventuais parametros adicionais.
#' @examples
#' \dontrun{
#' casosSuspeitos <- TipologiaRodizio(dados)
#' plot(casosSuspeitos)
#' }
#' @export
# S3 plot method for 'TipologiaRodizio': renders the igraph stored in
# x$grafo with visNetwork. Each node (company) is grouped by its market
# ('MERCADO_ATUACAO'), looked up by CNPJ in x$tabela; nodes with any
# missing field fall into catch-all group '0', drawn as grey squares.
# Edges point from a losing bidder towards the winner (see roxygen above).
plot.TipologiaRodizio <- function(x, ...){
  dadosGrafo <- visNetwork::toVisNetworkData(x$grafo)
  # Look up each node's market sector by its CNPJ (company identifier);
  # only the first matching row is used.
  dadosGrafo$nodes$group <- sapply(dadosGrafo$nodes$id, FUN = function(n){
    group <- x$tabela[x$tabela$CNPJ == n,]$MERCADO_ATUACAO[1]
  })
  # Nodes with incomplete rows (e.g. no market found) get group 0.
  dadosGrafo$nodes[!complete.cases(dadosGrafo$nodes),]$group <- 0
  # The %>% pipeline below was removed to fix an error raised by
  # "CRAN check"; kept for reference:
  # visNetwork::visNetwork(nodes = dadosGrafo$nodes, edges = dadosGrafo$edges) %>%
  #   visNetwork::visInteraction( navigationButtons = TRUE, multiselect = TRUE ) %>%
  #   visNetwork::visOptions(nodesIdSelection = TRUE) %>%
  #   visNetwork::visEdges(arrows = 'to') %>%
  #   visNetwork::visGroups(groupname = '0', color = 'grey', shape = "square")
  # NOTE(review): the active code below omits the visInteraction() step
  # present in the commented pipeline -- confirm whether that was intended.
  vn <- visNetwork::visNetwork(nodes = dadosGrafo$nodes, edges = dadosGrafo$edges)
  vn <- visNetwork::visOptions(vn, nodesIdSelection = TRUE)
  vn <- visNetwork::visEdges(vn, arrows = 'to')
  visNetwork::visGroups(vn, groupname = '0', color = 'grey', shape = "square")
}
|
a11fb1ef214bca4b41baa8a15cd887985af6a92b
|
8575c4ce854151973bb8f58b8a124f7b1816df45
|
/Rafael_R_scripts/ExClVal.R
|
dd496316f52050c6b6499cd5f147c004e50581ee
|
[] |
no_license
|
mlldantas/Gal_classification
|
e0a3ce375d0661ca1933b4d36ff20f6fb4d469cc
|
81c392ec828709d30dea351a2fe27ec81bc6e69d
|
refs/heads/master
| 2022-03-30T14:24:18.340900
| 2020-02-21T17:24:25
| 2020-02-21T17:24:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,478
|
r
|
ExClVal.R
|
# Format numeric axis labels as plotmath expressions of the form
# a %*% 10^b (for use e.g. as a ggplot labeller). Works elementwise on
# vectors; returns an expression object with one element per label.
fancy_scientific <- function(l) {
  sci <- format(l, scientific = TRUE)    # e.g. "1e+03"
  sci <- gsub("^(.*)e", "'\\1'e", sci)   # quote the mantissa to keep digits
  sci <- gsub("e", "%*%10^", sci)        # rewrite exponent in plotmath
  parse(text = sci)
}
# Function to perform external cluster validation: compares every
# model-based cluster against every known class by
#   1. fitting a two-group LDA (cluster i vs class j) on `data`,
#   2. projecting both groups onto LD1 and estimating their densities,
#   3. scoring their overlap with a Kullback-Leibler divergence
#      (LaplacesDemon::KLD, base 2),
#   4. building a ggplot of the two densities for each (cluster, class).
#
# Args:
#   class: vector of true class labels (coerced to factor).
#   clust: clustering result exposing $G (number of clusters) and
#          $classification (per-row cluster assignment), e.g. an mclust fit.
#   data:  data frame / matrix of features the clustering was run on.
# Returns: list(KL = Ncluster x Nclass KL matrix,
#               pdfCluster / pdfClass = density objects,
#               gg = matrix of ggplot objects).
# NOTE(review): the defaults `class = class, clust = clust, data = data`
# are self-referential and will error if an argument is omitted -- all
# three arguments are effectively required.
ExClVal <- function(class = class, clust = clust, data = data){
  require(LaplacesDemon)
  require(scales)
  require(MASS)
  require(ggplot2)
  class <- as.factor(class)
  # Find number of clusters and classes
  Nclass <- length(levels(class))
  Ncluster <- clust$G
  cluster_sep <- list()
  # Separate individual clusters (labelled A, B, C, ...)
  for(i in 1:Ncluster){
    cluster_sep[[i]] = as.data.frame(data[(clust$classification ==i),])
    cluster_sep[[i]] = cbind(cluster_sep[[i]],label=rep(LETTERS[i],nrow(cluster_sep[[i]])),
                             deparse.level = 2)
  }
  class_sep <- list()
  # Separate individual classes (keeping their original labels)
  for(i in 1:Nclass){
    class_sep[[i]] = as.data.frame(data[(class == levels(class)[i]),])
    class_sep[[i]] = cbind(class_sep[[i]],label=class[class == levels(class)[i]])
  }
  # Perform LDA on data containing both cluster and class for each combination.
  # All intermediate results are stored in Ncluster x Nclass list-matrices.
  data <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  ldaResult <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  prediction <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  clusterProjected <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  classProjected <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  minRange <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  maxRange <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  pdfCluster <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  pdfClass <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  KL <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  gg <- matrix(list(),nrow=Ncluster,ncol=Nclass)
  # Fill colors for clusters in the plots (supports up to 4 clusters).
  clcolor <- c("#FF1493","#7FFF00", "#00BFFF", "#FF8C00")
  for (i in 1:Ncluster){
    for (j in 1:Nclass){
      data[[i,j]] = rbind(cluster_sep[[i]], class_sep[[j]])
      data[[i,j]]$label <- droplevels(data[[i,j]]$label)
      ldaResult[[i,j]] = lda(label ~ ., data[[i,j]])
      prediction[[i,j]] = predict(ldaResult[[i,j]])
      # Getting projected matrices for cluster and class from predicted
      # values: cluster rows come first in the rbind above.
      clusterProjected[[i,j]] = prediction[[i,j]]$x[1:nrow(cluster_sep[[i]]),]
      classProjected[[i,j]] = prediction[[i,j]]$x[(nrow(cluster_sep[[i]])+1):(dim(data[[i,j]])[1]),]
      # Get probability density for each cluster and class, extending the
      # range (+/- 5) so both densities share a common support grid.
      minRange[[i,j]] = min(clusterProjected[[i,j]], classProjected[[i,j]])
      maxRange[[i,j]] = max(clusterProjected[[i,j]], classProjected[[i,j]])
      pdfCluster[[i,j]] = density(clusterProjected[[i,j]], from = minRange[[i,j]]-5, to=maxRange[[i,j]]+5,n=1024)
      pdfClass[[i,j]] = density(classProjected[[i,j]], from = minRange[[i,j]]-5, to=maxRange[[i,j]]+5,n=1024)
      #pdfCluster[[i,j]] = density(clusterProjected[[i,j]])
      #pdfClass[[i,j]] = density(classProjected[[i,j]])
      # Normalise each density to a maximum of 1 before comparing.
      pdfCluster[[i,j]]$y = pdfCluster[[i,j]]$y/max(pdfCluster[[i,j]]$y)
      pdfClass[[i,j]]$y = pdfClass[[i,j]]$y/max(pdfClass[[i,j]]$y)
      # Calculate K-L distance using package LaplacesDemon
      KL[[i,j]] <- KLD(pdfCluster[[i,j]]$y,pdfClass[[i,j]]$y,base=2)$mean.sum.KLD
      # Plot density of cluster vs class (class in grey, cluster colored)
      gg[[i,j]] <- ggplot(
        data=data.frame(x=pdfCluster[[i,j]]$x,y=pdfCluster[[i,j]]$y),aes(x=x,y=y))+
        geom_polygon(data=data.frame(x=pdfClass[[i,j]]$x,y=pdfClass[[i,j]]$y),aes(x=x,y=y),
                     fill="gray80",size=1.25,alpha=0.7)+
        geom_polygon(linetype="dashed",fill = clcolor[i],alpha=0.4)+
        theme_bw()+
        scale_y_continuous(breaks=pretty_breaks())+
        theme(legend.position = "none",plot.title = element_text(hjust=0.5),
              axis.title.y=element_text(vjust=0.75),
              axis.title.x=element_text(vjust=-0.25),
              text = element_text(size=15))+xlab("LD1")+ylab("Density")+
        ggtitle(paste("G",i,"/",levels(class)[j],sep=""))
      #+
      # annotate("text", x=Inf, y = Inf, label = paste("Cluster ",i,sep=""), vjust=3, hjust=1.5,size=5,
      # color="red3")+
      # annotate("text", x=Inf, y = Inf, label = levels(class)[j], vjust=5, hjust=1.5,size=5,
      # color="blue3")
    }
  }
  # Collect KL values into a plain numeric matrix (rows = clusters,
  # columns = classes). seq(1:Ncluster) is equivalent to 1:Ncluster here.
  z <- matrix(unlist(KL),nrow=nrow(KL),ncol=ncol(KL))
  rownames(z) <- c(seq(1:Ncluster))
  colnames(z) <- c(levels(class))
  return(list(KL=z,
              pdfCluster = pdfCluster,
              pdfClass = pdfClass,
              gg=gg))
}
|
9a5aec6f03c118ff2c0e77143ab6deb54c38777f
|
f2373d4d8ad3fbd1735f280a21aec7eb0f666ffb
|
/man/uniqueBy.Rd
|
07ecda211c8f9bb8f1f85ddb3675e857dd1d984e
|
[] |
no_license
|
ahmczwg/psichomics
|
0c37b23fc06096309f9de71e4c8d4a3a15b982ab
|
cad23d89b8f07420b6cd90067b9ede60a4651881
|
refs/heads/master
| 2020-04-15T20:18:02.301980
| 2018-12-03T14:59:22
| 2018-12-03T14:59:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 442
|
rd
|
uniqueBy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{uniqueBy}
\alias{uniqueBy}
\title{Check unique rows of a data frame based on a set of its columns}
\usage{
uniqueBy(data, ...)
}
\arguments{
\item{data}{Data frame or matrix}
\item{...}{Name of columns}
}
\value{
Data frame with unique values based on set of columns
}
\description{
Check unique rows of a data frame based on a set of its columns
}
|
8b67447a943f07f9b02806fc563c5db656c138ff
|
55bdc9a36d8564216db073f19fffd931ffeaa9ae
|
/R/tests/testthat/test-crs-transform.R
|
1c65e12abb209410301a72457897302b91060d8c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
awadhesh14/GeoSpark
|
2362d691d8e84397f9cee33692a609ee218faf9e
|
86b90fc41a342088d20429ebcd61a95b2f757903
|
refs/heads/master
| 2023-04-09T07:02:03.610169
| 2023-04-01T07:30:02
| 2023-04-01T07:30:02
| 202,829,602
| 0
| 0
|
Apache-2.0
| 2022-12-21T21:28:50
| 2019-08-17T03:20:13
|
Java
|
UTF-8
|
R
| false
| false
| 1,646
|
r
|
test-crs-transform.R
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
context("CRS transform")
test_that("crs_transform() works as expected", {
sc <- testthat_spark_connection()
pt_rdd <- sedona_read_dsv_to_typed_rdd(
sc, test_data("crs-test-point.csv"),
type = "point"
) %>%
crs_transform("epsg:4326", "epsg:3857")
expect_equivalent(
pt_rdd %>%
sdf_register() %>%
head(5) %>%
dplyr::transmute(x = ST_X(geometry), y = ST_Y(geometry)) %>%
# NOTE: the extra `sdf_register()` call is a workaround until SPARK-37202 is
# fixed
sdf_register(name = random_string()) %>%
collect(),
tibble::tribble(
~x, ~y,
-9833016.710450118, 3805934.914254189,
-9815699.961781807, 3810760.096874278,
-9839413.351030082, 3810273.8140140832,
-9820728.151861448, 3809444.5432437807,
-9832182.148227641, 3888758.1926142,
)
)
})
|
0779a351cbc2c7506af37cbe5f466a2dd278592b
|
277dbb992966a549176e2b7f526715574b421440
|
/R_training/실습제출/이정환/20191029/movie2.R
|
31561ba3b388d0eed8993854fde21b2a5a99918e
|
[] |
no_license
|
BaeYS-marketing/R
|
58bc7f448d7486510218035a3e09d1dd562bca4b
|
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
|
refs/heads/master
| 2020-12-11T04:30:28.034460
| 2020-01-17T08:47:38
| 2020-01-17T08:47:38
| 227,819,378
| 0
| 0
| null | 2019-12-13T12:06:33
| 2019-12-13T10:56:18
|
C++
|
UTF-8
|
R
| false
| false
| 653
|
r
|
movie2.R
|
# Exercise 2: scrape netizen ratings and reviews for one movie from the
# Daum Movie site, 20 result pages, then save them to CSV.
# NOTE(review): read_html()/html_nodes()/html_text() come from the
# rvest/xml2 packages, which must be attached before this runs.
site = "https://movie.daum.net/moviedb/grade?movieId=121137&type=netizen&page="
daummovie2 = NULL
for (i in 1:20) {
  url = paste(site, i, sep='')
  text = read_html(url)
  # CSS selector for the numeric rating of each review item
  grade = html_nodes(text, '#mArticle > div.detail_movie.detail_rating > div.movie_detail > div.main_detail > ul > li > div > div.raking_grade > em')
  grade = html_text(grade)
  # CSS selector for the review text of each review item
  review = html_nodes(text, '#mArticle > div.detail_movie.detail_rating > div.movie_detail > div.main_detail > ul > li > div > p')
  review = html_text(review, trim=T)
  df = data.frame(grade, review)
  # NOTE(review): growing a data frame with rbind() in a loop is O(n^2);
  # collecting pages in a list and binding once would scale better.
  daummovie2 = rbind(daummovie2, df)
}
# Persist the combined (grade, review) table without row names.
write.csv(daummovie2, 'daummovie2.csv', row.names = F)
|
822f6b31eb20358382cff415d56c31e531e23ed1
|
5397b2f52030662f0e55f23f82e45faa165b8346
|
/man/j_get_index.Rd
|
ce8c52878e4ccfda1a22b40df5890d55771a6229
|
[
"MIT"
] |
permissive
|
data-science-made-easy/james-old
|
8569dcc8ce74c68bcbb81106127da4b903103fcd
|
201cc8e527123a62a00d27cd45d365c463fc1411
|
refs/heads/master
| 2023-01-12T21:16:23.231628
| 2020-11-19T13:46:17
| 2020-11-19T13:46:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 583
|
rd
|
j_get_index.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/j_get_index.R
\name{j_get_index}
\alias{j_get_index}
\title{Get index}
\usage{
j_get_index(type, version, scenario = james.env$j_root$scenario,
project = james.env$j_root$project)
}
\arguments{
\item{type}{data type (default "")}
\item{version}{version of data you want index of (default most recent version only)}
\item{scenario}{scenario filter (defaults to active scenario)}
\item{project}{project filter (defaults to active project)}
}
\value{
index
}
\description{
Get index of data in j_ls()
}
|
1fa82fd7ca3f1310931225c53b750b07e73e4251
|
753d940b683819bf96d3fbabb9f23e631ed061a3
|
/man/topsort.Rd
|
068a2c5e6576c28d7f172d2cf48bec5e37dccaa0
|
[] |
no_license
|
ralmond/Peanut
|
d2762206dd396ac2d3b31d6105ea9f9c03901e6a
|
9f5ca85b8d2a48a6d4f1f34705f0e97351cacdf3
|
refs/heads/master
| 2023-06-29T02:01:18.055310
| 2023-04-14T15:59:31
| 2023-04-14T15:59:31
| 239,856,304
| 1
| 1
| null | 2023-04-14T15:50:54
| 2020-02-11T20:20:10
|
R
|
UTF-8
|
R
| false
| false
| 1,527
|
rd
|
topsort.Rd
|
\name{topsort}
\alias{topsort}
\title{Topologically sorts the rows and columns of an Omega matrix}
\description{
The structural part of the \eqn{\Omega}-matrix is an incidence matrix
where the entry is 1 if the node represented by the column is a parent
of the node represented by the child. This sorts the rows and columns
of the matrix (which should have the same names) so that the ancestors
of a node always appear prior to it in the sequence. As a
consequence, the values in the upper triangle of the
\eqn{\Omega}-matrix are always zero after sorting.
}
\usage{
topsort(Omega, noisy = FALSE)
}
\arguments{
\item{Omega}{A square matrix of 1's and zeros which corresponds to an
acyclic directed graph.}
\item{noisy}{A logical value. If true, details of progress through the
algorithm are printed.}
}
\value{
An ordering of the rows and columns which will sort the matrix.
}
\author{Russell Almond}
\note{
This will generate an error if the graph represented by the matrix is
cyclic.
}
\seealso{
\code{\link{Pnet2Omega}} uses this function to sort the columns in the
Omega matrix.
}
\examples{
## Sample Omega matrix.
omegamat <- read.csv(system.file("auxdata", "miniPP-omega.csv",
package="Peanut"),
row.names=1,stringsAsFactors=FALSE)
omega <- as.matrix(omegamat[,2:6])
## omega is already sorted so scramble it.
shuffle <- sample.int(5)
omegas <- omega[shuffle,shuffle]
ord <- topsort(omegas)
omegas[ord,ord]
}
\keyword{ graph }
|
c61ffc332bc0e29f57d9b9d5fea6ca6eacc0ef66
|
91a685393b3d9633f2cfab0d6e15c9134b10eddc
|
/man/tParams.Rd
|
39ea2042d368611bd1b1c53620b8953304cb91aa
|
[] |
no_license
|
ablejec/animatoR
|
c40ffeb05a79104ff1da81d04a8f0c5a44aaa7ea
|
841bbd7f0ed3eef50a1818964d5e9c0a7d5b8f3a
|
refs/heads/master
| 2021-01-19T00:58:01.980054
| 2020-02-07T09:09:53
| 2020-02-07T09:09:53
| 62,981,413
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 341
|
rd
|
tParams.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/animatoR-functions-knitr.R
\name{tParams}
\alias{tParams}
\title{Argument t}
\arguments{
\item{t}{a numeric homotopy parameter restricted to the interval [0, 1].
It can be interpreted as the fraction of the animation's duration that has elapsed.}
}
\description{
Homotopy argument
}
|
53b8720998137637ad1a6a265a13347500ebb38a
|
0b1e3b297ab4034e66150e1782a0fc25de009bc4
|
/1_Data_for_pooled.R
|
e73a42265d014a16f81fa8939da5e2f358d06873
|
[] |
no_license
|
Alicja1990/Panel_analysis
|
b6bed08bded2db96c8dae099973ee3729200f6fd
|
f575b8ab2fb73cd6da6b872cc4716f08153d3ce7
|
refs/heads/master
| 2020-03-24T12:11:35.813577
| 2018-08-16T19:10:15
| 2018-08-16T19:10:15
| 142,706,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,512
|
r
|
1_Data_for_pooled.R
|
# Build a pooled (2017 + 2018) panel data set of investment funds for
# panel regression analysis, and write it to Panel.data.csv.
options(scipen = 100)
# NOTE(review): hard-coded absolute setwd() paths make this script
# machine-specific; consider relative paths or here::here().
setwd("C:/Users/Alicja/Documents/Doktorat/Rozprawa doktorska/Panel_analysis/Data")
# Fund characteristics per year, plus return/risk measures from stooq.
data_analizy_17 <- read.csv("data_analizy_17.csv", as.is = T)
data_analizy_18 <- read.csv("data_analizy_18.csv", as.is = T)
data_stooq <- read.csv2("stooq_17_18.csv", as.is = T)
# Join stooq measures onto the yearly data (first column vs "name" key).
data_18 <- merge(data_analizy_18, data_stooq, by.x = 1, by.y = "name")
data_stooq <- merge(data_analizy_18[, c("X1", "Nazwa_IZFiA", "Nazwa")], data_stooq, by.x = 1, by.y = "name")
data_17 <- merge(data_analizy_17, data_stooq, by = "Nazwa_IZFiA")
data_17 <- merge(data_17, data_18[, c("Data_utworzenia", "Nazwa_IZFiA", "Lokalizacja", "TFI")], by = "Nazwa_IZFiA")
# Keep a matching set of columns for both years (year-suffixed names).
data_18 <- data_18[, c("Nazwa", "Nazwa_IZFiA", "OB18", "TFI", "Data_utworzenia",
                       "PW18", "NW18", "Aktywa18",
                       "Profil18", "OZZ18", "OZN18", "OZU18", "OZW18", "TER18",
                       "mean_daily_log_rr_17", "std_daily_log_rr_17",
                       "var17_5", "var17_95", "es17", "eg17",
                       "yearly_rr_17", "yearly_std_17", "Lokalizacja")]
data_17 <- data_17[, c("Nazwa", "Nazwa_IZFiA", "OB17", "TFI",
                       "Data_utworzenia", "PW17", "NW17", "Aktywa17",
                       "Profil17", "OZZ17", "OZN17",
                       "OZU17", "OZW17", "TER17",
                       "mean_daily_log_rr_16", "std_daily_log_rr_16",
                       "var16_5", "var16_95", "es16", "eg16",
                       "yearly_rr_16", "yearly_std_16", "Lokalizacja")]
# Panel time index ("Rok" = year).
data_17$Rok <- "2017"
data_18$Rok <- "2018"
# Average bank-deposit interest rates used as risk-free benchmarks
# -- TODO confirm the source of these constants.
sr.oproc.lokat.17 <- 0.014
sr.oproc.lokat.16 <- 0.015
# "sr": excess return over the deposit rate divided by yearly volatility
# (a Sharpe-ratio-style measure).
data_17$sr <- (as.numeric(data_17$yearly_rr_16) - sr.oproc.lokat.16) / as.numeric(data_17$yearly_std_16)
data_18$sr <- (as.numeric(data_18$yearly_rr_17) - sr.oproc.lokat.17) / as.numeric(data_18$yearly_std_17)
# Harmonise column names (drop year suffixes) so both years can be stacked.
colnames(data_17) <- c("Nazwa", "Nazwa_IZFiA", "OB", "TFI", "Data_utworzenia",
                       "PW", "NW", "Aktywa",
                       "Profil", "OZZ", "OZN", "OZU", "OZW", "TER",
                       "mean_daily_log_rr", "std_daily_log_rr",
                       "var_5", "var_95", "es", "eg",
                       "yearly_rr", "yearly_std", "Lokalizacja", "Rok", "sr")
colnames(data_18) <- c("Nazwa", "Nazwa_IZFiA", "OB", "TFI", "Data_utworzenia",
                       "PW", "NW", "Aktywa",
                       "Profil", "OZZ", "OZN", "OZU", "OZW", "TER",
                       "mean_daily_log_rr", "std_daily_log_rr",
                       "var_5", "var_95", "es", "eg",
                       "yearly_rr", "yearly_std", "Lokalizacja", "Rok", "sr")
# Stack the two yearly cross-sections into one pooled panel.
panel.data <- rbind(data_17, data_18)
# Strip percent signs from OZW and collapse fund-profile labels into a
# small set of categories (trailing-space variants merged).
panel.data$OZW <- gsub("%", "", panel.data$OZW)
panel.data$Profil <- gsub("akcji ", "akcji", panel.data$Profil)
panel.data$Profil <- gsub("dłużne ", "dłużne", panel.data$Profil)
panel.data$Profil <- gsub("mieszane polskie", "mieszane", panel.data$Profil)
panel.data$Profil <- gsub("mieszane ", "mieszane", panel.data$Profil)
panel.data$Profil <- gsub("gotówkowe i pieniężne ", "pieniężne", panel.data$Profil)
panel.data$Profil <- gsub("inne ", "akcji", panel.data$Profil)
panel.data$Profil <- gsub("rynku surowców ", "akcji", panel.data$Profil)
# Fund age in years ("Wiek") from creation date to today.
# NOTE(review): time_length() is from lubridate, which is not loaded in
# this script -- confirm it is attached before this line runs.
panel.data$Wiek <- as.numeric(time_length(difftime(Sys.Date(), panel.data$Data_utworzenia), "years"))
# Unique fund-year key for the panel.
panel.data$Name_time <- paste(panel.data$Nazwa, panel.data$Rok, sep = "")
setwd("C:/Users/Alicja/Documents/Doktorat/Rozprawa doktorska/Panel_analysis/Data")
write.csv(panel.data, "Panel.data.csv")
|
ecc376de54c745683d1178169edef54a7a3fada1
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/revision_scripts/revisions2/2-Liger_full_data_repressive_k9me3_cluster_specific_bins_keeptop_bymark_with_TSS.R
|
ccfa345d9975220129ec60d348a44622c9286ee7
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,620
|
r
|
2-Liger_full_data_repressive_k9me3_cluster_specific_bins_keeptop_bymark_with_TSS.R
|
# Jake Yeung
# Date of Creation: 2022-07-22
# File: ~/projects/scchic/scripts/revision_scripts/revisions2/2-CCA_full_data_repressive_k9me3_cluster_specific_bins_keeptop_bymark.R
#
# Session setup: libraries, CLI arguments, UMAP settings, mark names and
# output directory.
# NOTE(review): rm(list=ls()) wipes the calling session's objects;
# acceptable for a batch script, avoid when sourcing interactively.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(Seurat)
library(hash)
library(igraph)
library(umap)
library(scchicFuncs)
library(topicmodels)
library(scchicFuncs)  # NOTE(review): duplicate of the call two lines up
suppressPackageStartupMessages(library("argparse"))
# create parser object
parser <- ArgumentParser()
# specify our desired options
# by default ArgumentParser will add an help option
parser$add_argument('-refmark', metavar='eg k4me1 k4me3 k27me3',
                    help='refmark')
parser$add_argument('-mark', metavar='eg k4me1 k4me3 k27me3',
                    help='mark')
parser$add_argument('-repressfactor', metavar="adj factor", default = 1, type="integer",
                    help='-1 or 1 to match rep mat to act mat')
parser$add_argument('-outdir', metavar='OUTDIR',
                    help='output dir')
parser$add_argument("-v", "--verbose", action="store_true", default=TRUE,
                    help="Print extra output [default]")
# UMAP hyperparameters used downstream.
jsettings <- umap.defaults
jsettings[["n_neighbors"]] <- 30
jsettings[["min_dist"]] <- 0.1
jsettings[["random_state"]] <- 123
args <- parser$parse_args()
jmarkother <- args$mark        # mark to integrate against the reference
jmarkref <- args$refmark       # reference histone mark
outdir <- args$outdir
jfactor <- args$repressfactor  # -1 or 1; sign applied to the imputed matrix
# jmarkother <- "k4me1"
# jmarkref <- "k9me3"
# Short mark names, plus older H3-prefixed names keyed by the short names.
jmarksall <- c("k4me1", "k4me3", "k27me3", "k9me3"); names(jmarksall) <- jmarksall
jmarksoldall <- c("H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3"); names(jmarksoldall) <- jmarksall
jmarks <- jmarksall[c(jmarkref, jmarkother)]
jmarksold <- jmarksoldall[c(jmarkref, jmarkother)]
jmarkothers <- jmarksall[jmarkother]
keeptop <- 500  # top cluster-specific bins kept (matches LDA output paths below)
# outdir <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/integration_data/CCA_outputs/with_k9me3_cluster_specific_bins_keeptop_", keeptop, "_bymark")
dir.create(outdir)
# Load new colors ---------------------------------------------------------
inf.colors.fixed <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/ctypes_on_umap_batch_corrected_colors_fixed/dat_colors_DC_monocyte_fixed.2022-05-17.txt"
dat.colors.fixed <- fread(inf.colors.fixed)
# Load meta ----------------------------------------------------------------
dat.meta.lst <- lapply(jmarks, function(jmark){
inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/umaps_pcas_with_batch_corrections/umap_metadata_primetime.", jmark, ".2022-04-21.txt")
dat.meta <- fread(inf.meta) %>%
left_join(., dat.colors.fixed) %>%
rowwise() %>%
mutate(colcode = colcodenew)
# replace colcode with colcodenew
})
inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/umaps_pcas_with_batch_corrections/umap_metadata_primetime.k4me1.2022-04-21.txt")
dat.meta.act <- fread(inf.meta) %>%
left_join(., dat.colors.fixed) %>%
rowwise() %>%
mutate(colcode = colcodenew)
dat.meta.colors <- subset(dat.meta.act, select = c(ctype.from.LL, colcode))
ctype2col <- hash::hash(dat.meta.colors$ctype.from.LL, dat.meta.colors$colcode)
# Load impute -------------------------------------------------------------
# For each mark, load the fitted LDA model and reconstruct the imputed
# (denoised) signal matrix as log2(topics %*% terms), bins x cells.
# lda.main <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_TSS_k9_cluster_specific_bins_keeptop_", keeptop)
lda.main <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_TSS_k9_cluster_specific_bins_keeptop_", keeptop, "_with_TSS")
# Cells previously flagged as bad in the k4me1 data; excluded from all marks.
bad.cells <- readr::read_lines("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/integration_data/CCA_outputs/k4me1_bad_cells.txt")
dat.impute.lst <- lapply(jmarksold, function(jmark){
  # lda.dir <- file.path(lda.main, paste0("lda_outputs.count_mat_allmerged_for_LDA_k9_cluster_specific_bins_keeptop_", keeptop, ".", jmark, ".2022-07-22"))
  # inf.impute <- file.path(lda.dir, paste0("ldaOut.count_mat_allmerged_for_LDA_k9_cluster_specific_bins_keeptop_", keeptop, ".", jmark, ".2022-07-22.Robj"))
  lda.dir <- file.path(lda.main, paste0("lda_outputs.count_mat_allmerged_for_LDA_k9_cluster_specific_bins_keeptop_", keeptop, "_with_TSS.", jmark, ".2022-07-27"))
  inf.impute <- file.path(lda.dir, paste0("ldaOut.count_mat_allmerged_for_LDA_k9_cluster_specific_bins_keeptop_", keeptop, "_with_TSS.", jmark, ".2022-07-27.Robj"))
  load(inf.impute, v=T) # out.lda
  # Posterior topic/term matrices; AddTopicToTmResult labels the topics.
  tm.result <- posterior(out.lda) %>%
    AddTopicToTmResult()
  # Imputed signal: project cells back onto bins, then log2.
  mat <- log2(t(tm.result$topics %*% tm.result$terms))
  print("remove bad cells")
  print(dim(mat))
  cells.keep <- !colnames(mat) %in% bad.cells
  mat <- mat[, cells.keep]
  print("remove bad cells.. done")
  print(dim(mat))
  # Global scaling factor applied to every mark.
  # NOTE(review): 'jfactor' is defined outside this chunk -- confirm its value.
  mat <- jfactor * mat
  # if (jmark == "k27me3" | jmark == "k9me3"){
  # } else {
  #   mat <- 1 * mat
  # }
  return(mat)
  # mat[genes.keep, ]
})
# Keep only the bins (rows) present in every mark, so matrices are alignable.
common.markers <- Reduce(intersect, lapply(dat.impute.lst, function(jdat) rownames(jdat)) )
dat.impute.markers.lst <- lapply(dat.impute.lst, function(jdat){
  jdat[common.markers, ]
})
# Reload per-mark metadata, this time as plain data.frames with the cell
# barcode as rownames (needed for row-wise lookups downstream).
dat.meta.lst <- lapply(jmarks, function(jmark){
  inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/umaps_pcas_with_batch_corrections/umap_metadata_primetime.", jmark, ".2022-04-21.txt")
  dat.meta <- fread(inf.meta) %>%
    left_join(., dat.colors.fixed) %>%
    rowwise() %>%
    mutate(colcode = colcodenew) %>%
    as.data.frame()
  rownames(dat.meta) <- dat.meta$cell
  return(dat.meta)
  # replace colcode with colcodenew
})
# One long table of (cell, cell type, colour, mark) across all marks, used for
# annotating the joint embeddings.
dat.umap.annot.unif <- lapply(jmarks, function(jmark){
  jdat <- dat.meta.lst[[jmark]]
  jdat.select <- subset(jdat, select = c(cell, ctype.from.LL, colcode))
  jdat.select$mark <- jmark
  return(jdat.select)
}) %>%
  bind_rows()
# Run Liger ---------------------------------------------------------------
# Pairwise LIGER (iNMF) integration of each non-reference mark with the
# reference mark. The log2 imputed matrices are exponentiated back to the
# linear scale because rliger expects non-negative input.
# NOTE(review): 'jmarkref' and 'jmarkothers' are defined outside this chunk.
# outdir <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/integration_data/liger_outputs"
marks.combined.lst <- lapply(jmarkothers, function(jmark){
  print(jmark)
  raw.data <- list(ref = exp(dat.impute.markers.lst[[jmarkref]]), target = exp(dat.impute.markers.lst[[jmark]]))
  common.genes <- intersect(rownames(raw.data[[1]]), rownames(raw.data[[2]]))
  names(raw.data) <- c(jmarkref, jmark)
  ligerobj <- rliger::createLiger(raw.data = raw.data)
  ligerobj <- rliger::normalize(ligerobj)
  # ligerobj <- rliger::selectGenes(ligerobj, var.thresh = -100, alpha.thresh = 1000, tol = 0.001, combine = "union", num.genes = 500, do.plot = TRUE)
  # Use all shared features rather than variable-gene selection.
  ligerobj@var.genes <- common.genes
  ligerobj <- rliger::scaleNotCenter(ligerobj, remove.missing = FALSE)
  ligerobj <- rliger::optimizeALS(ligerobj, k = 20)
  # Checkpoint after the expensive factorization step, before quantile norm.
  saveRDS(ligerobj, file = file.path(outdir, paste0("liger_", jmarkref, "_and_", jmark, "_optimizeALS.", Sys.Date(), ".rds")))
  ligerobj <- rliger::quantile_norm(ligerobj)
  ligerobj <- rliger::louvainCluster(ligerobj, resolution = 0.2)
  ligerobj <- rliger::runUMAP(ligerobj, distance = 'cosine', n_neighbors = 30, min_dist = 0.3)
  saveRDS(ligerobj, file = file.path(outdir, paste0("liger_", jmarkref, "_and_", jmark, "_UMAP.", Sys.Date(), ".rds")))
  return(ligerobj)
})
# Save all pairwise integrations together.
saveRDS(marks.combined.lst, file = file.path(outdir, paste0("liger_all_marks_combined.", Sys.Date(), ".rds")))
|
2bef1fddab286ce8c1fdc4dbcda7feb55e66a66d
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/cran/paws.internet.of.things/man/iot_describe_domain_configuration.Rd
|
34cf0dd09c923787272a80f5b0444ff14e700d34
|
[
"Apache-2.0"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 705
|
rd
|
iot_describe_domain_configuration.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_describe_domain_configuration}
\alias{iot_describe_domain_configuration}
\title{Gets summary information about a domain configuration}
\usage{
iot_describe_domain_configuration(domainConfigurationName)
}
\arguments{
\item{domainConfigurationName}{[required] The name of the domain configuration.}
}
\description{
Gets summary information about a domain configuration.
}
\details{
The domain configuration feature is in public preview and is subject to
change.
}
\section{Request syntax}{
\preformatted{svc$describe_domain_configuration(
domainConfigurationName = "string"
)
}
}
\keyword{internal}
|
632258220448513ba658425ed3710c06791f8e72
|
904022448a1599c2e6dcb2a438beb4b64efb65bc
|
/adr_cc_numbers.R
|
ed13b6c1118d58cc7250cad956a15ad7ef09dade
|
[] |
no_license
|
GKild/neuroblastoma
|
1bb9ec3df959482d6d7e9a718fc98bb813033a8f
|
a17d359b8ad915ce3eb831739254c951df1719e4
|
refs/heads/master
| 2023-03-04T04:24:57.783627
| 2021-02-16T11:12:00
| 2021-02-16T11:12:00
| 217,528,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,203
|
r
|
adr_cc_numbers.R
|
# Load the processed fetal adrenal Seurat object and collapse its Louvain
# clusters into named cell-type annotations, stored in meta.data$new_clust.
adr_all <- readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/ProjectsExtras/SCP/Results/preProcess/fAdrenal/processedSeurat.RDS")
adr_all@meta.data$new_clust <- as.character(adr_all@meta.data$seurat_clusters)
# Cell-type -> cluster-number lookup. The numeric cluster labels are disjoint
# across cell types, so the replacement order does not matter.
adr_all_celltypes <- list(
  "SCPs"            = c("25"),
  "Sympathoblastic" = c("13"),
  "Bridge"          = c("24"),
  "Chromaffin"      = c("19", "20"),
  "Endothelium"     = c("3", "15", "14", "17"),
  "Mesenchyme"      = c("23", "11", "18"),
  "Cortex"          = c("22", "5", "28", "0", "1", "2", "10", "6", "8", "9", "21"),
  "Leukocytes"      = c("16", "26"),
  "Erythroblasts"   = c("30", "7", "4", "12"),
  "Other"           = c("27", "31", "29")
)
for (celltype in names(adr_all_celltypes)) {
  hits <- adr_all@meta.data$new_clust %in% adr_all_celltypes[[celltype]]
  adr_all@meta.data$new_clust[hits] <- celltype
}
#cell counts
# Map sequencing-run identifiers (orig.ident) onto GOSH patient IDs.
srat_inhouse@meta.data$GOSH_ID=as.character(srat_inhouse@meta.data$orig.ident)
srat_inhouse@meta.data$GOSH_ID[which(srat_inhouse@meta.data$GOSH_ID%in%c("STDY7685340","STDY7685341","STDY7685342"))]="GOSH014"
srat_inhouse@meta.data$GOSH_ID[which(srat_inhouse@meta.data$GOSH_ID%in%c("STDY7843576", "STDY7843577", "STDY7843578"))]="GOSH023"
srat_inhouse@meta.data$GOSH_ID[which(srat_inhouse@meta.data$GOSH_ID%in%c("STDY7733084","STDY7733085", "STDY7733086"))]="GOSH019"
srat_inhouse@meta.data$GOSH_ID[which(srat_inhouse@meta.data$GOSH_ID%in%c("STDY8004894","STDY8004902", "STDY8004910"))]="GOSH025"
srat_inhouse@meta.data$GOSH_ID[which(srat_inhouse@meta.data$GOSH_ID%in%c("STDY7787237", "STDY7787238", "STDY7787239"))]="GOSH021"
DimPlot(srat_inhouse, group.by = "GOSH_ID")
DimPlot(adr_all, label=T)
# Cell-type counts for patient GOSH021.
# Bug fix: previously queried "GOSH-021" (with a hyphen), which never matches
# the "GOSH021" values assigned above, so this table was always empty.
table(srat_inhouse@meta.data$new_idents[which(srat_inhouse@meta.data$GOSH_ID=="GOSH021")])
# Per-sample cell-type counts for the tumour object.
sapply(unique(srat_tumour@meta.data$unique_sample), function(x){
  table(srat_tumour@meta.data$new_idents[which(srat_tumour@meta.data$unique_sample==x)])
})
# Load the raw (base) fetal adrenal object plus several related datasets.
# NOTE(review): ba1, ba2, bilateral, bilat_tech and adr3 are loaded here but
# not used in this chunk -- confirm they are needed before keeping the reads.
adr_cc=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/ProjectsExtras/SCP/Results/preProcess/fAdrenal/baseSeurat.RDS")
ba1=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/preProcessSCP/output/babyAdrenal1_Seurat.RDS")
ba2=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/preProcessSCP/output/babyAdrenal2_Seurat.RDS")
bilateral=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/preProcessSCP/output/bilateralAdrenals_Seurat.RDS")
bilat_tech=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/preProcessSCP/output/techComparison_Seurat.RDS")
adr3=readRDS("/lustre/scratch117/casm/team274/my4/oldScratch/ProjectsExtras/SCP/Data/fAdrenal19wk/seurat.RDS")
# Standard Seurat reprocessing of adr_cc: normalize, 2000 variable features,
# scale all genes, PCA (75 PCs), neighbour graph, Louvain clustering at
# resolution 1, and a UMAP on all 75 PCs.
adr_cc = NormalizeData(adr_cc)
adr_cc =FindVariableFeatures(adr_cc, selection.method = "vst", nfeatures = 2000)
adr_cc = ScaleData(adr_cc, features = rownames(adr_cc))
adr_cc = RunPCA(adr_cc, npcs = 75)
adr_cc = FindNeighbors(adr_cc, dims=1:75)
adr_cc = FindClusters(adr_cc, resolution = 1)
adr_cc = RunUMAP(adr_cc, dims=1:75, min.dist = 0.5, n.neighbors = 50)
DimPlot(adr_cc, label=T)
# Collapse the adr_cc Louvain clusters (resolution 1) into named cell types,
# stored in meta.data$new_clust.
adr_cc@meta.data$new_clust <- as.character(adr_cc@meta.data$seurat_clusters)
# Cell-type -> cluster-number lookup; cluster labels are disjoint across cell
# types, so the replacement order is irrelevant.
adr_cc_celltypes <- list(
  "SCPs"                 = c("19"),
  "Sympathoblastic"      = c("28", "25", "33", "11"),
  "Bridge"               = c("30"),
  "Chromaffin"           = c("40", "12"),
  "Vascular Endothelium" = c("4", "5", "20", "22", "32", "38", "44", "46"),
  "Mesenchyme"           = c("7", "21", "24", "27", "41"),
  "Cortex"               = c("0", "1", "3", "23", "18", "31", "6", "17",
                             "14", "10", "29", "26", "9", "35"),
  "Leukocytes"           = c("16", "42", "36", "39"),
  "Erythroblasts"        = c("2", "8", "13", "15", "43"),
  "Other"                = c("37", "34", "45", "47")
)
for (celltype in names(adr_cc_celltypes)) {
  hits <- adr_cc@meta.data$new_clust %in% adr_cc_celltypes[[celltype]]
  adr_cc@meta.data$new_clust[hits] <- celltype
}
DimPlot(adr_cc, group.by = "new_clust")
# Sample assignment: cell barcodes encode the sample/lane ID as the 6th
# "_"-separated field of the cell name.
# NOTE(review): w21_1 and w21_2 are computed but never used afterwards -- the
# same logic is repeated in the sample_name assignments below.
w21_1=rownames(adr_cc@meta.data)[which(sapply(strsplit(names(adr_cc@active.ident), "_"), "[", 6)%in%c("Adr8710632", "Adr8710633"))]
w21_2=rownames(adr_cc@meta.data)[which(sapply(strsplit(names(adr_cc@active.ident), "_"), "[", 6)%in%c("Adr8710634", "Adr8710635"))]
# Human-readable sample names ("x" marks unassigned cells).
adr_cc@meta.data$sample_name="x"
adr_cc@meta.data$sample_name[which(sapply(strsplit(names(adr_cc@active.ident), "_"), "[", 6)%in%c("Adr8710632", "Adr8710633"))]="w21_1"
adr_cc@meta.data$sample_name[which(sapply(strsplit(names(adr_cc@active.ident), "_"), "[", 6)%in%c("Adr8710634", "Adr8710635"))]="w21_2"
adr_cc@meta.data$sample_name[which(adr_cc@meta.data$orig.ident=="babyAdrenal1")]="w8"
adr_cc@meta.data$sample_name[which(adr_cc@meta.data$orig.ident=="babyAdrenal2")]="w8d6"
adr_cc@meta.data$sample_name[which(adr_cc@meta.data$orig.ident%in%c("5388STDY7717452","5388STDY7717453",
                                                                    "5388STDY7717454","5388STDY7717455"))]="w10d5_1"
adr_cc@meta.data$sample_name[which(adr_cc@meta.data$orig.ident%in%c("5388STDY7717456","5388STDY7717458",
                                                                    "5388STDY7717459"))]="w10d5_2"
adr_cc@meta.data$sample_name[which(adr_cc@meta.data$orig.ident%in%c("5698STDY7839907","5698STDY7839909",
                                                                    "5698STDY7839917"))]="w11"
# Per-sample counts of each annotated cell type.
as.data.frame(sapply(unique(adr_cc@meta.data$sample_name), function(x){
  table(adr_cc@meta.data$new_clust[which(adr_cc@meta.data$sample_name==x)])
}))
table(adr_cc@meta.data$sample_name)
# Barcodes of cells in the clusters annotated as "Other" above.
WhichCells(adr_cc, idents=c(37,34,45,47))
|
a18d61a1776f02c82c3f57d537a3f26fae7d777e
|
7bc2a4cfd299263c8b9b1b4d1ea3afc8d598163f
|
/src/R/makedata_200930.R
|
b91be34d95591b82a25357939a91aceb1e08f722
|
[] |
no_license
|
mrmtshmp/predict.valid
|
740821a54f09da18eb2af1327de9b15391c62e04
|
9bc64dfa207d512d8e8d95f984134556b94c67c8
|
refs/heads/main
| 2022-12-20T06:25:36.794688
| 2020-10-14T11:51:55
| 2020-10-14T11:51:55
| 303,998,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
makedata_200930.R
|
#' Make data from dataset received 2020/09/29
#'
#' proj. predictCandida
#' PI: Taiga Miyazaki
#' author: Shimpei Morimoto
#'
#' Reads the analysis data set (ADS) from an Excel workbook, using column
#' names/types taken from a companion sheet, fills in two missing-value
#' defaults, and saves the result with save().
# Source every helper script in ./sub (these define path.orig_data, dir.ADS,
# fn.ADS, ...). list.files(full.names = TRUE) yields the "./sub/<file>" paths
# directly and is called only once; unlike the original
# `for (i in 1:length(list.files(dir.sub)))`, this also behaves correctly
# when the directory is empty (1:0 would iterate over c(1, 0)).
dir.sub <- "./sub"
for (sub_file in list.files(dir.sub, full.names = TRUE)) {
  source(file = sub_file)
}
# Sheet 2 holds the column metadata: one row per ADS column, giving the name
# and readxl column type to use when parsing sheet 1.
df.col_info <-
  readxl::read_excel(
    path = path.orig_data,
    sheet = 2
  )
# Sheet 1 is the data itself; the first 3 rows are header/annotation rows.
df.ADS <-
  readxl::read_excel(
    path = path.orig_data,
    sheet = 1,
    skip = 3,
    col_names = df.col_info$col_names,
    col_types = df.col_info$col_type
  ) %>%
  mutate(
    # Missing AKI is interpreted as "no acute kidney injury" -> 0.
    AKI =
      ifelse(is.na(AKI), 0, AKI),
    # Missing CVC removal flag defaults to 1.
    # NOTE(review): confirm both imputation defaults against the study protocol.
    CVC_rem_24hrs =
      ifelse(is.na(CVC_rem_24hrs), 1, CVC_rem_24hrs)
  )
# Persist the ADS together with its column metadata.
save(
  df.ADS, df.col_info,
  file = sprintf(
    "%s/%s", dir.ADS, fn.ADS
  )
)
|
d3aca292082654497f25f7e8e3567305114149cd
|
d69e8c2403c7aed57c1401fbdaf90373daacf98b
|
/Code/P60_SmartSeq2_Batch_Corrected/02-Analyses.R
|
89aa9020cb6f475b12ebbab57e3b4ccd2e672376
|
[
"MIT"
] |
permissive
|
suterlab/SNAT-code-used-for-primary-data-analysis
|
b9ff67b64dbb87c6cdd2be774e455cbabbfe3bbb
|
60199934be21b74f8c90291dc441684503e25487
|
refs/heads/main
| 2021-08-17T15:42:07.680292
| 2021-04-20T15:33:35
| 2021-04-20T15:33:35
| 307,694,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,383
|
r
|
02-Analyses.R
|
# Analysis of the batch-corrected P60 SmartSeq2 data: load the CCA output,
# rename clusters, recompute UMAP, and produce numbered figure PDFs.
library(Seurat)
library(tidyverse)
library(here)
library(cowplot)
library(stringr)
# utils.R provides shared helpers; 'cellCols' used below is presumably defined
# there -- TODO confirm.
source(here("Code", "utils.R"))
resDir <- here("Results", "SS2_P60_BatchCorrection_woFilter50k", "02-Analyses")
dir.create(resDir, recursive = TRUE)
# Running counter used to prefix output figure filenames (01-, 02-, ...).
figNumer <- 1
# Upgrade Seurat v2 object to v3 object
scData <- readRDS(here("Results", "SS2_P60_BatchCorrection_woFilter50k", "01-CCA", "scData.rds"))
scData <- UpdateSeuratObject(scData)
# Rename the four numeric clusters to biological labels: myelinating (mSC)
# and non-myelinating/Remak (nm(R)SC) Schwann cell clusters.
scData <- RenameIdents(scData,
                       "0"="nm(R)SC cluster 1", "1"="mSC cluster 1",
                       "2"="mSC cluster 2", "3"="nm(R)SC cluster 2")
# UMAP on the aligned CCA space (12 dimensions).
scData <- RunUMAP(scData, reduction = "cca.aligned", dims = 1:12)
# Plate of origin parsed from the cell name (P60_1 or P60_2).
scData$Plate <- str_extract(colnames(scData), "P60_(1|2)")
saveRDS(scData, file=file.path(resDir, "scData.rds"))
# Clustering plot: 2x2 panel with UMAP and tSNE coloured by the renamed
# cluster identities (top row) and by the raw res.0.5 cluster numbers
# (bottom row).
p1 <- DimPlot(scData, cols=cellCols, reduction = "umap", label=TRUE) +
  theme(legend.position = "none")
p2 <- DimPlot(scData, cols=cellCols, reduction = "tsne", label=TRUE) +
  theme(legend.position = "none")
p3 <- DimPlot(scData, reduction = "umap", label=TRUE, group.by="res.0.5") +
  theme(legend.position = "none")
p4 <- DimPlot(scData, reduction = "tsne", label=TRUE, group.by="res.0.5") +
  theme(legend.position = "none")
p <- plot_grid(p1, p2, p3, p4, labels="AUTO", ncol=2)
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-ClusteringPlot.pdf")),
          p, ncol=2, nrow=2, base_asp = 1.2)
figNumer <- figNumer + 1
# QC plots: feature plots of nFeature / nCount / percent.mito, rendered once
# on the UMAP and once on the tSNE embedding. Both embeddings share one code
# path via a small helper; output filenames match the original
# ("-QCPlot_umap.pdf" / "-QCPlot_tsne.pdf").
qc_features <- c("nFeature_RNA", "nCount_RNA", "percent.mito")
make_qc_panel <- function(reduction) {
  panels <- lapply(qc_features, function(feat) {
    FeaturePlot(object = scData, features = feat,
                reduction = reduction, cols = c("yellow", "red"))
  })
  plot_grid(plotlist = panels, ncol = 3)
}
for (embedding in c("umap", "tsne")) {
  p <- make_qc_panel(embedding)
  save_plot(filename = file.path(resDir,
                                 paste0(formatC(figNumer, width = 2, flag = "0"),
                                        "-QCPlot_", embedding, ".pdf")),
            p, ncol = 3)
}
figNumer <- figNumer + 1
# Plate origin: UMAP/tSNE coloured by sequencing plate, to check whether the
# batch correction removed plate-driven structure.
p1 <- DimPlot(scData, reduction = "umap", group.by="Plate")
p2 <- DimPlot(scData, reduction = "tsne", group.by="Plate")
p <- plot_grid(p1, p2, labels="AUTO", ncol=2)
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-PlateOrigin.pdf")),
          p, ncol=2, base_asp = 1.4)
figNumer <- figNumer + 1
# Gene expression: tSNE feature plots of selected marker genes (proliferation,
# Schwann cell precursor/non-myelinating, and myelination markers).
markers <- c("Mki67", "Top2a", "Ngfr", "Ncam1", "L1cam",
             "Mpz", "Mbp", "Ncmap")
p <- FeaturePlot(scData, features=markers, reduction="tsne",
                 ncol=min(length(markers), 3))
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-GeneExpression.pdf")),
          p, ncol=3, nrow=ceiling(length(markers)/3), base_asp = 1.4)
figNumer <- figNumer + 1
# Markers, heatmap: positive cluster markers (p < 0.01) and a heatmap of the
# top 10 per cluster by average log fold-change.
markers <- FindAllMarkers(scData, only.pos=TRUE, return.thresh=0.01)
saveRDS(markers, file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                          "-pos_markers.rds")))
top10 <- markers %>% group_by(cluster) %>% top_n(10, avg_logFC)
p <- DoHeatmap(scData, group.colors=cellCols[levels(Idents(scData))],
               features = top10$gene, raster=FALSE)
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-pos_markers.pdf")),
          p, base_height = 6, base_width = 4)
figNumer <- figNumer + 1
# Same analysis with the two mSC clusters merged into a single "mSC" identity.
# NOTE(review): this rename mutates scData for the remainder of the script.
scData <- RenameIdents(scData, "mSC cluster 1"="mSC", "mSC cluster 2"="mSC")
markers <- FindAllMarkers(scData, only.pos=TRUE, return.thresh=0.01)
saveRDS(markers, file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                          "-pos_markers_mergedmSC.rds")))
top10 <- markers %>% group_by(cluster) %>% top_n(10, avg_logFC)
p <- DoHeatmap(scData, group.colors=cellCols[levels(Idents(scData))],
               features = top10$gene, raster=FALSE)
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-pos_markers_mergedmSC.pdf")),
          p, base_height = 6, base_width = 4)
figNumer <- figNumer + 1
# UMAP/tSNE with the merged mSC identities.
p1 <- DimPlot(scData, cols=cellCols, reduction = "umap", label=TRUE) +
  theme(legend.position = "none")
p2 <- DimPlot(scData, cols=cellCols, reduction = "tsne", label=TRUE) +
  theme(legend.position = "none")
p <- plot_grid(p1, p2, labels="AUTO", ncol=2)
save_plot(filename=file.path(resDir, paste0(formatC(figNumer, width=2, flag="0"),
                                            "-ClusteringPlot_mergedmSC.pdf")),
          p, ncol=2, nrow=1, base_asp = 1.2)
|
5a49ebba70fc0d09e23c7c86c3bb30915c864b09
|
f130be9a29145c213abd2d833e203618ecd03ae5
|
/processData.R
|
a773061b282a6a5aa22ad203b5e91c9c6c16402c
|
[] |
no_license
|
rebekahlow-jy/nm3239-data-project-r
|
b4f5cbe62e1b7a427c6bd4919c9c021e795e9ab1
|
01e87a4ef001d4a36f54f59df9bfaaa7c4a81888
|
refs/heads/master
| 2021-05-07T01:46:18.355180
| 2017-11-12T11:10:42
| 2017-11-12T11:10:42
| 110,425,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,275
|
r
|
processData.R
|
# Scrape HDB housing-grant amounts and combine them with marriage/divorce
# statistics loaded elsewhere (the csv_* data frames are not created in this
# file -- presumably loaded by a companion script; verify before running).
library(dplyr)
library(tidyverse)
library(rvest)
library(stringr)
url_grants <- "http://www.hdb.gov.sg/cs/infoweb/residential/buying-a-flat/new/first-timer-and-second-timer-couple-applicants"
webpage_grants <- read_html(url_grants)
# Second table on the page: last column = grant amount, first column = income
# band. The first row of each is a header and is dropped with [-c(1)].
grant_amount <- html_nodes(webpage_grants, 'table')[2] %>% html_node("tbody") %>% html_nodes("tr") %>% html_nodes("td:last-child") %>% html_text()
grant_amount <- grant_amount[-c(1)]
# Strip "$" and "," so the amounts parse as numbers.
grant_amount <- str_replace_all(grant_amount, "[$,]", "") %>% as.numeric()
income_level <- html_nodes(webpage_grants, 'table')[2] %>% html_node("tbody") %>% html_nodes("tr") %>% html_nodes("td:first-child") %>% html_text()
income_level <- income_level[-c(1)]
# Remove embedded whitespace control characters from the scraped labels.
income_level <- str_replace_all(income_level, "[\t\n\r\v\f]", "")
df_grants <- data.frame(income_level=income_level, grant_amount=grant_amount)
# Keep the income bands in page order when plotted (factor levels = row order).
df_grants$income_level <- factor(df_grants$income_level, levels = df_grants$income_level)
# Average of female and male age-specific marriage rates.
# NOTE(review): this averages by row position and assumes both csv_ tables
# have identical (year, age group) ordering -- confirm.
df_ageSpecificMarriageRate <- data.frame(year=csv_femaleAgeSpecificMarriageRate$year,
                                         value=((csv_femaleAgeSpecificMarriageRate$value+csv_maleAgeSpecificMarriageRate$value)/2),
                                         age_group=csv_femaleAgeSpecificMarriageRate$level_2
)
df_numMarriagesFemaleByAge <- group_by(csv_femaleAgeSpecificMarriageRate, age_group=level_2) %>%
  summarise(number=sum(value))
df_numMarriagesMaleByAge <- group_by(csv_maleAgeSpecificMarriageRate, age_group=level_2) %>%
  summarise(number=sum(value))
# Same positional-alignment assumption as above applies here.
df_numMarriagesByAge <- data.frame(age_group=df_numMarriagesFemaleByAge$age_group,
                                   number=((df_numMarriagesFemaleByAge$number+df_numMarriagesMaleByAge$number)/2)
)
# Restrict HDB construction figures to 1980 onwards.
df_flatsConstructedByHDB <- subset(csv_flatsConstructedByHDB, year>=1980)
# Filtered views: marriages at ages 25-34, their yearly totals, and divorces
# after 5-9 years of marriage.
df_ageSpecificMarriageRate_f <- subset(df_ageSpecificMarriageRate,
                                       (age_group==('25 - 29 Years') | age_group==('30 - 34 Years'))
)
df_numMarriagesByYear_f <- group_by(df_ageSpecificMarriageRate_f, year=year) %>%
  summarise(number=sum(value))
df_numDivorces_f <- subset(csv_totalDivorcesByDurationOfMarriage,
                           (level_2==('5-9 Years'))
)
|
b15da626da8f6406070aad056c2217954e413c6e
|
86014f58758b030b8bc54e88ac5f66078b18d07b
|
/R/GetInfoVisstat.r
|
5bf247cfff891b39fb9fdb1561a17f9414f27504
|
[] |
no_license
|
nielshintzen/visstat-extraction
|
f04e657e302fc37eb1639c548e6ca8131d2af63a
|
7073a8abaffc8f8ade469cea1154f4698e0d8675
|
refs/heads/master
| 2021-01-06T20:38:26.085103
| 2015-06-10T09:42:11
| 2015-06-10T09:42:11
| 37,180,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,162
|
r
|
GetInfoVisstat.r
|
# Print a 4-row preview of each main table in the 'visstat' database.
#
# Args:
#   user, passwd: credentials passed through to dBConnect() (defined
#     elsewhere in this project).
# Side effects: for each table, prints the query result followed by a
#   human-readable label. Called for its output; the return value is not
#   meaningful.
# NOTE(review): the ODBC connection is never closed; consider closing it
# (e.g. RODBC::odbcClose) before returning.
GetInfoVisstat <- function(user=user,passwd=passwd)
{
  visstat <- dBConnect(which.database="visstat",user=user,passwd=passwd)
  # Table name -> label printed after its preview.
  # Bug fix: the QUADRANT_PROPERTIES preview was labelled "TAXONS" in the
  # original (copy-paste error).
  jtables <- c(CATCHES             = "CATCHES",
               GEAR_PROPERTIES     = "GEAR PROPERTIES",
               PLATFORM_PROPERTIES = "PLATFORM PROPERTIES",
               REGISTRATIONS       = "REGISTRATIONS",
               TRIPS               = "TRIPS",
               TAXONS              = "TAXONS",
               QUADRANT_PROPERTIES = "QUADRANT PROPERTIES")
  for (tbl in names(jtables)) {
    # 'rownum < 5' limits the preview to 4 rows (Oracle syntax).
    qry <- paste("select * FROM", tbl, "WHERE rownum < 5")
    print(sqlQuery(visstat, qry))
    print(jtables[[tbl]])
  }
}
|
485ea14760715bc234436b17f11b2327d21dcb11
|
19dfd82cc612e5f40fe680270d24cf90cf547300
|
/man/Rfam.Rd
|
be9343428fa468021b21e82269c79c450f04d649
|
[
"Artistic-2.0"
] |
permissive
|
YuLab-SMU/ggmsa
|
300d049a3470be301788119c4cf261aa1ef06958
|
f1e62e07daa1a97cc209baa909a56af0b398222a
|
refs/heads/master
| 2022-09-10T20:03:47.473418
| 2022-08-22T01:35:30
| 2022-08-22T01:35:30
| 131,572,300
| 160
| 30
| null | 2022-08-03T15:24:05
| 2018-04-30T08:28:53
|
R
|
UTF-8
|
R
| false
| true
| 609
|
rd
|
Rfam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Rfam}
\alias{Rfam}
\title{Rfam}
\format{
a folder
}
\source{
\url{https://rfam.xfam.org/}
}
\description{
A folder containing seed alignment sequences and
corresponding consensus RNA secondary structure.
}
\details{
\itemize{
\item RF00458.fasta seed alignment sequences of Cripavirus internal
ribosome entry site (IRES)
\item RF03120.fasta seed alignment sequences of Sarbecovirus 5'UTR
\item RF03120_SS.txt consensus RNA secondary structure of
Sarbecovirus 5'UTR
}
}
\keyword{datasets}
|
1521fea05494d7d96235f3b1d8f3b3e644146a83
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TDCor/R/ssq.delay.R
|
979ffc501a829b24b9b96a66a476234804b3bf45
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
ssq.delay.R
|
# Mean squared discrepancy between two (possibly delayed) profile functions.
#
# Args:
#   delay:     time shift applied between the two profiles. Non-negative
#              delays shift hr backwards in time; negative delays shift ht.
#   hr, ht:    profile functions; when deriv is TRUE they are called with
#              deriv = 1 to obtain their derivatives.
#   time_l, time_u, time_step: lower bound, upper bound and step of the
#              evaluation grid.
#   type:      if > 0, compare hr against ht directly; otherwise compare
#              against the complementary profile (1 - hr, or -hr for
#              derivatives).
#   deriv:     logical; compare derivatives instead of the functions.
# Returns: sum of squared differences divided by the number of grid points.
ssq.delay <-
function(delay, hr, ht, time_l, time_u, time_step, type, deriv)
{
  u <- seq(time_l, time_u, time_step)
  # Apply the delay to whichever curve it belongs to (hr for delay >= 0,
  # ht otherwise), then evaluate both on the shared grid.
  if (delay >= 0) {
    t_r <- u - delay
    t_t <- u
  } else {
    t_r <- u
    t_t <- u + delay
  }
  if (deriv) {
    k1 <- hr(t_r, deriv = 1)
    k2 <- ht(t_t, deriv = 1)
  } else {
    k1 <- hr(t_r)
    k2 <- ht(t_t)
  }
  # type > 0: direct comparison; otherwise compare against the complement
  # (whose derivative is the negated derivative).
  if (type > 0) {
    output <- sum((k1 - k2)^2) / length(u)
  } else if (deriv) {
    output <- sum((-k1 - k2)^2) / length(u)
  } else {
    output <- sum((1 - k1 - k2)^2) / length(u)
  }
  output
}
|
2778982d944ce8f0ef55386f51cd1a5ef87513fb
|
dcb60332de2bf412c6ae0d0028337bd8cfc68f76
|
/misc/get_sn_atac.R
|
f698c2959fcb482fd3f3de5e6f5d58589473e91e
|
[] |
no_license
|
Jeremy37/ot
|
2037a10d38451d4d1321a81fbea3191dbcf0e7b7
|
5d04775f23af51ab8dc1d83c972b94c42138ddc8
|
refs/heads/master
| 2023-04-29T06:32:37.372719
| 2021-05-25T16:15:21
| 2021-05-25T16:15:21
| 112,335,122
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,424
|
r
|
get_sn_atac.R
|
#!/usr/bin/env Rscript
# This script is to get candidate causal ATAC QTL SNPs, which can then be
# used for Sarah's CRISPR assays for ATAC-altering variants
library(tidyverse)
# NOTE(review): GRanges/IRanges/findOverlaps (GenomicRanges) are used below
# but never loaded in this script -- they must be attached by the caller;
# consider adding library(GenomicRanges).
#setwd("/Users/jeremys/work/opentargets/sensoryneurons/GRCh38/ATAC/")
args <- commandArgs(trailingOnly = TRUE)
leadSnpFile = args[1] #rasqual.1k.leadSNPs.fdr0.1.ann.txt
allSnpFile = args[2] #rasqual.1k.pthresh0.01.ppathresh.0.0001.txt
snPeakFile = args[3] #sensoryneuron_consensus_atac_peaks.GRCh38.bed
ipscPeakFile = args[4] #atac_ipsc_peaks.narrowPeak
# NOTE(review): the command-line arguments above are immediately overwritten
# by these hard-coded paths (debugging leftover?) -- argv is effectively
# ignored.
leadSnpFile = "rasqual.1k.leadSNPs.fdr0.1.ann.txt"
allSnpFile = "rasqual.1k.pthresh0.01.ppathresh.0.0001.txt"
snPeakFile = "sensoryneuron_consensus_atac_peaks.GRCh38.txt"
ipscPeakFile = "atac_ipsc_peaks.narrowPeak"
# Lead caQTL SNPs (one per peak, FDR < 0.1) with annotation columns.
leadsnp.df = readr::read_tsv(leadSnpFile, col_types="ccciccddddddddiiidddcc")
#allsnp.df = readr::read_tsv(allSnpFile) # Doesn't work... not sure why, "Error in make.names(x) : invalid multibyte string 1"
allsnp.df = read.delim(allSnpFile) %>% dplyr::rename(peak = gene)
# Sensory-neuron consensus ATAC peaks; columns renamed to match the SNP table.
snPeaks.df = readr::read_tsv(snPeakFile, col_types="cccii") %>%
  dplyr::rename(peak=gene_id, chr=chromosome_name, sn_peak_start=exon_starts, sn_peak_end=exon_ends)
# iPSC ATAC peaks in narrowPeak format.
ipscPeaks.df = readr::read_tsv(ipscPeakFile, col_types="ciicicdddi", col_names=c("chr", "start", "end", "name", "score", "unk", "fc", "log10pval", "log10qval", "summitpos"))
# Strip the "chr" prefix so chromosome names match the SNP coordinates.
ipscPeaks.df$chr = gsub("^chr", "", ipscPeaks.df$chr)
# Attach lead-SNP statistics to the full per-peak SNP list.
snp.df = leadsnp.df %>% dplyr::select(peak, FDR, geneid, symbol, pvalue) %>%
  dplyr::rename(leadSnpPval = pvalue) %>%
  dplyr::left_join(allsnp.df)
# Identify which iPSC peaks have at least one candidate causal SNP in our list,
# and then subset our SNP table to include all SNPs from those peaks where there
# was an overlap with an iPSC peak
# Candidate causal SNPs: posterior probability of association > 0.25.
candidate.snp.df = snp.df %>% dplyr::filter(PPA > 0.25)
candidate.snp.gr = GRanges(seqnames=candidate.snp.df$chr,
                           ranges=IRanges(start=candidate.snp.df$pos, end=candidate.snp.df$pos),
                           strand = NA,
                           candidate.snp.df)
ipsc.peaks.gr = GRanges(seqnames=ipscPeaks.df$chr, IRanges(start=ipscPeaks.df$start, ipscPeaks.df$end), strand=NA,
                        ipscPeaks.df %>% dplyr::select(-chr, -start, -end))
# One overlap per candidate SNP (first matching iPSC peak is kept).
overlaps = as.data.frame(findOverlaps(candidate.snp.gr, ipsc.peaks.gr)) %>% dplyr::filter(!duplicated(queryHits))
peak.overlaps.df = data.frame(sn_peak = candidate.snp.df[overlaps$queryHits,]$peak,
                              ipsc_peak = ipscPeaks.df[overlaps$subjectHits,]$name)
#overlapping.snp.df = snp.df %>% dplyr::filter(peak %in% peak.overlaps) %>%
#  dplyr::arrange(leadSnpPval)
# Final table: all SNPs from sensory-neuron peaks that overlap an iPSC peak,
# with peak coordinates from both assays, ordered by lead-SNP p-value and PPA,
# deduplicated by SNP id and filtered to FDR < 0.01.
overlapping.snp.df = peak.overlaps.df %>%
  dplyr::left_join(ipscPeaks.df %>% dplyr::select(ipsc_peak=name, ipsc_peak_start=start, ipsc_peak_end=end)) %>%
  dplyr::left_join(snPeaks.df %>% dplyr::select(sn_peak=peak, sn_peak_start, sn_peak_end)) %>%
  dplyr::left_join(snp.df, by=c("sn_peak" = "peak")) %>%
  dplyr::arrange(leadSnpPval, -PPA) %>%
  dplyr::filter(FDR < 0.01, !duplicated(snpid)) %>%
  dplyr::select(sn_peak, ipsc_peak, chr, ipsc_peak_start, ipsc_peak_end, ipsc_peak_end, sn_peak_start, sn_peak_end,
                geneid, symbol, leadSnpPval, FDR, snpid, chr, pos, af, imputation_quality, effect_size, fsnps, rsnps, fsnp_genotype_corr, rsnp_genotype_corr, pvalue, PPA)
readr::write_tsv(overlapping.snp.df, "sensoryneurons.caqtls.in_ipsc_peaks.txt", col_names = T)
|
edce2581e399546ff4d016da29e5f1dda0961bec
|
f745c898aab20fa173bd75eb959e982b6e99ef0b
|
/betfair/Functions/twitter_graphs.R
|
c19adb6f8ec286b66028f833f643ed8499072cda
|
[] |
no_license
|
NimaHRaja/WorldCup2018
|
3a95fca0aac5cdf615e3ccf6f3af86764d235186
|
93f50d4cc2e982c29985be279fdaf8c8f0229643
|
refs/heads/master
| 2020-03-19T02:11:27.323390
| 2018-07-02T20:27:19
| 2018-07-02T20:27:19
| 135,607,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
twitter_graphs.R
|
# Purpose: point plot of the total matched amount over time for the Betfair
# "Winner 2018" market (World Cup 2018), with the y axis labelled in GBP.
# NOTE(review): relies on a pre-existing `all_data_odds` data frame and on
# ggplot2 + scales (unit_format) being attached by the caller -- confirm.
#to be documented
# jpeg("betfair/outputs/twitter_BetfairWinnerTotalMatched_20180606.jpg", width = 800, height = 600)
ggplot(subset(all_data_odds, marketName == "Winner 2018"),
aes(x = as.POSIXct(time), y = totalMatched)) +
geom_point(colour = "blue") +
# ylim(c(0, max(subset(all_data_odds, marketName == "Winner 2018")$totalMatched)*1.1)) +
xlab("time") +
ylab("Total Matched") +
ggtitle("Betfair / Worldcup 2018 / Winner / Total Matched") +
# The limits= argument below supersedes the commented-out ylim() above.
scale_y_continuous(label = unit_format(unit = "£"),
limits = c(0, max(subset(all_data_odds, marketName == "Winner 2018")$totalMatched)*1.1))
# dev.off()
|
506015115f1e3783d52f524a33a5d7a4bc436325
|
afd52451e8845963de4ad1243005834fa0958beb
|
/data_handling.R
|
aa4e6039a7c282c4451589fdc6897540a25e7e72
|
[] |
no_license
|
plus4u/R
|
7c0d867767ae948b24a15322df11b500abcfd920
|
c8c25313567bd8bcf5142a04187a24e0d5ad12d1
|
refs/heads/master
| 2021-09-19T13:52:40.115595
| 2021-08-11T06:47:22
| 2021-08-11T06:47:22
| 155,179,952
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,689
|
r
|
data_handling.R
|
## data handling
## Personal scratch/notes file: short snippets demonstrating vectors,
## matrices, data frames, lists, random numbers, and basic subsetting.
## NOTE(review): this file is NOT runnable top-to-bottom -- several lines
## reference undefined objects or missing files; those spots are flagged
## below with NOTE(review) comments.
rm(list = ls()) # NOTE(review): wipes the whole workspace -- avoid in shared scripts
# vector
x1 <- c(1,2,3)
x <- 1:9
x
x1 <- c(1: 9)
n1 <- rep(10,3)
n1 <- rep(1:3, 3)
n1
x1.n1 <- data.frame(x1, n1)
x1.n1
d1 <- data.frame(A=c(1,2),B=c(3,4),C=c(5,6),D=c(7,7),E=c(8,8),F=c(9,9))
d1
subset(d1, select=c("A", "B"))
# Generate a random number : ?runif / ?sample / ?.Random.seed
x1 <- runif(10, 5.0, 7.5)
x3 <- sample(1:10, 1)
x4 <- sample(1:10, 5, replace=T)
sample(state.name, 10) # generate a random subset of any vector
# find dataset, package, variables : get a listing of pre-loaded data sets
data()
# Gives a list of attached packages (see library), and R objects, usually data.frames.
searchpaths()
attach() # detach()  -- NOTE(review): attach() with no argument errors; shown for syntax only
search()
attach(iris)
head(Sepal.Width)
with( ) # within( )  -- NOTE(review): placeholder call, errors as written
ls() # all objects
rm(list = ls()) # delete all
search()
data(package="psy")
# Existing local data
mydata <- read.csv("filename.txt")
mydata <- read.table("filename.txt", sep="\t", header=TRUE)
# function: rnorm sample rescaled to have EXACTLY the requested mean/sd
rnorm_fixed = function(n, mu=0, sigma=1) {
x = rnorm(n) # from standard normal distribution
x = sigma * x / sd(x) # scale to desired SD
x = x - mean(x) + mu # center around desired mean
return(x)
}
x = rnorm(n=20, mean=5, sd=10)
mean(x) # is e.g. 6.813...
sd(x) # is e.g. 10.222...
# NOTE(review): rnorm_fixed's parameters are mu/sigma, so the mean=/sd=
# arguments below raise "unused argument" errors -- TODO confirm intent.
x = rnorm_fixed(n=20, mean=5, sd=10)
mean(x) # is 5
sd(x) # is 10
rnorm2 <- function(n,mean,sd) { mean+sd*scale(rnorm(n)) }
r <- rnorm2(100,4,1)
mean(r) ## 4
sd(r) ## 1
runif() # By default, its range is from 0 to 1.
runif(3, min=0, max=100)
rnorm(4)
#> [1] -2.3308287 -0.9073857 -0.7638332 -0.2193786
# Use a different mean and standard deviation
rnorm(4, mean=50, sd=10)
#> [1] 59.20927 40.12440 44.58840 41.97056
# To check that the distribution looks right, make a histogram of the numbers
x <- rnorm(400, mean=50, sd=10)
hist(x)
# data select # extract
d <- data.frame(Name = c("A", "B", "C", "D", "E"),Amount = c(150, 120, 175, 160, 120))
str(d)
subset(d, Amount==120)
d[Amount=150] # NOTE(review): broken -- probably meant d[d$Amount == 150, ]
d[which.min(d$Amount), ]
# Name Amount
# 2 B 120
d[which(d$Amount == min(d$Amount)), ]
# Name Amount
# 2 B 120
# 5 E 120
# subset function
newdata <- subset(mydata, age >= 20 | age < 10,select=c(ID, Weight))
# package handling
d1 <- installed.packages()
str(d1)
d1 <- data.frame(d1)
str(d1)
attach(d1)
d2 <- d1[order(Package),]
head(d2)
sort(d1$Package)
head(d1)
install.packages("QuantPsyc")
library(lm.beta)
lm.beta(x1) # NOTE(review): lm.beta() expects a fitted lm model, not a numeric vector
# matrix multiply
rep(bias, each=2) # NOTE(review): `bias` is undefined here -- fragment only
# counting : Combining the length() and which() commands gives a handy method of counting elements that meet particular criteria.
b <- c(7, 2, 4, 3, -1, -2, 3, 3, 6, 8, 12, 7, 3)
b
# Let’s count the 3s in the vector b.
count3 <- length(which(b == 3))
count3 # 4
# In fact, you can count the number of elements that satisfy almost any given condition.
length(which(b < 7)) # 9
# Here is an alternative approach, also using the length() command, but also using square brackets for sub-setting:
length(b[ b < 7 ]) # 9
##
x <- c(1:10)
x[2] # 2; prints the n-th element
x[-10] # prints everything except the n-th element
append(x, 999, after=3) # insert new element (999) at position 4 / after=0 inserts at the front
x1 <- 1:5
x2 <- 3:8
setdiff(x1, x2) # 1,2: elements in vec1 that are not in vec2
intersect(x1, x2) # intersection
# NOTE(review): x has 10 elements, so assigning only 3 names errors.
names(x) <- c("k", "l", "m") # assign a name to each element
length(x) # total length of the vector
sum(x) # sum of the vector
NROW(x)# number of elements (NROW also works on plain vectors)
nrow(x) # number of rows (matrices/data frames only; NULL for vectors)
3 %in% x # test whether the value occurs in x (TRUE/FALSE)
x <- seq(1, 10, 2)
x <- seq(1, 10, 3)
x
x <- rep(1:5, 3)
x <- rep(1:5, each=2)
# matrix : variable data type
x <- c(1:10)
x <- matrix(x, 2, 5)
x
x <- matrix(c(1,2,3,4), nrow=2)
x <- matrix(c(1,2,3,4), nrow=2, byrow=T)
x[a,b] # NOTE(review): a and b are undefined -- placeholder for x[row, col]
colnames(x) <- c("A", "B")
x
t(x) # transpose
solve(x) # matrix inverse
x1 <- matrix(1:4, nrow = 2)
x2 <- matrix(1:4, nrow = 2, byrow = T)
x2
x1 * x2 # element-wise product
x1 %*% x2 # matmul
x[2,, drop = FALSE] # extract row 2 while keeping it a matrix
x
rbind(x1, c(99,100)) # append a row
cbind(x1, c(11,12)) # append a column
# list :
x <- list("a", "b", "c", "d")
### data.frame : vector, matrix, list
v1 <- 1:3
v2 <- c("a","b","c")
m1 <- matrix(1:4, nrow = 2)
x <- data.frame(v1, v2) # , m1)
x
x$column1 = 9 # assign to/create that column; dataframe[index] gives similar access
x
a <- data.frame(col1 = factor(2), col2 = numeric(3))
a
a <- edit(a) # edit() opens the interactive R data editor
x <- read.csv("C:/~~경로/filename.csv", header = TRUE)
summary() # NOTE(review): missing argument -- presumably summary(x)
x <- edit(x)
x
mean(x$column1, na.rm=T) # na.rm = TRUE
x
attach(x) #
subset(x, v1<2)
x[c(2, 3, 1), 1:2 ]
ncol(x)
nrow(x)
names(x)
rownames(x)
#
c <- stack(list(v1=v1, v2=v2),x,drop = T) # NOTE(review): shadows base::c -- avoid
c
#
l <- list(c)
l
l[["test"]] <- 1:3 # NULL
l[[3]] <- 2:5
l
getwd()
# renaming
# get column names
colnames(my_data)
# Rename column where names is "Sepal.Length"
names(my_data)[names(my_data) == "Sepal.Length"] <- "sepal_length"
names(my_data)[names(my_data) == "Sepal.Width"] <- "sepal_width"
my_data
##
library(dplyr)
df1 %>%
select(A, B, E)
select(df1, A, B, E)
##
df <- data.frame( c( 183, 85, 40), c( 175, 76, 35), c( 178, 79, 38 ))
names(df) <- c("Height", "Weight", "Age")
# All Rows and All Columns
df[,]
# First row and all columns
df[1,]
# First two rows and all columns
df[1:2,]
# First and third row and all columns
df[ c(1,3), ]
# First Row and 2nd and third column
df[1, 2:3]
# First, Second Row and Second and Third COlumn
df[1:2, 2:3]
# Just First Column with All rows
df[, 1]
# First and Third Column with All rows
df[,c(1,3)]
|
286b180efbc143efe8997dad0c2fad4fccc750a6
|
180be150463963373eadd36bbaa735061dc7f0b1
|
/man/visual2pw.Rd
|
e575c4bc283f56970c298cd6f581d948bcac76fc
|
[
"AFL-3.0"
] |
permissive
|
fskeo/GESTIA
|
4cf7b19d969acdf2078ca15b1dc8d5498987e5d1
|
d25976609fb9b5fc1eedb26cbc342744a41f8cea
|
refs/heads/master
| 2023-01-20T23:43:49.384722
| 2020-08-19T02:38:51
| 2020-08-19T02:38:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 839
|
rd
|
visual2pw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source_GESTIA.R
\name{visual2pw}
\alias{visual2pw}
\title{Visualize two pathways' components}
\usage{
visual2pw(ig.all, genesa, genesb, colorset = c("yellowgreen", "tomato",
"gold"), isolated = FALSE, ly = c("kk", "fr", "random", "grid"))
}
\arguments{
\item{ig.all}{The global network}
\item{genesa}{The gene symbols of pathway A}
\item{genesb}{The gene symbols of pathway B}
\item{colorset}{The color set for the shared genes, genes of pathway A, and the genes of pathway B. Default yellowgreen, tomato, gold}
\item{isolated}{Whether to plot the isolated genes. Default FALSE}
\item{ly}{Layout of the igraph. Available options: kk, fr, random, grid}
}
\value{
Plot directly.
}
\description{
Visualization of two pathways.
}
|
b32aede55098c6d5418fa90d62f4af62b5fba3a1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/m2r/examples/factor_poly.Rd.R
|
8f5dc15f8b1af5466284b88183e0bbddfda717e3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 957
|
r
|
factor_poly.Rd.R
|
# Auto-extracted example code for m2r::factor_poly (polynomial factoring via
# Macaulay2). The whole example body is wrapped in "Not run" comments (##D
# lines) because it requires a local Macaulay2 installation and an
# interactive session; only the library() call executes.
library(m2r)
### Name: factor_poly
### Title: Factor a polynomial
### Aliases: factor_poly factor_poly.
### ** Examples
## Not run:
##D requires Macaulay2 be installed and an interactive session
##D
##D ##### basic usage
##D ########################################
##D
##D ring("x", "y", coefring = "QQ")
##D factor_poly("x^4 - y^4")
##D
##D # reference function
##D factor_poly.("x^4 - y^4")
##D
##D
##D ##### different inputs
##D ########################################
##D
##D # factor_poly accepts mpoly objects:
##D (p <- mp("x^4 - y^4"))
##D factor_poly.(p)
##D factor_poly(p)
##D mp("(x-y) (x+y) (x^2+y^2)")
##D
##D
##D
##D ##### other examples
##D ########################################
##D
##D ring("x","y", "z", coefring = "QQ")
##D (p <- mp("(x^2 - y) (x^2 + y) (x + y)^2 (x - z)^2"))
##D factor_poly.(p)
##D factor_poly(p)
##D
##D (p <- mp("(x-1)^3 (y-1)^3"))
##D factor_poly.(p)
##D factor_poly(p)
##D
## End(Not run)
|
e902bd3cc3e85be310a8bab7828cf03f44565b78
|
b051db434b8ec8e30ec4264181ba6bf86b539ce9
|
/man/validMassSpectraObject.Rd
|
38055a53fb138a247ace054ac22be28a95ed05d9
|
[] |
no_license
|
lorenzgerber/tofsims
|
6484b58a532385bcbc906fe2921190d2221cc77f
|
cf0791d3324638d604dea5a111729b507f5b2192
|
refs/heads/master
| 2021-06-25T23:27:27.148518
| 2020-10-15T06:32:08
| 2020-10-15T06:32:08
| 73,091,887
| 1
| 1
| null | 2016-11-07T15:24:28
| 2016-11-07T15:24:28
| null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
validMassSpectraObject.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClassDefinition.R
\name{validMassSpectraObject}
\alias{validMassSpectraObject}
\title{Validation method function for class MassSpectra objects}
\usage{
validMassSpectraObject(object)
}
\arguments{
\item{object}{object of class MassSpectra}
}
\value{
boolean class validity test
}
\description{
Validation method function for class MassSpectra objects
}
|
b50fa61429a8c4a7d5ef03486807787c54e87004
|
98a79efaca28ba04ba44c8a18c03b0549d1549c7
|
/R/makeVAR.R
|
5cb6bf20d8627fd3e00ab0f1c1eb255a0ed3c901
|
[] |
no_license
|
joshoberman/backtest
|
81b2e5e14c6dfb300a7a54aacc5c9468362e2ca1
|
0c9f64977a98a0239800cd73c9d3619cf1559082
|
refs/heads/master
| 2021-01-01T18:46:00.947093
| 2017-07-26T14:27:40
| 2017-07-26T14:27:40
| 98,430,911
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,555
|
r
|
makeVAR.R
|
## Fit a VAR model on seasonally differenced monthly series and produce an
## n-step-ahead forecast on the original (level) scale.
##
## Args:
##   x:      data frame of monthly series; must contain a `monthCounter`
##           column plus the series to model. The FIRST remaining column is
##           the target series whose forecast is returned.
##   nAhead: number of months to forecast (default 24).
##
## Returns a list with the model name ("VAR"), fit diagnostics (R^2,
## adjusted R^2 of the first equation, and AIC), the rounded de-differenced
## point forecasts, and the month counters they correspond to.
##
## NOTE: requires the `vars` package (VAR(), predict.varest()) to be loaded.
makeVARandPredict <- function(x, nAhead = 24) {
  season <- 12L  # monthly data: difference and de-difference at lag 12
  # Drop the month counter, keeping only the series to be modelled.
  x.noMnthCnt <- subset(x, select = -c(monthCounter))
  x.noMnthCnt.ts <- ts(x.noMnthCnt, frequency = season)
  # Year-on-year (lag-12) difference removes the seasonal component.
  x.noMnthCnt.ts.diff <- diff(x.noMnthCnt.ts, lag = season)
  # Fit a VAR with up to lag-6 coefficients.
  model <- VAR(x.noMnthCnt.ts.diff, p = 6)
  # Diagnostics are taken from the first equation (the target series).
  summMod <- summary(model)
  rsq <- summMod$varresult[[1]]$r.squared
  adjusted_rsq <- summMod$varresult[[1]]$adj.r.squared
  aic <- AIC(model)
  # Forecast the differenced series, then de-difference back to levels.
  preds <- predict(model, n.ahead = nAhead)
  fcstVals <- round(preds$fcst[[1]][, 1])
  predicted <- integer(length(fcstVals))
  lastN <- tail(x.noMnthCnt.ts[, 1], season)
  for (j in seq_along(fcstVals)) {
    # The first seasonal cycle adds onto the last observed year; later
    # cycles add onto the previously forecast year.
    prev <- if (j <= season) lastN[j] else predicted[j - season]
    predicted[j] <- prev + fcstVals[j]
  }
  # Month counters continuing on from the input data.
  timePointsForward <- seq(max(x$monthCounter) + 1, max(x$monthCounter) + nAhead)
  diagnostics <- list(rsq = rsq, adjusted_rsq = adjusted_rsq, AIC = aic)
  # Gather values into the output list.
  list(model_name = "VAR",
       diagnostics = diagnostics,
       predicted_values_unweighted = predicted,
       predicted_monthCounters = timePointsForward)
}
|
642505f608ca7881114bdd0ac293fe7f2a7afe48
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987992-test.R
|
bdd725ae50fc325842d635f7fc39158479d34b91
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 589
|
r
|
1612987992-test.R
|
# Fuzzer-generated regression input (valgrind run) for palm:::pbc_distances.
# The input deliberately mixes -Inf/NaN/NA/denormal extremes; the check is
# only that the call completes and the result's structure can be printed.
testlist <- list(lims = structure(c(2.37636445786895e-212, -Inf, 2.37636445786895e-212, 0), .Dim = c(2L, 2L)), points = structure(c(-Inf, NaN, -Inf, 3.50129302979654e-312, 5.82833713021365e+303, 0, 6.00939702786469e-307, Inf, 7.2911220195564e-304, 1.08650122118086e-310, 4.94065645841247e-324, 7.06327380456815e-304, NA, 2.00604628389942e-314, 4.94065645841247e-324, 1.37980654311726e-309, 4.94065645841247e-324, 7.29095846630662e-304, 1.3906711615669e-309, 5.64366668815242e+67, 9.39103090723215e+57 ), .Dim = c(7L, 3L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
5607dc662fde26152b16d32ed772a9f13390e44c
|
311ae82a6efaf9c9cac8d1b517ba992815c88128
|
/Production/DSM/pH/digital_soil_mapping/model_fitting/variogram/d1_residual_variogram.R
|
7559bc5833b29aeba4ce8ea47b64fceb69ba7f19
|
[
"CC0-1.0"
] |
permissive
|
AusSoilsDSM/SLGA
|
8c77f0ad24a49e05f00c8a71b452214e401d6a3f
|
41d8e2c009c1595c87bdd805a8ba6c4a3f45cbd1
|
refs/heads/main
| 2023-03-18T13:18:33.073555
| 2023-03-07T23:54:51
| 2023-03-07T23:54:51
| 555,090,777
| 7
| 2
| null | 2023-03-07T21:55:39
| 2022-10-20T23:51:23
|
R
|
UTF-8
|
R
| false
| false
| 3,210
|
r
|
d1_residual_variogram.R
|
### TERN LANDSCAPES
# Soil pH model model fitting
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 18.5.21
# modified: 18.5.21
# CODE PURPOSE
# # Apply model fits to all available data [excluding external data.
# need to estimate model residuals
# Workflow: average the predictions of an ensemble of saved ranger models,
# compute residuals against the observed target, then fit an empirical and
# an auto-fitted variogram to the residuals and save both.
# fixed parameters
vart<- "pH"
depth<- "d1"
# root directory
data.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/data/curated_all/"
dists.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/data/field_2_4a1_dists/"
params.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/data/ranger_model_hyperparams/"
model.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/models/"
funcs.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/miscell/"
slurm.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/model_fitting/slurm/"
r.code<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/model_fitting/"
# libraries
library(caret);library(ranger);library(raster);library(rgdal);library(sp);library(gstat);library(automap);library(matrixStats);library(ithir)
source(paste0(funcs.out,"goof.R"))
#data
# site data
site.dat<- readRDS(paste0(data.root,"tern_soilpH4a1_siteDat_covariates_CALVALDAT_SimulationAverages.rds"))
### place where the models are situate
models<- list.files(path = paste0(model.out, depth), pattern = ".rds",full.names = TRUE)
models
# One column per ensemble member.
# NOTE(review): ncol is hard-coded to 50 -- assumes exactly 50 model files
# are found above; confirm before reuse.
empt.mat<- matrix(NA, nrow=nrow(site.dat),ncol=50)
for (i in 1:length(models)){
pred.mod<- readRDS(models[i])
# predict on calibration data
ranger.pred_c<-predict(pred.mod, site.dat)
# goof() prints fit statistics per model; its result is not stored.
goof(observed = site.dat$targetAVG, predicted = ranger.pred_c, plot.it = F)
empt.mat[,i]<- ranger.pred_c}
# Ensemble average prediction and residual per site.
predAVG<- rowMeans(empt.mat)
site.dat$predAVG<- predAVG
names(site.dat)
site.dat$predResidual<- site.dat$targetAVG - site.dat$predAVG
site.dat<- site.dat[,c(1:10,51,52,11:50)]
hist(site.dat$predResidual)
saveRDS(object = site.dat, file = paste0(data.root,"tern_soilpH4a1_siteDat_covariates_CALVALDAT_SimulationResiduals_d1.rds"))
# fit variogram
site.dat<- as.data.frame(site.dat)
names(site.dat)[4:5]<- c("x", "y")
coordinates(site.dat)<- ~ x + y
# set coordinate reference system
crs(site.dat)<- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
# transform coordinates to projected
site.dat<- spTransform(site.dat,CRSobj = "+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
site.dat<- as.data.frame(site.dat)
names(site.dat)
coordinates(site.dat)<- ~ x + y
# Empirical variogram of the residuals plus an automap auto-fitted model.
vgm1<- variogram(predResidual~1, data = site.dat,width= 1000, cutoff=1500000)
afit<- autofitVariogram(predResidual~1, site.dat)
plot(afit)
afit
#afit # variogram parameters
plot(vgm1, model=afit$var_model)
### save the variogram point data and the variogram model
saveRDS(site.dat, paste0(data.root,"variogram_dat/4a1/tern_soilpH4a1_siteDat_covariates_CALVALDAT_SimulationResiduals_d1_ARD.rds"))
saveRDS(afit, paste0(model.out,"variogram_models/4a1/residuals_variogram_4a1_d1.rds"))
|
2bd0f0b473e2452cd857690fdaf36a49e6f86215
|
390b8ecc5591fe3562c5af26f5cd444442d07616
|
/R/interpolateData.R
|
6802ba3e7d19131135ea151e2e93844dc7fb33c4
|
[] |
no_license
|
selu220/weatheR
|
d86e05c5e17579d0d601145c4bc9c08e35c03527
|
18a0fd66ef17e907959cc99643b8edb0771c0f1e
|
refs/heads/master
| 2021-01-18T09:28:00.072649
| 2016-02-18T08:48:51
| 2016-02-18T08:48:51
| 51,706,012
| 0
| 0
| null | 2016-02-14T17:58:53
| 2016-02-14T17:58:53
| null |
UTF-8
|
R
| false
| false
| 3,975
|
r
|
interpolateData.R
|
#' Interpolate Weather Station Data
#'
#' This function takes in a list object of one or more data frames and will return
#' a data frame with hourly observations, with missing observations linearly interpolated
#'
#' @param wx.list List object of weather data. Must contain the elements
#'   `station_data` (a list of per-station data frames with columns city,
#'   USAFID, distance, rank, YR, M, D, HR, LAT, LONG, ELEV, TEMP, DEW.POINT),
#'   `dl_status`, `removed_rows`, and `station_names_final`, which are passed
#'   through to the output.
#' @param type Type of data structure to return the weather data in: "df"
#'   (one combined data frame) or "list" (one data frame per station).
#' @return A list with elements `dl_status`, `removed_rows`,
#'   `station_names_final`, `interpolated` (count and share of interpolated
#'   observations per station; also printed as a side effect), and
#'   `station_data` (combined data frame or list, depending on `type`).
#' @examples
#' \dontrun{
#' data(stations)
#' }
#' @export
interpolateData <- function(wx.list, type="df")
{
st.data <- wx.list$station_data
# Collapse duplicate readings within the same hour to their mean.
clean.list <- lapply(st.data, function(x) {
aggregate(cbind(LAT, LONG, ELEV, TEMP, DEW.POINT) ~
city + USAFID + distance + rank + YR + M + D + HR, data=x, mean)
})
# Create a column with the full posix date for each hour
for (i in 1:length(clean.list))
{
clean.list[[i]]$dates <- as.POSIXct(paste(paste(clean.list[[i]]$YR,"-",clean.list[[i]]$M,
"-",clean.list[[i]]$D, " ",clean.list[[i]]$HR,sep=""),
":",0,":",0,sep=""),"%Y-%m-%d %H:%M:%S", tz="UTC")
}
# Create a list of dataframes of each hour
# (a complete hourly grid spanning whole calendar years, per station)
hourly.list <- list()
for (i in 1:length(clean.list))
{
hourly.list[[i]] <- data.frame(hours=seq(
from=as.POSIXct(paste(min(clean.list[[i]]$YR),"-1-1 0:00", sep=""), tz="UTC"),
to=as.POSIXct(paste(max(clean.list[[i]]$YR),"-12-31 23:00", sep=""), tz="UTC"),
by="hour"))
}
# Per-station count and share of hours missing from the observations
# (i.e. the hours that will be filled by interpolation below).
missing <- matrix(nrow=length(clean.list), ncol=2,
dimnames=list(names(clean.list),c("num_interpolated", "pct_interpolated")))
for(i in 1:length(clean.list))
{
ms.num <- (nrow(hourly.list[[i]]) - nrow(clean.list[[i]]))
ms.pct <- (nrow(hourly.list[[i]]) - nrow(clean.list[[i]]))/nrow(hourly.list[[i]])
missing[i,] <- c(ms.num, ms.pct)
}
missing <- as.data.frame(missing)
print(missing)
out.list <- list()
for (i in 1:length(clean.list))
{
# Left-join observations onto the complete hourly grid; gaps become NA.
temp.df <- merge(hourly.list[[i]], clean.list[[i]], by.x="hours", by.y="dates", all.x=TRUE)
# Station metadata is constant per station -- backfill it everywhere.
temp.df$city <- unique(na.omit(temp.df$city))[1]
temp.df$USAFID <- unique(na.omit(temp.df$USAFID))[1]
temp.df$distance <- unique(na.omit(temp.df$distance))[1]
temp.df$rank <- unique(na.omit(temp.df$rank))[1]
temp.df$LAT <- unique(na.omit(temp.df$LAT))[1]
temp.df$LONG <- unique(na.omit(temp.df$LONG))[1]
temp.df$ELEV <- unique(na.omit(temp.df$ELEV))[1]
temp.df$YR <- as.numeric(format(temp.df$hours,"%Y"))
temp.df$M <- as.numeric(format(temp.df$hours,"%m"))
temp.df$D <- as.numeric(format(temp.df$hours,"%d"))
temp.df$HR <- as.numeric(format(temp.df$hours,"%H"))
# Interpolation
# NOTE(review): approx() with the default rule leaves hours before the
# first / after the last observation as NA -- confirm that is intended.
temp.int <- approx(x=temp.df$hours, y=temp.df$TEMP, xout=temp.df$hours)
temp.df$TEMP <- temp.int$y
dew.int <- approx(x = temp.df$hours, y = temp.df$DEW.POINT, xout = temp.df$hours)
temp.df$DEW.POINT <- dew.int$y
df.name <- unique(paste0(temp.df$city,"_",temp.df$USAFID))
temp.list <- list(temp.df)
names(temp.list) <- df.name
# Merge the dataframes together
out.list <- c(out.list, temp.list)
}
# This will return one large dataframe in the 'station_list' list item in the output
if(type=="df")
{
oneDF <- suppressWarnings(Reduce(function(...) rbind(...), out.list))
final <- list(dl_status=wx.list$dl_status, removed_rows=wx.list$removed_rows,
station_names_final=wx.list$station_names_final, interpolated=missing,
station_data=oneDF)
}
# this will return a list of dataframes in the 'station_list' list item in the output
if(type=="list")
{
final <- list(dl_status=wx.list$dl_status, removed_rows=wx.list$removed_rows,
station_names_final=wx.list$station_names_final, interpolated=missing,
station_data=out.list)
}
return(final)
}
|
b380f2755c7e78e003ffa196e998ae27eba84934
|
88ab69d4cd76460be75494157528f0a4c829b27e
|
/R/Permafrost/Mapping/PermafrostCompara/CalDemKappa.R
|
dcba9acc8d70db08d68f0f682c85f87d22906195
|
[] |
no_license
|
smallwave/Noah-Tibet
|
92706f1932de81f9c6e0e5a057c0ab542353a0a0
|
8c2469608368375673075aa806e06eeeae1b379a
|
refs/heads/master
| 2020-12-24T15:23:31.060111
| 2016-10-09T02:24:59
| 2016-10-09T02:24:59
| 42,633,145
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,560
|
r
|
CalDemKappa.R
|
##########################################################################################################
# NAME
#
# PURPOSE
# Compare point observations of permafrost class against a raster map,
# extract elevation (DEM) per point, and compute per-elevation-band
# agreement (overall accuracy) between observed and mapped classes.
#
# PROGRAMMER(S)
# wuxb
# REVISION HISTORY
# 20160921 -- Initial version created and posted online
#
# REFERENCES
##########################################################################################################
library(sp)
library(plyr) # need for dataset ozone
library(raster)
library(rgdal) # for spTransform
library(psych) # kappa
# NOTE(review): psych is loaded "for kappa" but no kappa statistic is
# computed in this visible script -- only per-band agreement ratios.
inputCvs <- "F:/worktemp/Permafrost(Change)/Work/Res/CVS/PermafrostMapCompara.csv"
rasterFileMap <- "F:/worktemp/Permafrost(Change)/Data/QTPMap/Tif/2005map.tif" #1
rasterFileDem <- "E:/workspace/Write Paper/SoilTextureProduce/Data/DataSource/DEM/dem_tibet.tif"
#use over
# read Data
permafrostMap <- read.csv(inputCvs, head=TRUE,sep=",",check.names=FALSE)
#use over
# read Data
permafrostMapCor <-permafrostMap
coordinates(permafrostMapCor) <- c("x","y")
# Extract the mapped permafrost class at each point.
# NOTE(review): the numbered comments #1/#2/#3 appear to mark the lines to
# edit when switching the comparison year -- confirm with the author.
rasterData <- raster(rasterFileMap)
demExtract <- extract(rasterData,permafrostMapCor,sp =TRUE)
demExtractDf <-demExtract@data
subDemExtractDf<-demExtractDf[c("ID","X2005map")] #2
comparaPermafrostMap <- merge(permafrostMap,subDemExtractDf,by.x="ID",by.y="ID")
# Extract the elevation at each point and merge it in as well.
rasterData <- raster(rasterFileDem)
demExtract <- extract(rasterData,permafrostMapCor,sp =TRUE)
demExtractDf <-demExtract@data
subDemExtractDf<-demExtractDf[c("ID","dem_tibet")]
comparaPermafrostMapDem <- merge(comparaPermafrostMap,subDemExtractDf,by.x="ID",by.y="ID")
# processs
# For each 100 m elevation band (2000-6000 m): band upper limit, number of
# points in the band, and fraction of points where observed == mapped class.
resOA <- NULL
demCls <-seq(from = 2000 , to = 6000 ,by = 100)
lenDemCls<-length(demCls) -1
for (i in 1:lenDemCls)
{
resYOA<-rep(c(0), 3)
first <-demCls[i]
end <-demCls[i+1]
resYOA[1]<- end
dfSel <- subset(comparaPermafrostMapDem, dem_tibet > first & dem_tibet < end)
dfSel <- na.omit(dfSel)
resYOA[2] <- nrow(dfSel)
if(nrow(dfSel) < 1)
{
resYOA[3] = 0
}
else
{
dfSelCom <- subset(dfSel, X2005 == X2005map) #3
resYOA[3] <- nrow(dfSelCom)/nrow(dfSel)
}
resOA<-rbind(resOA,resYOA) # grows row-by-row; fine for 40 bands
}
#************************************************************************************************
# 5 write
#************************************************************************************************
## Write res
outPutName <- paste("F:/worktemp/Permafrost(Change)/Work/Res/CVS/PermafrostMapComparaDem.csv",sep='')
write.csv(resOA, file = outPutName)
|
ec915835005c6545313c0ef6a996136147f671a9
|
16d916a7a309a224fb038f294ac2926abfe5a23a
|
/R/rotavirus_functions.r
|
842a99ce0790f4ea5e1f5473cf4fa163f661a136
|
[] |
no_license
|
chrishedw/rotavirus
|
ed6fc72b8e7ab2d0472e554bff2c8fe39a6aeca8
|
ec72f2ecc1c60d06808bde08568d005ebea28482
|
refs/heads/master
| 2020-05-20T08:35:44.901117
| 2015-03-26T12:44:33
| 2015-03-26T12:44:33
| 32,926,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 409
|
r
|
rotavirus_functions.r
|
# Add one to a numeric value.
#
# Args:
#   x: a numeric scalar or vector with no missing values.
# Returns: x + 1.
# Errors: "missing argument" when x is not supplied; "argument not numeric"
#   when x is NULL, non-numeric, or contains NA.
adds.one <- function(x){
  if (missing(x)) {
    stop("missing argument")
  }
  # Bug fix: the original used non-short-circuiting `|` with is.na() first,
  # so adds.one(NULL) crashed with "argument is of length zero" instead of
  # reporting "argument not numeric". Check NULL/type before NA and use
  # short-circuit `||`; anyNA() keeps the check valid for vector input.
  if (is.null(x) || !is.numeric(x) || anyNA(x)) {
    stop("argument not numeric")
  }
  x + 1
}
# Net monetary benefit: benefits minus costs (vectorized like `-`).
# Both arguments are required; stops with "missing argument" otherwise.
calculate_NMB <- function(benefits, costs) {
  if (!hasArg(benefits) || !hasArg(costs)) {
    stop("missing argument")
  }
  benefits - costs
}
# Entry-point placeholder: announces on stdout that main is running.
main <- function() {
  announcement <- "I am running the main function now"
  print(announcement)
}
|
329cba1d10d75617996e6d5c900397a85d4ebc1b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/generalCorr/examples/bootSign.Rd.R
|
93af86faee81475af3c98e30b19c0e032c49ff34
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 980
|
r
|
bootSign.Rd.R
|
# Auto-extracted example code for generalCorr::bootSign (probability of an
# unambiguously correct sign from bootPairs output). The example body is
# wrapped in "Not run" comments (##D lines) because the bootstrap is slow;
# only the library() call executes.
library(generalCorr)
### Name: bootSign
### Title: Probability of unambiguously correct (+ or -) sign from
###   bootPairs output
### Aliases: bootSign
### Keywords: bootstrap, comparisons kernel meboot, pairwise regression,
### ** Examples
## Not run:
##D options(np.messages = FALSE)
##D set.seed(34);x=sample(1:10);y=sample(2:11)
##D bb=bootPairs(cbind(x,y),n999=29)
##D bootSign(bb,tau=0.476) #gives success rate in n999 bootstrap sum computations
##D
##D bb=bootPairs(airquality,n999=999);options(np.messages=FALSE)
##D bootSign(bb,tau=0.476)#signs for n999 bootstrap sum computations
##D
##D data('EuroCrime');options(np.messages=FALSE)
##D attach(EuroCrime)
##D bb=bootPairs(cbind(crim,off),n999=29) #col.1= crim causes off
##D #hence positive signs are more intuitively meaningful.
##D #note that n999=29 is too small for real problems, chosen for quickness here.
##D bootSign(bb,tau=0.476)#gives success rate in n999 bootstrap sum computations
## End(Not run)
|
16a7c22fa1bf4b22ee41c2a8cae31d2f71ca532b
|
7a9a8fb85481a80124bb1004eb3f4cfb46cdbede
|
/program12.R
|
1a6415871c42d6c11fa55a558d24b10af6065ac0
|
[] |
no_license
|
xinyizhao123/Predicting-Future-Ambient-Ozone
|
6459a9eef144bbf68416522f1987cf60f87af6bd
|
1b682e4fcc16f443b4d3d8c9216cb5f823ac2986
|
refs/heads/master
| 2020-05-25T14:58:23.133324
| 2016-10-06T00:52:51
| 2016-10-06T00:52:51
| 69,671,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,231
|
r
|
program12.R
|
# Merge observed ozone/VOC/NOx station data with simulated values, restrict
# to March-October, compute per-site observed-vs-simulated correlations,
# and plot the series with ggplot2.
# NOTE(review): this is an interactive analysis script with hard-coded
# local paths; it is not runnable end-to-end outside the author's machine.
setwd("C:/Users/Hitomi/Dropbox/Ozone project/data")
st <- read.csv("fulldata.csv", stringsAsFactors = FALSE)
st <- st[-c(1)] # drop the exported row-number column
st$date <- as.Date(as.character(st$date), format("%Y-%m-%d"))
# NOTE(review): `simu` is used here but never loaded in this script --
# presumably read in an earlier session; confirm before rerunning.
simu$date <- as.Date(as.character(simu$date), format("%m/%d/%Y"))
str(st)
str(simu)
st <- st[order(st$siteID, st$date),]
simu <- simu[order(simu$siteID, simu$date),]
pro <- merge(st, simu, by=c("siteID", "date"), all=TRUE)
pro$year <- format(pro$date, "%Y")
pro$month <- format(pro$date, "%m")
table(pro$year, pro$month, pro$siteID)
write.csv(pro, "projectdata_12months.csv")
# Keep only March-October.
st <- pro
st <- subset(st, st$month != "01")
st <- subset(st, st$month != "02")
st <- subset(st, st$month != "11")
st <- subset(st, st$month != "12")
unique(st$month)
write.csv(st, "projectdata_mar2oct.csv")
table(st$year, st$month, st$siteID)
#### correlation between observed data and simulated data ###
abc <- st
# ro1/ro2 grow into named vectors indexed by the character site IDs.
ro1 <- 0
ro2 <- 0
tmp.id=unique(st$siteID)
for (iid in tmp.id){
ro1[iid] <- cor(abc[which(st$siteID==iid),"VOC"],st[which(st$siteID==iid),"VOCS_s"], use="pairwise.complete.obs")
ro2[iid] <- cor(abc[which(st$siteID==iid),"NOx"],st[which(st$siteID==iid),"NOx_s"], use="pairwise.complete.obs")
}
# VOC
ro1
# NOx
ro2
library(ggplot2)
# NOTE(review): these compare against the literal string "NA", not missing
# values -- for real NAs use is.na(); works only if the CSV stored "NA" text.
st <- subset(st, st$VOC != "NA")
st <- subset(st, st$NOx != "NA")
st <- subset(st, st$year < 2006)
st0 <- subset(st, st$VOC<500) # drop extreme VOC outliers for plotting
ggplot(st0, aes(x=date, y=VOC)) + geom_point(aes(colour = factor(siteID))) + stat_smooth(aes(colour = factor(siteID))) +
ggtitle("Observed VOC with time (remove outliers)") + theme(axis.title = element_text(size = 15.5)) +
theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=date, y=VOCS_s)) + geom_point(aes(colour = factor(siteID))) + stat_smooth(aes(colour = factor(siteID))) +
ggtitle("Simulated VOC with time") + theme(axis.title = element_text(size = 15.5)) +
theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
st00 <- subset(st, st$NOx<120) # drop extreme NOx outliers for plotting
ggplot(st00, aes(x=date, y=NOx)) + geom_point(aes(colour = factor(siteID))) + stat_smooth(aes(colour = factor(siteID))) +
ggtitle("Observed NOx with time (remove outliers)") + theme(axis.title = element_text(size = 15.5)) +
theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=date, y=NOx_s)) + geom_point(aes(colour = factor(siteID))) + stat_smooth(aes(colour = factor(siteID))) +
ggtitle("Simulated NOx with time") + theme(axis.title = element_text(size = 15.5)) +
theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
# Re-load the saved subset and look at one site in detail.
st <- read.csv("projectdata_mar2oct.csv", stringsAsFactors = FALSE)
st <- st[-c(1)]
st <- subset(st, st$siteID =="6-71-1004")
#st <- subset(st, st$year < 2004)
st1 <- st
st1 <- subset(st1, st1$VOC != "NA")
st1 <- subset(st1, st1$NOx != "NA")
mean(st$ozone, na.rm = TRUE)
mean(st1$ozone, na.rm = TRUE)
hist(st$ozone)
hist(st1$ozone)
# NOTE(review): `LatLon` is not defined in this script -- expected from an
# earlier session; bounding boxes select a region around ~34N, ~117.5W.
LL <- subset(LatLon, LatLon$LAT < 42 & LatLon$LON < -76)
m1 <- subset(LL, LL$LAT < 34.2 & LL$LAT > 33.9
& LL$LON < -117.3 & LL$LON > -117.9)
|
e0a419e5140eee20c7d4383fbb2e370d309304b3
|
56f9a8a7475ea8e81c88e7b4ddaffc75ff82d940
|
/plot.R
|
94392e09d170d6f87c03d3cd61f2be5905dd36b9
|
[
"MIT"
] |
permissive
|
OtakuSenpai/trace
|
5d8c04295c0a85b9865a0a36af7f6db9abfb097b
|
ca10fd16bfbcf840f0a9041e1aa4b5fe859b7668
|
refs/heads/master
| 2020-12-03T03:44:42.831222
| 2017-04-15T20:38:02
| 2017-04-15T20:38:02
| 95,768,677
| 1
| 0
| null | 2017-06-29T10:56:27
| 2017-06-29T10:56:27
| null |
UTF-8
|
R
| false
| false
| 870
|
r
|
plot.R
|
# Render a Chrome cold-start resource-usage chart from trace.tsv into
# chrome.png: RAM / disk reads / disk writes in MB on the left axis,
# CPU load (%) on the right axis, time in seconds on the x axis.
png(filename='chrome.png',width=728,height=400)
trace <- read.table('trace.tsv',header=TRUE)
time <- trace$ms/1000            # ms -> s
cpu <- trace$cpu_load_perc
ram <- trace$res_mem_kb/2^10     # KB -> MB
reads <- trace$read_b/2^20       # bytes -> MB
writes <- trace$write_b/2^20     # bytes -> MB
colors <- c('#0099cc','#9933cc','#669900','#ff8800')
plot.new()
title('Chrome cold-start')
par(mar=c(5,5,5,5))
box()
# NOTE(review): x range is hard-coded to 0-100 s; longer traces are clipped.
xlim <- c(0,100)
mtext("Time (s)",side=1,line=3)
# RAM, reads, writes share the left axis (MB scale).
plot.window(xlim,ylim=c(0,max(ram,reads,writes)))
lines(time,ram,col=colors[1])
lines(time,reads,col=colors[2])
lines(time,writes,col=colors[3])
mtext("MB",side=2,line=3)
axis(2)
# CPU gets its own 0-100 scale on the right axis.
plot.window(xlim,ylim=c(0,100))
lines(time,cpu,col=colors[4])
mtext("CPU load (%)",side=4,line=3)
axis(4)
axis(1)
# legend
legend('topleft',
legend=c("RAM (MB)","Reads (MB)","Writes (MB)","CPU (%)"),
col=colors,lty=c('solid','solid','solid','solid'))
# Bug fix: the png device was never closed, so chrome.png was left
# incomplete/empty on disk. dev.off() flushes and finalizes the file.
dev.off()
|
a782def1f1ad78117787e5842cdc347c29d076f5
|
392b2626516030de72268f9c6f2a622cc08601ce
|
/man/Replace_ex.Rd
|
8b5fe3e12cee3751fa8c97326a0019435c433b6d
|
[] |
no_license
|
minghao2016/do
|
c047e262d3a87df5b82283e6599f50855f2bf917
|
f38962795f860c464fe99c8872f6e56632140d92
|
refs/heads/master
| 2022-04-05T06:37:35.108769
| 2019-12-16T23:43:40
| 2019-12-16T23:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 768
|
rd
|
Replace_ex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Replace_ex.R
\name{Replace_ex}
\alias{Replace_ex}
\title{Replace Exactly}
\usage{
Replace_ex(x, from, to, pattern)
}
\arguments{
\item{x}{vector, dataframe or matrix}
\item{from}{strings to be replaced}
\item{to}{replacements}
\item{pattern}{a special pattern, see examples for detail}
}
\value{
replaced data
}
\description{
Replace Exactly
}
\examples{
a=c(1,2,3,1,4)
Replace_ex(x = a,from = c(1,2),to=5)
Replace_ex(x=a,pattern = c('1:5','2:5'))
Replace_ex(x=a,pattern = '[12]:5')
a=data.frame(v=c(1,2,3,2,4),
b=c(7,8,9,4,6))
Replace_ex(x = a,from = c(1,2),to=5)
Replace_ex(x=a,pattern = c('1:5','2:5'))
Replace_ex(x=a,pattern = '[12]:5')
}
|
bd0d0be471b54a3bbf7785961456e1b839d6983a
|
5c979309940cbb6458deac553ef3620d02e55f36
|
/man/double.Rd
|
48a66cc629676928a8945723598a2bfbcd8e07bc
|
[
"MIT"
] |
permissive
|
evanamiesgalonski/speciesdemo
|
ca65af7d2d4dbf9f32375aa69bb8950d0690249d
|
f49ede58c960c9b7d59b51956ef179e430ba1aef
|
refs/heads/master
| 2022-04-26T10:12:37.080098
| 2020-04-18T21:06:24
| 2020-04-18T21:06:24
| 255,143,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 288
|
rd
|
double.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demo.R
\name{double}
\alias{double}
\title{double}
\usage{
double(num)
}
\arguments{
\item{num}{The number to be doubles}
}
\value{
The value for 'num', doubled
}
\description{
double
}
\examples{
double(2)
}
|
dccb96622827efe7371c27edb3efa51864b9786b
|
ce4a1573d7aeec032138536684918865ff765e85
|
/古松/server.R
|
bfd512a7123de612ec3bd7425f698d34d97ab3cf
|
[] |
no_license
|
wenjing14bjtu/railwayIdx
|
6f4d810a674483898ca60fd015bd6f84dab03bd7
|
ecfd82928cac2f1565c78e9506c3938c7e17bb73
|
refs/heads/master
| 2021-01-17T20:37:45.253728
| 2016-05-20T09:40:17
| 2016-05-20T09:40:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,552
|
r
|
server.R
|
# Shiny server for rail freight-volume forecasting.
# Fits three regression models (OLS, random forest, SVM) of freight volume
# on steel (`iron`) and coal output, renders a prediction curve per model,
# and serves point forecasts for user-supplied input values.
shinyServer(function(input, output) {
  #------------------------------
  # Freight volume forecasting
  #------------------------------
  require(ggplot2)
  require(DT)
  require(e1071)
  require(randomForest)
  df <- read.csv("freight.csv", header = TRUE)
  # Convert the time column to Date.
  # NOTE(review): as.Date.POSIXct() is applied to whatever read.csv() returned
  # for `tm`; as.Date(df$tm, "%Y-%m-%d") is probably what was intended — confirm.
  df$tm <- as.Date.POSIXct(df$tm, "%Y-%m-%d", tz = Sys.timezone(location = TRUE))

  #------- olsRegModel: multiple linear regression model
  olsRegModel <- lm(freight ~ iron + coal, data = df)
  df$linearRegPred <- as.integer(predict(olsRegModel, newdata = df))

  #------- rfRegModel: random forest regression model, reused below via
  # predict(); the data set is small, so no cross-validation is run here.
  rfRegModel <- randomForest(freight ~ iron + coal, data = df,
                             importance = TRUE, ntree = 100, type = "regression")
  df$frRegPred <- as.integer(predict(rfRegModel, df))  # random-forest fitted values

  #------- svmRegModel: support vector regression, also reused below.
  # svm() performs cross-validation internally (the `cross` argument).
  svmRegModel <- svm(freight ~ iron + coal, data = df,
                     type = "eps-regression", cross = nrow(df) / 2)
  df$svmRegPred <- as.integer(predict(svmRegModel, df))  # SVM fitted values

  # Shared helper: initialise a time-series ggplot; the aesthetics set here
  # are inherited by the layers each tab adds afterwards.
  plotCurve <- function(db, xdata, ydata) {
    # FIX: was dim(xdata)[1], which is NULL for a vector such as df$tm.
    len <- length(xdata)
    plt <- ggplot(db, x = c(xdata[1], xdata[len]), aes(x = xdata, y = ydata), color = "red")
    return(plt)
  }

  #--------------------------- OLS regression curve
  output$linearplot <- renderPlot({
    if (input$year_start > input$year_end) {
      # Invalid year range: fall back to the full data set.
      if (input$stat_data) {
        p <- plotCurve(df, df$tm, df$freight)
      } else {
        p <- plotCurve(df, df$tm, df$linearRegPred)
      }
    } else {
      # Restrict to the selected year range (first 4 chars of the date).
      dfsub <- subset(df, substr(df$tm, 1, 4) >= input$year_start)
      dfsub <- subset(dfsub, substr(dfsub$tm, 1, 4) <= input$year_end)
      if (input$stat_data) {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$freight)
      } else {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$linearRegPred)
      }
    }
    if (input$predict_data) {
      p <- p + geom_line(aes(x = tm, y = linearRegPred), color = "blue", size = 0.8)
    }
    if (input$stat_data) {
      p <- p + geom_point(aes(x = tm, y = freight), color = "red", size = 3, shape = 21)
    }
    p + ylab("货运量(万吨)") + xlab("时间") +
      geom_point(shape = 21, color = 'red', fill = 'cornsilk', size = 3)
  })

  #----------------------------------------------------
  # OLS point forecast for the user-entered iron/coal values
  output$freight_output <- renderText({
    x1 <- as.numeric(input$iron_input)
    x2 <- as.numeric(input$coal_input)
    iron <- c(x1)
    coal <- c(x2)
    tm <- c(2016)
    freight <- c(0)  # placeholder response column, ignored by predict()
    inputdata <- data.frame(tm, freight, iron, coal)
    pred <- as.integer(predict(olsRegModel, inputdata))
    paste("多元回归预测:", pred)
  })

  #-------------------------------------------------
  # Random forest point forecast
  output$freight_FRR <- renderText({
    x1 <- as.numeric(input$iron_input)
    x2 <- as.numeric(input$coal_input)
    iron <- c(x1)
    coal <- c(x2)
    tm <- c(2016)
    freight <- c(0)
    inputdata <- data.frame(tm, freight, iron, coal)
    railCarriage <- predict(rfRegModel, inputdata)  # model fitted once at start-up
    paste("随机森林回归预测:", as.integer(railCarriage[1]))
  })

  #----------------------------------
  # SVM point forecast
  output$freight_zhi <- renderText({
    x1 <- as.numeric(input$iron_input)
    x2 <- as.numeric(input$coal_input)
    iron <- c(x1)
    coal <- c(x2)
    tm <- c(2016)
    freight <- c(0)
    inputdata <- data.frame(tm, freight, iron, coal)
    pred <- as.integer(predict(svmRegModel, inputdata))
    paste("支持向量机预测:", pred)
  })

  #-------------------------------------
  #----------- Random forest tab: curve
  output$rfplot <- renderPlot({
    if (input$year_start > input$year_end) {
      if (input$stat_data) {
        p <- plotCurve(df, df$tm, df$freight)
      } else {
        p <- plotCurve(df, df$tm, df$frRegPred)
      }
    } else {
      dfsub <- subset(df, substr(df$tm, 1, 4) >= input$year_start)
      dfsub <- subset(dfsub, substr(dfsub$tm, 1, 4) <= input$year_end)
      if (input$stat_data) {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$freight)
      } else {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$frRegPred)
      }
    }
    if (input$predict_data) {
      p <- p + geom_line(aes(x = tm, y = frRegPred), color = "blue", size = 0.8, show.legend = TRUE)
    }
    if (input$stat_data) {
      p <- p + geom_point(aes(x = tm, y = freight), color = "red", size = 3, shape = 21)
    }
    p + ylab("货运量(万吨)") + xlab("时间") +
      geom_point(shape = 21, color = 'red', fill = 'cornsilk', size = 3)
  })

  #---------------------------- SVM tab: curve
  output$svmplot <- renderPlot({
    if (input$year_start > input$year_end) {
      if (input$stat_data) {
        # FIX: was df$carriage — a column that does not exist in `df`;
        # every parallel branch in this file plots df$freight.
        p <- plotCurve(df, df$tm, df$freight)
      } else {
        p <- plotCurve(df, df$tm, df$svmRegPred)
      }
    } else {
      dfsub <- subset(df, substr(df$tm, 1, 4) >= input$year_start)
      dfsub <- subset(dfsub, substr(dfsub$tm, 1, 4) <= input$year_end)
      if (input$stat_data) {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$freight)
      } else {
        p <- plotCurve(dfsub, dfsub$tm, dfsub$svmRegPred)
      }
    }
    if (input$predict_data) {
      p <- p + geom_line(aes(x = tm, y = svmRegPred), color = "blue", size = 0.8)
    }
    if (input$stat_data) {
      p <- p + geom_point(aes(x = tm, y = freight), color = "red", size = 3, shape = 21)
    }
    p + ylab("货运量(万吨)") + xlab("时间") +
      geom_point(shape = 21, color = 'red', fill = 'cornsilk', size = 3)
  })

  #--------------------------------------
  # Data table: `df` now carries three extra columns with the model predictions.
  output$table <- DT::renderDataTable(
    DT::datatable(
      data <- df,
      colnames = c('日期', '货运量(万吨)', '成品钢材产量(万吨)', '原煤产量(万吨)', '多元回归预测(万吨)', '随机森林回归预测(万吨)', '支持向量机回归预测(万吨)'),
      rownames = TRUE)
  )
}
)
|
faf908887c30fb4b6020110b6e996e20e1530450
|
d6ebbd011285a838e3ac2bfd7efc1070c7bc2d7f
|
/shinyApps/ui.R
|
6b4c69522a594962f2e18e84d0785ad46c706201
|
[] |
no_license
|
jpzhangvincent/ucdrocksgg
|
e85a9c3006e58186ecb803f6cbbb62515ce47556
|
7c252823b1e912a3d6f0f13ecf9ae3950d28059f
|
refs/heads/master
| 2021-01-10T09:22:55.609439
| 2015-10-24T23:08:29
| 2015-10-24T23:08:40
| 44,872,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,219
|
r
|
ui.R
|
# ui.R for green gov shinyApp
# Dashboard layout: a header, a sidebar showing the version string, and a
# body made of one title row plus a tabBox with five top-level tabs
# (Welcome, CO2e, State Buildings Energy Use, Fleet Program, Waste Management).
library(shiny)
library(shinydashboard)
library(rCharts)
library(graphics)
dashboardPage(
  dashboardHeader(),
  dashboardSidebar(
    absolutePanel(
      class = "main-sidebar",
      strong(h1("Version 1.1"))
    )
  ),
  dashboardBody(
    fluidRow({
      column(
        width = 12,
        h1("Welcome to PROTOTYPE Green Gov Information Dashboard!", align = "center"),
        h4("Here, you will find information about XXX", align = "center")
      )
    }), #this row is the title
    fluidRow({
      tabBox(
        width = 12,
        # NOTE(review): the `{ ... }` wrappers around each tabPanel() are
        # redundant (a braced block simply returns its last value) but harmless.
        {tabPanel(
          title = strong("Welcome"),
          img(src='logo.jpg', width=500, align = "right"),
          h4("Insert introduction here"),
          HTML("<font size='4'>Contact: <br>Member1 <br>Member2 <br>Member3 <br>Member4<br></font>")
        )}, # this is the welcome tab.
        {tabPanel(
          title = strong("CO2e"),
          fluidRow(
            tabBox(
              width = 12,
              tabPanel(
                title = strong("Emission by type"),
                # Left column: department picker; right column: nvd3 chart.
                column(
                  width=3,
                  uiOutput("DepartmentSelector"),
                  helpText("You can select a department for CO2 emission."),
                  actionButton("DepartmentButton","See Visualization!")
                ),
                column(
                  width=9,
                  showOutput("CO2Plot1", "nvd3")
                )
              )
            )
          )
        )}, # this is the CO2e tab.
        {tabPanel(
          title = strong("State Buildings Energy Use"),
          fluidRow(
            tabBox(
              width = 12,
              tabPanel(
                title = strong("Tab 1.1"),
                # Same picker-plus-chart layout as the CO2e tab.
                column(
                  width=3,
                  uiOutput("buildingBasicUnitSelector"),
                  helpText("You can select an individual building or an entire department."),
                  actionButton("buildingBasicUnitButton","See Visualization!")
                ),
                column(
                  width=9,
                  showOutput("buildingPlot1", "nvd3")
                )
              )
            )
          )
        )}, # this is the building tab.
        {tabPanel(
          title = strong("Fleet Program"),
          fluidRow(
            tabBox(
              width = 12,
              tabPanel(
                title = strong("Tab 1.1"),
                # Placeholder layout: columns intentionally empty for now.
                column(
                  width=3
                ),
                column(
                  width=9
                )
              )
            )
          )
        )}, # this is the fleet tab.
        {tabPanel(
          title = strong("Waste Management"),
          fluidRow(
            tabBox(
              width = 12,
              tabPanel(
                title = strong("Tab 1.1"),
                # Placeholder layout: columns intentionally empty for now.
                column(
                  width=3
                ),
                column(
                  width=9
                )
              )
            )
          )
        )} # this is the waste management tab.
      )
    }) # this divides the entire visualization top level parallel tabs
  )
)
|
6d49f89ec551d84dfa56fcc7c81f4c8f3eb03f0c
|
93039300d230662ecfc357ea73417467f27d97eb
|
/EPDr-HowToExportToWordPress.R
|
cb6d2f87c643d47e82d5792222cfe5730eb336a4
|
[] |
no_license
|
dinilu/EPD-workflow
|
a480a2578d50192a8edff0a6dc2bc668a51be666
|
0260924b5d0d694baf63b9261f975f9f44d88e38
|
refs/heads/master
| 2020-04-06T23:15:07.745126
| 2017-01-27T14:23:36
| 2017-01-27T14:23:36
| 50,992,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
EPDr-HowToExportToWordPress.R
|
# Publish the EPD-PostgreSQL vignette to a WordPress blog via knitr + RWordPress.
# Install RWordpress
# install_github(c("duncantl/XMLRPC", "duncantl/RWordPress"))
library(RWordPress)
# Set login parameters (replace admin, password and blog_url!)
# WARNING(review): credentials are hard-coded and committed here; move them to
# environment variables (Sys.getenv) or a local config file excluded from
# version control, and rotate this password.
options(WordpressLogin = c(dnietolugilde = 'g6yX:#!x'), WordpressURL = 'https://dnietolugilde.wordpress.com/xmlrpc.php')
# Include toc (comment out if not needed)
library(markdown)
options(markdown.HTML.options = c(markdownHTMLOptions(default = TRUE), "toc"))
# Upload plots: set knitr options
library(knitr)
opts_knit$set(base.url = "https://dl.dropboxusercontent.com/u/33940356/wordpress/epd-postgresql/", base.dir = "")
# FIX: removed a stray leftover line (`, base.dir = "D://..."`) that made this
# script a syntax error; if a local plot directory is needed, set base.dir in
# the opts_knit$set() call above instead.
# Post new entry to the wordpress blog and store the post id
knit2wp('vignettes/EPD-PostgreSQL.Rmd', title = 'Setting a PostgreSQL server for the European Pollen Database (EPD)', categories = c("EPDr"), mt_keywords = c("EPD", "PostgreSQL"), shortcode = TRUE, publish = TRUE)
|
7640d8dbfa9830b83660932e40e9bd4314fa63d8
|
0b7b0502560aa79d813224c62929095539427a89
|
/R/on_load.R
|
1edb35b5803a411ed92b2d9770dc4aa0f2839df3
|
[
"MIT"
] |
permissive
|
kant/rerddap
|
c1f890eca53719502d9eb8d94dbdcd4f03747b92
|
42329b752415b52eb6c219036724a4f139c0fd52
|
refs/heads/master
| 2020-12-02T20:17:56.251512
| 2019-12-31T01:11:53
| 2019-12-31T01:11:53
| 231,108,519
| 0
| 0
|
NOASSERTION
| 2019-12-31T15:12:39
| 2019-12-31T15:12:38
| null |
UTF-8
|
R
| false
| false
| 153
|
r
|
on_load.R
|
# Package-level handle for the download cache; populated in .onLoad().
rrcache <- NULL # nocov start

# Initialise the hoardr cache used by rerddap when the package is loaded.
# `libname`/`pkgname` are the standard .onLoad arguments and are unused.
.onLoad <- function(libname, pkgname) {
  cache <- hoardr::hoard()
  cache$cache_path_set('rerddap')
  rrcache <<- cache
} # nocov end
|
0a90fca5cf548528d0ad091d06d11e8b530002b5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wpp2010/examples/e0.Rd.R
|
ba8eb1c01485a66612fb447af8994c155a1f326d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
e0.Rd.R
|
# Example code extracted from the wpp2010 documentation for the `e0`
# data sets (United Nations life-expectancy time series).
library(wpp2010)
### Name: e0
### Title: United Nations Time Series of Life Expectancy
### Aliases: e0 e0_supplemental e0F e0Fproj e0M e0Mproj e0F_supplemental
###   e0M_supplemental
### Keywords: datasets

### ** Examples

# Observed male life expectancy: load and preview the first rows.
data(e0M)
head(e0M)
# Projected female life expectancy: load and inspect the structure.
data(e0Fproj)
str(e0Fproj)
|
56b526fa7e4bbc102e6ccbbc912629744de2dd36
|
35fdff8e2f540586e264f9ec8931b73957848192
|
/conifers_empirical_analysis/run_scripts/conifer_run_analyses.R
|
6b58d1a8bcd0d077ba3078cba1961359cad82120
|
[] |
no_license
|
hoehna/CoMET
|
a33fbb76eb81b04a128fd74fc6bf679e7524fd46
|
1e701bdfd7c598812d67a252936eeb4019eec224
|
refs/heads/master
| 2021-01-10T10:06:30.037115
| 2016-03-03T15:23:40
| 2016-03-03T15:23:40
| 51,861,998
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,535
|
r
|
conifer_run_analyses.R
|
# Run the conifer empirical analyses over a grid of prior settings with TESS.
# NOTE(review): this script calls read.nexus() (ape), mclapply() (parallel)
# and tess.analysis() (TESS) without loading those packages here; confirm
# they are attached by whichever script sources this one.

# Data and priors used for the analysis
extinction_rate <- c('empirical')
diversification_shift_rate <- c(0.1, log(2), 2, 5)
mass_extinction_rate <- c(0.1, log(2), 2, 5)
runs <- 1:4
# Change this to the number of processors mclapply will use
num_cores <- 4
# Change this to the directory containing the dryad package
dir <- '~/repos/mass-extinction-simulation/dryad_package/conifers_empirical_analysis'
# Load the data
tree <- read.nexus(paste(dir, '/data/conifer_40my.tre', sep = ''))
# Sampling probability: sampled tips (internal nodes + 1) out of 630 species.
rho <- (tree$Nnode + 1) / 630
# This grid contains all combinations of data and prior settings
grid <- expand.grid(runs = runs,
                    extinction_rate = extinction_rate,
                    diversification_shift_rate = diversification_shift_rate,
                    mass_extinction_rate = mass_extinction_rate)
grid$ID <- seq_len(nrow(grid))  # seq_len() is safe even for a zero-row grid
# For each combination of prior settings, analyze the conifer data
invisible(mclapply(seq_len(nrow(grid)), function(x) {
  row <- grid[x, ]
  S <- row[[3]]  # diversification_shift_rate
  M <- row[[4]]  # mass_extinction_rate
  R <- row[[1]]  # run index
  this_dir <- paste(dir, '/output/shiftrate_', S, '/merate_', M, '/run_', R, sep = '')
  dir.create(this_dir, showWarnings = FALSE, recursive = TRUE)
  tess.analysis(tree, initialSpeciationRate = 1, initialExtinctionRate = 0.5,
                empiricalHyperPriors = TRUE, samplingProbability = rho,
                numExpectedRateChanges = S, pMassExtinctionPriorShape1 = 2.5,
                pMassExtinctionPriorShape2 = 7.5, numExpectedMassExtinctions = M,
                MAX_ITERATIONS = 2e6, MIN_ESS = 500, dir = this_dir)
}, mc.cores = num_cores))
|
414d3ba80cf4589a19973cbd46ad8383e58a5c85
|
1bda17508c8734f8aa1132d4f8f80b91ba332ef7
|
/R/0.preparation/old/1.create_raw/create_rawdata_population_kebeles.R
|
970bf3aca011c1394de60cc7f2bd22d11434cace
|
[] |
no_license
|
araupontones/ETH_IC
|
b08b12bee201905859292e53de36ced454bd5dba
|
681c6d7ae98926a111ed1b887974f0741519e2d4
|
refs/heads/main
| 2023-08-03T11:59:55.612161
| 2021-09-27T06:42:58
| 2021-09-27T06:42:58
| 329,865,799
| 0
| 0
| null | 2021-09-27T06:42:59
| 2021-01-15T09:25:24
|
R
|
UTF-8
|
R
| false
| false
| 2,682
|
r
|
create_rawdata_population_kebeles.R
|
# Build a raw kebele-level population table by scraping region PDF census
# reports with tabulizer, then export the combined table as an .rds file.
# to read pdf files in R
library(tabulizer)
source("set_up.R")
dir_kebeles_download = file.path(dir_data_reference_downloads, "Kebeles_population")
# list of all pdf files
# list.files(dir_kebeles_download)
# extract_areas()/locate_areas() were used interactively to find the table
# coordinates hard-coded in `area` below:
# extract_areas(file.path(dir_data_reference_downloads, "Kebeles_population", "Oromiya.pdf"), 985)
# tabulizer::locate_areas(file.path(dir_data_reference_downloads, "Kebeles_population", "Oromiya.pdf"), 985)
# Define the pages where the tables are located in each file.
# Note: Dire Dawa and Harari have a different format (we'd need to extract
# them separately).
pages_with_tables = list(#Addis_Ababa= c(172,173),
                         #Affar = c(333:338) ,
                         #Amhara = c(313:321),
                         #Benishangu_Gumuz =c(284:290) ,
                         #Dire_Dawa = c(136,136), #check thisone has a diff format
                         #Gambella = c(252:255),
                         #Harari = c(137, 137) , #also different
                         #National = ,
                         Oromiya = c(985:1074),
                         SNNPR = c(373:380)
                         #Somali = ,
                         #Tigray
)
# Read the pdfs and create one table per region. Regions not handled yet are
# skipped (the map() element is NULL for them; rbind() below drops NULLs).
list_of_tables = map(list.files(dir_kebeles_download), function(region){
  # get the region name from the file name
  region_name = str_remove(region, ".pdf")
  # only these regions are supported so far
  if(region_name %in% c("SNNPR", "Oromiya")){
    print(region_name)
    # path to the pdf to import
    pdf_raw = file.path(dir_kebeles_download, region)
    # pages of this pdf that contain the tables
    pages = pages_with_tables[[region_name]]
    print(pages)
    # read one table per page within the hard-coded area
    table_list = extract_tables(pdf_raw,
                                pages = pages,
                                #area = list(c(100.05882, 14.98522, 741.23327, 558.12116)),
                                area = list(c(94.23915, 23.77084, 807.16390, 569.53711)),
                                #output = "data.frame",
                                method = "decide"
    )
    # Append all page tables into one data frame for the region.
    # FIX: as.tibble() is deprecated in the tibble package; use as_tibble().
    table_df = do.call(rbind, table_list) %>%
      as_tibble() %>%
      mutate(Region = region_name)
    # return the region table
    return(table_df)
  }
})
# append the region tables and export to raw data
raw_data = do.call(rbind, list_of_tables)
names(raw_data) <- c("Kebele", "Population", "Male", "Female", "Number of households", "Number of household units", "Region")
export(raw_data, file.path(dir_data_reference_raw, "Kebele_population_SNNPR_Oromiya_raw.rds"))
|
e7fba641048c5ddfa8e45497431d6bdba1c91f4c
|
2d7dd1f3ab97fc89538dfab09b8b0acc1187c472
|
/R/data.R
|
6e53e9a8ace0ea3bf31341789aa16997c8a460b8
|
[
"MIT"
] |
permissive
|
inbo/inborutils
|
642e36bdbd514ce2b3b937bdcb387531859d93d0
|
fd1174a95770144024ad2e2e8938f40f5e542b2d
|
refs/heads/main
| 2023-05-23T07:18:40.570529
| 2023-03-24T15:46:48
| 2023-03-24T15:46:48
| 69,332,829
| 9
| 7
|
MIT
| 2023-03-24T15:46:50
| 2016-09-27T07:54:51
|
R
|
UTF-8
|
R
| false
| false
| 1,795
|
r
|
data.R
|
#' Example `data.frame` with species name column
#'
#' A dataset containing 3 taxa to be matched with GBIF Taxonomy Backbone. The
#' variables are as follows:
#'
#' @format A data frame with 3 rows and 3 variables
#' \itemize{
#'   \item {`speciesName`: name of the species}
#'   \item {`kingdom`: kingdom to which the species belongs}
#'   \item {`euConcernStatus`: level of concern according to EU directives}
#'   }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name species_example
#' @usage species_example
NULL

#' Example `data.frame` with coordinates
#'
#' A dataset containing 52 coordinates as latitude and longitude
#'
#' @format A `data.frame` with 52 rows and 3 variables:
#' \itemize{
#'   \item{`id`: resource identifier}
#'   \item{`latitude`: Latitude of the coordinates}
#'   \item{`longitude`: Longitude of the coordinates}
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name coordinate_example
#' @usage coordinate_example
NULL

#' Example `data.frame` with `KNMI` downloaded data
#'
#' A dataset containing the rainfall from January 1st till February 1st for
#' `Vlissingen` and `Westdorpe`, as downloaded from `KNMI`
#'
#' @format A `data.frame` with 1536 rows and 9 variables:
#' \itemize{
#'   \item{`value`: measured value}
#'   \item{`datetime`: `datetime` of the measurement}
#'   \item{`unit`: unit (mm)}
#'   \item{`variable_name`: precipitation}
#'   \item{`longitude`: coordinate}
#'   \item{`latitude`: coordinate}
#'   \item{`location_name`: station name}
#'   \item{`source_filename`: filename from which the data was read}
#'   \item{`quality_code`: empty string as `KNMI` does not provide this}
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name rain_knmi_2012
#' @usage rain_knmi_2012
NULL
|
7dd428c528ab42634730d095ff1cba07a2da3769
|
394b0b27a68e590165d0dfb9243e7b2d5deaf4d5
|
/R/turnTaking.R
|
fc09942cea98e4b8c7d278ace098decd7879f206
|
[
"MIT"
] |
permissive
|
NastashaVelasco1987/zoomGroupStats
|
5b414b28e794eecbb9227d4b1cd81d46b00576e4
|
8f4975f36b5250a72e5075173caa875e8f9f368d
|
refs/heads/main
| 2023-05-05T18:23:17.777533
| 2021-05-24T16:08:23
| 2021-05-24T16:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,610
|
r
|
turnTaking.R
|
#' Simple conversational turn-taking analysis
#'
#' Generate a very basic analysis of the conversational turntaking in
#' either a Zoom transcript or a Zoom chat file.
#'
#' @param inputData data.frame output from either processZoomChat or processZoomTranscript
#' @param inputType string of either 'chat' or 'transcript'
#' @param meetingId string giving the name of the meeting identifier
#' @param speakerId string giving the name of the variable with the identity of the speaker
#'
#' @return list of four data.frames giving different levels of analysis for turn taking:
#' \itemize{
#'     \item rawTurn - This data.frame gives a dataset with a
#'     lagged column so that you could calculate custom metrics
#'     \item aggTurnsDyad - This gives a dyad-level dataset so that
#'     you know whose speech patterns came before whose
#'     \item aggTurnsSpeaker - This gives a speaker-level dataset
#'     with metrics that you could use to assess each given
#'     person's influence on the conversation
#'     \item aggTurnsSpeaker_noself - This is a replication of
#'     the aggTurnsSpeaker dataset, but it excludes turns where
#'     a speaker self-follows (i.e., Speaker A => Speaker A)
#' }
#' @export
#'
#' @examples
#' turn.out = turnTaking(inputData=sample_transcript_processed,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName')
#'
#' turn.out = turnTaking(inputData=sample_chat_processed,
#' inputType='chat', meetingId='batchMeetingId',
#' speakerId='userName')
#'
turnTaking <- function(inputData, inputType, meetingId, speakerId) {
  # Declare NSE column names as NULL to satisfy R CMD check. Calls such as
  # sd(...) below still resolve to the function, because R skips non-function
  # bindings when looking up a name used in call position.
  turnGap <- sd <- speakerCurrent <- speakerBefore <- numTurns <- turnGap_x <- NULL

  ## Process meeting-by-meeting (simple loop over the unique meeting ids)
  uniqueMeets <- unique(inputData[, meetingId])
  if (length(uniqueMeets) == 1) pbMin <- 0 else pbMin <- 1
  pb <- utils::txtProgressBar(min = pbMin, max = length(uniqueMeets), style = 3)

  for (m in seq_along(uniqueMeets)) {
    utils::setTxtProgressBar(pb, m)
    meetData <- inputData[inputData[, meetingId] == uniqueMeets[m], ]

    # Names of the unique speakers in this meeting
    uniqueSpeakers <- sort(unique(meetData[, speakerId]))

    # Create lagged variables so each row knows who spoke immediately before
    # and how large the gap was (in seconds)
    meetData$speakerCurrent <- meetData[, speakerId]
    if (inputType == "transcript") {
      meetData <- meetData[order(meetData$utteranceEndSeconds), ]
      meetData[, c("speakerBefore", "priorUtteranceEndSeconds")] <- dplyr::lag(meetData[, c("speakerCurrent", "utteranceEndSeconds")])
      meetData$turnGap <- meetData$utteranceStartSeconds - meetData$priorUtteranceEndSeconds
    } else if (inputType == "chat") {
      meetData <- meetData[order(meetData$messageTime), ]
      meetData[, c("speakerBefore", "priorMessageTime")] <- dplyr::lag(meetData[, c("speakerCurrent", "messageTime")])
      meetData$turnGap <- as.numeric(difftime(meetData$messageTime, meetData$priorMessageTime, units = "secs"))
    }

    turnDyd <- meetData[, c("speakerCurrent", "speakerBefore", "turnGap")]
    turnDyd.dt <- data.table::data.table(turnDyd)
    turnDyd.agg <- data.frame(turnDyd.dt[, list(numTurns = .N, turnGap_x = mean(turnGap, na.rm = TRUE), turnGap_sd = sd(turnGap, na.rm = TRUE)), by = list(speakerCurrent, speakerBefore)])

    # Add zero-count rows for speaker pairs that never occurred, so that
    # every possible dyad appears in the output.
    # (Loop variables renamed from `b`/`c`: `c` shadowed base::c.)
    for (before_sp in uniqueSpeakers) {
      for (current_sp in uniqueSpeakers) {
        if (nrow(turnDyd.agg[turnDyd.agg$speakerBefore == before_sp & turnDyd.agg$speakerCurrent == current_sp, ]) == 0) {
          # c() coerces this row to character; columns 3:5 are restored below
          turnDyd.agg[nrow(turnDyd.agg) + 1, ] <- c(current_sp, before_sp, 0, NA, NA)
        }
      }
    }
    turnDyd.agg[, 3:5] <- lapply(turnDyd.agg[, 3:5], as.numeric)

    ######## Create an individual level dataset focused for now on influence ########
    turnDyd.dt2 <- data.table::data.table(turnDyd.agg)
    turnDyd.agg2 <- data.frame(turnDyd.dt2[!is.na(speakerBefore), list(turnsAfterSpeaker = sum(numTurns), turnGapAfterSpeaker_x = mean(turnGap_x, na.rm = TRUE), turnGapAfterSpeaker_sd = sd(turnGap_x, na.rm = TRUE)), list(speakerBefore)])
    totalTurns <- sum(turnDyd.agg[!is.na(turnDyd.agg$speakerBefore), "numTurns"])
    turnDyd.agg2$turnsAfterSpeaker_pct <- turnDyd.agg2$turnsAfterSpeaker / totalTurns

    # Replicate the speaker-level dataset, excluding self-follows
    # (Speaker A => Speaker A)
    turnDyd.agg_noself <- data.frame(turnDyd.dt2[!is.na(speakerBefore) & (speakerCurrent != speakerBefore), list(turnsAfterSpeaker = sum(numTurns), turnGapAfterSpeaker_x = mean(turnGap_x, na.rm = TRUE), turnGapAfterSpeaker_sd = sd(turnGap_x, na.rm = TRUE)), list(speakerBefore)])
    totalTurns_noself <- sum(turnDyd.agg[!is.na(turnDyd.agg$speakerBefore) & (turnDyd.agg$speakerCurrent != turnDyd.agg$speakerBefore), "numTurns"])
    turnDyd.agg_noself$turnsAfterSpeaker_pct <- turnDyd.agg_noself$turnsAfterSpeaker / totalTurns_noself

    # Tag each non-empty result with the meeting id
    if (nrow(turnDyd) > 0) {
      turnDyd[, meetingId] <- uniqueMeets[m]
    }
    if (nrow(turnDyd.agg) > 0) {
      turnDyd.agg[, meetingId] <- uniqueMeets[m]
    }
    if (nrow(turnDyd.agg2) > 0) {
      turnDyd.agg2[, meetingId] <- uniqueMeets[m]
    }
    if (nrow(turnDyd.agg_noself) > 0) {
      turnDyd.agg_noself[, meetingId] <- uniqueMeets[m]
    }

    # Accumulate across meetings (rbind-in-loop is fine for the modest number
    # of meetings expected; preallocate if this ever becomes a bottleneck)
    if (m == 1) {
      res1 <- turnDyd
      res2 <- turnDyd.agg
      res3 <- turnDyd.agg2
      res4 <- turnDyd.agg_noself
    } else {
      res1 <- rbind(res1, turnDyd)
      res2 <- rbind(res2, turnDyd.agg)
      res3 <- rbind(res3, turnDyd.agg2)
      res4 <- rbind(res4, turnDyd.agg_noself)
    }
  }
  close(pb)

  ## output the four levels of analysis
  o.list <- list("rawTurns" = res1, "aggTurnsDyad" = res2, "aggTurnsSpeaker" = res3, "aggTurnsSpeaker_noself" = res4)
  return(o.list)
}
|
a3e6a46367190609b9cfc66d2075b99a00e001c8
|
aa1da12305bb9a442f1c1fa4389b92d601afdb50
|
/man/strat_metrics.Rd
|
eb87e9b29b1a29ad0c05fe59aff40c6173b0ccc0
|
[
"MIT"
] |
permissive
|
rubenvalpue/sgsR
|
845814721d1e9834d589cf7ac4bf8d5430c554cd
|
20edde477d2287214467ae27b1649371dabdd471
|
refs/heads/main
| 2023-04-17T06:21:42.364584
| 2021-04-26T17:44:16
| 2021-04-26T17:44:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,491
|
rd
|
strat_metrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strat_metrics.R
\name{strat_metrics}
\alias{strat_metrics}
\title{Stratify metric raster using metric quantiles.}
\usage{
strat_metrics(
mraster,
metric = NULL,
metric2 = NULL,
nstrata,
nstrata2 = NULL,
plot = FALSE,
samp = 1,
details = FALSE
)
}
\arguments{
\item{mraster}{spatRaster. ALS metrics raster.}
\item{metric}{Character. Name of primary metric to stratify. If
\code{mraster} has only 1 layer, that layer is taken as the default.}
\item{metric2}{Character. Name of secondary metric to stratify.}
\item{nstrata}{Character. Number of desired strata.}
\item{nstrata2}{Numeric. Number of secondary strata within \code{nstrata}.}
\item{plot}{Logical. If \code{TRUE}, plots the output strata raster and
visualizes the strata with boundary dividers.}
\item{samp}{Numeric. For plotting - Determines proportion of cells
for strata visualization. Lower values reduce processing time.}
\item{details}{Logical. If \code{FALSE} (default) output is only
stratification raster. If \code{TRUE} return a list
where \code{$details} is additional stratification information and
\code{$raster} is the output stratification spatRaster.}
}
\value{
output stratification \code{spatRaster}
}
\description{
Stratify metric raster using metric quantiles.
}
\seealso{
Other stratify functions:
\code{\link{strat_breaks}()},
\code{\link{strat_kmeans}()},
\code{\link{strat_osb}()},
\code{\link{strat_pcomp}()}
}
\concept{stratify functions}
|
4ee5313afd447a0cf2356184e0692c6c79ab21ca
|
ad522819f54aa659c951ff39fff1dda0fff0f89f
|
/man/functional_db_to_amplitude.Rd
|
206230c6d5fa6008bb678000e5df6b0d18311906
|
[
"MIT"
] |
permissive
|
davidbrae/torchaudio
|
4dbc4e12067b14dedd8fa785a6b753719e39b0d3
|
d20ccc237a8eff58e77bb8e3f08ef24150a4fc4e
|
refs/heads/master
| 2023-07-20T16:06:59.791249
| 2021-08-29T19:16:50
| 2021-08-29T19:16:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 712
|
rd
|
functional_db_to_amplitude.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functional.R
\name{functional_db_to_amplitude}
\alias{functional_db_to_amplitude}
\title{DB to Amplitude (functional)}
\usage{
functional_db_to_amplitude(x, ref, power)
}
\arguments{
\item{x}{(Tensor): Input tensor before being converted to power/amplitude scale.}
\item{ref}{(float): Reference which the output will be scaled by. (Default: \code{1.0})}
\item{power}{(float): If power equals 1, will compute DB to power. If 0.5, will compute
DB to amplitude. (Default: \code{1.0})}
}
\value{
\code{tensor}: Output tensor in power/amplitude scale.
}
\description{
Turn a tensor from the decibel scale to the power/amplitude scale.
}
|
ac48b148c579eec053fc19f6f69a73cf411501e6
|
56ceeb0f231c60f2af78c66fc9c74d49ce398777
|
/man/simPlusMinus.Rd
|
1a73a296f957f2b0c54d7c08dc8afd8951c048f3
|
[
"MIT"
] |
permissive
|
ajrominger/RarePlusComMinus
|
741bab1eb6771232f758eb68095008b0009d460a
|
902476ad3998f19fb82592785127eac05843ca25
|
refs/heads/master
| 2021-06-29T19:33:00.592586
| 2021-06-25T19:32:06
| 2021-06-25T19:32:06
| 228,719,169
| 0
| 1
|
MIT
| 2021-06-07T14:09:01
| 2019-12-17T23:28:17
|
TeX
|
UTF-8
|
R
| false
| true
| 2,438
|
rd
|
simPlusMinus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indSwapTest.R, R/simPlusMinus.R
\name{indSwapTest}
\alias{indSwapTest}
\alias{simPlusMinus}
\alias{simpleSim}
\title{Explore the effect of using the independent swap algorithm on inference
of connection between abundance and positive or negative association networks}
\usage{
indSwapTest(sadStats, mcCores, ssadType = "nbinom", kfun, nsim)
simPlusMinus(sadStats, mcCores, ssadType = "nbinom", kfun, nsim)
simpleSim(nsite, nspp, mcCores, sadfun, ssadfun, nsim)
}
\arguments{
\item{sadStats}{a \code{data.frame} with columns \code{mod}, \code{par1}, \code{par2}}
\item{mcCores}{number of cores to use in \code{parallel::mclapply}}
\item{ssadType}{string specifying SSAD shape (e.g. \code{'nbinom'})}
\item{kfun}{function to relate k parameter of the SSAD to abundance}
\item{nsim}{number of simulations to run}
\item{nsite}{number of sites to simulate}
\item{nspp}{number of species to simulate}
\item{sadfun}{function to generate random SAD sample}
\item{ssadfun}{function to generate random SSAD sample}
}
\value{
a \code{data.frame} with \code{<= nsim} rows (some simulations may be
thrown out if they do not meet data filtering requirements), and columns corresponding
to summary statistics about the positive and negative network characteristics
a \code{data.frame} with \code{<= nsim} rows (some simulations may be
thrown out if they do not meet data filtering requirements), and columns corresponding
to summary statistics about the positive and negative network characteristics
}
\description{
This is largely a copy of the function \code{simPlusMinus} but
using the independent swap algorithm from \{picante\}
Simulate spatial replicates of abundance data and apply the same
analytical pipeline to those data that would be applied to real data
}
\details{
\code{simPlusMinus} draws random SAD and SSAD shapes from the raw data
and uses these to simulate more data and calculate network statistics on those
simulated data. \code{simpleSim} assumes one SAD and one SSAD and simulates data from
those, again calculated network statistics.
Note: any value passed to \code{ssadType} other than \code{'nbinom'} results
in a Poisson SSAD (i.e., there are only two options, negative binomial specified by
\code{'nbinom'} or Poisson specified by anything else)
}
\author{
Andy Rominger <ajrominger@gmail.com>
Andy Rominger <ajrominger@gmail.com>
}
|
1bd2a2b511910d0ba8839e0c4eba401b284d448d
|
98383fc7513540b66d98bf74abfe9bd9dc3f1a52
|
/data_preprocessing_2.R
|
de79afdb530f16de3f2874c1f425e20e958a54a9
|
[] |
no_license
|
AbdullahMakhdoom/Movie-Recommendation-System
|
4a6e279df3169a523359309778f6b089a3f95fd1
|
7fc2d520d4500161ee58a1f2ce89718df1531f46
|
refs/heads/master
| 2022-12-04T04:44:31.023096
| 2020-08-23T20:04:56
| 2020-08-23T20:04:56
| 289,512,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,402
|
r
|
data_preprocessing_2.R
|
# Selecting useful data
movie_ratings <- ratingMatrix[rowCounts(ratingMatrix) > 50,
colCounts(ratingMatrix) > 50]
movie_ratings
minimum_movies<- quantile(rowCounts(movie_ratings), 0.98)
minimum_users <- quantile(colCounts(movie_ratings), 0.98)
image(movie_ratings[rowCounts(movie_ratings) > minimum_movies,
colCounts(movie_ratings) > minimum_users],
main = "Heatmap of the top users and movies")
# Visualizing the distribution of the average ratings per user
average_ratings <- rowMeans(movie_ratings)
qplot(average_ratings, fill=I("steelblue"), col=I("red")) +
ggtitle("Distribution of the average rating per user")
# Data Normalization
normalized_ratings <- normalize(movie_ratings)
sum(rowMeans(normalized_ratings) > 0.00001)
image(normalized_ratings[rowCounts(normalized_ratings) > minimum_movies,
colCounts(normalized_ratings) > minimum_users],
main = "Normalized Ratings of the Top Users")
# Data Binarization
binary_minimum_movies <- quantile(rowCounts(movie_ratings), 0.95)
binary_minimum_users <- quantile(colCounts(movie_ratings), 0.95)
good_rated_films <- binarize(movie_ratings, minRating = 3)
image(good_rated_films[rowCounts(movie_ratings) > binary_minimum_movies,
colCounts(movie_ratings) > binary_minimum_users],
main = "Heatmap OF the top users and movies")
|
1aaa9703af3509623c99e453e4669669a45794b4
|
cba10b84d2cc708dd66148a4511451d77a92a7c5
|
/tests/testthat/test-plotCI.R
|
e2e3463363034d0a9502bb02d57e152cfc2bede2
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
r4ss/r4ss
|
03e626ae535ab959ff8109a1de37e3e8b44fe7ad
|
0ef80c1a57e4a05e6172338ddcb0cda49530fa93
|
refs/heads/main
| 2023-08-17T08:36:58.041402
| 2023-08-15T21:42:05
| 2023-08-15T21:42:05
| 19,840,143
| 35
| 57
| null | 2023-07-24T20:28:49
| 2014-05-16T00:51:48
|
R
|
UTF-8
|
R
| false
| false
| 301
|
r
|
test-plotCI.R
|
test_that("plotCI function works", {
  # plotCI() should return the x/y coordinates it was asked to plot.
  # Note: could look into using snapshot test to verify output
  set.seed(123)
  x_vals <- 1:10
  y_vals <- rnorm(10)
  result <- plotCI(x = x_vals, y = y_vals, uiw = 1, liw = 2)
  expect_equivalent(x_vals, result[["x"]])
  expect_equivalent(y_vals, result[["y"]])
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.