blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39254583af1b1156f9bd21c96e5d8995233d860f
|
50cae6d46e423d2fd3e5062bedf921adf232e8fb
|
/rscripts/enrichment_utils.r
|
78d4c72d625eb3fbb48b9a72c4e65bf7bd236b09
|
[] |
no_license
|
bjcbjc/mylib
|
779cd177cfbda7411f045f4aa045c7f37ff5c478
|
9c2d1bd3739b50bfe9e9c20a47971ade0ef67a5d
|
refs/heads/master
| 2021-01-15T15:48:50.711618
| 2016-09-23T14:29:51
| 2016-09-23T14:29:51
| 12,460,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,088
|
r
|
enrichment_utils.r
|
# Run GO over-representation analysis (GOstats hypergeometric test) for a set
# of test genes against a background (universe) gene set.
#
# Args:
#   testGene:          character vector of gene IDs to test for enrichment.
#   backgroundGene:    character vector of background (universe) gene IDs.
#   inputGeneId:       ID type of the inputs, e.g. 'ENSEMBL' or 'ENTREZID'.
#   database:          Bioconductor annotation package name, e.g. 'org.Hs.eg.db'.
#   ontologyList:      GO ontologies to test ('BP', 'MF', 'CC').
#   extractGeneInGO:   if TRUE, also return a gene-by-GO-term binary membership
#                      table for each ontology.
#   pvalCutoff:        p-value cutoff passed to GOHyperGParams.
#   customMapping:     if TRUE (and inputGeneId == 'ENSEMBL'), map IDs with the
#                      custom mapping file instead of the annotation package.
#   customMappingFile: path to the custom ENSEMBL-to-ENTREZ mapping file.
#
# Returns: a named list with one hyperGTest summary data frame per ontology,
#   plus '<ontology>_binaryTable' entries when extractGeneInGO is TRUE.
enrich.go <- function (testGene, backgroundGene, inputGeneId= 'ENSEMBL', database= 'org.Hs.eg.db', ontologyList = c('BP', 'MF', 'CC'), extractGeneInGO = FALSE, pvalCutoff = 0.01, customMapping = FALSE, customMappingFile = '/nethome/bjchen/DATA/GenomicInfo/ENSEMBL/ensembl_entrez_10302014.txt') {
  library(database, character.only = TRUE)
  library(GOstats)
  # Look the annotation objects up by name; avoids eval(parse(text = ...)).
  databaseObj <- get(database)
  # e.g. 'org.Hs.eg.db' -> 'org.Hs.egGO', the package's gene-to-GO map
  goGenes <- mappedkeys(get(sub('.db', 'GO', database, fixed = TRUE)))
  genemap <- NULL  # populated only when the input IDs need conversion
  if (inputGeneId == 'ENTREZID') {
    testGeneVector <- testGene
    univGeneVector <- backgroundGene
  } else {
    if (customMapping && inputGeneId == 'ENSEMBL') {
      source('/nethome/bjchen/BJLib/rscripts/convertENSEMBL2ENTREZ.r')
      genemap <- ENSEMBL2ENTREZ_HUGO(testGene, customMappingFile, goGenes)
      univmap <- ENSEMBL2ENTREZ_HUGO(backgroundGene, customMappingFile, goGenes)
      # drop rows that have no ENTREZ mapping
      genemap <- genemap[genemap[, 'ENTREZID'] != '', ]
      univmap <- univmap[univmap[, 'ENTREZID'] != '', ]
      testGeneVector <- genemap[, 'ENTREZID']
      univGeneVector <- univmap[, 'ENTREZID']
    } else {
      genemap <- select(databaseObj, testGene, c('ENTREZID', 'SYMBOL'), inputGeneId)
      univmap <- select(databaseObj, backgroundGene, c('ENTREZID', 'SYMBOL'), inputGeneId)
      testGeneVector <- genemap[, 'ENTREZID']
      univGeneVector <- univmap[, 'ENTREZID']
    }
  }
  # keep only GO-annotated genes, deduplicated
  testGeneVector <- unique(testGeneVector[testGeneVector %in% goGenes])
  univGeneVector <- unique(univGeneVector[univGeneVector %in% goGenes])
  enrichRes <- list()
  for (ont in ontologyList) {
    param <- new('GOHyperGParams', geneIds = testGeneVector, universeGeneIds = univGeneVector, annotation = database, ontology = ont, pvalueCutoff = pvalCutoff, conditional = FALSE, testDirection = 'over')
    hyp <- hyperGTest(param)
    sum_hyp <- summary(hyp)
    enrichRes[[ont]] <- sum_hyp
    if (extractGeneInGO) {
      # build a binary gene-by-GO-term membership table for the enriched terms
      gn2go <- select(databaseObj, geneIds(hyp), c('GOALL'))
      gn2go <- gn2go[gn2go[, 'GOALL'] %in% sum_hyp[, 1], ]
      rowName <- unique(gn2go[, 'ENTREZID'])
      colName <- unique(gn2go[, 'GOALL'])
      idx <- cbind(rowid = match(gn2go[, 'ENTREZID'], rowName),
                   colid = match(gn2go[, 'GOALL'], colName))
      binaryTable <- matrix(0, nrow = length(rowName), ncol = length(colName), dimnames = list(rowName, colName))
      binaryTable[idx] <- 1
      # replace GO IDs in the columns by their term descriptions
      termIdx <- match(colnames(binaryTable), sum_hyp[, 1])
      colnames(binaryTable) <- sum_hyp[termIdx, 'Term']
      # Prepend ENSEMBL/SYMBOL columns when an ID-mapping table exists.
      # (Previously, calling with inputGeneId == 'ENTREZID' and
      # extractGeneInGO = TRUE crashed here on an undefined `genemap`.)
      # NOTE(review): assumes genemap has an 'ENSEMBL' column, i.e. that
      # inputGeneId was 'ENSEMBL' -- confirm for other input ID types.
      if (!is.null(genemap)) {
        mapIdx <- match(rownames(binaryTable), genemap[, 'ENTREZID'])
        geneInfo <- genemap[mapIdx, c('ENSEMBL', 'SYMBOL')]
        rownames(geneInfo) <- rownames(binaryTable)
        binaryTable <- cbind(geneInfo, binaryTable)
      }
      enrichRes[[paste0(ont, '_binaryTable')]] <- binaryTable
    }
  }
  return(enrichRes)
}
# Run GO over-representation analysis for many gene sets in parallel.
#
# Args:
#   testGeneLists:     list of ENTREZ gene-ID vectors; testGeneLists[[i]] is
#                      one gene set to test for enrichment (NULL entries yield
#                      an empty result list).
#   backgroundGene:    ENTREZ IDs of the background (universe) gene set.
#   inputGeneId:       must be 'ENTREZID'; any other value returns NULL.
#   database:          Bioconductor annotation package name, e.g. 'org.Hs.eg.db'.
#   ontologyList:      GO ontologies to test ('BP', 'MF', 'CC').
#   pvalCutoff:        p-value cutoff passed to GOHyperGParams.
#   customMapping, customMappingFile: kept for interface compatibility with
#                      enrich.go; unused here since ENTREZ IDs are required.
#   cores:             number of parallel workers for doParallel.
#
# Returns: a list parallel to testGeneLists; each element is a named list of
#   hyperGTest summaries per ontology (NA for an ontology whose test errored).
enrich.go.parallel <- function (testGeneLists, backgroundGene, inputGeneId= 'ENSEMBL', database= 'org.Hs.eg.db', ontologyList = c('BP', 'MF', 'CC'), pvalCutoff = 0.01, customMapping = FALSE, customMappingFile = '/nethome/bjchen/DATA/GenomicInfo/ENSEMBL/ensembl_entrez_10302014.txt', cores= 10) {
  library(database, character.only = TRUE)
  library(doParallel)
  # GO-annotated genes, looked up by object name (e.g. org.Hs.egGO);
  # avoids eval(parse(text = ...)).
  goGenes <- mappedkeys(get(sub('.db', 'GO', database, fixed = TRUE)))
  if (inputGeneId != 'ENTREZID') {
    cat('convert to ENTREZID first!\n')
    return(NULL)
  }
  ## The annotation package uses SQLite, which cannot be shared across
  ## parallel workers; detach it here and re-load it inside each task.
  detach(paste0('package:', database), unload = TRUE, character.only = TRUE)
  registerDoParallel(cores = cores)
  # restrict the universe to GO-annotated genes (also deduplicates)
  univGeneVector <- intersect(backgroundGene, goGenes)
  allEnrichRes <- foreach(eachIdx = seq_along(testGeneLists)) %dopar% {
    if (is.null(testGeneLists[[eachIdx]])) {
      enrichRes <- list()
    } else {
      library(database, character.only = TRUE)
      library(GOstats)
      testGeneVector <- intersect(testGeneLists[[eachIdx]], goGenes)
      cat(sprintf('%d, %d\n', eachIdx, length(testGeneVector)))
      enrichRes <- list()
      if (length(testGeneVector) > 0) {
        for (ont in ontologyList) {
          # Any failing ontology records NA instead of aborting the worker.
          enrichRes[[ont]] <- tryCatch({
            param <- new('GOHyperGParams', geneIds = testGeneVector, universeGeneIds = univGeneVector, annotation = database, ontology = ont, pvalueCutoff = pvalCutoff, conditional = FALSE, testDirection = 'over')
            hyp <- hyperGTest(param)
            summary(hyp)
          }, error = function(err) {
            cat(sprintf('Caught error, listIdx=%d, %d genes, %s\n', eachIdx, length(testGeneVector), err))
            return(NA)
          })
        }
      }
      detach('package:GOstats', unload = TRUE)
      detach(paste0('package:', database), unload = TRUE, character.only = TRUE)
    }
    enrichRes
  }
  return(allEnrichRes)
}
|
995aaff1e2a4c7391077047a898b43afc38a659a
|
96ff9975086b3f791f02857b42a3ee29822946ea
|
/aer/chap01.R
|
5b0fd795281bc1b9c2699275b72288f341953ead
|
[] |
no_license
|
ecodiegoale/trading
|
447f1175a42d3627067ff9bda391af837cfcbdb0
|
b4e880799e8cb750a3aef95a51d463666a0870bf
|
refs/heads/master
| 2021-12-05T23:21:15.281532
| 2015-08-24T16:10:18
| 2015-08-24T16:10:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
chap01.R
|
# AER (Applied Econometrics with R), Chapter 1 examples:
# (1) OLS regression of log journal subscriptions on log price per citation;
# (2) OLS and quantile regressions of log wage on experience and education.
data("Journals", package = "AER")
dim(Journals)
names(Journals)
# demand curve: log subscriptions against log price per citation
plot(log(subs) ~ log(price/citations), data = Journals)
j_lm <- lm(log(subs) ~ log(price/citations), data = Journals)
abline(j_lm)
summary(j_lm)
# experiment
plot(Journals$subs)
plot(log(Journals$subs))
plot(Journals$price / Journals$citations)
plot(log(Journals$price / Journals$citations))
# elasticity of the demand with respect to the price per citation is −0.5331, which
# is significantly different from 0 at all conventional levels.
j_lm$coefficients[2]
##
# wage regressions on the 1985 Current Population Survey data
data("CPS1985", package = "AER")
cps <- CPS1985
library("quantreg")
# OLS: Mincer-style wage equation (quadratic in experience)
cps_lm <- lm(formula = log(wage) ~ experience + I(experience^2) + education,
data = cps)
# quantile regression
cps_rq <- rq(formula = log(wage) ~ experience + I(experience^2) + education,
data = cps,
tau = seq(0.2, 0.8, by = 0.15))
# prediction grid: education fixed at its mean, full observed experience range
cps2 <- data.frame(education = mean(cps$education),
experience = min(cps$experience):max(cps$experience))
cps2 <- cbind(cps2, predict(cps_lm, newdata = cps2, interval = "prediction"))
# NOTE(review): type = "" looks like a placeholder; presumably the default
# prediction type is intended -- confirm against quantreg::predict.rq
cps2 <- cbind(cps2, predict(cps_rq, newdata = cps2, type = ""))
plot(log(wage) ~ experience, data = cps)
# overlay the quantile-regression predictions (columns 6:10 of cps2)
for(i in 6:10)
lines(cps2[,i] ~ experience, data = cps2, col = "red")
plot(summary(cps_rq))
|
4ea78bb143325bfd694bac669754bb5865e9847e
|
880bb344072fa12bebaae9c0bc327b2a73a8adce
|
/man/transform_by_feature.Rd
|
3ecf3dd3cc83701442b86aa7c79743895f86a011
|
[] |
no_license
|
zhuchcn/Metabase
|
a761b2efc3dc304bd35776d2fe2040e10a7ffa40
|
85306a15b540c307b073b65fdc860dfd3e0d91be
|
refs/heads/master
| 2020-03-22T06:12:55.691167
| 2019-07-22T18:40:17
| 2019-07-22T18:40:17
| 139,618,397
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 719
|
rd
|
transform_by_feature.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods-mSet.R
\name{transform_by_feature}
\alias{transform_by_feature}
\title{Transform an mSet object by features}
\usage{
transform_by_feature(object, fun, ...)
}
\arguments{
\item{object}{An \code{\link{mSet-class}} or derived class object.}
\item{fun}{A function to apply.}
\item{...}{Arguments to pass to the function.}
}
\value{
An \code{\link{mSet-class}} or derived class object
}
\description{
Transform the conc_table slot of an \code{\link{mSet-class}}
object, one feature at a time. This is similar to MARGIN = 1 in the
\code{\link{apply}} function.
}
\seealso{
\code{\link{mSet-class}}, \code{\link{transform_by_features}}
}
|
8f201f7b44af1a5749c94112121fe53a76c381b0
|
2487dfa8bb23d3e1a9000dba265c416cccb69939
|
/demo/MCMCGuide11.R
|
67f64f3f279ab2633f2f33c596c0f4f56b96bd07
|
[] |
no_license
|
cran/R2MLwiN
|
f2c5694b60e3a392ad516ab63689c642f3fc72bb
|
593d94db244d3fc07538aedf83fc183859b9f5fd
|
refs/heads/master
| 2023-03-21T15:14:11.554599
| 2023-03-14T04:40:02
| 2023-03-14T04:40:02
| 17,681,793
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,211
|
r
|
MCMCGuide11.R
|
############################################################################
# MLwiN MCMC Manual
#
# 11 Poisson Response Modelling . . . . . . . . . . . . . . . . . . . . 153
#
# Browne, W.J. (2009) MCMC Estimation in MLwiN, v2.13. Centre for
# Multilevel Modelling, University of Bristol.
############################################################################
# R script to replicate all analyses using R2MLwiN
#
# Zhang, Z., Charlton, C., Parker, R, Leckie, G., and Browne, W.J.
# Centre for Multilevel Modelling, 2012
# http://www.bristol.ac.uk/cmm/software/R2MLwiN/
############################################################################
library(R2MLwiN)
# MLwiN folder
mlwin <- getOption("MLwiN_path")
# Prompt until the configured path points at an executable MLwiN install.
while (!file.access(mlwin, mode = 1) == 0) {
cat("Please specify the root MLwiN folder or the full path to the MLwiN executable:\n")
mlwin <- scan(what = character(0), sep = "\n")
mlwin <- gsub("\\", "/", mlwin, fixed = TRUE)
}
options(MLwiN_path = mlwin)
# User's input if necessary
## Read mmmec data
data(mmmec, package = "R2MLwiN")
# 11.1 Simple Poisson regression model . . . . . . . . . . . . . . . . . 155
# Poisson model of observed counts with log-expected counts as offset
(mymodel1 <- runMLwiN(log(obs) ~ 1 + uvbi + offset(log(exp)), D = "Poisson", estoptions = list(EstM = 1, mcmcMeth = list(iterations = 50000)),
data = mmmec))
summary(mymodel1@chains[, "FP_uvbi"])
# MCMC diagnostic plots for the UV coefficient chain
sixway(mymodel1@chains[, "FP_uvbi", drop = FALSE], "beta_1")
# 11.2 Adding in region level random effects . . . . . . . . . . . . . . 157
(mymodel2 <- runMLwiN(log(obs) ~ 1 + uvbi + offset(log(exp)) + (1 | region), D = "Poisson", estoptions = list(EstM = 1,
mcmcMeth = list(iterations = 50000, seed = 13)), data = mmmec))
summary(mymodel2@chains[, "FP_uvbi"])
sixway(mymodel2@chains[, "FP_uvbi", drop = FALSE], "beta_1")
# 11.3 Including nation effects in the model . . . . . . . . . . . . . . 159
(mymodel3 <- runMLwiN(log(obs) ~ 1 + uvbi + offset(log(exp)) + (1 | nation) + (1 | region), D = "Poisson", estoptions = list(EstM = 1,
mcmcMeth = list(iterations = 50000, seed = 13)), data = mmmec))
# same model re-parameterised with nation as fixed effects (no intercept)
(mymodel4 <- runMLwiN(log(obs) ~ 0 + uvbi + nation + offset(log(exp)) + (1 | region), D = "Poisson", estoptions = list(EstM = 1,
mcmcMeth = list(iterations = 50000)), data = mmmec))
# 11.4 Interaction with UV exposure . . . . . . . . . . . . . . . . . . .161
(mymodel5 <- runMLwiN(log(obs) ~ 0 + nation + nation:uvbi + offset(log(exp)) + (1 | region), D = "Poisson", estoptions = list(EstM = 1,
mcmcMeth = list(iterations = 50000)), data = mmmec))
sixway(mymodel5@chains[, "FP_nationBelgium", drop = FALSE], acf.maxlag = 5000, "beta_1")
# 11.5 Problems with univariate updating Metropolis procedures . . . . . 163
# longer chain with thinning to work around poor mixing
(mymodel6 <- runMLwiN(log(obs) ~ 0 + nation + nation:uvbi + offset(log(exp)) + (1 | region), D = "Poisson", estoptions = list(EstM = 1,
mcmcMeth = list(iterations = 500000, thinning = 10)), data = mmmec))
sixway(mymodel6@chains[, "FP_nationBelgium", drop = FALSE], "beta_1")
# Chapter learning outcomes . . . . . . . . . . . . . . . . . . . . . . .128
############################################################################
|
38b26f27dad13a7074d1ccb3d123edf6f981b187
|
ae8016078b8562a186fc4b70d58e502299e948f4
|
/R/dynrFuncAddress.R
|
26739cf1cc43fbb3aa7ab2d41fd72a4e3bbfe9b5
|
[] |
no_license
|
mhunter1/dynr
|
6c6a4a8527471306087a4e790cf5e98b3376c566
|
1f4f98d30ebc8a8ef8e90c1365b2d4b588922770
|
refs/heads/master
| 2023-07-21T11:05:48.652487
| 2023-06-30T14:24:09
| 2023-06-30T14:24:09
| 221,282,889
| 4
| 10
| null | 2023-07-02T18:00:14
| 2019-11-12T18:17:57
|
R
|
UTF-8
|
R
| false
| false
| 9,560
|
r
|
dynrFuncAddress.R
|
#--------------------------------------------------
# .C2funcaddresses
# Purpose:
# takes in a C file or C scripts
# returns a list of addresses of the compiled model functions and maybe R functions for debug purposes
#------------------------------------------------
# Changed DLL name and directory to be user-specified and permanent
# Args:
#   verbose:          logical; print compiler commands/flags while building.
#   isContinuousTime: logical; selects which set of model functions to
#                     resolve from the compiled library.
#   infile:           path to the C source file containing the model functions.
#   outfile:          output path (without extension) for the shared library.
#   compileLib:       logical; force recompilation even if the library exists.
# Returns: list(address = <named list of native symbol addresses>,
#               libname = <path of the loaded shared library>)
.C2funcaddress<-function(verbose,isContinuousTime, infile, outfile,compileLib){
#-------Set some variables: This function may later be extended----------
language <- "C"
#-------Get the full name of the library----------
if ( .Platform$OS.type == "windows" ) outfile <- gsub("\\\\", "/", outfile)
libLFile <- paste(outfile, .Platform$dynlib.ext, sep="")
if (compileLib|(!file.exists(libLFile))){#when the compileLib flag is TRUE or when the libLFile does not exist
#-------Check the input arguments----------------------------
code <- readLines(infile)
# ---- Write and compile the code ----
print("Get ready!!!!")
#filename<- basename(tempfile())
CompileCode(code, language, verbose, libLFile)
#---- SET A FINALIZER TO PERFORM CLEANUP: register an R function to be called upon garbage collection of object or at the end of an R session---
#cleanup <- function(env) {
# if ( filename %in% names(getLoadedDLLs()) ) dyn.unload(libLFile)
# unlink(libLFile)
# }
#reg.finalizer(environment(), cleanup, onexit=TRUE)
}
#-----dynamically load the library-------
DLL <- dyn.load( libLFile )
# Resolve the native entry points the estimation code will call; the
# continuous-time model exposes derivative functions (dx/dt, dF/dx, dP/dt)
# where the discrete-time model exposes a dynamics function and its Jacobian.
if (isContinuousTime){
res <- list(f_measure=getNativeSymbolInfo("function_measurement", DLL)$address,
f_dx_dt=getNativeSymbolInfo("function_dx_dt", DLL)$address,
f_dF_dx=getNativeSymbolInfo("function_dF_dx", DLL)$address,
f_dP_dt=getNativeSymbolInfo("function_dP_dt", DLL)$address,
f_initial_condition=getNativeSymbolInfo("function_initial_condition", DLL)$address,
f_regime_switch=getNativeSymbolInfo("function_regime_switch", DLL)$address,
f_noise_cov=getNativeSymbolInfo("function_noise_cov", DLL)$address,
f_transform=getNativeSymbolInfo("function_transform", DLL)$address)
}else{
res <- list(f_measure=getNativeSymbolInfo("function_measurement", DLL)$address,
f_dynamic=getNativeSymbolInfo("function_dynam", DLL)$address,
f_jacob_dynamic=getNativeSymbolInfo("function_jacob_dynam", DLL)$address,
f_initial_condition=getNativeSymbolInfo("function_initial_condition", DLL)$address,
f_regime_switch=getNativeSymbolInfo("function_regime_switch", DLL)$address,
f_noise_cov=getNativeSymbolInfo("function_noise_cov", DLL)$address,
f_transform=getNativeSymbolInfo("function_transform", DLL)$address)
}
return(list(address=res, libname=libLFile))
}
#--------------------------------------------------
# CompileCode: A function adapted from the compileCode function in the inline package
# Purpose: compiles a C file to create a shared library
#------------------------------------------------
CompileCode <- function(code, language, verbose, libLFile) {
  # Compile C source `code` into the shared library `libLFile` via
  # `R CMD SHLIB`, with GSL compile/link flags taken from LIB_GSL (Windows)
  # or gsl-config (unix-alikes). Stops with a listing of the program source
  # if compilation fails.
  #
  # Args:
  #   code:     character vector of source lines to compile.
  #   language: source language; selects the file extension (e.g. "C" -> .c).
  #   verbose:  logical; echo flags and the compilation command.
  #   libLFile: target path of the shared library (with dynlib extension).
  wd <- getwd()
  on.exit(setwd(wd))
  if ( .Platform$OS.type == "windows" ) {
    ## windows gsl flags, from the LIB_GSL environment variable
    LIB_GSL <- Sys.getenv("LIB_GSL")
    LIB_GSL <- gsub("\\\\", "/", LIB_GSL) # replace "\" with "/"
    LIB_GSL <- gsub("\"", "", LIB_GSL)    # strip embedded quotes
    gsl_cflags <- sprintf( "-I\"%s/include\"", LIB_GSL)
    gsl_libs <- sprintf( "-L\"%s/lib/%s\" -lgsl -lgslcblas", LIB_GSL, .Platform$r_arch)
  } else {
    ## UNIX-alike build: ask gsl-config for the flags
    gsl_cflags <- system( "gsl-config --cflags" , intern = TRUE )
    gsl_libs <- system( "gsl-config --libs" , intern = TRUE )
    # Perhaps change above to use similar to configure.win syntax
    # GSL_CFLAGS=`${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe -e "RcppGSL:::CFlags()"`
    # GSL_LIBS=`${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe -e "RcppGSL:::LdFlags()"`
  }
  if (verbose) cat("Setting PKG_CPPFLAGS to", gsl_cflags, "\n")
  Sys.setenv(PKG_CPPFLAGS=gsl_cflags)
  if (verbose) cat("Setting PKG_LIBS to", gsl_libs, "\n")
  Sys.setenv(PKG_LIBS=gsl_libs)
  # Derive the source file name from the library name by swapping the
  # dynlib extension for the language's source extension.
  libCFile <- sub(.Platform$dynlib.ext, ".EXT", libLFile)
  extension <- switch(language, "C++"=".cpp", C=".c", Fortran=".f", F95=".f95",
                      ObjectiveC=".m", "ObjectiveC++"=".mm")
  libCFile <- sub(".EXT$", extension, libCFile)
  ## Write the code to the file for compilation
  write(code, libCFile)
  ## Compile the code using the running version of R if several available
  setwd(dirname(libCFile))
  errfile <- paste( basename(libCFile), ".err.txt", sep = "" )
  # Display string only; the actual compilation runs through system2() below.
  cmd <- paste(R.home(component="bin"), "/R CMD SHLIB ", basename(libCFile), " ", gsl_libs, " ", " 2> ", errfile, sep="")
  if (verbose) cat("Compilation argument:\n", cmd, "\n")
  compiled <- system2(paste0(R.home(component="bin"), "/R"),
                      args=c("CMD", "SHLIB", basename(libCFile)),
                      stderr=errfile, stdout=verbose)
  errmsg <- readLines(errfile)
  unlink(errfile)
  writeLines(errmsg)
  setwd(wd)
  #### Error Messages
  # BUG FIX: the original `errmsg[!grep("warning", errmsg)]` negated the
  # integer positions returned by grep(), which always produced an empty
  # vector -- so compiler errors were never counted here. grepl() yields the
  # intended logical mask that keeps only non-warning lines.
  errmsg <- errmsg[!grepl("warning", errmsg)]
  if ( !file.exists(libLFile) | length(errmsg) > 0 ) {
    cat("\nERROR(s) during compilation: source code errors or compiler configuration errors!\n")
    cat("\nProgram source:\n")
    codeWithLineNums <- paste(sprintf(fmt="%3d: ", seq_along(code)), code, sep="")
    writeLines(codeWithLineNums)
    stop("Compilation ERROR, function(s)/method(s) not created!")
  }
}
#------------------------------------------------------------------------------
# Check configuration
##' Check that dynr in configured properly
##'
##' @param verbose logical. Whether to print messages during/after checks
##'
##' @details
##' The 'dynr' package requires additional set-up and configuration beyond
##' just installing the package. In particular, it requires compiling C code
##' along with GSL to run (cook) models. This function runs some basic checks
##' of the configuration. We check that (1) R is on the PATH variable, (2)
##' Rtools exists and is on the PATH variable for Windows, (3) a C compiler
##' is available, and (4) GSL is available and on the PATH.
##'
##' In general, see the 'Installation for Users' vignette for set-up and
##' configuration instructions.
##'
##' @return No return value.
##'
##' @examples
##' \dontrun{dynr.config()}
dynr.config <- function(verbose=FALSE){
  # Verify the dynr build environment: R/Rtools on PATH (Windows), a C
  # compiler, and GSL availability. Stops with installation guidance on a
  # hard failure; warns for developer-only issues.
  genmsg <- paste0(
    "\nPlease read the 'Installation for Users' vignette at\n",
    "https://cran.r-project.org/web/packages/dynr/",
    "vignettes/InstallationForUsers.pdf",
    "\nor\n",
    "vignette(package='dynr', 'InstallationForUsers')\n")
  # Messages for each configuration problem we can detect
  noRmsg <- "R did not appear to be on the 'PATH' environment variable."
  noRtoolsmsg <- "No Rtools found in 'PATH' environment variable."
  noCmsg <- "No C compiler found."
  noGSLmsg <- "LIB_GSL variable not found."
  if ( .Platform$OS.type == "windows" ) {
    # Normalize PATH entries to forward slashes for comparison
    path <- Sys.getenv("PATH")
    path <- normalizePath(strsplit(path, split=';', fixed=TRUE)[[1]])
    path <- gsub("\\\\", "/", path)
    path <- paste(path, sep='', collapse=';')
    rpath <- normalizePath(R.home(component="bin"))
    rpath <- gsub("\\\\", "/", rpath)
    findR <- grep(rpath, path)
    if(length(findR) < 1 || findR[1] != 1){
      # Only warning
      # R does NOT need to be on the path for windows unless you're a developer
      warning(paste0(noRmsg, '\nThis is only needed for developers.\nIf that is not you, then happily ignore this warning.\n'))
    }
    # BUG FIX: grep() returns integer(0) when Rtools is absent, and the
    # original `grep(...) != 1` then crashed with "argument is of length
    # zero" instead of reporting the missing Rtools. Guard the length
    # explicitly, preserving the original position check.
    findRtools <- grep('rtools', path, ignore.case=TRUE)
    if(length(findRtools) < 1 || findRtools[1] != 1){
      stop(paste0(noRtoolsmsg, genmsg))
    }
    if(!checkForCompiler() ){
      stop(paste0(noCmsg, genmsg))
    }
    ## windows gsl flags
    LIB_GSL <- Sys.getenv("LIB_GSL")
    LIB_GSL <- gsub("\\\\", "/", LIB_GSL) # replace "\" with "/"
    LIB_GSL <- gsub("\"", "", LIB_GSL)    # strip embedded quotes
    if(nchar(LIB_GSL) < 1){
      stop(paste0(noGSLmsg, genmsg))
    }
  }else {
    ## UNIX-alike build
    if(!checkForCompiler() ){
      stop(paste0(noCmsg, genmsg))
    }
    ## Probe GSL via gsl-config; wrap in try so a missing binary produces a
    ## helpful error message instead of a raw system failure.
    gsl_cflags <- try(system( "gsl-config --cflags", intern=TRUE), silent=TRUE)
    if(inherits(gsl_cflags, 'try-error')){
      stop("'gsl-config --cflags' failed. You probably need to install GSL and/or add it to your path.")
    }
    gsl_libs <- try(system( "gsl-config --libs", intern=TRUE), silent=TRUE)
    # BUG FIX: the original re-checked gsl_cflags here, so a failure of
    # 'gsl-config --libs' was silently ignored.
    if(inherits(gsl_libs, 'try-error')){
      stop("'gsl-config --libs' failed. You probably need to install GSL and/or add it to your path.")
    }
  }
  if(verbose){
    message("Configuration check complete. Ready to rock and roll.")
  }
}
# Taken from Rcpp
checkForCompiler <- function(minVersion = package_version("4.6.0")){
  # Report whether a usable g++-style compiler of at least `minVersion` is
  # available. Candidates: g++ plus whatever CXX / CXX1X point to.
  binaries <- c("g++", Sys.getenv("CXX", unset = ""),
                Sys.getenv("CXX1X", unset = ""))
  binpaths <- lapply(binaries, function(b) {
    if (b == "")
      NULL
    else Sys.which(b)
  })
  rl <- lapply(binpaths, function(b) {
    # Skip unset binaries and ones Sys.which() could not locate (empty path).
    if (is.null(b) || !nzchar(b))
      return(NULL)
    con <- pipe(paste(b, "-v 2>&1"), "r")
    # Close the connection even if readLines() errors.
    on.exit(close(con), add = TRUE)
    lines <- readLines(con)
    # Keep the "g.. version x.y.z" line and compare its version field.
    lines <- lines[grepl("^g.. version", lines)]
    if (length(lines) == 0)
      return(NULL)
    ver <- strsplit(lines, " ")[[1]][3]
    package_version(ver) >= minVersion
  })
  found <- unlist(rl)  # drops the NULLs of undetected binaries
  # BUG FIX: the original returned all(do.call(c, rl)), and all(NULL) is
  # TRUE -- so "no compiler detected at all" was reported as success.
  # Require at least one detected compiler, all meeting the version floor.
  length(found) > 0 && all(found)
}
|
c091b5cb7ffe4906f2e21dec1619d87f35a6495d
|
5ef1116a839153add3d5e2d51751c81e169671b1
|
/analysis/experiment_01/Analysis.r
|
620c62126588342741512234dd9908c9c701dac8
|
[] |
no_license
|
adolfohermosillo/variable_telicity
|
2f0889eeea52d46b5ff156982aa7529975da6ac8
|
207d19e404cf441e49354a542df2a145650c18bd
|
refs/heads/master
| 2023-05-12T06:19:53.727947
| 2021-06-09T04:14:15
| 2021-06-09T04:14:15
| 353,233,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,806
|
r
|
Analysis.r
|
# Analysis for experiment 01 (variable telicity / verbs of consumption):
# load and label the data, exclude participants who fail control or baseline
# items, plot response proportions, and fit mixed-effects logistic models of
# response on event-progression state and object maximality.
library(ggplot2)
library(tidyverse)
library(lme4)
library(wesanderson)
library(languageR)

# importing data set
data <- read_csv(file = '/Users/jesushermosillo/Desktop/Spring 2021/LINGUIST_245B/variable_telicity/data/experiment_01/variable_telicity_and_verbs_of_consumption-merged.csv')

# labels for plots
data$levels <- sprintf('level %d', data$state)  # "level 1", "level 2", ..., etc
data$QDO_maximality <- factor(ifelse(data$maximality == 'maximal', 'Maximal', 'Non-Maximal'))

# exclude participants who fail to correctly answer control items
exclude_by_control <- data[data$item_type == 'control' & data$response == 'TRUE', ]$workerid
data_ <- data[!is.element(data$workerid, exclude_by_control), ]

# exclude participants whose response to baseline is FALSE
exclude_by_baseline <- data_[data_$item_type == 'baseline' & data_$response == 'FALSE', ]$workerid
data_1 <- data_[!is.element(data_$workerid, exclude_by_baseline), ]

# number of participants after exclusions
length(unique(data_1$workerid))

# Plotting proportion of responses by event progression levels
ggplot(data_1, aes(x = levels, fill = response)) +
  geom_bar(position = "fill") +
  theme_bw() + scale_fill_brewer(palette = "Paired") +
  labs(x = "Event Progression Levels", fill = 'Response', y = 'Proportion of Responses')

# Plotting proportion of responses by object maximality
ggplot(data_1, aes(x = QDO_maximality, fill = response)) +
  geom_bar(position = "fill") +
  theme_bw() + scale_fill_brewer(palette = "Paired") +
  labs(x = "Object Maximality", fill = 'Response', y = 'Proportion of Responses')

# Plotting proportion of responses by event progression levels for maximal and non-maximal objects
ggplot(data_1, aes(x = levels, fill = response)) +
  geom_bar(position = "fill") +
  facet_grid(~QDO_maximality) +
  theme_bw() + scale_fill_brewer(palette = "Paired") +
  labs(x = "Event Progression Levels", fill = 'Response', y = 'Proportion of Responses')

# critical data points, with centered numeric predictors for the models
data_critical <- data_1[data_1$item_type == 'critical', ]
data_critical <- data_critical %>%
  mutate(numeric_DO = as.numeric(QDO_maximality)) %>%
  mutate(centered_QDOM = numeric_DO - mean(numeric_DO))
data_critical <- data_critical %>%
  mutate(numeric_EP = as.numeric(state)) %>%
  mutate(centered_state = numeric_EP - mean(numeric_EP))

# model does not converge
# Fixed effects: state + object maximality (both centered),
# Random effects: object maximality slopes and participant and item intercepts
model_0 <- glmer(response ~ centered_state * centered_QDOM + (1 + centered_QDOM | workerid) + (1 + centered_QDOM | item), data = data_critical, family = "binomial")
summary(model_0)

# model does not converge
model_1 <- glmer(response ~ centered_state * centered_QDOM + (1 | workerid) + (1 + centered_QDOM | item), data = data_critical, family = "binomial")
summary(model_1)

# model does not converge
model_2 <- glmer(response ~ centered_state * centered_QDOM + (1 + centered_QDOM | workerid) + (1 | item), data = data_critical, family = "binomial")
summary(model_2)

# model does not converge
model_3 <- glmer(response ~ centered_state * centered_QDOM + (1 | workerid) + (1 | item), data = data_critical, family = "binomial")
summary(model_3)

# model CONVERGES: participant intercepts only
model_4 <- glmer(response ~ centered_state * centered_QDOM + (1 | workerid), data = data_critical, family = "binomial")
summary(model_4)

# model does not converge.
# BUG FIX: this fit was previously also assigned to `model_4`, silently
# clobbering the converging model reported as final at the bottom.
model_4b <- glmer(response ~ centered_state * centered_QDOM + (1 + centered_QDOM | workerid), data = data_critical, family = "binomial")
summary(model_4b)

# model CONVERGES: item intercepts only
model_5 <- glmer(response ~ centered_state * centered_QDOM + (1 | item), data = data_critical, family = "binomial")
summary(model_5)

# final model: the converging participant-intercept model
summary(model_4)
|
11b69eed558bc770a283f745503818cb87d971df
|
7cecfa40ea47424e87af11659faa91e6c260fa8c
|
/man/longVarioMultiple.Rd
|
a6dc3df370dc75cf08c1779038fbb22d597f8aad
|
[] |
no_license
|
PratheepaJ/bootLong
|
7b444c006a98ef9370cc4247c312bb734420cd38
|
7986e753dd0e63e078631cec257f8550189183b7
|
refs/heads/master
| 2021-03-22T01:15:07.640516
| 2020-04-02T13:52:31
| 2020-04-02T13:52:31
| 121,357,751
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,224
|
rd
|
longVarioMultiple.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/longVarioMultiple.R
\name{longVarioMultiple}
\alias{longVarioMultiple}
\title{Plots the variogram for multiple taxa given the indices.}
\usage{
longVarioMultiple(pstr, main_factor, time_var, subjectID_var,
starttaxa = 1, endtaxa = 4, point = FALSE, taxlevel = "Species")
}
\arguments{
\item{pstr}{(Required). A \code{\link{phyloseq-class}}. The arcsinh transformation of the otu table.}
\item{main_factor}{Character string. The name of the variable to map main factor in the plot (factor vector).}
\item{time_var}{Character string. The name of the variable to map time in the plot (integer vector).}
\item{subjectID_var}{Character string. The name of the variable to map Subject Ids in the plot (factor vector).}
\item{starttaxa}{Numeric. The first index of taxa ordered by total reads}
\item{endtaxa}{Numeric. The last index of taxa ordered by total reads}
\item{point}{Logical. Whether variogram with observed values.}
\item{taxlevel}{Character string. The taxonomy level for the plot title}
}
\value{
\code{ggplot} object of the variogram for multiple taxa.
}
\description{
Plots the variogram for multiple taxa given the indices.
}
|
f68963a78a112705dea421f9bfde18f4d9cb7c9b
|
b9121d329483b371fcd048e036661cfa288bfe08
|
/lectures/data-raw/aklweather.R
|
4d86e2860ca2a9a3c0e3592e4f96590e4a720b5f
|
[] |
no_license
|
earowang/stats220
|
3ca02f5113294a40b3101edfe78e0edcda4c6ca2
|
183c260d3ac3036f4e842829f8f6501c1da3c33e
|
refs/heads/master
| 2023-08-26T02:48:58.138357
| 2021-06-07T22:55:18
| 2021-06-07T22:55:18
| 242,031,925
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 995
|
r
|
aklweather.R
|
## code to prepare `aklweather` dataset goes here
library(tidyverse)
library(lubridate)
# Read stations data
stations <- read_table("data/ghcnd/ghcnd-stations.txt",
col_names = c("id", "lat", "lon"))
library(ggmap)
# Bounding box around Auckland, NZ (lon/lat limits)
akl_box <- c(left = 174.69, bottom = -37.09, right = 174.94, top = -36.60)
akl_map <- get_map(akl_box)
# Visual check: where the GHCND stations fall relative to the box
ggmap(akl_map) +
geom_point(aes(x = lon, y = lat), stations)
# Keep only stations inside the Auckland bounding box
akl_id <- stations %>%
filter(akl_box["left"] < lon, lon < akl_box["right"],
akl_box["bottom"] < lat, lat < akl_box["top"])
library(rnoaa)
# Chunked date windows from 2019-01-01 through today; each ncdc() request
# below is capped at 1000 records, hence the split.
# NOTE(review): days(251) * 0:15 produces 251-day windows -- confirm the
# intended chunk size.
startdate <- make_date(2019) + days(251) * 0:15
enddate <- startdate - days(1)
enddate <- c(enddate[-1], today())
# Download GHCND daily data for each window and row-bind the results
akl_ncdc <- map2_dfr(startdate, enddate, function(x, y) {
ncdcout <- ncdc(datasetid = "GHCND",
stationid = paste0("GHCND:", akl_id$id), limit = 1000,
startdate = x, enddate = y)
ncdcout$data
})
# Tidy: reduce timestamps to dates, lowercase the column names, write out
aklweather <- akl_ncdc %>%
mutate(date = as_date(ymd_hms(date))) %>%
rename_with(tolower)
write_csv(aklweather, "data/ghcnd/ghcnd-akl.csv")
|
86534c8472247e824f086203302cc619e8499e6e
|
c7ea7d89e36aa3d38924f2ee32f286e3a88b7ee6
|
/ui.R
|
5be88678617f353b26e01fcd07f84812cf564e7b
|
[
"MIT"
] |
permissive
|
guptatan/staff_projection
|
2503f3b1b4d4eab73a7b12312c8a5a8c8a30d3ae
|
be9c03eb43e508b8b4bd07b734b39d49cc8541ec
|
refs/heads/master
| 2023-07-14T19:22:54.894761
| 2021-09-01T17:39:59
| 2021-09-01T17:39:59
| 254,688,075
| 0
| 0
| null | 2020-04-10T16:53:53
| 2020-04-10T16:53:52
| null |
UTF-8
|
R
| false
| false
| 9,060
|
r
|
ui.R
|
library(shiny)
library(tidyverse)
library(rhandsontable)
library(highcharter)
library(shinyjs)
library(plotly)
# Define the UI for the staffing-projection application
shinyUI(
fluidPage(
includeCSS("styles.css"),
titlePanel("Project Your Staffing Needs"),
# Start - Sidebar
sidebarLayout(
sidebarPanel(
width = 4,
# step1 - Census
h4("Step 1: Input Projected Census"),
actionButton(
"prejected_cesus",
label = "Input Projected Census",
width = "100%",
icon("database"),
class = "main-button margin-bottom20"
),
# step1 - Advanced Section
useShinyjs(),
shinyWidgets::materialSwitch(inputId = "advanced_census_input", label = strong("Advanced"), value = FALSE, status = "success"),
helpText(id = "advanced_input_help", "Estimate staffing for COVID and non-COVID patients"),
numericInput(
"total_bed",
"Total number of beds",
1000,
min = 0,
max = 1000,
step = 10,
width = "70%"
),
shinyWidgets::setSliderColor("#404040", c(3)),
sliderInput("icu_perc", label = "Proportion of all beds allocated to ICU", min = 0, max = 100, post = " %", value = 30),
sliderInput("capacity_perc", label = "Bed Occupancy", min = 0, max = 100, post = " %", value = 91),
hr(),
# step2 - Staff ratio
h4("Step 2: Edit Staffing Ratios"),
actionButton(
"update_gen",
"Edit Staffing Ratios",
icon("user-md"),
width = "100%",
class = "main-button margin-bottom10"
),
actionButton(
"update_capacity",
"Enter Total Employees",
icon("clipboard-list"),
width = "100%",
class = "main-button margin-bottom10"
),
hr(),
# step3 - Plots
h4("Step 3: Generate Plots"),
actionButton(
"generateButton",
label = "Generate Plot",
width = "100%",
icon("chart-line"),
class = "main-button margin-bottom20"
),
useShinyjs(),
shinyWidgets::materialSwitch(inputId = "show_icu_non_icu_plots", label = strong("Show ICU and Non-ICU plots"), value = FALSE, status = "success"),
hr(),
# step4 - Download
h4("Step 4: Download Tables"),
downloadButton(
"downloadData_combine_file",
"Download Combined File",
class = "main-button",
style = "width: 100%;"
)
),
# End sidebar
# Start mainPanel --------
mainPanel(
width = 8,
tags$style(HTML("
.tabbable > .nav > li[class=active] > a[data-value='Normal'] {background-color: #9dc183; color:black}
.tabbable > .nav > li[class=active] > a[data-value='Crisis'] {background-color: #8D021F; color:white}
")),
tabsetPanel(
id = "inTabset",
type = "tabs",
# tabPanel("test", tableOutput("test")),
# plots ------
tabPanel(
"Normal",
width = 8,
div(uiOutput("plot_norm"), class = "plot"),
div(tableOutput("table_result_normal"), class = "font-size")
),
tabPanel(
value = "Crisis",
title = "Stretch",
div(uiOutput("plot_crisis"), class = "plot"),
div(tableOutput("table_result_crisis"), class = "font-size")
),
# projected census ------
tabPanel(
value = "census",
title = "Projected Census",
h4(
"Option 1: Upload your projected patient census (from ",
a("CHIME", target = "_blank", href = "https://penn-chime.phl.io/"), "or using our ",
a("template", href = "data/projected_census_template.csv", target = "_blank", download = "projected_census_template.csv"),
")"
),
fileInput(
"chime_up",
"Click browse to select file (.csv)",
multiple = FALSE,
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv"
)
),
hr(),
h4("Option 2: Input your projected patient census manually"),
p(strong("Right click"), "in a cell to add and delete row;", "select cell and type the new value.",
class = "font-size13"
),
actionButton(
"reset_census",
"Clear Table",
icon("table"),
class = "main-button margin-bottom20"
),
actionButton(
"default_chime",
"Reset to Default",
icon("undo"),
class = "main-button margin-bottom20"
),
div(rHandsontableOutput("prejected_census"), class = "font-size margin-bottom10"),
helpText(
class = "text-margin margin-top10", strong("hospitalized:"), "Number of patients that are hospitalized in", em("Non-ICU"), "units; ", br(),
strong("icu:"), "Number of patients that are in", em("ICU"), "units"
)
),
# editable tables -------
tabPanel(
value = "edit_ratio_table",
title = "Patient-to-Staff Ratios",
# h4("Option 1: Upload Staffing Ratios File (using our",
# a("template", href='data/Staffing_role_and_ratio_template.xlsx',target="_blank", download = 'Staffing_role_and_ratio_template.xlsx'),
# ")"),
#
# fileInput(
# "team_in",
# "Click browse to select file (.xlsx)",
# multiple = FALSE,
# accept = c(".xlsx")
# ),
# hr(),
h4("Option 1: Edit Staffing Ratio Table Below"),
helpText(
class = "text-margin",
strong("Important note:"),
"These estimates are designed to give a sense of general staffing needs, but your needs may vary based on local conditions."
),
sliderInput("reduction", label = "Expected staffing reduction (due to sickness, quarantine restrictions, or other)", min = 0, max = 100, post = " %", value = 30, width = "600px"),
p(
strong("Right click"),
"in a cell to add and delete row;",
"select cell and type the new value",
class = "font-size13 margin-top20 margin-bottom10"
),
actionButton(
"reset",
"Clear Table",
icon("table"),
class = "main-button margin-bottom10"
),
actionButton(
"reset_to_ori",
"Reset to Default",
icon("undo"),
class = "main-button margin-bottom10"
),
h4("ICU"),
div(rHandsontableOutput("x1"), class = "font-size margin-bottom20"),
h4("Non-ICU"),
div(rHandsontableOutput("x2"), class = "font-size margin-bottom20"),
downloadButton(
"downloadData_all_ratio",
"Download Staffing Ratios Tables",
class = "main-button"
),
helpText(
class = "text-margin margin-top10",
strong("Role: "),
"List of possible staff roles",
br(),
strong("Ratio (Normal): "),
"The patient:staff ratio (i.e. how many patients each staff member cares for)",
br(),
strong("Ratio (Stretch): "),
"The patient:staff ratio during a ‘Stretch mode’ (i.e. the maximum number patients each staff member can care for)",
br(),
br(),
strong("*"), em("Default patient-to-staff ratios are based on real staffing ratios at a collaborating academic medical center that has undertaken extensive emergency preparedness work for this pandemic.")
)
),
# Capacity tab UI code here
tabPanel(
            value = "capacity_table",
            title = "Total Employees",
            h4("Option 1: Edit Staff Capacity Table Below"),
            p(
              strong("Right click"),
              "in a cell to add and delete row;",
              "select cell and type the new value",
              class = "font-size13"
            ),
            actionButton("clear_capacity", "Clear Table", icon("table"),
              class = "main-button margin-bottom20"
            ),
            actionButton("reset_default_capacity", "Reset to Default", icon("undo"),
              class = "main-button margin-bottom20"
            ),
            # Bug fix: removed the trailing comma after this div(). A trailing
            # comma creates an empty argument, which makes tabPanel() fail with
            # "argument is missing, with no default" when the UI is rendered.
            div(rHandsontableOutput("x3"), class = "font-size")
          )
)
)
)
)
)
|
e6e398f958aad4af4394bc225d29ce4599aad217
|
a176626eb55b6525d5a41e2079537f2ef51d4dc7
|
/Uni/Projects/code/P046.Israel_MAIAC/archive/cnnew/buildfiles/aq25_body.r
|
ac946605adc102e054123447f1e4f5be6fad0434
|
[] |
no_license
|
zeltak/org
|
82d696b30c7013e95262ad55f839998d0280b72b
|
d279a80198a1dbf7758c9dd56339e8a5b5555ff2
|
refs/heads/master
| 2021-01-21T04:27:34.752197
| 2016-04-16T04:27:57
| 2016-04-16T04:27:57
| 18,008,592
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,563
|
r
|
aq25_body.r
|
#-------------------> Year 2010
#if needed load res table
#res<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
### import data
m1.2010 <-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.rds")
################# clean BAD STN PM25 and check if improved model?
raWDaf <- ddply(m1.2010, c( "stn"),
function(x) {
mod1 <- lm(PM25 ~ aod, data=x)
data.frame(R2 = round(summary(mod1)$r.squared, 5),
nsamps = length(summary(mod1)$resid))
})
raWDaf
raWDaf<-as.data.table(raWDaf)
bad<- raWDaf[R2< 0.01]
bad[,badid := paste(stn,sep="-")]
#################BAD STN
m1.2010[,badid := paste(stn,sep="-")]
####Take out bad stations
m1.2010 <- m1.2010[!(m1.2010$badid %in% bad$badid), ]
#scale data
# Z-score standardize all model covariates in one loop instead of 21
# copy-pasted statements; each scaled column gets a ".s" suffix.
# data.table::set() adds columns by reference, matching the original
# `m1.2010[, col.s := scale(col)]` semantics.
scale_cols <- c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
                "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
                "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
                "Raina", "NO2a")
for (cn in scale_cols) {
  set(m1.2010, j = paste0(cn, ".s"), value = scale(m1.2010[[cn]]))
}
# Stage-1 mixed-effects calibration model: PM2.5 regressed on AOD plus
# scaled temporal, spatial and land-use covariates, with a random
# intercept and random AOD slope nested by day within region.
m1.formula <- as.formula(PM25~ aod
+tempa.s+WDa.s+WSa.s+MeanPbl.s #temporal
+elev.s+tden.s+pden.s+Dist2road.s+ndvi.s #spatial
+p_os.s +p_dev.s+p_dos.s+p_farm.s+p_for.s+p_ind.s #land use
#+aod*Dust #interactions
+(1+aod|day/reg_num)) #+(1|stn) !!! stn screws up mod3
#full fit
# Fit on the full 2010 dataset, weighted by normwt (a column in m1.2010 —
# presumably normalized observation weights; confirm upstream).
m1.fit.2010 <- lmer(m1.formula,data=m1.2010,weights=normwt)
# In-sample predictions and model-1 R2 / RMSPE stored in the results table.
m1.2010$pred.m1 <- predict(m1.fit.2010)
res[res$year=="2010", 'm1.R2'] <- print(summary(lm(PM25~pred.m1,data=m1.2010))$r.squared)
#RMSPE
res[res$year=="2010", 'm1.PE'] <- print(rmse(residuals(m1.fit.2010)))
#spatial
###to check
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm1.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010))$r.squared)
res[res$year=="2010", 'm1.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <-tempo2010$PM25-tempo2010$barpm
tempo2010$delpred <-tempo2010$pred.m1-tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm1.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010))$r.squared)
saveRDS(m1.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
#---------------->>>> CV
# 10-fold random-split cross-validation, replacing ten copy-pasted blocks
# (s1..s10) with a single loop. Each iteration:
#   1. draws a fresh random train/test split via splitdf(),
#   2. refits the stage-1 mixed model on the training set,
#   3. predicts the held-out test set using the full random-effects
#      structure (re.form = NULL), allowing new grouping levels.
# The sequence of random draws and fits is identical to the original
# ten-block version, so results are equivalent.
cv_list <- lapply(seq_len(10), function(i) {
  splits <- splitdf(m1.2010)
  fit_cv <- lmer(m1.formula, data = splits$trainset, weights = normwt)
  test_set <- splits$testset
  test_set$pred.m1.cv <- predict(object = fit_cv, newdata = test_set,
                                 allow.new.levels = TRUE, re.form = NULL)
  test_set$iter <- paste0("s", i)
  test_set
})
#BIND 1 dataset
m1.2010.cv <- data.table(do.call(rbind, cv_list))
# cleanup (remove from WS) intermediate CV objects
rm(cv_list)
#table updates
m1.fit.2010.cv<-lm(PM25~pred.m1.cv,data=m1.2010.cv)
res[res$year=="2010", 'm1cv.R2'] <- print(summary(lm(PM25~pred.m1.cv,data=m1.2010.cv))$r.squared)
res[res$year=="2010", 'm1cv.I'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2010.cv))$coef[1,1])
res[res$year=="2010", 'm1cv.I.se'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2010.cv))$coef[1,2])
res[res$year=="2010", 'm1cv.S'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2010.cv))$coef[2,1])
res[res$year=="2010", 'm1cv.S.se'] <-print(summary(lm(PM25~pred.m1.cv,data=m1.2010.cv))$coef[2,2])
#RMSPE
res[res$year=="2010", 'm1cv.PE'] <- print(rmse(residuals(m1.fit.2010.cv)))
#spatial
spatial2010.cv<-m1.2010.cv %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2010.cv.s <- lm(barpm ~ barpred, data=spatial2010.cv)
res[res$year=="2010", 'm1cv.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010.cv))$r.squared)
res[res$year=="2010", 'm1cv.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.s)))
#temporal
tempo2010.cv<-left_join(m1.2010.cv,spatial2010.cv)
tempo2010.cv$delpm <-tempo2010.cv$PM25-tempo2010.cv$barpm
tempo2010.cv$delpred <-tempo2010.cv$pred.m1.cv-tempo2010.cv$barpred
mod_temporal.cv <- lm(delpm ~ delpred, data=tempo2010.cv)
res[res$year=="2010", 'm1cv.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010.cv))$r.squared)
#-------->>> loc stage
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#add 50m LU to CV data
setkey(m1.2010.cv,stn)
setkey(luf,stn)
m1.2010.cv.loc <- merge(m1.2010.cv, luf, all.x = T)
#m1.2010.cv.loc<-na.omit(m1.2010.cv.loc)
#create residual mp3 variable
m1.2010.cv.loc$res.m1<-m1.2010.cv.loc$PM25-m1.2010.cv.loc$pred.m1.cv
#The GAM model
gam.out<-gam(res.m1~s(loc.tden)+s(tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m1.2010.cv.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m1.2010.cv.loc$pred.m1.loc <-predict(gam.out)
m1.2010.cv.loc$pred.m1.both <- m1.2010.cv.loc$pred.m1.cv + m1.2010.cv.loc$pred.m1.loc
res[res$year=="2010", 'm1cv.loc.R2'] <- print(summary(lm(PM25~pred.m1.both,data=m1.2010.cv.loc))$r.squared)
res[res$year=="2010", 'm1cv.loc.I'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2010.cv.loc))$coef[1,1])
res[res$year=="2010", 'm1cv.loc.I.se'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2010.cv.loc))$coef[1,2])
res[res$year=="2010", 'm1cv.loc.S'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2010.cv.loc))$coef[2,1])
res[res$year=="2010", 'm1cv.loc.S.se'] <-print(summary(lm(PM25~pred.m1.both,data=m1.2010.cv.loc))$coef[2,2])
#RMSPE
res[res$year=="2010", 'm1cv.loc.PE'] <- print(rmse(residuals(m1.fit.2010.cv)))
#spatial
spatial2010.cv.loc<-m1.2010.cv.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.2010.cv.loc.s <- lm(barpm ~ barpred, data=spatial2010.cv.loc)
res[res$year=="2010", 'm1cv.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010.cv.loc))$r.squared)
res[res$year=="2010", 'm1cv.loc.PE.s'] <- print(rmse(residuals(m1.fit.2010.cv.loc.s)))
#temporal
tempo2010.loc.cv<-left_join(m1.2010.cv.loc,spatial2010.cv.loc)
tempo2010.loc.cv$delpm <-tempo2010.loc.cv$PM25-tempo2010.loc.cv$barpm
tempo2010.loc.cv$delpred <-tempo2010.loc.cv$pred.m1.both-tempo2010.loc.cv$barpred
mod_temporal.loc.cv <- lm(delpm ~ delpred, data=tempo2010.loc.cv)
res[res$year=="2010", 'm1cv.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010.loc.cv))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
saveRDS(m1.2010.cv.loc,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.predCV.rds")
###############
#MOD2
###############
m2.2010<-readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.rds")
# Z-score standardize the same covariate set for the stage-2 grid data,
# mirroring the m1.2010 scaling; one loop replaces 21 duplicated lines.
scale_cols <- c("elev", "tden", "pden", "dist2A1", "dist2water", "dist2rail",
                "Dist2road", "ndvi", "MeanPbl", "p_ind", "p_for", "p_farm",
                "p_dos", "p_dev", "p_os", "tempa", "WDa", "WSa", "RHa",
                "Raina", "NO2a")
for (cn in scale_cols) {
  set(m2.2010, j = paste0(cn, ".s"), value = scale(m2.2010[[cn]]))
}
#generate predictions
m2.2010[, pred.m2 := predict(object=m1.fit.2010,newdata=m2.2010,allow.new.levels=TRUE,re.form=NULL)]
describe(m2.2010$pred.m2)
#delete impossible prediction values
m2.2010 <- m2.2010[pred.m2 > 0.00000000000001 , ]
m2.2010 <- m2.2010[pred.m2 < 1500 , ]
saveRDS(m2.2010,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
#-------------->prepare for mod3
m2.2010[, bimon := (m + 1) %/% 2]
setkey(m2.2010,day, aodid)
m2.2010<-m2.2010[!is.na(meanPM25)]
#run the lmer part regressing stage 2 pred Vs mean pm
#in israel check per month, also check 30km band and other methods for meanpm
m2.smooth = lme(pred.m2 ~ meanPM25,random = list(aodid= ~1 + meanPM25),control=lmeControl(opt = "optim"), data= m2.2010 )
#xm2.smooth = lmer(pred.m2 ~ meanPM25+(1+ meanPM25|aodid), data= m2.2010 )
#correlate to see everything from mod2 and the mpm works
m2.2010[, pred.t31 := predict(m2.smooth)]
m2.2010[, resid := residuals(m2.smooth)]
res[res$year=="2010", 'm3.t31'] <- print(summary(lm(pred.m2~pred.t31,data=m2.2010))$r.squared)
#split the files to the separate bi monthly datsets
T2010_bimon1 <- subset(m2.2010 ,m2.2010$bimon == "1")
T2010_bimon2 <- subset(m2.2010 ,m2.2010$bimon == "2")
T2010_bimon3 <- subset(m2.2010 ,m2.2010$bimon == "3")
T2010_bimon4 <- subset(m2.2010 ,m2.2010$bimon == "4")
T2010_bimon5 <- subset(m2.2010 ,m2.2010$bimon == "5")
T2010_bimon6 <- subset(m2.2010 ,m2.2010$bimon == "6")
#run the separate splines (smooth) for x and y for each bimon
#whats the default band (distance) that the spline goes out and uses
fit2_1 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon1 )
fit2_2 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon2 )
fit2_3 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon3 )
fit2_4 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon4 )
fit2_5 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon5 )
fit2_6 <- gam(resid ~ s(x_aod_ITM,y_aod_ITM), data= T2010_bimon6 )
#get the predicted-fitted
Xpred_1 <- (T2010_bimon1$pred.t31 - fit2_1$fitted)
Xpred_2 <- (T2010_bimon2$pred.t31 - fit2_2$fitted)
Xpred_3 <- (T2010_bimon3$pred.t31 - fit2_3$fitted)
Xpred_4 <- (T2010_bimon4$pred.t31 - fit2_4$fitted)
Xpred_5 <- (T2010_bimon5$pred.t31 - fit2_5$fitted)
Xpred_6 <- (T2010_bimon6$pred.t31 - fit2_6$fitted)
#remerge to 1 file
m2.2010$pred.t32 <- c( Xpred_1,Xpred_2, Xpred_3, Xpred_4, Xpred_5, Xpred_6)
#this is important so that its sorted as in the first gamm
setkey(m2.2010,day, aodid)
#rerun the lme on the predictions including the spatial spline (smooth)
Final_pred_2010 <- lme(pred.t32 ~ meanPM25 ,random = list(aodid= ~1 + meanPM25 ),control=lmeControl(opt = "optim"),data= m2.2010 )
m2.2010[, pred.t33 := predict(Final_pred_2010)]
#check correlations
res[res$year=="2010", 'm3.t33'] <- print(summary(lm(pred.m2 ~ pred.t33,data=m2.2010))$r.squared)
#------------------------>>>
#import mod3
data.m3 <- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.rds")
#for PM25
data.m3 <- data.m3[,c(1,2,5,29:32,52,53),with=FALSE]
data.m3[, bimon := (m + 1) %/% 2]
setkey(data.m3,day, aodid)
data.m3<-data.m3[!is.na(meanPM25)]
#generate m.3 initial pred
data.m3$pred.m3.mix <- predict(Final_pred_2010,data.m3)
#create unique grid
ugrid <-data.m3 %>%
group_by(aodid) %>%
summarise(lat_aod = mean(lat_aod, na.rm=TRUE), long_aod = mean(long_aod, na.rm=TRUE),x_aod_ITM = mean(x_aod_ITM, na.rm=TRUE), y_aod_ITM = mean(y_aod_ITM, na.rm=TRUE))
#### PREDICT Gam part
#split back into bimons to include the gam prediction in final prediction
data.m3_bimon1 <- data.m3[bimon == 1, ]
data.m3_bimon2 <- data.m3[bimon == 2, ]
data.m3_bimon3 <- data.m3[bimon == 3, ]
data.m3_bimon4 <- data.m3[bimon == 4, ]
data.m3_bimon5 <- data.m3[bimon == 5, ]
data.m3_bimon6 <- data.m3[bimon == 6, ]
#addin unique grid to each bimon
uniq_gid_bimon1 <- ugrid
uniq_gid_bimon2 <- ugrid
uniq_gid_bimon3 <- ugrid
uniq_gid_bimon4 <- ugrid
uniq_gid_bimon5 <- ugrid
uniq_gid_bimon6 <- ugrid
#get predictions for Bimon residuals
uniq_gid_bimon1$gpred <- predict.gam(fit2_1,uniq_gid_bimon1)
uniq_gid_bimon2$gpred <- predict.gam(fit2_2,uniq_gid_bimon2)
uniq_gid_bimon3$gpred <- predict.gam(fit2_3,uniq_gid_bimon3)
uniq_gid_bimon4$gpred <- predict.gam(fit2_4,uniq_gid_bimon4)
uniq_gid_bimon5$gpred <- predict.gam(fit2_5,uniq_gid_bimon5)
uniq_gid_bimon6$gpred <- predict.gam(fit2_6,uniq_gid_bimon6)
#merge things back together
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> merges
setkey(uniq_gid_bimon1,aodid)
setkey(data.m3_bimon1,aodid)
data.m3_bimon1 <- merge(data.m3_bimon1, uniq_gid_bimon1[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon2,aodid)
setkey(data.m3_bimon2,aodid)
data.m3_bimon2 <- merge(data.m3_bimon2, uniq_gid_bimon2[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon3,aodid)
setkey(data.m3_bimon3,aodid)
data.m3_bimon3 <- merge(data.m3_bimon3, uniq_gid_bimon3[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon4,aodid)
setkey(data.m3_bimon4,aodid)
data.m3_bimon4 <- merge(data.m3_bimon4, uniq_gid_bimon4[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon5,aodid)
setkey(data.m3_bimon5,aodid)
data.m3_bimon5 <- merge(data.m3_bimon5, uniq_gid_bimon5[,list(aodid,gpred)], all.x = T)
setkey(uniq_gid_bimon6,aodid)
setkey(data.m3_bimon6,aodid)
data.m3_bimon6 <- merge(data.m3_bimon6, uniq_gid_bimon6[,list(aodid,gpred)], all.x = T)
#reattach all parts
mod3 <- rbind(data.m3_bimon1,data.m3_bimon2,data.m3_bimon3,data.m3_bimon4,data.m3_bimon5,data.m3_bimon6)
# create pred.m3
mod3$pred.m3 <-mod3$pred.m3.mix+mod3$gpred
#hist(mod3$pred.m3)
#describe(mod3$pred.m3)
#recode negative into zero
mod3 <- mod3[pred.m3 >= 0]
saveRDS(mod3,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.pred.rds")
#clean
keep(mod3,res,rmse, sure=TRUE)
gc()
#########################
#prepare for m3.R2
#########################
#load mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1[,aodid:= paste(mod1$long_aod,mod1$lat_aod,sep="-")]
mod1<-mod1[,c("aodid","day","PM25","pred.m1","stn","MeanPbl","WSa","WDa","tempa"),with=FALSE]
#R2.m3
setkey(mod3,day,aodid)
setkey(mod1,day,aodid)
m1.2010 <- merge(mod1,mod3[, list(day,aodid,pred.m3)], all.x = T)
m3.fit.2010<- summary(lm(PM25~pred.m3,data=m1.2010))
res[res$year=="2010", 'm3.R2'] <- print(summary(lm(PM25~pred.m3,data=m1.2010))$r.squared)
#RMSPE
res[res$year=="2010", 'm3.PE'] <- print(rmse(residuals(m3.fit.2010)))
#spatial
###to check
spatial2010<-m1.2010 %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m1.fit.2010.spat<- lm(barpm ~ barpred, data=spatial2010)
res[res$year=="2010", 'm3.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010))$r.squared)
res[res$year=="2010", 'm3.PE.s'] <- print(rmse(residuals(m1.fit.2010.spat)))
#temporal
tempo2010<-left_join(m1.2010,spatial2010)
tempo2010$delpm <-tempo2010$PM25-tempo2010$barpm
tempo2010$delpred <-tempo2010$pred.m3-tempo2010$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempo2010)
res[res$year=="2010", 'm3.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010))$r.squared)
#############save midpoint
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/res.AQ.2010.rds")
saveRDS(res, "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/resALL.AQ.rds")
#########################
#import mod2
mod2<- readRDS( "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod2.AQ.2010.pred2.rds")
mod2<-mod2[,c("aodid","day","pred.m2"),with=FALSE]
#----------------> store the best available
mod3best <- mod3[, list(aodid, x_aod_ITM, y_aod_ITM, day, pred.m3)]
setkey(mod3best, day, aodid)
setkey(mod2, day, aodid)
mod3best <- merge(mod3best, mod2[,list(aodid, day, pred.m2)], all.x = T)
#reload mod1
mod1<- readRDS("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod1.AQ.2010.pred.rds")
mod1$aodid<-paste(mod1$long_aod,mod1$lat_aod,sep="-")
mod1<-mod1[,c("aodid","day","PM25","pred.m1"),with=FALSE]
setkey(mod1,day,aodid)
mod3best <- merge(mod3best, mod1, all.x = T)
mod3best[,bestpred := pred.m3]
mod3best[!is.na(pred.m2),bestpred := pred.m2]
mod3best[!is.na(pred.m1),bestpred := pred.m1]
#save
saveRDS(mod3best,"/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/mod3.AQ.2010.FINAL.rds")
#save for GIS
write.csv(mod3best[, list(LTPM = mean(bestpred, na.rm = T),
predvariance = var(bestpred, na.rm = T),
predmin = min(bestpred, na.rm = T),
predmax = max(bestpred, na.rm = T),
npred = sum(!is.na(bestpred)),
npred.m1 = sum(!is.na(pred.m1)),
npred.m2 = sum(!is.na(pred.m2)),
npred.m3 = sum(!is.na(pred.m3)),
x_aod_ITM = x_aod_ITM[1], y_aod_ITM = y_aod_ITM[1]),by=aodid], "/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN000_RWORKDIR/LTPM3.AQ.2010.csv", row.names = F)
#-------->>> loc stage
luf<-fread("/media/NAS/Uni/Projects/P046_Israel_MAIAC/3.Work/2.Gather_data/FN004_LU_full_dataset/local.csv")
setnames(luf,"tden","loc.tden")
setnames(luf,"elev50","loc.elev")
#rename dataset
m3.2010<-m1.2010
m3.2010<-na.omit(m3.2010)
#add 50m LU to CV data
setkey(m3.2010,stn)
setkey(luf,stn)
m3.2010.loc <- merge(m3.2010, luf, all.x = T)
#create residual mp3 variable
m3.2010.loc$res.m3<-m3.2010.loc$PM25-m3.2010.loc$pred.m3
#The GAM model
gam.out<-gam(res.m3~s(loc.tden)+s(loc.tden,MeanPbl)+s(loc.tden,WSa)+s(loc_p_os,fx=FALSE,k=4,bs='cr')+s(loc.elev,fx=FALSE,k=4,bs='cr')+s(dA1,fx=FALSE,k=4,bs='cr')+s(dsea,fx=FALSE,k=4,bs='cr'),data=m3.2010.loc)
#plot(bp.model.ps)
#summary(bp.model.ps)
## reg
m3.2010.loc$pred.m3.loc <-predict(gam.out)
m3.2010.loc$pred.m3.both <- m3.2010.loc$pred.m3 + m3.2010.loc$pred.m3.loc
res[res$year=="2010", 'm3.loc.R2'] <- print(summary(lm(PM25~pred.m3.both,data=m3.2010.loc))$r.squared)
res[res$year=="2010", 'm3.loc.I'] <-print(summary(lm(PM25~pred.m3.both,data=m3.2010.loc))$coef[1,1])
res[res$year=="2010", 'm3.loc.I.se'] <-print(summary(lm(PM25~pred.m3.both,data=m3.2010.loc))$coef[1,2])
res[res$year=="2010", 'm3.loc.S'] <-print(summary(lm(PM25~pred.m3.both,data=m3.2010.loc))$coef[2,1])
res[res$year=="2010", 'm3.loc.S.se'] <-print(summary(lm(PM25~pred.m3.both,data=m3.2010.loc))$coef[2,2])
#RMSPE
res[res$year=="2010", 'm3.loc.PE'] <- print(rmse(residuals(m3.fit.2010)))
#spatial
spatial2010.loc<-m3.2010.loc %>%
group_by(stn) %>%
summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m3, na.rm=TRUE))
m3.fit.2010.loc.s <- lm(barpm ~ barpred, data=spatial2010.loc)
res[res$year=="2010", 'm3.loc.R2.s'] <- print(summary(lm(barpm ~ barpred, data=spatial2010.loc))$r.squared)
res[res$year=="2010", 'm3.loc.PE.s'] <- print(rmse(residuals(m3.fit.2010.loc.s)))
#temporal
tempo2010.loc<-left_join(m3.2010.loc,spatial2010.loc)
tempo2010.loc$delpm <-tempo2010.loc$PM25-tempo2010.loc$barpm
tempo2010.loc$delpred <-tempo2010.loc$pred.m3.both-tempo2010.loc$barpred
mod_temporal.loc <- lm(delpm ~ delpred, data=tempo2010.loc)
res[res$year=="2010", 'm3.loc.R2.t'] <- print(summary(lm(delpm ~ delpred, data=tempo2010.loc))$r.squared)
keep(res, sure=TRUE)
gc()
|
7c965adfddf66cb80cc52fd4f5ad5d830443692b
|
92b79862c8bcb6b8056ea2cfc2609e07c29c5377
|
/PCAExample.R
|
42b9d5b89bdf0fb8bb99d2047317685e9ad77b72
|
[] |
no_license
|
d0p34m1n3/PortfolioOptimisation
|
8cd647cdad6966fa26ae7136b8515334ba870dd6
|
a6d5c24efd9032f5075b9e90a906f6e3491d1542
|
refs/heads/master
| 2020-04-13T17:08:19.121951
| 2015-05-15T12:28:13
| 2015-05-15T12:28:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
PCAExample.R
|
# Demonstration: principal-component "portfolios" from asset returns.
# Fixes over the original: library() instead of require(), `<-` instead of
# `=`, TRUE/FALSE instead of T/F, dropped the bogus `cor = F` argument to
# prcomp() (that argument belongs to princomp(); prcomp() silently ignores
# it via `...`), renamed `eval` so it no longer shadows base::eval, and
# removed the rm(list = ls()) workspace wipe.
library(RCurl)
# Pull the Systematic Investor Toolbox (SIT) over HTTPS and source it.
sit <- getURLContent('https://github.com/systematicinvestor/SIT/raw/master/sit.gz', binary = TRUE, followlocation = TRUE, ssl.verifypeer = FALSE)
con <- gzcon(rawConnection(sit, 'rb'))
source(con)
close(con)
load.packages('quantmod')
# Download four Vanguard index funds and align them on common dates.
data <- new.env()
tickers <- spl("VBMFX,VTSMX,VGTSX,VGSIX")
getSymbols(tickers, src = 'yahoo', from = '1980-01-01', env = data, auto.assign = TRUE)
for (i in ls(data)) data[[i]] <- adjustOHLC(data[[i]], use.Adjusted = TRUE)
bt.prep(data, align = 'remove.na', dates = '1990::2013')
prices <- data$prices
# Simple daily returns; equal-weight portfolio return for reference.
ret <- na.omit(prices / mlag(prices) - 1)
weight <- matrix(1 / ncol(ret), nrow = 1, ncol = ncol(ret))
p.ret <- weight %*% t(ret)
# PCA on the return matrix; verify eigenvectors diagonalize the
# (demeaned) covariance matrix: t(V) %*% C %*% V should equal the
# eigenvalues on the diagonal.
demean <- scale(coredata(ret), center = TRUE, scale = FALSE)
covm <- cov(demean)
pca <- prcomp(ret)
evec <- pca$rotation[] # eigenvectors
evals <- pca$sdev^2    # eigenvalues
print(diag(t(evec) %*% covm %*% evec))
print(evals)
# Principal-component portfolio time series.
inv.evec <- solve(evec) # inverse of the eigenvector matrix
pc.port <- inv.evec %*% t(ret)
|
5d2130606abcd213a0b066cabd6189d05a0fd1f6
|
b3b3e49423863dee73cc9d02d69e1bc029f51dd2
|
/man/methodList.Rd
|
87b54cc23ce3e1588646f6556df5564d2687b2d6
|
[] |
no_license
|
vjcitn/benchOOM
|
0fc5d6d8160f84bf3f89eb87c773212007af9dc8
|
e6983335cdfb974e53568c8954eed5ff3d643ed9
|
refs/heads/master
| 2021-07-09T10:20:01.712116
| 2021-04-16T03:59:01
| 2021-04-16T03:59:01
| 80,768,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 375
|
rd
|
methodList.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataBench.R
\name{methodList}
\alias{methodList}
\title{helper for creating a methodlist}
\usage{
methodList(methods = c("hdf5", "bigm", "sqlite", "ff"))
}
\arguments{
\item{methods}{a character vector with tags for available round trip methods}
}
\description{
helper for creating a methodlist
}
|
208aff327cebf909cbf866d65d053f2754701711
|
86976cc7492495a5a5bcb2f5ffa63731a634768c
|
/man/ShakespeareWordHist.Rd
|
d06c9545acc78c82e91b8894624e06c9de384fa0
|
[] |
no_license
|
GiancoAngelozzi/preseqR
|
e9d9d84bd1c4d5584d0d98ea380ca85f98787a4e
|
99362f67dd09b15722cb91632f5be1784251751d
|
refs/heads/master
| 2021-01-12T19:59:38.680781
| 2015-06-04T00:00:00
| 2015-06-04T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 474
|
rd
|
ShakespeareWordHist.Rd
|
\name{ShakespeareWordHist}
\alias{ShakespeareWordHist}
\docType{data}
\title{Shakespeare's word type frequencies}
\description{Shakespeare's word type frequency data, taken from
Efron, B., & Thisted, R. (1976).}
\references{
Efron, B., & Thisted, R. (1976). Estimating the number of unseen species:
How many words did Shakespeare know?. Biometrika, 63(3), 435-447.
}
\examples{
##load library
library(preseqR)
##load data
data(ShakespeareWordHist)
}
\keyword{ data }
|
66a1389ee50ff5ca0dc8f7e18a6be8cd65b8a8ba
|
e9da61695d915c1ea1cd76dbf644b149b4c8c690
|
/Code/R/setup.R
|
f64da57c34331debebb118b5443e4a658a4405b6
|
[] |
no_license
|
gbohner/Cambridge_talk_20190306
|
90aa3a485dfeb7328d7fb9d5c36e5c5091b28569
|
90c43c7e889fc47bab3b74d465a9ae54c3f5a56b
|
refs/heads/master
| 2020-04-25T04:40:39.922001
| 2019-03-05T20:50:01
| 2019-03-05T20:50:01
| 172,518,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
r
|
setup.R
|
# One-time workshop setup: clone/update the talk repository into the user's
# home directory, copy its R code there, point .libPaths() at preinstalled
# packages, and open the exercise notebook. Intentionally mutates global
# state (working directory, library paths).
setwd("~")
# NOTE(review): system("cd ~") changes directory only inside the spawned
# shell, not in the R session — the setwd("~") above is what takes effect.
system("cd ~")
if (!file.exists("Cambridge_talk_20190306")){
  system("git clone https://github.com/gbohner/Cambridge_talk_20190306")
}
setwd("~/Cambridge_talk_20190306")
system("git pull")
setwd("~")
system("cp ~/Cambridge_talk_20190306/Code/R/* ~/")
# Set the libPaths to the already installed packages
curLibPaths = .libPaths()
# Append "/3.5" (R minor version) to the second library path entry;
# assumes .libPaths() returns at least two entries — TODO confirm.
curLibPaths[[2]] = paste0(curLibPaths[[2]],"/3.5")
.libPaths(curLibPaths)
# Therefore we do NOT need to reinstall packages on every user (takes very long)
#source("install_packages.R")
# Open the file that we want to edit
file.edit("scrnaseq_dimred_notebook.Rmd")
|
800b08459f4d77ddc763e52784a71ec3d2a88250
|
43332ed929f6417071c96464de00d1c5caf5df98
|
/R/geom-segment2.R
|
e8d8c2e9b4abb65dd01e1c02e1cbde4a0c7a5a89
|
[] |
no_license
|
aaa7260/ggcor
|
e81a305855b724abaa4e115b29e40d582b2ac1a3
|
dd3687430b77a9326dfcc258d359ac7c108a65ad
|
refs/heads/master
| 2023-02-27T00:15:07.307000
| 2020-07-22T13:44:24
| 2020-07-22T13:44:24
| 334,437,269
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,742
|
r
|
geom-segment2.R
|
#' Segment2 layer
#'
#' @inheritParams ggplot2::layer
#' @inheritParams ggplot2::geom_segment
#' @section Aesthetics:
#' \code{geom_segment2()} understands the following aesthetics (required aesthetics are in bold):
#' \itemize{
#' \item \strong{\code{x}}
#' \item \strong{\code{y}}
#' \item \strong{\code{xend}}
#' \item \strong{\code{yend}}
#' \item \code{alpha}
#' \item \code{colour}
#' \item \code{linetype}
#' \item \code{width}
#' }
#' @importFrom ggplot2 layer ggproto geom_segment GeomSegment aes draw_key_path
#' @rdname geom_segment2
#' @author Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
#' @export
# Constructor: standard ggplot2 layer() boilerplate that delegates drawing to
# the GeomSegment2 ggproto object. Arguments in ... are forwarded to the geom
# as params (e.g. arrow, lineend, linejoin).
geom_segment2 <- function(mapping = NULL,
                          data = NULL,
                          stat = "identity",
                          position = "identity",
                          ...,
                          na.rm = FALSE,
                          show.legend = NA,
                          inherit.aes = TRUE) {
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomSegment2,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      ...
    )
  )
}
#' @rdname geom_segment2
#' @format NULL
#' @usage NULL
#' @export
# ggproto object backing geom_segment2(). Inherits from ggplot2::GeomSegment
# and overrides draw_panel() so that the segment end points (xend/yend) are
# run through the coordinate transform independently of the start points.
GeomSegment2 <- ggproto(
  "GeomSegment2", GeomSegment,
  default_aes = aes(colour = "grey35", size = 0.5, linetype = 1, alpha = NA),
  required_aes = c("x", "y", "xend", "yend"),
  draw_panel = function(data, panel_params, coord, arrow = NULL, arrow.fill = NULL,
                        lineend = "butt", linejoin = "round", na.rm = FALSE) {
    # Nothing to draw for an empty layer; empty() is a package-internal helper.
    if(empty(data)) {
      return(ggplot2::zeroGrob())
    }
    coords <- coord$transform(data, panel_params)
    # Arrow heads default to the segment colour (%||% = "x if not NULL, else y").
    arrow.fill <- arrow.fill %||% coords$colour
    # Dummy bindings to silence R CMD check NOTEs about NSE column names.
    x <- y <- xend <- yend <- NULL
    # Transform the end points as if they were start points, then rename back
    # and splice them into the transformed start-point data.
    ends <- dplyr::rename(data[setdiff(names(data), c("x", "y"))], x = xend, y = yend)
    ends <- coord$transform(ends, panel_params)
    ends <- dplyr::rename(ends, xend = x, yend = y)
    coords <- cbind(coords[setdiff(names(coords), c("xend", "yend"))], ends[c("xend", "yend")])
    # ggname() is a package-internal helper that labels the grob for inspection.
    ggname("geom_segment2",
           grid::segmentsGrob(coords$x, coords$y, coords$xend, coords$yend,
                              default.units = "native",
                              gp = grid::gpar(
                                col = scales::alpha(coords$colour, coords$alpha),
                                fill = scales::alpha(arrow.fill, coords$alpha),
                                lwd = coords$size * ggplot2::.pt,
                                lty = coords$linetype,
                                lineend = lineend,
                                linejoin = linejoin
                              ),
                              arrow = arrow
           )
    )
  },
  draw_key = draw_key_path
)
|
b88130c7f952c7bf0b2141c7556c7023edb8d651
|
3db221fa13b0806047df98103c0c03df5520cc9b
|
/R scripts for analysis/export_data.r
|
08b2856815f7c254b8594ca0da72c89b131de0e2
|
[] |
no_license
|
xsatishx/cloud-repo
|
55d48aa37ecb7daf772d754e3c2e74a7f6819349
|
50bc0d4ecda5a09bdf29cf78ffdd8c72d59d8f7f
|
refs/heads/master
| 2020-12-25T16:54:21.281357
| 2018-10-24T04:38:46
| 2018-10-24T04:38:46
| 49,554,166
| 1
| 2
| null | 2020-07-24T04:39:07
| 2016-01-13T06:23:16
|
HTML
|
UTF-8
|
R
| false
| false
| 163
|
r
|
export_data.r
|
# Write a data object to a tab-separated text file.
# Row names are kept, and the header row gets a leading empty cell
# (col.names = NA), matching spreadsheet-style exports. Values are
# written unquoted with Unix line endings.
export_data <- function(data_object, file_name) {
  write.table(
    data_object,
    file      = file_name,
    sep       = "\t",
    col.names = NA,
    row.names = TRUE,
    quote     = FALSE,
    eol       = "\n"
  )
}
|
d917e63728782522208321f25a76d192e9c581be
|
92e240738a4ccf673b9f3610386eaa08eef26d6f
|
/volatility/momentum/study.R
|
6b0b368177dd139818bc662ad5f31685748af051
|
[] |
no_license
|
stockviz/blog
|
564a4671202b92a2d63f13f0207fd8a35810c0b6
|
e00c055742a1229c612669ee29d846a6e2475a43
|
refs/heads/master
| 2023-09-01T15:59:07.746886
| 2023-08-31T04:01:37
| 2023-08-31T04:01:37
| 138,372,618
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,702
|
r
|
study.R
|
source("d:/stockviz/r/config.r")
source("D:/StockViz/public/blog/common/plot.common.R")
reportPath<-"."
library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('lubridate')
library('tidyverse')
library('reshape2')
library('ggthemes')
options(stringsAsFactors = FALSE)
options("scipen"=100)
pdf(NULL)
# --- Script-level setup: load index prices and build features ---------------
# DB credentials (ldbserver, ldbuser, ldbpassword) come from config.r sourced
# at the top of the file.
lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 17 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, "StockViz", ldbuser, ldbpassword), case = "nochange", believeNRows = TRUE)
# In-sample window used for the scatter plots below.
startDate <- as.Date("2010-01-01")
endDate <- as.Date("2015-12-31")
#indexName <- "NIFTY MIDCAP150 MOMENTUM 50 TR"
indexName <- "NIFTY200 MOMENTUM 30 TR"
# Daily closing prices for the chosen index over the window.
nEod <- sqlQuery(lcon, sprintf("select px_close, time_stamp from bhav_index where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", indexName, startDate, endDate))
nXts <- xts(nEod[,1], nEod[,2])
nXts <- merge(nXts, dailyReturn(nXts)) #1, 2
# Rolling volatility (50/100/200-day sd of daily returns) and rolling
# cumulative 5/20-day returns; column numbers match the names set below.
nXts <- merge(nXts,
	rollapply(nXts[,2], 50, sd), #3
	rollapply(nXts[,2], 100, sd), #4
	rollapply(nXts[,2], 200, sd), #5
	rollapply(nXts[,2], 5, Return.cumulative), #6
	rollapply(nXts[,2], 20, Return.cumulative) #7
	)
# Negative lag shifts the rolling returns back: forward 5/20-day returns.
nXts <- merge(nXts, stats::lag(nXts[, 6], -5), stats::lag(nXts[, 7], -20))
names(nXts) <- c("INDEX", "RET", "SD50", "SD100", "SD200", "RET5", "RET20", "RET5_LAG5", "RET20_LAG20")
# Column-name pairs iterated by plotOverlapping().
aNames <- c("SD50", "SD100", "SD200")
bNames <- c("RET5_LAG5", "RET20_LAG20")
# Scatter plot of a forward-return column (bName) against a historical
# volatility column (aName) of the global series nXts, saved as a png under
# reportPath. Relies on globals: nXts, indexName, startDate, endDate,
# reportPath.
doScatter <- function(aName, bName){
	toPlot <- data.frame(nXts[, c(aName, bName)])
	# Use the .data pronoun instead of the deprecated aes_string() for
	# programmatic (string-based) aesthetic mapping.
	p <- ggplot(toPlot, aes(x = .data[[aName]], y = .data[[bName]])) +
		theme_economist() +
		geom_point() +
		labs(fill="", color="", size="", title=sprintf("%s Forward Returns vs. Historical Volatility", indexName), subtitle=sprintf("[%s:%s]", startDate, endDate)) +
		annotate("text", x=min(toPlot[,1]), y=min(toPlot[,2]), label = "@StockViz", hjust=0, vjust=0, col="white", cex=6, fontface = "bold", alpha = 0.8)
	# Pass the plot object explicitly rather than relying on ggsave()'s
	# last_plot() default — clearer and robust inside a function.
	ggsave(sprintf("%s/%s.%s.%s.png", reportPath, indexName, aName, bName), plot = p, width=16, height=8, units="in")
}
# Draw every (volatility, forward-return) scatter combination by calling
# doScatter() for each pair from the globals aNames x bNames.
# Iterating over the vectors directly replaces the fragile 1:length(x)
# index pattern (which misbehaves for empty vectors).
plotOverlapping <- function(){
	for (aName in aNames) {
		for (bName in bNames) {
			doScatter(aName, bName)
		}
	}
}
plotOverlapping()
################################################################
# monthly rebalance based on static look-back SD and static threshold
# For a given volatility look-back sdLb (days): (1) in-sample (uses the global
# nEod loaded above, 2010-2015) compute monthly returns gated by a grid of
# sd thresholds, pick the threshold with max cumulative return and the one
# with min drawdown; (2) out-of-sample (2016-2022, re-queried from the DB)
# apply those two thresholds and plot cumulative returns.
# Reads globals: nEod, lcon, indexName, reportPath; local nEod/nXts shadow
# the script-level objects of the same name.
backtest <- function(sdLb){
	# back test to calc max return & min drawdown SDs
	nXts <- xts(nEod[,1], nEod[,2])
	nXts <- merge(nXts, dailyReturn(nXts), monthlyReturn(nXts)) #1, 2, 3
	nXts <- merge(nXts, rollapply(nXts[,2], sdLb, sd)) #4
	nXts <- na.omit(nXts)
	# Negative lag = next month's return (the quantity being forecast).
	nXts <- merge(nXts, stats::lag(nXts[, 3], -1)) #5
	# NOTE(review): the column is named "SD200" regardless of sdLb.
	names(nXts) <- c("INDEX", "RET", "RETM", "SD200", "RETM_LAG1")
	# Threshold grid: stay invested next month only when rolling sd <= X.
	sdThreshs <- seq(0.01, 0.05, by=0.005)
	threshRets <- do.call(merge, lapply(sdThreshs, function(X) ifelse(nXts$SD200 > X, 0, nXts$RETM_LAG1)))
	names(threshRets) <- sapply(sdThreshs, function(X) paste0("RETM", X))
	cumRets <- sort(apply(threshRets, 2, Return.cumulative), decreasing=T)
	print(cumRets)
	maxDDs <- sort(apply(threshRets, 2, maxDrawdown))
	# Recover the numeric thresholds back from the winning column names.
	maxRetSd <- as.numeric(gsub("RETM", "", names(cumRets[1])))
	minDDSd <- as.numeric(gsub("RETM", "", names(maxDDs[1])))
	toPlot <- na.omit(merge(nXts$RETM_LAG1, threshRets))
	Common.PlotCumReturns(toPlot, sprintf("%s/%d-day std. dev.", indexName, sdLb), "(EOM rebalance)", sprintf("%s/%s.%d.cumulative.png", reportPath, indexName, sdLb))
	# forward test
	startDate <- as.Date("2016-01-01")
	endDate <- as.Date("2022-10-31")
	# Query starts 360 days earlier so the rolling sd has warm-up history.
	nEod <- sqlQuery(lcon, sprintf("select px_close, time_stamp from bhav_index where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", indexName, startDate-360, endDate))
	nXts <- xts(nEod[,1], nEod[,2])
	nXts <- merge(nXts, dailyReturn(nXts), monthlyReturn(nXts)) #1, 2, 3
	nXts <- merge(nXts, rollapply(nXts[,2], sdLb, sd)) #4
	nXts <- na.omit(nXts)
	nXts <- merge(nXts, stats::lag(nXts[, 3], -1)) #5
	names(nXts) <- c("INDEX", "RET", "RETM", "SD200", "RETM_LAG1")
	# Apply the two in-sample-selected thresholds out of sample.
	nXts$MAX_RET <- ifelse(nXts$SD200 > maxRetSd, 0, nXts$RETM_LAG1)
	nXts$MIN_DD <- ifelse(nXts$SD200 > minDDSd, 0, nXts$RETM_LAG1)
	toPlot <- na.omit(nXts[paste0(startDate, "/"), c("RETM_LAG1", "MAX_RET", "MIN_DD")])
	Common.PlotCumReturns(toPlot, sprintf("%s/%.3f %.3f %d-day std. dev.", indexName, maxRetSd, minDDSd, sdLb), "(EOM rebalance)", sprintf("%s/%s.max.%d.cumulative.png", reportPath, indexName, sdLb))
	# Zoom in on the post-COVID-crash period.
	toPlot <- toPlot["2020-05-01/",]
	Common.PlotCumReturns(toPlot, sprintf("%s/%.3f %.3f %d-day std. dev.", indexName, maxRetSd, minDDSd, sdLb), "(EOM rebalance)", sprintf("%s/%s.max.%d.cumulative.2020.png", reportPath, indexName, sdLb))
}
# Run the backtest for each volatility look-back (in days).
sdLbs <- c(10, 20, 50, 100, 200) #days
# Pass backtest directly; wrapping it in function(X) backtest(X) is redundant.
lapply(sdLbs, backtest)
|
5596e5d141205e3ef858bb4fe388a5f6d9e1eb36
|
fb31e635792ce4ffbcaff2e145a6fbb37d7e9852
|
/man/truncation.Rd
|
8f787d4a44957df2196b398304c0828da4df6bf1
|
[] |
no_license
|
khliland/plsVarSel
|
57c3ccfefcbe549b0fcdff452e581e1b0db0833f
|
5c08b5b59444de3505fe579b12cbc46ddad4f3dc
|
refs/heads/master
| 2023-01-24T07:50:38.251503
| 2023-01-12T10:35:48
| 2023-01-12T10:35:48
| 55,041,088
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,319
|
rd
|
truncation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc.R
\name{truncation}
\alias{truncation}
\title{Trunction PLS}
\usage{
truncation(..., Y.add, weights, method = "truncation")
}
\arguments{
\item{...}{arguments passed on to \code{mvrV}).}
\item{Y.add}{optional additional response vector/matrix found in the input data.}
\item{weights}{optional object weighting vector.}
\item{method}{choice (default = \code{truncation}).}
}
\value{
Returns an object of class mvrV, similar to the mvr object of the pls package.
}
\description{
Distribution based truncation for variable selection in subspace
methods for multivariate regression.
}
\details{
Loading weights are truncated around their median based on confidence intervals
for modelling without replicates (Lenth et al.). The arguments passed to \code{mvrV} include
all possible arguments to \code{\link[pls:mvr]{cppls}} and the following truncation parameters
(with defaults) trunc.pow=FALSE, truncation=NULL, trunc.width=NULL, trunc.weight=0,
reorth=FALSE, symmetric=FALSE.
The default way of performing truncation involves the following parameter values:
truncation="Lenth", trunc.width=0.95, indicating Lenth's confidence intervals (assymmetric),
with a confidence of 95%. trunc.weight can be set to a number between 0 and 1 to give a
shrinkage instead of a hard threshold. An alternative truncation strategy can be used with:
truncation="quantile", in which a quantile line is used for detecting outliers/inliers.
}
\examples{
data(yarn, package = "pls")
tr <- truncation(density ~ NIR, ncomp=5, data=yarn, validation="CV",
truncation="Lenth", trunc.width=0.95) # Default truncation
summary(tr)
}
\references{
K.H. Liland, M. Høy, H. Martens, S. Sæbø: Distribution based truncation for
variable selection in subspace methods for multivariate regression, Chemometrics and
Intelligent Laboratory Systems 122 (2013) 103-111.
}
\seealso{
\code{\link{VIP}} (SR/sMC/LW/RC), \code{\link{filterPLSR}}, \code{\link{shaving}},
\code{\link{stpls}}, \code{\link{truncation}},
\code{\link{bve_pls}}, \code{\link{ga_pls}}, \code{\link{ipw_pls}}, \code{\link{mcuve_pls}},
\code{\link{rep_pls}}, \code{\link{spa_pls}},
\code{\link{lda_from_pls}}, \code{\link{lda_from_pls_cv}}, \code{\link{setDA}}.
}
\author{
Kristian Hovde Liland.
}
|
8def0853f7552a2a1fafa543053997c14d718922
|
5a7523ffc00dfaee9d283f12d8e6f6e761ad4afa
|
/logreg code.R
|
8c1260b367c87ae7438eb4f8e8eaee7d7d1cc80e
|
[] |
no_license
|
ellierk/honors-thesis
|
8a6854cde600639ef9c46450cb5a9c4c2c71e3a8
|
0ac22674e98a80cb14dc88d2883d04121d7bfbd1
|
refs/heads/master
| 2023-01-11T09:18:38.728534
| 2020-11-19T02:19:57
| 2020-11-19T02:19:57
| 296,682,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 991
|
r
|
logreg code.R
|
# Expand per-observation counts in `squid` into one row per individual bait
# (consumed = "1"/"0") for a logistic regression. Relies on dplyr
# (select/mutate/add_row via %>%). `squid` must be loaded beforehand.
df1 <- squid %>% select(site, date, spacing_m, temp_avg, sal_avg, dist_from_bags, reclaimed, absent)
# Seed df2 with the first row flagged consumed. NOTE(review): this seed row
# remains in the output in addition to the rows generated below — confirm.
df2 <- df1[1,] %>% mutate(consumed = "1")
v = df1$reclaimed
i = 1
l=1
# Outer loop over rows of df1. NOTE(review): the strict `<` bound means the
# final row (i == length(v)) is never expanded — confirm intended.
while(i < length(v)) {
  l=1
  # Inner loop adds v[i]-1 rows for row i (again `<`, not `<=`).
  while(l < v[i]) {
    # ifelse() is used here purely for its side effect: the condition is
    # scalar, so exactly one of the two add_row() assignments is evaluated.
    ifelse(l < df1$absent[i],
           (df2 = df2 %>% add_row(site = df1$site[i], date = df1$date[i], spacing_m = df1$spacing_m[i], temp_avg= df1$temp_avg[i],
                                  sal_avg= df1$sal_avg[i], dist_from_bags = df1$dist_from_bags[i], reclaimed = df1$reclaimed[i],
                                  absent = df1$absent[i], consumed = "1")),
           (df2 = df2 %>% add_row(site = df1$site[i], date = df1$date[i], spacing_m = df1$spacing_m[i], temp_avg= df1$temp_avg[i],
                                  sal_avg= df1$sal_avg[i], dist_from_bags = df1$dist_from_bags[i], reclaimed = df1$reclaimed[i],
                                  absent = df1$absent[i], consumed = "0"))
    )
    l = l+1
  }
  i = i+1
}
# Write the expanded individual-level data for the logistic regression.
write.csv(df2, "logreg.csv")
|
354b67fd613f866810f70f3a525224aaca65f45a
|
e9af29f8578f6fed182a12d625b6e846b3c3086b
|
/cds/exdata/week3/03_04_R_colors.R
|
4cda29650f48cc18fc6ea541352dd6a99d013af7
|
[] |
no_license
|
ceik/coursera
|
05750f640d98ffdfb4853871e3ae0c9c1ef86851
|
751a04177990edd7ba7c5a8c6bd976dcfcb62dfc
|
refs/heads/master
| 2021-01-10T13:28:38.220484
| 2018-10-28T08:10:59
| 2018-10-28T08:10:59
| 50,228,306
| 0
| 0
| null | 2016-01-24T12:47:38
| 2016-01-23T07:22:44
|
HTML
|
UTF-8
|
R
| false
| false
| 3,429
|
r
|
03_04_R_colors.R
|
##### Plotting and Color in R #####
#########################################
### Problem
# Colors can make it much easier for the reader to understand your data/plot.
# The default color schemes in R are often suboptimal. This is true both from a
# design and a functional (colors are often supposed to illustrate data) POV.
#
# E.g. the default colors for the col argument are:
# 1 = Open black
# 2 = Red
# 3 = Green
#
# Default image colors are heat colors (red = low, yellow = high) or topo colors
# (blue = low, green = mid, yellow = high)
# Some packages and functions can help here.
### Color Utilities in R
# the grDevices package has two functions (colorRamp() and colorRampPalette())
# that take palettes of colors and interpolate between them.
# The function
colors()
# lists the names of colors that can be used in any plotting function.
# The colorRamp() takes a palette of colors as an input and returns a function
# that takes values between 0 and 1, indicating the extremes of the given color
# palette. (See also the grey() function)
# The new function returns a matrix of RGB values.
pal <- colorRamp(c("red", "blue"))
pal(0)
pal(1)
pal(0.5)
pal(seq(0,1, len=10))
# The colorRampPalette() function also takes a palette of colors as an input and
# returns a function. However, this new function takes integer arguments and
# returns a vector of colors interpolating the palette. (Like heat.colors() and
# topo.colors())
# The new function returns a character vector of hexadecimals. In these values
# the first two digits represent red, the second two represent green, and the
# last two represent blue. F is the highest digit, 0 the lowest.
palp <- colorRampPalette(c("red", "yellow"))
palp(2)
# The following gives a total number of 10 colors between red and yellow.
palp(10)
### RColorBrewer Package
# This package contains useful color palettes for three types of data:
# Sequential data: Ordered data, e.g. going from something low to high.
# Diverging data: Showing the deviation from something (e.g. a mean or specific
# value).
# Qualitative data: Categorical or factor data that is not ordered.
# Palette information from this package can be used in conjunction with the
# colorRamp() and colorRampPalette() functions.
# Lecture slides contain all color palettes in the package.
install.packages("RColorBrewer")
library("RColorBrewer")
# This also lists all available palettes:
?brewer.pal
cols <- brewer.pal(3, "BuGn")
cols
palb <- colorRampPalette(cols)
palb
library(datasets)
image(volcano, col = palb(20))
### The smoothScatter() Function
# The smoothScatter() function is very useful for plotting many points at once
# (that would overlap each other). The function creates a 2-D histogram of the
# points in your plot and plots those histograms
x <- rnorm(10000)
y <- rnorm(10000)
smoothScatter(x,y)
### Other Useful Functions
# The rgb() function converts any rgb values into hexadecimal color values,
# which can be passed to functions that require hexadecimal then. Transparency
# can be added via the alpha argument of the rgb() function.
a <- rnorm(2000)
b <- rnorm(2000)
plot(a, b, pch = 19)
# Adding some transparancy to this will make it a bit histogram like.
plot(a, b, col = rgb(0,0,0, alpha = 0.2), pch = 19)
# The colorspace package can be used to gain different kinds of controls over
# colors. Read documentation for more info.
|
bb621f18b6639bd9266b4edd7506d4b14f73d235
|
c18fdf99a4972d7c8da846723283578ec6a6584e
|
/montecarlo/test.R
|
d59a2adbc9fc080fcf34ce08f27bfdd616a5ae40
|
[] |
no_license
|
jiqis/sandbox
|
39ce6c11a7059652af4fa0c71bc0f2c88569aa49
|
a38470689c818d3a526f7458ce731692a295cc9a
|
refs/heads/master
| 2021-09-21T16:42:04.086678
| 2018-08-29T00:37:55
| 2018-08-29T00:37:55
| null | 0
| 0
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 153
|
r
|
test.R
|
# Load the data file. (The original comment here was mojibake-encoded
# Japanese, likely "file".)
data <- read.csv("./data.csv")
# Response: average temperature; predictor: day length.
data.temp<-data$average_temperature
data.dlen<-data$day_length
# Simple linear regression of temperature on day length.
m1<-lm(data.temp~data.dlen)
summary(m1)
|
60517d296f758ec86438883f4fc5a6c47e47753d
|
e6d29f8a2fea50e45e37285d3d47985763fd8c03
|
/man/ipaq_scores.Rd
|
b419e4e9fb1ee0e8e57a25d10f2741f0cdca121a
|
[
"MIT"
] |
permissive
|
Mariana-plr/IPAQlong
|
de0b7b8d532e2d3dccd20204c42911b8c0f897c2
|
6f57ae46fa8779e08fa8efac56ce9b394b8f267e
|
refs/heads/main
| 2023-08-04T09:11:44.930621
| 2023-07-29T12:03:08
| 2023-07-29T12:03:08
| 471,093,506
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,143
|
rd
|
ipaq_scores.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ipaq_scores.R
\name{ipaq_scores}
\alias{ipaq_scores}
\title{IPAQ scores}
\usage{
ipaq_scores(data, truncate = F)
}
\arguments{
\item{data}{A data frame object containing 25 columns with the replies to the IPAQ long format (parts 1-4).
Yes/no replies should be coded as yes-1, no-0. Time should be in minutes.}
\item{truncate}{Logical vector. If TRUE all walking, moderate and vigorous time variables are truncated following the IPAQ short rule.
Variables exceeding 180 minutes are truncated to be equal to 180 minutes. Default FALSE.}
}
\value{
A data frame object with the continuous (metabolic equivalent of task minutes (MET-min)/week) and categorical scores (low, moderate, high).
Returns NA for cases with missing values.
}
\description{
Calculates the continuous and categorical scores for the 'International Physical Activity Questionnaire (IPAQ)'
long form.
}
\references{
The IPAQ Group (2005). Guidelines for Data Processing and Analysis of the International Physical Activity Questionnaire. Retrieved from <https://sites.google.com/site/theipaq/home>
}
|
82f3d969d6eba6c1e92a8d496424703d44aa8567
|
e4ff3a5fc17302d8d4fd86b38072e67ffe1aedec
|
/man/wfshat.Rd
|
00f3e770e45a8b753bfeca6750bd85eefca32d22
|
[] |
no_license
|
cran/robeth
|
5782cfcb86e6931152dc8a0b9b8f71e97068e449
|
5b60aabc7c1b21f15d73d1246ab40c1defdf5f7f
|
refs/heads/master
| 2023-08-31T11:44:10.694653
| 2023-08-22T08:00:05
| 2023-08-22T09:31:11
| 17,699,284
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 482
|
rd
|
wfshat.Rd
|
\name{wfshat}
\alias{wfshat}
\title{ Schweppe original weight proposal }
\description{
See Marazzi A. (1993), p.137}
\usage{
wfshat(xt, n = nrow(xt), sh)
}
\arguments{
\item{xt}{ See reference}
\item{n}{ See reference}
\item{sh}{ See reference}
}
\value{
See reference
}
\references{
Marazzi A. (1993) \emph{Algorithm, Routines, and S functions
for Robust Statistics}. Wadsworth & Brooks/cole, Pacific Grove,
California. p.137
}
\keyword{robust}
|
e0cfd4eb6ce27bd1346a03d8212383c8dc992a2a
|
8a6e3522bfbefa3abd245ff413bc5824544bc231
|
/R/print.summary.spsur.R
|
2a2bab75a27651aa961c83ce65139bc6a9f0900e
|
[] |
no_license
|
rominsal/spsur
|
be3f1a338b9ba97750ed3c8dfd0ab1aea0b4170c
|
75fc1a8d6fbf91572f65d64df030d20c0aea6dc0
|
refs/heads/master
| 2022-05-14T12:36:34.534139
| 2022-04-22T20:43:55
| 2022-04-22T20:43:55
| 131,492,488
| 12
| 4
| null | 2022-04-22T20:43:55
| 2018-04-29T12:25:20
|
R
|
UTF-8
|
R
| false
| false
| 2,605
|
r
|
print.summary.spsur.R
|
#' @name print.summary.spsur
#' @rdname print.summary.spsur
#'
#' @title Print method for objects of class summary.spsur.
#'
#' @param x object of class \emph{summary.spsur}.
#' @param digits number of digits to show in printed tables.
#' Default: max(3L, getOption("digits") - 3L).
#' @param ... further arguments passed to or from other methods.
#'
#' @author
#' \tabular{ll}{
#' Fernando Lopez \tab \email{fernando.lopez@@upct.es} \cr
#' Roman Minguez \tab \email{roman.minguez@@uclm.es} \cr
#' Jesus Mur \tab \email{jmur@@unizar.es} \cr
#' }
#'
#' @seealso
#' \code{\link{summary.spsur}}.
#'
#'
#' @examples
#' # See examples for \code{\link{spsurml}} or
#' # \code{\link{spsur3sls}} functions.
#' @export
print.summary.spsur <- function(x, digits = max(3L, getOption("digits") - 3L),
                                ...) {
  # Number of equations in the SUR system.
  G <- x$G
  cat("Call:\n")
  print(x$call)
  cat("\n","\n")
  cat("Spatial SUR model type: ",x$type,"\n\n")
  # Coefficient table and R-squared per equation.
  # NOTE(review): assumes G >= 1; x$R2[1] is the pooled R-squared, so
  # equation i reads x$R2[i+1].
  for (i in 1:G){
    cat("Equation ",i,"\n")
    printCoefmat(x$coef_table[[i]], P.values = TRUE, has.Pvalue = TRUE)
    cat("R-squared: ", formatC(x$R2[i+1], digits = 4), " ", sep = "")
    cat("\n"," ")
  }
  cat("\n")
  # Multi-equation or multi-period models report the full residual
  # covariance; single-equation models report a scalar standard error.
  if (x$Tm>1 | x$G>1){
    cat("Variance-Covariance Matrix of inter-equation residuals:")
    prmatrix(x$Sigma,digits=4,
             rowlab=rep("", nrow(x$Sigma)), collab = rep("", ncol(x$Sigma)))
  } else {
    cat("Residual standard error:",formatC(sqrt(x$Sigma), digits = 4, width = 6))
  }
  if (x$Tm>1 | x$G>1){
    cat("Correlation Matrix of inter-equation residuals:")
    prmatrix(x$Sigma_corr,digits=3,
             rowlab=rep("",nrow(x$Sigma)), collab = rep("",ncol(x$Sigma)))
  }
  if (x$Tm>1 | x$G>1){
    cat("\n R-sq. pooled: ", formatC(x$R2[1], digits = 4)," ", sep = "")
    if(!is.null(x$llsur)){
      cat("\n Log-Likelihood: ",formatC(x$llsur, digits = 6, width = 6))
    }
  }
  # The BP statistic follows a chi-squared distribution with G*(G-1)/2
  # degrees of freedom.
  if (x$Tm>1 | x$G>1){
    # Only report the BP test for multiequations
    if(!is.null(x$BP)){
      cat("\n Breusch-Pagan: ",formatC(x$BP, digits = 4),
          " p-value: (",formatC(pchisq(x$BP, df = G*(G - 1)/2, lower.tail = FALSE),
                                digits = 3, width = 4),") ", sep = "")
    }
  }
  if(!is.null(x$LMM)){
    # If Tm = G = 1 the LMM test has df = 1.
    # NOTE(review): the local `df` shadows stats::df within this scope.
    if (x$Tm==1 & x$G==1){
      df= 1}
    else {df= G*(G - 1)/2
    }
    cat("\n LMM: ",formatC(x$LMM, digits = 5),
        " p-value: (",formatC(pchisq(x$LMM, df = df,
                                     lower.tail = FALSE),
                              digits = 3, width = 4),")\n", sep = "")
  }
  # Return the summary invisibly, as print methods conventionally do.
  invisible(x)
}
|
ed5f6d1fccfd130dbd971cfe390dec078f7e9b91
|
4d286db1d87c5adcaaf3e19b3022090f316aa794
|
/scripts/Group_google.R
|
3354a5d7a3dc673c65be16164f5c9b10cecd1d6e
|
[] |
no_license
|
MortonArb-ForestEcology/Phenology_Forecasting
|
06dbad302ac334b6f335cbe5796479b51cad09f8
|
f1f19696dc938ab0edb2a80c398ad3b28dd8e898
|
refs/heads/master
| 2023-04-30T02:50:57.704185
| 2023-04-18T17:35:40
| 2023-04-18T17:35:40
| 249,529,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,247
|
r
|
Group_google.R
|
#----------------------------------------------------------------------------------------------------------------------------------#
# Function by : Lucien Fitzpatrick
# Project: Living Collections Phenology Forecasting
# Purpose: A function that will download all of the google forms of interest
# Inputs: 2018 to present phenology monitoring data from the googlesheet "Phenology_Observations_GoogleForm" in the "LivingCollections-Phenology/Data_Observations/" folder
# The clean_google_form.r script which defines the clean.google function. Found in the Github repository "Phenology_ LivingCollections"
# Outputs:A dataframe containing the information from all desired google forms.
#Enter the genus of interest as a vector and the final year of observations you want as a variable
#The function will crash if you have a genus included without end years we have a form for i.e("Ulmus" with range 2018:2019)
#This only matters for end years. Start years are adjusted to match the first year the genus has a form.
#Tends to tke about 30 seconds per form
# Download and row-bind phenology observations from the yearly Google forms
# for each genus in x, for observation years ystart:yend.
# x: character vector of genera ("Quercus", "Acer", "Ulmus");
# ystart/yend: first/last observation year requested.
# Returns a data.frame of all cleaned observations with Year and Collection
# columns appended. Requires clean.google() to be defined (sourced
# separately from the Phenology_LivingCollections repository).
group.google <- function(x, ystart, yend){
  dat.pheno <- data.frame()
  # Check which genus we are working with and set the correct year range
  # (each genus has forms starting in a different year).
  for(p in x){
    if (p == "Quercus") {
      yrange <- c(yend:ystart) #THIS MUST BE REVERSED AS A WORKAROUND TO AN ISSUE WITH 2018 QUERCUS. SEEMS ONLY PC ISSUE
    } else if (p == "Acer") {
      if(ystart < 2019){
        ystart <- 2019
      }
      # NOTE(review): reassigning ystart persists into later iterations over
      # x, so a genus processed after Acer/Ulmus inherits the raised start
      # year — confirm this is intended.
      yrange <- c(ystart:yend)
    } else if (p == "Ulmus") {
      if(ystart < 2020){
        ystart <- 2020
      }
      yrange <- c(ystart:yend)
    }
    collection <- p
    # Download the Google form for every year in the requested range
    # (roughly 30 seconds per form).
    for(yr in yrange){
      temp <- clean.google(google.key = "1eEsiJ9FdDiNj_2QwjT5-Muv-t0e-b1UGu0AFBRyITSg", collection=collection, dat.yr=yr)
      temp$Year <- yr
      temp$Collection <- as.factor(collection)
      # names(temp) <- tolower(names(temp))
      #Work around for clean.google not changing 2018 names. THIS ALSO MEANS RANGE MUST GO REVERSE FOR QUERCUS
      # NOTE(review): this assumes dat.pheno already has rows (hence the
      # reversed Quercus range); if 2018 were processed first, colnames of
      # an empty data.frame would be assigned — confirm.
      if(yr == "2018"){
        colnames(temp) <- as.character(colnames(dat.pheno))
      }
      dat.pheno <- rbind(dat.pheno, temp)
    }
  }
  return(dat.pheno)
}
|
367ddbe70595a3267ba6d5752d952f29d2b91117
|
a24962c483b7e4366b7dba2bad71423962cdd6e4
|
/examples/examples.exp2.R
|
b0558337e89edf4de0dc38b4805b3b8f5257e0a0
|
[] |
no_license
|
singmann/acss
|
ebbbaddbd1ee9b234d7c681464328ef28e200f0d
|
cc694cb7ed01b3e43f43b6dae832947aff84ccca
|
refs/heads/master
| 2020-04-06T14:59:17.739935
| 2016-09-01T07:25:38
| 2016-09-01T07:25:38
| 14,473,891
| 4
| 1
| null | 2013-12-14T20:57:35
| 2013-11-17T20:13:39
|
R
|
UTF-8
|
R
| false
| false
| 569
|
r
|
examples.exp2.R
|
# Fit a logistic regression of response on algorithmic complexity (K) for
# the strings in exp2, then visualise the fitted probability curve together
# with the K value at which the predicted probability crosses 0.5.

# load data
data(exp2)
exp2$K <- acss(exp2$string, 6)[,"K.6"]

m_log <- glm(response ~ K, exp2, family = binomial)
summary(m_log)
# odds ratio of K:
exp(coef(m_log)[2])
# calculate threshold of 0.5 (K where the linear predictor is zero)
(threshold <- -coef(m_log)[1]/coef(m_log)[2])

# library() errors immediately if a package is missing, unlike require(),
# which only returns FALSE and would let the plotting calls below fail
# with a less clear message.
library(effects)
library(lattice)
plot(Effect("K", m_log), rescale.axis = FALSE, ylim = c(0, 1))
# Mark the 0.5-probability threshold with dotted guide lines.
trellis.focus("panel", 1, 1)
panel.lines(rep(threshold, 2), c(0, 0.5), col = "black", lwd = 2.5, lty = 3)
panel.lines(c(33,threshold), c(0.5, 0.5), col = "black", lwd = 2.5, lty = 3)
trellis.unfocus()
|
2ff78d5ee461ff70a782942a55f56e494e12f27f
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/GraphAlgorithms/man/PlotGabriel4BestMatches.Rd
|
28bab1a5e783eae83b66496da7a72d4a98dc82ac
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 902
|
rd
|
PlotGabriel4BestMatches.Rd
|
\name{PlotGabriel4BestMatches}
\alias{PlotGabriel4BestMatches}
\title{Plot Gabriel4BestMatches}
\description{
Plots the Gabriel graph of the given BestMatches; points are colored according to Cls if supplied. (The original German text said "Delaunay graph", which conflicts with the Gabriel-graph return value.)
}
\usage{
PlotGabriel4BestMatches(BestMatches,MatrixOrSize,Cls,IsTiled)
}
\arguments{
\item{BestMatches}{BestMatches, (1:d,1:3)}
\item{MatrixOrSize}{Default 50,82; A vector of length 2, containing the number of lines and columns of the SOM corresponding to the BestMatches, (2)}
\item{Cls}{Classes of bestmatches or []}
\item{IsTiled}{==1 (default) => boundary-corrected (tiled/toroidal) universe}
}
\details{
...
}
\value{
%- description of return values
\item{Gabriel}{Sparse Vector des Gabrielgraphen in squareform, (1:d,1:d)}
}
\references{
\url{www.uni-marburg.de/fb12/datenbionik}
}
\author{ Rabea Griese }
\keyword{Gabriel}%- zero or more keywords (all storaged in /doc/KEYWORDS, for search and help)
|
93c571391fd3f5b2b105d4a1bb2330a0eb35c343
|
76b1a90fa416132972b40e40323a156060ada7ba
|
/man/print.ckmr.Rd
|
b6ee1119c8b4fdab86d7fba1d730147afc1c7a35
|
[] |
no_license
|
krshedd/CKMRsim
|
8b558df4bd241e9990fecefac072d46b771bec93
|
930d7fc6523071fe9de857224e400a8281c43b81
|
refs/heads/master
| 2020-04-07T07:04:49.295560
| 2018-11-19T05:26:06
| 2018-11-19T05:26:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 329
|
rd
|
print.ckmr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ckmr-class.R
\name{print.ckmr}
\alias{print.ckmr}
\title{print method for ckmr class}
\usage{
\method{print}{ckmr}(C)
}
\arguments{
\item{C}{an object of class \code{\link{ckmr_class}}.}
}
\description{
Just wraps a call to the format.ckmr function
}
|
1a953da741d2571bc8b60b6122988f0bf9cd7285
|
07c8bb22b912f441b9fcaa46b4080ec141a0943e
|
/day2examples.R
|
5944606b6f840ab553d0be43e3abbaa7b4e34d13
|
[] |
no_license
|
thepingryschool/r-examples-at
|
382e21be5775179a845f3eb0166bb7a797c7164a
|
bad6ad8f50e2fd447a57df1bd5beda47802aa622
|
refs/heads/master
| 2020-04-01T23:33:46.691952
| 2019-01-10T18:28:34
| 2019-01-10T18:28:34
| 153,763,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
day2examples.R
|
# This example script demonstrates the basic properties of atomic vectors.

# Two ways to create a vector: c() and seq()
v1 <- c(1L, 2L, 3)
v1
v2 <- seq(1, 10)
v2
v3 <- 1:10
v3

# Coercion: mixing types promotes every element to the most general type
v4 <- c(1L, 2L, 3, "4", "h")
v4
print("-------------------")

# All elements of a vector share one type
typeof(v1)
class(v1)
typeof(v2)
class(v2)
typeof(v4)
print("-------------------")

# Length of a vector
length(v2)
print("-------------------")

# A vector of length 0 is NULL
a <- c()
a
print("-------------------")

# Vectors can carry attributes, such as names
at <- c("A", "B", "C")
names(v1) <- at
v1
print("-------------------")
v5 <- c(C = 1, D = 2)
v5
print("-------------------")
v6 <- c("A" = 2, "B" = 3)
v6
print("-------------------")
names(v6)
attributes(v5)

# Vectors have no dimension attribute; nesting c() just flattens
ve <- c(1, v2, c("a", "b"))
ve
|
0c4099eab2e20ee75616304cef6ed8d3163de716
|
4903d9852fc3f599ee04d03312ef69ee40adfc27
|
/plot2.R
|
75544071e73db3d936fa3cb5f4e1e4ff8017dc71
|
[] |
no_license
|
sarahkurihara/ExData_Plotting1
|
aae4f42cd8fc88612429f582997ebdac6a89e91b
|
37a8b1f55e812bd139f6bcbc725ea9ff83c31f4e
|
refs/heads/master
| 2022-06-23T17:23:21.958110
| 2020-05-09T21:44:56
| 2020-05-09T21:44:56
| 262,641,819
| 0
| 0
| null | 2020-05-09T19:15:47
| 2020-05-09T19:15:46
| null |
UTF-8
|
R
| false
| false
| 504
|
r
|
plot2.R
|
# Plot 2 of the ExData course project: a line chart of Global Active Power
# over 1-2 Feb 2007, read from household_power_consumption.txt in the
# working directory.
plot2fn <- function() {
        ##load data (semicolon-separated text file)
        data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
        ##subset data from the dates 2007-02-01 and 2007-02-02
        data <- subset(data, Date == "2/2/2007" | Date == "1/2/2007")
        #Convert date and time
        datetime <- as.POSIXct(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
        # Fix: the original referenced an undefined variable GAP. The power
        # column is read as text (the file uses "?" for missing values), so
        # convert it to numeric before plotting.
        # NOTE(review): assumes the column is named Global_active_power as in
        # the UCI household power consumption file — confirm against the data.
        GAP <- as.numeric(as.character(data$Global_active_power))
        #Plot 2
        plot(datetime, GAP,
             type = "l",
             ylab = "Global Active Power (kilowatts)",
             xlab = " ")
}
|
98f932d411577c4e9fd668f1de9b17d57d986906
|
4e344e89e17232cb54a233a642bcc27cf64e9efc
|
/R/loadDefinitiveModel.R
|
a4e5f935d89ce6fe972103935b9531d70bfdf46a
|
[] |
no_license
|
jamiepg3/mopa
|
f02d79fd51a5a83b6b1f8c497e6aaaf81686ecab
|
23ab02fe342ae4421a65c07bec2ee4f3ba2efea8
|
refs/heads/master
| 2017-11-30T17:22:59.545467
| 2015-06-10T07:31:05
| 2015-06-10T07:31:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,996
|
r
|
loadDefinitiveModel.R
|
#' @title Load Rdata files storing definitive fitted model
#' @description Load information from Rdata generated with function allModelling based
#' on an index to select the definitive fitted model/s; ideally, the index returned by
#' function indextent should be used.
#'
#' @param data Object with the same structure as the object returned by function bindPresAbs.
#' @param extents Named integer returned by function indextent (a named index
#' corresponding to the definitive extents to be considered)
#' @param slot Any character of the following: "allmod", "auc", "kappa", "tss", "mod", "p"
#' @param algorithm Any character of the following: "glm", "svm", "maxent", "mars", "randomForest",
#' "cart.rpart" or "cart.tree"
#' @param sourcedir Character of the path where Rdata objects are
#'
#' @return Named list (one entry per element of \code{data}); each entry holds the
#' requested slot of the stored model object:
#' \item{allmod }{fitted model using all data for training}
#' \item{auc }{AUC statistic in the cross validation}
#' \item{kappa }{kappa statistic in the cross validation}
#' \item{tss }{true skill statistic in the cross validation }
#' \item{mod }{fitted model with partitioned data}
#' \item{p }{cross model prediction}
#'
#' @details Each loaded .Rdata file is expected to contain an object named
#' \code{mod} (a list with the slots listed above), as produced by allModeling.
#'
#' @author M. Iturbide \email{maibide@@gmail.com}
#'
#' @examples
#' \dontrun{
#' data(presaus)
#' data(biostack)
#' ##modeling
#' modirs <-allModeling(data = presaus, varstack = biostack, k = 10, "mars")
#' ##loading
#' auc_mars <-loadTestValues(data = presaus, "auc", "mars")
#' ind <- indextent(testmat = auc_mars, diagrams = TRUE)
#'
#' def <-loadDefinitiveModel(data = presaus, extents = ind, slot = "allmod", algorithm = "mars")
#' }
#'
#' @references Iturbide, M., Bedia, J., Herrera, S., del Hierro, O., Pinto, M., Gutierrez, J.M., 2015.
#' A framework for species distribution modelling with improved pseudo-absence generation. Ecological
#' Modelling. DOI:10.1016/j.ecolmodel.2015.05.018.
#'
#' @export
loadDefinitiveModel <- function(data, extents, slot = c("allmod", "auc", "kappa", "tss", "mod", "p"),
                                algorithm = c("glm", "svm", "maxent", "mars", "randomForest",
                                              "cart.rpart", "cart.tree"), sourcedir = getwd()) {
  # Fail early on an unrecognized slot/algorithm: the previous if/else chain
  # silently left NULL entries when `slot` matched none of the choices.
  slot <- match.arg(slot)
  algorithm <- match.arg(algorithm)
  # When data is not a list of lists (single group), the file names carry
  # no "_hg<group>" suffix. (Same test as the original `class(...) != "list"`.)
  single.group <- !inherits(data[[1]], "list")
  modelslot <- vector("list", length(data))
  for (i in seq_along(data)) {
    g <- names(data)[i]
    if (single.group) {
      rdata.file <- paste0(sourcedir, "/", algorithm, "_bg", names(extents)[i], ".Rdata")
    } else {
      rdata.file <- paste0(sourcedir, "/", algorithm, "_bg", names(extents)[i], "_hg", g, ".Rdata")
    }
    load(rdata.file)  # loads an object named `mod` into this frame
    modelslot[[i]] <- switch(slot,
                             allmod = mod$allmod,
                             auc = mod$auc,
                             kappa = mod$kappa,
                             tss = mod$tss,
                             mod = mod$mod,
                             p = mod$p)
  }
  names(modelslot) <- names(data)
  modelslot
}
|
45d61c65c2dc5647e0fb567508176e0abbc8efda
|
3cf7dbc603a37c4b1609913acb5f693e8bc6f2c5
|
/diamonds.R
|
1e18140446ca29d00c53207e01d7d92d24645e66
|
[] |
no_license
|
ckp-koiken/R4DS
|
6c579f3e5649675c8b70161f07859b042b173d0e
|
b491461b88a4f2208fb2a3e18e1622ec8f2a0e4d
|
refs/heads/main
| 2023-04-12T09:10:43.145617
| 2021-05-13T04:23:37
| 2021-05-13T04:23:37
| 366,662,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
diamonds.R
|
library(tidyverse)
# Chapter 6 Workflow: projects ------------------------------------------------
# Hex-binned scatterplot of carat vs price for the built-in diamonds data
ggplot(diamonds, aes(carat, price))+
geom_hex()
# ggsave() writes the most recently displayed plot to the working directory
ggsave("diamonds.pdf")
write_csv(diamonds, "diamonds.csv")
|
507ab5ea5e9e897b6f7f23872f2d9003e41a1895
|
96e6210bf4b9c0573621772bf42790e7b675c37c
|
/Data Analysis/YaoZhang_HW2.R
|
d692c2d53e97fb50980cd8654dca2d477a5e71fb
|
[] |
no_license
|
ohana1128/HW
|
ede2dd666df37c2a22b4523c21f95c6b23586c2a
|
2aadc47bb54be650cf34536b88add39ced1caacd
|
refs/heads/master
| 2021-01-19T04:15:37.060084
| 2016-09-13T20:19:35
| 2016-09-13T20:19:35
| 45,416,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
YaoZhang_HW2.R
|
# HW2: ANOVA of pain-relief scores (Relief) by pain level and codeine dose.
pain <- read.csv("HW2Data2.csv")
dim(pain)
## 1. Draw a boxplot for pain relief scores against different pain levels.
library(ggplot2)
ggplot(pain,aes(x=factor(PainLevel),y=Relief))+geom_boxplot()
## 2. Perform a single factor ANOVA and compare the differences in "Relief
##    Scores" of each "PainLevel" group.
## Check normality of the Relief scores with a QQ plot
library(reshape)
pain.melt <- melt(pain)
group1 <- pain.melt[pain.melt$variable=="Relief",2]
qqnorm(group1)
qqline(group1)
## Check equal variance of Relief ACROSS PAIN LEVELS (the ANOVA assumption).
## The original call compared variances across the melted *variables*,
## which does not test homogeneity between the PainLevel groups.
bartlett.test(Relief ~ factor(PainLevel), data = pain)
## One-way ANOVA: Relief is the response, PainLevel the grouping factor.
## (The original formula `PainLevel ~ Relief` had response and factor swapped.)
my.aov <- aov(Relief ~ factor(PainLevel), data = pain)
summary(my.aov)
## 3. Perform a two factor ANOVA with the following factors: Codeine and pain levels.
my.aovtwo <- aov(Relief~PainLevel*Codeine,data=pain)
summary(my.aovtwo)
## 4. Report any significant effects from the two factor ANOVA.
## 5. Use Scheffe's method to conduct a multiple comparison for the different
##    pairs of pain level.
library(sp)
library(agricolae)
agricolae::scheffe.test(my.aovtwo,"PainLevel",group=TRUE,console = TRUE)
## 6. Based on the result from Scheffe's method and the boxplot, discuss how
##    to regroup different pain levels.
|
7962ee999a31cbfa891b8ca60f99668d27d92ccb
|
16f8c4d8fdda15632647077bdee8cf4289219202
|
/R/b_ssa_analysis_1dssa.R
|
4189b5d3b4b2a806ce8f0375bd67a20c297394f0
|
[] |
no_license
|
polinazhornikova/Thesis-2018
|
34ec066684ea54f591b8c1b33e634420798334ba
|
70f2597bbab8d8f5bdc2b9b0a4cf3b4c58495013
|
refs/heads/master
| 2020-03-17T17:25:54.787365
| 2018-06-05T23:25:00
| 2018-06-05T23:25:00
| 133,788,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,090
|
r
|
b_ssa_analysis_1dssa.R
|
# Chapter 1 reproduction script: 1D-SSA decomposition of a simulated series.
# fragmentStart()/fragmentStop() appear to delimit code captured into the named
# .tex fragment, and fragmentSkip() to run code (e.g. pdf devices) without
# including it in the fragment -- TODO confirm where these helpers are defined.
###Fragment initial
fragmentStart("fragments_ch1/1dssa_init.tex")
source('main.grouping.auto.R')
library(lattice)
library(Rssa)
N <- 199
omega1 <- 0.1
omega2 <- 0.25
fragmentStop()
###end
###Fragment 1dssa_series
fragmentStart("fragments_ch1/1dssa_series.tex")
# Simulated series: exponential trend + cosine + exponentially modulated
# cosine + Gaussian noise; decomposed by SSA with window length L = 100
x <- exp(0.01 * (1:N)) + 2*cos(2 * pi * omega1 * (1:N)) + exp(0.009 * (1:N)) * cos(2 * pi * omega2 * (1:N)) + rnorm(n=N,sd=3)
s <- ssa(x, L = 100)
fragmentSkip(pdf("img_ch1/1dssa_vectors.pdf", paper = "special", width = 5, height = 2))
plot(s, type="vectors", layout = c(5, 2))
fragmentSkip(dev.off())
fragmentSkip(pdf("img_ch1/1dssa_vectors_pair.pdf", paper = "special", width = 5, height = 2))
plot(s, type="paired", layout = c(5, 2))
fragmentSkip(dev.off())
fragmentStop()
###end
###Fragment 1dssa_trend
fragmentStart("fragments_ch1/1dssa_trend.tex")
# Automatic grouping of trend components by low-frequency periodogram mass
g_trend <- grouping.auto(s, grouping.method = 'pgram',
freq.bins = list(0.01), threshold = 0.9)
print(g_trend$F1)
fragmentStop()
###end
###Fragment 1dssa_em_freq
fragmentStart("fragments_ch1/1dssa_em_freq.tex")
# Frequency-based automatic grouping (helper from main.grouping.auto.R)
g_em_freq <- general.grouping.auto(s, grouping.method = "freq.1dssa", s_0 = 1,
rho_0 = 0.9)
print(g_em_freq)
fragmentStop()
###end
###Fragment 1dssa_em_tau
fragmentStart("fragments_ch1/1dssa_em_tau.tex")
# NOTE(review): argument is spelled "treshold" here -- verify that this matches
# the parameter name declared in general.grouping.auto
g_em_tau <- general.grouping.auto(s, grouping.method = "tau.1dssa", treshold = 0.01)
print(g_em_tau$idx)
fragmentStop()
###end
###Fragment 1dssa_recon
fragmentStart("fragments_ch1/1dssa_rec.tex")
# Reconstruct trend (T) and periodic part (P) from the selected component groups
r <- reconstruct(s, groups = list(T = g_trend, P = c(g_em_freq$I_1,
g_em_freq$I_2)))
fragmentSkip(pdf("img_ch1/1dssa_rec.pdf", paper = "special", width = 4, height = 3))
d <- data.frame(X=x, N=1:N, T=r$T, P=r$P)
xyplot(T + P +X ~ N, data = d, type ='l', ylab = '',
auto.key = list(points = FALSE, lines = TRUE))
fragmentSkip(dev.off())
d <- data.frame(X=x, N=1:N)
fragmentSkip(pdf("img_ch1/1dssa_ser.pdf", paper = "special", width = 4, height = 3))
xyplot(X ~ N, data = d, type ='l')
fragmentSkip(dev.off())
fragmentStop()
###end
|
2860cdfbfc589f6efcdf6de04c7d3f9736b0e319
|
f6275d025b109bac1c648e5f3e3df5525f4a6966
|
/WikiMultipleMonths.R
|
b543941040b6e58be57ac50c4f370c7ae57d34f3
|
[] |
no_license
|
bladster/RStudio
|
f8e2e221be850141a271393dc3b7a9ab6ed6537f
|
41f50dc200e81c30ee8ff8617c856e04734b84ca
|
refs/heads/master
| 2021-01-01T04:26:55.438647
| 2016-05-04T13:35:51
| 2016-05-04T13:35:51
| 58,054,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 430
|
r
|
WikiMultipleMonths.R
|
# Build the stats.grok.se JSON URLs for the "Friday" article, one per month
# from January 2008 through October 2014 (2014 stops after month 10).
# Replaces the original nested loop, which grew `allURLs` with c() on every
# iteration and zero-padded the month by hand.
allURLs <- unlist(lapply(2008:2014, function(year) {
  # the original skipped months 11 and 12 of 2014 via `next`
  months <- if (year == 2014) 1:10 else 1:12
  # %02d zero-pads the month so e.g. January 2008 becomes "200801"
  sprintf("http://stats.grok.se/json/en/%d%02d/Friday", year, months)
}))
|
298acda74a98daa6e9bd8bbeb3892c447eef8cab
|
0b01338c904a2662bc925a5df0462ae36f2210c4
|
/14_QCmetrics.R
|
f337fa9ca357f8d311ce649d4b58e429dbed5f6d
|
[] |
no_license
|
JMF47/recountNNLSpaper
|
946980021d82e1b73eafab1fb69534e2c8d962fa
|
d251efaae1a20f2d96bf2f103445ab8a68905fc2
|
refs/heads/master
| 2021-09-14T12:20:29.791693
| 2018-05-13T17:22:10
| 2018-05-13T17:22:10
| 117,008,135
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,657
|
r
|
14_QCmetrics.R
|
### Spot-check: per-project QC of recount rse_tx objects.
### For every SRA project except TCGA and GTEx (SRP012682), load the
### transcript-level RangedSummarizedExperiment and summarize mapping and
### transcript-quantification rates per sample.
rm(list=ls())  # NOTE(review): clearing the workspace is a side effect on the caller's session
library(GenomicRanges); library(rtracklayer); library(stringr); library(Biostrings); library(Rsamtools)
library(SummarizedExperiment); library(recountNNLSdata); library(recountNNLS)
url_table <- recount::recount_url
unique_ids = unique(url_table$project)
# Drop TCGA and GTEx, which are handled separately
unique_ids = as.character(unique_ids[! unique_ids %in% c("TCGA", "SRP012682")])
# out = NULL
overall_map = NULL
for(unique_id in unique_ids){
message(which(unique_ids == unique_id))
out_dir = paste0("/dcl01/leek/data/ta_poc/recount_out/rse_new/", unique_id)
out_file = paste0(out_dir, "/rse_tx.RData")
load(out_file)
# load() brings `rse_tx` into scope
pheno = colData(rse_tx)
# paired_end is 0/1, so dividing by (paired_end + 1) converts reads to fragments
total_reads = pheno$reads_downloaded/(pheno$paired_end+1)
mapped_reads = pheno$mapped_read_count/(pheno$paired_end+1)
# per-sample sum of fragments assigned to transcripts
tx_reads = apply(assays(rse_tx)$fragments, 2, sum, na.rm=TRUE)
time = pheno$biosample_submission_date
info = cbind(pheno[,c(1:4, 6, 8:9)], time, pheno$rls, pheno$rls_group, total_reads, mapped_reads, tx_reads)
overall_map = rbind(overall_map, info)
}
# Columns 11-13 are total_reads, mapped_reads, tx_reads
# (7 pheno columns + time + rls + rls_group precede them in the cbind above)
map_perc = overall_map[,12]/overall_map[,11]
tx_perc = overall_map[,13]/overall_map[,12]
# Per project: samples with <50% of mapped reads assigned to transcripts,
# samples with <80% mapping rate, and total sample count
num_fail_tx = by(tx_perc, overall_map$project, function(x) sum(x<0.5))
num_fail_map = by(map_perc, overall_map$project, function(x) sum(x<0.8))
num_sample = by(rep(1, length(map_perc)), overall_map$project, sum)
cbind(num_fail_tx, num_fail_map, num_sample)
quantile(tx_perc, seq(0, 1, by=0.05))
quantile(map_perc, seq(0, 1, by=0.05))
###
# Projects flagged by the QC above:
# SRP050272 - 83 samples, lncRNAs, some low map rates, high intergenic region counts
# ERP001908 - 62 miRNAs of tumors
# ERP001344 - 34 viral hepatitis agents
# ERP004592 - 23 miRNAs Hungtingtons
# SRP014675 - 43 miRNAs
|
c605faef4cf85a5b16136c40fc8fd801f2447c02
|
22b36960caa879eb916d08e47b13b0ad0cc3ac2d
|
/config.R
|
5118b7436d296c163a58a0829ae127530545c453
|
[] |
no_license
|
labepi/tsclas
|
09f845def04444f987c52d2c5eb357143565da23
|
fb26127bb90995a98b597f6918bf3a3fbd067a74
|
refs/heads/master
| 2023-04-25T00:51:05.772334
| 2021-05-15T23:30:07
| 2021-05-15T23:30:07
| 276,223,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
config.R
|
# Configuration constants for the tsclas pipeline. This file is expected to be
# sourced before the other scripts; the names below are read as globals.
# path to the bandt-pompe functions
bandt_pompe_path="../bandt_pompe"
# path to the skinny-dip files
skinnydip_path='../skinny-dip/code/skinny-dip'
# path to datasets
dataset_path='./data'
# print the debug messages
DEBUG=TRUE
# load the pre-computed features for a given dataset (instead of recomputing)
LOAD_PRECOMPUTED=TRUE
#LOAD_PRECOMPUTED=FALSE
# the random seed to use
# NOTE: it will be overwritten if passed as command line argument
SEED=1
# the fraction of the dataset used for training in the train/test split
TRAIN_PCT=0.8
# whether parallelism is enabled for parameter tuning
DO_PARALLEL=TRUE
# number of cores to use
CORES_NUM=3
#CORES_NUM=-1 # to use: detectCores()-1
|
c45e7d0abc49e404d5f2f56274ae886c99a13091
|
d1a388b98b8c248c5f0388672189acedf96b0a93
|
/Simulation_juin2018/Estimation Paper 2 juin 2016/Calibration HN/Calibration Price Linear/Methode HN.R
|
e67ae1b03581effdef29117636d1e4063597f86f
|
[] |
no_license
|
Fanirisoa/dynamic_pricing
|
1951438ea282358cf0aa90a04b1273846cd70836
|
66722ae75f446a3c9e2a672890ae808c679f72cd
|
refs/heads/master
| 2023-04-09T04:36:15.646529
| 2021-04-20T19:38:35
| 2021-04-20T19:38:35
| 212,493,315
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,075
|
r
|
Methode HN.R
|
# Heston-Nandi GARCH calibration script: loads 2009 option/return data, fits
# the model by minimizing an RMSE criterion, then inspects volatility, prices,
# the Hessian and vegas. Relies on functions (h, RMSE, Price_fft, Vega, ...)
# defined in the sourced calibration files below.
rm(list=ls())  # NOTE(review): wipes the caller's workspace -- script-only usage
gc()
library(compiler)
enableJIT(1)
enableJIT(3)
#####################################################
###  Load both data.contract and data.ret     #######
#####################################################
load("DataPrice2009.Rdata")
## load("Dataprice2010.Rdata")
#####################################################
###  Source the calibration helper functions  #######
#####################################################
##source("C:/Users/e0g411k03dt/Desktop/Estimation Paper 2 April 2016/Calibration HN/Calibration Price Linear/Calibration HN.r")
source("C:/Users/e0g411k03dt/Desktop/Estimation Paper 2 April 2016/Calibration HN/Calibration Price Linear/Calibration HN Nogammastart.r")
source("C:/Users/e0g411k03dt/Desktop/Estimation Paper 2 April 2016/Calibration HN/Calibration Price Linear/Matrixe RMSE HN.r")
#####################################################
###  Parameters of the model                  #######
#####################################################
### Initial parameter ##
## a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
# NOTE(review): para_h is assigned three times; only the last assignment is
# the starting point actually used by optim() below.
para_h<-c(1.180234e-12, 1.547729e-06, 4.550518e+02, 6.500111e-01,0.25764 )
para_h<-c(5.02e-6, 1.32e-6, 421.39, 0.589,0.5)
para_h<-c(5.436223e-06, 1.334501e-06, 4.634877e+02, 6.302493e-01,0.42273)
### Solution (previously obtained estimates; the last one is kept)
para_h1<-c(6.593975e-06, 1.094715e-06, 4.634877e+02 , 6.302488e-01,0.812503)
para_h1<-c(6.228100e-06, 1.069426e-06, 4.550518e+02, 6.499864e-01, 2.576463e-01)
#####################################################
###  Volatility path implied by para_h1       #######
#####################################################
ts.vol=h(para_h1,Data.ret)
ts.plot(ts.vol, col = "steelblue", main = "IG Garch Model",xlab="2009",ylab="Volatility")
grid()
############################################################
####  NLS estimation: minimize RMSE over the parameters  ##
############################################################
start.time <- Sys.time()
NLSMSE=optim(par=para_h,fn=RMSE,Data.ret=Data.ret,Data.N=Data.N,method="Nelder-Mead",control = list(maxit = 900))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
para_h1<- NLSMSE$par
sigma(NLSMSE)
sigma.hat(para_h1)
############################################################
####  RMSE at the fitted parameters                      ##
############################################################
start.time <- Sys.time()
ValRMSE=RMSE(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
start.time <- Sys.time()
MRMSE=MatriceRMSE(para_h1,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
############################################################
####  Hessian of the RMSE objective                      ##
############################################################
# NOTE(review): these calls pass Data.contract=Data.contract, but only Data.N
# and Data.ret are visibly loaded above -- confirm Data.contract exists in
# DataPrice2009.Rdata, otherwise these lines error.
start.time <- Sys.time()
hess = hessian(func=RMSE, x=para_h,Data.ret=Data.ret,Data.contract=Data.contract)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
hessc <- hessian(func=RMSE, x=para_h, "complex",Data.ret=Data.ret,Data.contract=Data.contract)
all.equal(hess, hessc, tolerance = .Machine$double.eps)
############################################################
####  Vega of the contracts                              ##
############################################################
start.time <- Sys.time()
Vega1=Vega(Data.N, type="C")
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
#####################################################
###  Model prices vs observed prices (first l)  #####
#####################################################
l=100
start.time <- Sys.time()
P=Price_fft(para_h=para_h,Data.ret=Data.ret,Data.contract=Data.N[1:l,])
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
C=Data.N$C
Ca=C[1:l]
Pa=P[1:l]
ts.plot(Pa,ts(Ca), col =c("steelblue","red"), main = "Valuation with IG Garch Model",xlab="2009",ylab="Prices")
legend(175, 250, c("C(t)","C*(t)"), col =c("steelblue","red"), lty = c(1, 1))
#####################################################
###  Implied volatility                       #######
#####################################################
# Compute the implied volatility of each option contract in Data.contract.
#
# Args:
#   para_h        - model parameters; currently UNUSED, kept so existing
#                   call sites (Ip(para_h1, Data.ret, Data.N)) keep working
#   Data.ret      - return data; currently UNUSED, kept for the same reason
#   Data.contract - data frame with columns T (maturity, years), S (spot),
#                   K (strike), r (rate), C (call price), d (dividend)
# Returns a numeric vector of implied volatilities, one per contract.
Ip <- function(para_h,Data.ret,Data.contract)
{
  T=Data.contract$T    #### Time to maturity expressed in years
  S=Data.contract$S    #### Underlying price
  K=Data.contract$K    #### Strike price
  r=Data.contract$r    #### Interest rate
  C=Data.contract$C    #### Call price
  d=Data.contract$d    #### Dividend
  Ip <- rep(NA_real_, length(C))
  # seq_along() (instead of 1:length(C)) is safe when there are no contracts
  for (i in seq_along(C)){
    Ip[i] = implied.vol(S[i], K[i], T[i], r[i], C[i],d[i], type="C")
  }
  return(Ip)
}
# Compute implied volatilities for all contracts in Data.N and time the call
start.time <- Sys.time()
Ipv=Ip(para_h1,Data.ret,Data.N)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
|
32daba3c3a24fc13aa539713447441d5e72451d7
|
efcdc10a70c28a624b812d20cb5777f848b59a54
|
/simulate_Exp.R
|
168c1fde7c9de0c7b84165482d04d8ff1655b081
|
[] |
no_license
|
glmbraun/mNodes
|
fdb6437ec34330b6be0f2a330b23ebbee5c612ef
|
05d136498a55902b55a3457bb467963ac679a8b2
|
refs/heads/master
| 2022-12-24T02:25:31.456026
| 2020-09-16T09:56:54
| 2020-09-16T09:56:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,089
|
r
|
simulate_Exp.R
|
# Load packages
library(Matrix)
library(lbfgs)
library(ggplot2)
library(dplyr)
library(RSpectra)
library(aricode)
library(kpodclustr)
library(janitor)
library(tidyr)
library(purrr)
library(grid)
library(microbenchmark)
library(foreach)
library(doParallel)
library(igraph)
library(gridExtra)
library(lemon)
library(latex2exp)
library(GreedySBTM)
library(xtable)
library(stargazer)
#############################
# Results on simulated data #
#############################
# Run `nb` replicates of a missing-node multilayer-SBM clustering experiment
# and return the mean score of each recovery method.
#
# Args:
#   nb       - number of replicates
#   K        - number of communities
#   nb_nodes - number of nodes per network
#   L        - number of layers
#   rho      - node observation probability (controls missingness)
#   a, b, r  - parameters passed to conMat(K, a, b, r) to build the
#              per-layer connectivity matrix
# Returns a 1 x 6 matrix, t(rowMeans(score)): mean NMI for sum_of_adj,
# sum_of_sq, k_pod, OLMF and imput, plus the mean number of observed nodes.
# NOTE(review): the "sum_of_sq" method is commented out below, so row 2 of
# `score` stays NA and its reported mean is NA.
repSim_miss<-function(nb,K,nb_nodes,L,rho,a,b,r){
score<-matrix(NA,nrow=6,ncol=nb)
rownames(score)<-c("sum_of_adj","sum_of_sq","k_pod","OLMF","imput","tot_obs_nodes")
pi_L<-array(NA,c(K,K,L))
for(i in 1:nb){
# Generate a MLSBM (with balanced communities)
alpha<-rep(1/K,K)
Z = t(rmultinom(nb_nodes,1, alpha))
convZ<-convertZ(Z)
for(j in 1:L){
pi_L[,,j]<-array(conMat(K,a,b,r))
}
tensorAdj<-sampleMLSBM(nb_nodes,K,pi_L,convZ)
# Generate missing nodes: missMLSBM returns the masked tensor, the indicator
# of nodes observed at least once, and the per-layer observation matrix
miss<-missMLSBM(tensorAdj,rho,with_na = TRUE)
tensorAdj<-miss[[1]]
tot_obs_nodes<-miss[[2]]
obs_nodes_mat<-miss[[3]]
score[6,i]<-sum(tot_obs_nodes)
# Delete nodes that have not been observed at least one time
tensorAdj_del<-delete_missing_nodes_tensor(tensorAdj,tot_obs_nodes)
convZ_del<-delete_missing_nodes_Z(convZ,tot_obs_nodes)
obs_nodes_mat_del<-obs_nodes_mat[tot_obs_nodes%>%as.logical(),]
# Version of the tensor with the remaining NAs replaced by 0
tensorAdj_del0<-tensorAdj_del%>%replace_na(0)
# Apply different clustering methods and compute the score (NMI vs truth)
## Method 1: spectral clustering on the sum of adjacency matrices
sum_of_adjMat<-sum_of_adj(tensorAdj_del0)
clust<-clust_on_mat_eig(sum_of_adjMat,K)
score[1,i]<-NMI(clust,convZ_del)
# Method 2 (disabled): sum of squared adjacency matrices
# sum_of_square<-sum_of_square_adj(tensorAdj_del0)
# clust<-clust_on_mat_eig(sum_of_square,K)
# score[2,i]<-NMI(clust,convZ_del)
# Method 3: k-means for partially observed data (k-POD) on the aggregate
tot_obs_nodes<-tot_obs_nodes%>%as.logical()
final_ag<-final_aggr_miss(tensorAdj_del,K,obs_nodes_mat_del)
clust<-kpod(final_ag,K)$cluster
score[3,i]<-NMI(clust,convZ_del)
# Method 4: OLMF (orthogonal linked matrix factorization) modified for missing nodes
n_obs<-dim(tensorAdj_del0)[1]
score[4,i]<-NMI(lmfo_miss(tensorAdj_del0,obs_nodes_mat_del,K),convZ_del)
# Method 5: iterative imputation -- alternate 20 times between imputing the
# missing entries from the current clustering and re-clustering
tensorAdj_in<-tensorAdj_del0
for(rep in 1:20){
Z_in<-convertClust(clust)
tensorAdj_out<-impute_nodes(Z_in,tensorAdj_in,obs_nodes_mat_del,K)
sum_of_adjn<-sum_of_adj(tensorAdj_out)
clust<-clust_on_mat_eig(sum_of_adjn,K)
tensorAdj_in<-tensorAdj_out
}
score[5,i]<-NMI(clust,convZ_del)
}
return(t(rowMeans(score)))
}
# Quick sanity check (left disabled):
# test<-repSim_miss(1,3,600,2,0.8,0.18,0.19,0.5)
#############################
# Varying number of layers
#############################
# Experiment 1: K = 3, n = 1000 fixed; vary the number of layers (seq_l) and
# the observation probability rho = 0.1..1.0, parallelized over rho.
seq_l<-seq(3,6,by=3)
cl <- makeForkCluster(10)
registerDoParallel(cl)
result <- foreach(rho=1:10,.combine = rbind) %dopar% {
lapply(seq_l,function(l){repSim_miss(1,3,1000,l,rho/10,0.18,0.19,0.7)%>%as.data.frame()%>%mutate(rho=rho/10,layer=l)})
}
stopCluster(cl)
registerDoSEQ()
# Flatten the list of per-(rho, layer) rows into a single data frame.
# NOTE(review): each element has 8 columns (6 scores + rho + layer) but the
# seed array is 0 x 7 -- confirm this rbind does not error / misalign.
data<-array(NA,dim=c(0,7))
for(i in 1:length(result)){
data<-rbind(data,result[[i]])
}
###############################
# Varying number of nodes
###############################
# Experiment 2': L=3, K= 3 fixed and n vary. The experiment is repeated for different values of rho.
# a= 0.18 b=0.19 r=0.7 so there is no overlap
seq_n<-seq(600,2600,by=200)
cl <- makeForkCluster(10)
registerDoParallel(cl)
result2 <- foreach(rho=1:10,.combine = rbind) %dopar% {
lapply(seq_n,function(n){repSim_miss(20,3,n,3,rho/10,0.18,0.19,0.7)%>%as.data.frame()%>%mutate(rho=rho/10,nodes=n)})
}
stopCluster(cl)
registerDoSEQ()
# Flatten experiment 2 results (8 columns: 6 scores + rho + nodes)
data2<-array(NA,dim=c(0,8))
for(i in 1:length(result2)){
data2<-rbind(data2,result2[[i]])
}
#################################
# Plots
#################################
# Plot 1: NMI vs rho, one panel per number of layers. For each l a data frame
# d<l> and a plot p<l> are created via assign(); the observed-node counts are
# written above the plot area with annotation_custom.
for(l in seq_l){
assign(paste("d", l, sep = ""),data%>%filter(layer==l))
pv<-ggplot(get(paste("d", l, sep = "")), aes(rho,label=tot_obs_nodes)) +
geom_line(aes(y = sum_of_adj, colour = "sum_of_adj",linetype="sum_of_adj")) +
geom_line(aes(y = k_pod, colour = "k_pod_clust",linetype = "k_pod_clust"))+
geom_line(aes(y = OLMF, colour = "OLMF",linetype="OLMF"))+
geom_line(aes(y = imput, colour = "Impute",linetype="Impute"))+
scale_color_manual("",values=c("deepskyblue2","black","orange","deepskyblue3"))+
scale_linetype_manual("",values=c("dotted","solid","longdash","solid"))+
labs(y="NMI",x=TeX("$\\rho$"),title=paste("L=",l,", K=3, n=1000",sep=""))+
theme(plot.margin = unit(c(1,1,1,1), "lines"),plot.title = element_text(hjust = 0.5, vjust=5)) +
coord_cartesian(clip = "off")
for(i in 1:10){
pv<-pv + annotation_custom(
grob = textGrob(label = round(get(paste("d", l, sep = ""))$tot_obs_nodes[i]), hjust = 0, gp = gpar(cex = 0.6)),
xmin = get(paste("d", l, sep = ""))$rho[i], # Horizontal position of the textGrob
xmax = get(paste("d", l, sep = ""))$rho[i],
ymin = 1.05, # Note: The grobs are positioned outside the plot area
ymax = 1.05)
}
assign(paste("p",l,sep=""),pv)
}
# NOTE(review): seq_l is c(3, 6) above, so only p3 and p6 exist; p9 and p12
# will be missing unless seq_l is changed -- confirm before running.
grid_arrange_shared_legend(p3,p6,p9,p12,nrow=2,ncol=2)
## Varying number of layer for a given rho
# NOTE(review): filter(rho==r) compares floats built by two different
# expressions (rho/10 vs seq(0.1,1,by=0.1)); some values may not match
# exactly -- consider dplyr::near() if panels come out empty.
seq_rho<-seq(0.1,1,by=0.1)
for(r in seq_rho){
assign(paste("d", r, sep = ""),data%>%filter(rho==r))
pv<-ggplot(get(paste("d", r, sep = "")), aes(layer,label=tot_obs_nodes)) +
geom_line(aes(y = sum_of_adj, colour = "sum_of_adj",linetype="sum_of_adj")) +
geom_line(aes(y = k_pod, colour = "k_pod_clust",linetype="k_pod_clust"))+
geom_line(aes(y = OLMF, colour = "OLMF",linetype="OLMF"))+
geom_line(aes(y = imput, colour = "Impute",linetype="Impute"))+
scale_color_manual("",values=c("deepskyblue2","black","orange","deepskyblue3"))+
scale_linetype_manual("",values=c("dotted","solid","longdash","solid"))+
labs(y="NMI",x="L",title=TeX(sprintf("$\\rho = %1.1f$ K=3, n=1000",r)))+
theme(plot.margin = unit(c(1,1,1,1), "lines"),legend.title=element_blank(),plot.title = element_text(hjust = 0.5, vjust=5)) +
coord_cartesian(ylim=c(0,1),clip = "off")
for(i in 1:4){
pv<-pv + annotation_custom(
grob = textGrob(label = round(get(paste("d", r, sep = ""))$tot_obs_nodes[i]), hjust = 0, gp = gpar(cex = 0.6)),
xmin = get(paste("d", r, sep = ""))$layer[i], # Horizontal position of the textGrob
xmax = get(paste("d", r, sep = ""))$layer[i],
ymin = 1.05, # Note: The grobs are positioned outside the plot area
ymax = 1.05)
}
assign(paste("p",r,sep=""),pv)
}
grid_arrange_shared_legend(p0.4,p0.5,p0.6,p0.8,nrow=2,ncol=2)
# Plot 3: NMI vs number of nodes (experiment 2), one panel per rho
for(r in seq_rho){
assign(paste("d2", r, sep = ""),data2%>%filter(rho==r))
pv<-ggplot(get(paste("d2", r, sep = "")), aes(nodes,label=tot_obs_nodes)) +
geom_line(aes(y = sum_of_adj, colour = "sum_of_adj",linetype="sum_of_adj")) +
geom_line(aes(y = k_pod, colour = "k_pod_clust",linetype = "k_pod_clust"))+
geom_line(aes(y = OLMF, colour = "OLMF",linetype="OLMF"))+
geom_line(aes(y = imput, colour = "Impute",linetype="Impute"))+
scale_color_manual("",values=c("deepskyblue2","black","orange","deepskyblue3"))+
scale_linetype_manual("",values=c("dotted","solid","longdash","solid"))+
labs(y="NMI",x="n",title=TeX(sprintf("$\\rho = %1.1f$ K=3, n=1000",r)))+
theme(plot.margin = unit(c(1,1,1,1), "lines"),legend.title=element_blank(),plot.title = element_text(hjust = 0.5, vjust=5)) +
coord_cartesian(ylim=c(0,1),clip = "off")
for(i in 1:10){
pv<-pv + annotation_custom(
grob = textGrob(label = round(get(paste("d2", r, sep = ""))$tot_obs_nodes[i]), hjust = 0, gp = gpar(cex = 0.6)),
xmin = get(paste("d2", r, sep = ""))$nodes[i], # Horizontal position of the textGrob
xmax =get(paste("d2", r, sep = ""))$nodes[i],
ymin = 1.05, # Note: The grobs are positioned outside the plot area
ymax = 1.05)
}
assign(paste("pp",r,sep=""),pv)
}
grid_arrange_shared_legend(pp0.4,pp0.5,pp0.6,pp0.8,nrow=2,ncol=2)
|
2e470ed0828738f6564344f695eb503550bbd434
|
0f7405b90fade51b2ce5218869513065f8c6ad34
|
/R/GammaExample.R
|
de9389f1a71ba10f9300533bf8f82412f782ed99
|
[] |
no_license
|
anhnguyendepocen/AdvancedRegression
|
4dcaf01554af22c27b51f39094149f6bbdada810
|
5bbd35c8196759dd80bea953a13faa80c8764b99
|
refs/heads/master
| 2022-03-09T23:29:17.615787
| 2019-10-05T14:08:37
| 2019-10-05T14:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,383
|
r
|
GammaExample.R
|
#' GammaExample
#'
#' Fit a Gamma regression with a log link to the \code{real estate} data,
#' compare it against an intercept-only model, and predict the price of one
#' new property.
#'
#' @return Nothing
#' @export
#' @importFrom tibble tibble
#' @importFrom magrittr %>%
#' @import stats
#'
#' @examples{
#' GammaExample()
#' }
GammaExample <- function() {
  estate <- AdvancedRegression::real_estate

  # Rescale monetary/area variables and pick reference categories.
  # These live in the function environment and are found by glm() alongside
  # the data-frame columns (beds, baths) used in the formula.
  price10K <- estate$price / 10000
  sqftK <- estate$sqft / 1000
  heating.rel <- relevel(estate$heating, ref = "none")
  AC.rel <- relevel(estate$AC, ref = "no")
  lotK <- estate$lot / 1000

  # Gamma regression with a log link
  gamma.fit <- glm(price10K ~ beds + baths + sqftK + heating.rel + AC.rel + lotK,
                   data = estate, family = stats::Gamma(link = log))
  print(summary(gamma.fit))

  # Model-fit check: deviance test against the intercept-only model
  null.fit <- glm(price10K ~ 1, family = stats::Gamma(link = log))
  deviance_pvalue(null.fit, gamma.fit, df = 7)

  # Use the fitted model to predict one new property, rescaling back to dollars
  new.obs <- tibble::tibble(beds = 4, baths = 2, sqftK = 1.68,
                            heating.rel = "central", AC.rel = "no", lotK = 5)
  pred.price <- 10000 * predict(gamma.fit, newdata = new.obs, type = "response")
  print(new.obs)
  print(paste0('Prediction: ', pred.price))
}
|
aa1848502b82f92e5c16e26b730ba6426d6f2160
|
ed558382701960dd53b98c2a6501ce40170015d1
|
/qtls_input_data.R
|
d94579be1f70c1ea00b961f5b609f5a52b446e2d
|
[] |
no_license
|
Diennguyen8290/gtex_deconvolution
|
93fd08039b71baed4de0ba12563f5701593d8bb7
|
fc8f11eb988c51cddc3472f94990f2cfa7fae1d6
|
refs/heads/master
| 2022-02-19T11:00:42.997817
| 2019-09-17T20:18:27
| 2019-09-17T20:18:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
r
|
qtls_input_data.R
|
# Input paths and tuning constants for the QTL pipeline; this file is sourced
# by the analysis scripts and defines the names below as globals.
message("Loading input data...")
# External tool and input-file locations (cluster-specific absolute paths)
bcftools = "/frazer01/software/bcftools-1.9/bcftools"
vcftools = "/frazer01/software/vcftools-0.1.14/bin/vcftools"
rscript = "/frazer01/home/matteo/software/R-3.5.1/bin/Rscript"
ipscore_vcf_input = "/frazer01/projects/reference_files/cellType_Invariant/IPSCORE_WGS.biallelic.b37.vcf.gz"
gtex_vcf_input = "/frazer01/projects/GTEx_v7/decrypted/GenotypeFiles/phg000830.v1.GTEx_WGS.genotype-calls-vcf.c1/GTEx_Analysis_2016-01-15_v7_WholeGenomeSeq_635Ind_AllVar_QC_metrics.vcf.gz"
# Constants
qtl_distance = 1e6 # Distance threshold between each gene/peak and variants
maf_threshold = 0.01 # MAF threshold for variants used for QTL analysis
phenotype_min_value = 0.5 # Threshold to consider a gene/peak expressed
phenotype_min_samples = 0.1 # Fraction of samples that have expression above phenotype_min_value
# For QTL analysis, divided by assay (disabled LMM formula terms kept for reference):
#vars0_rna = c("gt" , "population1", "age_sample", "sex", paste("PC", 1:10, sep = ""), "(1|wgs_id)", "(1|family_id)") # list of variants for LMM formula
#vars1_rna = c("gt:population1", "gt:age_sample", "gt:population1:age_sample") # list of interaction terms to test vs LMM without interactions
|
b23cfe608ae0c72470fda9ed6c9e113246ba1811
|
55f40ba14ebc4cfe0e01023ec96cbf0550576e06
|
/bayeosSpreadsheet/R/spreadsheet.R
|
9fd8182ef230f3b44a72e0a1f25f348fc1f7c5ea
|
[] |
no_license
|
BayCEER/BayEOS-R
|
ee3d1039aca045454bf1d8c51944595a8b76aaa6
|
c7d4e5f93e0949c6d19a5a7e5ba35a69aab51adb
|
refs/heads/master
| 2022-12-10T12:05:08.935235
| 2022-12-08T12:53:44
| 2022-12-08T12:53:44
| 68,090,578
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,217
|
r
|
spreadsheet.R
|
# Came from RExcelML
# Deliberately disabled scaffolding (if(FALSE)): sketches S4 classes for
# OpenOffice workbooks/worksheets built on zip-archive entries, plus a
# names() method listing the sheet names. Kept for reference only.
if(FALSE) {
setClass("OOWorkbook", contains = "ZipArchiveEntry")
setClass("OOWorksheet", representation(content = "XMLInternalDocument", name = "ZipArchiveEntry"))
setClass("OOWorksheetFile", contains = "ZipArchiveEntry")
setMethod("names", "OOWorkbook",
function(x) {
doc = xmlParse(x[["content.xml"]])
unlist(getNodeSet(doc, "//table:table/@table:name", "table"))
})
#setMethod("[[", "Workbook"
}
# Read an OpenDocument spreadsheet (.ods) into R: parses content.xml from the
# zip archive, converts each <table:table> sheet via read.odsSheet(), and
# returns either a single data frame (when simplify = TRUE and there is only
# one non-empty sheet) or a named list of data frames, one per sheet.
#
# file             : path to the .ods file
# header           : treat the first row of each sheet as column names?
# simplify         : unwrap a single-sheet result from the list?
# stringsAsFactors : passed on to the per-sheet data.frame construction
# tz               : timezone used when converting date/time cells
read.ods =
function(file, header = TRUE, simplify = TRUE,
         doc = xmlParse(zipArchive(file)[["content.xml"]]),
         stringsAsFactors = TRUE,
         tz='')
{
  # defining package variable ROpenOfficeConversionTZ
  # NOTE(review): this writes a global via <<- as a side channel for the
  # date/time converters in Rtypes; consider threading tz through explicitly.
  ROpenOfficeConversionTZ<<-tz
  tb = getNodeSet(doc, "//table:table", "table")
  ans = lapply(tb, read.odsSheet, header = header, stringsAsFactors = stringsAsFactors)
  # Drop empty sheets (read.odsSheet returns NULL for them), keeping the
  # corresponding table nodes aligned for naming below.
  n = sapply(ans, is.null)
  if(any(n)) {
    ans = ans[!n]
    tb = tb[!n]
  }
  if(simplify && length(ans) == 1)
    return(ans[[1]])
  names(ans) = sapply(tb, xmlGetAttr, "name")
  ans
}
read.odsSheet =
#
# Convert one <table:table> sheet node into a data frame.
#
# We need to deal with blank rows and where cells are repeated.
# In progress
#
# tb is the table:table node.
#
# Returns NULL for a sheet with no value-bearing rows. Column types are
# derived from the office value-type attributes via getColType()/Rtypes.
#
function(tb, header = TRUE, stringsAsFactors = TRUE) # really a header?
{
  # Get num columns from the first row.
  numCols = xmlGetAttr(tb[[1]], "number-columns-repeated", 0, as.integer)
  # Get all rows which have a cell with a office:value entry. Otherwise
  # it is an empty row.
  rows = getNodeSet(tb, "./table:table-row[./table:table-cell[@office:value | @office:value-type]]",
                     OpenOfficeNamespaces[c("office", "table")])
  if(length(rows) == 0)
    return(NULL)
  rowNames = FALSE
  varNames = character()
  if(header) {
    # First value-bearing row supplies the column names.
    varNames = xmlSApply(rows[[1]], getCellValue)
    # if(length(varNames) && is.na(varNames[1])) {
    #   rowNames = TRUE
    #   varNames = varNames[-1]
    # }
    rows = rows[-1]
  }
  # This gets the types by row and this might be ragged, i.e. not by column
  # Now changed to expand the missing columns so won't be ragged.
  types = t(sapply(rows, function(x) unlist(xmlApply(x, getCellType))))
  # Now get all the cells, expanding the elements that are missing so all the
  # the result will be a matrix
  ans = t(sapply(rows,
                 function(x) {
                   unlist(xmlApply(x, getCellValue), recursive = FALSE)
                 }))
  # Columns that contain at least one non-NA value; all-NA columns are dropped.
  realCols = apply(ans, 2, function(x) any(!is.na(x)))
  if(!realCols[1]) {
    rowNames = FALSE
  }
  if(header) {
    # A header with an unnamed, populated first column indicates row names.
    if(is.na(varNames[1]) && realCols[1]) {
      rowNames = TRUE
    }
  }
  ans = ans[ , realCols ]
  types = types[, realCols]
  # Single remaining column: subsetting collapsed the matrix to a vector,
  # so convert it directly with the inferred type and return early.
  if(!is.matrix(ans)) {
    tp = getColType(types)
    return(if(is.function(tp))
             tp(ans)
           else
             as(ans, tp))
  }
  if(length(varNames))
    varNames = varNames[realCols]
  # This seems to go along rows, not columns
  tmp = lapply(seq(length = ncol(ans)),
               function(i) {
                 if(all(is.na(types[,i])))
                   return(NULL)
                 tp = unique(unlist(types[,i]))
                 if(is.null(tp))
                   return(NULL)
                 colType = getColType(tp)
                 if(is.function(colType))
                   colType(ans[,i])
                 else
                   as(unlist(ans[,i]), colType)
               })
  tmp = tmp[!sapply(tmp, is.null)]
  ans = as.data.frame(tmp, stringsAsFactors = stringsAsFactors)
  if(rowNames) {
    rownames(ans) = ans[,1]
    ans = ans[,-1]
    varNames = varNames[-1]
  }
  # Fall back to V1, V2, ... when the sheet had no header row.
  structure(ans, names = if(length(varNames))
                           as.character(varNames)
                         else
                           paste("V", seq(length = length(tmp)), sep = ""))
}
getCellType <-
#
# Return the office value-type of a cell node. A "number-columns-repeated"
# placeholder cell stands for several cells, so its type is replicated
# that many times; otherwise a single type (or NA) is returned.
#
function(node)
{
  repeatCount <- xmlGetAttr(node, "number-columns-repeated", 0)
  cellType <- xmlGetAttr(node, "value-type", NA)
  if (repeatCount > 0) {
    rep(cellType, repeatCount)
  } else {
    cellType
  }
}
getCellValue <-
#
# Get the cell value or a collection of NAs if this is a number-columns-repeated cell.
# The value is looked up in priority order: the "value" attribute, then
# "date-value", then "time-value", and finally the node's text content.
#
function(node)
{
  repeatCount <- xmlGetAttr(node, "number-columns-repeated", 0)
  cellValue <- NA
  # Try each value-carrying attribute in turn, stopping at the first hit.
  for (attrName in c("value", "date-value", "time-value")) {
    cellValue <- xmlGetAttr(node, attrName, NA)
    if (!is.na(cellValue))
      break
  }
  # No attribute carried the value: fall back to the node's text (if any).
  if (is.na(cellValue)) {
    cellValue <- if (xmlSize(node)) xmlValue(node) else as.character(NA)
  }
  if (repeatCount > 0) {
    rep(cellValue, repeatCount)
  } else {
    cellValue
  }
}
# Added conversion methods for time and datetime
# As there is no R time class time values will get
# converted to POSIXct of 1970-01-01 (S.Holzheu)
# Maps office value-type names to either an R class name (handed to as())
# or a converter function. The date/time converters read the global
# ROpenOfficeConversionTZ set by read.ods().
Rtypes = c("string" = "character",
"float" = "numeric",
"time" = function(x) { as.POSIXct(paste('1970-01-01',x), format="%Y-%m-%d PT%HH%MM%SS",tz=ROpenOfficeConversionTZ) },
"date" = function(x) { if(grepl('T',x[1])) as.POSIXct(x, format="%Y-%m-%dT%H:%M:%S",tz=ROpenOfficeConversionTZ) else as.Date(x,format="%Y-%m-%d") },
"percentage" = "numeric")
getColType <-
#
# Resolve a set of observed office value-types for one column into a single
# R conversion target from Rtypes (either a class name or a converter
# function). With mixed types, the one appearing earliest in Rtypes wins.
#
function(types)
{
  knownTypes <- types[!is.na(types)]
  if (length(knownTypes) == 1) {
    Rtypes[[knownTypes]]
  } else {
    positions <- match(knownTypes, names(Rtypes))
    Rtypes[[min(positions)]]
  }
}
|
42bfab76a271129480e020b90c0b537765877a6c
|
60a0c792683c89108386e03a67e3b84560d61a10
|
/R/install_pip.R
|
8473b64797b46338f88aaa9c42aa239421c7bccc
|
[] |
no_license
|
cran/mhcnuggetsr
|
ec51828c2ec775d837a705cff14dc2fe77e62c90
|
e781842724b9c950524f6e17c47717325cfca6c2
|
refs/heads/master
| 2023-01-20T20:23:10.719279
| 2020-11-04T10:30:02
| 2020-11-04T10:30:02
| 310,515,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
r
|
install_pip.R
|
#' Install pip.
#'
#' Downloads the official \code{get-pip.py} bootstrap script to a temporary
#' file and runs it (with \code{--user}) using the Python interpreter that
#' reticulate is configured to use.
#' @return Nothing
#' @examples
#' \dontrun{
#' install_pip()
#' }
#' @author Richèl J.C. Bilderbeek
#' @export
install_pip <- function() {
  bootstrap_script <- tempfile()
  utils::download.file(
    url = "https://bootstrap.pypa.io/get-pip.py",
    destfile = bootstrap_script,
    quiet = TRUE
  )
  # Run the bootstrap script with reticulate's Python; install per-user.
  python_binary <- reticulate::py_config()$python
  system2(
    python_binary,
    args = c(bootstrap_script, "--user"),
    stdout = FALSE
  )
}
|
5cbaa298693e57d482904f3fc42ab0e295e5d404
|
9f0cd62433a8c400113076c35454683a84b9fc64
|
/R/synth_diff.R
|
9186cf495515763b2386b0f31bcbae25eb18e9b4
|
[
"MIT"
] |
permissive
|
MatthieuStigler/multiDiff
|
6c88f683290b9802dc952df79747df1a39f49fa5
|
6c9aff0a7d77089f1b740762d2037e411e248a23
|
refs/heads/master
| 2023-09-03T00:54:24.226258
| 2023-08-23T12:57:52
| 2023-08-23T12:57:52
| 190,827,577
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,751
|
r
|
synth_diff.R
|
#' Run synthetic diff-diff
#'
#' Wraps \code{synthdid::synthdid_estimate} for an \code{mdd_dat} object:
#' builds the panel matrices from the stored variable names, estimates the
#' effect, and optionally attaches the unit/time weights back onto the data.
#'
#' @template param_mdd_dat
#' @param add_weights return the weights?
#'
#' @examples
#' if(require(synthdid)){
#' data('california_prop99')
#' mdd_california_prop99 <- mdd_data_format(california_prop99,
#' y_var = "PacksPerCapita",time.index = "Year",
#' treat = "treated", unit.index = "State")
#' mdd_synthdid(mdd_dat=mdd_california_prop99)
#' }
#' @export
mdd_synthdid <- function(mdd_dat, add_weights=FALSE){
  if(!requireNamespace("synthdid", quietly = TRUE)) stop("Please install `synthdid`")
  ## prep data
  # Pull the variable-name metadata stored on the mdd_dat object and build
  # symbols for tidy-eval use below.
  mdd_dat_slot <- intrnl_mdd_get_mdd_slot(mdd_dat)
  mdd_vars <- mdd_dat_slot$var_names
  tr_quo <- rlang::sym(mdd_vars$treat)
  time_quo <- rlang::sym(mdd_vars$time.index)
  unit_quo <- rlang::sym(mdd_vars$unit.index)
  # panel.matrices needs an integer treatment column.
  setup <- synthdid::panel.matrices(as.data.frame(mdd_dat) %>%
                                      mutate({{tr_quo}} := as.integer({{tr_quo}})),
                                    unit=mdd_vars$unit.index,
                                    time = mdd_vars$time.index,
                                    outcome = mdd_vars$y_var,
                                    treatment = mdd_vars$treat)
  ## estimate
  res <- synthdid::synthdid_estimate(setup$Y, setup$N0, setup$T0)
  ## re-estimate?
  if(add_weights){
    # Extend the pre-period (lambda) and control-unit (omega) weights with 1s
    # for treated periods/units, then join them onto the original data.
    W <- attributes(res)$weights
    W_time <- W$lambda
    W_time_full <- c(W_time, rep(1, length(mdd_dat_slot$treated_periods)))
    W_time_full_df <- tibble({{time_quo}} := mdd_dat_slot$periods, weight_time=W_time_full)
    W_units <- W$omega
    W_units_full <- c(W_units, rep(1, mdd_dat_slot$n_units-length(W_units)))
    W_units_full_df <- tibble({{unit_quo}} := rownames(setup$Y), weight_unit=W_units_full)
    ## add weights to data
    mdd_dat_full <- mdd_dat %>%
      left_join(W_time_full_df, by = mdd_vars$time.index) %>%
      left_join(W_units_full_df, by = mdd_vars$unit.index) %>%
      mutate(weights= .data$weight_unit * .data$weight_time)
    attr(mdd_dat_full, "mdd_dat_slot") <- mdd_dat_slot ## attributes are lost by mutate!!
    ## re-estimate
    # res <- mdd_DD_simple(mdd_dat_full, weights = mdd_dat_full$weights)
    attr(res, "mdd_data") <- mdd_dat_full
  }
  attr(res, "mdd_dat_slot") <- mdd_dat_slot
  # class(res) <- c(class(res), "mdd_synthdid") ## see issue https://github.com/synth-inference/synthdid/issues/100
  res
}
#' Extract the treatment-effect estimate as a plain double.
#' @param object a \code{synthdid_estimate} object
#' @param ... unused; kept to match the \code{coef()} generic signature
#' @export
coef.synthdid_estimate <- function(object, ...) {
  # The estimate is numeric at its core; strip attributes to a bare double.
  as.double(object)
}
## tidy method, see https://www.tidymodels.org/learn/develop/broom/
#' @importFrom generics tidy
#' @export
generics::tidy
#' Tidy a 'synthdid_estimate' object
#'
#' Returns a one-row data frame with the estimate, its standard error
#' (from the synthdid vcov method), a z statistic and two-sided normal
#' p-value, and optionally a normal-approximation confidence interval.
#'
#' @param x output from \code{\link{mdd_synthdid}} or synthdid::synthdid_estimate
#' @param conf.int,conf.level,... as standard
#' @param method method used in synthdid::vcov.synthdid_estimate
#' @export
tidy.synthdid_estimate <- function(x, conf.int=FALSE, conf.level=0.95, method='jackknife', ...){
  # Term name comes from the mdd metadata when present (NA otherwise).
  term <- attr(x, "mdd_dat_slot")$var_names$treat
  if(is.null(term)) term <- NA_character_
  coef <- as.double(x)
  # se = sqrt(synthdid::vcov.synthdid_estimate(x, method=method))
  se = sqrt(stats::vcov(x, method=method))
  res <- data.frame(term = term,
                    estimate = coef,
                    std.error =se,
                    statistic =coef/se,
                    p.value =2 * stats::pnorm(abs(coef/se), lower.tail = FALSE))
  if(conf.int) {
    # Symmetric normal-approximation CI at the requested level.
    left_Q <- (1-conf.level)/2
    quants <- stats::qnorm(c(left_Q, 1-left_Q))
    CI_LH <- rep(coef,2) + quants * se[[1]]
    res <- cbind(res,
                 data.frame(conf.low = CI_LH[1],
                            conf.high =CI_LH[2]))
  }
  res
}
# NOTE(review): interactive development scratchpad, disabled via if(FALSE);
# never evaluated when the file is sourced. Kept as worked usage examples.
if(FALSE){
  if(require(synthdid)){
    # data('california_prop99')
    n_distinct(california_prop99$State) # 39 states
    n_distinct(filter(california_prop99, treated==1)$State) # 1 treated state
    n_distinct(california_prop99$Year) # 31 years
    n_distinct(filter(california_prop99, treated==1)$Year) # 12 treated years, 19 untreated
    mdd_california_prop99 <- mdd_data_format(california_prop99, y_var = "PacksPerCapita",
                                             time.index = "Year", treat = "treated", unit.index = "State")
    mdd_california_prop99
    res <- mdd_synthdid(mdd_dat=mdd_california_prop99)
    res
    coef(res)
    tidy(res)
    ## General
    dat_sim_N1 <- mdd_data_format(sim_dat_common(N = 1000, timing_treatment = 5:10, perc_treat=0.001))
    dat_sim <- mdd_data_format(sim_dat_common(N = 1000, timing_treatment = 5:10, perc_treat=0.1))
    dat_sim
    # coef(mdd_DD_simple(dat_sim))
    res_gen <- mdd_synthdid(dat_sim)
    coef(res_gen)
    vcov(res_gen, method = "jackknife")
    tidy(res_gen)
    tidy(res_gen, conf.int = TRUE)
  }
}
|
b9f5c5e47eb03c4bbac6c8bdf32360fbf75ac185
|
b74674a867526ef01ad189adda2da14b8be4fa59
|
/R/account_account_timezone.R
|
ec949b1b8a20ac64b835b9dc930e910a2ddd50b3
|
[
"MIT"
] |
permissive
|
brandseye/brandseyer2
|
d33a84c7194b397068793da6a305154addf73aa2
|
59fab98f11b9b05cbe5cac6409b2453374b7267e
|
refs/heads/master
| 2021-09-15T17:17:38.686089
| 2021-09-01T19:04:48
| 2021-09-01T19:04:48
| 132,138,395
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,932
|
r
|
account_account_timezone.R
|
# Copyright (c) 2018, Brandseye PTY (LTD)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#' Get an account's timezone
#'
#' Returns a vector of timezones for the given accounts.
#' An account's data is reported in a particular timezone, set for that account.
#' All dates provided in filters are assumed to be given in that timezone.
#'
#' @param account An account object, or list of account objects.
#'
#' @return A character vector of timezone that account data is reported in.
#' @export
#' @author Constance Neeser
#'
#' @examples
#'
#' account("TEST01AA") %>%
#'   account_timezone()
#'
#' account("TEST01AA", "TEST02AA") %>%
#'   account_timezone()
account_timezone <- function(account) {
  # S3 generic: dispatches on the class of `account`.
  UseMethod("account_timezone")
}
#' @export
account_timezone.brandseyer2.account <- function(account) {
  # Single account: the timezone is stored directly on the object.
  account$timezone
}
#' @export
account_timezone.list <- function(account) {
  # List of accounts: recurse into the generic, collecting one string each.
  map_chr(account, account_timezone)
}
|
602b11e3dba58f66c26e647b00e204363318d9f1
|
eb2cd7490e2e4fc1c18fed6019c3888892b6bb0d
|
/Assignment_3/p7/GaussianProbability.R
|
9031cf352bc33d9604aa0d13127b649e0ae34737
|
[] |
no_license
|
ksingla025/Machine-Learning-Assignments
|
8f75e1a36de1cfdffcba7fe2826c559cb0171bb6
|
0ea8518af52b080ac6635782f1952437e2d3674d
|
refs/heads/master
| 2020-05-18T08:29:39.321179
| 2014-11-22T12:38:04
| 2014-11-22T12:38:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99
|
r
|
GaussianProbability.R
|
# Gaussian (normal) probability density of x for the given mean and
# standard deviation.
#
# x    : numeric vector of points at which to evaluate the density
# mean : mean of the distribution
# sd   : standard deviation of the distribution (> 0)
#
# Returns a numeric vector the same length as x.
#
# BUG FIX: the original used 1/(2*pi) as the normalising constant, which
# omits both the square root and the sd term; the correct density is
# 1/(sd*sqrt(2*pi)) * exp(-(x-mean)^2 / (2*sd^2)), i.e. dnorm().
GaussianProbability <- function(x, mean, sd) {
  (1 / (sd * sqrt(2 * pi))) * exp(-1 * ((x - mean)^2) / (2 * sd^2))
}
|
d26ce92a822121844f4b333ac928b9dc7105c9fa
|
e79df1c2164b29c127c2102bc4495b2384f3895e
|
/R/attribute_scrap.R
|
d10e1af5fa0d698f27373207e32614bcdb2d6638
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
feddelegrand7/ralger
|
1b263a3c15c3b208a96a633f30ec2433efb4aabc
|
57ebc6b07511675c23d91007e701a9722aeb86d4
|
refs/heads/master
| 2023-03-13T08:54:24.066033
| 2023-03-05T20:41:03
| 2023-03-05T20:41:03
| 241,394,878
| 162
| 18
|
NOASSERTION
| 2022-06-18T19:16:22
| 2020-02-18T15:21:00
|
R
|
UTF-8
|
R
| false
| false
| 2,648
|
r
|
attribute_scrap.R
|
# scraping attributes from HTML elements
#' Scraping attributes from HTML elements
#'
#' @description This function is used to scrape attributes from HTML elements
#'
#' @param link the link of the web page to scrape
#' @param node the HTML element to consider
#' @param attr the attribute to scrape
#' @param askRobot logical. Should the function ask the robots.txt if we're allowed or not to scrape the web page ? Default is FALSE.
#' @return a character vector.
#' @examples
#' \donttest{
#' # Extracting the web links within the World Bank research and publications page
#'
#' link <- "https://ropensci.org/"
#'
#' # scraping the class attributes' names from all the anchor
#'
#' attribute_scrap(link = link, node = "a", attr = "class")
#' }
#'
#' @export
#' @importFrom rvest html_nodes html_text html_attr %>%
#' @importFrom xml2 read_html
#' @importFrom crayon green
#' @importFrom crayon bgRed
#' @importFrom robotstxt paths_allowed
#' @importFrom curl has_internet
attribute_scrap <- function(link,
                            node,
                            attr,
                            askRobot = FALSE) {
  # Validate required arguments up front; all three must be character.
  if (any(missing(link), missing(node), missing(attr))) {
    stop("'link', 'node' and 'attr' are a mandatory argument")
  }
  if (any(!is.character(link), !is.character(node), !is.character(attr))) {
    stop("'link', 'node' and 'attr' must be provided as character vectors")
  }
  ################################### Ask Robot Part #################################################
  # Optional robots.txt check; only informs the user, never blocks scraping.
  if (askRobot) {
    if (paths_allowed(link) == TRUE) {
      message(green("the robot.txt doesn't prohibit scraping this web page"))
    } else {
      message(bgRed(
        "WARNING: the robot.txt doesn't allow scraping this web page"
      ))
    }
  }
  ########################################################################################################
  # Fetch each page and pull the requested attribute from the matching nodes;
  # the per-link results are flattened into a single character vector.
  tryCatch(
    expr = {
      links <- lapply(
        link,
        function(url) {
          url %>%
            read_html() %>%
            html_nodes(node) %>%
            html_attr(attr)
        }
      )
      return(unlist(links))
    },
    error = function(cond) {
      # Classify the failure: offline, bad URL, or anything else; always
      # return NA rather than propagating the error.
      if (!has_internet()) {
        message("Please check your internet connexion: ")
        message(cond)
        return(NA)
      } else if (grepl("current working directory", cond) ||
                 grepl("HTTP error 404", cond)) {
        message(paste0("The URL doesn't seem to be a valid one: ", link))
        message(paste0("Here the original error message: ", cond))
        return(NA)
      } else {
        message("Undefined Error: ", cond)
        return(NA)
      }
    }
  )
}
|
6995476c7344bf43dc9986c237c5b6a64207f5e0
|
7b8478fa05b32da12634bbbe313ef78173a4004f
|
/man/desc.Rd
|
59328fd4d2a15611055c4bb5411d3b7397966cfc
|
[] |
no_license
|
jeblundell/multiplyr
|
92d41b3679184cf1c3a637014846a92b2db5b8e2
|
079ece826fcb94425330f3bfb1edce125f7ee7d1
|
refs/heads/develop
| 2020-12-25T18:02:10.156393
| 2017-11-07T12:48:41
| 2017-11-07T12:48:41
| 58,939,162
| 4
| 1
| null | 2017-11-07T12:01:35
| 2016-05-16T14:30:38
|
R
|
UTF-8
|
R
| false
| true
| 451
|
rd
|
desc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{desc}
\alias{desc}
\title{Arrange specified column in descending order}
\usage{
desc(var)
}
\arguments{
\item{var}{Variable to arrange in descending order}
}
\description{
Arrange specified column in descending order
}
\examples{
\donttest{
dat <- Multiplyr (x=1:100, cl=2)
dat \%>\% arrange(desc(x))
dat \%>\% shutdown()
}
}
\seealso{
\code{\link{arrange}}
}
|
3a3c5f472af46c9acf1ee9426def44be3bf751f1
|
b528057b1cdf0d288405efa3ce0d42186191c7f0
|
/plan_prep_trendchanges.R
|
ce9d062e5d31af5103b019eec37e52489379b376
|
[] |
no_license
|
sarcusa/4ka_steph
|
6d909d7d73144099bf57ea7d68cca5fe3e14a160
|
4db07bd56edaba0eaeae3bc4c8a20a20756f6a59
|
refs/heads/master
| 2023-02-18T18:08:02.752606
| 2021-01-24T19:16:45
| 2021-01-24T19:16:45
| 281,261,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
plan_prep_trendchanges.R
|
# Fit the broken-stick trend-change model and cache the result to disk.
#
# input_data  : data passed through to BrokenStick()
# input_param : parameters passed through to BrokenStick()
#
# Side effect: saves the fitted object (as `analysis_3a`) to
# <createPaths()>/RData/TrendChanges.RData. Returns the fitted object.
#
# FIX: corrected "brocken" -> "broken" in the progress messages and removed
# the no-op `out3a = { ... }` wrapper around the save step.
TrendChanges_prep1 <- function(input_data, input_param){
  print("Start broken stick function")
  analysis_3a <- BrokenStick(input_data, input_param)
  datPath <- file.path(createPaths(), "RData", "TrendChanges.RData")
  save(analysis_3a, file = datPath)
  print("completed broken stick function")
  return(analysis_3a)
}
# Fit the broken-stick null model and cache the result to disk.
#
# input_data  : data passed through to BrokenStick_null()
# input_param : parameters passed through to BrokenStick_null()
#
# Side effect: saves the fitted object (as `analysis_3b`) to
# <createPaths()>/RData/BS_results_plusNull_complete.RData. Returns it.
#
# FIX: corrected "brockenstick" -> "brokenstick" in the progress messages and
# removed the no-op `out_3b = { ... }` wrapper around the save step.
TrendChanges_prep2 <- function(input_data, input_param){
  print("start brokenstick null function")
  analysis_3b <- BrokenStick_null(input_data, input_param)
  datPath <- file.path(createPaths(), "RData", "BS_results_plusNull_complete.RData")
  save(analysis_3b, file = datPath)
  print("completed brokenstick null function")
  return(analysis_3b)
}
|
f235dbf39bc33b615486cc45d4ea629eb3c52cfd
|
7a0db46e0d8207d2e7cdb1447a2ed2029d97f47d
|
/R/class_fuzzycluster.R
|
3455c071eb3474e318a2bfa8affd3f1923b71fb5
|
[] |
no_license
|
fauzipandya/advfclust
|
726571a0436befde2482b2c2d002c41bd0711f14
|
ebeaa97ce8ca4e8aeea10695b0ebb7c09ede25d6
|
refs/heads/master
| 2020-09-21T19:37:09.567968
| 2016-09-24T16:45:42
| 2016-09-24T16:45:42
| 66,117,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
class_fuzzycluster.R
|
#' Fuzzy Result
#' @import methods
#' @name fuzzycluster-class
#' @rdname fuzzycluster-class
#' @slot centroid centroid matrix
#' @slot distance distance matrix
#' @slot func.obj function objective
#' @slot call.func called function
#' @slot fuzzyfier fuzzyness parameter
#' @slot method.fuzzy method of fuzzy clustering used
#' @slot member membership matrix
#' @slot hard.label hard.label
#' @exportClass fuzzycluster
#' @include class_membership.R
# S4 result class for fuzzy clustering. The `member` and `hard.label` slots
# documented above are inherited from the parent "membership" class
# (class_membership.R); only the six slots below are declared here.
setClass("fuzzycluster",
         representation= representation(centroid="matrix",
                                        distance="matrix",
                                        func.obj="numeric",
                                        call.func="character",
                                        fuzzyfier="numeric",
                                        method.fuzzy="character"),
         contains = "membership"
)
|
06804f61a2ab51cee45e8eecf54121b51cff0daf
|
2949cf3cb789a42eafa00ba74f04938ed9cb6af0
|
/final version/train.R
|
c778a95d7582969babb659d41146c3d562418209
|
[] |
no_license
|
TZstatsADS/Fall2016-proj3-grp2
|
6611fdf0f9d2394d2754ccbea3c8101db85dd967
|
182a5c42254b83ac5e9259f61bda0fd76a327008
|
refs/heads/master
| 2021-05-01T04:33:27.680337
| 2016-11-02T21:13:53
| 2016-11-02T21:13:53
| 71,174,560
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,187
|
r
|
train.R
|
#########################################################
### Train a classification model with training images ###
#########################################################
### Author: Group 2
### Project 3
### ADS Fall 2016
train <- function(feature.adv, feature.baseline, label_train){
  ### Train a Gradient Boosting Model (GBM) using processed features from training images
  ### Input:
  ###  - processed features in Feature_eval.RData
  ###  - class labels for training images
  ### Output: two model object
  ### Returns a list with elements `baseline` (tuned GBM) and `advance`
  ### (tuned RBF SVM). Relies on helper scripts sourced from ./final version/.
  ### load libraries
  library("gbm")
  library("e1071")
  ### change names
  # Features arrive one column per sample; transpose to one row per sample.
  X.a <- t(feature.adv)
  X.b <- t(feature.baseline)
  y <- label_train
  ######################
  ### Baseline Model ###
  ### Train with gradient boosting model using feature.baseline
  ### Model selection with cross-validation
  source("./final version/cross_validation.R")
  source("./final version/train_gbm.R")
  source("./final version/test_gbm.R")
  # Choosing between different values of depth and nm for GBM
  depth_values <- seq(3, 9, 2)
  nm_values <- c(5, 10, 20)
  err.cv <- matrix(NA, length(depth_values), length(nm_values))
  K <- 5  # number of CV folds
  #we took a day to tune here
  # Grid search: CV error for every (depth, n.minobsinnode) pair.
  for(i in 1:length(depth_values)){
    cat("i=", i, "\n")
    d <- depth_values[i]
    for (j in 1:length(nm_values)){
      cat("j=", j, "\n")
      nm <- nm_values[j]
      par <- list(depth=d, n.minobsinnode=nm)
      err.cv[i, j] <- cv.function(X.b, y, par, K, train_gbm, test_gbm)
    }
  }
  # Pick the (row, col) of the minimum CV error and refit on all data.
  p.min <- which(err.cv == min(err.cv), arr.ind = TRUE)
  par.b.best <- list(depth = depth_values[p.min[1]],
                     n.minobsinnode = nm_values[p.min[2]])
  tm_train_gbm <- system.time(gbm_train <- train_gbm(X.b, y, par.b.best))
  #####################
  ### Advance Model ###
  ### Train with support vector machine using feature.adv
  ### Model selection with cross-validation
  source("./final version/train_svm.R")
  source("./final version/test_svm.R")
  # Choosing between different values of cost C for Linear SVM
  C <- c(1, 10, 100, 500, 1000)
  err_cv_svm_c <- rep(NA, length(C))
  K <- 5  # number of CV folds
  for(k in 1:length(C)){
    cat("k=", k, "\n")
    par <- list(kernel = "linear", cost = C[k])
    err_cv_svm_c[k] <- cv.function(X.a, y, par, K, train_svm, test_svm)
  }
  # Choose the best parameter value
  C_best <- C[which.min(err_cv_svm_c)]
  # Choosing between different values of gamma G for RBF SVM
  # (cost is fixed at C_best chosen above; gamma is tuned second.)
  G <- c(0.01, 0.001, 0.0001, 0.00001)
  err_cv_svm_g <- rep(NA, length(G))
  K <- 5  # number of CV folds
  for(k in 1:length(G)){
    cat("k=", k, "\n")
    par <- list(kernel = "radial", cost = C_best, gamma = G[k])
    err_cv_svm_g[k] <- cv.function(X.a, y, par, K, train_svm, test_svm)
  }
  # Choose the best parameter value
  G_best <- G[which.min(err_cv_svm_g)]
  # Two best parameters
  par.a.best <- list(kernel = "radial", cost = C_best, gamma = G_best)
  tm_train_svm <- system.time(svm_train <- train_svm(X.a, y, par.a.best))
  #################################
  #return two trained model objects
  # NOTE(review): the `ba.train =` name in return() is not part of the result;
  # the function returns the bare list with elements `baseline` and `advance`.
  return(ba.train = list(baseline = gbm_train, advance = svm_train))
}
|
fbaf401dbcd34dbe8dd97ec383c5baa8b9e7c780
|
a7961945baed475b1540ebc61a918a36159a5543
|
/cachematrix.R
|
a7db79924be47d1284bf1e7f089cddf16393450b
|
[] |
no_license
|
theodore-rice/ProgrammingAssignment2
|
7b0e36d5d0a35e121405d1c26d3a1012a54851b7
|
d89bd3a12c2c32b1e3f3fd1e2361591074b9dc25
|
refs/heads/master
| 2020-12-07T13:44:55.022049
| 2015-06-13T00:51:29
| 2015-06-13T00:51:29
| 37,348,525
| 0
| 0
| null | 2015-06-12T22:58:17
| 2015-06-12T22:58:17
| null |
UTF-8
|
R
| false
| false
| 1,192
|
r
|
cachematrix.R
|
## This pair of functions cache the inverse of a matrix, so that if the
## inverse is needed in subsequent computations, it does not need to be
## recomputed
## Construct a caching wrapper around a matrix: a list of four closures that
## 1) set the matrix (invalidating any cached inverse)
## 2) get the matrix
## 3) cache a computed inverse
## 4) retrieve the cached inverse (NULL until one has been stored)
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  set <- function(y) {
    # Replacing the matrix makes any previously cached inverse stale.
    x <<- y
    cachedInverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cachedInverse <<- inverse
  getinverse <- function() cachedInverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If an inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), cached, and returned.
##
## x   : the list produced by makeCacheMatrix()
## ... : further arguments forwarded to solve()
##
## BUG FIX: the original accepted `...` but called solve(matrix) without
## forwarding it, silently dropping any extra arguments.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached value for the inverse")
    return(inv)
    ##function terminates
  }
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinverse(inverse)
  inverse
}
|
744615c2fa274b79f2bff7500f057de05ff2c9bd
|
b5b43e544d901665cc2a2ff2518fff5b44d882a0
|
/man/compare_title_paper.Rd
|
248de1cb4464c44c54b97d4b3fa33354ff73207b
|
[] |
no_license
|
jamielatham15/bibliographica
|
14b894fb16403a22051903598fd2b443709fdb6d
|
62bb9feb231c1d6b93960ced52404bcfb496252e
|
refs/heads/master
| 2021-01-23T01:51:17.343584
| 2017-03-03T11:14:56
| 2017-03-03T11:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 778
|
rd
|
compare_title_paper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_title_paper.R
\name{compare_title_paper}
\alias{compare_title_paper}
\title{Compare Title Count and Paper Consumption}
\usage{
compare_title_paper(x, field, selected = NULL, plot.mode = "text")
}
\arguments{
\item{x}{data frame}
\item{field}{Field to analyze}
\item{selected}{Limit the analysis on selected entries}
\item{plot.mode}{"text" or "point"}
}
\value{
List:
\itemize{
\item{plot}{ggplot object}
\item{table}{summary table}
}
}
\description{
Compare title count and paper consumption for selected field.
}
\examples{
\dontrun{compare_title_paper(df, "author")}
}
\author{
Leo Lahti \email{leo.lahti@iki.fi}
}
\references{
See citation("bibliographica")
}
\keyword{utilities}
|
edb6e26fc6f15ebcea0a3bfc8f509059e44b2072
|
edaca2016899ea5755eef108898e05223a6094ff
|
/TempModelFunctions.R
|
2ad8625a4822c9af60a42ae685ded20d101aad73
|
[] |
no_license
|
DCBraun/Temperature-Modelling
|
e2a9464c9e8bbdc7031c46c673d22dd08a2365ec
|
cf4e1519bcd1eed304ca776a59e1789556be0928
|
refs/heads/master
| 2020-04-09T18:37:11.255255
| 2015-02-06T23:14:16
| 2015-02-06T23:14:16
| 31,793,340
| 0
| 1
| null | 2015-03-06T23:11:03
| 2015-03-06T23:11:02
| null |
UTF-8
|
R
| false
| false
| 3,148
|
r
|
TempModelFunctions.R
|
############################
# TempModelFunctions.R
# Project: TemperatureModelling
# File for the functions that appear in the temperature modelling files
# Not all of these functions are currently used in the TempModel.r code
# Some were used and have since been removed, others were created but never used.
# Created January 29, 2015
# A Putt
#############################
########## HarmFunc ##########
# Create a function for getting the harmonics
# Data=the data to be modelled
# Period is 365
HarmFunc <- function(data) {
cosine <- cos(2*pi*data$dateindex/365)
sine <- sin(2*pi*data$dateindex/365)
cosine2 <- cos(2*pi*data$dateindex*2/365)
sine2 <- sin(2*pi*data$dateindex*2/365)
cosine3 <- cos(2*pi*data$dateindex*3/365)
sine3 <- sin(2*pi*data$dateindex*3/365)
return(list(cosine=cosine,sine=sine,cosine2=cosine2,sine2=sine2,cosine3=cosine3,sine3=sine3))
}
########## LmPlotFunc ##########
# Create a function for plotting the linear model results (only shows one fit, but easily modified)
# data=the data used for the lm; yvals=the variable modelled against the index; lmdata=the linear model
# yLab=y-axis label (expression or string); title=suffix for the plot title
# Side effect only: opens a new device and draws; returns nothing useful.
# Reusable axis labels for temperature and discharge plots.
ylabT <- expression(paste("Temperature (",degree,"C)"))
ylabD <- paste("Discharge m3/s")
LmPlotFunc <- function(data,yvals,lmdata,yLab,title) {
  # NOTE(review): windows() only works on Windows; use dev.new() for portability.
  windows()
  par(mfrow=c(1,1))
  par(oma = rep(2, 4))
  par(mar = c(4, 5, 1, 1))
  # Raw series in grey, fitted harmonic model overlaid as a dashed line.
  plot(data$dateyear, yvals, type = "l", xlab = "Time (Days)", ylab = yLab, cex.lab = 1.6, lwd = 2, col = "darkgrey")
  mtext(side=3, line=1.5, cex=1.6, sprintf("Step 1: Harmonic Model Fits: %s",title))
  lines(data$dateyear, predict(lmdata,newdata=data.frame(dateindex=data$dateindex)), lwd = 3, lty = 5)
}
########## Model Diagnostics ##########
# Create a function to test model fit
# The stationarity test would probably be best done with plots
# Draws a 2x3 panel of residual diagnostics (partial/serial ACF, residuals vs
# fitted, QQ plot, histogram) and returns the Box test plus the serial ACF.
ModelDiagnostics <- function(data,lmmodel,title) {
  # NOTE(review): windows() only works on Windows; use dev.new() for portability.
  windows()
  # NOTE(review): Box.test's null is independence of the residuals, so a low
  # p-value indicates remaining autocorrelation, not stationarity per se.
  Stationarity <- Box.test(residuals(lmmodel),lag=5) # low p value indicates stationarity
  par(mfrow=c(2,3))
  par(oma = rep(2, 4))
  Predicted <- predict(lmmodel)
  Residuals <- data$watertemp-Predicted
  PartialAutocorrelation <- acf(Residuals,type="partial",na.action=na.pass) # Most of my models have significant autocorrelation at 1
  SerialAutocorrelation <- acf(Residuals,na.action=na.pass) # Most of my models have significant autocorrelation at 1
  ResidPlot <- plot(Residuals,Predicted)
  NormalQQPlot <- qqnorm(Residuals)
  Hist <- hist(Residuals)
  mtext(side=3, title, outer=TRUE)
  FuncOut <- list(Stationarity=Stationarity,SerialAutocorrelation=SerialAutocorrelation)
  return(FuncOut)
}
########## tslag ##########
# Shift a vector forward by `lagNum` positions, padding the front with NA so
# the result has the same length as the input.
#
# var    : vector to lag
# lagNum : number of positions to lag by (default 1)
#
# FIX: the original indexed with 1:n, which for a zero-length input yields
# index c(1, 0) and returns a spurious length-1 NA; seq_along() handles the
# empty case correctly (returns an empty vector).
tslag <- function(var, lagNum=1) {
  padded <- c(rep(NA, lagNum), var)
  padded[seq_along(var)]
}
########## NormalTest ##########
# Create a function to test for normality in a variable
# Draws a histogram and normal QQ plot side by side in a new device; the
# Shapiro-Wilk test is left commented out. Side effect only; returns nothing.
NormalTest <- function(variable,title) {
  # NOTE(review): windows() only works on Windows; use dev.new() for portability.
  windows()
  #pval <- shapiro.test(variable)$p.value
  #print(sprintf("Shapiro test p-value: %s", pval))
  #print("Reject null hypothesis of normality if p <= 0.1")
  par(mfrow=c(1,2))
  par(oma = rep(2, 4))
  hist(variable)
  qqnorm(variable)
  mtext(side=3,title,outer=TRUE)
}
|
5cd1fbefd051c508e9bd2d42f81680341b347eb0
|
e5e7b46cb9bcf6954614d52bba8e1408b175c000
|
/plot4.R
|
7bef02af773d49af32530b10f38106854be25702
|
[] |
no_license
|
MariaMontesdeOca/ExData_PA1
|
596d6198ea59f984ff076fdb759617d080661f98
|
1be0a18c662c6daa7aac88bba4d97ff45689bba6
|
refs/heads/master
| 2020-06-04T16:52:02.780131
| 2015-04-12T12:12:58
| 2015-04-12T12:12:58
| 33,812,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,537
|
r
|
plot4.R
|
# Exploratory-data-analysis script: builds plot4.png, a 2x2 panel of power
# consumption plots for 2007-02-01 and 2007-02-02.
#Read the data into R
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,colClasses=c(rep("character",2),rep("numeric",7)),na="?")
#Convert the Date column from character to Date format
data$Date<-as.Date(data$Date,"%d/%m/%Y")
#Select only the dates that we're gonna plot
refineddata<-subset(data,Date=="2007-02-02"|Date=="2007-02-01")
#Creating the plot4.png file
##Creating the file and setting the format of the graph
png(file="plot4.png",width=480,height=480,units="px",bg="white")
par(mfrow=c(2,2),mar=c(5,4,2,1))
## Creating plot 4.1 (plot2 modifiying ylab)
plot(refineddata$Global_active_power,type="l",ylab="Global Active Power",xaxt="n",xlab="")
axis(1,at=c(0,1500,nrow(refineddata)),labels=c("Thu","Fri","Sat"))
## Creating plot 4.2
plot(refineddata$Voltage,type="l",ylab="Voltage",xlab="datetime",xaxt="n")
axis(1,at=c(0,1500,nrow(refineddata)),labels=c("Thu","Fri","Sat"))
## Creating plot 4.3 (plot3 modifiying bty="n")
plot(refineddata$Sub_metering_1,type="l",ylab="Energy sub metering",xaxt="n", xlab="")
lines(refineddata$Sub_metering_2,col="red")
lines(refineddata$Sub_metering_3,col="blue")
axis(1,at=c(0,1500,nrow(refineddata)),labels=c("Thu","Fri","Sat"))
legend("topright",bty="n",lty=1,lwd=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
##Creating plot 4.4
plot(refineddata$Global_reactive_power,ylab="Global_reactive_power",type="l",xlab="datetime",xaxt="n")
axis(1,at=c(0,1500,nrow(refineddata)),labels=c("Thu","Fri","Sat"))
##Closing the .png file
dev.off()
|
6f94b0a18304420dcc680aea2864e1f8350e09b6
|
08b7728c2120413ad2266a5e7ae3403e6b67471b
|
/cryptoJNS/man/affineCipher.Rd
|
94b2e006bc8ab923af821101f2be1e6ceee80587
|
[] |
no_license
|
JamesSolum/Codes-and-Encryption
|
8241438fa8fa6c1e36ecfc293d6c1bf02cc3f36b
|
59df5d2ca7505795bd876db2b532351a28a9a122
|
refs/heads/master
| 2020-05-29T21:04:36.541728
| 2017-02-21T00:09:00
| 2017-02-21T00:09:00
| 82,615,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 755
|
rd
|
affineCipher.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/affineCipher.R
\name{affineCipher}
\alias{affineCipher}
\title{Encrypt a string using a shift cipher}
\usage{
affineCipher(plainText, stretch, shift)
}
\arguments{
\item{plainText}{A string of lowercase letters}
\item{stretch}{An integer used to stretch the function.}
\item{shift}{An integer (mod 26) used to shift.}
}
\value{
An encrypted (or decrypted) string, where each letter has been shifted by \code{shift} and stretched by \code{stretch} places.
}
\description{
Encrypt a string using a shift cipher
}
\examples{
plainText <- "dog"
cipherText <- affineCipher(plainText,3,7)
print(cipherText)
# Note: you cannot decrypt a cipherText using the affineCipher function
}
|
9d2a2a8d5295fc3a7b39b614271bca5b98fa8fa9
|
6037c26c14d146cd9bcaf612eea52aeec10d48d1
|
/man/merge.Rd
|
39a281d83e8ecc8be6021bf4303655f63c33dce7
|
[
"MIT"
] |
permissive
|
HaidYi/DASC
|
ed1860cb63d1ddb039e7f1a727cc65e4f71e3e12
|
5f36f7aa02d810be0acde2f90efafbb116d9affb
|
refs/heads/master
| 2021-01-18T17:50:31.758972
| 2018-06-01T09:00:36
| 2018-06-01T09:00:36
| 114,265,991
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 795
|
rd
|
merge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DASC.R
\name{merge}
\alias{merge}
\title{Combine two trees into one}
\usage{
merge(x, y, X)
}
\arguments{
\item{x}{the index of the node}
\item{y}{the index of the node}
\item{X}{the saved vector with the information of the parent of every node}
}
\value{
\code{X} A updated X vector with updates on father of every node
}
\description{
Combine two trees into one
}
\details{
During the traversal of the graph matrix, merge function joins two
disjoint sets into a single subset. It is a union step of Disjoint-set
algorithm by Bernard A. Galler and Michael J. Fischer. For further details,
please refer to:
\url{https://en.wikipedia.org/wiki/Disjoint-set_data_structure}
}
\author{
Haidong Yi, Ayush T. Raman
}
|
9a08eb9af0d49467f25cd0e0c8dfe87c0ad9d5db
|
3a0f2d5f03f7b196b86793938c1d892d629cac70
|
/database/data/2016/player-stats/wk1-12/16_playerJoin.R
|
d4fe9f3ef4f2cbee97d25345a5711adf0de55e65
|
[] |
no_license
|
designer-citrusbits/Fantasy-Football-Database
|
db80d2e7bd5d6f6dc590fe0c5df5e4895bfcc0d7
|
0bc1343432cb8e89f8be7dc2932d6c57cf4f3fa9
|
refs/heads/master
| 2020-04-19T23:36:06.673291
| 2017-03-31T17:18:48
| 2017-03-31T17:18:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,581
|
r
|
16_playerJoin.R
|
# File: 16_playerJoin.R
# Description: combines the per-category player stat CSVs (defense, kicking,
#              offense, returns) into a single wide table, one row per player
# Date: 1 December 2016
# Author: Mark Eidsaune

library("RCurl")
library("plyr")
library("dplyr")

# Defensive stats
durl <- "https://raw.githubusercontent.com/edavis25/Fantasy-Football-Database/master/database/data/2016/player-stats/wk1-12/11-29-2016-defense.csv"
defense <- read.csv(text = getURL(durl))
names(defense) <- c("Name", "Tm", "DefInt","DefIntYds", "DefIntTd", "DefIntLng",
                    "DefSk", "DefTkl", "DefAst", "DefFR", "DefFRYrds", "DefFRTd",
                    "DefFF", "URL")

# Kicking stats (punting columns are dropped)
kurl <- "https://raw.githubusercontent.com/edavis25/Fantasy-Football-Database/master/database/data/2016/player-stats/wk1-12/11-29-2016-kicking.csv"
kicks <- read.csv(text = getURL(kurl))
kicks <- kicks[, c(1, 2, 3, 4, 5, 6, 11)]
names(kicks) <- c("Name", "Tm", "XPMade", "XPAtt", "FGMade", "FGAtt", "URL")

# Offensive stats
ourl <- "https://raw.githubusercontent.com/edavis25/Fantasy-Football-Database/master/database/data/2016/player-stats/wk1-12/11-29-2016-offense.csv"
offense <- read.csv(text = getURL(ourl))
names(offense) <- c("Name", "Tm", "PassCmp", "PassAtt", "PassYds", "PassTD",
                    "Interceptions", "SkTaken", "SkYds", "PassLng", "QbRating", "RushAtt",
                    "RushYds", "RushTd", "RushLng", "RecTgt", "Receptions", "RecYds",
                    "RecTd", "RecLng", "Fmb", "FL", "URL")

# Kick/punt return stats
rurl <- "https://raw.githubusercontent.com/edavis25/Fantasy-Football-Database/master/database/data/2016/player-stats/wk1-12/11-29-2016-returns.csv"
returns <- read.csv(text = getURL(rurl))
names(returns) <- c("Name", "Tm", "KickRet", "KickRetYds", "KickYdsRet", "KickRetTD",
                    "KickRetLng", "PuntRet", "PuntRetYds","PuntYdsReturn", "PuntRetTd", "PuntRetLng",
                    "URL")

# Stack all categories; players missing from a category get NA, replaced by 0
allStats <- rbind.fill(defense, kicks, offense, returns)
allStats[is.na(allStats)] <- 0

# Final column order.
# Fixed: `DefInt` was listed twice in the original select() call.
allStats <- allStats %>% select(Name, URL, PassCmp, PassAtt, PassYds, PassTD,
                                Interceptions, SkTaken, SkYds, PassLng, QbRating,
                                RushAtt, RushYds, RushTd, RushLng, RecTgt, Receptions,
                                RecYds, RecTd, RecLng, Fmb, FL, DefInt, DefIntYds,
                                DefIntTd, DefIntLng, DefSk, DefTkl, DefAst, DefFR, DefFRYrds,
                                DefFRTd, DefFF, KickRet, KickRetYds, KickYdsRet, KickRetTD,
                                KickRetLng, PuntRet, PuntRetYds, PuntYdsReturn, PuntRetTd,
                                PuntRetLng, XPMade, XPAtt, FGMade, FGAtt)
|
593d86a70e6a377b0a56d38be9439ead1b3cb4a5
|
375250017804497a9830ad9244a9a4140f9dccef
|
/inversion/deprecated/inversion_ps.multiPFT.US-WCr.multivariate.prior.R
|
531eb2606685f88344103aa16efbf6605ce2d2da
|
[] |
no_license
|
serbinsh/edr-da
|
21d0aa8f45622707443a60fa5845fc5ef4e8b722
|
462ffc9104532b94139c072c59aad5b2383cda1e
|
refs/heads/multi-pft
| 2021-01-12T13:12:36.735625
| 2017-10-03T21:33:54
| 2017-10-03T21:33:54
| 72,149,480
| 2
| 0
| null | 2017-10-03T21:33:55
| 2016-10-27T21:24:19
|
R
|
UTF-8
|
R
| false
| false
| 10,404
|
r
|
inversion_ps.multiPFT.US-WCr.multivariate.prior.R
|
#--------------------------------------------------------------------------------------------------#
#
#
# S. Serbin & A. Shiklomanov
#--------------------------------------------------------------------------------------------------#
#---------------- Close all devices and delete all variables. -------------------------------------#
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
dlm <- .Platform$file.sep # <--- What is the platform specific delimiter?
## Load functions
source("common.R")
library(mvtnorm)
## Load priors
load(file = normalizePath('priors/stan_priors_sun.RData')) # for sun exposed leaves only
priors <- priors_sun$means
PEcAn.utils::logger.info(" *** Multi PFT *** Running with sun exposed priors only")
edr.exe.name <- 'ed_2.1'
#load(file = normalizePath('priors/prior_all.RData')) # based on leaves from sun/shaded positions
#priors <- priors_all$means
#PEcAn.utils::logger.info(" *** Single PFT *** Running with priors using all leaves (sun and shade)")
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Set user email address
email_add <- "sserbin@bnl.gov"
## Setup tag
dttag <- strftime(Sys.time(), "%Y%m%d_%H%M%S")
## Define PFT and canopy structure
#pft <- c("temperate.Late_Hardwood","temperate.North_Mid_Hardwood")
pft <- list("temperate.Early_Hardwood","temperate.North_Mid_Hardwood","temperate.Late_Hardwood")
dens <- 0.015
dbh <- 20 # 20, 30 or 40
lai <- getvar("LAI_CO", dbh, pft)
num.cohorts <- 3
multi.pft <- "EMLH"
data_dir <- normalizePath(paste0('../run-ed/',num.cohorts,'cohort/dens',dens,'/dbh',dbh,'/',multi.pft))
paths <- list(ed2in = file.path(data_dir, 'ED2IN'),
history = file.path(data_dir, 'outputs'))
#--------------------------------------------------------------------------------------------------#
## Setup PROSPECT for psuedo data
prospect_ver <- 5
pp <- list()
pp[["temperate.Early_Hardwood"]] <- c("N" = 1.4, "Cab" = 30, "Car" = 8, "Cw" = 0.01, "Cm" = 0.01)
pp[["temperate.Late_Hardwood"]] <- c("N" = 1.95, "Cab" = 65, "Car" = 8, "Cw" = 0.01, "Cm" = 0.01)
pp[["temperate.North_Mid_Hardwood"]] <- c("N" = 1.8, "Cab" = 45, "Car" = 8, "Cw" = 0.01, "Cm" = 0.01)
spectra_list <- list()
spectra_list[["temperate.Early_Hardwood"]] <- prospect(pp$temperate.Early_Hardwood, prospect_ver, TRUE)
spectra_list[["temperate.Late_Hardwood"]] <- prospect(pp$temperate.Late_Hardwood, prospect_ver, TRUE)
spectra_list[["temperate.North_Mid_Hardwood"]] <- prospect(pp$temperate.North_Mid_Hardwood, prospect_ver, TRUE)
## Setup EDR wavelengths and run date
par.wl = 400:2499
nir.wl = 2500
datetime <- ISOdate(2004, 07, 01, 16, 00, 00)
#--------------------------------------------------------------------------------------------------#
## Setup output
main_out <- paste("PDA", format(Sys.time(), format="%Y%m%d_%H%M%S"), sep = "_")
if (! file.exists(main_out)) dir.create(main_out,recursive=TRUE)
PEcAn.utils::logger.info(paste0("Running inversion in dir: ",main_out))
#--------------------------------------------------------------------------------------------------#
## Output directory function
# Build the per-run output directory path: <main_out>/inversion_prior.<dttag>.<runID>
# Relies on the globals `main_out` and `dttag` set earlier in the script.
outdir_path <- function(runID) {
  sprintf("%s/inversion_prior.%s.%s", main_out, dttag, runID)
}
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
# Setup the output directories
# Set up a fresh EDR output directory for run `inputs$runID`, link the ED
# executable/inputs into it, and perform an initial EDR call with an empty
# trait list per PFT (EDR defaults).
#
# Relies on globals defined earlier in the script: pft, paths, spectra_list,
# par.wl, nir.wl, datetime, edr.exe.name.
# Returns the albedo produced by EDR.
run_first <- function(inputs) {
  outdir <- outdir_path(inputs$runID)
  dir.create(outdir, showWarnings = FALSE)
  try_link <- link_ed(outdir)
  # One empty trait list per PFT -- EDR falls back to its defaults
  trait.values <- list()
  for (i in seq_along(pft)) {
    trait.values[[pft[[i]]]] <- list()
  }
  albedo <- EDR(paths = paths,
                spectra_list = spectra_list,
                par.wl = par.wl,
                nir.wl = nir.wl,
                datetime = datetime,
                trait.values = trait.values,
                edr.exe.name = edr.exe.name,
                output.path = outdir)
  return(albedo)
}
# Create initial output directory
first_run <- run_first(list(runID = 0))
#--------------------------------------------------------------------------------------------------#
print(head(first_run))
print(tail(first_run))
print(range(first_run))
# Split the flat 21-element parameter vector into the structure EDR expects:
# elements 1-15 hold five PROSPECT parameters per PFT (Early, North-Mid,
# Late, in that order); elements 16-21 hold orient/clumping pairs for the
# same three PFTs.
extract_params <- function(params) {
  pfts <- c("temperate.Early_Hardwood",
            "temperate.North_Mid_Hardwood",
            "temperate.Late_Hardwood")
  prospect.params <- list()
  trait.values <- list()
  for (k in seq_along(pfts)) {
    prospect.params[[pfts[k]]] <- params[(5 * k - 4):(5 * k)]
    base <- 13 + 2 * k  # orient at base + 1, clumping at base + 2
    trait.values[[pfts[k]]] <- list(orient_factor = params[base + 1],
                                    clumping_factor = params[base + 2])
  }
  list(prospect.params = prospect.params, trait.values = trait.values)
}
# Objective function for the inversion: run EDR with the flat parameter
# vector `param` (parsed by extract_params()) and return the simulated
# albedo spectrum.
#
# NOTE(review): edr.exe.name is hard-coded to "ed_2.1" here rather than
# using the global `edr.exe.name` set at the top of the script -- confirm
# this is intended.
invert_model <- function(param, runID = 0) {
  outdir <- outdir_path(runID)
  paths_run <- list(ed2in = NA, history = outdir)
  # Parse parameters into per-PFT PROSPECT parameters and trait values
  pars_list <- extract_params(param)
  spectra_list <- lapply(pars_list$prospect.params, prospect, version = 5, include.wl = TRUE)
  trait.values <- pars_list$trait.values
  albedo <- EDR(spectra_list = spectra_list,
                trait.values = trait.values,
                paths = paths_run,
                par.wl = par.wl,
                nir.wl = nir.wl,
                datetime = datetime,
                edr.exe.name = "ed_2.1",
                output.path = outdir,
                change.history.time = FALSE)
  # Create quick figure (disabled)
  #waves <- seq(400,2500,1)
  #png(paste(outdir,"/",'simulated_albedo.png',sep="/"),width=4900, height =2700,res=400)
  #par(mfrow=c(1,1), mar=c(4.3,4.5,1.0,1), oma=c(0.1,0.1,0.1,0.1)) # B L T R
  #plot(waves,unlist(albedo)*100,type="l",lwd=3,ylim=c(0,60),xlab="Wavelength (nm)",ylab="Reflectance (%)",
  #cex.axis=1.5, cex.lab=1.7,col="black")
  #rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col =
  #"grey80")
  #lines(waves,unlist(albedo)*100,lwd=3, col="black")
  #dev.off()
  return(albedo)
}
# Simulate observations
inits <- c(unlist(pp[c('temperate.Early_Hardwood',
'temperate.North_Mid_Hardwood',
'temperate.Late_Hardwood')]),
0.5, 0.5, # Early orient, clumping
0.5, 0.5, # Mid orient, clumping
0.5, 0.5) # Late orient, clumping
obs <- invert_model(inits, runID = 0) + generate.noise()
# Joint log-prior over the 21-element parameter vector: a multivariate
# normal (from the sun-leaf priors, `priors_sun`) on each PFT's five
# PROSPECT parameters, plus uniform priors on each PFT's orient factor
# (0..1) and clumping factor (-1..1).
prior <- function(params) {
  pfts <- c("temperate.Early_Hardwood",
            "temperate.North_Mid_Hardwood",
            "temperate.Late_Hardwood")
  # Multivariate-normal log-density of each PFT's PROSPECT block
  prospect_prior <- 0
  for (k in seq_along(pfts)) {
    idx <- (5 * k - 4):(5 * k)
    prospect_prior <- prospect_prior +
      dmvnorm(params[idx],
              priors_sun$means$M[1:5, pfts[k]],
              priors_sun$means$Sigma[1:5, 1:5, pfts[k]],
              log = TRUE)
  }
  # Uniform log-priors on the canopy-structure traits
  traits_prior <- 0
  for (k in seq_along(pfts)) {
    base <- 13 + 2 * k  # orient at base + 1, clumping at base + 2
    traits_prior <- traits_prior +
      dunif(params[base + 1], 0, 1, log = TRUE) +
      dunif(params[base + 2], -1, 1, log = TRUE)
  }
  prospect_prior + traits_prior
}
# Starting point for the MCMC chains: low PROSPECT values
# (N, Cab, Car, Cw, Cm) repeated for the three PFTs, followed by
# zeroed orient/clumping factors for each PFT (21 values total).
init_function <- function() {
  c(rep(c(1.1, 10, 2, 0.01, 0.01), 3), rep(0, 6))
}
# Test that prior function generates meaningful values
prior(inits)
prior(init_function())
param.mins <- c(1, 0, 0, 0, 0,
1, 0, 0, 0, 0,
1, 0, 0, 0, 0,
0, 0,
0, 0,
0, 0)
param.maxs <- c(Inf, Inf, Inf, Inf, Inf,
Inf, Inf, Inf, Inf, Inf,
Inf, Inf, Inf, Inf, Inf,
1, 1,
1, 1,
1, 1)
## Setup PDA options
invert.options <- list(model = invert_model,
run_first = run_first,
inits.function = init_function,
prior.function = prior,
param.mins = param.mins,
param.maxs = param.maxs,
nchains = 5,
ngibbs.step = 2000,
adapt = 1000,
do.lsq = FALSE)
samples <- invert.auto(observed = obs,
invert.options = invert.options,
parallel = TRUE,
save.samples = file.path(main_out, 'test_samples_ongoing.rds'))
# Persist the full MCMC sample object for downstream analysis.
# Fixed: the original call was missing its closing parenthesis, which made
# the whole script fail to parse.
saveRDS(samples, file = file.path(main_out, 'PDA_samples_output.rds'))
# Post-processing: burn-in removal and diagnostic plots.
# NOTE(review): `runtag` is never defined in this script (perhaps `dttag`
# was intended), and `input.pda.data` is never defined (perhaps `samples`);
# as written the png() filenames and the deviance/n_eff plots below will
# fail -- confirm upstream.
samples.bt <- PEcAn.assim.batch::autoburnin(samples$samples)
samples.bt <- PEcAn.assim.batch::makeMCMCList(samples.bt)
# Trace plots of the burned-in chains
par(mfrow=c(1,1), mar=c(2,2,0.3,0.4), oma=c(0.1,0.1,0.1,0.1)) # B, L, T, R
png(paste0(main_out,"/",paste("trace", runtag, "png", sep = ".")), width = 1500, height = 1600, res=150)
plot(samples.bt)
dev.off()
# Pairwise posterior scatter plots
rawsamps <- do.call(rbind, samples.bt)
par(mfrow=c(1,1), mar=c(2,2,0.3,0.4), oma=c(0.1,0.1,0.1,0.1)) # B, L, T, R
png(paste0(main_out,"/",paste("pairs", runtag, "png", sep = ".")), width = 1500, height = 1600, res=150)
pairs(rawsamps)
dev.off()
# Deviance traces
par(mfrow=c(1,1), mar=c(2,2,0.3,0.4), oma=c(0.1,0.1,0.1,0.1)) # B, L, T, R
png(paste0(main_out,"/",paste("deviance", runtag, "png", sep = ".")), width = 1500, height = 1600, res=150)
plot(PEcAn.assim.batch::makeMCMCList(input.pda.data$deviance))
dev.off()
# Effective sample sizes
par(mfrow=c(1,1), mar=c(2,2,0.3,0.4), oma=c(0.1,0.1,0.1,0.1)) # B, L, T, R
png(paste0(main_out,"/",paste("n_eff", runtag, "png", sep = ".")), width = 1500, height = 1600, res=150)
plot(PEcAn.assim.batch::makeMCMCList(input.pda.data$n_eff_list))
dev.off()
|
b327594281849146e551e25b8eb18f4fe44f5d4e
|
cdccaa7a602c7d61108abdbb27833d33376e6fca
|
/R 프로그래밍 기초/R Basic(3) - String Function.R
|
40204e44108e388a388e57b51c8d8af9248c9c54
|
[] |
no_license
|
JEONSUN/Jeon-S_R
|
0509766421d1cc747754d7bf9315964a01978b5a
|
8053ce8a29b4abe78827cbea587f13d09a25613e
|
refs/heads/master
| 2020-08-10T18:58:31.910324
| 2020-06-28T17:22:19
| 2020-06-28T17:22:19
| 214,401,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,796
|
r
|
R Basic(3) - String Function.R
|
# 문자열을 위한 함수
# 함수 nchar() : 문자열을 구성하고 있는 문자 개수
x <- c('park','lee','kwon')
nchar(x)
# 한글도 글자수를 셈
nchar('응용통계학과')
#########################
# 함수 paste(): 문자열의 결합
#옵션 sep의 활용
paste('모든','사람에게는','통계적','사고능력이','필요하다')
paste('모든','사람에게는','통계적','사고능력이','필요하다',
sep = '-')
paste('모든','사람에게는','통계적','사고능력이','필요하다',
sep = '')
#입력된 숫자는 문자로 전환되어 문자열과 결합
paste('원주율은', pi,'이다')
#문자형 벡터가 입력되면 대응되는 요소끼리 결합
#벡터의 길이가 서로 다르면 순환법칙 적용
paste('stat',1:3,sep = '') # stat이 3번 반복
paste(c('stat','math'),1:3, sep = '-') # 길이가 짧은 벡터를 순환법칙으로 맞춤
##############################
# 빈칸 없이 문자열 결합
# 1. 함수 paste()에 옵션 sep = ''사용
# 2. 함수 paste0()사용
paste0('stat',1:3) # sep보다 paste0가 훨씬 편하다
# 문자형 벡터의 문자열을 하나로 결합 : 옵션 collapse
paste0(letters, collapse = '') # 문자형 벡터를 하나로 합칠때
paste0(LETTERS, collapse = ",") # 구분자를 ,로 줌
#############################################
# substr(): 주어진 문자열의 일부분 선택
# substy(x, start, stop)
# -start, stop : 정수형 스칼라 또는 벡터(대응되는 숫자끼리 시작점과 끝점 구성)
substr('statistics',1, 4) #시작점 1, 끝점 4
x <- c('응용통계학과','정보통계학과','학생회장')
substr(x,3,6) #시작점 3번째~6번글자 출력
substr(x,c(1,3),c(2,6)) # 시작점 1,끝점 2
# 시작점 3,끝점 2 다시 1,2 반복
# 예제 : 문자형 벡터 x에는 미국의 세 도시와 그 도시가 속한 주 이름이 입력
x <- c("New York,NY","Ann Arbor, MI","Chicago, IL")
# 세 도시가 속한 주 이름만을 선택하여 출력
substr(x, nchar(x)-1,nchar(x))
######################################3
# 함수 strsplit() : 문자열의 분리
# 옵션 split에 지정된 기준으로 분리. 결과는 리스트
# 세 도시의 이름과 주 이름 분리
y <- strsplit(x, split = ",")
y
# unlist를 사용해 리스트 y를 벡터로 변환
unlist(y)
# 문자열을 구성하는 모든 문자의 분리
unlist(strsplit('PARK',split = ""))
# 점(.)을 기준으로 문자열 분리하는 경우
# 옵션 split = "." 원하는 결과를 얻을 수 없음.
unlist(strsplit("a.b.c",split = "."))
# 옵션 split = "[.]" 또는 split = "||."
unlist(strsplit("a.b.c",split = "[.]"))
# 옵션 split에는 정규표현식이 사용되며
# 정규표현식에서 점(.)은 다른의미가 있다
#################################################
# 함수 toupper(), tolower() : 대(소)문자로 수정
x <-c('park','lee','kwon')
(y <- toupper(x))
tolower(y)
# 벡터 x의 첫 글자만 대문자로 변환
substr(x,1,1) <- toupper(substr(x,1,1))
x
#############################################
# 함수 sub(), gsub() : 문자열의 치환
# sub(old,new,문자열) : 문자열의 첫 번째 old만 new로 치환
# gsub(old,new,문자열) : 문자열의 모든 old가 new로 치환
x <- "Park hates stats. He hates math, too."
sub("hat", "lov", x) # 첫번째 hat만 바뀜
gsub("hat", "lov", x) # 전부 바뀜
# 예제
# 문자열 "banana1","banana2","banana3" 생성
x <- c("banana1","banana2","banana3") # 반복이므로
x <- paste0('banana',1:3) # paste0로 사용하는게 더 간단함
x
# 첫 번째 a를 A로 변경
sub('a','A',x)
# 모든 a를 A로 변경
gsub('a','A',x)
# 문자열의 일부 삭제는 new에 ""입력
z <- "Everybody cannot do it"
sub("not","",z)
|
0c85a1a14f7124da617310c9acecd9e3bb7a2f9b
|
b9ee91175c4115fea52323a8e4019be9d44bd2e5
|
/ISLR/random_forest_class.R
|
171550d1d19af5626a2719b9b907b439d7a0ff1d
|
[] |
no_license
|
creyesp/r-course
|
c50ec510262c4e7e6c55a0b12395c22df67cf923
|
f8bc8b99a466baf733080b16ee5cac20ff118c6d
|
refs/heads/master
| 2023-04-10T15:54:10.910664
| 2020-12-09T00:16:26
| 2020-12-09T00:16:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,835
|
r
|
random_forest_class.R
|
# https://cran.r-project.org/web/packages/randomForest/randomForest.pdf
#
# install.packages('randomForest')
# install.packages("caret")
# install.packages("e1071")
# install.packages("doParallel")
library(caret)
library(e1071)
library(doParallel)
library(dplyr)
library(randomForest)
# library(doMC)
# registerDoMC(cores=2)
# Reproducibility for the CV folds / random forest
set.seed(1234)
# Parallel backend: one worker per available core (released via
# stopCluster(cl) at the end of the script)
cl <- makeCluster(detectCores())
registerDoParallel(cl)
# Train/test data; rows with missing values are dropped
data_train <- read.csv("https://raw.githubusercontent.com/guru99-edu/R-Programming/master/train.csv") %>% na.omit()
data_test <- read.csv("https://raw.githubusercontent.com/guru99-edu/R-Programming/master/test.csv") %>% na.omit()
# The outcome must be a factor for caret to treat this as classification
data_train <- data_train %>% mutate(Survived = factor(Survived))
data_test <- data_test %>% mutate(Survived = factor(Survived))
glimpse(data_train)
glimpse(data_test)
##### Caret
# Default setting
# K-fold cross validation is controlled by the trainControl() function
#
# trainControl(method = "cv", number = n, search ="grid")
# arguments
# - method = "cv": The method used to resample the dataset.
# - number = n: Number of folders to create
# - search = "grid": Use the search grid method. For randomized method, use "grid"
# Note: You can refer to the vignette to see the other arguments of the function.
# Define the control: 10-fold cross-validation with a grid search over the
# tuning parameters, allowing folds to run on the registered parallel backend
trControl <- trainControl(method = "cv",
                          number = 10,
                          search = "grid",
                          allowParallel=TRUE
)
## Caret
# train(formula, df, method = "rf", metric= "Accuracy", trControl = trainControl(), tuneGrid = NULL)
# argument
# - `formula`: Define the formula of the algorithm
# - `method`: Define which model to train. Note, at the end of the tutorial, there is a list of all the models that can be trained
# - `metric` = "Accuracy": Define how to select the optimal model
# - `trControl = trainControl()`: Define the control parameters
# - `tuneGrid = NULL`: Return a data frame with all the possible combination
# Run the model
# rf_default <- train(Survived~.,
# data = data_train,
# method = "rf",
# metric = "Accuracy",
# trControl = trControl)
# # Print the results
# print(rf_default)
##
# Search best mtry
# You can test the model with values of mtry from 1 to 10
# Tune mtry over 1..5 (the comment above mentions 1..10, but only 1:5 is searched)
tuneGrid <- expand.grid(.mtry = c(1:5))
rf_mtry <- train(Survived~.,
                 data = data_train,
                 method = "rf",
                 metric = "Accuracy",
                 tuneGrid = tuneGrid,
                 trControl = trControl,
                 importance = TRUE, # record variable importance
                 nodesize = 14, # minimum terminal-node size
                 ntree = 300) # trees per forest
print(rf_mtry)
# Evaluate the tuned model on the held-out test set.
# Fixed from the original, which referenced `fit_rf` (never defined; the
# trained caret object is `rf_mtry`) and `data_test$survived` (the column
# is `Survived`).
prediction <- predict(rf_mtry, data_test)
confusionMatrix(prediction, data_test$Survived)
# varImpPlot() is a randomForest function, so pass the underlying fit
varImpPlot(rf_mtry$finalModel)
stopCluster(cl)
# names>(getModelInfo())
|
65ae08d8bac45fdd41f7523d0112c5cedcd2f12f
|
584c1168097d5939f5c2f9cefbd1aab3767b5b7b
|
/AleatoriosShiny.R
|
79b88a88495617ac70f549f8305617b9c57b4fd0
|
[] |
no_license
|
erikonchis/AleatoriosShiny
|
7b9f99c8087805a214ea12c9b98da7b2216aea69
|
4689a9734e699bd2973b6b69640d2edda56475c6
|
refs/heads/master
| 2016-08-11T07:11:13.155904
| 2016-02-26T18:21:46
| 2016-02-26T18:21:46
| 51,943,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
AleatoriosShiny.R
|
library (shiny)
# Minimal Shiny app: each button press draws 100 standard-normal values
# and plots them.
app_ui <- fluidPage(
  titlePanel("Genera y grafica números aleatorios"),
  actionButton(inputId = "clicks", label = "Genera datos"),
  plotOutput("grafica")
)

app_server <- function(input, output) {
  # Re-sample only when the button is clicked
  datos <- eventReactive(input$clicks, {
    rnorm(100)
  })
  output$grafica <- renderPlot({
    plot(datos())
  })
}

shinyApp(ui = app_ui, server = app_server)
|
d9a6a6e487e04e7562f75cc807c4f327f89b1051
|
2527964c2e0e5391667b665e2f291e2d179ead94
|
/R/CompressedDataFrameList-class.R
|
c330f0b1f55734d349e598bdc473ec5cb17e2ad2
|
[] |
no_license
|
Bioconductor/IRanges
|
c5d2e040c59b6b35ca5355e31a300327f057acba
|
2a45618f740681086b8ff9d5064cbafd5bfd2113
|
refs/heads/devel
| 2023-07-10T00:02:55.299599
| 2023-06-22T02:44:53
| 2023-06-22T02:44:53
| 101,238,649
| 18
| 19
| null | 2023-06-13T19:15:31
| 2017-08-24T01:02:36
|
R
|
UTF-8
|
R
| false
| false
| 5,550
|
r
|
CompressedDataFrameList-class.R
|
### =========================================================================
### CompressedDataFrameList objects
### -------------------------------------------------------------------------
# Virtual parent: CompressedList-backed flavor of DataFrameList; the list
# elements are stored concatenated in the @unlistData DataFrame.
setClass("CompressedDataFrameList",
    contains=c("DataFrameList", "CompressedList"),
    representation("VIRTUAL", unlistData="DataFrame"),
    prototype(unlistData=new("DFrame"))
)

# Concrete class whose unlisted data is specifically a DFrame.
setClass("CompressedDFrameList",
    contains=c("DFrameList", "CompressedDataFrameList"),
    representation(unlistData="DFrame")
)

# Virtual: compressed "split" DataFrameList (elements share the same columns,
# as the ncols()/colnames() methods below assume).
setClass("CompressedSplitDataFrameList",
    contains=c("SplitDataFrameList", "CompressedDataFrameList"),
    representation("VIRTUAL")
)

# Concrete split class based on DFrame.
setClass("CompressedSplitDFrameList",
    contains=c("SplitDFrameList", "CompressedDFrameList",
               "CompressedSplitDataFrameList")
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessor methods.
###
### Deprecated.
### IMPORTANT NOTE: We won't be able to go thru the Defunct cycle because
### a lot of code around assumes that ncol() can be called on an arbitrary
### object!
# Deprecated ncol() method; use ncols() instead (see NOTE above about why
# this cannot go straight to Defunct).
setMethod("ncol", "CompressedSplitDataFrameList",
    function(x)
    {
        msg <- c("The ncol() method for CompressedSplitDataFrameList ",
                 "objects is deprecated. Please use ncols() on these ",
                 "objects instead.")
        .Deprecated(msg=wmsg(msg))
        # Every element shares the unlisted data's columns, so the
        # per-element ncol is ncol(x@unlistData) repeated length(x) times.
        if (length(x) == 0L)
            0L
        else
            structure(rep.int(ncol(x@unlistData), length(x)),
                      names = names(x))
    })
# ncols(): per-element column counts; all elements share the unlisted
# data's columns, so this is ncol(x@unlistData) repeated.
setMethod("ncols", "CompressedSplitDataFrameList",
    function(x, use.names=TRUE)
    {
        if (!isTRUEorFALSE(use.names))
            stop(wmsg("'use.names' must be TRUE or FALSE"))
        ans_names <- if (use.names) names(x) else NULL
        structure(rep.int(ncol(x@unlistData), length(x)), names=ans_names)
    }
)
# colnames(): replicate the unlisted data's column names once per list
# element (as a CharacterList); NULL for a zero-length object.
setMethod("colnames", "CompressedSplitDataFrameList",
    function(x, do.NULL = TRUE, prefix = "col")
    {
        if (length(x)) {
            nms <- colnames(x@unlistData, do.NULL = do.NULL, prefix = prefix)
            rep(CharacterList(nms), length(x))
        } else NULL
    })
# rownames<-: accepts NULL (clears all rownames) or a CharacterList with
# one element per list element; the names are written through to the
# concatenated @unlistData.
setReplaceMethod("rownames", "CompressedSplitDataFrameList",
    function(x, value)
    {
        if (is.null(value)) {
            rownames(x@unlistData) <- NULL
        } else if (is(value, "CharacterList")){
            if (length(x) != length(value))
                stop("replacement value must be the same length as x")
            rownames(x@unlistData) <- unlist(value, use.names=FALSE)
        } else {
            stop("replacement value must either be NULL or a CharacterList")
        }
        x
    })
# colnames<-: NULL clears the shared column names, a plain character vector
# renames them directly, and a CharacterList (one element per list element)
# uses its first element -- all elements share one set of columns.
setReplaceMethod("colnames", "CompressedSplitDataFrameList",
    function(x, value)
    {
        if (is.null(value)) {
            colnames(x@unlistData) <- NULL
        } else if (is.character(value)) {
            colnames(x@unlistData) <- value
        } else if (is(value, "CharacterList")){
            if (length(x) != length(value))
                stop("replacement value must be the same length as x")
            if (length(x) > 0)
                colnames(x@unlistData) <- unlist(value[[1L]])
        } else {
            stop("replacement value must either be NULL or a CharacterList")
        }
        x
    })
# Column names common to all elements (taken from the unlisted data).
setMethod("commonColnames", "CompressedSplitDataFrameList",
    function(x) colnames(unlist(x, use.names=FALSE)))

# Per-column metadata is stored once, on the unlisted DataFrame.
setMethod("columnMetadata", "CompressedSplitDataFrameList", function(x) {
    mcols(x@unlistData, use.names=FALSE)
})

setReplaceMethod("columnMetadata", "CompressedSplitDataFrameList",
    function(x, value) {
        mcols(x@unlistData) <- value
        x
    })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
# [ : column subsetting (j) is applied to the shared @unlistData; element
# subsetting (i) is delegated to the inherited CompressedList method. When a
# matrix-style call leaves exactly one column and drop is not FALSE, the
# result collapses to that column relisted over x's structure.
setMethod("[", "CompressedSplitDataFrameList",
    function(x, i, j, ..., drop=TRUE)
    {
        if (!missing(j))
            x@unlistData <- x@unlistData[, j, drop=FALSE]
        if (!missing(i))
            x <- callNextMethod(x, i)
        # nargs()-based test distinguishes x[i] (list-style, never drops)
        # from x[i, j] (matrix-style, may drop)
        if (((nargs() - !missing(drop)) > 2) &&
            (ncol(x@unlistData) == 1) && (missing(drop) || drop)) {
            x <- relist(x@unlistData[[1L]], x)
        }
        x
    })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
# Coercions: route the virtual targets to the concrete DFrame-based classes.
setAs("ANY", "CompressedDataFrameList",
    function(from) as(from, "CompressedDFrameList")
)
setAs("ANY", "CompressedSplitDataFrameList",
    function(from) as(from, "CompressedSplitDFrameList")
)

# Register the standard list coercions for the DFrame element type.
setListCoercions("DFrame")

setAs("ANY", "CompressedSplitDFrameList",
    function(from) {
        coerceToCompressedList(from, "DFrame")
    })
setAs("ANY", "SplitDFrameList",
    function(from) as(from, "CompressedSplitDFrameList"))
setAs("DataFrame", "SplitDFrameList",
    function(from) as(from, "CompressedSplitDFrameList"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display
###
# Display name: strip the "Compressed" prefix and show "DataFrame" in place
# of the internal "DFrame" class name.
setMethod("classNameForDisplay", "CompressedDFrameList",
    function(x) sub("^Compressed", "", sub("DFrame", "DataFrame", class(x)))
)
|
3235190c0d45566aab159aab975703816ba2d0fb
|
4b08dfacf916de2bf5ef0ce4a0d2040b70946794
|
/R/desolve.R
|
a4d83f39eb1aa059a3ab16f31b456b6530b1a652
|
[] |
no_license
|
richfitz/diversitree
|
7426e13415829e3fc6a37265926a36461d458cc6
|
8869a002f8c978883f5027254f6fbc00ccfa8443
|
refs/heads/master
| 2023-08-08T23:13:59.081736
| 2023-05-03T14:43:17
| 2023-05-03T14:43:17
| 3,779,673
| 16
| 13
| null | 2023-08-24T14:46:37
| 2012-03-20T20:29:11
|
R
|
UTF-8
|
R
| false
| false
| 726
|
r
|
desolve.R
|
# Run deSolve::lsoda, drop the initial time point (first row) and the time
# column (first column), transpose so variables are rows, and strip dimnames.
lsoda.trim <- function(...) {
  out <- lsoda(...)
  out <- t(out[-1, -1, drop=FALSE])
  dimnames(out) <- NULL
  out
}
## This sets things up the way that deSolve likes them
# deSolve expects a derivative function that returns a list; wrap `f` so
# its return value is boxed accordingly.
derivs.for.deSolve <- function(f) {
  function(...) {
    list(f(...))
  }
}
# Build an ODE solver closure around deSolve::lsoda.
#
# info:    list with at least $derivs (the derivative function); optionally
#          $time.varying (flag) and $tm (object with a $set() method used to
#          install parameters before each solve).
# control: list with $tol, used for both rtol and atol.
#
# Returns a function(vars, times, pars) yielding the trimmed/transposed
# lsoda output (variables in rows, one column per time point after the first).
make.ode.deSolve <- function(info, control) {
  if ( !is.function(info$derivs) )
    stop("info$derivs must be a function")
  derivs <- derivs.for.deSolve(info$derivs)
  rtol <- atol <- control$tol
  if ( isTRUE(info$time.varying) ) {
    # Time-varying case: parameters are installed into the time machine
    # before each solve.
    tm <- info$tm
    function(vars, times, pars) {
      tm$set(pars)
      lsoda.trim(vars, times, derivs, pars, rtol=rtol, atol=atol)
    }
  } else {
    function(vars, times, pars) {
      lsoda.trim(vars, times, derivs, pars, rtol=rtol, atol=atol)
    }
  }
}
|
d389642d6a247046b93f3932bdbae5d2f72555fb
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/tests/testthat.R
|
042278b66566b1d93f7f085faa18deff4f23d617
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
testthat.R
|
# Package test driver: run the LBDCore testthat suite.
library(testthat)
library(LBDCore)
# NOTE(review): `singularity_version` is assigned but never used below;
# presumably consumed by the package's test setup/helpers -- confirm.
singularity_version <- "default"
test_check("LBDCore")
|
01ed5600bdb454a97c4bdc647a2989f13295804e
|
f94c4b201a378fa7a14e9f88d3004b755b022ead
|
/ExtraPackages/linus/stock.Analyze/man/get.script.name.Rd
|
6bf599de39c83bc90ffc3d3a67bb99b93f7ab50a
|
[] |
no_license
|
linushsao/Analysis.of.trading.strategies
|
5b9b58f90582ad98eedf0946d18be9d66bcc069b
|
6af5730b7c9cc5d10a1002b9709fba855b095d2a
|
refs/heads/master
| 2021-11-10T19:38:02.399402
| 2021-11-07T09:16:18
| 2021-11-07T09:16:18
| 251,311,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
get.script.name.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/m_get.script.name.R
\name{get.script.name}
\alias{get.script.name}
\title{A get.script.name Function}
\usage{
get.script.name()
}
\arguments{
\item{x}{A numeric vector.}
}
\description{
This function allows you to multi paste vector.
}
\examples{
get.script.name()
}
\keyword{median}
|
987a0955ac87b4c608840dce3f6e4b1c6c3c1d81
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SixSigma/examples/ss.data.pb1.Rd.R
|
2ce29b542c4b71544360a42db61856f0f998258d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 376
|
r
|
ss.data.pb1.Rd.R
|
# Auto-generated example script for the ss.data.pb1 dataset (SixSigma package)
library(SixSigma)
### Name: ss.data.pb1
### Title: Particle Boards Example - Individual Data
### Aliases: ss.data.pb1
### Keywords: cc data

### ** Examples

# Load and summarize the particle-board data
data(ss.data.pb1)
summary(ss.data.pb1)
# Individuals ("xbar.one") control chart of humidity grouped by pb.group
library(qcc)
pb.groups.one <- with(ss.data.pb1, qcc.groups(pb.humidity, pb.group))
pb.xbar.one <- qcc(pb.groups.one, type="xbar.one")
summary(pb.xbar.one)
plot(pb.xbar.one)
|
75511777f011a87873a3fd330c54d6d586b5a6ab
|
b3a1025e9ec447064e0bec82630e261acb5a2949
|
/src/plot_figures.r
|
488d72f3e3feabedbe718e9016a8b455a2a78656
|
[] |
no_license
|
schlogl2017/rates_measurement
|
0f80ad0df55be53358dacaaa9bd5b64b70f32c15
|
c05cc3a4a7e31c03bfe77c69e50a2ffbb6a545b4
|
refs/heads/master
| 2022-11-20T07:05:26.628270
| 2018-09-11T19:59:02
| 2018-09-11T19:59:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,549
|
r
|
plot_figures.r
|
library(tidyr)
library(ggplot2)
library(dplyr)
library(cowplot)
library(readr)
####Figure: Analytically derived rate and inferred rate when true model is MutSel and inference model is JC#####
# Load the analytically derived rates and the inferred rates for the
# ten-site amino-acid dataset
r_an <- read_csv("../analytical_rates/ten_sites_aa.csv")
r_inf<- read_csv("../inferred_rates/processed_rates/rates_ten_sites_aa.csv")
# Normalize inferred rates within each (time, rep) group so they average to 1
r_inf %>% group_by(time,rep) %>% mutate(rate_norm = rate / mean(rate)) -> r_norm_inf
# Sites to plot
sites_to_plot <- c(1,2,4,5)
plot_lst <- list()
for (i in sites_to_plot){
  # NOTE(review): analytical rates are filtered on site == i + 1 while
  # inferred rates use site == i -- presumably an index-offset between the
  # two data sets; confirm.
  r_an_filtered <- filter(r_an,site==i+1)
  r_inf_filtered <- filter(r_norm_inf,site==i)
  # Analytical curves (exact, small-t, large-t) plus mean +/- SEM points
  # of the normalized inferred rates
  p_rates <- ggplot(r_an_filtered,aes(x=time)) +
    background_grid("xy")+
    geom_line(aes(y=r_tilde),color="black",size=1.2) +
    geom_line(aes(y=r_tilde_small_t), color="royalblue1",size=1.2) +
    geom_line(aes(y=r_tilde_large_t), color="green3",size=1.2) +
    stat_summary(data= r_inf_filtered,
                 inherit.aes=FALSE,
                 aes(x=time,y=rate_norm),
                 color="orangered",
                 fun.y = mean,
                 fun.ymin = function(x) mean(x) - sd(x)/sqrt(length(x)),
                 fun.ymax = function(x) mean(x) + sd(x)/sqrt(length(x)),
                 geom = "pointrange",
                 size=0.5)+
    xlab("Time") +
    ylab("Relative rate") +
    coord_cartesian(ylim=c(0,2.1),xlim=c(0,0.82))+
    scale_y_continuous(breaks=seq(0,2.5,0.5),expand = c(0.01, 0),label=c("0","0.5","1.0","1.5","2.0","2.5")) +
    scale_x_continuous(breaks=seq(0,0.8,0.2),expand = c(0.01, 0),label=c("0","0.2","0.4","0.6","0.8")) +
    geom_hline(yintercept=1)+
    theme(axis.title = element_text(size = 22),
          axis.text = element_text(size = 20),
          legend.position="none")
  # Only the first panel keeps its y-axis title
  if (i==2 | i==4 | i==5) p_rates <- p_rates+theme(axis.title.y = element_blank())
  plot_lst[[length(plot_lst)+1]] <- p_rates
}
# Assemble the four panels in a single row and save to disk
prow <- plot_grid(plotlist=plot_lst,
                  labels="AUTO",
                  label_size = 20,
                  align = 'vh',
                  hjust = -1,
                  ncol=4,
                  nrow=1)
save_plot("../plots/rates_true_MutSel_inf_JC.png", prow,
          ncol = 4, # we're saving a grid plot of 4 columns
          nrow = 1, # and 1 row
          # each individual subplot should have an aspect ratio of 1.3
          base_aspect_ratio = 1.3)
####Figure: Accuracy of the inferred rate#####
r_inf <- read.csv( "../inferred_rates/processed_rates/rates_site_dupl.csv")
r_an <- read_csv("../analytical_rates/all_sites_aa.csv")
true_r <- r_an %>%
filter(site==4,time==0.480002)
r <- r_inf %>%
group_by(site_dupl, rep) %>%
mutate(rate_norm=rate/mean(rate)) %>%
filter(site==3)
p <- ggplot(r,aes(site_dupl,rate_norm)) +
geom_hline(aes(yintercept=true_r$r_tilde),color="orangered",size=0.5)+
geom_point(size=0.9, alpha=0.8, position = position_jitter(width = 0.15, height = 0.01)) +
ylab("Relative rate") +
xlab("Site duplicates") +
coord_cartesian(ylim=c(0.001,1000),xlim=c(8.5,110000))+
scale_y_log10(breaks=c(0.001,0.01,0.1,1,10,100,1000),label=c("0.001","0.01","0.1","1","10","100","1,000")) +
scale_x_log10(breaks=c(10,100,1000,10000,100000),label=c("10","100","1,000","10,000","100,000"))+
theme(axis.title = element_text(size = 10),
axis.text = element_text(size = 10))
save_plot("../plots/inf_rate_accuracy_v_site_dupl.png",plot=p)
####Figure: Analytically derived rate and the true rate when true model and inference model is JC#####
r_an <- read_csv("../analytical_rates/ten_sites_aa_true_JC.csv")
r_inf <- read_csv("../inferred_rates/processed_rates/rates_ten_sites_JC.csv")
r_inf %>% group_by(time,rep) %>% mutate(rate_norm = rate / mean(rate)) -> r_norm_inf
sites_to_plot <- c(1:6)
plot_lst <- list()
for (i in sites_to_plot){
r_an_filtered <- filter(r_an,site==i)
r_inf_filtered <- filter(r_norm_inf,site==i+1)
p_rates <- ggplot(r_an_filtered) +
background_grid("xy")+
geom_line(aes(time,true_r),color="black",size=1.2) +
stat_summary(data= r_inf_filtered,
inherit.aes=FALSE,
aes(x=time,y=rate_norm),
color="orangered",
fun.y = mean,
fun.ymin = function(x) mean(x) - sd(x)/sqrt(length(x)),
fun.ymax = function(x) mean(x) + sd(x)/sqrt(length(x)),
geom = "pointrange",
size=0.5)+
xlab("Time") +
ylab("Relative rate") +
coord_cartesian(ylim=c(0,2.5),xlim=c(0,1))+
scale_y_continuous(breaks=seq(0,2.5,0.5),label=c("0","0.5","1.0","1.5","2.0","2.5")) +
scale_x_continuous(breaks=seq(0,1,0.2),expand = c(0.01, 0),label=c("0","0.2","0.4","0.6","0.8","1.0")) +
geom_hline(yintercept=1)+
theme(axis.title = element_text(size = 18),
axis.text = element_text(size = 16),
legend.position="none")
if (i==2 | i==3 | i==5 | i==6) p_rates <- p_rates+theme(axis.title.y = element_blank())
plot_lst[[length(plot_lst)+1]] <- p_rates
}
prow <- plot_grid(plotlist=plot_lst,
labels="AUTO",
align = 'vh',
hjust = -1,
ncol=3,
nrow=2)
save_plot("../plots/rates_true_JC_inf_JC.png", prow,
ncol = 3, # we're saving a grid plot of 2 columns
nrow = 2, # and 2 rows
# each individual subplot should have an aspect ratio of 1.3
base_aspect_ratio = 1.3)
####Figure: a comparison of rates inferred with Jukes-Cantor-like matrix with observed frequencies and equilibrium frequencies #####
r_an <- read_csv("../analytical_rates/all_sites_aa.csv")
r_inf <- read_csv("../inferred_rates/processed_rates/rates_all_sites.csv")
#get the mean inferred rate at each time point
r_an_temp <- r_an %>%
group_by(site) %>%
summarise(r_tilde_small_t=r_tilde_small_t[1])
r_an_temp$site <- 1:length(r_an_temp$site)
d_label <- data.frame(time=c(0.0009,0.009,0.09,0.9),
time_label=c('0.0009','0.009','0.09','0.9'))
r <- r_inf %>%
group_by(time,model,num_taxa) %>%
mutate(inf_rate_mean=mean(rate),inf_rate_norm=rate/inf_rate_mean) %>%
filter(num_taxa==512, model=="JC" | model=="JC_equalf") %>%
group_by(site,model,time) %>%
summarise(inf_rate_norm_mean=mean(inf_rate_norm)) %>%
left_join(r_an_temp,by='site') %>%
left_join(d_label)
r_JC <- r %>% ungroup() %>%
filter(model=="JC") %>%
mutate(rate_JC=inf_rate_norm_mean) %>%
select(rate_JC,site,time)
r_JC_equalf <- r %>% ungroup() %>%
filter(model=="JC_equalf") %>%
mutate(rate_JC_equalf=inf_rate_norm_mean) %>%
select(rate_JC_equalf,site,time,time_label)
r_combined <- r_JC %>% left_join(r_JC_equalf,by=c("site","time"))
p <- ggplot(r_combined,aes(rate_JC_equalf,rate_JC)) +
geom_point(size=1.2,alpha=0.8) +
geom_abline(color="red")+
xlab("Inferred rate (equal frequencies)") +
ylab("Inferred rate (observed frequencies)") +
facet_wrap( ~ time_label) +
coord_cartesian(ylim=c(0,2.1), xlim=c(0,2.1))+
scale_y_continuous(breaks=seq(0,2.2,1)) +
scale_x_continuous(breaks=seq(0,2.2,1)) +
theme(axis.title = element_text(size = 12),
axis.text = element_text(size = 12))+
panel_border()
save_plot("../plots/rates_inf_diff_JC.png", p,
ncol = 1, # we're saving a grid plot of 2 columns
nrow = 1, # and 2 rows
# each individual subplot should have an aspect ratio of 1.3
base_aspect_ratio = 1.3)
####Figure: a comparison of rates inferred with JC, JTT, WAG, LG#####
r_an <- read_csv("../analytical_rates/all_sites_aa.csv")
r_inf <- read_csv("../inferred_rates/processed_rates/rates_all_sites.csv")
#get the mean inferred rate at each time point
r_an_temp <- r_an %>%
group_by(site) %>%
summarise(r_tilde_small_t=r_tilde_small_t[1])
r_an_temp$site <- 1:length(r_an_temp$site)
d_label <- data.frame(time=c(0.0009,0.009,0.09,0.9),
time_label=c('0.0009','0.009','0.09','0.9'))
model_label <- data.frame(model = c("JC_equalf", "LG", "JTT", "WAG"),
model_label = c("JC", "LG", "JTT", "WAG"))
r <- r_inf %>%
group_by(time,model,num_taxa) %>%
mutate(inf_rate_mean=mean(rate),inf_rate_norm=rate/inf_rate_mean) %>%
filter(num_taxa==512, model!="JC") %>%
group_by(site,model,time) %>%
summarise(inf_rate_norm_mean=mean(inf_rate_norm)) %>%
left_join(r_an_temp,by='site') %>%
left_join(d_label) %>%
left_join(model_label)
p <- ggplot(r,aes(r_tilde_small_t,inf_rate_norm_mean)) +
#background_grid("xy")+
geom_point(size=1.2,alpha=0.8) +
geom_abline(color="red")+
ylab("Inferred rate") +
xlab("Analytically derived rate") +
#xlab(expression(paste("Analytically Derived Rates (", hat(r)^(k), "for small t)"))) +
facet_grid(model_label ~ time_label) +
coord_cartesian(ylim=c(0,3), xlim=c(0,3))+
scale_y_continuous(breaks=seq(0,3,1)) +
scale_x_continuous(breaks=seq(0,3,1)) +
theme(axis.title = element_text(size = 16),
axis.text = element_text(size = 14))+
panel_border()
save_plot("../plots/inf_v_an_rates_all_matrices.png", p,
ncol = 1, # we're saving a grid plot of 2 columns
nrow = 1, # and 2 rows
# each individual subplot should have an aspect ratio of 1.3
base_height=7,
base_width=7)
####Figure: Analytically derived rate and inferred rate when true model is MutSel and inference model is JC#####
r_an <- read_csv("../analytical_rates/ten_sites_codon.csv")
r_inf<- read_csv("../inferred_rates/processed_rates/rates_translated.csv")
#normalize inferred rates
r_inf %>% group_by(time,rep) %>% mutate(rate_norm = rate / mean(rate)) -> r_norm_inf
#sites to plot
sites_to_plot <- c(1,2,4,5,7,9)
plot_lst <- list()
for (i in sites_to_plot){
r_an_filtered <- filter(r_an,site==i+1)
r_inf_filtered <- filter(r_norm_inf,site==i)
p_rates <- ggplot(r_an_filtered,aes(x=time*0.77)) +
background_grid("xy")+
geom_line(aes(y=r_tilde),color="black",size=1.2) +
geom_line(aes(y=r_tilde_small_t), color="royalblue1",size=1.2) +
geom_line(aes(y=r_tilde_large_t), color="green3",size=1.2) +
stat_summary(data= r_inf_filtered,
inherit.aes=FALSE,
aes(x=time*0.77,y=rate_norm),
color="orangered",
fun.y = mean,
fun.ymin = function(x) mean(x) - sd(x)/sqrt(length(x)),
fun.ymax = function(x) mean(x) + sd(x)/sqrt(length(x)),
geom = "pointrange",
size=0.5)+
xlab("Time") +
ylab("Relative rate") +
coord_cartesian(ylim=c(0,2),xlim=c(0,1))+
scale_y_continuous(breaks=seq(0,2,0.5),label=c("0","0.5","1.0","1.5","2.0")) +
scale_x_continuous(breaks=seq(0,1,0.2),expand = c(0.01, 0),label=c("0","0.2","0.4","0.6","0.8","1.0")) +
geom_hline(yintercept=1)+
theme(axis.title = element_text(size = 18),
axis.text = element_text(size = 16),
legend.position="none")
if (i==2 | i==4 | i==7 | i==9) p_rates <- p_rates+theme(axis.title.y = element_blank())
plot_lst[[length(plot_lst)+1]] <- p_rates
}
prow <- plot_grid(plotlist=plot_lst,
labels="AUTO",
align = 'vh',
hjust = -1,
ncol=3,
nrow=2)
save_plot("../plots/rates_true_codon_MutSel_inf_JC.png", prow,
ncol = 3, # we're saving a grid plot of 2 columns
nrow = 2, # and 2 rows
# each individual subplot should have an aspect ratio of 1.3
base_aspect_ratio = 1.3)
|
9a8bd3062355f142cc34588d4c7c16411b4dd11c
|
0b1e134538b514ba9c0aed8f4f4dc70d719a847b
|
/man/print-methods.Rd
|
9d3ea54df6b4c136b2a7f67dbf19fcce7a8bddf3
|
[] |
no_license
|
cran/ppmlasso
|
e6ed9e8b6b6190b2bf7ddc38108213aa6f667495
|
1a0ebbb5ea95088c6d639e6f034c452786dd9445
|
refs/heads/master
| 2022-12-18T09:59:20.444663
| 2022-12-01T11:50:02
| 2022-12-01T11:50:02
| 17,698,696
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
rd
|
print-methods.Rd
|
\name{print-methods}
\docType{methods}
\alias{print-methods}
\alias{print,ppmlasso-method}
\title{Methods for function \code{print}}
\description{
Methods for function \code{\link{print}}
}
\section{Methods}{
\describe{
\item{\code{signature(x = "ppmlasso")}}{
Prints output for a \code{ppmlasso} object with details controlled by arguments of the
\code{\link{print.ppmlasso}} function.
}
}}
\keyword{methods}
|
57f65942ad9f35bcfa7ca978826bb61b24a7d6b1
|
cedee53b1bdaf0d70fc83cf6d13ea2aa0897f295
|
/RandVarGen/server.R
|
fe132b86c858fc3a3bfe8e4af35312aea6628230
|
[
"MIT"
] |
permissive
|
awstown/shiny1
|
b12a892278b4980ceca293dc408f61642d22de75
|
62e2f54120aa4a3b75fc16da09999cec3c361947
|
refs/heads/master
| 2021-09-22T16:06:45.337636
| 2018-09-11T19:25:11
| 2018-09-11T19:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,399
|
r
|
server.R
|
library(shiny)
source("rvg.R")
# Shiny server for random-variate-generation demos.
# Relies on helpers sourced from rvg.R above: init.all, add.exp, add.linear,
# temp.start, temp.start2, start.tnorm, add.tnorm, and the plot.* functions.
# Each demo keeps its simulation history in a reactiveValues store; "go"
# buttons append draws, "clear" buttons (and parameter changes) reset it.
shinyServer(function(input, output) {

  #######################################
  # Probability Integral Transform
  #
  # Exponential Example

  # history of uniform draws U and transformed values X
  exp.all.out<-reactiveValues()
  # initialize on startup (button value 0); otherwise append num.exp new draws
  observe({
    if(input$go.exp==0){exp.all.out$history<-init.all()}
    else{
      exp.all.out$history<-add.exp(isolate(exp.all.out$history),isolate(input$lambda),isolate(input$num.exp))
    }
  })
  # side-by-side plots of the uniform draws and the exponential transform;
  # the bare input$... reads only establish reactive dependencies
  output$PITexpPlot <- renderPlot({
    input$go.exp
    input$clear.exp
    input$lambda
    par(mfrow=c(1,2),oma=c(0,0,0,0),mar=c(5.1,2.1,1,1.1))
    isolate(plot.unif(exp.all.out$history))
    isolate(plot.exp(input$lambda,exp.all.out$history))
  })
  # reset history when the example tab, lambda, or the clear button changes
  observe({
    input$pitEx
    input$lambda
    input$clear.exp
    exp.all.out$history<-init.all()
  })
  output$totalcountExp<-renderUI({
    input$go.exp
    last<-length(exp.all.out$history$X)
    if(last>0){
      isolate(paste("Total number of replicates: ",last))
    }
  })
  # text summary of the most recent uniform draw
  output$summaryExp <- renderUI({
    input$go.exp
    last<-length(exp.all.out$history$U)
    if(last>0){
      strexp1<-paste("The most recent value of U is:",
                     round(exp.all.out$history$U[last],3))
      strexp2<-"This gives the following for x:"
      HTML(paste(strexp1,strexp2,sep='<br/>'))
    }})
  # MathJax display of the inverse-CDF calculation for the latest draw
  output$invExp<-renderUI({
    input$go.exp
    last<-length(exp.all.out$history$X)
    u<-exp.all.out$history$U[last]
    x<-exp.all.out$history$X[last]
    lambda<-input$lambda
    if(last>0){
      withMathJax(sprintf("$$x= \\frac{-ln(1-u)}{\\lambda} = \\frac{-ln(1-%0.3f)}{%0.1f} = %0.3f$$", u,lambda,x))
    }
  })

  # Linear example (density f(x) proportional to x on [0,4]; inverse CDF 4*sqrt(u))
  linear.all.out<-reactiveValues()
  observe({
    if(input$go.linear==0){linear.all.out$history<-init.all()}
    else{
      linear.all.out$history<-add.linear(isolate(linear.all.out$history),isolate(input$num.linear))
    }
  })
  output$PITlinearPlot <- renderPlot({
    input$go.linear
    input$clear.linear
    par(mfrow=c(1,2),oma=c(0,0,0,0),mar=c(5.1,2.1,1,1.1))
    isolate(plot.unif(linear.all.out$history))
    isolate(plot.linear(linear.all.out$history))
  })
  # reset history on tab switch or clear
  observe({
    input$pitEx
    input$clear.linear
    linear.all.out$history<-init.all()
  })
  output$totalcountLin<-renderUI({
    input$go.linear
    last<-length(linear.all.out$history$X)
    if(last>0){
      isolate(paste("Total number of replicates: ",last))
    }
  })
  output$summaryLin <- renderUI({
    input$go.linear
    last<-length(linear.all.out$history$U)
    if(last>0){
      strexp1<-paste("The most recent value of U is:",
                     round(linear.all.out$history$U[last],3))
      strexp2<-"This gives the following for x:"
      HTML(paste(strexp1,strexp2,sep='<br/>'))
    }})
  output$invLin<-renderUI({
    input$go.linear
    last<-length(linear.all.out$history$X)
    u<-linear.all.out$history$U[last]
    x<-linear.all.out$history$X[last]
    if(last>0){
      withMathJax(sprintf("$$x= 4\\sqrt{u} = 4\\sqrt{%0.3f} = %0.3f$$", u,x))
    }
  })

  ##########################################
  # Accept-Reject
  #
  # Beta example

  # history of proposals Y, uniforms U, densities fy/gy, bound M, and status
  beta.all.out<-reactiveValues()
  observe({
    if(input$go==0){beta.all.out$history<-temp.start(isolate(input$alpha),isolate(input$beta))}
    else{
      beta.all.out$history<-temp.start2(isolate(beta.all.out$history),isolate(input$alpha),isolate(input$beta),isolate(input$num))
    }
  })
  # changing alpha/beta (or pressing clear) invalidates the history
  observe({
    input$alpha
    input$beta
    input$clear
    beta.all.out$history<-temp.start(input$alpha,input$beta)
  })
  output$densityPlot <- renderPlot({
    input$go
    input$clear
    input$alpha
    input$beta
    par(mfrow=c(1,2),oma=c(0,0,0,0),mar=c(5.1,2.1,1,1.1))
    isolate(plot.unif(beta.all.out$history))
    isolate(plot.beta(input$alpha,input$beta,beta.all.out$history))
  })
  output$summary <- renderUI({
    input$go
    last<-length(beta.all.out$history$Y)
    if(last>0){
      str1<-paste("The most recent value of U is:",
                  round(beta.all.out$history$U[last],3), "(enlarged and green)")
      str2<-paste("The most recent value of Y is:",
                  round(beta.all.out$history$Y[last],3), "(enlarged, and green if accepted; red if rejected)")
      str3<-paste("The value of Y is", ifelse(beta.all.out$history$status[last]=="accept","<b>accepted</b>","<b>rejected</b>"),"because:")
      HTML(paste(str1,str2,str3,sep='<br/>'))
    }})
  # MathJax display of the accept/reject inequality for the latest proposal
  output$accrej<-renderUI({
    input$go
    last<-length(beta.all.out$history$Y)
    if(last>0){
      u<-beta.all.out$history$U[last]
      fy<-beta.all.out$history$fy[last]
      M<-beta.all.out$history$M[last]
      gy<-beta.all.out$history$gy[last]
      ratio<-fy/(M*gy)
      if(beta.all.out$history$status[last]=="accept"){
        withMathJax(sprintf("$$%0.3f \\leq \\frac{f(y)}{Mg(y)} =
                            \\frac{\\frac{\\Gamma(\\alpha + \\beta)}{\\Gamma(\\alpha)\\Gamma(\\beta)}y^{\\alpha-1}(1-y)^{\\beta-1}}{M \\cdot 1_{\\{0 \\leq y \\leq 1\\}}}
                            = \\frac{%0.2f}{%0.3f \\cdot 1} = %0.2f$$",
                            u,fy,M,ratio))
      } else {
        withMathJax(sprintf("$$%0.3f > \\frac{f(y)}{Mg(y)} =
                            \\frac{\\frac{\\Gamma(\\alpha + \\beta)}{\\Gamma(\\alpha)\\Gamma(\\beta)}y^{\\alpha-1}(1-y)^{\\beta-1}}{M \\cdot 1_{\\{0 \\leq y \\leq 1\\}}}
                            = \\frac{%0.2f}{%0.2f \\cdot 1} = %0.2f$$",
                            u,fy,M,ratio))
      }
    }
  })
  output$unifnote<-renderText({
    input$alpha
    input$beta
    if(input$alpha==1 & input$beta==1){
      "Note that for the Beta(1,1) distribution, every point will be accepted, as we would expect
      since it is equivalent to the Uniform[0,1] distribution."
    }
  })
  # M = max of the Beta density over [0,1], the accept-reject bound
  output$M<- renderText({
    input$alpha
    input$beta
    isolate(paste("For the current set of parameter values, M = ",
                  round(optimize(dbeta,shape1=input$alpha,shape2=input$beta,interval=c(0,1),maximum=T)$objective,digits=3),".",sep=""))
  })
  output$totalcount<-renderUI({
    input$go
    last<-length(beta.all.out$history$Y)
    if(last>0){
      isolate(paste("Total number of replicates: ",last))
    }
  })

  # Truncated normal example (standard normal truncated to [2, Inf),
  # shifted-exponential proposal -- see rvg.R helpers)
  tnorm.all.out<-reactiveValues()
  observe({
    if(input$tnormGo==0){tnorm.all.out$history<-start.tnorm()}
    else{
      tnorm.all.out$history<-add.tnorm(isolate(tnorm.all.out$history),isolate(input$tnormNum))
    }
  })
  observe({
    input$tnormClear
    tnorm.all.out$history<-start.tnorm()
  })
  output$tnormDensityPlot <- renderPlot({
    input$tnormGo
    input$tnormClear
    par(mfrow=c(1,2),oma=c(0,0,0,0),mar=c(5.1,2.1,1,1.1))
    isolate(plot.unif(tnorm.all.out$history))
    isolate(plot.tnorm(tnorm.all.out$history))
  })
  output$tnormsummary <- renderUI({
    input$tnormGo
    last<-length(tnorm.all.out$history$Y)
    if(last>0){
      str1<-paste("The most recent value of U is:",
                  round(tnorm.all.out$history$U[last],3), "(enlarged and green)")
      str2<-paste("The most recent value of Y is:",
                  round(tnorm.all.out$history$Y[last],3), "(enlarged, and green if accepted; red if rejected)")
      str3<-paste("The value of Y is", ifelse(tnorm.all.out$history$status[last]=="accept","<b>accepted</b>","<b>rejected</b>"),"because:")
      HTML(paste(str1,str2,str3,sep='<br/>'))
    }})
  output$tnormaccrej<-renderUI({
    input$tnormGo
    last<-length(tnorm.all.out$history$Y)
    if(last>0){
      u<-tnorm.all.out$history$U[last]
      fy<-tnorm.all.out$history$fy[last]
      M<-tnorm.all.out$history$M[last]
      gy<-tnorm.all.out$history$gy[last]
      y<-tnorm.all.out$history$Y[last]
      ratio<-fy/(M*gy)
      if(tnorm.all.out$history$status[last]=="accept"){
        withMathJax(sprintf("$$%0.3f \\leq \\frac{f(y)}{Mg(y)} =
                            \\frac{\\frac{1}{\\sqrt{2 \\pi}}e^{-\\frac{1}{2}(%0.2f)^2}
                            \\cdot \\left[\\frac{1}{1-\\Phi(2)}\\right] }{M \\cdot e^{2-%0.2f} }
                            = \\frac{%0.2f}{%0.3f \\cdot %0.3f} = %0.2f$$",
                            u,y,y,fy,M,gy,ratio))
      } else {
        withMathJax(sprintf("$$%0.3f > \\frac{f(y)}{Mg(y)} =
                            \\frac{\\frac{1}{\\sqrt{2 \\pi}}e^{-\\frac{1}{2}(%0.2f)^2}
                            \\cdot \\left[\\frac{1}{1-\\Phi(2)}\\right] }{M \\cdot e^{2-%0.2f} }
                            = \\frac{%0.2f}{%0.3f \\cdot %0.3f} = %0.2f$$",
                            u,y,y,fy,M,gy,ratio))
      }
    }
  })
  # running acceptance proportion
  output$tnormRatio<-renderUI({
    input$tnormGo
    num<-length(tnorm.all.out$history$Y)
    if(num>0){
      str4<-paste("The proportion of points that have been accepted is <b>",
                  round(sum(tnorm.all.out$history$status=="accept")/num,3),"</b> (out of ",num,")",sep="")
      HTML(str4)
    }
  })
}) # end of shinyServer
|
41bd7b09525be138478311dd96e52a1a79f96712
|
c5087b724353b127d69bca2eff5b62aa600a71b8
|
/R/pad-trim.r
|
466e21f0f00de189569a568eb40c58232b8b1d81
|
[] |
no_license
|
hyiltiz/stringr
|
9e0793655827c60f5f5e9ee470a2e35ffe0e94a6
|
82c4235354deec2338ef636ce604fab01c4338cc
|
refs/heads/master
| 2020-12-11T03:25:34.074649
| 2015-03-31T13:11:31
| 2015-03-31T13:11:31
| 33,868,168
| 1
| 0
| null | 2015-04-13T12:49:15
| 2015-04-13T12:49:15
| null |
UTF-8
|
R
| false
| false
| 1,691
|
r
|
pad-trim.r
|
#' Pad a string.
#'
#' Vectorised over \code{string}, \code{width} and \code{pad}.
#'
#' @param string A character vector.
#' @param width Minimum width of the padded strings.
#' @param side Side on which the padding character is added
#'   ("left", "right" or "both").
#' @param pad Single padding character (default is a space).
#' @return A character vector; elements already at least \code{width}
#'   characters wide are returned unchanged.
#' @seealso \code{\link{str_trim}} to remove whitespace
#' @export
#' @examples
#' str_pad("hadley", 30, "left")
#' str_pad("hadley", 30, "right")
#' str_pad("hadley", 30, "both")
#'
#' # All arguments are vectorised except side
#' str_pad(c("a", "abc", "abcdef"), 10)
#' str_pad("a", c(5, 10, 20))
#' str_pad("a", 10, pad = c("-", "_", " "))
#'
#' # Longer strings are returned unchanged
#' str_pad("hadley", 3)
str_pad <- function(string, width, side = c("left", "right", "both"), pad = " ") {
  side <- match.arg(side)
  # Delegate to the matching stringi padding routine.
  if (side == "left") {
    stri_pad_left(string, width, pad = pad)
  } else if (side == "right") {
    stri_pad_right(string, width, pad = pad)
  } else {
    stri_pad_both(string, width, pad = pad)
  }
}
#' Trim whitespace from start and end of string.
#'
#' @param string A character vector.
#' @param side Side on which to remove whitespace ("both", "left" or
#'   "right"; defaults to "both").
#' @return A character vector with the requested whitespace removed.
#' @export
#' @seealso \code{\link{str_pad}} to add whitespace
#' @examples
#' str_trim("  String with trailing and leading white space\t")
#' str_trim("\n\nString with trailing and leading white space\n\n")
str_trim <- function(string, side = c("both", "left", "right")) {
  side <- match.arg(side)
  # Pick the stringi trimmer for the requested side, then apply it.
  trimmer <- switch(side,
    left = stri_trim_left,
    right = stri_trim_right,
    both = stri_trim_both
  )
  trimmer(string)
}
|
03e548e7cc9cea51d4f80a7eb30c7bae2ee8d30a
|
93819d8d464369f0036a61a35358c20108755ff9
|
/code/working/makedata_partitions.R
|
8c735e709ca8e21f6d4f73724345040d99bf44e1
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
jdblischak/fucci-seq
|
3606074f6fd901509140d48bc847934af52aa4c8
|
de244c3247d225b8886ec778c7063a9fb2731e30
|
refs/heads/master
| 2022-01-21T09:20:38.761518
| 2022-01-06T20:04:30
| 2022-01-06T20:04:30
| 72,239,601
| 7
| 5
|
NOASSERTION
| 2019-09-17T15:36:03
| 2016-10-28T20:25:18
|
R
|
UTF-8
|
R
| false
| false
| 3,754
|
r
|
makedata_partitions.R
|
################################################
# Description:
# Partition samples to training and validation
################################################
library(Biobase)
# Load the final ExpressionSet and split it into expression matrix,
# sample metadata (pdata) and feature metadata (fdata).
df <- readRDS(file="data/eset-final.rds")
pdata <- pData(df)
fdata <- fData(df)
# select endogenous genes (ENSG IDs; excludes spike-ins etc.)
counts <- exprs(df)[grep("ENSG", rownames(df)), ]
# log2 counts-per-million, using per-sample molecule totals as library size
log2cpm.all <- t(log2(1+(10^6)*(t(counts)/pdata$molecules)))
#macosko <- readRDS("data/cellcycle-genes-previous-studies/rds/macosko-2015.rds")
# order all objects by the cell-cycle angle theta so columns stay aligned
counts <- counts[,order(pdata$theta)]
log2cpm.all <- log2cpm.all[,order(pdata$theta)]
pdata <- pdata[order(pdata$theta),]
# NOTE(review): assumes this rds was saved with columns in theta order too,
# so it stays aligned with pdata after the reordering above -- confirm.
log2cpm.quant <- readRDS("output/npreg-trendfilter-quantile.Rmd/log2cpm.quant.rds")
#######################----- validation sample of random cells
source("peco/R/primes.R")
source("peco/R/partitionSamples.R")
# select external validation samples (15% of cells held out at random)
set.seed(99)
nvalid <- round(ncol(log2cpm.quant)*.15)
ii.valid <- sample(1:ncol(log2cpm.quant), nvalid, replace = F)
ii.nonvalid <- setdiff(1:ncol(log2cpm.quant), ii.valid)
log2cpm.quant.nonvalid <- log2cpm.quant[,ii.nonvalid]
log2cpm.quant.valid <- log2cpm.quant[,ii.valid]
# 5 random train/test partitions of the non-validation cells (151 test each)
folds <- partitionSamples(1:ncol(log2cpm.quant.nonvalid), runs=5,
                          nsize.each = rep(151,5))
fold_indices <- folds$partitions
saveRDS(fold_indices, file="data/results/fold_indices.rds")
#######################----- validation sample of random individual
# For each individual: hold that individual out as validation, then build
# leave-one-individual-out train/test folds from the remaining individuals.
for (ind in unique(pdata$chip_id)) {
  set.seed(99)
  # nvalid <- round(ncol(log2cpm.quant)*.15)
  ii.valid <- c(1:nrow(pdata))[which(pdata$chip_id == ind)]
  ii.nonvalid <- c(1:nrow(pdata))[which(pdata$chip_id != ind)]
  pdata.nonvalid <- pdata[ii.nonvalid,]
  pdata.valid <- pdata[ii.valid,]
  # log2cpm.quant.nonvalid <- log2cpm.quant[,ii.nonvalid]
  # log2cpm.quant.valid <- log2cpm.quant[,ii.valid]
  # theta <- pdata$theta
  # names(theta) <- rownames(pdata)
  # log2cpm.nonvalid <- log2cpm.all[,ii.nonvalid]
  # log2cpm.valid <- log2cpm.all[,ii.valid]
  #
  # theta.nonvalid <- theta[ii.nonvalid]
  # theta.valid <- theta[ii.valid]
  # #sig.genes <- readRDS("output/npreg-trendfilter-quantile.Rmd/out.stats.ordered.sig.476.rds")
  # data_training <- list(theta.nonvalid=theta.nonvalid,
  #                       log2cpm.quant.nonvalid=log2cpm.quant.nonvalid,
  #                       log2cpm.nonvalid=log2cpm.nonvalid,
  #                       pdata.nonvalid=pdata.nonvalid,
  #                       fdata=fdata)
  #
  # data_withheld <- list(theta.valid=theta.valid,
  #                       log2cpm.quant.valid=log2cpm.quant.valid,
  #                       log2cpm.valid=log2cpm.valid,
  #                       pdata.valid=pdata.valid,
  #                       fdata=fdata)
  #
  # saveRDS(data_training, file=paste0("data/results/ind_",ind,"_data_training.rds"))
  # saveRDS(data_withheld, file=paste0("data/results/ind_",ind,"_data_withheld.rds"))
  ############# <- get training partitions
  # split by individual
  # # get predicted times
  # # set training samples
  # source("peco/R/primes.R")
  # source("peco/R/partitionSamples.R")
  # folds <- partitionSamples(1:ncol(log2cpm.quant.nonvalid), runs=5,
  #                           nsize.each = c(rep(round(ncol(log2cpm.quant.nonvalid)/5),4),
  #                                          ncol(log2cpm.quant.nonvalid)-sum(rep(round(ncol(log2cpm.quant.nonvalid)/5),4))))
  # one fold per remaining individual: that individual's cells are the test
  # set, everyone else's are the training set
  fold_indices <- lapply(1:length(unique(pdata.nonvalid$chip_id)), function(i) {
    ind_test <- unique(pdata.nonvalid$chip_id)[i]
    test <- which(pdata.nonvalid$chip_id==ind_test)
    train <- which(pdata.nonvalid$chip_id!=ind_test)
    return(list(test=test, train=train))
  })
  # folds$partitions
  saveRDS(fold_indices, file=paste0("data/results/ind_",ind,"_fold_indices.rds"))
}
|
50475107245fe689f0b24e1b4973fcf5fa610e5a
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/save_custom_raking_outputs.R
|
ab234ab25c48394ab475f8519a5a39fcb67a346f
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,313
|
r
|
save_custom_raking_outputs.R
|
#' @title Save outputs from custom raking function
#' @description This function saves outputs from \code{custom_rake} to disk:
#'   the raked cell-level draws, the simple raster, the raking factors, the
#'   admin-0 aggregates, and the mean/cirange/upper/lower raked rasters.
#'
#' @param custom_rake_output Output list from custom raking function; must
#'   contain the elements accessed below (\code{raked_cell_pred},
#'   \code{simple_raster}, \code{raking_factors}, \code{adm0_geo}, and the
#'   four \code{*_raked_raster} summaries).
#' @param outdir Directory for files to be saved to
#' @param indicator Name of indicator being modeled
#' @param prefix Character string to be added to files to avoid overwriting non-custom files in folder
#' @param reg Name of region that was modeled
#'
#' @return NULL (called for its side effects: files written under \code{outdir})
#' @export
#'
#' @examples
#' \dontrun{
#' save_custom_raking_outputs(custom_rake_output,
#'   outdir = sprintf("/share/geospatial/mbg/u5m/\%s/output/\%s", indicator, run_date),
#'   indicator,
#'   prefix = "custom_india",
#'   reg = "south_asia")
#' }
save_custom_raking_outputs <- function(custom_rake_output,
                                       outdir,
                                       indicator,
                                       prefix,
                                       reg) {
  ol <- custom_rake_output
  # cell-level draws after raking (saved under the name downstream code loads)
  raked_cell_pred <- ol[["raked_cell_pred"]]
  save(raked_cell_pred, file = sprintf("%s/%s_%s_raked_cell_draws_eb_bin0_%s_0.RData", outdir, prefix, indicator, reg))
  simple_raster <- ol[["simple_raster"]]
  writeRaster(simple_raster, file = sprintf("%s/%s_%s_simple_raster", outdir, prefix, indicator), format = "GTiff")
  # per-admin raking factors
  raking_factors <- ol[["raking_factors"]]
  write.csv(raking_factors, file = sprintf("%s/%s_%s_%s_rf.csv", outdir, prefix, indicator, reg))
  adm0_geo <- ol[["adm0_geo"]]
  write.csv(adm0_geo, file = sprintf("%s/%s_%s_adm0_geo.csv", outdir, prefix, indicator))
  # summary rasters of the raked estimates (mean, CI range, upper, lower)
  mean_raster <- ol[["mean_raked_raster"]]
  writeRaster(mean_raster, file = sprintf("%s/%s_%s_mean_raked_2000_2015", outdir, prefix, indicator), format = "GTiff")
  cirange_raster <- ol[["cirange_raked_raster"]]
  writeRaster(cirange_raster, file = sprintf("%s/%s_%s_cirange_raked_2000_2015", outdir, prefix, indicator), format = "GTiff")
  upper_raster <- ol[["upper_raked_raster"]]
  writeRaster(upper_raster, file = sprintf("%s/%s_%s_upper_raked_2000_2015", outdir, prefix, indicator), format = "GTiff")
  lower_raster <- ol[["lower_raked_raster"]]
  writeRaster(lower_raster, file = sprintf("%s/%s_%s_lower_raked_2000_2015", outdir, prefix, indicator), format = "GTiff")
}
|
1b6c555c96ec40b6e96f5244656ba8e00cd2fcb3
|
739e57abf7c1723a655c31616dbeead52cd423d5
|
/analysis/sequential/model/kmcox_combine.R
|
fd0e48fa26e6c00965317882d3d8be151cddb2e5
|
[
"MIT"
] |
permissive
|
opensafely/covid-vaccine-effectiveness-sequential-vs-single
|
d6dde4b62ebec8f2bb8844e148efe06dd44adc1b
|
46c1314fe25b17d3e04d355e832e853b1464b531
|
refs/heads/main
| 2023-08-23T10:42:13.962541
| 2023-02-20T16:59:22
| 2023-02-20T16:59:22
| 588,559,908
| 3
| 0
|
MIT
| 2023-05-16T16:40:35
| 2023-01-13T12:22:53
|
R
|
UTF-8
|
R
| false
| false
| 2,308
|
r
|
kmcox_combine.R
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Purpose: Combine km estimates from different outcomes
# - The script must be accompanied by three arguments:
# `brand` - the brand used
# `subgroup` - the subgroup variable
# `outcome` - the outcome variable
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Preliminaries ----
# import libraries
library('tidyverse')
library('here')
library('glue')
library('survival')
# import custom user functions and parameters
# (design.R defines model_brands / model_outcomes / model_subgroups used below)
source(here("analysis", "design.R"))
source(here("analysis", "functions", "utility.R"))
# create output directory
outdir <- here("output", "sequential", "combine")
fs::dir_create(outdir)
# define metaparams: one row per brand x outcome x subgroup combination
metaparams <-
  expand_grid(
    brand = model_brands,
    outcome = model_outcomes,
    subgroup = model_subgroups
  )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# combine files ----
# Read the model output called `filename` for every (brand, subgroup, outcome)
# combination in `metaparams`, standardise the subgroup column to a common
# `subgroup_level` column so the rows can be stacked, and write one combined
# CSV named `<filename>.csv` to `outdir`.
# FIX(review): the glue() templates contained literal "(unknown)" placeholders
# (apparent redaction artifacts) and never used the `filename` argument;
# restored to "{filename}" so each call reads/writes the intended files.
combine_files <- function(filename) {
  metaparams %>%
    mutate(
      # one tibble of estimates per metaparameter combination
      data = pmap(list(brand, subgroup, outcome), function(brand, subgroup, outcome) {
        subgroup <- as.character(subgroup)
        dat <- read_rds(here("output", "sequential", brand, "model", subgroup, outcome, glue("{filename}.rds")))
        dat %>%
          ungroup() %>%
          # copy the subgroup-specific column into a uniformly named column,
          # then drop the original so all tibbles share the same schema
          add_column(
            subgroup_level = as.character(.[[subgroup]]),
            .before=1
          ) %>%
          select(-all_of(subgroup))
      })
    ) %>%
    unnest(data) %>%
    write_csv(file.path(outdir, glue("{filename}.csv")))
}
# Combine each output type across all brand/subgroup/outcome combinations
combine_files("km_estimates_rounded")
combine_files("km_estimates_unrounded")
combine_files("contrasts_km_cuts_rounded")
combine_files("contrasts_cox_cuts")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## move km plots to single folder ----
# Copy the plot called `filename` from every (brand, subgroup, outcome) model
# directory into the single combined output folder, renaming each copy to
# `<filename>_<brand>_<subgroup>_<outcome>.png` so the names stay unique.
# FIX(review): restored "{filename}" in the glue() templates, which contained
# literal "(unknown)" placeholders and left the `filename` argument unused.
move_plots <- function(filename) {
  metaparams %>%
    rowwise() %>%
    mutate(
      plotdir = here("output", "sequential", brand, "model", subgroup, outcome, glue("{filename}.png")),
      plotnewdir = file.path(outdir, glue("{filename}_{brand}_{subgroup}_{outcome}.png")),
    ) %>%
    {walk2(.$plotdir, .$plotnewdir, ~fs::file_copy(.x, .y, overwrite = TRUE))}
}
# Gather both KM plot variants into the combined folder
move_plots("km_plot_rounded")
move_plots("km_plot_unrounded")
|
8771bdc950da76300abaf80f3ec581437ef9b91f
|
9947a34b5354c41cb4998e761062f8d82f566b6b
|
/Core_manuscript/Pinechrom/Scripts/run_deseq2_atac.R
|
a6a9762caeced67bdba0f621f391e304e0f30b0d
|
[] |
no_license
|
jknightlab/ATACseq_pipeline
|
4f3935576e23d36d880116190f3ab63b86537ded
|
7a30e39a87eaa7cb583a414b7661497886fa8199
|
refs/heads/master
| 2020-12-07T17:10:17.748148
| 2016-09-28T18:53:16
| 2016-09-28T18:53:16
| 46,871,204
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,285
|
r
|
run_deseq2_atac.R
|
library(DESeq2)
# Command-line interface: 1) counts table, 2) sample/condition table,
# 3) prefix for all output files.
args<-commandArgs(TRUE)
input_counts <- args[1]
input_colnames <- args[2]
output_header <- args[3]
# Generating filenames
filename_count_table <- paste(output_header, ".normalized_counts.txt", sep="")
filename_de_plots <- paste(output_header, ".DE_plots.pdf", sep="")
# NOTE(review): filename_de_results is not used in the visible part of this
# script; presumably the (truncated) tail writes the DE results table to it.
filename_de_results <- paste(output_header, ".DE_results.txt", sep="")
## -------------------------------
##
## Reading the input data
##
## -------------------------------
# Input -- a file containing count data and info about conditions
countData <- read.table(input_counts, sep='\t', header=TRUE)
colData <- read.table(input_colnames, sep='\t', header=TRUE)
print ("Data was successfully loaded.")
# Converting input data into a deseq object
dds <- DESeqDataSetFromMatrix(countData = countData,
                              colData = colData,
                              design = ~ condition)
# Filtering to remove low counts (keep features with total count > 1)
dds <- dds[ rowSums(counts(dds)) > 1, ]
# Tell deseq which samples to compare (untreated is the reference level)
dds$condition <- factor(dds$condition,
                        levels=c("untreated","treated"))
print ("DESeq2 object was created.")
## -------------------------------
##
## Running DE analysis
##
## -------------------------------
# Running DE analysis
dds <- DESeq(dds)
normalized_counts <- counts(dds, normalized=T)
write.table(as.data.frame(normalized_counts),
            file=filename_count_table, quote=FALSE, sep='\t')
print ("Differential expression analysis was finished.")
# Extracting the results matrix
# The results are shown for treated vs untreated, fold change log2 (treated/untreated)
res <- results(dds)
# alpha is the FDR cutoff, we can change it
res05 <- results(dds, alpha=0.05)
## -------------------------------
##
## Extra plots
##
## -------------------------------
library("ggplot2")
library("pheatmap")
library("RColorBrewer")
print ("Started to generate plots.")
# All diagnostic plots go into a single multi-page PDF
pdf(filename_de_plots)
# plot MA (shrunken and unshrunken log fold changes)
plotMA(res, main="DESeq2", ylim=c(-2,2))
resMLE <- results(dds, addMLE=TRUE)
plotMA(resMLE, MLE=TRUE, main="unshrunken LFC", ylim=c(-2,2))
# plot counts for the gene with the smallest adjusted p-value
plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
# plot counts in ggplot
d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition",
                returnData=TRUE)
ggplot(d, aes(x=condition, y=count)) +
  geom_point(position=position_jitter(w=0.1,h=0)) +
  scale_y_log10(breaks=c(25,100,400))
# plot heatmap of counts under three transformations:
# log2(x+1), rlog, and variance-stabilizing transform
rld <- rlog(dds)
vsd <- varianceStabilizingTransformation(dds)
# top 20 features by mean normalized count
select <- order(rowMeans(counts(dds,normalized=TRUE)),decreasing=TRUE)[1:20]
nt <- normTransform(dds) # defaults to log2(x+1)
log2.norm.counts <- assay(nt)[select,]
df <- as.data.frame(colData(dds)[,c("condition","type")])
pheatmap(log2.norm.counts, cluster_rows=FALSE, show_rownames=FALSE,
         cluster_cols=FALSE, annotation_col=df)
pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
         cluster_cols=FALSE, annotation_col=df)
pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
         cluster_cols=FALSE, annotation_col=df)
# plot sample to sample distance (Euclidean, on rlog-transformed counts)
sampleDists <- dist(t(assay(rld)))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
         clustering_distance_rows=sampleDists,
         clustering_distance_cols=sampleDists,
         col=colors)
# plot PCA of samples, colored by condition and shaped by type
plotPCA(rld, intgroup=c("condition", "type"))
data <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
percentVar <- round(100 * attr(data, "percentVar"))
ggplot(data, aes(PC1, PC2, color=condition, shape=type)) +
geom_point(size=3) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance"))
dev.off()
print ("Finished generating plots.")
## -------------------------------
##
## Exporting results
##
## -------------------------------
print ("Preparing to print the list of differentially expressed regions.")
# Sort results on adjusted pvalue
resOrdered <- res[order(res$padj),]
# Write output in a csv file
write.table(as.data.frame(resOrdered), file=filename_de_results,
quote=FALSE, sep='\t')
print ("Finished printing DESeq2 output.")
|
7b58f9c8f4fb380de36d5ada70d80dc7e233d757
|
b68f6f88ee633d16bd6d116ab4cdcb8fae1849d9
|
/RGuide/Chapter 4_6.R
|
3f0503b6c38a4120a865a9125cad7893e7f25621
|
[
"MIT"
] |
permissive
|
HealthyUncertainty/healthyuncertainty.github.io
|
9a6bc9279fa064a4b368923ba131a3ac9212ea2d
|
8546fb628da98640c29e268466273ef970530d4c
|
refs/heads/master
| 2023-04-01T12:31:43.422831
| 2021-04-03T01:28:15
| 2021-04-03T01:28:15
| 314,864,416
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
Chapter 4_6.R
|
#####################################################################################
# TITLE:       Building Health State Transition Models in R: Chapter 9              #
# DESCRIPTION: Apply per-cycle costs and utilities to the survival model built      #
#              in the preceding scripts (loaded via Chapter 4_5.R below).           #
# AUTHOR:      Ian Cromwell                                                         #
# DATE:        November, 2014                                                       #
# NOTE(review): the file is named "Chapter 4_6" but this banner says Chapter 9 --   #
#              confirm which chapter this actually belongs to.                      #
#####################################################################################
### SET WORKING DIRECTORY
# NOTE(review): placeholder path -- must be edited before running.
setwd("WHATEVER DIRECTORY YOU'VE SAVED THESE FILES IN")
### LOAD IN VALUES FROM PREVIOUS CHAPTERS
# Expected to define (among others): n (number of simulations), ncycle (model
# cycles), the state-occupancy arrays HS_W/HS_X/HS_Y/HS_Z and the sampled
# per-simulation cost (C_*) and utility (U_*) vectors -- confirm in
# Chapter 4_5.R.
source("Chapter 4_5.R")
### APPLY COSTS TO HEALTH STATES ###
# Create blank arrays for costs.
# Dimensions are [cycle, state column, simulation]; states Y and Z carry two
# columns (column 1 is used for the newly-transitioned cohort below).
cost_HSW <- cost_HSX <- array(0, dim=c(ncycle,1,n))
cost_HSY <- cost_HSZ <- array(0, dim=c(ncycle,2,n))
# Populate arrays: cost = state occupancy * per-simulation unit cost.
for (j in 1:n){
for (i in 1:ncycle){
cost_HSW[i,,j] <- HS_W[i,,j]*C_W[j]
cost_HSX[i,,j] <- HS_X[i,,j]*C_X[j]
# New arrivals in Y (column 1) incur a one-off transition cost on top of
# the ongoing state cost; continuing occupants (column 2) pay C_Y only.
cost_HSY[i,1,j] <- HS_Y[i,1,j]*(C_Ytransition[j] + C_Y[j])
cost_HSY[i,2,j] <- HS_Y[i,2,j]*C_Y[j]
# State Z is only costed at the moment of transition (column 2 stays 0).
cost_HSZ[i,1,j] <- HS_Z[i,1,j]*C_Ztransition[j]
}}
### APPLY UTILITIES TO HEALTH STATES ###
# Create blank arrays for utilities (state Z accrues no utility).
qol_HSW <- qol_HSX <- array(0, dim=c(ncycle,1,n))
qol_HSY <- array(0, dim=c(ncycle,2,n))
# Populate arrays: QALY contribution = occupancy * per-simulation utility.
for (j in 1:n){
for (i in 1:ncycle){
qol_HSW[i,,j] <- HS_W[i,,j]*U_W[j]
qol_HSX[i,,j] <- HS_X[i,,j]*U_X[j]
qol_HSY[i,,j] <- HS_Y[i,,j]*U_Y[j]
}}
|
314b796df07259051c67eda6471b9377c8d9db89
|
09e288ff4c1637e4e1501d3283bb7cabc6d62eba
|
/diaryRepository.R
|
64273d23fe6bfe05f0613e843ab6117e3e890a64
|
[] |
no_license
|
vika95viktoria/Diary-in-R
|
68d8c8b73fe8d71fc56ed8e28264055b57ba7a27
|
aeb85a98f00347042d1b7391299c5333d4159c9b
|
refs/heads/main
| 2023-01-11T10:47:13.008069
| 2020-11-15T12:15:10
| 2020-11-15T12:15:10
| 313,022,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
diaryRepository.R
|
library(RPostgreSQL)
library(DBI)
library(stringr)
# Database password used by get_connection() below.
# NOTE(review): credentials are hard-coded in source control -- consider
# reading them from an environment variable or an ignored config file.
pw<- {
"test123"
}
# Open a new connection to the local "diary" PostgreSQL database.
# The caller is responsible for releasing it with dbDisconnect().
get_connection <- function() {
  dbConnect(
    dbDriver("PostgreSQL"),
    dbname = "diary",
    host = "host.docker.internal",
    port = 5432,
    user = "test_user",
    password = pw
  )
}
# Fetch every row of the "post" table as a data.frame.
get_all_posts <- function() {
  con <- get_connection()
  posts <- dbGetQuery(con, ' select * from "post"')
  dbDisconnect(con)
  posts
}
# Fetch the single post with the given id (empty data.frame if absent).
#
# The id is validated to be a single number before it is interpolated into
# the SQL text, which closes the SQL-injection hole the interpolation would
# otherwise open (RPostgreSQL has no bound-parameter support). The
# connection is released even if the query fails.
#
# Args:
#   id: numeric scalar primary key of the post to fetch.
# Returns: a data.frame with the matching row(s) of "post".
get_post_by_id <- function(id) {
  stopifnot(is.numeric(id), length(id) == 1)
  conn <- get_connection()
  on.exit(dbDisconnect(conn), add = TRUE)
  res <- dbGetQuery(conn, str_interp(' select * from "post" where id = ${id}'))
  return(res)
}
# Insert a new post (headline + body text) stamped with the current time.
#
# Text values are escaped with DBI::dbQuoteString() before interpolation, so
# descriptions/headlines containing single quotes no longer break the
# statement and cannot inject SQL (the original interpolated raw strings).
# The statement result and the connection are released even on error.
#
# Args:
#   post: list-like object with $description and $headline character fields.
# Returns (invisibly): the number of rows affected.
insert_post <- function(post) {
  conn <- get_connection()
  on.exit(dbDisconnect(conn), add = TRUE)
  text_post <- dbQuoteString(conn, post$description)
  headline <- dbQuoteString(conn, post$headline)
  rs <- dbSendStatement(
    conn,
    str_interp("INSERT INTO post (text_post, date_posted, headline)
               VALUES (${text_post}, now(), ${headline})")
  )
  # Clear the result before the connection is closed (prepended handler).
  on.exit(dbClearResult(rs), add = TRUE, after = FALSE)
  invisible(dbGetRowsAffected(rs))
}
# Fetch the most recently inserted post (the row with the highest id) as a
# one-row data.frame, or zero rows if the table is empty.
get_last_post <- function() {
conn <- get_connection()
res <- dbGetQuery(conn,str_interp('select * from "post"
order by id desc limit 1'))
dbDisconnect(conn)
return(res)
}
|
e9799af14a0aa7228fa51afbabc1afec80079b86
|
7fd895918aa4c54b619c3fe7ea8a2171d0b9e585
|
/man/bal.tab.matchit.Rd
|
f713340cac08a89c3212b443be3f9537f1d821e0
|
[] |
no_license
|
EvaMaeRey/cobalt
|
15a6d580bfd5c0383bfab972f7516cd2439bc086
|
82c3c71736911702534311db4a47beaa8ccd9e46
|
refs/heads/master
| 2020-03-16T08:32:39.644168
| 2018-05-08T11:40:51
| 2018-05-08T11:40:51
| 132,597,911
| 1
| 0
| null | 2018-05-08T11:16:31
| 2018-05-08T11:16:31
| null |
UTF-8
|
R
| false
| false
| 7,323
|
rd
|
bal.tab.matchit.Rd
|
\name{bal.tab.matchit}
\alias{bal.tab.matchit}
\title{
Balance Statistics for Matchit Objects
}
\description{
Generates balance statistics for \code{matchit} objects from \pkg{MatchIt}.
}
\usage{
\method{bal.tab}{matchit}(m,
int = FALSE,
distance = NULL,
addl = NULL,
data = NULL,
continuous = c("std", "raw"),
binary = c("raw", "std"),
s.d.denom = c("treated", "control", "pooled"),
m.threshold = NULL,
v.threshold = NULL,
ks.threshold = NULL,
imbalanced.only = FALSE,
un = FALSE,
disp.bal.tab = TRUE,
disp.means = FALSE,
disp.v.ratio = FALSE,
disp.ks = FALSE,
disp.subclass = FALSE,
cluster = NULL,
which.cluster = NULL,
cluster.summary = TRUE,
quick = FALSE,
...)
}
\arguments{
\item{m}{
a \code{matchit} object; the output of a call to \code{matchit()} from the \pkg{MatchIt} package.
}
\item{int}{
\code{logical} or \code{numeric}; whether or not to include powers and 2-way interactions of covariates included in \code{covs} and in \code{addl}. If \code{numeric} and equal to \code{1} or \code{2}, squares of each covariate will be displayed; greater numbers will display corresponding powers up to the provided input (e.g., \code{3} will display squares and cubes of each covariate).
}
\item{distance}{
Optional; either a vector or data.frame containing distance values (e.g., propensity scores) for each unit or a string containing the name of the distance variable in \code{data}. Note that the distance measure generated by \code{matchit()} is automatically included.
}
\item{addl}{
an optional data frame or the quoted names of additional covariates for which to present balance. These may be covariates included in the original dataset but not included in the call to \code{matchit()}. If variable names are specified, \code{bal.tab()} will look first in the argument to \code{data}, if specified, and next in the \code{matchit} object.
}
\item{data}{
an optional data frame containing variables that might be named in arguments to \code{distance}, \code{addl}, and \code{cluster}.
}
\item{continuous}{
whether mean differences for continuous variables should be standardized ("std") or raw ("raw"). Default "std". Abbreviations allowed.
}
\item{binary}{
whether mean differences for binary variables (i.e., difference in proportion) should be standardized ("std") or raw ("raw"). Default "raw". Abbreviations allowed.
}
\item{s.d.denom}{
whether the denominator for standardized differences (if any are calculated) should be the standard deviation of the treated group ("treated"), the standard deviation of the control group ("control"), or the pooled standard deviation ("pooled"), computed as the square root of the mean of the group variances. Abbreviations allowed. The default is "treated".
}
\item{m.threshold}{
a numeric value for the threshold for mean differences. .1 is recommended.
}
\item{v.threshold}{
a numeric value for the threshold for variance ratios. Will automatically convert to the inverse if less than 1.
}
\item{ks.threshold}{
a numeric value for the threshold for Kolmogorov-Smirnov statistics. Must be between 0 and 1.
}
\item{imbalanced.only}{
\code{logical}; whether to display only the covariates that failed to meet at least one of balance thresholds.
}
\item{un}{
\code{logical}; whether to print statistics for the unadjusted sample as well as for the adjusted sample.
}
\item{disp.bal.tab}{
\code{logical}; whether to display the table of balance statistics.
}
\item{disp.means}{
\code{logical}; whether to print the group means in balance output.
}
\item{disp.v.ratio}{
\code{logical}; whether to display variance ratios in balance output.
}
\item{disp.ks}{
\code{logical}; whether to display Kolmogorov-Smirnov statistics in balance output.
}
\item{disp.subclass}{
\code{logical}; whether to display balance information for individual subclasses if subclassification is used in conditioning. See \code{\link{bal.tab.subclass}} for details.
}
\item{cluster}{
either a vector containing cluster membership for each unit or a string containing the name of the cluster membership variable in \code{data} or the \code{matchit} object. See \code{\link{bal.tab.cluster}} for details.
}
\item{which.cluster}{
which cluster(s) to display if \code{cluster} is specified. See \code{\link{bal.tab.cluster}} for details.
}
\item{cluster.summary}{
\code{logical}; whether to display the cluster summary table if \code{cluster} is specified. See \code{\link{bal.tab.cluster}} for details.
}
\item{quick}{
\code{logical}; if \code{TRUE}, will not compute any values that will not be displayed. Leave \code{FALSE} if computed values not displayed will be used later.
}
\item{...}{
further arguments passed to or from other methods. They are ignored in this function.
}
}
\details{
\code{bal.tab.matchit()} generates a list of balance summaries for the \code{matchit} object given, and functions similarly to \code{summary.matchit()} in \pkg{MatchIt}. \code{bal.tab()} behaves differently depending on whether subclasses are used in conditioning or not. If they are used, \code{bal.tab()} creates balance statistics for each subclass and for the sample in aggregate; see \code{\link{bal.tab.subclass}} for more information.
All balance statistics are calculated whether they are displayed by \code{print} or not, unless \code{quick = TRUE}. The threshold values (\code{m.threshold}, \code{v.threshold}, and \code{ks.threshold}) control whether extra columns should be inserted into the Balance table describing whether the balance statistics in question exceeded or were within the threshold. Including these thresholds also creates summary tables tallying the number of variables that exceeded and were within the threshold and displaying the variables with the greatest imbalance on that balance measure. When subclassification is used, the extra threshold columns are placed within the balance tables for each subclass as well as in the aggregate balance table, and the summary tables display balance for each subclass.
}
\value{
If subclassification is used, an object of class \code{"bal.tab.subclass"} containing balance summaries within and across subclasses. See \code{\link{bal.tab.subclass}} for details.
If matching is used and clusters are not specified, an object of class \code{"bal.tab"} containing balance summaries for the \code{matchit} object. See \code{\link{bal.tab}} for details.
If clusters are specified, an object of class \code{"bal.tab.cluster"} containing balance summaries within each cluster and a summary of balance across clusters. See \code{\link{bal.tab.cluster}} for details.
}
\author{
Noah Greifer
}
\seealso{
\code{\link{bal.tab}} for details of calculations.
}
\examples{library(MatchIt); data("lalonde", package = "cobalt")
## Nearest Neighbor matching
m.out1 <- matchit(treat ~ age + educ + race +
married + nodegree + re74 + re75,
data = lalonde, method = "nearest")
bal.tab(m.out1, un = TRUE, m.threshold = .1,
v.threshold = 2)
## Subclassification
m.out2 <- matchit(treat ~ age + educ + race +
married + nodegree + re74 + re75,
data = lalonde, method = "subclass")
bal.tab(m.out2, disp.subclass = TRUE)
}
|
55077a49221b294fbee65c861470d1ffb9c6ad7a
|
3410f4f90f0e431bf3a6105d8ea73215c548066f
|
/outcome.R
|
2111bdce2b7827e94e7d9c8828b88df862581ba5
|
[] |
no_license
|
jhonatansgg/Final-project
|
6ecb550e9aad285097efa7dada26855a15587e40
|
1c9a9dbd7fc2b283209a6812ada0cf6b4c2597f8
|
refs/heads/master
| 2022-12-03T18:04:45.693629
| 2020-08-07T05:01:57
| 2020-08-07T05:01:57
| 285,741,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
outcome.R
|
# Read the hospital outcome-of-care measures table; import everything as
# character so entries such as "Not Available" do not corrupt the columns.
outcome <- read.csv("outcome-of-care-measures.csv",colClasses = "character")
# Quick structural inspection of the imported table.
head(outcome)
ncol(outcome)
nrow(outcome)
names(outcome)
# Coerce column 11 (30-day heart-attack death rate) to numeric; non-numeric
# entries become NA, so a coercion warning here is expected.
outcome[,11] <- as.numeric(outcome[,11])
# Histogram of the 30-day hospital mortality rates (labels are in Spanish).
hist(outcome[,11],xlab = "Muertes",main = "Tasas De Mortalidad Hospitalarias De 30 Dias(Al Mes) Por Ataque Cardiaco"
,col = "lightblue")
|
ced435b31ea3f1e726db9a992a950e0178a2b9b2
|
a14a124c6901ad11bb21b5742ecce3052aad9aba
|
/man/fun_logic_assg.Rd
|
1b1df74cd8c8921d5990e003f1b492a35ea8e95b
|
[] |
no_license
|
DataCollaborativeForJustice/mjp
|
8ecde44e11227aaf429b5f84adb4b5d3567bd4a3
|
f9d7ae05a58a3c28cc22e5ae40aa8226ac158014
|
refs/heads/master
| 2022-03-13T13:10:13.742983
| 2019-09-23T13:50:18
| 2019-09-23T13:50:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,132
|
rd
|
fun_logic_assg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_logic_assg.r
\name{fun_logic_assg}
\alias{fun_logic_assg}
\title{Logical assignment, a MJP team defined function}
\usage{
fun_logic_assg(x, reference, typo_col = "TYPO", replace_col = "REPLACEMENT")
}
\arguments{
\item{x}{An input variable/column/vector of strings to perform the logical assignment on}
\item{reference}{A reference table (lookup table) having at least 2 column for matching the typo and returning the replacement}
\item{typo_col}{The name (as a "character") of the column in the reference table that allows the function to match with the input column "x"}
\item{replace_col}{The name (as a "character") of the column in the reference table that contains the corresponding values to be returned.}
}
\value{
A vector (character) that contains the replacement values from the reference table
}
\description{
Logical assignment, a MJP team defined function
}
\examples{
ref_table <- data.frame(cars = rownames(mtcars), cyl = mtcars$cyl)
fun_logic_assg(rownames(mtcars) , ref_table, typo_col = "cars", replace_col = "cyl")
}
|
e73d24ae591648c8692d11810f7e9d4a6c001fea
|
c58a1595115fea554db8cd6578279f574eabfa0e
|
/man/chk_names_complete.Rd
|
98dea08e6465e6827a006959a9fddbdeae8f2aa7
|
[
"MIT"
] |
permissive
|
bayesiandemography/demcheck
|
129aca86fecda02be83bea73e639fb45d366c651
|
c52c3e4201e54ead631e587ebf94f97f9c7a05a0
|
refs/heads/master
| 2021-12-28T15:40:54.771894
| 2021-12-17T03:10:50
| 2021-12-17T03:10:50
| 200,993,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 716
|
rd
|
chk_names_complete.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-composite.R, R/err-composite.R
\name{chk_names_complete}
\alias{chk_names_complete}
\alias{err_names_complete}
\title{Check that an object has a complete set of names}
\usage{
chk_names_complete(x, name)
err_names_complete(x, name)
}
\arguments{
\item{x}{An object, typically a vector.}
\item{name}{The name for \code{x} that
will be used in error messages.}
}
\description{
Check that an object has a complete set of names
}
\examples{
x <- c(a = 1, b = 1.2)
chk_names_complete(x = x, name = "x")
}
\seealso{
\code{\link{chk_names_dimnames_complete}},
\code{\link{chk_has_dimnames}},
\code{\link{chk_array_metadata_complete}}
}
|
6f96970180cd7fe8aa85b2b71ab8652369a72df7
|
40bbc7902204805f6975d62d15bffb433ad8d572
|
/cleaning.R
|
d02999d59c0adfe73a8636ce1ad8aa62f3b288b4
|
[] |
no_license
|
simon00001/capstone
|
4cb645fea2c30caf2e65352cab3e4041085bfc6a
|
b5f10042d4ba1e1ccb0598adfd322c16cad493ab
|
refs/heads/master
| 2016-09-13T15:02:47.929905
| 2016-04-25T03:57:33
| 2016-04-25T03:57:33
| 57,006,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,635
|
r
|
cleaning.R
|
# This is module for a Shiny web application.
#
# Application : Word Prediction module
# by : Simon Kong
# date : 21 Apr 2016
suppressPackageStartupMessages(c(
library(shiny),
library(tm),
library(stylo),
library(stringr)))
# Load the precomputed n-gram lookup tables (bi-, tri- and four-gram) used
# by the back-off predictor below.
# NOTE(review): the files have a .RData extension but are read with
# readRDS(), so they must have been written with saveRDS() -- confirm.
quadData <- readRDS(file="./data/quadData.RData")
triData <- readRDS(file="./data/triData.RData")
biData <- readRDS(file="./data/biData.RData")
# Normalise raw input text before prediction: lower-case it, strip
# punctuation, digits and remaining non-alphanumeric characters, then
# collapse repeated whitespace. Applied in this exact order.
dataCleaner<-function(text){
  transforms <- list(
    tolower,
    removePunctuation,
    removeNumbers,
    function(s) str_replace_all(s, "[^[:alnum:]]", " "),
    stripWhitespace
  )
  Reduce(function(s, f) f(s), transforms, text)
}
# Clean the input text and tokenise it into English words, preserving case.
cleanText <- function(text){
  txt.to.words.ext(dataCleaner(text),
                   language = "English.all",
                   preserve.case = TRUE)
}
# Predict the next word from (up to) the last three words of the input,
# backing off through the pre-loaded n-gram tables: try the 4-gram table
# on the full three-word context first, then the 3-gram table on the last
# two words, then the 2-gram table on the last word.
#
# Args:
#   wordCount: number of tokens in textInput.
#   textInput: character vector of cleaned tokens.
# Prints (and returns via print) the predicted word, or NA when no n-gram
# level matches.
nextWordPrediction <- function(wordCount,textInput){
  # Reduce the context to exactly three slots, left-padding with NA when
  # fewer than three words are available.
  if (wordCount>=3) {
    textInput <- textInput[(wordCount-2):wordCount]
  } else if (wordCount==2) {
    textInput <- c(NA,textInput)
  } else {
    textInput <- c(NA,NA,textInput)
  }
  # 4-gram lookup on the full three-word context.
  wordPrediction <- as.character(quadData[quadData$unigram==textInput[1] &
                                          quadData$bigram==textInput[2] &
                                          quadData$trigram==textInput[3],][1,]$quadgram)
  if (is.na(wordPrediction)) {
    # Back off to the 3-gram table.
    # BUG FIX: the original assigned this lookup to `wordPrediction1` and
    # then re-tested `wordPrediction`, so the trigram back-off result was
    # silently discarded and the code skipped straight to the bigram table.
    wordPrediction <- as.character(triData[triData$unigram==textInput[2] &
                                           triData$bigram==textInput[3],][1,]$trigram)
    if (is.na(wordPrediction)) {
      # Final back-off: bigram table keyed on the last word only.
      wordPrediction <- as.character(biData[biData$unigram==textInput[3],][1,]$bigram)
    }
  }
  print(wordPrediction)
}
|
1378aaf0c7a3ca369e4818e5147a1c4989b42d43
|
5fd3ddd30766a4eae04069b44bfe4f85f9dfaa40
|
/R/getNHINoBasedOnRCFNo.R
|
55c3850a61d37f96c18d46722bbf92b5d8ef36bb
|
[] |
no_license
|
Angelacheese/pharm
|
1a18155194cbc6551f12e28083f2a01a347dd0fb
|
9f7d600752641edb30353f4575d89a9db6cc67ab
|
refs/heads/master
| 2022-10-06T03:13:18.425075
| 2019-07-18T08:09:56
| 2019-07-18T08:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
r
|
getNHINoBasedOnRCFNo.R
|
#' Get NHI numbers based on RCF numbers
#'
#' Looks up \code{NHINO1} for each RCF number in \code{df} by joining
#' against the lookup table \code{resCGDAx} on the first 7 characters of
#' the RCF number.
#'
#' @import dplyr
#' @param df A data.frame that includes an RCF-number column.
#' @param RCFNoColName The (unquoted) name of the RCF-number column of
#'   \code{df}; defaults to \code{RCFNO}. Captured with \code{substitute()},
#'   so the default symbol is never evaluated.
#' @return A table with the original RCF-number column and \code{NHINO1}.
#' @export
get.NHINoViaRCFNo <- function(df, RCFNoColName = RCFNO){
# Temporarily rename the caller-specified column to a fixed name so the
# dplyr/data.table pipeline below can refer to it literally.
colnames(df)[colnames(df)==deparse(substitute(RCFNoColName))] <- "RCFNO1"
NHINoData <- df %>% select(RCFNO1) %>% as.data.table()
# Join key: first 7 characters of the RCF number.
NHINoData[, RCFNo := substr(RCFNO1, 1, 7)]
# NOTE(review): `resCGDAx` is resolved from the enclosing/global
# environment rather than passed in -- confirm it is loaded before calling.
NHINoData <- left_join(NHINoData, resCGDAx,by = "RCFNo") %>%
select(RCFNO1, NHINO1)
# Restore the caller's original column name before returning.
colnames(NHINoData)[colnames(NHINoData)== "RCFNO1"] <- deparse(substitute(RCFNoColName))
return(NHINoData)
}
|
7a9547f37a67fe18c408ebdcdb85ea1b66dbd066
|
319a13e48a7e26e5ab660c9dba30521834203dab
|
/RFiles/arma.R
|
23a7bdf8d4b1c810264a588ec49f91c1d8963981
|
[] |
no_license
|
marco-tn/StaticticsBasic
|
9b4914bec56571754b54481e53f389654178cb3b
|
6dba8d4ac01759cd1e7af302386b9a34f3594475
|
refs/heads/master
| 2020-08-03T13:06:57.357421
| 2019-09-30T03:00:07
| 2019-09-30T03:00:07
| 211,762,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
arma.R
|
## Simulation of an ARMA(2,1) model
n <- 1000
a <- c(0.8, -0.64) # AR coefficients
b <- -0.5 # MA coefficient
# Generate the series explicitly with a for loop
epsilon <- rnorm(n)
x0 <- rnorm(2) # initial values drawn at random
x <- ts(double(n))
x[1:2] <- x0
# a %*% x[i - 1:2] is the AR part built from the two previous values.
for(i in 3:n) x[i] <- a %*% x[i - 1:2] + b*epsilon[i-1] + epsilon[i]
plot(x)
# Alternative: generate with arima.sim (initial values cannot be specified)
# arima.sim's innovations are a standard normal sequence by default
y <- arima.sim(list(ar=a, ma = b), n)
lines(y,col="red")
|
d4a2fe2b4f4068ef6aedcaac7574a3aecffbf338
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pastecs/examples/decloess.Rd.R
|
8187ea96fb2d38b38ab1e54542b650907df31dc3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 461
|
r
|
decloess.Rd.R
|
# Extracted example for pastecs::decloess (auto-generated header kept below).
library(pastecs)
### Name: decloess
### Title: Time series decomposition by the LOESS method
### Aliases: decloess
### Keywords: ts smooth
### ** Examples
data(releve)
# Regularise the irregular Melosul series onto a 24-obs/year time grid.
melo.regy <- regul(releve$Day, releve$Melosul, xmin=9, n=87,
units="daystoyears", frequency=24, tol=2.2, methods="linear",
datemin="21/03/1989", dateformat="d/m/Y")
melo.ts <- tseries(melo.regy)
# LOESS decomposition with a periodic seasonal window; plot the components.
melo.dec <- decloess(melo.ts, s.window="periodic")
plot(melo.dec, col=1:3)
|
7b8a0b48593b3cafebbf1afb236290bb5835e65e
|
85b850779b0e241cc4e1bfe0c3cae7304f0e1102
|
/ui.r
|
b8640045ff25fb10270af9439ae07af97762df4a
|
[] |
no_license
|
NikhilSinghChandel/CodeTheGame-IITBombay-Final
|
aa004ba574bb08c4e675bf78f152daf72d095c2c
|
2db119ac74f659eb4a346387b6c4e68320846e99
|
refs/heads/master
| 2020-03-22T10:27:32.444988
| 2018-07-05T21:51:37
| 2018-07-05T21:51:37
| 139,903,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
ui.r
|
library(shiny)
# UI for the "Code the Game" app: two CSV upload controls and a download
# button in the sidebar, a table preview in the main panel. The matching
# outputs ('table', 'downloadData') are expected in server.R -- confirm.
shinyUI(pageWithSidebar(
headerPanel("Code the Game"),
sidebarPanel(
# First input CSV.
fileInput('file1', 'Choose CSV File',
accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
tags$hr(),
# Second input CSV.
fileInput('file2', 'Choose CSV File',
accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
tags$hr(),
downloadButton('downloadData', 'Download')),
mainPanel(
tableOutput('table')
)
))
|
58c977ea7e74827adf6d7f4a1fc6fd9242eb0127
|
039881b907d13512a8e5ca551aba51a739aa7a7e
|
/R/tm1_get_dimension_elements.R
|
6a4aab9c191a9319dd7eb942153c193d9d4e008e
|
[] |
no_license
|
catgopal/tm1r
|
0276bc7df03f0071addcaa09ec5820772f7b0905
|
679b5a556b8ab3e7b4befa238372d0e16a8707ca
|
refs/heads/master
| 2021-09-28T14:06:26.750075
| 2018-11-17T21:33:55
| 2018-11-17T21:33:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
tm1_get_dimension_elements.R
|
# Retrieve all members of a TM1 dimension's same-named hierarchy via the
# TM1 REST API.
#
# Args:
#   tm1_connection: list with fields $adminhost, $port, $key (value for the
#                   Authorization header) and $ssl (logical), as produced by
#                   the package's connection helper.
#   dimension: dimension name (character scalar); the hierarchy with the
#              same name is queried.
# Returns: a data.frame with columns Name (member name) and Type (element
#          type).
# Raises: an error carrying the server-supplied message if the REST call
#         returns an error payload.
tm1_get_dimension_elements <- function(tm1_connection, dimension) {
  tm1_adminhost <- tm1_connection$adminhost
  tm1_httpport <- tm1_connection$port
  tm1_auth_key <- tm1_connection$key
  tm1_ssl <- tm1_connection$ssl
  # Percent-encode spaces because some HTTP stacks reject raw spaces in URLs.
  dimension <- gsub(" ", "%20", dimension, fixed=TRUE)
  # Scalar condition: plain if/else instead of ifelse().
  scheme <- if (isTRUE(tm1_ssl)) "https://" else "http://"
  url <- paste0(
    scheme, tm1_adminhost, ":", tm1_httpport,
    "/api/v1/Dimensions('", dimension, "')/Hierarchies('", dimension,
    "')/Members?$select=Name&$expand=Element($select=Type)&$format=application/json;odata.metadata=none"
  )
  tm1_process_return <-
    httr::GET(url,
              httr::add_headers("Authorization" = tm1_auth_key))
  # Parse the body once (the original parsed it twice) and fail with the
  # server's message instead of message() followed by an argument-less stop().
  parsed <- jsonlite::fromJSON(httr::content(tm1_process_return, "text"))
  if (is.null(parsed$error$message) == FALSE) {
    stop(parsed$error$message, call. = FALSE)
  }
  tm1_dim_els <- as.data.frame(parsed$value)
  colnames(tm1_dim_els)[2] <- "Type"
  return(tm1_dim_els)
}
|
00d068cda51410851b766889a4dc2245730c72f9
|
21da3f5a064c26741a0f3667c1e3f864e35c5874
|
/Map-Making-USA/mini_project-2_1999.R
|
8a11e470e4fd412de8707b21d078951bcbc81d38
|
[] |
no_license
|
divyanshu93/Implementation-of-Statistical-Methods
|
d30f3ced4283e716cb4bec8f4ccd066d6ecac6e0
|
8a78e56bde30f296454e09704d61fe18c5ca308d
|
refs/heads/master
| 2021-01-17T18:35:32.917009
| 2016-06-06T00:17:56
| 2016-06-06T00:17:56
| 60,486,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
mini_project-2_1999.R
|
# Build a US state-level choropleth of the income share of the top 1% in
# 1999 (input: Frank state-level income-inequality data).
usa.df<-map_data("state")
str(usa.df)
# Rename the 5th map column (the state/region name) so it can be joined on
# "state" below.
colnames(usa.df)[5]<-"state"
usa.df$state<-as.factor(usa.df$state)
str(usa.df)
# NOTE(review): hard-coded absolute path -- adjust for your environment.
usa.dat <- read.table("/home/div/Documents/Frank_top_2012.csv", header = T, sep = ",")
str(usa.dat)
# Inner join keeps only states present in both the map and the data.
usa.df <- join(usa.df, usa.dat, by = "state", type = "inner")
str(usa.df)
# Keep only the 1999 observations.
usa.df = usa.df[usa.df$year==1999,]
str(usa.df)
range(usa.df$top1)
# Colour-scale break points spanning the observed range of top1.
brks<-c(0.184, 0.210, 0.220, 0.250, 0.270, 0.312, 0.376, 0.44, 0.5)
p <- ggplot() +
geom_polygon(data = usa.df, aes(x = long, y = lat, group = group, fill = top1),color = "black", size = 0.15) +
scale_fill_distiller(palette = "Reds", breaks = brks, trans = "reverse") +
theme_nothing(legend = TRUE) +
labs(title = "Top 1% earners in USA in 1999 ", fill = "")
# Optional state labels (requires a `centers` data frame) -- kept disabled.
#+
#geom_text(aes(x = lat_c, y = lon_c, group = group, label = state),
# data = centers,
# alpha = 1,
# color = "black")
ggsave(p, file = "/home/div/Documents/usa_top1_1999.pdf")
#--------------------------------------------OR----------------------------------------
#q <- ggplot() +
# geom_polygon(data = usa.df, aes(x = long, y = lat, group = group, fill = top1),
# + color = "black", size = 0.15) +
# scale_color_brewer(palette="Set1") +
# theme_nothing(legend = TRUE) +
# labs(title = "Top 1% earners in USA in 2012 ", fill = "")
#ggsave(q, file = "usa_top1_2012C.pdf")
|
5ccd9e9bf0b741d1c5003951a9ee6e5e185c97cb
|
55a6299959d29fb2dccb55b6ee70d8b495e69bd4
|
/R/cubinfAM.R
|
2dab224b95824a48b85ca3fc585066de1bac77b4
|
[] |
no_license
|
cran/robcbi
|
0b56c104b729378c1f5fda9ea15838861543e653
|
be190c9343a869ef7c4855fcbac559442d57ed87
|
refs/heads/master
| 2020-03-13T20:51:04.996148
| 2019-07-23T14:00:05
| 2019-07-23T14:00:05
| 131,283,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,767
|
r
|
cubinfAM.R
|
## Collect the tuning constants for a cubinf fit into a named list.
## Every argument is passed through unchanged; anything supplied via
## "..." is appended to the end of the list as-is, exactly like
## glm.control-style constructors.
cubinf.control <- function(tlo = 0.001, tua = 1.e-06, mxx = 30, mxt = 10, mxf = 10 ,
                    ntm = 0, gma = 1, iug = 1, ipo = 1, ilg = 2, icn = 1, icv = 1,
                    ufact=0, cpar=1.5, null.dev = TRUE, ...)
{
  ## Named settings first, in their documented order ...
  settings <- list(
    tlo = tlo, tua = tua,
    mxx = mxx, mxt = mxt, mxf = mxf,
    ntm = ntm, gma = gma,
    iug = iug, ipo = ipo, ilg = ilg,
    icn = icn, icv = icv,
    ufact = ufact, cpar = cpar,
    null.dev = null.dev
  )
  ## ... then any extra user-supplied entries appended at the end.
  c(settings, list(...))
}
## Robust bounded-influence fit of a binomial or Poisson GLM (the
## "cubinf" method of the robcbi package). The interface mirrors a glm
## fitting method: x is the design matrix, y the response, and the
## remaining arguments follow glm conventions. The heavy lifting is
## delegated to ROBETH-based helpers defined elsewhere in the package
## (gintac, gymain, glmdev, mchl, minv, mtt1) plus the local helpers
## gfedcaAM() and covarAM(). Returns a glm-like list (coefficients,
## fitted.values, deviance, cov, ...) tagged with $class == "cubinf".
## NOTE(review): start/etastart/mustart are accepted only for interface
## compatibility and are never used below.
cubinf <- function(x, y, weights = NULL, start=NULL, etastart=NULL, mustart=NULL,
offset = NULL, family = binomial(), control = cubinf.control(...), intercept=FALSE, ...){
#
## --- Argument normalisation and family dispatch ------------------------
x <- as.matrix(x)
n <- nobs <- nrow(x)
if (is.null(weights)) weights <- rep.int(1,n)
## Prior weights must be integer replication counts: rows are literally
## replicated with rep.int() further down.
if(any(weights != round(weights))) stop("Weights must be integer number")
if(is.character(family)) family <- get(family,mode="function",envir=parent.frame())
if(is.function(family)) family <- family()
nn <- family$family #["name"]
if (nn == "gaussian") stop("Use lm(formula, ..., method=\"robust\") for the gaussian case")
wi <- weights
## ics encodes the response type for the underlying routines:
## 1 = Bernoulli (vector/factor y), 2 = binomial counts (2-column matrix
## of successes/failures), 3 = Poisson. 0 = unsupported family.
ics <- 0
if (nn == "binomial") {
ics <- 2
if (is.matrix(y)) {
if (dim(y)[2]>2) stop("Only binomial response matrices (2 columns)")
## ni = number of trials per observation, y = number of successes.
ni <- as.vector(y %*% c(1,1))
y <- y[,1]
ly <- length(y)}
else {
ics <- 1
if (is.factor(y)) y <- as.numeric(y != levels(y)[1])
else y <- as.vector(y)
ly <- length(y)
ni <- rep(1,ly)
}
}
if (nn == "poisson") {
if (any(y < 0)) stop("Negative values not allowed for the Poisson family")
ics <- 3; ly <- length(y)
ni <- rep(1,ly)}
if (ics == 0) stop(paste(nn,": family not implemented for method='cubinf'", sep=" "))
eta <- ci <- ai <- rsdev <- y
yy <- y; nni <- ni
dn <- dimnames(x)
xn <- dn[[2]]
yn <- dn[[1]]
#
# Data preprocessing for weights >= 0
## Zero-weight rows are set aside in x0/y0 (re-attached at the end) and
## integer weights > 1 are expanded into replicated rows via rep.int.
if (intercept & any(x[,1]!=1)) x <- cbind(1,x)
p <- ncol(x)
EMPTY <- p==0
ncov <- p*(p+1)/2
if (is.null(offset) || length(offset)==1) offset <- rep(0,ly)
zero <- wi == 0
if (any(zero)) {
pos <- !zero
x0 <- x[zero, , drop=FALSE]
y0 <- y[zero]
x <- x[pos, , drop=FALSE]
y <- y[pos]
ni <- ni[pos]
wi <- wi[pos]
offset <- offset[pos]
}
nw <- length(wi)
ind <- rep.int(1:nw,wi)
x <- x[ind, , drop=FALSE]
y <- y[ind]
offset <- offset[ind]
ni <- ni[ind]
#
# Initializations
## QR decomposition is used for rank detection and column pivoting only.
qrx <- qr(x,tol=1e-7,LAPACK=FALSE)[c("qr", "rank", "pivot", "qraux")]
qrx$tol <- 1e-7
rank <- qrx$rank
piv <- 1:rank
## Validate the control list and unpack it into local tuning constants.
control <- do.call("cubinf.control", control)
tlo <- control$tlo
tua <- control$tua
mxx <- control$mxx
mxt <- control$mxt
mxf <- control$mxf
ntm <- control$ntm
gma <- control$gma
iug <- control$iug
ipo <- control$ipo
ilg <- control$ilg
icn <- control$icn
icv <- control$icv
null.dev <- control$null.dev
tmp <- control$singular.ok
if (!is.null(tmp)) singular.ok <- tmp else singular.ok <- FALSE
tmp <- control$qr.out
if (!is.null(tmp)) qr.out <- tmp else qr.out <- FALSE
## Rank-deficient design: either stop, or drop the aliased columns.
if (rank < p) {
if (!singular.ok) stop(paste("x is singular, rank(x)=", rank))
else {piv <- qrx$pivot[1:rank]
x <- x[, piv, drop=FALSE]
if (any(zero)) x0 <- x0[,piv,drop=FALSE]
xn <- dimnames(x)[[1]] }
}
# old <- comval()
ufact <- control$ufact
if (ufact==0) ufact <- 1.1
## upar is the influence bound "b" passed to the solvers, scaled by
## sqrt(rank) as the number of parameters grows.
upar <- ufact*sqrt(rank)
cpar <- control$cpar
# dev <- control$dev
# Deviance for the model reduced to the constant term.
if(null.dev) {Null.dev <- cubinf.null(x, y, ni, offset, ics, family, control)
if (nrow(x)==1 & intercept) return(list(deviance=Null.dev)) }
else Null.dev <- NULL
#
# Initial theta, A (A0) and c (c0)
## sink() to a temp file silences diagnostic chatter printed by the
## underlying Fortran routines (message 460 in RYWALG).
zdir <- tempdir(); tmpcbi <- tempfile("Rcbi",zdir)
sink(tmpcbi, type="output") #Redirection of message 460 in RYWALG
if (ufact >= 20) cpar <- 20*cpar
z <- gintac(x, y, ni, offset, icase=ics, tolt=10*tlo,tola=10*tlo, b=upar, c=cpar)
theta0 <- z$theta[1:rank]; A0 <- z$a; c0 <- z$ci
#
# Initial cut off points a_i (wa)
wa <- upar/pmax(1.e-3,z$dist)
#
# Initial covariance matrix of coefficient estimates
vtheta <- as.vector(x%*%theta0)
# z <- gfedca(vtheta, c0, wa, ni, offset, ics)
# zc <- ktaskw(x, z$D, z$E, f=1/n, f1=1, iainv=0); covi <- zc$cov
z <- gfedcaAM(vtheta, c0, wa, ni, offset, ics)
zc <- covarAM(x, z$D, z$E,intercept=FALSE)$Cov
## Pack the upper triangle (column-major) as the Fortran side expects.
covi <- zc[row(zc) <= col(zc)]
## Guard against exactly-zero diagonal entries in the packed triangle:
## iii indexes the diagonal positions of the packed vector.
iii <- cumsum(1:p)
adiag <- round(covi[iii],4)
jjj <- (iii)[adiag==0]
if (length(jjj)>0) { covi[jjj] <- 1; cat("Info: initial cov re-defined\n",covi[1:ncov],"\n")}
## If icn != 1 the solver expects the inverse, built via Cholesky
## (mchl), triangular inverse (minv) and product (mtt1).
if (icn != 1) {
zc <- mchl(covi, rank)
zi <- minv(zc$a, rank)
covi <- mtt1(zi$r, rank)$b}
#
# Final theta, A, c (ci) and a(wa)
## Main iterative fit (ROBETH GYMAIN): returns the coefficient vector,
## packed A matrix, correction terms ci, cut-offs wa and iteration count.
zf <- gymain(x, y, ni, covi, A0, theta0, offset, b=upar, gam=gma,
tau=tua, icase=ics, iugl=iug, iopt=ipo, ialg=ilg, icnvt=icn,
icnva=icv, maxit=mxx, maxtt=mxt, maxta=mxf, maxtc=mxt,
nitmnt=ntm, nitmna=ntm, tol=tlo, tolt=10*tlo, tola=10*tlo, tolc=10*tlo)
sink()
unlink(tmpcbi,force=TRUE)
nit <- zf$nit; converged <- TRUE
if (mxx > 1 && nit == mxx) {cat("\nWarning: Maximum number of iterations [mxx=", mxx,"] reached.\n")
converged <- FALSE}
coefs <- zf$theta
#
# Deviance
#
zd <- glmdev(y, ni, zf$ci, zf$wa, zf$vtheta, offset, icase = ics)
#
# Final covariance matrix of coeff. estimates (modified march 2018)
# sink("tmpzzz", type="output") #Redirection of message 450 in KTASKW
# z <- gfedca(zf$vtheta, zf$ci, zf$wa, ni, offset, ics)
# zc <- ktaskw(x, z$D, z$E, f=1/n, f1=1, iainv=0)
z <- gfedcaAM(zf$vtheta, zf$ci, zf$wa, ni, offset, ics)
zc <- covarAM(x, z$D, z$E,intercept=FALSE)$Cov
# sink()
## Unpack the lower-triangular A matrix returned as a packed vector.
A <- matrix(0, nrow=rank, ncol=rank)
cov <- zc[1:rank,1:rank]
i2 <- 0
for(i in 1:rank) {
i1 <- i2 + 1
i2 <- i1 + i - 1
A[i, 1:i] <- zf$a[i1:i2]
# cov[i,1:i] <- zc$cov[i1:i2]
# cov[1:i,i] <- zc$cov[i1:i2]
}
## Strip stray attributes, then re-impose dimensions and names.
xn <- dimnames(x)[[2]]
xn <- xn[piv]
attributes(coefs) <- NULL
attributes(A) <- NULL
attributes(cov) <- NULL
attr(A,"dim") <- c(rank,rank)
attr(cov,"dim") <- c(rank,rank)
names(coefs) <- xn
dimnames(A) <- list(xn,xn)
dimnames(cov) <- list(xn,xn)
asgn <- attr(x, "assign")
ai <- zf$wa; ci <- zf$ci; rsdev <- zd$li-zd$sc
# zl <- lrfctd(ics,y,ci,vtheta,offset,ai,ni,1,1,1)
# Li <- zl$f0; li <- zl$f1; lip <- zl$f2 # rs <- zf$rs
#
# Compute eta, mu and residuals.
## Map the fitted quantities from the replicated rows back onto the
## original observation index (inverse of the rep.int expansion above):
## jjj picks the last replicate of each original row, iii its position.
dni <- c(ind[1],diff(ind))
lll <- dni!=0
iii <- cumsum(dni[lll])
lll <- as.vector(1*lll)
jjj <- (1:n)*lll
eta[iii] <- zf$vtheta[jjj]
if(any(offset!=0)) offset[iii] <- offset[jjj]
ci[iii] <- zf$ci[jjj]
ai[iii] <- zf$wa[jjj]
# Li[iii] <- Li[jjj]
# li[iii] <- li[jjj]
# lip[iii] <- lip[jjj]
# rs[iii] <- rs[jjj]
rsdev[iii] <- zd$li[jjj]-zd$sc[jjj]
dni <- nni
ni <- rep(1,length(eta))
ni[iii] <- dni[jjj]
## Zero-weight rows still receive fitted values but no influence terms.
if (any(zero)) {
eta[zero] <- as.vector(x0 %*% coefs)
ci[zero] <- 0
ai[zero] <- 0
# Li[zero] <- 0
# li[zero] <- 0
# lip[zero] <- 0
# rs[zero] <- 0
offset[zero] <- 0
rsdev[zero] <- 0}
mu <- family$linkinv(eta+offset)
names(eta) <- yn
## Rank-deficient case: NA out aliased coefficients and remap the
## "assign" bookkeeping to the retained (pivoted) columns.
if(rank < p) {
coefs[piv[ - seq(rank)]] <- NA
pasgn <- asgn
newpos <- match(1:p, piv)
names(newpos) <- xn
for(j in names(asgn)) {
aj <- asgn[[j]]
aj <- aj[ok <- (nj <- newpos[aj]) <= rank]
if(length(aj)) {
asgn[[j]] <- aj
pasgn[[j]] <- nj[ok]
}
else asgn[[j]] <- pasgn[[j]] <- NULL
}
cnames <- xn[piv]
}
new.dev <- zd$dev
# new.dev <- sum(family$dev.resids(yy/ni, mu, w = rep(1.0, n)))
resp <- yy/nni - mu
if (any(zero)) {resp[zero] <- 0}
names(ai) <- yn
df.residual <- ly - rank - sum(weights==0)
## --- Assemble the glm-like return value --------------------------------
fit <- list(coefficients = coefs, fitted.values=mu,
# effects = effects, weights=weights,
ci=ci, rank = rank, assign = asgn,
df.residual = df.residual, control=control)
if(rank < p) { if(df.residual > 0) fit$assign.residual <- (rank + 1):n}
if(qr.out) fit$qr <- qrx
fit$A <- A
fit$ai <- ai
fit$cov <- cov
fit$class <- "cubinf"
fit$converged <- converged
if (!all(weights==1)) fit$prior.weights <- weights
if (!all(ni==1)) fit$ni <- ni
## Signed square-root deviance residuals.
rsdev <- sign(y-mu)*sqrt(2*abs(rsdev)*weights)
attributes(zf$grad) <- NULL
attributes(zf$hessnv) <- NULL
residuals <- yy/nni - ci - mu
attributes(residuals) <- NULL
# Restore common values for ROBETH
# dfcomn(ipsi = old$ipsi, c = old$c, d = old$d, beta = old$bta)
c(fit, list(family = family$family, ics=ics, linear.predictors = eta,
deviance = new.dev, null.deviance=Null.dev, iter = nit, qr=qrx,
y = yy/nni, contrasts = attr(x,"contrasts"), rsdev = rsdev,
gradient = zf$grad, inv.hessian = zf$hessnv, residuals = residuals))
}
## Deviance of the intercept-only ("null") model, computed with the same
## robust machinery as cubinf() itself. Called by cubinf() when
## control$null.dev is TRUE. Arguments mirror cubinf()'s internals
## (x is only used implicitly — the design is replaced by a column of
## ones). Returns a single number: the robust null deviance.
cubinf.null <-
function(x, y, ni, offset, ics, family, control)
{
## Validate and unpack the tuning constants (same meaning as cubinf()).
control <- do.call("cubinf.control", control)
tlo <- control$tlo
tua <- control$tua
mxx <- control$mxx
mxt <- control$mxt
mxf <- control$mxf
ntm <- control$ntm
gma <- control$gma
iug <- control$iug
ipo <- control$ipo
ilg <- control$ilg
icn <- control$icn
icv <- control$icv
## Single parameter (the intercept), hence no sqrt(rank) scaling of the
## influence bound.
rank <- 1
ufact <- control$ufact
if (ufact==0) ufact <- 1.1
upar <- ufact
cpar <- control$cpar
if (ufact >= 20) cpar <- 20*cpar
ly <- length(y)
w <- rep(1,ly)
ai <- ci <- rep(0,ly)
# intl <- attr(x, "term.labels")
# int <- if(is.null(intl)) FALSE else as.logical(match(intl[1], c("(Int.)",
# "(Intercept)"), FALSE))
linkinv <- family$linkinv
dev.resids <- family$dev.resids
# if(!int) {eta <- rep(0,ly); mu <- linkinv(eta+offset)
# cval <- 0.5; if (ics==3) cval <- 1
# ci <- rep(cval,ly); ai <- rep(9999.,ly) } else {
## Intercept-only design: a single column of ones.
X <- matrix(rep(1,ly),ncol=1)
## NOTE(review): cpar was already multiplied by 20 above when
## ufact >= 20; this line repeats the scaling (net 400*cpar).
## Possibly a copy/paste bug — verify against the ROBETH reference.
if (ufact >= 20) cpar <- 20*cpar
## sink() silences Fortran chatter, as in cubinf().
zdir <- tempdir(); tmpcbi <- tempfile("Rcbi",zdir)
sink(tmpcbi, type="output") #Redirection of message 460 in RYWALG
z <- gintac(X, y, ni, offset, icase = ics, tolt=10*tlo,
tola=10*tlo, b = upar, c = cpar)
t0 <- z$theta[1]; A0 <- z$a; c0 <- z$ci
wa <- upar/pmax(1.e-3,z$dist)
vtheta <- rep(t0,ly)
# z <- gfedca(vtheta, c0, wa, ni, offset, ics)
z <- gfedcaAM(vtheta, c0, wa, ni, offset, ics)
## NOTE(review): unlike cubinf(), this still uses ktaskw() rather than
## covarAM() for the 1x1 covariance — presumably intentional since the
## scalar case cannot be rank-deficient.
zc <- ktaskw(X, z$D, z$E, f=1/ly, f1=1, iainv=0)
covi <- zc$cov
if (icn != 1) covi <- 1/covi
## Iterative fit of the single intercept parameter.
zf <- gymain(X, y, ni, covi, A0, t0, offset, b=upar, gam=gma,
tau=tua, icase=ics, iugl=iug, iopt=ipo, ialg=ilg, icnvt=icn,
icnva=icv, maxit=mxx, maxtt=mxt, maxta=mxf, maxtc=mxt,
nitmnt=ntm, nitmna=ntm, tol=tlo, tolt=10*tlo, tola=10*tlo,
tolc=10*tlo)
sink(); unlink(tmpcbi,force=TRUE)
ai <- zf$wa
ci <- zf$ci
eta <- zf$vtheta
# mu <- linkinv(eta+offset)
# }
## Deviance of the fitted null model is the only thing returned.
zd <- glmdev(y,ni,ci,ai,eta,offset=offset,icase=ics)
zd$dev
}
## For each observation i, numerically accumulate the two expectations
## needed for the robust covariance "sandwich":
##   E[i] = E[ psi(Y - c0[i] - lambda_i)^2 ]
##   D[i] = E[ psi(Y - c0[i] - lambda_i) * (Y - lambda_i) ]
## where Y is binomial (ics = 1 or 2, with ni[i] trials) or Poisson
## (ics = 3), lambda_i is the response mean implied by the linear
## predictor vtheta + offset, and psi is the Huber clipping function
## with bound wa[i]. The sum over the support of Y is truncated once
## both the E- and D-terms fall to `precision` (default 0, i.e. the
## terms have underflowed to exactly zero).
gfedcaAM <- function(vtheta, c0, wa, ni, offset,ics, precision=0) {
## Huber psi: clip x into [-c, c].
psi <- function(x,c) { max(-c,min(x,c) ) }
vtheta1 <- vtheta+offset
n <- length(vtheta)
E <- D <- rep(0,n)
for ( i in 1:n) {
## termE/termD start large so the while loop is always entered.
sumE <- sumD <- 0; j <- 0; termE <- termD <- 100
## Response mean: ni*p for binomial, exp(eta) for Poisson.
if (ics==1 | ics==2) {probi <- exp(vtheta1[i])/(1+exp(vtheta1[i]))
lambdai <- ni[i]*probi }
if (ics==3) {lambdai <- exp(vtheta1[i])}
while (max(termE,termD) > precision) {
## Log-probability of Y = j under the response distribution.
if (ics==1 | ics==2) {lpij <- dbinom(j,ni[i], probi, log = TRUE) }
if (ics==3) {lpij <- dpois(j,lambdai, log = TRUE) }
tmpsi <- psi( j-c0[i]-lambdai, c=wa[i])
## E-term accumulated on the log scale for numerical stability:
## exp(log(psi^2) + log p(j)) = psi^2 * p(j).
termE <- log( tmpsi^2 ) + lpij
termE <- exp(termE)
sumE <- sumE + termE
tmpsi <- tmpsi*(j-lambdai)
if (tmpsi>0) {
## Positive term: same log-scale trick as for E.
termD <- log(tmpsi) + lpij
termD <- exp(termD)
sumD <- sumD + termD
} else {
## Non-positive term: log is undefined, accumulate directly;
## abs() keeps the loop's stopping criterion meaningful.
termD <- tmpsi*exp(lpij)
sumD <- sumD + termD
termD <- abs(termD)
}
j <- j+1
}
E[i] <- sumE; D[i] <- sumD}
list(D=D,E=E) }
## Sandwich covariance estimate Cov = S1^+ S2 S1^+, where
##   S1 = X' diag(D) X   and   S2 = X' diag(E) X,
## and S1^+ is the Moore-Penrose pseudo-inverse of S1 built from its
## singular value decomposition, so a numerically rank-deficient S1 is
## handled gracefully (singular values below tol * d_max are dropped).
## With intercept = TRUE a column of ones is prepended to X first.
## Returns list(Cov = <p x p matrix>).
covarAM <- function(X, D, E, intercept = TRUE, tol = sqrt(.Machine$double.eps)) {
  design <- if (intercept) cbind(1, X) else X
  bread <- t(design) %*% (D * design)   # S1
  meat  <- t(design) %*% (E * design)   # S2
  ## Pseudo-inverse of S1 via SVD, keeping only the singular values that
  ## are positive relative to the largest one (same test as MASS::ginv).
  dec <- svd(bread)
  keep <- dec$d > max(tol * dec$d[1L], 0)
  if (all(keep)) {
    inv_bread <- dec$v %*% (1 / dec$d * t(dec$u))
  } else if (!any(keep)) {
    ## S1 is numerically zero: its pseudo-inverse is the zero matrix.
    inv_bread <- array(0, dim(bread)[2L:1L])
  } else {
    inv_bread <- dec$v[, keep, drop = FALSE] %*%
      ((1 / dec$d[keep]) * t(dec$u[, keep, drop = FALSE]))
  }
  list(Cov = inv_bread %*% meat %*% inv_bread)
}
|
b0adec19e18cc3367ef0cc94107ceadc3ed0181d
|
d191f64fc6fbe26d7927e1b1fc69d6015eb57e79
|
/ForecastContestExample.R
|
638afa9b2d0b3091759ac977773407f27412ebcd
|
[] |
no_license
|
XQ2013/Forcast-R-Example
|
6ba6c54322fb7179ecfdec1b124cb0e3172e9d20
|
240a4feb34186602bc514d6625ceb6477a3ef805
|
refs/heads/master
| 2020-07-01T21:15:45.191004
| 2019-10-01T18:07:56
| 2019-10-01T18:07:56
| 201,303,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,842
|
r
|
ForecastContestExample.R
|
## Forecast-contest script: cleans a local donations CSV, engineers
## features, splits into train/test, and fits (a) a logistic model for
## whether a gift occurs and (b) a linear model for the gift amount.
# Load the raw training data and replace missing values with NA
donations<- read.csv('donations.csv',header=T,na.strings=c(""))
## Drop the ID column (1) and column 18 — positions are hard-coded to
## this specific CSV layout.
donations <- donations[,-c(1,18)]
# Output the number of missing values for each column
sapply(donations,function(x) sum(is.na(x)))
# Quick check for how many different values for each feature
sapply(donations, function(x) length(unique(x)))
# R should automatically code Gifts, Gender as a factor(). A factor is R's way of dealing with
# categorical variables
is.factor(donations$Gender) # Returns TRUE
is.factor(donations$Gifts) # Returns False
donations$Gifts <- factor(x = donations$Gifts, levels = c(0,1), labels = c("No", "Yes"))
is.factor(donations$Gifts) # Returns TRUE
# Check categorical variables encoding for better understanding of the fitted model
contrasts(donations$Gender)
## Columns 3, 15 and 16 hold dollar amounts like "$1,234"; strip the
## dollar sign and coerce to numeric (commas, if any, would yield NA).
for (i in c(3, 15, 16)) {
donations[[i]] <- gsub(pattern = "\\$", replacement = "", x = donations[[i]])
donations[[i]] <- as.numeric(donations[[i]]) }
## Recode several columns into 0/1 indicators for modelling.
donations$Gifts <- ifelse(donations$Gifts == "Yes", 1,0)
donations$Largest <- as.numeric(donations$Largest)
donations$Last <- as.numeric(donations$Last)
donations$St <- as.character(donations$St)
donations$St <- ifelse(donations$St =="IL",1,0)
donations$Type <- as.character(donations$Type)
donations$Type <- ifelse(donations$Type == "Graduate", 1, 0)
donations$LGDate<-as.numeric(donations$LGDate)
donations$Class <- as.numeric(donations$Class)
## Years since graduation, relative to 2015.
donations$Class1 <- 2015- donations$Class
## Interaction and log-transformed giving features (log1p pattern).
donations$FY1314 <- donations$FY13*donations$FY14
donations$log13 <- log(donations$FY13 + 1)
donations$log14 <- log(donations$FY14 + 1)
donations$logLast <- log(donations$Last + 1)
## NOTE(review): Cu - Lg subtracts a numeric from a Date, which yields a
## Date, not a day count — presumably recency in days was intended;
## verify LGDate's original encoding.
Lg<-as.numeric(donations$LGDate)
Cu<-as.Date("15Sep2015", "%d%b%Y")
donations$newdate=Cu-Lg
donations$newdate <- as.numeric(donations$newdate)
currentdate <- as.Date("15Sep2015", "%d%b%Y")
## One-hot indicators for test groups B/C/D (A is the implicit baseline).
donations$TG <- as.character(donations$TestGroup1)
donations$TestGroup1B <- ifelse(donations$TG == "B", 1, 0)
donations$TestGroup1C <- ifelse(donations$TG == "C", 1, 0)
donations$TestGroup1D <- ifelse(donations$TG == "D", 1, 0)
# data transformation before generating train and test
# Spliting the Data (Test & Train): random ~70/30 Bernoulli split.
set.seed(1234)
Partition <- rbinom(n = nrow(donations) ,size = 1, prob = 0.7)
donations <- data.frame(donations, Partition)
train <- donations[donations$Partition==1, ]
test <- donations[donations$Partition==0, ]
# logistic model: probability that a gift occurs
fit.logit <- glm(data = train, formula = Gifts ~ logLast+FY1314+log14+log13+newdate, family = binomial(),na.action = na.exclude)
summary(fit.logit)
# test the model: mean held-out log-likelihood
prob <- predict.glm(fit.logit, test, type = "response")
gifts <- as.numeric(test$Gifts)
mean(gifts*log(prob) + (1 - gifts)*log(1-prob))
# regression model: gift amount, restricted to donors with amount > 0
train2 <- train[train$CnGf_1_Amount > 0, ]
test2 <- test[test$CnGf_1_Amount > 0, ]
fit.reg <- lm(data = train2, formula = CnGf_1_Amount ~ Last+log14+log13+Class1+Left+newdate)
summary(fit.reg)
# test the model: mean absolute error on the held-out donors
pred <- predict.lm(object = fit.reg, newdata = test2)
mean(abs(pred-test2$CnGf_1_Amount))
# test the logit model against a constant-probability baseline (p = .335)
#attach(donations)
donations$Gifts <- as.numeric(donations$Gifts)
donations$Gender <- as.numeric(donations$Gender)
prob = .335
donations$Baselinell <- (donations$Gifts*log(prob) + (1 - donations$Gifts)*log(1-prob))
summary(donations)
# # test the regression model against a "last gift" baseline
test <- donations[donations$CnGf_1_Amount > 0, ]
test$Baseline <- (abs( test$CnGf_1_Amount -test$Last))
summary(test)
# model
# Logistic Model
## Gift = 1.049e+01 +(-1.234e-01)logLast+(-1.678e-05)FY1314 + 5.006e-01log14 + 3.414e-01log13 + (-7.925e-04)newdate
# # Regression
# Donation Amount= 129.010335 + 0.906707Last + (-0.334862)log14 + 0.746060log13 + (-0.130387)Class1 + 0.020227Left + (-0.007011)newdate
|
8ab15d63403f01018eea82354a47fe1a69a06217
|
b3b3a690ac233a93732ad3e73a2ee64c1c4a756c
|
/man/alnum.Rd
|
eced767cf789122daf51105107fdd954dd75ef62
|
[] |
no_license
|
kcf-jackson/combinatorParser
|
f707de5da1905a43cccd269007e55ed3b4f3fa6b
|
25182c466fbadc11f832823c01e9773032c6ea2f
|
refs/heads/master
| 2020-04-07T19:20:07.547755
| 2019-04-15T19:12:45
| 2019-04-15T19:12:45
| 158,644,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 286
|
rd
|
alnum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/character_classes.R
\docType{data}
\name{alnum}
\alias{alnum}
\title{Alpha-numeric}
\format{An object of class \code{character} of length 62.}
\usage{
alnum
}
\description{
Alpha-numeric
}
\keyword{datasets}
|
db09c045ffff3a89ff4c077944008e823c354d7b
|
a3a3a26d0ef80fba3d5c133d62e649dd55937f2c
|
/tests/testthat/test_polydata.R
|
e0a52645f0d7410544b2e9539c89631a24c6e219
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
dblodgett-usgs/NCDFSG
|
74181ab2d24d588072cf0add71f67141c5c48f14
|
ec92886d9da885125fd08a059312ca2c3d86c465
|
refs/heads/master
| 2021-01-19T00:18:01.959465
| 2017-04-07T00:50:41
| 2017-04-07T00:50:41
| 73,005,025
| 1
| 1
| null | 2017-03-25T14:29:45
| 2016-11-06T16:53:21
|
R
|
UTF-8
|
R
| false
| false
| 3,221
|
r
|
test_polydata.R
|
library("ncdf4")
context("NCDF SG polygonData tests")
# data prep.
# library(rgdal)
# shapeData<-readOGR(dsn = "data/Yahara_alb/Yahara_River_HRUs_alb_eq.shp",
# layer = "Yahara_River_HRUs_alb_eq",
# stringsAsFactors = FALSE)
# saveRDS(shapeData,file="data/yahara_shapefile_data.rds")
## Round-trip test: write the Yahara fixture (a SpatialPolygonsDataFrame)
## to a NetCDF-SG file with ToNCDFSG(), inspect the file directly with
## ncdf4, then read it back with FromNCDFSG() and compare. Variable and
## attribute names come from the package-internal pkg.env.
test_that("A whole shapefile can be written", {
polygonData <- readRDS("data/yahara_shapefile_data.rds")
nc_file <- ToNCDFSG(nc_file=tempfile(), geomData = polygonData)
nc<-nc_open(nc_file)
## Expected CF grid-mapping attributes for the fixture's Albers
## equal-area projection.
crs <- list(grid_mapping_name = "albers_conical_equal_area",
longitude_of_central_meridian = -96,
latitude_of_projection_origin = 23,
false_easting = 0.0,
false_northing = 0.0,
standard_parallel = c(29.5, 45.5),
semi_major_axis = 6378137.0,
inverse_flattening = 298.257223563,
longitude_of_prime_meridian = 0)
expect_equal(ncatt_get(nc, pkg.env$crs_var_name)[names(crs)], crs)
expect_equal(as.numeric(polygonData@data$GRIDCODE),as.numeric(ncvar_get(nc, varid = "GRIDCODE")))
## One NetCDF "instance" per polygon feature.
expect_equal(length(nc$dim$instance$vals), length(polygonData@polygons))
## Every attribute variable must point at the geometry container.
for(var in names(polygonData@data)) {
expect_equal(ncatt_get(nc, var, pkg.env$geometry_container_att_name)$value,
pkg.env$geom_container_var_name)
}
## First ring of the first feature: stored coordinates are reversed
## relative to sp's ring order. The count of 118 vertices is hard-coded
## to this fixture.
coords<-polygonData@polygons[[1]]@Polygons[[1]]@coords
expect_equal(as.numeric(coords[nrow(coords):1,1]),as.numeric(ncvar_get(nc, varid = "x", start = c(1), count = c(118))))
expect_equal(as.numeric(coords[nrow(coords):1,2]),as.numeric(ncvar_get(nc, varid = "y", start = c(1), count = c(118))))
# Check to make sure a hole is encoded correctly.
node_count <- ncvar_get(nc, pkg.env$node_count_var_name)
part_node_count <- ncvar_get(nc, pkg.env$part_node_count_var_name)
part_type <- ncvar_get(nc, pkg.env$part_type_var_name)
expect_equal(length(polygonData@polygons), length(node_count))
## Walk each feature's parts: per-part node counts must sum to the
## feature's node count, and hole rings must carry the hole flag.
p <- 1
for(i in 1:length(node_count)) {
nCount <- 0
for(j in 1:length(polygonData@polygons[[i]]@Polygons)) {
if(polygonData@polygons[[i]]@Polygons[[j]]@hole) expect_equal(part_type[p], pkg.env$hole_val)
expect_equal(length(polygonData@polygons[[i]]@Polygons[[j]]@coords[,1]), part_node_count[p])
nCount <- nCount + part_node_count[p]
p <- p + 1
}
expect_equal(nCount, node_count[i])
}
checkAllPoly(polygonData, ncvar_get(nc,pkg.env$node_count_var_name),
ncvar_get(nc,pkg.env$part_node_count_var_name),
ncvar_get(nc,pkg.env$part_type_var_name))
## Read the file back and compare geometry and attributes.
returnPolyData<-FromNCDFSG(nc_file)
compareSP(polygonData, returnPolyData)
for(name in names(polygonData@data)) {
expect_equal(as.character(polygonData@data[name]), as.character(returnPolyData@data[name]))
}
for(i in 1:length(returnPolyData@polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons), length(polygonData@polygons[[i]]@Polygons))
for(j in 1:length(returnPolyData@polygons[[i]]@Polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons[[j]]@coords), length(polygonData@polygons[[i]]@Polygons[[j]]@coords))
}
}
# writePolyShape(returnPolyData, "yaharaData_test")
})
|
3ce8532047b80125c3a0f1c636dc105ccbf75a33
|
9e72f2d88e396432a7bdf217c8408f8a1fff02e8
|
/wordcloud2_project.R
|
969c76667074b4db3090c49ec204a2c35cfacf6e
|
[] |
no_license
|
SeokHyeon-Hwang/R_data_analysis
|
271cdc33b601d0cc61788e4a0fc1e51795daccbd
|
61c4af51e1cac736a0290c6ac6c2dc2a927256f1
|
refs/heads/master
| 2021-07-11T23:02:26.650102
| 2019-03-05T04:52:33
| 2019-03-05T04:52:33
| 148,569,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,555
|
r
|
wordcloud2_project.R
|
## Scrape user reviews for two movies from movie.daum.net with rvest,
## then extract Korean nouns for a word cloud.
## NOTE(review): `extractNoun` (from the KoNLP package) and wordcloud2
## are never loaded here; the script assumes they are attached already.
## rm(list=ls()) wipes the global environment — an anti-pattern kept
## as-is from the original script.
rm(list=ls())
# crawling & wordcloud2
## 01. crawling: single page, as a smoke test
# install.packages('rvest')
library(rvest)
### fetch data from the web (movie reviews), page 2 of movie 109924
all_reviews<-c()
test<-paste0('https://movie.daum.net/moviedb/grade?movieId=109924&type=netizen&page=', 2)
test
### read html and drill into the review nodes by CSS class
htxt=read_html(test)
htxt
table=html_nodes(htxt, '.review_info')
table
content=html_nodes(table, '.desc_review')
content
reviews=html_text(content)
reviews
## Strip newlines and tabs from the extracted text.
reviews<-gsub('\n', '', reviews)
reviews
reviews<-gsub('\t', '', reviews)
reviews
writeLines(reviews, 'C:/Users/ktm/reviews2.txt')
## 02. crawling with a for loop: pages 1-20 of movie 46092
all_url<-c()
for (i in 1:20){
### build the page URL
test<-paste0('https://movie.daum.net/moviedb/grade?movieId=46092&type=netizen&page=',i)
all_url=c(all_url,test)
}
all_url
all_reviews=c()
for (i in 1:20){
### read html
htxt=read_html(all_url[i]) # fetch the review page
table=html_nodes(htxt, '.review_info') # grab the reviews
content=html_nodes(table, '.desc_review')
reviews=html_text(content)
reviews<-gsub('\n', '', reviews) # remove unnecessary characters
reviews<-gsub('\t', '', reviews) # remove unnecessary characters
all_reviews<-c(all_reviews, reviews)
}
all_reviews
## wordcloud
### extract nouns (KoNLP::extractNoun) from each review
nouns<-sapply(all_reviews, extractNoun, USE.NAMES = F)
str(nouns)
nouns<-unlist(nouns)
str(nouns)
### Delete useless nouns (digits, tabs, Latin letters), keep words of
### length >= 2, then confirm frequency
nouns<-gsub('\\d+', '', nouns)
nouns<-gsub('\t', '', nouns)
nouns<-gsub('[a-zA-Z]', '', nouns)
nouns<-nouns[nchar(nouns)>=2]
nouns
## NOTE(review): the file ends mid-statement here — wordFreq is never
## assigned (presumably table(nouns) was intended) and the wordcloud2
## call is missing entirely.
wordFreq<-
|
a6af73dfe2941a352d7b8c7128ba30fdf95026c6
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/onlineforecast/R/operator_multiply.R
|
2ce598f0d54a20521110444b2659c834264555a9
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,464
|
r
|
operator_multiply.R
|
## Do this in a separate file to see the generated help:
#library(devtools)
#document()
#load_all(as.package("../../onlineforecast"))
#?"%**%"
#' Multiplication of each element in a list (x) with y
#'
#' Each element of x is multiplied with y using the usual elementwise '*' operator.
#'
#' Typical use is when a function, e.g. \code{\link{bspline}()}, returns a list of matrices (e.g. one for each base spline) and they should individually be multiplied with y (a vector, matrix, etc.).
#'
#' Since this is intended to be used for forecast models in the transformation stage
#' then there are some peculiarities:
#'
#' If the number of columns or the names of the columns are not equal for one element in x
#' and y, then only the columns with same names are used, hence the resulting matrices can be
#' of lower dimensions.
#'
#' See the example \url{https://onlineforecasting.org/examples/solar-power-forecasting.html} where the operator is used.
#'
#' @title Multiplication of list with y, elementwise
#' @param x a list of matrices, data.frames, etc.
#' @param y a vector, data.frame or matrix
#' @return A list of same length of x
#' @examples
#'
#' x <- list(matrix(1:9,3), matrix(9:1,3))
#' x
#'
#' y <- matrix(2,3,3)
#' y
#'
#' x %**% y
#'
#' y <- 1:3
#'
#' x %**% y
#'
#' # Naming peculiarity
#' nams(x[[1]]) <- c("k1","k2","k3")
#' nams(x[[2]]) <- c("k2","k3","k4")
#' y <- matrix(2,3,3)
#' nams(y) <- c("k1","k3","k7")
#'
#' # Now the only the horizons matching will be used
#' x %**% y
#'
#' @export
"%**%" <- function(x, y) {
    if( is.null(dim(y)) ){
        ## y is not matrix like (a plain vector or scalar): rely on R's
        ## elementwise recycling for each element of x.
        lapply(x, function(xx) {
            xx * y
        })
    }else{
        ## y is matrix like: multiply each element of x with y,
        ## aligning on the horizon (column) names when they differ.
        lapply(x, function(xx) {
            ## Columns match only when both the count and the names agree
            ## (same test as the original ncol/nams two-step check).
            colmatch <- ncol(xx) == ncol(y) && !any(nams(xx) != nams(y))
            if(!colmatch){
                ## Not same columns, take only the k in both; single-name
                ## subsetting may drop the result to a plain vector.
                nms <- nams(xx)[nams(xx) %in% nams(y)]
                xx <- xx[, nms]
                y <- y[, nms]
            }else{
                ## BUGFIX: nms used to be defined only in the mismatch
                ## branch, so the data.frame fallback below failed with
                ## "object 'nms' not found" whenever matching columns
                ## produced a dimensionless product.
                nms <- nams(xx)
            }
            ## Now multiply
            val <- xx * y
            ## Result must stay data.frame-like for downstream code.
            if( is.null(dim(val)) ){
                val <- data.frame(val)
                nams(val) <- nms
            }
            return(val)
        })
    }
}
|
3df9fdf1351c52b17ac96dacbf29b2a6967278ef
|
42cf1ccab678eb9a3b7df1e8b79b8addea0341ee
|
/man/recent_changes.Rd
|
cec02abbb19c8bee15ca7dae51182495a157f63a
|
[
"MIT"
] |
permissive
|
cran/WikipediR
|
936188140398c1ece227d72ea35d92e8c7da7e4c
|
36e90097daf8623df2fcce9819b548d6f8824846
|
refs/heads/master
| 2020-12-29T02:38:25.834033
| 2017-02-05T07:44:55
| 2017-02-05T07:44:55
| 18,895,732
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,037
|
rd
|
recent_changes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recent_changes.R
\name{recent_changes}
\alias{recent_changes}
\title{Retrieves entries from the RecentChanges feed}
\usage{
recent_changes(language = NULL, project = NULL, domain = NULL,
properties = c("user", "userid", "comment", "parsedcomment", "flags",
"timestamp", "title", "ids", "sizes", "redirect", "loginfo", "tags", "sha1"),
type = c("edit", "external", "new", "log"), tag = NULL, dir = "newer",
limit = 50, top = FALSE, clean_response = FALSE, ...)
}
\arguments{
\item{language}{The language code of the project you wish to query,
if appropriate.}
\item{project}{The project you wish to query ("wikiquote"), if appropriate.
Should be provided in conjunction with \code{language}.}
\item{domain}{as an alternative to a \code{language} and \code{project} combination,
you can also provide a domain ("rationalwiki.org") to the URL constructor, allowing
for the querying of non-Wikimedia MediaWiki instances.}
\item{properties}{Properties you're trying to retrieve about each entry, Options include
"user" (the username of the person responsible for that entry), "userid" (the userID of said
person), "comment" (the edit summary associated with the entry), "parsedcomment" (the same,
but parsed, generating HTML from any wikitext in that comment), "flags" (whether the revision
was 'minor' or not), "timestamp", "title" (the name of the page the entry affected), "ids"
(the page id, along with the old and new revision IDs when applicable) "sizes" (the size,
in uncompressed bytes, of the entry, and, in the case of revisions, the size of the edit
it displaced), "tags" (any tags associated with the revision) and "loginfo" (applicable only
to log entries, and consisting of log ID numbers, log types and actions, and so on) and "sha1"
(the SHA-1 hash of the revision text).}
\item{type}{The type of entry you want to retrieve; can be any permutation of "edit" (edits to existing pages),
"external" (external actions that impact on the project - primarily wikidata changes),
"new" (the creation of new pages) and "log" (log entries). By default, all of these entry types
are included.}
\item{tag}{Only return items with particular "tags", such as "mobile edit". NULL by
default.}
\item{dir}{Should it go from newest to oldest ("newer"), or oldest to newest ("older")?
By default, set to "newer".}
\item{limit}{The number of entries you'd like to return. By default, set to 50, which is
also the maximum number per-request for logged-out users.}
\item{top}{Should the request only return "top" entries - in other words, the most recent
entry on a page? Set to FALSE by default.}
\item{clean_response}{whether to do some basic sanitising of the resulting data structure.
Set to FALSE by default.}
\item{...}{further arguments to pass to httr's GET.}
}
\description{
wiki_recentchanges retrieves a stream of entries from Special:RecentChanges, with a variety of
associated metadata and filtering (of both entries *and* that metadata).
}
|
21ad55cfbfb33001ca92b5cea24045df5602673e
|
45e79381152047a7777d50271e38c726013be682
|
/man/getExperimentContainers.Rd
|
97f78151bd7122f82c2ab2e3bcc48f047e07df41
|
[] |
no_license
|
ceparman/Core5.3
|
e13530d3fd7a457ee39935400766badddc5120bd
|
de8f6e946ca159332979807eba997b5efbf123d4
|
refs/heads/master
| 2020-03-23T08:08:06.929458
| 2019-02-06T14:24:36
| 2019-02-06T14:24:36
| 141,309,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,257
|
rd
|
getExperimentContainers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getExperimentContainers.R
\name{getExperimentContainers}
\alias{getExperimentContainers}
\title{getExperimentContainers - Gets experiment containers from experiment identified by barcode.}
\usage{
getExperimentContainers(coreApi, experimentType, barcode, useVerbose = FALSE)
}
\arguments{
\item{coreApi}{coreApi object with valid jsessionid}
\item{experimentType}{experiment entity type to get}
\item{barcode}{barcode of experiment to query}
\item{useVerbose}{TRUE or FALSE to indicate if verbose options should be used in http POST}
}
\value{
returns a list $entity contains barcodes of the containers, $response contains the entire http response
}
\description{
\code{getExperimentContainers} Gets experiment containers from experiment identified by experiment barcode.
}
\details{
\code{getExperimentContainers} Gets experiment containers from experiment identified by barcode.
}
\examples{
\dontrun{
api<-CoreAPIV2::CoreAPI("PATH TO JSON FILE")
login<- CoreAPIV2::authBasic(api)
exptContainerBarcodes <- CoreAPIV2::getExperimentContainers(login$coreApi,"entityType","barcode")
CoreAPIV2::logOut(login$coreApi)
}
}
\author{
Craig Parman ngsAnalytics, ngsanalytics.com
}
|
3e2a2697610d828ff4873c7e07a74d2b33074557
|
bf380c13cfeb3fa08ee3d54d79f2fbaac0cc50b9
|
/Week1/UsingR.R
|
39ac92a56b146e82c8e02dcdfe3ce1918dd9c08e
|
[] |
no_license
|
vitorpbarbosa7/RegressionModels
|
7b2d4a82091543d5221b35752df0ee14dc87ed6b
|
70158d2a62cff63e14a096de0c0df976cd972f7f
|
refs/heads/master
| 2023-03-11T04:32:32.713550
| 2021-02-26T16:47:24
| 2021-02-26T16:47:24
| 288,875,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
UsingR.R
|
## Side-by-side histograms of the two variables in Galton's 1885
## parent/child height data (UsingR::galton), reshaped to long format
## with reshape::melt so ggplot2 can facet on the variable.
library(UsingR)
# Load the data -----------------------------------------------------------
# Data used by Galton in 1885
data("galton")
library(reshape)
long = melt(galton)
## One histogram panel per variable, bin width of 1.
g = ggplot(long, aes(x = value, fill = variable)) +
geom_histogram(colour = "black", binwidth = 1) +
facet_grid(.~variable)
|
f9f09aa9baf974963d00c00548e7567a2e2537b2
|
8a2b0cab64ac5f28bedfb06684774b2464bfa87c
|
/functions/PAF.R
|
861e87058a111d382a20e272b15b7d3eea96d98b
|
[] |
no_license
|
walkabillylab/ITHIM-R
|
037264528ffef905a8c9b32c4f9500d8601bae63
|
d6809907950af715a68d03c4a4dcd6851170994e
|
refs/heads/master
| 2020-04-02T06:13:15.504959
| 2018-11-06T15:55:35
| 2018-11-06T15:55:35
| 154,136,680
| 0
| 0
| null | 2018-11-06T15:55:37
| 2018-10-22T12:06:04
|
HTML
|
UTF-8
|
R
| false
| false
| 173
|
r
|
PAF.R
|
## Population-attributable-fraction helper: for every row of `mat` (a
## sex/age lookup table), sum the `cn` column of `pop` over the records
## whose first two columns match that row. Returns one sum per row of
## `mat`, in row order.
##!! hard coding of indices: pop column 1 = sex, column 2 = age or age_cat
PAF <- function(pop, cn, mat){
  values <- pop[[cn]]
  apply(mat, 1, function(grp) {
    in_group <- pop[[1]] == grp[1] & pop[[2]] == grp[2]
    sum(values[in_group])
  })
}
|
be363b0b6002d7b8dcd9fdf38584f6cc7c8edeec
|
3dad5087fae24d0dd09803e7dd0e8511f96dac76
|
/man/do_the_perms.Rd
|
b8fe16f4f7e386ac11790adb456d9366c4c82cf3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
eriqande/inbredPermute
|
badecc3a6b955d8f5aa1842b44b6a588d53e6991
|
dfc9ff6085199187f8b21c8cc3050b3f7999800d
|
refs/heads/master
| 2020-03-30T02:55:29.136330
| 2014-10-22T19:04:15
| 2014-10-22T19:04:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
rd
|
do_the_perms.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{do_the_perms}
\alias{do_the_perms}
\title{function to do the permutations}
\usage{
do_the_perms(kg_file, trio_file, meta_file, REPS = 1000,
ItalianCommas = FALSE)
}
\description{
function to do the permutations
}
|
16703ab9668ed1105c86f02e83c4ea5427e3bbb8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/seewave/examples/simspec.Rd.R
|
369eddddc54717e3500a750732a3bd46d1ba307e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 987
|
r
|
simspec.Rd.R
|
## Example script from seewave's simspec() help page: similarity between
## frequency spectra of synthetic tones and white noise, plus a
## mel-scale comparison of the bundled orni/tico bird recordings.
library(seewave)
### Name: simspec
### Title: Similarity between two frequency spectra
### Aliases: simspec
### Keywords: dplot ts
### ** Examples
## Two white-noise signals and two pure tones (2 kHz and 1 kHz),
## 1 second each at an 8 kHz sampling rate.
a<-noisew(f=8000,d=1)
b<-synth(f=8000,d=1,cf=2000)
c<-synth(f=8000,d=1,cf=1000)
d<-noisew(f=8000,d=1)
## Frequency spectra taken at t = 0.5 s, without plotting.
speca<-spec(a,f=8000,at=0.5,plot=FALSE)
specb<-spec(b,f=8000,at=0.5,plot=FALSE)
specc<-spec(c,f=8000,at=0.5,plot=FALSE)
specd<-spec(d,f=8000,at=0.5,plot=FALSE)
## Similarity of a spectrum with itself, then pairwise comparisons.
simspec(speca,speca)
simspec(speca,specb)
simspec(speca,specc,plot=TRUE)
simspec(specb,specc,plot=TRUE)
#[1] 12.05652
simspec(speca,specd,plot=TRUE)
## mel scale: compare mean mel auditory spectra of the two recordings.
require(tuneR)
data(orni)
data(tico)
orni.mel <- melfcc(orni, nbands = 256, dcttype = "t3", fbtype = "htkmel", spec_out=TRUE)
orni.mel.mean <- apply(orni.mel$aspectrum, MARGIN=2, FUN=mean)
tico.mel <- melfcc(tico, nbands = 256, dcttype = "t3", fbtype = "htkmel", spec_out=TRUE)
tico.mel.mean <- apply(tico.mel$aspectrum, MARGIN=2, FUN=mean)
simspec(orni.mel.mean, tico.mel.mean, f=22050, mel=TRUE, plot=TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.